diff --git a/.bundle/config b/.bundle/config
new file mode 100644
index 0000000..d137d24
--- /dev/null
+++ b/.bundle/config
@@ -0,0 +1,2 @@
+BUNDLE_PATH: "vendor/bundle"
+BUNDLE_FORCE_RUBY_PLATFORM: 1
\ No newline at end of file
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000..34feffb
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,339 @@
+version: 2.1
+
+orbs:
+  gh: circleci/github-cli@2.0
+
+parameters:
+  version:
+    type: string
+    default: ""
+  platform:
+    type: string
+    default: ""
+  git_ref:
+    type: string
+    default: ""
+  gcs_directory:
+    type: string
+    default: ""
+  build_number:
+    type: string
+    default: ""
+  task:
+    type: string
+    default: ""
+  gcs_url:
+    type: string
+    default: ""
+
+jobs:
+  test_android_in_pr:
+    docker:
+      - image: cimg/android:2022.12
+    resource_class: xlarge
+    environment:
+      TERM: dumb
+      JAVA_OPTS: -Xms512m -Xmx2g
+      GRADLE_OPTS: -Xmx3g -Dorg.gradle.daemon=false -Dorg.gradle.jvmargs="-Xmx2g -XX:+HeapDumpOnOutOfMemoryError"
+    working_directory: ~/galoy-mobile
+    shell: /bin/bash --login -o pipefail
+    steps:
+      # if the workflow was triggered by the API then don't run the test jobs
+      - run: |
+          if [ << pipeline.trigger_source >> = "api" ]; then
+            circleci-agent step halt
+          fi
+      - checkout:
+          path: ~/galoy-mobile
+      - run: sudo apt-get update
+      - run: curl -sL https://deb.nodesource.com/setup_18.x | sudo -E bash -
+      - run: sudo apt-get install -y gnupg2 gcc g++ make nodejs jq
+      - run: sudo npm install -g yarn
+      - run: gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
+
+      - restore_cache:
+          key: 1-gem-{{ checksum "android/Gemfile.lock" }}
+      - run: cd android && bundle config set deployment 'true'
+      - run: cd android && bundle check || bundle install
+      - save_cache:
+          key: 1-gem-{{ checksum "android/Gemfile.lock" }}
+          paths:
+            - android/vendor
+
+      - restore_cache:
+          key: yarn-{{ checksum "yarn.lock" }}
+      - run: yarn install
+      - save_cache:
+          key: yarn-{{ checksum "yarn.lock" }}
+          paths:
+            - node_modules
+      - run: echo $RELEASE_KEYSTORE | base64 -d > android/app/release.keystore
+      - run: yarn android:prepareAssets
+      - run:
+          name: Test Browserstack
+          command: |
+            set -o pipefail
+            cd android && bundle exec fastlane browserstack | tee browserstack_output.log
+            error_code=$?
+ SESSION_ID=$(cat browserstack_output.log | grep sessionId | head -n1 | sed -n "s/^.*'\(.*\)'.*$/\1/ p") + echo "Session ID" + echo $SESSION_ID + VIDEO_URL=$(curl -s -u "$BROWSERSTACK_USER:$BROWSERSTACK_ACCESS_KEY" -X GET "https://api-cloud.browserstack.com/app-automate/sessions/$SESSION_ID.json" | jq -r '.automation_session.video_url') + echo "Video URL" + echo $VIDEO_URL + exit $error_code + + test_ios_in_pr: + macos: + xcode: 14.2.0 + working_directory: ~/galoy-mobile + environment: + FL_OUTPUT_DIR: output + shell: /bin/bash --login -o pipefail + steps: + # if workflow was triggered by API then don't run the test jobs + - run: | + if [ << pipeline.trigger_source >> = "api" ]; then + circleci-agent step halt + fi + - checkout: + path: ~/galoy-mobile + + - run: + name: Check Ruby version + command: | + rbenv versions + echo "Ruby version in .ruby-version:" + cat .ruby-version + echo "Ruby version in Gemfile:" + grep -E "^ruby" Gemfile + + - run: + name: Install Bundler 2.2.30 + command: | + gem install bundler:2.2.30 + echo "export BUNDLE_PATH=$(bundle show --path)" >> $BASH_ENV + source $BASH_ENV + + - add_ssh_keys: + fingerprints: + - "19:7e:f3:6c:be:a7:17:01:7d:09:ca:39:c3:98:86:90" + - restore_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + - run: cd ios && bundle config set deployment 'true' + - run: cd ios && bundle config set --local path 'vendor/bundle' + - run: cd ios && bundle check || bundle install + - run: gem install cocoapods + - save_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + paths: + - ios/vendor + - restore_cache: + key: 1-yarn-{{ checksum "yarn.lock" }}-pod1-{{ checksum "ios/Podfile.lock" }} + - run: yarn install + - save_cache: + key: 1-yarn-{{ checksum "yarn.lock" }}-pod1-{{ checksum "ios/Podfile.lock" }} + paths: + - node_modules + - ios/Pods + - run: + name: Browserstack Testing + command: | + set -o pipefail + cd ios && bundle exec fastlane browserstack | tee browserstack_output.log + error_code=$? 
+ SESSION_ID=$(cat browserstack_output.log | grep sessionId | head -n1 | sed -n "s/^.*'\(.*\)'.*$/\1/ p") + echo "Session ID" + echo $SESSION_ID + VIDEO_URL=$(curl -s -u "$BROWSERSTACK_USER:$BROWSERSTACK_ACCESS_KEY" -X GET "https://api-cloud.browserstack.com/app-automate/sessions/$SESSION_ID.json" | jq -r '.automation_session.video_url') + echo "Video URL" + echo $VIDEO_URL + exit $error_code + no_output_timeout: 15m + + build_android: + docker: + - image: cimg/android:2022.12 + resource_class: xlarge + environment: + TERM: dumb + JAVA_OPTS: -Xms2g -Xmx4g + GRADLE_OPTS: -Dorg.gradle.daemon=false -Dorg.gradle.jvmargs="-Xmx4g -XX:+HeapDumpOnOutOfMemoryError" + PUBLIC_VERSION: << pipeline.parameters.version >> + BUILD_NUMBER: << pipeline.parameters.build_number >> + GCS_DIRECTORY: << pipeline.parameters.gcs_directory >> + working_directory: ~/galoy-mobile + shell: /bin/bash --login -o pipefail + steps: + # if workflow was triggered by github then don't run the test jobs + - run: | + if [ << pipeline.trigger_source >> = "webhook" ]; then + circleci-agent step halt + fi + - gh/install + - checkout: + path: ~/galoy-mobile + - run: git checkout << pipeline.parameters.git_ref >> + - run: sudo apt-get update + - run: curl -sL https://deb.nodesource.com/setup_18.x | sudo -E bash - + - run: sudo apt-get install -y nodejs + - run: sudo npm install -g yarn + - run: gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB + - run: echo $GCLOUD_BUCKET_KEY | base64 --decode > key.json + - run: gcloud auth activate-service-account --key-file key.json + + - run: cd android && bundle config set deployment 'true' + - run: cd android && bundle check || bundle install + + - restore_cache: + key: 2-yarn-{{ checksum "yarn.lock" }}-android + - run: yarn install + - save_cache: + key: 2-yarn-{{ checksum "yarn.lock" }}-android + paths: + - node_modules + - run: echo $JAVA_OPTS + - run: echo $GRADLE_OPTS + - run: echo $RELEASE_KEYSTORE | base64 -d > android/app/release.keystore + - run: + name: build + command: | + cd android + sed -i'' -e "s/versionCode .*$/versionCode $BUILD_NUMBER/g" app/build.gradle + bundle exec fastlane android build 2>&1 | tee android_build_output.log + no_output_timeout: 15m + - run: + name: upload to gcs + command: gsutil cp -r android/app/build/outputs/* gs://galoy-build-artifacts/galoy-mobile/$GCS_DIRECTORY/galoy-mobile-$(date +%s)-v${PUBLIC_VERSION}/ + - store_artifacts: + path: android/android_build_output.log + + build_ios: + macos: + xcode: 14.2.0 + resource_class: macos.x86.medium.gen2 + environment: + PUBLIC_VERSION: << pipeline.parameters.version >> + BUILD_NUMBER: << pipeline.parameters.build_number >> + GCS_DIRECTORY: << pipeline.parameters.gcs_directory >> + working_directory: ~/galoy-mobile + shell: /bin/bash --login -o pipefail + steps: + # if workflow was triggered by github then don't run the test jobs + - run: | + if [ << pipeline.trigger_source >> = "webhook" ]; then + circleci-agent step halt + fi + - gh/install + - checkout: + path: ~/galoy-mobile + - run: git checkout << pipeline.parameters.git_ref >> + - run: + name: Install Bundler 2.2.30 + command: | + gem install bundler:2.2.30 + echo "export BUNDLE_PATH=$(bundle show --path)" >> $BASH_ENV + source $BASH_ENV + - add_ssh_keys: + fingerprints: + - "19:7e:f3:6c:be:a7:17:01:7d:09:ca:39:c3:98:86:90" + - run: HOMEBREW_NO_AUTO_UPDATE=1 brew install --cask google-cloud-sdk + - run: echo $GCLOUD_BUCKET_KEY | base64 --decode > key.json + - run: gcloud 
auth activate-service-account --key-file key.json + - restore_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + - run: cd ios && bundle config set deployment 'true' + - run: cd ios && bundle config set --local path 'vendor/bundle' + - run: cd ios && bundle check || bundle install + - run: gem install cocoapods + - save_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + paths: + - ios/vendor + - restore_cache: + key: 1-yarn-{{ checksum "yarn.lock" }}-pod1-{{ checksum "ios/Podfile.lock" }} + - run: yarn install + - save_cache: + key: 1-yarn-{{ checksum "yarn.lock" }}-pod1-{{ checksum "ios/Podfile.lock" }} + paths: + - node_modules + - ios/Pods + - run: + name: build + command: | + cd ios + sed -i'' -e "s/MARKETING_VERSION.*/MARKETING_VERSION = $PUBLIC_VERSION;/g" GaloyApp.xcodeproj/project.pbxproj + bundle exec fastlane build 2>&1 | tee ios_build_output.log + no_output_timeout: 15m + - run: + name: upload to gcs + command: | + timestamp=$(date +%s) + gsutil cp -r ~/galoy-mobile/ios/Bitcoin\ Beach.ipa gs://galoy-build-artifacts/galoy-mobile/$GCS_DIRECTORY/galoy-mobile-$timestamp-v${PUBLIC_VERSION}/ + gsutil cp -r ~/galoy-mobile/ios/Bitcoin\ Beach.app.dSYM.zip gs://galoy-build-artifacts/galoy-mobile/$GCS_DIRECTORY/galoy-mobile-$timestamp-v${PUBLIC_VERSION}/ + - store_artifacts: + path: ios/ios_build_output.log + + upload_to_app_store: + macos: + xcode: 14.2.0 + resource_class: macos.x86.medium.gen2 + environment: + GCS_URL: << pipeline.parameters.gcs_url >> + working_directory: ~/galoy-mobile + shell: /bin/bash --login -o pipefail + steps: + # if workflow was triggered by github then don't run the test jobs + - run: | + if [ << pipeline.trigger_source >> = "webhook" ]; then + circleci-agent step halt + fi + - gh/install + - checkout: + path: ~/galoy-mobile + - run: HOMEBREW_NO_AUTO_UPDATE=1 brew install --cask google-cloud-sdk + - run: echo $GCLOUD_BUCKET_KEY | base64 --decode > key.json + - run: gcloud auth activate-service-account --key-file key.json + - run: git checkout << pipeline.parameters.git_ref >> + - run: cd ios && gsutil cp -r "$GCS_URL" . 
+ - restore_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + - run: cd ios && bundle check || bundle install + - save_cache: + key: 1-gem-{{ checksum "ios/Gemfile.lock" }} + paths: + - ios/vendor + - run: cd ios && bundle exec fastlane app_store_upload + +workflows: + build_android_and_upload_to_bucket: + when: + equal: [android, << pipeline.parameters.platform >>] + jobs: + - build_android + build_ios_and_upload_to_bucket: + when: + equal: [ios, << pipeline.parameters.platform >>] + jobs: + - build_ios + upload_to_app_store: + when: + and: + - equal: [upload_to_app_store, << pipeline.parameters.task >>] + - equal: ["api", << pipeline.trigger_source >>] + jobs: + - upload_to_app_store + test_in_pr: + when: + not: + equal: [main, << pipeline.git.branch >>] + jobs: + - test_android_in_pr + - test_ios_in_pr diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..7c28613 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,3 @@ +# Windows files +[*.bat] +end_of_line = crlf diff --git a/.eslintrc.json b/.eslintrc.json new file mode 100644 index 0000000..24c2acc --- /dev/null +++ b/.eslintrc.json @@ -0,0 +1,170 @@ +{ + "env": { + "es2021": true, + "react-native/react-native": true, + "jest/globals": true + }, + "ignorePatterns": [".storybook/storybook.requires.js", "./patches", "**/GT3Captcha.bundle/resource.js"], + "parser": "@typescript-eslint/parser", + "parserOptions": { + "ecmaFeatures": { + "jsx": true + }, + "ecmaVersion": 12 + }, + "plugins": ["react", "react-hooks", "react-native", "@typescript-eslint", "import", "prettier", "jest"], + "extends": [ + "eslint:recommended", + "plugin:react/recommended", + "plugin:react-hooks/recommended", + "plugin:react-native/all", + "plugin:@typescript-eslint/recommended", + "prettier", + "plugin:prettier/recommended" + ], + "rules": { + "@typescript-eslint/no-extra-semi": "off", // Prettier work + "@typescript-eslint/no-unused-vars": [ + "error", + { + "args": "after-used", + "ignoreRestSiblings": true, + "varsIgnorePattern": "^_", + "argsIgnorePattern": "^_" + } + ], + "@typescript-eslint/prefer-for-of": "error", + "@typescript-eslint/unified-signatures": "error", + + "@typescript-eslint/explicit-module-boundary-types": "off", + "@typescript-eslint/no-empty-function": "off", + "@typescript-eslint/no-explicit-any": "error", + + "import/no-extraneous-dependencies": "error", + "import/no-unresolved": "off", + "import/prefer-default-export": "off", + + "prettier/prettier": [ + "error", + { + "semi": false, + "trailingComma": "all", + "printWidth": 90, + "quoteProps": "consistent", + "singleQuote": false, + "tabWidth": 2, + "useTabs": false, + "bracketSpacing": true, + "arrowParens": "always", + "proseWrap": "preserve", + "endOfLine": "lf" + } + ], + + "react/no-unescaped-entities": ["error", { "forbid": [">", "}"] }], + "react/prop-types": "off", + "react/react-in-jsx-scope": "off", + + "react-native/sort-styles": "off", + "react-native/no-raw-text": "off", + + "accessor-pairs": "error", + "array-callback-return": "error", + "block-scoped-var": "error", + "camelcase": "error", + "consistent-this": "error", + "default-case-last": "error", + "default-param-last": "error", + "dot-notation": "error", + "eqeqeq": "error", + "func-name-matching": "error", + "func-names": "error", + "grouped-accessor-pairs": "error", + "guard-for-in": "error", + "max-depth": ["error", { "max": 5 }], + "max-lines-per-function": ["error", { "max": 750 }], + "max-nested-callbacks": ["error", { "max": 4 }], + "max-params": ["error", { "max": 3 }], + 
"max-statements-per-line": ["error", { "max": 2 }], + "max-statements": ["error", { "max": 100 }], + "no-alert": "error", + "no-array-constructor": "error", + "no-bitwise": "error", + "no-caller": "error", + "no-constructor-return": "error", + "no-continue": "error", + "no-div-regex": "error", + "no-duplicate-imports": "error", + "no-else-return": "error", + "no-eq-null": "error", + "no-eval": "error", + "no-extend-native": "error", + "no-extra-bind": "error", + "no-implicit-coercion": "error", + "no-implicit-globals": "error", + "no-implied-eval": "error", + "no-invalid-this": "error", + "no-iterator": "error", + "no-labels": "error", + "no-lone-blocks": "error", + "no-lonely-if": "error", + "no-loop-func": "error", + "no-loss-of-precision": "error", + "no-multi-assign": "error", + "no-negated-condition": "error", + "no-new-func": "error", + "no-new-object": "error", + "no-new-wrappers": "error", + "no-new": "error", + "no-nonoctal-decimal-escape": "error", + "no-octal-escape": "error", + "no-param-reassign": "error", + "no-plusplus": "error", + "no-promise-executor-return": "error", + "no-proto": "error", + "no-return-assign": "error", + "no-return-await": "error", + "no-script-url": "error", + "no-self-compare": "error", + "no-sequences": "error", + "no-template-curly-in-string": "error", + "no-throw-literal": "error", + "no-unmodified-loop-condition": "error", + "no-unneeded-ternary": "error", + "no-unreachable-loop": "error", + "no-unsafe-optional-chaining": "error", + "no-useless-call": "error", + "no-useless-computed-key": "error", + "no-useless-concat": "error", + "no-useless-constructor": "error", + "no-useless-rename": "error", + "no-useless-return": "error", + "no-var": "error", + "no-void": "error", + "object-shorthand": "error", + "prefer-const": ["error", { "destructuring": "all" }], + "prefer-numeric-literals": "error", + "prefer-object-spread": "error", + "prefer-promise-reject-errors": "error", + "prefer-rest-params": "error", + "prefer-spread": "error", + "radix": "error", + "require-atomic-updates": "error", + "spaced-comment": "error", + "symbol-description": "error", + "yoda": "error", + + "jest/no-disabled-tests": "warn", + "jest/no-focused-tests": "error", + "jest/no-identical-title": "error", + "jest/prefer-to-have-length": "warn", + "jest/valid-expect": "error", + "jest/no-async-promise-executor": "off" + + }, + "settings": { + "react": { + "version": "detect" + } + } +} diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..2161b4a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +*.pbxproj -text +*.bat text eol=crlf \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..f3c800b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,33 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "" +labels: "" +assignees: "" +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: + +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Smartphone (please complete the following information):** + +- Device: [e.g. iPhone6] +- OS: [e.g. iOS8.1] +- Version [e.g. 22] + +**Additional context** +Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..2bc5d5f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,19 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "" +labels: "" +assignees: "" +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..8abca40 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "npm" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "daily" diff --git a/.github/workflows/check-code.yml b/.github/workflows/check-code.yml new file mode 100644 index 0000000..ccf0e91 --- /dev/null +++ b/.github/workflows/check-code.yml @@ -0,0 +1,16 @@ +name: "Check code" +on: + pull_request: + branches: [main] +jobs: + check-code: + name: Check Code + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-node@v3 + with: + node-version: 18 + - run: yarn install + - name: Run check code + run: yarn check-code diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..f3a331c --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,76 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "main" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "main" ] + schedule: + - cron: '25 16 * * 3' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'javascript' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. 
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v2
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+
+        # For details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+        # queries: security-extended,security-and-quality
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v2
+
+    # ℹī¸ Command-line programs to run using the OS shell.
+    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+    # If the Autobuild fails above, remove it and uncomment the following three lines,
+    # modifying them (or adding more) to build your code as your project requires; refer to the example below for guidance.
+
+    # - run: |
+    #     echo "Run, Build Application using script"
+    #     ./location_of_script_within_repo/buildscript.sh
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v2
+      with:
+        category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/conventional.yaml b/.github/workflows/conventional.yaml
new file mode 100644
index 0000000..91b8e12
--- /dev/null
+++ b/.github/workflows/conventional.yaml
@@ -0,0 +1,16 @@
+name: "Conventional commits"
+on:
+  pull_request:
+    branches: [main]
+jobs:
+  conventional:
+    name: "Conventional commits"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Conventional PR
+        uses: Namchee/conventional-pr@v0.12.1
+        with:
+          access_token: ${{ secrets.GITHUB_TOKEN }}
+          message: this PR needs to be updated to follow the Conventional Commits message format
+          body: false
+          issue: false
diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml
new file mode 100644
index 0000000..a8c9f97
--- /dev/null
+++ b/.github/workflows/spelling.yml
@@ -0,0 +1,16 @@
+name: Spelling
+on:
+  pull_request:
+    branches: [main]
+
+jobs:
+  spelling:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v3
+      - name: Spell Check Repo
+        uses: crate-ci/typos@master
+        with:
+          config: typos.toml
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..0d0756e
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,16 @@
+name: "Test"
+on:
+  pull_request:
+    branches: [main]
+jobs:
+  test:
+    name: Tests
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-node@v3
+        with:
+          node-version: 18
+      - run: yarn install
+      - name: Run test
+        run: yarn test
diff --git a/.github/workflows/update_pods.yml b/.github/workflows/update_pods.yml
new file mode 100644
index 0000000..0c22a57
--- /dev/null
+++ b/.github/workflows/update_pods.yml
@@ -0,0 +1,38 @@
+name: Update Cocoapods Dependencies
+on:
+  push:
+    branches:
+      - dependabot/npm_and_yarn/**
+  pull_request:
+    branches:
+      - dependabot/npm_and_yarn/**
+
+jobs:
+  run:
+    name: Run pod install
+    runs-on: macos-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          token: ${{ secrets.GT }}
+          ref: ${{ github.head_ref }}
+
+      - name: Install Packages
run: yarn install --non-interactive --frozen-lockfile + + - name: Cache pods + uses: actions/cache@v1 + with: + path: ios/Pods + key: ${{ runner.os }}-pods-${{ hashFiles('**/Podfile.lock') }} + restore-keys: | + ${{ runner.os }}-pods- + + - name: Install Cocoapods Packages + run: pushd ios && pod install --repo-update --verbose && popd + + - uses: stefanzweifel/git-auto-commit-action@v4.1.1 + with: + commit_message: Bump Cocoapods Packages + branch: ${{ github.head_ref }} + token: ${{ secrets.GT }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5361ffe --- /dev/null +++ b/.gitignore @@ -0,0 +1,87 @@ +# OSX +# +.DS_Store + +# Xcode +# +build/ +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata +*.xccheckout +*.moved-aside +DerivedData +*.hmap +*.ipa +*.xcuserstate +project.xcworkspace + +# Android/IntelliJ +# +build/ +.idea +.gradle +local.properties +*.iml +*.hprof +.cxx/ + +# node.js +# +node_modules/ +npm-debug.log +yarn-error.log + +# BUCK +buck-out/ +\.buckd/ +*.keystore + +# VS Code +.vscode + +# ios +ios/Pods +ios/Xcode.app +ios/.bundle +ios/vendor + +# Bundle artifact +*.jsbundle + + +ios/GaloyApp.app.dSYM.zip +ios/AppStore_io.galoy.app.mobileprovision +ios/fastlane/report.xml +app/services/lnd/generated/* +android/api-8350101450647692243-59029-b9bec84a7e5a.json +android/fastlane/report.xml +android/fastlane/metadata/android/en-US/ +android/app/release/ + +galoy.code-workspace + +.phrase.yml +ios/GoogleService-Info.plist + +.yalc +yalc.lock + +*.log + +.env +.envrc +.dependencies +ios/assets +ios/GaloyApp/assets +android/app/src/main/assets +android/app/src/main/res + +# Temporary files created by Metro to check the health of the file watcher +.metro-health-check* diff --git a/.node-version b/.node-version new file mode 100644 index 0000000..3c03207 --- /dev/null +++ b/.node-version @@ -0,0 +1 @@ +18 diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..41961e3 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,10 @@ +node_modules +ios +android +.vscode +ignite/ignite.json +package.json +__generated__ +app/components/price-graph/data-tst.ts +.yalc +app/i18n/**/* \ No newline at end of file diff --git a/.readme/OurOLogo1.png b/.readme/OurOLogo1.png new file mode 100644 index 0000000..af59caa Binary files /dev/null and b/.readme/OurOLogo1.png differ diff --git a/.readme/OurOLogo1.xcf b/.readme/OurOLogo1.xcf new file mode 100644 index 0000000..d615102 Binary files /dev/null and b/.readme/OurOLogo1.xcf differ diff --git a/.readme/OurOLogo250.png b/.readme/OurOLogo250.png new file mode 100644 index 0000000..09bb6b2 Binary files /dev/null and b/.readme/OurOLogo250.png differ diff --git a/.readme/galoy-logo.png b/.readme/galoy-logo.png new file mode 100644 index 0000000..0034ba0 Binary files /dev/null and b/.readme/galoy-logo.png differ diff --git a/.readme/screenshot-1.png b/.readme/screenshot-1.png new file mode 100644 index 0000000..7de75d4 Binary files /dev/null and b/.readme/screenshot-1.png differ diff --git a/.readme/screenshot-2.png b/.readme/screenshot-2.png new file mode 100644 index 0000000..b2151db Binary files /dev/null and b/.readme/screenshot-2.png differ diff --git a/.readme/screenshot-3.png b/.readme/screenshot-3.png new file mode 100644 index 0000000..a286a71 Binary files /dev/null and b/.readme/screenshot-3.png differ diff --git a/.readme/screenshot-4.png b/.readme/screenshot-4.png new file mode 100644 index 0000000..deba96f Binary 
files /dev/null and b/.readme/screenshot-4.png differ diff --git a/.ruby-version b/.ruby-version new file mode 100644 index 0000000..62ae32d --- /dev/null +++ b/.ruby-version @@ -0,0 +1 @@ +2.7.7 \ No newline at end of file diff --git a/.solidarity b/.solidarity new file mode 100644 index 0000000..582c35e --- /dev/null +++ b/.solidarity @@ -0,0 +1,31 @@ +{ + "$schema": "http://json.schemastore.org/solidaritySchema", + "requirements": { + "Node": [{ "rule": "cli", "binary": "node", "semver": ">=8.6.0" }], + "React Native": [ + { + "rule": "cli", + "binary": "react-native", + "semver": ">=2.0.1" + } + ], + "Xcode": [ + { + "rule": "cli", + "binary": "xcodebuild", + "version": "-version", + "semver": ">=9.2.0", + "platform": "darwin" + } + ], + "CocoaPods": [ + { + "rule": "cli", + "binary": "pod", + "version": "--version", + "semver": ">=1.7.0", + "platform": "darwin" + } + ] + } +} diff --git a/.storybook/index.ts b/.storybook/index.ts new file mode 100644 index 0000000..4a0399a --- /dev/null +++ b/.storybook/index.ts @@ -0,0 +1,4 @@ +// this is the native storybook entry point +// import { StorybookUI } from "./config" + +export * from "./storybook" diff --git a/.storybook/main.js b/.storybook/main.js new file mode 100644 index 0000000..c16c972 --- /dev/null +++ b/.storybook/main.js @@ -0,0 +1,22 @@ +/** + * Parsed by sb-rn-get-stories to auto generate storybook.requires.js + * ES6 mods not (yet) supported, for e.g. + * main.{ts/mjs}: export default { ... } + */ +module.exports = { + addons: [ + /** + * In Storybook v5.3.x, we'd register both device and deviceless addons in rn-addons.js + * Now in Storybook v6.0b, sb-rn-get-stories auto generates them to storybook.requires.js + * Deviceless v6.5.x addons supported, deviceless v7.0b addons not yet supported + * @deprecated @storybook/addon-notes (now @storybook/addon-ondevice-notes) + * @deprecated @storybook/addon-ondevice-knobs, https://github.com/storybookjs/react-native/pull/406 + */ + '@storybook/addon-ondevice-notes', + '@storybook/addon-ondevice-controls', + '@storybook/addon-ondevice-knobs', + '@storybook/addon-ondevice-backgrounds', + '@storybook/addon-ondevice-actions' + ], + stories: ['../app/**/*.stories.?(ts|tsx)'] +} diff --git a/.storybook/storybook.requires.js b/.storybook/storybook.requires.js new file mode 100644 index 0000000..83e6d0d --- /dev/null +++ b/.storybook/storybook.requires.js @@ -0,0 +1,79 @@ +/* do not change this file, it is auto generated by storybook. 
*/ + +import { + configure, + addDecorator, + addParameters, + addArgsEnhancer, + clearDecorators, +} from "@storybook/react-native"; + +global.STORIES = [ + { + titlePrefix: "", + directory: "./app", + files: "**/*.stories.?(ts|tsx)", + importPathMatcher: + "^\\.[\\\\/](?:app(?:\\/(?!\\.)(?:(?:(?!(?:^|\\/)\\.).)*?)\\/|\\/|$)(?!\\.)(?=.)[^/]*?\\.stories\\.(?:ts|tsx)?)$", + }, +]; + +import "@storybook/addon-ondevice-notes/register"; +import "@storybook/addon-ondevice-controls/register"; +import "@storybook/addon-ondevice-knobs/register"; +import "@storybook/addon-ondevice-backgrounds/register"; +import "@storybook/addon-ondevice-actions/register"; + +import { argsEnhancers } from "@storybook/addon-actions/dist/modern/preset/addArgs"; + +try { + argsEnhancers.forEach((enhancer) => addArgsEnhancer(enhancer)); +} catch {} + +const getStories = () => { + return { + "./app/components/amount-input-screen/amount-input-screen.stories.tsx": require("../app/components/amount-input-screen/amount-input-screen.stories.tsx"), + "./app/components/amount-input/amount-input.stories.tsx": require("../app/components/amount-input/amount-input.stories.tsx"), + "./app/components/app-update/app-update.stories.tsx": require("../app/components/app-update/app-update.stories.tsx"), + "./app/components/atomic/galoy-button-field/galoy-button-field.stories.tsx": require("../app/components/atomic/galoy-button-field/galoy-button-field.stories.tsx"), + "./app/components/atomic/galoy-currency-bubble/galoy-currency-bubble.stories.tsx": require("../app/components/atomic/galoy-currency-bubble/galoy-currency-bubble.stories.tsx"), + "./app/components/atomic/galoy-icon-button/galoy-icon-button.stories.tsx": require("../app/components/atomic/galoy-icon-button/galoy-icon-button.stories.tsx"), + "./app/components/atomic/galoy-icon/galoy-icon.stories.tsx": require("../app/components/atomic/galoy-icon/galoy-icon.stories.tsx"), + "./app/components/atomic/galoy-input/galoy-input.stories.tsx": require("../app/components/atomic/galoy-input/galoy-input.stories.tsx"), + "./app/components/atomic/galoy-primary-button/galoy-primary-button.stories.tsx": require("../app/components/atomic/galoy-primary-button/galoy-primary-button.stories.tsx"), + "./app/components/atomic/galoy-secondary-button/galoy-secondary-button.stories.tsx": require("../app/components/atomic/galoy-secondary-button/galoy-secondary-button.stories.tsx"), + "./app/components/atomic/galoy-tertiary-button/galoy-tertiary-button.stories.tsx": require("../app/components/atomic/galoy-tertiary-button/galoy-tertiary-button.stories.tsx"), + "./app/components/atomic/galoy-warning/galoy-warning.stories.tsx": require("../app/components/atomic/galoy-warning/galoy-warning.stories.tsx"), + "./app/components/balance-header/balance-header.stories.tsx": require("../app/components/balance-header/balance-header.stories.tsx"), + "./app/components/currency-keyboard/currency-keyboard.stories.tsx": require("../app/components/currency-keyboard/currency-keyboard.stories.tsx"), + "./app/components/large-button/large-button.stories.tsx": require("../app/components/large-button/large-button.stories.tsx"), + "./app/components/new-name-blink-modal/new-name-blink-modal.stories.tsx": require("../app/components/new-name-blink-modal/new-name-blink-modal.stories.tsx"), + "./app/components/wallet-overview/wallet-overview.stories.tsx": require("../app/components/wallet-overview/wallet-overview.stories.tsx"), + "./app/components/wallet-summary/wallet-summary.stories.tsx": 
require("../app/components/wallet-summary/wallet-summary.stories.tsx"), + "./app/rne-theme/colors.stories.tsx": require("../app/rne-theme/colors.stories.tsx"), + "./app/rne-theme/text.stories.tsx": require("../app/rne-theme/text.stories.tsx"), + "./app/screens/authentication-screen/authentication-check-screen.stories.tsx": require("../app/screens/authentication-screen/authentication-check-screen.stories.tsx"), + "./app/screens/contacts-screen/contacts-detail.stories.tsx": require("../app/screens/contacts-screen/contacts-detail.stories.tsx"), + "./app/screens/contacts-screen/contacts.stories.tsx": require("../app/screens/contacts-screen/contacts.stories.tsx"), + "./app/screens/conversion-flow/conversion-success-screen.stories.tsx": require("../app/screens/conversion-flow/conversion-success-screen.stories.tsx"), + "./app/screens/earns-map-screen/earns-map-screen.stories.tsx": require("../app/screens/earns-map-screen/earns-map-screen.stories.tsx"), + "./app/screens/earns-screen/earns-quiz.stories.tsx": require("../app/screens/earns-screen/earns-quiz.stories.tsx"), + "./app/screens/earns-screen/earns-sections.stories.tsx": require("../app/screens/earns-screen/earns-sections.stories.tsx"), + "./app/screens/earns-screen/section-completed.stories.tsx": require("../app/screens/earns-screen/section-completed.stories.tsx"), + "./app/screens/home-screen/home-screen.stories.tsx": require("../app/screens/home-screen/home-screen.stories.tsx"), + "./app/screens/phone-auth-screen/phone-flow.stories.tsx": require("../app/screens/phone-auth-screen/phone-flow.stories.tsx"), + "./app/screens/phone-auth-screen/phone-validation.stories.tsx": require("../app/screens/phone-auth-screen/phone-validation.stories.tsx"), + "./app/screens/receive-bitcoin-screen/receive-wrapper.stories.tsx": require("../app/screens/receive-bitcoin-screen/receive-wrapper.stories.tsx"), + "./app/screens/redeem-lnurl-withdrawal-screen/redeem-bitcoin-detail-screen.stories.tsx": require("../app/screens/redeem-lnurl-withdrawal-screen/redeem-bitcoin-detail-screen.stories.tsx"), + "./app/screens/redeem-lnurl-withdrawal-screen/redeem-bitcoin-result-screen.stories.tsx": require("../app/screens/redeem-lnurl-withdrawal-screen/redeem-bitcoin-result-screen.stories.tsx"), + "./app/screens/send-bitcoin-screen/send-bitcoin-confirmation-screen.stories.tsx": require("../app/screens/send-bitcoin-screen/send-bitcoin-confirmation-screen.stories.tsx"), + "./app/screens/send-bitcoin-screen/send-bitcoin-details-screen.stories.tsx": require("../app/screens/send-bitcoin-screen/send-bitcoin-details-screen.stories.tsx"), + "./app/screens/send-bitcoin-screen/send-bitcoin-success-screen.stories.tsx": require("../app/screens/send-bitcoin-screen/send-bitcoin-success-screen.stories.tsx"), + "./app/screens/settings-screen/display-currency-screen.stories.tsx": require("../app/screens/settings-screen/display-currency-screen.stories.tsx"), + "./app/screens/settings-screen/language-screen.stories.tsx": require("../app/screens/settings-screen/language-screen.stories.tsx"), + "./app/screens/settings-screen/settings-screen.stories.tsx": require("../app/screens/settings-screen/settings-screen.stories.tsx"), + "./app/screens/transaction-detail-screen/transaction-detail-screen.stories.tsx": require("../app/screens/transaction-detail-screen/transaction-detail-screen.stories.tsx"), + }; +}; + +configure(getStories, module, false); diff --git a/.storybook/storybook.tsx b/.storybook/storybook.tsx new file mode 100644 index 0000000..58edf89 --- /dev/null +++ b/.storybook/storybook.tsx 
@@ -0,0 +1,53 @@
+import React from "react"
+import { getStorybookUI } from "@storybook/react-native"
+
+import { NavigationContainer } from "@react-navigation/native"
+import { ThemeProvider } from "@rneui/themed"
+import { createStackNavigator } from "@react-navigation/stack"
+import theme from "@app/rne-theme/theme"
+import { ThemeSync } from "@app/utils/theme-sync"
+import TypesafeI18n from "@app/i18n/i18n-react"
+
+// import './doctools'
+
+// storybook.requires.js auto generated by storybook
+// eslint-disable-next-line import/no-unresolved
+import "./storybook.requires"
+import { detectDefaultLocale } from "../app/utils/locale-detector"
+
+const StorybookUI = getStorybookUI({
+  enableWebsockets: true, // for @storybook/react-native-server
+  onDeviceUI: true,
+  initialSelection: { kind: "DisplayCurrency Screen", name: "Default" },
+  shouldPersistSelection: false,
+})
+
+const Stack = createStackNavigator()
+
+const ThemeWrapper: React.FC<React.PropsWithChildren> = ({ children }) => (
+  <ThemeProvider theme={theme}>
+    <ThemeSync />
+    {children}
+  </ThemeProvider>
+)
+
+const I18nWrapper: React.FC<React.PropsWithChildren> = ({ children }) => (
+  <TypesafeI18n locale={detectDefaultLocale()}>{children}</TypesafeI18n>
+)
+
+export const StorybookUIRoot: React.FC = () => (
+  <I18nWrapper>
+    <ThemeWrapper>
+      <NavigationContainer>
+        <Stack.Navigator>
+          <Stack.Screen name="Storybook" component={StorybookUI} />
+        </Stack.Navigator>
+      </NavigationContainer>
+    </ThemeWrapper>
+  </I18nWrapper>
+)
diff --git a/.storybook/views/index.ts b/.storybook/views/index.ts
new file mode 100644
index 0000000..a722e53
--- /dev/null
+++ b/.storybook/views/index.ts
@@ -0,0 +1,3 @@
+export * from "./story-screen"
+export * from "./story"
+export * from "./use-case"
diff --git a/.storybook/views/story-screen.tsx b/.storybook/views/story-screen.tsx
new file mode 100644
index 0000000..ffb56bf
--- /dev/null
+++ b/.storybook/views/story-screen.tsx
@@ -0,0 +1,24 @@
+import * as React from "react"
+import { PersistentStateContext } from "../../app/store/persistent-state"
+
+const PersistentStateWrapper: React.FC<React.PropsWithChildren> = ({ children }) => (
+  <PersistentStateContext.Provider
+    value={{
+      updateState: () => {},
+      resetState: () => {},
+    }}
+  >
+    <>{children}</>
+  </PersistentStateContext.Provider>
+)
+
+export const StoryScreen: React.FC<React.PropsWithChildren> = ({ children }) => (
+  <PersistentStateWrapper>{children}</PersistentStateWrapper>
+)
diff --git a/.storybook/views/story.tsx b/.storybook/views/story.tsx
new file mode 100644
index 0000000..fdc9023
--- /dev/null
+++ b/.storybook/views/story.tsx
@@ -0,0 +1,10 @@
+import * as React from "react"
+import { ScrollView, View, ViewStyle } from "react-native"
+
+const ROOT: ViewStyle = { flex: 1 }
+
+export const Story: React.FC<React.PropsWithChildren> = (props) => (
+  <View style={ROOT}>
+    <ScrollView>{props.children}</ScrollView>
+  </View>
+)
diff --git a/.storybook/views/use-case.tsx b/.storybook/views/use-case.tsx
new file mode 100644
index 0000000..23a5731
--- /dev/null
+++ b/.storybook/views/use-case.tsx
@@ -0,0 +1,71 @@
+import * as React from "react"
+import { View, Text, TextStyle, ViewStyle } from "react-native"
+import { color } from "../../app/theme"
+
+const ROOT: ViewStyle = { backgroundColor: "#eee" }
+const TITLE: TextStyle = { fontWeight: "600", color: "#3d3d3d" }
+const TITLE_WRAPPER: ViewStyle = {}
+const USE_CASE_WRAPPER: ViewStyle = {
+  position: "absolute",
+  top: 0,
+  left: 0,
+  right: 0,
+  borderTopColor: "#e6e6e6",
+  borderTopWidth: 1,
+  flexDirection: "row",
+}
+const USE_CASE: TextStyle = {
+  fontSize: 10,
+  color: "#666",
+  paddingHorizontal: 4,
+  paddingBottom: 2,
+}
+const USAGE: TextStyle = { color: "#666", fontSize: 10, paddingTop: 0 }
+const HEADER: ViewStyle = {
+  paddingTop: 20,
+  paddingBottom: 10,
+  paddingHorizontal: 10,
+  borderBottomColor: "#e6e6e6",
+  borderBottomWidth: 1,
+}
+const COMPONENT: ViewStyle = { backgroundColor: color.palette.white }
+
+export interface UseCaseProps {
+  /** The title. */
+  text: string
+  /** When should we be using this? */
+  usage?: string
+  /** The component use case. */
+  children: React.ReactNode
+  /** A style override. Rarely used. */
+  style?: ViewStyle
+  /** Don't use any padding because it's important to see the spacing. */
+  noPad?: boolean
+  /** Don't use background color because it's important to see the color. */
+  noBackground?: boolean
+}
+
+export const UseCase: React.FC<UseCaseProps> = (props) => {
+  const style: ViewStyle = {
+    ...COMPONENT,
+    ...{ padding: props.noPad ? 0 : 10 },
+    ...{
+      backgroundColor: props.noBackground ? color.transparent : COMPONENT.backgroundColor,
+    },
+    ...props.style,
+  }
+  return (
+    <View style={ROOT}>
+      <View style={HEADER}>
+        <View style={USE_CASE_WRAPPER}>
+          <Text style={USE_CASE}>Use Case</Text>
+        </View>
+        <View style={TITLE_WRAPPER}>
+          <Text style={TITLE}>{props.text}</Text>
+        </View>
+        {props.usage ? <Text style={USAGE}>{props.usage}</Text> : null}
+      </View>
+      <View style={style}>{props.children}</View>
+    </View>
+  )
+}
diff --git a/.storybook_server/main.js b/.storybook_server/main.js
new file mode 100644
index 0000000..ec54242
--- /dev/null
+++ b/.storybook_server/main.js
@@ -0,0 +1,6 @@
+module.exports = {
+  stories: ['../app/**/*.stories.?(ts|tsx|js|jsx)'],
+  logLevel: 'debug',
+  env: () => ({}),
+  addons: ['@storybook/addon-essentials']
+}
\ No newline at end of file
diff --git a/.svgrrc b/.svgrrc
new file mode 100644
index 0000000..08a7641
--- /dev/null
+++ b/.svgrrc
@@ -0,0 +1,5 @@
+{
+  "replaceAttrValues": {
+    "black": "currentColor"
+  }
+}
diff --git a/.typesafe-i18n.json b/.typesafe-i18n.json
new file mode 100644
index 0000000..37ae54d
--- /dev/null
+++ b/.typesafe-i18n.json
@@ -0,0 +1,5 @@
+{
+  "adapter": "react",
+  "$schema": "https://unpkg.com/typesafe-i18n@5.24.3/schema/typesafe-i18n.json",
+  "outputPath": "./app/i18n"
+}
\ No newline at end of file
diff --git a/.watchmanconfig b/.watchmanconfig
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/.watchmanconfig
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/CONTRIBUTING.MD b/CONTRIBUTING.MD
new file mode 100644
index 0000000..767ff8b
--- /dev/null
+++ b/CONTRIBUTING.MD
@@ -0,0 +1,11 @@
+# Contributing
+
+Galoy is an open source project, so contributions to this repository are welcome. If this is your first contribution, please take a look at the issues in the backlog with the [good first issue](https://github.com/GaloyMoney/galoy-mobile/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) tag. If you would like to discuss anything with the team before picking up an issue, feel free to join our [public workspace on Mattermost](https://chat.galoy.io).
+
+## Pull Request Process
+
+1. Fork our repository.
+2. Create a branch in your forked repository.
+3. Make the required code changes.
+4. Ensure you run `yarn check-code` and run the fix commands if necessary (see the sketch below).
+5. Open a pull request from your forked branch to the main branch of the Galoy repository.
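+
+For orientation, a typical local loop might look like the following sketch. The branch name and fork URL are illustrative placeholders; `yarn check-code` and `yarn test` are the scripts this repository's Makefile already wraps.
+
+```sh
+# fork on GitHub first, then:
+git clone git@github.com:<your-username>/galoy-mobile.git  # <your-username> is a placeholder
+cd galoy-mobile
+git checkout -b my-fix    # illustrative branch name
+yarn install              # install JS dependencies
+yarn check-code           # lint and type checks; fix anything it flags
+yarn test                 # run the unit tests
+```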
diff --git a/Gemfile b/Gemfile new file mode 100644 index 0000000..e566b59 --- /dev/null +++ b/Gemfile @@ -0,0 +1,6 @@ +source 'https://rubygems.org' +# You may use http://rbenv.org/ or https://rvm.io/ to install and use this version +ruby File.read(File.join(__dir__, '.ruby-version')).strip +gem 'cocoapods', '~> 1.11', '>= 1.11.3' + +gem "fastlane", "~> 2.212" diff --git a/Gemfile.lock b/Gemfile.lock new file mode 100644 index 0000000..c985f21 --- /dev/null +++ b/Gemfile.lock @@ -0,0 +1,285 @@ +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.6) + rexml + activesupport (7.0.4.3) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + addressable (2.8.4) + public_suffix (>= 2.0.2, < 6.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + artifactory (3.0.15) + atomos (0.1.3) + aws-eventstream (1.2.0) + aws-partitions (1.751.0) + aws-sdk-core (3.171.0) + aws-eventstream (~> 1, >= 1.0.2) + aws-partitions (~> 1, >= 1.651.0) + aws-sigv4 (~> 1.5) + jmespath (~> 1, >= 1.6.1) + aws-sdk-kms (1.63.0) + aws-sdk-core (~> 3, >= 3.165.0) + aws-sigv4 (~> 1.1) + aws-sdk-s3 (1.121.0) + aws-sdk-core (~> 3, >= 3.165.0) + aws-sdk-kms (~> 1) + aws-sigv4 (~> 1.4) + aws-sigv4 (1.5.2) + aws-eventstream (~> 1, >= 1.0.2) + babosa (1.0.4) + claide (1.1.0) + cocoapods (1.12.1) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.12.1) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.6.0, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (>= 1.6.0, < 2.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 2.3.0, < 3.0) + xcodeproj (>= 1.21.0, < 2.0) + cocoapods-core (1.12.1) + activesupport (>= 5.0, < 8) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + cocoapods-deintegrate (1.0.5) + cocoapods-downloader (1.6.3) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.1) + cocoapods-trunk (1.6.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + cocoapods-try (1.2.0) + colored (1.2) + colored2 (3.1.2) + commander (4.6.0) + highline (~> 2.0.0) + concurrent-ruby (1.2.2) + declarative (0.0.20) + digest-crc (0.6.4) + rake (>= 12.0.0, < 14.0.0) + domain_name (0.5.20190701) + unf (>= 0.0.5, < 1.0.0) + dotenv (2.8.1) + emoji_regex (3.2.3) + escape (0.0.4) + ethon (0.16.0) + ffi (>= 1.15.0) + excon (0.99.0) + faraday (1.10.3) + faraday-em_http (~> 1.0) + faraday-em_synchrony (~> 1.0) + faraday-excon (~> 1.1) + faraday-httpclient (~> 1.0) + faraday-multipart (~> 1.0) + faraday-net_http (~> 1.0) + faraday-net_http_persistent (~> 1.0) + faraday-patron (~> 1.0) + faraday-rack (~> 1.0) + faraday-retry (~> 1.0) + ruby2_keywords (>= 0.0.4) + faraday-cookie_jar (0.0.7) + faraday (>= 0.8.0) + http-cookie (~> 1.0.0) + faraday-em_http (1.0.0) + faraday-em_synchrony (1.0.0) + faraday-excon (1.1.0) + faraday-httpclient (1.0.1) + faraday-multipart (1.0.4) + multipart-post (~> 2) + faraday-net_http (1.0.1) + faraday-net_http_persistent (1.2.0) + faraday-patron (1.0.0) + faraday-rack (1.0.0) + faraday-retry (1.0.3) + faraday_middleware (1.2.0) + faraday (~> 1.0) + fastimage (2.2.6) + fastlane (2.212.2) + CFPropertyList (>= 2.3, < 4.0.0) + addressable (>= 2.8, < 3.0.0) + artifactory (~> 3.0) + aws-sdk-s3 (~> 1.0) + 
babosa (>= 1.0.3, < 2.0.0) + bundler (>= 1.12.0, < 3.0.0) + colored + commander (~> 4.6) + dotenv (>= 2.1.1, < 3.0.0) + emoji_regex (>= 0.1, < 4.0) + excon (>= 0.71.0, < 1.0.0) + faraday (~> 1.0) + faraday-cookie_jar (~> 0.0.6) + faraday_middleware (~> 1.0) + fastimage (>= 2.1.0, < 3.0.0) + gh_inspector (>= 1.1.2, < 2.0.0) + google-apis-androidpublisher_v3 (~> 0.3) + google-apis-playcustomapp_v1 (~> 0.1) + google-cloud-storage (~> 1.31) + highline (~> 2.0) + json (< 3.0.0) + jwt (>= 2.1.0, < 3) + mini_magick (>= 4.9.4, < 5.0.0) + multipart-post (~> 2.0.0) + naturally (~> 2.2) + optparse (~> 0.1.1) + plist (>= 3.1.0, < 4.0.0) + rubyzip (>= 2.0.0, < 3.0.0) + security (= 0.1.3) + simctl (~> 1.6.3) + terminal-notifier (>= 2.0.0, < 3.0.0) + terminal-table (>= 1.4.5, < 2.0.0) + tty-screen (>= 0.6.3, < 1.0.0) + tty-spinner (>= 0.8.0, < 1.0.0) + word_wrap (~> 1.0.0) + xcodeproj (>= 1.13.0, < 2.0.0) + xcpretty (~> 0.3.0) + xcpretty-travis-formatter (>= 0.0.3) + ffi (1.15.5) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + google-apis-androidpublisher_v3 (0.39.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-core (0.11.0) + addressable (~> 2.5, >= 2.5.1) + googleauth (>= 0.16.2, < 2.a) + httpclient (>= 2.8.1, < 3.a) + mini_mime (~> 1.0) + representable (~> 3.0) + retriable (>= 2.0, < 4.a) + rexml + webrick + google-apis-iamcredentials_v1 (0.17.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-playcustomapp_v1 (0.13.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-storage_v1 (0.19.0) + google-apis-core (>= 0.9.0, < 2.a) + google-cloud-core (1.6.0) + google-cloud-env (~> 1.0) + google-cloud-errors (~> 1.0) + google-cloud-env (1.6.0) + faraday (>= 0.17.3, < 3.0) + google-cloud-errors (1.3.1) + google-cloud-storage (1.44.0) + addressable (~> 2.8) + digest-crc (~> 0.4) + google-apis-iamcredentials_v1 (~> 0.1) + google-apis-storage_v1 (~> 0.19.0) + google-cloud-core (~> 1.6) + googleauth (>= 0.16.2, < 2.a) + mini_mime (~> 1.0) + googleauth (1.5.2) + faraday (>= 0.17.3, < 3.a) + jwt (>= 1.4, < 3.0) + memoist (~> 0.16) + multi_json (~> 1.11) + os (>= 0.9, < 2.0) + signet (>= 0.16, < 2.a) + highline (2.0.3) + http-cookie (1.0.5) + domain_name (~> 0.5) + httpclient (2.8.3) + i18n (1.12.0) + concurrent-ruby (~> 1.0) + jmespath (1.6.2) + json (2.6.3) + jwt (2.7.0) + memoist (0.16.2) + mini_magick (4.12.0) + mini_mime (1.1.2) + minitest (5.18.0) + molinillo (0.8.0) + multi_json (1.15.0) + multipart-post (2.0.0) + nanaimo (0.3.0) + nap (1.1.0) + naturally (2.2.1) + netrc (0.11.0) + optparse (0.1.1) + os (1.1.4) + plist (3.7.0) + public_suffix (4.0.7) + rake (13.0.6) + representable (3.2.0) + declarative (< 0.1.0) + trailblazer-option (>= 0.1.1, < 0.2.0) + uber (< 0.2.0) + retriable (3.1.2) + rexml (3.2.5) + rouge (2.0.7) + ruby-macho (2.5.1) + ruby2_keywords (0.0.5) + rubyzip (2.3.2) + security (0.1.3) + signet (0.17.0) + addressable (~> 2.8) + faraday (>= 0.17.5, < 3.a) + jwt (>= 1.5, < 3.0) + multi_json (~> 1.10) + simctl (1.6.10) + CFPropertyList + naturally + terminal-notifier (2.0.0) + terminal-table (1.8.0) + unicode-display_width (~> 1.1, >= 1.1.1) + trailblazer-option (0.1.2) + tty-cursor (0.7.1) + tty-screen (0.8.1) + tty-spinner (0.9.3) + tty-cursor (~> 0.7) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + uber (0.1.0) + unf (0.1.4) + unf_ext + unf_ext (0.0.8.2) + unicode-display_width (1.8.0) + webrick (1.8.1) + word_wrap (1.0.0) + xcodeproj (1.22.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) 
+      colored2 (~> 3.1)
+      nanaimo (~> 0.3.0)
+      rexml (~> 3.2.4)
+    xcpretty (0.3.0)
+      rouge (~> 2.0.7)
+    xcpretty-travis-formatter (1.0.1)
+      xcpretty (~> 0.2, >= 0.0.7)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  cocoapods (~> 1.11, >= 1.11.3)
+  fastlane (~> 2.212)
+
+RUBY VERSION
+   ruby 2.7.7p221
+
+BUNDLED WITH
+   2.1.4
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..fc1fcb8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Galoy Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..28545a5
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,16 @@
+check-code:
+	yarn check-code
+
+unit:
+	yarn test
+
+test: unit check-code
+
+codegen:
+	yarn dev:codegen
+	yarn update-translations
+
+reset-ios:
+	yarn cache:clear
+	yarn install
+	yarn ios
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9fd56b7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,76 @@
+# OuroWallet
+
+## Goal
+
+This repository is the Ouro Wallet, a cloned and modified version of the Galoy Bitcoin Beach Wallet mobile application. The goal is to make a mobile application compatible with Galoy's backend that can be customized and used by any community or organization. It is built with [React Native](https://reactnative.dev/) and runs on both iOS and Android.
+
+## Start
+
+Prerequisite -- [Set up React Native](https://reactnative.dev/docs/environment-setup) by following the instructions in the **React Native CLI Quickstart** tab.
+
+Clone the project.
+
+`cd` into the directory.
+
+Type `yarn install`.
+
+Type `yarn start`.
+
+In another window, type `yarn ios` or `yarn android` to run locally.
+
+The app is built and pushed to the App Store and Play Store on demand with CircleCI.
+
+To run the app fully locally, the backend must also be set up by following the instructions at https://github.com/GaloyMoney/galoy.
+
+---
+
+### _Notes for Running on M1 Mac:_
+
+The app currently only builds for x86_64 simulators. Simulators prior to iOS 13.7 are x86_64 by default; starting with 13.7, they become platform-specific. To get an x86_64 simulator of a newer iOS version, set Xcode to open in [emulation using Rosetta](https://www.macworld.com/article/338843/how-to-force-a-native-m1-mac-app-to-run-as-an-intel-app-instead.html). To run the project, open [GaloyApp.xcworkspace](./ios/GaloyApp.xcworkspace/) in Xcode, choose an x86_64 simulator, and click the play button.
+This should start the Metro bundler in a new terminal and launch the simulator with the app.
+
+---
+
+## Running Storybook
+
+From the command line in your generated app's root directory, enter `yarn storybook`. This starts up the Storybook server.
+
+In `index.js`, change `SHOW_STORYBOOK` to `true`, reload the app, and run `yarn storybook`.
+
+For Visual Studio Code users, there is a handy extension that makes it easy to load Storybook use cases into a running emulator by tapping on items in the editor sidebar. Install the `React Native Storybook` extension by `Orta`, hit `cmd + shift + P`, and select "Reconnect Storybook to VSCode". Expand the STORYBOOK section in the sidebar to see all use cases for components that have `.stories.tsx` files in their directories.
+
+## E2E testing
+
+See the [readme](docs/e2e-testing.md).
+
+## Local development with libraries
+
+The mobile app uses the [galoy-client](https://github.com/GaloyMoney/galoy-client) for generic functions, such as parsing bitcoin transactions and lightning invoices, and for managing translations across the different frontends of the Galoy stack. If you want to make changes to the galoy-client locally and test them in the app (e.g., adding a key to the translations file), these are the steps to take. Since the Metro bundler [does not support](https://github.com/facebook/metro/issues/68) `yarn link`, we have to use [yalc](https://www.npmjs.com/package/yalc).
+
+**Before you run these commands, the client will need to be published using `yalc`. Instructions for this are included in the readme for the client.**
+
+`npx yalc add @galoymoney/client`
+
+When you are finished developing locally and are ready to push to GitHub, remove the yalc dependency with the following command:
+
+`npx yalc remove @galoymoney/client`
+
+## Adding new fonts
+
+**These instructions are correct as of react-native 0.69. If you are using a different version, you may need to adjust them.**
+
+1. Add the new fonts to the `app/assets/fonts` directory.
+2. Run `yarn fonts` to link the font files to the native projects.
+3. You should see the new font in the `ios/GaloyApp/Info.plist` file and the `android/app/src/main/assets/fonts` directory.
+
+## Adding translation keys
+
+To add a new string to be used in the application, navigate to [en/index.ts](app/i18n/en/index.ts) and add the phrase you need in English. Then run `yarn update-translations`. This command updates the translation types as well as the raw English translation file. The new phrase can now be used throughout the application, with all languages falling back to the English translation (see the sketch at the end of this readme).
+
+**Warning**: Do not update files in the [raw translations folder](/app/i18n/raw-i18n/). These files are managed programmatically.
+
+## Icons
+
+**Warning**
+We use [react-native-vector-icons](https://github.com/oblador/react-native-vector-icons) in this repo. Our main component library, [react-native-elements](https://github.com/react-native-elements/react-native-elements), also uses icons from this set in some of its components. We have added custom icons from the Ionicons set to the existing components we import from the library. If you import a new component from React Native Elements that uses an icon from a set other than Ionicons, it might not render on the screen.
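+
+To make the translation flow concrete, here is a minimal sketch of adding and consuming a key. The `welcomeScreen.title` key and the `WelcomeTitle` component are hypothetical, purely for illustration; `useI18nContext` is the hook that typesafe-i18n generates in `app/i18n/i18n-react`.
+
+```tsx
+// app/i18n/en/index.ts (sketch): add the phrase under a namespace
+const en = {
+  welcomeScreen: {
+    title: "Welcome to the wallet", // hypothetical key
+  },
+  // ...existing keys
+}
+
+// After `yarn update-translations`, consume it from any component:
+import { Text } from "react-native"
+import { useI18nContext } from "@app/i18n/i18n-react"
+
+export const WelcomeTitle = () => {
+  const { LL } = useI18nContext()
+  // generated translations are callable functions
+  return <Text>{LL.welcomeScreen.title()}</Text>
+}
+```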
\ No newline at end of file diff --git a/__mocks__/@react-native-async-storage/async-storage.js b/__mocks__/@react-native-async-storage/async-storage.js new file mode 100644 index 0000000..e19713d --- /dev/null +++ b/__mocks__/@react-native-async-storage/async-storage.js @@ -0,0 +1 @@ +export { AsyncStorageMock as default } from "@react-native-async-storage/async-storage/jest/async-storage-mock" diff --git a/__mocks__/@react-native-clipboard/clipboard.js b/__mocks__/@react-native-clipboard/clipboard.js new file mode 100644 index 0000000..85fad92 --- /dev/null +++ b/__mocks__/@react-native-clipboard/clipboard.js @@ -0,0 +1,6 @@ +const mockedClipboard = { + setString: jest.fn(), + getString: jest.fn().mockResolvedValue(""), +} + +export default mockedClipboard diff --git a/__mocks__/@react-native-firebase/analytics.js b/__mocks__/@react-native-firebase/analytics.js new file mode 100644 index 0000000..638c584 --- /dev/null +++ b/__mocks__/@react-native-firebase/analytics.js @@ -0,0 +1,5 @@ +const logEvent = jest.fn() + +export default () => ({ + logEvent, +}) diff --git a/__mocks__/@react-native-firebase/crashlytics.js b/__mocks__/@react-native-firebase/crashlytics.js new file mode 100644 index 0000000..05aa88a --- /dev/null +++ b/__mocks__/@react-native-firebase/crashlytics.js @@ -0,0 +1,6 @@ +/* eslint-disable */ + +export default () => ({ + log: (message) => {}, + recordError: (err) => {}, +}) diff --git a/__mocks__/@react-native-firebase/messaging.js b/__mocks__/@react-native-firebase/messaging.js new file mode 100644 index 0000000..d9b841f --- /dev/null +++ b/__mocks__/@react-native-firebase/messaging.js @@ -0,0 +1,9 @@ +const messaging = { + requestPermission: jest.fn(() => Promise.resolve()), + getToken: jest.fn(() => Promise.resolve("mocked_token")), + onMessage: jest.fn(() => () => {}), + onNotificationOpenedApp: jest.fn(() => () => {}), + setBackgroundMessageHandler: jest.fn(), +} + +export default messaging diff --git a/__mocks__/react-native-device-info.js b/__mocks__/react-native-device-info.js new file mode 100644 index 0000000..3865cf5 --- /dev/null +++ b/__mocks__/react-native-device-info.js @@ -0,0 +1,4 @@ +export default { + getReadableVersion: jest.fn(() => "1.0.0"), + getBuildNumber: jest.fn(() => "1234"), +} diff --git a/__mocks__/react-native-keyboard-aware-scroll-view.js b/__mocks__/react-native-keyboard-aware-scroll-view.js new file mode 100644 index 0000000..1f4474c --- /dev/null +++ b/__mocks__/react-native-keyboard-aware-scroll-view.js @@ -0,0 +1,3 @@ +const KeyboardAwareScrollView = () => null + +export { KeyboardAwareScrollView } diff --git a/__mocks__/react-native-localize.js b/__mocks__/react-native-localize.js new file mode 100644 index 0000000..e3ce0b5 --- /dev/null +++ b/__mocks__/react-native-localize.js @@ -0,0 +1,44 @@ +// THIS IS THE EXAMPLE FILE FROM https://www.npmjs.com/package/react-native-localize?activeTab=readme + +const getLocales = () => [ + // you can choose / add the locales you want + { countryCode: "US", languageTag: "en-US", languageCode: "en", isRTL: false }, + { countryCode: "FR", languageTag: "fr-FR", languageCode: "fr", isRTL: false }, +] + +// use a provided translation, or return undefined to test your fallback +const findBestAvailableLanguage = () => ({ + languageTag: "en-US", + isRTL: false, +}) + +const getNumberFormatSettings = () => ({ + decimalSeparator: ".", + groupingSeparator: ",", +}) + +const getCalendar = () => "gregorian" // or "japanese", "buddhist" +const getCountry = () => "US" // the country code you want +const 
getCurrencies = () => ["USD", "EUR"] // can be empty array +const getTemperatureUnit = () => "celsius" // or "fahrenheit" +const getTimeZone = () => "Europe/Paris" // the timezone you want +const uses24HourClock = () => true +const usesMetricSystem = () => true + +const addEventListener = jest.fn() +const removeEventListener = jest.fn() + +export { + findBestAvailableLanguage, + getLocales, + getNumberFormatSettings, + getCalendar, + getCountry, + getCurrencies, + getTemperatureUnit, + getTimeZone, + uses24HourClock, + usesMetricSystem, + addEventListener, + removeEventListener, +} diff --git a/__mocks__/react-native-qrcode-svg.js b/__mocks__/react-native-qrcode-svg.js new file mode 100644 index 0000000..a5b0355 --- /dev/null +++ b/__mocks__/react-native-qrcode-svg.js @@ -0,0 +1,3 @@ +export default function QRCode() { + return null +} diff --git a/__tests__/components/app-update.spec.tsx b/__tests__/components/app-update.spec.tsx new file mode 100644 index 0000000..3fbde01 --- /dev/null +++ b/__tests__/components/app-update.spec.tsx @@ -0,0 +1,79 @@ +import { isUpdateAvailableOrRequired } from "@app/components/app-update/app-update.logic" +import { Platform } from "react-native" + +const mobileVersions = [ + { + __typename: "MobileVersions", + platform: "android", + currentSupported: 294, + minSupported: 182, + }, + { + __typename: "MobileVersions", + platform: "ios", + currentSupported: 295, + minSupported: 182, + }, +] + +const OS = "ios" as Platform["OS"] + +describe("testing isUpdateAvailableOrRequired with normal build number", () => { + it("outdated should return true", () => { + const buildNumber = 150 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(true) + expect(result.available).toBe(true) + }) + + it("above minSupported should return true for available", () => { + const buildNumber = 200 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(false) + expect(result.available).toBe(true) + }) + + it("current should return false", () => { + const buildNumber = 295 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(false) + expect(result.available).toBe(false) + }) + + it("above should return false", () => { + const buildNumber = 300 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(false) + expect(result.available).toBe(false) + }) +}) + +describe("testing isUpdateAvailableOrRequired with android abi", () => { + it("outdated should return true", () => { + const buildNumber = 150 + 10000000 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(true) + expect(result.available).toBe(true) + }) + + it("above minSupported should return true for available", () => { + const buildNumber = 200 + 10000000 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(false) + expect(result.available).toBe(true) + }) + + it("current should return false", () => { + const buildNumber = 295 + 20000000 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + expect(result.required).toBe(false) + expect(result.available).toBe(false) + }) + + it("above should return false", () => { + const buildNumber = 300 + 30000000 + const result = isUpdateAvailableOrRequired({ buildNumber, mobileVersions, OS }) + 
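// even with the 30000000 Android ABI offset attached, the raw build number (300) is what gets compared +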
expect(result.required).toBe(false) + expect(result.available).toBe(false) + }) +}) diff --git a/__tests__/components/transaction-date.spec.tsx b/__tests__/components/transaction-date.spec.tsx new file mode 100644 index 0000000..381b7ad --- /dev/null +++ b/__tests__/components/transaction-date.spec.tsx @@ -0,0 +1,53 @@ +import * as React from "react" +import { render } from "@testing-library/react-native" +import { createMock } from "ts-auto-mock" +import moment from "moment" + +import { TransactionDate } from "../../app/components/transaction-date" +import { i18nObject } from "../../app/i18n/i18n-util" +import { Transaction } from "../../app/graphql/generated" + +jest.mock("@app/i18n/i18n-react", () => ({ + useI18nContext: () => { + return i18nObject("en") + }, +})) + +describe("Display the createdAt date for a transaction", () => { + it("Displays pending for a pending onchain transaction", () => { + const mockedTransaction = createMock<Transaction>({ + status: "PENDING", + createdAt: new Date().getDate(), + }) + + const { queryAllByText } = render( + <TransactionDate {...mockedTransaction} />, + ) + expect(queryAllByText("pending")).not.toBeNull() + }) + it("Displays friendly date", () => { + const testTransactionCreatedAtDate = moment().subtract(1, "days") + const mockedTransaction = createMock<Transaction>({ + createdAt: testTransactionCreatedAtDate.unix(), + }) + + const { queryByText } = render( + <TransactionDate {...mockedTransaction} />, + ) + expect( + queryByText( + moment + .duration(Math.min(0, moment.unix(mockedTransaction.createdAt).diff(moment()))) + .humanize(true), + ), + ).not.toBeNull() + }) +}) diff --git a/__tests__/config/galoy-instances.spec.ts b/__tests__/config/galoy-instances.spec.ts new file mode 100644 index 0000000..c5430bb --- /dev/null +++ b/__tests__/config/galoy-instances.spec.ts @@ -0,0 +1,29 @@ +import { resolveGaloyInstanceOrDefault, GALOY_INSTANCES } from "@app/config" + +it("get a full object with BBW", () => { + const res = resolveGaloyInstanceOrDefault({ id: "Main" }) + + expect(res).toBe(GALOY_INSTANCES[0]) +}) + +it("get a full object with Staging", () => { + const res = resolveGaloyInstanceOrDefault({ id: "Staging" }) + + expect(res).toBe(GALOY_INSTANCES[1]) +}) + +it("get a full object with Custom", () => { + const CustomInstance = { + id: "Custom", + name: "Custom", + graphqlUri: "https://api.custom.com/graphql", + graphqlWsUri: "ws://ws.custom.com/graphql", + posUrl: "https://pay.custom.com/", + lnAddressHostname: "custom.com", + blockExplorer: "https://mempool.space/tx/", + } as const + + const res = resolveGaloyInstanceOrDefault(CustomInstance) + + expect(res).toBe(CustomInstance) +}) diff --git a/__tests__/currencies/match-currencies.spec.ts b/__tests__/currencies/match-currencies.spec.ts new file mode 100644 index 0000000..227fb7f --- /dev/null +++ b/__tests__/currencies/match-currencies.spec.ts @@ -0,0 +1,94 @@ +import { + getMatchingCurrencies, + wordMatchesCurrency, +} from "../../app/screens/settings-screen/display-currency-screen" + +const currency = { + flag: "🇹🇹", + id: "TTD", + name: "Trinidad and Tobago Dollar", + symbol: "TT$", + fractionDigits: 2, + __typename: "Currency", +} as const + +const currencies = [ + { + flag: "🇹🇹", + id: "TTD", + name: "Trinidad and Tobago Dollar", + symbol: "TT$", + fractionDigits: 2, + __typename: "Currency", + } as const, + { + flag: "🇹🇷", + id: "TRY", + name: "Turkish Lira", + symbol: "₤", + fractionDigits: 2, + __typename: "Currency", + } as const, + { + flag: "🇮🇳", + id: "INR", + name: "Indian Rupee", + symbol: "₹", + fractionDigits: 2, + __typename: "Currency", + } as const, + { + flag: "🇺🇸", + 
id: "USD", + name: "US Dollar", + symbol: "$", + fractionDigits: 2, + __typename: "Currency", + } as const, +] + +describe("match-currencies", () => { + it("wordMatchesCurrency", () => { + expect(wordMatchesCurrency("TTD", currency)).toBe(true) + expect(wordMatchesCurrency("ttd", currency)).toBe(true) + expect(wordMatchesCurrency("dollar", currency)).toBe(true) + expect(wordMatchesCurrency("toba", currency)).toBe(true) + expect(wordMatchesCurrency("Trini", currency)).toBe(true) + + expect(wordMatchesCurrency("US", currency)).toBe(false) + expect(wordMatchesCurrency("USD", currency)).toBe(false) + expect(wordMatchesCurrency("usd", currency)).toBe(false) + }) + + it("getMatchingCurrencies", () => { + expect(getMatchingCurrencies("EUR", currencies.slice())).toEqual([]) + expect(getMatchingCurrencies("USD", currencies.slice())).toEqual([ + { + flag: "đŸ‡ē🇸", + id: "USD", + name: "US Dollar", + symbol: "$", + fractionDigits: 2, + __typename: "Currency", + }, + ]) + expect(getMatchingCurrencies("dollar", currencies.slice())).toEqual([ + { + flag: "🇹🇹", + id: "TTD", + name: "Trinidad and Tobago Dollar", + symbol: "TT$", + fractionDigits: 2, + __typename: "Currency", + }, + { + flag: "đŸ‡ē🇸", + id: "USD", + name: "US Dollar", + symbol: "$", + fractionDigits: 2, + __typename: "Currency", + }, + ]) + }) +}) diff --git a/__tests__/hooks/use-display-currency.spec.tsx b/__tests__/hooks/use-display-currency.spec.tsx new file mode 100644 index 0000000..18b1a58 --- /dev/null +++ b/__tests__/hooks/use-display-currency.spec.tsx @@ -0,0 +1,231 @@ +import { renderHook } from "@testing-library/react-hooks" + +import { useDisplayCurrency } from "@app/hooks/use-display-currency" +import { MockedProvider } from "@apollo/client/testing" +import { PropsWithChildren } from "react" +import * as React from "react" +import { IsAuthedContextProvider } from "@app/graphql/is-authed-context" +import { CurrencyListDocument, RealtimePriceDocument } from "@app/graphql/generated" + +const mocksNgn = [ + { + request: { + query: RealtimePriceDocument, + }, + result: { + data: { + me: { + __typename: "User", + id: "70df9822-efe0-419c-b864-c9efa99872ea", + defaultAccount: { + __typename: "Account", + id: "84b26b88-89b0-5c6f-9d3d-fbead08f79d8", + realtimePrice: { + btcSatPrice: { + base: 24015009766, + offset: 12, + currencyUnit: "USDCENT", + __typename: "PriceOfOneSat", + }, + denominatorCurrency: "NGN", + id: "67b6e1d2-04c8-509a-abbd-b1cab08575d5", + timestamp: 1677184189, + usdCentPrice: { + base: 100000000, + offset: 6, + currencyUnit: "USDCENT", + __typename: "PriceOfOneUsdCent", + }, + __typename: "RealtimePrice", + }, + }, + }, + }, + }, + }, + { + request: { + query: CurrencyListDocument, + }, + result: { + data: { + currencyList: [ + { + flag: "đŸ‡ŗđŸ‡Ŧ", + id: "NGN", + name: "Nigerian Naira", + symbol: "â‚Ļ", + fractionDigits: 2, + __typename: "Currency", + }, + ], + }, + }, + }, +] + +const mocksJpy = [ + { + request: { + query: RealtimePriceDocument, + }, + result: { + data: { + me: { + __typename: "User", + id: "70df9822-efe0-419c-b864-c9efa99872ea", + defaultAccount: { + __typename: "Account", + id: "84b26b88-89b0-5c6f-9d3d-fbead08f79d8", + realtimePrice: { + btcSatPrice: { + base: 24015009766, + offset: 12, + __typename: "PriceOfOneSat", + }, + denominatorCurrency: "JPY", + id: "67b6e1d2-04c8-509a-abbd-b1cab08575d5", + timestamp: 1677184189, + usdCentPrice: { + base: 100000000, + offset: 6, + __typename: "PriceOfOneUsdCent", + }, + __typename: "RealtimePrice", + }, + }, + }, + }, + }, + }, + { + request: { + query: 
CurrencyListDocument, + }, + result: { + data: { + currencyList: [ + { + flag: "", + id: "JPY", + name: "Japanese Yen", + symbol: "¥", + fractionDigits: 0, + __typename: "Currency", + }, + ], + }, + }, + }, +] + +/* eslint-disable react/display-name */ +/* eslint @typescript-eslint/ban-ts-comment: "off" */ +const wrapWithMocks = + // @ts-ignore-next-line no-implicit-any error + (mocks) => + ({ children }: PropsWithChildren) => + ( + <MockedProvider mocks={mocks}> + <IsAuthedContextProvider value={true}>{children}</IsAuthedContextProvider> + </MockedProvider> + ) + +describe("useDisplayCurrency", () => { + describe("testing moneyAmountToMajorUnitOrSats", () => { + it("with 0 digits", async () => { + const { result, waitForNextUpdate } = renderHook(useDisplayCurrency, { + wrapper: wrapWithMocks(mocksJpy), + }) + + await waitForNextUpdate() + + const res = result.current.moneyAmountToMajorUnitOrSats({ + amount: 100, + currency: "DisplayCurrency", + }) + + expect(res).toBe(100) + }) + + it("with 2 digits", async () => { + const { result, waitForNextUpdate } = renderHook(useDisplayCurrency, { + wrapper: wrapWithMocks(mocksNgn), + }) + + await waitForNextUpdate() + + const res = result.current.moneyAmountToMajorUnitOrSats({ + amount: 10, + currency: "DisplayCurrency", + }) + + expect(res).toBe(0.1) + }) + }) + + it("unAuthed should return default value", async () => { + const { result } = renderHook(useDisplayCurrency, { + wrapper: wrapWithMocks([]), + }) + + expect(result.current).toMatchObject({ + fractionDigits: 2, + fiatSymbol: "$", + displayCurrency: "USD", + }) + }) + + it("authed but empty query should return default value", async () => { + const { result, waitForNextUpdate } = renderHook(useDisplayCurrency, { + wrapper: wrapWithMocks([]), + }) + + expect(result.current).toMatchObject({ + fractionDigits: 2, + fiatSymbol: "$", + displayCurrency: "USD", + }) + + await waitForNextUpdate() + + expect(result.current).toMatchObject({ + fractionDigits: 2, + fiatSymbol: "$", + displayCurrency: "USD", + }) + }) + + it("authed should return NGN from mock", async () => { + const { result, waitFor } = renderHook(useDisplayCurrency, { + wrapper: wrapWithMocks(mocksNgn), + }) + + expect(result.current).toMatchObject({ + fractionDigits: 2, + fiatSymbol: "$", + displayCurrency: "USD", + }) + + // ultimately this is what we want + // but this is failing in CI + // await waitForNextUpdate() + + await waitFor( + () => { + return result.current.displayCurrency === "NGN" + }, + { + timeout: 4000, + }, + ) + + expect(result.current).toMatchObject({ + fractionDigits: 2, + fiatSymbol: "₦", + displayCurrency: "NGN", + }) + }) +}) diff --git a/__tests__/hooks/use-price-conversion.spec.ts b/__tests__/hooks/use-price-conversion.spec.ts new file mode 100644 index 0000000..933530a --- /dev/null +++ b/__tests__/hooks/use-price-conversion.spec.ts @@ -0,0 +1,121 @@ +import { renderHook } from "@testing-library/react-hooks" + +type MockUseRealtimePriceResponse = Pick<ReturnType<typeof useRealtimePriceQuery>, "data"> +const mockUseRealtimePriceQuery = jest.fn< + MockUseRealtimePriceResponse, + Parameters<typeof useRealtimePriceQuery> +>() +import { usePriceConversion } from "@app/hooks/use-price-conversion" +import { useRealtimePriceQuery, WalletCurrency } from "@app/graphql/generated" +import { + BtcMoneyAmount, + DisplayAmount, + DisplayCurrency, + UsdMoneyAmount, +} from "@app/types/amounts" + +jest.mock("@app/graphql/generated", () => { + return { + ...jest.requireActual("@app/graphql/generated"), + useRealtimePriceQuery: mockUseRealtimePriceQuery, + } +}) + +const mockPriceData: MockUseRealtimePriceResponse = { + data: { + __typename: "Query", + me: { + id: 
"f2b1d23f-816c-51db-aea4-4b773cfdf7a7", + __typename: "User", + defaultAccount: { + __typename: "ConsumerAccount", + id: "f2b1d0bf-816c-51db-aea4-4b773cfdf7a7", + realtimePrice: { + __typename: "RealtimePrice", + btcSatPrice: { + __typename: "PriceOfOneSatInMinorUnit", + base: 10118784000000, + offset: 12, + }, + denominatorCurrency: "NGN", + id: "f2b1d0bf-816c-51db-aea4-4b773cfdf7a7", + timestamp: 1678314952, + usdCentPrice: { + __typename: "PriceOfOneUsdCentInMinorUnit", + base: 460434879, + offset: 6, + }, + }, + }, + }, + }, +} + +const oneThousandDollars: UsdMoneyAmount = { + amount: 100000, + currency: WalletCurrency.Usd, +} // $1,000 +const oneThousandDollarsInSats: BtcMoneyAmount = { + amount: 4550299, + currency: WalletCurrency.Btc, +} // 4,550,299 sats +const oneThousandDollarsInNairaMinorUnits: DisplayAmount = { + amount: 46043488, + currency: DisplayCurrency, +} // 460,434.88 Naira + +const amounts = { + oneThousandDollars, + oneThousandDollarsInSats, + oneThousandDollarsInNairaMinorUnits, +} + +describe("usePriceConversion", () => { + beforeEach(() => { + jest.clearAllMocks() + }) + + it("should return null fields when no price is provided", () => { + mockUseRealtimePriceQuery.mockReturnValue({ data: undefined }) + + const { result } = renderHook(() => usePriceConversion()) + expect(result.current).toEqual({ + convertMoneyAmount: undefined, + usdPerSat: null, + }) + }) + + describe("convertMoneyAmount", () => { + mockUseRealtimePriceQuery.mockReturnValue(mockPriceData) + + const { result } = renderHook(() => usePriceConversion()) + const convertMoneyAmount = result.current.convertMoneyAmount + if (!convertMoneyAmount) { + throw new Error("convertMoneyAmount is undefined") + } + + it("should make proper conversions", () => { + // test all conversions + for (const fromCurrency of Object.keys(amounts)) { + for (const toCurrency of Object.keys(amounts)) { + const fromAmount = amounts[fromCurrency as keyof typeof amounts] + const toAmount = amounts[toCurrency as keyof typeof amounts] + + const convertedAmount = convertMoneyAmount(fromAmount, toAmount.currency) + // expect amounts to be within .01% of each other due to rounding + expect( + (toAmount.amount - convertedAmount.amount) / convertedAmount.amount, + ).toBeLessThan(0.0001) + } + } + }) + + it("should return input if the toCurrency is the same", () => { + const amountsArray = Object.values(amounts) + + amountsArray.forEach((amount) => { + expect(convertMoneyAmount(amount, amount.currency)).toBe(amount) + }) + }) + }) +}) diff --git a/__tests__/lnurl.spec.ts b/__tests__/lnurl.spec.ts new file mode 100644 index 0000000..8a5fea3 --- /dev/null +++ b/__tests__/lnurl.spec.ts @@ -0,0 +1,12 @@ +import { bech32 } from "bech32" + +const lnurl1 = + "LNURL1DP68GURN8GHJ7MRWWPSHJTNRDUHKG6TNW3EX7TTJDA6HGETJ9AKXUATJDSKHW6T5DPJ8YCTH8AKXU5RP09GXZUNPD4EN6ETEFFAXZ3EE09JYVWF3VDKHW620D9YNYN28F4M5U7NRD9XYXJNTV9VYUVRRD5UKVCNDGE69556FXEYK6MR5T9TKGMZFD9MKJCJ8X4G4JKRVGAV4S4N2TFV9Y42E2A3KJNMFFFX9VKRGDDYKJAMFV3VYUMRRDSUHWK2CD3NXY46KXPS5WWTTF94X76TZGU6NZCMDWA5KV5F9XDZZ2V6YYENXZATRV46976RPWD5R6ENRW30KGSJXVF5YX5R4XPS56KPJ23CHXVJ6X9MXU3TJXVS3AT2F" + +it("I can get username from a custom invoice", () => { + const decoded = bech32.decode(lnurl1, 1024) + const url = Buffer.from(bech32.fromWords(decoded.words)).toString() + + // FIXME what is the goal of this test? 
+ expect(url).toContain("https://lnpay") +}) diff --git a/__tests__/payment-destination/helpers.ts b/__tests__/payment-destination/helpers.ts new file mode 100644 index 0000000..30bad16 --- /dev/null +++ b/__tests__/payment-destination/helpers.ts @@ -0,0 +1,9 @@ +import { WalletCurrency } from "@app/graphql/generated" + +export const defaultPaymentDetailParams = { + convertMoneyAmount: jest.fn(), + sendingWalletDescriptor: { + currency: WalletCurrency.Btc, + id: "testid", + }, +} diff --git a/__tests__/payment-destination/intraledger.spec.ts b/__tests__/payment-destination/intraledger.spec.ts new file mode 100644 index 0000000..9b5e3da --- /dev/null +++ b/__tests__/payment-destination/intraledger.spec.ts @@ -0,0 +1,101 @@ +import { WalletCurrency } from "@app/graphql/generated" +import { + PaymentDetail, + CreateIntraledgerPaymentDetailsParams, +} from "@app/screens/send-bitcoin-screen/payment-details" + +const mockCreateIntraledgerPaymentDetail = jest.fn< + PaymentDetail, + [CreateIntraledgerPaymentDetailsParams] +>() + +jest.mock("@app/screens/send-bitcoin-screen/payment-details", () => { + return { + createIntraledgerPaymentDetails: mockCreateIntraledgerPaymentDetail, + } +}) +import { + createIntraLedgerDestination, + resolveIntraledgerDestination, +} from "@app/screens/send-bitcoin-screen/payment-destination" +import { defaultPaymentDetailParams } from "./helpers" +import { InvalidDestinationReason } from "@app/screens/send-bitcoin-screen/payment-destination/index.types" + +describe("resolve intraledger", () => { + const defaultIntraledgerParams = { + parsedIntraledgerDestination: { + paymentType: "intraledger", + handle: "testhandle", + } as const, + accountDefaultWalletQuery: jest.fn(), + myWalletIds: ["testwalletid"], + } + + it("returns invalid destination if wallet is not found", async () => { + defaultIntraledgerParams.accountDefaultWalletQuery.mockResolvedValue({ data: {} }) + const destination = await resolveIntraledgerDestination(defaultIntraledgerParams) + + expect(destination).toEqual({ + valid: false, + invalidReason: InvalidDestinationReason.UsernameDoesNotExist, + invalidPaymentDestination: defaultIntraledgerParams.parsedIntraledgerDestination, + }) + }) + + it("returns invalid destination if the wallet is owned by self", async () => { + defaultIntraledgerParams.accountDefaultWalletQuery.mockResolvedValue({ + data: { accountDefaultWallet: { id: "testwalletid" } }, + }) + const destination = await resolveIntraledgerDestination(defaultIntraledgerParams) + expect(destination).toEqual({ + valid: false, + invalidReason: InvalidDestinationReason.SelfPayment, + invalidPaymentDestination: defaultIntraledgerParams.parsedIntraledgerDestination, + }) + }) + + it("returns a valid destination if username exists", async () => { + defaultIntraledgerParams.accountDefaultWalletQuery.mockResolvedValue({ + data: { accountDefaultWallet: { id: "successwalletid" } }, + }) + const destination = await resolveIntraledgerDestination(defaultIntraledgerParams) + expect(destination).toEqual( + expect.objectContaining({ + valid: true, + validDestination: { + ...defaultIntraledgerParams.parsedIntraledgerDestination, + walletId: "successwalletid", + valid: true, + }, + }), + ) + }) +}) + +describe("create intraledger destination", () => { + const createIntraLedgerDestinationParams = { + parsedIntraledgerDestination: { + paymentType: "intraledger", + handle: "testhandle", + }, + walletId: "testwalletid", + } as const + + it("correctly creates payment detail", () => { + const intraLedgerDestination = 
createIntraLedgerDestination( + createIntraLedgerDestinationParams, + ) + intraLedgerDestination.createPaymentDetail(defaultPaymentDetailParams) + + expect(mockCreateIntraledgerPaymentDetail).toBeCalledWith({ + handle: createIntraLedgerDestinationParams.parsedIntraledgerDestination.handle, + recipientWalletId: createIntraLedgerDestinationParams.walletId, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + unitOfAccountAmount: { + amount: 0, + currency: WalletCurrency.Btc, + }, + }) + }) +}) diff --git a/__tests__/payment-destination/lightning.spec.ts b/__tests__/payment-destination/lightning.spec.ts new file mode 100644 index 0000000..2691f64 --- /dev/null +++ b/__tests__/payment-destination/lightning.spec.ts @@ -0,0 +1,80 @@ +import { WalletCurrency } from "@app/graphql/generated" +import { + PaymentDetail, + CreateAmountLightningPaymentDetailsParams, + CreateNoAmountLightningPaymentDetailsParams, +} from "@app/screens/send-bitcoin-screen/payment-details" + +const mockCreateAmountLightningPaymentDetail = jest.fn< + PaymentDetail, + [CreateAmountLightningPaymentDetailsParams] +>() +const mockCreateNoAmountLightningPaymentDetail = jest.fn< + PaymentDetail, + [CreateNoAmountLightningPaymentDetailsParams] +>() + +jest.mock("@app/screens/send-bitcoin-screen/payment-details", () => { + return { + createAmountLightningPaymentDetails: mockCreateAmountLightningPaymentDetail, + createNoAmountLightningPaymentDetails: mockCreateNoAmountLightningPaymentDetail, + } +}) +import { createLightningDestination } from "@app/screens/send-bitcoin-screen/payment-destination" +import { defaultPaymentDetailParams } from "./helpers" + +describe("create lightning destination", () => { + const baseParsedLightningDestination = { + paymentType: "lightning", + valid: true, + paymentRequest: "testinvoice", + memo: "testmemo", + } as const + + describe("with amount", () => { + const parsedLightningDestinationWithAmount = { + ...baseParsedLightningDestination, + amount: 1000, + } as const + it("correctly creates payment detail", () => { + const amountLightningDestination = createLightningDestination( + parsedLightningDestinationWithAmount, + ) + + amountLightningDestination.createPaymentDetail(defaultPaymentDetailParams) + + expect(mockCreateAmountLightningPaymentDetail).toBeCalledWith({ + paymentRequest: parsedLightningDestinationWithAmount.paymentRequest, + paymentRequestAmount: { + amount: parsedLightningDestinationWithAmount.amount, + currency: WalletCurrency.Btc, + }, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + destinationSpecifiedMemo: parsedLightningDestinationWithAmount.memo, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + }) + }) + }) + + describe("without amount", () => { + const parsedLightningDestinationWithoutAmount = { + ...baseParsedLightningDestination, + } as const + it("correctly creates payment detail", () => { + const noAmountLightningDestination = createLightningDestination( + parsedLightningDestinationWithoutAmount, + ) + noAmountLightningDestination.createPaymentDetail(defaultPaymentDetailParams) + expect(mockCreateNoAmountLightningPaymentDetail).toBeCalledWith({ + paymentRequest: parsedLightningDestinationWithoutAmount.paymentRequest, + unitOfAccountAmount: { + amount: 0, + currency: WalletCurrency.Btc, + }, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + destinationSpecifiedMemo: 
parsedLightningDestinationWithoutAmount.memo, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + }) + }) + }) +}) diff --git a/__tests__/payment-destination/lnurl.spec.ts b/__tests__/payment-destination/lnurl.spec.ts new file mode 100644 index 0000000..73ed373 --- /dev/null +++ b/__tests__/payment-destination/lnurl.spec.ts @@ -0,0 +1,192 @@ +import { createLnurlPaymentDetails } from "@app/screens/send-bitcoin-screen/payment-details" +import { createMock } from "ts-auto-mock" + +import { + createLnurlPaymentDestination, + resolveLnurlDestination, +} from "@app/screens/send-bitcoin-screen/payment-destination" +import { LnUrlPayServiceResponse } from "lnurl-pay/dist/types/types" +import { defaultPaymentDetailParams } from "./helpers" +import { fetchLnurlPaymentParams } from "@galoymoney/client" +import { getParams, LNURLPayParams, LNURLResponse, LNURLWithdrawParams } from "js-lnurl" +import { PaymentType } from "@galoymoney/client/dist/parsing-v2" +import { DestinationDirection } from "@app/screens/send-bitcoin-screen/payment-destination/index.types" +import { WalletCurrency } from "@app/graphql/generated" + +jest.mock("@galoymoney/client", () => { + return { + fetchLnurlPaymentParams: jest.fn(), + } +}) + +jest.mock("js-lnurl", () => { + return { + getParams: jest.fn(), + } +}) +jest.mock("@app/screens/send-bitcoin-screen/payment-details", () => { + return { + createLnurlPaymentDetails: jest.fn(), + } +}) + +const mockFetchLnurlPaymentParams = fetchLnurlPaymentParams as jest.MockedFunction< + typeof fetchLnurlPaymentParams +> +const mockGetParams = getParams as jest.MockedFunction<typeof getParams> +const mockCreateLnurlPaymentDetail = createLnurlPaymentDetails as jest.MockedFunction< + typeof createLnurlPaymentDetails +> + +const throwError = () => { + throw new Error("test error") +} + +describe("resolve lnurl destination", () => { + describe("with ln address", () => { + const lnurlPaymentDestinationParams = { + parsedLnurlDestination: { + paymentType: PaymentType.Lnurl, + valid: true, + lnurl: "test@domain.com", + } as const, + lnurlDomains: ["ourdomain.com"], + accountDefaultWalletQuery: jest.fn(), + myWalletIds: ["testwalletid"], + } + + it("creates lnurl pay destination", async () => { + const lnurlPayParams = createMock<LnUrlPayServiceResponse>({ + identifier: lnurlPaymentDestinationParams.parsedLnurlDestination.lnurl, + }) + mockFetchLnurlPaymentParams.mockResolvedValue(lnurlPayParams) + mockGetParams.mockResolvedValue(createMock<LNURLResponse>()) + + const destination = await resolveLnurlDestination(lnurlPaymentDestinationParams) + + expect(destination).toEqual( + expect.objectContaining({ + valid: true, + destinationDirection: DestinationDirection.Send, + validDestination: { + ...lnurlPaymentDestinationParams.parsedLnurlDestination, + lnurlParams: lnurlPayParams, + valid: true, + }, + }), + ) + }) + }) + + describe("with lnurl pay string", () => { + const lnurlPaymentDestinationParams = { + parsedLnurlDestination: { + paymentType: PaymentType.Lnurl, + valid: true, + lnurl: "lnurlrandomstring", + } as const, + lnurlDomains: ["ourdomain.com"], + accountDefaultWalletQuery: jest.fn(), + myWalletIds: ["testwalletid"], + } + + it("creates lnurl pay destination", async () => { + const lnurlPayParams = createMock<LnUrlPayServiceResponse>({ + identifier: lnurlPaymentDestinationParams.parsedLnurlDestination.lnurl, + }) + mockFetchLnurlPaymentParams.mockResolvedValue(lnurlPayParams) + mockGetParams.mockResolvedValue(createMock<LNURLResponse>()) + + const destination = await resolveLnurlDestination(lnurlPaymentDestinationParams) + + 
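// a raw lnurl-pay string should resolve the same way as a lightning address: into a Send-direction destination +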
expect(destination).toEqual( + expect.objectContaining({ + valid: true, + destinationDirection: DestinationDirection.Send, + validDestination: { + ...lnurlPaymentDestinationParams.parsedLnurlDestination, + lnurlParams: lnurlPayParams, + valid: true, + }, + }), + ) + }) + }) + + describe("with lnurl withdraw string", () => { + const lnurlPaymentDestinationParams = { + parsedLnurlDestination: { + paymentType: PaymentType.Lnurl, + valid: true, + lnurl: "lnurlrandomstring", + } as const, + lnurlDomains: ["ourdomain.com"], + accountDefaultWalletQuery: jest.fn(), + myWalletIds: ["testwalletid"], + } + + it("creates lnurl withdraw destination", async () => { + mockFetchLnurlPaymentParams.mockImplementation(throwError) + const mockLnurlWithdrawParams = createMock<LNURLWithdrawParams>() + mockGetParams.mockResolvedValue(mockLnurlWithdrawParams) + + const destination = await resolveLnurlDestination(lnurlPaymentDestinationParams) + + const { + callback, + domain, + k1, + maxWithdrawable, + minWithdrawable, + defaultDescription, + } = mockLnurlWithdrawParams + + expect(destination).toEqual( + expect.objectContaining({ + valid: true, + destinationDirection: DestinationDirection.Receive, + validDestination: { + paymentType: PaymentType.Lnurl, + callback, + domain, + k1, + maxWithdrawable, + minWithdrawable, + defaultDescription, + valid: true, + lnurl: lnurlPaymentDestinationParams.parsedLnurlDestination.lnurl, + }, + }), + ) + }) + }) +}) + +describe("create lnurl destination", () => { + it("correctly creates payment detail", () => { + const lnurlPaymentDestinationParams = { + paymentType: "lnurl", + valid: true, + lnurl: "testlnurl", + lnurlParams: createMock<LnUrlPayServiceResponse>(), + } as const + + const lnurlPayDestination = createLnurlPaymentDestination( + lnurlPaymentDestinationParams, + ) + + lnurlPayDestination.createPaymentDetail(defaultPaymentDetailParams) + + expect(mockCreateLnurlPaymentDetail).toBeCalledWith({ + lnurl: lnurlPaymentDestinationParams.lnurl, + lnurlParams: lnurlPaymentDestinationParams.lnurlParams, + unitOfAccountAmount: { + amount: 0, + currency: WalletCurrency.Btc, + }, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + destinationSpecifiedMemo: lnurlPaymentDestinationParams.lnurlParams.description, + }) + }) +}) diff --git a/__tests__/payment-destination/onchain.spec.ts b/__tests__/payment-destination/onchain.spec.ts new file mode 100644 index 0000000..0c45718 --- /dev/null +++ b/__tests__/payment-destination/onchain.spec.ts @@ -0,0 +1,104 @@ +import { WalletCurrency } from "@app/graphql/generated" +import { + PaymentDetail, + CreateAmountLightningPaymentDetailsParams, + CreateNoAmountLightningPaymentDetailsParams, + CreateLnurlPaymentDetailsParams, + CreateNoAmountOnchainPaymentDetailsParams, + CreateAmountOnchainPaymentDetailsParams, + CreateIntraledgerPaymentDetailsParams, +} from "@app/screens/send-bitcoin-screen/payment-details" + +const mockCreateAmountLightningPaymentDetail = jest.fn< + PaymentDetail, + [CreateAmountLightningPaymentDetailsParams] +>() +const mockCreateNoAmountLightningPaymentDetail = jest.fn< + PaymentDetail, + [CreateNoAmountLightningPaymentDetailsParams] +>() +const mockCreateLnurlPaymentDetail = jest.fn< + PaymentDetail, + [CreateLnurlPaymentDetailsParams] +>() +const mockCreateNoAmountOnchainPaymentDetail = jest.fn< + PaymentDetail, + [CreateNoAmountOnchainPaymentDetailsParams] +>() +const mockCreateAmountOnchainPaymentDetail = jest.fn< + PaymentDetail, + 
[CreateAmountOnchainPaymentDetailsParams] +>() +const mockCreateIntraledgerPaymentDetail = jest.fn< + PaymentDetail, + [CreateIntraledgerPaymentDetailsParams] +>() + +jest.mock("@app/screens/send-bitcoin-screen/payment-details", () => { + return { + createAmountLightningPaymentDetails: mockCreateAmountLightningPaymentDetail, + createNoAmountLightningPaymentDetails: mockCreateNoAmountLightningPaymentDetail, + createLnurlPaymentDetails: mockCreateLnurlPaymentDetail, + createNoAmountOnchainPaymentDetails: mockCreateNoAmountOnchainPaymentDetail, + createAmountOnchainPaymentDetails: mockCreateAmountOnchainPaymentDetail, + createIntraledgerPaymentDetails: mockCreateIntraledgerPaymentDetail, + } +}) +import { createOnchainDestination } from "@app/screens/send-bitcoin-screen/payment-destination" +import { defaultPaymentDetailParams } from "./helpers" + +describe("create onchain destination", () => { + const baseParsedOnchainDestination = { + paymentType: "onchain", + valid: true, + address: "testaddress", + memo: "testmemo", + } as const + + describe("with amount", () => { + const parsedOnchainDestinationWithAmount = { + ...baseParsedOnchainDestination, + amount: 1000, + } as const + it("correctly creates payment detail", () => { + const amountOnchainDestination = createOnchainDestination( + parsedOnchainDestinationWithAmount, + ) + + amountOnchainDestination.createPaymentDetail(defaultPaymentDetailParams) + + expect(mockCreateAmountOnchainPaymentDetail).toBeCalledWith({ + address: parsedOnchainDestinationWithAmount.address, + destinationSpecifiedAmount: { + amount: parsedOnchainDestinationWithAmount.amount, + currency: WalletCurrency.Btc, + }, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + destinationSpecifiedMemo: parsedOnchainDestinationWithAmount.memo, + }) + }) + }) + + describe("without amount", () => { + const parsedOnchainDestinationWithoutAmount = { + ...baseParsedOnchainDestination, + } as const + it("correctly creates payment detail", () => { + const noAmountOnchainDestination = createOnchainDestination( + parsedOnchainDestinationWithoutAmount, + ) + noAmountOnchainDestination.createPaymentDetail(defaultPaymentDetailParams) + expect(mockCreateNoAmountOnchainPaymentDetail).toBeCalledWith({ + address: parsedOnchainDestinationWithoutAmount.address, + unitOfAccountAmount: { + amount: 0, + currency: WalletCurrency.Btc, + }, + convertMoneyAmount: defaultPaymentDetailParams.convertMoneyAmount, + sendingWalletDescriptor: defaultPaymentDetailParams.sendingWalletDescriptor, + destinationSpecifiedMemo: parsedOnchainDestinationWithoutAmount.memo, + }) + }) + }) +}) diff --git a/__tests__/payment-details/amount-lightning-payment-details.spec.ts b/__tests__/payment-details/amount-lightning-payment-details.spec.ts new file mode 100644 index 0000000..8180324 --- /dev/null +++ b/__tests__/payment-details/amount-lightning-payment-details.spec.ts @@ -0,0 +1,186 @@ +import { WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/lightning" +import { + btcSendingWalletDescriptor, + btcTestAmount, + convertMoneyAmountMock, + createGetFeeMocks, + createSendPaymentMocks, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + usdSendingWalletDescriptor, +} from "./helpers" + +const defaultParams: PaymentDetails.CreateAmountLightningPaymentDetailsParams = + { + paymentRequest: 
"testinvoice", + paymentRequestAmount: btcTestAmount, + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + +const spy = jest.spyOn(PaymentDetails, "createAmountLightningPaymentDetails") + +describe("amount lightning payment details", () => { + const { createAmountLightningPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields with all arguments provided", () => { + const paymentDetails = createAmountLightningPaymentDetails(defaultParams) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParams.paymentRequest, + destinationSpecifiedAmount: defaultParams.paymentRequestAmount, + settlementAmount: defaultParams.convertMoneyAmount( + defaultParams.paymentRequestAmount, + defaultParams.sendingWalletDescriptor.currency, + ), + unitOfAccountAmount: defaultParams.paymentRequestAmount, + sendingWalletDescriptor: defaultParams.sendingWalletDescriptor, + canGetFee: true, + canSendPayment: true, + canSetAmount: false, + canSetMemo: true, + convertMoneyAmount: defaultParams.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParams, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createAmountLightningPaymentDetails(btcSendingWalletParams) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + const usdSendingWalletParams = { + ...defaultParams, + sendingWalletDescriptor: usdSendingWalletDescriptor, + } + const paymentDetails = createAmountLightningPaymentDetails(usdSendingWalletParams) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnUsdInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if 
(!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + it("cannot set memo if memo is provided", () => { + const defaultParamsWithMemo = { + ...defaultParams, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createAmountLightningPaymentDetails(defaultParamsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + defaultParamsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams, + spy, + creatorFunction: createAmountLightningPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams, + spy, + creatorFunction: createAmountLightningPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-details/amount-onchain.spec.ts b/__tests__/payment-details/amount-onchain.spec.ts new file mode 100644 index 0000000..e509b58 --- /dev/null +++ b/__tests__/payment-details/amount-onchain.spec.ts @@ -0,0 +1,150 @@ +import { WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/onchain" +import { + btcSendingWalletDescriptor, + convertMoneyAmountMock, + createGetFeeMocks, + createSendPaymentMocks, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + testAmount, + usdSendingWalletDescriptor, +} from "./helpers" + +const defaultParams: PaymentDetails.CreateAmountOnchainPaymentDetailsParams = + { + address: "testaddress", + destinationSpecifiedAmount: testAmount, + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + +const spy = jest.spyOn(PaymentDetails, "createAmountOnchainPaymentDetails") + +describe("amount onchain payment details", () => { + const { createAmountOnchainPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields with all arguments provided", () => { + const paymentDetails = createAmountOnchainPaymentDetails(defaultParams) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParams.address, + settlementAmount: defaultParams.convertMoneyAmount( + defaultParams.destinationSpecifiedAmount, + defaultParams.sendingWalletDescriptor.currency, + ), + unitOfAccountAmount: defaultParams.destinationSpecifiedAmount, + sendingWalletDescriptor: defaultParams.sendingWalletDescriptor, + settlementAmountIsEstimated: false, + canGetFee: true, + canSendPayment: true, + canSetAmount: false, + canSetMemo: true, + convertMoneyAmount: defaultParams.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createAmountOnchainPaymentDetails(btcSendingWalletParams) + const settlementAmount = defaultParams.convertMoneyAmount( + 
testAmount, + btcSendingWalletDescriptor.currency, + ) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.onChainTxFee).toHaveBeenCalledWith({ + variables: { + address: defaultParams.address, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.onChainPaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + address: defaultParams.address, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + it("throws an error", () => { + const usdSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: usdSendingWalletDescriptor, + } + expect(() => createAmountOnchainPaymentDetails(usdSendingWalletParams)).toThrow() + }) + }) + + it("cannot set memo if memo is provided", () => { + const paramsWithMemo = { + ...defaultParams, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createAmountOnchainPaymentDetails(paramsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + paramsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams, + spy, + creatorFunction: createAmountOnchainPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams, + spy, + creatorFunction: createAmountOnchainPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-details/helpers.ts b/__tests__/payment-details/helpers.ts new file mode 100644 index 0000000..fdf4543 --- /dev/null +++ b/__tests__/payment-details/helpers.ts @@ -0,0 +1,141 @@ +import { WalletCurrency } from "@app/graphql/generated" +import { + ConvertMoneyAmount, + GetFeeParams, + PaymentDetail, + SendPaymentParams, +} from "@app/screens/send-bitcoin-screen/payment-details" + +export const convertMoneyAmountMock: ConvertMoneyAmount = (amount, currency) => { + return { + amount: amount.amount, + currency, + } +} + +export const zeroAmount = { + amount: 0, + currency: WalletCurrency.Btc, +} + +export const btcTestAmount = { + amount: 1232, + currency: WalletCurrency.Btc, +} + +export const usdTestAmount = { + amount: 3212, + currency: WalletCurrency.Usd, +} + +export const testAmount = { + amount: 100, + currency: WalletCurrency.Btc, +} + +export const btcSendingWalletDescriptor = { + currency: WalletCurrency.Btc, + id: "testwallet", +} + +export const usdSendingWalletDescriptor = { + currency: WalletCurrency.Usd, + id: "testwallet", +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type 
CreateFunctionWithSpyParams<T extends (params: any) => any> = { + spy: jest.SpyInstance<ReturnType<T>, Parameters<T>> + defaultParams: Parameters<T>[0] + creatorFunction: T +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type CreateFunctionWithSpy = <T extends (params: any) => any>() => ( + params: CreateFunctionWithSpyParams<T>, +) => void + +export const expectCannotSetAmount = (paymentDetails: PaymentDetail) => { + expect(paymentDetails.canSetAmount).toBeFalsy() + expect(paymentDetails.setAmount).toBeUndefined() +} + +export const expectDestinationSpecifiedMemoCannotSetMemo = ( + paymentDetails: PaymentDetail, + destinationSpecifiedMemo: string, +) => { + expect(paymentDetails.canSetMemo).toBeFalsy() + expect(paymentDetails.setMemo).toBeUndefined() + expect(paymentDetails.memo).toEqual(destinationSpecifiedMemo) +} + +export const expectCannotGetFee = (paymentDetails: PaymentDetail) => { + expect(paymentDetails.canGetFee).toBeFalsy() + expect(paymentDetails.getFee).toBeUndefined() +} + +export const expectCannotSendPayment = ( + paymentDetails: PaymentDetail, +) => { + expect(paymentDetails.canSendPayment).toBeFalsy() + expect(paymentDetails.sendPayment).toBeUndefined() +} + +export const getTestSetMemo: CreateFunctionWithSpy = () => (params) => { + const { defaultParams, creatorFunction, spy } = params + const senderSpecifiedMemo = "sender memo" + const paymentDetails = creatorFunction(defaultParams) + + if (!paymentDetails.canSetMemo) throw new Error("Memo is unable to be set") + paymentDetails.setMemo(senderSpecifiedMemo) + + const lastCall = spy.mock.lastCall && spy.mock.lastCall[0] + expect(lastCall).toEqual({ ...defaultParams, senderSpecifiedMemo }) +} + +export const getTestSetAmount: CreateFunctionWithSpy = () => (params) => { + const { defaultParams, creatorFunction, spy } = params + const paymentDetails = creatorFunction(defaultParams) + const unitOfAccountAmount = { + amount: 100, + currency: WalletCurrency.Btc, + } + if (!paymentDetails.canSetAmount) throw new Error("Amount is unable to be set") + paymentDetails.setAmount(unitOfAccountAmount) + const lastCall = spy.mock.lastCall && spy.mock.lastCall[0] + expect(lastCall).toEqual({ ...defaultParams, unitOfAccountAmount }) +} + +export const getTestSetSendingWalletDescriptor: CreateFunctionWithSpy = + () => (params) => { + const { defaultParams, creatorFunction, spy } = params + const paymentDetails = creatorFunction(defaultParams) + const sendingWalletDescriptor = { + currency: WalletCurrency.Btc, + id: "newtestwallet", + } + paymentDetails.setSendingWalletDescriptor(sendingWalletDescriptor) + const lastCall = spy.mock.lastCall && spy.mock.lastCall[0] + expect(lastCall).toEqual({ ...defaultParams, sendingWalletDescriptor }) + } + +export const createGetFeeMocks = (): GetFeeParams => { + return { + lnInvoiceFeeProbe: jest.fn(), + lnUsdInvoiceFeeProbe: jest.fn(), + lnNoAmountInvoiceFeeProbe: jest.fn(), + lnNoAmountUsdInvoiceFeeProbe: jest.fn(), + onChainTxFee: jest.fn(), + } +} + +export const createSendPaymentMocks = (): SendPaymentParams => { + return { + lnInvoicePaymentSend: jest.fn(), + lnNoAmountInvoicePaymentSend: jest.fn(), + lnNoAmountUsdInvoicePaymentSend: jest.fn(), + onChainPaymentSend: jest.fn(), + intraLedgerPaymentSend: jest.fn(), + intraLedgerUsdPaymentSend: jest.fn(), + } +} diff --git a/__tests__/payment-details/intraledger-payment-details.spec.ts b/__tests__/payment-details/intraledger-payment-details.spec.ts new file mode 100644 index 0000000..b99a711 --- /dev/null +++ b/__tests__/payment-details/intraledger-payment-details.spec.ts @@ -0,0 +1,177 @@ +import { 
WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/intraledger" +import { + btcSendingWalletDescriptor, + convertMoneyAmountMock, + createSendPaymentMocks, + expectCannotGetFee, + expectCannotSendPayment, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetAmount, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + testAmount, + usdSendingWalletDescriptor, + zeroAmount, +} from "./helpers" + +const defaultParams: PaymentDetails.CreateIntraledgerPaymentDetailsParams = + { + handle: "test", + recipientWalletId: "testid", + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + unitOfAccountAmount: testAmount, + } + +const spy = jest.spyOn(PaymentDetails, "createIntraledgerPaymentDetails") + +describe("intraledger payment details", () => { + const { createIntraledgerPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields with all arguments provided", () => { + const paymentDetails = createIntraledgerPaymentDetails(defaultParams) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParams.handle, + settlementAmount: defaultParams.convertMoneyAmount( + defaultParams.unitOfAccountAmount, + defaultParams.sendingWalletDescriptor.currency, + ), + unitOfAccountAmount: defaultParams.unitOfAccountAmount, + sendingWalletDescriptor: defaultParams.sendingWalletDescriptor, + settlementAmountIsEstimated: false, + canGetFee: true, + canSendPayment: true, + canSetAmount: true, + canSetMemo: true, + convertMoneyAmount: defaultParams.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createIntraledgerPaymentDetails(btcSendingWalletParams) + const settlementAmount = defaultParams.convertMoneyAmount( + testAmount, + btcSendingWalletDescriptor.currency, + ) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.intraLedgerPaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + recipientWalletId: defaultParams.recipientWalletId, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + const usdSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: usdSendingWalletDescriptor, + } + const settlementAmount = defaultParams.convertMoneyAmount( + testAmount, + usdSendingWalletDescriptor.currency, + ) + const paymentDetails = createIntraledgerPaymentDetails(usdSendingWalletParams) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + 
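// sending from a USD wallet settles through the USD-specific intraledger mutation +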
expect(sendPaymentMocks.intraLedgerUsdPaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + recipientWalletId: defaultParams.recipientWalletId, + amount: settlementAmount.amount, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + it("cannot calculate fee or send payment with zero amount", () => { + const paramsWithZeroAmount = { + ...defaultParams, + unitOfAccountAmount: zeroAmount, + } + const paymentDetails = createIntraledgerPaymentDetails(paramsWithZeroAmount) + expectCannotGetFee(paymentDetails) + expectCannotSendPayment(paymentDetails) + }) + + it("cannot set memo if memo is provided", () => { + const paramsWithMemo = { + ...defaultParams, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createIntraledgerPaymentDetails(paramsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + paramsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams, + spy, + creatorFunction: createIntraledgerPaymentDetails, + }) + }) + + it("can set amount", () => { + const testSetAmount = getTestSetAmount() + testSetAmount({ + defaultParams, + spy, + creatorFunction: createIntraledgerPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams, + spy, + creatorFunction: createIntraledgerPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-details/lnurl-payment-details.spec.ts b/__tests__/payment-details/lnurl-payment-details.spec.ts new file mode 100644 index 0000000..a36b64c --- /dev/null +++ b/__tests__/payment-details/lnurl-payment-details.spec.ts @@ -0,0 +1,250 @@ +import { WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/lightning" +import { LnUrlPayServiceResponse } from "lnurl-pay/dist/types/types" +import { createMock } from "ts-auto-mock" +import { + btcSendingWalletDescriptor, + btcTestAmount, + convertMoneyAmountMock, + createGetFeeMocks, + createSendPaymentMocks, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetAmount, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + testAmount, + usdSendingWalletDescriptor, +} from "./helpers" + +const defaultParamsWithoutInvoice = { + lnurl: "testlnurl", + lnurlParams: createMock<LnUrlPayServiceResponse>({ min: 1, max: 1000 }), + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + unitOfAccountAmount: testAmount, +} + +const defaultParamsWithInvoice = { + ...defaultParamsWithoutInvoice, + paymentRequest: "testinvoice", + paymentRequestAmount: btcTestAmount, +} + +const defaultParamsWithEqualMinMaxAmount = { + ...defaultParamsWithoutInvoice, + lnurlParams: createMock<LnUrlPayServiceResponse>({ min: 100, max: 100 }), +} + +const spy = jest.spyOn(PaymentDetails, "createLnurlPaymentDetails") + +describe("lnurl payment details", () => { + const { createLnurlPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields if min and max amount is equal", () => { + const paymentDetails = createLnurlPaymentDetails(defaultParamsWithEqualMinMaxAmount) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParamsWithEqualMinMaxAmount.lnurl, + settlementAmount: defaultParamsWithEqualMinMaxAmount.unitOfAccountAmount, + unitOfAccountAmount: 
defaultParamsWithEqualMinMaxAmount.unitOfAccountAmount, + sendingWalletDescriptor: + defaultParamsWithEqualMinMaxAmount.sendingWalletDescriptor, + settlementAmountIsEstimated: + defaultParamsWithEqualMinMaxAmount.sendingWalletDescriptor.currency !== + WalletCurrency.Btc, + canGetFee: false, + canSendPayment: false, + canSetAmount: false, + canSetMemo: true, + convertMoneyAmount: defaultParamsWithoutInvoice.convertMoneyAmount, + }), + ) + }) + + it("properly sets fields without invoice", () => { + const paymentDetails = createLnurlPaymentDetails(defaultParamsWithoutInvoice) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParamsWithoutInvoice.lnurl, + settlementAmount: defaultParamsWithoutInvoice.unitOfAccountAmount, + unitOfAccountAmount: defaultParamsWithoutInvoice.unitOfAccountAmount, + sendingWalletDescriptor: defaultParamsWithoutInvoice.sendingWalletDescriptor, + canGetFee: false, + settlementAmountIsEstimated: + defaultParamsWithInvoice.sendingWalletDescriptor.currency !== + WalletCurrency.Btc, + canSendPayment: false, + canSetAmount: true, + canSetMemo: true, + convertMoneyAmount: defaultParamsWithoutInvoice.convertMoneyAmount, + }), + ) + }) + + it("properly sets fields with invoice set", () => { + const paymentDetails = createLnurlPaymentDetails(defaultParamsWithInvoice) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParamsWithInvoice.lnurl, + settlementAmount: defaultParamsWithInvoice.paymentRequestAmount, + unitOfAccountAmount: defaultParamsWithInvoice.unitOfAccountAmount, + sendingWalletDescriptor: defaultParamsWithInvoice.sendingWalletDescriptor, + settlementAmountIsEstimated: + defaultParamsWithInvoice.sendingWalletDescriptor.currency !== + WalletCurrency.Btc, + canGetFee: true, + canSendPayment: true, + canSetAmount: true, + canSetMemo: true, + convertMoneyAmount: defaultParamsWithoutInvoice.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParamsWithInvoice, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createLnurlPaymentDetails(btcSendingWalletParams) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: btcSendingWalletParams.paymentRequest, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: btcSendingWalletParams.paymentRequest, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + const usdSendingWalletParams = { + ...defaultParamsWithInvoice, + sendingWalletDescriptor: 
usdSendingWalletDescriptor, + } + const paymentDetails = createLnurlPaymentDetails(usdSendingWalletParams) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnUsdInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: usdSendingWalletParams.paymentRequest, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: usdSendingWalletParams.paymentRequest, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + it("cannot set memo if memo is provided", () => { + const paramsWithMemo = { + ...defaultParamsWithoutInvoice, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createLnurlPaymentDetails(paramsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + paramsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams: defaultParamsWithoutInvoice, + spy, + creatorFunction: createLnurlPaymentDetails, + }) + }) + + it("can set amount", () => { + const testSetAmount = getTestSetAmount() + testSetAmount({ + defaultParams: defaultParamsWithoutInvoice, + spy, + creatorFunction: createLnurlPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams: defaultParamsWithoutInvoice, + spy, + creatorFunction: createLnurlPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-details/no-amount-lightning-payment-details.spec.ts b/__tests__/payment-details/no-amount-lightning-payment-details.spec.ts new file mode 100644 index 0000000..7686ccd --- /dev/null +++ b/__tests__/payment-details/no-amount-lightning-payment-details.spec.ts @@ -0,0 +1,224 @@ +import { WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/lightning" +import { + testAmount, + btcSendingWalletDescriptor, + convertMoneyAmountMock, + createGetFeeMocks, + createSendPaymentMocks, + expectCannotGetFee, + expectCannotSendPayment, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetAmount, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + usdSendingWalletDescriptor, + zeroAmount, +} from "./helpers" + +const defaultParams: PaymentDetails.CreateNoAmountLightningPaymentDetailsParams = + { + paymentRequest: "testinvoice", + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + unitOfAccountAmount: testAmount, + } + +const spy = jest.spyOn(PaymentDetails, "createNoAmountLightningPaymentDetails") + +describe("no amount 
lightning payment details", () => { + const { createNoAmountLightningPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields with all arguments provided", () => { + const paymentDetails = createNoAmountLightningPaymentDetails(defaultParams) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParams.paymentRequest, + settlementAmount: defaultParams.convertMoneyAmount( + defaultParams.unitOfAccountAmount, + defaultParams.sendingWalletDescriptor.currency, + ), + unitOfAccountAmount: defaultParams.unitOfAccountAmount, + sendingWalletDescriptor: defaultParams.sendingWalletDescriptor, + settlementAmountIsEstimated: false, + canGetFee: true, + canSendPayment: true, + canSetAmount: true, + canSetMemo: true, + convertMoneyAmount: defaultParams.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createNoAmountLightningPaymentDetails(btcSendingWalletParams) + const settlementAmount = defaultParams.convertMoneyAmount( + testAmount, + btcSendingWalletDescriptor.currency, + ) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnNoAmountInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnNoAmountInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + const usdSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: usdSendingWalletDescriptor, + } + const settlementAmount = defaultParams.convertMoneyAmount( + testAmount, + usdSendingWalletDescriptor.currency, + ) + const paymentDetails = createNoAmountLightningPaymentDetails(usdSendingWalletParams) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.lnNoAmountUsdInvoiceFeeProbe).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + amount: settlementAmount.amount, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + 
}, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.lnNoAmountUsdInvoicePaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + paymentRequest: defaultParams.paymentRequest, + amount: settlementAmount.amount, + walletId: usdSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + it("cannot calculate fee or send payment with zero amount", () => { + const params: PaymentDetails.CreateNoAmountLightningPaymentDetailsParams = + { + ...defaultParams, + unitOfAccountAmount: zeroAmount, + } + const paymentDetails = createNoAmountLightningPaymentDetails(params) + expectCannotGetFee(paymentDetails) + expectCannotSendPayment(paymentDetails) + }) + + it("cannot set memo if memo is provided", () => { + const paramsWithMemo = { + ...defaultParams, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createNoAmountLightningPaymentDetails(paramsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + paramsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams, + spy, + creatorFunction: createNoAmountLightningPaymentDetails, + }) + }) + + it("can set amount", () => { + const testSetAmount = getTestSetAmount() + testSetAmount({ + defaultParams, + spy, + creatorFunction: createNoAmountLightningPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams, + spy, + creatorFunction: createNoAmountLightningPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-details/no-amount-onchain.spec.ts b/__tests__/payment-details/no-amount-onchain.spec.ts new file mode 100644 index 0000000..bdde6b3 --- /dev/null +++ b/__tests__/payment-details/no-amount-onchain.spec.ts @@ -0,0 +1,174 @@ +import { WalletCurrency } from "@app/graphql/generated" +import * as PaymentDetails from "@app/screens/send-bitcoin-screen/payment-details/onchain" +import { + testAmount, + btcSendingWalletDescriptor, + convertMoneyAmountMock, + createGetFeeMocks, + createSendPaymentMocks, + expectCannotGetFee, + expectCannotSendPayment, + expectDestinationSpecifiedMemoCannotSetMemo, + getTestSetAmount, + getTestSetMemo, + getTestSetSendingWalletDescriptor, + usdSendingWalletDescriptor, + zeroAmount, +} from "./helpers" + +const defaultParams: PaymentDetails.CreateNoAmountOnchainPaymentDetailsParams = + { + address: "testaddress", + convertMoneyAmount: convertMoneyAmountMock, + sendingWalletDescriptor: btcSendingWalletDescriptor, + unitOfAccountAmount: testAmount, + } + +const spy = jest.spyOn(PaymentDetails, "createNoAmountOnchainPaymentDetails") + +describe("no amount onchain payment details", () => { + const { createNoAmountOnchainPaymentDetails } = PaymentDetails + + beforeEach(() => { + spy.mockClear() + }) + + it("properly sets fields with all arguments provided", () => { + const paymentDetails = createNoAmountOnchainPaymentDetails(defaultParams) + expect(paymentDetails).toEqual( + expect.objectContaining({ + destination: defaultParams.address, + 
settlementAmount: defaultParams.convertMoneyAmount( + defaultParams.unitOfAccountAmount, + defaultParams.sendingWalletDescriptor.currency, + ), + unitOfAccountAmount: defaultParams.unitOfAccountAmount, + sendingWalletDescriptor: defaultParams.sendingWalletDescriptor, + settlementAmountIsEstimated: false, + canGetFee: true, + canSendPayment: true, + canSetAmount: true, + canSetMemo: true, + convertMoneyAmount: defaultParams.convertMoneyAmount, + }), + ) + }) + + describe("sending from a btc wallet", () => { + const btcSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: btcSendingWalletDescriptor, + } + const paymentDetails = createNoAmountOnchainPaymentDetails(btcSendingWalletParams) + const settlementAmount = defaultParams.convertMoneyAmount( + testAmount, + btcSendingWalletDescriptor.currency, + ) + + it("uses the correct fee mutations and args", async () => { + const feeParamsMocks = createGetFeeMocks() + if (!paymentDetails.canGetFee) { + throw new Error("Cannot get fee") + } + + try { + await paymentDetails.getFee(feeParamsMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the fee response + } + + expect(feeParamsMocks.onChainTxFee).toHaveBeenCalledWith({ + variables: { + address: defaultParams.address, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }) + }) + + it("uses the correct send payment mutation and args", async () => { + const sendPaymentMocks = createSendPaymentMocks() + if (!paymentDetails.canSendPayment) { + throw new Error("Cannot send payment") + } + + try { + await paymentDetails.sendPayment(sendPaymentMocks) + } catch { + // do nothing as function is expected to throw since we are not mocking the send payment response + } + + expect(sendPaymentMocks.onChainPaymentSend).toHaveBeenCalledWith({ + variables: { + input: { + address: defaultParams.address, + amount: settlementAmount.amount, + walletId: btcSendingWalletParams.sendingWalletDescriptor.id, + }, + }, + }) + }) + }) + + describe("sending from a usd wallet", () => { + it("throws an error", () => { + const usdSendingWalletParams = { + ...defaultParams, + unitOfAccountAmount: testAmount, + sendingWalletDescriptor: usdSendingWalletDescriptor, + } + expect(() => createNoAmountOnchainPaymentDetails(usdSendingWalletParams)).toThrow() + }) + }) + + it("cannot calculate fee or send payment with zero amount", () => { + const params: PaymentDetails.CreateNoAmountOnchainPaymentDetailsParams = + { + ...defaultParams, + unitOfAccountAmount: zeroAmount, + } + const paymentDetails = createNoAmountOnchainPaymentDetails(params) + expectCannotGetFee(paymentDetails) + expectCannotSendPayment(paymentDetails) + }) + + it("cannot set memo if memo is provided", () => { + const paramsWithMemo = { + ...defaultParams, + destinationSpecifiedMemo: "sender memo", + } + const paymentDetails = createNoAmountOnchainPaymentDetails(paramsWithMemo) + expectDestinationSpecifiedMemoCannotSetMemo( + paymentDetails, + paramsWithMemo.destinationSpecifiedMemo, + ) + }) + + it("can set memo if no memo provided", () => { + const testSetMemo = getTestSetMemo() + testSetMemo({ + defaultParams, + spy, + creatorFunction: createNoAmountOnchainPaymentDetails, + }) + }) + + it("can set amount", () => { + const testSetAmount = getTestSetAmount() + testSetAmount({ + defaultParams, + spy, + creatorFunction: createNoAmountOnchainPaymentDetails, + }) + }) + + it("can set sending wallet descriptor", () => { + const 
testSetSendingWalletDescriptor = getTestSetSendingWalletDescriptor() + testSetSendingWalletDescriptor({ + defaultParams, + spy, + creatorFunction: createNoAmountOnchainPaymentDetails, + }) + }) +}) diff --git a/__tests__/payment-requests/payment-requests.spec.ts b/__tests__/payment-requests/payment-requests.spec.ts new file mode 100644 index 0000000..9cd25ad --- /dev/null +++ b/__tests__/payment-requests/payment-requests.spec.ts @@ -0,0 +1,375 @@ +import { WalletCurrency, LnInvoice } from "@app/graphql/generated" +import { + createPaymentRequestDetails, + CreatePaymentRequestDetailsParams, +} from "@app/screens/receive-bitcoin-screen/payment-requests" +import { + PaymentRequest, + GqlGeneratePaymentRequestMutations, +} from "@app/screens/receive-bitcoin-screen/payment-requests/index.types" +import { MoneyAmount, WalletOrDisplayCurrency } from "@app/types/amounts" +import { createMock } from "ts-auto-mock" + +const usdAmountInvoice = + "lnbc49100n1p3l2q6cpp5y8lc3dv7qnplxhc3z9j0sap4n0hu99g39tl3srx6zj0hrqy2snwsdqqcqzpuxqzfvsp5q6t5f3xeruu4k5sk5nlmxx2kzlw2pydmmjk9g4qqmsc9c6ffzldq9qyyssq9lesnumasvvlvwc7yckvuepklttlvwhjqw3539qqqttsyh5s5j246spy9gezng7ng3d40qsrn6dhsrgs7rccaftzulx5auqqd5lz0psqfskeg4" +const noAmountInvoice = + "lnbc1p3l2qmfpp5t2ne20k97f3n24el9a792fte4q6n7jqr6x8qjjnklgktrdvpqq2sdqqcqzpuxqyz5vqsp5n23d3as4jxvpaemnsnvyynlpsg6pzsmxhn3tcwxealcyh6566nys9qyyssqce802uft9d44llekxqedzufkeaq7anldzpf64s4hmskwd9h5ppe4xrgq4dpq8rc3ph048066wgexjtgw4fs8032xwuazw9kdjcq8ujgpdk07ht" +const btcAmountInvoice = + "lnbc23690n1p3l2qugpp5jeflfqjpxhe0hg3tzttc325j5l6czs9vq9zqx5edpt0yf7k6cypsdqqcqzpuxqyz5vqsp5lteanmnwddszwut839etrgjenfr3dv5tnvz2d2ww2mvggq7zn46q9qyyssqzcz0rvt7r30q7jul79xqqwpr4k2e8mgd23fkjm422sdgpndwql93d4wh3lap9yfwahue9n7ju80ynkqly0lrqqd2978dr8srkrlrjvcq2v5s6k" +const mockOnChainAddress = "tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx" + +const mockLnInvoice = createMock<LnInvoice>({ + paymentRequest: btcAmountInvoice, +}) + +const mockLnInvoiceCreate = jest.fn().mockResolvedValue({ + data: { + lnInvoiceCreate: { + invoice: mockLnInvoice, + errors: [], + }, + }, + errors: [], +}) + +const mockLnUsdInvoice = createMock<LnInvoice>({ + paymentRequest: usdAmountInvoice, +}) + +const mockLnUsdInvoiceCreate = jest.fn().mockResolvedValue({ + data: { + lnUsdInvoiceCreate: { + invoice: mockLnUsdInvoice, + errors: [], + }, + }, + errors: [], +}) + +const mockLnNoAmountInvoice = createMock<LnInvoice>({ + paymentRequest: noAmountInvoice, +}) + +const mockLnNoAmountInvoiceCreate = jest.fn().mockResolvedValue({ + data: { + lnNoAmountInvoiceCreate: { + invoice: mockLnNoAmountInvoice, + errors: [], + }, + }, + errors: [], +}) + +const mockOnChainAddressCurrent = jest.fn().mockResolvedValue({ + data: { + onChainAddressCurrent: { + address: mockOnChainAddress, + errors: [], + }, + }, + errors: [], +}) + +export const mockMutations: GqlGeneratePaymentRequestMutations = { + lnInvoiceCreate: mockLnInvoiceCreate, + lnUsdInvoiceCreate: mockLnUsdInvoiceCreate, + lnNoAmountInvoiceCreate: mockLnNoAmountInvoiceCreate, + onChainAddressCurrent: mockOnChainAddressCurrent, +} + +export const clearMocks = () => { + mockLnInvoiceCreate.mockClear() + mockLnUsdInvoiceCreate.mockClear() + mockLnNoAmountInvoiceCreate.mockClear() + mockOnChainAddressCurrent.mockClear() +} + +describe("create paymentRequestDetails", () => { + const defaultParams = { + memo: "Test", + convertMoneyAmount: <T extends WalletOrDisplayCurrency>( + amount: MoneyAmount<WalletOrDisplayCurrency>, + toCurrency: T, + ): MoneyAmount<T> => { + return { amount: amount.amount, currency: toCurrency } + }, + bitcoinNetwork: "mainnet", + } as const + + const 
defaultExpectedPaymentRequestDetails = { + convertMoneyAmount: defaultParams.convertMoneyAmount, + memo: defaultParams.memo, + } + + describe("with usd receiving wallet", () => { + const usdWalletParams = { + ...defaultParams, + receivingWalletDescriptor: { id: "wallet-id", currency: WalletCurrency.Usd }, + } + + it("creates no amount lightning invoice", async () => { + const usdNoAmountLightningParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Usd + > = { + ...usdWalletParams, + paymentRequestType: PaymentRequest.Lightning, + } + + const paymentRequestDetails = createPaymentRequestDetails( + usdNoAmountLightningParams, + ) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.Lightning, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: noAmountInvoice, + }), + }), + ) + }) + + it("creates no amount lightning invoice with 0 amount", async () => { + const usdNoAmountLightningParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Usd + > = { + ...usdWalletParams, + paymentRequestType: PaymentRequest.Lightning, + unitOfAccountAmount: { amount: 0, currency: WalletCurrency.Usd }, + } + + const paymentRequestDetails = createPaymentRequestDetails( + usdNoAmountLightningParams, + ) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.Lightning, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: noAmountInvoice, + }), + }), + ) + }) + + it("creates amount lightning invoice", async () => { + const usdAmountLightningParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Usd + > = { + ...usdWalletParams, + paymentRequestType: PaymentRequest.Lightning, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + } + + const paymentRequestDetails = createPaymentRequestDetails(usdAmountLightningParams) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.Lightning, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + settlementAmount: { amount: 1, currency: WalletCurrency.Usd }, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + expiration: expect.any(Date), + paymentRequestDisplay: usdAmountInvoice, + }), + }), + ) + }) + }) + + describe("with btc receiving wallet", () => { + const btcWalletParams = { + ...defaultParams, + receivingWalletDescriptor: { id: "wallet-id", currency: WalletCurrency.Btc }, + } + + it("creates no amount lightning invoice", async () => { + const btcNoAmountLightningParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Btc + > = { + ...btcWalletParams, + paymentRequestType: PaymentRequest.Lightning, + } + + const paymentRequestDetails = createPaymentRequestDetails( + btcNoAmountLightningParams, + ) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + 
...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.Lightning, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: noAmountInvoice, + }), + }), + ) + }) + + it("creates amount lightning invoice", async () => { + const btcAmountLightningParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Btc + > = { + ...btcWalletParams, + paymentRequestType: PaymentRequest.Lightning, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + } + + const paymentRequestDetails = createPaymentRequestDetails(btcAmountLightningParams) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.Lightning, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + settlementAmount: { amount: 1, currency: WalletCurrency.Btc }, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: btcAmountInvoice, + }), + }), + ) + }) + + it("creates an amount onchain address", async () => { + const btcOnchainParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Btc + > = { + ...btcWalletParams, + paymentRequestType: PaymentRequest.OnChain, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + } + + const paymentRequestDetails = createPaymentRequestDetails(btcOnchainParams) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.OnChain, + unitOfAccountAmount: { amount: 1, currency: WalletCurrency.Usd }, + settlementAmount: { amount: 1, currency: WalletCurrency.Btc }, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: `bitcoin:tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx?amount=1e-8&message=Test`, + }), + }), + ) + }) + + it("creates a no amount onchain address", async () => { + const btcOnchainParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Btc + > = { + ...btcWalletParams, + paymentRequestType: PaymentRequest.OnChain, + } + + const paymentRequestDetails = createPaymentRequestDetails(btcOnchainParams) + + expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.OnChain, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: `bitcoin:tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx?message=Test`, + }), + }), + ) + }) + + it("creates a no amount onchain address with 0 amount", async () => { + const btcOnchainParams: CreatePaymentRequestDetailsParams< + typeof WalletCurrency.Btc + > = { + ...btcWalletParams, + paymentRequestType: PaymentRequest.OnChain, + unitOfAccountAmount: { amount: 0, currency: WalletCurrency.Usd }, + } + + const paymentRequestDetails = createPaymentRequestDetails(btcOnchainParams) + + 
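// note: with a zero amount provided, the request is expected to fall back to the no-amount flow, so the URI asserted below should carry only the memo and no amount parameter. + 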
expect(paymentRequestDetails).toEqual( + expect.objectContaining({ + ...defaultExpectedPaymentRequestDetails, + paymentRequestType: PaymentRequest.OnChain, + }), + ) + + const paymentRequest = await paymentRequestDetails.generatePaymentRequest( + mockMutations, + ) + + expect(paymentRequest).toEqual( + expect.objectContaining({ + paymentRequest: expect.objectContaining({ + paymentRequestDisplay: `bitcoin:tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx?message=Test`, + }), + }), + ) + }) + }) +}) diff --git a/__tests__/persistent-storage.spec.ts b/__tests__/persistent-storage.spec.ts new file mode 100644 index 0000000..1a07874 --- /dev/null +++ b/__tests__/persistent-storage.spec.ts @@ -0,0 +1,57 @@ +import { + defaultPersistentState, + migrateAndGetPersistentState, +} from "../app/store/persistent-state/state-migrations" +import { defaultTheme } from "@app/theme/default-theme" + +it("uses default state when none is present", async () => { + const state = await migrateAndGetPersistentState({}) + expect(state).toEqual(defaultPersistentState) +}) + +it("migrates persistent state", async () => { + const state = await migrateAndGetPersistentState({ + schemaVersion: 0, + isUsdDisabled: true, + }) + expect(state).toEqual({ + ...defaultPersistentState, + }) +}) + +it("returns default when schema version is not supported", async () => { + const state = await migrateAndGetPersistentState({ + schemaVersion: -2, + }) + expect(state).toEqual(defaultPersistentState) +}) + +it("migration from 4 to 5", async () => { + const state4 = { + schemaVersion: 4, + hasShownStableSatsWelcome: false, + isUsdDisabled: false, + galoyInstance: { + id: "Main", + name: "BBW", + graphqlUri: "https://api.mainnet.galoy.io/graphql", + graphqlWsUri: "wss://api.mainnet.galoy.io/graphql", + posUrl: "https://pay.bbw.sv", + lnAddressHostname: "pay.bbw.sv", + }, + galoyAuthToken: "myToken", + isAnalyticsEnabled: true, + theme: defaultTheme, + } + + const state5 = { + schemaVersion: 5, + galoyInstance: { id: "Main" }, + galoyAuthToken: "myToken", + } + + const res = await migrateAndGetPersistentState(state4) + + expect(res).toStrictEqual(state5) +}) diff --git a/__tests__/receive-bitcoin/helpers.spec.ts b/__tests__/receive-bitcoin/helpers.spec.ts new file mode 100644 index 0000000..4433027 --- /dev/null +++ b/__tests__/receive-bitcoin/helpers.spec.ts @@ -0,0 +1,87 @@ +import { + getPaymentRequestFullUri, + satsToBTC, +} from "../../app/screens/receive-bitcoin-screen/payment-requests/helpers" + +describe("getPaymentRequestFullUri", () => { + it("returns a prefixed bitcoin uri", () => { + const uri = getPaymentRequestFullUri({ + input: "btc1234567890address", + type: "BITCOIN_ONCHAIN", + }) + + expect(uri).toBe("bitcoin:btc1234567890address") + }) + + it("returns a non-prefixed bitcoin uri", () => { + const uri = getPaymentRequestFullUri({ + input: "btc1234567890address", + type: "BITCOIN_ONCHAIN", + prefix: false, + }) + + expect(uri).toBe("btc1234567890address") + }) + + it("contains amount in the uri", () => { + const uri = getPaymentRequestFullUri({ + input: "btc1234567890address", + type: "BITCOIN_ONCHAIN", + amount: 100, + }) + + expect(uri).toBe(`bitcoin:btc1234567890address?amount=${100 / 10 ** 8}`) + }) + + it("contains memo in the uri", () => { + const uri = getPaymentRequestFullUri({ + input: "btc1234567890address", + type: "BITCOIN_ONCHAIN", + memo: "will not forget", + }) + + expect(uri).toBe(`bitcoin:btc1234567890address?message=will%2520not%2520forget`) + }) + + it("contains memo and amount in the uri", () => { + const uri = 
getPaymentRequestFullUri({ + input: "btc1234567890address", + type: "BITCOIN_ONCHAIN", + amount: 100, + memo: "will not forget", + }) + + expect(uri).toBe( + `bitcoin:btc1234567890address?amount=${ + 100 / 10 ** 8 + }&message=will%2520not%2520forget`, + ) + }) + + it("returns a non-prefixed lightning uri", () => { + const uri = getPaymentRequestFullUri({ + input: "lnurl12567890", + type: "LIGHTNING_BTC", + }) + + expect(uri).toBe("lnurl12567890") + }) + + it("returns an uppercase string", () => { + const uri = getPaymentRequestFullUri({ + input: "lnurl12567890", + uppercase: true, + type: "LIGHTNING_BTC", + }) + + expect(uri).toMatch(/^[^a-z]*$/g) + }) +}) + +describe("satsToBTC", () => { + it("returns the correct BTC number", () => { + expect(satsToBTC(1000)).toBe(0.00001) + expect(satsToBTC(0)).toBe(0) + expect(satsToBTC(-1000)).toBe(-0.00001) + }) +}) diff --git a/__tests__/screens/helper.tsx b/__tests__/screens/helper.tsx new file mode 100644 index 0000000..fff1ee2 --- /dev/null +++ b/__tests__/screens/helper.tsx @@ -0,0 +1,38 @@ +import { MockedProvider } from "@apollo/client/testing" +import React, { PropsWithChildren } from "react" +import { StoryScreen } from "../../.storybook/views" +import { createCache } from "../../app/graphql/cache" +import { IsAuthedContextProvider } from "../../app/graphql/is-authed-context" + +import theme from "@app/rne-theme/theme" +import { NavigationContainer } from "@react-navigation/native" +import { ThemeProvider } from "@rneui/themed" + +import mocks from "@app/graphql/mocks" +import { createStackNavigator } from "@react-navigation/stack" +import TypesafeI18n from "@app/i18n/i18n-react" +import { detectDefaultLocale } from "@app/utils/locale-detector" + +const Stack = createStackNavigator() + +export const ContextForScreen: React.FC<PropsWithChildren> = ({ children }) => ( + <MockedProvider mocks={mocks} cache={createCache()}> + <StoryScreen> + <IsAuthedContextProvider value={true}> + <ThemeProvider theme={theme}> + <NavigationContainer> + <Stack.Navigator> + <Stack.Screen name="Screen"> + {() => ( + <TypesafeI18n locale={detectDefaultLocale()}> + {children} + </TypesafeI18n> + )} + </Stack.Screen> + </Stack.Navigator> + </NavigationContainer> + </ThemeProvider> + </IsAuthedContextProvider> + </StoryScreen> + </MockedProvider> +) diff --git a/__tests__/screens/home.spec.tsx b/__tests__/screens/home.spec.tsx new file mode 100644 index 0000000..445c7a6 --- /dev/null +++ b/__tests__/screens/home.spec.tsx @@ -0,0 +1,13 @@ +import React from "react" +import { HomeScreen } from "../../app/screens/home-screen" + +import { render } from "@testing-library/react-native" +import { ContextForScreen } from "./helper" + +it("HomeAuthed", () => { + render( + <ContextForScreen> + <HomeScreen /> + </ContextForScreen>, + ) +}) diff --git a/__tests__/screens/receive.spec.tsx b/__tests__/screens/receive.spec.tsx new file mode 100644 index 0000000..90d3c92 --- /dev/null +++ b/__tests__/screens/receive.spec.tsx @@ -0,0 +1,13 @@ +import React from "react" + +import { render } from "@testing-library/react-native" +import { ContextForScreen } from "./helper" +import ReceiveWrapperScreen from "@app/screens/receive-bitcoin-screen/receive-wrapper" + +it("Receive", () => { + render( + <ContextForScreen> + <ReceiveWrapperScreen /> + </ContextForScreen>, + ) +}) diff --git a/__tests__/screens/send-confirmation.spec.tsx b/__tests__/screens/send-confirmation.spec.tsx new file mode 100644 index 0000000..7a8cf8a --- /dev/null +++ b/__tests__/screens/send-confirmation.spec.tsx @@ -0,0 +1,21 @@ +import React from "react" + +import { act, render } from "@testing-library/react-native" +import { Intraledger } from "../../app/screens/send-bitcoin-screen/send-bitcoin-confirmation-screen.stories" +import { ContextForScreen } from "./helper" + +it("SendScreen Confirmation", async () => { + const { findByLabelText } = render( + <ContextForScreen> + <Intraledger /> + </ContextForScreen>, + ) + + // it seems we need multiple act calls because the component re-renders multiple times + // this could probably be debugged with why-did-you-render + await act(async () => {}) + await 
act(async () => {}) + + const { children } = await findByLabelText("Successful Fee") + expect(children).toEqual(["₦0 ($0.00)"]) +}) diff --git a/__tests__/screens/send-destination.spec.tsx b/__tests__/screens/send-destination.spec.tsx new file mode 100644 index 0000000..d44842c --- /dev/null +++ b/__tests__/screens/send-destination.spec.tsx @@ -0,0 +1,22 @@ +import React from "react" + +import SendBitcoinDestinationScreen from "@app/screens/send-bitcoin-screen/send-bitcoin-destination-screen" +import { render } from "@testing-library/react-native" +import { ContextForScreen } from "./helper" + +const sendBitcoinDestination = { + name: "sendBitcoinDestination", + key: "sendBitcoinDestination", + params: { + payment: "", + username: "", + }, +} as const + +it("SendScreen Destination", () => { + render( + <ContextForScreen> + <SendBitcoinDestinationScreen route={sendBitcoinDestination} /> + </ContextForScreen>, + ) +}) diff --git a/__tests__/screens/send-details.spec.tsx b/__tests__/screens/send-details.spec.tsx new file mode 100644 index 0000000..e1a8ce1 --- /dev/null +++ b/__tests__/screens/send-details.spec.tsx @@ -0,0 +1,14 @@ +import React from "react" + +import { render } from "@testing-library/react-native" +import { ContextForScreen } from "./helper" + +import { Intraledger } from "../../app/screens/send-bitcoin-screen/send-bitcoin-details-screen.stories" + +it("SendScreen Details", () => { + render( + <ContextForScreen> + <Intraledger /> + </ContextForScreen>, + ) +}) diff --git a/__tests__/timer.spec.ts b/__tests__/timer.spec.ts new file mode 100644 index 0000000..8e37e94 --- /dev/null +++ b/__tests__/timer.spec.ts @@ -0,0 +1,23 @@ +import { parseTimer } from "../app/utils/timer" + +describe("parseTimer", () => { + it("parses time when it is more than 1 minute", () => { + const outputTime = parseTimer(65) + expect(outputTime).toStrictEqual("01:05") + }) + + it("parses time when it is less than 1 minute", () => { + const outputTime = parseTimer(40) + expect(outputTime).toStrictEqual("00:40") + }) + + it("parses time when it is less than 10 seconds", () => { + const outputTime = parseTimer(8) + expect(outputTime).toStrictEqual("00:08") + }) + + it("parses time when it is negative", () => { + const outputTime = parseTimer(-5) + expect(outputTime).toStrictEqual("00:00") + }) +}) diff --git a/__tests__/utils/earns-utils.test.ts b/__tests__/utils/earns-utils.test.ts new file mode 100644 index 0000000..436c706 --- /dev/null +++ b/__tests__/utils/earns-utils.test.ts @@ -0,0 +1,380 @@ +import { i18nObject } from "../../app/i18n/i18n-util" +import { loadLocale } from "../../app/i18n/i18n-util.sync" +import { getQuizQuestionsContent } from "@app/screens/earns-screen/earns-utils" +const expectedEnglishQuizSections = [ + { + section: { id: "bitcoinWhatIsIt", title: "Bitcoin: What is it?" }, + content: [ + { + id: "whatIsBitcoin", + title: "So what exactly is Bitcoin?", + text: "Bitcoin is digital money. \n\nIt can be transferred instantly and securely between any two people in the world — without the need for a bank or any other financial company in the middle.", + question: "So what exactly is Bitcoin?", + answers: ["Digital money", "A video game", "A new cartoon character"], + feedback: [ + "Correct. You just earned 1 “sat”!", + "Incorrect, please try again.", + "Nope. At least not one that we know of!", + ], + }, + { + id: "sat", + title: 'I just earned a “Sat". What is that?', + text: "One “Sat” is the smallest unit of a bitcoin. \n\nWe all know that one US Dollar can be divided into 100 cents. Similarly, one Bitcoin can be divided into 100,000,000 sats. \n\nIn fact, you do not need to own one whole bitcoin in order to use it. 
You can use bitcoin whether you have 20 sats, 3000 sats — or 100,000,000 sats (which you now know is equal to one bitcoin).", + question: 'I just earned a “Sat". What is that?', + answers: [ + "The smallest unit of Bitcoin", + "A small satellite", + "A space cat 🐱🚀", + ], + feedback: [ + "Correct. You just earned another two sats!!", + "Maybeâ€Ļ but that is not the correct answer in this context 🙂", + "Ummm.... not quite!", + ], + }, + { + id: "whereBitcoinExist", + title: "Where do the bitcoins exist?", + text: "Bitcoin is a new form of money. It can be used by anyone, anytime -- anywhere in the world. \n\nIt is not tied to a specific government or region (like US Dollars). There are also no paper bills, metal coins or plastic cards. \n\nEverything is 100% digital. Bitcoin is a network of computers running on the internet. \n\nYour bitcoin is easily managed with software on your smartphone or computer!", + question: "Where do the bitcoins exist?", + answers: ["On the Internet", "On the moon", "In a Federal bank account"], + feedback: [ + "Correct. You just earned another 5 sats.", + "Incorrect. Wellâ€Ļ at least not yet ;)", + "Wrong. Please try again.", + ], + }, + { + id: "whoControlsBitcoin", + title: "Who controls Bitcoin?", + text: "Bitcoin is not controlled by any person, company or government. \n\nIt is run by the community of users -- people and companies all around the world -- voluntarily running bitcoin software on their computers and smartphones.", + question: "Who controls Bitcoin?", + answers: [ + "A voluntary community of users around the world", + "Mr Burns from The Simpsons", + "The government of France", + ], + feedback: [ + "That is right. Bitcoin is made possible by people all around the world running bitcoin software on their computers and smartphones.", + "An amusing thought — but not correct!", + "Wrong. There is no company nor government that controls Bitcoin.", + ], + }, + { + id: "copyBitcoin", + title: + "If Bitcoin is digital money, can’t someone just copy it — and create free money?", + text: "The value of a bitcoin can never be copied. This is the very reason why Bitcoin is such a powerful new invention!!\n\nMost digital files — such as an iPhone photo, an MP3 song, or a Microsoft Word document — can easily be duplicated and shared. \n\nThe Bitcoin software uniquely prevents the duplication — or “double spending” — of digital money. We will share exactly how this works later on!", + question: + "If Bitcoin is digital money, can’t someone just copy it — and create free money?", + answers: [ + "No — it is impossible to copy or duplicate the value of bitcoin", + "Yes, you can copy bitcoins just as easily as copying a digital photo", + "Yes, but copying bitcoin requires very specialized computers", + ], + feedback: [ + "That is absolutely correct!", + "You know that it is not true. Try again.", + "Incorrect. There is no way for anyone to copy, or create a duplicate, of bitcoin.", + ], + }, + ], + }, + { + section: { id: "WhatIsMoney", title: "What is Money? " }, + content: [ + { + id: "moneySocialAggrement", + title: "Money is a social agreement.", + text: "Money requires people to trust. \n\nPeople trust the paper dollar bills in their pocket. They trust the digits in their online bank account. They trust the balance on a store gift card will be redeemable. 
\n\nHaving money allows people to easy trade it immediately for a good, or a service.", + question: "Why does money have value?", + answers: [ + "Because people trust that other people will value money similarly", + "Because your mother told you so", + "Because a dollar bill is worth its weight in gold", + ], + feedback: [ + "Correct. This is what allows money to work!", + "She may well have. But that is not the correct answer here!", + "Nope. In the past you could exchange US dollars for gold. But this is no longer the case.", + ], + }, + { + id: "coincidenceOfWants", + title: "Money solves the “coincidence of wants”... What is that??", + text: "Centuries ago, before people had money, they would barter -- or haggle over how to trade one unique item, in exchange for another item or service. \n\nLet’s say you wanted to have a meal at the local restaurant, and offered the owner a broom. The owner might say “no” -- but I will accept three hats instead, if you happen to have them. \n\nYou can imagine how difficult and inefficient a “barter economy” would be ! \n\nBy contrast, with money, you can simply present a $20 bill. And you know that the restaurant owner will readily accept it.", + question: "Which coincidence does money solve?", + answers: [ + "Coincidence of wants", + "Coincidence of day and night", + "Coincidence of the moon blocking the sun", + ], + feedback: [ + "That is right. Money allows you to easily purchase something, without haggling about the form of payment", + "No silly, you know that is not the answer.", + "Not quite. We call that a solar eclipse 🌚", + ], + }, + { + id: "moneyEvolution", + title: "Money has evolved, since almost the beginning of time.", + text: "Thousands of years ago, society in Micronesia used very large and scarce stones as a form of agreed currency. \n\nStarting in the 1500’s, rare Cowrie shells (found in the ocean) became commonly used in many nations as a form of money.\n\nAnd for millennia, gold has been used as a form of money for countries around the world -- including the United States (until 1971).", + question: + "What are some items that have been historically used as a unit of money?", + answers: [ + "Stones, seashells and gold", + "Tiny plastic Monopoly board game houses", + "Coins made of chocolate", + ], + feedback: [ + "Correct. Items that are rare and difficult to copy have often been used as money.", + "Wrong. They may have value when playing a game -- but not in the real word!", + "Nope. They may be tasty. But they are not useful as money.", + ], + }, + { + id: "whyStonesShellGold", + title: "Why were stones, shells and gold commonly used as money in the past?", + text: "Well, these items all had some -- but not all -- of the characteristics of good money. \n\nSo what characteristics make for “good” money? \nScarce: not abundant, nor easy to reproduce or copy \nAccepted: relatively easy for people to verify its authenticity \nDurable: easy to maintain, and does not perish or fall apart\nUniform: readily interchangeable with another item of the same form\nPortable: easy to transport\nDivisible: can be split and shared in smaller pieces", + question: "Why were stones, seashells and gold used as units of money?", + answers: [ + "Because they have key characteristics -- such as being durable, uniform and divisible.", + "Because they are pretty and shiny.", + "Because they fit inside of your pocket", + ], + feedback: [ + "Correct. More key characteristics include being scarce and portable.", + "Incorrect. 
That may be true, but alone are not great characteristics of money.", + "Not quite. Although these items were surely portable, that alone was not the reason to be used as money.", + ], + }, + { + id: "moneyIsImportant", + title: "Money is important to individuals", + text: "Everybody knows that money matters.\n\nMost people exchange their time and energy -- in the form of work -- to obtain money. People do so, to be able to buy goods and services today -- and in the future.", + question: "What is the primary reason money is important?", + answers: [ + "Money allows people to buy goods and services today -- and tomorrow.", + "Money allows you to go to the moon.", + "Money is the solution to all problems.", + ], + feedback: [ + "That is right!", + "Incorrect. Although that may change in the future ;)", + "Not quite. Although some people may believe such, this answer does not address the primary purpose of money.", + ], + }, + { + id: "moneyImportantGovernement", + title: "Money is also important to governments", + text: "Modern-day economies are organized by nation-states: USA, Japan, Switzerland, Brazil, Norway, China, etc. \n\nAccordingly, in most every nation, the government holds the power to issue and control money. \n\nIn the United States, the Central Bank (known as the Federal Reserve, or “Fed”) can print, or create, more US Dollars at any time it wants. \n\nThe “Fed” does not need permission from the President, nor Congress, and certainly not from US citizens. \n\nImagine if you had the ability to print US Dollars anytime you wanted to -- what would you do??", + question: "Who can legally print US Dollars, anytime they wish?", + answers: [ + "The US Central Bank (The Federal Reserve)", + "Mr Burns from The Simpsons", + "A guy with a printing press in his basement", + ], + feedback: [ + "Correct. The US Government can print as much money as they want at any time.", + "Incorrect. Although it did seem like he always had a lot of money.", + "No. Whilst some people do create fake dollar bills, it is definitely not legal!", + ], + }, + ], + }, + { + section: { id: "HowDoesMoneyWork", title: "How Does Money Work? " }, + content: [ + { + id: "WhatIsFiat", + title: "Fiat Currency: What is that?", + text: "All national currencies in circulation today are called “fiat” money. This includes US Dollars, Japanese Yen, Swiss Francs, and so forth. \n\nThe word “fiat” is latin for “by decree” -- which means “by official order”. \n\nThis means that all fiat money -- including the US Dollar -- is simply created by the order of the national government.", + question: "Who creates fiat money, such as US Dollars or Swiss Francs?", + answers: [ + "It is created by order of the National government in a given country.", + "By the manager of the local branch bank.", + "The Monopoly Money Man.", + ], + feedback: [ + "Correct. The central bank of a government creates fiat money.", + "Incorrect. A local bank can only manage money that has been previously created by the government.", + "Nope. Try again!", + ], + }, + { + id: "whyCareAboutFiatMoney", + title: "I trust my government. \nWhy should I care about fiat money?", + text: "As shared in a prior quiz, the US Central Bank is the Federal Reserve, or the “Fed”.\n\nThe Fed can print more dollars at any time -- and does not need permission from the President, nor Congress, and certainly not from US citizens. 
\n\nHaving control of money can be very tempting for authorities to abuse -- and often time leads to massive inflation, arbitrary confiscation and corruption. \n\nIn fact, Alan Greenspan, the famous former chairman of The Fed, famously said the US “can pay any debt that it has, because we can always print more to do that”.", + question: "Why should I care about the government controlling fiat money?", + answers: [ + "All fiat currency is eventually abused by government authorities.", + "Local banks might not have enough vault space to hold all of the dollar bills.", + "There might not be enough trees to make paper for all of the additional dollar bills.", + ], + feedback: [ + "Correct. Throughout history, governments have been unable to resist the ability to print money, as they effectively have no obligation to repay this money.", + "Nope, that is certainly not the case.", + "Wrong. Please try again.", + ], + }, + { + id: "GovernementCanPrintMoney", + title: "Who should care that the government can print unlimited money?", + text: "Well, everybody should care! \n\nThe practice of government printing money -- or increasing the supply of dollars -- leads to inflation.\n\nInflation is an increase in the price of goods and services. In other words, the price for something in the future will be more expensive than today.\n\nSo what does inflation mean for citizens? \n\nIn the United Kingdom, the Pound Sterling has lost 99.5% of its value since being introduced over 300 years ago. \n\nIn the United States, the dollar has lost 97% of its value since the end of WWI, about 100 years ago. \n\nThis means a steak that cost $0.30 in 1920... was $3 in 1990â€Ļ and about $15 today, in the year 2020!", + question: "What does it mean when the government prints money?", + answers: [ + "The printing of additional money leads to inflation.", + "People must exchange old dollar bills at the bank every year.", + "The appearance of the dollar bill changes.", + ], + feedback: [ + "Correct. This means that goods and services will cost more in the future.", + "Nope. Older dollar bills are just as valid as newer ones.", + "Incorrect. Although the government may issue new looks for bills, this has nothing to do with increasing the money supply.", + ], + }, + { + id: "FiatLosesValueOverTime", + title: "Does this mean that all fiat money loses value over time?", + text: "That is correct. \n\nIn the history of the world, there have been 775 fiat currencies created. Most no longer exist, and the average life for any fiat money is only 27 years.\n\nThe British Pound is the oldest fiat currency. It has lost more than 99% of its value since 1694. \n\nThere is no precedent for any fiat money maintaining its value over time. This is inflation. \nIt is effectively a form of theft of your own hard earned money !", + question: "What happens to the value of all fiat money over time?", + answers: [ + "Every fiat currency that ever existed has lost a massive amount of value.", + "The value stays the same forever.", + "The look and design of paper bills is updated every 10 years or so.", + ], + feedback: [ + "Correct. This is true even for USD, which has lost 97% of its value during the last 100 years.", + "Incorrect. Please try again.", + "Not quite. Although the design of papers bills may change, this has nothing to do with their value.", + ], + }, + { + id: "OtherIssues", + title: "OK, fiat money loses value over time. Are there other issues?", + text: "Yes, there are many other issues that exist with modern fiat money. 
\n\nFirst, it can be extremely difficult to move money around the world. Often, governments will outright restrict the movement -- and sometimes even confiscate money -- without a valid reason or explanation. And even when you can send money, high transaction fees make it very expensive.\n\nSecond, even in the US, there has been a complete loss of privacy, as the majority of commerce takes places with debit and credit cards, as well as online with other systems such as PayPal and Apple Pay.\n\nEver notice how an ad appears in your social media or Gmail just moments after searching for a certain product or service? This is known as “surveillance capitalism”, and is based on companies selling your personal financial data.", + question: "What are some other issues that exist with fiat money?", + answers: [ + "Money is difficult to move around the world, and can also be surveilled.", + "Money is no longer needed in the 21st Century.", + "Money is the root of all evil.", + ], + feedback: [ + "Correct. We will explain more about these issues in subsequent quiz modules. Keep digging!!", + "Wrong answer. You know that is not true.", + "While some may believe this to be so, it is not the answer we are looking for here.", + ], + }, + ], + }, + { + section: { id: "BitcoinWhySpecial", title: "Bitcoin: Why is it special? " }, + content: [ + { + id: "LimitedSupply", + title: "Special Characteristic #1:\nLimited Supply", + text: "Governments can print fiat money in unlimited quantities. \n\nBy way of contrast, the supply of Bitcoin is fixed — and can never exceed 21 million coins. \n\nA continually increasing supply of fiat money creates inflation. This means that the money you hold today is less valuable in the future. \n\nOne simple example: \nA loaf of bread that cost about 8 cents in 1920. In the year 1990 one loaf cost about $1.00, and today the price is closer to $2.50 ! \n\nThe limited supply of bitcoin has the opposite effect, one of deflation. \n\nThis means that the bitcoin you hold today is designed to be more valuable in the future — because it is scarce.", + question: "Is the supply of bitcoin limited forever?", + answers: [ + "Yes. There can never be more than 21 million bitcoin created.", + "No. The government can create more bitcoin at any time.", + "No, the bitcoin software can be changed to allow more bitcoins to be created.", + ], + feedback: [ + "Correct. By limiting the amount that can be created, Bitcoin is designed to increase in value over time.", + "Wrong answer. The government has no control over Bitcoin.", + "Incorrect. One of the key attributes of bitcoin is that the supply is limited forever.", + ], + }, + { + id: "Decentralized", + title: "Special Characteristic #2: Decentralized", + text: "Fiat money is controlled by banks and governments — which is why people refer to it as a “centralized” currency.\n\nBitcoin is not controlled by any person, government or company — which makes it “decentralized” \n\nNot having banks involved means that nobody can deny you access to bitcoin — because of race, gender, income, credit history, geographical location — or any other factor. \n\nAnybody — anywhere in the world — can access and use Bitcoin anytime you want. All you need is a computer or smartphone, and an internet connection!", + question: "Is bitcoin centralized?", + answers: [ + "No. Bitcoin is completely “decentralized”.", + "Yes. It is centrally controlled by the United Nations.", + "Yes. 
It is centrally controlled by the world’s largest banks.", + ], + feedback: [ + "That is correct. There is no company, government or institution that controls bitcoin. Anyone can use bitcoin — all need is a smartphone and an internet connection.", + "Wrong answer. Please try again.", + "Incorrect. You already know this is not true!", + ], + }, + { + id: "NoCounterfeitMoney", + title: "Special Characteristic #3: \nNo Counterfeit Money", + text: "Paper money, checks and credit card transactions can all be counterfeit, or faked. \n\nThe unique software that runs the Bitcoin network eliminates the possibility of duplicating money for counterfeit purposes. \n\nNew bitcoin can only be issued if there is agreement amongst the participants in the network. People who are voluntarily running bitcoin software on their own computers and smartphones.\n\nThis ensures that it is impossible to counterfeit, or create fake bitcoins.", + question: "Can people counterfeit Bitcoin?", + answers: [ + "No. It is impossible to counterfeit Bitcoin.", + "Yes. Although creating fake bitcoin requires very specialized computers.", + "Yes. The government can print as much bitcoin as it likes.", + ], + feedback: [ + "That is the right answer. In a subsequent quiz, Honey Badger will explain details as to why this is so!", + "Incorrect. There is no way for anyone to copy or duplicate the value of a bitcoin.", + "Wrong. Although the government can print unlimited dollars, it can not print bitcoin.", + ], + }, + { + id: "HighlyDivisible", + title: "Special Characteristic #4: \nHighly Divisible", + text: 'Old-fashioned fiat money can only be spent in amounts as small as one penny — or two decimal places for one US Dollar ($0.01).\n\nOn the other hand, Bitcoin can be divided 100,000,000 times over. This means that you could spend as little as â‚ŋ0.00000001. You will note the "â‚ŋ" symbol, which is the Bitcoin equivalent of "$". Sometimes you will also see the use of BTC, instead of â‚ŋ.\n\nBy way of contrast, Bitcoin can handle very small payments — even those less than one US penny!', + question: "What is the smallest amount of Bitcoin one can own, or use?", + answers: [ + "0.00000001 BTC", + "One whole bitcoin. It is not possible to use anything less.", + "0.01 BTC", + ], + feedback: [ + "Yes. You can divide a bitcoin into 100,000,000 pieces. As you already know, the smallest unit of bitcoin — B0.00000001 — is known as a “sat”.", + "Wrong. Bitcoin is highly divisible. You can easily use a very small fraction of a bitcoin.", + "Incorrect. Although the smallest unit of US currency is one penny, a bitcoin is divisible by much more than 100x.", + ], + }, + { + id: "securePartOne", + title: "Special Characteristic #5: \nSecure -- Part I", + text: "The bitcoin network is worth well over $100 billion today. Accordingly, the network must be very secure — so that money is never stolen. \n\nBitcoin is known as the world’s first cryptocurrency. \n\nThe “crypto” part of the name comes from cryptography. Simply put, cryptography protects information through very complex math functions. \n\nMost people do not realize — but Bitcoin is actually the most secure computer network in the world ! \n\n(you may have heard about bitcoin “hacks” — which we will debunk in the next quiz)", + question: "Is the Bitcoin network secure?", + answers: [ + "Yes. The bitcoin network is very secure.", + "Maybe. It depends on the day of the week.", + "No. It is open source software, and is easily attacked.", + ], + feedback: [ + "Correct. 
In fact, the Bitcoin network has never once been hacked. Answer the next question to learn more!", + "Nice try, but wrong. The bitcoin network is safe and secure — 24 hours a day, 365 days a year.", + "Incorrect. Although bitcoin is indeed “open source” software — available to the public for free — it is still extremely secure.", + ], + }, + { + id: "securePartTwo", + title: "Special Characteristic #5: \nSecure -- Part II", + text: "To be direct: the bitcoin network itself has never been hacked. Never once.\n\nThen what exactly has been hacked? \n\nCertain digital wallets that did not have proper security in place. \n\nJust like a physical wallet holds fiat currency (in the form of paper bills), digital wallets hold some amount of bitcoin. \n\nIn the physical world, criminals rob banks — and walk away with US Dollars. The fact that someone robbed a bank has no bearing on whether the US Dollar is stable or reliable money. \n\nSimilarly, some computer hackers have stolen bitcoin from insecure digital wallets — the online equivalent of a bank robbery. \n\nHowever, it is important to know that the bitcoin network has never been hacked or compromised!", + question: "Has Bitcoin ever been hacked?", + answers: [ + "No. Bitcoin has never been hacked.", + "Yes. Bitcoin gets hacked frequently.", + "Yes. Bitcoin usually gets hacked on holidays, when traditional banks are closed.", + ], + feedback: [ + "That is correct. The bitcoin network has never been compromised. However, it is important to use only secure digital wallets to keep your personal bitcoins safe at all times.", + "Wrong. Please try again.", + "No, silly, you know that is not the correct answer.", + ], + }, + ], + }, +] describe("Earn utils test", () => { + it("Converts quiz sections to proper types", () => { + loadLocale("en") + const LL = i18nObject("en") + const quizSectionContent = getQuizQuestionsContent({ LL }) + expect(quizSectionContent).toStrictEqual(expectedEnglishQuizSections) + }) +}) diff --git a/__tests__/utils/locale-detector.test.ts b/__tests__/utils/locale-detector.test.ts new file mode 100644 index 0000000..80c0cf5 --- /dev/null +++ b/__tests__/utils/locale-detector.test.ts @@ -0,0 +1,27 @@ +import { matchOsLocaleToSupportedLocale } from "../../app/utils/locale-detector" + +describe("matchOsLocaleToSupportedLocale", () => { + it("exactly matches a supported locale", () => { + const supportedCountryAndLang = [ + { countryCode: "CA", languageTag: "fr-CA", languageCode: "fr", isRTL: false }, + ] + const locale = matchOsLocaleToSupportedLocale(supportedCountryAndLang) + expect(locale).toEqual("fr") + }) + + it("approximately matches a supported locale", () => { + const unsupportedCountrySupportedLang = [ + { countryCode: "SV", languageTag: "es-SV", languageCode: "es", isRTL: false }, + ] + const locale = matchOsLocaleToSupportedLocale(unsupportedCountrySupportedLang) + expect(locale).toEqual("es") + }) + + it("returns English when there is no locale match", () => { + const unsupportedCountryAndLang = [ + { countryCode: "XY", languageTag: "na-XY", languageCode: "na", isRTL: false }, + ] + const locale = matchOsLocaleToSupportedLocale(unsupportedCountryAndLang) + expect(locale).toEqual("en") + }) +}) diff --git a/android/Gemfile b/android/Gemfile new file mode 100644 index 0000000..cdd3a6b --- /dev/null +++ b/android/Gemfile @@ -0,0 +1,6 @@ +source "https://rubygems.org" + +gem "fastlane" + +plugins_path = File.join(File.dirname(__FILE__), 'fastlane', 'Pluginfile') +eval_gemfile(plugins_path) if 
File.exist?(plugins_path) diff --git a/android/Gemfile.lock b/android/Gemfile.lock new file mode 100644 index 0000000..6c2d92d --- /dev/null +++ b/android/Gemfile.lock @@ -0,0 +1,237 @@ +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.6) + rexml + addressable (2.8.4) + public_suffix (>= 2.0.2, < 6.0) + artifactory (3.0.15) + atomos (0.1.3) + aws-eventstream (1.2.0) + aws-partitions (1.751.0) + aws-sdk-core (3.171.0) + aws-eventstream (~> 1, >= 1.0.2) + aws-partitions (~> 1, >= 1.651.0) + aws-sigv4 (~> 1.5) + jmespath (~> 1, >= 1.6.1) + aws-sdk-kms (1.63.0) + aws-sdk-core (~> 3, >= 3.165.0) + aws-sigv4 (~> 1.1) + aws-sdk-s3 (1.121.0) + aws-sdk-core (~> 3, >= 3.165.0) + aws-sdk-kms (~> 1) + aws-sigv4 (~> 1.4) + aws-sigv4 (1.5.2) + aws-eventstream (~> 1, >= 1.0.2) + babosa (1.0.4) + claide (1.1.0) + colored (1.2) + colored2 (3.1.2) + commander (4.6.0) + highline (~> 2.0.0) + declarative (0.0.20) + digest-crc (0.6.4) + rake (>= 12.0.0, < 14.0.0) + domain_name (0.5.20190701) + unf (>= 0.0.5, < 1.0.0) + dotenv (2.8.1) + emoji_regex (3.2.3) + excon (0.99.0) + faraday (1.10.3) + faraday-em_http (~> 1.0) + faraday-em_synchrony (~> 1.0) + faraday-excon (~> 1.1) + faraday-httpclient (~> 1.0) + faraday-multipart (~> 1.0) + faraday-net_http (~> 1.0) + faraday-net_http_persistent (~> 1.0) + faraday-patron (~> 1.0) + faraday-rack (~> 1.0) + faraday-retry (~> 1.0) + ruby2_keywords (>= 0.0.4) + faraday-cookie_jar (0.0.7) + faraday (>= 0.8.0) + http-cookie (~> 1.0.0) + faraday-em_http (1.0.0) + faraday-em_synchrony (1.0.0) + faraday-excon (1.1.0) + faraday-httpclient (1.0.1) + faraday-multipart (1.0.4) + multipart-post (~> 2) + faraday-net_http (1.0.1) + faraday-net_http_persistent (1.2.0) + faraday-patron (1.0.0) + faraday-rack (1.0.0) + faraday-retry (1.0.3) + faraday_middleware (1.2.0) + faraday (~> 1.0) + fastimage (2.2.6) + fastlane (2.212.2) + CFPropertyList (>= 2.3, < 4.0.0) + addressable (>= 2.8, < 3.0.0) + artifactory (~> 3.0) + aws-sdk-s3 (~> 1.0) + babosa (>= 1.0.3, < 2.0.0) + bundler (>= 1.12.0, < 3.0.0) + colored + commander (~> 4.6) + dotenv (>= 2.1.1, < 3.0.0) + emoji_regex (>= 0.1, < 4.0) + excon (>= 0.71.0, < 1.0.0) + faraday (~> 1.0) + faraday-cookie_jar (~> 0.0.6) + faraday_middleware (~> 1.0) + fastimage (>= 2.1.0, < 3.0.0) + gh_inspector (>= 1.1.2, < 2.0.0) + google-apis-androidpublisher_v3 (~> 0.3) + google-apis-playcustomapp_v1 (~> 0.1) + google-cloud-storage (~> 1.31) + highline (~> 2.0) + json (< 3.0.0) + jwt (>= 2.1.0, < 3) + mini_magick (>= 4.9.4, < 5.0.0) + multipart-post (~> 2.0.0) + naturally (~> 2.2) + optparse (~> 0.1.1) + plist (>= 3.1.0, < 4.0.0) + rubyzip (>= 2.0.0, < 3.0.0) + security (= 0.1.3) + simctl (~> 1.6.3) + terminal-notifier (>= 2.0.0, < 3.0.0) + terminal-table (>= 1.4.5, < 2.0.0) + tty-screen (>= 0.6.3, < 1.0.0) + tty-spinner (>= 0.8.0, < 1.0.0) + word_wrap (~> 1.0.0) + xcodeproj (>= 1.13.0, < 2.0.0) + xcpretty (~> 0.3.0) + xcpretty-travis-formatter (>= 0.0.3) + fastlane-plugin-browserstack (0.3.2) + rest-client (~> 2.0, >= 2.0.2) + fastlane-plugin-huawei_appgallery_connect (1.0.23) + fastlane-plugin-increment_version_code (0.4.3) + fastlane-plugin-versioning_android (0.1.1) + gh_inspector (1.1.3) + google-apis-androidpublisher_v3 (0.39.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-core (0.11.0) + addressable (~> 2.5, >= 2.5.1) + googleauth (>= 0.16.2, < 2.a) + httpclient (>= 2.8.1, < 3.a) + mini_mime (~> 1.0) + representable (~> 3.0) + retriable (>= 2.0, < 4.a) + rexml + webrick + google-apis-iamcredentials_v1 (0.17.0) + 
google-apis-core (>= 0.11.0, < 2.a) + google-apis-playcustomapp_v1 (0.13.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-storage_v1 (0.19.0) + google-apis-core (>= 0.9.0, < 2.a) + google-cloud-core (1.6.0) + google-cloud-env (~> 1.0) + google-cloud-errors (~> 1.0) + google-cloud-env (1.6.0) + faraday (>= 0.17.3, < 3.0) + google-cloud-errors (1.3.1) + google-cloud-storage (1.44.0) + addressable (~> 2.8) + digest-crc (~> 0.4) + google-apis-iamcredentials_v1 (~> 0.1) + google-apis-storage_v1 (~> 0.19.0) + google-cloud-core (~> 1.6) + googleauth (>= 0.16.2, < 2.a) + mini_mime (~> 1.0) + googleauth (1.5.2) + faraday (>= 0.17.3, < 3.a) + jwt (>= 1.4, < 3.0) + memoist (~> 0.16) + multi_json (~> 1.11) + os (>= 0.9, < 2.0) + signet (>= 0.16, < 2.a) + highline (2.0.3) + http-accept (1.7.0) + http-cookie (1.0.5) + domain_name (~> 0.5) + httpclient (2.8.3) + jmespath (1.6.2) + json (2.6.3) + jwt (2.7.0) + memoist (0.16.2) + mime-types (3.4.1) + mime-types-data (~> 3.2015) + mime-types-data (3.2023.0218.1) + mini_magick (4.12.0) + mini_mime (1.1.2) + multi_json (1.15.0) + multipart-post (2.0.0) + nanaimo (0.3.0) + naturally (2.2.1) + netrc (0.11.0) + optparse (0.1.1) + os (1.1.4) + plist (3.7.0) + public_suffix (5.0.1) + rake (13.0.6) + representable (3.2.0) + declarative (< 0.1.0) + trailblazer-option (>= 0.1.1, < 0.2.0) + uber (< 0.2.0) + rest-client (2.1.0) + http-accept (>= 1.7.0, < 2.0) + http-cookie (>= 1.0.2, < 2.0) + mime-types (>= 1.16, < 4.0) + netrc (~> 0.8) + retriable (3.1.2) + rexml (3.2.5) + rouge (2.0.7) + ruby2_keywords (0.0.5) + rubyzip (2.3.2) + security (0.1.3) + signet (0.17.0) + addressable (~> 2.8) + faraday (>= 0.17.5, < 3.a) + jwt (>= 1.5, < 3.0) + multi_json (~> 1.10) + simctl (1.6.10) + CFPropertyList + naturally + terminal-notifier (2.0.0) + terminal-table (1.8.0) + unicode-display_width (~> 1.1, >= 1.1.1) + trailblazer-option (0.1.2) + tty-cursor (0.7.1) + tty-screen (0.8.1) + tty-spinner (0.9.3) + tty-cursor (~> 0.7) + uber (0.1.0) + unf (0.1.4) + unf_ext + unf_ext (0.0.8.2) + unicode-display_width (1.8.0) + webrick (1.8.1) + word_wrap (1.0.0) + xcodeproj (1.22.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.3.0) + rexml (~> 3.2.4) + xcpretty (0.3.0) + rouge (~> 2.0.7) + xcpretty-travis-formatter (1.0.1) + xcpretty (~> 0.2, >= 0.0.7) + +PLATFORMS + ruby + +DEPENDENCIES + fastlane + fastlane-plugin-browserstack + fastlane-plugin-huawei_appgallery_connect + fastlane-plugin-increment_version_code + fastlane-plugin-versioning_android + +BUNDLED WITH + 2.4.12 diff --git a/android/app/BUCK b/android/app/BUCK new file mode 100644 index 0000000..a5caf05 --- /dev/null +++ b/android/app/BUCK @@ -0,0 +1,55 @@ +# To learn about Buck see [Docs](https://buckbuild.com/). 
+# To run your application with Buck: +# - install Buck +# - `npm start` - to start the packager +# - `cd android` +# - `keytool -genkey -v -keystore keystores/debug.keystore -storepass android -alias androiddebugkey -keypass android -dname "CN=Android Debug,O=Android,C=US"` +# - `./gradlew :app:copyDownloadableDepsToLibs` - make all Gradle compile dependencies available to Buck +# - `buck install -r android/app` - compile, install and run application +# + +load(":build_defs.bzl", "create_aar_targets", "create_jar_targets") + +lib_deps = [] + +create_aar_targets(glob(["libs/*.aar"])) + +create_jar_targets(glob(["libs/*.jar"])) + +android_library( + name = "all-libs", + exported_deps = lib_deps, +) + +android_library( + name = "app-code", + srcs = glob([ + "src/main/java/**/*.java", + ]), + deps = [ + ":all-libs", + ":build_config", + ":res", + ], +) + +android_build_config( + name = "build_config", + package = "com.galoyapp", +) + +android_resource( + name = "res", + package = "com.galoyapp", + res = "src/main/res", +) + +android_binary( + name = "app", + keystore = "//android/keystores:debug", + manifest = "src/main/AndroidManifest.xml", + package_type = "debug", + deps = [ + ":app-code", + ], +) diff --git a/android/app/build.gradle b/android/app/build.gradle new file mode 100644 index 0000000..78501d3 --- /dev/null +++ b/android/app/build.gradle @@ -0,0 +1,176 @@ +apply plugin: "com.android.application" +apply plugin: "com.facebook.react" + +apply plugin: 'com.google.gms.google-services' // firebase +apply plugin: 'com.google.firebase.crashlytics' + +import com.android.build.OutputFile + +/** + * This is the configuration block to customize your React Native Android app. + * By default you don't need to apply any configuration, just uncomment the lines you need. + */ + +react { + /* Folders */ + // The root of your project, i.e. where "package.json" lives. Default is '..' + // root = file("../") + // The folder where the react-native NPM package is. Default is ../node_modules/react-native + // reactNativeDir = file("../node_modules/react-native") + // The folder where the react-native Codegen package is. Default is ../node_modules/react-native-codegen + // codegenDir = file("../node_modules/react-native-codegen") + // The cli.js file which is the React Native CLI entrypoint. Default is ../node_modules/react-native/cli.js + // cliFile = file("../node_modules/react-native/cli.js") + /* Variants */ + // The list of variants that are debuggable. For those we're going to + // skip the bundling of the JS bundle and the assets. By default it is just 'debug'. + // If you add flavors like lite, prod, etc. you'll have to list your debuggableVariants. + // debuggableVariants = ["liteDebug", "prodDebug"] + /* Bundling */ + // A list containing the node command and its flags. Default is just 'node'. + // nodeExecutableAndArgs = ["node"] + // + // The command to run when bundling. By default it is 'bundle' + // bundleCommand = "ram-bundle" + // + // The path to the CLI configuration file. Default is empty. + // bundleConfig = file(../rn-cli.config.js) + // + // The name of the generated asset file containing your JS bundle + // bundleAssetName = "MyApplication.android.bundle" + // + // The entry file for bundle generation. Default is 'index.android.js' or 'index.js' + // entryFile = file("../js/MyApplication.android.js") + // + // A list of extra flags to pass to the 'bundle' commands. 
+ // See https://github.com/react-native-community/cli/blob/main/docs/commands.md#bundle + // extraPackagerArgs = [] + /* Hermes Commands */ + // The hermes compiler command to run. By default it is 'hermesc' + // hermesCommand = "$rootDir/my-custom-hermesc/bin/hermesc" + // + // The list of flags to pass to the Hermes compiler. By default it is "-O", "-output-source-map" + // hermesFlags = ["-O", "-output-source-map"] +} + +/** + * Set this to true to create four separate APKs instead of one, + * one for each native architecture. This is useful if you don't + * use App Bundles (https://developer.android.com/guide/app-bundle/) + * and want to have separate APKs to upload to the Play Store. + */ +def enableSeparateBuildPerCPUArchitecture = true + +/** + * Set this to true to run Proguard on Release builds to minify the Java bytecode. + */ +def enableProguardInReleaseBuilds = true + +/** + * The preferred build flavor of JavaScriptCore (JSC) + * + * For example, to use the international variant, you can use: + * `def jscFlavor = 'org.webkit:android-jsc-intl:+'` + * + * The international variant includes the ICU i18n library and necessary data + * allowing you to use e.g. `Date.toLocaleString` and `String.localeCompare` that + * give correct results when using locales other than en-US. Note that + * this variant is about 6MiB larger per architecture than the default. + */ +def jscFlavor = 'org.webkit:android-jsc-intl:+' + +/** + * Private function to get the list of Native Architectures you want to build. + * This reads the value from reactNativeArchitectures in your gradle.properties + * file and works together with the --active-arch-only flag of react-native run-android. + */ +def reactNativeArchitectures() { + def value = project.getProperties().get("reactNativeArchitectures") + return value ? value.split(",") : ["armeabi-v7a", "x86", "x86_64", "arm64-v8a"] +} + +android { + ndkVersion rootProject.ext.ndkVersion + + compileSdkVersion rootProject.ext.compileSdkVersion + + namespace "com.ourowallet" + defaultConfig { + applicationId "com.ourowallet" + minSdkVersion rootProject.ext.minSdkVersion + targetSdkVersion rootProject.ext.targetSdkVersion + versionCode 433 + versionName "2.1.55" + missingDimensionStrategy 'react-native-camera', 'general' // React native camera + } + splits { + abi { + reset() + enable enableSeparateBuildPerCPUArchitecture + universalApk true // If true, also generate a universal APK + include "armeabi-v7a", "x86", "arm64-v8a", "x86_64" + include (*reactNativeArchitectures()) + } + } + signingConfigs { + release { + storeFile file('release.keystore') + storePassword 'android' + keyAlias 'androiddebugkey' + keyPassword 'android' + } + } + buildTypes { + debug { + signingConfig signingConfigs.debug + } + release { + // Caution! In production, you need to generate your own keystore file. + // see https://reactnative.dev/docs/signed-apk-android. + signingConfig signingConfigs.release + minifyEnabled enableProguardInReleaseBuilds + proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro" + } + } + + // applicationVariants are e.g. 
debug, release + applicationVariants.all { variant -> + variant.outputs.each { output -> + // For each separate APK per architecture, set a unique version code as described here: + // https://developer.android.com/studio/build/configure-apk-splits.html + def versionCodes = ["armeabi-v7a": 1, "x86": 2, "arm64-v8a": 3, "x86_64": 4] + def abi = output.getFilter(OutputFile.ABI) + if (abi != null) { // null for the universal-debug, universal-release variants + output.versionCodeOverride = + versionCodes.get(abi) * 10000000 + defaultConfig.versionCode + } + } + } +} + +dependencies { + // The version of react-native is set by the React Native Gradle Plugin + implementation("com.facebook.react:react-android") + + implementation("androidx.swiperefreshlayout:swiperefreshlayout:1.0.0") + + debugImplementation("com.facebook.flipper:flipper:${FLIPPER_VERSION}") + debugImplementation("com.facebook.flipper:flipper-network-plugin:${FLIPPER_VERSION}") { + exclude group:'com.squareup.okhttp3', module:'okhttp' + } + + debugImplementation("com.facebook.flipper:flipper-fresco-plugin:${FLIPPER_VERSION}") + if (hermesEnabled.toBoolean()) { + implementation("com.facebook.react:hermes-android") + } else { + implementation jscFlavor + } +} + +apply from: file("../../node_modules/@react-native-community/cli-platform-android/native_modules.gradle"); applyNativeModulesAppBuildGradle(project) + +project.ext.vectoricons = [ + iconFontNames: [ 'Ionicons.ttf' ] +] + +apply from: "../../node_modules/react-native-vector-icons/fonts.gradle" diff --git a/android/app/google-services.json b/android/app/google-services.json new file mode 100644 index 0000000..d99828c --- /dev/null +++ b/android/app/google-services.json @@ -0,0 +1,55 @@ +{ + "project_info": { + "project_number": "72279297366", + "firebase_url": "https://galoyapp.firebaseio.com", + "project_id": "galoyapp", + "storage_bucket": "galoyapp.appspot.com" + }, + "client": [ + { + "client_info": { + "mobilesdk_app_id": "1:72279297366:android:35666807ae916c5aa75af7", + "android_client_info": { + "package_name": "com.ourowallet" + } + }, + "oauth_client": [ + { + "client_id": "72279297366-pm5ng6a6lrqirbi80kfgbf206uh567go.apps.googleusercontent.com", + "client_type": 1, + "android_info": { + "package_name": "com.ourowallet", + "certificate_hash": "e3fc1f6c695772c31c706eea22f66efd680c5c64" + } + }, + { + "client_id": "72279297366-k1eh0iqfsi3f47c0k18l31l7rbbdcf84.apps.googleusercontent.com", + "client_type": 3 + } + ], + "api_key": [ + { + "current_key": "AIzaSyCttrzic-C_V-mbnZjU0qLhw80f82HZ50k" + } + ], + "services": { + "appinvite_service": { + "other_platform_oauth_client": [ + { + "client_id": "72279297366-k1eh0iqfsi3f47c0k18l31l7rbbdcf84.apps.googleusercontent.com", + "client_type": 3 + }, + { + "client_id": "72279297366-feuhvvmtop8ba5j0sgjbu4pubqa6cnfh.apps.googleusercontent.com", + "client_type": 2, + "ios_info": { + "bundle_id": "io.galoy.app" + } + } + ] + } + } + } + ], + "configuration_version": "1" +} \ No newline at end of file diff --git a/android/app/proguard-rules.pro b/android/app/proguard-rules.pro new file mode 100644 index 0000000..cf6d9f4 --- /dev/null +++ b/android/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# By default, the flags in this file are appended to flags specified +# in /usr/local/Cellar/android-sdk/24.3.3/tools/proguard/proguard-android.txt +# You can edit the include path and order by changing the proguardFiles +# directive in build.gradle. 
+# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# Add any project specific keep options here: + +-keep class com.facebook.hermes.unicode.** { *; } +-keep class com.facebook.jni.** { *; } +-keep public class com.horcrux.svg.** {*;} + +# GeeTest SDK has already been obfuscated, please do not obfuscate it again +-dontwarn com.geetest.sdk.** +-keep class com.geetest.sdk.**{*;} + +-keep class com.swmansion.reanimated.** { *; } +-keep class com.facebook.react.turbomodule.** { *; } \ No newline at end of file diff --git a/android/app/src/debug/AndroidManifest.xml b/android/app/src/debug/AndroidManifest.xml new file mode 100644 index 0000000..8114d19 --- /dev/null +++ b/android/app/src/debug/AndroidManifest.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + diff --git a/android/app/src/debug/java/com/galoyapp/ReactNativeFlipper.java b/android/app/src/debug/java/com/galoyapp/ReactNativeFlipper.java new file mode 100644 index 0000000..e82e1cc --- /dev/null +++ b/android/app/src/debug/java/com/galoyapp/ReactNativeFlipper.java @@ -0,0 +1,75 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + *

This source code is licensed under the MIT license found in the LICENSE file in the root + * directory of this source tree. + */ +package com.ourowallet; + +import android.content.Context; +import com.facebook.flipper.android.AndroidFlipperClient; +import com.facebook.flipper.android.utils.FlipperUtils; +import com.facebook.flipper.core.FlipperClient; +import com.facebook.flipper.plugins.crashreporter.CrashReporterPlugin; +import com.facebook.flipper.plugins.databases.DatabasesFlipperPlugin; +import com.facebook.flipper.plugins.fresco.FrescoFlipperPlugin; +import com.facebook.flipper.plugins.inspector.DescriptorMapping; +import com.facebook.flipper.plugins.inspector.InspectorFlipperPlugin; +import com.facebook.flipper.plugins.network.FlipperOkhttpInterceptor; +import com.facebook.flipper.plugins.network.NetworkFlipperPlugin; +import com.facebook.flipper.plugins.sharedpreferences.SharedPreferencesFlipperPlugin; +import com.facebook.react.ReactInstanceManager; +import com.facebook.react.ReactInstanceEventListener; +import com.facebook.react.bridge.ReactContext; +import com.facebook.react.modules.network.NetworkingModule; +import okhttp3.OkHttpClient; + +/** + * Class responsible of loading Flipper inside your React Native application. This is the debug + * flavor of it. Here you can add your own plugins and customize the Flipper setup. + */ +public class ReactNativeFlipper { + public static void initializeFlipper(Context context, ReactInstanceManager reactInstanceManager) { + if (FlipperUtils.shouldEnableFlipper(context)) { + final FlipperClient client = AndroidFlipperClient.getInstance(context); + + client.addPlugin(new InspectorFlipperPlugin(context, DescriptorMapping.withDefaults())); + client.addPlugin(new DatabasesFlipperPlugin(context)); + client.addPlugin(new SharedPreferencesFlipperPlugin(context)); + client.addPlugin(CrashReporterPlugin.getInstance()); + + NetworkFlipperPlugin networkFlipperPlugin = new NetworkFlipperPlugin(); + NetworkingModule.setCustomClientBuilder( + new NetworkingModule.CustomClientBuilder() { + @Override + public void apply(OkHttpClient.Builder builder) { + builder.addNetworkInterceptor(new FlipperOkhttpInterceptor(networkFlipperPlugin)); + } + }); + client.addPlugin(networkFlipperPlugin); + client.start(); + + // Fresco Plugin needs to ensure that ImagePipelineFactory is initialized + // Hence we run if after all native modules have been initialized + ReactContext reactContext = reactInstanceManager.getCurrentReactContext(); + if (reactContext == null) { + reactInstanceManager.addReactInstanceEventListener( + new ReactInstanceEventListener() { + @Override + public void onReactContextInitialized(ReactContext reactContext) { + reactInstanceManager.removeReactInstanceEventListener(this); + reactContext.runOnNativeModulesQueueThread( + new Runnable() { + @Override + public void run() { + client.addPlugin(new FrescoFlipperPlugin()); + } + }); + } + }); + } else { + client.addPlugin(new FrescoFlipperPlugin()); + } + } + } +} diff --git a/android/app/src/main/AndroidManifest.xml b/android/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000..36c9e09 --- /dev/null +++ b/android/app/src/main/AndroidManifest.xml @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/android/app/src/main/java/com/galoyapp/MainActivity.java b/android/app/src/main/java/com/galoyapp/MainActivity.java new file mode 100644 index 0000000..ba2a1f0 --- /dev/null +++ 
b/android/app/src/main/java/com/galoyapp/MainActivity.java @@ -0,0 +1,45 @@ +package com.ourowallet; +import expo.modules.ReactActivityDelegateWrapper; + +import com.facebook.react.ReactActivity; +import com.facebook.react.ReactActivityDelegate; +import com.facebook.react.defaults.DefaultNewArchitectureEntryPoint; +import com.facebook.react.defaults.DefaultReactActivityDelegate; + +// for react-native-screens +import android.os.Bundle; + +public class MainActivity extends ReactActivity { + + /** + * Returns the name of the main component registered from JavaScript. This is used to schedule + * rendering of the component. + */ + @Override + protected String getMainComponentName() { + return "OuroWallet"; + } + + //react-native-screens override + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(null); + } + + /** + * Returns the instance of the {@link ReactActivityDelegate}. Here we use a util class {@link + * DefaultReactActivityDelegate} which allows you to easily enable Fabric and Concurrent React + * (aka React 18) with two boolean flags. + */ + @Override + protected ReactActivityDelegate createReactActivityDelegate() { + return new ReactActivityDelegateWrapper(this, BuildConfig.IS_NEW_ARCHITECTURE_ENABLED, new DefaultReactActivityDelegate( + this, + getMainComponentName(), + // If you opted-in for the New Architecture, we enable the Fabric Renderer. + DefaultNewArchitectureEntryPoint.getFabricEnabled(), // fabricEnabled + // If you opted-in for the New Architecture, we enable Concurrent React (i.e. React 18). + DefaultNewArchitectureEntryPoint.getConcurrentReactEnabled() // concurrentRootEnabled + )); + } +} diff --git a/android/app/src/main/java/com/galoyapp/MainApplication.java b/android/app/src/main/java/com/galoyapp/MainApplication.java new file mode 100644 index 0000000..31fc9bf --- /dev/null +++ b/android/app/src/main/java/com/galoyapp/MainApplication.java @@ -0,0 +1,72 @@ +package com.galoyapp; +import android.content.res.Configuration; +import expo.modules.ApplicationLifecycleDispatcher; +import expo.modules.ReactNativeHostWrapper; + +import android.app.Application; +import com.facebook.react.PackageList; +import com.facebook.react.ReactApplication; +import com.facebook.react.ReactNativeHost; +import com.facebook.react.ReactPackage; +import com.facebook.react.defaults.DefaultNewArchitectureEntryPoint; +import com.facebook.react.defaults.DefaultReactNativeHost; +import com.facebook.soloader.SoLoader; +import java.util.List; + +public class MainApplication extends Application implements ReactApplication { + + private final ReactNativeHost mReactNativeHost = + new ReactNativeHostWrapper(this, new DefaultReactNativeHost(this) { + @Override + public boolean getUseDeveloperSupport() { + return BuildConfig.DEBUG; + } + + @Override + protected List getPackages() { + @SuppressWarnings("UnnecessaryLocalVariable") + List packages = new PackageList(this).getPackages(); + // Packages that cannot be autolinked yet can be added manually here, for example: + // packages.add(new MyReactNativePackage()); + return packages; + } + + @Override + protected String getJSMainModuleName() { + return "index"; + } + + @Override + protected boolean isNewArchEnabled() { + return BuildConfig.IS_NEW_ARCHITECTURE_ENABLED; + } + + @Override + protected Boolean isHermesEnabled() { + return BuildConfig.IS_HERMES_ENABLED; + } + }); + + @Override + public ReactNativeHost getReactNativeHost() { + return mReactNativeHost; + } + + @Override + public void onCreate() { + super.onCreate(); 
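+ // SoLoader must be initialized before anything below that loads native code (the New Architecture entry point and Flipper both depend on it); the 'false' argument disables the native exopackage feature.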
+ SoLoader.init(this, /* native exopackage */ false); + if (BuildConfig.IS_NEW_ARCHITECTURE_ENABLED) { + // If you opted-in for the New Architecture, we load the native entry point for this app. + DefaultNewArchitectureEntryPoint.load(); + } + ReactNativeFlipper.initializeFlipper(this, getReactNativeHost().getReactInstanceManager()); + ApplicationLifecycleDispatcher.onApplicationCreate(this); + } + + @Override + public void onConfigurationChanged(Configuration newConfig) { + super.onConfigurationChanged(newConfig); + ApplicationLifecycleDispatcher.onConfigurationChanged(this, newConfig); + } +} diff --git a/android/app/src/release/java/com/galoyapp/ReactNativeFlipper.java b/android/app/src/release/java/com/galoyapp/ReactNativeFlipper.java new file mode 100644 index 0000000..f27a80c --- /dev/null +++ b/android/app/src/release/java/com/galoyapp/ReactNativeFlipper.java @@ -0,0 +1,18 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + *

This source code is licensed under the MIT license found in the LICENSE file in the root + * directory of this source tree. + */ +package com.galoyapp; +import android.content.Context; +import com.facebook.react.ReactInstanceManager; +/** + * Class responsible for loading Flipper inside your React Native application. This is the release + * flavor of it, so it's empty, as we don't want to load Flipper. + */ +public class ReactNativeFlipper { + public static void initializeFlipper(Context context, ReactInstanceManager reactInstanceManager) { + // Do nothing as we don't want to initialize Flipper on Release. + } +} diff --git a/android/build.gradle b/android/build.gradle new file mode 100644 index 0000000..7d88e6b --- /dev/null +++ b/android/build.gradle @@ -0,0 +1,26 @@ +// Top-level build file where you can add configuration options common to all sub-projects/modules. +import org.apache.tools.ant.taskdefs.condition.Os +buildscript { + ext { + buildToolsVersion = "33.0.0" + minSdkVersion = 23 + compileSdkVersion = 33 + targetSdkVersion = 33 + + // We use NDK 23, which has both M1 support and is the side-by-side NDK version from AGP. + ndkVersion = "23.1.7779620" + } + repositories { + google() + mavenCentral() + jcenter() + maven { url 'https://www.jitpack.io' } + } + dependencies { + classpath("com.android.tools.build:gradle:7.4.1") + classpath("com.facebook.react:react-native-gradle-plugin") + + classpath 'com.google.gms:google-services:4.3.14' // firebase + classpath 'com.google.firebase:firebase-crashlytics-gradle:2.9.2' + } +} diff --git a/android/fastlane/Appfile b/android/fastlane/Appfile new file mode 100644 index 0000000..8495b88 --- /dev/null +++ b/android/fastlane/Appfile @@ -0,0 +1,2 @@ +json_key_file("galoyapp-2e25e160d4ba.json") # Path to the json secret file - Follow https://docs.fastlane.tools/actions/supply/#setup to get one +package_name("com.ourowallet") # e.g. 
com.krausefx.app diff --git a/android/fastlane/Fastfile b/android/fastlane/Fastfile new file mode 100644 index 0000000..cb0b467 --- /dev/null +++ b/android/fastlane/Fastfile @@ -0,0 +1,115 @@ +# This file contains the fastlane.tools configuration +# You can find the documentation at https://docs.fastlane.tools +# +# For a list of all available actions, check out +# +# https://docs.fastlane.tools/actions +# +# For a list of all available plugins, check out +# +# https://docs.fastlane.tools/plugins/available-plugins +# + +# Uncomment the line if you want fastlane to automatically update itself +# update_fastlane + +default_platform(:android) + +platform :android do + desc "Build Releasable APK" + lane :build do + android_set_version_name(version_name: ENV["PUBLIC_VERSION"]) + gradle(task: 'clean') + gradle( + task: "assemble", + build_type: "Release", + flags: "--no-daemon --stacktrace --info", + ) + end + + desc "Deploy a new version to the Google Play" + lane :play_store_upload do + upload_to_play_store( + apk_paths: [ + "./app/build/outputs/apk/release/app-arm64-v8a-release.apk", + "./app/build/outputs/apk/release/app-armeabi-v7a-release.apk", + "./app/build/outputs/apk/release/app-universal-release.apk", + "./app/build/outputs/apk/release/app-x86-release.apk", + "./app/build/outputs/apk/release/app-x86_64-release.apk", + ], + metadata_path: "./app/build/outputs/apk/release", + track: "internal", + skip_upload_changelogs: true, + skip_upload_images: true, + ) + end + + desc "Deploy a new version to Huawei App Gallery" + lane :huawei_store_upload do + huawei_appgallery_connect( + client_id: ENV["HUAWEI_CLIENT_ID"], + client_secret: ENV["HUAWEI_CLIENT_SECRET"], + app_id: ENV["HUAWEI_APP_ID"], + apk_path: "./app-universal-release.apk", + phase_wise_release: true, + phase_release_percent: "1", + phase_release_description: "Phased Release", + submit_for_review: false + ) + end + + desc "Promote Internal Testing build to Beta" + lane :promote_to_beta do + upload_to_play_store( + track: "internal", + track_promote_to: "alpha", + version_name: ENV["VERSION"], + version_code: ENV["VERSION_CODE"], + skip_upload_apk: true + ) + end + + desc "Promote Internal Testing build to Public" + lane :promote_to_public do + upload_to_play_store( + track: "alpha", + track_promote_to: "beta", + version_name: ENV["VERSION"], + version_code: ENV["VERSION_CODE"], + skip_upload_apk: true + ) + end + + desc "Phased Public Rollout" + lane :public_phased_percent do + upload_to_play_store( + track: "beta", + track_promote_to: "production", + version_name: ENV["VERSION"], + version_code: ENV["VERSION_CODE"], + rollout: ENV["ROLLOUT"], + skip_upload_apk: true + ) + end + + desc "Build for end to end testing" + lane :build_e2e do + gradle( + task: "clean assemble", + build_type: "Debug", + print_command: false, + flags: "--no-daemon --max-workers=8 --stacktrace --info", + ) + end + + desc "End to end testing on browserstack" + lane :browserstack do + build_e2e + upload_to_browserstack_app_automate( + browserstack_username: ENV["BROWSERSTACK_USER"], + browserstack_access_key: ENV["BROWSERSTACK_ACCESS_KEY"], + file_path: ENV["GRADLE_APK_OUTPUT_PATH"] + ) + sh("GALOY_TEST_TOKENS=$GALOY_TEST_TOKENS && GALOY_TOKEN_2=$GALOY_TOKEN_2 && yarn test:browserstack:android") + end +end diff --git a/android/fastlane/Pluginfile b/android/fastlane/Pluginfile new file mode 100644 index 0000000..7c62ee7 --- /dev/null +++ b/android/fastlane/Pluginfile @@ -0,0 +1,8 @@ +# Autogenerated by fastlane +# +# Ensure this file is checked in to source 
control! + +gem 'fastlane-plugin-increment_version_code' +gem 'fastlane-plugin-huawei_appgallery_connect' +gem 'fastlane-plugin-browserstack' +gem 'fastlane-plugin-versioning_android' diff --git a/android/fastlane/README.md b/android/fastlane/README.md new file mode 100644 index 0000000..6912f9e --- /dev/null +++ b/android/fastlane/README.md @@ -0,0 +1,88 @@ +fastlane documentation +---- + +# Installation + +Make sure you have the latest version of the Xcode command line tools installed: + +```sh +xcode-select --install +``` + +For _fastlane_ installation instructions, see [Installing _fastlane_](https://docs.fastlane.tools/#installing-fastlane) + +# Available Actions + +## Android + +### android build + +```sh +[bundle exec] fastlane android build +``` + +Build Releasable APK + +### android play_store_upload + +```sh +[bundle exec] fastlane android play_store_upload +``` + +Deploy a new version to the Google Play + +### android huawei_store_upload + +```sh +[bundle exec] fastlane android huawei_store_upload +``` + +Deploy a new version to Huawei App Gallery + +### android promote_to_beta + +```sh +[bundle exec] fastlane android promote_to_beta +``` + +Promote Internal Testing build to Beta + +### android promote_to_public + +```sh +[bundle exec] fastlane android promote_to_public +``` + +Promote Internal Testing build to Public + +### android public_phased_percent + +```sh +[bundle exec] fastlane android public_phased_percent +``` + +Phased Public Rollout + +### android build_e2e + +```sh +[bundle exec] fastlane android build_e2e +``` + +Build for end to end testing + +### android browserstack + +```sh +[bundle exec] fastlane android browserstack +``` + +End to end testing on browserstack + +---- + +This README.md is auto-generated and will be re-generated every time [_fastlane_](https://fastlane.tools) is run. + +More information about _fastlane_ can be found on [fastlane.tools](https://fastlane.tools). + +The documentation of _fastlane_ can be found on [docs.fastlane.tools](https://docs.fastlane.tools). diff --git a/android/gradle.properties b/android/gradle.properties new file mode 100644 index 0000000..82721c0 --- /dev/null +++ b/android/gradle.properties @@ -0,0 +1,44 @@ +# Project-wide Gradle settings. + +# IDE (e.g. Android Studio) users: +# Gradle settings configured through the IDE *will override* +# any settings specified in this file. + +# For more details on how to configure your build environment visit +# http://www.gradle.org/docs/current/userguide/build_environment.html + +# Specifies the JVM arguments used for the daemon process. +# The setting is particularly useful for tweaking memory settings. +# Default value: -Xmx512m -XX:MaxMetaspaceSize=256m +org.gradle.jvmargs=-Xmx4g -XX:MaxMetaspaceSize=2g + +# When configured, Gradle will run in incubating parallel mode. +# This option should only be used with decoupled projects. More details, visit +# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects +# org.gradle.parallel=true + +# AndroidX package structure to make it clearer which packages are bundled with the +# Android operating system, and which are packaged with your app's APK +# https://developer.android.com/topic/libraries/support-library/androidx-rn +android.useAndroidX=true +# Automatically convert third-party libraries to use AndroidX +android.enableJetifier=true + +# Version of flipper SDK to use with React Native +FLIPPER_VERSION=0.125.0 + +# Use this property to specify which architecture you want to build. 
+# You can also override it from the CLI using +# ./gradlew -PreactNativeArchitectures=x86_64 +reactNativeArchitectures=armeabi-v7a,arm64-v8a,x86,x86_64 + +# Use this property to enable support for the new architecture. +# This will allow you to use TurboModules and the Fabric renderer in +# your application. You should enable this flag either if you want +# to write custom TurboModules/Fabric components OR use libraries that +# provide them. +newArchEnabled=false + +# Use this property to enable or disable the Hermes JS engine. +# If set to false, you will be using JSC instead. +hermesEnabled=true diff --git a/android/gradle/wrapper/gradle-wrapper.jar b/android/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..41d9927 Binary files /dev/null and b/android/gradle/wrapper/gradle-wrapper.jar differ diff --git a/android/gradle/wrapper/gradle-wrapper.properties b/android/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..1076148 --- /dev/null +++ b/android/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Sat Apr 22 13:50:59 GFT 2023 +distributionBase=GRADLE_USER_HOME +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip +distributionPath=wrapper/dists +zipStorePath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME diff --git a/android/gradlew b/android/gradlew new file mode 100644 index 0000000..a58591e --- /dev/null +++ b/android/gradlew @@ -0,0 +1,234 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. 
+# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +APP_NAME="Gradle" +APP_BASE_NAME=${0##*/} + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" \ No newline at end of file diff --git a/android/gradlew.bat b/android/gradlew.bat new file mode 100644 index 0000000..107acd3 --- /dev/null +++ b/android/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/android/link-assets-manifest.json b/android/link-assets-manifest.json new file mode 100644 index 0000000..15aa921 --- /dev/null +++ b/android/link-assets-manifest.json @@ -0,0 +1,33 @@ +{ + "migIndex": 1, + "data": [ + { + "path": "app/assets/fonts/DMSans-Bold.ttf", + "sha1": "ba828d9137dac8d986a3ea97ca56e390897a0b14" + }, + { + "path": "app/assets/fonts/SourceSansPro-Bold.ttf", + "sha1": "167ac25366e252f07c090a8113910337f3d5255b" + }, + { + "path": "app/assets/fonts/SourceSansPro-BoldItalic.ttf", + "sha1": "c320303ae126e23bd566d412916f68cc347d573f" + }, + { + "path": "app/assets/fonts/SourceSansPro-Italic.ttf", + "sha1": "1c774bad0dad64ee30d7f2e71e89891870a2d304" + }, + { + "path": "app/assets/fonts/SourceSansPro-Regular.ttf", + "sha1": "48d6928e08ba5e2ca1e15d754c146580b1c8febd" + }, + { + "path": "app/assets/fonts/SourceSansPro-SemiBold.ttf", + "sha1": "3eeb4bae47e308c768d8c65a89480159d4e0e7c2" + }, + { + "path": "app/assets/fonts/SourceSansPro-SemiBoldItalic.ttf", + "sha1": "ddbf657592d067296ef891b03f17f452907c8a7b" + } + ] +} diff --git a/android/settings.gradle b/android/settings.gradle new file mode 100644 index 0000000..c4992e2 --- /dev/null +++ b/android/settings.gradle @@ -0,0 +1,11 @@ +rootProject.name = 'OuroWallet' +include ':react-native-vector-icons' +project(':react-native-vector-icons').projectDir = new File(rootProject.projectDir, '../node_modules/react-native-vector-icons/android') +include ':react-native-secure-key-store' +project(':react-native-secure-key-store').projectDir = new File(rootProject.projectDir, '../node_modules/react-native-secure-key-store/android') +apply from: file("../node_modules/@react-native-community/cli-platform-android/native_modules.gradle"); applyNativeModulesSettingsGradle(settings) +include ':app' +includeBuild('../node_modules/react-native-gradle-plugin') + +apply from: new File(["node", "--print", "require.resolve('expo/package.json')"].execute(null, rootDir).text.trim(), "../scripts/autolinking.gradle") +useExpoModules() \ No newline at end of file diff --git a/apollo.client.js b/apollo.client.js new file mode 100644 index 0000000..497d909 --- /dev/null +++ b/apollo.client.js @@ -0,0 +1,9 @@ +module.exports = { + client: { + includes: ["app/**/*.{ts,tsx,js,jsx,graphql}"], + service: { + name: `galoy`, + url: `http://localhost:4000/graphql`, + }, + }, +} diff --git a/app.json b/app.json new file mode 100644 index 0000000..a89279b --- /dev/null +++ b/app.json @@ -0,0 +1,4 @@ +{ + "name": "OuroWallet", + "displayName": "OuroWallet" +} diff --git a/app/app.tsx b/app/app.tsx new file mode 100644 index 0000000..7ad0ae6 --- /dev/null +++ b/app/app.tsx @@ -0,0 +1,65 @@ +// Welcome to the main entry point of the app. +// +// In this file, we'll be kicking off our app + +// language related import +import "intl-pluralrules" +import "moment/locale/es" +import "moment/locale/fr-ca" +import "moment/locale/pt-br" +import "./utils/polyfill" + +import "react-native-reanimated" + +import "@react-native-firebase/crashlytics" +import { ThemeProvider } from "@rneui/themed" +import "node-libs-react-native/globals" // needed for Buffer? 
+import * as React from "react" +import ErrorBoundary from "react-native-error-boundary" +import { RootSiblingParent } from "react-native-root-siblings" +import { GaloyToast } from "./components/galoy-toast" +import { NotificationComponent } from "./components/notification" +import { GaloyClient } from "./graphql/client" +import TypesafeI18n from "./i18n/i18n-react" +import { loadAllLocales } from "./i18n/i18n-util.sync" +import { AppStateWrapper } from "./navigation/app-state" +import { NavigationContainerWrapper } from "./navigation/navigation-container-wrapper" +import { RootStack } from "./navigation/root-navigator" +import theme from "./rne-theme/theme" +import { ErrorScreen } from "./screens/error-screen" +import { PersistentStateProvider } from "./store/persistent-state" +import { detectDefaultLocale } from "./utils/locale-detector" +import { ThemeSyncGraphql } from "./utils/theme-sync" + +// FIXME should we only load the currently used locale? +// this would help make the app load faster +// this will become more important when we add more languages +// and when the earn section is added +// +// alternatively, could try loadAllLocalesAsync() +loadAllLocales() + +/** + * This is the root component of our app. + */ +export const App = () => ( + + + + + + + + + + + + + + + + + + + +) diff --git a/app/assets/fonts/DMSans-Bold.ttf b/app/assets/fonts/DMSans-Bold.ttf new file mode 100644 index 0000000..7fd94a2 Binary files /dev/null and b/app/assets/fonts/DMSans-Bold.ttf differ diff --git a/app/assets/fonts/SourceSansPro-Bold.ttf b/app/assets/fonts/SourceSansPro-Bold.ttf new file mode 100644 index 0000000..b8879af Binary files /dev/null and b/app/assets/fonts/SourceSansPro-Bold.ttf differ diff --git a/app/assets/fonts/SourceSansPro-BoldItalic.ttf b/app/assets/fonts/SourceSansPro-BoldItalic.ttf new file mode 100644 index 0000000..b38bae9 Binary files /dev/null and b/app/assets/fonts/SourceSansPro-BoldItalic.ttf differ diff --git a/app/assets/fonts/SourceSansPro-Italic.ttf b/app/assets/fonts/SourceSansPro-Italic.ttf new file mode 100644 index 0000000..7dbece7 Binary files /dev/null and b/app/assets/fonts/SourceSansPro-Italic.ttf differ diff --git a/app/assets/fonts/SourceSansPro-Regular.ttf b/app/assets/fonts/SourceSansPro-Regular.ttf new file mode 100644 index 0000000..98e8579 Binary files /dev/null and b/app/assets/fonts/SourceSansPro-Regular.ttf differ diff --git a/app/assets/fonts/SourceSansPro-SemiBold.ttf b/app/assets/fonts/SourceSansPro-SemiBold.ttf new file mode 100644 index 0000000..99dcc81 Binary files /dev/null and b/app/assets/fonts/SourceSansPro-SemiBold.ttf differ diff --git a/app/assets/fonts/SourceSansPro-SemiBoldItalic.ttf b/app/assets/fonts/SourceSansPro-SemiBoldItalic.ttf new file mode 100644 index 0000000..9c3891b Binary files /dev/null and b/app/assets/fonts/SourceSansPro-SemiBoldItalic.ttf differ diff --git a/app/assets/icons-redesign/arrow-right.svg b/app/assets/icons-redesign/arrow-right.svg new file mode 100644 index 0000000..a0f9b0c --- /dev/null +++ b/app/assets/icons-redesign/arrow-right.svg @@ -0,0 +1,4 @@ + + + + diff --git a/app/assets/icons-redesign/back-space.svg b/app/assets/icons-redesign/back-space.svg new file mode 100644 index 0000000..9183612 --- /dev/null +++ b/app/assets/icons-redesign/back-space.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/app/assets/icons-redesign/bank.svg b/app/assets/icons-redesign/bank.svg new file mode 100644 index 0000000..59d47b4 --- /dev/null +++ b/app/assets/icons-redesign/bank.svg @@ -0,0 +1,9 @@ + + + + + + + + + 
[new SVG icon assets under app/assets/icons-redesign/ (SVG markup omitted): arrow-right, back-space, bank, bitcoin, book, btc-book, caret-down, caret-left, caret-right, caret-up, check-circle, check, close, coins, contact, copy-paste, dollar, eye-slash, eye, filter, graph, image, info, lightning, link, loading, magnifying-glass, map, menu, payment-error, payment-pending, payment-success, pencil, qr-code, question, receive, send, settings, share, transfer, user, video, warning]
[new SVG icon assets under app/assets/icons/ (SVG markup omitted): calculator, cancel, chain, chevron-down, chevron, contacts, copy, default, destination, dollar, downarrow, error, fee, home, info, learn, lightning-sats, lightning-usd, lightning, map, merchant, note, onchain-sats, onchain, price, qr-code, receive-bitcoin, receive, receive_bitcoin, receive_usd, sat, scan, send, settings, share, switch, transfer, web-link]
diff --git a/app/assets/icons/lightning-sats.png b/app/assets/icons/lightning-sats.png new file mode 100644 index 0000000..2c92f7b Binary files /dev/null and b/app/assets/icons/lightning-sats.png differ diff --git a/app/assets/icons/lightning-usd.png b/app/assets/icons/lightning-usd.png new file mode 100644 index 0000000..706d1a6 Binary files /dev/null and b/app/assets/icons/lightning-usd.png differ diff --git a/app/assets/icons/onchain-btc.png b/app/assets/icons/onchain-btc.png new file mode 100644 index 0000000..777b83e Binary files /dev/null and b/app/assets/icons/onchain-btc.png differ diff --git a/app/assets/images/blink_modal_darkmode.png b/app/assets/images/blink_modal_darkmode.png new file mode 100644 index 0000000..6e34964 Binary files /dev/null and b/app/assets/images/blink_modal_darkmode.png differ diff --git a/app/assets/images/blink_modal_lightmode.png b/app/assets/images/blink_modal_lightmode.png new file mode 100644 index 0000000..8885f44 Binary files /dev/null and
b/app/assets/images/blink_modal_lightmode.png differ diff --git a/app/assets/images/stable-sats.png b/app/assets/images/stable-sats.png new file mode 100644 index 0000000..1d80636 Binary files /dev/null and b/app/assets/images/stable-sats.png differ diff --git a/app/components/amount-input-screen/amount-input-screen-ui.tsx b/app/components/amount-input-screen/amount-input-screen-ui.tsx new file mode 100644 index 0000000..4dc1811 --- /dev/null +++ b/app/components/amount-input-screen/amount-input-screen-ui.tsx @@ -0,0 +1,188 @@ +import * as React from "react" +import { useI18nContext } from "@app/i18n/i18n-react" +import { makeStyles, Text } from "@rneui/themed" +import { View } from "react-native" +import { GaloyIconButton } from "../atomic/galoy-icon-button" +import { GaloyPrimaryButton } from "../atomic/galoy-primary-button" +import { GaloyWarning } from "../atomic/galoy-warning" +import { CurrencyKeyboard } from "../currency-keyboard" +import { Key } from "./number-pad-reducer" +import { testProps } from "@app/utils/testProps" + +export type AmountInputScreenUIProps = { + primaryCurrencySymbol?: string + primaryCurrencyFormattedAmount?: string + primaryCurrencyCode: string + secondaryCurrencySymbol?: string + secondaryCurrencyFormattedAmount?: string + secondaryCurrencyCode?: string + errorMessage?: string + setAmountDisabled?: boolean + onKeyPress: (key: Key) => void + onToggleCurrency?: () => void + onClearAmount: () => void + onSetAmountPress?: () => void + goBack: () => void +} + +export const AmountInputScreenUI: React.FC = ({ + primaryCurrencySymbol, + primaryCurrencyFormattedAmount, + primaryCurrencyCode, + secondaryCurrencySymbol, + secondaryCurrencyFormattedAmount, + secondaryCurrencyCode, + errorMessage, + onKeyPress, + onToggleCurrency, + onSetAmountPress, + setAmountDisabled, + goBack, +}) => { + const { LL } = useI18nContext() + const styles = useStyles() + + return ( + + + {LL.AmountInputScreen.enterAmount()} + + + + + + {primaryCurrencySymbol && ( + {primaryCurrencySymbol} + )} + {primaryCurrencyFormattedAmount ? 
( + + {primaryCurrencyFormattedAmount} + + ) : ( + 0 + )} + {primaryCurrencyCode} + + {Boolean(secondaryCurrencyFormattedAmount) && ( + <> + + + + + + + + {secondaryCurrencySymbol} + {secondaryCurrencyFormattedAmount} + + + {secondaryCurrencyCode} + + + + )} + + + {errorMessage && } + + + + + + + + ) +} + +const useStyles = makeStyles((theme) => ({ + amountInputScreenContainer: { + flex: 1, + backgroundColor: theme.colors.white, + }, + headerContainer: { + flexDirection: "row", + justifyContent: "space-between", + alignItems: "center", + padding: 16, + borderBottomColor: theme.colors.primary9, + borderBottomWidth: 1, + }, + amountContainer: { + marginBottom: 16, + }, + primaryAmountContainer: { + flexDirection: "row", + height: 30, + alignItems: "center", + }, + primaryCurrencySymbol: { + fontSize: 28, + lineHeight: 32, + fontWeight: "bold", + }, + primaryNumberText: { + fontSize: 28, + lineHeight: 32, + flex: 1, + fontWeight: "bold", + }, + faintPrimaryNumberText: { + fontSize: 28, + lineHeight: 32, + flex: 1, + fontWeight: "bold", + color: theme.colors.grey8, + }, + primaryCurrencyCodeText: { + fontSize: 28, + lineHeight: 32, + fontWeight: "bold", + textAlign: "right", + }, + secondaryAmountContainer: { + flexDirection: "row", + }, + secondaryAmountText: { + fontSize: 18, + lineHeight: 24, + fontWeight: "bold", + flex: 1, + }, + secondaryAmountCurrencyCodeText: { + fontSize: 18, + lineHeight: 24, + fontWeight: "bold", + }, + swapContainer: { + alignItems: "center", + flexDirection: "row", + marginVertical: 8, + }, + horizontalLine: { + borderBottomColor: theme.colors.primary9, + borderBottomWidth: 1, + flex: 1, + }, + infoContainer: { + flex: 1, + justifyContent: "flex-start", + }, + bodyContainer: { + flex: 1, + padding: 24, + }, + buttonContainer: {}, + keyboardContainer: { + paddingHorizontal: 16, + marginBottom: 30, + }, +})) diff --git a/app/components/amount-input-screen/amount-input-screen.stories.tsx b/app/components/amount-input-screen/amount-input-screen.stories.tsx new file mode 100644 index 0000000..af3f3bf --- /dev/null +++ b/app/components/amount-input-screen/amount-input-screen.stories.tsx @@ -0,0 +1,91 @@ +import React from "react" +import { StoryScreen } from "../../../.storybook/views" +import { ComponentMeta } from "@storybook/react" +import { MockedProvider } from "@apollo/client/testing" +import { createCache } from "../../graphql/cache" +import { AmountInputScreen, AmountInputScreenProps } from "./amount-input-screen" +import { WalletCurrency } from "../../graphql/generated" +import mocks from "../../graphql/mocks" +import { + DisplayCurrency, + MoneyAmount, + WalletOrDisplayCurrency, +} from "../../types/amounts" + +export default { + title: "Amount Input Screen", + component: AmountInputScreen, + decorators: [ + (Story) => ( + + {Story()} + + ), + ], +} as ComponentMeta + +const amountInputDefaultProps: AmountInputScreenProps = { + initialAmount: { + amount: 0, + currency: DisplayCurrency, + }, + walletCurrency: WalletCurrency.Btc, + setAmount: (moneyAmount: MoneyAmount) => + console.log("set amount: ", moneyAmount), + goBack: () => console.log("go back"), + convertMoneyAmount: (moneyAmount, toCurrency) => { + return { + amount: moneyAmount.amount, + currency: toCurrency, + } + }, +} + +export const NoAmount = () => ( + + + +) + +const amountProps: AmountInputScreenProps = { + ...amountInputDefaultProps, + initialAmount: { + amount: 100, + currency: DisplayCurrency, + }, +} + +export const Amount = () => ( + + + +) + +const maxAmountExceededProps: 
AmountInputScreenProps = { + ...amountInputDefaultProps, + initialAmount: { + amount: 200, + currency: DisplayCurrency, + }, + maxAmount: { + amount: 100, + currency: DisplayCurrency, + }, +} + +export const MaxAmountExceeded = () => ( + + + +) + +const noSecondaryCurrencyProps: AmountInputScreenProps = { + ...amountInputDefaultProps, + walletCurrency: WalletCurrency.Usd, +} + +export const NoSecondaryCurrency = () => ( + + + +) diff --git a/app/components/amount-input-screen/amount-input-screen.tsx b/app/components/amount-input-screen/amount-input-screen.tsx new file mode 100644 index 0000000..a14e86a --- /dev/null +++ b/app/components/amount-input-screen/amount-input-screen.tsx @@ -0,0 +1,249 @@ +import * as React from "react" +import { WalletCurrency } from "@app/graphql/generated" +import { useDisplayCurrency } from "@app/hooks/use-display-currency" +import { useI18nContext } from "@app/i18n/i18n-react" +import { ConvertMoneyAmount } from "@app/screens/send-bitcoin-screen/payment-details" +import { + DisplayCurrency, + greaterThan, + lessThan, + MoneyAmount, + WalletOrDisplayCurrency, +} from "@app/types/amounts" +import { useCallback, useEffect, useReducer } from "react" +import { AmountInputScreenUI } from "./amount-input-screen-ui" +import { + Key, + NumberPadNumber, + numberPadReducer, + NumberPadReducerActionType, + NumberPadReducerState, +} from "./number-pad-reducer" + +export type AmountInputScreenProps = { + goBack: () => void + initialAmount: MoneyAmount + setAmount?: (amount: MoneyAmount) => void + walletCurrency: WalletCurrency + convertMoneyAmount: ConvertMoneyAmount + maxAmount?: MoneyAmount + minAmount?: MoneyAmount +} + +const formatNumberPadNumber = (numberPadNumber: NumberPadNumber) => { + const { majorAmount, minorAmount, hasDecimal } = numberPadNumber + + if (!majorAmount && !minorAmount && !hasDecimal) { + return "" + } + + const formattedMajorAmount = Number(majorAmount).toLocaleString() + + if (hasDecimal) { + return `${formattedMajorAmount}.${minorAmount}` + } + + return formattedMajorAmount +} + +const numberPadNumberToMoneyAmount = ({ + numberPadNumber, + currency, + minorUnitToMajorUnitOffset, +}: { + numberPadNumber: NumberPadNumber + currency: WalletOrDisplayCurrency + minorUnitToMajorUnitOffset: number +}): MoneyAmount => { + const { majorAmount, minorAmount } = numberPadNumber + + const majorAmountInMinorUnit = + Math.pow(10, minorUnitToMajorUnitOffset) * Number(majorAmount) + + // if minorUnitToMajorUnitOffset is 2, slice 234354 to 23 + const slicedMinorAmount = minorAmount.slice(0, minorUnitToMajorUnitOffset) + // if minorAmount is 4 and minorUnitToMajorUnitOffset is 2, then missing zeros is 1 + const minorAmountMissingZeros = minorUnitToMajorUnitOffset - slicedMinorAmount.length + + const amount = + majorAmountInMinorUnit + Number(minorAmount) * Math.pow(10, minorAmountMissingZeros) + return { + amount, + currency, + } +} + +const moneyAmountToNumberPadReducerState = ({ + moneyAmount, + currencyInfo, +}: { + moneyAmount: MoneyAmount + currencyInfo: ReturnType["currencyInfo"] +}): NumberPadReducerState => { + const amountString = moneyAmount.amount.toString() + const { minorUnitToMajorUnitOffset, showFractionDigits } = + currencyInfo[moneyAmount.currency] + + let numberPadNumber: NumberPadNumber + + if (amountString === "0") { + numberPadNumber = { + majorAmount: "", + minorAmount: "", + hasDecimal: false, + } + } else if (amountString.length <= minorUnitToMajorUnitOffset) { + numberPadNumber = { + majorAmount: "0", + minorAmount: showFractionDigits + ? 
amountString.padStart(minorUnitToMajorUnitOffset, "0") + : "", + hasDecimal: showFractionDigits, + } + } else { + numberPadNumber = { + majorAmount: amountString.slice( + 0, + amountString.length - minorUnitToMajorUnitOffset, + ), + minorAmount: showFractionDigits + ? amountString.slice(amountString.length - minorUnitToMajorUnitOffset) + : "", + hasDecimal: showFractionDigits && minorUnitToMajorUnitOffset > 0, + } + } + + return { + numberPadNumber, + numberOfDecimalsAllowed: showFractionDigits ? minorUnitToMajorUnitOffset : 0, + currency: moneyAmount.currency, + } +} + +export const AmountInputScreen: React.FC = ({ + goBack, + initialAmount, + setAmount, + walletCurrency, + convertMoneyAmount, + maxAmount, + minAmount, +}) => { + const { currencyInfo, getSecondaryAmountIfCurrencyIsDifferent, formatMoneyAmount } = + useDisplayCurrency() + + const { LL } = useI18nContext() + + const [numberPadState, dispatchNumberPadAction] = useReducer( + numberPadReducer, + moneyAmountToNumberPadReducerState({ + moneyAmount: initialAmount, + currencyInfo, + }), + ) + + const newPrimaryAmount = numberPadNumberToMoneyAmount({ + numberPadNumber: numberPadState.numberPadNumber, + currency: numberPadState.currency, + minorUnitToMajorUnitOffset: + currencyInfo[numberPadState.currency].minorUnitToMajorUnitOffset, + }) + + const secondaryNewAmount = getSecondaryAmountIfCurrencyIsDifferent({ + primaryAmount: newPrimaryAmount, + walletAmount: convertMoneyAmount(newPrimaryAmount, walletCurrency), + displayAmount: convertMoneyAmount(newPrimaryAmount, DisplayCurrency), + }) + + const onKeyPress = (key: Key) => { + dispatchNumberPadAction({ + action: NumberPadReducerActionType.HandleKeyPress, + payload: { + key, + }, + }) + } + + const onClear = () => { + dispatchNumberPadAction({ + action: NumberPadReducerActionType.ClearAmount, + }) + } + + const setNumberPadAmount = useCallback( + (amount: MoneyAmount) => { + dispatchNumberPadAction({ + action: NumberPadReducerActionType.SetAmount, + payload: moneyAmountToNumberPadReducerState({ + moneyAmount: amount, + currencyInfo, + }), + }) + }, + [currencyInfo], + ) + + const onToggleCurrency = + secondaryNewAmount && + (() => { + setNumberPadAmount(secondaryNewAmount) + }) + + useEffect(() => { + setNumberPadAmount(initialAmount) + }, [initialAmount, setNumberPadAmount]) + + let errorMessage = "" + if ( + maxAmount && + greaterThan({ + value: convertMoneyAmount(newPrimaryAmount, maxAmount.currency), + greaterThan: maxAmount, + }) + ) { + errorMessage = LL.AmountInputScreen.maxAmountExceeded({ + maxAmount: formatMoneyAmount({ moneyAmount: maxAmount }), + }) + } else if ( + minAmount && + lessThan({ + value: convertMoneyAmount(newPrimaryAmount, minAmount.currency), + lessThan: minAmount, + }) + ) { + errorMessage = LL.AmountInputScreen.minAmountNotMet({ + minAmount: formatMoneyAmount({ moneyAmount: minAmount }), + }) + } + + const primaryCurrencyInfo = currencyInfo[newPrimaryAmount.currency] + const secondaryCurrencyInfo = + secondaryNewAmount && currencyInfo[secondaryNewAmount.currency] + + return ( + setAmount(newPrimaryAmount))} + goBack={goBack} + /> + ) +} diff --git a/app/components/amount-input-screen/index.ts b/app/components/amount-input-screen/index.ts new file mode 100644 index 0000000..7114ecd --- /dev/null +++ b/app/components/amount-input-screen/index.ts @@ -0,0 +1 @@ +export * from "./amount-input-screen" diff --git a/app/components/amount-input-screen/number-pad-reducer.ts b/app/components/amount-input-screen/number-pad-reducer.ts new file mode 100644 index 
0000000..336512f --- /dev/null +++ b/app/components/amount-input-screen/number-pad-reducer.ts @@ -0,0 +1,156 @@ +import { WalletOrDisplayCurrency } from "@app/types/amounts" + +export type NumberPadNumber = { + majorAmount: string + minorAmount: string + hasDecimal: boolean +} + +export type NumberPadReducerState = { + numberPadNumber: NumberPadNumber + numberOfDecimalsAllowed: number + currency: WalletOrDisplayCurrency +} + +export const NumberPadReducerActionType = { + SetAmount: "SetAmount", + HandleKeyPress: "HandleKeyPress", + ClearAmount: "ClearAmount", +} as const + +export type NumberPadReducerAction = + | { + action: typeof NumberPadReducerActionType.SetAmount + payload: { + numberPadNumber: NumberPadNumber + numberOfDecimalsAllowed: number + currency: WalletOrDisplayCurrency + } + } + | { + action: typeof NumberPadReducerActionType.HandleKeyPress + payload: { + key: Key + } + } + | { + action: typeof NumberPadReducerActionType.ClearAmount + } + +export const Key = { + Backspace: "âŒĢ", + 0: "0", + 1: "1", + 2: "2", + 3: "3", + 4: "4", + 5: "5", + 6: "6", + 7: "7", + 8: "8", + 9: "9", + Decimal: ".", +} as const + +export type Key = (typeof Key)[keyof typeof Key] + +export type NumberPadReducerActionType = + (typeof NumberPadReducerActionType)[keyof typeof NumberPadReducerActionType] + +export const numberPadReducer = ( + state: NumberPadReducerState, + action: NumberPadReducerAction, +): NumberPadReducerState => { + const { + numberPadNumber: { majorAmount, minorAmount, hasDecimal }, + numberOfDecimalsAllowed, + } = state + + switch (action.action) { + case NumberPadReducerActionType.SetAmount: + return action.payload + case NumberPadReducerActionType.HandleKeyPress: + if (action.payload.key === Key.Backspace) { + if (minorAmount.length > 0) { + return { + ...state, + numberPadNumber: { + majorAmount, + hasDecimal, + minorAmount: minorAmount.slice(0, -1), + }, + } + } + + if (hasDecimal) { + return { + ...state, + numberPadNumber: { + majorAmount, + hasDecimal: false, + minorAmount, + }, + } + } + + return { + ...state, + numberPadNumber: { + majorAmount: majorAmount.slice(0, -1), + hasDecimal, + minorAmount, + }, + } + } + + if (action.payload.key === Key.Decimal) { + if (numberOfDecimalsAllowed > 0) { + return { + ...state, + numberPadNumber: { + majorAmount, + minorAmount, + hasDecimal: true, + }, + } + } + return state + } + + if (hasDecimal && minorAmount.length < numberOfDecimalsAllowed) { + return { + ...state, + numberPadNumber: { + majorAmount, + hasDecimal, + minorAmount: minorAmount + action.payload.key, + }, + } + } + + if (hasDecimal && minorAmount.length >= numberOfDecimalsAllowed) { + return state + } + + return { + ...state, + numberPadNumber: { + majorAmount: majorAmount + action.payload.key, + minorAmount, + hasDecimal, + }, + } + case NumberPadReducerActionType.ClearAmount: + return { + ...state, + numberPadNumber: { + majorAmount: "", + minorAmount: "", + hasDecimal: false, + }, + } + + default: + return state + } +} diff --git a/app/components/amount-input/amount-input-button.tsx b/app/components/amount-input/amount-input-button.tsx new file mode 100644 index 0000000..6b3f148 --- /dev/null +++ b/app/components/amount-input/amount-input-button.tsx @@ -0,0 +1,112 @@ +import { useTheme, Text, makeStyles } from "@rneui/themed" +import React from "react" +import { Pressable, PressableProps, StyleProp, View, ViewStyle } from "react-native" +import { GaloyIcon } from "@app/components/atomic/galoy-icon" +import { testProps } from "@app/utils/testProps" + +export type 
AmountInputButtonProps = { + placeholder?: string + value?: string + iconName?: "pencil" | "info" + error?: boolean + disabled?: boolean + secondaryValue?: string + primaryTextTestProps?: string +} & PressableProps + +export const AmountInputButton: React.FC = ({ + placeholder, + value, + iconName, + error, + disabled, + secondaryValue, + primaryTextTestProps, + ...props +}) => { + const { theme } = useTheme() + const styles = useStyles() + + const pressableStyle = ({ pressed }: { pressed: boolean }): StyleProp => { + let colorStyles = {} + switch (true) { + case error: + colorStyles = { + backgroundColor: theme.colors.error9, + } + break + case pressed: + colorStyles = { + backgroundColor: theme.colors.primary9, + } + break + case disabled: + colorStyles = { + backgroundColor: theme.colors.white, + } + break + default: + colorStyles = { + backgroundColor: theme.colors.white, + } + } + + const sizeStyles = { + paddingVertical: 8, + paddingHorizontal: 12, + borderRadius: 8, + height: secondaryValue ? 60 : 40, + justifyContent: secondaryValue ? "space-between" : "center", + } + + return [colorStyles, sizeStyles] + } + + const primaryText = value || placeholder || "" + + return ( + + + + {primaryText} + + {iconName && ( + + )} + + {secondaryValue && ( + + {secondaryValue} + + )} + + ) +} + +const useStyles = makeStyles(() => ({ + contentContainerStyle: { + flexDirection: "row", + justifyContent: "space-between", + alignItems: "center", + flex: 1, + }, + iconStyle: { + marginLeft: 8, + flex: 1, + }, + primaryTextStyle: { + flex: 1, + }, +})) diff --git a/app/components/amount-input/amount-input-modal.tsx b/app/components/amount-input/amount-input-modal.tsx new file mode 100644 index 0000000..1cfc918 --- /dev/null +++ b/app/components/amount-input/amount-input-modal.tsx @@ -0,0 +1,67 @@ +import * as React from "react" +import { WalletCurrency } from "@app/graphql/generated" +import { ConvertMoneyAmount } from "@app/screens/send-bitcoin-screen/payment-details" +import { + MoneyAmount, + WalletOrDisplayCurrency, + ZeroDisplayAmount, +} from "@app/types/amounts" +import { makeStyles } from "@rneui/themed" +import { SafeAreaView } from "react-native" +import ReactNativeModal from "react-native-modal" +import { AmountInputScreen } from "../amount-input-screen" + +export type AmountInputModalProps = { + moneyAmount?: MoneyAmount + walletCurrency: WalletCurrency + convertMoneyAmount: ConvertMoneyAmount + onSetAmount?: (moneyAmount: MoneyAmount) => void + maxAmount?: MoneyAmount + minAmount?: MoneyAmount + isOpen: boolean + close: () => void +} + +export const AmountInputModal: React.FC = ({ + moneyAmount, + walletCurrency, + onSetAmount, + maxAmount, + minAmount, + convertMoneyAmount, + isOpen, + close, +}) => { + const styles = useStyles() + + return ( + + + + + + ) +} + +const useStyles = makeStyles((theme) => ({ + amountInputScreenContainer: { + flex: 1, + }, + modal: { + backgroundColor: theme.colors.white, + margin: 0, + }, +})) diff --git a/app/components/amount-input/amount-input.stories.tsx b/app/components/amount-input/amount-input.stories.tsx new file mode 100644 index 0000000..716a604 --- /dev/null +++ b/app/components/amount-input/amount-input.stories.tsx @@ -0,0 +1,96 @@ +import React from "react" +import { StoryScreen } from "../../../.storybook/views" +import { ComponentMeta } from "@storybook/react" +import { MockedProvider } from "@apollo/client/testing" +import { createCache } from "../../graphql/cache" +import { AmountInput, AmountInputProps } from "./amount-input" +import { 
WalletCurrency } from "../../graphql/generated" +import mocks from "../../graphql/mocks" +import { + DisplayCurrency, + MoneyAmount, + WalletOrDisplayCurrency, + ZeroDisplayAmount, +} from "../../types/amounts" + +export default { + title: "Amount Input", + component: AmountInput, + decorators: [ + (Story) => ( + + {Story()} + + ), + ], + parameters: { + backgrounds: { + values: [ + { name: "black", value: "#000" }, + { name: "white", value: "#fff" }, + ], + }, + }, +} as ComponentMeta + +const moneyAmountInputModalDefaultProps: AmountInputProps = { + moneyAmount: { + amount: 0, + currency: DisplayCurrency, + }, + walletCurrency: WalletCurrency.Btc, + setAmount: (moneyAmount: MoneyAmount) => + console.log("set amount: ", moneyAmount), + convertMoneyAmount: (moneyAmount, toCurrency) => { + return { + amount: moneyAmount.amount, + currency: toCurrency, + } + }, +} + +export const Default = () => { + const [moneyAmount, setMoneyAmount] = + React.useState>(ZeroDisplayAmount) + + return ( + + ) +} + +export const WalletCurrencyIsDisplayCurrency = () => { + const [moneyAmount, setMoneyAmount] = + React.useState>(ZeroDisplayAmount) + + return ( + + ) +} + +export const AmountIsNotEditable = () => { + const [moneyAmount, setMoneyAmount] = React.useState< + MoneyAmount + >({ + amount: 1234, + currency: WalletCurrency.Usd, + }) + + return ( + + ) +} diff --git a/app/components/amount-input/amount-input.tsx b/app/components/amount-input/amount-input.tsx new file mode 100644 index 0000000..e174bfb --- /dev/null +++ b/app/components/amount-input/amount-input.tsx @@ -0,0 +1,100 @@ +import * as React from "react" +import { WalletCurrency } from "@app/graphql/generated" +import { useDisplayCurrency } from "@app/hooks/use-display-currency" +import { useI18nContext } from "@app/i18n/i18n-react" +import { ConvertMoneyAmount } from "@app/screens/send-bitcoin-screen/payment-details" +import { + DisplayCurrency, + isNonZeroMoneyAmount, + MoneyAmount, + WalletOrDisplayCurrency, +} from "@app/types/amounts" +import { testProps } from "@app/utils/testProps" +import { AmountInputModal } from "./amount-input-modal" +import { AmountInputButton } from "./amount-input-button" + +export type AmountInputProps = { + moneyAmount?: MoneyAmount + walletCurrency: WalletCurrency + convertMoneyAmount: ConvertMoneyAmount + setAmount?: (moneyAmount: MoneyAmount) => void + maxAmount?: MoneyAmount + minAmount?: MoneyAmount + canSetAmount?: boolean +} + +export const AmountInput: React.FC = ({ + moneyAmount, + walletCurrency, + setAmount, + maxAmount, + minAmount, + convertMoneyAmount, + canSetAmount = true, +}) => { + const [isSettingAmount, setIsSettingAmount] = React.useState(false) + const { formatMoneyAmount, getSecondaryAmountIfCurrencyIsDifferent } = + useDisplayCurrency() + const { LL } = useI18nContext() + + const onSetAmount = (amount: MoneyAmount) => { + setAmount && setAmount(amount) + setIsSettingAmount(false) + } + + if (isSettingAmount) { + return ( + setIsSettingAmount(false)} + /> + ) + } + + let formattedPrimaryAmount = undefined + let formattedSecondaryAmount = undefined + + if (isNonZeroMoneyAmount(moneyAmount)) { + formattedPrimaryAmount = formatMoneyAmount({ moneyAmount }) + const secondaryAmount = getSecondaryAmountIfCurrencyIsDifferent({ + primaryAmount: moneyAmount, + walletAmount: convertMoneyAmount(moneyAmount, walletCurrency), + displayAmount: convertMoneyAmount(moneyAmount, DisplayCurrency), + }) + formattedSecondaryAmount = + secondaryAmount && formatMoneyAmount({ moneyAmount: secondaryAmount }) + } + + 
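+ // Illustrative usage sketch (not part of the original diff). A parent screen
+ // would typically pass a converter from a price hook; the identity converter
+ // used in the Storybook stories above also satisfies ConvertMoneyAmount:
+ //
+ // <AmountInput
+ //   moneyAmount={moneyAmount}
+ //   walletCurrency={WalletCurrency.Btc}
+ //   convertMoneyAmount={(amount, toCurrency) => ({
+ //     amount: amount.amount,
+ //     currency: toCurrency,
+ //   })}
+ //   setAmount={setMoneyAmount}
+ // />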
const onPressInputButton = () => { + setIsSettingAmount(true) + } + + if (canSetAmount) { + return ( + + ) + } + return ( + + ) +} diff --git a/app/components/amount-input/index.ts b/app/components/amount-input/index.ts new file mode 100644 index 0000000..5146ab2 --- /dev/null +++ b/app/components/amount-input/index.ts @@ -0,0 +1,2 @@ +export * from "./amount-input" +export * from "./amount-input-modal" diff --git a/app/components/app-update/app-update.logic.ts b/app/components/app-update/app-update.logic.ts new file mode 100644 index 0000000..f930861 --- /dev/null +++ b/app/components/app-update/app-update.logic.ts @@ -0,0 +1,43 @@ +import { Platform } from "react-native" + +export const isUpdateAvailableOrRequired = ({ + buildNumber, + mobileVersions, + OS, +}: { + buildNumber: number + mobileVersions: + | readonly ({ + readonly platform: string + readonly currentSupported: number + readonly minSupported: number + } | null)[] + | null + | undefined + OS: Platform["OS"] +}) => { + if (!mobileVersions) { + return { + required: false, + available: false, + } + } + + // we need to use the modulo because the build number is not the same across ABI + // and we are multiple by a factor of 10000000 to differentiate between platforms + // https://github.com/GaloyMoney/galoy-mobile/blob/c971ace92e420e8f90cab209cb9e2c341b71ab42/android/app/build.gradle#L145 + const buildNumberNoAbi = buildNumber % 10000000 + + const minSupportedVersion = + mobileVersions.find((mobileVersion) => mobileVersion?.platform === OS) + ?.minSupported ?? NaN + + const currentSupportedVersion = + mobileVersions.find((mobileVersion) => mobileVersion?.platform === OS) + ?.currentSupported ?? NaN + + return { + required: buildNumberNoAbi < minSupportedVersion, + available: buildNumberNoAbi < currentSupportedVersion, + } +} diff --git a/app/components/app-update/app-update.stories.tsx b/app/components/app-update/app-update.stories.tsx new file mode 100644 index 0000000..f684b50 --- /dev/null +++ b/app/components/app-update/app-update.stories.tsx @@ -0,0 +1,103 @@ +import React from "react" +import { AppUpdate, AppUpdateModal } from "./app-update" +import { StoryScreen } from "../../../.storybook/views" +import { ComponentMeta } from "@storybook/react" +import { MockedProvider } from "@apollo/client/testing" +import { createCache } from "../../graphql/cache" +import { IsAuthedContextProvider } from "../../graphql/is-authed-context" +import { MobileUpdateDocument } from "../../graphql/generated" +import { GaloyPrimaryButton } from "../../components/atomic/galoy-primary-button" +import { View } from "react-native" + +const updateAvailable = [ + { + request: { + query: MobileUpdateDocument, + }, + result: { + data: { + mobileVersions: [ + { + platform: "android", + currentSupported: 500, + minSupported: 400, + }, + { + platform: "ios", + currentSupported: 500, + minSupported: 400, + }, + ], + }, + }, + }, +] + +const updateRequired = [ + { + request: { + query: MobileUpdateDocument, + }, + result: { + data: { + mobileVersions: [ + { + platform: "android", + currentSupported: 500, + minSupported: 450, + }, + { + platform: "ios", + currentSupported: 500, + minSupported: 450, + }, + ], + }, + }, + }, +] + +// TODO: look at how to use mocks in storybook +// we need to get a consistent number if we don't want to have to update the +// number in the query every time. 
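+// Worked example for isUpdateAvailableOrRequired above, with illustrative
+// numbers: an ABI-specific Android buildNumber of 20000450 reduces to
+// 20000450 % 10000000 === 450. Against { minSupported: 400, currentSupported: 500 }
+// that yields { required: false, available: true }, i.e. an optional update is
+// offered but not forced.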
+// +// alternatively, +// we could do some math and set currentSupported: getBuildNumber() + 1 +// +// jest.mock("react-native-device-info", () => ({ +// getBuildNumber: () => 427, +// })) + +export default { + title: "App Update", + component: AppUpdate, + decorators: [ + (Story) => ( + + {Story()} + + ), + ], +} as ComponentMeta + +export const UpdateAvailable = () => ( + + + +) + +export const UpdateRequiredModal = () => { + const [visible, setVisible] = React.useState(false) + + const openModal = () => setVisible(true) + const closeModal = () => setVisible(false) + return ( + + + + + + + + ) +} diff --git a/app/components/app-update/app-update.tsx b/app/components/app-update/app-update.tsx new file mode 100644 index 0000000..c091bd4 --- /dev/null +++ b/app/components/app-update/app-update.tsx @@ -0,0 +1,129 @@ +import { gql } from "@apollo/client" +import { useMobileUpdateQuery } from "@app/graphql/generated" + +import * as React from "react" +import { Linking, Platform, Pressable, StyleSheet, Text, View } from "react-native" +import DeviceInfo from "react-native-device-info" + +import { VersionComponent } from "@app/components/version" +import { APP_STORE_LINK, PLAY_STORE_LINK } from "@app/config" +import { useI18nContext } from "@app/i18n/i18n-react" +import { palette } from "@app/theme" +import ReactNativeModal from "react-native-modal" +import { isIos } from "../../utils/helper" +import { Button } from "@rneui/base" +import { openWhatsAppAction } from "@app/components/contact-modal" +import { isUpdateAvailableOrRequired } from "./app-update.logic" + +gql` + query mobileUpdate { + mobileVersions { + platform + currentSupported + minSupported + } + } +` + +const styles = StyleSheet.create({ + bottom: { + alignItems: "center", + marginVertical: 16, + }, + + lightningText: { + fontSize: 20, + marginBottom: 12, + textAlign: "center", + }, + + versionComponent: { flex: 1, justifyContent: "flex-end", marginVertical: 48 }, + main: { flex: 5, justifyContent: "center" }, + button: { marginVertical: 12 }, +}) + +export const AppUpdate: React.FC = () => { + const { LL } = useI18nContext() + + const { data } = useMobileUpdateQuery({ fetchPolicy: "no-cache" }) + + const buildNumber = Number(DeviceInfo.getBuildNumber()) + const mobileVersions = data?.mobileVersions + + const { available, required } = isUpdateAvailableOrRequired({ + buildNumber, + mobileVersions, + OS: Platform.OS, + }) + + const openInStore = async () => { + if (isIos) { + Linking.openURL(APP_STORE_LINK) + } else { + // TODO: differentiate between PlayStore and Huawei AppGallery + Linking.openURL(PLAY_STORE_LINK) + } + } + + const linkUpgrade = () => + openInStore().catch((err) => { + console.log({ err }, "error opening app store link") + }) + + if (required) { + return + } + + if (available) { + return ( + + + {LL.HomeScreen.updateAvailable()} + + + ) + } + + return null +} + +export const AppUpdateModal = ({ + linkUpgrade, + isVisible, +}: { + linkUpgrade: () => void + isVisible: boolean +}) => { + const { LL } = useI18nContext() + + const message = LL.AppUpdate.needToUpdateSupportMessage({ + os: isIos ? "iOS" : "Android", + version: DeviceInfo.getReadableVersion(), + }) + + return ( + + + {LL.AppUpdate.versionNotSupported()} + {LL.AppUpdate.updateMandatory()} + + # + def with_options(options, &block) + option_merger = ActiveSupport::OptionMerger.new(self, options) + + if block + block.arity.zero? ?
option_merger.instance_eval(&block) : block.call(option_merger) + else + option_merger + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname.rb new file mode 100644 index 0000000..611a43e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/pathname/existence" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname/existence.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname/existence.rb new file mode 100644 index 0000000..9bb9b17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/pathname/existence.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +class Pathname + # Returns the receiver if the named file exists otherwise returns +nil+. + # pathname.existence is equivalent to + # + # pathname.exist? ? pathname : nil + # + # For example, something like + # + # content = pathname.read if pathname.exist? + # + # becomes + # + # content = pathname.existence&.read + # + # @return [Pathname] + def existence + self if exist? + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range.rb new file mode 100644 index 0000000..ba6bc9b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require "active_support/core_ext/range/conversions" +require "active_support/core_ext/range/deprecated_conversions" unless ENV["RAILS_DISABLE_DEPRECATED_TO_S_CONVERSION"] +require "active_support/core_ext/range/compare_range" +require "active_support/core_ext/range/overlaps" +require "active_support/core_ext/range/each" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/compare_range.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/compare_range.rb new file mode 100644 index 0000000..affbbeb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/compare_range.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module ActiveSupport + module CompareWithRange + # Extends the default Range#=== to support range comparisons. + # (1..5) === (1..5) # => true + # (1..5) === (2..3) # => true + # (1..5) === (1...6) # => true + # (1..5) === (2..6) # => false + # + # The native Range#=== behavior is untouched. + # ('a'..'f') === ('c') # => true + # (5..9) === (11) # => false + # + # The given range must be fully bounded, with both start and end. + def ===(value) + if value.is_a?(::Range) + is_backwards_op = value.exclude_end? ? :>= : :> + return false if value.begin && value.end && value.begin.public_send(is_backwards_op, value.end) + # 1...10 includes 1..9 but it does not include 1..10. + # 1..10 includes 1...11 but it does not include 1...12. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + value_max = !exclude_end? && value.exclude_end? ? value.max : value.last + super(value.first) && (self.end.nil? 
|| value_max.public_send(operator, last)) + else + super + end + end + + # Extends the default Range#include? to support range comparisons. + # (1..5).include?(1..5) # => true + # (1..5).include?(2..3) # => true + # (1..5).include?(1...6) # => true + # (1..5).include?(2..6) # => false + # + # The native Range#include? behavior is untouched. + # ('a'..'f').include?('c') # => true + # (5..9).include?(11) # => false + # + # The given range must be fully bounded, with both start and end. + def include?(value) + if value.is_a?(::Range) + is_backwards_op = value.exclude_end? ? :>= : :> + return false if value.begin && value.end && value.begin.public_send(is_backwards_op, value.end) + # 1...10 includes 1..9 but it does not include 1..10. + # 1..10 includes 1...11 but it does not include 1...12. + operator = exclude_end? && !value.exclude_end? ? :< : :<= + value_max = !exclude_end? && value.exclude_end? ? value.max : value.last + super(value.first) && (self.end.nil? || value_max.public_send(operator, last)) + else + super + end + end + end +end + +Range.prepend(ActiveSupport::CompareWithRange) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/conversions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/conversions.rb new file mode 100644 index 0000000..1c80ea3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/conversions.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module ActiveSupport + module RangeWithFormat + RANGE_FORMATS = { + db: -> (start, stop) do + case start + when String then "BETWEEN '#{start}' AND '#{stop}'" + else + "BETWEEN '#{start.to_fs(:db)}' AND '#{stop.to_fs(:db)}'" + end + end + } + + # Convert range to a formatted string. See RANGE_FORMATS for predefined formats. + # + # This method is aliased to to_formatted_s. + # + # range = (1..100) # => 1..100 + # + # range.to_s # => "1..100" + # range.to_fs(:db) # => "BETWEEN '1' AND '100'" + # + # == Adding your own range formats to to_s + # You can add your own formats to the Range::RANGE_FORMATS hash. + # Use the format name as the hash key and a Proc instance. + # + # # config/initializers/range_formats.rb + # Range::RANGE_FORMATS[:short] = ->(start, stop) { "Between #{start.to_fs(:db)} and #{stop.to_fs(:db)}" } + def to_fs(format = :default) + if formatter = RANGE_FORMATS[format] + formatter.call(first, last) + else + to_s + end + end + alias_method :to_formatted_s, :to_fs + end +end + +Range.prepend(ActiveSupport::RangeWithFormat) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/deprecated_conversions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/deprecated_conversions.rb new file mode 100644 index 0000000..86b377f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/deprecated_conversions.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module ActiveSupport + module DeprecatedRangeWithFormat # :nodoc: + NOT_SET = Object.new # :nodoc: + def to_s(format = NOT_SET) + if formatter = RangeWithFormat::RANGE_FORMATS[format] + ActiveSupport::Deprecation.warn( + "Range#to_s(#{format.inspect}) is deprecated. Please use Range#to_fs(#{format.inspect}) instead." + ) + formatter.call(first, last) + elsif format == NOT_SET + super() + else + ActiveSupport::Deprecation.warn( + "Range#to_s(#{format.inspect}) is deprecated. 
Please use Range#to_fs(#{format.inspect}) instead." + ) + super() + end + end + alias_method :to_default_s, :to_s + deprecate :to_default_s + end +end + +Range.prepend(ActiveSupport::DeprecatedRangeWithFormat) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/each.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/each.rb new file mode 100644 index 0000000..1c44cc8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/each.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" + +module ActiveSupport + module EachTimeWithZone # :nodoc: + def each(&block) + ensure_iteration_allowed + super + end + + def step(n = 1, &block) + ensure_iteration_allowed + super + end + + private + def ensure_iteration_allowed + raise TypeError, "can't iterate from #{first.class}" if first.is_a?(TimeWithZone) + end + end +end + +Range.prepend(ActiveSupport::EachTimeWithZone) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/include_time_with_zone.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/include_time_with_zone.rb new file mode 100644 index 0000000..f2378d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/include_time_with_zone.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +# frozen_string_literal: true + +ActiveSupport::Deprecation.warn(<<-MSG.squish) + `active_support/core_ext/range/include_time_with_zone` is deprecated and will be removed in Rails 7.1. +MSG diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/overlaps.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/overlaps.rb new file mode 100644 index 0000000..c286988 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/range/overlaps.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +class Range + # Compare two ranges and see if they overlap each other + # (1..5).overlaps?(4..6) # => true + # (1..5).overlaps?(7..9) # => false + def overlaps?(other) + other.begin == self.begin || cover?(other.begin) || other.cover?(self.begin) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/regexp.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/regexp.rb new file mode 100644 index 0000000..15534ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/regexp.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +class Regexp + # Returns +true+ if the regexp has the multiline flag set. + # + # (/./).multiline? # => false + # (/./m).multiline? # => true + # + # Regexp.new(".").multiline? # => false + # Regexp.new(".", Regexp::MULTILINE).multiline? # => true + def multiline? 
+ options & MULTILINE == MULTILINE + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/securerandom.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/securerandom.rb new file mode 100644 index 0000000..fa6b68b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/securerandom.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +require "securerandom" + +module SecureRandom + BASE58_ALPHABET = ("0".."9").to_a + ("A".."Z").to_a + ("a".."z").to_a - ["0", "O", "I", "l"] + BASE36_ALPHABET = ("0".."9").to_a + ("a".."z").to_a + + # SecureRandom.base58 generates a random base58 string. + # + # The argument _n_ specifies the length of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. + # + # The result may contain alphanumeric characters except 0, O, I, and l. + # + # p SecureRandom.base58 # => "4kUgL2pdQMSCQtjE" + # p SecureRandom.base58(24) # => "77TMHrHJFvFDwodq8w7Ev2m7" + def self.base58(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(58) if idx >= 58 + BASE58_ALPHABET[idx] + end.join + end + + # SecureRandom.base36 generates a random base36 string in lowercase. + # + # The argument _n_ specifies the length of the random string to be generated. + # + # If _n_ is not specified or is +nil+, 16 is assumed. It may be larger in the future. + # This method can be used over +base58+ if a deterministic case key is necessary. + # + # The result will contain alphanumeric characters in lowercase. + # + # p SecureRandom.base36 # => "4kugl2pdqmscqtje" + # p SecureRandom.base36(24) # => "77tmhrhjfvfdwodq8w7ev2m7" + def self.base36(n = 16) + SecureRandom.random_bytes(n).unpack("C*").map do |byte| + idx = byte % 64 + idx = SecureRandom.random_number(36) if idx >= 36 + BASE36_ALPHABET[idx] + end.join + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string.rb new file mode 100644 index 0000000..757d15c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/filters" +require "active_support/core_ext/string/multibyte" +require "active_support/core_ext/string/starts_ends_with" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/string/output_safety" +require "active_support/core_ext/string/exclude" +require "active_support/core_ext/string/strip" +require "active_support/core_ext/string/inquiry" +require "active_support/core_ext/string/indent" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/access.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/access.rb new file mode 100644 index 0000000..f6a14c0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/access.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +class String + # If you pass a single integer, returns a 
substring of one character at that + # position. The first character of the string is at position 0, the next at + # position 1, and so on. If a range is supplied, a substring containing + # characters at offsets given by the range is returned. In both cases, if an + # offset is negative, it is counted from the end of the string. Returns +nil+ + # if the initial offset falls outside the string. Returns an empty string if + # the beginning of the range is greater than the end of the string. + # + # str = "hello" + # str.at(0) # => "h" + # str.at(1..3) # => "ell" + # str.at(-2) # => "l" + # str.at(-2..-1) # => "lo" + # str.at(5) # => nil + # str.at(5..-1) # => "" + # + # If a Regexp is given, the matching portion of the string is returned. + # If a String is given, that given string is returned if it occurs in + # the string. In both cases, +nil+ is returned if there is no match. + # + # str = "hello" + # str.at(/lo/) # => "lo" + # str.at(/ol/) # => nil + # str.at("lo") # => "lo" + # str.at("ol") # => nil + def at(position) + self[position] + end + + # Returns a substring from the given position to the end of the string. + # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.from(0) # => "hello" + # str.from(3) # => "lo" + # str.from(-2) # => "lo" + # + # You can mix it with +to+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def from(position) + self[position, length] + end + + # Returns a substring from the beginning of the string to the given position. + # If the position is negative, it is counted from the end of the string. + # + # str = "hello" + # str.to(0) # => "h" + # str.to(3) # => "hell" + # str.to(-2) # => "hell" + # + # You can mix it with +from+ method and do fun things like: + # + # str = "hello" + # str.from(0).to(-1) # => "hello" + # str.from(1).to(-2) # => "ell" + def to(position) + position += size if position < 0 + self[0, position + 1] || +"" + end + + # Returns the first character. If a limit is supplied, returns a substring + # from the beginning of the string until it reaches the limit value. If the + # given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.first # => "h" + # str.first(1) # => "h" + # str.first(2) # => "he" + # str.first(0) # => "" + # str.first(6) # => "hello" + def first(limit = 1) + self[0, limit] || raise(ArgumentError, "negative limit") + end + + # Returns the last character of the string. If a limit is supplied, returns a substring + # from the end of the string until it reaches the limit value (counting backwards). If + # the given limit is greater than or equal to the string length, returns a copy of self. + # + # str = "hello" + # str.last # => "o" + # str.last(1) # => "o" + # str.last(2) # => "lo" + # str.last(0) # => "" + # str.last(6) # => "hello" + def last(limit = 1) + self[[length - limit, 0].max, limit] || raise(ArgumentError, "negative limit") + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/behavior.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/behavior.rb new file mode 100644 index 0000000..35a5aa7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/behavior.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +class String + # Enables more predictable duck-typing on String-like classes. 
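A quick sketch of how the access helpers just defined combine in practice (values are illustrative and follow the doc comments above; assumes only active_support/core_ext/string/access is loaded):

    require "active_support/core_ext/string/access"

    s = "hello"
    s.at(0)           # => "h"
    s.from(3)         # => "lo"
    s.to(3)           # => "hell"
    s.from(1).to(-2)  # => "ell"  -- from/to chain, as the comments suggest
    s.first(2)        # => "he"
    s.last(2)         # => "lo"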
See Object#acts_like?. + def acts_like_string? + true + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/conversions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/conversions.rb new file mode 100644 index 0000000..58e3289 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/conversions.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "date" +require "active_support/core_ext/time/calculations" + +class String + # Converts a string to a Time value. + # The +form+ can be either +:utc+ or +:local+ (default +:local+). + # + # The time is parsed using Time.parse method. + # If +form+ is +:local+, then the time is in the system timezone. + # If the date part is missing then the current date is used and if + # the time part is missing then it is assumed to be 00:00:00. + # + # "13-12-2012".to_time # => 2012-12-13 00:00:00 +0100 + # "06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13 06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time # => 2012-12-13 06:12:00 +0100 + # "2012-12-13T06:12".to_time(:utc) # => 2012-12-13 06:12:00 UTC + # "12/13/2012".to_time # => ArgumentError: argument out of range + # "1604326192".to_time # => ArgumentError: argument out of range + def to_time(form = :local) + parts = Date._parse(self, false) + used_keys = %i(year mon mday hour min sec sec_fraction offset) + return if (parts.keys & used_keys).empty? + + now = Time.now + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, form == :utc ? 0 : nil) + ) + + form == :utc ? time.utc : time.to_time + end + + # Converts a string to a Date value. + # + # "1-1-2012".to_date # => Sun, 01 Jan 2012 + # "01/01/2012".to_date # => Sun, 01 Jan 2012 + # "2012-12-13".to_date # => Thu, 13 Dec 2012 + # "12/13/2012".to_date # => ArgumentError: invalid date + def to_date + ::Date.parse(self, false) unless blank? + end + + # Converts a string to a DateTime value. + # + # "1-1-2012".to_datetime # => Sun, 01 Jan 2012 00:00:00 +0000 + # "01/01/2012 23:59:59".to_datetime # => Sun, 01 Jan 2012 23:59:59 +0000 + # "2012-12-13 12:50".to_datetime # => Thu, 13 Dec 2012 12:50:00 +0000 + # "12/13/2012".to_datetime # => ArgumentError: invalid date + def to_datetime + ::DateTime.parse(self, false) unless blank? + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/exclude.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/exclude.rb new file mode 100644 index 0000000..8e46268 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/exclude.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +class String + # The inverse of String#include?. Returns true if the string + # does not include the other string. + # + # "hello".exclude? "lo" # => false + # "hello".exclude? "ol" # => true + # "hello".exclude? 
?h # => false + def exclude?(string) + !include?(string) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/filters.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/filters.rb new file mode 100644 index 0000000..dc163c5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/filters.rb @@ -0,0 +1,145 @@ +# frozen_string_literal: true + +class String + # Returns the string, first removing all whitespace on both ends of + # the string, and then changing remaining consecutive whitespace + # groups into one space each. + # + # Note that it handles both ASCII and Unicode whitespace. + # + # %{ Multi-line + # string }.squish # => "Multi-line string" + # " foo bar \n \t boo".squish # => "foo bar boo" + def squish + dup.squish! + end + + # Performs a destructive squish. See String#squish. + # str = " foo bar \n \t boo" + # str.squish! # => "foo bar boo" + # str # => "foo bar boo" + def squish! + gsub!(/[[:space:]]+/, " ") + strip! + self + end + + # Returns a new string with all occurrences of the patterns removed. + # str = "foo bar test" + # str.remove(" test") # => "foo bar" + # str.remove(" test", /bar/) # => "foo " + # str # => "foo bar test" + def remove(*patterns) + dup.remove!(*patterns) + end + + # Alters the string by removing all occurrences of the patterns. + # str = "foo bar test" + # str.remove!(" test", /bar/) # => "foo " + # str # => "foo " + def remove!(*patterns) + patterns.each do |pattern| + gsub! pattern, "" + end + + self + end + + # Truncates a given +text+ after a given length if +text+ is longer than length: + # + # 'Once upon a time in a world far far away'.truncate(27) + # # => "Once upon a time in a wo..." + # + # Pass a string or regexp :separator to truncate +text+ at a natural break: + # + # 'Once upon a time in a world far far away'.truncate(27, separator: ' ') + # # => "Once upon a time in a..." + # + # 'Once upon a time in a world far far away'.truncate(27, separator: /\s/) + # # => "Once upon a time in a..." + # + # The last characters will be replaced with the :omission string (defaults to "...") + # for a total length not exceeding length: + # + # 'And they found that many people were sleeping better.'.truncate(25, omission: '... (continued)') + # # => "And they f... (continued)" + def truncate(truncate_at, options = {}) + return dup unless length > truncate_at + + omission = options[:omission] || "..." + length_with_room_for_omission = truncate_at - omission.length + stop = \ + if options[:separator] + rindex(options[:separator], length_with_room_for_omission) || length_with_room_for_omission + else + length_with_room_for_omission + end + + +"#{self[0, stop]}#{omission}" + end + + # Truncates +text+ to at most bytesize bytes in length without + # breaking string encoding by splitting multibyte characters or breaking + # grapheme clusters ("perceptual characters") by truncating at combining + # characters. + # + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".size + # => 20 + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".bytesize + # => 80 + # >> "🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪🔪".truncate_bytes(20) + # => "🔪🔪🔪🔪…" + # + # The truncated text ends with the :omission string, defaulting + # to "…", for a total length not exceeding bytesize.
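A quick sketch of these filter methods side by side, ahead of the truncate_bytes implementation below (outputs mirror the doc comments above; assumes the filters extension is loaded):

    require "active_support/core_ext/string/filters"

    " foo   bar \n \t boo".squish                               # => "foo bar boo"
    "foo bar test".remove(" test")                              # => "foo bar"
    "Once upon a time in a world".truncate(17)                  # => "Once upon a ti..."
    "Once upon a time in a world".truncate(17, separator: " ")  # => "Once upon a..."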
+ def truncate_bytes(truncate_at, omission: "…") + omission ||= "" + + case + when bytesize <= truncate_at + dup + when omission.bytesize > truncate_at + raise ArgumentError, "Omission #{omission.inspect} is #{omission.bytesize}, larger than the truncation length of #{truncate_at} bytes" + when omission.bytesize == truncate_at + omission.dup + else + self.class.new.tap do |cut| + cut_at = truncate_at - omission.bytesize + + each_grapheme_cluster do |grapheme| + if cut.bytesize + grapheme.bytesize <= cut_at + cut << grapheme + else + break + end + end + + cut << omission + end + end + end + + # Truncates a given +text+ after a given number of words (words_count): + # + # 'Once upon a time in a world far far away'.truncate_words(4) + # # => "Once upon a time..." + # + # Pass a string or regexp :separator to specify a different separator of words: + # + # 'Once<br>upon<br>a<br>time<br>in<br>a<br>world'.truncate_words(5, separator: '<br>') + # # => "Once<br>upon<br>a<br>time<br>
in..." + # + # The last characters will be replaced with the :omission string (defaults to "..."): + # + # 'And they found that many people were sleeping better.'.truncate_words(5, omission: '... (continued)') + # # => "And they found that many... (continued)" + def truncate_words(words_count, options = {}) + sep = options[:separator] || /\s+/ + sep = Regexp.escape(sep.to_s) unless Regexp === sep + if self =~ /\A((?>.+?#{sep}){#{words_count - 1}}.+?)#{sep}.*/m + $1 + (options[:omission] || "...") + else + dup + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/indent.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/indent.rb new file mode 100644 index 0000000..af9d181 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/indent.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +class String + # Same as +indent+, except it indents the receiver in-place. + # + # Returns the indented string, or +nil+ if there was nothing to indent. + def indent!(amount, indent_string = nil, indent_empty_lines = false) + indent_string = indent_string || self[/^[ \t]/] || " " + re = indent_empty_lines ? /^/ : /^(?!$)/ + gsub!(re, indent_string * amount) + end + + # Indents the lines in the receiver: + # + # <<EOS.indent(2) + # def some_method + # some_code + # end + # EOS + # + # The second argument, +indent_string+, specifies which indent string to + # use. The default is +nil+, which tells the method to make a guess by + # peeking at the first indented line, and fallback to a space if there is + # none. + # + # " foo".indent(2) # => " foo" + # "foo\n\t\tbar".indent(2) # => "\t\tfoo\n\t\t\t\tbar" + # "foo".indent(2, "\t") # => "\t\tfoo" + # + # While +indent_string+ is typically one space or tab, it may be any string. + # + # The third argument, +indent_empty_lines+, is a flag that says whether + # empty lines should be indented. Default is false. + # + # "foo\n\nbar".indent(2) # => " foo\n\n bar" + # "foo\n\nbar".indent(2, nil, true) # => " foo\n \n bar" + # + def indent(amount, indent_string = nil, indent_empty_lines = false) + dup.tap { |_| _.indent!(amount, indent_string, indent_empty_lines) } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inflections.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inflections.rb new file mode 100644 index 0000000..e0749f9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inflections.rb @@ -0,0 +1,293 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" +require "active_support/inflector/transliterate" + +# String inflections define new methods on the String class to transform names for different purposes. +# For instance, you can figure out the name of a table from the name of a class. +# +# 'ScaleScore'.tableize # => "scale_scores" +# +class String + # Returns the plural form of the word in the string. + # + # If the optional parameter +count+ is specified, + # the singular form will be returned if count == 1. + # For any other value of +count+ the plural will be returned. + # + # If the optional parameter +locale+ is specified, + # the word will be pluralized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English.
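A short sketch of the two methods just defined, truncate_words and indent (illustrative values, consistent with the doc comments; assumes both extensions are loaded):

    require "active_support/core_ext/string/filters"
    require "active_support/core_ext/string/indent"

    "Once upon a time in a world far far away".truncate_words(4)
    # => "Once upon a time..."

    "def some_method\n  some_code\nend".indent(2)
    # => "  def some_method\n    some_code\n  end"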
+ # + # 'post'.pluralize # => "posts" + # 'octopus'.pluralize # => "octopi" + # 'sheep'.pluralize # => "sheep" + # 'words'.pluralize # => "words" + # 'the blue mailman'.pluralize # => "the blue mailmen" + # 'CamelOctopus'.pluralize # => "CamelOctopi" + # 'apple'.pluralize(1) # => "apple" + # 'apple'.pluralize(2) # => "apples" + # 'ley'.pluralize(:es) # => "leyes" + # 'ley'.pluralize(1, :es) # => "ley" + # + # See ActiveSupport::Inflector.pluralize. + def pluralize(count = nil, locale = :en) + locale = count if count.is_a?(Symbol) + if count == 1 + dup + else + ActiveSupport::Inflector.pluralize(self, locale) + end + end + + # The reverse of +pluralize+, returns the singular form of a word in a string. + # + # If the optional parameter +locale+ is specified, + # the word will be singularized as a word of that language. + # By default, this parameter is set to :en. + # You must define your own inflection rules for languages other than English. + # + # 'posts'.singularize # => "post" + # 'octopi'.singularize # => "octopus" + # 'sheep'.singularize # => "sheep" + # 'word'.singularize # => "word" + # 'the blue mailmen'.singularize # => "the blue mailman" + # 'CamelOctopi'.singularize # => "CamelOctopus" + # 'leyes'.singularize(:es) # => "ley" + # + # See ActiveSupport::Inflector.singularize. + def singularize(locale = :en) + ActiveSupport::Inflector.singularize(self, locale) + end + + # +constantize+ tries to find a declared constant with the name specified + # in the string. It raises a NameError when the name is not in CamelCase + # or is not initialized. + # + # 'Module'.constantize # => Module + # 'Class'.constantize # => Class + # 'blargle'.constantize # => NameError: wrong constant name blargle + # + # See ActiveSupport::Inflector.constantize. + def constantize + ActiveSupport::Inflector.constantize(self) + end + + # +safe_constantize+ tries to find a declared constant with the name specified + # in the string. It returns +nil+ when the name is not in CamelCase + # or is not initialized. + # + # 'Module'.safe_constantize # => Module + # 'Class'.safe_constantize # => Class + # 'blargle'.safe_constantize # => nil + # + # See ActiveSupport::Inflector.safe_constantize. + def safe_constantize + ActiveSupport::Inflector.safe_constantize(self) + end + + # By default, +camelize+ converts strings to UpperCamelCase. If the argument to camelize + # is set to :lower then camelize produces lowerCamelCase. + # + # +camelize+ will also convert '/' to '::' which is useful for converting paths to namespaces. + # + # 'active_record'.camelize # => "ActiveRecord" + # 'active_record'.camelize(:lower) # => "activeRecord" + # 'active_record/errors'.camelize # => "ActiveRecord::Errors" + # 'active_record/errors'.camelize(:lower) # => "activeRecord::Errors" + # + # +camelize+ is also aliased as +camelcase+. + # + # See ActiveSupport::Inflector.camelize. + def camelize(first_letter = :upper) + case first_letter + when :upper + ActiveSupport::Inflector.camelize(self, true) + when :lower + ActiveSupport::Inflector.camelize(self, false) + else + raise ArgumentError, "Invalid option, use either :upper or :lower." + end + end + alias_method :camelcase, :camelize + + # Capitalizes all the words and replaces some characters in the string to create + # a nicer looking title. +titleize+ is meant for creating pretty output. It is not + # used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. 
+ # By default, this parameter is false. + # + # 'man from the boondocks'.titleize # => "Man From The Boondocks" + # 'x-men: the last stand'.titleize # => "X Men: The Last Stand" + # 'string_ending_with_id'.titleize(keep_id_suffix: true) # => "String Ending With Id" + # + # +titleize+ is also aliased as +titlecase+. + # + # See ActiveSupport::Inflector.titleize. + def titleize(keep_id_suffix: false) + ActiveSupport::Inflector.titleize(self, keep_id_suffix: keep_id_suffix) + end + alias_method :titlecase, :titleize + + # The reverse of +camelize+. Makes an underscored, lowercase form from the expression in the string. + # + # +underscore+ will also change '::' to '/' to convert namespaces to paths. + # + # 'ActiveModel'.underscore # => "active_model" + # 'ActiveModel::Errors'.underscore # => "active_model/errors" + # + # See ActiveSupport::Inflector.underscore. + def underscore + ActiveSupport::Inflector.underscore(self) + end + + # Replaces underscores with dashes in the string. + # + # 'puni_puni'.dasherize # => "puni-puni" + # + # See ActiveSupport::Inflector.dasherize. + def dasherize + ActiveSupport::Inflector.dasherize(self) + end + + # Removes the module part from the constant expression in the string. + # + # 'ActiveSupport::Inflector::Inflections'.demodulize # => "Inflections" + # 'Inflections'.demodulize # => "Inflections" + # '::Inflections'.demodulize # => "Inflections" + # ''.demodulize # => '' + # + # See ActiveSupport::Inflector.demodulize. + # + # See also +deconstantize+. + def demodulize + ActiveSupport::Inflector.demodulize(self) + end + + # Removes the rightmost segment from the constant expression in the string. + # + # 'Net::HTTP'.deconstantize # => "Net" + # '::Net::HTTP'.deconstantize # => "::Net" + # 'String'.deconstantize # => "" + # '::String'.deconstantize # => "" + # ''.deconstantize # => "" + # + # See ActiveSupport::Inflector.deconstantize. + # + # See also +demodulize+. + def deconstantize + ActiveSupport::Inflector.deconstantize(self) + end + + # Replaces special characters in a string so that it may be used as part of a 'pretty' URL. + # + # If the optional parameter +locale+ is specified, + # the word will be parameterized as a word of that language. + # By default, this parameter is set to nil and it will use + # the configured I18n.locale. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize}" + # end + # end + # + # @person = Person.find(1) + # # => #<Person id: 1, name: "Donald E. Knuth"> + # + # <%= link_to(@person.name, person_path) %> + # # => <a href="/person/1-donald-e-knuth">Donald E. Knuth</a> + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # class Person + # def to_param + # "#{id}-#{name.parameterize(preserve_case: true)}" + # end + # end + # + # @person = Person.find(1) + # # => #<Person id: 1, name: "Donald E. Knuth"> + # + # <%= link_to(@person.name, person_path) %> + # # => <a href="/person/1-Donald-E-Knuth">Donald E. Knuth</a> + # + # See ActiveSupport::Inflector.parameterize. + def parameterize(separator: "-", preserve_case: false, locale: nil) + ActiveSupport::Inflector.parameterize(self, separator: separator, preserve_case: preserve_case, locale: locale) + end + + # Creates the name of a table like Rails does for models to table names. This method + # uses the +pluralize+ method on the last word in the string. + # + # 'RawScaledScorer'.tableize # => "raw_scaled_scorers" + # 'ham_and_egg'.tableize # => "ham_and_eggs" + # 'fancyCategory'.tableize # => "fancy_categories" + # + # See ActiveSupport::Inflector.tableize.
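A compact sketch chaining several of the inflections documented above (values follow the doc comments; parameterize assumes the i18n-backed transliteration that Active Support sets up):

    require "active_support/core_ext/string/inflections"

    "active_record".camelize           # => "ActiveRecord"
    "ActiveModel::Errors".underscore   # => "active_model/errors"
    "man from the boondocks".titleize  # => "Man From The Boondocks"
    "Donald E. Knuth".parameterize     # => "donald-e-knuth"
    "fancyCategory".tableize           # => "fancy_categories"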
+ def tableize + ActiveSupport::Inflector.tableize(self) + end + + # Creates a class name from a plural table name like Rails does for table names to models. + # Note that this returns a string and not a class. (To convert to an actual class + # follow +classify+ with +constantize+.) + # + # 'ham_and_eggs'.classify # => "HamAndEgg" + # 'posts'.classify # => "Post" + # + # See ActiveSupport::Inflector.classify. + def classify + ActiveSupport::Inflector.classify(self) + end + + # Capitalizes the first word, turns underscores into spaces, and (by default)strips a + # trailing '_id' if present. + # Like +titleize+, this is meant for creating pretty output. + # + # The capitalization of the first word can be turned off by setting the + # optional parameter +capitalize+ to false. + # By default, this parameter is true. + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # 'employee_salary'.humanize # => "Employee salary" + # 'author_id'.humanize # => "Author" + # 'author_id'.humanize(capitalize: false) # => "author" + # '_id'.humanize # => "Id" + # 'author_id'.humanize(keep_id_suffix: true) # => "Author id" + # + # See ActiveSupport::Inflector.humanize. + def humanize(capitalize: true, keep_id_suffix: false) + ActiveSupport::Inflector.humanize(self, capitalize: capitalize, keep_id_suffix: keep_id_suffix) + end + + # Converts just the first character to uppercase. + # + # 'what a Lovely Day'.upcase_first # => "What a Lovely Day" + # 'w'.upcase_first # => "W" + # ''.upcase_first # => "" + # + # See ActiveSupport::Inflector.upcase_first. + def upcase_first + ActiveSupport::Inflector.upcase_first(self) + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # 'Message'.foreign_key # => "message_id" + # 'Message'.foreign_key(false) # => "messageid" + # 'Admin::Post'.foreign_key # => "post_id" + # + # See ActiveSupport::Inflector.foreign_key. + def foreign_key(separate_class_name_and_id_with_underscore = true) + ActiveSupport::Inflector.foreign_key(self, separate_class_name_and_id_with_underscore) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inquiry.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inquiry.rb new file mode 100644 index 0000000..a3b42da --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/inquiry.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" +require "active_support/environment_inquirer" + +class String + # Wraps the current string in the ActiveSupport::StringInquirer class, + # which gives you a prettier way to test for equality. + # + # env = 'production'.inquiry + # env.production? # => true + # env.development? 
# => false + def inquiry + ActiveSupport::StringInquirer.new(self) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/multibyte.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/multibyte.rb new file mode 100644 index 0000000..0542121 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/multibyte.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +require "active_support/multibyte" + +class String + # == Multibyte proxy + # + # +mb_chars+ is a multibyte safe proxy for string methods. + # + # It creates and returns an instance of the ActiveSupport::Multibyte::Chars class which + # encapsulates the original string. A Unicode safe version of all the String methods are defined on this proxy + # class. If the proxy class doesn't respond to a certain method, it's forwarded to the encapsulated string. + # + # >> "ǉ".mb_chars.upcase.to_s + # => "Ǉ" + # + # NOTE: Ruby 2.4 and later support native Unicode case mappings: + # + # >> "ǉ".upcase + # => "Ǉ" + # + # == Method chaining + # + # All the methods on the Chars proxy which normally return a string will return a Chars object. This allows + # method chaining on the result of any of these methods. + # + # name.mb_chars.reverse.length # => 12 + # + # == Interoperability and configuration + # + # The Chars object tries to be as interchangeable with String objects as possible: sorting and comparing between + # String and Char work like expected. The bang! methods change the internal string representation in the Chars + # object. Interoperability problems can be resolved easily with a +to_s+ call. + # + # For more information about the methods defined on the Chars proxy see ActiveSupport::Multibyte::Chars. For + # information about how to change the default Multibyte behavior see ActiveSupport::Multibyte. + def mb_chars + ActiveSupport::Multibyte.proxy_class.new(self) + end + + # Returns +true+ if string has utf_8 encoding. + # + # utf_8_str = "some string".encode "UTF-8" + # iso_str = "some string".encode "ISO-8859-1" + # + # utf_8_str.is_utf8? # => true + # iso_str.is_utf8? # => false + def is_utf8? + case encoding + when Encoding::UTF_8, Encoding::US_ASCII + valid_encoding? + when Encoding::ASCII_8BIT + dup.force_encoding(Encoding::UTF_8).valid_encoding?
+ else + false + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/output_safety.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/output_safety.rb new file mode 100644 index 0000000..74b187e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/output_safety.rb @@ -0,0 +1,371 @@ +# frozen_string_literal: true + +require "erb" +require "active_support/core_ext/module/redefine_method" +require "active_support/multibyte/unicode" + +class ERB + module Util + HTML_ESCAPE = { "&" => "&amp;", ">" => "&gt;", "<" => "&lt;", '"' => "&quot;", "'" => "&#39;" } + JSON_ESCAPE = { "&" => '\u0026', ">" => '\u003e', "<" => '\u003c', "\u2028" => '\u2028', "\u2029" => '\u2029' } + HTML_ESCAPE_ONCE_REGEXP = /["><']|&(?!([a-zA-Z]+|(#\d+)|(#[xX][\dA-Fa-f]+));)/ + JSON_ESCAPE_REGEXP = /[\u2028\u2029&><]/u + + # Following XML requirements: https://www.w3.org/TR/REC-xml/#NT-Name + TAG_NAME_START_REGEXP_SET = "@:A-Z_a-z\u{C0}-\u{D6}\u{D8}-\u{F6}\u{F8}-\u{2FF}\u{370}-\u{37D}\u{37F}-\u{1FFF}" \ + "\u{200C}-\u{200D}\u{2070}-\u{218F}\u{2C00}-\u{2FEF}\u{3001}-\u{D7FF}\u{F900}-\u{FDCF}" \ + "\u{FDF0}-\u{FFFD}\u{10000}-\u{EFFFF}" + TAG_NAME_START_REGEXP = /[^#{TAG_NAME_START_REGEXP_SET}]/ + TAG_NAME_FOLLOWING_REGEXP = /[^#{TAG_NAME_START_REGEXP_SET}\-.0-9\u{B7}\u{0300}-\u{036F}\u{203F}-\u{2040}]/ + TAG_NAME_REPLACEMENT_CHAR = "_" + + # A utility method for escaping HTML tag characters. + # This method is also aliased as h. + # + # puts html_escape('is a > 0 & a < 10?') + # # => is a &gt; 0 &amp; a &lt; 10? + def html_escape(s) + unwrapped_html_escape(s).html_safe + end + + silence_redefinition_of_method :h + alias h html_escape + + module_function :h + + singleton_class.silence_redefinition_of_method :html_escape + module_function :html_escape + + # HTML escapes strings but doesn't wrap them with an ActiveSupport::SafeBuffer. + # This method is not for public consumption! Seriously! + def unwrapped_html_escape(s) # :nodoc: + s = s.to_s + if s.html_safe? + s + else + CGI.escapeHTML(ActiveSupport::Multibyte::Unicode.tidy_bytes(s)) + end + end + module_function :unwrapped_html_escape + + # A utility method for escaping HTML without affecting existing escaped entities. + # + # html_escape_once('1 < 2 &amp; 3') + # # => "1 &lt; 2 &amp; 3" + # + # html_escape_once('&lt;&lt; Accept & Checkout') + # # => "&lt;&lt; Accept &amp; Checkout" + def html_escape_once(s) + result = ActiveSupport::Multibyte::Unicode.tidy_bytes(s.to_s).gsub(HTML_ESCAPE_ONCE_REGEXP, HTML_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :html_escape_once + + # A utility method for escaping HTML entities in JSON strings. Specifically, the + # &, > and < characters are replaced with their equivalent unicode escaped form - + # \u0026, \u003e, and \u003c. The Unicode sequences \u2028 and \u2029 are also + # escaped as they are treated as newline characters in some JavaScript engines.
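As a rough console sketch of the two escaping helpers above (both are module_functions, so they can be called on ERB::Util directly; assumes this file is loaded):

    require "active_support/core_ext/string/output_safety"

    ERB::Util.html_escape("is a > 0 & a < 10?")
    # => "is a &gt; 0 &amp; a &lt; 10?"

    ERB::Util.html_escape_once("1 &lt; 2 &amp; 3")
    # => "1 &lt; 2 &amp; 3" (already-escaped entities are not double-escaped)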
+ # These sequences have identical meaning as the original characters inside the + # context of a JSON string, so assuming the input is a valid and well-formed + # JSON value, the output will have equivalent meaning when parsed: + # + # json = JSON.generate({ name: "</script><script>alert('PWNED!!!')</script>"}) + # # => "{\"name\":\"</script><script>alert('PWNED!!!')</script>\"}" + # + # json_escape(json) + # # => "{\"name\":\"\\u003C/script\\u003E\\u003Cscript\\u003Ealert('PWNED!!!')\\u003C/script\\u003E\"}" + # + # JSON.parse(json) == JSON.parse(json_escape(json)) + # # => true + # + # The intended use case for this method is to escape JSON strings before including + # them inside a script tag to avoid XSS vulnerability: + # + # <script> + # var currentUser = <%= raw json_escape(current_user.to_json) %>; + # </script> + # + # It is necessary to +raw+ the result of +json_escape+, so that quotation marks + # don't get converted to &quot; entities. +json_escape+ doesn't + # automatically flag the result as HTML safe, since the raw value is unsafe to + # use inside HTML attributes. + # + # If your JSON is being used downstream for insertion into the DOM, be aware of + # whether or not it is being inserted via html(). Most jQuery plugins do this. + # If that is the case, be sure to +html_escape+ or +sanitize+ any user-generated + # content returned by your JSON. + # + # If you need to output JSON elsewhere in your HTML, you can just do something + # like this, as any unsafe characters (including quotation marks) will be + # automatically escaped for you: + # + # <div data-user-info="<%= current_user.to_json %>">...</div>
+ # + # WARNING: this helper only works with valid JSON. Using this on non-JSON values + # will open up serious XSS vulnerabilities. For example, if you replace the + # +current_user.to_json+ in the example above with user input instead, the browser + # will happily eval() that string as JavaScript. + # + # The escaping performed in this method is identical to those performed in the + # Active Support JSON encoder when +ActiveSupport.escape_html_entities_in_json+ is + # set to true. Because this transformation is idempotent, this helper can be + # applied even if +ActiveSupport.escape_html_entities_in_json+ is already true. + # + # Therefore, when you are unsure if +ActiveSupport.escape_html_entities_in_json+ + # is enabled, or if you are unsure where your JSON string originated from, it + # is recommended that you always apply this helper (other libraries, such as the + # JSON gem, do not provide this kind of protection by default; also some gems + # might override +to_json+ to bypass Active Support's encoder). + def json_escape(s) + result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE) + s.html_safe? ? result.html_safe : result + end + + module_function :json_escape + + # A utility method for escaping XML names of tags and names of attributes. + # + # xml_name_escape('1 < 2 & 3') + # # => "1___2___3" + # + # It follows the requirements of the specification: https://www.w3.org/TR/REC-xml/#NT-Name + def xml_name_escape(name) + name = name.to_s + return "" if name.blank? + + starting_char = name[0].gsub(TAG_NAME_START_REGEXP, TAG_NAME_REPLACEMENT_CHAR) + + return starting_char if name.size == 1 + + following_chars = name[1..-1].gsub(TAG_NAME_FOLLOWING_REGEXP, TAG_NAME_REPLACEMENT_CHAR) + + starting_char + following_chars + end + module_function :xml_name_escape + end +end + +class Object + def html_safe? + false + end +end + +class Numeric + def html_safe? + true + end +end + +module ActiveSupport # :nodoc: + class SafeBuffer < String + UNSAFE_STRING_METHODS = %w( + capitalize chomp chop delete delete_prefix delete_suffix + downcase lstrip next reverse rstrip scrub slice squeeze strip + succ swapcase tr tr_s unicode_normalize upcase + ) + + UNSAFE_STRING_METHODS_WITH_BACKREF = %w(gsub sub) + + alias_method :original_concat, :concat + private :original_concat + + # Raised when ActiveSupport::SafeBuffer#safe_concat is called on unsafe buffers. + class SafeConcatError < StandardError + def initialize + super "Could not concatenate to the buffer because it is not html safe." + end + end + + def [](*args) + if html_safe? + new_string = super + + return unless new_string + + new_safe_buffer = new_string.is_a?(SafeBuffer) ? new_string : SafeBuffer.new(new_string) + new_safe_buffer.instance_variable_set :@html_safe, true + new_safe_buffer + else + to_str[*args] + end + end + + def safe_concat(value) + raise SafeConcatError unless html_safe? + original_concat(value) + end + + def initialize(str = "") + @html_safe = true + super + end + + def initialize_copy(other) + super + @html_safe = other.html_safe? + end + + def clone_empty + self[0, 0] + end + + def concat(value) + unless value.nil? 
+ super(implicit_html_escape_interpolated_argument(value)) + end + self + end + alias << concat + + def bytesplice(*args, value) + super(*args, implicit_html_escape_interpolated_argument(value)) + end + + def insert(index, value) + super(index, implicit_html_escape_interpolated_argument(value)) + end + + def prepend(value) + super(implicit_html_escape_interpolated_argument(value)) + end + + def replace(value) + super(implicit_html_escape_interpolated_argument(value)) + end + + def []=(*args) + if args.length == 3 + super(args[0], args[1], implicit_html_escape_interpolated_argument(args[2])) + else + super(args[0], implicit_html_escape_interpolated_argument(args[1])) + end + end + + def +(other) + dup.concat(other) + end + + def *(*) + new_string = super + new_safe_buffer = new_string.is_a?(SafeBuffer) ? new_string : SafeBuffer.new(new_string) + new_safe_buffer.instance_variable_set(:@html_safe, @html_safe) + new_safe_buffer + end + + def %(args) + case args + when Hash + escaped_args = args.transform_values { |arg| explicit_html_escape_interpolated_argument(arg) } + else + escaped_args = Array(args).map { |arg| explicit_html_escape_interpolated_argument(arg) } + end + + self.class.new(super(escaped_args)) + end + + def html_safe? + defined?(@html_safe) && @html_safe + end + + def to_s + self + end + + def to_param + to_str + end + + def encode_with(coder) + coder.represent_object nil, to_str + end + + UNSAFE_STRING_METHODS.each do |unsafe_method| + if unsafe_method.respond_to?(unsafe_method) + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def capitalize(*args, &block) + to_str.#{unsafe_method}(*args, &block) # to_str.capitalize(*args, &block) + end # end + + def #{unsafe_method}!(*args) # def capitalize!(*args) + @html_safe = false # @html_safe = false + super # super + end # end + EOT + end + end + + UNSAFE_STRING_METHODS_WITH_BACKREF.each do |unsafe_method| + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{unsafe_method}(*args, &block) # def gsub(*args, &block) + if block # if block + to_str.#{unsafe_method}(*args) { |*params| # to_str.gsub(*args) { |*params| + set_block_back_references(block, $~) # set_block_back_references(block, $~) + block.call(*params) # block.call(*params) + } # } + else # else + to_str.#{unsafe_method}(*args) # to_str.gsub(*args) + end # end + end # end + + def #{unsafe_method}!(*args, &block) # def gsub!(*args, &block) + @html_safe = false # @html_safe = false + if block # if block + super(*args) { |*params| # super(*args) { |*params| + set_block_back_references(block, $~) # set_block_back_references(block, $~) + block.call(*params) # block.call(*params) + } # } + else # else + super # super + end # end + end # end + EOT + end + + private + def explicit_html_escape_interpolated_argument(arg) + (!html_safe? || arg.html_safe?) ? arg : CGI.escapeHTML(arg.to_s) + end + + def implicit_html_escape_interpolated_argument(arg) + if !html_safe? || arg.html_safe? + arg + else + arg_string = begin + arg.to_str + rescue NoMethodError => error + if error.name == :to_str + str = arg.to_s + ActiveSupport::Deprecation.warn <<~MSG.squish + Implicit conversion of #{arg.class} into String by ActiveSupport::SafeBuffer + is deprecated and will be removed in Rails 7.1. + You must explicitly cast it to a String. 
+ MSG + str + else + raise + end + end + CGI.escapeHTML(arg_string) + end + end + + def set_block_back_references(block, match_data) + block.binding.eval("proc { |m| $~ = m }").call(match_data) + rescue ArgumentError + # Can't create binding from C level Proc + end + end +end + +class String + # Marks a string as trusted safe. It will be inserted into HTML with no + # additional escaping performed. It is your responsibility to ensure that the + # string contains no malicious content. This method is equivalent to the + # +raw+ helper in views. It is recommended that you use +sanitize+ instead of + # this method. It should never be called on user input. + def html_safe + ActiveSupport::SafeBuffer.new(self) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/starts_ends_with.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/starts_ends_with.rb new file mode 100644 index 0000000..1e21637 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/starts_ends_with.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +class String + alias :starts_with? :start_with? + alias :ends_with? :end_with? +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/strip.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/strip.rb new file mode 100644 index 0000000..60e9952 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/strip.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +class String + # Strips indentation in heredocs. + # + # For example in + # + # if options[:usage] + # puts <<-USAGE.strip_heredoc + # This command does such and such. + # + # Supported options are: + # -h This message + # ... + # USAGE + # end + # + # the user would see the usage message aligned against the left margin. + # + # Technically, it looks for the least indented non-empty line + # in the whole string, and removes that amount of leading whitespace. + def strip_heredoc + gsub(/^#{scan(/^[ \t]*(?=\S)/).min}/, "").tap do |stripped| + stripped.freeze if frozen? 
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/zones.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/zones.rb new file mode 100644 index 0000000..55dc231 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/string/zones.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/time/zones" + +class String + # Converts String to a TimeWithZone in the current zone if Time.zone or Time.zone_default + # is set, otherwise converts String to a Time via String#to_time + def in_time_zone(zone = ::Time.zone) + if zone + ::Time.find_zone!(zone).parse(self) + else + to_time + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol.rb new file mode 100644 index 0000000..709fed2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require "active_support/core_ext/symbol/starts_ends_with" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol/starts_ends_with.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol/starts_ends_with.rb new file mode 100644 index 0000000..4f85217 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/symbol/starts_ends_with.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +class Symbol + alias :starts_with? :start_with? + alias :ends_with? :end_with? +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time.rb new file mode 100644 index 0000000..9716257 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/time/calculations" +require "active_support/core_ext/time/compatibility" +require "active_support/core_ext/time/conversions" +require "active_support/core_ext/time/deprecated_conversions" unless ENV["RAILS_DISABLE_DEPRECATED_TO_S_CONVERSION"] +require "active_support/core_ext/time/zones" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/acts_like.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/acts_like.rb new file mode 100644 index 0000000..8572b49 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/acts_like.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/acts_like" + +class Time + # Duck-types as a Time-like class. See Object#acts_like?. + def acts_like_time? 
+ true + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/calculations.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/calculations.rb new file mode 100644 index 0000000..31ef3f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/calculations.rb @@ -0,0 +1,364 @@ +# frozen_string_literal: true + +require "active_support/duration" +require "active_support/core_ext/time/conversions" +require "active_support/time_with_zone" +require "active_support/core_ext/time/zones" +require "active_support/core_ext/date_and_time/calculations" +require "active_support/core_ext/date/calculations" +require "active_support/core_ext/module/remove_method" + +class Time + include DateAndTime::Calculations + + COMMON_YEAR_DAYS_IN_MONTH = [nil, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + class << self + # Overriding case equality method so that it returns true for ActiveSupport::TimeWithZone instances + def ===(other) + super || (self == Time && other.is_a?(ActiveSupport::TimeWithZone)) + end + + # Returns the number of days in the given month. + # If no year is specified, it will use the current year. + def days_in_month(month, year = current.year) + if month == 2 && ::Date.gregorian_leap?(year) + 29 + else + COMMON_YEAR_DAYS_IN_MONTH[month] + end + end + + # Returns the number of days in the given year. + # If no year is specified, it will use the current year. + def days_in_year(year = current.year) + days_in_month(2, year) + 337 + end + + # Returns Time.zone.now when Time.zone or config.time_zone are set, otherwise just returns Time.now. + def current + ::Time.zone ? ::Time.zone.now : ::Time.now + end + + # Layers additional behavior on Time.at so that ActiveSupport::TimeWithZone and DateTime + # instances can be used when called with a single argument + def at_with_coercion(*args, **kwargs) + return at_without_coercion(*args, **kwargs) if args.size != 1 || !kwargs.empty? + + # Time.at can be called with a time or numerical value + time_or_number = args.first + + if time_or_number.is_a?(ActiveSupport::TimeWithZone) + at_without_coercion(time_or_number.to_r).getlocal + elsif time_or_number.is_a?(DateTime) + at_without_coercion(time_or_number.to_f).getlocal + else + at_without_coercion(time_or_number) + end + end + alias_method :at_without_coercion, :at + alias_method :at, :at_with_coercion + + # Creates a +Time+ instance from an RFC 3339 string. + # + # Time.rfc3339('1999-12-31T14:00:00-10:00') # => 2000-01-01 00:00:00 -1000 + # + # If the time or offset components are missing then an +ArgumentError+ will be raised. + # + # Time.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + end + end + + # Returns the number of seconds since 00:00:00. + # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_since_midnight # => 0.0 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight # => 45296.0 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_since_midnight # => 86399.0 + def seconds_since_midnight + to_i - change(hour: 0).to_i + (usec / 1.0e+6) + end + + # Returns the number of seconds until 23:59:59. 
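A few of the class-level helpers above in use (illustrative values consistent with the doc comments; assumes active_support/core_ext/time is loaded):

    require "active_support/core_ext/time"

    Time.days_in_month(2, 2024)  # => 29 (Gregorian leap year)
    Time.days_in_year(2023)      # => 365
    Time.new(2012, 8, 29, 12, 34, 56).seconds_since_midnight  # => 45296.0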
+ # + # Time.new(2012, 8, 29, 0, 0, 0).seconds_until_end_of_day # => 86399 + # Time.new(2012, 8, 29, 12, 34, 56).seconds_until_end_of_day # => 41103 + # Time.new(2012, 8, 29, 23, 59, 59).seconds_until_end_of_day # => 0 + def seconds_until_end_of_day + end_of_day.to_i - to_i + end + + # Returns the fraction of a second as a +Rational+ + # + # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2) + def sec_fraction + subsec + end + + unless Time.method_defined?(:floor) + def floor(precision = 0) + change(nsec: 0) + subsec.floor(precision) + end + end + + # Restricted Ruby version due to a bug in `Time#ceil` + # See https://bugs.ruby-lang.org/issues/17025 for more details + if RUBY_VERSION <= "2.8" + remove_possible_method :ceil + def ceil(precision = 0) + change(nsec: 0) + subsec.ceil(precision) + end + end + + # Returns a new Time where one or more of the elements have been changed according + # to the +options+ parameter. The time options (:hour, :min, + # :sec, :usec, :nsec) reset cascadingly, so if only + # the hour is passed, then minute, sec, usec, and nsec is set to 0. If the hour + # and minute is passed, then sec, usec, and nsec is set to 0. The +options+ parameter + # takes a hash with any of these keys: :year, :month, :day, + # :hour, :min, :sec, :usec, :nsec, + # :offset. Pass either :usec or :nsec, not both. + # + # Time.new(2012, 8, 29, 22, 35, 0).change(day: 1) # => Time.new(2012, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, day: 1) # => Time.new(1981, 8, 1, 22, 35, 0) + # Time.new(2012, 8, 29, 22, 35, 0).change(year: 1981, hour: 0) # => Time.new(1981, 8, 29, 0, 0, 0) + def change(options) + new_year = options.fetch(:year, year) + new_month = options.fetch(:month, month) + new_day = options.fetch(:day, day) + new_hour = options.fetch(:hour, hour) + new_min = options.fetch(:min, options[:hour] ? 0 : min) + new_sec = options.fetch(:sec, (options[:hour] || options[:min]) ? 0 : sec) + new_offset = options.fetch(:offset, nil) + + if new_nsec = options[:nsec] + raise ArgumentError, "Can't change both :nsec and :usec at the same time: #{options.inspect}" if options[:usec] + new_usec = Rational(new_nsec, 1000) + else + new_usec = options.fetch(:usec, (options[:hour] || options[:min] || options[:sec]) ? 0 : Rational(nsec, 1000)) + end + + raise ArgumentError, "argument out of range" if new_usec >= 1000000 + + new_sec += Rational(new_usec, 1000000) + + if new_offset + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, new_offset) + elsif utc? + ::Time.utc(new_year, new_month, new_day, new_hour, new_min, new_sec) + elsif zone&.respond_to?(:utc_to_local) + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, zone) + elsif zone + ::Time.local(new_year, new_month, new_day, new_hour, new_min, new_sec) + else + ::Time.new(new_year, new_month, new_day, new_hour, new_min, new_sec, utc_offset) + end + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The +options+ parameter + # takes a hash with any of these keys: :years, :months, + # :weeks, :days, :hours, :minutes, + # :seconds. 
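A minimal sketch of the cascading reset behavior of +change+ described above (assumes the calculations extension is loaded):

    require "active_support/core_ext/time/calculations"

    t = Time.new(2012, 8, 29, 22, 35, 15)
    t.change(hour: 1)     # => 2012-08-29 01:00:00 (min and sec reset to 0)
    t.change(min: 10)     # => 2012-08-29 22:10:00 (hour kept, sec reset)
    t.change(year: 1981)  # => 1981-08-29 22:35:15 (time of day kept)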
+ # + # Time.new(2015, 8, 1, 14, 35, 0).advance(seconds: 1) # => 2015-08-01 14:35:01 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(minutes: 1) # => 2015-08-01 14:36:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(hours: 1) # => 2015-08-01 15:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(days: 1) # => 2015-08-02 14:35:00 -0700 + # Time.new(2015, 8, 1, 14, 35, 0).advance(weeks: 1) # => 2015-08-08 14:35:00 -0700 + def advance(options) + unless options[:weeks].nil? + options[:weeks], partial_weeks = options[:weeks].divmod(1) + options[:days] = options.fetch(:days, 0) + 7 * partial_weeks + end + + unless options[:days].nil? + options[:days], partial_days = options[:days].divmod(1) + options[:hours] = options.fetch(:hours, 0) + 24 * partial_days + end + + d = to_date.gregorian.advance(options) + time_advanced_by_date = change(year: d.year, month: d.month, day: d.day) + seconds_to_advance = \ + options.fetch(:seconds, 0) + + options.fetch(:minutes, 0) * 60 + + options.fetch(:hours, 0) * 3600 + + if seconds_to_advance.zero? + time_advanced_by_date + else + time_advanced_by_date.since(seconds_to_advance) + end + end + + # Returns a new Time representing the time a number of seconds ago, this is basically a wrapper around the Numeric extension + def ago(seconds) + since(-seconds) + end + + # Returns a new Time representing the time a number of seconds since the instance time + def since(seconds) + self + seconds + rescue + to_datetime.since(seconds) + end + alias :in :since + + # Returns a new Time representing the start of the day (0:00) + def beginning_of_day + change(hour: 0) + end + alias :midnight :beginning_of_day + alias :at_midnight :beginning_of_day + alias :at_beginning_of_day :beginning_of_day + + # Returns a new Time representing the middle of the day (12:00) + def middle_of_day + change(hour: 12) + end + alias :midday :middle_of_day + alias :noon :middle_of_day + alias :at_midday :middle_of_day + alias :at_noon :middle_of_day + alias :at_middle_of_day :middle_of_day + + # Returns a new Time representing the end of the day, 23:59:59.999999 + def end_of_day + change( + hour: 23, + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_day :end_of_day + + # Returns a new Time representing the start of the hour (x:00) + def beginning_of_hour + change(min: 0) + end + alias :at_beginning_of_hour :beginning_of_hour + + # Returns a new Time representing the end of the hour, x:59:59.999999 + def end_of_hour + change( + min: 59, + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_hour :end_of_hour + + # Returns a new Time representing the start of the minute (x:xx:00) + def beginning_of_minute + change(sec: 0) + end + alias :at_beginning_of_minute :beginning_of_minute + + # Returns a new Time representing the end of the minute, x:xx:59.999999 + def end_of_minute + change( + sec: 59, + usec: Rational(999999999, 1000) + ) + end + alias :at_end_of_minute :end_of_minute + + def plus_with_duration(other) # :nodoc: + if ActiveSupport::Duration === other + other.since(self) + else + plus_without_duration(other) + end + end + alias_method :plus_without_duration, :+ + alias_method :+, :plus_with_duration + + def minus_with_duration(other) # :nodoc: + if ActiveSupport::Duration === other + other.until(self) + else + minus_without_duration(other) + end + end + alias_method :minus_without_duration, :- + alias_method :-, :minus_with_duration + + # Time#- can also be used to determine the number of seconds between two Time instances. 
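The calendar-aware arithmetic above composes with ago/since and the day and hour helpers; a small sketch (illustrative local-time outputs; assumes the calculations extension is loaded):

    require "active_support/core_ext/time/calculations"

    t = Time.new(2015, 8, 1, 14, 35, 0)
    t.advance(weeks: 1, days: 2)  # => 2015-08-10 14:35:00
    t.ago(3600)                   # => 2015-08-01 13:35:00
    t.since(90)                   # => 2015-08-01 14:36:30
    t.beginning_of_day            # => 2015-08-01 00:00:00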
+ # We're layering on additional behavior so that ActiveSupport::TimeWithZone instances + # are coerced into values that Time#- will recognize + def minus_with_coercion(other) + other = other.comparable_time if other.respond_to?(:comparable_time) + other.is_a?(DateTime) ? to_f - other.to_f : minus_without_coercion(other) + end + alias_method :minus_without_coercion, :- + alias_method :-, :minus_with_coercion # rubocop:disable Lint/DuplicateMethods + + # Layers additional behavior on Time#<=> so that DateTime and ActiveSupport::TimeWithZone instances + # can be chronologically compared with a Time + def compare_with_coercion(other) + # we're avoiding Time#to_datetime and Time#to_time because they're expensive + if other.class == Time + compare_without_coercion(other) + elsif other.is_a?(Time) + compare_without_coercion(other.to_time) + else + to_datetime <=> other + end + end + alias_method :compare_without_coercion, :<=> + alias_method :<=>, :compare_with_coercion + + # Layers additional behavior on Time#eql? so that ActiveSupport::TimeWithZone instances + # can be eql? to an equivalent Time + def eql_with_coercion(other) + # if other is an ActiveSupport::TimeWithZone, coerce a Time instance from it so we can do eql? comparison + other = other.comparable_time if other.respond_to?(:comparable_time) + eql_without_coercion(other) + end + alias_method :eql_without_coercion, :eql? + alias_method :eql?, :eql_with_coercion + + # Returns a new time the specified number of days ago. + def prev_day(days = 1) + advance(days: -days) + end + + # Returns a new time the specified number of days in the future. + def next_day(days = 1) + advance(days: days) + end + + # Returns a new time the specified number of months ago. + def prev_month(months = 1) + advance(months: -months) + end + + # Returns a new time the specified number of months in the future. + def next_month(months = 1) + advance(months: months) + end + + # Returns a new time the specified number of years ago. + def prev_year(years = 1) + advance(years: -years) + end + + # Returns a new time the specified number of years in the future. + def next_year(years = 1) + advance(years: years) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/compatibility.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/compatibility.rb new file mode 100644 index 0000000..495e4f3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/compatibility.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/core_ext/date_and_time/compatibility" +require "active_support/core_ext/module/redefine_method" + +class Time + include DateAndTime::Compatibility + + silence_redefinition_of_method :to_time + + # Either return +self+ or the time in the local system timezone depending + # on the setting of +ActiveSupport.to_time_preserves_timezone+. + def to_time + preserve_timezone ? 
self : getlocal + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/conversions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/conversions.rb new file mode 100644 index 0000000..aeb8e14 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/conversions.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require "time" +require "active_support/inflector/methods" +require "active_support/values/time_zone" + +class Time + DATE_FORMATS = { + db: "%Y-%m-%d %H:%M:%S", + inspect: "%Y-%m-%d %H:%M:%S.%9N %z", + number: "%Y%m%d%H%M%S", + nsec: "%Y%m%d%H%M%S%9N", + usec: "%Y%m%d%H%M%S%6N", + time: "%H:%M", + short: "%d %b %H:%M", + long: "%B %d, %Y %H:%M", + long_ordinal: lambda { |time| + day_format = ActiveSupport::Inflector.ordinalize(time.day) + time.strftime("%B #{day_format}, %Y %H:%M") + }, + rfc822: lambda { |time| + offset_format = time.formatted_offset(false) + time.strftime("%a, %d %b %Y %H:%M:%S #{offset_format}") + }, + iso8601: lambda { |time| time.iso8601 } + } + + # Converts to a formatted string. See DATE_FORMATS for built-in formats. + # + # This method is aliased to to_formatted_s. + # + # time = Time.now # => 2007-01-18 06:10:17 -06:00 + # + # time.to_fs(:time) # => "06:10" + # time.to_formatted_s(:time) # => "06:10" + # + # time.to_fs(:db) # => "2007-01-18 06:10:17" + # time.to_fs(:number) # => "20070118061017" + # time.to_fs(:short) # => "18 Jan 06:10" + # time.to_fs(:long) # => "January 18, 2007 06:10" + # time.to_fs(:long_ordinal) # => "January 18th, 2007 06:10" + # time.to_fs(:rfc822) # => "Thu, 18 Jan 2007 06:10:17 -0600" + # time.to_fs(:iso8601) # => "2007-01-18T06:10:17-06:00" + # + # == Adding your own time formats to +to_fs+ + # You can add your own formats to the Time::DATE_FORMATS hash. + # Use the format name as the hash key and either a strftime string + # or Proc instance that takes a time argument as the value. + # + # # config/initializers/time_formats.rb + # Time::DATE_FORMATS[:month_and_year] = '%B %Y' + # Time::DATE_FORMATS[:short_ordinal] = ->(time) { time.strftime("%B #{time.day.ordinalize}") } + def to_fs(format = :default) + if formatter = DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + # Change to `to_s` when deprecation is gone. Also deprecate `to_default_s`. + to_default_s + end + end + alias_method :to_formatted_s, :to_fs + alias_method :to_default_s, :to_s + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.local(2000).formatted_offset # => "-06:00" + # Time.local(2000).formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? 
&& alternate_utc_string || ActiveSupport::TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Aliased to +xmlschema+ for compatibility with +DateTime+ + alias_method :rfc3339, :xmlschema +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/deprecated_conversions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/deprecated_conversions.rb new file mode 100644 index 0000000..2fe730b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/deprecated_conversions.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "time" + +class Time + NOT_SET = Object.new # :nodoc: + def to_s(format = NOT_SET) # :nodoc: + if formatter = DATE_FORMATS[format] + ActiveSupport::Deprecation.warn( + "Time#to_s(#{format.inspect}) is deprecated. Please use Time#to_fs(#{format.inspect}) instead." + ) + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + elsif format == NOT_SET + to_default_s + else + ActiveSupport::Deprecation.warn( + "Time#to_s(#{format.inspect}) is deprecated. Please use Time#to_fs(#{format.inspect}) instead." + ) + to_default_s + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/zones.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/zones.rb new file mode 100644 index 0000000..21ba4d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/time/zones.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +require "active_support/time_with_zone" +require "active_support/core_ext/time/acts_like" +require "active_support/core_ext/date_and_time/zones" + +class Time + include DateAndTime::Zones + class << self + attr_accessor :zone_default + + # Returns the TimeZone for the current request, if this has been set (via Time.zone=). + # If Time.zone has not been set for the current request, returns the TimeZone specified in config.time_zone. + def zone + ::ActiveSupport::IsolatedExecutionState[:time_zone] || zone_default + end + + # Sets Time.zone to a TimeZone object for the current request/thread. + # + # This method accepts any of the following: + # + # * A Rails TimeZone object. + # * An identifier for a Rails TimeZone object (e.g., "Eastern Time (US & Canada)", -5.hours). + # * A TZInfo::Timezone object. + # * An identifier for a TZInfo::Timezone object (e.g., "America/New_York"). + # + # Here's an example of how you might set Time.zone on a per request basis and reset it when the request is done. + # current_user.time_zone just needs to return a string identifying the user's preferred time zone: + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # def set_time_zone + # if logged_in? + # Time.use_zone(current_user.time_zone) { yield } + # else + # yield + # end + # end + # end + def zone=(time_zone) + ::ActiveSupport::IsolatedExecutionState[:time_zone] = find_zone!(time_zone) + end + + # Allows override of Time.zone locally inside supplied block; + # resets Time.zone to existing value when done. + # + # class ApplicationController < ActionController::Base + # around_action :set_time_zone + # + # private + # + # def set_time_zone + # Time.use_zone(current_user.timezone) { yield } + # end + # end + # + # NOTE: This won't affect any ActiveSupport::TimeWithZone + # objects that have already been created, e.g. 
any model timestamp + # attributes that have been read before the block will remain in + # the application's default timezone. + def use_zone(time_zone) + new_zone = find_zone!(time_zone) + begin + old_zone, ::Time.zone = ::Time.zone, new_zone + yield + ensure + ::Time.zone = old_zone + end + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Raises an +ArgumentError+ for invalid time zones. + # + # Time.find_zone! "America/New_York" # => # + # Time.find_zone! "EST" # => # + # Time.find_zone! -5.hours # => # + # Time.find_zone! nil # => nil + # Time.find_zone! false # => false + # Time.find_zone! "NOT-A-TIMEZONE" # => ArgumentError: Invalid Timezone: NOT-A-TIMEZONE + def find_zone!(time_zone) + return time_zone unless time_zone + + ActiveSupport::TimeZone[time_zone] || raise(ArgumentError, "Invalid Timezone: #{time_zone}") + end + + # Returns a TimeZone instance matching the time zone provided. + # Accepts the time zone in any format supported by Time.zone=. + # Returns +nil+ for invalid time zones. + # + # Time.find_zone "America/New_York" # => # + # Time.find_zone "NOT-A-TIMEZONE" # => nil + def find_zone(time_zone) + find_zone!(time_zone) rescue nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/uri.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/uri.rb new file mode 100644 index 0000000..9811477 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/core_ext/uri.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +ActiveSupport::Deprecation.warn(<<-MSG.squish) + `active_support/core_ext/uri` is deprecated and will be removed in Rails 7.1. +MSG diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes.rb new file mode 100644 index 0000000..b4c6a5d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes.rb @@ -0,0 +1,226 @@ +# frozen_string_literal: true + +require "active_support/callbacks" +require "active_support/core_ext/enumerable" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # Abstract super class that provides a thread-isolated attributes singleton, which resets automatically + # before and after each request. This allows you to keep all the per-request attributes easily + # available to the whole system. 
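+ # + # As a minimal illustrative sketch before the fuller example below (editor's addition; + # the +Current+ class here is hypothetical), attributes declared on a subclass are + # isolated per thread/request: + # + # class Current < ActiveSupport::CurrentAttributes + # attribute :user + # end + # + # Current.user = some_user # visible only to the current thread/request + # Current.user # another thread sees its own value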
+ # + # The following full app-like example demonstrates how to use a Current class to + # facilitate easy access to the global, per-request attributes without passing them deeply + # around everywhere: + # + # # app/models/current.rb + # class Current < ActiveSupport::CurrentAttributes + # attribute :account, :user + # attribute :request_id, :user_agent, :ip_address + # + # resets { Time.zone = nil } + # + # def user=(user) + # super + # self.account = user.account + # Time.zone = user.time_zone + # end + # end + # + # # app/controllers/concerns/authentication.rb + # module Authentication + # extend ActiveSupport::Concern + # + # included do + # before_action :authenticate + # end + # + # private + # def authenticate + # if authenticated_user = User.find_by(id: cookies.encrypted[:user_id]) + # Current.user = authenticated_user + # else + # redirect_to new_session_url + # end + # end + # end + # + # # app/controllers/concerns/set_current_request_details.rb + # module SetCurrentRequestDetails + # extend ActiveSupport::Concern + # + # included do + # before_action do + # Current.request_id = request.uuid + # Current.user_agent = request.user_agent + # Current.ip_address = request.ip + # end + # end + # end + # + # class ApplicationController < ActionController::Base + # include Authentication + # include SetCurrentRequestDetails + # end + # + # class MessagesController < ApplicationController + # def create + # Current.account.messages.create(message_params) + # end + # end + # + # class Message < ApplicationRecord + # belongs_to :creator, default: -> { Current.user } + # after_create { |message| Event.create(record: message) } + # end + # + # class Event < ApplicationRecord + # before_create do + # self.request_id = Current.request_id + # self.user_agent = Current.user_agent + # self.ip_address = Current.ip_address + # end + # end + # + # A word of caution: It's easy to overdo a global singleton like Current and tangle your model as a result. + # Current should only be used for a few, top-level globals, like account, user, and request details. + # The attributes stuck in Current should be used by more or less all actions on all requests. If you start + # sticking controller-specific attributes in there, you're going to create a mess. + class CurrentAttributes + include ActiveSupport::Callbacks + define_callbacks :reset + + class << self + # Returns singleton instance for this class in this thread. If none exists, one is created. + def instance + current_instances[current_instances_key] ||= new + end + + # Declares one or more attributes that will be given both class and instance accessor methods. 
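+ # + # For example (illustrative sketch; +Current+ is hypothetical): + # + # class Current < ActiveSupport::CurrentAttributes + # attribute :account, :user + # end + # + # Current.user = user # class-level writer, delegates to the singleton instance + # Current.instance.user == Current.user # => true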
+ def attribute(*names) + ActiveSupport::CodeGenerator.batch(generated_attribute_methods, __FILE__, __LINE__) do |owner| + names.each do |name| + owner.define_cached_method(name, namespace: :current_attributes) do |batch| + batch << + "def #{name}" << + "attributes[:#{name}]" << + "end" + end + owner.define_cached_method("#{name}=", namespace: :current_attributes) do |batch| + batch << + "def #{name}=(value)" << + "attributes[:#{name}] = value" << + "end" + end + end + end + + ActiveSupport::CodeGenerator.batch(singleton_class, __FILE__, __LINE__) do |owner| + names.each do |name| + owner.define_cached_method(name, namespace: :current_attributes_delegation) do |batch| + batch << + "def #{name}" << + "instance.#{name}" << + "end" + end + owner.define_cached_method("#{name}=", namespace: :current_attributes_delegation) do |batch| + batch << + "def #{name}=(value)" << + "instance.#{name} = value" << + "end" + end + end + end + end + + # Calls this block before #reset is called on the instance. Used for resetting external collaborators that depend on current values. + def before_reset(&block) + set_callback :reset, :before, &block + end + + # Calls this block after #reset is called on the instance. Used for resetting external collaborators, like Time.zone. + def resets(&block) + set_callback :reset, :after, &block + end + alias_method :after_reset, :resets + + delegate :set, :reset, to: :instance + + def reset_all # :nodoc: + current_instances.each_value(&:reset) + end + + def clear_all # :nodoc: + reset_all + current_instances.clear + end + + private + def generated_attribute_methods + @generated_attribute_methods ||= Module.new.tap { |mod| include mod } + end + + def current_instances + IsolatedExecutionState[:current_attributes_instances] ||= {} + end + + def current_instances_key + @current_instances_key ||= name.to_sym + end + + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + ruby2_keywords(:method_missing) + + def respond_to_missing?(name, _) + super || instance.respond_to?(name) + end + end + + attr_accessor :attributes + + def initialize + @attributes = {} + end + + # Expose one or more attributes within a block. Old values are returned after the block concludes. + # Example demonstrating the common use of needing to set Current attributes outside the request-cycle: + # + # class Chat::PublicationJob < ApplicationJob + # def perform(attributes, room_number, creator) + # Current.set(person: creator) do + # Chat::Publisher.publish(attributes: attributes, room_number: room_number) + # end + # end + # end + def set(set_attributes) + old_attributes = compute_attributes(set_attributes.keys) + assign_attributes(set_attributes) + yield + ensure + assign_attributes(old_attributes) + end + + # Reset all attributes. Should be called before and after actions, when used as a per-request singleton. 
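+ # + # Illustrative sketch (editor's addition; +Current+ and +@app+ are hypothetical): a + # Rack-style middleware could reset around each request: + # + # def call(env) + # @app.call(env) + # ensure + # Current.reset # clears attributes and runs the +resets+ callbacks + # end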
+ def reset + run_callbacks :reset do + self.attributes = {} + end + end + + private + def assign_attributes(new_attributes) + new_attributes.each { |key, value| public_send("#{key}=", value) } + end + + def compute_attributes(keys) + keys.index_with { |key| public_send(key) } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes/test_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes/test_helper.rb new file mode 100644 index 0000000..2016384 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/current_attributes/test_helper.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +module ActiveSupport::CurrentAttributes::TestHelper # :nodoc: + def before_setup + ActiveSupport::CurrentAttributes.reset_all + super + end + + def after_teardown + super + ActiveSupport::CurrentAttributes.reset_all + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies.rb new file mode 100644 index 0000000..bb1fc46 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +require "set" +require "active_support/dependencies/interlock" + +module ActiveSupport # :nodoc: + module Dependencies # :nodoc: + require_relative "dependencies/require_dependency" + + singleton_class.attr_accessor :interlock + @interlock = Interlock.new + + # :doc: + + # Execute the supplied block without interference from any + # concurrent loads. + def self.run_interlock(&block) + interlock.running(&block) + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.load_interlock(&block) + interlock.loading(&block) + end + + # Execute the supplied block while holding an exclusive lock, + # preventing any other thread from being inside a #run_interlock + # block at the same time. + def self.unload_interlock(&block) + interlock.unloading(&block) + end + + # :nodoc: + + # The array of directories from which we autoload and reload, if reloading + # is enabled. The public interface to push directories to this collection + # from applications or engines is config.autoload_paths. + # + # This collection is allowed to have intersection with autoload_once_paths. + # Common directories are not reloaded. + singleton_class.attr_accessor :autoload_paths + self.autoload_paths = [] + + # The array of directories from which we autoload and never reload, even if + # reloading is enabled. The public interface to push directories to this + # collection from applications or engines is config.autoload_once_paths. + singleton_class.attr_accessor :autoload_once_paths + self.autoload_once_paths = [] + + # This is a private set that collects all eager load paths during bootstrap. + # Useful for Zeitwerk integration. The public interface to push custom + # directories to this collection from applications or engines is + # config.eager_load_paths. + singleton_class.attr_accessor :_eager_load_paths + self._eager_load_paths = Set.new + + # If reloading is enabled, this private set holds autoloaded classes tracked + # by the descendants tracker. It is populated by an on_load callback in the + # main autoloader. Used to clear state. 
+ singleton_class.attr_accessor :_autoloaded_tracked_classes + self._autoloaded_tracked_classes = Set.new + + # If reloading is enabled, this private attribute stores the main autoloader + # of a Rails application. It is `nil` otherwise. + # + # The public interface for this autoloader is `Rails.autoloaders.main`. + singleton_class.attr_accessor :autoloader + + # Private method that reloads constants autoloaded by the main autoloader. + # + # Rails.application.reloader.reload! is the public interface for application + # reload. That involves more things, like deleting unloaded classes from the + # internal state of the descendants tracker, or reloading routes. + def self.clear + unload_interlock do + _autoloaded_tracked_classes.clear + autoloader.reload + end + end + + # Private method used by require_dependency. + def self.search_for_file(relpath) + relpath += ".rb" unless relpath.end_with?(".rb") + autoload_paths.each do |autoload_path| + abspath = File.join(autoload_path, relpath) + return abspath if File.file?(abspath) + end + nil + end + + # Private method that helps configuring the autoloaders. + def self.eager_load?(path) + _eager_load_paths.member?(path) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/autoload.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/autoload.rb new file mode 100644 index 0000000..1cee85d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/autoload.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require "active_support/inflector/methods" + +module ActiveSupport + # Autoload and eager load conveniences for your library. + # + # This module allows you to define autoloads based on + # Rails conventions (i.e. no need to define the path + # it is automatically guessed based on the filename) + # and also define a set of constants that needs to be + # eager loaded: + # + # module MyLib + # extend ActiveSupport::Autoload + # + # autoload :Model + # + # eager_autoload do + # autoload :Cache + # end + # end + # + # Then your library can be eager loaded by simply calling: + # + # MyLib.eager_load! + module Autoload + def self.extended(base) # :nodoc: + base.class_eval do + @_autoloads = {} + @_under_path = nil + @_at_path = nil + @_eager_autoload = false + end + end + + def autoload(const_name, path = @_at_path) + unless path + full = [name, @_under_path, const_name.to_s].compact.join("::") + path = Inflector.underscore(full) + end + + if @_eager_autoload + @_autoloads[const_name] = path + end + + super const_name, path + end + + def autoload_under(path) + @_under_path, old_path = path, @_under_path + yield + ensure + @_under_path = old_path + end + + def autoload_at(path) + @_at_path, old_path = path, @_at_path + yield + ensure + @_at_path = old_path + end + + def eager_autoload + old_eager, @_eager_autoload = @_eager_autoload, true + yield + ensure + @_eager_autoload = old_eager + end + + def eager_load! 
+ @_autoloads.each_value { |file| require file } + end + + def autoloads + @_autoloads + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/interlock.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/interlock.rb new file mode 100644 index 0000000..e0e32e8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/interlock.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require "active_support/concurrency/share_lock" + +module ActiveSupport # :nodoc: + module Dependencies # :nodoc: + class Interlock + def initialize # :nodoc: + @lock = ActiveSupport::Concurrency::ShareLock.new + end + + def loading(&block) + @lock.exclusive(purpose: :load, compatible: [:load], after_compatible: [:load], &block) + end + + def unloading(&block) + @lock.exclusive(purpose: :unload, compatible: [:load, :unload], after_compatible: [:load, :unload], &block) + end + + def start_unloading + @lock.start_exclusive(purpose: :unload, compatible: [:load, :unload]) + end + + def done_unloading + @lock.stop_exclusive(compatible: [:load, :unload]) + end + + def start_running + @lock.start_sharing + end + + def done_running + @lock.stop_sharing + end + + def running(&block) + @lock.sharing(&block) + end + + def permit_concurrent_loads(&block) + @lock.yield_shares(compatible: [:load], &block) + end + + def raw_state(&block) # :nodoc: + @lock.raw_state(&block) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/require_dependency.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/require_dependency.rb new file mode 100644 index 0000000..403f5fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/dependencies/require_dependency.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module ActiveSupport::Dependencies::RequireDependency + # Warning: This method is obsolete. The semantics of the autoloader + # match Ruby's and you do not need to be defensive with load order anymore. + # Just refer to classes and modules normally. + # + # Engines that do not control the mode in which their parent application runs + # should call +require_dependency+ where needed in case the runtime mode is + # +:classic+. + def require_dependency(filename) + filename = filename.to_path if filename.respond_to?(:to_path) + + unless filename.is_a?(String) + raise ArgumentError, "the file name must be either a String or implement #to_path -- you passed #{filename.inspect}" + end + + if abspath = ActiveSupport::Dependencies.search_for_file(filename) + require abspath + else + require filename + end + end + + # We could define require_dependency in Object directly, but a module makes + # the extension apparent if you list ancestors. + Object.prepend(self) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation.rb new file mode 100644 index 0000000..d728386 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "singleton" + +module ActiveSupport + # \Deprecation specifies the API used by Rails to deprecate methods, instance + # variables, objects, and constants. 
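+ # + # A typical call looks like this (illustrative; see Reporting below for the exact format): + # + # ActiveSupport::Deprecation.warn("old_method is deprecated, use new_method") + # # => "DEPRECATION WARNING: old_method is deprecated, use new_method (called from ...)"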
+ class Deprecation + # active_support.rb sets an autoload for ActiveSupport::Deprecation. + # + # If these requires were at the top of the file the constant would not be + # defined by the time their files were loaded. Since some of them reopen + # ActiveSupport::Deprecation its autoload would be triggered, resulting in + # a circular require warning for active_support/deprecation.rb. + # + # So, we define the constant first, and load dependencies later. + require "active_support/deprecation/instance_delegator" + require "active_support/deprecation/behaviors" + require "active_support/deprecation/reporting" + require "active_support/deprecation/disallowed" + require "active_support/deprecation/constant_accessor" + require "active_support/deprecation/method_wrappers" + require "active_support/deprecation/proxy_wrappers" + require "active_support/core_ext/module/deprecation" + require "concurrent/atomic/thread_local_var" + + include Singleton + include InstanceDelegator + include Behavior + include Reporting + include Disallowed + include MethodWrapper + + # The version number in which the deprecated behavior will be removed, by default. + attr_accessor :deprecation_horizon + + # It accepts two parameters on initialization. The first is a version of library + # and the second is a library name. + # + # ActiveSupport::Deprecation.new('2.0', 'MyLibrary') + def initialize(deprecation_horizon = "7.1", gem_name = "Rails") + self.gem_name = gem_name + self.deprecation_horizon = deprecation_horizon + # By default, warnings are not silenced and debugging is off. + self.silenced = false + self.debug = false + @silenced_thread = Concurrent::ThreadLocalVar.new(false) + @explicitly_allowed_warnings = Concurrent::ThreadLocalVar.new(nil) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/behaviors.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/behaviors.rb new file mode 100644 index 0000000..bb31fba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/behaviors.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require "active_support/notifications" + +module ActiveSupport + # Raised when ActiveSupport::Deprecation::Behavior#behavior is set with :raise. + # You would set :raise, as a behavior to raise errors and proactively report exceptions from deprecations. + class DeprecationException < StandardError + end + + class Deprecation + # Default warning behaviors per Rails.env. 
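+ # Each behavior is called with (message, callstack, deprecation_horizon, gem_name); the + # entries below can be selected by name, e.g. (illustrative): + # + # ActiveSupport::Deprecation.behavior = :log # route warnings through Rails.logger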
+ DEFAULT_BEHAVIORS = { + raise: ->(message, callstack, deprecation_horizon, gem_name) { + e = DeprecationException.new(message) + e.set_backtrace(callstack.map(&:to_s)) + raise e + }, + + stderr: ->(message, callstack, deprecation_horizon, gem_name) { + $stderr.puts(message) + $stderr.puts callstack.join("\n ") if debug + }, + + log: ->(message, callstack, deprecation_horizon, gem_name) { + logger = + if defined?(Rails.logger) && Rails.logger + Rails.logger + else + require "active_support/logger" + ActiveSupport::Logger.new($stderr) + end + logger.warn message + logger.debug callstack.join("\n ") if debug + }, + + notify: ->(message, callstack, deprecation_horizon, gem_name) { + notification_name = "deprecation.#{gem_name.underscore.tr('/', '_')}" + ActiveSupport::Notifications.instrument(notification_name, + message: message, + callstack: callstack, + gem_name: gem_name, + deprecation_horizon: deprecation_horizon) + }, + + silence: ->(message, callstack, deprecation_horizon, gem_name) { }, + } + + # Behavior module allows to determine how to display deprecation messages. + # You can create a custom behavior or set any from the +DEFAULT_BEHAVIORS+ + # constant. Available behaviors are: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to $stderr. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. On Rails, set config.active_support.report_deprecations = false to disable all behaviors. + # + # Setting behaviors only affects deprecations that happen after boot time. + # For more information you can read the documentation of the +behavior=+ method. + module Behavior + # Whether to print a backtrace along with the warning. + attr_accessor :debug + + # Returns the current behavior or if one isn't set, defaults to +:stderr+. + def behavior + @behavior ||= [DEFAULT_BEHAVIORS[:stderr]] + end + + # Returns the current behavior for disallowed deprecations or if one isn't set, defaults to +:raise+. + def disallowed_behavior + @disallowed_behavior ||= [DEFAULT_BEHAVIORS[:raise]] + end + + # Sets the behavior to the specified value. Can be a single value, array, + # or an object that responds to +call+. + # + # Available behaviors: + # + # [+raise+] Raise ActiveSupport::DeprecationException. + # [+stderr+] Log all deprecation warnings to $stderr. + # [+log+] Log all deprecation warnings to +Rails.logger+. + # [+notify+] Use +ActiveSupport::Notifications+ to notify +deprecation.rails+. + # [+silence+] Do nothing. + # + # Setting behaviors only affects deprecations that happen after boot time. + # Deprecation warnings raised by gems are not affected by this setting + # because they happen before Rails boots up. + # + # ActiveSupport::Deprecation.behavior = :stderr + # ActiveSupport::Deprecation.behavior = [:stderr, :log] + # ActiveSupport::Deprecation.behavior = MyCustomHandler + # ActiveSupport::Deprecation.behavior = ->(message, callstack, deprecation_horizon, gem_name) { + # # custom stuff + # } + # + # If you are using Rails, you can set config.active_support.report_deprecations = false to disable + # all deprecation behaviors. This is similar to the +silence+ option but more performant. 
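+ # A callable of arity 2 is also accepted and is wrapped so that the horizon and gem name + # arguments are dropped (see +arity_coerce+ below). Illustrative, +MyNotifier+ being a + # stand-in: + # + # ActiveSupport::Deprecation.behavior = ->(message, callstack) { MyNotifier.ping(message) }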
+ def behavior=(behavior) + @behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + # Sets the behavior for disallowed deprecations (those configured by + # ActiveSupport::Deprecation.disallowed_warnings=) to the specified + # value. As with +behavior=+, this can be a single value, array, or an + # object that responds to +call+. + def disallowed_behavior=(behavior) + @disallowed_behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || arity_coerce(b) } + end + + private + def arity_coerce(behavior) + unless behavior.respond_to?(:call) + raise ArgumentError, "#{behavior.inspect} is not a valid deprecation behavior." + end + + if behavior.respond_to?(:arity) && behavior.arity == 2 + -> message, callstack, _, _ { behavior.call(message, callstack) } + else + behavior + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/constant_accessor.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/constant_accessor.rb new file mode 100644 index 0000000..1ed0015 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/constant_accessor.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + # DeprecatedConstantAccessor transforms a constant into a deprecated one by + # hooking +const_missing+. + # + # It takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. + # + # The deprecated constant now returns the same object as the new one rather + # than a proxy object, so it can be used transparently in +rescue+ blocks + # etc. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # include ActiveSupport::Deprecation::DeprecatedConstantAccessor + # deprecate_constant 'PLANETS', 'PLANETS_POST_2006' + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + module DeprecatedConstantAccessor + def self.included(base) + require "active_support/inflector/methods" + + extension = Module.new do + def const_missing(missing_const_name) + if class_variable_defined?(:@@_deprecated_constants) + if (replacement = class_variable_get(:@@_deprecated_constants)[missing_const_name.to_s]) + replacement[:deprecator].warn(replacement[:message] || "#{name}::#{missing_const_name} is deprecated! 
Use #{replacement[:new]} instead.", caller_locations) + return ActiveSupport::Inflector.constantize(replacement[:new].to_s) + end + end + super + end + + def deprecate_constant(const_name, new_constant, message: nil, deprecator: ActiveSupport::Deprecation.instance) + class_variable_set(:@@_deprecated_constants, {}) unless class_variable_defined?(:@@_deprecated_constants) + class_variable_get(:@@_deprecated_constants)[const_name.to_s] = { new: new_constant, message: message, deprecator: deprecator } + end + end + base.singleton_class.prepend extension + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/disallowed.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/disallowed.rb new file mode 100644 index 0000000..096ecaa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/disallowed.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + module Disallowed + # Sets the criteria used to identify deprecation messages which should be + # disallowed. Can be an array containing strings, symbols, or regular + # expressions. (Symbols are treated as strings). These are compared against + # the text of the generated deprecation warning. + # + # Additionally the scalar symbol +:all+ may be used to treat all + # deprecations as disallowed. + # + # Deprecations matching a substring or regular expression will be handled + # using the configured +ActiveSupport::Deprecation.disallowed_behavior+ + # rather than +ActiveSupport::Deprecation.behavior+ + attr_writer :disallowed_warnings + + # Returns the configured criteria used to identify deprecation messages + # which should be treated as disallowed. + def disallowed_warnings + @disallowed_warnings ||= [] + end + + private + def deprecation_disallowed?(message) + disallowed = ActiveSupport::Deprecation.disallowed_warnings + return false if explicitly_allowed?(message) + return true if disallowed == :all + disallowed.any? do |rule| + case rule + when String, Symbol + message.include?(rule.to_s) + when Regexp + rule.match?(message) + end + end + end + + def explicitly_allowed?(message) + allowances = @explicitly_allowed_warnings.value + return false unless allowances + return true if allowances == :all + allowances = [allowances] unless allowances.kind_of?(Array) + allowances.any? 
do |rule| + case rule + when String, Symbol + message.include?(rule.to_s) + when Regexp + rule.match?(message) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/instance_delegator.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/instance_delegator.rb new file mode 100644 index 0000000..59dd30a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/instance_delegator.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class Deprecation + module InstanceDelegator # :nodoc: + def self.included(base) + base.extend(ClassMethods) + base.singleton_class.prepend(OverrideDelegators) + base.public_class_method :new + end + + module ClassMethods # :nodoc: + def include(included_module) + included_module.instance_methods.each { |m| method_added(m) } + super + end + + def method_added(method_name) + singleton_class.delegate(method_name, to: :instance) + end + end + + module OverrideDelegators # :nodoc: + def warn(message = nil, callstack = nil) + callstack ||= caller_locations(2) + super + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/method_wrappers.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/method_wrappers.rb new file mode 100644 index 0000000..5437598 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/method_wrappers.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/extract_options" +require "active_support/core_ext/module/redefine_method" + +module ActiveSupport + class Deprecation + module MethodWrapper + # Declare that a method has been deprecated. + # + # class Fred + # def aaa; end + # def bbb; end + # def ccc; end + # def ddd; end + # def eee; end + # end + # + # Using the default deprecator: + # ActiveSupport::Deprecation.deprecate_methods(Fred, :aaa, bbb: :zzz, ccc: 'use Bar#ccc instead') + # # => Fred + # + # Fred.new.aaa + # # DEPRECATION WARNING: aaa is deprecated and will be removed from Rails 5.1. (called from irb_binding at (irb):10) + # # => nil + # + # Fred.new.bbb + # # DEPRECATION WARNING: bbb is deprecated and will be removed from Rails 5.1 (use zzz instead). (called from irb_binding at (irb):11) + # # => nil + # + # Fred.new.ccc + # # DEPRECATION WARNING: ccc is deprecated and will be removed from Rails 5.1 (use Bar#ccc instead). (called from irb_binding at (irb):12) + # # => nil + # + # Passing in a custom deprecator: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # ActiveSupport::Deprecation.deprecate_methods(Fred, ddd: :zzz, deprecator: custom_deprecator) + # # => [:ddd] + # + # Fred.new.ddd + # DEPRECATION WARNING: ddd is deprecated and will be removed from MyGem next-release (use zzz instead). 
(called from irb_binding at (irb):15) + # # => nil + # + # Using a custom deprecator directly: + # custom_deprecator = ActiveSupport::Deprecation.new('next-release', 'MyGem') + # custom_deprecator.deprecate_methods(Fred, eee: :zzz) + # # => [:eee] + # + # Fred.new.eee + # DEPRECATION WARNING: eee is deprecated and will be removed from MyGem next-release (use zzz instead). (called from irb_binding at (irb):18) + # # => nil + def deprecate_methods(target_module, *method_names) + options = method_names.extract_options! + deprecator = options.delete(:deprecator) || self + method_names += options.keys + mod = nil + + method_names.each do |method_name| + message = options[method_name] + if target_module.method_defined?(method_name) || target_module.private_method_defined?(method_name) + method = target_module.instance_method(method_name) + target_module.module_eval do + redefine_method(method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, message) + method.bind_call(self, *args, &block) + end + ruby2_keywords(method_name) + end + else + mod ||= Module.new + mod.module_eval do + define_method(method_name) do |*args, &block| + deprecator.deprecation_warning(method_name, message) + super(*args, &block) + end + ruby2_keywords(method_name) + end + end + end + + target_module.prepend(mod) if mod + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/proxy_wrappers.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/proxy_wrappers.rb new file mode 100644 index 0000000..1584f71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/proxy_wrappers.rb @@ -0,0 +1,177 @@ +# frozen_string_literal: true + +module ActiveSupport + class Deprecation + class DeprecationProxy # :nodoc: + def self.new(*args, &block) + object = args.first + + return object unless object + super + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. + def inspect + target.inspect + end + + private + def method_missing(called, *args, &block) + warn caller_locations, called, args + target.__send__(called, *args, &block) + end + end + + # DeprecatedObjectProxy transforms an object into a deprecated one. It + # takes an object, a deprecation message, and optionally a deprecator. The + # deprecator defaults to +ActiveSupport::Deprecator+ if none is specified. + # + # deprecated_object = ActiveSupport::Deprecation::DeprecatedObjectProxy.new(Object.new, "This object is now deprecated") + # # => # + # + # deprecated_object.to_s + # DEPRECATION WARNING: This object is now deprecated. + # (Backtrace) + # # => "#" + class DeprecatedObjectProxy < DeprecationProxy + def initialize(object, message, deprecator = ActiveSupport::Deprecation.instance) + @object = object + @message = message + @deprecator = deprecator + end + + private + def target + @object + end + + def warn(callstack, called, args) + @deprecator.warn(@message, callstack) + end + end + + # DeprecatedInstanceVariableProxy transforms an instance variable into a + # deprecated one. It takes an instance of a class, a method on that class + # and an instance variable. It optionally takes a deprecator as the last + # argument. The deprecator defaults to +ActiveSupport::Deprecator+ if none + # is specified. 
+ # + # class Example + # def initialize + # @request = ActiveSupport::Deprecation::DeprecatedInstanceVariableProxy.new(self, :request, :@request) + # @_request = :special_request + # end + # + # def request + # @_request + # end + # + # def old_request + # @request + # end + # end + # + # example = Example.new + # # => # + # + # example.old_request.to_s + # # => DEPRECATION WARNING: @request is deprecated! Call request.to_s instead of + # @request.to_s + # (Backtrace information…) + # "special_request" + # + # example.request.to_s + # # => "special_request" + class DeprecatedInstanceVariableProxy < DeprecationProxy + def initialize(instance, method, var = "@#{method}", deprecator = ActiveSupport::Deprecation.instance) + @instance = instance + @method = method + @var = var + @deprecator = deprecator + end + + private + def target + @instance.__send__(@method) + end + + def warn(callstack, called, args) + @deprecator.warn("#{@var} is deprecated! Call #{@method}.#{called} instead of #{@var}.#{called}. Args: #{args.inspect}", callstack) + end + end + + # DeprecatedConstantProxy transforms a constant into a deprecated one. It + # takes the names of an old (deprecated) constant and of a new constant + # (both in string form) and optionally a deprecator. The deprecator defaults + # to +ActiveSupport::Deprecator+ if none is specified. The deprecated constant + # now returns the value of the new one. + # + # PLANETS = %w(mercury venus earth mars jupiter saturn uranus neptune pluto) + # + # # (In a later update, the original implementation of `PLANETS` has been removed.) + # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # + # PLANETS.map { |planet| planet.capitalize } + # # => DEPRECATION WARNING: PLANETS is deprecated! Use PLANETS_POST_2006 instead. + # (Backtrace information…) + # ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"] + class DeprecatedConstantProxy < Module + def self.new(*args, **options, &block) + object = args.first + + return object unless object + super + end + + def initialize(old_const, new_const, deprecator = ActiveSupport::Deprecation.instance, message: "#{old_const} is deprecated! Use #{new_const} instead.") + Kernel.require "active_support/inflector/methods" + + @old_const = old_const + @new_const = new_const + @deprecator = deprecator + @message = message + end + + instance_methods.each { |m| undef_method m unless /^__|^object_id$/.match?(m) } + + # Don't give a deprecation warning on inspect since test/unit and error + # logs rely on it for diagnostics. + def inspect + target.inspect + end + + # Don't give a deprecation warning on methods that IRB may invoke + # during tab-completion. + delegate :hash, :instance_methods, :name, :respond_to?, to: :target + + # Returns the class of the new constant. 
+ # + # PLANETS_POST_2006 = %w(mercury venus earth mars jupiter saturn uranus neptune) + # PLANETS = ActiveSupport::Deprecation::DeprecatedConstantProxy.new('PLANETS', 'PLANETS_POST_2006') + # PLANETS.class # => Array + def class + target.class + end + + private + def target + ActiveSupport::Inflector.constantize(@new_const.to_s) + end + + def const_missing(name) + @deprecator.warn(@message, caller_locations) + target.const_get(name) + end + + def method_missing(called, *args, &block) + @deprecator.warn(@message, caller_locations) + target.__send__(called, *args, &block) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/reporting.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/reporting.rb new file mode 100644 index 0000000..51514eb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/deprecation/reporting.rb @@ -0,0 +1,157 @@ +# frozen_string_literal: true + +require "rbconfig" + +module ActiveSupport + class Deprecation + module Reporting + # Whether to print a message (silent mode) + attr_writer :silenced + # Name of gem where method is deprecated + attr_accessor :gem_name + + # Outputs a deprecation warning to the output configured by + # ActiveSupport::Deprecation.behavior. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + def warn(message = nil, callstack = nil) + return if silenced + + callstack ||= caller_locations(2) + deprecation_message(callstack, message).tap do |m| + if deprecation_disallowed?(message) + disallowed_behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + else + behavior.each { |b| b.call(m, callstack, deprecation_horizon, gem_name) } + end + end + end + + # Silence deprecation warnings within the block. + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => "DEPRECATION WARNING: something broke! (called from your_code.rb:1)" + # + # ActiveSupport::Deprecation.silence do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + def silence(&block) + @silenced_thread.bind(true, &block) + end + + # Allow previously disallowed deprecation warnings within the block. + # allowed_warnings can be an array containing strings, symbols, or regular + # expressions. (Symbols are treated as strings). These are compared against + # the text of deprecation warning messages generated within the block. + # Matching warnings will be exempt from the rules set by + # +ActiveSupport::Deprecation.disallowed_warnings+ + # + # The optional if: argument accepts a truthy/falsy value or an object that + # responds to .call. If truthy, then matching warnings will be allowed. + # If falsey then the method yields to the block without allowing the warning. + # + # ActiveSupport::Deprecation.disallowed_behavior = :raise + # ActiveSupport::Deprecation.disallowed_warnings = [ + # "something broke" + # ] + # + # ActiveSupport::Deprecation.warn('something broke!') + # # => ActiveSupport::DeprecationException + # + # ActiveSupport::Deprecation.allow ['something broke'] do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => nil + # + # ActiveSupport::Deprecation.allow ['something broke'], if: Rails.env.production? 
do + # ActiveSupport::Deprecation.warn('something broke!') + # end + # # => ActiveSupport::DeprecationException for dev/test, nil for production + def allow(allowed_warnings = :all, if: true, &block) + conditional = binding.local_variable_get(:if) + conditional = conditional.call if conditional.respond_to?(:call) + if conditional + @explicitly_allowed_warnings.bind(allowed_warnings, &block) + else + yield + end + end + + def silenced + @silenced || @silenced_thread.value + end + + def deprecation_warning(deprecated_method_name, message = nil, caller_backtrace = nil) + caller_backtrace ||= caller_locations(2) + deprecated_method_warning(deprecated_method_name, message).tap do |msg| + warn(msg, caller_backtrace) + end + end + + private + # Outputs a deprecation warning message + # + # deprecated_method_warning(:method_name) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon}" + # deprecated_method_warning(:method_name, :another_method) + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (use another_method instead)" + # deprecated_method_warning(:method_name, "Optional message") + # # => "method_name is deprecated and will be removed from Rails #{deprecation_horizon} (Optional message)" + def deprecated_method_warning(method_name, message = nil) + warning = "#{method_name} is deprecated and will be removed from #{gem_name} #{deprecation_horizon}" + case message + when Symbol then "#{warning} (use #{message} instead)" + when String then "#{warning} (#{message})" + else warning + end + end + + def deprecation_message(callstack, message = nil) + message ||= "You are using deprecated behavior which will be removed from the next major or minor release." + "DEPRECATION WARNING: #{message} #{deprecation_caller_message(callstack)}" + end + + def deprecation_caller_message(callstack) + file, line, method = extract_callstack(callstack) + if file + if line && method + "(called from #{method} at #{file}:#{line})" + else + "(called from #{file}:#{line})" + end + end + end + + def extract_callstack(callstack) + return _extract_callstack(callstack) if callstack.first.is_a? 
String + + offending_line = callstack.find { |frame| + frame.absolute_path && !ignored_callstack(frame.absolute_path) + } || callstack.first + + [offending_line.path, offending_line.lineno, offending_line.label] + end + + def _extract_callstack(callstack) + warn "Please pass `caller_locations` to the deprecation API" if $VERBOSE + offending_line = callstack.find { |line| !ignored_callstack(line) } || callstack.first + + if offending_line + if md = offending_line.match(/^(.+?):(\d+)(?::in `(.*?)')?/) + md.captures + else + offending_line + end + end + end + + RAILS_GEM_ROOT = File.expand_path("../../../..", __dir__) + "/" + + def ignored_callstack(path) + path.start_with?(RAILS_GEM_ROOT) || path.start_with?(RbConfig::CONFIG["rubylibdir"]) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/descendants_tracker.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/descendants_tracker.rb new file mode 100644 index 0000000..ea3ed1f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/descendants_tracker.rb @@ -0,0 +1,218 @@ +# frozen_string_literal: true + +require "weakref" +require "active_support/ruby_features" + +module ActiveSupport + # This module provides an internal implementation to track descendants + # which is faster than iterating through ObjectSpace. + module DescendantsTracker + class << self + def direct_descendants(klass) + ActiveSupport::Deprecation.warn(<<~MSG) + ActiveSupport::DescendantsTracker.direct_descendants is deprecated and will be removed in Rails 7.1. + Use ActiveSupport::DescendantsTracker.subclasses instead. + MSG + subclasses(klass) + end + end + + @clear_disabled = false + + if RubyFeatures::CLASS_SUBCLASSES + @@excluded_descendants = if RUBY_ENGINE == "ruby" + # On MRI `ObjectSpace::WeakMap` keys are weak references. + # So we can simply use WeakMap as a `Set`. + ObjectSpace::WeakMap.new + else + # On TruffleRuby `ObjectSpace::WeakMap` keys are strong references. + # So we use `object_id` as a key and the actual object as a value. + # + # JRuby for now doesn't have Class#descendant, but when it will, it will likely + # have the same WeakMap semantic than Truffle so we future proof this as much as possible. + class WeakSet # :nodoc: + def initialize + @map = ObjectSpace::WeakMap.new + end + + def [](object) + @map.key?(object.object_id) + end + + def []=(object, _present) + @map[object.object_id] = object + end + end + WeakSet.new + end + + class << self + def disable_clear! # :nodoc: + unless @clear_disabled + @clear_disabled = true + remove_method(:subclasses) + @@excluded_descendants = nil + end + end + + def subclasses(klass) + klass.subclasses + end + + def descendants(klass) + klass.descendants + end + + def clear(classes) # :nodoc: + raise "DescendantsTracker.clear was disabled because config.cache_classes = true" if @clear_disabled + + classes.each do |klass| + @@excluded_descendants[klass] = true + klass.descendants.each do |descendant| + @@excluded_descendants[descendant] = true + end + end + end + + def native? # :nodoc: + true + end + end + + def subclasses + subclasses = super + subclasses.reject! { |d| @@excluded_descendants[d] } + subclasses + end + + def descendants + subclasses.concat(subclasses.flat_map(&:descendants)) + end + + def direct_descendants + ActiveSupport::Deprecation.warn(<<~MSG) + ActiveSupport::DescendantsTracker#direct_descendants is deprecated and will be removed in Rails 7.1. + Use #subclasses instead. 
+ MSG + subclasses + end + else + @@direct_descendants = {} + + class << self + def disable_clear! # :nodoc: + @clear_disabled = true + end + + def subclasses(klass) + descendants = @@direct_descendants[klass] + descendants ? descendants.to_a : [] + end + + def descendants(klass) + arr = [] + accumulate_descendants(klass, arr) + arr + end + + def clear(classes) # :nodoc: + raise "DescendantsTracker.clear was disabled because config.cache_classes = true" if @clear_disabled + + @@direct_descendants.each do |klass, direct_descendants_of_klass| + if classes.member?(klass) + @@direct_descendants.delete(klass) + else + direct_descendants_of_klass.reject! do |direct_descendant_of_class| + classes.member?(direct_descendant_of_class) + end + end + end + end + + def native? # :nodoc: + false + end + + # This is the only method that is not thread safe, but is only ever called + # during the eager loading phase. + def store_inherited(klass, descendant) + (@@direct_descendants[klass] ||= DescendantsArray.new) << descendant + end + + private + def accumulate_descendants(klass, acc) + if direct_descendants = @@direct_descendants[klass] + direct_descendants.each do |direct_descendant| + acc << direct_descendant + accumulate_descendants(direct_descendant, acc) + end + end + end + end + + def inherited(base) + DescendantsTracker.store_inherited(self, base) + super + end + + def direct_descendants + ActiveSupport::Deprecation.warn(<<~MSG) + ActiveSupport::DescendantsTracker#direct_descendants is deprecated and will be removed in Rails 7.1. + Use #subclasses instead. + MSG + DescendantsTracker.subclasses(self) + end + + def subclasses + DescendantsTracker.subclasses(self) + end + + def descendants + DescendantsTracker.descendants(self) + end + + # DescendantsArray is an array that contains weak references to classes. + class DescendantsArray # :nodoc: + include Enumerable + + def initialize + @refs = [] + end + + def initialize_copy(orig) + @refs = @refs.dup + end + + def <<(klass) + @refs << WeakRef.new(klass) + end + + def each + @refs.reject! do |ref| + yield ref.__getobj__ + false + rescue WeakRef::RefError + true + end + self + end + + def refs_size + @refs.size + end + + def cleanup! + @refs.delete_if { |ref| !ref.weakref_alive? } + end + + def reject! + @refs.reject! 
do |ref| + yield ref.__getobj__ + rescue WeakRef::RefError + true + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/digest.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/digest.rb new file mode 100644 index 0000000..a3d27be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/digest.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "openssl" + +module ActiveSupport + class Digest # :nodoc: + class << self + def hash_digest_class + @hash_digest_class ||= OpenSSL::Digest::MD5 + end + + def hash_digest_class=(klass) + raise ArgumentError, "#{klass} is expected to implement hexdigest class method" unless klass.respond_to?(:hexdigest) + @hash_digest_class = klass + end + + def hexdigest(arg) + hash_digest_class.hexdigest(arg)[0...32] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration.rb new file mode 100644 index 0000000..19986d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration.rb @@ -0,0 +1,514 @@ +# frozen_string_literal: true + +require "active_support/core_ext/array/conversions" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/string/filters" + +module ActiveSupport + # Provides accurate date and time measurements using Date#advance and + # Time#advance, respectively. It mainly supports the methods on Numeric. + # + # 1.month.ago # equivalent to Time.now.advance(months: -1) + class Duration + class Scalar < Numeric # :nodoc: + attr_reader :value + delegate :to_i, :to_f, :to_s, to: :value + + def initialize(value) + @value = value + end + + def coerce(other) + [Scalar.new(other), self] + end + + def -@ + Scalar.new(-value) + end + + def <=>(other) + if Scalar === other || Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + else + nil + end + end + + def +(other) + if Duration === other + seconds = value + other._parts.fetch(:seconds, 0) + new_parts = other._parts.merge(seconds: seconds) + new_value = value + other.value + + Duration.new(new_value, new_parts, other.variable?) + else + calculate(:+, other) + end + end + + def -(other) + if Duration === other + seconds = value - other._parts.fetch(:seconds, 0) + new_parts = other._parts.transform_values(&:-@) + new_parts = new_parts.merge(seconds: seconds) + new_value = value - other.value + + Duration.new(new_value, new_parts, other.variable?) + else + calculate(:-, other) + end + end + + def *(other) + if Duration === other + new_parts = other._parts.transform_values { |other_value| value * other_value } + new_value = value * other.value + + Duration.new(new_value, new_parts, other.variable?) + else + calculate(:*, other) + end + end + + def /(other) + if Duration === other + value / other.value + else + calculate(:/, other) + end + end + + def %(other) + if Duration === other + Duration.build(value % other.value) + else + calculate(:%, other) + end + end + + def variable? 
# :nodoc: + false + end + + private + def calculate(op, other) + if Scalar === other + Scalar.new(value.public_send(op, other.value)) + elsif Numeric === other + Scalar.new(value.public_send(op, other)) + else + raise_type_error(other) + end + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end + + SECONDS_PER_MINUTE = 60 + SECONDS_PER_HOUR = 3600 + SECONDS_PER_DAY = 86400 + SECONDS_PER_WEEK = 604800 + SECONDS_PER_MONTH = 2629746 # 1/12 of a gregorian year + SECONDS_PER_YEAR = 31556952 # length of a gregorian year (365.2425 days) + + PARTS_IN_SECONDS = { + seconds: 1, + minutes: SECONDS_PER_MINUTE, + hours: SECONDS_PER_HOUR, + days: SECONDS_PER_DAY, + weeks: SECONDS_PER_WEEK, + months: SECONDS_PER_MONTH, + years: SECONDS_PER_YEAR + }.freeze + + PARTS = [:years, :months, :weeks, :days, :hours, :minutes, :seconds].freeze + VARIABLE_PARTS = [:years, :months, :weeks, :days].freeze + + attr_reader :value + + autoload :ISO8601Parser, "active_support/duration/iso8601_parser" + autoload :ISO8601Serializer, "active_support/duration/iso8601_serializer" + + class << self + # Creates a new Duration from string formatted according to ISO 8601 Duration. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # This method allows negative parts to be present in pattern. + # If invalid string is provided, it will raise +ActiveSupport::Duration::ISO8601Parser::ParsingError+. + def parse(iso8601duration) + parts = ISO8601Parser.new(iso8601duration).parse! + new(calculate_total_seconds(parts), parts) + end + + def ===(other) # :nodoc: + other.is_a?(Duration) + rescue ::NoMethodError + false + end + + def seconds(value) # :nodoc: + new(value, { seconds: value }, false) + end + + def minutes(value) # :nodoc: + new(value * SECONDS_PER_MINUTE, { minutes: value }, false) + end + + def hours(value) # :nodoc: + new(value * SECONDS_PER_HOUR, { hours: value }, false) + end + + def days(value) # :nodoc: + new(value * SECONDS_PER_DAY, { days: value }, true) + end + + def weeks(value) # :nodoc: + new(value * SECONDS_PER_WEEK, { weeks: value }, true) + end + + def months(value) # :nodoc: + new(value * SECONDS_PER_MONTH, { months: value }, true) + end + + def years(value) # :nodoc: + new(value * SECONDS_PER_YEAR, { years: value }, true) + end + + # Creates a new Duration from a seconds value that is converted + # to the individual parts: + # + # ActiveSupport::Duration.build(31556952).parts # => {:years=>1} + # ActiveSupport::Duration.build(2716146).parts # => {:months=>1, :days=>1} + # + def build(value) + unless value.is_a?(::Numeric) + raise TypeError, "can't build an #{self.name} from a #{value.class.name}" + end + + parts = {} + remainder_sign = value <=> 0 + remainder = value.round(9).abs + variable = false + + PARTS.each do |part| + unless part == :seconds + part_in_seconds = PARTS_IN_SECONDS[part] + parts[part] = remainder.div(part_in_seconds) * remainder_sign + remainder %= part_in_seconds + + unless parts[part].zero? + variable ||= VARIABLE_PARTS.include?(part) + end + end + end unless value == 0 + + parts[:seconds] = remainder * remainder_sign + + new(value, parts, variable) + end + + private + def calculate_total_seconds(parts) + parts.inject(0) do |total, (part, value)| + total + value * PARTS_IN_SECONDS[part] + end + end + end + + def initialize(value, parts, variable = nil) # :nodoc: + @value, @parts = value, parts + @parts.reject! { |k, v| v.zero? 
} unless value == 0 + @parts.freeze + @variable = variable + + if @variable.nil? + @variable = @parts.any? { |part, _| VARIABLE_PARTS.include?(part) } + end + end + + # Returns a copy of the parts hash that defines the duration + def parts + @parts.dup + end + + def coerce(other) # :nodoc: + case other + when Scalar + [other, self] + when Duration + [Scalar.new(other.value), self] + else + [Scalar.new(other), self] + end + end + + # Compares one Duration with another or a Numeric to this Duration. + # Numeric values are treated as seconds. + def <=>(other) + if Duration === other + value <=> other.value + elsif Numeric === other + value <=> other + end + end + + # Adds another Duration or a Numeric to this Duration. Numeric values + # are treated as seconds. + def +(other) + if Duration === other + parts = @parts.merge(other._parts) do |_key, value, other_value| + value + other_value + end + Duration.new(value + other.value, parts, @variable || other.variable?) + else + seconds = @parts.fetch(:seconds, 0) + other + Duration.new(value + other, @parts.merge(seconds: seconds), @variable) + end + end + + # Subtracts another Duration or a Numeric from this Duration. Numeric + # values are treated as seconds. + def -(other) + self + (-other) + end + + # Multiplies this Duration by a Numeric and returns a new Duration. + def *(other) + if Scalar === other || Duration === other + Duration.new(value * other.value, @parts.transform_values { |number| number * other.value }, @variable || other.variable?) + elsif Numeric === other + Duration.new(value * other, @parts.transform_values { |number| number * other }, @variable) + else + raise_type_error(other) + end + end + + # Divides this Duration by a Numeric and returns a new Duration. + def /(other) + if Scalar === other + Duration.new(value / other.value, @parts.transform_values { |number| number / other.value }, @variable) + elsif Duration === other + value / other.value + elsif Numeric === other + Duration.new(value / other, @parts.transform_values { |number| number / other }, @variable) + else + raise_type_error(other) + end + end + + # Returns the modulo of this Duration by another Duration or Numeric. + # Numeric values are treated as seconds. + def %(other) + if Duration === other || Scalar === other + Duration.build(value % other.value) + elsif Numeric === other + Duration.build(value % other) + else + raise_type_error(other) + end + end + + def -@ # :nodoc: + Duration.new(-value, @parts.transform_values(&:-@), @variable) + end + + def +@ # :nodoc: + self + end + + def is_a?(klass) # :nodoc: + Duration == klass || value.is_a?(klass) + end + alias :kind_of? :is_a? + + def instance_of?(klass) # :nodoc: + Duration == klass || value.instance_of?(klass) + end + + # Returns +true+ if +other+ is also a Duration instance with the + # same +value+, or if other == value. + def ==(other) + if Duration === other + other.value == value + else + other == value + end + end + + # Returns the amount of seconds a duration covers as a string. + # For more information check to_i method. + # + # 1.day.to_s # => "86400" + def to_s + @value.to_s + end + + # Returns the number of seconds that this Duration represents. + # + # 1.minute.to_i # => 60 + # 1.hour.to_i # => 3600 + # 1.day.to_i # => 86400 + # + # Note that this conversion makes some assumptions about the + # duration of some periods, e.g. 
months are always 1/12 of year + # and years are 365.2425 days: + # + # # equivalent to (1.year / 12).to_i + # 1.month.to_i # => 2629746 + # + # # equivalent to 365.2425.days.to_i + # 1.year.to_i # => 31556952 + # + # In such cases, Ruby's core + # Date[https://ruby-doc.org/stdlib/libdoc/date/rdoc/Date.html] and + # Time[https://ruby-doc.org/stdlib/libdoc/time/rdoc/Time.html] should be used for precision + # date and time arithmetic. + def to_i + @value.to_i + end + alias :in_seconds :to_i + + # Returns the amount of minutes a duration covers as a float + # + # 1.day.in_minutes # => 1440.0 + def in_minutes + in_seconds / SECONDS_PER_MINUTE.to_f + end + + # Returns the amount of hours a duration covers as a float + # + # 1.day.in_hours # => 24.0 + def in_hours + in_seconds / SECONDS_PER_HOUR.to_f + end + + # Returns the amount of days a duration covers as a float + # + # 12.hours.in_days # => 0.5 + def in_days + in_seconds / SECONDS_PER_DAY.to_f + end + + # Returns the amount of weeks a duration covers as a float + # + # 2.months.in_weeks # => 8.696 + def in_weeks + in_seconds / SECONDS_PER_WEEK.to_f + end + + # Returns the amount of months a duration covers as a float + # + # 9.weeks.in_months # => 2.07 + def in_months + in_seconds / SECONDS_PER_MONTH.to_f + end + + # Returns the amount of years a duration covers as a float + # + # 30.days.in_years # => 0.082 + def in_years + in_seconds / SECONDS_PER_YEAR.to_f + end + + # Returns +true+ if +other+ is also a Duration instance, which has the + # same parts as this one. + def eql?(other) + Duration === other && other.value.eql?(value) + end + + def hash + @value.hash + end + + # Calculates a new Time or Date that is as far in the future + # as this Duration represents. + def since(time = ::Time.current) + sum(1, time) + end + alias :from_now :since + alias :after :since + + # Calculates a new Time or Date that is as far in the past + # as this Duration represents. + def ago(time = ::Time.current) + sum(-1, time) + end + alias :until :ago + alias :before :ago + + def inspect # :nodoc: + return "#{value} seconds" if @parts.empty? + + @parts. + sort_by { |unit, _ | PARTS.index(unit) }. + map { |unit, val| "#{val} #{val == 1 ? unit.to_s.chop : unit.to_s}" }. + to_sentence(locale: false) + end + + def as_json(options = nil) # :nodoc: + to_i + end + + def init_with(coder) # :nodoc: + initialize(coder["value"], coder["parts"]) + end + + def encode_with(coder) # :nodoc: + coder.map = { "value" => @value, "parts" => @parts } + end + + # Build ISO 8601 Duration string for this duration. + # The +precision+ parameter can be used to limit seconds' precision of duration. + def iso8601(precision: nil) + ISO8601Serializer.new(self, precision: precision).serialize + end + + def variable? # :nodoc: + @variable + end + + def _parts # :nodoc: + @parts + end + + private + def sum(sign, time = ::Time.current) + unless time.acts_like?(:time) || time.acts_like?(:date) + raise ::ArgumentError, "expected a time or date, got #{time.inspect}" + end + + if @parts.empty? 
+ time.since(sign * value) + else + @parts.inject(time) do |t, (type, number)| + if type == :seconds + t.since(sign * number) + elsif type == :minutes + t.since(sign * number * 60) + elsif type == :hours + t.since(sign * number * 3600) + else + t.advance(type => sign * number) + end + end + end + end + + def respond_to_missing?(method, _) + value.respond_to?(method) + end + + def method_missing(method, *args, &block) + value.public_send(method, *args, &block) + end + + def raise_type_error(other) + raise TypeError, "no implicit conversion of #{other.class} into #{self.class}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_parser.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_parser.rb new file mode 100644 index 0000000..839caab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_parser.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +require "strscan" + +module ActiveSupport + class Duration + # Parses a string formatted according to ISO 8601 Duration into the hash. + # + # See {ISO 8601}[https://en.wikipedia.org/wiki/ISO_8601#Durations] for more information. + # + # This parser allows negative parts to be present in pattern. + class ISO8601Parser # :nodoc: + class ParsingError < ::ArgumentError; end + + PERIOD_OR_COMMA = /\.|,/ + PERIOD = "." + COMMA = "," + + SIGN_MARKER = /\A-|\+|/ + DATE_MARKER = /P/ + TIME_MARKER = /T/ + DATE_COMPONENT = /(-?\d+(?:[.,]\d+)?)(Y|M|D|W)/ + TIME_COMPONENT = /(-?\d+(?:[.,]\d+)?)(H|M|S)/ + + DATE_TO_PART = { "Y" => :years, "M" => :months, "W" => :weeks, "D" => :days } + TIME_TO_PART = { "H" => :hours, "M" => :minutes, "S" => :seconds } + + DATE_COMPONENTS = [:years, :months, :days] + TIME_COMPONENTS = [:hours, :minutes, :seconds] + + attr_reader :parts, :scanner + attr_accessor :mode, :sign + + def initialize(string) + @scanner = StringScanner.new(string) + @parts = {} + @mode = :start + @sign = 1 + end + + def parse! + while !finished? + case mode + when :start + if scan(SIGN_MARKER) + self.sign = (scanner.matched == "-") ? -1 : 1 + self.mode = :sign + else + raise_parsing_error + end + + when :sign + if scan(DATE_MARKER) + self.mode = :date + else + raise_parsing_error + end + + when :date + if scan(TIME_MARKER) + self.mode = :time + elsif scan(DATE_COMPONENT) + parts[DATE_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + when :time + if scan(TIME_COMPONENT) + parts[TIME_TO_PART[scanner[2]]] = number * sign + else + raise_parsing_error + end + + end + end + + validate! + parts + end + + private + def finished? + scanner.eos? + end + + # Parses number which can be a float with either comma or period. + def number + PERIOD_OR_COMMA.match?(scanner[1]) ? scanner[1].tr(COMMA, PERIOD).to_f : scanner[1].to_i + end + + def scan(pattern) + scanner.scan(pattern) + end + + def raise_parsing_error(reason = nil) + raise ParsingError, "Invalid ISO 8601 duration: #{scanner.string.inspect} #{reason}".strip + end + + # Checks for various semantic errors as stated in ISO 8601 standard. + def validate! + raise_parsing_error("is empty duration") if parts.empty? + + # Mixing any of Y, M, D with W is invalid. + if parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? + raise_parsing_error("mixing weeks with other date parts not allowed") + end + + # Specifying an empty T part is invalid. + if mode == :time && (parts.keys & TIME_COMPONENTS).empty? 
+ raise_parsing_error("time part marker is present but time part is empty") + end + + fractions = parts.values.reject(&:zero?).select { |a| (a % 1) != 0 } + unless fractions.empty? || (fractions.size == 1 && fractions.last == @parts.values.reject(&:zero?).last) + raise_parsing_error "(only last part can be fractional)" + end + + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_serializer.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_serializer.rb new file mode 100644 index 0000000..9353c64 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/duration/iso8601_serializer.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + class Duration + # Serializes duration to string according to ISO 8601 Duration format. + class ISO8601Serializer # :nodoc: + DATE_COMPONENTS = %i(years months days) + + def initialize(duration, precision: nil) + @duration = duration + @precision = precision + end + + # Builds and returns output string. + def serialize + parts = normalize + return "PT0S" if parts.empty? + + output = +"P" + output << "#{parts[:years]}Y" if parts.key?(:years) + output << "#{parts[:months]}M" if parts.key?(:months) + output << "#{parts[:days]}D" if parts.key?(:days) + output << "#{parts[:weeks]}W" if parts.key?(:weeks) + time = +"" + time << "#{parts[:hours]}H" if parts.key?(:hours) + time << "#{parts[:minutes]}M" if parts.key?(:minutes) + if parts.key?(:seconds) + time << "#{format_seconds(parts[:seconds])}S" + end + output << "T#{time}" unless time.empty? + output + end + + private + # Return pair of duration's parts and whole duration sign. + # Parts are summarized (as they can become repetitive due to addition, etc). + # Zero parts are removed as not significant. + # If all parts are negative it will negate all of them and return minus as a sign. + def normalize + parts = @duration.parts.each_with_object(Hash.new(0)) do |(k, v), p| + p[k] += v unless v.zero? + end + + # Convert weeks to days and remove weeks if mixed with date parts + if week_mixed_with_date?(parts) + parts[:days] += parts.delete(:weeks) * SECONDS_PER_WEEK / SECONDS_PER_DAY + end + + parts + end + + def week_mixed_with_date?(parts) + parts.key?(:weeks) && (parts.keys & DATE_COMPONENTS).any? 
+ end + + def format_seconds(seconds) + if @precision + sprintf("%0.0#{@precision}f", seconds) + else + seconds.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_configuration.rb new file mode 100644 index 0000000..3382fe7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_configuration.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +require "yaml" +require "active_support/encrypted_file" +require "active_support/ordered_options" +require "active_support/core_ext/object/inclusion" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class EncryptedConfiguration < EncryptedFile + delegate :[], :fetch, to: :config + delegate_missing_to :options + + def initialize(config_path:, key_path:, env_key:, raise_if_missing_key:) + super content_path: config_path, key_path: key_path, + env_key: env_key, raise_if_missing_key: raise_if_missing_key + end + + # Allow a config to be started without a file present + def read + super + rescue ActiveSupport::EncryptedFile::MissingContentError + "" + end + + def write(contents) + deserialize(contents) + + super + end + + def config + @config ||= deserialize(read).deep_symbolize_keys + end + + private + def deep_transform(hash) + return hash unless hash.is_a?(Hash) + + h = ActiveSupport::InheritableOptions.new + hash.each do |k, v| + h[k] = deep_transform(v) + end + h + end + + def options + @options ||= ActiveSupport::InheritableOptions.new(deep_transform(config)) + end + + def deserialize(config) + doc = YAML.respond_to?(:unsafe_load) ? YAML.unsafe_load(config) : YAML.load(config) + doc.presence || {} + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_file.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_file.rb new file mode 100644 index 0000000..d2c9e62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/encrypted_file.rb @@ -0,0 +1,129 @@ +# frozen_string_literal: true + +require "pathname" +require "tmpdir" +require "active_support/message_encryptor" + +module ActiveSupport + class EncryptedFile + class MissingContentError < RuntimeError + def initialize(content_path) + super "Missing encrypted content file in #{content_path}." + end + end + + class MissingKeyError < RuntimeError + def initialize(key_path:, env_key:) + super \ + "Missing encryption key to decrypt file with. " + + "Ask your team for your master key and write it to #{key_path} or put it in the ENV['#{env_key}']." + end + end + + class InvalidKeyLengthError < RuntimeError + def initialize + super "Encryption key must be exactly #{EncryptedFile.expected_key_length} characters." + end + end + + CIPHER = "aes-128-gcm" + + def self.generate_key + SecureRandom.hex(ActiveSupport::MessageEncryptor.key_len(CIPHER)) + end + + def self.expected_key_length # :nodoc: + @expected_key_length ||= generate_key.length + end + + + attr_reader :content_path, :key_path, :env_key, :raise_if_missing_key + + def initialize(content_path:, key_path:, env_key:, raise_if_missing_key:) + @content_path = Pathname.new(content_path).yield_self { |path| path.symlink? ? 
path.realpath : path } + @key_path = Pathname.new(key_path) + @env_key, @raise_if_missing_key = env_key, raise_if_missing_key + end + + # Returns the encryption key, first trying the environment variable + # specified by +env_key+, then trying the key file specified by +key_path+. + # If +raise_if_missing_key+ is true, raises MissingKeyError if the + # environment variable is not set and the key file does not exist. + def key + read_env_key || read_key_file || handle_missing_key + end + + # Reads the file and returns the decrypted content. + # + # Raises: + # - MissingKeyError if the key is missing and +raise_if_missing_key+ is true. + # - MissingContentError if the encrypted file does not exist or otherwise + # if the key is missing. + # - ActiveSupport::MessageEncryptor::InvalidMessage if the content cannot be + # decrypted or verified. + def read + if !key.nil? && content_path.exist? + decrypt content_path.binread + else + raise MissingContentError, content_path + end + end + + def write(contents) + IO.binwrite "#{content_path}.tmp", encrypt(contents) + FileUtils.mv "#{content_path}.tmp", content_path + end + + def change(&block) + writing read, &block + end + + + private + def writing(contents) + tmp_file = "#{Process.pid}.#{content_path.basename.to_s.chomp('.enc')}" + tmp_path = Pathname.new File.join(Dir.tmpdir, tmp_file) + tmp_path.binwrite contents + + yield tmp_path + + updated_contents = tmp_path.binread + + write(updated_contents) if updated_contents != contents + ensure + FileUtils.rm(tmp_path) if tmp_path&.exist? + end + + + def encrypt(contents) + check_key_length + encryptor.encrypt_and_sign contents + end + + def decrypt(contents) + encryptor.decrypt_and_verify contents + end + + def encryptor + @encryptor ||= ActiveSupport::MessageEncryptor.new([ key ].pack("H*"), cipher: CIPHER) + end + + + def read_env_key + ENV[env_key].presence + end + + def read_key_file + return @key_file_contents if defined?(@key_file_contents) + @key_file_contents = (key_path.binread.strip if key_path.exist?) 
+ end + + def handle_missing_key + raise MissingKeyError.new(key_path: key_path, env_key: env_key) if raise_if_missing_key + end + + def check_key_length + raise InvalidKeyLengthError if key&.length != self.class.expected_key_length + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/environment_inquirer.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/environment_inquirer.rb new file mode 100644 index 0000000..770cddd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/environment_inquirer.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require "active_support/string_inquirer" + +module ActiveSupport + class EnvironmentInquirer < StringInquirer # :nodoc: + DEFAULT_ENVIRONMENTS = ["development", "test", "production"] + def initialize(env) + super(env) + + DEFAULT_ENVIRONMENTS.each do |default| + instance_variable_set :"@#{default}", env == default + end + end + + DEFAULT_ENVIRONMENTS.each do |env| + class_eval "def #{env}?; @#{env}; end" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/error_reporter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/error_reporter.rb new file mode 100644 index 0000000..8219a3e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/error_reporter.rb @@ -0,0 +1,117 @@ +# frozen_string_literal: true + +module ActiveSupport + # +ActiveSupport::ErrorReporter+ is a common interface for error reporting services. + # + # To rescue and report any unhandled error, you can use the +handle+ method: + # + # Rails.error.handle do + # do_something! + # end + # + # If an error is raised, it will be reported and swallowed. + # + # Alternatively if you want to report the error but not swallow it, you can use +record+ + # + # Rails.error.record do + # do_something! + # end + # + # Both methods can be restricted to only handle a specific exception class + # + # maybe_tags = Rails.error.handle(Redis::BaseError) { redis.get("tags") } + # + # You can also pass some extra context information that may be used by the error subscribers: + # + # Rails.error.handle(context: { section: "admin" }) do + # # ... + # end + # + # Additionally a +severity+ can be passed along to communicate how important the error report is. + # +severity+ can be one of +:error+, +:warning+, or +:info+. Handled errors default to the +:warning+ + # severity, and unhandled ones to +:error+. + # + # Both +handle+ and +record+ pass through the return value from the block. In the case of +handle+ + # rescuing an error, a fallback can be provided. The fallback must be a callable whose result will + # be returned when the block raises and is handled: + # + # user = Rails.error.handle(fallback: -> { User.anonymous }) do + # User.find_by(params) + # end + class ErrorReporter + SEVERITIES = %i(error warning info) + + attr_accessor :logger + + def initialize(*subscribers, logger: nil) + @subscribers = subscribers.flatten + @logger = logger + end + + # Report any unhandled exception, and swallow it. 
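+    # Returns the block's value, or, when an error was rescued, the result of
+    # calling +fallback+ (+nil+ if no fallback was given).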
+ # + # Rails.error.handle do + # 1 + '1' + # end + # + def handle(error_class = StandardError, severity: :warning, context: {}, fallback: nil) + yield + rescue error_class => error + report(error, handled: true, severity: severity, context: context) + fallback.call if fallback + end + + def record(error_class = StandardError, severity: :error, context: {}) + yield + rescue error_class => error + report(error, handled: false, severity: severity, context: context) + raise + end + + # Register a new error subscriber. The subscriber must respond to + # + # report(Exception, handled: Boolean, context: Hash) + # + # The +report+ method +should+ never raise an error. + def subscribe(subscriber) + unless subscriber.respond_to?(:report) + raise ArgumentError, "Error subscribers must respond to #report" + end + @subscribers << subscriber + end + + # Update the execution context that is accessible to error subscribers + # + # Rails.error.set_context(section: "checkout", user_id: @user.id) + # + # See +ActiveSupport::ExecutionContext.set+ + def set_context(...) + ActiveSupport::ExecutionContext.set(...) + end + + # When the block based +handle+ and +record+ methods are not suitable, you can directly use +report+ + # + # Rails.error.report(error, handled: true) + def report(error, handled:, severity: handled ? :warning : :error, context: {}) + unless SEVERITIES.include?(severity) + raise ArgumentError, "severity must be one of #{SEVERITIES.map(&:inspect).join(", ")}, got: #{severity.inspect}" + end + + full_context = ActiveSupport::ExecutionContext.to_h.merge(context) + @subscribers.each do |subscriber| + subscriber.report(error, handled: handled, severity: severity, context: full_context) + rescue => subscriber_error + if logger + logger.fatal( + "Error subscriber raised an error: #{subscriber_error.message} (#{subscriber_error.class})\n" + + subscriber_error.backtrace.join("\n") + ) + else + raise + end + end + + nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/evented_file_update_checker.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/evented_file_update_checker.rb new file mode 100644 index 0000000..bca31f4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/evented_file_update_checker.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true + +require "set" +require "pathname" +require "concurrent/atomic/atomic_boolean" +require "listen" +require "active_support/fork_tracker" + +module ActiveSupport + # Allows you to "listen" to changes in a file system. + # The evented file updater does not hit disk when checking for updates. + # Instead, it uses platform-specific file system events to trigger a change + # in state. + # + # The file checker takes an array of files to watch or a hash specifying directories + # and file extensions to watch. It also takes a block that is called when + # EventedFileUpdateChecker#execute is run or when EventedFileUpdateChecker#execute_if_updated + # is run and there have been changes to the file system. + # + # Example: + # + # checker = ActiveSupport::EventedFileUpdateChecker.new(["/tmp/foo"]) { puts "changed" } + # checker.updated? + # # => false + # checker.execute_if_updated + # # => nil + # + # FileUtils.touch("/tmp/foo") + # + # checker.updated? 
+ # # => true + # checker.execute_if_updated + # # => "changed" + # + class EventedFileUpdateChecker # :nodoc: all + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize an EventedFileUpdateChecker" + end + + @block = block + @core = Core.new(files, dirs) + ObjectSpace.define_finalizer(self, @core.finalizer) + end + + def updated? + if @core.restart? + @core.thread_safely(&:restart) + @core.updated.make_true + end + + @core.updated.true? + end + + def execute + @core.updated.make_false + @block.call + end + + def execute_if_updated + if updated? + yield if block_given? + execute + true + end + end + + class Core + attr_reader :updated + + def initialize(files, dirs) + @files = files.map { |file| Pathname(file).expand_path }.to_set + + @dirs = dirs.each_with_object({}) do |(dir, exts), hash| + hash[Pathname(dir).expand_path] = Array(exts).map { |ext| ext.to_s.sub(/\A\.?/, ".") }.to_set + end + + @common_path = common_path(@dirs.keys) + + @dtw = directories_to_watch + @missing = [] + + @updated = Concurrent::AtomicBoolean.new(false) + @mutex = Mutex.new + + start + @after_fork = ActiveSupport::ForkTracker.after_fork { start } + end + + def finalizer + proc do + stop + ActiveSupport::ForkTracker.unregister(@after_fork) + end + end + + def thread_safely + @mutex.synchronize do + yield self + end + end + + def start + normalize_dirs! + @dtw, @missing = [*@dtw, *@missing].partition(&:exist?) + @listener = @dtw.any? ? Listen.to(*@dtw, &method(:changed)) : nil + @listener&.start + end + + def stop + @listener&.stop + end + + def restart + stop + start + end + + def restart? + @missing.any?(&:exist?) + end + + def normalize_dirs! + @dirs.transform_keys! do |dir| + dir.exist? ? dir.realpath : dir + end + end + + def changed(modified, added, removed) + unless @updated.true? + @updated.make_true if (modified + added + removed).any? { |f| watching?(f) } + end + end + + def watching?(file) + file = Pathname(file) + + if @files.member?(file) + true + elsif file.directory? + false + else + ext = file.extname + + file.dirname.ascend do |dir| + matching = @dirs[dir] + + if matching && (matching.empty? || matching.include?(ext)) + break true + elsif dir == @common_path || dir.root? + break false + end + end + end + end + + def directories_to_watch + dtw = @dirs.keys | @files.map(&:dirname) + accounted_for = dtw.to_set + Gem.path.map { |path| Pathname(path) } + dtw.reject { |dir| dir.ascend.drop(1).any? { |parent| accounted_for.include?(parent) } } + end + + def common_path(paths) + paths.map { |path| path.ascend.to_a }.reduce(&:&)&.first + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context.rb new file mode 100644 index 0000000..1c95188 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +module ActiveSupport + module ExecutionContext # :nodoc: + @after_change_callbacks = [] + class << self + def after_change(&block) + @after_change_callbacks << block + end + + # Updates the execution context. If a block is given, it resets the provided keys to their + # previous value once the block exits. + def set(**options) + options.symbolize_keys! 
+ keys = options.keys + + store = self.store + + previous_context = keys.zip(store.values_at(*keys)).to_h + + store.merge!(options) + @after_change_callbacks.each(&:call) + + if block_given? + begin + yield + ensure + store.merge!(previous_context) + @after_change_callbacks.each(&:call) + end + end + end + + def []=(key, value) + store[key.to_sym] = value + @after_change_callbacks.each(&:call) + end + + def to_h + store.dup + end + + def clear + store.clear + end + + private + def store + IsolatedExecutionState[:active_support_execution_context] ||= {} + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context/test_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context/test_helper.rb new file mode 100644 index 0000000..ae8c43e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_context/test_helper.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +module ActiveSupport::ExecutionContext::TestHelper # :nodoc: + def before_setup + ActiveSupport::ExecutionContext.clear + super + end + + def after_teardown + super + ActiveSupport::ExecutionContext.clear + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_wrapper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_wrapper.rb new file mode 100644 index 0000000..5a4a9b2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/execution_wrapper.rb @@ -0,0 +1,151 @@ +# frozen_string_literal: true + +require "active_support/error_reporter" +require "active_support/callbacks" +require "concurrent/hash" + +module ActiveSupport + class ExecutionWrapper + include ActiveSupport::Callbacks + + Null = Object.new # :nodoc: + def Null.complete! # :nodoc: + end + + define_callbacks :run + define_callbacks :complete + + def self.to_run(*args, &block) + set_callback(:run, *args, &block) + end + + def self.to_complete(*args, &block) + set_callback(:complete, *args, &block) + end + + RunHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + hook_state[hook] = hook.run + end + end + + CompleteHook = Struct.new(:hook) do # :nodoc: + def before(target) + hook_state = target.send(:hook_state) + if hook_state.key?(hook) + hook.complete hook_state[hook] + end + end + alias after before + end + + # Register an object to be invoked during both the +run+ and + # +complete+ steps. + # + # +hook.complete+ will be passed the value returned from +hook.run+, + # and will only be invoked if +run+ has previously been called. + # (Mostly, this means it won't be invoked if an exception occurs in + # a preceding +to_run+ block; all ordinary +to_complete+ blocks are + # invoked in that situation.) + def self.register_hook(hook, outer: false) + if outer + to_run RunHook.new(hook), prepend: true + to_complete :after, CompleteHook.new(hook) + else + to_run RunHook.new(hook) + to_complete CompleteHook.new(hook) + end + end + + # Run this execution. + # + # Returns an instance, whose +complete!+ method *must* be invoked + # after the work has been performed. + # + # Where possible, prefer +wrap+. + def self.run!(reset: false) + if reset + lost_instance = IsolatedExecutionState.delete(active_key) + lost_instance&.complete! + else + return Null if active? + end + + new.tap do |instance| + success = nil + begin + instance.run! 
+ success = true + ensure + instance.complete! unless success + end + end + end + + # Perform the work in the supplied block as an execution. + def self.wrap + return yield if active? + + instance = run! + begin + yield + rescue => error + error_reporter.report(error, handled: false) + raise + ensure + instance.complete! + end + end + + def self.perform # :nodoc: + instance = new + instance.run + begin + yield + ensure + instance.complete + end + end + + def self.error_reporter + @error_reporter ||= ActiveSupport::ErrorReporter.new + end + + def self.active_key # :nodoc: + @active_key ||= :"active_execution_wrapper_#{object_id}" + end + + def self.active? # :nodoc: + IsolatedExecutionState.key?(active_key) + end + + def run! # :nodoc: + IsolatedExecutionState[self.class.active_key] = self + run + end + + def run # :nodoc: + run_callbacks(:run) + end + + # Complete this in-flight execution. This method *must* be called + # exactly once on the result of any call to +run!+. + # + # Where possible, prefer +wrap+. + def complete! + complete + ensure + IsolatedExecutionState.delete(self.class.active_key) + end + + def complete # :nodoc: + run_callbacks(:complete) + end + + private + def hook_state + @_hook_state ||= {} + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor.rb new file mode 100644 index 0000000..ce391b0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" + +module ActiveSupport + class Executor < ExecutionWrapper + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor/test_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor/test_helper.rb new file mode 100644 index 0000000..97f489d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/executor/test_helper.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module ActiveSupport::Executor::TestHelper # :nodoc: + def run(...) + Rails.application.executor.perform { super } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/file_update_checker.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/file_update_checker.rb new file mode 100644 index 0000000..9b665e7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/file_update_checker.rb @@ -0,0 +1,162 @@ +# frozen_string_literal: true + +require "active_support/core_ext/time/calculations" + +module ActiveSupport + # FileUpdateChecker specifies the API used by Rails to watch files + # and control reloading. The API depends on four methods: + # + # * +initialize+ which expects two parameters and one block as + # described below. + # + # * +updated?+ which returns a boolean if there were updates in + # the filesystem or not. + # + # * +execute+ which executes the given block on initialization + # and updates the latest watched files and timestamp. + # + # * +execute_if_updated+ which just executes the block if it was updated. + # + # After initialization, a call to +execute_if_updated+ must execute + # the block only if there was really a change in the filesystem. + # + # This class is used by Rails to reload the I18n framework whenever + # they are changed upon a new request. 
+ # + # i18n_reloader = ActiveSupport::FileUpdateChecker.new(paths) do + # I18n.reload! + # end + # + # ActiveSupport::Reloader.to_prepare do + # i18n_reloader.execute_if_updated + # end + class FileUpdateChecker + # It accepts two parameters on initialization. The first is an array + # of files and the second is an optional hash of directories. The hash must + # have directories as keys and the value is an array of extensions to be + # watched under that directory. + # + # This method must also receive a block that will be called once a path + # changes. The array of files and list of directories cannot be changed + # after FileUpdateChecker has been initialized. + def initialize(files, dirs = {}, &block) + unless block + raise ArgumentError, "A block is required to initialize a FileUpdateChecker" + end + + @files = files.freeze + @glob = compile_glob(dirs) + @block = block + + @watched = nil + @updated_at = nil + + @last_watched = watched + @last_update_at = updated_at(@last_watched) + end + + # Check if any of the entries were updated. If so, the watched and/or + # updated_at values are cached until the block is executed via +execute+ + # or +execute_if_updated+. + def updated? + current_watched = watched + if @last_watched.size != current_watched.size + @watched = current_watched + true + else + current_updated_at = updated_at(current_watched) + if @last_update_at < current_updated_at + @watched = current_watched + @updated_at = current_updated_at + true + else + false + end + end + end + + # Executes the given block and updates the latest watched files and + # timestamp. + def execute + @last_watched = watched + @last_update_at = updated_at(@last_watched) + @block.call + ensure + @watched = nil + @updated_at = nil + end + + # Execute the block given if updated. + def execute_if_updated + if updated? + yield if block_given? + execute + true + else + false + end + end + + private + def watched + @watched || begin + all = @files.select { |f| File.exist?(f) } + all.concat(Dir[@glob]) if @glob + all + end + end + + def updated_at(paths) + @updated_at || max_mtime(paths) || Time.at(0) + end + + # This method returns the maximum mtime of the files in +paths+, or +nil+ + # if the array is empty. + # + # Files with a mtime in the future are ignored. Such abnormal situation + # can happen for example if the user changes the clock by hand. It is + # healthy to consider this edge case because with mtimes in the future + # reloading is not triggered. + def max_mtime(paths) + time_now = Time.now + max_mtime = nil + + # Time comparisons are performed with #compare_without_coercion because + # AS redefines these operators in a way that is much slower and does not + # bring any benefit in this particular code. + # + # Read t1.compare_without_coercion(t2) < 0 as t1 < t2. + paths.each do |path| + mtime = File.mtime(path) + + next if time_now.compare_without_coercion(mtime) < 0 + + if max_mtime.nil? || max_mtime.compare_without_coercion(mtime) < 0 + max_mtime = mtime + end + end + + max_mtime + end + + def compile_glob(hash) + hash.freeze # Freeze so changes aren't accidentally pushed + return if hash.empty? + + globs = hash.map do |key, value| + "#{escape(key)}/**/*#{compile_ext(value)}" + end + "{#{globs.join(",")}}" + end + + def escape(key) + key.gsub(",", '\,') + end + + def compile_ext(array) + array = Array(array) + return if array.empty? 
+ ".{#{array.join(",")}}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/fork_tracker.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/fork_tracker.rb new file mode 100644 index 0000000..bc1d32b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/fork_tracker.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +module ActiveSupport + module ForkTracker # :nodoc: + module ModernCoreExt + def _fork + pid = super + if pid == 0 + ForkTracker.check! + end + pid + end + end + + module CoreExt + def fork(...) + if block_given? + super do + ForkTracker.check! + yield + end + else + unless pid = super + ForkTracker.check! + end + pid + end + end + end + + module CoreExtPrivate + include CoreExt + private :fork + end + + @pid = Process.pid + @callbacks = [] + + class << self + def check! + new_pid = Process.pid + if @pid != new_pid + @callbacks.each(&:call) + @pid = new_pid + end + end + + def hook! + if Process.respond_to?(:_fork) # Ruby 3.1+ + ::Process.singleton_class.prepend(ModernCoreExt) + elsif Process.respond_to?(:fork) + ::Object.prepend(CoreExtPrivate) if RUBY_VERSION < "3.0" + ::Kernel.prepend(CoreExtPrivate) + ::Kernel.singleton_class.prepend(CoreExt) + ::Process.singleton_class.prepend(CoreExt) + end + end + + def after_fork(&block) + @callbacks << block + block + end + + def unregister(callback) + @callbacks.delete(callback) + end + end + end +end + +ActiveSupport::ForkTracker.hook! diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gem_version.rb new file mode 100644 index 0000000..0ff6eb2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gem_version.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module ActiveSupport + # Returns the currently loaded version of Active Support as a Gem::Version. + def self.gem_version + Gem::Version.new VERSION::STRING + end + + module VERSION + MAJOR = 7 + MINOR = 0 + TINY = 4 + PRE = "3" + + STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".") + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gzip.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gzip.rb new file mode 100644 index 0000000..7ffa6d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/gzip.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "zlib" +require "stringio" + +module ActiveSupport + # A convenient wrapper for the zlib standard library that allows + # compression/decompression of strings with gzip. + # + # gzip = ActiveSupport::Gzip.compress('compress me!') + # # => "\x1F\x8B\b\x00o\x8D\xCDO\x00\x03K\xCE\xCF-(J-.V\xC8MU\x04\x00R>n\x83\f\x00\x00\x00" + # + # ActiveSupport::Gzip.decompress(gzip) + # # => "compress me!" + module Gzip + class Stream < StringIO + def initialize(*) + super + set_encoding "BINARY" + end + def close; rewind; end + end + + # Decompresses a gzipped string. + def self.decompress(source) + Zlib::GzipReader.wrap(StringIO.new(source), &:read) + end + + # Compresses a string using gzip. 
+ def self.compress(source, level = Zlib::DEFAULT_COMPRESSION, strategy = Zlib::DEFAULT_STRATEGY) + output = Stream.new + gz = Zlib::GzipWriter.new(output, level, strategy) + gz.write(source) + gz.close + output.string + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/hash_with_indifferent_access.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/hash_with_indifferent_access.rb new file mode 100644 index 0000000..aed884f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/hash_with_indifferent_access.rb @@ -0,0 +1,425 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/keys" +require "active_support/core_ext/hash/reverse_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" + +module ActiveSupport + # Implements a hash where keys :foo and "foo" are considered + # to be the same. + # + # rgb = ActiveSupport::HashWithIndifferentAccess.new + # + # rgb[:black] = '#000000' + # rgb[:black] # => '#000000' + # rgb['black'] # => '#000000' + # + # rgb['white'] = '#FFFFFF' + # rgb[:white] # => '#FFFFFF' + # rgb['white'] # => '#FFFFFF' + # + # Internally symbols are mapped to strings when used as keys in the entire + # writing interface (calling []=, merge, etc). This + # mapping belongs to the public interface. For example, given: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # + # You are guaranteed that the key is returned as a string: + # + # hash.keys # => ["a"] + # + # Technically other types of keys are accepted: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1) + # hash[0] = 0 + # hash # => {"a"=>1, 0=>0} + # + # but this class is intended for use cases where strings or symbols are the + # expected keys and it is convenient to understand both as the same. For + # example the +params+ hash in Ruby on Rails. + # + # Note that core extensions define Hash#with_indifferent_access: + # + # rgb = { black: '#000000', white: '#FFFFFF' }.with_indifferent_access + # + # which may be handy. + # + # To access this class outside of Rails, require the core extension with: + # + # require "active_support/core_ext/hash/indifferent_access" + # + # which will, in turn, require this file. + class HashWithIndifferentAccess < Hash + # Returns +true+ so that Array#extract_options! finds members of + # this class. + def extractable_options? + true + end + + def with_indifferent_access + dup + end + + def nested_under_indifferent_access + self + end + + def initialize(constructor = nil) + if constructor.respond_to?(:to_hash) + super() + update(constructor) + + hash = constructor.is_a?(Hash) ? constructor : constructor.to_hash + self.default = hash.default if hash.default + self.default_proc = hash.default_proc if hash.default_proc + elsif constructor.nil? + super() + else + super(constructor) + end + end + + def self.[](*args) + new.merge!(Hash[*args]) + end + + alias_method :regular_writer, :[]= unless method_defined?(:regular_writer) + alias_method :regular_update, :update unless method_defined?(:regular_update) + + # Assigns a new value to the hash: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:key] = 'value' + # + # This value can be later fetched using either +:key+ or 'key'. 
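+    # For example:
+    #
+    #   hash[:key]  # => "value"
+    #   hash['key'] # => "value"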
+ def []=(key, value) + regular_writer(convert_key(key), convert_value(value, conversion: :assignment)) + end + + alias_method :store, :[]= + + # Updates the receiver in-place, merging in the hashes passed as arguments: + # + # hash_1 = ActiveSupport::HashWithIndifferentAccess.new + # hash_1[:key] = 'value' + # + # hash_2 = ActiveSupport::HashWithIndifferentAccess.new + # hash_2[:key] = 'New Value!' + # + # hash_1.update(hash_2) # => {"key"=>"New Value!"} + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash.update({ "a" => 1 }, { "b" => 2 }) # => { "a" => 1, "b" => 2 } + # + # The arguments can be either an + # ActiveSupport::HashWithIndifferentAccess or a regular +Hash+. + # In either case the merge respects the semantics of indifferent access. + # + # If the argument is a regular hash with keys +:key+ and "key" only one + # of the values end up in the receiver, but which one is unspecified. + # + # When given a block, the value for duplicated keys will be determined + # by the result of invoking the block with the duplicated key, the value + # in the receiver, and the value in +other_hash+. The rules for duplicated + # keys follow the semantics of indifferent access: + # + # hash_1[:key] = 10 + # hash_2['key'] = 12 + # hash_1.update(hash_2) { |key, old, new| old + new } # => {"key"=>22} + def update(*other_hashes, &block) + if other_hashes.size == 1 + update_with_single_argument(other_hashes.first, block) + else + other_hashes.each do |other_hash| + update_with_single_argument(other_hash, block) + end + end + self + end + + alias_method :merge!, :update + + # Checks the hash for a key matching the argument passed in: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['key'] = 'value' + # hash.key?(:key) # => true + # hash.key?('key') # => true + def key?(key) + super(convert_key(key)) + end + + alias_method :include?, :key? + alias_method :has_key?, :key? + alias_method :member?, :key? 
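+
+    # The +include?+, +has_key?+ and +member?+ aliases are indifferent in the
+    # same way:
+    #
+    #   hash.include?('key') # => true
+    #   hash.member?(:key)   # => true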
+ + # Same as Hash#[] where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters['foo'] # => 1 + # counters[:foo] # => 1 + # counters[:zoo] # => nil + def [](key) + super(convert_key(key)) + end + + # Same as Hash#assoc where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.assoc('foo') # => ["foo", 1] + # counters.assoc(:foo) # => ["foo", 1] + # counters.assoc(:zoo) # => nil + def assoc(key) + super(convert_key(key)) + end + + # Same as Hash#fetch where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = 1 + # + # counters.fetch('foo') # => 1 + # counters.fetch(:bar, 0) # => 0 + # counters.fetch(:bar) { |key| 0 } # => 0 + # counters.fetch(:zoo) # => KeyError: key not found: "zoo" + def fetch(key, *extras) + super(convert_key(key), *extras) + end + + # Same as Hash#dig where the key passed as argument can be + # either a string or a symbol: + # + # counters = ActiveSupport::HashWithIndifferentAccess.new + # counters[:foo] = { bar: 1 } + # + # counters.dig('foo', 'bar') # => 1 + # counters.dig(:foo, :bar) # => 1 + # counters.dig(:zoo) # => nil + def dig(*args) + args[0] = convert_key(args[0]) if args.size > 0 + super(*args) + end + + # Same as Hash#default where the key passed as argument can be + # either a string or a symbol: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new(1) + # hash.default # => 1 + # + # hash = ActiveSupport::HashWithIndifferentAccess.new { |hash, key| key } + # hash.default # => nil + # hash.default('foo') # => 'foo' + # hash.default(:foo) # => 'foo' + def default(*args) + super(*args.map { |arg| convert_key(arg) }) + end + + # Returns an array of the values at the specified indices: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.values_at('a', 'b') # => ["x", "y"] + def values_at(*keys) + super(*keys.map { |key| convert_key(key) }) + end + + # Returns an array of the values at the specified indices, but also + # raises an exception when one of the keys can't be found. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash[:a] = 'x' + # hash[:b] = 'y' + # hash.fetch_values('a', 'b') # => ["x", "y"] + # hash.fetch_values('a', 'c') { |key| 'z' } # => ["x", "z"] + # hash.fetch_values('a', 'c') # => KeyError: key not found: "c" + def fetch_values(*indices, &block) + super(*indices.map { |key| convert_key(key) }, &block) + end + + # Returns a shallow copy of the hash. + # + # hash = ActiveSupport::HashWithIndifferentAccess.new({ a: { b: 'b' } }) + # dup = hash.dup + # dup[:a][:c] = 'c' + # + # hash[:a][:c] # => "c" + # dup[:a][:c] # => "c" + def dup + self.class.new(self).tap do |new_hash| + set_defaults(new_hash) + end + end + + # This method has the same semantics of +update+, except it does not + # modify the receiver but rather returns a new hash with indifferent + # access with the result of the merge. 
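+    #
+    # For example (the receiver is left unmodified):
+    #
+    #   hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1)
+    #   hash.merge(b: 2, 'a' => 3) # => {"a"=>3, "b"=>2}
+    #   hash                       # => {"a"=>1}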
+ def merge(*hashes, &block) + dup.update(*hashes, &block) + end + + # Like +merge+ but the other way around: Merges the receiver into the + # argument and returns a new hash with indifferent access as result: + # + # hash = ActiveSupport::HashWithIndifferentAccess.new + # hash['a'] = nil + # hash.reverse_merge(a: 0, b: 1) # => {"a"=>nil, "b"=>1} + def reverse_merge(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults, :reverse_merge + + # Same semantics as +reverse_merge+ but modifies the receiver in-place. + def reverse_merge!(other_hash) + super(self.class.new(other_hash)) + end + alias_method :with_defaults!, :reverse_merge! + + # Replaces the contents of this hash with other_hash. + # + # h = { "a" => 100, "b" => 200 } + # h.replace({ "c" => 300, "d" => 400 }) # => {"c"=>300, "d"=>400} + def replace(other_hash) + super(self.class.new(other_hash)) + end + + # Removes the specified key from the hash. + def delete(key) + super(convert_key(key)) + end + + # Returns a hash with indifferent access that includes everything except given keys. + # hash = { a: "x", b: "y", c: 10 }.with_indifferent_access + # hash.except(:a, "b") # => {c: 10}.with_indifferent_access + # hash # => { a: "x", b: "y", c: 10 }.with_indifferent_access + def except(*keys) + slice(*self.keys - keys.map { |key| convert_key(key) }) + end + alias_method :without, :except + + def stringify_keys!; self end + def deep_stringify_keys!; self end + def stringify_keys; dup end + def deep_stringify_keys; dup end + undef :symbolize_keys! + undef :deep_symbolize_keys! + def symbolize_keys; to_hash.symbolize_keys! end + alias_method :to_options, :symbolize_keys + def deep_symbolize_keys; to_hash.deep_symbolize_keys! end + def to_options!; self end + + def select(*args, &block) + return to_enum(:select) unless block_given? + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + return to_enum(:reject) unless block_given? + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def transform_values(*args, &block) + return to_enum(:transform_values) unless block_given? + dup.tap { |hash| hash.transform_values!(*args, &block) } + end + + def transform_keys(*args, &block) + return to_enum(:transform_keys) unless block_given? + dup.tap { |hash| hash.transform_keys!(*args, &block) } + end + + def transform_keys! + return enum_for(:transform_keys!) { size } unless block_given? + keys.each do |key| + self[yield(key)] = delete(key) + end + self + end + + def slice(*keys) + keys.map! { |key| convert_key(key) } + self.class.new(super) + end + + def slice!(*keys) + keys.map! { |key| convert_key(key) } + super + end + + def compact + dup.tap(&:compact!) + end + + # Convert to a regular hash with string keys. + def to_hash + _new_hash = Hash.new + set_defaults(_new_hash) + + each do |key, value| + _new_hash[key] = convert_value(value, conversion: :to_hash) + end + _new_hash + end + + private + if Symbol.method_defined?(:name) + def convert_key(key) + key.kind_of?(Symbol) ? key.name : key + end + else + def convert_key(key) + key.kind_of?(Symbol) ? key.to_s : key + end + end + + def convert_value(value, conversion: nil) + if value.is_a? Hash + if conversion == :to_hash + value.to_hash + else + value.nested_under_indifferent_access + end + elsif value.is_a?(Array) + if conversion != :assignment || value.frozen? + value = value.dup + end + value.map! 
{ |e| convert_value(e, conversion: conversion) } + else + value + end + end + + def set_defaults(target) + if default_proc + target.default_proc = default_proc.dup + else + target.default = default + end + end + + def update_with_single_argument(other_hash, block) + if other_hash.is_a? HashWithIndifferentAccess + regular_update(other_hash, &block) + else + other_hash.to_hash.each_pair do |key, value| + if block && key?(key) + value = block.call(convert_key(key), self[key], value) + end + regular_writer(convert_key(key), convert_value(value)) + end + end + end + end +end + +# :stopdoc: + +HashWithIndifferentAccess = ActiveSupport::HashWithIndifferentAccess diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/html_safe_translation.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/html_safe_translation.rb new file mode 100644 index 0000000..2d06a0d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/html_safe_translation.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module ActiveSupport + module HtmlSafeTranslation # :nodoc: + extend self + + def translate(key, **options) + if html_safe_translation_key?(key) + html_safe_options = html_escape_translation_options(options) + translation = I18n.translate(key, **html_safe_options) + html_safe_translation(translation) + else + I18n.translate(key, **options) + end + end + + private + def html_safe_translation_key?(key) + /(?:_|\b)html\z/.match?(key) + end + + def html_escape_translation_options(options) + options.each do |name, value| + unless i18n_option?(name) || (name == :count && value.is_a?(Numeric)) + options[name] = ERB::Util.html_escape(value.to_s) + end + end + end + + def i18n_option?(name) + (@i18n_option_names ||= I18n::RESERVED_KEYS.to_set).include?(name) + end + + + def html_safe_translation(translation) + if translation.respond_to?(:map) + translation.map { |element| element.respond_to?(:html_safe) ? element.html_safe : element } + else + translation.respond_to?(:html_safe) ? translation.html_safe : translation + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n.rb new file mode 100644 index 0000000..832a9fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" +require "active_support/core_ext/hash/except" +require "active_support/core_ext/hash/slice" +begin + require "i18n" + require "i18n/backend/fallbacks" +rescue LoadError => e + $stderr.puts "The i18n gem is not available. 
Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/lazy_load_hooks" + +ActiveSupport.run_load_hooks(:i18n) +I18n.load_path << File.expand_path("locale/en.yml", __dir__) +I18n.load_path << File.expand_path("locale/en.rb", __dir__) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n_railtie.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n_railtie.rb new file mode 100644 index 0000000..1e7185e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/i18n_railtie.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/core_ext/array/wrap" + +# :enddoc: + +module I18n + class Railtie < Rails::Railtie + config.i18n = ActiveSupport::OrderedOptions.new + config.i18n.railties_load_path = [] + config.i18n.load_path = [] + config.i18n.fallbacks = ActiveSupport::OrderedOptions.new + + config.eager_load_namespaces << I18n + + # Set the i18n configuration after initialization since a lot of + # configuration is still usually done in application initializers. + config.after_initialize do |app| + I18n::Railtie.initialize_i18n(app) + end + + # Trigger i18n config before any eager loading has happened + # so it's ready if any classes require it when eager loaded. + config.before_eager_load do |app| + I18n::Railtie.initialize_i18n(app) + end + + @i18n_inited = false + + # Setup i18n configuration. + def self.initialize_i18n(app) + return if @i18n_inited + + fallbacks = app.config.i18n.delete(:fallbacks) + + # Avoid issues with setting the default_locale by disabling available locales + # check while configuring. + enforce_available_locales = app.config.i18n.delete(:enforce_available_locales) + enforce_available_locales = I18n.enforce_available_locales if enforce_available_locales.nil? + I18n.enforce_available_locales = false + + reloadable_paths = [] + app.config.i18n.each do |setting, value| + case setting + when :railties_load_path + reloadable_paths = value + app.config.i18n.load_path.unshift(*value.flat_map(&:existent)) + when :load_path + I18n.load_path += value + when :raise_on_missing_translations + forward_raise_on_missing_translations_config(app) + else + I18n.public_send("#{setting}=", value) + end + end + + init_fallbacks(fallbacks) if fallbacks && validate_fallbacks(fallbacks) + + # Restore available locales check so it will take place from now on. + I18n.enforce_available_locales = enforce_available_locales + + directories = watched_dirs_with_extensions(reloadable_paths) + reloader = app.config.file_watcher.new(I18n.load_path.dup, directories) do + I18n.load_path.keep_if { |p| File.exist?(p) } + I18n.load_path |= reloadable_paths.flat_map(&:existent) + end + + app.reloaders << reloader + app.reloader.to_run do + reloader.execute_if_updated { require_unload_lock! 
} + end + reloader.execute + + @i18n_inited = true + end + + def self.forward_raise_on_missing_translations_config(app) + ActiveSupport.on_load(:action_view) do + ActionView::Helpers::TranslationHelper.raise_on_missing_translations = app.config.i18n.raise_on_missing_translations + end + + ActiveSupport.on_load(:action_controller) do + AbstractController::Translation.raise_on_missing_translations = app.config.i18n.raise_on_missing_translations + end + end + + def self.include_fallbacks_module + I18n.backend.class.include(I18n::Backend::Fallbacks) + end + + def self.init_fallbacks(fallbacks) + include_fallbacks_module + + args = \ + case fallbacks + when ActiveSupport::OrderedOptions + [*(fallbacks[:defaults] || []) << fallbacks[:map]].compact + when Hash, Array + Array.wrap(fallbacks) + else # TrueClass + [I18n.default_locale] + end + + I18n.fallbacks = I18n::Locale::Fallbacks.new(*args) + end + + def self.validate_fallbacks(fallbacks) + case fallbacks + when ActiveSupport::OrderedOptions + !fallbacks.empty? + when TrueClass, Array, Hash + true + else + raise "Unexpected fallback type #{fallbacks.inspect}" + end + end + + def self.watched_dirs_with_extensions(paths) + paths.each_with_object({}) do |path, result| + result[path.absolute_current] = path.extensions + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflections.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflections.rb new file mode 100644 index 0000000..baf1cb3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflections.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "active_support/inflector/inflections" + +#-- +# Defines the standard inflection rules. These are the starting point for +# new projects and are not considered complete. The current set of inflection +# rules is frozen. This means, we do not change them to become more complete. +# This is a safety measure to keep existing applications from breaking. 
+#++ +module ActiveSupport + Inflector.inflections(:en) do |inflect| + inflect.plural(/$/, "s") + inflect.plural(/s$/i, "s") + inflect.plural(/^(ax|test)is$/i, '\1es') + inflect.plural(/(octop|vir)us$/i, '\1i') + inflect.plural(/(octop|vir)i$/i, '\1i') + inflect.plural(/(alias|status)$/i, '\1es') + inflect.plural(/(bu)s$/i, '\1ses') + inflect.plural(/(buffal|tomat)o$/i, '\1oes') + inflect.plural(/([ti])um$/i, '\1a') + inflect.plural(/([ti])a$/i, '\1a') + inflect.plural(/sis$/i, "ses") + inflect.plural(/(?:([^f])fe|([lr])f)$/i, '\1\2ves') + inflect.plural(/(hive)$/i, '\1s') + inflect.plural(/([^aeiouy]|qu)y$/i, '\1ies') + inflect.plural(/(x|ch|ss|sh)$/i, '\1es') + inflect.plural(/(matr|vert|ind)(?:ix|ex)$/i, '\1ices') + inflect.plural(/^(m|l)ouse$/i, '\1ice') + inflect.plural(/^(m|l)ice$/i, '\1ice') + inflect.plural(/^(ox)$/i, '\1en') + inflect.plural(/^(oxen)$/i, '\1') + inflect.plural(/(quiz)$/i, '\1zes') + + inflect.singular(/s$/i, "") + inflect.singular(/(ss)$/i, '\1') + inflect.singular(/(n)ews$/i, '\1ews') + inflect.singular(/([ti])a$/i, '\1um') + inflect.singular(/((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$/i, '\1sis') + inflect.singular(/(^analy)(sis|ses)$/i, '\1sis') + inflect.singular(/([^f])ves$/i, '\1fe') + inflect.singular(/(hive)s$/i, '\1') + inflect.singular(/(tive)s$/i, '\1') + inflect.singular(/([lr])ves$/i, '\1f') + inflect.singular(/([^aeiouy]|qu)ies$/i, '\1y') + inflect.singular(/(s)eries$/i, '\1eries') + inflect.singular(/(m)ovies$/i, '\1ovie') + inflect.singular(/(x|ch|ss|sh)es$/i, '\1') + inflect.singular(/^(m|l)ice$/i, '\1ouse') + inflect.singular(/(bus)(es)?$/i, '\1') + inflect.singular(/(o)es$/i, '\1') + inflect.singular(/(shoe)s$/i, '\1') + inflect.singular(/(cris|test)(is|es)$/i, '\1is') + inflect.singular(/^(a)x[ie]s$/i, '\1xis') + inflect.singular(/(octop|vir)(us|i)$/i, '\1us') + inflect.singular(/(alias|status)(es)?$/i, '\1') + inflect.singular(/^(ox)en/i, '\1') + inflect.singular(/(vert|ind)ices$/i, '\1ex') + inflect.singular(/(matr)ices$/i, '\1ix') + inflect.singular(/(quiz)zes$/i, '\1') + inflect.singular(/(database)s$/i, '\1') + + inflect.irregular("person", "people") + inflect.irregular("man", "men") + inflect.irregular("child", "children") + inflect.irregular("sex", "sexes") + inflect.irregular("move", "moves") + inflect.irregular("zombie", "zombies") + + inflect.uncountable(%w(equipment information rice money species series fish sheep jeans police)) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector.rb new file mode 100644 index 0000000..d77f04c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +# in case active_support/inflector is required without the rest of active_support +require "active_support/inflector/inflections" +require "active_support/inflector/transliterate" +require "active_support/inflector/methods" + +require "active_support/inflections" +require "active_support/core_ext/string/inflections" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/inflections.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/inflections.rb new file mode 100644 index 0000000..a9943a8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/inflections.rb @@ -0,0 +1,271 @@ +# 
frozen_string_literal: true + +require "concurrent/map" +require "active_support/i18n" + +module ActiveSupport + module Inflector + extend self + + # A singleton instance of this class is yielded by Inflector.inflections, + # which can then be used to specify additional inflection rules. If passed + # an optional locale, rules for other languages can be specified. The + # default locale is :en. Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.plural /^(ox)$/i, '\1\2en' + # inflect.singular /^(ox)en/i, '\1' + # + # inflect.irregular 'cactus', 'cacti' + # + # inflect.uncountable 'equipment' + # end + # + # New rules are added at the top. So in the example above, the irregular + # rule for cactus will now be the first of the pluralization and + # singularization rules that is runs. This guarantees that your rules run + # before any of the rules that may already have been loaded. + class Inflections + @__instance__ = Concurrent::Map.new + + class Uncountables < Array + def initialize + @regex_array = [] + super + end + + def delete(entry) + super entry + @regex_array.delete(to_regex(entry)) + end + + def <<(*word) + add(word) + end + + def add(words) + words = words.flatten.map(&:downcase) + concat(words) + @regex_array += words.map { |word| to_regex(word) } + self + end + + def uncountable?(str) + @regex_array.any? { |regex| regex.match? str } + end + + private + def to_regex(string) + /\b#{::Regexp.escape(string)}\Z/i + end + end + + def self.instance(locale = :en) + @__instance__[locale] ||= new + end + + def self.instance_or_fallback(locale) + I18n.fallbacks[locale].each do |k| + return @__instance__[k] if @__instance__.key?(k) + end + instance(locale) + end + + attr_reader :plurals, :singulars, :uncountables, :humans, :acronyms + + attr_reader :acronyms_camelize_regex, :acronyms_underscore_regex # :nodoc: + + def initialize + @plurals, @singulars, @uncountables, @humans, @acronyms = [], [], Uncountables.new, [], {} + define_acronym_regex_patterns + end + + # Private, for the test suite. + def initialize_dup(orig) # :nodoc: + %w(plurals singulars uncountables humans acronyms).each do |scope| + instance_variable_set("@#{scope}", orig.public_send(scope).dup) + end + define_acronym_regex_patterns + end + + # Specifies a new acronym. An acronym must be specified as it will appear + # in a camelized string. An underscore string that contains the acronym + # will retain the acronym when passed to +camelize+, +humanize+, or + # +titleize+. A camelized string that contains the acronym will maintain + # the acronym when titleized or humanized, and will convert the acronym + # into a non-delimited single lowercase word when passed to +underscore+. + # + # acronym 'HTML' + # titleize 'html' # => 'HTML' + # camelize 'html' # => 'HTML' + # underscore 'MyHTML' # => 'my_html' + # + # The acronym, however, must occur as a delimited unit and not be part of + # another word for conversions to recognize it: + # + # acronym 'HTTP' + # camelize 'my_http_delimited' # => 'MyHTTPDelimited' + # camelize 'https' # => 'Https', not 'HTTPs' + # underscore 'HTTPS' # => 'http_s', not 'https' + # + # acronym 'HTTPS' + # camelize 'https' # => 'HTTPS' + # underscore 'HTTPS' # => 'https' + # + # Note: Acronyms that are passed to +pluralize+ will no longer be + # recognized, since the acronym will not occur as a delimited unit in the + # pluralized result. 
To work around this, you must specify the pluralized + # form as an acronym as well: + # + # acronym 'API' + # camelize(pluralize('api')) # => 'Apis' + # + # acronym 'APIs' + # camelize(pluralize('api')) # => 'APIs' + # + # +acronym+ may be used to specify any word that contains an acronym or + # otherwise needs to maintain a non-standard capitalization. The only + # restriction is that the word must begin with a capital letter. + # + # acronym 'RESTful' + # underscore 'RESTful' # => 'restful' + # underscore 'RESTfulController' # => 'restful_controller' + # titleize 'RESTfulController' # => 'RESTful Controller' + # camelize 'restful' # => 'RESTful' + # camelize 'restful_controller' # => 'RESTfulController' + # + # acronym 'McDonald' + # underscore 'McDonald' # => 'mcdonald' + # camelize 'mcdonald' # => 'McDonald' + def acronym(word) + @acronyms[word.downcase] = word + define_acronym_regex_patterns + end + + # Specifies a new pluralization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def plural(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @plurals.prepend([rule, replacement]) + end + + # Specifies a new singularization rule and its replacement. The rule can + # either be a string or a regular expression. The replacement should + # always be a string that may include references to the matched data from + # the rule. + def singular(rule, replacement) + @uncountables.delete(rule) if rule.is_a?(String) + @uncountables.delete(replacement) + @singulars.prepend([rule, replacement]) + end + + # Specifies a new irregular that applies to both pluralization and + # singularization at the same time. This can only be used for strings, not + # regular expressions. You simply pass the irregular in singular and + # plural form. + # + # irregular 'cactus', 'cacti' + # irregular 'person', 'people' + def irregular(singular, plural) + @uncountables.delete(singular) + @uncountables.delete(plural) + + s0 = singular[0] + srest = singular[1..-1] + + p0 = plural[0] + prest = plural[1..-1] + + if s0.upcase == p0.upcase + plural(/(#{s0})#{srest}$/i, '\1' + prest) + plural(/(#{p0})#{prest}$/i, '\1' + prest) + + singular(/(#{s0})#{srest}$/i, '\1' + srest) + singular(/(#{p0})#{prest}$/i, '\1' + srest) + else + plural(/#{s0.upcase}(?i)#{srest}$/, p0.upcase + prest) + plural(/#{s0.downcase}(?i)#{srest}$/, p0.downcase + prest) + plural(/#{p0.upcase}(?i)#{prest}$/, p0.upcase + prest) + plural(/#{p0.downcase}(?i)#{prest}$/, p0.downcase + prest) + + singular(/#{s0.upcase}(?i)#{srest}$/, s0.upcase + srest) + singular(/#{s0.downcase}(?i)#{srest}$/, s0.downcase + srest) + singular(/#{p0.upcase}(?i)#{prest}$/, s0.upcase + srest) + singular(/#{p0.downcase}(?i)#{prest}$/, s0.downcase + srest) + end + end + + # Specifies words that are uncountable and should not be inflected. + # + # uncountable 'money' + # uncountable 'money', 'information' + # uncountable %w( money information rice ) + def uncountable(*words) + @uncountables.add(words) + end + + # Specifies a humanized form of a string by a regular expression rule or + # by a string mapping. When using a regular expression based replacement, + # the normal humanize formatting is called after the replacement. When a + # string is used, the human form should be specified as desired (example: + # 'The name', not 'the_name'). 
+ # + # human /_cnt$/i, '\1_count' + # human 'legacy_col_person_name', 'Name' + def human(rule, replacement) + @humans.prepend([rule, replacement]) + end + + # Clears the loaded inflections within a given scope (default is + # :all). Give the scope as a symbol of the inflection type, the + # options are: :plurals, :singulars, :uncountables, + # :humans, :acronyms. + # + # clear :all + # clear :plurals + def clear(scope = :all) + case scope + when :all + clear(:acronyms) + clear(:plurals) + clear(:singulars) + clear(:uncountables) + clear(:humans) + when :acronyms + @acronyms = {} + define_acronym_regex_patterns + when :uncountables + @uncountables = Uncountables.new + when :plurals, :singulars, :humans + instance_variable_set "@#{scope}", [] + end + end + + private + def define_acronym_regex_patterns + @acronym_regex = @acronyms.empty? ? /(?=a)b/ : /#{@acronyms.values.join("|")}/ + @acronyms_camelize_regex = /^(?:#{@acronym_regex}(?=\b|[A-Z_])|\w)/ + @acronyms_underscore_regex = /(?:(?<=([A-Za-z\d]))|\b)(#{@acronym_regex})(?=\b|[^a-z])/ + end + end + + # Yields a singleton instance of Inflector::Inflections so you can specify + # additional inflector rules. If passed an optional locale, rules for other + # languages can be specified. If not specified, defaults to :en. + # Only rules for English are provided. + # + # ActiveSupport::Inflector.inflections(:en) do |inflect| + # inflect.uncountable 'rails' + # end + def inflections(locale = :en) + if block_given? + yield Inflections.instance(locale) + else + Inflections.instance_or_fallback(locale) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/methods.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/methods.rb new file mode 100644 index 0000000..43abb9c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/methods.rb @@ -0,0 +1,377 @@ +# frozen_string_literal: true + +require "active_support/inflections" +require "active_support/core_ext/object/blank" + +module ActiveSupport + # The Inflector transforms words from singular to plural, class names to table + # names, modularized class names to ones without, and class names to foreign + # keys. The default inflections for pluralization, singularization, and + # uncountable words are kept in inflections.rb. + # + # The Rails core team has stated patches for the inflections library will not + # be accepted in order to avoid breaking legacy applications which may be + # relying on errant inflections. If you discover an incorrect inflection and + # require it for your application or wish to define rules for languages other + # than English, please correct or add them yourself (explained below). + module Inflector + extend self + + # Returns the plural form of the word in the string. + # + # If passed an optional +locale+ parameter, the word will be + # pluralized using rules defined for that language. By default, + # this parameter is set to :en. + # + # pluralize('post') # => "posts" + # pluralize('octopus') # => "octopi" + # pluralize('sheep') # => "sheep" + # pluralize('words') # => "words" + # pluralize('CamelOctopus') # => "CamelOctopi" + # pluralize('ley', :es) # => "leyes" + def pluralize(word, locale = :en) + apply_inflections(word, inflections(locale).plurals, locale) + end + + # The reverse of #pluralize, returns the singular form of a word in a + # string. 
+ # + # If passed an optional +locale+ parameter, the word will be + # singularized using rules defined for that language. By default, + # this parameter is set to :en. + # + # singularize('posts') # => "post" + # singularize('octopi') # => "octopus" + # singularize('sheep') # => "sheep" + # singularize('word') # => "word" + # singularize('CamelOctopi') # => "CamelOctopus" + # singularize('leyes', :es) # => "ley" + def singularize(word, locale = :en) + apply_inflections(word, inflections(locale).singulars, locale) + end + + # Converts strings to UpperCamelCase. + # If the +uppercase_first_letter+ parameter is set to false, then produces + # lowerCamelCase. + # + # Also converts '/' to '::' which is useful for converting + # paths to namespaces. + # + # camelize('active_model') # => "ActiveModel" + # camelize('active_model', false) # => "activeModel" + # camelize('active_model/errors') # => "ActiveModel::Errors" + # camelize('active_model/errors', false) # => "activeModel::Errors" + # + # As a rule of thumb you can think of +camelize+ as the inverse of + # #underscore, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def camelize(term, uppercase_first_letter = true) + string = term.to_s + # String#camelize takes a symbol (:upper or :lower), so here we also support :lower to keep the methods consistent. + if !uppercase_first_letter || uppercase_first_letter == :lower + string = string.sub(inflections.acronyms_camelize_regex) { |match| match.downcase! || match } + else + string = string.sub(/^[a-z\d]*/) { |match| inflections.acronyms[match] || match.capitalize! || match } + end + string.gsub!(/(?:_|(\/))([a-z\d]*)/i) do + word = $2 + substituted = inflections.acronyms[word] || word.capitalize! || word + $1 ? "::#{substituted}" : substituted + end + string + end + + # Makes an underscored, lowercase form from the expression in the string. + # + # Changes '::' to '/' to convert namespaces to paths. + # + # underscore('ActiveModel') # => "active_model" + # underscore('ActiveModel::Errors') # => "active_model/errors" + # + # As a rule of thumb you can think of +underscore+ as the inverse of + # #camelize, though there are cases where that does not hold: + # + # camelize(underscore('SSLError')) # => "SslError" + def underscore(camel_cased_word) + return camel_cased_word.to_s unless /[A-Z-]|::/.match?(camel_cased_word) + word = camel_cased_word.to_s.gsub("::", "/") + word.gsub!(inflections.acronyms_underscore_regex) { "#{$1 && '_' }#{$2.downcase}" } + word.gsub!(/([A-Z])(?=[A-Z][a-z])|([a-z\d])(?=[A-Z])/) { ($1 || $2) << "_" } + word.tr!("-", "_") + word.downcase! + word + end + + # Tweaks an attribute name for display to end users. + # + # Specifically, performs these transformations: + # + # * Applies human inflection rules to the argument. + # * Deletes leading underscores, if any. + # * Removes an "_id" suffix if present. + # * Replaces underscores with spaces, if any. + # * Downcases all words except acronyms. + # * Capitalizes the first word. + # The capitalization of the first word can be turned off by setting the + # +:capitalize+ option to false (default is true). + # + # The trailing '_id' can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true (default is false). 
+ # + # humanize('employee_salary') # => "Employee salary" + # humanize('author_id') # => "Author" + # humanize('author_id', capitalize: false) # => "author" + # humanize('_id') # => "Id" + # humanize('author_id', keep_id_suffix: true) # => "Author id" + # + # If "SSL" was defined to be an acronym: + # + # humanize('ssl_error') # => "SSL error" + # + def humanize(lower_case_and_underscored_word, capitalize: true, keep_id_suffix: false) + result = lower_case_and_underscored_word.to_s.dup + + inflections.humans.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + + result.tr!("_", " ") + result.lstrip! + unless keep_id_suffix + result.delete_suffix!(" id") + end + + result.gsub!(/([a-z\d]+)/i) do |match| + match.downcase! + inflections.acronyms[match] || match + end + + if capitalize + result.sub!(/\A\w/) do |match| + match.upcase! + match + end + end + + result + end + + # Converts just the first character to uppercase. + # + # upcase_first('what a Lovely Day') # => "What a Lovely Day" + # upcase_first('w') # => "W" + # upcase_first('') # => "" + def upcase_first(string) + string.length > 0 ? string[0].upcase.concat(string[1..-1]) : "" + end + + # Capitalizes all the words and replaces some characters in the string to + # create a nicer looking title. +titleize+ is meant for creating pretty + # output. It is not used in the Rails internals. + # + # The trailing '_id','Id'.. can be kept and capitalized by setting the + # optional parameter +keep_id_suffix+ to true. + # By default, this parameter is false. + # + # +titleize+ is also aliased as +titlecase+. + # + # titleize('man from the boondocks') # => "Man From The Boondocks" + # titleize('x-men: the last stand') # => "X Men: The Last Stand" + # titleize('TheManWithoutAPast') # => "The Man Without A Past" + # titleize('raiders_of_the_lost_ark') # => "Raiders Of The Lost Ark" + # titleize('string_ending_with_id', keep_id_suffix: true) # => "String Ending With Id" + def titleize(word, keep_id_suffix: false) + humanize(underscore(word), keep_id_suffix: keep_id_suffix).gsub(/\b(?<!\w['’`()])[a-z]/) do |match| + match.capitalize + end + end + + # Creates the name of a table like Rails does for models to table + # names. This method uses the #pluralize method on the last word in the + # string. + # + # tableize('RawScaledScorer') # => "raw_scaled_scorers" + # tableize('ham_and_egg') # => "ham_and_eggs" + # tableize('fancyCategory') # => "fancy_categories" + def tableize(class_name) + pluralize(underscore(class_name)) + end + + # Creates a class name from a plural table name like Rails does for table + # names to models. Note that this returns a string and not a Class (To + # convert to an actual class follow +classify+ with #constantize). + # + # classify('ham_and_eggs') # => "HamAndEgg" + # classify('posts') # => "Post" + # + # Singular names are not handled correctly: + # + # classify('calculus') # => "Calculu" + def classify(table_name) + # strip out any leading schema name + camelize(singularize(table_name.to_s.sub(/.*\./, ""))) + end + + # Replaces underscores with dashes in the string. + # + # dasherize('puni_puni') # => "puni-puni" + def dasherize(underscored_word) + underscored_word.tr("_", "-") + end + + # Removes the module part from the expression in the string. + # + # demodulize('ActiveSupport::Inflector::Inflections') # => "Inflections" + # demodulize('Inflections') # => "Inflections" + # demodulize('::Inflections') # => "Inflections" + # demodulize('') # => "" + # + # See also #deconstantize. + def demodulize(path) + path = path.to_s + if i = path.rindex("::") + path[(i + 2)..-1] + else + path + end + end + + # Removes the rightmost segment from the constant expression in the string. 
+ # + # deconstantize('Net::HTTP') # => "Net" + # deconstantize('::Net::HTTP') # => "::Net" + # deconstantize('String') # => "" + # deconstantize('::String') # => "" + # deconstantize('') # => "" + # + # See also #demodulize. + def deconstantize(path) + path.to_s[0, path.rindex("::") || 0] # implementation based on the one in facets' Module#spacename + end + + # Creates a foreign key name from a class name. + # +separate_class_name_and_id_with_underscore+ sets whether + # the method should put '_' between the name and 'id'. + # + # foreign_key('Message') # => "message_id" + # foreign_key('Message', false) # => "messageid" + # foreign_key('Admin::Post') # => "post_id" + def foreign_key(class_name, separate_class_name_and_id_with_underscore = true) + underscore(demodulize(class_name)) + (separate_class_name_and_id_with_underscore ? "_id" : "id") + end + + # Tries to find a constant with the name specified in the argument string. + # + # constantize('Module') # => Module + # constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # constantize('C') # => 'outside', same as ::C + # end + # + # NameError is raised when the name is not in CamelCase or the constant is + # unknown. + def constantize(camel_cased_word) + Object.const_get(camel_cased_word) + end + + # Tries to find a constant with the name specified in the argument string. + # + # safe_constantize('Module') # => Module + # safe_constantize('Foo::Bar') # => Foo::Bar + # + # The name is assumed to be the one of a top-level constant, no matter + # whether it starts with "::" or not. No lexical context is taken into + # account: + # + # C = 'outside' + # module M + # C = 'inside' + # C # => 'inside' + # safe_constantize('C') # => 'outside', same as ::C + # end + # + # +nil+ is returned when the name is not in CamelCase or the constant (or + # part of it) is unknown. + # + # safe_constantize('blargle') # => nil + # safe_constantize('UnknownModule') # => nil + # safe_constantize('UnknownModule::Foo::Bar') # => nil + def safe_constantize(camel_cased_word) + constantize(camel_cased_word) + rescue NameError => e + raise if e.name && !(camel_cased_word.to_s.split("::").include?(e.name.to_s) || + e.name.to_s == camel_cased_word.to_s) + rescue LoadError => e + message = e.respond_to?(:original_message) ? e.original_message : e.message + raise unless /Unable to autoload constant #{const_regexp(camel_cased_word)}/.match?(message) + end + + # Returns the suffix that should be added to a number to denote the position + # in an ordered sequence such as 1st, 2nd, 3rd, 4th. + # + # ordinal(1) # => "st" + # ordinal(2) # => "nd" + # ordinal(1002) # => "nd" + # ordinal(1003) # => "rd" + # ordinal(-11) # => "th" + # ordinal(-1021) # => "st" + def ordinal(number) + I18n.translate("number.nth.ordinals", number: number) + end + + # Turns a number into an ordinal string used to denote the position in an + # ordered sequence such as 1st, 2nd, 3rd, 4th. 
+ # + # ordinalize(1) # => "1st" + # ordinalize(2) # => "2nd" + # ordinalize(1002) # => "1002nd" + # ordinalize(1003) # => "1003rd" + # ordinalize(-11) # => "-11th" + # ordinalize(-1021) # => "-1021st" + def ordinalize(number) + I18n.translate("number.nth.ordinalized", number: number) + end + + private + # Mounts a regular expression, returned as a string to ease interpolation, + # that will match part by part the given constant. + # + # const_regexp("Foo::Bar::Baz") # => "Foo(::Bar(::Baz)?)?" + # const_regexp("::") # => "::" + def const_regexp(camel_cased_word) + parts = camel_cased_word.split("::") + + return Regexp.escape(camel_cased_word) if parts.blank? + + last = parts.pop + + parts.reverse!.inject(last) do |acc, part| + part.empty? ? acc : "#{part}(::#{acc})?" + end + end + + # Applies inflection rules for +singularize+ and +pluralize+. + # + # If passed an optional +locale+ parameter, the uncountables will be + # found for that locale. + # + # apply_inflections('post', inflections.plurals, :en) # => "posts" + # apply_inflections('posts', inflections.singulars, :en) # => "post" + def apply_inflections(word, rules, locale = :en) + result = word.to_s.dup + + if word.empty? || inflections(locale).uncountables.uncountable?(result) + result + else + rules.each { |(rule, replacement)| break if result.sub!(rule, replacement) } + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/transliterate.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/transliterate.rb new file mode 100644 index 0000000..e2a4189 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/inflector/transliterate.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +require "active_support/core_ext/string/multibyte" +require "active_support/i18n" + +module ActiveSupport + module Inflector + ALLOWED_ENCODINGS_FOR_TRANSLITERATE = [Encoding::UTF_8, Encoding::US_ASCII, Encoding::GB18030].freeze + + # Replaces non-ASCII characters with an ASCII approximation, or if none + # exists, a replacement character which defaults to "?". + # + # transliterate('Ærøskøbing') + # # => "AEroskobing" + # + # Default approximations are provided for Western/Latin characters, + # e.g. "ø", "ñ", "ê", "ß", etc. + # + # This method is I18n aware, so you can set up custom approximations for a + # locale. This can be useful, for example, to transliterate German's "ü" + # and "ö" to "ue" and "oe", or to add support for transliterating Russian + # to ASCII. 
+ # + # In order to make your custom transliterations available, you must set + # them as the i18n.transliterate.rule i18n key: + # + # # Store the transliterations in locales/de.yml + # i18n: + # transliterate: + # rule: + # ü: "ue" + # ö: "oe" + # + # # Or set them using Ruby + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: { + # 'ü' => 'ue', + # 'ö' => 'oe' + # } + # } + # }) + # + # The value for i18n.transliterate.rule can be a simple Hash that + # maps characters to ASCII approximations as shown above, or, for more + # complex requirements, a Proc: + # + # I18n.backend.store_translations(:de, i18n: { + # transliterate: { + # rule: ->(string) { MyTransliterator.transliterate(string) } + # } + # }) + # + # Now you can have different transliterations for each locale: + # + # transliterate('Jürgen', locale: :en) + # # => "Jurgen" + # + # transliterate('Jürgen', locale: :de) + # # => "Juergen" + # + # Transliteration is restricted to UTF-8, US-ASCII, and GB18030 strings. + # Other encodings will raise an ArgumentError. + def transliterate(string, replacement = "?", locale: nil) + string = string.dup if string.frozen? + raise ArgumentError, "Can only transliterate strings. Received #{string.class.name}" unless string.is_a?(String) + raise ArgumentError, "Cannot transliterate strings with #{string.encoding} encoding" unless ALLOWED_ENCODINGS_FOR_TRANSLITERATE.include?(string.encoding) + + input_encoding = string.encoding + + # US-ASCII is a subset of UTF-8 so we'll force encoding as UTF-8 if + # US-ASCII is given. This way we can let tidy_bytes handle the string + # in the same way as we do for UTF-8 + string.force_encoding(Encoding::UTF_8) if string.encoding == Encoding::US_ASCII + + # GB18030 is Unicode compatible but is not a direct mapping so needs to be + # transcoded. Using invalid/undef :replace will result in loss of data in + # the event of invalid characters, but since tidy_bytes will replace + # invalid/undef with a "?" we're safe to do the same beforehand + string.encode!(Encoding::UTF_8, invalid: :replace, undef: :replace) if string.encoding == Encoding::GB18030 + + transliterated = I18n.transliterate( + ActiveSupport::Multibyte::Unicode.tidy_bytes(string).unicode_normalize(:nfc), + replacement: replacement, + locale: locale + ) + + # Restore the string encoding of the input if it was not UTF-8. + # Apply invalid/undef :replace as tidy_bytes does + transliterated.encode!(input_encoding, invalid: :replace, undef: :replace) if input_encoding != transliterated.encoding + + transliterated + end + + # Replaces special characters in a string so that it may be used as part of + # a 'pretty' URL. + # + # parameterize("Donald E. Knuth") # => "donald-e-knuth" + # parameterize("^très|Jolie-- ") # => "tres-jolie" + # + # To use a custom separator, override the +separator+ argument. + # + # parameterize("Donald E. Knuth", separator: '_') # => "donald_e_knuth" + # parameterize("^très|Jolie__ ", separator: '_') # => "tres_jolie" + # + # To preserve the case of the characters in a string, use the +preserve_case+ argument. + # + # parameterize("Donald E. 
Knuth", preserve_case: true) # => "Donald-E-Knuth" + # parameterize("^très|Jolie-- ", preserve_case: true) # => "tres-Jolie" + # + # It preserves dashes and underscores unless they are used as separators: + # + # parameterize("^très|Jolie__ ") # => "tres-jolie__" + # parameterize("^très|Jolie-- ", separator: "_") # => "tres_jolie--" + # parameterize("^très_Jolie-- ", separator: ".") # => "tres_jolie--" + # + # If the optional parameter +locale+ is specified, + # the word will be parameterized as a word of that language. + # By default, this parameter is set to nil and it will use + # the configured I18n.locale. + def parameterize(string, separator: "-", preserve_case: false, locale: nil) + # Replace accented chars with their ASCII equivalents. + parameterized_string = transliterate(string, locale: locale) + + # Turn unwanted chars into the separator. + parameterized_string.gsub!(/[^a-z0-9\-_]+/i, separator) + + unless separator.nil? || separator.empty? + if separator == "-" + re_duplicate_separator = /-{2,}/ + re_leading_trailing_separator = /^-|-$/i + else + re_sep = Regexp.escape(separator) + re_duplicate_separator = /#{re_sep}{2,}/ + re_leading_trailing_separator = /^#{re_sep}|#{re_sep}$/i + end + # No more than one of the separator in a row. + parameterized_string.gsub!(re_duplicate_separator, separator) + # Remove leading/trailing separator. + parameterized_string.gsub!(re_leading_trailing_separator, "") + end + + parameterized_string.downcase! unless preserve_case + parameterized_string + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/isolated_execution_state.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/isolated_execution_state.rb new file mode 100644 index 0000000..2b8b06f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/isolated_execution_state.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require "fiber" + +module ActiveSupport + module IsolatedExecutionState # :nodoc: + @isolation_level = :thread + + Thread.attr_accessor :active_support_execution_state + Fiber.attr_accessor :active_support_execution_state + + class << self + attr_reader :isolation_level + + def isolation_level=(level) + unless %i(thread fiber).include?(level) + raise ArgumentError, "isolation_level must be `:thread` or `:fiber`, got: `#{level.inspect}`" + end + + if level != isolation_level + clear + singleton_class.alias_method(:current, "current_#{level}") + singleton_class.send(:private, :current) + @isolation_level = level + end + end + + def unique_id + self[:__id__] ||= Object.new + end + + def [](key) + current[key] + end + + def []=(key, value) + current[key] = value + end + + def key?(key) + current.key?(key) + end + + def delete(key) + current.delete(key) + end + + def clear + current.clear + end + + def share_with(other) + # Action Controller streaming spawns a new thread and copy thread locals. + # We do the same here for backward compatibility, but this is very much a hack + # and streaming should be rethought. + context = @isolation_level == :thread ? 
Thread.current : Fiber.current + context.active_support_execution_state = other.active_support_execution_state.dup + end + + private + def current_thread + Thread.current.active_support_execution_state ||= {} + end + + def current_fiber + Fiber.current.active_support_execution_state ||= {} + end + + alias_method :current, :current_thread + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json.rb new file mode 100644 index 0000000..d788717 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require "active_support/json/decoding" +require "active_support/json/encoding" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/decoding.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/decoding.rb new file mode 100644 index 0000000..e40957e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/decoding.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/module/delegation" +require "json" + +module ActiveSupport + # Look for and parse json strings that look like ISO 8601 times. + mattr_accessor :parse_json_times + + module JSON + # matches YAML-formatted dates + DATE_REGEX = /\A\d{4}-\d{2}-\d{2}\z/ + DATETIME_REGEX = /\A(?:\d{4}-\d{2}-\d{2}|\d{4}-\d{1,2}-\d{1,2}[T \t]+\d{1,2}:\d{2}:\d{2}(\.[0-9]*)?(([ \t]*)Z|[-+]\d{2}?(:\d{2})?)?)\z/ + + class << self + # Parses a JSON string (JavaScript Object Notation) into a hash. + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.decode("{\"team\":\"rails\",\"players\":\"36\"}") + # => {"team" => "rails", "players" => "36"} + def decode(json) + data = ::JSON.parse(json, quirks_mode: true) + + if ActiveSupport.parse_json_times + convert_dates_from(data) + else + data + end + end + + # Returns the class of the error that will be raised when there is an + # error in decoding JSON. Using this method means you won't directly + # depend on the ActiveSupport's JSON implementation, in case it changes + # in the future. + # + # begin + # obj = ActiveSupport::JSON.decode(some_string) + # rescue ActiveSupport::JSON.parse_error + # Rails.logger.warn("Attempted to decode invalid JSON: #{some_string}") + # end + def parse_error + ::JSON::ParserError + end + + private + def convert_dates_from(data) + case data + when nil + nil + when DATE_REGEX + begin + Date.parse(data) + rescue ArgumentError + data + end + when DATETIME_REGEX + begin + Time.zone.parse(data) + rescue ArgumentError + data + end + when Array + data.map! { |d| convert_dates_from(d) } + when Hash + data.transform_values! 
do |value| + convert_dates_from(value) + end + else + data + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/encoding.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/encoding.rb new file mode 100644 index 0000000..8e08b24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/json/encoding.rb @@ -0,0 +1,138 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/json" +require "active_support/core_ext/module/delegation" + +module ActiveSupport + class << self + delegate :use_standard_json_time_format, :use_standard_json_time_format=, + :time_precision, :time_precision=, + :escape_html_entities_in_json, :escape_html_entities_in_json=, + :json_encoder, :json_encoder=, + to: :'ActiveSupport::JSON::Encoding' + end + + module JSON + # Dumps objects in JSON (JavaScript Object Notation). + # See http://www.json.org for more info. + # + # ActiveSupport::JSON.encode({ team: 'rails', players: '36' }) + # # => "{\"team\":\"rails\",\"players\":\"36\"}" + def self.encode(value, options = nil) + Encoding.json_encoder.new(options).encode(value) + end + + module Encoding # :nodoc: + class JSONGemEncoder # :nodoc: + attr_reader :options + + def initialize(options = nil) + @options = options || {} + end + + # Encode the given object into a JSON string + def encode(value) + stringify jsonify value.as_json(options.dup) + end + + private + # Rails does more escaping than the JSON gem natively does (we + # escape \u2028 and \u2029 and optionally >, <, & to work around + # certain browser problems). + ESCAPED_CHARS = { + "\u2028" => '\u2028', + "\u2029" => '\u2029', + ">" => '\u003e', + "<" => '\u003c', + "&" => '\u0026', + } + + ESCAPE_REGEX_WITH_HTML_ENTITIES = /[\u2028\u2029><&]/u + ESCAPE_REGEX_WITHOUT_HTML_ENTITIES = /[\u2028\u2029]/u + + # This class wraps all the strings we see and does the extra escaping + class EscapedString < String # :nodoc: + def to_json(*) + if Encoding.escape_html_entities_in_json + s = super + s.gsub! ESCAPE_REGEX_WITH_HTML_ENTITIES, ESCAPED_CHARS + s + else + s = super + s.gsub! ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, ESCAPED_CHARS + s + end + end + + def to_s + self + end + end + + # Mark these as private so we don't leak encoding-specific constructs + private_constant :ESCAPED_CHARS, :ESCAPE_REGEX_WITH_HTML_ENTITIES, + :ESCAPE_REGEX_WITHOUT_HTML_ENTITIES, :EscapedString + + # Convert an object into a "JSON-ready" representation composed of + # primitives like Hash, Array, String, Numeric, + # and +true+/+false+/+nil+. + # Recursively calls #as_json to the object to recursively build a + # fully JSON-ready object. + # + # This allows developers to implement #as_json without having to + # worry about what base types of objects they are allowed to return + # or having to remember to call #as_json recursively. + # + # Note: the +options+ hash passed to +object.to_json+ is only passed + # to +object.as_json+, not any of this method's recursive +#as_json+ + # calls. 
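[Editor's note: the encoding pipeline documented above is as_json (object to primitives), then jsonify (defined next, wrapping strings so they are escaped), then stringify (the JSON gem). The practical consequence is that a custom object only needs to return primitives from #as_json; a sketch, with the Point class being a made-up example:

    require "active_support"
    require "active_support/json"

    class Point
      def initialize(x, y)
        @x, @y = x, y
      end

      # Return primitives only; the encoder recurses for us.
      def as_json(options = nil)
        { "x" => @x, "y" => @y }
      end
    end

    ActiveSupport::JSON.encode(Point.new(1, 2)) # => "{\"x\":1,\"y\":2}"
]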
+ def jsonify(value) + case value + when String + EscapedString.new(value) + when Numeric, NilClass, TrueClass, FalseClass + value.as_json + when Hash + result = {} + value.each do |k, v| + result[jsonify(k)] = jsonify(v) + end + result + when Array + value.map { |v| jsonify(v) } + else + jsonify value.as_json + end + end + + # Encode a "jsonified" Ruby data structure using the JSON gem + def stringify(jsonified) + ::JSON.generate(jsonified, quirks_mode: true, max_nesting: false) + end + end + + class << self + # If true, use ISO 8601 format for dates and times. Otherwise, fall back + # to the Active Support legacy format. + attr_accessor :use_standard_json_time_format + + # If true, encode >, <, & as escaped unicode sequences (e.g. > as \u003e) + # as a safety measure. + attr_accessor :escape_html_entities_in_json + + # Sets the precision of encoded time values. + # Defaults to 3 (equivalent to millisecond precision) + attr_accessor :time_precision + + # Sets the encoder used by Rails to encode Ruby objects into JSON strings + # in +Object#to_json+ and +ActiveSupport::JSON.encode+. + attr_accessor :json_encoder + end + + self.use_standard_json_time_format = true + self.escape_html_entities_in_json = true + self.json_encoder = JSONGemEncoder + self.time_precision = 3 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/key_generator.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/key_generator.rb new file mode 100644 index 0000000..660bffd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/key_generator.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +require "concurrent/map" +require "openssl" + +module ActiveSupport + # KeyGenerator is a simple wrapper around OpenSSL's implementation of PBKDF2. + # It can be used to derive a number of keys for various purposes from a given secret. + # This lets Rails applications have a single secure secret, but avoid reusing that + # key in multiple incompatible contexts. + class KeyGenerator + class << self + def hash_digest_class=(klass) + if klass.kind_of?(Class) && klass < OpenSSL::Digest + @hash_digest_class = klass + else + raise ArgumentError, "#{klass} is expected to be an OpenSSL::Digest subclass" + end + end + + def hash_digest_class + @hash_digest_class ||= OpenSSL::Digest::SHA1 + end + end + + def initialize(secret, options = {}) + @secret = secret + # The default iterations are higher than required for our key derivation uses + # on the off chance someone uses this for password storage + @iterations = options[:iterations] || 2**16 + # Also allow configuration here so people can use this to build a rotation + # scheme when switching the digest class. + @hash_digest_class = options[:hash_digest_class] || self.class.hash_digest_class + end + + # Returns a derived key suitable for use. The default +key_size+ is chosen + # to be compatible with the default settings of ActiveSupport::MessageVerifier. + # i.e. OpenSSL::Digest::SHA1#block_length + def generate_key(salt, key_size = 64) + OpenSSL::PKCS5.pbkdf2_hmac(@secret, salt, @iterations, key_size, @hash_digest_class.new) + end + end + + # CachingKeyGenerator is a wrapper around KeyGenerator which allows users to avoid + # re-executing the key generation process when it's called using the same +salt+ and + # +key_size+. 
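[Editor's note: a short usage sketch for KeyGenerator above and the CachingKeyGenerator defined next; the secret and salt strings are arbitrary placeholders:

    require "active_support/key_generator"

    base = ActiveSupport::KeyGenerator.new("a long, random application secret")
    keys = ActiveSupport::CachingKeyGenerator.new(base)

    keys.generate_key("cookie signing")       # 64-byte key, derived once and then cached
    keys.generate_key("encrypted cookie", 32) # different salt/key_size, so a different cache entry

Because each purpose gets its own salt, the derived keys are independent even though they come from one secret.]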
+ class CachingKeyGenerator + def initialize(key_generator) + @key_generator = key_generator + @cache_keys = Concurrent::Map.new + end + + # Returns a derived key suitable for use. + def generate_key(*args) + @cache_keys[args.join("|")] ||= @key_generator.generate_key(*args) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/lazy_load_hooks.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/lazy_load_hooks.rb new file mode 100644 index 0000000..8ab6cf7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/lazy_load_hooks.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module ActiveSupport + # LazyLoadHooks allows Rails to lazily load a lot of components and thus + # making the app boot faster. Because of this feature now there is no need to + # require ActiveRecord::Base at boot time purely to apply + # configuration. Instead a hook is registered that applies configuration once + # ActiveRecord::Base is loaded. Here ActiveRecord::Base is + # used as example but this feature can be applied elsewhere too. + # + # Here is an example where on_load method is called to register a hook. + # + # initializer 'active_record.initialize_timezone' do + # ActiveSupport.on_load(:active_record) do + # self.time_zone_aware_attributes = true + # self.default_timezone = :utc + # end + # end + # + # When the entirety of +ActiveRecord::Base+ has been + # evaluated then run_load_hooks is invoked. The very last line of + # +ActiveRecord::Base+ is: + # + # ActiveSupport.run_load_hooks(:active_record, ActiveRecord::Base) + # + # run_load_hooks will then execute all the hooks that were registered + # with the on_load method. In the case of the above example, it will + # execute the block of code that is in the +initializer+. + # + # Registering a hook that has already run results in that hook executing + # immediately. This allows hooks to be nested for code that relies on + # multiple lazily loaded components: + # + # initializer "action_text.renderer" do + # ActiveSupport.on_load(:action_controller_base) do + # ActiveSupport.on_load(:action_text_content) do + # self.default_renderer = Class.new(ActionController::Base).renderer + # end + # end + # end + module LazyLoadHooks + def self.extended(base) # :nodoc: + base.class_eval do + @load_hooks = Hash.new { |h, k| h[k] = [] } + @loaded = Hash.new { |h, k| h[k] = [] } + @run_once = Hash.new { |h, k| h[k] = [] } + end + end + + # Declares a block that will be executed when a Rails component is fully + # loaded. If the component has already loaded, the block is executed + # immediately. + # + # Options: + # + # * :yield - Yields the object that run_load_hooks to +block+. + # * :run_once - Given +block+ will run only once. + def on_load(name, options = {}, &block) + @loaded[name].each do |base| + execute_hook(name, base, options, block) + end + + @load_hooks[name] << [block, options] + end + + # Executes all blocks registered to +name+ via on_load, using +base+ as the + # evaluation context. + # + # ActiveSupport.run_load_hooks(:active_record, ActiveRecord::Base) + # + # In the case of the above example, it will execute all hooks registered + # for +:active_record+ within the class +ActiveRecord::Base+. 
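[Editor's note: a self-contained sketch of the lifecycle documented above, pairing on_load with the run_load_hooks defined next; the :my_framework hook name and class are hypothetical:

    require "active_support/lazy_load_hooks"

    class MyFramework; end

    # Registered before the component loads, so it is deferred.
    ActiveSupport.on_load(:my_framework) do
      # Evaluated via class_eval on the base passed to run_load_hooks.
      def self.loaded?
        true
      end
    end

    ActiveSupport.run_load_hooks(:my_framework, MyFramework)
    MyFramework.loaded? # => true

    # Registered after the hooks ran, so it executes immediately.
    ActiveSupport.on_load(:my_framework) { puts "already loaded" }
]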
+ def run_load_hooks(name, base = Object) + @loaded[name] << base + @load_hooks[name].each do |hook, options| + execute_hook(name, base, options, hook) + end + end + + private + def with_execution_control(name, block, once) + unless @run_once[name].include?(block) + @run_once[name] << block if once + + yield + end + end + + def execute_hook(name, base, options, block) + with_execution_control(name, block, options[:run_once]) do + if options[:yield] + block.call(base) + else + if base.is_a?(Module) + base.class_eval(&block) + else + base.instance_eval(&block) + end + end + end + end + end + + extend LazyLoadHooks +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.rb new file mode 100644 index 0000000..29eb9de --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +{ + en: { + number: { + nth: { + ordinals: lambda do |_key, options| + number = options[:number] + case number + when 1; "st" + when 2; "nd" + when 3; "rd" + when 4, 5, 6, 7, 8, 9, 10, 11, 12, 13; "th" + else + num_modulo = number.to_i.abs % 100 + num_modulo %= 10 if num_modulo > 13 + case num_modulo + when 1; "st" + when 2; "nd" + when 3; "rd" + else "th" + end + end + end, + + ordinalized: lambda do |_key, options| + number = options[:number] + "#{number}#{ActiveSupport::Inflector.ordinal(number)}" + end + } + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.yml b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.yml new file mode 100644 index 0000000..0453883 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/locale/en.yml @@ -0,0 +1,139 @@ +en: + date: + formats: + # Use the strftime parameters for formats. + # When no format has been given, it uses default. + # You can provide other formats here if you like! + default: "%Y-%m-%d" + short: "%b %d" + long: "%B %d, %Y" + + day_names: [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday] + abbr_day_names: [Sun, Mon, Tue, Wed, Thu, Fri, Sat] + + # Don't forget the nil at the beginning; there's no such thing as a 0th month + month_names: [~, January, February, March, April, May, June, July, August, September, October, November, December] + abbr_month_names: [~, Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec] + # Used in date_select and datetime_select. + order: + - year + - month + - day + + time: + formats: + default: "%a, %d %b %Y %H:%M:%S %z" + short: "%d %b %H:%M" + long: "%B %d, %Y %H:%M" + am: "am" + pm: "pm" + +# Used in array.to_sentence. + support: + array: + words_connector: ", " + two_words_connector: " and " + last_word_connector: ", and " + number: + # Used in NumberHelper.number_to_delimited() + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: "." + # Delimits thousands (e.g. 
1,000,000 is a million) (always in groups of three) + delimiter: "," + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3 + # Determine how rounding is performed (see BigDecimal::mode) + round_mode: default + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false + # If set, the zeros after the decimal separator will always be stripped (e.g.: 1.200 will be 1.2) + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_currency() + currency: + format: + # Where is the currency sign? %u is the currency unit, %n is the number (default: $5.00) + format: "%u%n" + unit: "$" + # These six are to override number.format and are optional + separator: "." + delimiter: "," + precision: 2 + # round_mode: + significant: false + strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_percentage() + percentage: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + format: "%n%" + + # Used in NumberHelper.number_to_rounded() + precision: + format: + # These five are to override number.format and are optional + # separator: + delimiter: "" + # precision: + # significant: false + # strip_insignificant_zeros: false + + # Used in NumberHelper.number_to_human_size() and NumberHelper.number_to_human() + human: + format: + # These six are to override number.format and are optional + # separator: + delimiter: "" + precision: 3 + # round_mode: + significant: true + strip_insignificant_zeros: true + # Used in number_to_human_size() + storage_units: + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u" + units: + byte: + one: "Byte" + other: "Bytes" + kb: "KB" + mb: "MB" + gb: "GB" + tb: "TB" + pb: "PB" + eb: "EB" + # Used in NumberHelper.number_to_human() + decimal_units: + format: "%n %u" + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. + units: + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "" + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: Thousand + million: Million + billion: Billion + trillion: Trillion + quadrillion: Quadrillion diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber.rb new file mode 100644 index 0000000..ddeaff0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/attribute_accessors" +require "active_support/core_ext/class/attribute" +require "active_support/subscriber" + +module ActiveSupport + # ActiveSupport::LogSubscriber is an object set to consume + # ActiveSupport::Notifications with the sole purpose of logging them. + # The log subscriber dispatches notifications to a registered object based + # on its given namespace. 
+ # + # An example would be Active Record log subscriber responsible for logging + # queries: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # info "#{event.payload[:name]} (#{event.duration}) #{event.payload[:sql]}" + # end + # end + # end + # + # And it's finally registered as: + # + # ActiveRecord::LogSubscriber.attach_to :active_record + # + # Since we need to know all instance methods before attaching the log + # subscriber, the line above should be called after your + # ActiveRecord::LogSubscriber definition. + # + # A logger also needs to be set with ActiveRecord::LogSubscriber.logger=. + # This is assigned automatically in a Rails environment. + # + # After configured, whenever a "sql.active_record" notification is published, + # it will properly dispatch the event + # (ActiveSupport::Notifications::Event) to the sql method. + # + # Being an ActiveSupport::Notifications consumer, + # ActiveSupport::LogSubscriber exposes a simple interface to check if + # instrumented code raises an exception. It is common to log a different + # message in case of an error, and this can be achieved by extending + # the previous example: + # + # module ActiveRecord + # class LogSubscriber < ActiveSupport::LogSubscriber + # def sql(event) + # exception = event.payload[:exception] + # + # if exception + # exception_object = event.payload[:exception_object] + # + # error "[ERROR] #{event.payload[:name]}: #{exception.join(', ')} " \ + # "(#{exception_object.backtrace.first})" + # else + # # standard logger code + # end + # end + # end + # end + # + # Log subscriber also has some helpers to deal with logging and automatically + # flushes all logs when the request finishes + # (via action_dispatch.callback notification) in a Rails environment. + class LogSubscriber < Subscriber + # Embed in a String to clear all previous ANSI sequences. + CLEAR = "\e[0m" + BOLD = "\e[1m" + + # Colors + BLACK = "\e[30m" + RED = "\e[31m" + GREEN = "\e[32m" + YELLOW = "\e[33m" + BLUE = "\e[34m" + MAGENTA = "\e[35m" + CYAN = "\e[36m" + WHITE = "\e[37m" + + mattr_accessor :colorize_logging, default: true + + class << self + def logger + @logger ||= if defined?(Rails) && Rails.respond_to?(:logger) + Rails.logger + end + end + + attr_writer :logger + + def log_subscribers + subscribers + end + + # Flush all log_subscribers' logger. + def flush_all! + logger.flush if logger.respond_to?(:flush) + end + + private + def fetch_public_methods(subscriber, inherit_all) + subscriber.public_methods(inherit_all) - LogSubscriber.public_instance_methods(true) + end + end + + def logger + LogSubscriber.logger + end + + def start(name, id, payload) + super if logger + end + + def finish(name, id, payload) + super if logger + rescue => e + log_exception(name, e) + end + + def publish_event(event) + super if logger + rescue => e + log_exception(event.name, e) + end + + private + %w(info debug warn error fatal unknown).each do |level| + class_eval <<-METHOD, __FILE__, __LINE__ + 1 + def #{level}(progname = nil, &block) + logger.#{level}(progname, &block) if logger + end + METHOD + end + + # Set color by using a symbol or one of the defined constants. If a third + # option is set to +true+, it also adds bold to the string. This is based + # on the Highline implementation and will automatically append CLEAR to the + # end of the returned String. 
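+ #
+ # Expected output, as a sketch (using the color constants defined above):
+ #
+ # color("1.2ms", :red) # => "\e[31m1.2ms\e[0m"
+ # color("1.2ms", :red, true) # => "\e[1m\e[31m1.2ms\e[0m"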
+ def color(text, color, bold = false) # :doc: + return text unless colorize_logging + color = self.class.const_get(color.upcase) if color.is_a?(Symbol) + bold = bold ? BOLD : "" + "#{bold}#{color}#{text}#{CLEAR}" + end + + def log_exception(name, e) + if logger + logger.error "Could not log #{name.inspect} event. #{e.class}: #{e.message} #{e.backtrace}" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber/test_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber/test_helper.rb new file mode 100644 index 0000000..b528a7f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/log_subscriber/test_helper.rb @@ -0,0 +1,106 @@ +# frozen_string_literal: true + +require "active_support/log_subscriber" +require "active_support/logger" +require "active_support/notifications" + +module ActiveSupport + class LogSubscriber + # Provides some helpers to deal with testing log subscribers by setting up + # notifications. Take for instance Active Record subscriber tests: + # + # class SyncLogSubscriberTest < ActiveSupport::TestCase + # include ActiveSupport::LogSubscriber::TestHelper + # + # setup do + # ActiveRecord::LogSubscriber.attach_to(:active_record) + # end + # + # def test_basic_query_logging + # Developer.all.to_a + # wait + # assert_equal 1, @logger.logged(:debug).size + # assert_match(/Developer Load/, @logger.logged(:debug).last) + # assert_match(/SELECT \* FROM "developers"/, @logger.logged(:debug).last) + # end + # end + # + # All you need to do is to ensure that your log subscriber is added to + # Rails::Subscriber, as in the second line of the code above. The test + # helpers are responsible for setting up the queue and subscriptions, and + # turning colors in logs off. + # + # The messages are available in the @logger instance, which is a logger with + # limited powers (it actually does not send anything to your output), and + # you can collect them doing @logger.logged(level), where level is the level + # used in logging, like info, debug, warn, and so on. + module TestHelper + def setup # :nodoc: + @logger = MockLogger.new + @notifier = ActiveSupport::Notifications::Fanout.new + + ActiveSupport::LogSubscriber.colorize_logging = false + + @old_notifier = ActiveSupport::Notifications.notifier + set_logger(@logger) + ActiveSupport::Notifications.notifier = @notifier + end + + def teardown # :nodoc: + set_logger(nil) + ActiveSupport::Notifications.notifier = @old_notifier + end + + class MockLogger + include ActiveSupport::Logger::Severity + + attr_reader :flush_count + attr_accessor :level + + def initialize(level = DEBUG) + @flush_count = 0 + @level = level + @logged = Hash.new { |h, k| h[k] = [] } + end + + def method_missing(level, message = nil) + if block_given? + @logged[level] << yield + else + @logged[level] << message + end + end + + def logged(level) + @logged[level].compact.map { |l| l.to_s.strip } + end + + def flush + @flush_count += 1 + end + + ActiveSupport::Logger::Severity.constants.each do |severity| + class_eval <<-EOT, __FILE__, __LINE__ + 1 + def #{severity.downcase}? + #{severity} >= @level + end + EOT + end + end + + # Wait notifications to be published. + def wait + @notifier.wait + end + + # Overwrite if you use another logger in your log subscriber. 
+ # + # def logger + # ActiveRecord::Base.logger = @logger + # end + def set_logger(logger) + ActiveSupport::LogSubscriber.logger = logger + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger.rb new file mode 100644 index 0000000..1e241c1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require "active_support/logger_silence" +require "active_support/logger_thread_safe_level" +require "logger" + +module ActiveSupport + class Logger < ::Logger + include LoggerSilence + + # Returns true if the logger destination matches one of the sources + # + # logger = Logger.new(STDOUT) + # ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT) + # # => true + def self.logger_outputs_to?(logger, *sources) + logdev = logger.instance_variable_get(:@logdev) + logger_source = logdev.dev if logdev.respond_to?(:dev) + sources.any? { |source| source == logger_source } + end + + # Broadcasts logs to multiple loggers. + def self.broadcast(logger) # :nodoc: + Module.new do + define_method(:add) do |*args, &block| + logger.add(*args, &block) + super(*args, &block) + end + + define_method(:<<) do |x| + logger << x + super(x) + end + + define_method(:close) do + logger.close + super() + end + + define_method(:progname=) do |name| + logger.progname = name + super(name) + end + + define_method(:formatter=) do |formatter| + logger.formatter = formatter + super(formatter) + end + + define_method(:level=) do |level| + logger.level = level + super(level) + end + + define_method(:local_level=) do |level| + logger.local_level = level if logger.respond_to?(:local_level=) + super(level) if respond_to?(:local_level=) + end + + define_method(:silence) do |level = Logger::ERROR, &block| + if logger.respond_to?(:silence) + logger.silence(level) do + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + else + if defined?(super) + super(level, &block) + else + block.call(self) + end + end + end + end + end + + def initialize(*args, **kwargs) + super + @formatter = SimpleFormatter.new + end + + # Simple formatter which only displays the message. + class SimpleFormatter < ::Logger::Formatter + # This method is invoked when a log event occurs + def call(severity, timestamp, progname, msg) + "#{String === msg ? msg : msg.inspect}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_silence.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_silence.rb new file mode 100644 index 0000000..8567eff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_silence.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/logger_thread_safe_level" + +module ActiveSupport + module LoggerSilence + extend ActiveSupport::Concern + + included do + cattr_accessor :silencer, default: true + include ActiveSupport::LoggerThreadSafeLevel + end + + # Silences the logger for the duration of the block. + def silence(severity = Logger::ERROR) + silencer ? 
log_at(severity) { yield self } : yield(self) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_thread_safe_level.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_thread_safe_level.rb new file mode 100644 index 0000000..042f484 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/logger_thread_safe_level.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/module/attribute_accessors" +require "concurrent" +require "fiber" + +module ActiveSupport + module LoggerThreadSafeLevel # :nodoc: + extend ActiveSupport::Concern + + Logger::Severity.constants.each do |severity| + class_eval(<<-EOT, __FILE__, __LINE__ + 1) + def #{severity.downcase}? # def debug? + Logger::#{severity} >= level # DEBUG >= level + end # end + EOT + end + + def local_level + IsolatedExecutionState[:logger_thread_safe_level] + end + + def local_level=(level) + case level + when Integer + when Symbol + level = Logger::Severity.const_get(level.to_s.upcase) + when nil + else + raise ArgumentError, "Invalid log level: #{level.inspect}" + end + IsolatedExecutionState[:logger_thread_safe_level] = level + end + + def level + local_level || super + end + + # Change the thread-local level for the duration of the given block. + def log_at(level) + old_local_level, self.local_level = local_level, level + yield + ensure + self.local_level = old_local_level + end + + # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+. + # FIXME: Remove when the minimum Ruby version supports overriding Logger#level. + def add(severity, message = nil, progname = nil, &block) # :nodoc: + severity ||= UNKNOWN + progname ||= @progname + + return true if @logdev.nil? || severity < level + + if message.nil? + if block_given? + message = yield + else + message = progname + progname = @progname + end + end + + @logdev.write \ + format_message(format_severity(severity), Time.now, progname, message) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_encryptor.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_encryptor.rb new file mode 100644 index 0000000..6528d1e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_encryptor.rb @@ -0,0 +1,230 @@ +# frozen_string_literal: true + +require "openssl" +require "base64" +require "active_support/core_ext/module/attribute_accessors" +require "active_support/message_verifier" +require "active_support/messages/metadata" + +module ActiveSupport + # MessageEncryptor is a simple way to encrypt values which get stored + # somewhere you don't trust. + # + # The cipher text and initialization vector are base64 encoded and returned + # to you. + # + # This can be used in situations similar to the MessageVerifier, but + # where you don't want users to be able to determine the value of the payload. + # + # len = ActiveSupport::MessageEncryptor.key_len + # salt = SecureRandom.random_bytes(len) + # key = ActiveSupport::KeyGenerator.new('password').generate_key(salt, len) # => "\x89\xE0\x156\xAC..." + # crypt = ActiveSupport::MessageEncryptor.new(key) # => # + # encrypted_data = crypt.encrypt_and_sign('my secret data') # => "NlFBTTMwOUV5UlA1QlNEN2xkY2d6eThYWWh..." 
+ # crypt.decrypt_and_verify(encrypted_data) # => "my secret data" + # + # The +decrypt_and_verify+ method will raise an + # ActiveSupport::MessageEncryptor::InvalidMessage exception if the data + # provided cannot be decrypted or verified. + # + # crypt.decrypt_and_verify('not encrypted data') # => ActiveSupport::MessageEncryptor::InvalidMessage + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = crypt.encrypt_and_sign("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # crypt.decrypt_and_verify(token, purpose: :login) # => "this is the chair" + # crypt.decrypt_and_verify(token, purpose: :shipping) # => nil + # crypt.decrypt_and_verify(token) # => nil + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = crypt.encrypt_and_sign("the conversation is lively") + # crypt.decrypt_and_verify(token, purpose: :scare_tactics) # => nil + # crypt.decrypt_and_verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. + # + # crypt.encrypt_and_sign(parcel, expires_in: 1.month) + # crypt.encrypt_and_sign(doowad, expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned up to the expire time. + # Thereafter, verifying returns +nil+. + # + # === Rotating keys + # + # MessageEncryptor also supports rotating out old configurations by falling + # back to a stack of encryptors. Call +rotate+ to build and add an encryptor + # so +decrypt_and_verify+ will also try the fallback. + # + # By default any rotated encryptors use the values of the primary + # encryptor unless specified otherwise. + # + # You'd give your encryptor the new defaults: + # + # crypt = ActiveSupport::MessageEncryptor.new(@secret, cipher: "aes-256-gcm") + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # crypt.rotate old_secret # Fallback to an old secret instead of @secret. + # crypt.rotate cipher: "aes-256-cbc" # Fallback to an old cipher instead of aes-256-gcm. + # + # Though if both the secret and the cipher was changed at the same time, + # the above should be combined into: + # + # crypt.rotate old_secret, cipher: "aes-256-cbc" + class MessageEncryptor + prepend Messages::Rotator::Encryptor + + cattr_accessor :use_authenticated_message_encryption, instance_accessor: false, default: false + + class << self + def default_cipher # :nodoc: + if use_authenticated_message_encryption + "aes-256-gcm" + else + "aes-256-cbc" + end + end + end + + module NullSerializer # :nodoc: + def self.load(value) + value + end + + def self.dump(value) + value + end + end + + module NullVerifier # :nodoc: + def self.verify(value) + value + end + + def self.generate(value) + value + end + end + + class InvalidMessage < StandardError; end + OpenSSLCipherError = OpenSSL::Cipher::CipherError + + # Initialize a new MessageEncryptor. +secret+ must be at least as long as + # the cipher key size. For the default 'aes-256-gcm' cipher, this is 256 + # bits. 
If you are using a user-entered secret, you can generate a suitable + # key by using ActiveSupport::KeyGenerator or a similar key + # derivation function. + # + # First additional parameter is used as the signature key for MessageVerifier. + # This allows you to specify keys to encrypt and sign data. + # + # ActiveSupport::MessageEncryptor.new('secret', 'signature_secret') + # + # Options: + # * :cipher - Cipher to use. Can be any cipher returned by + # OpenSSL::Cipher.ciphers. Default is 'aes-256-gcm'. + # * :digest - String of digest to use for signing. Default is + # +SHA1+. Ignored when using an AEAD cipher like 'aes-256-gcm'. + # * :serializer - Object serializer to use. Default is +Marshal+. + def initialize(secret, sign_secret = nil, cipher: nil, digest: nil, serializer: nil) + @secret = secret + @sign_secret = sign_secret + @cipher = cipher || self.class.default_cipher + @digest = digest || "SHA1" unless aead_mode? + @verifier = resolve_verifier + @serializer = serializer || Marshal + end + + # Encrypt and sign a message. We need to sign the message in order to avoid + # padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def encrypt_and_sign(value, expires_at: nil, expires_in: nil, purpose: nil) + verifier.generate(_encrypt(value, expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + end + + # Decrypt and verify a message. We need to verify the message in order to + # avoid padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/. + def decrypt_and_verify(data, purpose: nil, **) + _decrypt(verifier.verify(data), purpose) + end + + # Given a cipher, returns the key length of the cipher to help generate the key of desired size + def self.key_len(cipher = default_cipher) + OpenSSL::Cipher.new(cipher).key_len + end + + private + def _encrypt(value, **metadata_options) + cipher = new_cipher + cipher.encrypt + cipher.key = @secret + + # Rely on OpenSSL for the initialization vector + iv = cipher.random_iv + cipher.auth_data = "" if aead_mode? + + encrypted_data = cipher.update(Messages::Metadata.wrap(@serializer.dump(value), **metadata_options)) + encrypted_data << cipher.final + + blob = "#{::Base64.strict_encode64 encrypted_data}--#{::Base64.strict_encode64 iv}" + blob = "#{blob}--#{::Base64.strict_encode64 cipher.auth_tag}" if aead_mode? + blob + end + + def _decrypt(encrypted_message, purpose) + cipher = new_cipher + encrypted_data, iv, auth_tag = encrypted_message.split("--").map { |v| ::Base64.strict_decode64(v) } + + # Currently the OpenSSL bindings do not raise an error if auth_tag is + # truncated, which would allow an attacker to easily forge it. See + # https://github.com/ruby/openssl/issues/63 + raise InvalidMessage if aead_mode? && (auth_tag.nil? || auth_tag.bytes.length != 16) + + cipher.decrypt + cipher.key = @secret + cipher.iv = iv + if aead_mode? + cipher.auth_tag = auth_tag + cipher.auth_data = "" + end + + decrypted_data = cipher.update(encrypted_data) + decrypted_data << cipher.final + + message = Messages::Metadata.verify(decrypted_data, purpose) + @serializer.load(message) if message + rescue OpenSSLCipherError, TypeError, ArgumentError + raise InvalidMessage + end + + def new_cipher + OpenSSL::Cipher.new(@cipher) + end + + attr_reader :verifier + + def aead_mode? + @aead_mode ||= new_cipher.authenticated? + end + + def resolve_verifier + if aead_mode? 
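+ # An AEAD cipher such as aes-256-gcm authenticates the ciphertext
+ # itself, so no separate HMAC-based verifier is needed.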
+ NullVerifier + else + MessageVerifier.new(@sign_secret || @secret, digest: @digest, serializer: NullSerializer) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_verifier.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_verifier.rb new file mode 100644 index 0000000..c224bdc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/message_verifier.rb @@ -0,0 +1,237 @@ +# frozen_string_literal: true + +require "openssl" +require "base64" +require "active_support/core_ext/object/blank" +require "active_support/security_utils" +require "active_support/messages/metadata" +require "active_support/messages/rotator" + +module ActiveSupport + # +MessageVerifier+ makes it easy to generate and verify messages which are + # signed to prevent tampering. + # + # This is useful for cases like remember-me tokens and auto-unsubscribe links + # where the session store isn't suitable or available. + # + # Remember Me: + # cookies[:remember_me] = @verifier.generate([@user.id, 2.weeks.from_now]) + # + # In the authentication filter: + # + # id, time = @verifier.verify(cookies[:remember_me]) + # if Time.now < time + # self.current_user = User.find(id) + # end + # + # By default it uses Marshal to serialize the message. If you want to use + # another serialization method, you can set the serializer in the options + # hash upon initialization: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', serializer: YAML) + # + # +MessageVerifier+ creates HMAC signatures using SHA1 hash algorithm by default. + # If you want to use a different hash algorithm, you can change it by providing + # +:digest+ key as an option while initializing the verifier: + # + # @verifier = ActiveSupport::MessageVerifier.new('s3Krit', digest: 'SHA256') + # + # === Confining messages to a specific purpose + # + # By default any message can be used throughout your app. But they can also be + # confined to a specific +:purpose+. + # + # token = @verifier.generate("this is the chair", purpose: :login) + # + # Then that same purpose must be passed when verifying to get the data back out: + # + # @verifier.verified(token, purpose: :login) # => "this is the chair" + # @verifier.verified(token, purpose: :shipping) # => nil + # @verifier.verified(token) # => nil + # + # @verifier.verify(token, purpose: :login) # => "this is the chair" + # @verifier.verify(token, purpose: :shipping) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => ActiveSupport::MessageVerifier::InvalidSignature + # + # Likewise, if a message has no purpose it won't be returned when verifying with + # a specific purpose. + # + # token = @verifier.generate("the conversation is lively") + # @verifier.verified(token, purpose: :scare_tactics) # => nil + # @verifier.verified(token) # => "the conversation is lively" + # + # @verifier.verify(token, purpose: :scare_tactics) # => ActiveSupport::MessageVerifier::InvalidSignature + # @verifier.verify(token) # => "the conversation is lively" + # + # === Making messages expire + # + # By default messages last forever and verifying one year from now will still + # return the original value. But messages can be set to expire at a given + # time with +:expires_in+ or +:expires_at+. 
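+ # +:expires_in+ takes a duration relative to now, while +:expires_at+
+ # takes an absolute time: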
+ # + # @verifier.generate("parcel", expires_in: 1.month) + # @verifier.generate("doowad", expires_at: Time.now.end_of_year) + # + # Then the messages can be verified and returned up to the expire time. + # Thereafter, the +verified+ method returns +nil+ while +verify+ raises + # ActiveSupport::MessageVerifier::InvalidSignature. + # + # === Rotating keys + # + # MessageVerifier also supports rotating out old configurations by falling + # back to a stack of verifiers. Call +rotate+ to build and add a verifier so + # either +verified+ or +verify+ will also try verifying with the fallback. + # + # By default any rotated verifiers use the values of the primary + # verifier unless specified otherwise. + # + # You'd give your verifier the new defaults: + # + # verifier = ActiveSupport::MessageVerifier.new(@secret, digest: "SHA512", serializer: JSON) + # + # Then gradually rotate the old values out by adding them as fallbacks. Any message + # generated with the old values will then work until the rotation is removed. + # + # verifier.rotate old_secret # Fallback to an old secret instead of @secret. + # verifier.rotate digest: "SHA256" # Fallback to an old digest instead of SHA512. + # verifier.rotate serializer: Marshal # Fallback to an old serializer instead of JSON. + # + # Though the above would most likely be combined into one rotation: + # + # verifier.rotate old_secret, digest: "SHA256", serializer: Marshal + class MessageVerifier + prepend Messages::Rotator::Verifier + + class InvalidSignature < StandardError; end + + SEPARATOR = "--" # :nodoc: + SEPARATOR_LENGTH = SEPARATOR.length # :nodoc: + + def initialize(secret, digest: nil, serializer: nil) + raise ArgumentError, "Secret should not be nil." unless secret + @secret = secret + @digest = digest&.to_s || "SHA1" + @serializer = serializer || Marshal + end + + # Checks if a signed message could have been generated by signing an object + # with the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # verifier.valid_message?(signed_message) # => true + # + # tampered_message = signed_message.chop # editing the message invalidates the signature + # verifier.valid_message?(tampered_message) # => false + def valid_message?(signed_message) + data, digest = get_data_and_digest_from(signed_message) + digest_matches_data?(digest, data) + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # + # signed_message = verifier.generate 'a private message' + # verifier.verified(signed_message) # => 'a private message' + # + # Returns +nil+ if the message was not signed with the same secret. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verified(signed_message) # => nil + # + # Returns +nil+ if the message is not Base64-encoded. + # + # invalid_message = "f--46a0120593880c733a53b6dad75b42ddc1c8996d" + # verifier.verified(invalid_message) # => nil + # + # Raises any error raised while decoding the signed message. 
+ # + # incompatible_message = "test--dad7b06c94abba8d46a15fafaef56c327665d5ff" + # verifier.verified(incompatible_message) # => TypeError: incompatible marshal file format + def verified(signed_message, purpose: nil, **) + data, digest = get_data_and_digest_from(signed_message) + if digest_matches_data?(digest, data) + begin + message = Messages::Metadata.verify(decode(data), purpose) + @serializer.load(message) if message + rescue ArgumentError => argument_error + return if argument_error.message.include?("invalid base64") + raise + end + end + end + + # Decodes the signed message using the +MessageVerifier+'s secret. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # signed_message = verifier.generate 'a private message' + # + # verifier.verify(signed_message) # => 'a private message' + # + # Raises +InvalidSignature+ if the message was not signed with the same + # secret or was not Base64-encoded. + # + # other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit' + # other_verifier.verify(signed_message) # => ActiveSupport::MessageVerifier::InvalidSignature + def verify(*args, **options) + verified(*args, **options) || raise(InvalidSignature) + end + + # Generates a signed message for the provided value. + # + # The message is signed with the +MessageVerifier+'s secret. + # Returns Base64-encoded message joined with the generated signature. + # + # verifier = ActiveSupport::MessageVerifier.new 's3Krit' + # verifier.generate 'a private message' # => "BAhJIhRwcml2YXRlLW1lc3NhZ2UGOgZFVA==--e2d724331ebdee96a10fb99b089508d1c72bd772" + def generate(value, expires_at: nil, expires_in: nil, purpose: nil) + data = encode(Messages::Metadata.wrap(@serializer.dump(value), expires_at: expires_at, expires_in: expires_in, purpose: purpose)) + "#{data}#{SEPARATOR}#{generate_digest(data)}" + end + + private + def encode(data) + ::Base64.strict_encode64(data) + end + + def decode(data) + ::Base64.strict_decode64(data) + end + + def generate_digest(data) + OpenSSL::HMAC.hexdigest(@digest, @secret, data) + end + + def digest_length_in_hex + # In hexadecimal (AKA base16) it takes 4 bits to represent a character, + # hence we multiply the digest's length (in bytes) by 8 to get it in + # bits and divide by 4 to get its number of characters it hex. Well, 8 + # divided by 4 is 2. + @digest_length_in_hex ||= OpenSSL::Digest.new(@digest).digest_length * 2 + end + + def separator_index_for(signed_message) + index = signed_message.length - digest_length_in_hex - SEPARATOR_LENGTH + return if index.negative? || signed_message[index, SEPARATOR_LENGTH] != SEPARATOR + + index + end + + def get_data_and_digest_from(signed_message) + return if signed_message.nil? || !signed_message.valid_encoding? || signed_message.empty? + + separator_index = separator_index_for(signed_message) + return if separator_index.nil? + + data = signed_message[0...separator_index] + digest = signed_message[separator_index + SEPARATOR_LENGTH..-1] + + [data, digest] + end + + def digest_matches_data?(digest, data) + data.present? && digest.present? 
&& ActiveSupport::SecurityUtils.secure_compare(digest, generate_digest(data)) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/metadata.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/metadata.rb new file mode 100644 index 0000000..4719d8e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/metadata.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "time" + +module ActiveSupport + module Messages # :nodoc: + class Metadata # :nodoc: + def initialize(message, expires_at = nil, purpose = nil) + @message, @purpose = message, purpose + @expires_at = expires_at.is_a?(String) ? parse_expires_at(expires_at) : expires_at + end + + def as_json(options = {}) + { _rails: { message: @message, exp: @expires_at, pur: @purpose } } + end + + class << self + def wrap(message, expires_at: nil, expires_in: nil, purpose: nil) + if expires_at || expires_in || purpose + JSON.encode new(encode(message), pick_expiry(expires_at, expires_in), purpose) + else + message + end + end + + def verify(message, purpose) + extract_metadata(message).verify(purpose) + end + + private + def pick_expiry(expires_at, expires_in) + if expires_at + expires_at.utc.iso8601(3) + elsif expires_in + Time.now.utc.advance(seconds: expires_in).iso8601(3) + end + end + + def extract_metadata(message) + data = JSON.decode(message) rescue nil + + if data.is_a?(Hash) && data.key?("_rails") + new(decode(data["_rails"]["message"]), data["_rails"]["exp"], data["_rails"]["pur"]) + else + new(message) + end + end + + def encode(message) + ::Base64.strict_encode64(message) + end + + def decode(message) + ::Base64.strict_decode64(message) + end + end + + def verify(purpose) + @message if match?(purpose) && fresh? + end + + private + def match?(purpose) + @purpose.to_s == purpose.to_s + end + + def fresh? + @expires_at.nil? || Time.now.utc < @expires_at + end + + def parse_expires_at(expires_at) + if ActiveSupport.use_standard_json_time_format + Time.iso8601(expires_at) + else + Time.parse(expires_at) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotation_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotation_configuration.rb new file mode 100644 index 0000000..eef05fe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotation_configuration.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + class RotationConfiguration # :nodoc: + attr_reader :signed, :encrypted + + def initialize + @signed, @encrypted = [], [] + end + + def rotate(kind, *args, **options) + args << options unless options.empty? 
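+ # Re-pack the keyword options so each rotation is stored as the plain
+ # argument list it was registered with.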
+ case kind + when :signed + @signed << args + when :encrypted + @encrypted << args + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotator.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotator.rb new file mode 100644 index 0000000..b19e185 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/messages/rotator.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module ActiveSupport + module Messages + module Rotator # :nodoc: + def initialize(*secrets, on_rotation: nil, **options) + super(*secrets, **options) + + @options = options + @rotations = [] + @on_rotation = on_rotation + end + + def rotate(*secrets, **options) + @rotations << build_rotation(*secrets, @options.merge(options)) + end + + module Encryptor + include Rotator + + def decrypt_and_verify(*args, on_rotation: @on_rotation, **options) + super + rescue MessageEncryptor::InvalidMessage, MessageVerifier::InvalidSignature + run_rotations(on_rotation) { |encryptor| encryptor.decrypt_and_verify(*args, **options) } || raise + end + + private + def build_rotation(secret = @secret, sign_secret = @sign_secret, options) + self.class.new(secret, sign_secret, **options) + end + end + + module Verifier + include Rotator + + def verified(*args, on_rotation: @on_rotation, **options) + super || run_rotations(on_rotation) { |verifier| verifier.verified(*args, **options) } + end + + private + def build_rotation(secret = @secret, options) + self.class.new(secret, **options) + end + end + + private + def run_rotations(on_rotation) + @rotations.find do |rotation| + if message = yield(rotation) rescue next + on_rotation&.call + return message + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte.rb new file mode 100644 index 0000000..0366350 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module ActiveSupport # :nodoc: + module Multibyte + autoload :Chars, "active_support/multibyte/chars" + autoload :Unicode, "active_support/multibyte/unicode" + + # The proxy class returned when calling mb_chars. You can use this accessor + # to configure your own proxy class so you can support other encodings. See + # the ActiveSupport::Multibyte::Chars implementation for an example how to + # do this. + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + def self.proxy_class=(klass) + @proxy_class = klass + end + + # Returns the current proxy class. 
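+ # Defaults to ActiveSupport::Multibyte::Chars:
+ #
+ # ActiveSupport::Multibyte.proxy_class # => ActiveSupport::Multibyte::Chars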
+ def self.proxy_class + @proxy_class ||= ActiveSupport::Multibyte::Chars + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/chars.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/chars.rb new file mode 100644 index 0000000..79dda27 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/chars.rb @@ -0,0 +1,176 @@ +# frozen_string_literal: true + +require "active_support/json" +require "active_support/core_ext/string/access" +require "active_support/core_ext/string/behavior" +require "active_support/core_ext/module/delegation" + +module ActiveSupport # :nodoc: + module Multibyte # :nodoc: + # Chars enables you to work transparently with UTF-8 encoding in the Ruby + # String class without having extensive knowledge about the encoding. A + # Chars object accepts a string upon initialization and proxies String + # methods in an encoding safe manner. All the normal String methods are also + # implemented on the proxy. + # + # String methods are proxied through the Chars object, and can be accessed + # through the +mb_chars+ method. Methods which would normally return a + # String object now return a Chars object so methods can be chained. + # + # 'The Perfect String '.mb_chars.downcase.strip + # # => # + # + # Chars objects are perfectly interchangeable with String objects as long as + # no explicit class checks are made. If certain methods do explicitly check + # the class, call +to_s+ before you pass chars objects to them. + # + # bad.explicit_checking_method 'T'.mb_chars.downcase.to_s + # + # The default Chars implementation assumes that the encoding of the string + # is UTF-8, if you want to handle different encodings you can write your own + # multibyte string handler and configure it through + # ActiveSupport::Multibyte.proxy_class. + # + # class CharsForUTF32 + # def size + # @wrapped_string.size / 4 + # end + # + # def self.accepts?(string) + # string.length % 4 == 0 + # end + # end + # + # ActiveSupport::Multibyte.proxy_class = CharsForUTF32 + class Chars + include Comparable + attr_reader :wrapped_string + alias to_s wrapped_string + alias to_str wrapped_string + + delegate :<=>, :=~, :match?, :acts_like_string?, to: :wrapped_string + + # Creates a new Chars instance by wrapping _string_. + def initialize(string) + @wrapped_string = string + @wrapped_string.force_encoding(Encoding::UTF_8) unless @wrapped_string.frozen? + end + + # Forward all undefined methods to the wrapped string. + def method_missing(method, *args, &block) + result = @wrapped_string.__send__(method, *args, &block) + if method.end_with?("!") + self if result + else + result.kind_of?(String) ? chars(result) : result + end + end + + # Returns +true+ if _obj_ responds to the given method. Private methods + # are included in the search only if the optional second parameter + # evaluates to +true+. + def respond_to_missing?(method, include_private) + @wrapped_string.respond_to?(method, include_private) + end + + # Works just like String#split, with the exception that the items + # in the resulting list are Chars instances instead of String. This makes + # chaining methods easier. + # + # 'CafÊ pÊriferôl'.mb_chars.split(/Ê/).map { |part| part.upcase.to_s } # => ["CAF", " P", "RIFERÔL"] + def split(*args) + @wrapped_string.split(*args).map { |i| self.class.new(i) } + end + + # Works like String#slice!, but returns an instance of + # Chars, or +nil+ if the string was not modified. 
The string will not be + # modified if the range given is out of bounds + # + # string = 'Welcome' + # string.mb_chars.slice!(3) # => # + # string # => 'Welome' + # string.mb_chars.slice!(0..3) # => # + # string # => 'me' + def slice!(*args) + string_sliced = @wrapped_string.slice!(*args) + if string_sliced + chars(string_sliced) + end + end + + # Reverses all characters in the string. + # + # 'CafÊ'.mb_chars.reverse.to_s # => 'ÊfaC' + def reverse + chars(@wrapped_string.grapheme_clusters.reverse.join) + end + + # Limits the byte size of the string to a number of bytes without breaking + # characters. Usable when the storage for a string is limited for some + # reason. + # + # 'こんãĢãĄã¯'.mb_chars.limit(7).to_s # => "こん" + def limit(limit) + chars(@wrapped_string.truncate_bytes(limit, omission: nil)) + end + + # Capitalizes the first letter of every word, when possible. + # + # "ÉL QUE SE ENTERÓ".mb_chars.titleize.to_s # => "Él Que Se EnterÃŗ" + # "æ—ĨæœŦčĒž".mb_chars.titleize.to_s # => "æ—ĨæœŦčĒž" + def titleize + chars(downcase.to_s.gsub(/\b('?\S)/u) { $1.upcase }) + end + alias_method :titlecase, :titleize + + # Performs canonical decomposition on all the characters. + # + # 'Ê'.length # => 1 + # 'Ê'.mb_chars.decompose.to_s.length # => 2 + def decompose + chars(Unicode.decompose(:canonical, @wrapped_string.codepoints.to_a).pack("U*")) + end + + # Performs composition on all the characters. + # + # 'Ê'.length # => 1 + # 'Ê'.mb_chars.compose.to_s.length # => 1 + def compose + chars(Unicode.compose(@wrapped_string.codepoints.to_a).pack("U*")) + end + + # Returns the number of grapheme clusters in the string. + # + # 'ā¤•āĨā¤ˇā¤ŋ'.mb_chars.length # => 4 + # 'ā¤•āĨā¤ˇā¤ŋ'.mb_chars.grapheme_length # => 2 + def grapheme_length + @wrapped_string.grapheme_clusters.length + end + + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(force = false) + chars(Unicode.tidy_bytes(@wrapped_string, force)) + end + + def as_json(options = nil) # :nodoc: + to_s.as_json(options) + end + + %w(reverse tidy_bytes).each do |method| + define_method("#{method}!") do |*args| + @wrapped_string = public_send(method, *args).to_s + self + end + end + + private + def chars(string) + self.class.new(string) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/unicode.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/unicode.rb new file mode 100644 index 0000000..1c3e98b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/multibyte/unicode.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +module ActiveSupport + module Multibyte + module Unicode + extend self + + # The Unicode version that is supported by the implementation + UNICODE_VERSION = RbConfig::CONFIG["UNICODE_VERSION"] + + # Decompose composed characters to the decomposed form. + def decompose(type, codepoints) + if type == :compatibility + codepoints.pack("U*").unicode_normalize(:nfkd).codepoints + else + codepoints.pack("U*").unicode_normalize(:nfd).codepoints + end + end + + # Compose decomposed characters to the composed form. + def compose(codepoints) + codepoints.pack("U*").unicode_normalize(:nfc).codepoints + end + + # Rubinius' String#scrub, however, doesn't support ASCII-incompatible chars. 
+ if !defined?(Rubinius) + # Replaces all ISO-8859-1 or CP1252 characters by their UTF-8 equivalent + # resulting in a valid UTF-8 string. + # + # Passing +true+ will forcibly tidy all bytes, assuming that the string's + # encoding is entirely CP1252 or ISO-8859-1. + def tidy_bytes(string, force = false) + return string if string.empty? || string.ascii_only? + return recode_windows1252_chars(string) if force + string.scrub { |bad| recode_windows1252_chars(bad) } + end + else + def tidy_bytes(string, force = false) + return string if string.empty? + return recode_windows1252_chars(string) if force + + # We can't transcode to the same format, so we choose a nearly-identical encoding. + # We're going to 'transcode' bytes from UTF-8 when possible, then fall back to + # CP1252 when we get errors. The final string will be 'converted' back to UTF-8 + # before returning. + reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_16LE) + + source = string.dup + out = "".force_encoding(Encoding::UTF_16LE) + + loop do + reader.primitive_convert(source, out) + _, _, _, error_bytes, _ = reader.primitive_errinfo + break if error_bytes.nil? + out << error_bytes.encode(Encoding::UTF_16LE, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + + reader.finish + + out.encode!(Encoding::UTF_8) + end + end + + private + def recode_windows1252_chars(string) + string.encode(Encoding::UTF_8, Encoding::Windows_1252, invalid: :replace, undef: :replace) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications.rb new file mode 100644 index 0000000..f2bcf82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications.rb @@ -0,0 +1,280 @@ +# frozen_string_literal: true + +require "active_support/notifications/instrumenter" +require "active_support/notifications/fanout" + +module ActiveSupport + # = \Notifications + # + # ActiveSupport::Notifications provides an instrumentation API for + # Ruby. + # + # == Instrumenters + # + # To instrument an event you just need to do: + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # That first executes the block and then notifies all subscribers once done. + # + # In the example above +render+ is the name of the event, and the rest is called + # the _payload_. The payload is a mechanism that allows instrumenters to pass + # extra information to subscribers. Payloads consist of a hash whose contents + # are arbitrary and generally depend on the event. + # + # == Subscribers + # + # You can consume those events and the information they provide by registering + # a subscriber. + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # Here, the +start+ and +finish+ values represent wall-clock time. If you are + # concerned about accuracy, you can register a monotonic subscriber. 
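+ # Monotonic time is not subject to system clock adjustments, so durations
+ # computed from it are reliable: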
+ # + # ActiveSupport::Notifications.monotonic_subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Monotonic time, when the instrumented block started execution + # finish # => Monotonic time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # The +start+ and +finish+ values above represent monotonic time. + # + # For instance, let's store all "render" events in an array: + # + # events = [] + # + # ActiveSupport::Notifications.subscribe('render') do |*args| + # events << ActiveSupport::Notifications::Event.new(*args) + # end + # + # That code returns right away, you are just subscribing to "render" events. + # The block is saved and will be called whenever someone instruments "render": + # + # ActiveSupport::Notifications.instrument('render', extra: :information) do + # render plain: 'Foo' + # end + # + # event = events.first + # event.name # => "render" + # event.duration # => 10 (in milliseconds) + # event.payload # => { extra: :information } + # + # The block in the subscribe call gets the name of the event, start + # timestamp, end timestamp, a string with a unique identifier for that event's instrumenter + # (something like "535801666f04d0298cd6"), and a hash with the payload, in + # that order. + # + # If an exception happens during that particular instrumentation the payload will + # have a key :exception with an array of two elements as value: a string with + # the name of the exception class, and the exception message. + # The :exception_object key of the payload will have the exception + # itself as the value: + # + # event.payload[:exception] # => ["ArgumentError", "Invalid value"] + # event.payload[:exception_object] # => # + # + # As the earlier example depicts, the class ActiveSupport::Notifications::Event + # is able to take the arguments as they come and provide an object-oriented + # interface to that data. + # + # It is also possible to pass an object which responds to call method + # as the second parameter to the subscribe method instead of a block: + # + # module ActionController + # class PageRequest + # def call(name, started, finished, unique_id, payload) + # Rails.logger.debug ['notification:', name, started, finished, unique_id, payload].join(' ') + # end + # end + # end + # + # ActiveSupport::Notifications.subscribe('process_action.action_controller', ActionController::PageRequest.new) + # + # resulting in the following output within the logs including a hash with the payload: + # + # notification: process_action.action_controller 2012-04-13 01:08:35 +0300 2012-04-13 01:08:35 +0300 af358ed7fab884532ec7 { + # controller: "Devise::SessionsController", + # action: "new", + # params: {"action"=>"new", "controller"=>"devise/sessions"}, + # format: :html, + # method: "GET", + # path: "/login/sign_in", + # status: 200, + # view_runtime: 279.3080806732178, + # db_runtime: 40.053 + # } + # + # You can also subscribe to all events whose name matches a certain regexp: + # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # ... + # end + # + # and even pass no argument to subscribe, in which case you are subscribing + # to all events. + # + # == Temporary Subscriptions + # + # Sometimes you do not want to subscribe to an event for the entire life of + # the application. There are two ways to unsubscribe. 
+ # + # WARNING: The instrumentation framework is designed for long-running subscribers, + # use this feature sparingly because it wipes some internal caches and that has + # a negative impact on performance. + # + # === Subscribe While a Block Runs + # + # You can subscribe to some event temporarily while some block runs. For + # example, in + # + # callback = lambda {|*args| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record") do + # ... + # end + # + # the callback will be called for all "sql.active_record" events instrumented + # during the execution of the block. The callback is unsubscribed automatically + # after that. + # + # To record +started+ and +finished+ values with monotonic time, + # specify the optional :monotonic option to the + # subscribed method. The :monotonic option is set + # to +false+ by default. + # + # callback = lambda {|name, started, finished, unique_id, payload| ... } + # ActiveSupport::Notifications.subscribed(callback, "sql.active_record", monotonic: true) do + # ... + # end + # + # === Manual Unsubscription + # + # The +subscribe+ method returns a subscriber object: + # + # subscriber = ActiveSupport::Notifications.subscribe("render") do |*args| + # ... + # end + # + # To prevent that block from being called anymore, just unsubscribe passing + # that reference: + # + # ActiveSupport::Notifications.unsubscribe(subscriber) + # + # You can also unsubscribe by passing the name of the subscriber object. Note + # that this will unsubscribe all subscriptions with the given name: + # + # ActiveSupport::Notifications.unsubscribe("render") + # + # Subscribers using a regexp or other pattern-matching object will remain subscribed + # to all events that match their original pattern, unless those events match a string + # passed to +unsubscribe+: + # + # subscriber = ActiveSupport::Notifications.subscribe(/render/) { } + # ActiveSupport::Notifications.unsubscribe('render_template.action_view') + # subscriber.matches?('render_template.action_view') # => false + # subscriber.matches?('render_partial.action_view') # => true + # + # == Default Queue + # + # Notifications ships with a queue implementation that consumes and publishes events + # to all log subscribers. You can use any queue implementation you want. + # + module Notifications + class << self + attr_accessor :notifier + + def publish(name, *args) + notifier.publish(name, *args) + end + + def publish_event(event) # :nodoc: + notifier.publish_event(event) + end + + def instrument(name, payload = {}) + if notifier.listening?(name) + instrumenter.instrument(name, payload) { yield payload if block_given? } + else + yield payload if block_given? + end + end + + # Subscribe to a given event name with the passed +block+. + # + # You can subscribe to events by passing a String to match exact event + # names, or by passing a Regexp to match all events that match a pattern. 
+ # + # ActiveSupport::Notifications.subscribe(/render/) do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # The +block+ will receive five parameters with information about the event: + # + # ActiveSupport::Notifications.subscribe('render') do |name, start, finish, id, payload| + # name # => String, name of the event (such as 'render' from above) + # start # => Time, when the instrumented block started execution + # finish # => Time, when the instrumented block ended execution + # id # => String, unique ID for the instrumenter that fired the event + # payload # => Hash, the payload + # end + # + # If the block passed to the method only takes one parameter, + # it will yield an event object to the block: + # + # ActiveSupport::Notifications.subscribe(/render/) do |event| + # @event = event + # end + # + # Raises an error if invalid event name type is passed: + # + # ActiveSupport::Notifications.subscribe(:render) {|*args| ...} + # #=> ArgumentError (pattern must be specified as a String, Regexp or empty) + # + def subscribe(pattern = nil, callback = nil, &block) + notifier.subscribe(pattern, callback, monotonic: false, &block) + end + + # Performs the same functionality as #subscribe, but the +start+ and + # +finish+ block arguments are in monotonic time instead of wall-clock + # time. Monotonic time will not jump forward or backward (due to NTP or + # Daylights Savings). Use +monotonic_subscribe+ when accuracy of time + # duration is important. For example, computing elapsed time between + # two events. + def monotonic_subscribe(pattern = nil, callback = nil, &block) + notifier.subscribe(pattern, callback, monotonic: true, &block) + end + + def subscribed(callback, pattern = nil, monotonic: false, &block) + subscriber = notifier.subscribe(pattern, callback, monotonic: monotonic) + yield + ensure + unsubscribe(subscriber) + end + + def unsubscribe(subscriber_or_name) + notifier.unsubscribe(subscriber_or_name) + end + + def instrumenter + registry[notifier] ||= Instrumenter.new(notifier) + end + + private + def registry + ActiveSupport::IsolatedExecutionState[:active_support_notifications_registry] ||= {} + end + end + + self.notifier = Fanout.new + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/fanout.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/fanout.rb new file mode 100644 index 0000000..0759d3a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/fanout.rb @@ -0,0 +1,285 @@ +# frozen_string_literal: true + +require "mutex_m" +require "concurrent/map" +require "set" +require "active_support/core_ext/object/try" + +module ActiveSupport + module Notifications + class InstrumentationSubscriberError < RuntimeError + attr_reader :exceptions + + def initialize(exceptions) + @exceptions = exceptions + exception_class_names = exceptions.map { |e| e.class.name } + super "Exception(s) occurred within instrumentation subscribers: #{exception_class_names.join(', ')}" + end + end + + # This is a default queue implementation that ships with Notifications. + # It just pushes events to all registered log subscribers. + # + # This class is thread safe. All methods are reentrant. 
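+ #
+ # A minimal sketch of direct use (normally you would go through
+ # ActiveSupport::Notifications rather than instantiate Fanout yourself):
+ #
+ # fanout = ActiveSupport::Notifications::Fanout.new
+ # fanout.subscribe("render") { |*args| p args }
+ # fanout.publish("render", :payload) # prints ["render", :payload]
+ # fanout.listening?("render") # => true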
+ class Fanout + include Mutex_m + + def initialize + @string_subscribers = Hash.new { |h, k| h[k] = [] } + @other_subscribers = [] + @listeners_for = Concurrent::Map.new + super + end + + def subscribe(pattern = nil, callable = nil, monotonic: false, &block) + subscriber = Subscribers.new(pattern, callable || block, monotonic) + synchronize do + case pattern + when String + @string_subscribers[pattern] << subscriber + @listeners_for.delete(pattern) + when NilClass, Regexp + @other_subscribers << subscriber + @listeners_for.clear + else + raise ArgumentError, "pattern must be specified as a String, Regexp or empty" + end + end + subscriber + end + + def unsubscribe(subscriber_or_name) + synchronize do + case subscriber_or_name + when String + @string_subscribers[subscriber_or_name].clear + @listeners_for.delete(subscriber_or_name) + @other_subscribers.each { |sub| sub.unsubscribe!(subscriber_or_name) } + else + pattern = subscriber_or_name.try(:pattern) + if String === pattern + @string_subscribers[pattern].delete(subscriber_or_name) + @listeners_for.delete(pattern) + else + @other_subscribers.delete(subscriber_or_name) + @listeners_for.clear + end + end + end + end + + def start(name, id, payload) + iterate_guarding_exceptions(listeners_for(name)) { |s| s.start(name, id, payload) } + end + + def finish(name, id, payload, listeners = listeners_for(name)) + iterate_guarding_exceptions(listeners) { |s| s.finish(name, id, payload) } + end + + def publish(name, *args) + iterate_guarding_exceptions(listeners_for(name)) { |s| s.publish(name, *args) } + end + + def publish_event(event) + iterate_guarding_exceptions(listeners_for(event.name)) { |s| s.publish_event(event) } + end + + def iterate_guarding_exceptions(listeners) + exceptions = nil + + listeners.each do |s| + yield s + rescue Exception => e + exceptions ||= [] + exceptions << e + end + + if exceptions + if exceptions.size == 1 + raise exceptions.first + else + raise InstrumentationSubscriberError.new(exceptions), cause: exceptions.first + end + end + + listeners + end + + def listeners_for(name) + # this is correctly done double-checked locking (Concurrent::Map's lookups have volatile semantics) + @listeners_for[name] || synchronize do + # use synchronisation when accessing @subscribers + @listeners_for[name] ||= + @string_subscribers[name] + @other_subscribers.select { |s| s.subscribed_to?(name) } + end + end + + def listening?(name) + listeners_for(name).any? + end + + # This is a sync queue, so there is no waiting. + def wait + end + + module Subscribers # :nodoc: + def self.new(pattern, listener, monotonic) + subscriber_class = monotonic ? MonotonicTimed : Timed + + if listener.respond_to?(:start) && listener.respond_to?(:finish) + subscriber_class = Evented + else + # Doing this to detect a single argument block or callable + # like `proc { |x| }` vs `proc { |*x| }`, `proc { |**x| }`, + # or `proc { |x, **y| }` + procish = listener.respond_to?(:parameters) ? listener : listener.method(:call) + + if procish.arity == 1 && procish.parameters.length == 1 + subscriber_class = EventObject + end + end + + subscriber_class.new(pattern, listener) + end + + class Matcher # :nodoc: + attr_reader :pattern, :exclusions + + def self.wrap(pattern) + if String === pattern + pattern + elsif pattern.nil? 
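+ # A nil pattern means the subscriber listens to every event.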
+ AllMessages.new + else + new(pattern) + end + end + + def initialize(pattern) + @pattern = pattern + @exclusions = Set.new + end + + def unsubscribe!(name) + exclusions << -name if pattern === name + end + + def ===(name) + pattern === name && !exclusions.include?(name) + end + + class AllMessages + def ===(name) + true + end + + def unsubscribe!(*) + false + end + end + end + + class Evented # :nodoc: + attr_reader :pattern + + def initialize(pattern, delegate) + @pattern = Matcher.wrap(pattern) + @delegate = delegate + @can_publish = delegate.respond_to?(:publish) + @can_publish_event = delegate.respond_to?(:publish_event) + end + + def publish(name, *args) + if @can_publish + @delegate.publish name, *args + end + end + + def publish_event(event) + if @can_publish_event + @delegate.publish_event event + else + publish(event.name, event.time, event.end, event.transaction_id, event.payload) + end + end + + def start(name, id, payload) + @delegate.start name, id, payload + end + + def finish(name, id, payload) + @delegate.finish name, id, payload + end + + def subscribed_to?(name) + pattern === name + end + + def unsubscribe!(name) + pattern.unsubscribe!(name) + end + end + + class Timed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = IsolatedExecutionState[:_timestack] ||= [] + timestack.push Time.now + end + + def finish(name, id, payload) + timestack = IsolatedExecutionState[:_timestack] + started = timestack.pop + @delegate.call(name, started, Time.now, id, payload) + end + end + + class MonotonicTimed < Evented # :nodoc: + def publish(name, *args) + @delegate.call name, *args + end + + def start(name, id, payload) + timestack = IsolatedExecutionState[:_timestack_monotonic] ||= [] + timestack.push Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + + def finish(name, id, payload) + timestack = IsolatedExecutionState[:_timestack_monotonic] + started = timestack.pop + @delegate.call(name, started, Process.clock_gettime(Process::CLOCK_MONOTONIC), id, payload) + end + end + + class EventObject < Evented + def start(name, id, payload) + stack = IsolatedExecutionState[:_event_stack] ||= [] + event = build_event name, id, payload + event.start! + stack.push event + end + + def finish(name, id, payload) + stack = IsolatedExecutionState[:_event_stack] + event = stack.pop + event.payload = payload + event.finish! + @delegate.call event + end + + def publish_event(event) + @delegate.call event + end + + private + def build_event(name, id, payload) + ActiveSupport::Notifications::Event.new name, nil, nil, id, payload + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/instrumenter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/instrumenter.rb new file mode 100644 index 0000000..c69e8cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/notifications/instrumenter.rb @@ -0,0 +1,172 @@ +# frozen_string_literal: true + +require "securerandom" + +module ActiveSupport + module Notifications + # Instrumenters are stored in a thread local. + class Instrumenter + attr_reader :id + + def initialize(notifier) + @id = unique_id + @notifier = notifier + end + + # Given a block, instrument it by measuring the time taken to execute + # and publish it. Without a block, simply send a message via the + # notifier. 
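+ # A short sketch of both forms ("render" is an illustrative event name): + # + # instrumenter = ActiveSupport::Notifications.instrumenter + # instrumenter.instrument("render", extra: :information) do + # # expensive work measured here + # end + # instrumenter.instrument("render", extra: :information) # no block: just publishes + #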
Notice that events get sent even if an error occurs in the + # passed-in block. + def instrument(name, payload = {}) + # some of the listeners might have state + listeners_state = start name, payload + begin + yield payload if block_given? + rescue Exception => e + payload[:exception] = [e.class.name, e.message] + payload[:exception_object] = e + raise e + ensure + finish_with_state listeners_state, name, payload + end + end + + def new_event(name, payload = {}) # :nodoc: + Event.new(name, nil, nil, @id, payload) + end + + # Send a start notification with +name+ and +payload+. + def start(name, payload) + @notifier.start name, @id, payload + end + + # Send a finish notification with +name+ and +payload+. + def finish(name, payload) + @notifier.finish name, @id, payload + end + + def finish_with_state(listeners_state, name, payload) + @notifier.finish name, @id, payload, listeners_state + end + + private + def unique_id + SecureRandom.hex(10) + end + end + + class Event + attr_reader :name, :time, :end, :transaction_id, :children + attr_accessor :payload + + def initialize(name, start, ending, transaction_id, payload) + @name = name + @payload = payload.dup + @time = start ? start.to_f * 1_000.0 : start + @transaction_id = transaction_id + @end = ending ? ending.to_f * 1_000.0 : ending + @children = [] + @cpu_time_start = 0.0 + @cpu_time_finish = 0.0 + @allocation_count_start = 0 + @allocation_count_finish = 0 + end + + def record + start! + begin + yield payload if block_given? + rescue Exception => e + payload[:exception] = [e.class.name, e.message] + payload[:exception_object] = e + raise e + ensure + finish! + end + end + + # Record information at the time this event starts + def start! + @time = now + @cpu_time_start = now_cpu + @allocation_count_start = now_allocations + end + + # Record information at the time this event finishes + def finish! + @cpu_time_finish = now_cpu + @end = now + @allocation_count_finish = now_allocations + end + + # Returns the CPU time (in milliseconds) elapsed between the call to + # +start!+ and the call to +finish!+ + def cpu_time + @cpu_time_finish - @cpu_time_start + end + + # Returns the idle time (in milliseconds) elapsed between the call to + # +start!+ and the call to +finish!+ + def idle_time + duration - cpu_time + end + + # Returns the number of allocations made between the call to +start!+ and + # the call to +finish!+ + def allocations + @allocation_count_finish - @allocation_count_start + end + + # Returns the difference in milliseconds between when the execution of the + # event started and when it ended. + # + # ActiveSupport::Notifications.subscribe('wait') do |*args| + # @event = ActiveSupport::Notifications::Event.new(*args) + # end + # + # ActiveSupport::Notifications.instrument('wait') do + # sleep 1 + # end + # + # @event.duration # => 1000.138 + def duration + self.end - time + end + + def <<(event) + @children << event + end + + def parent_of?(event) + @children.include?
event + end + + private + def now + Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) + end + + begin + Process.clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID, :float_millisecond) + + def now_cpu + Process.clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID, :float_millisecond) + end + rescue + def now_cpu # rubocop:disable Lint/DuplicateMethods + 0.0 + end + end + + if GC.stat.key?(:total_allocated_objects) + def now_allocations + GC.stat(:total_allocated_objects) + end + else # Likely on JRuby, TruffleRuby + def now_allocations + 0 + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper.rb new file mode 100644 index 0000000..8b66e3c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper.rb @@ -0,0 +1,395 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + extend ActiveSupport::Autoload + + eager_autoload do + autoload :NumberConverter + autoload :RoundingHelper + autoload :NumberToRoundedConverter + autoload :NumberToDelimitedConverter + autoload :NumberToHumanConverter + autoload :NumberToHumanSizeConverter + autoload :NumberToPhoneConverter + autoload :NumberToCurrencyConverter + autoload :NumberToPercentageConverter + end + + extend self + + # Formats a +number+ into a phone number (US by default e.g., (555) + # 123-9876). You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :area_code - Adds parentheses around the area code. + # * :delimiter - Specifies the delimiter to use + # (defaults to "-"). + # * :extension - Specifies an extension to add to the + # end of the generated number. + # * :country_code - Sets the country code for the phone + # number. + # * :pattern - Specifies how the number is divided into three + # groups with the custom regexp to override the default format. + # ==== Examples + # + # number_to_phone(5551234) # => "555-1234" + # number_to_phone('5551234') # => "555-1234" + # number_to_phone(1235551234) # => "123-555-1234" + # number_to_phone(1235551234, area_code: true) # => "(123) 555-1234" + # number_to_phone(1235551234, delimiter: ' ') # => "123 555 1234" + # number_to_phone(1235551234, area_code: true, extension: 555) # => "(123) 555-1234 x 555" + # number_to_phone(1235551234, country_code: 1) # => "+1-123-555-1234" + # number_to_phone('123a456') # => "123a456" + # + # number_to_phone(1235551234, country_code: 1, extension: 1343, delimiter: '.') + # # => "+1.123.555.1234 x 1343" + # + # number_to_phone(75561234567, pattern: /(\d{1,4})(\d{4})(\d{4})$/, area_code: true) + # # => "(755) 6123-4567" + # number_to_phone(13312345678, pattern: /(\d{3})(\d{4})(\d{4})$/) + # # => "133-1234-5678" + def number_to_phone(number, options = {}) + NumberToPhoneConverter.convert(number, options) + end + + # Formats a +number+ into a currency string (e.g., $13.65). You + # can customize the format in the +options+ hash. + # + # The currency unit and number formatting of the current locale will be used + # unless otherwise specified in the provided options. No currency conversion + # is performed. If the user is given a way to change their locale, they will + # also be able to change the relative value of the currency displayed with + # this helper. 
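+ # For instance (exact output depends on the locale data your application loads): + # + # number_to_currency(100, locale: :en) # => "$100.00" + # number_to_currency(100, locale: :fr) # => "100,00 €" + #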
If your application will ever support multiple locales, you + # may want to specify a constant :locale option or consider + # using a library capable of currency conversion. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the level of precision (defaults + # to 2). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :unit - Sets the denomination of the currency + # (defaults to "$"). + # * :separator - Sets the separator between the units + # (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :format - Sets the format for non-negative numbers + # (defaults to "%u%n"). Fields are %u for the + # currency, and %n for the number. + # * :negative_format - Sets the format for negative + # numbers (defaults to prepending a hyphen to the formatted + # number given by :format). Accepts the same fields + # as :format, except that %n here is the + # absolute value of the number. + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # + # ==== Examples + # + # number_to_currency(1234567890.50) # => "$1,234,567,890.50" + # number_to_currency(1234567890.506) # => "$1,234,567,890.51" + # number_to_currency(1234567890.506, precision: 3) # => "$1,234,567,890.506" + # number_to_currency(1234567890.506, locale: :fr) # => "1 234 567 890,51 €" + # number_to_currency('123a456') # => "$123a456" + # + # number_to_currency(-0.456789, precision: 0) + # # => "$0" + # number_to_currency(-1234567890.50, negative_format: '(%u%n)') + # # => "($1,234,567,890.50)" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '') + # # => "£1234567890,50" + # number_to_currency(1234567890.50, unit: '£', separator: ',', delimiter: '', format: '%n %u') + # # => "1234567890,50 £" + # number_to_currency(1234567890.50, strip_insignificant_zeros: true) + # # => "$1,234,567,890.5" + # number_to_currency(1234567890.50, precision: 0, round_mode: :up) + # # => "$1,234,567,891" + def number_to_currency(number, options = {}) + NumberToCurrencyConverter.convert(number, options) + end + + # Formats a +number+ as a percentage string (e.g., 65%). You can + # customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). + # * :format - Specifies the format of the percentage + # string. The number field is %n (defaults to "%n%").
+ # + # ==== Examples + # + # number_to_percentage(100) # => "100.000%" + # number_to_percentage('98') # => "98.000%" + # number_to_percentage(100, precision: 0) # => "100%" + # number_to_percentage(1000, delimiter: '.', separator: ',') # => "1.000,000%" + # number_to_percentage(302.24398923423, precision: 5) # => "302.24399%" + # number_to_percentage(1000, locale: :fr) # => "1000,000%" + # number_to_percentage(1000, precision: nil) # => "1000%" + # number_to_percentage('98a') # => "98a%" + # number_to_percentage(100, format: '%n %') # => "100.000 %" + # number_to_percentage(302.24398923423, precision: 5, round_mode: :down) # => "302.24398%" + def number_to_percentage(number, options = {}) + NumberToPercentageConverter.convert(number, options) + end + + # Formats a +number+ with grouped thousands using +delimiter+ + # (e.g., 12,324). You can customize the format in the +options+ + # hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :delimiter - Sets the thousands delimiter (defaults + # to ","). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter_pattern - Sets a custom regular expression used for + # deriving the placement of delimiter. Helpful when using currency formats + # like INR. + # + # ==== Examples + # + # number_to_delimited(12345678) # => "12,345,678" + # number_to_delimited('123456') # => "123,456" + # number_to_delimited(12345678.05) # => "12,345,678.05" + # number_to_delimited(12345678, delimiter: '.') # => "12.345.678" + # number_to_delimited(12345678, delimiter: ',') # => "12,345,678" + # number_to_delimited(12345678.05, separator: ' ') # => "12,345,678 05" + # number_to_delimited(12345678.05, locale: :fr) # => "12 345 678,05" + # number_to_delimited('112a') # => "112a" + # number_to_delimited(98765432.98, delimiter: ' ', separator: ',') + # # => "98 765 432,98" + # number_to_delimited("123456.78", + # delimiter_pattern: /(\d+?)(?=(\d\d)+(\d)(?!\d))/) + # # => "1,23,456.78" + def number_to_delimited(number, options = {}) + NumberToDelimitedConverter.convert(number, options) + end + + # Formats a +number+ with the specified level of + # :precision (e.g., 112.32 has a precision of 2 if + # +:significant+ is +false+, and 5 if +:significant+ is +true+). + # You can customize the format in the +options+ hash. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). Keeps the number's precision if +nil+. + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +false+). + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +false+). 
+ # + # ==== Examples + # + # number_to_rounded(111.2345) # => "111.235" + # number_to_rounded(111.2345, precision: 2) # => "111.23" + # number_to_rounded(13, precision: 5) # => "13.00000" + # number_to_rounded(389.32314, precision: 0) # => "389" + # number_to_rounded(111.2345, significant: true) # => "111" + # number_to_rounded(111.2345, precision: 1, significant: true) # => "100" + # number_to_rounded(13, precision: 5, significant: true) # => "13.000" + # number_to_rounded(13, precision: nil) # => "13" + # number_to_rounded(389.32314, precision: 0, round_mode: :up) # => "390" + # number_to_rounded(111.234, locale: :fr) # => "111,234" + # + # number_to_rounded(13, precision: 5, significant: true, strip_insignificant_zeros: true) + # # => "13" + # + # number_to_rounded(389.32314, precision: 4, significant: true) # => "389.3" + # number_to_rounded(1111.2345, precision: 2, separator: ',', delimiter: '.') + # # => "1.111,23" + def number_to_rounded(number, options = {}) + NumberToRoundedConverter.convert(number, options) + end + + # Formats the bytes in +number+ into a more understandable + # representation (e.g., giving it 1500 yields 1.46 KB). This + # method is useful for reporting file sizes to users. You can + # customize the format in the +options+ hash. + # + # See number_to_human if you want to pretty-print a + # generic number. + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # + # ==== Examples + # + # number_to_human_size(123) # => "123 Bytes" + # number_to_human_size(1234) # => "1.21 KB" + # number_to_human_size(12345) # => "12.1 KB" + # number_to_human_size(1234567) # => "1.18 MB" + # number_to_human_size(1234567890) # => "1.15 GB" + # number_to_human_size(1234567890123) # => "1.12 TB" + # number_to_human_size(1234567890123456) # => "1.1 PB" + # number_to_human_size(1234567890123456789) # => "1.07 EB" + # number_to_human_size(1234567, precision: 2) # => "1.2 MB" + # number_to_human_size(483989, precision: 2) # => "470 KB" + # number_to_human_size(483989, precision: 2, round_mode: :up) # => "480 KB" + # number_to_human_size(1234567, precision: 2, separator: ',') # => "1,2 MB" + # number_to_human_size(1234567890123, precision: 5) # => "1.1228 TB" + # number_to_human_size(524288000, precision: 5) # => "500 MB" + def number_to_human_size(number, options = {}) + NumberToHumanSizeConverter.convert(number, options) + end + + # Pretty prints (formats and approximates) a number in a way it + # is more readable by humans (e.g.: 1200000000 becomes "1.2 + # Billion"). This is useful for numbers that can get very large + # (and too hard to read). + # + # See number_to_human_size if you want to print a file + # size. + # + # You can also define your own unit-quantifier names if you want + # to use other decimal units (e.g.: 1500 becomes "1.5 + # kilometers", 0.150 becomes "150 milliliters", etc). 
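+ # For instance (the unit names are illustrative): + # + # number_to_human(0.15, units: { unit: "l", mili: "ml" }) # => "150 ml" + #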
You may + # define a wide range of unit quantifiers, even fractional ones + # (centi, deci, mili, etc). + # + # ==== Options + # + # * :locale - Sets the locale to be used for formatting + # (defaults to current locale). + # * :precision - Sets the precision of the number + # (defaults to 3). + # * :round_mode - Determine how rounding is performed + # (defaults to :default. See BigDecimal::mode) + # * :significant - If +true+, precision will be the number + # of significant_digits. If +false+, the number of fractional + # digits (defaults to +true+) + # * :separator - Sets the separator between the + # fractional and integer digits (defaults to "."). + # * :delimiter - Sets the thousands delimiter (defaults + # to ""). + # * :strip_insignificant_zeros - If +true+ removes + # insignificant zeros after the decimal separator (defaults to + # +true+) + # * :units - A Hash of unit quantifier names. Or a + # string containing an i18n scope where to find this hash. It + # might have the following keys: + # * *integers*: :unit, :ten, + # :hundred, :thousand, :million, + # :billion, :trillion, + # :quadrillion + # * *fractionals*: :deci, :centi, + # :mili, :micro, :nano, + # :pico, :femto + # * :format - Sets the format of the output string + # (defaults to "%n %u"). The field types are: + # * %u - The quantifier (ex.: 'thousand') + # * %n - The number + # + # ==== Examples + # + # number_to_human(123) # => "123" + # number_to_human(1234) # => "1.23 Thousand" + # number_to_human(12345) # => "12.3 Thousand" + # number_to_human(1234567) # => "1.23 Million" + # number_to_human(1234567890) # => "1.23 Billion" + # number_to_human(1234567890123) # => "1.23 Trillion" + # number_to_human(1234567890123456) # => "1.23 Quadrillion" + # number_to_human(1234567890123456789) # => "1230 Quadrillion" + # number_to_human(489939, precision: 2) # => "490 Thousand" + # number_to_human(489939, precision: 4) # => "489.9 Thousand" + # number_to_human(489939, precision: 2 + # , round_mode: :down) # => "480 Thousand" + # number_to_human(1234567, precision: 4, + # significant: false) # => "1.2346 Million" + # number_to_human(1234567, precision: 1, + # separator: ',', + # significant: false) # => "1,2 Million" + # + # number_to_human(500000000, precision: 5) # => "500 Million" + # number_to_human(12345012345, significant: false) # => "12.345 Billion" + # + # Non-significant zeros after the decimal separator are stripped + # out by default (set :strip_insignificant_zeros to + # +false+ to change that): + # + # number_to_human(12.00001) # => "12" + # number_to_human(12.00001, strip_insignificant_zeros: false) # => "12.0" + # + # ==== Custom Unit Quantifiers + # + # You can also use your own custom unit quantifiers: + # number_to_human(500000, units: { unit: 'ml', thousand: 'lt' }) # => "500 lt" + # + # If in your I18n locale you have: + # + # distance: + # centi: + # one: "centimeter" + # other: "centimeters" + # unit: + # one: "meter" + # other: "meters" + # thousand: + # one: "kilometer" + # other: "kilometers" + # billion: "gazillion-distance" + # + # Then you could do: + # + # number_to_human(543934, units: :distance) # => "544 kilometers" + # number_to_human(54393498, units: :distance) # => "54400 kilometers" + # number_to_human(54393498000, units: :distance) # => "54.4 gazillion-distance" + # number_to_human(343, units: :distance, precision: 1) # => "300 meters" + # number_to_human(1, units: :distance) # => "1 meter" + # number_to_human(0.34, units: :distance) # => "34 centimeters" + def number_to_human(number, options = 
{}) + NumberToHumanConverter.convert(number, options) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_converter.rb new file mode 100644 index 0000000..168b1a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_converter.rb @@ -0,0 +1,181 @@ +# frozen_string_literal: true + +require "active_support/core_ext/big_decimal/conversions" +require "active_support/core_ext/object/blank" +require "active_support/core_ext/hash/keys" +require "active_support/i18n" +require "active_support/core_ext/class/attribute" + +module ActiveSupport + module NumberHelper + class NumberConverter # :nodoc: + # Default and i18n option namespace per class + class_attribute :namespace + + # Does the object need a number that is a valid float? + class_attribute :validate_float + + attr_reader :number, :opts + + DEFAULTS = { + # Used in number_to_delimited + # These are also the defaults for 'currency', 'percentage', 'precision', and 'human' + format: { + # Sets the separator between the units, for more precision (e.g. 1.0 / 2.0 == 0.5) + separator: ".", + # Delimits thousands (e.g. 1,000,000 is a million) (always in groups of three) + delimiter: ",", + # Number of decimals, behind the separator (the number 1 with a precision of 2 gives: 1.00) + precision: 3, + # If set to true, precision will mean the number of significant digits instead + # of the number of decimal digits (1234 with precision 2 becomes 1200, 1.23543 becomes 1.2) + significant: false, + # If set, the zeros after the decimal separator will always be stripped (e.g.: 1.200 will be 1.2) + strip_insignificant_zeros: false + }, + + # Used in number_to_currency + currency: { + format: { + format: "%u%n", + negative_format: "-%u%n", + unit: "$", + # These five are to override number.format and are optional + separator: ".", + delimiter: ",", + precision: 2, + significant: false, + strip_insignificant_zeros: false + } + }, + + # Used in number_to_percentage + percentage: { + format: { + delimiter: "", + format: "%n%" + } + }, + + # Used in number_to_rounded + precision: { + format: { + delimiter: "" + } + }, + + # Used in number_to_human_size and number_to_human + human: { + format: { + # These five are to override number.format and are optional + delimiter: "", + precision: 3, + significant: true, + strip_insignificant_zeros: true + }, + # Used in number_to_human_size + storage_units: { + # Storage units output formatting. + # %u is the storage unit, %n is the number (default: 2 MB) + format: "%n %u", + units: { + byte: "Bytes", + kb: "KB", + mb: "MB", + gb: "GB", + tb: "TB" + } + }, + # Used in number_to_human + decimal_units: { + format: "%n %u", + # Decimal units output formatting + # By default we will only quantify some of the exponents + # but the commented ones might be defined or overridden + # by the user. 
+ units: { + # femto: Quadrillionth + # pico: Trillionth + # nano: Billionth + # micro: Millionth + # mili: Thousandth + # centi: Hundredth + # deci: Tenth + unit: "", + # ten: + # one: Ten + # other: Tens + # hundred: Hundred + thousand: "Thousand", + million: "Million", + billion: "Billion", + trillion: "Trillion", + quadrillion: "Quadrillion" + } + } + } + } + + def self.convert(number, options) + new(number, options).execute + end + + def initialize(number, options) + @number = number + @opts = options.symbolize_keys + end + + def execute + if !number + nil + elsif validate_float? && !valid_float? + number + else + convert + end + end + + private + def options + @options ||= format_options.merge(opts) + end + + def format_options + default_format_options.merge!(i18n_format_options) + end + + def default_format_options + options = DEFAULTS[:format].dup + options.merge!(DEFAULTS[namespace][:format]) if namespace + options + end + + def i18n_format_options + locale = opts[:locale] + options = I18n.translate(:'number.format', locale: locale, default: {}).dup + + if namespace + options.merge!(I18n.translate(:"number.#{namespace}.format", locale: locale, default: {})) + end + + options + end + + def translate_number_value_with_default(key, **i18n_options) + I18n.translate(key, **{ default: default_value(key), scope: :number }.merge!(i18n_options)) + end + + def translate_in_locale(key, **i18n_options) + translate_number_value_with_default(key, **{ locale: options[:locale] }.merge(i18n_options)) + end + + def default_value(key) + key.split(".").reduce(DEFAULTS) { |defaults, k| defaults[k.to_sym] } + end + + def valid_float? + Float(number, exception: false) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_currency_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_currency_converter.rb new file mode 100644 index 0000000..241ec90 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_currency_converter.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToCurrencyConverter < NumberConverter # :nodoc: + self.namespace = :currency + + def convert + format = options[:format] + + number_f = valid_float? + if number_f + if number_f.negative? 
+ number_f = number_f.abs + format = options[:negative_format] if (number_f * 10**options[:precision]) >= 0.5 + end + number_s = NumberToRoundedConverter.convert(number_f, options) + else + number_s = number.to_s.strip + format = options[:negative_format] if number_s.sub!(/^-/, "") + end + + format.gsub("%n", number_s).gsub("%u", options[:unit]) + end + + private + def options + @options ||= begin + defaults = default_format_options.merge(i18n_opts) + # Override negative format if format options are given + defaults[:negative_format] = "-#{opts[:format]}" if opts[:format] + defaults.merge!(opts) + end + end + + def i18n_opts + # Set International negative format if it does not exist + i18n = i18n_format_options + i18n[:negative_format] ||= "-#{i18n[:format]}" if i18n[:format] + i18n + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_delimited_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_delimited_converter.rb new file mode 100644 index 0000000..4fb2fb7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_delimited_converter.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToDelimitedConverter < NumberConverter # :nodoc: + self.validate_float = true + + DEFAULT_DELIMITER_REGEX = /(\d)(?=(\d\d\d)+(?!\d))/ + + def convert + parts.join(options[:separator]) + end + + private + def parts + left, right = number.to_s.split(".") + left.gsub!(delimiter_pattern) do |digit_to_delimit| + "#{digit_to_delimit}#{options[:delimiter]}" + end + [left, right].compact + end + + def delimiter_pattern + options.fetch(:delimiter_pattern, DEFAULT_DELIMITER_REGEX) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_converter.rb new file mode 100644 index 0000000..3f92628 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_converter.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToHumanConverter < NumberConverter # :nodoc: + DECIMAL_UNITS = { 0 => :unit, 1 => :ten, 2 => :hundred, 3 => :thousand, 6 => :million, 9 => :billion, 12 => :trillion, 15 => :quadrillion, + -1 => :deci, -2 => :centi, -3 => :mili, -6 => :micro, -9 => :nano, -12 => :pico, -15 => :femto } + INVERTED_DECIMAL_UNITS = DECIMAL_UNITS.invert + + self.namespace = :human + self.validate_float = true + + def convert # :nodoc: + @number = RoundingHelper.new(options).round(number) + @number = Float(number) + + # For backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files. 
+ unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + units = opts[:units] + exponent = calculate_exponent(units) + @number = number / (10**exponent) + + rounded_number = NumberToRoundedConverter.convert(number, options) + unit = determine_unit(units, exponent) + format.gsub("%n", rounded_number).gsub("%u", unit).strip + end + + private + def format + options[:format] || translate_in_locale("human.decimal_units.format") + end + + def determine_unit(units, exponent) + exp = DECIMAL_UNITS[exponent] + case units + when Hash + units[exp] || "" + when String, Symbol + I18n.translate("#{units}.#{exp}", locale: options[:locale], count: number.to_i) + else + translate_in_locale("human.decimal_units.units.#{exp}", count: number.to_i) + end + end + + def calculate_exponent(units) + exponent = number != 0 ? Math.log10(number.abs).floor : 0 + unit_exponents(units).find { |e| exponent >= e } || 0 + end + + def unit_exponents(units) + case units + when Hash + units + when String, Symbol + I18n.translate(units.to_s, locale: options[:locale], raise: true) + when nil + translate_in_locale("human.decimal_units.units", raise: true) + else + raise ArgumentError, ":units must be a Hash or String translation scope." + end.keys.map { |e_name| INVERTED_DECIMAL_UNITS[e_name] }.sort_by(&:-@) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_size_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_size_converter.rb new file mode 100644 index 0000000..57e1ffe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_human_size_converter.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToHumanSizeConverter < NumberConverter # :nodoc: + STORAGE_UNITS = [:byte, :kb, :mb, :gb, :tb, :pb, :eb] + + self.namespace = :human + self.validate_float = true + + def convert + @number = Float(number) + + # For backwards compatibility with those that didn't add strip_insignificant_zeros to their locale files. + unless options.key?(:strip_insignificant_zeros) + options[:strip_insignificant_zeros] = true + end + + if smaller_than_base? + number_to_format = number.to_i.to_s + else + human_size = number / (base**exponent) + number_to_format = NumberToRoundedConverter.convert(human_size, options) + end + conversion_format.gsub("%n", number_to_format).gsub("%u", unit) + end + + private + def conversion_format + translate_number_value_with_default("human.storage_units.format", locale: options[:locale], raise: true) + end + + def unit + translate_number_value_with_default(storage_unit_key, locale: options[:locale], count: number.to_i, raise: true) + end + + def storage_unit_key + key_end = smaller_than_base? ? "byte" : STORAGE_UNITS[exponent] + "human.storage_units.units.#{key_end}" + end + + def exponent + max = STORAGE_UNITS.size - 1 + exp = (Math.log(number) / Math.log(base)).to_i + exp = max if exp > max # avoid overflow for the highest unit + exp + end + + def smaller_than_base? 
+ number.to_i < base + end + + def base + 1024 + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_percentage_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_percentage_converter.rb new file mode 100644 index 0000000..0c2e190 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_percentage_converter.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToPercentageConverter < NumberConverter # :nodoc: + self.namespace = :percentage + + def convert + rounded_number = NumberToRoundedConverter.convert(number, options) + options[:format].gsub("%n", rounded_number) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_phone_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_phone_converter.rb new file mode 100644 index 0000000..c9771d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_phone_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToPhoneConverter < NumberConverter # :nodoc: + def convert + str = country_code(opts[:country_code]).dup + str << convert_to_phone_number(number.to_s.strip) + str << phone_ext(opts[:extension]) + end + + private + def convert_to_phone_number(number) + if opts[:area_code] + convert_with_area_code(number) + else + convert_without_area_code(number) + end + end + + def convert_with_area_code(number) + default_pattern = /(\d{1,3})(\d{3})(\d{4}$)/ + number.gsub!(regexp_pattern(default_pattern), + "(\\1) \\2#{delimiter}\\3") + number + end + + def convert_without_area_code(number) + default_pattern = /(\d{0,3})(\d{3})(\d{4})$/ + number.gsub!(regexp_pattern(default_pattern), + "\\1#{delimiter}\\2#{delimiter}\\3") + number.slice!(0, 1) if start_with_delimiter?(number) + number + end + + def start_with_delimiter?(number) + delimiter.present? && number.start_with?(delimiter) + end + + def delimiter + opts[:delimiter] || "-" + end + + def country_code(code) + code.blank? ? "" : "+#{code}#{delimiter}" + end + + def phone_ext(ext) + ext.blank? ? 
"" : " x #{ext}" + end + + def regexp_pattern(default_pattern) + opts.fetch :pattern, default_pattern + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_rounded_converter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_rounded_converter.rb new file mode 100644 index 0000000..f48a515 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/number_to_rounded_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +require "active_support/number_helper/number_converter" + +module ActiveSupport + module NumberHelper + class NumberToRoundedConverter < NumberConverter # :nodoc: + self.namespace = :precision + self.validate_float = true + + def convert + helper = RoundingHelper.new(options) + rounded_number = helper.round(number) + + if precision = options[:precision] + if options[:significant] && precision > 0 + digits = helper.digit_count(rounded_number) + precision -= digits + precision = 0 if precision < 0 # don't let it be negative + end + + formatted_string = + if rounded_number.finite? + s = rounded_number.to_s("F") + a, b = s.split(".", 2) + if precision != 0 + b << "0" * precision + a << "." + a << b[0, precision] + end + a + else + # Infinity/NaN + "%f" % rounded_number + end + else + formatted_string = rounded_number + end + + delimited_number = NumberToDelimitedConverter.convert(formatted_string, options) + format_number(delimited_number) + end + + private + def strip_insignificant_zeros + options[:strip_insignificant_zeros] + end + + def format_number(number) + if strip_insignificant_zeros + escaped_separator = Regexp.escape(options[:separator]) + number.sub(/(#{escaped_separator})(\d*[1-9])?0+\z/, '\1\2').sub(/#{escaped_separator}\z/, "") + else + number + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/rounding_helper.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/rounding_helper.rb new file mode 100644 index 0000000..14deca1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/number_helper/rounding_helper.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +module ActiveSupport + module NumberHelper + class RoundingHelper # :nodoc: + attr_reader :options + + def initialize(options) + @options = options + end + + def round(number) + precision = absolute_precision(number) + return number unless precision + + rounded_number = convert_to_decimal(number).round(precision, options.fetch(:round_mode, :default).to_sym) + rounded_number.zero? ? rounded_number.abs : rounded_number # prevent showing negative zeros + end + + def digit_count(number) + return 1 if number.zero? 
+ (Math.log10(number.abs) + 1).floor + end + + private + def convert_to_decimal(number) + case number + when Float, String + BigDecimal(number.to_s) + when Rational + BigDecimal(number, digit_count(number.to_i) + options[:precision]) + else + number.to_d + end + end + + def absolute_precision(number) + if options[:significant] && options[:precision] > 0 + options[:precision] - digit_count(convert_to_decimal(number)) + else + options[:precision] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/option_merger.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/option_merger.rb new file mode 100644 index 0000000..b6dd126 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/option_merger.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/core_ext/hash/deep_merge" + +module ActiveSupport + class OptionMerger # :nodoc: + instance_methods.each do |method| + undef_method(method) unless method.start_with?("__", "instance_eval", "class", "object_id") + end + + def initialize(context, options) + @context, @options = context, options + end + + private + def method_missing(method, *arguments, &block) + options = nil + if arguments.size == 1 && arguments.first.is_a?(Proc) + proc = arguments.shift + arguments << lambda { |*args| @options.deep_merge(proc.call(*args)) } + elsif arguments.last.respond_to?(:to_hash) + options = @options.deep_merge(arguments.pop) + else + options = @options + end + + if options + @context.__send__(method, *arguments, **options, &block) + else + @context.__send__(method, *arguments, &block) + end + end + + def respond_to_missing?(*arguments) + @context.respond_to?(*arguments) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_hash.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_hash.rb new file mode 100644 index 0000000..39505bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_hash.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "yaml" + +YAML.add_builtin_type("omap") do |type, val| + ActiveSupport::OrderedHash[val.map { |v| v.to_a.first }] +end + +module ActiveSupport + # DEPRECATED: ActiveSupport::OrderedHash implements a hash that preserves + # insertion order. + # + # oh = ActiveSupport::OrderedHash.new + # oh[:a] = 1 + # oh[:b] = 2 + # oh.keys # => [:a, :b], this order is guaranteed + # + # Also, maps the +omap+ feature for YAML files + # (See https://yaml.org/type/omap.html) to support ordered items + # when loading from yaml. + # + # ActiveSupport::OrderedHash is namespaced to prevent conflicts + # with other implementations. + class OrderedHash < ::Hash # :nodoc: + def to_yaml_type + "!tag:yaml.org,2002:omap" + end + + def encode_with(coder) + coder.represent_seq "!omap", map { |k, v| { k => v } } + end + + def select(*args, &block) + dup.tap { |hash| hash.select!(*args, &block) } + end + + def reject(*args, &block) + dup.tap { |hash| hash.reject!(*args, &block) } + end + + def nested_under_indifferent_access + self + end + + # Returns true to make sure that this hash is extractable via Array#extract_options! + def extractable_options? 
+ true + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_options.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_options.rb new file mode 100644 index 0000000..1a7f4d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ordered_options.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/blank" + +module ActiveSupport + # +OrderedOptions+ inherits from +Hash+ and provides dynamic accessor methods. + # + # With a +Hash+, key-value pairs are typically managed like this: + # + # h = {} + # h[:boy] = 'John' + # h[:girl] = 'Mary' + # h[:boy] # => 'John' + # h[:girl] # => 'Mary' + # h[:dog] # => nil + # + # Using +OrderedOptions+, the above code can be written as: + # + # h = ActiveSupport::OrderedOptions.new + # h.boy = 'John' + # h.girl = 'Mary' + # h.boy # => 'John' + # h.girl # => 'Mary' + # h.dog # => nil + # + # To raise an exception when the value is blank, append a + # bang to the key name, like: + # + # h.dog! # => raises KeyError: :dog is blank + # + class OrderedOptions < Hash + alias_method :_get, :[] # preserve the original #[] method + protected :_get # make it protected + + def []=(key, value) + super(key.to_sym, value) + end + + def [](key) + super(key.to_sym) + end + + def method_missing(name, *args) + name_string = +name.to_s + if name_string.chomp!("=") + self[name_string] = args.first + else + bangs = name_string.chomp!("!") + + if bangs + self[name_string].presence || raise(KeyError.new(":#{name_string} is blank")) + else + self[name_string] + end + end + end + + def respond_to_missing?(name, include_private) + true + end + + def extractable_options? + true + end + + def inspect + "#<#{self.class.name} #{super}>" + end + end + + # +InheritableOptions+ provides a constructor to build an OrderedOptions + # hash inherited from another hash. + # + # Use this if you already have some hash and you want to create a new one based on it. + # + # h = ActiveSupport::InheritableOptions.new({ girl: 'Mary', boy: 'John' }) + # h.girl # => 'Mary' + # h.boy # => 'John' + class InheritableOptions < OrderedOptions + def initialize(parent = nil) + if parent.kind_of?(OrderedOptions) + # use the faster _get when dealing with OrderedOptions + super() { |h, k| parent._get(k) } + elsif parent + super() { |h, k| parent[k] } + else + super() + end + end + + def inheritable_copy + self.class.new(self) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/parameter_filter.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/parameter_filter.rb new file mode 100644 index 0000000..b2a19d6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/parameter_filter.rb @@ -0,0 +1,138 @@ +# frozen_string_literal: true + +require "active_support/core_ext/object/duplicable" + +module ActiveSupport + # +ParameterFilter+ allows you to specify keys for sensitive data from + # hash-like object and replace corresponding value. Filtering only certain + # sub-keys from a hash is possible by using the dot notation: + # 'credit_card.number'. If a proc is given, each key and value of a hash and + # all sub-hashes are passed to it, where the value or the key can be replaced + # using String#replace or similar methods. 
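+ # + # A minimal end-to-end sketch (the keys are illustrative): + # + # filter = ActiveSupport::ParameterFilter.new([:password]) + # filter.filter(email: "a@example.com", password: "s3cret") + # # => { email: "a@example.com", password: "[FILTERED]" }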
+ # + # ActiveSupport::ParameterFilter.new([:password]) + # => replaces the value to all keys matching /password/i with "[FILTERED]" + # + # ActiveSupport::ParameterFilter.new([:foo, "bar"]) + # => replaces the value to all keys matching /foo|bar/i with "[FILTERED]" + # + # ActiveSupport::ParameterFilter.new([/\Apin\z/i, /\Apin_/i]) + # => replaces the value for the exact (case-insensitive) key 'pin' and all + # (case-insensitive) keys beginning with 'pin_', with "[FILTERED]". + # Does not match keys with 'pin' as a substring, such as 'shipping_id'. + # + # ActiveSupport::ParameterFilter.new(["credit_card.code"]) + # => replaces { credit_card: {code: "xxxx"} } with "[FILTERED]", does not + # change { file: { code: "xxxx"} } + # + # ActiveSupport::ParameterFilter.new([-> (k, v) do + # v.reverse! if /secret/i.match?(k) + # end]) + # => reverses the value to all keys matching /secret/i + class ParameterFilter + FILTERED = "[FILTERED]" # :nodoc: + + # Create instance with given filters. Supported type of filters are +String+, +Regexp+, and +Proc+. + # Other types of filters are treated as +String+ using +to_s+. + # For +Proc+ filters, key, value, and optional original hash is passed to block arguments. + # + # ==== Options + # + # * :mask - A replaced object when filtered. Defaults to "[FILTERED]". + def initialize(filters = [], mask: FILTERED) + @filters = filters + @mask = mask + end + + # Mask value of +params+ if key matches one of filters. + def filter(params) + compiled_filter.call(params) + end + + # Returns filtered value for given key. For +Proc+ filters, third block argument is not populated. + def filter_param(key, value) + @filters.empty? ? value : compiled_filter.value_for_key(key, value) + end + + private + def compiled_filter + @compiled_filter ||= CompiledFilter.compile(@filters, mask: @mask) + end + + class CompiledFilter # :nodoc: + def self.compile(filters, mask:) + return lambda { |params| params.dup } if filters.empty? + + strings, regexps, blocks, deep_regexps, deep_strings = [], [], [], nil, nil + + filters.each do |item| + case item + when Proc + blocks << item + when Regexp + if item.to_s.include?("\\.") + (deep_regexps ||= []) << item + else + regexps << item + end + else + s = Regexp.escape(item.to_s) + if s.include?("\\.") + (deep_strings ||= []) << s + else + strings << s + end + end + end + + regexps << Regexp.new(strings.join("|"), true) unless strings.empty? + (deep_regexps ||= []) << Regexp.new(deep_strings.join("|"), true) if deep_strings&.any? + + new regexps, deep_regexps, blocks, mask: mask + end + + attr_reader :regexps, :deep_regexps, :blocks + + def initialize(regexps, deep_regexps, blocks, mask:) + @regexps = regexps + @deep_regexps = deep_regexps&.any? ? deep_regexps : nil + @blocks = blocks + @mask = mask + end + + def call(params, parents = [], original_params = params) + filtered_params = params.class.new + + params.each do |key, value| + filtered_params[key] = value_for_key(key, value, parents, original_params) + end + + filtered_params + end + + def value_for_key(key, value, parents = [], original_params = nil) + parents.push(key) if deep_regexps + if regexps.any? { |r| r.match?(key.to_s) } + value = @mask + elsif deep_regexps && (joined = parents.join(".")) && deep_regexps.any? { |r| r.match?(joined) } + value = @mask + elsif value.is_a?(Hash) + value = call(value, parents, original_params) + elsif value.is_a?(Array) + # If we don't pop the current parent it will be duplicated as we + # process each array value. 
+ parents.pop if deep_regexps + value = value.map { |v| value_for_key(key, v, parents, original_params) } + # Restore the parent stack after processing the array. + parents.push(key) if deep_regexps + elsif blocks.any? + key = key.dup if key.duplicable? + value = value.dup if value.duplicable? + blocks.each { |b| b.arity == 2 ? b.call(key, value) : b.call(key, value, original_params) } + end + parents.pop if deep_regexps + value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/per_thread_registry.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/per_thread_registry.rb new file mode 100644 index 0000000..cd8db91 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/per_thread_registry.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" + +module ActiveSupport + # NOTE: This approach has been deprecated for end-user code in favor of {thread_mattr_accessor}[rdoc-ref:Module#thread_mattr_accessor] and friends. + # Please use that approach instead. + # + # This module is used to encapsulate access to thread local variables. + # + # Instead of polluting the thread locals namespace: + # + # Thread.current[:connection_handler] + # + # you define a class that extends this module: + # + # module ActiveRecord + # class RuntimeRegistry + # extend ActiveSupport::PerThreadRegistry + # + # attr_accessor :connection_handler + # end + # end + # + # and invoke the declared instance accessors as class methods. So + # + # ActiveRecord::RuntimeRegistry.connection_handler = connection_handler + # + # sets a connection handler local to the current thread, and + # + # ActiveRecord::RuntimeRegistry.connection_handler + # + # returns a connection handler local to the current thread. + # + # This feature is accomplished by instantiating the class and storing the + # instance as a thread local keyed by the class name. In the example above + # a key "ActiveRecord::RuntimeRegistry" is stored in Thread.current. + # The class methods proxy to said thread local instance. + # + # If the class has an initializer, it must accept no arguments. + module PerThreadRegistry + def self.extended(object) + ActiveSupport::Deprecation.warn(<<~MSG) + ActiveSupport::PerThreadRegistry is deprecated and will be removed in Rails 7.1. + Use `Module#thread_mattr_accessor` instead. + MSG + object.instance_variable_set :@per_thread_registry_key, object.name.freeze + end + + def instance + Thread.current[@per_thread_registry_key] ||= new + end + + private + def method_missing(name, *args, &block) + # Caches the method definition as a singleton method of the receiver. + # + # By letting #delegate handle it, we avoid an enclosure that'll capture args. + singleton_class.delegate name, to: :instance + + send(name, *args, &block) + end + ruby2_keywords(:method_missing) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/proxy_object.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/proxy_object.rb new file mode 100644 index 0000000..0965fcd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/proxy_object.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module ActiveSupport + # A class with no predefined methods that behaves similarly to Builder's + # BlankSlate. Used for proxy classes. + class ProxyObject < ::BasicObject + undef_method :== + undef_method :equal? 
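+ + # An illustrative proxy subclass (the class and its delegate are hypothetical): + # + # class LazyValue < ActiveSupport::ProxyObject + # def initialize(&block) + # @block = block + # end + # + # def method_missing(name, *args, &blk) + # (@value ||= @block.call).__send__(name, *args, &blk) + # end + # end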
+ + # Let ActiveSupport::ProxyObject at least raise exceptions. + def raise(*args) + ::Object.send(:raise, *args) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rails.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rails.rb new file mode 100644 index 0000000..75676a2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rails.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# This is a private interface. +# +# Rails components cherry pick from Active Support as needed, but there are a +# few features that are used for sure in some way or another and it is not worth +# putting individual requires absolutely everywhere. Think blank? for example. +# +# This file is loaded by every Rails component except Active Support itself, +# but it does not belong to the Rails public interface. It is internal to +# Rails and can change anytime. + +# Defines Object#blank? and Object#present?. +require "active_support/core_ext/object/blank" + +# Support for ClassMethods and the included macro. +require "active_support/concern" + +# Defines Class#class_attribute. +require "active_support/core_ext/class/attribute" + +# Defines Module#delegate. +require "active_support/core_ext/module/delegation" + +# Defines ActiveSupport::Deprecation. +require "active_support/deprecation" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/railtie.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/railtie.rb new file mode 100644 index 0000000..e86aa43 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/railtie.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true + +require "active_support" +require "active_support/i18n_railtie" + +module ActiveSupport + class Railtie < Rails::Railtie # :nodoc: + config.active_support = ActiveSupport::OrderedOptions.new + config.active_support.disable_to_s_conversion = false + + config.eager_load_namespaces << ActiveSupport + + initializer "active_support.isolation_level" do |app| + config.after_initialize do + if level = app.config.active_support.delete(:isolation_level) + ActiveSupport::IsolatedExecutionState.isolation_level = level + end + end + end + + initializer "active_support.remove_deprecated_time_with_zone_name" do |app| + config.after_initialize do + if app.config.active_support.remove_deprecated_time_with_zone_name + require "active_support/time_with_zone" + TimeWithZone.singleton_class.remove_method(:name) + end + end + end + + initializer "active_support.set_authenticated_message_encryption" do |app| + config.after_initialize do + unless app.config.active_support.use_authenticated_message_encryption.nil? 
+ ActiveSupport::MessageEncryptor.use_authenticated_message_encryption = + app.config.active_support.use_authenticated_message_encryption + end + end + end + + initializer "active_support.reset_execution_context" do |app| + app.reloader.before_class_unload { ActiveSupport::ExecutionContext.clear } + app.executor.to_run { ActiveSupport::ExecutionContext.clear } + app.executor.to_complete { ActiveSupport::ExecutionContext.clear } + end + + initializer "active_support.reset_all_current_attributes_instances" do |app| + app.reloader.before_class_unload { ActiveSupport::CurrentAttributes.clear_all } + app.executor.to_run { ActiveSupport::CurrentAttributes.reset_all } + app.executor.to_complete { ActiveSupport::CurrentAttributes.reset_all } + + ActiveSupport.on_load(:active_support_test_case) do + if app.config.active_support.executor_around_test_case + require "active_support/executor/test_helper" + include ActiveSupport::Executor::TestHelper + else + require "active_support/current_attributes/test_helper" + include ActiveSupport::CurrentAttributes::TestHelper + + require "active_support/execution_context/test_helper" + include ActiveSupport::ExecutionContext::TestHelper + end + end + end + + initializer "active_support.deprecation_behavior" do |app| + if app.config.active_support.report_deprecations == false + ActiveSupport::Deprecation.silenced = true + ActiveSupport::Deprecation.behavior = :silence + ActiveSupport::Deprecation.disallowed_behavior = :silence + else + if deprecation = app.config.active_support.deprecation + ActiveSupport::Deprecation.behavior = deprecation + end + + if disallowed_deprecation = app.config.active_support.disallowed_deprecation + ActiveSupport::Deprecation.disallowed_behavior = disallowed_deprecation + end + + if disallowed_warnings = app.config.active_support.disallowed_deprecation_warnings + ActiveSupport::Deprecation.disallowed_warnings = disallowed_warnings + end + end + end + + # Sets the default value for Time.zone + # If assigned value cannot be matched to a TimeZone, an exception will be raised. + initializer "active_support.initialize_time_zone" do |app| + begin + TZInfo::DataSource.get + rescue TZInfo::DataSourceNotFound => e + raise e.exception "tzinfo-data is not present. Please add gem 'tzinfo-data' to your Gemfile and run bundle install" + end + require "active_support/core_ext/time/zones" + Time.zone_default = Time.find_zone!(app.config.time_zone) + end + + # Sets the default week start + # If assigned value is not a valid day symbol (e.g. :sunday, :monday, ...), an exception will be raised. + initializer "active_support.initialize_beginning_of_week" do |app| + require "active_support/core_ext/date/calculations" + beginning_of_week_default = Date.find_beginning_of_week!(app.config.beginning_of_week) + + Date.beginning_of_week_default = beginning_of_week_default + end + + initializer "active_support.require_master_key" do |app| + if app.config.respond_to?(:require_master_key) && app.config.require_master_key + begin + app.credentials.key + rescue ActiveSupport::EncryptedFile::MissingKeyError => error + $stderr.puts error.message + exit 1 + end + end + end + + initializer "active_support.set_error_reporter" do |app| + ActiveSupport.error_reporter = app.executor.error_reporter + end + + initializer "active_support.set_configs" do |app| + app.config.active_support.each do |k, v| + k = "#{k}=" + ActiveSupport.public_send(k, v) if ActiveSupport.respond_to? 
k + end + end + + initializer "active_support.set_hash_digest_class" do |app| + config.after_initialize do + if klass = app.config.active_support.hash_digest_class + ActiveSupport::Digest.hash_digest_class = klass + end + end + end + + initializer "active_support.set_key_generator_hash_digest_class" do |app| + config.after_initialize do + if klass = app.config.active_support.key_generator_hash_digest_class + ActiveSupport::KeyGenerator.hash_digest_class = klass + end + end + end + + initializer "active_support.set_rfc4122_namespaced_uuids" do |app| + config.after_initialize do + if app.config.active_support.use_rfc4122_namespaced_uuids + require "active_support/core_ext/digest" + ::Digest::UUID.use_rfc4122_namespaced_uuids = app.config.active_support.use_rfc4122_namespaced_uuids + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/reloader.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/reloader.rb new file mode 100644 index 0000000..e751866 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/reloader.rb @@ -0,0 +1,130 @@ +# frozen_string_literal: true + +require "active_support/execution_wrapper" +require "active_support/executor" + +module ActiveSupport + #-- + # This class defines several callbacks: + # + # to_prepare -- Run once at application startup, and also from + # +to_run+. + # + # to_run -- Run before a work run that is reloading. If + # +reload_classes_only_on_change+ is true (the default), the class + # unload will have already occurred. + # + # to_complete -- Run after a work run that has reloaded. If + # +reload_classes_only_on_change+ is false, the class unload will + # have occurred after the work run, but before this callback. + # + # before_class_unload -- Run immediately before the classes are + # unloaded. + # + # after_class_unload -- Run immediately after the classes are + # unloaded. + # + class Reloader < ExecutionWrapper + define_callbacks :prepare + + define_callbacks :class_unload + + # Registers a callback that will run once at application startup and every time the code is reloaded. + def self.to_prepare(*args, &block) + set_callback(:prepare, *args, &block) + end + + # Registers a callback that will run immediately before the classes are unloaded. + def self.before_class_unload(*args, &block) + set_callback(:class_unload, *args, &block) + end + + # Registers a callback that will run immediately after the classes are unloaded. + def self.after_class_unload(*args, &block) + set_callback(:class_unload, :after, *args, &block) + end + + to_run(:after) { self.class.prepare! } + + # Initiate a manual reload + def self.reload! + executor.wrap do + new.tap do |instance| + instance.run! + ensure + instance.complete! + end + end + prepare! + end + + def self.run!(reset: false) # :nodoc: + if check! + super + else + Null + end + end + + # Run the supplied block as a work unit, reloading code as needed + def self.wrap + executor.wrap do + super + end + end + + class_attribute :executor, default: Executor + class_attribute :check, default: lambda { false } + + def self.check! # :nodoc: + @should_reload ||= check.call + end + + def self.reloaded! # :nodoc: + @should_reload = false + end + + def self.prepare! # :nodoc: + new.run_callbacks(:prepare) + end + + def initialize + super + @locked = false + end + + # Acquire the ActiveSupport::Dependencies::Interlock unload lock, + # ensuring it will be released automatically + def require_unload_lock! 
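+      # The Dependencies interlock coordinates this thread with any threads
+      # that are still loading code: start_unloading blocks until unloading
+      # is safe, and the matching done_unloading call happens in
+      # release_unload_lock! below.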
+ unless @locked + ActiveSupport::Dependencies.interlock.start_unloading + @locked = true + end + end + + # Release the unload lock if it has been previously obtained + def release_unload_lock! + if @locked + @locked = false + ActiveSupport::Dependencies.interlock.done_unloading + end + end + + def run! # :nodoc: + super + release_unload_lock! + end + + def class_unload!(&block) # :nodoc: + require_unload_lock! + run_callbacks(:class_unload, &block) + end + + def complete! # :nodoc: + super + self.class.reloaded! + ensure + release_unload_lock! + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rescuable.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rescuable.rb new file mode 100644 index 0000000..3199d77 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/rescuable.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/core_ext/class/attribute" +require "active_support/core_ext/string/inflections" + +module ActiveSupport + # Rescuable module adds support for easier exception handling. + module Rescuable + extend Concern + + included do + class_attribute :rescue_handlers, default: [] + end + + module ClassMethods + # Registers exception classes with a handler to be called by rescue_with_handler. + # + # rescue_from receives a series of exception classes or class + # names, and an exception handler specified by a trailing :with + # option containing the name of a method or a Proc object. Alternatively, a block + # can be given as the handler. + # + # Handlers that take one argument will be called with the exception, so + # that the exception can be inspected when dealing with it. + # + # Handlers are inherited. They are searched from right to left, from + # bottom to top, and up the hierarchy. The handler of the first class for + # which exception.is_a?(klass) holds true is the one invoked, if + # any. + # + # class ApplicationController < ActionController::Base + # rescue_from User::NotAuthorized, with: :deny_access + # rescue_from ActiveRecord::RecordInvalid, with: :show_record_errors + # + # rescue_from "MyApp::BaseError" do |exception| + # redirect_to root_url, alert: exception.message + # end + # + # private + # def deny_access + # head :forbidden + # end + # + # def show_record_errors(exception) + # redirect_back_or_to root_url, alert: exception.record.errors.full_messages.to_sentence + # end + # end + # + # Exceptions raised inside exception handlers are not propagated up. + def rescue_from(*klasses, with: nil, &block) + unless with + if block_given? + with = block + else + raise ArgumentError, "Need a handler. Pass the with: keyword argument or provide a block." + end + end + + klasses.each do |klass| + key = if klass.is_a?(Module) && klass.respond_to?(:===) + klass.name + elsif klass.is_a?(String) + klass + else + raise ArgumentError, "#{klass.inspect} must be an Exception class or a String referencing an Exception class" + end + + # Put the new handler at the end because the list is read in reverse. + self.rescue_handlers += [[key, with]] + end + end + + # Matches an exception to a handler based on the exception class. + # + # If no handler matches the exception, check for a handler matching the + # (optional) +exception.cause+. If no handler matches the exception or its + # cause, this returns +nil+, so you can deal with unhandled exceptions. 
+      # Be sure to re-raise unhandled exceptions if this is what you expect.
+      #
+      #   begin
+      #     # ...
+      #   rescue => exception
+      #     rescue_with_handler(exception) || raise
+      #   end
+      #
+      # Returns the exception if it was handled and +nil+ if it was not.
+      def rescue_with_handler(exception, object: self, visited_exceptions: [])
+        visited_exceptions << exception
+
+        if handler = handler_for_rescue(exception, object: object)
+          handler.call exception
+          exception
+        elsif exception
+          if visited_exceptions.include?(exception.cause)
+            nil
+          else
+            rescue_with_handler(exception.cause, object: object, visited_exceptions: visited_exceptions)
+          end
+        end
+      end
+
+      def handler_for_rescue(exception, object: self) # :nodoc:
+        case rescuer = find_rescue_handler(exception)
+        when Symbol
+          method = object.method(rescuer)
+          if method.arity == 0
+            -> e { method.call }
+          else
+            method
+          end
+        when Proc
+          if rescuer.arity == 0
+            -> e { object.instance_exec(&rescuer) }
+          else
+            -> e { object.instance_exec(e, &rescuer) }
+          end
+        end
+      end
+
+      private
+        def find_rescue_handler(exception)
+          if exception
+            # Handlers are in order of declaration but the most recently declared
+            # is the highest priority match, so we search for matching handlers
+            # in reverse.
+            _, handler = rescue_handlers.reverse_each.detect do |class_or_name, _|
+              if klass = constantize_rescue_handler_class(class_or_name)
+                klass === exception
+              end
+            end
+
+            handler
+          end
+        end
+
+        def constantize_rescue_handler_class(class_or_name)
+          case class_or_name
+          when String, Symbol
+            begin
+              # Try a lexical lookup first since we support
+              #
+              #   class Super
+              #     rescue_from 'Error', with: ...
+              #   end
+              #
+              #   class Sub
+              #     class Error < StandardError; end
+              #   end
+              #
+              # so an Error raised in Sub will hit the 'Error' handler.
+              const_get class_or_name
+            rescue NameError
+              class_or_name.safe_constantize
+            end
+          else
+            class_or_name
+          end
+        end
+    end
+
+    # Delegates to the class method, but uses the instance as the subject for
+    # rescue_from handlers (method calls, +instance_exec+ blocks).
+    def rescue_with_handler(exception)
+      self.class.rescue_with_handler exception, object: self
+    end
+
+    # Internal handler lookup. Delegates to class method. Some libraries call
+    # this directly, so keeping it around for compatibility.
+    def handler_for_rescue(exception) # :nodoc:
+      self.class.handler_for_rescue exception, object: self
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ruby_features.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ruby_features.rb
new file mode 100644
index 0000000..8cdb89c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/ruby_features.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+module ActiveSupport
+  module RubyFeatures # :nodoc:
+    CLASS_SUBCLASSES = Class.method_defined?(:subclasses) # RUBY_VERSION >= "3.1"
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/secure_compare_rotator.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/secure_compare_rotator.rb
new file mode 100644
index 0000000..982ebf1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/secure_compare_rotator.rb
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require "active_support/security_utils"
+require "active_support/messages/rotator"
+
+module ActiveSupport
+  # The ActiveSupport::SecureCompareRotator is a wrapper around ActiveSupport::SecurityUtils.secure_compare
+  # and allows you to rotate a previously defined value to a new one.
+  #
+  # It can be used as follows:
+  #
+  #   rotator = ActiveSupport::SecureCompareRotator.new('new_production_value')
+  #   rotator.rotate('previous_production_value')
+  #   rotator.secure_compare!('previous_production_value')
+  #
+  # One real-world use case would be rotating basic auth credentials:
+  #
+  #   class MyController < ApplicationController
+  #     def authenticate_request
+  #       rotator = ActiveSupport::SecureCompareRotator.new('new_password')
+  #       rotator.rotate('old_password')
+  #
+  #       authenticate_or_request_with_http_basic do |username, password|
+  #         rotator.secure_compare!(password)
+  #       rescue ActiveSupport::SecureCompareRotator::InvalidMatch
+  #         false
+  #       end
+  #     end
+  #   end
+  class SecureCompareRotator
+    include SecurityUtils
+    prepend Messages::Rotator
+
+    InvalidMatch = Class.new(StandardError)
+
+    def initialize(value, **_options)
+      @value = value
+    end
+
+    def secure_compare!(other_value, on_rotation: @on_rotation)
+      secure_compare(@value, other_value) ||
+        run_rotations(on_rotation) { |wrapper| wrapper.secure_compare!(other_value) } ||
+        raise(InvalidMatch)
+    end
+
+    private
+      def build_rotation(previous_value, _options)
+        self.class.new(previous_value)
+      end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/security_utils.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/security_utils.rb
new file mode 100644
index 0000000..aa00474
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/security_utils.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module ActiveSupport
+  module SecurityUtils
+    # Constant time string comparison, for fixed length strings.
+    #
+    # The values compared should be of fixed length, such as strings
+    # that have already been processed by HMAC. Raises in case of length mismatch.
+
+    if defined?(OpenSSL.fixed_length_secure_compare)
+      def fixed_length_secure_compare(a, b)
+        OpenSSL.fixed_length_secure_compare(a, b)
+      end
+    else
+      def fixed_length_secure_compare(a, b)
+        raise ArgumentError, "string length mismatch." unless a.bytesize == b.bytesize
+
+        l = a.unpack "C#{a.bytesize}"
+
+        res = 0
+        b.each_byte { |byte| res |= byte ^ l.shift }
+        res == 0
+      end
+    end
+    module_function :fixed_length_secure_compare
+
+    # Secure string comparison for strings of variable length.
+    #
+    # While a timing attack would not be able to discern the content of
+    # a secret compared via secure_compare, it is possible to determine
+    # the secret length. This should be considered when using secure_compare
+    # to compare weak, short secrets to user input.
+    def secure_compare(a, b)
+      a.bytesize == b.bytesize && fixed_length_secure_compare(a, b)
+    end
+    module_function :secure_compare
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/string_inquirer.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/string_inquirer.rb
new file mode 100644
index 0000000..8c4bf55
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/string_inquirer.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module ActiveSupport
+  # Wrapping a string in this class gives you a prettier way to test
+  # for equality. The value returned by Rails.env is wrapped
+  # in a StringInquirer object, so instead of calling this:
+  #
+  #   Rails.env == 'production'
+  #
+  # you can call this:
+  #
+  #   Rails.env.production?
+  #
+  # == Instantiating a new StringInquirer
+  #
+  #   vehicle = ActiveSupport::StringInquirer.new('car')
+  #   vehicle.car? # => true
+  #   vehicle.bike? # => false
+  class StringInquirer < String
+    private
+      def respond_to_missing?(method_name, include_private = false)
+        method_name.end_with?("?") || super
+      end
+
+      def method_missing(method_name, *arguments)
+        if method_name.end_with?("?")
+          self == method_name[0..-2]
+        else
+          super
+        end
+      end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/subscriber.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/subscriber.rb
new file mode 100644
index 0000000..0f71443
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/subscriber.rb
@@ -0,0 +1,163 @@
+# frozen_string_literal: true
+
+require "active_support/notifications"
+
+module ActiveSupport
+  # ActiveSupport::Subscriber is an object set to consume
+  # ActiveSupport::Notifications. The subscriber dispatches notifications to
+  # a registered object based on its given namespace.
+  #
+  # An example would be an Active Record subscriber responsible for collecting
+  # statistics about queries:
+  #
+  #   module ActiveRecord
+  #     class StatsSubscriber < ActiveSupport::Subscriber
+  #       attach_to :active_record
+  #
+  #       def sql(event)
+  #         Statsd.timing("sql.#{event.payload[:name]}", event.duration)
+  #       end
+  #     end
+  #   end
+  #
+  # Once configured, whenever a "sql.active_record" notification is published,
+  # it will properly dispatch the event (ActiveSupport::Notifications::Event) to
+  # the +sql+ method.
+  #
+  # We can detach a subscriber as well:
+  #
+  #   ActiveRecord::StatsSubscriber.detach_from(:active_record)
+  class Subscriber
+    class << self
+      # Attach the subscriber to a namespace.
+      def attach_to(namespace, subscriber = new, notifier = ActiveSupport::Notifications, inherit_all: false)
+        @namespace = namespace
+        @subscriber = subscriber
+        @notifier = notifier
+        @inherit_all = inherit_all
+
+        subscribers << subscriber
+
+        # Add event subscribers for all existing methods on the class.
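+        # Each public instance method (such as +sql+ in the example above)
+        # becomes a subscription to the matching "<method>.<namespace>"
+        # notification pattern.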
+        fetch_public_methods(subscriber, inherit_all).each do |event|
+          add_event_subscriber(event)
+        end
+      end
+
+      # Detach the subscriber from a namespace.
+      def detach_from(namespace, notifier = ActiveSupport::Notifications)
+        @namespace = namespace
+        @subscriber = find_attached_subscriber
+        @notifier = notifier
+
+        return unless subscriber
+
+        subscribers.delete(subscriber)
+
+        # Remove event subscribers of all existing methods on the class.
+        fetch_public_methods(subscriber, true).each do |event|
+          remove_event_subscriber(event)
+        end
+
+        # Reset notifier so that event subscribers will not be added for new methods added to the class.
+        @notifier = nil
+      end
+
+      # Adds event subscribers for all new methods added to the class.
+      def method_added(event)
+        # Only public methods are added as subscribers, and only if a notifier
+        # has been set up. This means that subscribers will only be set up for
+        # classes that call #attach_to.
+        if public_method_defined?(event) && notifier
+          add_event_subscriber(event)
+        end
+      end
+
+      def subscribers
+        @@subscribers ||= []
+      end
+
+      private
+        attr_reader :subscriber, :notifier, :namespace
+
+        def add_event_subscriber(event) # :doc:
+          return if invalid_event?(event)
+
+          pattern = prepare_pattern(event)
+
+          # Don't add multiple subscribers (e.g. if methods are redefined).
+          return if pattern_subscribed?(pattern)
+
+          subscriber.patterns[pattern] = notifier.subscribe(pattern, subscriber)
+        end
+
+        def remove_event_subscriber(event) # :doc:
+          return if invalid_event?(event)
+
+          pattern = prepare_pattern(event)
+
+          return unless pattern_subscribed?(pattern)
+
+          notifier.unsubscribe(subscriber.patterns[pattern])
+          subscriber.patterns.delete(pattern)
+        end
+
+        def find_attached_subscriber
+          subscribers.find { |attached_subscriber| attached_subscriber.instance_of?(self) }
+        end
+
+        def invalid_event?(event)
+          %i{ start finish }.include?(event.to_sym)
+        end
+
+        def prepare_pattern(event)
+          "#{event}.#{namespace}"
+        end
+
+        def pattern_subscribed?(pattern)
+          subscriber.patterns.key?(pattern)
+        end
+
+        def fetch_public_methods(subscriber, inherit_all)
+          subscriber.public_methods(inherit_all) - Subscriber.public_instance_methods(true)
+        end
+    end
+
+    attr_reader :patterns # :nodoc:
+
+    def initialize
+      @queue_key = [self.class.name, object_id].join "-"
+      @patterns = {}
+      super
+    end
+
+    def start(name, id, payload)
+      event = ActiveSupport::Notifications::Event.new(name, nil, nil, id, payload)
+      event.start!
+      parent = event_stack.last
+      parent << event if parent
+
+      event_stack.push event
+    end
+
+    def finish(name, id, payload)
+      event = event_stack.pop
+      event.finish!
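+      # The payload handed to +finish+ can contain keys added while the
+      # block was being instrumented, so fold it into the event before
+      # dispatching.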
+ event.payload.merge!(payload) + + method = name.split(".").first + send(method, event) + end + + def publish_event(event) # :nodoc: + method = event.name.split(".").first + send(method, event) + end + + private + def event_stack + registry = ActiveSupport::IsolatedExecutionState[:active_support_subscriber_queue_registry] ||= {} + registry[@queue_key] ||= [] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/tagged_logging.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/tagged_logging.rb new file mode 100644 index 0000000..26852e5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/tagged_logging.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/object/blank" +require "logger" +require "active_support/logger" + +module ActiveSupport + # Wraps any standard Logger object to provide tagging capabilities. + # + # May be called with a block: + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged('BCX') { logger.info 'Stuff' } # Logs "[BCX] Stuff" + # logger.tagged('BCX', "Jason") { logger.info 'Stuff' } # Logs "[BCX] [Jason] Stuff" + # logger.tagged('BCX') { logger.tagged('Jason') { logger.info 'Stuff' } } # Logs "[BCX] [Jason] Stuff" + # + # If called without a block, a new logger will be returned with applied tags: + # + # logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT)) + # logger.tagged("BCX").info "Stuff" # Logs "[BCX] Stuff" + # logger.tagged("BCX", "Jason").info "Stuff" # Logs "[BCX] [Jason] Stuff" + # logger.tagged("BCX").tagged("Jason").info "Stuff" # Logs "[BCX] [Jason] Stuff" + # + # This is used by the default Rails.logger as configured by Railties to make + # it easy to stamp log lines with subdomains, request ids, and anything else + # to aid debugging of multi-user production applications. + module TaggedLogging + module Formatter # :nodoc: + # This method is invoked when a log event occurs. + def call(severity, timestamp, progname, msg) + super(severity, timestamp, progname, "#{tags_text}#{msg}") + end + + def tagged(*tags) + new_tags = push_tags(*tags) + yield self + ensure + pop_tags(new_tags.size) + end + + def push_tags(*tags) + tags.flatten! + tags.reject!(&:blank?) + current_tags.concat tags + tags + end + + def pop_tags(size = 1) + current_tags.pop size + end + + def clear_tags! + current_tags.clear + end + + def current_tags + # We use our object ID here to avoid conflicting with other instances + thread_key = @thread_key ||= "activesupport_tagged_logging_tags:#{object_id}" + IsolatedExecutionState[thread_key] ||= [] + end + + def tags_text + tags = current_tags + if tags.one? + "[#{tags[0]}] " + elsif tags.any? + tags.collect { |tag| "[#{tag}] " }.join + end + end + end + + module LocalTagStorage # :nodoc: + attr_accessor :current_tags + + def self.extended(base) + base.current_tags = [] + end + end + + def self.new(logger) + logger = logger.clone + + if logger.formatter + logger.formatter = logger.formatter.dup + else + # Ensure we set a default formatter so we aren't extending nil! + logger.formatter = ActiveSupport::Logger::SimpleFormatter.new + end + + logger.formatter.extend Formatter + logger.extend(self) + end + + delegate :push_tags, :pop_tags, :clear_tags!, to: :formatter + + def tagged(*tags) + if block_given? 
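+        # With a block, the tags apply only for the duration of the block;
+        # the blockless branch below returns a copy of the logger that
+        # carries the tags with it.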
+        formatter.tagged(*tags) { yield self }
+      else
+        logger = ActiveSupport::TaggedLogging.new(self)
+        logger.formatter.extend LocalTagStorage
+        logger.push_tags(*formatter.current_tags, *tags)
+        logger
+      end
+    end
+
+    def flush
+      clear_tags!
+      super if defined?(super)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/test_case.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/test_case.rb
new file mode 100644
index 0000000..2df7b80
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/test_case.rb
@@ -0,0 +1,155 @@
+# frozen_string_literal: true
+
+gem "minitest" # make sure we get the gem, not stdlib
+require "minitest"
+require "active_support/testing/tagged_logging"
+require "active_support/testing/setup_and_teardown"
+require "active_support/testing/assertions"
+require "active_support/testing/deprecation"
+require "active_support/testing/declarative"
+require "active_support/testing/isolation"
+require "active_support/testing/constant_lookup"
+require "active_support/testing/time_helpers"
+require "active_support/testing/file_fixtures"
+require "active_support/testing/parallelization"
+require "active_support/testing/parallelize_executor"
+require "concurrent/utility/processor_counter"
+
+module ActiveSupport
+  class TestCase < ::Minitest::Test
+    Assertion = Minitest::Assertion
+
+    class << self
+      # Sets the order in which test cases are run.
+      #
+      #   ActiveSupport::TestCase.test_order = :random # => :random
+      #
+      # Valid values are:
+      # * +:random+ (to run tests in random order)
+      # * +:parallel+ (to run tests in parallel)
+      # * +:sorted+ (to run tests alphabetically by method name)
+      # * +:alpha+ (equivalent to +:sorted+)
+      def test_order=(new_order)
+        ActiveSupport.test_order = new_order
+      end
+
+      # Returns the order in which test cases are run.
+      #
+      #   ActiveSupport::TestCase.test_order # => :random
+      #
+      # Possible values are +:random+, +:parallel+, +:alpha+, +:sorted+.
+      # Defaults to +:random+.
+      def test_order
+        ActiveSupport.test_order ||= :random
+      end
+
+      # Parallelizes the test suite.
+      #
+      # Takes a +workers+ argument that controls how many times the process
+      # is forked. For each process a new database will be created suffixed
+      # with the worker number.
+      #
+      #   test-database-0
+      #   test-database-1
+      #
+      # If ENV["PARALLEL_WORKERS"] is set the workers argument will be ignored
+      # and the environment variable will be used instead. This is useful for CI
+      # environments, or other environments where you may need more workers than
+      # you do for local testing.
+      #
+      # If the number of workers is set to +1+ or fewer, the tests will not be
+      # parallelized.
+      #
+      # If +workers+ is set to +:number_of_processors+, the number of workers will be
+      # set to the actual core count on the machine you are on.
+      #
+      # The default parallelization method is to fork processes. If you'd like to
+      # use threads instead you can pass with: :threads to the +parallelize+
+      # method. Note the threaded parallelization does not create multiple
+      # databases and will not work with system tests at this time.
+      #
+      #   parallelize(workers: :number_of_processors, with: :threads)
+      #
+      # The threaded parallelization uses minitest's parallel executor directly.
+      # The processes parallelization uses a Ruby DRb server.
+      #
+      # Because parallelization presents an overhead, it is only enabled when the
+      # number of tests to run is above the +threshold+ param. The default value is
+      # 50, and it's configurable via +config.active_support.test_parallelization_threshold+.
+      def parallelize(workers: :number_of_processors, with: :processes, threshold: ActiveSupport.test_parallelization_threshold)
+        workers = Concurrent.physical_processor_count if workers == :number_of_processors
+        workers = ENV["PARALLEL_WORKERS"].to_i if ENV["PARALLEL_WORKERS"]
+
+        return if workers <= 1
+
+        Minitest.parallel_executor = ActiveSupport::Testing::ParallelizeExecutor.new(size: workers, with: with, threshold: threshold)
+      end
+
+      # Set up hook for parallel testing. This can be used if you have multiple
+      # databases or any behavior that needs to be run after the process is forked
+      # but before the tests run.
+      #
+      # Note: this feature is not available with the threaded parallelization.
+      #
+      # In your +test_helper.rb+ add the following:
+      #
+      #   class ActiveSupport::TestCase
+      #     parallelize_setup do
+      #       # create databases
+      #     end
+      #   end
+      def parallelize_setup(&block)
+        ActiveSupport::Testing::Parallelization.after_fork_hook(&block)
+      end
+
+      # Clean up hook for parallel testing. This can be used to drop databases
+      # if your app uses multiple write/read databases or other clean up before
+      # the tests finish. This runs before the forked process is closed.
+      #
+      # Note: this feature is not available with the threaded parallelization.
+      #
+      # In your +test_helper.rb+ add the following:
+      #
+      #   class ActiveSupport::TestCase
+      #     parallelize_teardown do
+      #       # drop databases
+      #     end
+      #   end
+      def parallelize_teardown(&block)
+        ActiveSupport::Testing::Parallelization.run_cleanup_hook(&block)
+      end
+    end
+
+    alias_method :method_name, :name
+
+    include ActiveSupport::Testing::TaggedLogging
+    prepend ActiveSupport::Testing::SetupAndTeardown
+    include ActiveSupport::Testing::Assertions
+    include ActiveSupport::Testing::Deprecation
+    include ActiveSupport::Testing::TimeHelpers
+    include ActiveSupport::Testing::FileFixtures
+    extend ActiveSupport::Testing::Declarative
+
+    # test/unit backwards compatibility methods
+    alias :assert_raise :assert_raises
+    alias :assert_not_empty :refute_empty
+    alias :assert_not_equal :refute_equal
+    alias :assert_not_in_delta :refute_in_delta
+    alias :assert_not_in_epsilon :refute_in_epsilon
+    alias :assert_not_includes :refute_includes
+    alias :assert_not_instance_of :refute_instance_of
+    alias :assert_not_kind_of :refute_kind_of
+    alias :assert_no_match :refute_match
+    alias :assert_not_nil :refute_nil
+    alias :assert_not_operator :refute_operator
+    alias :assert_not_predicate :refute_predicate
+    alias :assert_not_respond_to :refute_respond_to
+    alias :assert_not_same :refute_same
+
+    ActiveSupport.run_load_hooks(:active_support_test_case, self)
+
+    def inspect # :nodoc:
+      Object.instance_method(:to_s).bind_call(self)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/assertions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/assertions.rb
new file mode 100644
index 0000000..2e48ba1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/assertions.rb
@@ -0,0 +1,265 @@
+# frozen_string_literal: true
+
+require "active_support/core_ext/enumerable"
+
+module ActiveSupport
+  module Testing
+    module Assertions
+      UNTRACKED = Object.new # :nodoc:
+
+      # Asserts that an expression is not truthy. Passes if object is
+      # +nil+ or +false+. "Truthy" means "considered true in a conditional"
+      # like if foo.
+ # + # assert_not nil # => true + # assert_not false # => true + # assert_not 'foo' # => Expected "foo" to be nil or false + # + # An error message can be specified. + # + # assert_not foo, 'foo should be false' + def assert_not(object, message = nil) + message ||= "Expected #{mu_pp(object)} to be nil or false" + assert !object, message + end + + # Assertion that the block should not raise an exception. + # + # Passes if evaluated code in the yielded block raises no exception. + # + # assert_nothing_raised do + # perform_service(param: 'no_exception') + # end + def assert_nothing_raised + yield + rescue => error + raise Minitest::UnexpectedError.new(error) + end + + # Test numeric difference between the return value of an expression as a + # result of what is evaluated in the yielded block. + # + # assert_difference 'Article.count' do + # post :create, params: { article: {...} } + # end + # + # An arbitrary expression is passed in and evaluated. + # + # assert_difference 'Article.last.comments(:reload).size' do + # post :create, params: { comment: {...} } + # end + # + # An arbitrary positive or negative difference can be specified. + # The default is 1. + # + # assert_difference 'Article.count', -1 do + # post :delete, params: { id: ... } + # end + # + # An array of expressions can also be passed in and evaluated. + # + # assert_difference [ 'Article.count', 'Post.count' ], 2 do + # post :create, params: { article: {...} } + # end + # + # A hash of expressions/numeric differences can also be passed in and evaluated. + # + # assert_difference ->{ Article.count } => 1, ->{ Notification.count } => 2 do + # post :create, params: { article: {...} } + # end + # + # A lambda or a list of lambdas can be passed in and evaluated: + # + # assert_difference ->{ Article.count }, 2 do + # post :create, params: { article: {...} } + # end + # + # assert_difference [->{ Article.count }, ->{ Post.count }], 2 do + # post :create, params: { article: {...} } + # end + # + # An error message can be specified. + # + # assert_difference 'Article.count', -1, 'An Article should be destroyed' do + # post :delete, params: { id: ... } + # end + def assert_difference(expression, *args, &block) + expressions = + if expression.is_a?(Hash) + message = args[0] + expression + else + difference = args[0] || 1 + message = args[1] + Array(expression).index_with(difference) + end + + exps = expressions.keys.map { |e| + e.respond_to?(:call) ? e : lambda { eval(e, block.binding) } + } + before = exps.map(&:call) + + retval = _assert_nothing_raised_or_warn("assert_difference", &block) + + expressions.zip(exps, before) do |(code, diff), exp, before_value| + error = "#{code.inspect} didn't change by #{diff}" + error = "#{message}.\n#{error}" if message + assert_equal(before_value + diff, exp.call, error) + end + + retval + end + + # Assertion that the numeric result of evaluating an expression is not + # changed before and after invoking the passed in block. + # + # assert_no_difference 'Article.count' do + # post :create, params: { article: invalid_attributes } + # end + # + # A lambda can be passed in and evaluated. + # + # assert_no_difference -> { Article.count } do + # post :create, params: { article: invalid_attributes } + # end + # + # An error message can be specified. + # + # assert_no_difference 'Article.count', 'An Article should not be created' do + # post :create, params: { article: invalid_attributes } + # end + # + # An array of expressions can also be passed in and evaluated. 
+ # + # assert_no_difference [ 'Article.count', -> { Post.count } ] do + # post :create, params: { article: invalid_attributes } + # end + def assert_no_difference(expression, message = nil, &block) + assert_difference expression, 0, message, &block + end + + # Assertion that the result of evaluating an expression is changed before + # and after invoking the passed in block. + # + # assert_changes 'Status.all_good?' do + # post :create, params: { status: { ok: false } } + # end + # + # You can pass the block as a string to be evaluated in the context of + # the block. A lambda can be passed for the block as well. + # + # assert_changes -> { Status.all_good? } do + # post :create, params: { status: { ok: false } } + # end + # + # The assertion is useful to test side effects. The passed block can be + # anything that can be converted to string with #to_s. + # + # assert_changes :@object do + # @object = 42 + # end + # + # The keyword arguments +:from+ and +:to+ can be given to specify the + # expected initial value and the expected value after the block was + # executed. + # + # assert_changes :@object, from: nil, to: :foo do + # @object = :foo + # end + # + # An error message can be specified. + # + # assert_changes -> { Status.all_good? }, 'Expected the status to be bad' do + # post :create, params: { status: { incident: true } } + # end + def assert_changes(expression, message = nil, from: UNTRACKED, to: UNTRACKED, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = _assert_nothing_raised_or_warn("assert_changes", &block) + + unless from == UNTRACKED + error = "Expected change from #{from.inspect}" + error = "#{message}.\n#{error}" if message + assert from === before, error + end + + after = exp.call + + error = "#{expression.inspect} didn't change" + error = "#{error}. It was already #{to}" if before == to + error = "#{message}.\n#{error}" if message + refute_equal before, after, error + + unless to == UNTRACKED + error = "Expected change to #{to}\n" + error = "#{message}.\n#{error}" if message + assert to === after, error + end + + retval + end + + # Assertion that the result of evaluating an expression is not changed before + # and after invoking the passed in block. + # + # assert_no_changes 'Status.all_good?' do + # post :create, params: { status: { ok: true } } + # end + # + # Provide the optional keyword argument :from to specify the expected + # initial value. + # + # assert_no_changes -> { Status.all_good? }, from: true do + # post :create, params: { status: { ok: true } } + # end + # + # An error message can be specified. + # + # assert_no_changes -> { Status.all_good? }, 'Expected the status to be good' do + # post :create, params: { status: { ok: false } } + # end + def assert_no_changes(expression, message = nil, from: UNTRACKED, &block) + exp = expression.respond_to?(:call) ? expression : -> { eval(expression.to_s, block.binding) } + + before = exp.call + retval = _assert_nothing_raised_or_warn("assert_no_changes", &block) + + unless from == UNTRACKED + error = "Expected initial value of #{from.inspect}" + error = "#{message}.\n#{error}" if message + assert from === before, error + end + + after = exp.call + + error = "#{expression.inspect} changed" + error = "#{message}.\n#{error}" if message + + if before.nil? 
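+          # Minitest deprecates assert_equal with an expected value of nil,
+          # so use assert_nil when the tracked value started out as nil.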
+ assert_nil after, error + else + assert_equal before, after, error + end + + retval + end + + private + def _assert_nothing_raised_or_warn(assertion, &block) + assert_nothing_raised(&block) + rescue Minitest::UnexpectedError => e + if tagged_logger && tagged_logger.warn? + warning = <<~MSG + #{self.class} - #{name}: #{e.error.class} raised. + If you expected this exception, use `assert_raises` as near to the code that raises as possible. + Other block based assertions (e.g. `#{assertion}`) can be used, as long as `assert_raises` is inside their block. + MSG + tagged_logger.warn warning + end + + raise + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/autorun.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/autorun.rb new file mode 100644 index 0000000..889b416 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/autorun.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +gem "minitest" + +require "minitest" + +Minitest.autorun diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/constant_lookup.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/constant_lookup.rb new file mode 100644 index 0000000..51167e9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/constant_lookup.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require "active_support/concern" +require "active_support/inflector" + +module ActiveSupport + module Testing + # Resolves a constant from a minitest spec name. + # + # Given the following spec-style test: + # + # describe WidgetsController, :index do + # describe "authenticated user" do + # describe "returns widgets" do + # it "has a controller that exists" do + # assert_kind_of WidgetsController, @controller + # end + # end + # end + # end + # + # The test will have the following name: + # + # "WidgetsController::index::authenticated user::returns widgets" + # + # The constant WidgetsController can be resolved from the name. + # The following code will resolve the constant: + # + # controller = determine_constant_from_test_name(name) do |constant| + # Class === constant && constant < ::ActionController::Metal + # end + module ConstantLookup + extend ::ActiveSupport::Concern + + module ClassMethods # :nodoc: + def determine_constant_from_test_name(test_name) + names = test_name.split "::" + while names.size > 0 do + names.last.sub!(/Test$/, "") + begin + constant = names.join("::").safe_constantize + break(constant) if yield(constant) + ensure + names.pop + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/declarative.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/declarative.rb new file mode 100644 index 0000000..7c34036 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/declarative.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Declarative + unless defined?(Spec) + # Helper to define a test method using a String. Under the hood, it replaces + # spaces with underscores and defines the test method. + # + # test "verify something" do + # ... + # end + def test(name, &block) + test_name = "test_#{name.gsub(/\s+/, '_')}".to_sym + defined = method_defined? 
test_name + raise "#{test_name} is already defined in #{self}" if defined + if block_given? + define_method(test_name, &block) + else + define_method(test_name) do + flunk "No implementation provided for #{name}" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/deprecation.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/deprecation.rb new file mode 100644 index 0000000..f762d24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/deprecation.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require "active_support/deprecation" + +module ActiveSupport + module Testing + module Deprecation + # Asserts that a matching deprecation warning was emitted by the given deprecator during the execution of the yielded block. + # + # assert_deprecated(/foo/, CustomDeprecator) do + # CustomDeprecator.warn "foo should no longer be used" + # end + # + # The +match+ object may be a +Regexp+, or +String+ appearing in the message. + # + # assert_deprecated('foo', CustomDeprecator) do + # CustomDeprecator.warn "foo should no longer be used" + # end + # + # If the +match+ is omitted (or explicitly +nil+), any deprecation warning will match. + # + # assert_deprecated(nil, CustomDeprecator) do + # CustomDeprecator.warn "foo should no longer be used" + # end + # + # If no +deprecator+ is given, defaults to ActiveSupport::Deprecation. + # + # assert_deprecated do + # ActiveSupport::Deprecation.warn "foo should no longer be used" + # end + def assert_deprecated(match = nil, deprecator = nil, &block) + result, warnings = collect_deprecations(deprecator, &block) + assert !warnings.empty?, "Expected a deprecation warning within the block but received none" + if match + match = Regexp.new(Regexp.escape(match)) unless match.is_a?(Regexp) + assert warnings.any? { |w| match.match?(w) }, "No deprecation warning matched #{match}: #{warnings.join(', ')}" + end + result + end + + # Asserts that no deprecation warnings are emitted by the given deprecator during the execution of the yielded block. + # + # assert_not_deprecated(CustomDeprecator) do + # CustomDeprecator.warn "message" # fails assertion + # end + # + # If no +deprecator+ is given, defaults to ActiveSupport::Deprecation. + # + # assert_not_deprecated do + # ActiveSupport::Deprecation.warn "message" # fails assertion + # end + # + # assert_not_deprecated do + # CustomDeprecator.warn "message" # passes assertion + # end + def assert_not_deprecated(deprecator = nil, &block) + result, deprecations = collect_deprecations(deprecator, &block) + assert deprecations.empty?, "Expected no deprecation warning within the block but received #{deprecations.size}: \n #{deprecations * "\n "}" + result + end + + # Returns an array of all the deprecation warnings emitted by the given + # +deprecator+ during the execution of the yielded block. + # + # collect_deprecations(CustomDeprecator) do + # CustomDeprecator.warn "message" + # end # => ["message"] + # + # If no +deprecator+ is given, defaults to ActiveSupport::Deprecation. 
+ # + # collect_deprecations do + # CustomDeprecator.warn "custom message" + # ActiveSupport::Deprecation.warn "message" + # end # => ["message"] + def collect_deprecations(deprecator = nil) + deprecator ||= ActiveSupport::Deprecation + old_behavior = deprecator.behavior + deprecations = [] + deprecator.behavior = Proc.new do |message, callstack| + deprecations << message + end + result = yield + [result, deprecations] + ensure + deprecator.behavior = old_behavior + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/file_fixtures.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/file_fixtures.rb new file mode 100644 index 0000000..4eb7a88 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/file_fixtures.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require "active_support/concern" + +module ActiveSupport + module Testing + # Adds simple access to sample files called file fixtures. + # File fixtures are normal files stored in + # ActiveSupport::TestCase.file_fixture_path. + # + # File fixtures are represented as +Pathname+ objects. + # This makes it easy to extract specific information: + # + # file_fixture("example.txt").read # get the file's content + # file_fixture("example.mp3").size # get the file size + module FileFixtures + extend ActiveSupport::Concern + + included do + class_attribute :file_fixture_path, instance_writer: false + end + + # Returns a +Pathname+ to the fixture file named +fixture_name+. + # + # Raises +ArgumentError+ if +fixture_name+ can't be found. + def file_fixture(fixture_name) + path = Pathname.new(File.join(file_fixture_path, fixture_name)) + + if path.exist? + path + else + msg = "the directory '%s' does not contain a file named '%s'" + raise ArgumentError, msg % [file_fixture_path, fixture_name] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/isolation.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/isolation.rb new file mode 100644 index 0000000..129ea69 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/isolation.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Isolation + require "thread" + + def self.included(klass) # :nodoc: + klass.class_eval do + parallelize_me! + end + end + + def self.forking_env? + !ENV["NO_FORK"] && Process.respond_to?(:fork) + end + + def run + serialized = run_in_isolation do + super + end + + Marshal.load(serialized) + end + + module Forking + def run_in_isolation(&blk) + read, write = IO.pipe + read.binmode + write.binmode + + pid = fork do + read.close + yield + begin + if error? + failures.map! { |e| + begin + Marshal.dump e + e + rescue TypeError + ex = Exception.new e.message + ex.set_backtrace e.backtrace + Minitest::UnexpectedError.new ex + end + } + end + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + result = Marshal.dump(test_result) + end + + write.puts [result].pack("m") + exit! + end + + write.close + result = read.read + Process.wait2(pid) + result.unpack1("m") + end + end + + module Subprocess + ORIG_ARGV = ARGV.dup unless defined?(ORIG_ARGV) + + # Complicated H4X to get this working in windows / jruby with + # no forking. 
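+        # In outline: the parent process re-runs this single test in a child
+        # Ruby process with ISOLATION_TEST/ISOLATION_OUTPUT set; the child
+        # takes the ENV["ISOLATION_TEST"] branch below, marshals its result
+        # into the tempfile, and the parent reads the result back.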
+ def run_in_isolation(&blk) + require "tempfile" + + if ENV["ISOLATION_TEST"] + yield + test_result = defined?(Minitest::Result) ? Minitest::Result.from(self) : dup + File.open(ENV["ISOLATION_OUTPUT"], "w") do |file| + file.puts [Marshal.dump(test_result)].pack("m") + end + exit! + else + Tempfile.open("isolation") do |tmpfile| + env = { + "ISOLATION_TEST" => self.class.name, + "ISOLATION_OUTPUT" => tmpfile.path + } + + test_opts = "-n#{self.class.name}##{name}" + + load_path_args = [] + $-I.each do |p| + load_path_args << "-I" + load_path_args << File.expand_path(p) + end + + child = IO.popen([env, Gem.ruby, *load_path_args, $0, *ORIG_ARGV, test_opts]) + + begin + Process.wait(child.pid) + rescue Errno::ECHILD # The child process may exit before we wait + nil + end + + return tmpfile.read.unpack1("m") + end + end + end + end + + include forking_env? ? Forking : Subprocess + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/method_call_assertions.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/method_call_assertions.rb new file mode 100644 index 0000000..1d016b0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/method_call_assertions.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require "minitest/mock" + +module ActiveSupport + module Testing + module MethodCallAssertions # :nodoc: + private + def assert_called(object, method_name, message = nil, times: 1, returns: nil, &block) + times_called = 0 + + object.stub(method_name, proc { times_called += 1; returns }, &block) + + error = "Expected #{method_name} to be called #{times} times, " \ + "but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + assert_equal times, times_called, error + end + + def assert_called_with(object, method_name, args, returns: nil, &block) + mock = Minitest::Mock.new + + if args.all?(Array) + args.each { |arg| mock.expect(:call, returns, arg) } + else + mock.expect(:call, returns, args) + end + + object.stub(method_name, mock, &block) + + mock.verify + end + + def assert_not_called(object, method_name, message = nil, &block) + assert_called(object, method_name, message, times: 0, &block) + end + + def assert_called_on_instance_of(klass, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + klass.define_method("stubbed_#{method_name}") do |*| + times_called += 1 + + returns + end + + klass.alias_method "original_#{method_name}", method_name + klass.alias_method method_name, "stubbed_#{method_name}" + + yield + + error = "Expected #{method_name} to be called #{times} times, but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + + assert_equal times, times_called, error + ensure + klass.alias_method method_name, "original_#{method_name}" + klass.undef_method "original_#{method_name}" + klass.undef_method "stubbed_#{method_name}" + end + + def assert_not_called_on_instance_of(klass, method_name, message = nil, &block) + assert_called_on_instance_of(klass, method_name, message, times: 0, &block) + end + + def stub_any_instance(klass, instance: klass.new) + klass.stub(:new, instance) { yield instance } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization.rb new file mode 100644 index 0000000..d1b2734 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "drb" +require "drb/unix" unless Gem.win_platform? +require "active_support/core_ext/module/attribute_accessors" +require "active_support/testing/parallelization/server" +require "active_support/testing/parallelization/worker" + +module ActiveSupport + module Testing + class Parallelization # :nodoc: + @@after_fork_hooks = [] + + def self.after_fork_hook(&blk) + @@after_fork_hooks << blk + end + + cattr_reader :after_fork_hooks + + @@run_cleanup_hooks = [] + + def self.run_cleanup_hook(&blk) + @@run_cleanup_hooks << blk + end + + cattr_reader :run_cleanup_hooks + + def initialize(worker_count) + @worker_count = worker_count + @queue_server = Server.new + @worker_pool = [] + @url = DRb.start_service("drbunix:", @queue_server).uri + end + + def start + @worker_pool = @worker_count.times.map do |worker| + Worker.new(worker, @url).start + end + end + + def <<(work) + @queue_server << work + end + + def size + @worker_count + end + + def shutdown + @queue_server.shutdown + @worker_pool.each { |pid| Process.waitpid pid } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/server.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/server.rb new file mode 100644 index 0000000..3961367 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/server.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require "drb" +require "drb/unix" unless Gem.win_platform? + +module ActiveSupport + module Testing + class Parallelization # :nodoc: + class Server + include DRb::DRbUndumped + + def initialize + @queue = Queue.new + @active_workers = Concurrent::Map.new + @in_flight = Concurrent::Map.new + end + + def record(reporter, result) + raise DRb::DRbConnError if result.is_a?(DRb::DRbUnknown) + + @in_flight.delete([result.klass, result.name]) + + reporter.synchronize do + reporter.record(result) + end + end + + def <<(o) + o[2] = DRbObject.new(o[2]) if o + @queue << o + end + + def pop + if test = @queue.pop + @in_flight[[test[0].to_s, test[1]]] = test + test + end + end + + def start_worker(worker_id) + @active_workers[worker_id] = true + end + + def stop_worker(worker_id) + @active_workers.delete(worker_id) + end + + def active_workers? + @active_workers.size > 0 + end + + def interrupt + @queue.clear + end + + def shutdown + # Wait for initial queue to drain + while @queue.length != 0 + sleep 0.1 + end + + @queue.close + + # Wait until all workers have finished + while active_workers? 
+ sleep 0.1 + end + + @in_flight.values.each do |(klass, name, reporter)| + result = Minitest::Result.from(klass.new(name)) + error = RuntimeError.new("result not reported") + error.set_backtrace([""]) + result.failures << Minitest::UnexpectedError.new(error) + reporter.synchronize do + reporter.record(result) + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/worker.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/worker.rb new file mode 100644 index 0000000..393355a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelization/worker.rb @@ -0,0 +1,103 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + class Parallelization # :nodoc: + class Worker + def initialize(number, url) + @id = SecureRandom.uuid + @number = number + @url = url + @setup_exception = nil + end + + def start + fork do + set_process_title("(starting)") + + DRb.stop_service + + @queue = DRbObject.new_with_uri(@url) + @queue.start_worker(@id) + + begin + after_fork + rescue => @setup_exception; end + + work_from_queue + ensure + set_process_title("(stopping)") + + run_cleanup + @queue.stop_worker(@id) + end + end + + def work_from_queue + while job = @queue.pop + perform_job(job) + end + end + + def perform_job(job) + klass = job[0] + method = job[1] + reporter = job[2] + + set_process_title("#{klass}##{method}") + + result = klass.with_info_handler reporter do + Minitest.run_one_method(klass, method) + end + + safe_record(reporter, result) + end + + def safe_record(reporter, result) + add_setup_exception(result) if @setup_exception + + begin + @queue.record(reporter, result) + rescue DRb::DRbConnError + result.failures.map! do |failure| + if failure.respond_to?(:error) + # minitest >5.14.0 + error = DRb::DRbRemoteError.new(failure.error) + else + error = DRb::DRbRemoteError.new(failure.exception) + end + Minitest::UnexpectedError.new(error) + end + @queue.record(reporter, result) + rescue Interrupt + @queue.interrupt + raise + end + + set_process_title("(idle)") + end + + def after_fork + Parallelization.after_fork_hooks.each do |cb| + cb.call(@number) + end + end + + def run_cleanup + Parallelization.run_cleanup_hooks.each do |cb| + cb.call(@number) + end + end + + private + def add_setup_exception(result) + result.failures.prepend Minitest::UnexpectedError.new(@setup_exception) + end + + def set_process_title(status) + Process.setproctitle("Rails test worker #{@number} - #{status}") + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelize_executor.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelize_executor.rb new file mode 100644 index 0000000..88299db --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/parallelize_executor.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + class ParallelizeExecutor # :nodoc: + attr_reader :size, :parallelize_with, :threshold + + def initialize(size:, with:, threshold: ActiveSupport.test_parallelization_threshold) + @size = size + @parallelize_with = with + @threshold = threshold + end + + def start + parallelize if should_parallelize? + show_execution_info + + parallel_executor.start if parallelized? 
+ end + + def <<(work) + parallel_executor << work if parallelized? + end + + def shutdown + parallel_executor.shutdown if parallelized? + end + + private + def parallel_executor + @parallel_executor ||= build_parallel_executor + end + + def build_parallel_executor + case parallelize_with + when :processes + Testing::Parallelization.new(size) + when :threads + ActiveSupport::TestCase.lock_threads = false if defined?(ActiveSupport::TestCase.lock_threads) + Minitest::Parallel::Executor.new(size) + else + raise ArgumentError, "#{parallelize_with} is not a supported parallelization executor." + end + end + + def parallelize + @parallelized = true + Minitest::Test.parallelize_me! + end + + def parallelized? + @parallelized if defined?(@parallelized) + end + + def should_parallelize? + ENV["PARALLEL_WORKERS"] || tests_count > threshold + end + + def tests_count + @tests_count ||= Minitest::Runnable.runnables.sum { |runnable| runnable.runnable_methods.size } + end + + def show_execution_info + puts execution_info + end + + def execution_info + if parallelized? + "Running #{tests_count} tests in parallel using #{parallel_executor.size} #{parallelize_with}" + else + "Running #{tests_count} tests in a single process (parallelization threshold is #{threshold})" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/setup_and_teardown.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/setup_and_teardown.rb new file mode 100644 index 0000000..35321cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/setup_and_teardown.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require "active_support/callbacks" + +module ActiveSupport + module Testing + # Adds support for +setup+ and +teardown+ callbacks. + # These callbacks serve as a replacement to overwriting the + # #setup and #teardown methods of your TestCase. + # + # class ExampleTest < ActiveSupport::TestCase + # setup do + # # ... + # end + # + # teardown do + # # ... + # end + # end + module SetupAndTeardown + def self.prepended(klass) + klass.include ActiveSupport::Callbacks + klass.define_callbacks :setup, :teardown + klass.extend ClassMethods + end + + module ClassMethods + # Add a callback, which runs before TestCase#setup. + def setup(*args, &block) + set_callback(:setup, :before, *args, &block) + end + + # Add a callback, which runs after TestCase#teardown. 
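+      #
+      # For example (illustrative; @user stands in for any per-test state):
+      #
+      #   teardown do
+      #     @user&.destroy
+      #   end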
+ def teardown(*args, &block) + set_callback(:teardown, :after, *args, &block) + end + end + + def before_setup # :nodoc: + super + run_callbacks :setup + end + + def after_teardown # :nodoc: + begin + run_callbacks :teardown + rescue => e + self.failures << Minitest::UnexpectedError.new(e) + end + + super + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/stream.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/stream.rb new file mode 100644 index 0000000..55017d3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/stream.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + module Stream # :nodoc: + private + def silence_stream(stream) + old_stream = stream.dup + stream.reopen(IO::NULL) + stream.sync = true + yield + ensure + stream.reopen(old_stream) + old_stream.close + end + + def quietly(&block) + silence_stream(STDOUT) do + silence_stream(STDERR, &block) + end + end + + def capture(stream) + stream = stream.to_s + captured_stream = Tempfile.new(stream) + stream_io = eval("$#{stream}") + origin_stream = stream_io.dup + stream_io.reopen(captured_stream) + + yield + + stream_io.rewind + captured_stream.read + ensure + captured_stream.close + captured_stream.unlink + stream_io.reopen(origin_stream) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/tagged_logging.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/tagged_logging.rb new file mode 100644 index 0000000..7d38268 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/tagged_logging.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module ActiveSupport + module Testing + # Logs a "PostsControllerTest: test name" heading before each test to + # make test.log easier to search and follow along with. + module TaggedLogging # :nodoc: + attr_writer :tagged_logger + + def before_setup + if tagged_logger && tagged_logger.info? + heading = "#{self.class}: #{name}" + divider = "-" * heading.size + tagged_logger.info divider + tagged_logger.info heading + tagged_logger.info divider + end + super + end + + private + def tagged_logger + @tagged_logger ||= (defined?(Rails.logger) && Rails.logger) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/time_helpers.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/time_helpers.rb new file mode 100644 index 0000000..a1155bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/testing/time_helpers.rb @@ -0,0 +1,246 @@ +# frozen_string_literal: true + +require "active_support/core_ext/module/redefine_method" +require "active_support/core_ext/time/calculations" +require "concurrent/map" + +module ActiveSupport + module Testing + # Manages stubs for TimeHelpers + class SimpleStubs # :nodoc: + Stub = Struct.new(:object, :method_name, :original_method) + + def initialize + @stubs = Concurrent::Map.new { |h, k| h[k] = {} } + end + + # Stubs object.method_name with the given block + # If the method is already stubbed, remove that stub + # so that removing this stub will restore the original implementation. 
+ # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # target = Time.zone.local(2004, 11, 24, 1, 4, 44) + # simple_stubs.stub_object(Time, :now) { at(target.to_i) } + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + def stub_object(object, method_name, &block) + if stub = stubbing(object, method_name) + unstub_object(stub) + end + + new_name = "__simple_stub__#{method_name}" + + @stubs[object.object_id][method_name] = Stub.new(object, method_name, new_name) + + object.singleton_class.alias_method new_name, method_name + object.define_singleton_method(method_name, &block) + end + + # Remove all object-method stubs held by this instance + def unstub_all! + @stubs.each_value do |object_stubs| + object_stubs.each_value do |stub| + unstub_object(stub) + end + end + @stubs.clear + end + + # Returns the Stub for object#method_name + # (nil if it is not stubbed) + def stubbing(object, method_name) + @stubs[object.object_id][method_name] + end + + # Returns true if any stubs are set, false if there are none + def stubbed? + !@stubs.empty? + end + + private + # Restores the original object.method described by the Stub + def unstub_object(stub) + singleton_class = stub.object.singleton_class + singleton_class.silence_redefinition_of_method stub.method_name + singleton_class.alias_method stub.method_name, stub.original_method + singleton_class.undef_method stub.original_method + end + end + + # Contains helpers that help you test passage of time. + module TimeHelpers + def after_teardown + travel_back + super + end + + # Changes current time to the time in the future or in the past by a given time difference by + # stubbing +Time.now+, +Date.today+, and +DateTime.now+. The stubs are automatically removed + # at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day + # Time.current # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # Date.current # => Sun, 10 Nov 2013 + # DateTime.current # => Sun, 10 Nov 2013 15:34:49 -0500 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel 1.day do + # User.create.created_at # => Sun, 10 Nov 2013 15:34:49 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel(duration, &block) + travel_to Time.now + duration, &block + end + + # Changes current time to the given time by stubbing +Time.now+, + # +Date.today+, and +DateTime.now+ to return the time or date passed into this method. + # The stubs are automatically removed at the end of the test. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # Date.current # => Wed, 24 Nov 2004 + # DateTime.current # => Wed, 24 Nov 2004 01:04:44 -0500 + # + # Dates are taken as their timestamp at the beginning of the day in the + # application time zone. Time.current returns said timestamp, + # and Time.now its equivalent in the system time zone. Similarly, + # Date.current returns a date equal to the argument, and + # Date.today the date according to Time.now, which may + # be different. (Note that you rarely want to deal with Time.now, + # or Date.today, in order to honor the application time zone + # please always use Time.current and Date.current.) 
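+    #
+    #   # Illustrative, assuming the app zone is 'Eastern Time (US & Canada)':
+    #   travel_to Date.new(2004, 11, 24)  # => beginning of that day, app zone
+    #   travel_to "2004-11-24 01:04:44"   # strings go through Time.zone.parse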
+ # + # Note that the usec for the time passed will be set to 0 to prevent rounding + # errors with external services, like MySQL (which will round instead of floor, + # leading to off-by-one-second errors). + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) do + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # end + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + def travel_to(date_or_time) + if block_given? && in_block + travel_to_nested_block_call = <<~MSG + + Calling `travel_to` with a block, when we have previously already made a call to `travel_to`, can lead to confusing time stubbing. + + Instead of: + + travel_to 2.days.from_now do + # 2 days from today + travel_to 3.days.from_now do + # 5 days from today + end + end + + preferred way to achieve above is: + + travel 2.days do + # 2 days from today + end + + travel 5.days do + # 5 days from today + end + + MSG + raise travel_to_nested_block_call + end + + if date_or_time.is_a?(Date) && !date_or_time.is_a?(DateTime) + now = date_or_time.midnight.to_time + elsif date_or_time.is_a?(String) + now = Time.zone.parse(date_or_time) + else + now = date_or_time.to_time.change(usec: 0) + end + + stubbed_time = Time.now if simple_stubs.stubbing(Time, :now) + simple_stubs.stub_object(Time, :now) { at(now.to_i) } + simple_stubs.stub_object(Date, :today) { jd(now.to_date.jd) } + simple_stubs.stub_object(DateTime, :now) { jd(now.to_date.jd, now.hour, now.min, now.sec, Rational(now.utc_offset, 86400)) } + + if block_given? + begin + self.in_block = true + yield + ensure + if stubbed_time + travel_to stubbed_time + else + travel_back + end + self.in_block = false + end + end + end + + # Returns the current time back to its original state, by removing the stubs added by + # +travel+, +travel_to+, and +freeze_time+. + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # + # travel_back + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # This method also accepts a block, which brings the stubs back at the end of the block: + # + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # + # travel_to Time.zone.local(2004, 11, 24, 1, 4, 44) + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + # + # travel_back do + # Time.current # => Sat, 09 Nov 2013 15:34:49 EST -05:00 + # end + # + # Time.current # => Wed, 24 Nov 2004 01:04:44 EST -05:00 + def travel_back + stubbed_time = Time.current if block_given? && simple_stubs.stubbed? + + simple_stubs.unstub_all! + yield if block_given? + ensure + travel_to stubbed_time if stubbed_time + end + alias_method :unfreeze_time, :travel_back + + # Calls +travel_to+ with +Time.now+. 
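+      # Because +Time.now+ is stubbed to a fixed instant, the clock stops
+      # advancing until +travel_back+ (aliased +unfreeze_time+) runs, which
+      # happens automatically at the end of the test.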
+ # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time + # sleep(1) + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # + # This method also accepts a block, which will return the current time back to its original + # state at the end of the block: + # + # Time.current # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # freeze_time do + # sleep(1) + # User.create.created_at # => Sun, 09 Jul 2017 15:34:49 EST -05:00 + # end + # Time.current # => Sun, 09 Jul 2017 15:34:50 EST -05:00 + def freeze_time(&block) + travel_to Time.now, &block + end + + private + def simple_stubs + @simple_stubs ||= SimpleStubs.new + end + + attr_accessor :in_block + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time.rb new file mode 100644 index 0000000..5185467 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module ActiveSupport + autoload :Duration, "active_support/duration" + autoload :TimeWithZone, "active_support/time_with_zone" + autoload :TimeZone, "active_support/values/time_zone" +end + +require "date" +require "time" + +require "active_support/core_ext/time" +require "active_support/core_ext/date" +require "active_support/core_ext/date_time" + +require "active_support/core_ext/integer/time" +require "active_support/core_ext/numeric/time" + +require "active_support/core_ext/string/conversions" +require "active_support/core_ext/string/zones" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time_with_zone.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time_with_zone.rb new file mode 100644 index 0000000..dfa4171 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/time_with_zone.rb @@ -0,0 +1,625 @@ +# frozen_string_literal: true + +require "yaml" + +require "active_support/duration" +require "active_support/values/time_zone" +require "active_support/core_ext/object/acts_like" +require "active_support/core_ext/date_and_time/compatibility" + +module ActiveSupport + # A Time-like class that can represent a time in any time zone. Necessary + # because standard Ruby Time instances are limited to UTC and the + # system's ENV['TZ'] zone. + # + # You shouldn't ever need to create a TimeWithZone instance directly via +new+. + # Instead use methods +local+, +parse+, +at+, and +now+ on TimeZone instances, + # and +in_time_zone+ on Time and DateTime instances. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.local(2007, 2, 10, 15, 30, 45) # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.parse('2007-02-10 15:30:45') # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.at(1171139445) # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # Time.zone.now # => Sun, 18 May 2008 13:07:55.754107581 EDT -04:00 + # Time.utc(2007, 2, 10, 20, 30, 45).in_time_zone # => Sat, 10 Feb 2007 15:30:45.000000000 EST -05:00 + # + # See Time and TimeZone for further documentation of these methods. + # + # TimeWithZone instances implement the same API as Ruby Time instances, so + # that Time and TimeWithZone instances are interchangeable. + # + # t = Time.zone.now # => Sun, 18 May 2008 13:27:25.031505668 EDT -04:00 + # t.hour # => 13 + # t.dst? 
# => true + # t.utc_offset # => -14400 + # t.zone # => "EDT" + # t.to_fs(:rfc822) # => "Sun, 18 May 2008 13:27:25 -0400" + # t + 1.day # => Mon, 19 May 2008 13:27:25.031505668 EDT -04:00 + # t.beginning_of_year # => Tue, 01 Jan 2008 00:00:00.000000000 EST -05:00 + # t > Time.utc(1999) # => true + # t.is_a?(Time) # => true + # t.is_a?(ActiveSupport::TimeWithZone) # => true + class TimeWithZone + # Report class name as 'Time' to thwart type checking. + def self.name + ActiveSupport::Deprecation.warn(<<~EOM) + ActiveSupport::TimeWithZone.name has been deprecated and + from Rails 7.1 will use the default Ruby implementation. + You can set `config.active_support.remove_deprecated_time_with_zone_name = true` + to enable the new behavior now. + EOM + + "Time" + end + + PRECISIONS = Hash.new { |h, n| h[n] = "%FT%T.%#{n}N" } + PRECISIONS[0] = "%FT%T" + + include Comparable, DateAndTime::Compatibility + attr_reader :time_zone + + def initialize(utc_time, time_zone, local_time = nil, period = nil) + @utc = utc_time ? transfer_time_values_to_utc_constructor(utc_time) : nil + @time_zone, @time = time_zone, local_time + @period = @utc ? period : get_period_and_ensure_valid_local_time(period) + end + + # Returns a Time instance that represents the time in +time_zone+. + def time + @time ||= incorporate_utc_offset(@utc, utc_offset) + end + + # Returns a Time instance of the simultaneous time in the UTC timezone. + def utc + @utc ||= incorporate_utc_offset(@time, -utc_offset) + end + alias_method :comparable_time, :utc + alias_method :getgm, :utc + alias_method :getutc, :utc + alias_method :gmtime, :utc + + # Returns the underlying TZInfo::TimezonePeriod. + def period + @period ||= time_zone.period_for_utc(@utc) + end + + # Returns the simultaneous time in Time.zone, or the specified zone. + def in_time_zone(new_zone = ::Time.zone) + return self if time_zone == new_zone + utc.in_time_zone(new_zone) + end + + # Returns a Time instance of the simultaneous time in the system timezone. + def localtime(utc_offset = nil) + utc.getlocal(utc_offset) + end + alias_method :getlocal, :localtime + + # Returns true if the current time is within Daylight Savings Time for the + # specified time zone. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.parse("2012-5-30").dst? # => true + # Time.zone.parse("2012-11-30").dst? # => false + def dst? + period.dst? + end + alias_method :isdst, :dst? + + # Returns true if the current time zone is set to UTC. + # + # Time.zone = 'UTC' # => 'UTC' + # Time.zone.now.utc? # => true + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # Time.zone.now.utc? # => false + def utc? + zone == "UTC" || zone == "UCT" + end + alias_method :gmt?, :utc? + + # Returns the offset from current time to UTC time in seconds. + def utc_offset + period.observed_utc_offset + end + alias_method :gmt_offset, :utc_offset + alias_method :gmtoff, :utc_offset + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.formatted_offset(true) # => "-05:00" + # Time.zone.now.formatted_offset(false) # => "-0500" + # Time.zone = 'UTC' # => "UTC" + # Time.zone.now.formatted_offset(true, "0") # => "0" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc? 
&& alternate_utc_string || TimeZone.seconds_to_utc_offset(utc_offset, colon) + end + + # Returns the time zone abbreviation. + # + # Time.zone = 'Eastern Time (US & Canada)' # => "Eastern Time (US & Canada)" + # Time.zone.now.zone # => "EST" + def zone + period.abbreviation + end + + # Returns a string of the object's date, time, zone, and offset from UTC. + # + # Time.zone.now.inspect # => "Thu, 04 Dec 2014 11:00:25.624541392 EST -05:00" + def inspect + "#{time.strftime('%a, %d %b %Y %H:%M:%S.%9N')} #{zone} #{formatted_offset}" + end + + # Returns a string of the object's date and time in the ISO 8601 standard + # format. + # + # Time.zone.now.xmlschema # => "2014-12-04T11:02:37-05:00" + def xmlschema(fraction_digits = 0) + "#{time.strftime(PRECISIONS[fraction_digits.to_i])}#{formatted_offset(true, 'Z')}" + end + alias_method :iso8601, :xmlschema + alias_method :rfc3339, :xmlschema + + # Coerces time to a string for JSON encoding. The default format is ISO 8601. + # You can get %Y/%m/%d %H:%M:%S +offset style by setting + # ActiveSupport::JSON::Encoding.use_standard_json_time_format + # to +false+. + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = true + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005-02-01T05:15:10.000-10:00" + # + # # With ActiveSupport::JSON::Encoding.use_standard_json_time_format = false + # Time.utc(2005,2,1,15,15,10).in_time_zone("Hawaii").to_json + # # => "2005/02/01 05:15:10 -1000" + def as_json(options = nil) + if ActiveSupport::JSON::Encoding.use_standard_json_time_format + xmlschema(ActiveSupport::JSON::Encoding.time_precision) + else + %(#{time.strftime("%Y/%m/%d %H:%M:%S")} #{formatted_offset(false)}) + end + end + + def init_with(coder) # :nodoc: + initialize(coder["utc"], coder["zone"], coder["time"]) + end + + def encode_with(coder) # :nodoc: + coder.map = { "utc" => utc, "zone" => time_zone, "time" => time } + end + + # Returns a string of the object's date and time in the format used by + # HTTP requests. + # + # Time.zone.now.httpdate # => "Tue, 01 Jan 2013 04:39:43 GMT" + def httpdate + utc.httpdate + end + + # Returns a string of the object's date and time in the RFC 2822 standard + # format. + # + # Time.zone.now.rfc2822 # => "Tue, 01 Jan 2013 04:51:39 +0000" + def rfc2822 + to_fs(:rfc822) + end + alias_method :rfc822, :rfc2822 + + NOT_SET = Object.new # :nodoc: + + # Returns a string of the object's date and time. + def to_s(format = NOT_SET) + if format == :db + ActiveSupport::Deprecation.warn( + "TimeWithZone#to_s(:db) is deprecated. Please use TimeWithZone#to_fs(:db) instead." + ) + utc.to_fs(format) + elsif formatter = ::Time::DATE_FORMATS[format] + ActiveSupport::Deprecation.warn( + "TimeWithZone#to_s(#{format.inspect}) is deprecated. Please use TimeWithZone#to_fs(#{format.inspect}) instead." + ) + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + elsif format == NOT_SET + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" # mimicking Ruby Time#to_s format + else + ActiveSupport::Deprecation.warn( + "TimeWithZone#to_s(#{format.inspect}) is deprecated. Please use TimeWithZone#to_fs(#{format.inspect}) instead." + ) + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" # mimicking Ruby Time#to_s format + end + end + + # Returns a string of the object's date and time. + # + # This method is aliased to to_formatted_s. + # + # Accepts an optional format: + # * :default - default value, mimics Ruby Time#to_s format. 
+ # * :db - format outputs time in UTC :db time. See Time#to_fs(:db). + # * Any key in Time::DATE_FORMATS can be used. See active_support/core_ext/time/conversions.rb. + def to_fs(format = :default) + if format == :db + utc.to_fs(format) + elsif formatter = ::Time::DATE_FORMATS[format] + formatter.respond_to?(:call) ? formatter.call(self).to_s : strftime(formatter) + else + # Change to to_s when deprecation is gone. + "#{time.strftime("%Y-%m-%d %H:%M:%S")} #{formatted_offset(false, 'UTC')}" + end + end + alias_method :to_formatted_s, :to_fs + + # Replaces %Z directive with +zone before passing to Time#strftime, + # so that zone information is correct. + def strftime(format) + format = format.gsub(/((?:\A|[^%])(?:%%)*)%Z/, "\\1#{zone}") + getlocal(utc_offset).strftime(format) + end + + # Use the time in UTC for comparisons. + def <=>(other) + utc <=> other + end + alias_method :before?, :< + alias_method :after?, :> + + # Returns true if the current object's time is within the specified + # +min+ and +max+ time. + def between?(min, max) + utc.between?(min, max) + end + + # Returns true if the current object's time is in the past. + def past? + utc.past? + end + + # Returns true if the current object's time falls within + # the current day. + def today? + time.today? + end + + # Returns true if the current object's time falls within + # the next day (tomorrow). + def tomorrow? + time.tomorrow? + end + alias :next_day? :tomorrow? + + # Returns true if the current object's time falls within + # the previous day (yesterday). + def yesterday? + time.yesterday? + end + alias :prev_day? :yesterday? + + # Returns true if the current object's time is in the future. + def future? + utc.future? + end + + # Returns +true+ if +other+ is equal to current object. + def eql?(other) + other.eql?(utc) + end + + def hash + utc.hash + end + + # Adds an interval of time to the current object's time and returns that + # value as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now + 1000 # => Sun, 02 Nov 2014 01:43:08.725182881 EDT -04:00 + # + # If we're adding a Duration of variable length (i.e., years, months, days), + # move forward from #time, otherwise move forward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time + 24.hours will advance exactly 24 hours, while a + # time + 1.day will advance 23-25 hours, depending on the day. + # + # now + 24.hours # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now + 1.day # => Mon, 03 Nov 2014 01:26:28.725182881 EST -05:00 + def +(other) + if duration_of_variable_length?(other) + method_missing(:+, other) + else + result = utc.acts_like?(:date) ? utc.since(other) : utc + other rescue utc.since(other) + result.in_time_zone(time_zone) + end + end + alias_method :since, :+ + alias_method :in, :+ + + # Subtracts an interval of time and returns a new TimeWithZone object unless + # the other value +acts_like?+ time. In which case, it will subtract the + # other time and return the difference in seconds as a Float. 
+ # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now - 1000 # => Mon, 03 Nov 2014 00:09:48.725182881 EST -05:00 + # + # If subtracting a Duration of variable length (i.e., years, months, days), + # move backward from #time, otherwise move backward from #utc, for accuracy + # when moving across DST boundaries. + # + # For instance, a time - 24.hours will subtract exactly 24 hours, while a + # time - 1.day will subtract 23-25 hours, depending on the day. + # + # now - 24.hours # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now - 1.day # => Sun, 02 Nov 2014 00:26:28.725182881 EDT -04:00 + # + # If both the TimeWithZone object and the other value act like Time, a Float + # will be returned. + # + # Time.zone.now - 1.day.ago # => 86399.999967 + # + def -(other) + if other.acts_like?(:time) + to_time - other.to_time + elsif duration_of_variable_length?(other) + method_missing(:-, other) + else + result = utc.acts_like?(:date) ? utc.ago(other) : utc - other rescue utc.ago(other) + result.in_time_zone(time_zone) + end + end + + # Subtracts an interval of time from the current object's time and returns + # the result as a new TimeWithZone object. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Mon, 03 Nov 2014 00:26:28.725182881 EST -05:00 + # now.ago(1000) # => Mon, 03 Nov 2014 00:09:48.725182881 EST -05:00 + # + # If we're subtracting a Duration of variable length (i.e., years, months, + # days), move backward from #time, otherwise move backward from #utc, for + # accuracy when moving across DST boundaries. + # + # For instance, time.ago(24.hours) will move back exactly 24 hours, + # while time.ago(1.day) will move back 23-25 hours, depending on + # the day. + # + # now.ago(24.hours) # => Sun, 02 Nov 2014 01:26:28.725182881 EDT -04:00 + # now.ago(1.day) # => Sun, 02 Nov 2014 00:26:28.725182881 EDT -04:00 + def ago(other) + since(-other) + end + + # Returns a new +ActiveSupport::TimeWithZone+ where one or more of the elements have + # been changed according to the +options+ parameter. The time options (:hour, + # :min, :sec, :usec, :nsec) reset cascadingly, + # so if only the hour is passed, then minute, sec, usec, and nsec are set to 0. If the + # hour and minute are passed, then sec, usec, and nsec are set to 0. The +options+ + # parameter takes a hash with any of these keys: :year, :month, + # :day, :hour, :min, :sec, :usec, + # :nsec, :offset, :zone. Pass either :usec + # or :nsec, not both. Similarly, pass either :zone or + # :offset, not both.
+ # + # t = Time.zone.now # => Fri, 14 Apr 2017 11:45:15.116992711 EST -05:00 + # t.change(year: 2020) # => Tue, 14 Apr 2020 11:45:15.116992711 EST -05:00 + # t.change(hour: 12) # => Fri, 14 Apr 2017 12:00:00.116992711 EST -05:00 + # t.change(min: 30) # => Fri, 14 Apr 2017 11:30:00.116992711 EST -05:00 + # t.change(offset: "-10:00") # => Fri, 14 Apr 2017 11:45:15.116992711 HST -10:00 + # t.change(zone: "Hawaii") # => Fri, 14 Apr 2017 11:45:15.116992711 HST -10:00 + def change(options) + if options[:zone] && options[:offset] + raise ArgumentError, "Can't change both :offset and :zone at the same time: #{options.inspect}" + end + + new_time = time.change(options) + + if options[:zone] + new_zone = ::Time.find_zone(options[:zone]) + elsif options[:offset] + new_zone = ::Time.find_zone(new_time.utc_offset) + end + + new_zone ||= time_zone + periods = new_zone.periods_for_local(new_time) + + self.class.new(nil, new_zone, new_time, periods.include?(period) ? period : nil) + end + + # Uses Date to provide precise Time calculations for years, months, and days + # according to the proleptic Gregorian calendar. The result is returned as a + # new TimeWithZone object. + # + # The +options+ parameter takes a hash with any of these keys: + # :years, :months, :weeks, :days, + # :hours, :minutes, :seconds. + # + # If advancing by a value of variable length (i.e., years, weeks, months, + # days), move forward from #time, otherwise move forward from #utc, for + # accuracy when moving across DST boundaries. + # + # Time.zone = 'Eastern Time (US & Canada)' # => 'Eastern Time (US & Canada)' + # now = Time.zone.now # => Sun, 02 Nov 2014 01:26:28.558049687 EDT -04:00 + # now.advance(seconds: 1) # => Sun, 02 Nov 2014 01:26:29.558049687 EDT -04:00 + # now.advance(minutes: 1) # => Sun, 02 Nov 2014 01:27:28.558049687 EDT -04:00 + # now.advance(hours: 1) # => Sun, 02 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(days: 1) # => Mon, 03 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(weeks: 1) # => Sun, 09 Nov 2014 01:26:28.558049687 EST -05:00 + # now.advance(months: 1) # => Tue, 02 Dec 2014 01:26:28.558049687 EST -05:00 + # now.advance(years: 1) # => Mon, 02 Nov 2015 01:26:28.558049687 EST -05:00 + def advance(options) + # If we're advancing a value of variable length (i.e., years, weeks, months, days), advance from #time, + # otherwise advance from #utc, for accuracy when moving across DST boundaries + if options.values_at(:years, :weeks, :months, :days).any? + method_missing(:advance, options) + else + utc.advance(options).in_time_zone(time_zone) + end + end + + %w(year mon month day mday wday yday hour min sec usec nsec to_date).each do |method_name| + class_eval <<-EOV, __FILE__, __LINE__ + 1 + def #{method_name} # def month + time.#{method_name} # time.month + end # end + EOV + end + + # Returns Array of parts of Time in sequence of + # [seconds, minutes, hours, day, month, year, weekday, yearday, dst?, zone]. + # + # now = Time.zone.now # => Tue, 18 Aug 2015 02:29:27.485278555 UTC +00:00 + # now.to_a # => [27, 29, 2, 18, 8, 2015, 2, 230, false, "UTC"] + def to_a + [time.sec, time.min, time.hour, time.day, time.mon, time.year, time.wday, time.yday, dst?, zone] + end + + # Returns the object's date and time as a floating-point number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_f # => 1417709320.285418 + def to_f + utc.to_f + end + + # Returns the object's date and time as an integer number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). 
+ # + # Time.zone.now.to_i # => 1417709320 + def to_i + utc.to_i + end + alias_method :tv_sec, :to_i + + # Returns the object's date and time as a rational number of seconds + # since the Epoch (January 1, 1970 00:00 UTC). + # + # Time.zone.now.to_r # => (708854548642709/500000) + def to_r + utc.to_r + end + + # Returns an instance of DateTime with the timezone's UTC offset + # + # Time.zone.now.to_datetime # => Tue, 18 Aug 2015 02:32:20 +0000 + # Time.current.in_time_zone('Hawaii').to_datetime # => Mon, 17 Aug 2015 16:32:20 -1000 + def to_datetime + @to_datetime ||= utc.to_datetime.new_offset(Rational(utc_offset, 86_400)) + end + + # Returns an instance of +Time+, either with the same UTC offset + # as +self+ or in the local system timezone depending on the setting + # of +ActiveSupport.to_time_preserves_timezone+. + def to_time + if preserve_timezone + @to_time_with_instance_offset ||= getlocal(utc_offset) + else + @to_time_with_system_offset ||= getlocal + end + end + + # So that +self+ acts_like?(:time). + def acts_like_time? + true + end + + # Say we're a Time to thwart type checking. + def is_a?(klass) + klass == ::Time || super + end + alias_method :kind_of?, :is_a? + + # An instance of ActiveSupport::TimeWithZone is never blank + def blank? + false + end + + def freeze + # preload instance variables before freezing + period; utc; time; to_datetime; to_time + super + end + + def marshal_dump + [utc, time_zone.name, time] + end + + def marshal_load(variables) + initialize(variables[0].utc, ::Time.find_zone(variables[1]), variables[2].utc) + end + + # respond_to_missing? is not called in some cases, such as when type conversion is + # performed with Kernel#String + def respond_to?(sym, include_priv = false) + # ensure that we're not going to throw and rescue from NoMethodError in method_missing which is slow + return false if sym.to_sym == :to_str + super + end + + # Ensure proxy class responds to all methods that underlying time instance + # responds to. + def respond_to_missing?(sym, include_priv) + return false if sym.to_sym == :acts_like_date? + time.respond_to?(sym, include_priv) + end + + # Send the missing method to +time+ instance, and wrap result in a new + # TimeWithZone with the existing +time_zone+. + def method_missing(sym, *args, &block) + wrap_with_time_zone time.__send__(sym, *args, &block) + rescue NoMethodError => e + raise e, e.message.sub(time.inspect, inspect).sub("Time", "ActiveSupport::TimeWithZone"), e.backtrace + end + + private + SECONDS_PER_DAY = 86400 + + def incorporate_utc_offset(time, offset) + if time.kind_of?(Date) + time + Rational(offset, SECONDS_PER_DAY) + else + time + offset + end + end + + def get_period_and_ensure_valid_local_time(period) + # we don't want a Time.local instance enforcing its own DST rules as well, + # so transfer time values to a utc constructor if necessary + @time = transfer_time_values_to_utc_constructor(@time) unless @time.utc? + begin + period || @time_zone.period_for_local(@time) + rescue ::TZInfo::PeriodNotFound + # time is in the "spring forward" hour gap, so we're moving the time forward one hour and trying again + @time += 1.hour + retry + end + end + + def transfer_time_values_to_utc_constructor(time) + # avoid creating another Time object if possible + return time if time.instance_of?(::Time) && time.utc? + ::Time.utc(time.year, time.month, time.day, time.hour, time.min, time.sec + time.subsec) + end + + def duration_of_variable_length?(obj) + ActiveSupport::Duration === obj && obj.variable? 
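+          # "Variable" durations are those with calendar-dependent parts
+          # (years, months, weeks, days) whose length in seconds is not fixed.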
+ end + + def wrap_with_time_zone(time) + if time.acts_like?(:time) + periods = time_zone.periods_for_local(time) + self.class.new(nil, time_zone, time, periods.include?(period) ? period : nil) + elsif time.is_a?(Range) + wrap_with_time_zone(time.begin)..wrap_with_time_zone(time.end) + else + time + end + end + end +end + +# These prevent Psych from calling `ActiveSupport::TimeWithZone.name` +# and triggering the deprecation warning about the change in Rails 7.1. +YAML.load_tags["!ruby/object:ActiveSupport::TimeWithZone"] = "ActiveSupport::TimeWithZone" +YAML.dump_tags[ActiveSupport::TimeWithZone] = "!ruby/object:ActiveSupport::TimeWithZone" diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/values/time_zone.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/values/time_zone.rb new file mode 100644 index 0000000..e059142 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/values/time_zone.rb @@ -0,0 +1,601 @@ +# frozen_string_literal: true + +require "tzinfo" +require "concurrent/map" + +module ActiveSupport + # The TimeZone class serves as a wrapper around TZInfo::Timezone instances. + # It allows us to do the following: + # + # * Limit the set of zones provided by TZInfo to a meaningful subset of 134 + # zones. + # * Retrieve and display zones with a friendlier name + # (e.g., "Eastern Time (US & Canada)" instead of "America/New_York"). + # * Lazily load TZInfo::Timezone instances only when they're needed. + # * Create ActiveSupport::TimeWithZone instances via TimeZone's +local+, + # +parse+, +at+, and +now+ methods. + # + # If you set config.time_zone in the Rails Application, you can + # access this TimeZone object via Time.zone: + # + # # application.rb: + # class Application < Rails::Application + # config.time_zone = 'Eastern Time (US & Canada)' + # end + # + # Time.zone # => #<ActiveSupport::TimeZone @name="Eastern Time (US & Canada)", ...> + # Time.zone.name # => "Eastern Time (US & Canada)" + # Time.zone.now # => Sun, 18 May 2008 14:30:44 EDT -04:00 + class TimeZone + # Keys are Rails TimeZone names, values are TZInfo identifiers.
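+    #
+    #   ActiveSupport::TimeZone::MAPPING["Hawaii"] # => "Pacific/Honolulu"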
+ MAPPING = { + "International Date Line West" => "Etc/GMT+12", + "Midway Island" => "Pacific/Midway", + "American Samoa" => "Pacific/Pago_Pago", + "Hawaii" => "Pacific/Honolulu", + "Alaska" => "America/Juneau", + "Pacific Time (US & Canada)" => "America/Los_Angeles", + "Tijuana" => "America/Tijuana", + "Mountain Time (US & Canada)" => "America/Denver", + "Arizona" => "America/Phoenix", + "Chihuahua" => "America/Chihuahua", + "Mazatlan" => "America/Mazatlan", + "Central Time (US & Canada)" => "America/Chicago", + "Saskatchewan" => "America/Regina", + "Guadalajara" => "America/Mexico_City", + "Mexico City" => "America/Mexico_City", + "Monterrey" => "America/Monterrey", + "Central America" => "America/Guatemala", + "Eastern Time (US & Canada)" => "America/New_York", + "Indiana (East)" => "America/Indiana/Indianapolis", + "Bogota" => "America/Bogota", + "Lima" => "America/Lima", + "Quito" => "America/Lima", + "Atlantic Time (Canada)" => "America/Halifax", + "Caracas" => "America/Caracas", + "La Paz" => "America/La_Paz", + "Santiago" => "America/Santiago", + "Newfoundland" => "America/St_Johns", + "Brasilia" => "America/Sao_Paulo", + "Buenos Aires" => "America/Argentina/Buenos_Aires", + "Montevideo" => "America/Montevideo", + "Georgetown" => "America/Guyana", + "Puerto Rico" => "America/Puerto_Rico", + "Greenland" => "America/Godthab", + "Mid-Atlantic" => "Atlantic/South_Georgia", + "Azores" => "Atlantic/Azores", + "Cape Verde Is." => "Atlantic/Cape_Verde", + "Dublin" => "Europe/Dublin", + "Edinburgh" => "Europe/London", + "Lisbon" => "Europe/Lisbon", + "London" => "Europe/London", + "Casablanca" => "Africa/Casablanca", + "Monrovia" => "Africa/Monrovia", + "UTC" => "Etc/UTC", + "Belgrade" => "Europe/Belgrade", + "Bratislava" => "Europe/Bratislava", + "Budapest" => "Europe/Budapest", + "Ljubljana" => "Europe/Ljubljana", + "Prague" => "Europe/Prague", + "Sarajevo" => "Europe/Sarajevo", + "Skopje" => "Europe/Skopje", + "Warsaw" => "Europe/Warsaw", + "Zagreb" => "Europe/Zagreb", + "Brussels" => "Europe/Brussels", + "Copenhagen" => "Europe/Copenhagen", + "Madrid" => "Europe/Madrid", + "Paris" => "Europe/Paris", + "Amsterdam" => "Europe/Amsterdam", + "Berlin" => "Europe/Berlin", + "Bern" => "Europe/Zurich", + "Zurich" => "Europe/Zurich", + "Rome" => "Europe/Rome", + "Stockholm" => "Europe/Stockholm", + "Vienna" => "Europe/Vienna", + "West Central Africa" => "Africa/Algiers", + "Bucharest" => "Europe/Bucharest", + "Cairo" => "Africa/Cairo", + "Helsinki" => "Europe/Helsinki", + "Kyiv" => "Europe/Kiev", + "Riga" => "Europe/Riga", + "Sofia" => "Europe/Sofia", + "Tallinn" => "Europe/Tallinn", + "Vilnius" => "Europe/Vilnius", + "Athens" => "Europe/Athens", + "Istanbul" => "Europe/Istanbul", + "Minsk" => "Europe/Minsk", + "Jerusalem" => "Asia/Jerusalem", + "Harare" => "Africa/Harare", + "Pretoria" => "Africa/Johannesburg", + "Kaliningrad" => "Europe/Kaliningrad", + "Moscow" => "Europe/Moscow", + "St. 
Petersburg" => "Europe/Moscow", + "Volgograd" => "Europe/Volgograd", + "Samara" => "Europe/Samara", + "Kuwait" => "Asia/Kuwait", + "Riyadh" => "Asia/Riyadh", + "Nairobi" => "Africa/Nairobi", + "Baghdad" => "Asia/Baghdad", + "Tehran" => "Asia/Tehran", + "Abu Dhabi" => "Asia/Muscat", + "Muscat" => "Asia/Muscat", + "Baku" => "Asia/Baku", + "Tbilisi" => "Asia/Tbilisi", + "Yerevan" => "Asia/Yerevan", + "Kabul" => "Asia/Kabul", + "Ekaterinburg" => "Asia/Yekaterinburg", + "Islamabad" => "Asia/Karachi", + "Karachi" => "Asia/Karachi", + "Tashkent" => "Asia/Tashkent", + "Chennai" => "Asia/Kolkata", + "Kolkata" => "Asia/Kolkata", + "Mumbai" => "Asia/Kolkata", + "New Delhi" => "Asia/Kolkata", + "Kathmandu" => "Asia/Kathmandu", + "Astana" => "Asia/Dhaka", + "Dhaka" => "Asia/Dhaka", + "Sri Jayawardenepura" => "Asia/Colombo", + "Almaty" => "Asia/Almaty", + "Novosibirsk" => "Asia/Novosibirsk", + "Rangoon" => "Asia/Rangoon", + "Bangkok" => "Asia/Bangkok", + "Hanoi" => "Asia/Bangkok", + "Jakarta" => "Asia/Jakarta", + "Krasnoyarsk" => "Asia/Krasnoyarsk", + "Beijing" => "Asia/Shanghai", + "Chongqing" => "Asia/Chongqing", + "Hong Kong" => "Asia/Hong_Kong", + "Urumqi" => "Asia/Urumqi", + "Kuala Lumpur" => "Asia/Kuala_Lumpur", + "Singapore" => "Asia/Singapore", + "Taipei" => "Asia/Taipei", + "Perth" => "Australia/Perth", + "Irkutsk" => "Asia/Irkutsk", + "Ulaanbaatar" => "Asia/Ulaanbaatar", + "Seoul" => "Asia/Seoul", + "Osaka" => "Asia/Tokyo", + "Sapporo" => "Asia/Tokyo", + "Tokyo" => "Asia/Tokyo", + "Yakutsk" => "Asia/Yakutsk", + "Darwin" => "Australia/Darwin", + "Adelaide" => "Australia/Adelaide", + "Canberra" => "Australia/Melbourne", + "Melbourne" => "Australia/Melbourne", + "Sydney" => "Australia/Sydney", + "Brisbane" => "Australia/Brisbane", + "Hobart" => "Australia/Hobart", + "Vladivostok" => "Asia/Vladivostok", + "Guam" => "Pacific/Guam", + "Port Moresby" => "Pacific/Port_Moresby", + "Magadan" => "Asia/Magadan", + "Srednekolymsk" => "Asia/Srednekolymsk", + "Solomon Is." => "Pacific/Guadalcanal", + "New Caledonia" => "Pacific/Noumea", + "Fiji" => "Pacific/Fiji", + "Kamchatka" => "Asia/Kamchatka", + "Marshall Is." => "Pacific/Majuro", + "Auckland" => "Pacific/Auckland", + "Wellington" => "Pacific/Auckland", + "Nuku'alofa" => "Pacific/Tongatapu", + "Tokelau Is." => "Pacific/Fakaofo", + "Chatham Is." => "Pacific/Chatham", + "Samoa" => "Pacific/Apia" + } + + UTC_OFFSET_WITH_COLON = "%s%02d:%02d" # :nodoc: + UTC_OFFSET_WITHOUT_COLON = UTC_OFFSET_WITH_COLON.tr(":", "") # :nodoc: + private_constant :UTC_OFFSET_WITH_COLON, :UTC_OFFSET_WITHOUT_COLON + + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + + class << self + # Assumes self represents an offset from UTC in seconds (as returned from + # Time#utc_offset) and turns this into an +HH:MM formatted string. + # + # ActiveSupport::TimeZone.seconds_to_utc_offset(-21_600) # => "-06:00" + def seconds_to_utc_offset(seconds, colon = true) + format = colon ? UTC_OFFSET_WITH_COLON : UTC_OFFSET_WITHOUT_COLON + sign = (seconds < 0 ? "-" : "+") + hours = seconds.abs / 3600 + minutes = (seconds.abs % 3600) / 60 + format % [sign, hours, minutes] + end + + def find_tzinfo(name) + TZInfo::Timezone.get(MAPPING[name] || name) + end + + alias_method :create, :new + + # Returns a TimeZone instance with the given name, or +nil+ if no + # such TimeZone instance exists. (This exists to support the use of + # this class with the +composed_of+ macro.) + def new(name) + self[name] + end + + # Returns an array of all TimeZone objects. 
There are multiple + # TimeZone objects per time zone, in many cases, to make it easier + # for users to find their own time zone. + def all + @zones ||= zones_map.values.sort + end + + # Locate a specific time zone object. If the argument is a string, it + # is interpreted to mean the name of the timezone to locate. If it is a + # numeric value it is either the hour offset, or the second offset, of the + # timezone to find. (The first one with that offset will be returned.) + # Returns +nil+ if no such time zone is known to the system. + def [](arg) + case arg + when self + arg + when String + begin + @lazy_zones_map[arg] ||= create(arg) + rescue TZInfo::InvalidTimezoneIdentifier + nil + end + when TZInfo::Timezone + @lazy_zones_map[arg.name] ||= create(arg.name, nil, arg) + when Numeric, ActiveSupport::Duration + arg *= 3600 if arg.abs <= 13 + all.find { |z| z.utc_offset == arg.to_i } + else + raise ArgumentError, "invalid argument to TimeZone[]: #{arg.inspect}" + end + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the USA. + def us_zones + country_zones(:us) + end + + # A convenience method for returning a collection of TimeZone objects + # for time zones in the country specified by its ISO 3166-1 Alpha2 code. + def country_zones(country_code) + code = country_code.to_s.upcase + @country_zones[code] ||= load_country_zones(code) + end + + def clear # :nodoc: + @lazy_zones_map = Concurrent::Map.new + @country_zones = Concurrent::Map.new + @zones = nil + @zones_map = nil + end + + private + def load_country_zones(code) + country = TZInfo::Country.get(code) + country.zone_identifiers.flat_map do |tz_id| + if MAPPING.value?(tz_id) + MAPPING.inject([]) do |memo, (key, value)| + memo << self[key] if value == tz_id + memo + end + else + create(tz_id, nil, TZInfo::Timezone.get(tz_id)) + end + end.sort! + end + + def zones_map + @zones_map ||= MAPPING.each_with_object({}) do |(name, _), zones| + timezone = self[name] + zones[name] = timezone if timezone + end + end + end + + include Comparable + attr_reader :name + attr_reader :tzinfo + + # Create a new TimeZone object with the given name and offset. The + # offset is the number of seconds that this time zone is offset from UTC + # (GMT). Seconds were chosen as the offset unit because that is the unit + # that Ruby uses to represent time zone offsets (see Time#utc_offset). + def initialize(name, utc_offset = nil, tzinfo = nil) + @name = name + @utc_offset = utc_offset + @tzinfo = tzinfo || TimeZone.find_tzinfo(name) + end + + # Returns the offset of this time zone from UTC in seconds. + def utc_offset + @utc_offset || tzinfo&.current_period&.base_utc_offset + end + + # Returns a formatted string of the offset from UTC, or an alternative + # string if the time zone is already UTC. + # + # zone = ActiveSupport::TimeZone['Central Time (US & Canada)'] + # zone.formatted_offset # => "-06:00" + # zone.formatted_offset(false) # => "-0600" + def formatted_offset(colon = true, alternate_utc_string = nil) + utc_offset == 0 && alternate_utc_string || self.class.seconds_to_utc_offset(utc_offset, colon) + end + + # Compare this time zone to the parameter. The two are compared first on + # their offsets, and then by name. + def <=>(zone) + return unless zone.respond_to? :utc_offset + result = (utc_offset <=> zone.utc_offset) + result = (name <=> zone.name) if result == 0 + result + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. 
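+    #
+    #   zone = ActiveSupport::TimeZone['Eastern Time (US & Canada)']
+    #   zone =~ /New_York/ # => true, matched against "America/New_York"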
+ def =~(re) + re === name || re === MAPPING[name] + end + + # Compare #name and TZInfo identifier to a supplied regexp, returning +true+ + # if a match is found. + def match?(re) + (re == name) || (re == MAPPING[name]) || + ((Regexp === re) && (re.match?(name) || re.match?(MAPPING[name]))) + end + + # Returns a textual representation of this time zone. + def to_s + "(GMT#{formatted_offset}) #{name}" + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from given values. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.local(2007, 2, 1, 15, 30, 45) # => Thu, 01 Feb 2007 15:30:45 HST -10:00 + def local(*args) + time = Time.utc(*args) + ActiveSupport::TimeWithZone.new(nil, self, time) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from number of seconds since the Unix epoch. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.utc(2000).to_f # => 946684800.0 + # Time.zone.at(946684800.0) # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # A second argument can be supplied to specify sub-second precision. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.at(946684800, 123456.789).nsec # => 123456789 + def at(*args) + Time.at(*args).utc.in_time_zone(self) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an ISO 8601 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31T14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time components are missing then they will be set to zero. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.iso8601('1999-12-31') # => Fri, 31 Dec 1999 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ will be raised unlike +parse+ + # which usually returns +nil+ when given an invalid date string. + def iso8601(str) + # Historically `Date._iso8601(nil)` returns `{}`, but in the `date` gem versions `3.2.1`, `3.1.2`, `3.0.2`, + # and `2.0.1`, `Date._iso8601(nil)` raises `TypeError` https://github.com/ruby/date/issues/39 + # Future `date` releases are expected to revert back to the original behavior. + raise ArgumentError, "invalid date" if str.nil? + + parts = Date._iso8601(str) + + year = parts.fetch(:year) + + if parts.key?(:yday) + ordinal_date = Date.ordinal(year, parts.fetch(:yday)) + month = ordinal_date.month + day = ordinal_date.day + else + month = parts.fetch(:mon) + day = parts.fetch(:mday) + end + + time = Time.new( + year, + month, + day, + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + + if parts[:offset] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + + rescue Date::Error, KeyError + raise ArgumentError, "invalid date" + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from parsed string. 
+ # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.parse('1999-12-31 14:00:00') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.parse('22:30:00') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.parse('Mar 2000') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + # + # If the string is invalid then an +ArgumentError+ could be raised. + def parse(str, now = now()) + parts_to_time(Date._parse(str, false), now) + end + + # Method for creating new ActiveSupport::TimeWithZone instance in time zone + # of +self+ from an RFC 3339 string. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('2000-01-01T00:00:00Z') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If the time or zone components are missing then an +ArgumentError+ will + # be raised. This is much stricter than either +parse+ or +iso8601+ which + # allow for missing components. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.rfc3339('1999-12-31') # => ArgumentError: invalid date + def rfc3339(str) + parts = Date._rfc3339(str) + + raise ArgumentError, "invalid date" if parts.empty? + + time = Time.new( + parts.fetch(:year), + parts.fetch(:mon), + parts.fetch(:mday), + parts.fetch(:hour), + parts.fetch(:min), + parts.fetch(:sec) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset) + ) + + TimeWithZone.new(time.utc, self) + end + + # Parses +str+ according to +format+ and returns an ActiveSupport::TimeWithZone. + # + # Assumes that +str+ is a time in the time zone +self+, + # unless +format+ includes an explicit time zone. + # (This is the same behavior as +parse+.) + # In either case, the returned TimeWithZone has the timezone of +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.strptime('1999-12-31 14:00:00', '%Y-%m-%d %H:%M:%S') # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # + # If upper components are missing from the string, they are supplied from + # TimeZone#now: + # + # Time.zone.now # => Fri, 31 Dec 1999 14:00:00 HST -10:00 + # Time.zone.strptime('22:30:00', '%H:%M:%S') # => Fri, 31 Dec 1999 22:30:00 HST -10:00 + # + # However, if the date component is not provided, but any other upper + # components are supplied, then the day of the month defaults to 1: + # + # Time.zone.strptime('Mar 2000', '%b %Y') # => Wed, 01 Mar 2000 00:00:00 HST -10:00 + def strptime(str, format, now = now()) + parts_to_time(DateTime._strptime(str, format), now) + end + + # Returns an ActiveSupport::TimeWithZone instance representing the current + # time in the time zone represented by +self+. + # + # Time.zone = 'Hawaii' # => "Hawaii" + # Time.zone.now # => Wed, 23 Jan 2008 20:24:27 HST -10:00 + def now + time_now.utc.in_time_zone(self) + end + + # Returns the current date in this time zone. + def today + tzinfo.now.to_date + end + + # Returns the next date in this time zone. + def tomorrow + today + 1 + end + + # Returns the previous date in this time zone. + def yesterday + today - 1 + end + + # Adjust the given time to the simultaneous time in the time zone + # represented by +self+. Returns a local time with the appropriate offset + # -- if you want an ActiveSupport::TimeWithZone instance, use + # Time#in_time_zone() instead. 
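+    #
+    #   zone = ActiveSupport::TimeZone['Eastern Time (US & Canada)']
+    #   zone.utc_to_local(Time.utc(2000, 1, 1, 5))
+    #   # => 2000-01-01 00:00:00 -0500 (exact return type depends on the
+    #   #    config described below)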
+ # + # As of tzinfo 2, utc_to_local returns a Time with a non-zero utc_offset. + # See the +utc_to_local_returns_utc_offset_times+ config for more info. + def utc_to_local(time) + tzinfo.utc_to_local(time).yield_self do |t| + ActiveSupport.utc_to_local_returns_utc_offset_times ? + t : Time.utc(t.year, t.month, t.day, t.hour, t.min, t.sec, t.sec_fraction * 1_000_000) + end + end + + # Adjust the given time to the simultaneous time in UTC. Returns a + # Time.utc() instance. + def local_to_utc(time, dst = true) + tzinfo.local_to_utc(time, dst) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_utc(time) + tzinfo.period_for_utc(time) + end + + # Available so that TimeZone instances respond like TZInfo::Timezone + # instances. + def period_for_local(time, dst = true) + tzinfo.period_for_local(time, dst) { |periods| periods.last } + end + + def periods_for_local(time) # :nodoc: + tzinfo.periods_for_local(time) + end + + def init_with(coder) # :nodoc: + initialize(coder["name"]) + end + + def encode_with(coder) # :nodoc: + coder.tag = "!ruby/object:#{self.class}" + coder.map = { "name" => tzinfo.name } + end + + private + def parts_to_time(parts, now) + raise ArgumentError, "invalid date" if parts.nil? + return if parts.empty? + + if parts[:seconds] + time = Time.at(parts[:seconds]) + else + time = Time.new( + parts.fetch(:year, now.year), + parts.fetch(:mon, now.month), + parts.fetch(:mday, parts[:year] || parts[:mon] ? 1 : now.day), + parts.fetch(:hour, 0), + parts.fetch(:min, 0), + parts.fetch(:sec, 0) + parts.fetch(:sec_fraction, 0), + parts.fetch(:offset, 0) + ) + end + + if parts[:offset] || parts[:seconds] + TimeWithZone.new(time.utc, self) + else + TimeWithZone.new(nil, self, time) + end + end + + def time_now + Time.now + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/version.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/version.rb new file mode 100644 index 0000000..c3a0728 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/version.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require_relative "gem_version" + +module ActiveSupport + # Returns the currently loaded version of Active Support as a Gem::Version. + def self.version + gem_version + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini.rb new file mode 100644 index 0000000..dd6bee0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini.rb @@ -0,0 +1,202 @@ +# frozen_string_literal: true + +require "time" +require "base64" +require "bigdecimal" +require "bigdecimal/util" +require "active_support/core_ext/module/delegation" +require "active_support/core_ext/string/inflections" +require "active_support/core_ext/date_time/calculations" + +module ActiveSupport + # = XmlMini + # + # To use the much faster libxml parser: + # gem 'libxml-ruby', '=0.9.7' + # XmlMini.backend = 'LibXML' + module XmlMini + extend self + + # This module decorates files deserialized using Hash.from_xml with + # the original_filename and content_type methods. 
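+    #
+    #   # Illustrative: given <file type="file" name="a.png" content_type="image/png">
+    #   # in the XML, the deserialized value is a StringIO extended with FileLike:
+    #   io.original_filename # => "a.png"
+    #   io.content_type      # => "image/png"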
+ module FileLike # :nodoc: + attr_writer :original_filename, :content_type + + def original_filename + @original_filename || "untitled" + end + + def content_type + @content_type || "application/octet-stream" + end + end + + DEFAULT_ENCODINGS = { + "binary" => "base64" + } unless defined?(DEFAULT_ENCODINGS) + + unless defined?(TYPE_NAMES) + TYPE_NAMES = { + "Symbol" => "symbol", + "Integer" => "integer", + "BigDecimal" => "decimal", + "Float" => "float", + "TrueClass" => "boolean", + "FalseClass" => "boolean", + "Date" => "date", + "DateTime" => "dateTime", + "Time" => "dateTime", + "Array" => "array", + "Hash" => "hash" + } + end + TYPE_NAMES["ActiveSupport::TimeWithZone"] = TYPE_NAMES["Time"] + + FORMATTING = { + "symbol" => Proc.new { |symbol| symbol.to_s }, + "date" => Proc.new { |date| date.to_fs(:db) }, + "dateTime" => Proc.new { |time| time.xmlschema }, + "binary" => Proc.new { |binary| ::Base64.encode64(binary) }, + "yaml" => Proc.new { |yaml| yaml.to_yaml } + } unless defined?(FORMATTING) + + # TODO use regexp instead of Date.parse + unless defined?(PARSING) + PARSING = { + "symbol" => Proc.new { |symbol| symbol.to_s.to_sym }, + "date" => Proc.new { |date| ::Date.parse(date) }, + "datetime" => Proc.new { |time| Time.xmlschema(time).utc rescue ::DateTime.parse(time).utc }, + "integer" => Proc.new { |integer| integer.to_i }, + "float" => Proc.new { |float| float.to_f }, + "decimal" => Proc.new do |number| + if String === number + number.to_d + else + BigDecimal(number) + end + end, + "boolean" => Proc.new { |boolean| %w(1 true).include?(boolean.to_s.strip) }, + "string" => Proc.new { |string| string.to_s }, + "yaml" => Proc.new { |yaml| YAML.load(yaml) rescue yaml }, + "base64Binary" => Proc.new { |bin| ::Base64.decode64(bin) }, + "binary" => Proc.new { |bin, entity| _parse_binary(bin, entity) }, + "file" => Proc.new { |file, entity| _parse_file(file, entity) } + } + + PARSING.update( + "double" => PARSING["float"], + "dateTime" => PARSING["datetime"] + ) + end + + attr_accessor :depth + self.depth = 100 + + delegate :parse, to: :backend + + def backend + current_thread_backend || @backend + end + + def backend=(name) + backend = name && cast_backend_name_to_module(name) + self.current_thread_backend = backend if current_thread_backend + @backend = backend + end + + def with_backend(name) + old_backend = current_thread_backend + self.current_thread_backend = name && cast_backend_name_to_module(name) + yield + ensure + self.current_thread_backend = old_backend + end + + def to_tag(key, value, options) + type_name = options.delete(:type) + merged_options = options.merge(root: key, skip_instruct: true) + + if value.is_a?(::Method) || value.is_a?(::Proc) + if value.arity == 1 + value.call(merged_options) + else + value.call(merged_options, key.to_s.singularize) + end + elsif value.respond_to?(:to_xml) + value.to_xml(merged_options) + else + type_name ||= TYPE_NAMES[value.class.name] + type_name ||= value.class.name if value && !value.respond_to?(:to_str) + type_name = type_name.to_s if type_name + type_name = "dateTime" if type_name == "datetime" + + key = rename_key(key.to_s, options) + + attributes = options[:skip_types] || type_name.nil? ? {} : { type: type_name } + attributes[:nil] = true if value.nil? + + encoding = options[:encoding] || DEFAULT_ENCODINGS[type_name] + attributes[:encoding] = encoding if encoding + + formatted_value = FORMATTING[type_name] && !value.nil? ? 
+ FORMATTING[type_name].call(value) : value + + options[:builder].tag!(key, formatted_value, attributes) + end + end + + def rename_key(key, options = {}) + camelize = options[:camelize] + dasherize = !options.has_key?(:dasherize) || options[:dasherize] + if camelize + key = true == camelize ? key.camelize : key.camelize(camelize) + end + key = _dasherize(key) if dasherize + key + end + + private + def _dasherize(key) + # $2 must be a non-greedy regex for this to work + left, middle, right = /\A(_*)(.*?)(_*)\Z/.match(key.strip)[1, 3] + "#{left}#{middle.tr('_ ', '--')}#{right}" + end + + # TODO: Add support for other encodings + def _parse_binary(bin, entity) + case entity["encoding"] + when "base64" + ::Base64.decode64(bin) + else + bin + end + end + + def _parse_file(file, entity) + f = StringIO.new(::Base64.decode64(file)) + f.extend(FileLike) + f.original_filename = entity["name"] + f.content_type = entity["content_type"] + f + end + + def current_thread_backend + IsolatedExecutionState[:xml_mini_backend] + end + + def current_thread_backend=(name) + IsolatedExecutionState[:xml_mini_backend] = name && cast_backend_name_to_module(name) + end + + def cast_backend_name_to_module(name) + if name.is_a?(Module) + name + else + require "active_support/xml_mini/#{name.downcase}" + ActiveSupport.const_get("XmlMini_#{name}") + end + end + end + + XmlMini.backend = "REXML" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/jdom.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/jdom.rb new file mode 100644 index 0000000..b5aa909 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/jdom.rb @@ -0,0 +1,182 @@ +# frozen_string_literal: true + +raise "JRuby is required to use the JDOM backend for XmlMini" unless RUBY_PLATFORM.include?("java") + +require "jruby" +include Java + +require "active_support/core_ext/object/blank" + +java_import javax.xml.parsers.DocumentBuilder unless defined? DocumentBuilder +java_import javax.xml.parsers.DocumentBuilderFactory unless defined? DocumentBuilderFactory +java_import java.io.StringReader unless defined? StringReader +java_import org.xml.sax.InputSource unless defined? InputSource +java_import org.xml.sax.Attributes unless defined? Attributes +java_import org.w3c.dom.Node unless defined? Node + +module ActiveSupport + module XmlMini_JDOM # :nodoc: + extend self + + CONTENT_KEY = "__content__" + + NODE_TYPE_NAMES = %w{ATTRIBUTE_NODE CDATA_SECTION_NODE COMMENT_NODE DOCUMENT_FRAGMENT_NODE + DOCUMENT_NODE DOCUMENT_TYPE_NODE ELEMENT_NODE ENTITY_NODE ENTITY_REFERENCE_NODE NOTATION_NODE + PROCESSING_INSTRUCTION_NODE TEXT_NODE} + + node_type_map = {} + NODE_TYPE_NAMES.each { |type| node_type_map[Node.send(type)] = type } + + # Parse an XML Document string or IO into a simple hash using Java's jdom. + # data:: + # XML Document string or IO to parse + def parse(data) + if data.respond_to?(:read) + data = data.read + end + + if data.blank? 
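+          # Blank input (nil, empty, or whitespace-only) parses to an empty hash.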
+          {}
+        else
+          @dbf = DocumentBuilderFactory.new_instance
+          # secure processing of java xml
+          # https://archive.is/9xcQQ
+          @dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false)
+          @dbf.setFeature("http://xml.org/sax/features/external-general-entities", false)
+          @dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false)
+          @dbf.setFeature(javax.xml.XMLConstants::FEATURE_SECURE_PROCESSING, true)
+          xml_string_reader = StringReader.new(data)
+          xml_input_source = InputSource.new(xml_string_reader)
+          doc = @dbf.new_document_builder.parse(xml_input_source)
+          merge_element!({ CONTENT_KEY => "" }, doc.document_element, XmlMini.depth)
+        end
+      end
+
+      private
+        # Convert an XML element and merge into the hash
+        #
+        # hash::
+        #   Hash to merge the converted element into.
+        # element::
+        #   XML element to merge into hash
+        def merge_element!(hash, element, depth)
+          raise "Document too deep!" if depth == 0
+          delete_empty(hash)
+          merge!(hash, element.tag_name, collapse(element, depth))
+        end
+
+        def delete_empty(hash)
+          hash.delete(CONTENT_KEY) if hash[CONTENT_KEY] == ""
+        end
+
+        # Actually converts an XML document element into a data structure.
+        #
+        # element::
+        #   The document element to be collapsed.
+        def collapse(element, depth)
+          hash = get_attributes(element)
+
+          child_nodes = element.child_nodes
+          if child_nodes.length > 0
+            (0...child_nodes.length).each do |i|
+              child = child_nodes.item(i)
+              merge_element!(hash, child, depth - 1) unless child.node_type == Node.TEXT_NODE
+            end
+            merge_texts!(hash, element) unless empty_content?(element)
+            hash
+          else
+            merge_texts!(hash, element)
+          end
+        end
+
+        # Merge all the texts of an element into the hash
+        #
+        # hash::
+        #   Hash to add the converted element to.
+        # element::
+        #   XML element whose texts are to be merged into the hash
+        def merge_texts!(hash, element)
+          delete_empty(hash)
+          text_children = texts(element)
+          if text_children.join.empty?
+            hash
+          else
+            # must use value to prevent double-escaping
+            merge!(hash, CONTENT_KEY, text_children.join)
+          end
+        end
+
+        # Adds a new key/value pair to an existing Hash. If the key to be added
+        # already exists and the existing value associated with key is not
+        # an Array, it will be wrapped in an Array. Then the new value is
+        # appended to that Array.
+        #
+        # hash::
+        #   Hash to add key/value pair to.
+        # key::
+        #   Key to be added.
+        # value::
+        #   Value to be associated with key.
+        def merge!(hash, key, value)
+          if hash.has_key?(key)
+            if hash[key].instance_of?(Array)
+              hash[key] << value
+            else
+              hash[key] = [hash[key], value]
+            end
+          elsif value.instance_of?(Array)
+            hash[key] = [value]
+          else
+            hash[key] = value
+          end
+          hash
+        end
+
+        # Converts the attributes array of an XML element into a hash.
+        # Returns an empty Hash if node has no attributes.
+        #
+        # element::
+        #   XML element to extract attributes from.
+        def get_attributes(element)
+          attribute_hash = {}
+          attributes = element.attributes
+          (0...attributes.length).each do |i|
+            attribute_hash[CONTENT_KEY] ||= ""
+            attribute_hash[attributes.item(i).name] = attributes.item(i).value
+          end
+          attribute_hash
+        end
+
+        # Collects the text nodes of a document element.
+        #
+        # element::
+        #   XML element whose text nodes should be returned.
+ def texts(element) + texts = [] + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + texts << item.get_data + end + end + texts + end + + # Determines if a document element has text content + # + # element:: + # XML element to be checked. + def empty_content?(element) + text = +"" + child_nodes = element.child_nodes + (0...child_nodes.length).each do |i| + item = child_nodes.item(i) + if item.node_type == Node.TEXT_NODE + text << item.get_data.strip + end + end + text.strip.length == 0 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxml.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxml.rb new file mode 100644 index 0000000..65976a2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxml.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXML # :nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Parser.io(data).parse.to_hash + end + end + end +end + +module LibXML # :nodoc: + module Conversions # :nodoc: + module Document # :nodoc: + def to_hash + root.to_hash + end + end + + module Node # :nodoc: + CONTENT_ROOT = "__content__" + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + each_child do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= +"" + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? + node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + each_attr { |a| node_hash[a.name] = a.value } + + hash + end + end + end +end + +# :enddoc: + +LibXML::XML::Document.include(LibXML::Conversions::Document) +LibXML::XML::Node.include(LibXML::Conversions::Node) diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxmlsax.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxmlsax.rb new file mode 100644 index 0000000..33d594c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/libxmlsax.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +require "libxml" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_LibXMLSAX # :nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. 
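+    #
+    # Illustrative sketch of the resulting shape (the input is made up here):
+    #
+    #   ActiveSupport::XmlMini_LibXMLSAX.parse('<a id="1">hi</a>')
+    #   # => { "a" => { "id" => "1", "__content__" => "hi" } }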
+ class HashBuilder + include LibXML::XML::SaxParser::Callbacks + + CONTENT_KEY = "__content__" + HASH_SIZE_KEY = "__hash_size__" + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def on_start_document + @hash = { CONTENT_KEY => +"" } + @hash_stack = [@hash] + end + + def on_end_document + @hash = @hash_stack.pop + @hash.delete(CONTENT_KEY) + end + + def on_start_element(name, attrs = {}) + new_hash = { CONTENT_KEY => +"" }.merge!(attrs) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def on_end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def on_characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :on_cdata_block, :on_characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER) + parser = LibXML::XML::SaxParser.io(data) + document = document_class.new + + parser.callbacks = document + parser.parse + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogiri.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogiri.rb new file mode 100644 index 0000000..3fb58bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogiri.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_Nokogiri # :nodoc: + extend self + + # Parse an XML Document string or IO into a simple hash using libxml / nokogiri. + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + doc = Nokogiri::XML(data) + raise doc.errors.first if doc.errors.length > 0 + doc.to_hash + end + end + + module Conversions # :nodoc: + module Document # :nodoc: + def to_hash + root.to_hash + end + end + + module Node # :nodoc: + CONTENT_ROOT = "__content__" + + # Convert XML document to hash. + # + # hash:: + # Hash to merge the converted element into. + def to_hash(hash = {}) + node_hash = {} + + # Insert node hash into parent hash correctly. + case hash[name] + when Array then hash[name] << node_hash + when Hash then hash[name] = [hash[name], node_hash] + when nil then hash[name] = node_hash + end + + # Handle child elements + children.each do |c| + if c.element? + c.to_hash(node_hash) + elsif c.text? || c.cdata? + node_hash[CONTENT_ROOT] ||= +"" + node_hash[CONTENT_ROOT] << c.content + end + end + + # Remove content node if it is blank and there are child tags + if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank? 
+ node_hash.delete(CONTENT_ROOT) + end + + # Handle attributes + attribute_nodes.each { |a| node_hash[a.node_name] = a.value } + + hash + end + end + end + + Nokogiri::XML::Document.include(Conversions::Document) + Nokogiri::XML::Node.include(Conversions::Node) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogirisax.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogirisax.rb new file mode 100644 index 0000000..f3ba109 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/nokogirisax.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +begin + require "nokogiri" +rescue LoadError => e + $stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install" + raise e +end +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_NokogiriSAX # :nodoc: + extend self + + # Class that will build the hash while the XML document + # is being parsed using SAX events. + class HashBuilder < Nokogiri::XML::SAX::Document + CONTENT_KEY = "__content__" + HASH_SIZE_KEY = "__hash_size__" + + attr_reader :hash + + def current_hash + @hash_stack.last + end + + def start_document + @hash = {} + @hash_stack = [@hash] + end + + def end_document + raise "Parse stack not empty!" if @hash_stack.size > 1 + end + + def error(error_message) + raise error_message + end + + def start_element(name, attrs = []) + new_hash = { CONTENT_KEY => +"" }.merge!(Hash[attrs]) + new_hash[HASH_SIZE_KEY] = new_hash.size + 1 + + case current_hash[name] + when Array then current_hash[name] << new_hash + when Hash then current_hash[name] = [current_hash[name], new_hash] + when nil then current_hash[name] = new_hash + end + + @hash_stack.push(new_hash) + end + + def end_element(name) + if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == "" + current_hash.delete(CONTENT_KEY) + end + @hash_stack.pop + end + + def characters(string) + current_hash[CONTENT_KEY] << string + end + + alias_method :cdata_block, :characters + end + + attr_accessor :document_class + self.document_class = HashBuilder + + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? + {} + else + document = document_class.new + parser = Nokogiri::XML::SAX::Parser.new(document) + parser.parse(data) + document.hash + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/rexml.rb b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/rexml.rb new file mode 100644 index 0000000..e4e1a1f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/activesupport-7.0.4.3/lib/active_support/xml_mini/rexml.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +require "active_support/core_ext/kernel/reporting" +require "active_support/core_ext/object/blank" +require "stringio" + +module ActiveSupport + module XmlMini_REXML # :nodoc: + extend self + + CONTENT_KEY = "__content__" + + # Parse an XML Document string or IO into a simple hash. + # + # Same as XmlSimple::xml_in but doesn't shoot itself in the foot, + # and uses the defaults from Active Support. + # + # data:: + # XML Document string or IO to parse + def parse(data) + if !data.respond_to?(:read) + data = StringIO.new(data || "") + end + + if data.eof? 
+          {}
+        else
+          require_rexml unless defined?(REXML::Document)
+          doc = REXML::Document.new(data)
+
+          if doc.root
+            merge_element!({}, doc.root, XmlMini.depth)
+          else
+            raise REXML::ParseException,
+              "The document #{doc.to_s.inspect} does not have a valid root"
+          end
+        end
+      end
+
+      private
+        def require_rexml
+          silence_warnings { require "rexml/document" }
+        rescue LoadError => e
+          $stderr.puts "You don't have rexml installed in your application. Please add it to your Gemfile and run bundle install"
+          raise e
+        end
+
+        # Convert an XML element and merge into the hash
+        #
+        # hash::
+        #   Hash to merge the converted element into.
+        # element::
+        #   XML element to merge into hash
+        def merge_element!(hash, element, depth)
+          raise REXML::ParseException, "The document is too deep" if depth == 0
+          merge!(hash, element.name, collapse(element, depth))
+        end
+
+        # Actually converts an XML document element into a data structure.
+        #
+        # element::
+        #   The document element to be collapsed.
+        def collapse(element, depth)
+          hash = get_attributes(element)
+
+          if element.has_elements?
+            element.each_element { |child| merge_element!(hash, child, depth - 1) }
+            merge_texts!(hash, element) unless empty_content?(element)
+            hash
+          else
+            merge_texts!(hash, element)
+          end
+        end
+
+        # Merge all the texts of an element into the hash
+        #
+        # hash::
+        #   Hash to add the converted element to.
+        # element::
+        #   XML element whose texts are to be merged into the hash
+        def merge_texts!(hash, element)
+          unless element.has_text?
+            hash
+          else
+            # must use value to prevent double-escaping
+            texts = +""
+            element.texts.each { |t| texts << t.value }
+            merge!(hash, CONTENT_KEY, texts)
+          end
+        end
+
+        # Adds a new key/value pair to an existing Hash. If the key to be added
+        # already exists and the existing value associated with key is not
+        # an Array, it will be wrapped in an Array. Then the new value is
+        # appended to that Array.
+        #
+        # hash::
+        #   Hash to add key/value pair to.
+        # key::
+        #   Key to be added.
+        # value::
+        #   Value to be associated with key.
+        def merge!(hash, key, value)
+          if hash.has_key?(key)
+            if hash[key].instance_of?(Array)
+              hash[key] << value
+            else
+              hash[key] = [hash[key], value]
+            end
+          elsif value.instance_of?(Array)
+            hash[key] = [value]
+          else
+            hash[key] = value
+          end
+          hash
+        end
+
+        # Converts the attributes array of an XML element into a hash.
+        # Returns an empty Hash if node has no attributes.
+        #
+        # element::
+        #   XML element to extract attributes from.
+        def get_attributes(element)
+          attributes = {}
+          element.attributes.each { |n, v| attributes[n] = v }
+          attributes
+        end
+
+        # Determines if a document element has text content
+        #
+        # element::
+        #   XML element to be checked.
+        def empty_content?(element)
+          element.texts.join.blank?
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/CHANGELOG.md new file mode 100644 index 0000000..a5c19e0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/CHANGELOG.md @@ -0,0 +1,282 @@ +# Addressable 2.8.4 +- Restore `Addressable::IDNA.unicode_normalize_kc` as a deprecated method ([#504]) + +[#504]: https://github.com/sporkmonger/addressable/pull/504 + +# Addressable 2.8.3 +- Fix template expand level 2 hash support for non-string objects ([#499], [#498]) + +[#499]: https://github.com/sporkmonger/addressable/pull/499 +[#498]: https://github.com/sporkmonger/addressable/pull/498 + +# Addressable 2.8.2 +- Improve cache hits and JIT friendliness ([#486](https://github.com/sporkmonger/addressable/pull/486)) +- Improve code style and test coverage ([#482](https://github.com/sporkmonger/addressable/pull/482)) +- Ensure reset of deferred validation ([#481](https://github.com/sporkmonger/addressable/pull/481)) +- Resolve normalization differences between `IDNA::Native` and `IDNA::Pure` ([#408](https://github.com/sporkmonger/addressable/issues/408), [#492]) +- Remove redundant colon in `Addressable::URI::CharacterClasses::AUTHORITY` regex ([#438](https://github.com/sporkmonger/addressable/pull/438)) (accidentally reverted by [#449] merge but [added back](https://github.com/sporkmonger/addressable/pull/492#discussion_r1105125280) in [#492]) + +[#492]: https://github.com/sporkmonger/addressable/pull/492 + +# Addressable 2.8.1 +- refactor `Addressable::URI.normalize_path` to address linter offenses ([#430](https://github.com/sporkmonger/addressable/pull/430)) +- update gemspec to reflect supported Ruby versions ([#466], [#464], [#463]) +- compatibility w/ public_suffix 5.x ([#466], [#465], [#460]) +- fixes "invalid byte sequence in UTF-8" exception when unencoding URLs containing non UTF-8 characters ([#459](https://github.com/sporkmonger/addressable/pull/459)) +- `Ractor` compatibility ([#449]) +- use the whole string instead of a single line for template match ([#431](https://github.com/sporkmonger/addressable/pull/431)) +- force UTF-8 encoding only if needed ([#341](https://github.com/sporkmonger/addressable/pull/341)) + +[#449]: https://github.com/sporkmonger/addressable/pull/449 +[#460]: https://github.com/sporkmonger/addressable/pull/460 +[#463]: https://github.com/sporkmonger/addressable/pull/463 +[#464]: https://github.com/sporkmonger/addressable/pull/464 +[#465]: https://github.com/sporkmonger/addressable/pull/465 +[#466]: https://github.com/sporkmonger/addressable/pull/466 + +# Addressable 2.8.0 +- fixes ReDoS vulnerability in Addressable::Template#match +- no longer replaces `+` with spaces in queries for non-http(s) schemes +- fixed encoding ipv6 literals +- the `:compacted` flag for `normalized_query` now dedupes parameters +- fix broken `escape_component` alias +- dropping support for Ruby 2.0 and 2.1 +- adding Ruby 3.0 compatibility for development tasks +- drop support for `rack-mount` and remove Addressable::Template#generate +- performance improvements +- switch CI/CD to GitHub Actions + +# Addressable 2.7.0 +- added `:compacted` flag to `normalized_query` +- `heuristic_parse` handles `mailto:` more intuitively +- dropped explicit support for JRuby 9.0.5.0 +- compatibility w/ public_suffix 4.x +- performance improvements + +# Addressable 2.6.0 +- added `tld=` method to allow assignment to the public suffix +- most `heuristic_parse` patterns are now case-insensitive +- 
`heuristic_parse` handles more `file://` URI variations +- fixes bug in `heuristic_parse` when uri starts with digit +- fixes bug in `request_uri=` with query strings +- fixes template issues with `nil` and `?` operator +- `frozen_string_literal` pragmas added +- minor performance improvements in regexps +- fixes to eliminate warnings + +# Addressable 2.5.2 +- better support for frozen string literals +- fixed bug w/ uppercase characters in scheme +- IDNA errors w/ emoji URLs +- compatibility w/ public_suffix 3.x + +# Addressable 2.5.1 +- allow unicode normalization to be disabled for URI Template expansion +- removed duplicate test + +# Addressable 2.5.0 +- dropping support for Ruby 1.9 +- adding support for Ruby 2.4 preview +- add support for public suffixes and tld; first runtime dependency +- hostname escaping should match RFC; underscores in hostnames no longer escaped +- paths beginning with // and missing an authority are now considered invalid +- validation now also takes place after setting a path +- handle backslashes in authority more like a browser for `heuristic_parse` +- unescaped backslashes in host now raise an `InvalidURIError` +- `merge!`, `join!`, `omit!` and `normalize!` don't disable deferred validation +- `heuristic_parse` now trims whitespace before parsing +- host parts longer than 63 bytes will be ignored and not passed to libidn +- normalized values always encoded as UTF-8 + +# Addressable 2.4.0 +- support for 1.8.x dropped +- double quotes in a host now raises an error +- newlines in host will no longer get unescaped during normalization +- stricter handling of bogus scheme values +- stricter handling of encoded port values +- calling `require 'addressable'` will now load both the URI and Template files +- assigning to the `hostname` component with an `IPAddr` object is now supported +- assigning to the `origin` component is now supported +- fixed minor bug where an exception would be thrown for a missing ACE suffix +- better partial expansion of URI templates + +# Addressable 2.3.8 +- fix warnings +- update dependency gems +- support for 1.8.x officially deprecated + +# Addressable 2.3.7 +- fix scenario in which invalid URIs don't get an exception until inspected +- handle hostnames with two adjacent periods correctly +- upgrade of RSpec + +# Addressable 2.3.6 +- normalization drops empty query string +- better handling in template extract for missing values +- template modifier for `'?'` now treated as optional +- fixed issue where character class parameters were modified +- templates can now be tested for equality +- added `:sorted` option to normalization of query strings +- fixed issue with normalization of hosts given in `'example.com.'` form + +# Addressable 2.3.5 +- added Addressable::URI#empty? 
method +- Addressable::URI#hostname methods now strip square brackets from IPv6 hosts +- compatibility with Net::HTTP in Ruby 2.0.0 +- Addressable::URI#route_from should always give relative URIs + +# Addressable 2.3.4 +- fixed issue with encoding altering its inputs +- query string normalization now leaves ';' characters alone +- FakeFS is detected before attempting to load unicode tables +- additional testing to ensure frozen objects don't cause problems + +# Addressable 2.3.3 +- fixed issue with converting common primitives during template expansion +- fixed port encoding issue +- removed a few warnings +- normalize should now ignore %2B in query strings +- the IDNA logic should now be handled by libidn in Ruby 1.9 +- no template match should now result in nil instead of an empty MatchData +- added license information to gemspec + +# Addressable 2.3.2 +- added Addressable::URI#default_port method +- fixed issue with Marshalling Unicode data on Windows +- improved heuristic parsing to better handle IPv4 addresses + +# Addressable 2.3.1 +- fixed missing unicode data file + +# Addressable 2.3.0 +- updated Addressable::Template to use RFC 6570, level 4 +- fixed compatibility problems with some versions of Ruby +- moved unicode tables into a data file for performance reasons +- removing support for multiple query value notations + +# Addressable 2.2.8 +- fixed issues with dot segment removal code +- form encoding can now handle multiple values per key +- updated development environment + +# Addressable 2.2.7 +- fixed issues related to Addressable::URI#query_values= +- the Addressable::URI.parse method is now polymorphic + +# Addressable 2.2.6 +- changed the way ambiguous paths are handled +- fixed bug with frozen URIs +- https supported in heuristic parsing + +# Addressable 2.2.5 +- 'parsing' a pre-parsed URI object is now a dup operation +- introduced conditional support for libidn +- fixed normalization issue on ampersands in query strings +- added additional tests around handling of query strings + +# Addressable 2.2.4 +- added origin support from draft-ietf-websec-origin-00 +- resolved issue with attempting to navigate below root +- fixed bug with string splitting in query strings + +# Addressable 2.2.3 +- added :flat_array notation for query strings + +# Addressable 2.2.2 +- fixed issue with percent escaping of '+' character in query strings + +# Addressable 2.2.1 +- added support for application/x-www-form-urlencoded. 
+ +# Addressable 2.2.0 +- added site methods +- improved documentation + +# Addressable 2.1.2 +- added HTTP request URI methods +- better handling of Windows file paths +- validation_deferred boolean replaced with defer_validation block +- normalization of percent-encoded paths should now be correct +- fixed issue with constructing URIs with relative paths +- fixed warnings + +# Addressable 2.1.1 +- more type checking changes +- fixed issue with unicode normalization +- added method to find template defaults +- symbolic keys are now allowed in template mappings +- numeric values and symbolic values are now allowed in template mappings + +# Addressable 2.1.0 +- refactored URI template support out into its own class +- removed extract method due to being useless and unreliable +- removed Addressable::URI.expand_template +- removed Addressable::URI#extract_mapping +- added partial template expansion +- fixed minor bugs in the parse and heuristic_parse methods +- fixed incompatibility with Ruby 1.9.1 +- fixed bottleneck in Addressable::URI#hash and Addressable::URI#to_s +- fixed unicode normalization exception +- updated query_values methods to better handle subscript notation +- worked around issue with freezing URIs +- improved specs + +# Addressable 2.0.2 +- fixed issue with URI template expansion +- fixed issue with percent escaping characters 0-15 + +# Addressable 2.0.1 +- fixed issue with query string assignment +- fixed issue with improperly encoded components + +# Addressable 2.0.0 +- the initialize method now takes an options hash as its only parameter +- added query_values method to URI class +- completely replaced IDNA implementation with pure Ruby +- renamed Addressable::ADDRESSABLE_VERSION to Addressable::VERSION +- completely reworked the Rakefile +- changed the behavior of the port method significantly +- Addressable::URI.encode_segment, Addressable::URI.unencode_segment renamed +- documentation is now in YARD format +- more rigorous type checking +- to_str method implemented, implicit conversion to Strings now allowed +- Addressable::URI#omit method added, Addressable::URI#merge method replaced +- updated URI Template code to match v 03 of the draft spec +- added a bunch of new specifications + +# Addressable 1.0.4 +- switched to using RSpec's pending system for specs that rely on IDN +- fixed issue with creating URIs with paths that are not prefixed with '/' + +# Addressable 1.0.3 +- implemented a hash method + +# Addressable 1.0.2 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.1 +- fixed minor bug with the extract_mapping method + +# Addressable 1.0.0 +- heuristic parse method added +- parsing is slightly more strict +- replaced to_h with to_hash +- fixed routing methods +- improved specifications +- improved heckle rake task +- no surviving heckle mutations + +# Addressable 0.1.2 +- improved normalization +- fixed bug in joining algorithm +- updated specifications + +# Addressable 0.1.1 +- updated documentation +- added URI Template variable extraction + +# Addressable 0.1.0 +- initial release +- implementation based on RFC 3986, 3987 +- support for IRIs via libidn +- support for the URI Template draft spec diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Gemfile b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Gemfile new file mode 100644 index 0000000..0d36ffb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Gemfile @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +gemspec + +group 
:test do + gem 'rspec', '~> 3.8' + gem 'rspec-its', '~> 1.3' +end + +group :coverage do + gem "coveralls", "> 0.7", require: false, platforms: :mri + gem "simplecov", require: false +end + +group :development do + gem 'launchy', '~> 2.4', '>= 2.4.3' + gem 'redcarpet', :platform => :mri_19 + gem 'yard' +end + +group :test, :development do + gem 'memory_profiler' + gem "rake", ">= 12.3.3" +end + +unless ENV["IDNA_MODE"] == "pure" + gem "idn-ruby", platform: :mri +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/LICENSE.txt new file mode 100644 index 0000000..ef51da2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/README.md b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/README.md new file mode 100644 index 0000000..9892f61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/README.md @@ -0,0 +1,121 @@ +# Addressable + +
+<dl>
+  <dt>Homepage</dt><dd><a href="https://github.com/sporkmonger/addressable">github.com/sporkmonger/addressable</a></dd>
+  <dt>Author</dt><dd>Bob Aman</dd>
+  <dt>Copyright</dt><dd>Copyright © Bob Aman</dd>
+  <dt>License</dt><dd>Apache 2.0</dd>
+</dl>
+
+[![Gem Version](https://img.shields.io/gem/dt/addressable.svg)][gem]
+[![Build Status](https://github.com/sporkmonger/addressable/workflows/CI/badge.svg)][actions]
+[![Test Coverage Status](https://img.shields.io/coveralls/sporkmonger/addressable.svg)][coveralls]
+[![Documentation Coverage Status](https://inch-ci.org/github/sporkmonger/addressable.svg?branch=master)][inch]
+
+[gem]: https://rubygems.org/gems/addressable
+[actions]: https://github.com/sporkmonger/addressable/actions
+[coveralls]: https://coveralls.io/r/sporkmonger/addressable
+[inch]: https://inch-ci.org/github/sporkmonger/addressable
+
+# Description
+
+Addressable is an alternative implementation to the URI implementation
+that is part of Ruby's standard library. It is flexible, offers heuristic
+parsing, and additionally provides extensive support for IRIs and URI templates.
+
+Addressable closely conforms to RFC 3986, RFC 3987, and RFC 6570 (level 4).
+
+# Reference
+
+- {Addressable::URI}
+- {Addressable::Template}
+
+# Example usage
+
+```ruby
+require "addressable/uri"
+
+uri = Addressable::URI.parse("http://example.com/path/to/resource/")
+uri.scheme
+#=> "http"
+uri.host
+#=> "example.com"
+uri.path
+#=> "/path/to/resource/"
+
+uri = Addressable::URI.parse("http://www.詹姆斯.com/")
+uri.normalize
+#=> #<Addressable::URI:0x... URI:http://www.xn--8ws00zhy3a.com/>
+```
+
+
+# URI Templates
+
+For more details, see [RFC 6570](https://www.rfc-editor.org/rfc/rfc6570.txt).
+
+
+```ruby
+
+require "addressable/template"
+
+template = Addressable::Template.new("http://example.com/{?query*}")
+template.expand({
+  "query" => {
+    'foo' => 'bar',
+    'color' => 'red'
+  }
+})
+#=> #<Addressable::URI:0x... URI:http://example.com/?foo=bar&color=red>
+
+template = Addressable::Template.new("http://example.com/{?one,two,three}")
+template.partial_expand({"one" => "1", "three" => 3}).pattern
+#=> "http://example.com/?one=1{&two}&three=3"
+
+template = Addressable::Template.new(
+  "http://{host}{/segments*}/{?one,two,bogus}{#fragment}"
+)
+uri = Addressable::URI.parse(
+  "http://example.com/a/b/c/?one=1&two=2#foo"
+)
+template.extract(uri)
+#=>
+# {
+#   "host" => "example.com",
+#   "segments" => ["a", "b", "c"],
+#   "one" => "1",
+#   "two" => "2",
+#   "fragment" => "foo"
+# }
+```
+
+# Install
+
+```console
+$ gem install addressable
+```
+
+You may optionally turn on native IDN support by installing libidn and the
+idn gem:
+
+```console
+$ sudo apt-get install libidn11-dev # Debian/Ubuntu
+$ brew install libidn # OS X
+$ gem install idn-ruby
+```
+
+# Semantic Versioning
+
+This project uses [Semantic Versioning](https://semver.org/).
You can (and should) specify your +dependency using a pessimistic version constraint covering the major and minor +values: + +```ruby +spec.add_dependency 'addressable', '~> 2.7' +``` + +If you need a specific bug fix, you can also specify minimum tiny versions +without preventing updates to the latest minor release: + +```ruby +spec.add_dependency 'addressable', '~> 2.3', '>= 2.3.7' +``` diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Rakefile b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Rakefile new file mode 100644 index 0000000..e19785d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/Rakefile @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'rubygems' +require 'rake' + +require File.join(File.dirname(__FILE__), 'lib', 'addressable', 'version') + +PKG_DISPLAY_NAME = 'Addressable' +PKG_NAME = PKG_DISPLAY_NAME.downcase +PKG_VERSION = Addressable::VERSION::STRING +PKG_FILE_NAME = "#{PKG_NAME}-#{PKG_VERSION}" + +RELEASE_NAME = "REL #{PKG_VERSION}" + +PKG_SUMMARY = "URI Implementation" +PKG_DESCRIPTION = <<-TEXT +Addressable is an alternative implementation to the URI implementation that is +part of Ruby's standard library. It is flexible, offers heuristic parsing, and +additionally provides extensive support for IRIs and URI templates. +TEXT + +PKG_FILES = FileList[ + "lib/**/*", "spec/**/*", "vendor/**/*", "data/**/*", + "tasks/**/*", + "[A-Z]*", "Rakefile" +].exclude(/pkg/).exclude(/database\.yml/). + exclude(/Gemfile\.lock/).exclude(/[_\.]git$/). + exclude(/coverage/) + +task :default => "spec" + +WINDOWS = (RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/) rescue false +SUDO = WINDOWS ? '' : ('sudo' unless ENV['SUDOLESS']) + +Dir['tasks/**/*.rake'].each { |rake| load rake } diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/addressable.gemspec b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/addressable.gemspec new file mode 100644 index 0000000..f43698c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/addressable.gemspec @@ -0,0 +1,28 @@ +# -*- encoding: utf-8 -*- +# stub: addressable 2.8.4 ruby lib + +Gem::Specification.new do |s| + s.name = "addressable".freeze + s.version = "2.8.4" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= + s.metadata = { "changelog_uri" => "https://github.com/sporkmonger/addressable/blob/main/CHANGELOG.md" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Bob Aman".freeze] + s.date = "2023-04-09" + s.description = "Addressable is an alternative implementation to the URI implementation that is\npart of Ruby's standard library. 
It is flexible, offers heuristic parsing, and\nadditionally provides extensive support for IRIs and URI templates.\n".freeze
+  s.email = "bob@sporkmonger.com".freeze
+  s.extra_rdoc_files = ["README.md".freeze]
+  s.files = ["CHANGELOG.md".freeze, "Gemfile".freeze, "LICENSE.txt".freeze, "README.md".freeze, "Rakefile".freeze, "addressable.gemspec".freeze, "data/unicode.data".freeze, "lib/addressable.rb".freeze, "lib/addressable/idna.rb".freeze, "lib/addressable/idna/native.rb".freeze, "lib/addressable/idna/pure.rb".freeze, "lib/addressable/template.rb".freeze, "lib/addressable/uri.rb".freeze, "lib/addressable/version.rb".freeze, "spec/addressable/idna_spec.rb".freeze, "spec/addressable/net_http_compat_spec.rb".freeze, "spec/addressable/security_spec.rb".freeze, "spec/addressable/template_spec.rb".freeze, "spec/addressable/uri_spec.rb".freeze, "spec/spec_helper.rb".freeze, "tasks/clobber.rake".freeze, "tasks/gem.rake".freeze, "tasks/git.rake".freeze, "tasks/metrics.rake".freeze, "tasks/profile.rake".freeze, "tasks/rspec.rake".freeze, "tasks/yard.rake".freeze]
+  s.homepage = "https://github.com/sporkmonger/addressable".freeze
+  s.licenses = ["Apache-2.0".freeze]
+  s.rdoc_options = ["--main".freeze, "README.md".freeze]
+  s.required_ruby_version = Gem::Requirement.new(">= 2.2".freeze)
+  s.rubygems_version = "3.4.10".freeze
+  s.summary = "URI Implementation".freeze
+
+  s.specification_version = 4
+
+  s.add_runtime_dependency(%q<public_suffix>.freeze, [">= 2.0.2", "< 6.0"])
+  s.add_development_dependency(%q<bundler>.freeze, [">= 1.0", "< 3.0"])
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/data/unicode.data b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/data/unicode.data
new file mode 100644
index 0000000..cdfc224
Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/data/unicode.data differ
diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable.rb
new file mode 100644
index 0000000..b4e98b6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable.rb
@@ -0,0 +1,4 @@
+# frozen_string_literal: true
+
+require 'addressable/uri'
+require 'addressable/template'
diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna.rb
new file mode 100644
index 0000000..2dbd393
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+#--
+# Copyright (C) Bob Aman
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#++
+
+
+begin
+  require "addressable/idna/native"
+rescue LoadError
+  # libidn or the idn gem was not available, fall back on a pure-Ruby
+  # implementation...
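+  # (The gem's own Gemfile mirrors this switch: it skips installing
+  # idn-ruby when IDNA_MODE=pure is set, which forces this fallback.)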
+ require "addressable/idna/pure" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/native.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/native.rb new file mode 100644 index 0000000..a718364 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/native.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "idn" + +module Addressable + module IDNA + def self.punycode_encode(value) + IDN::Punycode.encode(value.to_s) + end + + def self.punycode_decode(value) + IDN::Punycode.decode(value.to_s) + end + + class << self + # @deprecated Use {String#unicode_normalize(:nfkc)} instead + def unicode_normalize_kc(value) + value.to_s.unicode_normalize(:nfkc) + end + + extend Gem::Deprecate + deprecate :unicode_normalize_kc, "String#unicode_normalize(:nfkc)", 2023, 4 + end + + def self.to_ascii(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toASCII(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + + def self.to_unicode(value) + value.to_s.split('.', -1).map do |segment| + if segment.size > 0 && segment.size < 64 + IDN::Idna.toUnicode(segment, IDN::Idna::ALLOW_UNASSIGNED) + elsif segment.size >= 64 + segment + else + '' + end + end.join('.') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/pure.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/pure.rb new file mode 100644 index 0000000..3d6ffba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/idna/pure.rb @@ -0,0 +1,505 @@ +# frozen_string_literal: true + +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +module Addressable + module IDNA + # This module is loosely based on idn_actionmailer by Mick Staugaard, + # the unicode library by Yoshida Masato, and the punycode implementation + # by Kazuhiro Nishiyama. Most of the code was copied verbatim, but + # some reformatting was done, and some translation from C was done. + # + # Without their code to work from as a base, we'd all still be relying + # on the presence of libidn. Which nobody ever seems to have installed. 
+ # + # Original sources: + # http://github.com/staugaard/idn_actionmailer + # http://www.yoshidam.net/Ruby.html#unicode + # http://rubyforge.org/frs/?group_id=2550 + + + UNICODE_TABLE = File.expand_path( + File.join(File.dirname(__FILE__), '../../..', 'data/unicode.data') + ) + + ACE_PREFIX = "xn--" + + UTF8_REGEX = /\A(?: + [\x09\x0A\x0D\x20-\x7E] # ASCII + | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )*\z/mnx + + UTF8_REGEX_MULTIBYTE = /(?: + [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte + | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs + | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte + | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates + | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 + | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15 + | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 + )/mnx + + # :startdoc: + + # Converts from a Unicode internationalized domain name to an ASCII + # domain name as described in RFC 3490. + def self.to_ascii(input) + input = input.to_s unless input.is_a?(String) + input = input.dup.force_encoding(Encoding::UTF_8).unicode_normalize(:nfkc) + if input.respond_to?(:force_encoding) + input.force_encoding(Encoding::ASCII_8BIT) + end + if input =~ UTF8_REGEX && input =~ UTF8_REGEX_MULTIBYTE + parts = unicode_downcase(input).split('.') + parts.map! do |part| + if part.respond_to?(:force_encoding) + part.force_encoding(Encoding::ASCII_8BIT) + end + if part =~ UTF8_REGEX && part =~ UTF8_REGEX_MULTIBYTE + ACE_PREFIX + punycode_encode(part) + else + part + end + end + parts.join('.') + else + input + end + end + + # Converts from an ASCII domain name to a Unicode internationalized + # domain name as described in RFC 3490. + def self.to_unicode(input) + input = input.to_s unless input.is_a?(String) + parts = input.split('.') + parts.map! do |part| + if part =~ /^#{ACE_PREFIX}(.+)/ + begin + punycode_decode(part[/^#{ACE_PREFIX}(.+)/, 1]) + rescue Addressable::IDNA::PunycodeBadInput + # toUnicode is explicitly defined as never-fails by the spec + part + end + else + part + end + end + output = parts.join('.') + if output.respond_to?(:force_encoding) + output.force_encoding(Encoding::UTF_8) + end + output + end + + class << self + # @deprecated Use {String#unicode_normalize(:nfkc)} instead + def unicode_normalize_kc(value) + value.to_s.unicode_normalize(:nfkc) + end + + extend Gem::Deprecate + deprecate :unicode_normalize_kc, "String#unicode_normalize(:nfkc)", 2023, 4 + end + + ## + # Unicode aware downcase method. + # + # @api private + # @param [String] input + # The input string. + # @return [String] The downcased result. + def self.unicode_downcase(input) + input = input.to_s unless input.is_a?(String) + unpacked = input.unpack("U*") + unpacked.map! { |codepoint| lookup_unicode_lowercase(codepoint) } + return unpacked.pack("U*") + end + private_class_method :unicode_downcase + + def self.lookup_unicode_lowercase(codepoint) + codepoint_data = UNICODE_DATA[codepoint] + (codepoint_data ?
+ (codepoint_data[UNICODE_DATA_LOWERCASE] || codepoint) : + codepoint) + end + private_class_method :lookup_unicode_lowercase + + UNICODE_DATA_COMBINING_CLASS = 0 + UNICODE_DATA_EXCLUSION = 1 + UNICODE_DATA_CANONICAL = 2 + UNICODE_DATA_COMPATIBILITY = 3 + UNICODE_DATA_UPPERCASE = 4 + UNICODE_DATA_LOWERCASE = 5 + UNICODE_DATA_TITLECASE = 6 + + begin + if defined?(FakeFS) + fakefs_state = FakeFS.activated? + FakeFS.deactivate! + end + # This is a sparse Unicode table. Codepoints without entries are + # assumed to have the value: [0, 0, nil, nil, nil, nil, nil] + UNICODE_DATA = File.open(UNICODE_TABLE, "rb") do |file| + Marshal.load(file.read) + end + ensure + if defined?(FakeFS) + FakeFS.activate! if fakefs_state + end + end + + COMPOSITION_TABLE = {} + UNICODE_DATA.each do |codepoint, data| + canonical = data[UNICODE_DATA_CANONICAL] + exclusion = data[UNICODE_DATA_EXCLUSION] + + if canonical && exclusion == 0 + COMPOSITION_TABLE[canonical.unpack("C*")] = codepoint + end + end + + UNICODE_MAX_LENGTH = 256 + ACE_MAX_LENGTH = 256 + + PUNYCODE_BASE = 36 + PUNYCODE_TMIN = 1 + PUNYCODE_TMAX = 26 + PUNYCODE_SKEW = 38 + PUNYCODE_DAMP = 700 + PUNYCODE_INITIAL_BIAS = 72 + PUNYCODE_INITIAL_N = 0x80 + PUNYCODE_DELIMITER = 0x2D + + PUNYCODE_MAXINT = 1 << 64 + + PUNYCODE_PRINT_ASCII = + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" + + " !\"\#$%&'()*+,-./" + + "0123456789:;<=>?" + + "@ABCDEFGHIJKLMNO" + + "PQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmno" + + "pqrstuvwxyz{|}~\n" + + # Input is invalid. + class PunycodeBadInput < StandardError; end + # Output would exceed the space provided. + class PunycodeBigOutput < StandardError; end + # Input needs wider integers to process. + class PunycodeOverflow < StandardError; end + + def self.punycode_encode(unicode) + unicode = unicode.to_s unless unicode.is_a?(String) + input = unicode.unpack("U*") + output = [0] * (ACE_MAX_LENGTH + 1) + input_length = input.size + output_length = [ACE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + delta = out = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: + input_length.times do |j| + if punycode_basic?(input[j]) + if max_out - out < 2 + raise PunycodeBigOutput, + "Output would exceed the space provided." + end + output[out] = input[j] + out += 1 + end + end + + h = b = out + + # h is the number of code points that have been handled, b is the + # number of basic code points, and out is the number of characters + # that have been output. + + if b > 0 + output[out] = PUNYCODE_DELIMITER + out += 1 + end + + # Main encoding loop: + + while h < input_length + # All non-basic code points < n have been + # handled already. Find the next larger one: + + m = PUNYCODE_MAXINT + input_length.times do |j| + m = input[j] if (n...m) === input[j] + end + + # Increase delta enough to advance the decoder's + # state to <n,i,0>, but guard against overflow: + + if m - n > (PUNYCODE_MAXINT - delta) / (h + 1) + raise PunycodeOverflow, "Input needs wider integers to process." + end + delta += (m - n) * (h + 1) + n = m + + input_length.times do |j| + # Punycode does not need to check whether input[j] is basic: + if input[j] < n + delta += 1 + if delta == 0 + raise PunycodeOverflow, + "Input needs wider integers to process." + end + end + + if input[j] == n + # Represent delta as a generalized variable-length integer: + + q = delta; k = PUNYCODE_BASE + while true + if out >= max_out + raise PunycodeBigOutput, + "Output would exceed the space provided."
+ end + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if q < t + output[out] = + punycode_encode_digit(t + (q - t) % (PUNYCODE_BASE - t)) + out += 1 + q = (q - t) / (PUNYCODE_BASE - t) + k += PUNYCODE_BASE + end + + output[out] = punycode_encode_digit(q) + out += 1 + bias = punycode_adapt(delta, h + 1, h == b) + delta = 0 + h += 1 + end + end + + delta += 1 + n += 1 + end + + output_length[0] = out + + outlen = out + outlen.times do |j| + c = output[j] + unless c >= 0 && c <= 127 + raise StandardError, "Invalid output char." + end + unless PUNYCODE_PRINT_ASCII[c] + raise PunycodeBadInput, "Input is invalid." + end + end + + output[0..outlen].map { |x| x.chr }.join("").sub(/\0+\z/, "") + end + private_class_method :punycode_encode + + def self.punycode_decode(punycode) + input = [] + output = [] + + if ACE_MAX_LENGTH * 2 < punycode.size + raise PunycodeBigOutput, "Output would exceed the space provided." + end + punycode.each_byte do |c| + unless c >= 0 && c <= 127 + raise PunycodeBadInput, "Input is invalid." + end + input.push(c) + end + + input_length = input.length + output_length = [UNICODE_MAX_LENGTH] + + # Initialize the state + n = PUNYCODE_INITIAL_N + + out = i = 0 + max_out = output_length[0] + bias = PUNYCODE_INITIAL_BIAS + + # Handle the basic code points: Let b be the number of input code + # points before the last delimiter, or 0 if there is none, then + # copy the first b code points to the output. + + b = 0 + input_length.times do |j| + b = j if punycode_delimiter?(input[j]) + end + if b > max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + b.times do |j| + unless punycode_basic?(input[j]) + raise PunycodeBadInput, "Input is invalid." + end + output[out] = input[j] + out+=1 + end + + # Main decoding loop: Start just after the last delimiter if any + # basic code points were copied; start at the beginning otherwise. + + in_ = b > 0 ? b + 1 : 0 + while in_ < input_length + + # in_ is the index of the next character to be consumed, and + # out is the number of code points in the output array. + + # Decode a generalized variable-length integer into delta, + # which gets added to i. The overflow checking is easier + # if we increase i as we go, then subtract off its starting + # value at the end to obtain delta. + + oldi = i; w = 1; k = PUNYCODE_BASE + while true + if in_ >= input_length + raise PunycodeBadInput, "Input is invalid." + end + digit = punycode_decode_digit(input[in_]) + in_+=1 + if digit >= PUNYCODE_BASE + raise PunycodeBadInput, "Input is invalid." + end + if digit > (PUNYCODE_MAXINT - i) / w + raise PunycodeOverflow, "Input needs wider integers to process." + end + i += digit * w + t = ( + if k <= bias + PUNYCODE_TMIN + elsif k >= bias + PUNYCODE_TMAX + PUNYCODE_TMAX + else + k - bias + end + ) + break if digit < t + if w > PUNYCODE_MAXINT / (PUNYCODE_BASE - t) + raise PunycodeOverflow, "Input needs wider integers to process." + end + w *= PUNYCODE_BASE - t + k += PUNYCODE_BASE + end + + bias = punycode_adapt(i - oldi, out + 1, oldi == 0) + + # i was supposed to wrap around from out + 1 to 0, + # incrementing n each time, so we'll fix that now: + + if i / (out + 1) > PUNYCODE_MAXINT - n + raise PunycodeOverflow, "Input needs wider integers to process."
+ end + n += i / (out + 1) + i %= out + 1 + + # Insert n at position i of the output: + + # not needed for Punycode: + # raise PUNYCODE_INVALID_INPUT if decode_digit(n) <= base + if out >= max_out + raise PunycodeBigOutput, "Output would exceed the space provided." + end + + #memmove(output + i + 1, output + i, (out - i) * sizeof *output) + output[i + 1, out - i] = output[i, out - i] + output[i] = n + i += 1 + + out += 1 + end + + output_length[0] = out + + output.pack("U*") + end + private_class_method :punycode_decode + + def self.punycode_basic?(codepoint) + codepoint < 0x80 + end + private_class_method :punycode_basic? + + def self.punycode_delimiter?(codepoint) + codepoint == PUNYCODE_DELIMITER + end + private_class_method :punycode_delimiter? + + def self.punycode_encode_digit(d) + d + 22 + 75 * ((d < 26) ? 1 : 0) + end + private_class_method :punycode_encode_digit + + # Returns the numeric value of a basic codepoint + # (for use in representing integers) in the range 0 to + # base - 1, or PUNYCODE_BASE if codepoint does not represent a value. + def self.punycode_decode_digit(codepoint) + if codepoint - 48 < 10 + codepoint - 22 + elsif codepoint - 65 < 26 + codepoint - 65 + elsif codepoint - 97 < 26 + codepoint - 97 + else + PUNYCODE_BASE + end + end + private_class_method :punycode_decode_digit + + # Bias adaptation method + def self.punycode_adapt(delta, numpoints, firsttime) + delta = firsttime ? delta / PUNYCODE_DAMP : delta >> 1 + # delta >> 1 is a faster way of doing delta / 2 + delta += delta / numpoints + difference = PUNYCODE_BASE - PUNYCODE_TMIN + + k = 0 + while delta > (difference * PUNYCODE_TMAX) / 2 + delta /= difference + k += PUNYCODE_BASE + end + + k + (difference + 1) * delta / (delta + PUNYCODE_SKEW) + end + private_class_method :punycode_adapt + end + # :startdoc: +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/template.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/template.rb new file mode 100644 index 0000000..bc52041 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/template.rb @@ -0,0 +1,1029 @@ +# frozen_string_literal: true + +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/uri" + +module Addressable + ## + # This is an implementation of a URI template based on + # RFC 6570 (http://tools.ietf.org/html/rfc6570). + class Template + # Constants used throughout the template code. 
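+    # Taken together these constants build EXPRESSION, the regexp that finds
+    # RFC 6570 expressions such as "{query}" or "{?page,per_page}" inside a
+    # pattern. An illustrative (not upstream) end-to-end use:
+    #
+    #   Addressable::Template.new("http://example.com/{query}/")
+    #     .expand("query" => "a b").to_s
+    #   #=> "http://example.com/a%20b/"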
+ anything = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + + + variable_char_class = + Addressable::URI::CharacterClasses::ALPHA + + Addressable::URI::CharacterClasses::DIGIT + '_' + + var_char = + "(?>(?:[#{variable_char_class}]|%[a-fA-F0-9][a-fA-F0-9])+)" + RESERVED = + "(?:[#{anything}]|%[a-fA-F0-9][a-fA-F0-9])" + UNRESERVED = + "(?:[#{ + Addressable::URI::CharacterClasses::UNRESERVED + }]|%[a-fA-F0-9][a-fA-F0-9])" + variable = + "(?:#{var_char}(?:\\.?#{var_char})*)" + varspec = + "(?:(#{variable})(\\*|:\\d+)?)" + VARNAME = + /^#{variable}$/ + VARSPEC = + /^#{varspec}$/ + VARIABLE_LIST = + /^#{varspec}(?:,#{varspec})*$/ + operator = + "+#./;?&=,!@|" + EXPRESSION = + /\{([#{operator}])?(#{varspec}(?:,#{varspec})*)\}/ + + + LEADERS = { + '?' => '?', + '/' => '/', + '#' => '#', + '.' => '.', + ';' => ';', + '&' => '&' + } + JOINERS = { + '?' => '&', + '.' => '.', + ';' => ';', + '&' => '&', + '/' => '/' + } + + ## + # Raised if an invalid template value is supplied. + class InvalidTemplateValueError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class InvalidTemplateOperatorError < StandardError + end + + ## + # Raised if an invalid template operator is used in a pattern. + class TemplateOperatorAbortedError < StandardError + end + + ## + # This class represents the data that is extracted when a Template + # is matched against a URI. + class MatchData + ## + # Creates a new MatchData object. + # MatchData objects should never be instantiated directly. + # + # @param [Addressable::URI] uri + # The URI that the template was matched against. + def initialize(uri, template, mapping) + @uri = uri.dup.freeze + @template = template + @mapping = mapping.dup.freeze + end + + ## + # @return [Addressable::URI] + # The URI that the Template was matched against. + attr_reader :uri + + ## + # @return [Addressable::Template] + # The Template used for the match. + attr_reader :template + + ## + # @return [Hash] + # The mapping that resulted from the match. + # Note that this mapping does not include keys or values for + # variables that appear in the Template, but are not present + # in the URI. + attr_reader :mapping + + ## + # @return [Array] + # The list of variables that were present in the Template. + # Note that this list will include variables which do not appear + # in the mapping because they were not present in the URI. + def variables + self.template.variables + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # @return [Array] + # The list of values that were captured by the Template. + # Note that this list will include nils for any variables which + # were in the Template, but did not appear in the URI. + def values + @values ||= self.variables.inject([]) do |accu, key| + accu << self.mapping[key] + accu + end + end + alias_method :captures, :values + + ## + # Accesses captured values by name or by index. + # + # @param [String, Symbol, Fixnum] key + # Capture index or name. Note that when accessing with an index + # of 0, the full URI will be returned. The intention is to mimic + # the ::MatchData#[] behavior. + # + # @param [#to_int, nil] len + # If provided, an array of values will be returned with the given + # parameter used as length. + # + # @return [Array, String, nil] + # The captured value corresponding to the index or name. If the + # value was not provided or the key is unknown, nil will be + # returned.
+ # + # If the second parameter is provided, an array of that length will + # be returned instead. + def [](key, len = nil) + if len + to_a[key, len] + elsif String === key or Symbol === key + mapping[key.to_s] + else + to_a[key] + end + end + + ## + # @return [Array] + # Array with the matched URI as first element followed by the captured + # values. + def to_a + [to_s, *values] + end + + ## + # @return [String] + # The matched URI as String. + def to_s + uri.to_s + end + alias_method :string, :to_s + + # Returns multiple captured values at once. + # + # @param [String, Symbol, Fixnum] *indexes + # Indices of the captures to be returned + # + # @return [Array] + # Values corresponding to given indices. + # + # @see Addressable::Template::MatchData#[] + def values_at(*indexes) + indexes.map { |i| self[i] } + end + + ## + # Returns a String representation of the MatchData's state. + # + # @return [String] The MatchData's state, as a String. + def inspect + sprintf("#<%s:%#0x RESULT:%s>", + self.class.to_s, self.object_id, self.mapping.inspect) + end + + ## + # Dummy method for code expecting a ::MatchData instance + # + # @return [String] An empty string. + def pre_match + "" + end + alias_method :post_match, :pre_match + end + + ## + # Creates a new Addressable::Template object. + # + # @param [#to_str] pattern The URI Template pattern. + # + # @return [Addressable::Template] The initialized Template object. + def initialize(pattern) + if !pattern.respond_to?(:to_str) + raise TypeError, "Can't convert #{pattern.class} into String." + end + @pattern = pattern.to_str.dup.freeze + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.variables + self.variable_defaults + self.named_captures + super + end + + ## + # @return [String] The Template object's pattern. + attr_reader :pattern + + ## + # Returns a String representation of the Template object's state. + # + # @return [String] The Template object's state, as a String. + def inspect + sprintf("#<%s:%#0x PATTERN:%s>", + self.class.to_s, self.object_id, self.pattern) + end + + ## + # Returns true if the Template objects are equal. This method + # does NOT normalize either Template before doing the comparison. + # + # @param [Object] template The Template to compare. + # + # @return [TrueClass, FalseClass] + # true if the Templates are equivalent, false + # otherwise. + def ==(template) + return false unless template.kind_of?(Template) + return self.pattern == template.pattern + end + + ## + # Addressable::Template makes no distinction between `==` and `eql?`. + # + # @see #== + alias_method :eql?, :== + + ## + # Extracts a mapping from the URI using a URI Template pattern. + # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. 
+ # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).extract(uri, ExampleProcessor) + # #=> {"query" => "an example search query"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{second}/" + # ).extract(uri, ExampleProcessor) + # #=> {"first" => "a", "second" => "b/c"} + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # Addressable::Template.new( + # "http://example.com/{first}/{-list|/|second}/" + # ).extract(uri) + # #=> {"first" => "a", "second" => ["b", "c"]} + def extract(uri, processor=nil) + match_data = self.match(uri, processor) + return (match_data ? match_data.mapping : nil) + end + + ## + # Extracts match data from the URI using a URI Template pattern. + # + # @param [Addressable::URI, #to_str] uri + # The URI to extract from. + # + # @param [#restore, #match] processor + # A template processor object may optionally be supplied. + # + # The object should respond to either the restore or + # match messages or both. The restore method should + # take two parameters: `[String] name` and `[String] value`. + # The restore method should reverse any transformations that + # have been performed on the value to ensure a valid URI. + # The match method should take a single + # parameter: `[String] name`. The match method should return + # a String containing a regular expression capture group for + # matching on that particular variable. The default value is `".*?"`. + # The match method has no effect on multivariate operator + # expansions. + # + # @return [Hash, NilClass] + # The Hash mapping that was extracted from the URI, or + # nil if the URI didn't match the template. + # + # @example + # class ExampleProcessor + # def self.restore(name, value) + # return value.gsub(/\+/, " ") if name == "query" + # return value + # end + # + # def self.match(name) + # return ".*?" if name == "first" + # return ".*" + # end + # end + # + # uri = Addressable::URI.parse( + # "http://example.com/search/an+example+search+query/" + # ) + # match = Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["query"] + # match.captures + # #=> ["an example search query"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}/{+second}/" + # ).match(uri, ExampleProcessor) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", "b/c"] + # + # uri = Addressable::URI.parse("http://example.com/a/b/c/") + # match = Addressable::Template.new( + # "http://example.com/{first}{/second*}/" + # ).match(uri) + # match.variables + # #=> ["first", "second"] + # match.captures + # #=> ["a", ["b", "c"]] + def match(uri, processor=nil) + uri = Addressable::URI.parse(uri) unless uri.is_a?(Addressable::URI) + mapping = {} + + # First, we need to process the pattern, and extract the values. 
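+      # For a pattern like "http://example.com/{first}/{second}/" this yields
+      # the expression strings (["{first}", "{second}"]) and one anchored
+      # Regexp with a named capture group per variable.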
+ expansions, expansion_regexp = + parse_template_pattern(pattern, processor) + + return nil unless uri.to_str.match(expansion_regexp) + unparsed_values = uri.to_str.scan(expansion_regexp).flatten + + if uri.to_str == pattern + return Addressable::Template::MatchData.new(uri, self, mapping) + elsif expansions.size > 0 + index = 0 + expansions.each do |expansion| + _, operator, varlist = *expansion.match(EXPRESSION) + varlist.split(',').each do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + mapping[name] ||= nil + case operator + when nil, '+', '#', '/', '.' + unparsed_value = unparsed_values[index] + name = varspec[VARSPEC, 1] + value = unparsed_value + value = value.split(JOINERS[operator]) if value && modifier == '*' + when ';', '?', '&' + if modifier == '*' + if unparsed_values[index] + value = unparsed_values[index].split(JOINERS[operator]) + value = value.inject({}) do |acc, v| + key, val = v.split('=') + val = "" if val.nil? + acc[key] = val + acc + end + end + else + if (unparsed_values[index]) + name, value = unparsed_values[index].split('=') + value = "" if value.nil? + end + end + end + if processor != nil && processor.respond_to?(:restore) + value = processor.restore(name, value) + end + if processor == nil + if value.is_a?(Hash) + value = value.inject({}){|acc, (k, v)| + acc[Addressable::URI.unencode_component(k)] = + Addressable::URI.unencode_component(v) + acc + } + elsif value.is_a?(Array) + value = value.map{|v| Addressable::URI.unencode_component(v) } + else + value = Addressable::URI.unencode_component(value) + end + end + if !mapping.has_key?(name) || mapping[name].nil? + # Doesn't exist, set to value (even if value is nil) + mapping[name] = value + end + index = index + 1 + end + end + return Addressable::Template::MatchData.new(uri, self, mapping) + else + return nil + end + end + + ## + # Expands a URI template into another URI template. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::Template] The partially expanded URI template. 
+ # + # @example + # Addressable::Template.new( + # "http://example.com/{one}/{two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/1/{two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two}/" + # ).partial_expand({"one" => "1"}).pattern + # #=> "http://example.com/?one=1{&two}/" + # + # Addressable::Template.new( + # "http://example.com/{?one,two,three}/" + # ).partial_expand({"one" => "1", "three" => 3}).pattern + # #=> "http://example.com/?one=1{&two}&three=3" + def partial_expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_partial_capture(mapping, capture, processor, normalize_values) + end + return Addressable::Template.new(result) + end + + ## + # Expands a URI template into a full URI. + # + # @param [Hash] mapping The mapping that corresponds to the pattern. + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError + # exception will be raised if the value is invalid. The transform + # method should return the transformed variable value as a String. + # If a transform method is used, the value will not be percent + # encoded automatically. Unicode normalization will be performed both + # before and after sending the value to the transform method. + # + # @return [Addressable::URI] The expanded URI template. + # + # @example + # class ExampleProcessor + # def self.validate(name, value) + # return !!(value =~ /^[\w ]+$/) if name == "query" + # return true + # end + # + # def self.transform(name, value) + # return value.gsub(/ /, "+") if name == "query" + # return value + # end + # end + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"}, + # ExampleProcessor + # ).to_str + # #=> "http://example.com/search/an+example+search+query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "an example search query"} + # ).to_str + # #=> "http://example.com/search/an%20example%20search%20query/" + # + # Addressable::Template.new( + # "http://example.com/search/{query}/" + # ).expand( + # {"query" => "bogus!"}, + # ExampleProcessor + # ).to_str + # #=> Addressable::Template::InvalidTemplateValueError + def expand(mapping, processor=nil, normalize_values=true) + result = self.pattern.dup + mapping = normalize_keys(mapping) + result.gsub!( EXPRESSION ) do |capture| + transform_capture(mapping, capture, processor, normalize_values) + end + return Addressable::URI.parse(result) + end + + ## + # Returns an Array of variables used within the template pattern. + # The variables are listed in the Array in the order they appear within + # the pattern. Multiple occurrences of a variable within a pattern are + # not represented in this Array. + # + # @return [Array] The variables present in the template's pattern. 
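+    #
+    # @example Order of appearance, duplicates removed (illustrative)
+    #   Addressable::Template.new(
+    #     "http://example.com/{one}/{two}/{one}"
+    #   ).variables
+    #   #=> ["one", "two"]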
+ def variables + @variables ||= ordered_variable_defaults.map { |var, val| var }.uniq + end + alias_method :keys, :variables + alias_method :names, :variables + + ## + # Returns a mapping of variables to their default values specified + # in the template. Variables without defaults are not returned. + # + # @return [Hash] Mapping of template variables to their defaults + def variable_defaults + @variable_defaults ||= + Hash[*ordered_variable_defaults.reject { |k, v| v.nil? }.flatten] + end + + ## + # Coerces a template into a `Regexp` object. This regular expression will + # behave very similarly to the actual template, and should match the same + # URI values, but it cannot fully handle, for example, values that would + # extract to an `Array`. + # + # @return [Regexp] A regular expression which should match the template. + def to_regexp + _, source = parse_template_pattern(pattern) + Regexp.new(source) + end + + ## + # Returns the source of the coerced `Regexp`. + # + # @return [String] The source of the `Regexp` given by {#to_regexp}. + # + # @api private + def source + self.to_regexp.source + end + + ## + # Returns the named captures of the coerced `Regexp`. + # + # @return [Hash] The named captures of the `Regexp` given by {#to_regexp}. + # + # @api private + def named_captures + self.to_regexp.named_captures + end + + private + def ordered_variable_defaults + @ordered_variable_defaults ||= begin + expansions, _ = parse_template_pattern(pattern) + expansions.flat_map do |capture| + _, _, varlist = *capture.match(EXPRESSION) + varlist.split(',').map do |varspec| + varspec[VARSPEC, 1] + end + end + end + end + + + ## + # Loops through each capture and expands any values available in mapping + # + # @param [Hash] mapping + # Set of keys to expand + # @param [String] capture + # The expression to expand + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. + # + # @return [String] The expanded expression + def transform_partial_capture(mapping, capture, processor = nil, + normalize_values = true) + _, operator, varlist = *capture.match(EXPRESSION) + + vars = varlist.split(",") + + if operator == "?" + # partial expansion of form style query variables sometimes requires a + # slight reordering of the variables to produce a valid url. + first_to_expand = vars.find { |varspec| + _, name, _ = *varspec.match(VARSPEC) + mapping.key?(name) && !mapping[name].nil? + } + + vars = [first_to_expand] + vars.reject {|varspec| varspec == first_to_expand} if first_to_expand + end + + vars. + inject("".dup) do |acc, varspec| + _, name, _ = *varspec.match(VARSPEC) + next_val = if mapping.key? 
name + transform_capture(mapping, "{#{operator}#{varspec}}", + processor, normalize_values) + else + "{#{operator}#{varspec}}" + end + # If we've already expanded at least one '?' operator with non-empty + # value, change to '&' + operator = "&" if (operator == "?") && (next_val != "") + acc << next_val + end + end + + ## + # Transforms a mapped value so that values can be substituted into the + # template. + # + # @param [Hash] mapping The mapping to replace captures + # @param [String] capture + # The expression to replace + # @param [#validate, #transform] processor + # An optional processor object may be supplied. + # @param [Boolean] normalize_values + # Optional flag to enable/disable unicode normalization. Default: true + # + # + # The object should respond to either the validate or + # transform messages or both. Both the validate and + # transform methods should take two parameters: name and + # value. The validate method should return true + # or false; true if the value of the variable is valid, + # false otherwise. An InvalidTemplateValueError exception + # will be raised if the value is invalid. The transform method + # should return the transformed variable value as a String. If a + # transform method is used, the value will not be percent encoded + # automatically. Unicode normalization will be performed both before and + # after sending the value to the transform method. + # + # @return [String] The expanded expression + def transform_capture(mapping, capture, processor=nil, + normalize_values=true) + _, operator, varlist = *capture.match(EXPRESSION) + return_value = varlist.split(',').inject([]) do |acc, varspec| + _, name, modifier = *varspec.match(VARSPEC) + value = mapping[name] + unless value == nil || value == {} + allow_reserved = %w(+ #).include?(operator) + # Common primitives where the .to_s output is well-defined + if Numeric === value || Symbol === value || + value == true || value == false + value = value.to_s + end + length = modifier.gsub(':', '').to_i if modifier =~ /^:\d+/ + + unless (Hash === value) || + value.respond_to?(:to_ary) || value.respond_to?(:to_str) + raise TypeError, + "Can't convert #{value.class} into String or Array." 
+ end + + value = normalize_value(value) if normalize_values + + if processor == nil || !processor.respond_to?(:transform) + # Handle percent escaping + if allow_reserved + encode_map = + Addressable::URI::CharacterClasses::RESERVED + + Addressable::URI::CharacterClasses::UNRESERVED + else + encode_map = Addressable::URI::CharacterClasses::UNRESERVED + end + if value.kind_of?(Array) + transformed_value = value.map do |val| + if length + Addressable::URI.encode_component(val[0...length], encode_map) + else + Addressable::URI.encode_component(val, encode_map) + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + elsif value.kind_of?(Hash) + transformed_value = value.map do |key, val| + if modifier == "*" + "#{ + Addressable::URI.encode_component( key, encode_map) + }=#{ + Addressable::URI.encode_component( val, encode_map) + }" + else + "#{ + Addressable::URI.encode_component( key, encode_map) + },#{ + Addressable::URI.encode_component( val, encode_map) + }" + end + end + unless modifier == "*" + transformed_value = transformed_value.join(',') + end + else + if length + transformed_value = Addressable::URI.encode_component( + value[0...length], encode_map) + else + transformed_value = Addressable::URI.encode_component( + value, encode_map) + end + end + end + + # Process, if we've got a processor + if processor != nil + if processor.respond_to?(:validate) + if !processor.validate(name, value) + display_value = value.kind_of?(Array) ? value.inspect : value + raise InvalidTemplateValueError, + "#{name}=#{display_value} is an invalid template value." + end + end + if processor.respond_to?(:transform) + transformed_value = processor.transform(name, value) + if normalize_values + transformed_value = normalize_value(transformed_value) + end + end + end + acc << [name, transformed_value] + end + acc + end + return "" if return_value.empty? + join_values(operator, return_value) + end + + ## + # Takes a set of values, and joins them together based on the + # operator. + # + # @param [String, Nil] operator One of the operators from the set + # (?,&,+,#,;,/,.), or nil if there wasn't one. + # @param [Array] return_value + # The set of return values (as [variable_name, value] tuples) that will + # be joined together. + # + # @return [String] The transformed mapped value + def join_values(operator, return_value) + leader = LEADERS.fetch(operator, '') + joiner = JOINERS.fetch(operator, ',') + case operator + when '&', '?' + leader + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + v.join(joiner) + elsif v.is_a?(Array) + v.map{|inner_value| "#{k}=#{inner_value}"}.join(joiner) + else + "#{k}=#{v}" + end + }.join(joiner) + when ';' + return_value.map{|k,v| + if v.is_a?(Array) && v.first =~ /=/ + ';' + v.join(";") + elsif v.is_a?(Array) + ';' + v.map{|inner_value| "#{k}=#{inner_value}"}.join(";") + else + v && v != '' ? ";#{k}=#{v}" : ";#{k}" + end + }.join + else + leader + return_value.map{|k,v| v}.join(joiner) + end + end + + ## + # Normalizes unicode keys and values with String#unicode_normalize (NFC). + # + # @param [Hash, Array, String] value + # The value to normalize; Hashes and Arrays are normalized recursively. + # + # @return [Hash, Array, String] The normalized values + def normalize_value(value) + # Handle unicode normalization + if value.respond_to?(:to_ary) + value.to_ary.map!
{ |val| normalize_value(val) } + elsif value.kind_of?(Hash) + value = value.inject({}) { |acc, (k, v)| + acc[normalize_value(k)] = normalize_value(v) + acc + } + else + value = value.to_s if !value.kind_of?(String) + if value.encoding != Encoding::UTF_8 + value = value.dup.force_encoding(Encoding::UTF_8) + end + value = value.unicode_normalize(:nfc) + end + value + end + + ## + # Generates a hash with string keys + # + # @param [Hash] mapping A mapping hash to normalize + # + # @return [Hash] + # A hash with stringified keys + def normalize_keys(mapping) + return mapping.inject({}) do |accu, pair| + name, value = pair + if Symbol === name + name = name.to_s + elsif name.respond_to?(:to_str) + name = name.to_str + else + raise TypeError, + "Can't convert #{name.class} into String." + end + accu[name] = value + accu + end + end + + ## + # Generates the Regexp that parses a template pattern. Memoizes the + # value if the template processor is not set (processors may not be + # deterministic) + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables and a regular expression which may be + # used to parse a template pattern + def parse_template_pattern(pattern, processor = nil) + if processor.nil? && pattern == @pattern + @cached_template_parse ||= + parse_new_template_pattern(pattern, processor) + else + parse_new_template_pattern(pattern, processor) + end + end + + ## + # Generates the Regexp that parses a template pattern. + # + # @param [String] pattern The URI template pattern. + # @param [#match] processor The template processor to use. + # + # @return [Array, Regexp] + # An array of expansion variables and a regular expression which may be + # used to parse a template pattern + def parse_new_template_pattern(pattern, processor = nil) + # Escape the pattern. The two gsubs restore the escaped curly braces + # back to their original form. Basically, escape everything that isn't + # within an expansion. + escaped_pattern = Regexp.escape( + pattern + ).gsub(/\\\{(.*?)\\\}/) do |escaped| + escaped.gsub(/\\(.)/, "\\1") + end + + expansions = [] + + # Create a regular expression that captures the values of the + # variables in the URI. + regexp_string = escaped_pattern.gsub( EXPRESSION ) do |expansion| + + expansions << expansion + _, operator, varlist = *expansion.match(EXPRESSION) + leader = Regexp.escape(LEADERS.fetch(operator, '')) + joiner = Regexp.escape(JOINERS.fetch(operator, ',')) + combined = varlist.split(',').map do |varspec| + _, name, modifier = *varspec.match(VARSPEC) + + result = processor && processor.respond_to?(:match) ? processor.match(name) : nil + if result + "(?<#{name}>#{ result })" + else + group = case operator + when '+' + "#{ RESERVED }*?" + when '#' + "#{ RESERVED }*?" + when '/' + "#{ UNRESERVED }*?" + when '.' + "#{ UNRESERVED.gsub('\.', '') }*?" + when ';' + "#{ UNRESERVED }*=?#{ UNRESERVED }*?" + when '?' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + when '&' + "#{ UNRESERVED }*=#{ UNRESERVED }*?" + else + "#{ UNRESERVED }*?" + end + if modifier == '*' + "(?<#{name}>#{group}(?:#{joiner}?#{group})*)?" + else + "(?<#{name}>#{group})?" + end + end + end.join("#{joiner}?") + "(?:|#{leader}#{combined})" + end + + # Ensure that the regular expression matches the whole URI.
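+      # (\A and \z anchor the whole string, so a URI that merely contains
+      # the pattern as a substring will not match.)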
+ regexp_string = "\\A#{regexp_string}\\z" + return expansions, Regexp.new(regexp_string) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/uri.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/uri.rb new file mode 100644 index 0000000..50ccdaf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/uri.rb @@ -0,0 +1,2576 @@ +# frozen_string_literal: true + +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#++ + + +require "addressable/version" +require "addressable/idna" +require "public_suffix" + +## +# Addressable is a library for processing links and URIs. +module Addressable + ## + # This is an implementation of a URI parser based on + # RFC 3986, + # RFC 3987. + class URI + ## + # Raised if something other than a uri is supplied. + class InvalidURIError < StandardError + end + + ## + # Container for the character classes specified in + # RFC 3986. + # + # Note: Concatenated and interpolated `String`s are not affected by the + # `frozen_string_literal` directive and must be frozen explicitly. + # + # Interpolated `String`s *were* frozen this way before Ruby 3.0: + # https://bugs.ruby-lang.org/issues/17104 + module CharacterClasses + ALPHA = "a-zA-Z" + DIGIT = "0-9" + GEN_DELIMS = "\\:\\/\\?\\#\\[\\]\\@" + SUB_DELIMS = "\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=" + RESERVED = (GEN_DELIMS + SUB_DELIMS).freeze + UNRESERVED = (ALPHA + DIGIT + "\\-\\.\\_\\~").freeze + PCHAR = (UNRESERVED + SUB_DELIMS + "\\:\\@").freeze + SCHEME = (ALPHA + DIGIT + "\\-\\+\\.").freeze + HOST = (UNRESERVED + SUB_DELIMS + "\\[\\:\\]").freeze + AUTHORITY = (PCHAR + "\\[\\]").freeze + PATH = (PCHAR + "\\/").freeze + QUERY = (PCHAR + "\\/\\?").freeze + FRAGMENT = (PCHAR + "\\/\\?").freeze + end + + module NormalizeCharacterClasses + HOST = /[^#{CharacterClasses::HOST}]/ + UNRESERVED = /[^#{CharacterClasses::UNRESERVED}]/ + PCHAR = /[^#{CharacterClasses::PCHAR}]/ + SCHEME = /[^#{CharacterClasses::SCHEME}]/ + FRAGMENT = /[^#{CharacterClasses::FRAGMENT}]/ + QUERY = %r{[^a-zA-Z0-9\-\.\_\~\!\$\'\(\)\*\+\,\=\:\@\/\?%]|%(?!2B|2b)} + end + + SLASH = '/' + EMPTY_STR = '' + + URIREGEX = /^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/ + + PORT_MAPPING = { + "http" => 80, + "https" => 443, + "ftp" => 21, + "tftp" => 69, + "sftp" => 22, + "ssh" => 22, + "svn+ssh" => 22, + "telnet" => 23, + "nntp" => 119, + "gopher" => 70, + "wais" => 210, + "ldap" => 389, + "prospero" => 1525 + }.freeze + + ## + # Returns a URI object based on the parsed string. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # + # @return [Addressable::URI] The parsed URI. + def self.parse(uri) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. 
+ return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. + if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + # Otherwise, convert to a String + begin + uri = uri.to_str + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{uri.class} into String." + end unless uri.is_a?(String) + + # This Regexp was supplied as an example in RFC 3986, and it works great. + scan = uri.scan(URIREGEX) + fragments = scan[0] + scheme = fragments[1] + authority = fragments[3] + path = fragments[4] + query = fragments[6] + fragment = fragments[8] + user = nil + password = nil + host = nil + port = nil + if authority != nil + # The Regexp above doesn't split apart the authority. + userinfo = authority[/^([^\[\]]*)@/, 1] + if userinfo != nil + user = userinfo.strip[/^([^:]*):?/, 1] + password = userinfo.strip[/:(.*)$/, 1] + end + + host = authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + + port = authority[/:([^:@\[\]]*?)$/, 1] + port = nil if port == EMPTY_STR + end + + return new( + :scheme => scheme, + :user => user, + :password => password, + :host => host, + :port => port, + :path => path, + :query => query, + :fragment => fragment + ) + end + + ## + # Converts an input to a URI. The input does not have to be a valid + # URI — the method will use heuristics to guess what URI was intended. + # This is not standards-compliant, merely user-friendly. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI string to parse. + # No parsing is performed if the object is already an + # Addressable::URI. + # @param [Hash] hints + # A Hash of hints to the heuristic parser. + # Defaults to {:scheme => "http"}. + # + # @return [Addressable::URI] The parsed URI. + def self.heuristic_parse(uri, hints={}) + # If we were given nil, return nil. + return nil unless uri + # If a URI object is passed, just return itself. + return uri.dup if uri.kind_of?(self) + + # If a URI object of the Ruby standard library variety is passed, + # convert it to a string, then parse the string. + # We do the check this way because we don't want to accidentally + # cause a missing constant exception to be thrown. + if uri.class.name =~ /^URI\b/ + uri = uri.to_s + end + + unless uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + # Otherwise, convert to a String + uri = uri.to_str.dup.strip + hints = { + :scheme => "http" + }.merge(hints) + case uri + when /^http:\//i + uri.sub!(/^http:\/+/i, "http://") + when /^https:\//i + uri.sub!(/^https:\/+/i, "https://") + when /^feed:\/+http:\//i + uri.sub!(/^feed:\/+http:\/+/i, "feed:http://") + when /^feed:\//i + uri.sub!(/^feed:\/+/i, "feed://") + when %r[^file:/{4}]i + uri.sub!(%r[^file:/+]i, "file:////") + when %r[^file://localhost/]i + uri.sub!(%r[^file://localhost/+]i, "file:///") + when %r[^file:/+]i + uri.sub!(%r[^file:/+]i, "file:///") + when /^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/ + uri.sub!(/^/, hints[:scheme] + "://") + when /\A\d+\..*:\d+\z/ + uri = "#{hints[:scheme]}://#{uri}" + end + match = uri.match(URIREGEX) + fragments = match.captures + authority = fragments[3] + if authority && authority.length > 0 + new_authority = authority.tr("\\", "/").gsub(" ", "%20") + # NOTE: We want offset 4, not 3!
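+      # (Capture 3 of URIREGEX includes the leading "//"; capture 4 is the
+      # bare authority that new_authority replaces.)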
+ offset = match.offset(4) + uri = uri.dup + uri[offset[0]...offset[1]] = new_authority + end + parsed = self.parse(uri) + if parsed.scheme =~ /^[^\/?#\.]+\.[^\/?#]+$/ + parsed = self.parse(hints[:scheme] + "://" + uri) + end + if parsed.path.include?(".") + if parsed.path[/\b@\b/] + parsed.scheme = "mailto" unless parsed.scheme + elsif new_host = parsed.path[/^([^\/]+\.[^\/]*)/, 1] + parsed.defer_validation do + new_path = parsed.path.sub( + Regexp.new("^" + Regexp.escape(new_host)), EMPTY_STR) + parsed.host = new_host + parsed.path = new_path + parsed.scheme = hints[:scheme] unless parsed.scheme + end + end + end + return parsed + end + + ## + # Converts a path to a file scheme URI. If the path supplied is + # relative, it will be returned as a relative URI. If the path supplied + # is actually a non-file URI, it will parse the URI as if it had been + # parsed with Addressable::URI.parse. Handles all of the + # various Microsoft-specific formats for specifying paths. + # + # @param [String, Addressable::URI, #to_str] path + # Typically a String path to a file or directory, but + # will return a sensible return value if an absolute URI is supplied + # instead. + # + # @return [Addressable::URI] + # The parsed file scheme URI or the original URI if some other URI + # scheme was provided. + # + # @example + # base = Addressable::URI.convert_path("/absolute/path/") + # uri = Addressable::URI.convert_path("relative/path") + # (base + uri).to_s + # #=> "file:///absolute/path/relative/path" + # + # Addressable::URI.convert_path( + # "c:\\windows\\My Documents 100%20\\foo.txt" + # ).to_s + # #=> "file:///c:/windows/My%20Documents%20100%20/foo.txt" + # + # Addressable::URI.convert_path("http://example.com/").to_s + # #=> "http://example.com/" + def self.convert_path(path) + # If we were given nil, return nil. + return nil unless path + # If a URI object is passed, just return itself. + return path if path.kind_of?(self) + unless path.respond_to?(:to_str) + raise TypeError, "Can't convert #{path.class} into String." + end + # Otherwise, convert to a String + path = path.to_str.strip + + path.sub!(/^file:\/?\/?/, EMPTY_STR) if path =~ /^file:\/?\/?/ + path = SLASH + path if path =~ /^([a-zA-Z])[\|:]/ + uri = self.parse(path) + + if uri.scheme == nil + # Adjust windows-style uris + uri.path.sub!(/^\/?([a-zA-Z])[\|:][\\\/]/) do + "/#{$1.downcase}:/" + end + uri.path.tr!("\\", SLASH) + if File.exist?(uri.path) && + File.stat(uri.path).directory? + uri.path.chomp!(SLASH) + uri.path = uri.path + '/' + end + + # If the path is absolute, set the scheme and host. + if uri.path.start_with?(SLASH) + uri.scheme = "file" + uri.host = EMPTY_STR + end + uri.normalize! + end + + return uri + end + + ## + # Joins several URIs together. + # + # @param [String, Addressable::URI, #to_str] *uris + # The URIs to join. + # + # @return [Addressable::URI] The joined URI. + # + # @example + # base = "http://example.com/" + # uri = Addressable::URI.parse("relative/path") + # Addressable::URI.join(base, uri) + # #=> # + def self.join(*uris) + uri_objects = uris.collect do |uri| + unless uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + uri.kind_of?(self) ? 
uri : self.parse(uri.to_str) + end + result = uri_objects.shift.dup + uri_objects.each do |uri| + result.join!(uri) + end + return result + end + + ## + # Tables used to optimize encoding operations in `self.encode_component` + # and `self.normalize_component` + SEQUENCE_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%02x", c) + end.join + end + + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE = Hash.new do |hash, sequence| + hash[sequence] = sequence.unpack("C*").map do |c| + format("%%%02X", c) + end.join + end + + ## + # Percent encodes a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. (Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' through + # '9' to be percent encoded. If a Regexp is passed, the + # value /[^b-zB-Z0-9]/ would have the same effect. A set of + # useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [String] upcase_encoded + # A string of characters that may already be percent encoded, and whose + # encodings should be upcased. This allows normalization of percent + # encodings for characters not included in the + # character_class. + # + # @return [String] The encoded component. + # + # @example + # Addressable::URI.encode_component("simple/example", "b-zB-Z0-9") + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component("simple/example", /[^b-zB-Z0-9]/) + # => "simple%2Fex%61mple" + # Addressable::URI.encode_component( + # "simple/example", Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + def self.encode_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + upcase_encoded='') + return nil if component.nil? + + begin + if component.kind_of?(Symbol) || + component.kind_of?(Numeric) || + component.kind_of?(TrueClass) || + component.kind_of?(FalseClass) + component = component.to_s + else + component = component.to_str + end + rescue TypeError, NoMethodError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + character_class = /[^#{character_class}]/ + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + # Avoiding gsub! because there are edge cases with frozen strings + component = component.gsub(character_class) do |sequence| + SEQUENCE_UPCASED_PERCENT_ENCODING_TABLE[sequence] + end + if upcase_encoded.length > 0 + upcase_encoded_chars = upcase_encoded.chars.map do |char| + SEQUENCE_ENCODING_TABLE[char] + end + component = component.gsub(/%(#{upcase_encoded_chars.join('|')})/, + &:upcase) + end + return component + end + + class << self + alias_method :escape_component, :encode_component + end + + ## + # Unencodes any percent encoded characters within a URI component.
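+    # An illustrative (not upstream) example:
+    #
+    #   Addressable::URI.unencode_component("an%20example%2Fpath")
+    #   #=> "an example/path"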
+ # This method may be used for unencoding either components or full URIs, + # however, it is recommended to use the unencode_component + # alias when unencoding components. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI or component to unencode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @param [String] leave_encoded + # A string of characters to leave encoded. If a percent encoded character + # in this list is encountered then it will remain percent encoded. + # + # @return [String, Addressable::URI] + # The unencoded component or URI. + # The return type is determined by the return_type + # parameter. + def self.unencode(uri, return_type=String, leave_encoded='') + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + + result = uri.gsub(/%[0-9a-f]{2}/i) do |sequence| + c = sequence[1..3].to_i(16).chr + c.force_encoding(sequence.encoding) + leave_encoded.include?(c) ? sequence : c + end + + result.force_encoding(Encoding::UTF_8) + if return_type == String + return result + elsif return_type == ::Addressable::URI + return ::Addressable::URI.parse(result) + end + end + + class << self + alias_method :unescape, :unencode + alias_method :unencode_component, :unencode + alias_method :unescape_component, :unencode + end + + + ## + # Normalizes the encoding of a URI component. + # + # @param [String, #to_str] component The URI component to encode. + # + # @param [String, Regexp] character_class + # The characters which are not percent encoded. If a String + # is passed, the String must be formatted as a regular + # expression character class. (Do not include the surrounding square + # brackets.) For example, "b-zB-Z0-9" would cause + # everything but the letters 'b' through 'z' and the numbers '0' + # through '9' to be percent encoded. If a Regexp is passed, + # the value /[^b-zB-Z0-9]/ would have the same effect. A + # set of useful String values may be found in the + # Addressable::URI::CharacterClasses module. The default + # value is the reserved plus unreserved character classes specified in + # RFC 3986. + # + # @param [String] leave_encoded + # When character_class is a String then + # leave_encoded is a string of characters that should remain + # percent encoded while normalizing the component; if they appear percent + # encoded in the original component, then they will be upcased ("%2f" + # normalized to "%2F") but otherwise left alone. + # + # @return [String] The normalized component. 
+ # + # @example + # Addressable::URI.normalize_component("simpl%65/%65xampl%65", "b-zB-Z") + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", /[^b-zB-Z]/ + # ) + # => "simple%2Fex%61mple" + # Addressable::URI.normalize_component( + # "simpl%65/%65xampl%65", + # Addressable::URI::CharacterClasses::UNRESERVED + # ) + # => "simple%2Fexample" + # Addressable::URI.normalize_component( + # "one%20two%2fthree%26four", + # "0-9a-zA-Z &/", + # "/" + # ) + # => "one two%2Fthree&four" + def self.normalize_component(component, character_class= + CharacterClasses::RESERVED + CharacterClasses::UNRESERVED, + leave_encoded='') + return nil if component.nil? + + begin + component = component.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{component.class} into String." + end if !component.is_a? String + + if ![String, Regexp].include?(character_class.class) + raise TypeError, + "Expected String or Regexp, got #{character_class.inspect}" + end + if character_class.kind_of?(String) + leave_re = if leave_encoded.length > 0 + character_class = "#{character_class}%" unless character_class.include?('%') + + "|%(?!#{leave_encoded.chars.flat_map do |char| + seq = SEQUENCE_ENCODING_TABLE[char] + [seq.upcase, seq.downcase] + end.join('|')})" + end + + character_class = if leave_re + /[^#{character_class}]#{leave_re}/ + else + /[^#{character_class}]/ + end + end + # We can't perform regexps on invalid UTF sequences, but + # here we need to, so switch to ASCII. + component = component.dup + component.force_encoding(Encoding::ASCII_8BIT) + unencoded = self.unencode_component(component, String, leave_encoded) + begin + encoded = self.encode_component( + unencoded.unicode_normalize(:nfc), + character_class, + leave_encoded + ) + rescue ArgumentError + encoded = self.encode_component(unencoded) + end + encoded.force_encoding(Encoding::UTF_8) + return encoded + end + + ## + # Percent encodes any special characters in the URI. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.encode(uri, return_type=String) + return nil if uri.nil? + + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? 
uri : self.parse(uri) + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(uri_object.scheme, + Addressable::URI::CharacterClasses::SCHEME), + :authority => self.encode_component(uri_object.authority, + Addressable::URI::CharacterClasses::AUTHORITY), + :path => self.encode_component(uri_object.path, + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(uri_object.query, + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(uri_object.fragment, + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + class << self + alias_method :escape, :encode + end + + ## + # Normalizes the encoding of a URI. Characters within a hostname are + # not percent encoded to allow for internationalized domain names. + # + # @param [String, Addressable::URI, #to_str] uri + # The URI to encode. + # + # @param [Class] return_type + # The type of object to return. + # This value may only be set to String or + # Addressable::URI. All other values are invalid. Defaults + # to String. + # + # @return [String, Addressable::URI] + # The encoded URI. + # The return type is determined by the return_type + # parameter. + def self.normalized_encode(uri, return_type=String) + begin + uri = uri.to_str + rescue NoMethodError, TypeError + raise TypeError, "Can't convert #{uri.class} into String." + end if !uri.is_a? String + + if ![String, ::Addressable::URI].include?(return_type) + raise TypeError, + "Expected Class (String or Addressable::URI), " + + "got #{return_type.inspect}" + end + uri_object = uri.kind_of?(self) ? uri : self.parse(uri) + components = { + :scheme => self.unencode_component(uri_object.scheme), + :user => self.unencode_component(uri_object.user), + :password => self.unencode_component(uri_object.password), + :host => self.unencode_component(uri_object.host), + :port => (uri_object.port.nil? ? nil : uri_object.port.to_s), + :path => self.unencode_component(uri_object.path), + :query => self.unencode_component(uri_object.query), + :fragment => self.unencode_component(uri_object.fragment) + } + components.each do |key, value| + if value != nil + begin + components[key] = value.to_str.unicode_normalize(:nfc) + rescue ArgumentError + # Likely a malformed UTF-8 character, skip unicode normalization + components[key] = value.to_str + end + end + end + encoded_uri = Addressable::URI.new( + :scheme => self.encode_component(components[:scheme], + Addressable::URI::CharacterClasses::SCHEME), + :user => self.encode_component(components[:user], + Addressable::URI::CharacterClasses::UNRESERVED), + :password => self.encode_component(components[:password], + Addressable::URI::CharacterClasses::UNRESERVED), + :host => components[:host], + :port => components[:port], + :path => self.encode_component(components[:path], + Addressable::URI::CharacterClasses::PATH), + :query => self.encode_component(components[:query], + Addressable::URI::CharacterClasses::QUERY), + :fragment => self.encode_component(components[:fragment], + Addressable::URI::CharacterClasses::FRAGMENT) + ) + if return_type == String + return encoded_uri.to_s + elsif return_type == ::Addressable::URI + return encoded_uri + end + end + + ## + # Encodes a set of key/value pairs according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [#to_hash, #to_ary] form_values + # The form values to encode. 
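+ # Hashes, Arrays of key/value pairs, and any object responding to
+ # #to_hash or #to_ary are accepted; an Array value expands into one
+ # pair per element, so {"a" => ["1", "2"]} encodes as "a=1&a=2".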
+ # + # @param [TrueClass, FalseClass] sort + # Sort the key/value pairs prior to encoding. + # Defaults to false. + # + # @return [String] + # The encoded value. + def self.form_encode(form_values, sort=false) + if form_values.respond_to?(:to_hash) + form_values = form_values.to_hash.to_a + elsif form_values.respond_to?(:to_ary) + form_values = form_values.to_ary + else + raise TypeError, "Can't convert #{form_values.class} into Array." + end + + form_values = form_values.inject([]) do |accu, (key, value)| + if value.kind_of?(Array) + value.each do |v| + accu << [key.to_s, v.to_s] + end + else + accu << [key.to_s, value.to_s] + end + accu + end + + if sort + # Useful for OAuth and optimizing caching systems + form_values = form_values.sort + end + escaped_form_values = form_values.map do |(key, value)| + # Line breaks are CRLF pairs + [ + self.encode_component( + key.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+"), + self.encode_component( + value.gsub(/(\r\n|\n|\r)/, "\r\n"), + CharacterClasses::UNRESERVED + ).gsub("%20", "+") + ] + end + return escaped_form_values.map do |(key, value)| + "#{key}=#{value}" + end.join("&") + end + + ## + # Decodes a String according to the rules for the + # application/x-www-form-urlencoded MIME type. + # + # @param [String, #to_str] encoded_value + # The form values to decode. + # + # @return [Array] + # The decoded values. + # This is not a Hash because of the possibility for + # duplicate keys. + def self.form_unencode(encoded_value) + if !encoded_value.respond_to?(:to_str) + raise TypeError, "Can't convert #{encoded_value.class} into String." + end + encoded_value = encoded_value.to_str + split_values = encoded_value.split("&").map do |pair| + pair.split("=", 2) + end + return split_values.map do |(key, value)| + [ + key ? self.unencode_component( + key.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n") : nil, + value ? (self.unencode_component( + value.gsub("+", "%20")).gsub(/(\r\n|\n|\r)/, "\n")) : nil + ] + end + end + + ## + # Creates a new uri object from component parts. + # + # @option [String, #to_str] scheme The scheme component. + # @option [String, #to_str] user The user component. + # @option [String, #to_str] password The password component. + # @option [String, #to_str] userinfo + # The userinfo component. If this is supplied, the user and password + # components must be omitted. + # @option [String, #to_str] host The host component. + # @option [String, #to_str] port The port component. + # @option [String, #to_str] authority + # The authority component. If this is supplied, the user, password, + # userinfo, host, and port components must be omitted. + # @option [String, #to_str] path The path component. + # @option [String, #to_str] query The query component. + # @option [String, #to_str] fragment The fragment component. + # + # @return [Addressable::URI] The constructed URI object. + def initialize(options={}) + if options.has_key?(:authority) + if (options.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if options.has_key?(:userinfo) + if (options.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + reset_ivs + + defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. 
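+ # Assignment order matters here: the composite userinfo and
+ # authority setters overwrite their constituent parts, which is
+ # safe only because conflicting options were rejected above.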
+ self.scheme = options[:scheme] if options[:scheme] + self.user = options[:user] if options[:user] + self.password = options[:password] if options[:password] + self.userinfo = options[:userinfo] if options[:userinfo] + self.host = options[:host] if options[:host] + self.port = options[:port] if options[:port] + self.authority = options[:authority] if options[:authority] + self.path = options[:path] if options[:path] + self.query = options[:query] if options[:query] + self.query_values = options[:query_values] if options[:query_values] + self.fragment = options[:fragment] if options[:fragment] + end + + to_s # force path validation + end + + ## + # Freeze URI, initializing instance variables. + # + # @return [Addressable::URI] The frozen URI object. + def freeze + self.normalized_scheme + self.normalized_user + self.normalized_password + self.normalized_userinfo + self.normalized_host + self.normalized_port + self.normalized_authority + self.normalized_site + self.normalized_path + self.normalized_query + self.normalized_fragment + self.hash + super + end + + ## + # The scheme component for this URI. + # + # @return [String] The scheme component. + attr_reader :scheme + + ## + # The scheme component for this URI, normalized. + # + # @return [String] The scheme component, normalized. + def normalized_scheme + return nil unless self.scheme + if @normalized_scheme == NONE + @normalized_scheme = if self.scheme =~ /^\s*ssh\+svn\s*$/i + "svn+ssh".dup + else + Addressable::URI.normalize_component( + self.scheme.strip.downcase, + Addressable::URI::NormalizeCharacterClasses::SCHEME + ) + end + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_scheme) + @normalized_scheme + end + + ## + # Sets the scheme component for this URI. + # + # @param [String, #to_str] new_scheme The new scheme component. + def scheme=(new_scheme) + if new_scheme && !new_scheme.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_scheme.class} into String." + elsif new_scheme + new_scheme = new_scheme.to_str + end + if new_scheme && new_scheme !~ /\A[a-z][a-z0-9\.\+\-]*\z/i + raise InvalidURIError, "Invalid scheme format: '#{new_scheme}'" + end + @scheme = new_scheme + @scheme = nil if @scheme.to_s.strip.empty? + + # Reset dependent values + @normalized_scheme = NONE + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The user component for this URI. + # + # @return [String] The user component. + attr_reader :user + + ## + # The user component for this URI, normalized. + # + # @return [String] The user component, normalized. + def normalized_user + return nil unless self.user + return @normalized_user unless @normalized_user == NONE + @normalized_user = begin + if normalized_scheme =~ /https?/ && self.user.strip.empty? && + (!self.password || self.password.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.user.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_user) + @normalized_user + end + + ## + # Sets the user component for this URI. + # + # @param [String, #to_str] new_user The new user component. + def user=(new_user) + if new_user && !new_user.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_user.class} into String." + end + @user = new_user ? 
new_user.to_str : nil + + # You can't have a nil user with a non-nil password + if password != nil + @user = EMPTY_STR unless user + end + + # Reset dependent values + @userinfo = nil + @normalized_userinfo = NONE + @authority = nil + @normalized_user = NONE + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The password component for this URI. + # + # @return [String] The password component. + attr_reader :password + + ## + # The password component for this URI, normalized. + # + # @return [String] The password component, normalized. + def normalized_password + return nil unless self.password + return @normalized_password unless @normalized_password == NONE + @normalized_password = begin + if self.normalized_scheme =~ /https?/ && self.password.strip.empty? && + (!self.user || self.user.strip.empty?) + nil + else + Addressable::URI.normalize_component( + self.password.strip, + Addressable::URI::NormalizeCharacterClasses::UNRESERVED + ) + end + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_password) + @normalized_password + end + + ## + # Sets the password component for this URI. + # + # @param [String, #to_str] new_password The new password component. + def password=(new_password) + if new_password && !new_password.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_password.class} into String." + end + @password = new_password ? new_password.to_str : nil + + # You can't have a nil user with a non-nil password + if @password != nil + self.user = EMPTY_STR if user.nil? + end + + # Reset dependent values + @userinfo = nil + @normalized_userinfo = NONE + @authority = nil + @normalized_password = NONE + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The userinfo component for this URI. + # Combines the user and password components. + # + # @return [String] The userinfo component. + def userinfo + current_user = self.user + current_password = self.password + (current_user || current_password) && @userinfo ||= begin + if current_user && current_password + "#{current_user}:#{current_password}" + elsif current_user && !current_password + "#{current_user}" + end + end + end + + ## + # The userinfo component for this URI, normalized. + # + # @return [String] The userinfo component, normalized. + def normalized_userinfo + return nil unless self.userinfo + return @normalized_userinfo unless @normalized_userinfo == NONE + @normalized_userinfo = begin + current_user = self.normalized_user + current_password = self.normalized_password + if !current_user && !current_password + nil + elsif current_user && current_password + "#{current_user}:#{current_password}".dup + elsif current_user && !current_password + "#{current_user}".dup + end + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_userinfo) + @normalized_userinfo + end + + ## + # Sets the userinfo component for this URI. + # + # @param [String, #to_str] new_userinfo The new userinfo component. + def userinfo=(new_userinfo) + if new_userinfo && !new_userinfo.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_userinfo.class} into String." 
+ end + new_user, new_password = if new_userinfo + [ + new_userinfo.to_str.strip[/^(.*):/, 1], + new_userinfo.to_str.strip[/:(.*)$/, 1] + ] + else + [nil, nil] + end + + # Password assigned first to ensure validity in case of nil + self.password = new_password + self.user = new_user + + # Reset dependent values + @authority = nil + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The host component for this URI. + # + # @return [String] The host component. + attr_reader :host + + ## + # The host component for this URI, normalized. + # + # @return [String] The host component, normalized. + def normalized_host + return nil unless self.host + + @normalized_host ||= begin + if !self.host.strip.empty? + result = ::Addressable::IDNA.to_ascii( + URI.unencode_component(self.host.strip.downcase) + ) + if result =~ /[^\.]\.$/ + # Single trailing dots are unnecessary. + result = result[0...-1] + end + result = Addressable::URI.normalize_component( + result, + NormalizeCharacterClasses::HOST + ) + result + else + EMPTY_STR.dup + end + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_host) + @normalized_host + end + + ## + # Sets the host component for this URI. + # + # @param [String, #to_str] new_host The new host component. + def host=(new_host) + if new_host && !new_host.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_host.class} into String." + end + @host = new_host ? new_host.to_str : nil + + # Reset dependent values + @authority = nil + @normalized_host = nil + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # This method is same as URI::Generic#host except + # brackets for IPv6 (and 'IPvFuture') addresses are removed. + # + # @see Addressable::URI#host + # + # @return [String] The hostname for this URI. + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + + ## + # This method is same as URI::Generic#host= except + # the argument can be a bare IPv6 address (or 'IPvFuture'). + # + # @see Addressable::URI#host= + # + # @param [String, #to_str] new_hostname The new hostname for this URI. + def hostname=(new_hostname) + if new_hostname && + (new_hostname.respond_to?(:ipv4?) || new_hostname.respond_to?(:ipv6?)) + new_hostname = new_hostname.to_s + elsif new_hostname && !new_hostname.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_hostname.class} into String." + end + v = new_hostname ? new_hostname.to_str : nil + v = "[#{v}]" if /\A\[.*\]\z/ !~ v && /:/ =~ v + self.host = v + end + + ## + # Returns the top-level domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").tld # => "co.uk" + def tld + PublicSuffix.parse(self.host, ignore_private: true).tld + end + + ## + # Sets the top-level domain for this URI. + # + # @param [String, #to_str] new_tld The new top-level domain. + def tld=(new_tld) + replaced_tld = host.sub(/#{tld}\z/, new_tld) + self.host = PublicSuffix::Domain.new(replaced_tld).to_s + end + + ## + # Returns the public suffix domain for this host. + # + # @example + # Addressable::URI.parse("http://www.example.co.uk").domain # => "example.co.uk" + def domain + PublicSuffix.domain(self.host, ignore_private: true) + end + + ## + # The authority component for this URI. + # Combines the user, password, host, and port components. + # + # @return [String] The authority component. 
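+ #
+ # @example
+ #   # an illustrative URI showing the composition described above
+ #   Addressable::URI.parse("http://user:pass@example.com:8080/").authority
+ #   #=> "user:pass@example.com:8080"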
+ def authority + self.host && @authority ||= begin + authority = String.new + if self.userinfo != nil + authority << "#{self.userinfo}@" + end + authority << self.host + if self.port != nil + authority << ":#{self.port}" + end + authority + end + end + + ## + # The authority component for this URI, normalized. + # + # @return [String] The authority component, normalized. + def normalized_authority + return nil unless self.authority + @normalized_authority ||= begin + authority = String.new + if self.normalized_userinfo != nil + authority << "#{self.normalized_userinfo}@" + end + authority << self.normalized_host + if self.normalized_port != nil + authority << ":#{self.normalized_port}" + end + authority + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_authority) + @normalized_authority + end + + ## + # Sets the authority component for this URI. + # + # @param [String, #to_str] new_authority The new authority component. + def authority=(new_authority) + if new_authority + if !new_authority.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_authority.class} into String." + end + new_authority = new_authority.to_str + new_userinfo = new_authority[/^([^\[\]]*)@/, 1] + if new_userinfo + new_user = new_userinfo.strip[/^([^:]*):?/, 1] + new_password = new_userinfo.strip[/:(.*)$/, 1] + end + new_host = new_authority.sub( + /^([^\[\]]*)@/, EMPTY_STR + ).sub( + /:([^:@\[\]]*?)$/, EMPTY_STR + ) + new_port = + new_authority[/:([^:@\[\]]*?)$/, 1] + end + + # Password assigned first to ensure validity in case of nil + self.password = new_password + self.user = new_user + self.host = new_host + self.port = new_port + + # Reset dependent values + @userinfo = nil + @normalized_userinfo = NONE + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # The origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. + # + # @return [String] The serialized origin. + def origin + if self.scheme && self.authority + if self.normalized_port + "#{self.normalized_scheme}://#{self.normalized_host}" + + ":#{self.normalized_port}" + else + "#{self.normalized_scheme}://#{self.normalized_host}" + end + else + "null" + end + end + + ## + # Sets the origin for this URI, serialized to ASCII, as per + # RFC 6454, section 6.2. This assignment will reset the `userinfo` + # component. + # + # @param [String, #to_str] new_origin The new origin component. + def origin=(new_origin) + if new_origin + if !new_origin.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_origin.class} into String." + end + new_origin = new_origin.to_str + new_scheme = new_origin[/^([^:\/?#]+):\/\//, 1] + unless new_scheme + raise InvalidURIError, 'An origin cannot omit the scheme.' + end + new_host = new_origin[/:\/\/([^\/?#:]+)/, 1] + unless new_host + raise InvalidURIError, 'An origin cannot omit the host.' + end + new_port = new_origin[/:([^:@\[\]\/]*?)$/, 1] + end + + self.scheme = new_scheme + self.host = new_host + self.port = new_port + self.userinfo = nil + + # Reset dependent values + @userinfo = nil + @normalized_userinfo = NONE + @authority = nil + @normalized_authority = nil + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + # Returns an array of known ip-based schemes. 
These schemes typically
+ # use a similar URI form:
+ # //<user>:<password>@<host>:<port>/<path>
+ def self.ip_based_schemes
+ return self.port_mapping.keys
+ end
+
+ # Returns a hash of common IP-based schemes and their default port
+ # numbers. Adding new schemes to this hash, as necessary, will allow
+ # for better URI normalization.
+ def self.port_mapping
+ PORT_MAPPING
+ end
+
+ ##
+ # The port component for this URI.
+ # This is the port number actually given in the URI. This does not
+ # infer port numbers from default values.
+ #
+ # @return [Integer] The port component.
+ attr_reader :port
+
+ ##
+ # The port component for this URI, normalized.
+ #
+ # @return [Integer] The port component, normalized.
+ def normalized_port
+ return nil unless self.port
+ return @normalized_port unless @normalized_port == NONE
+ @normalized_port = begin
+ if URI.port_mapping[self.normalized_scheme] == self.port
+ nil
+ else
+ self.port
+ end
+ end
+ end
+
+ ##
+ # Sets the port component for this URI.
+ #
+ # @param [String, Integer, #to_s] new_port The new port component.
+ def port=(new_port)
+ if new_port != nil && new_port.respond_to?(:to_str)
+ new_port = Addressable::URI.unencode_component(new_port.to_str)
+ end
+
+ if new_port.respond_to?(:valid_encoding?) && !new_port.valid_encoding?
+ raise InvalidURIError, "Invalid encoding in port"
+ end
+
+ if new_port != nil && !(new_port.to_s =~ /^\d+$/)
+ raise InvalidURIError,
+ "Invalid port number: #{new_port.inspect}"
+ end
+
+ @port = new_port.to_s.to_i
+ @port = nil if @port == 0
+
+ # Reset dependent values
+ @authority = nil
+ @normalized_port = NONE
+ remove_composite_values
+
+ # Ensure we haven't created an invalid URI
+ validate()
+ end
+
+ ##
+ # The inferred port component for this URI.
+ # This method will normalize to the default port for the URI's scheme if
+ # the port isn't explicitly specified in the URI.
+ #
+ # @return [Integer] The inferred port component.
+ def inferred_port
+ if self.port.to_i == 0
+ self.default_port
+ else
+ self.port.to_i
+ end
+ end
+
+ ##
+ # The default port for this URI's scheme.
+ # This method will always return the default port for the URI's scheme
+ # regardless of the presence of an explicit port in the URI.
+ #
+ # @return [Integer] The default port.
+ def default_port
+ URI.port_mapping[self.scheme.strip.downcase] if self.scheme
+ end
+
+ ##
+ # The combination of components that represent a site.
+ # Combines the scheme, user, password, host, and port components.
+ # Primarily useful for HTTP and HTTPS.
+ #
+ # For example, "http://example.com/path?query" would have a
+ # site value of "http://example.com".
+ #
+ # @return [String] The components that identify a site.
+ def site
+ (self.scheme || self.authority) && @site ||= begin
+ site_string = "".dup
+ site_string << "#{self.scheme}:" if self.scheme != nil
+ site_string << "//#{self.authority}" if self.authority != nil
+ site_string
+ end
+ end
+
+ ##
+ # The normalized combination of components that represent a site.
+ # Combines the scheme, user, password, host, and port components.
+ # Primarily useful for HTTP and HTTPS.
+ #
+ # For example, "http://example.com/path?query" would have a
+ # site value of "http://example.com".
+ #
+ # @return [String] The normalized components that identify a site.
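+ #
+ # @example
+ #   # an illustrative URI; the scheme and host are lowercased and
+ #   # the scheme's default port is dropped during normalization
+ #   Addressable::URI.parse("HTTP://Example.COM:80/path").normalized_site
+ #   #=> "http://example.com"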
+ def normalized_site
+ return nil unless self.site
+ @normalized_site ||= begin
+ site_string = "".dup
+ if self.normalized_scheme != nil
+ site_string << "#{self.normalized_scheme}:"
+ end
+ if self.normalized_authority != nil
+ site_string << "//#{self.normalized_authority}"
+ end
+ site_string
+ end
+ # All normalized values should be UTF-8
+ force_utf8_encoding_if_needed(@normalized_site)
+ @normalized_site
+ end
+
+ ##
+ # Sets the site value for this URI.
+ #
+ # @param [String, #to_str] new_site The new site value.
+ def site=(new_site)
+ if new_site
+ if !new_site.respond_to?(:to_str)
+ raise TypeError, "Can't convert #{new_site.class} into String."
+ end
+ new_site = new_site.to_str
+ # These two regular expressions are derived from the primary parsing
+ # expression
+ self.scheme = new_site[/^(?:([^:\/?#]+):)?(?:\/\/(?:[^\/?#]*))?$/, 1]
+ self.authority = new_site[
+ /^(?:(?:[^:\/?#]+):)?(?:\/\/([^\/?#]*))?$/, 1
+ ]
+ else
+ self.scheme = nil
+ self.authority = nil
+ end
+ end
+
+ ##
+ # The path component for this URI.
+ #
+ # @return [String] The path component.
+ attr_reader :path
+
+ NORMPATH = /^(?!\/)[^\/:]*:.*$/
+ ##
+ # The path component for this URI, normalized.
+ #
+ # @return [String] The path component, normalized.
+ def normalized_path
+ @normalized_path ||= begin
+ path = self.path.to_s
+ if self.scheme == nil && path =~ NORMPATH
+ # Relative paths with colons in the first segment are ambiguous.
+ path = path.sub(":", "%2F")
+ end
+ # String#split(delimiter, -1) uses the more strict splitting behavior
+ # found by default in Python.
+ result = path.strip.split(SLASH, -1).map do |segment|
+ Addressable::URI.normalize_component(
+ segment,
+ Addressable::URI::NormalizeCharacterClasses::PCHAR
+ )
+ end.join(SLASH)
+
+ result = URI.normalize_path(result)
+ if result.empty? &&
+ ["http", "https", "ftp", "tftp"].include?(self.normalized_scheme)
+ result = SLASH.dup
+ end
+ result
+ end
+ # All normalized values should be UTF-8
+ force_utf8_encoding_if_needed(@normalized_path)
+ @normalized_path
+ end
+
+ ##
+ # Sets the path component for this URI.
+ #
+ # @param [String, #to_str] new_path The new path component.
+ def path=(new_path)
+ if new_path && !new_path.respond_to?(:to_str)
+ raise TypeError, "Can't convert #{new_path.class} into String."
+ end
+ @path = (new_path || EMPTY_STR).to_str
+ if !@path.empty? && @path[0..0] != SLASH && host != nil
+ @path = "/#{@path}"
+ end
+
+ # Reset dependent values
+ @normalized_path = nil
+ remove_composite_values
+
+ # Ensure we haven't created an invalid URI
+ validate()
+ end
+
+ ##
+ # The basename, if any, of the file in the path component.
+ #
+ # @return [String] The path's basename.
+ def basename
+ # Path cannot be nil
+ return File.basename(self.path).sub(/;[^\/]*$/, EMPTY_STR)
+ end
+
+ ##
+ # The extname, if any, of the file in the path component.
+ # Empty string if there is no extension.
+ #
+ # @return [String] The path's extname.
+ def extname
+ return nil unless self.path
+ return File.extname(self.basename)
+ end
+
+ ##
+ # The query component for this URI.
+ #
+ # @return [String] The query component.
+ attr_reader :query
+
+ ##
+ # The query component for this URI, normalized.
+ #
+ # @return [String] The query component, normalized.
+ def normalized_query(*flags)
+ return nil unless self.query
+ return @normalized_query unless @normalized_query == NONE
+ @normalized_query = begin
+ modified_query_class = Addressable::URI::CharacterClasses::QUERY.dup
+ # Make sure possible key-value pair delimiters are escaped.
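+ # ("\&" and "\;" are dropped from the copied class so that pair
+ # delimiters would not be treated as already-safe characters.)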
+ modified_query_class.sub!("\\&", "").sub!("\\;", "") + pairs = (query || "").split("&", -1) + pairs.delete_if(&:empty?).uniq! if flags.include?(:compacted) + pairs.sort! if flags.include?(:sorted) + component = pairs.map do |pair| + Addressable::URI.normalize_component( + pair, + Addressable::URI::NormalizeCharacterClasses::QUERY, + "+" + ) + end.join("&") + component == "" ? nil : component + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_query) + @normalized_query + end + + ## + # Sets the query component for this URI. + # + # @param [String, #to_str] new_query The new query component. + def query=(new_query) + if new_query && !new_query.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_query.class} into String." + end + @query = new_query ? new_query.to_str : nil + + # Reset dependent values + @normalized_query = NONE + remove_composite_values + end + + ## + # Converts the query component to a Hash value. + # + # @param [Class] return_type The return type desired. Value must be either + # `Hash` or `Array`. + # + # @return [Hash, Array, nil] The query string parsed as a Hash or Array + # or nil if the query string is blank. + # + # @example + # Addressable::URI.parse("?one=1&two=2&three=3").query_values + # #=> {"one" => "1", "two" => "2", "three" => "3"} + # Addressable::URI.parse("?one=two&one=three").query_values(Array) + # #=> [["one", "two"], ["one", "three"]] + # Addressable::URI.parse("?one=two&one=three").query_values(Hash) + # #=> {"one" => "three"} + # Addressable::URI.parse("?").query_values + # #=> {} + # Addressable::URI.parse("").query_values + # #=> nil + def query_values(return_type=Hash) + empty_accumulator = Array == return_type ? [] : {} + if return_type != Hash && return_type != Array + raise ArgumentError, "Invalid return type. Must be Hash or Array." + end + return nil if self.query == nil + split_query = self.query.split("&").map do |pair| + pair.split("=", 2) if pair && !pair.empty? + end.compact + return split_query.inject(empty_accumulator.dup) do |accu, pair| + # I'd rather use key/value identifiers instead of array lookups, + # but in this case I really want to maintain the exact pair structure, + # so it's best to make all changes in-place. + pair[0] = URI.unencode_component(pair[0]) + if pair[1].respond_to?(:to_str) + value = pair[1].to_str + # I loathe the fact that I have to do this. Stupid HTML 4.01. + # Treating '+' as a space was just an unbelievably bad idea. + # There was nothing wrong with '%20'! + # If it ain't broke, don't fix it! + value = value.tr("+", " ") if ["http", "https", nil].include?(scheme) + pair[1] = URI.unencode_component(value) + end + if return_type == Hash + accu[pair[0]] = pair[1] + else + accu << pair + end + accu + end + end + + ## + # Sets the query component for this URI from a Hash object. + # An empty Hash or Array will result in an empty query string. + # + # @param [Hash, #to_hash, Array] new_query_values The new query values. 
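+ #   Symbol keys are converted to Strings; Hash input is sorted by key
+ #   before encoding, while Array input preserves its order, as the
+ #   examples below show.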
+ # + # @example + # uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', 'c'], ['b', 'd'], ['b', 'e']] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['a', 'a'], ['b', ['c', 'd', 'e']]] + # uri.query + # # => "a=a&b=c&b=d&b=e" + # uri.query_values = [['flag'], ['key', 'value']] + # uri.query + # # => "flag&key=value" + def query_values=(new_query_values) + if new_query_values == nil + self.query = nil + return nil + end + + if !new_query_values.is_a?(Array) + if !new_query_values.respond_to?(:to_hash) + raise TypeError, + "Can't convert #{new_query_values.class} into Hash." + end + new_query_values = new_query_values.to_hash + new_query_values = new_query_values.map do |key, value| + key = key.to_s if key.kind_of?(Symbol) + [key, value] + end + # Useful default for OAuth and caching. + # Only to be used for non-Array inputs. Arrays should preserve order. + new_query_values.sort! + end + + # new_query_values have form [['key1', 'value1'], ['key2', 'value2']] + buffer = "".dup + new_query_values.each do |key, value| + encoded_key = URI.encode_component( + key, CharacterClasses::UNRESERVED + ) + if value == nil + buffer << "#{encoded_key}&" + elsif value.kind_of?(Array) + value.each do |sub_value| + encoded_value = URI.encode_component( + sub_value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + else + encoded_value = URI.encode_component( + value, CharacterClasses::UNRESERVED + ) + buffer << "#{encoded_key}=#{encoded_value}&" + end + end + self.query = buffer.chop + end + + ## + # The HTTP request URI for this URI. This is the path and the + # query string. + # + # @return [String] The request URI required for an HTTP request. + def request_uri + return nil if self.absolute? && self.scheme !~ /^https?$/i + return ( + (!self.path.empty? ? self.path : SLASH) + + (self.query ? "?#{self.query}" : EMPTY_STR) + ) + end + + ## + # Sets the HTTP request URI for this URI. + # + # @param [String, #to_str] new_request_uri The new HTTP request URI. + def request_uri=(new_request_uri) + if !new_request_uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_request_uri.class} into String." + end + if self.absolute? && self.scheme !~ /^https?$/i + raise InvalidURIError, + "Cannot set an HTTP request URI for a non-HTTP URI." + end + new_request_uri = new_request_uri.to_str + path_component = new_request_uri[/^([^\?]*)\??(?:.*)$/, 1] + query_component = new_request_uri[/^(?:[^\?]*)\?(.*)$/, 1] + path_component = path_component.to_s + path_component = (!path_component.empty? ? path_component : SLASH) + self.path = path_component + self.query = query_component + + # Reset dependent values + remove_composite_values + end + + ## + # The fragment component for this URI. + # + # @return [String] The fragment component. + attr_reader :fragment + + ## + # The fragment component for this URI, normalized. + # + # @return [String] The fragment component, normalized. + def normalized_fragment + return nil unless self.fragment + return @normalized_fragment unless @normalized_fragment == NONE + @normalized_fragment = begin + component = Addressable::URI.normalize_component( + self.fragment, + Addressable::URI::NormalizeCharacterClasses::FRAGMENT + ) + component == "" ? nil : component + end + # All normalized values should be UTF-8 + force_utf8_encoding_if_needed(@normalized_fragment) + @normalized_fragment + end + + ## + # Sets the fragment component for this URI. 
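+ # Assigning nil removes the fragment entirely.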
+ # + # @param [String, #to_str] new_fragment The new fragment component. + def fragment=(new_fragment) + if new_fragment && !new_fragment.respond_to?(:to_str) + raise TypeError, "Can't convert #{new_fragment.class} into String." + end + @fragment = new_fragment ? new_fragment.to_str : nil + + # Reset dependent values + @normalized_fragment = NONE + remove_composite_values + + # Ensure we haven't created an invalid URI + validate() + end + + ## + # Determines if the scheme indicates an IP-based protocol. + # + # @return [TrueClass, FalseClass] + # true if the scheme indicates an IP-based protocol. + # false otherwise. + def ip_based? + if self.scheme + return URI.ip_based_schemes.include?( + self.scheme.strip.downcase) + end + return false + end + + ## + # Determines if the URI is relative. + # + # @return [TrueClass, FalseClass] + # true if the URI is relative. false + # otherwise. + def relative? + return self.scheme.nil? + end + + ## + # Determines if the URI is absolute. + # + # @return [TrueClass, FalseClass] + # true if the URI is absolute. false + # otherwise. + def absolute? + return !relative? + end + + ## + # Joins two URIs together. + # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + def join(uri) + if !uri.respond_to?(:to_str) + raise TypeError, "Can't convert #{uri.class} into String." + end + if !uri.kind_of?(URI) + # Otherwise, convert to a String, then parse. + uri = URI.parse(uri.to_str) + end + if uri.to_s.empty? + return self.dup + end + + joined_scheme = nil + joined_user = nil + joined_password = nil + joined_host = nil + joined_port = nil + joined_path = nil + joined_query = nil + joined_fragment = nil + + # Section 5.2.2 of RFC 3986 + if uri.scheme != nil + joined_scheme = uri.scheme + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.authority != nil + joined_user = uri.user + joined_password = uri.password + joined_host = uri.host + joined_port = uri.port + joined_path = URI.normalize_path(uri.path) + joined_query = uri.query + else + if uri.path == nil || uri.path.empty? + joined_path = self.path + if uri.query != nil + joined_query = uri.query + else + joined_query = self.query + end + else + if uri.path[0..0] == SLASH + joined_path = URI.normalize_path(uri.path) + else + base_path = self.path.dup + base_path = EMPTY_STR if base_path == nil + base_path = URI.normalize_path(base_path) + + # Section 5.2.3 of RFC 3986 + # + # Removes the right-most path segment from the base path. + if base_path.include?(SLASH) + base_path.sub!(/\/[^\/]+$/, SLASH) + else + base_path = EMPTY_STR + end + + # If the base path is empty and an authority segment has been + # defined, use a base path of SLASH + if base_path.empty? && self.authority != nil + base_path = SLASH + end + + joined_path = URI.normalize_path(base_path + uri.path) + end + joined_query = uri.query + end + joined_user = self.user + joined_password = self.password + joined_host = self.host + joined_port = self.port + end + joined_scheme = self.scheme + end + joined_fragment = uri.fragment + + return self.class.new( + :scheme => joined_scheme, + :user => joined_user, + :password => joined_password, + :host => joined_host, + :port => joined_port, + :path => joined_path, + :query => joined_query, + :fragment => joined_fragment + ) + end + alias_method :+, :join + + ## + # Destructive form of join. 
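+ # Replaces the state of self with the result of self.join(uri).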
+ # + # @param [String, Addressable::URI, #to_str] The URI to join with. + # + # @return [Addressable::URI] The joined URI. + # + # @see Addressable::URI#join + def join!(uri) + replace_self(self.join(uri)) + end + + ## + # Merges a URI with a Hash of components. + # This method has different behavior from join. Any + # components present in the hash parameter will override the + # original components. The path component is not treated specially. + # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Hash#merge + def merge(hash) + unless hash.respond_to?(:to_hash) + raise TypeError, "Can't convert #{hash.class} into Hash." + end + hash = hash.to_hash + + if hash.has_key?(:authority) + if (hash.keys & [:userinfo, :user, :password, :host, :port]).any? + raise ArgumentError, + "Cannot specify both an authority and any of the components " + + "within the authority." + end + end + if hash.has_key?(:userinfo) + if (hash.keys & [:user, :password]).any? + raise ArgumentError, + "Cannot specify both a userinfo and either the user or password." + end + end + + uri = self.class.new + uri.defer_validation do + # Bunch of crazy logic required because of the composite components + # like userinfo and authority. + uri.scheme = + hash.has_key?(:scheme) ? hash[:scheme] : self.scheme + if hash.has_key?(:authority) + uri.authority = + hash.has_key?(:authority) ? hash[:authority] : self.authority + end + if hash.has_key?(:userinfo) + uri.userinfo = + hash.has_key?(:userinfo) ? hash[:userinfo] : self.userinfo + end + if !hash.has_key?(:userinfo) && !hash.has_key?(:authority) + uri.user = + hash.has_key?(:user) ? hash[:user] : self.user + uri.password = + hash.has_key?(:password) ? hash[:password] : self.password + end + if !hash.has_key?(:authority) + uri.host = + hash.has_key?(:host) ? hash[:host] : self.host + uri.port = + hash.has_key?(:port) ? hash[:port] : self.port + end + uri.path = + hash.has_key?(:path) ? hash[:path] : self.path + uri.query = + hash.has_key?(:query) ? hash[:query] : self.query + uri.fragment = + hash.has_key?(:fragment) ? hash[:fragment] : self.fragment + end + + return uri + end + + ## + # Destructive form of merge. + # + # @param [Hash, Addressable::URI, #to_hash] The components to merge with. + # + # @return [Addressable::URI] The merged URI. + # + # @see Addressable::URI#merge + def merge!(uri) + replace_self(self.merge(uri)) + end + + ## + # Returns the shortest normalized relative form of this URI that uses the + # supplied URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_to. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route from. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the original URI. + def route_from(uri) + uri = URI.parse(uri).normalize + normalized_self = self.normalize + if normalized_self.relative? + raise ArgumentError, "Expected absolute URI, got: #{self.to_s}" + end + if uri.relative? 
+ raise ArgumentError, "Expected absolute URI, got: #{uri.to_s}" + end + if normalized_self == uri + return Addressable::URI.parse("##{normalized_self.fragment}") + end + components = normalized_self.to_hash + if normalized_self.scheme == uri.scheme + components[:scheme] = nil + if normalized_self.authority == uri.authority + components[:user] = nil + components[:password] = nil + components[:host] = nil + components[:port] = nil + if normalized_self.path == uri.path + components[:path] = nil + if normalized_self.query == uri.query + components[:query] = nil + end + else + if uri.path != SLASH and components[:path] + self_splitted_path = split_path(components[:path]) + uri_splitted_path = split_path(uri.path) + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + while !self_splitted_path.empty? && !uri_splitted_path.empty? and self_dir == uri_dir + self_dir = self_splitted_path.shift + uri_dir = uri_splitted_path.shift + end + components[:path] = (uri_splitted_path.fill('..') + [self_dir] + self_splitted_path).join(SLASH) + end + end + end + end + # Avoid network-path references. + if components[:host] != nil + components[:scheme] = normalized_self.scheme + end + return Addressable::URI.new( + :scheme => components[:scheme], + :user => components[:user], + :password => components[:password], + :host => components[:host], + :port => components[:port], + :path => components[:path], + :query => components[:query], + :fragment => components[:fragment] + ) + end + + ## + # Returns the shortest normalized relative form of the supplied URI that + # uses this URI as a base for resolution. Returns an absolute URI if + # necessary. This is effectively the opposite of route_from. + # + # @param [String, Addressable::URI, #to_str] uri The URI to route to. + # + # @return [Addressable::URI] + # The normalized relative URI that is equivalent to the supplied URI. + def route_to(uri) + return URI.parse(uri).route_from(self) + end + + ## + # Returns a normalized URI object. + # + # NOTE: This method does not attempt to fully conform to specifications. + # It exists largely to correct other people's failures to read the + # specifications, and also to deal with caching issues since several + # different URIs may represent the same resource and should not be + # cached multiple times. + # + # @return [Addressable::URI] The normalized URI. + def normalize + # This is a special exception for the frequently misused feed + # URI scheme. + if normalized_scheme == "feed" + if self.to_s =~ /^feed:\/*http:\/*/ + return URI.parse( + self.to_s[/^feed:\/*(http:\/*.*)/, 1] + ).normalize + end + end + + return self.class.new( + :scheme => normalized_scheme, + :authority => normalized_authority, + :path => normalized_path, + :query => normalized_query, + :fragment => normalized_fragment + ) + end + + ## + # Destructively normalizes this URI object. + # + # @return [Addressable::URI] The normalized URI. + # + # @see Addressable::URI#normalize + def normalize! + replace_self(self.normalize) + end + + ## + # Creates a URI suitable for display to users. If semantic attacks are + # likely, the application should try to detect these and warn the user. + # See RFC 3986, + # section 7.6 for more information. + # + # @return [Addressable::URI] A URI suitable for display purposes. + def display_uri + display_uri = self.normalize + display_uri.host = ::Addressable::IDNA.to_unicode(display_uri.host) + return display_uri + end + + ## + # Returns true if the URI objects are equal. 
This method
+ # normalizes both URIs before doing the comparison, and allows comparison
+ # against Strings.
+ #
+ # @param [Object] uri The URI to compare.
+ #
+ # @return [TrueClass, FalseClass]
+ # true if the URIs are equivalent, false
+ # otherwise.
+ def ===(uri)
+ if uri.respond_to?(:normalize)
+ uri_string = uri.normalize.to_s
+ else
+ begin
+ uri_string = ::Addressable::URI.parse(uri).normalize.to_s
+ rescue InvalidURIError, TypeError
+ return false
+ end
+ end
+ return self.normalize.to_s == uri_string
+ end
+
+ ##
+ # Returns true if the URI objects are equal. This method
+ # normalizes both URIs before doing the comparison.
+ #
+ # @param [Object] uri The URI to compare.
+ #
+ # @return [TrueClass, FalseClass]
+ # true if the URIs are equivalent, false
+ # otherwise.
+ def ==(uri)
+ return false unless uri.kind_of?(URI)
+ return self.normalize.to_s == uri.normalize.to_s
+ end
+
+ ##
+ # Returns true if the URI objects are equal. This method
+ # does NOT normalize either URI before doing the comparison.
+ #
+ # @param [Object] uri The URI to compare.
+ #
+ # @return [TrueClass, FalseClass]
+ # true if the URIs are equivalent, false
+ # otherwise.
+ def eql?(uri)
+ return false unless uri.kind_of?(URI)
+ return self.to_s == uri.to_s
+ end
+
+ ##
+ # A hash value that will make a URI equivalent to its normalized
+ # form.
+ #
+ # @return [Integer] A hash of the URI.
+ def hash
+ @hash ||= self.to_s.hash * -1
+ end
+
+ ##
+ # Clones the URI object.
+ #
+ # @return [Addressable::URI] The cloned URI.
+ def dup
+ duplicated_uri = self.class.new(
+ :scheme => self.scheme ? self.scheme.dup : nil,
+ :user => self.user ? self.user.dup : nil,
+ :password => self.password ? self.password.dup : nil,
+ :host => self.host ? self.host.dup : nil,
+ :port => self.port,
+ :path => self.path ? self.path.dup : nil,
+ :query => self.query ? self.query.dup : nil,
+ :fragment => self.fragment ? self.fragment.dup : nil
+ )
+ return duplicated_uri
+ end
+
+ ##
+ # Omits components from a URI.
+ #
+ # @param [Symbol] *components The components to be omitted.
+ #
+ # @return [Addressable::URI] The URI with components omitted.
+ #
+ # @example
+ # uri = Addressable::URI.parse("http://example.com/path?query")
+ # #=> #<Addressable::URI:0x... URI:http://example.com/path?query>
+ # uri.omit(:scheme, :authority)
+ # #=> #<Addressable::URI:0x... URI:/path?query>
+ def omit(*components)
+ invalid_components = components - [
+ :scheme, :user, :password, :userinfo, :host, :port, :authority,
+ :path, :query, :fragment
+ ]
+ unless invalid_components.empty?
+ raise ArgumentError,
+ "Invalid component names: #{invalid_components.inspect}."
+ end
+ duplicated_uri = self.dup
+ duplicated_uri.defer_validation do
+ components.each do |component|
+ duplicated_uri.send((component.to_s + "=").to_sym, nil)
+ end
+ duplicated_uri.user = duplicated_uri.normalized_user
+ end
+ duplicated_uri
+ end
+
+ ##
+ # Destructive form of omit.
+ #
+ # @param [Symbol] *components The components to be omitted.
+ #
+ # @return [Addressable::URI] The URI with components omitted.
+ #
+ # @see Addressable::URI#omit
+ def omit!(*components)
+ replace_self(self.omit(*components))
+ end
+
+ ##
+ # Determines if the URI is an empty string.
+ #
+ # @return [TrueClass, FalseClass]
+ # Returns true if empty, false otherwise.
+ def empty?
+ return self.to_s.empty?
+ end
+
+ ##
+ # Converts the URI to a String.
+ #
+ # @return [String] The URI's String representation.
+ def to_s
+ if self.scheme == nil && self.path != nil && !self.path.empty?
&& + self.path =~ NORMPATH + raise InvalidURIError, + "Cannot assemble URI string with ambiguous path: '#{self.path}'" + end + @uri_string ||= begin + uri_string = String.new + uri_string << "#{self.scheme}:" if self.scheme != nil + uri_string << "//#{self.authority}" if self.authority != nil + uri_string << self.path.to_s + uri_string << "?#{self.query}" if self.query != nil + uri_string << "##{self.fragment}" if self.fragment != nil + uri_string.force_encoding(Encoding::UTF_8) + uri_string + end + end + + ## + # URI's are glorified Strings. Allow implicit conversion. + alias_method :to_str, :to_s + + ## + # Returns a Hash of the URI components. + # + # @return [Hash] The URI as a Hash of components. + def to_hash + return { + :scheme => self.scheme, + :user => self.user, + :password => self.password, + :host => self.host, + :port => self.port, + :path => self.path, + :query => self.query, + :fragment => self.fragment + } + end + + ## + # Returns a String representation of the URI object's state. + # + # @return [String] The URI object's state, as a String. + def inspect + sprintf("#<%s:%#0x URI:%s>", URI.to_s, self.object_id, self.to_s) + end + + ## + # This method allows you to make several changes to a URI simultaneously, + # which separately would cause validation errors, but in conjunction, + # are valid. The URI will be revalidated as soon as the entire block has + # been executed. + # + # @param [Proc] block + # A set of operations to perform on a given URI. + def defer_validation + raise LocalJumpError, "No block given." unless block_given? + @validation_deferred = true + yield + @validation_deferred = false + validate + ensure + @validation_deferred = false + end + + protected + SELF_REF = '.' + PARENT = '..' + + RULE_2A = /\/\.\/|\/\.$/ + RULE_2B_2C = /\/([^\/]*)\/\.\.\/|\/([^\/]*)\/\.\.$/ + RULE_2D = /^\.\.?\/?/ + RULE_PREFIXED_PARENT = /^\/\.\.?\/|^(\/\.\.?)+\/?$/ + + ## + # Resolves paths to their simplest form. + # + # @param [String] path The path to normalize. + # + # @return [String] The normalized path. + def self.normalize_path(path) + # Section 5.2.4 of RFC 3986 + + return if path.nil? + normalized_path = path.dup + loop do + mod ||= normalized_path.gsub!(RULE_2A, SLASH) + + pair = normalized_path.match(RULE_2B_2C) + if pair + parent = pair[1] + current = pair[2] + else + parent = nil + current = nil + end + + regexp = "/#{Regexp.escape(parent.to_s)}/\\.\\./|" + regexp += "(/#{Regexp.escape(current.to_s)}/\\.\\.$)" + + if pair && ((parent != SELF_REF && parent != PARENT) || + (current != SELF_REF && current != PARENT)) + mod ||= normalized_path.gsub!(Regexp.new(regexp), SLASH) + end + + mod ||= normalized_path.gsub!(RULE_2D, EMPTY_STR) + # Non-standard, removes prefixed dotted segments from path. + mod ||= normalized_path.gsub!(RULE_PREFIXED_PARENT, SLASH) + break if mod.nil? + end + + normalized_path + end + + ## + # Ensures that the URI is valid. + def validate + return if !!@validation_deferred + if self.scheme != nil && self.ip_based? && + (self.host == nil || self.host.empty?) && + (self.path == nil || self.path.empty?) + raise InvalidURIError, + "Absolute URI missing hierarchical segment: '#{self.to_s}'" + end + if self.host == nil + if self.port != nil || + self.user != nil || + self.password != nil + raise InvalidURIError, "Hostname not supplied: '#{self.to_s}'" + end + end + if self.path != nil && !self.path.empty? 
&& self.path[0..0] != SLASH && + self.authority != nil + raise InvalidURIError, + "Cannot have a relative path with an authority set: '#{self.to_s}'" + end + if self.path != nil && !self.path.empty? && + self.path[0..1] == SLASH + SLASH && self.authority == nil + raise InvalidURIError, + "Cannot have a path with two leading slashes " + + "without an authority set: '#{self.to_s}'" + end + unreserved = CharacterClasses::UNRESERVED + sub_delims = CharacterClasses::SUB_DELIMS + if !self.host.nil? && (self.host =~ /[<>{}\/\\\?\#\@"[[:space:]]]/ || + (self.host[/^\[(.*)\]$/, 1] != nil && self.host[/^\[(.*)\]$/, 1] !~ + Regexp.new("^[#{unreserved}#{sub_delims}:]*$"))) + raise InvalidURIError, "Invalid character in host: '#{self.host.to_s}'" + end + return nil + end + + ## + # Replaces the internal state of self with the specified URI's state. + # Used in destructive operations to avoid massive code repetition. + # + # @param [Addressable::URI] uri The URI to replace self with. + # + # @return [Addressable::URI] self. + def replace_self(uri) + # Reset dependent values + reset_ivs + + @scheme = uri.scheme + @user = uri.user + @password = uri.password + @host = uri.host + @port = uri.port + @path = uri.path + @query = uri.query + @fragment = uri.fragment + return self + end + + ## + # Splits path string with "/" (slash). + # It is considered that there is empty string after last slash when + # path ends with slash. + # + # @param [String] path The path to split. + # + # @return [Array] An array of parts of path. + def split_path(path) + splitted = path.split(SLASH) + splitted << EMPTY_STR if path.end_with? SLASH + splitted + end + + ## + # Resets composite values for the entire URI + # + # @api private + def remove_composite_values + @uri_string = nil + @hash = nil + end + + ## + # Converts the string to be UTF-8 if it is not already UTF-8 + # + # @api private + def force_utf8_encoding_if_needed(str) + if str && str.encoding != Encoding::UTF_8 + str.force_encoding(Encoding::UTF_8) + end + end + + private + + ## + # Resets instance variables + # + # @api private + def reset_ivs + @scheme = nil + @user = nil + @normalized_scheme = NONE + @normalized_user = NONE + @uri_string = nil + @hash = nil + @userinfo = nil + @normalized_userinfo = NONE + @authority = nil + @password = nil + @normalized_authority = nil + @port = nil + @normalized_password = NONE + @host = nil + @normalized_host = nil + @normalized_port = NONE + @path = EMPTY_STR + @normalized_path = nil + @normalized_query = NONE + @fragment = nil + @normalized_fragment = NONE + @query = nil + end + + NONE = Object.new.freeze + + private_constant :NONE + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/version.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/version.rb new file mode 100644 index 0000000..6899157 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/lib/addressable/version.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +#-- +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#++
+
+
+# Used to prevent the class/module from being loaded more than once
+if !defined?(Addressable::VERSION)
+  module Addressable
+    module VERSION
+      MAJOR = 2
+      MINOR = 8
+      TINY = 4
+
+      STRING = [MAJOR, MINOR, TINY].join('.')
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/idna_spec.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/idna_spec.rb
new file mode 100644
index 0000000..428c9ec
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/idna_spec.rb
@@ -0,0 +1,302 @@
+# frozen_string_literal: true
+
+# Copyright (C) Bob Aman
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "spec_helper"
+
+# Have to use RubyGems to load the idn gem.
+require "rubygems"
+
+require "addressable/idna"
+
+shared_examples_for "converting from unicode to ASCII" do
+  it "should convert 'www.google.com' correctly" do
+    expect(Addressable::IDNA.to_ascii("www.google.com")).to eq("www.google.com")
+  end
+
+  long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com'
+  it "should convert '#{long}' correctly" do
+    expect(Addressable::IDNA.to_ascii(long)).to eq(long)
+  end
+
+  it "should convert 'www.詹姆斯.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "www.詹姆斯.com"
+    )).to eq("www.xn--8ws00zhy3a.com")
+  end
+
+  it "also accepts unicode strings encoded as ascii-8bit" do
+    expect(Addressable::IDNA.to_ascii(
+      "www.詹姆斯.com".b
+    )).to eq("www.xn--8ws00zhy3a.com")
+  end
+
+  it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do
+    "www.Iñtërnâtiônàlizætiøn.com"
+    expect(Addressable::IDNA.to_ascii(
+      "www.I\xC3\xB1t\xC3\xABrn\xC3\xA2ti\xC3\xB4" +
+      "n\xC3\xA0liz\xC3\xA6ti\xC3\xB8n.com"
+    )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com")
+  end
+
+  it "should convert 'www.Iñtërnâtiônàlizætiøn.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "www.In\xCC\x83te\xCC\x88rna\xCC\x82tio\xCC\x82n" +
+      "a\xCC\x80liz\xC3\xA6ti\xC3\xB8n.com"
+    )).to eq("www.xn--itrntinliztin-vdb0a5exd8ewcye.com")
+  end
+
+  it "should convert " +
+      "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " +
+      "correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" +
+      "\201\252\343\201\214\343\201\204\343\202\217\343\201\221\343\201\256" +
+      "\343\202\217\343\201\213\343\202\211\343\201\252\343\201\204\343\201" +
+      "\251\343\202\201\343\201\204\343\202\223\343\202\201\343\201\204\343" +
+      "\201\256\343\202\211\343\201\271\343\202\213\343\201\276\343\201\240" +
+      "\343\201\252\343\201\214\343\201\217\343\201\227\343\201\252\343\201" +
+      "\204\343\201\250\343\201\237\343\202\212\343\201\252\343\201\204." +
+      "w3.mag.keio.ac.jp"
+    )).to eq(
+      "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" +
+      "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp"
+    )
+  end
+
+  it "should convert " +
+      "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " +
+      "correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "www.\343\201\273\343\202\223\343\201\250\343\201\206\343\201\253\343" +
+      "\201\252\343\201\213\343\202\231\343\201\204\343\202\217\343\201\221" +
+      "\343\201\256\343\202\217\343\201\213\343\202\211\343\201\252\343\201" +
+      "\204\343\201\250\343\202\231\343\202\201\343\201\204\343\202\223\343" +
+      "\202\201\343\201\204\343\201\256\343\202\211\343\201\270\343\202\231" +
+      "\343\202\213\343\201\276\343\201\237\343\202\231\343\201\252\343\201" +
+      "\213\343\202\231\343\201\217\343\201\227\343\201\252\343\201\204\343" +
+      "\201\250\343\201\237\343\202\212\343\201\252\343\201\204." +
+      "w3.mag.keio.ac.jp"
+    )).to eq(
+      "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" +
+      "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp"
+    )
+  end
+
+  it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "点心和烤鸭.w3.mag.keio.ac.jp"
+    )).to eq("xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp")
+  end
+
+  it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "가각갂갃간갅갆갇갈갉힢힣.com"
+    )).to eq("xn--o39acdefghijk5883jma.com")
+  end
+
+  it "should convert " +
+      "'\347\242\274\346\250\231\346\272\226\350" +
+      "\220\254\345\234\213\347\242\274.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\347\242\274\346\250\231\346\272\226\350" +
+      "\220\254\345\234\213\347\242\274.com"
+    )).to eq("xn--9cs565brid46mda086o.com")
+  end
+
+  it "should convert 'ﾘ宠퐱〹.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\357\276\230\345\256\240\355\220\261\343\200\271.com"
+    )).to eq("xn--eek174hoxfpr4k.com")
+  end
+
+  it "should convert 'リ宠퐱卄.com' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\343\203\252\345\256\240\355\220\261\345\215\204.com"
+    )).to eq("xn--eek174hoxfpr4k.com")
+  end
+
+  it "should convert 'ᆵ' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\341\206\265"
+    )).to eq("xn--4ud")
+  end
+
+  it "should convert 'ﾯ' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\357\276\257"
+    )).to eq("xn--4ud")
+  end
+
+  it "should convert '🌹🌹🌹.ws' correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "\360\237\214\271\360\237\214\271\360\237\214\271.ws"
+    )).to eq("xn--2h8haa.ws")
+  end
+
+  it "should handle two adjacent '.'s correctly" do
+    expect(Addressable::IDNA.to_ascii(
+      "example..host"
+    )).to eq("example..host")
+  end
+end
+
+shared_examples_for "converting from ASCII to unicode" do
+  long = 'AcinusFallumTrompetumNullunCreditumVisumEstAtCuadLongumEtCefallum.com'
+  it "should convert '#{long}' correctly" do
+    expect(Addressable::IDNA.to_unicode(long)).to eq(long)
+  end
+
+  it "should return the identity conversion when punycode decode fails" do
+    expect(Addressable::IDNA.to_unicode("xn--zckp1cyg1.sblo.jp")).to eq(
+      "xn--zckp1cyg1.sblo.jp")
+  end
+
+  it "should return the identity conversion when the ACE prefix has no suffix" do
+    expect(Addressable::IDNA.to_unicode("xn--...-")).to eq("xn--...-")
+  end
+
+  it "should convert 'www.google.com' correctly" do
+    expect(Addressable::IDNA.to_unicode("www.google.com")).to eq(
+      "www.google.com")
+  end
+
+  it "should convert 'www.詹姆斯.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "www.xn--8ws00zhy3a.com"
+    )).to eq("www.詹姆斯.com")
+  end
+
+  it "should convert '詹姆斯.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--8ws00zhy3a.com"
+    )).to eq("詹姆斯.com")
+  end
+
+  it "should convert 'www.iñtërnâtiônàlizætiøn.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "www.xn--itrntinliztin-vdb0a5exd8ewcye.com"
+    )).to eq("www.iñtërnâtiônàlizætiøn.com")
+  end
+
+  it "should convert 'iñtërnâtiônàlizætiøn.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--itrntinliztin-vdb0a5exd8ewcye.com"
+    )).to eq("iñtërnâtiônàlizætiøn.com")
+  end
+
+  it "should convert " +
+      "'www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp' " +
+      "correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3" +
+      "fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp"
+    )).to eq(
+      "www.ほんとうにながいわけのわからないどめいんめいのらべるまだながくしないとたりない.w3.mag.keio.ac.jp"
+    )
+  end
+
+  it "should convert '点心和烤鸭.w3.mag.keio.ac.jp' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--0trv4xfvn8el34t.w3.mag.keio.ac.jp"
+    )).to eq("点心和烤鸭.w3.mag.keio.ac.jp")
+  end
+
+  it "should convert '가각갂갃간갅갆갇갈갉힢힣.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--o39acdefghijk5883jma.com"
+    )).to eq("가각갂갃간갅갆갇갈갉힢힣.com")
+  end
+
+  it "should convert " +
+      "'\347\242\274\346\250\231\346\272\226\350" +
+      "\220\254\345\234\213\347\242\274.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--9cs565brid46mda086o.com"
+    )).to eq(
+      "\347\242\274\346\250\231\346\272\226\350" +
+      "\220\254\345\234\213\347\242\274.com"
+    )
+  end
+
+  it "should convert 'リ宠퐱卄.com' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--eek174hoxfpr4k.com"
+    )).to eq("\343\203\252\345\256\240\355\220\261\345\215\204.com")
+  end
+
+  it "should convert 'ﾯ' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--4ud"
+    )).to eq("\341\206\265")
+  end
+
+  it "should convert '🌹🌹🌹.ws' correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "xn--2h8haa.ws"
+    )).to eq("\360\237\214\271\360\237\214\271\360\237\214\271.ws")
+  end
+
+  it "should handle two adjacent '.'s correctly" do
+    expect(Addressable::IDNA.to_unicode(
+      "example..host"
+    )).to eq("example..host")
+  end
+end
+
+describe Addressable::IDNA, "when using the pure-Ruby implementation" do
+  before do
+    Addressable.send(:remove_const, :IDNA)
+    load "addressable/idna/pure.rb"
+  end
+
+  it_should_behave_like "converting from unicode to ASCII"
+  it_should_behave_like "converting from ASCII to unicode"
+
+  begin
+    require "fiber"
+
+    it "should not blow up inside fibers" do
+      f = Fiber.new do
+        Addressable.send(:remove_const, :IDNA)
+        load "addressable/idna/pure.rb"
+      end
+      f.resume
+    end
+  rescue LoadError
+    # Fibers aren't supported in this version of Ruby, skip this test.
+    warn('Fibers unsupported.')
+  end
+end
+
+begin
+  require "idn"
+
+  describe Addressable::IDNA, "when using the native-code implementation" do
+    before do
+      Addressable.send(:remove_const, :IDNA)
+      load "addressable/idna/native.rb"
+    end
+
+    it_should_behave_like "converting from unicode to ASCII"
+    it_should_behave_like "converting from ASCII to unicode"
+  end
+rescue LoadError => error
+  raise error if ENV["CI"] && TestHelper.native_supported?
+
+  # Cannot test the native implementation without libidn support.
+ warn('Could not load native IDN implementation.') +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/net_http_compat_spec.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/net_http_compat_spec.rb new file mode 100644 index 0000000..d07a43e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/net_http_compat_spec.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" +require "net/http" + +describe Net::HTTP do + it "should be compatible with Addressable" do + response_body = + Net::HTTP.get(Addressable::URI.parse('http://www.google.com/')) + expect(response_body).not_to be_nil + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/security_spec.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/security_spec.rb new file mode 100644 index 0000000..3bf90a2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/security_spec.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "spec_helper"
+
+require "addressable/uri"
+
+describe Addressable::URI, "when created with a URI known to cause crashes " +
+    "in certain browsers" do
+  it "should parse correctly" do
+    uri = Addressable::URI.parse('%%30%30')
+    expect(uri.path).to eq('%%30%30')
+    expect(uri.normalize.path).to eq('%2500')
+  end
+
+  it "should parse correctly as a full URI" do
+    uri = Addressable::URI.parse('http://www.example.com/%%30%30')
+    expect(uri.path).to eq('/%%30%30')
+    expect(uri.normalize.path).to eq('/%2500')
+  end
+end
+
+describe Addressable::URI, "when created with a URI known to cause crashes " +
+    "in certain browsers" do
+  it "should parse correctly" do
+    uri = Addressable::URI.parse('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗')
+    expect(uri.path).to eq('لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗')
+    expect(uri.normalize.path).to eq(
+      '%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' +
+      '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' +
+      '%A3%20%E0%A5%A3%20%E5%86%97'
+    )
+  end
+
+  it "should parse correctly as a full URI" do
+    uri = Addressable::URI.parse('http://www.example.com/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗')
+    expect(uri.path).to eq('/لُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ 冗')
+    expect(uri.normalize.path).to eq(
+      '/%D9%84%D9%8F%D8%B5%D9%91%D8%A8%D9%8F%D9%84%D9%8F%D9%84%D8%B5%D9%91' +
+      '%D8%A8%D9%8F%D8%B1%D8%B1%D9%8B%20%E0%A5%A3%20%E0%A5%A3h%20%E0%A5' +
+      '%A3%20%E0%A5%A3%20%E5%86%97'
+    )
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/template_spec.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/template_spec.rb
new file mode 100644
index 0000000..24616c2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/template_spec.rb
@@ -0,0 +1,1264 @@
+# frozen_string_literal: true
+
+# Copyright (C) Bob Aman
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require "spec_helper"
+
+require "bigdecimal"
+require "timeout"
+require "addressable/template"
+
+shared_examples_for 'expands' do |tests|
+  tests.each do |template, expansion|
+    exp = expansion.is_a?(Array) ? expansion.first : expansion
+    it "#{template} to #{exp}" do
+      tmpl = Addressable::Template.new(template).expand(subject)
+      expect(tmpl.to_str).to eq(expansion)
+    end
+  end
+end
+
+describe "eql?"
do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to be_eql(other_template) + expect(other_template).to be_eql(template) + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).to_not be_eql(other_template) + expect(other_template).to_not be_eql(template) + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).to_not be_eql(addressable_uri) + expect(addressable_uri).to_not be_eql(addressable_template) + end +end + +describe "==" do + let(:template) { Addressable::Template.new('https://www.example.com/{foo}') } + it 'is equal when the pattern matches' do + other_template = Addressable::Template.new('https://www.example.com/{foo}') + expect(template).to eq other_template + expect(other_template).to eq template + end + it 'is not equal when the pattern differs' do + other_template = Addressable::Template.new('https://www.example.com/{bar}') + expect(template).not_to eq other_template + expect(other_template).not_to eq template + end + it 'is not equal to non-templates' do + uri = 'https://www.example.com/foo/bar' + addressable_template = Addressable::Template.new uri + addressable_uri = Addressable::URI.parse uri + expect(addressable_template).not_to eq addressable_uri + expect(addressable_uri).not_to eq addressable_template + end +end + +describe "#to_regexp" do + it "does not match the first line of multiline strings" do + uri = "https://www.example.com/bar" + template = Addressable::Template.new(uri) + expect(template.match(uri)).not_to be_nil + expect(template.match("#{uri}\ngarbage")).to be_nil + end +end + +describe "Type conversion" do + subject { + { + :var => true, + :hello => 1234, + :nothing => nil, + :sym => :symbolic, + :decimal => BigDecimal('1') + } + } + + it_behaves_like 'expands', { + '{var}' => 'true', + '{hello}' => '1234', + '{nothing}' => '', + '{sym}' => 'symbolic', + '{decimal}' => RUBY_VERSION < '2.4.0' ? '0.1E1' : '0.1e1' + } +end + +describe "Level 1:" do + subject { + {:var => "value", :hello => "Hello World!"} + } + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21' + } +end + +describe "Level 2" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar" + } + } + context "Operator +:" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar' + } + end + context "Operator #:" do + it_behaves_like 'expands', { + 'X{#var}' => 'X#value', + 'X{#hello}' => 'X#Hello%20World!' 
+ } + end +end + +describe "Level 3" do + subject { + { + :var => "value", + :hello => "Hello World!", + :empty => "", + :path => "/foo/bar", + :x => "1024", + :y => "768" + } + } + context "Operator nil (multiple vars):" do + it_behaves_like 'expands', { + 'map?{x,y}' => 'map?1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768' + } + end + context "Operator + (multiple vars):" do + it_behaves_like 'expands', { + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here' + } + end + context "Operator # (multiple vars):" do + it_behaves_like 'expands', { + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here' + } + end + context "Operator ." do + it_behaves_like 'expands', { + 'X{.var}' => 'X.value', + 'X{.x,y}' => 'X.1024.768' + } + end + context "Operator /" do + it_behaves_like 'expands', { + '{/var}' => '/value', + '{/var,x}/here' => '/value/1024/here' + } + end + context "Operator ;" do + it_behaves_like 'expands', { + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty' + } + end + context "Operator ?" do + it_behaves_like 'expands', { + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=' + } + end + context "Operator &" do + it_behaves_like 'expands', { + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=' + } + end +end + +describe "Level 4" do + subject { + { + :var => "value", + :hello => "Hello World!", + :path => "/foo/bar", + :semi => ";", + :list => %w(red green blue), + :keys => {"semi" => ';', "dot" => '.', :comma => ','} + } + } + context "Expansion with value modifiers" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => 'semi,%3B,dot,.,comma,%2C', + '{keys*}' => 'semi=%3B,dot=.,comma=%2C', + } + end + context "Operator + with value modifiers" do + it_behaves_like 'expands', { + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => 'semi,;,dot,.,comma,,', + '{+keys*}' => 'semi=;,dot=.,comma=,', + } + end + context "Operator # with value modifiers" do + it_behaves_like 'expands', { + '{#path:6}/here' => '#/foo/b/here', + '{#list}' => '#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => '#semi,;,dot,.,comma,,', + '{#keys*}' => '#semi=;,dot=.,comma=,', + } + end + context "Operator . with value modifiers" do + it_behaves_like 'expands', { + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => 'X.semi,%3B,dot,.,comma,%2C', + 'X{.keys*}' => 'X.semi=%3B.dot=..comma=%2C', + } + end + context "Operator / with value modifiers" do + it_behaves_like 'expands', { + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => '/semi,%3B,dot,.,comma,%2C', + '{/keys*}' => '/semi=%3B/dot=./comma=%2C', + } + end + context "Operator ; with value modifiers" do + it_behaves_like 'expands', { + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => ';keys=semi,%3B,dot,.,comma,%2C', + '{;keys*}' => ';semi=%3B;dot=.;comma=%2C', + } + end + context "Operator ? 
with value modifiers" do + it_behaves_like 'expands', { + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => '?keys=semi,%3B,dot,.,comma,%2C', + '{?keys*}' => '?semi=%3B&dot=.&comma=%2C', + } + end + context "Operator & with value modifiers" do + it_behaves_like 'expands', { + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => '&keys=semi,%3B,dot,.,comma,%2C', + '{&keys*}' => '&semi=%3B&dot=.&comma=%2C', + } + end +end +describe "Modifiers" do + subject { + { + :var => "value", + :semi => ";", + :year => [1965, 2000, 2012], + :dom => %w(example com) + } + } + context "length" do + it_behaves_like 'expands', { + '{var:3}' => 'val', + '{var:30}' => 'value', + '{var}' => 'value', + '{semi}' => '%3B', + '{semi:2}' => '%3B' + } + end + context "explode" do + it_behaves_like 'expands', { + 'find{?year*}' => 'find?year=1965&year=2000&year=2012', + 'www{.dom*}' => 'www.example.com', + } + end +end +describe "Expansion" do + subject { + { + :count => ["one", "two", "three"], + :dom => ["example", "com"], + :dub => "me/too", + :hello => "Hello World!", + :half => "50%", + :var => "value", + :who => "fred", + :base => "http://example.com/home/", + :path => "/foo/bar", + :list => ["red", "green", "blue"], + :keys => {"semi" => ";","dot" => ".",:comma => ","}, + :v => "6", + :x => "1024", + :y => "768", + :empty => "", + :empty_keys => {}, + :undef => nil + } + } + context "concatenation" do + it_behaves_like 'expands', { + '{count}' => 'one,two,three', + '{count*}' => 'one,two,three', + '{/count}' => '/one,two,three', + '{/count*}' => '/one/two/three', + '{;count}' => ';count=one,two,three', + '{;count*}' => ';count=one;count=two;count=three', + '{?count}' => '?count=one,two,three', + '{?count*}' => '?count=one&count=two&count=three', + '{&count*}' => '&count=one&count=two&count=three' + } + end + context "simple expansion" do + it_behaves_like 'expands', { + '{var}' => 'value', + '{hello}' => 'Hello%20World%21', + '{half}' => '50%25', + 'O{empty}X' => 'OX', + 'O{undef}X' => 'OX', + '{x,y}' => '1024,768', + '{x,hello,y}' => '1024,Hello%20World%21,768', + '?{x,empty}' => '?1024,', + '?{x,undef}' => '?1024', + '?{undef,y}' => '?768', + '{var:3}' => 'val', + '{var:30}' => 'value', + '{list}' => 'red,green,blue', + '{list*}' => 'red,green,blue', + '{keys}' => 'semi,%3B,dot,.,comma,%2C', + '{keys*}' => 'semi=%3B,dot=.,comma=%2C', + } + end + context "reserved expansion (+)" do + it_behaves_like 'expands', { + '{+var}' => 'value', + '{+hello}' => 'Hello%20World!', + '{+half}' => '50%25', + '{base}index' => 'http%3A%2F%2Fexample.com%2Fhome%2Findex', + '{+base}index' => 'http://example.com/home/index', + 'O{+empty}X' => 'OX', + 'O{+undef}X' => 'OX', + '{+path}/here' => '/foo/bar/here', + 'here?ref={+path}' => 'here?ref=/foo/bar', + 'up{+path}{var}/here' => 'up/foo/barvalue/here', + '{+x,hello,y}' => '1024,Hello%20World!,768', + '{+path,x}/here' => '/foo/bar,1024/here', + '{+path:6}/here' => '/foo/b/here', + '{+list}' => 'red,green,blue', + '{+list*}' => 'red,green,blue', + '{+keys}' => 'semi,;,dot,.,comma,,', + '{+keys*}' => 'semi=;,dot=.,comma=,', + } + end + context "fragment expansion (#)" do + it_behaves_like 'expands', { + '{#var}' => '#value', + '{#hello}' => '#Hello%20World!', + '{#half}' => '#50%25', + 'foo{#empty}' => 'foo#', + 'foo{#undef}' => 'foo', + '{#x,hello,y}' => '#1024,Hello%20World!,768', + '{#path,x}/here' => '#/foo/bar,1024/here', + 
'{#path:6}/here' => '#/foo/b/here', + '{#list}' => '#red,green,blue', + '{#list*}' => '#red,green,blue', + '{#keys}' => '#semi,;,dot,.,comma,,', + '{#keys*}' => '#semi=;,dot=.,comma=,', + } + end + context "label expansion (.)" do + it_behaves_like 'expands', { + '{.who}' => '.fred', + '{.who,who}' => '.fred.fred', + '{.half,who}' => '.50%25.fred', + 'www{.dom*}' => 'www.example.com', + 'X{.var}' => 'X.value', + 'X{.empty}' => 'X.', + 'X{.undef}' => 'X', + 'X{.var:3}' => 'X.val', + 'X{.list}' => 'X.red,green,blue', + 'X{.list*}' => 'X.red.green.blue', + 'X{.keys}' => 'X.semi,%3B,dot,.,comma,%2C', + 'X{.keys*}' => 'X.semi=%3B.dot=..comma=%2C', + 'X{.empty_keys}' => 'X', + 'X{.empty_keys*}' => 'X' + } + end + context "path expansion (/)" do + it_behaves_like 'expands', { + '{/who}' => '/fred', + '{/who,who}' => '/fred/fred', + '{/half,who}' => '/50%25/fred', + '{/who,dub}' => '/fred/me%2Ftoo', + '{/var}' => '/value', + '{/var,empty}' => '/value/', + '{/var,undef}' => '/value', + '{/var,x}/here' => '/value/1024/here', + '{/var:1,var}' => '/v/value', + '{/list}' => '/red,green,blue', + '{/list*}' => '/red/green/blue', + '{/list*,path:4}' => '/red/green/blue/%2Ffoo', + '{/keys}' => '/semi,%3B,dot,.,comma,%2C', + '{/keys*}' => '/semi=%3B/dot=./comma=%2C', + } + end + context "path-style expansion (;)" do + it_behaves_like 'expands', { + '{;who}' => ';who=fred', + '{;half}' => ';half=50%25', + '{;empty}' => ';empty', + '{;v,empty,who}' => ';v=6;empty;who=fred', + '{;v,bar,who}' => ';v=6;who=fred', + '{;x,y}' => ';x=1024;y=768', + '{;x,y,empty}' => ';x=1024;y=768;empty', + '{;x,y,undef}' => ';x=1024;y=768', + '{;hello:5}' => ';hello=Hello', + '{;list}' => ';list=red,green,blue', + '{;list*}' => ';list=red;list=green;list=blue', + '{;keys}' => ';keys=semi,%3B,dot,.,comma,%2C', + '{;keys*}' => ';semi=%3B;dot=.;comma=%2C', + } + end + context "form query expansion (?)" do + it_behaves_like 'expands', { + '{?who}' => '?who=fred', + '{?half}' => '?half=50%25', + '{?x,y}' => '?x=1024&y=768', + '{?x,y,empty}' => '?x=1024&y=768&empty=', + '{?x,y,undef}' => '?x=1024&y=768', + '{?var:3}' => '?var=val', + '{?list}' => '?list=red,green,blue', + '{?list*}' => '?list=red&list=green&list=blue', + '{?keys}' => '?keys=semi,%3B,dot,.,comma,%2C', + '{?keys*}' => '?semi=%3B&dot=.&comma=%2C', + } + end + context "form query expansion (&)" do + it_behaves_like 'expands', { + '{&who}' => '&who=fred', + '{&half}' => '&half=50%25', + '?fixed=yes{&x}' => '?fixed=yes&x=1024', + '{&x,y,empty}' => '&x=1024&y=768&empty=', + '{&x,y,undef}' => '&x=1024&y=768', + '{&var:3}' => '&var=val', + '{&list}' => '&list=red,green,blue', + '{&list*}' => '&list=red&list=green&list=blue', + '{&keys}' => '&keys=semi,%3B,dot,.,comma,%2C', + '{&keys*}' => '&semi=%3B&dot=.&comma=%2C', + } + end + context "non-string key in match data" do + subject {Addressable::Template.new("http://example.com/{one}")} + + it "raises TypeError" do + expect { subject.expand(Object.new => "1") }.to raise_error TypeError + end + end +end + +class ExampleTwoProcessor + def self.restore(name, value) + return value.gsub(/-/, " ") if name == "query" + return value + end + + def self.match(name) + return ".*?" if name == "first" + return ".*" + end + def self.validate(name, value) + return !!(value =~ /^[\w ]+$/) if name == "query" + return true + end + + def self.transform(name, value) + return value.gsub(/ /, "+") if name == "query" + return value + end +end + +class DumbProcessor + def self.match(name) + return ".*?" 
if name == "first" + end +end + +describe Addressable::Template do + describe 'initialize' do + context 'with a non-string' do + it 'raises a TypeError' do + expect { Addressable::Template.new(nil) }.to raise_error(TypeError) + end + end + end + + describe 'freeze' do + subject { Addressable::Template.new("http://example.com/{first}/{+second}/") } + it 'freezes the template' do + expect(subject.freeze).to be_frozen + end + end + + describe "Matching" do + let(:uri){ + Addressable::URI.parse( + "http://example.com/search/an-example-search-query/" + ) + } + let(:uri2){ + Addressable::URI.parse("http://example.com/a/b/c/") + } + let(:uri3){ + Addressable::URI.parse("http://example.com/;a=1;b=2;c=3;first=foo") + } + let(:uri4){ + Addressable::URI.parse("http://example.com/?a=1&b=2&c=3&first=foo") + } + let(:uri5){ + "http://example.com/foo" + } + context "first uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/search/{query}/" + ).match(uri, ExampleTwoProcessor) + } + its(:variables){ should == ["query"] } + its(:captures){ should == ["an example search query"] } + end + + context "second uri with ExampleTwoProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, ExampleTwoProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri with DumbProcessor" do + subject { + Addressable::Template.new( + "http://example.com/{first}/{+second}/" + ).match(uri2, DumbProcessor) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", "b/c"] } + end + + context "second uri" do + subject { + Addressable::Template.new( + "http://example.com/{first}{/second*}/" + ).match(uri2) + } + its(:variables){ should == ["first", "second"] } + its(:captures){ should == ["a", ["b","c"]] } + end + context "third uri" do + subject { + Addressable::Template.new( + "http://example.com/{;hash*,first}" + ).match(uri3) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first" => "foo"}, nil] } + end + # Note that this expansion is impossible to revert deterministically - the + # * operator means first could have been a key of hash or a separate key. + # Semantically, a separate key is more likely, but both are possible. 
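+    # (Illustrative aside, not part of the vendored spec: the ambiguity means
+    # that, for example, both of these mappings
+    #
+    #   {"hash" => {"a" => "1", "first" => "foo"}}
+    #   {"hash" => {"a" => "1"}, "first" => "foo"}
+    #
+    # expand "http://example.com/{?hash*,first}" to the same string,
+    # "http://example.com/?a=1&first=foo", so a match cannot tell them apart.)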
+ context "fourth uri" do + subject { + Addressable::Template.new( + "http://example.com/{?hash*,first}" + ).match(uri4) + } + its(:variables){ should == ["hash", "first"] } + its(:captures){ should == [ + {"a" => "1", "b" => "2", "c" => "3", "first"=> "foo"}, nil] } + end + context "fifth uri" do + subject { + Addressable::Template.new( + "http://example.com/{path}{?hash*,first}" + ).match(uri5) + } + its(:variables){ should == ["path", "hash", "first"] } + its(:captures){ should == ["foo", nil, nil] } + end + end + + describe 'match' do + subject { Addressable::Template.new('http://example.com/first/second/') } + context 'when the URI is the same as the template' do + it 'returns the match data itself with an empty mapping' do + uri = Addressable::URI.parse('http://example.com/first/second/') + match_data = subject.match(uri) + expect(match_data).to be_an Addressable::Template::MatchData + expect(match_data.uri).to eq(uri) + expect(match_data.template).to eq(subject) + expect(match_data.mapping).to be_empty + expect(match_data.inspect).to be_an String + end + end + end + + describe "extract" do + let(:template) { + Addressable::Template.new( + "http://{host}{/segments*}/{?one,two,bogus}{#fragment}" + ) + } + let(:uri){ "http://example.com/a/b/c/?one=1&two=2#foo" } + let(:uri2){ "http://example.com/a/b/c/#foo" } + it "should be able to extract with queries" do + expect(template.extract(uri)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => "1", + "bogus" => nil, + "two" => "2", + "fragment" => "foo" + }) + end + it "should be able to extract without queries" do + expect(template.extract(uri2)).to eq({ + "host" => "example.com", + "segments" => %w(a b c), + "one" => nil, + "bogus" => nil, + "two" => nil, + "fragment" => "foo" + }) + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.extract("/path") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.extract("/path?page=1") + expect(data["page"]).to eq("1") + expect(data["per_page"]).to eq(nil) + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.extract("/path?per_page=1") + expect(data["page"]).to eq(nil) + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.extract("/path?page=2&per_page=1") + expect(data["page"]).to eq("2") + expect(data["per_page"]).to eq("1") + expect(data.keys.sort).to eq(['page', 'per_page']) + end + end + end + + describe "Partial expand with symbols" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1", :three => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand form style query with missing param at beginning" do + subject { + Addressable::Template.new("http://example.com/{?one,two}/") + } + it "builds a new pattern" do + 
expect(subject.partial_expand(:two => "2").pattern).to eq( + "http://example.com/?two=2{&one}/" + ) + end + end + context "issue #307 - partial_expand form query with nil params" do + subject do + Addressable::Template.new("http://example.com/{?one,two,three}/") + end + it "builds a new pattern with two=nil" do + expect(subject.partial_expand(two: nil).pattern).to eq( + "http://example.com/{?one}{&three}/" + ) + end + it "builds a new pattern with one=nil and two=nil" do + expect(subject.partial_expand(one: nil, two: nil).pattern).to eq( + "http://example.com/{?three}/" + ) + end + it "builds a new pattern with one=1 and two=nil" do + expect(subject.partial_expand(one: 1, two: nil).pattern).to eq( + "http://example.com/?one=1{&three}/" + ) + end + it "builds a new pattern with one=nil and two=2" do + expect(subject.partial_expand(one: nil, two: 2).pattern).to eq( + "http://example.com/?two=2{&three}/" + ) + end + it "builds a new pattern with one=nil" do + expect(subject.partial_expand(one: nil).pattern).to eq( + "http://example.com/{?two}{&three}/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand(:one => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + context "partial expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/{resource}/{query}/") + end + it "normalizes unicode by default" do + template = subject.partial_expand("query" => "Cafe\u0301") + expect(template.pattern).to eq( + "http://example.com/{resource}/Caf%C3%A9/" + ) + end + + it "normalizes as unicode even with wrong encoding specified" do + template = subject.partial_expand("query" => "Cafe\u0301".b) + expect(template.pattern).to eq( + "http://example.com/{resource}/Caf%C3%A9/" + ) + end + + it "raises on invalid unicode input" do + expect { + subject.partial_expand("query" => "M\xE9thode".b) + }.to raise_error(ArgumentError, "invalid byte sequence in UTF-8") + end + + it "does not normalize unicode when byte semantics requested" do + template = subject.partial_expand({"query" => "Cafe\u0301"}, nil, false) + expect(template.pattern).to eq( + "http://example.com/{resource}/Cafe%CC%81/" + ) + end + end + end + describe "Partial expand with strings" do + context "partial_expand with two simple values" do + subject { + Addressable::Template.new("http://example.com/{one}/{two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1/{two}/" + ) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + 
subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + describe "Expand" do + context "expand with unicode values" do + subject do + Addressable::Template.new("http://example.com/search/{query}/") + end + it "normalizes unicode by default" do + uri = subject.expand("query" => "Cafe\u0301").to_str + expect(uri).to eq("http://example.com/search/Caf%C3%A9/") + end + + it "normalizes as unicode even with wrong encoding specified" do + uri = subject.expand("query" => "Cafe\u0301".b).to_str + expect(uri).to eq("http://example.com/search/Caf%C3%A9/") + end + + it "raises on invalid unicode input" do + expect { + subject.expand("query" => "M\xE9thode".b).to_str + }.to raise_error(ArgumentError, "invalid byte sequence in UTF-8") + end + + it "does not normalize unicode when byte semantics requested" do + uri = subject.expand({ "query" => "Cafe\u0301" }, nil, false).to_str + expect(uri).to eq("http://example.com/search/Cafe%CC%81/") + end + end + context "expand with a processor" do + subject { + Addressable::Template.new("http://example.com/search/{query}/") + } + it "processes spaces" do + expect(subject.expand({"query" => "an example search query"}, + ExampleTwoProcessor).to_str).to eq( + "http://example.com/search/an+example+search+query/" + ) + end + it "validates" do + expect{ + subject.expand({"query" => "Bogus!"}, + ExampleTwoProcessor).to_str + }.to raise_error(Addressable::Template::InvalidTemplateValueError) + end + end + context "partial_expand query with missing param in middle" do + subject { + Addressable::Template.new("http://example.com/{?one,two,three}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1", "three" => "3").pattern).to eq( + "http://example.com/?one=1{&two}&three=3/" + ) + end + end + context "partial_expand with query string" do + subject { + Addressable::Template.new("http://example.com/{?two,one}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/?one=1{&two}/" + ) + end + end + context "partial_expand with path operator" do + subject { + Addressable::Template.new("http://example.com{/one,two}/") + } + it "builds a new pattern" do + expect(subject.partial_expand("one" => "1").pattern).to eq( + "http://example.com/1{/two}/" + ) + end + end + end + context "Matching with operators" do + describe "Level 1:" do + subject { Addressable::Template.new("foo{foo}/{bar}baz") } + it "can match" do + data = subject.match("foofoo/bananabaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("banana") + end + it "can fail" do + expect(subject.match("bar/foo")).to be_nil + expect(subject.match("foobaz")).to be_nil + end + it "can match empty" do + data = subject.match("foo/baz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 2:" do + subject { Addressable::Template.new("foo{+foo}{#bar}baz") } + it "can match" do + data = subject.match("foo/test/banana#bazbaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("baz") + end + it "can match empty level 2 #" do + data = subject.match("foo/test/bananabaz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq(nil) + data = 
subject.match("foo/test/banana#baz") + expect(data.mapping["foo"]).to eq("/test/banana") + expect(data.mapping["bar"]).to eq("") + end + it "can match empty level 2 +" do + data = subject.match("foobaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq(nil) + data = subject.match("foo#barbaz") + expect(data.mapping["foo"]).to eq(nil) + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + + describe "Level 3:" do + context "no operator" do + subject { Addressable::Template.new("foo{foo,bar}baz") } + it "can match" do + data = subject.match("foofoo,barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "+ operator" do + subject { Addressable::Template.new("foo{+foo,bar}baz") } + it "can match" do + data = subject.match("foofoo/bar,barbaz") + expect(data.mapping["bar"]).to eq("foo/bar,bar") + expect(data.mapping["foo"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context ". operator" do + subject { Addressable::Template.new("foo{.foo,bar}baz") } + it "can match" do + data = subject.match("foo.foo.barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "/ operator" do + subject { Addressable::Template.new("foo{/foo,bar}baz") } + it "can match" do + data = subject.match("foo/foo/barbaz") + expect(data.mapping["foo"]).to eq("foo") + expect(data.mapping["bar"]).to eq("bar") + end + it "lists vars" do + expect(subject.variables).to eq(["foo", "bar"]) + end + end + context "; operator" do + subject { Addressable::Template.new("foo{;foo,bar,baz}baz") } + it "can match" do + data = subject.match("foo;foo=bar%20baz;bar=foo;bazbaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + expect(data.mapping["baz"]).to eq("") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar baz)) + end + end + context "? 
operator" do + context "test" do + subject { Addressable::Template.new("foo{?foo,bar}baz") } + it "can match" do + data = subject.match("foo?foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + + context "issue #137" do + subject { Addressable::Template.new('/path{?page,per_page}') } + + it "can match empty" do + data = subject.match("/path") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match first var" do + data = subject.match("/path?page=1") + expect(data.mapping["page"]).to eq("1") + expect(data.mapping["per_page"]).to eq(nil) + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match second var" do + data = subject.match("/path?per_page=1") + expect(data.mapping["page"]).to eq(nil) + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + + it "can match both vars" do + data = subject.match("/path?page=2&per_page=1") + expect(data.mapping["page"]).to eq("2") + expect(data.mapping["per_page"]).to eq("1") + expect(data.mapping.keys.sort).to eq(['page', 'per_page']) + end + end + + context "issue #71" do + subject { Addressable::Template.new("http://cyberscore.dev/api/users{?username}") } + it "can match" do + data = subject.match("http://cyberscore.dev/api/users?username=foobaz") + expect(data.mapping["username"]).to eq("foobaz") + end + it "lists vars" do + expect(subject.variables).to eq(%w(username)) + expect(subject.keys).to eq(%w(username)) + end + end + end + context "& operator" do + subject { Addressable::Template.new("foo{&foo,bar}baz") } + it "can match" do + data = subject.match("foo&foo=bar%20baz&bar=foobaz") + expect(data.mapping["foo"]).to eq("bar baz") + expect(data.mapping["bar"]).to eq("foo") + end + it "lists vars" do + expect(subject.variables).to eq(%w(foo bar)) + end + end + end + end + + context "support regexes:" do + context "EXPRESSION" do + subject { Addressable::Template::EXPRESSION } + it "should be able to match an expression" do + expect(subject).to match("{foo}") + expect(subject).to match("{foo,9}") + expect(subject).to match("{foo.bar,baz}") + expect(subject).to match("{+foo.bar,baz}") + expect(subject).to match("{foo,foo%20bar}") + expect(subject).to match("{#foo:20,baz*}") + expect(subject).to match("stuff{#foo:20,baz*}things") + end + it "should fail on non vars" do + expect(subject).not_to match("!{foo") + expect(subject).not_to match("{foo.bar.}") + expect(subject).not_to match("!{}") + end + end + context "VARNAME" do + subject { Addressable::Template::VARNAME } + it "should be able to match a variable" do + expect(subject).to match("foo") + expect(subject).to match("9") + expect(subject).to match("foo.bar") + expect(subject).to match("foo_bar") + expect(subject).to match("foo_bar.baz") + expect(subject).to match("foo%20bar") + expect(subject).to match("foo%20bar.baz") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("foo.bar.") + expect(subject).not_to match("foo%2%00bar") + expect(subject).not_to match("foo_ba%r") + expect(subject).not_to match("foo_bar*") + expect(subject).not_to match("foo_bar:20") + end + + it 'should parse in a reasonable time' do + expect do + Timeout.timeout(0.1) do + expect(subject).not_to match("0"*25 + "!") + 
end + end.not_to raise_error + end + end + context "VARIABLE_LIST" do + subject { Addressable::Template::VARIABLE_LIST } + it "should be able to match a variable list" do + expect(subject).to match("foo,bar") + expect(subject).to match("foo") + expect(subject).to match("foo,bar*,baz") + expect(subject).to match("foo.bar,bar_baz*,baz:12") + end + it "should fail on non vars" do + expect(subject).not_to match(",foo,bar*,baz") + expect(subject).not_to match("foo,*bar,baz") + expect(subject).not_to match("foo,,bar*,baz") + end + end + context "VARSPEC" do + subject { Addressable::Template::VARSPEC } + it "should be able to match a variable with modifier" do + expect(subject).to match("9:8") + expect(subject).to match("foo.bar*") + expect(subject).to match("foo_bar:12") + expect(subject).to match("foo_bar.baz*") + expect(subject).to match("foo%20bar:12") + expect(subject).to match("foo%20bar.baz*") + end + it "should fail on non vars" do + expect(subject).not_to match("!foo") + expect(subject).not_to match("*foo") + expect(subject).not_to match("fo*o") + expect(subject).not_to match("fo:o") + expect(subject).not_to match("foo:") + end + end + end +end + +describe Addressable::Template::MatchData do + let(:template) { Addressable::Template.new('{foo}/{bar}') } + subject(:its) { template.match('ab/cd') } + its(:uri) { should == Addressable::URI.parse('ab/cd') } + its(:template) { should == template } + its(:mapping) { should == { 'foo' => 'ab', 'bar' => 'cd' } } + its(:variables) { should == ['foo', 'bar'] } + its(:keys) { should == ['foo', 'bar'] } + its(:names) { should == ['foo', 'bar'] } + its(:values) { should == ['ab', 'cd'] } + its(:captures) { should == ['ab', 'cd'] } + its(:to_a) { should == ['ab/cd', 'ab', 'cd'] } + its(:to_s) { should == 'ab/cd' } + its(:string) { should == its.to_s } + its(:pre_match) { should == "" } + its(:post_match) { should == "" } + + describe 'values_at' do + it 'returns an array with the values' do + expect(its.values_at(0, 2)).to eq(['ab/cd', 'cd']) + end + it 'allows mixing integer an string keys' do + expect(its.values_at('foo', 1)).to eq(['ab', 'ab']) + end + it 'accepts unknown keys' do + expect(its.values_at('baz', 'foo')).to eq([nil, 'ab']) + end + end + + describe '[]' do + context 'string key' do + it 'returns the corresponding capture' do + expect(its['foo']).to eq('ab') + expect(its['bar']).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its['baz']).to be_nil + end + end + context 'symbol key' do + it 'returns the corresponding capture' do + expect(its[:foo]).to eq('ab') + expect(its[:bar]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[:baz]).to be_nil + end + end + context 'integer key' do + it 'returns the full URI for index 0' do + expect(its[0]).to eq('ab/cd') + end + it 'returns the corresponding capture' do + expect(its[1]).to eq('ab') + expect(its[2]).to eq('cd') + end + it 'returns nil for unknown keys' do + expect(its[3]).to be_nil + end + end + context 'other key' do + it 'raises an exception' do + expect { its[Object.new] }.to raise_error(TypeError) + end + end + context 'with length' do + it 'returns an array starting at index with given length' do + expect(its[0, 2]).to eq(['ab/cd', 'ab']) + expect(its[2, 1]).to eq(['cd']) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/uri_spec.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/uri_spec.rb new file mode 100644 index 0000000..c54fc3f --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/addressable/uri_spec.rb @@ -0,0 +1,6801 @@ +# frozen_string_literal: true + +# Copyright (C) Bob Aman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +require "spec_helper" + +require "addressable/uri" +require "uri" +require "ipaddr" + +if !"".respond_to?("force_encoding") + class String + def force_encoding(encoding) + @encoding = encoding + end + + def encoding + @encoding ||= Encoding::ASCII_8BIT + end + end + + class Encoding + def initialize(name) + @name = name + end + + def to_s + return @name + end + + UTF_8 = Encoding.new("UTF-8") + ASCII_8BIT = Encoding.new("US-ASCII") + end +end + +module Fake + module URI + class HTTP + def initialize(uri) + @uri = uri + end + + def to_s + return @uri.to_s + end + + alias :to_str :to_s + end + end +end + +describe Addressable::URI, "when created with a non-numeric port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a invalid encoded port number" do + it "should raise an error" do + expect do + Addressable::URI.new(:port => "%eb") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a non-string scheme" do + it "should raise an error" do + expect do + Addressable::URI.new(:scheme => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string password" do + it "should raise an error" do + expect do + Addressable::URI.new(:password => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string userinfo" do + it "should raise an error" do + expect do + Addressable::URI.new(:userinfo => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string authority" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string path" do + it "should raise an error" do + expect do + Addressable::URI.new(:path => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string query" do + it "should raise an error" do + expect do + Addressable::URI.new(:query => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a non-string fragment" do + it "should raise an error" do + expect do + 
Addressable::URI.new(:fragment => :bogus) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when created with a scheme but no hierarchical " + + "segment" do + it "should raise an error" do + expect do + Addressable::URI.parse("http:") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "quote handling" do + describe 'in host name' do + it "should raise an error for single quote" do + expect do + Addressable::URI.parse("http://local\"host/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + end +end + +describe Addressable::URI, "newline normalization" do + it "should not accept newlines in scheme" do + expect do + Addressable::URI.parse("ht%0atp://localhost/") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not unescape newline in path" do + uri = Addressable::URI.parse("http://localhost/%0a").normalize + expect(uri.to_s).to eq("http://localhost/%0A") + end + + it "should not unescape newline in hostname" do + uri = Addressable::URI.parse("http://local%0ahost/").normalize + expect(uri.to_s).to eq("http://local%0Ahost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://foo%0abar@localhost/").normalize + expect(uri.to_s).to eq("http://foo%0Abar@localhost/") + end + + it "should not unescape newline in username" do + uri = Addressable::URI.parse("http://example:foo%0abar@example/").normalize + expect(uri.to_s).to eq("http://example:foo%0Abar@example/") + end + + it "should not accept newline in hostname" do + uri = Addressable::URI.parse("http://localhost/") + expect do + uri.host = "local\nhost" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with ambiguous path" do + it "should raise an error" do + expect do + Addressable::URI.parse("::http") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with an invalid host" do + it "should raise an error" do + expect do + Addressable::URI.new(:host => "") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "sub-delims characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::SUB_DELIMS.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created with a host consisting of " + + "unreserved characters" do + it "should not raise an error" do + expect do + Addressable::URI.new( + :host => Addressable::URI::CharacterClasses::UNRESERVED.gsub(/\\/, '') + ) + end.not_to raise_error + end +end + +describe Addressable::URI, "when created from nil components" do + before do + @uri = Addressable::URI.new + end + + it "should have a nil site value" do + expect(@uri.site).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should be an empty uri" do + expect(@uri.to_s).to eq("") + end + + it "should have a nil default port" do + expect(@uri.default_port).to eq(nil) + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should raise an error if the scheme is set to whitespace" do + expect do + @uri.scheme = "\t \n" + end.to raise_error(Addressable::URI::InvalidURIError, /'\t \n'/) + end + + it "should raise an error if the scheme is set to all digits" do + expect do + @uri.scheme = "123" + end.to 
raise_error(Addressable::URI::InvalidURIError, /'123'/) + end + + it "should raise an error if the scheme begins with a digit" do + expect do + @uri.scheme = "1scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'1scheme'/) + end + + it "should raise an error if the scheme begins with a plus" do + expect do + @uri.scheme = "+scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\+scheme'/) + end + + it "should raise an error if the scheme begins with a dot" do + expect do + @uri.scheme = ".scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'\.scheme'/) + end + + it "should raise an error if the scheme begins with a dash" do + expect do + @uri.scheme = "-scheme" + end.to raise_error(Addressable::URI::InvalidURIError, /'-scheme'/) + end + + it "should raise an error if the scheme contains an illegal character" do + expect do + @uri.scheme = "scheme!" + end.to raise_error(Addressable::URI::InvalidURIError, /'scheme!'/) + end + + it "should raise an error if the scheme contains whitespace" do + expect do + @uri.scheme = "sch eme" + end.to raise_error(Addressable::URI::InvalidURIError, /'sch eme'/) + end + + it "should raise an error if the scheme contains a newline" do + expect do + @uri.scheme = "sch\neme" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.user = "user" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.password = "pass" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.scheme = "http" + @uri.fragment = "fragment" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should raise an error if set into an invalid state" do + expect do + @uri.fragment = "fragment" + @uri.scheme = "http" + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when initialized from individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", + :user => "user", + :password => "password", + :host => "example.com", + :port => 8080, + :path => "/path", + :query => "query=value", + :fragment => "fragment" + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'com' for #tld" do + expect(@uri.tld).to eq("com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end 
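+ # The #normalized_* readers checked below apply RFC 3986 syntax-based + # normalization to each component; this URI is already in normal form, so + # every normalized value is expected to match its raw counterpart.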
+ + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when initialized from " + + "frozen individual components" do + before do + @uri = Addressable::URI.new( + :scheme => "http".freeze, + :user => "user".freeze, + :password => "password".freeze, + :host => "example.com".freeze, + :port => "8080".freeze, + :path => "/path".freeze, + :query => "query=value".freeze, + :fragment => "fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for 
#normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a frozen string" do + before do + @uri = Addressable::URI.parse( + "http://user:password@example.com:8080/path?query=value#fragment".freeze + ) + end + + it "returns 'http' for #scheme" do + expect(@uri.scheme).to eq("http") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + end + + it "returns 'user' for #user" do + expect(@uri.user).to eq("user") + end + + it "returns 'user' for #normalized_user" do + expect(@uri.normalized_user).to eq("user") + end + + it "returns 'password' for #password" do + expect(@uri.password).to eq("password") + end + + it "returns 'password' for #normalized_password" do + expect(@uri.normalized_password).to eq("password") + end + + it "returns 'user:password' for #userinfo" do + expect(@uri.userinfo).to eq("user:password") + end + + it "returns 'user:password' for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq("user:password") + end + + it "returns 'example.com' for #host" do + expect(@uri.host).to eq("example.com") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + end + + it "returns 'user:password@example.com:8080' for #authority" do + expect(@uri.authority).to eq("user:password@example.com:8080") + end + + it "returns 'user:password@example.com:8080' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("user:password@example.com:8080") + end + + it "returns 8080 for #port" do + expect(@uri.port).to eq(8080) + end + + it "returns 8080 for #normalized_port" do + expect(@uri.normalized_port).to eq(8080) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'http://user:password@example.com:8080' for #site" do + expect(@uri.site).to eq("http://user:password@example.com:8080") + end + + it "returns 'http://user:password@example.com:8080' for #normalized_site" do + expect(@uri.normalized_site).to 
eq("http://user:password@example.com:8080") + end + + it "returns '/path' for #path" do + expect(@uri.path).to eq("/path") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + end + + it "returns 'query=value' for #query" do + expect(@uri.query).to eq("query=value") + end + + it "returns 'query=value' for #normalized_query" do + expect(@uri.normalized_query).to eq("query=value") + end + + it "returns 'fragment' for #fragment" do + expect(@uri.fragment).to eq("fragment") + end + + it "returns 'fragment' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("fragment") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq( + "http://user:password@example.com:8080/path?query=value#fragment" + ) + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should not be frozen" do + expect(@uri).not_to be_frozen + end + + it "should allow destructive operations" do + expect { @uri.normalize! }.not_to raise_error + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.new.freeze + end + + it "returns nil for #scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "returns nil for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq(nil) + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns nil for #host" do + expect(@uri.host).to eq(nil) + end + + it "returns nil for #normalized_host" do + expect(@uri.normalized_host).to eq(nil) + end + + it "returns nil for #authority" do + expect(@uri.authority).to eq(nil) + end + + it "returns nil for #normalized_authority" do + expect(@uri.normalized_authority).to eq(nil) + end + + it "returns nil for #port" do + expect(@uri.port).to eq(nil) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + end + + it "returns nil for #default_port" do + expect(@uri.default_port).to eq(nil) + end + + it "returns nil for #site" do + expect(@uri.site).to eq(nil) + end + + it "returns nil for #normalized_site" do + expect(@uri.normalized_site).to eq(nil) + end + + it "returns '' for #path" do + expect(@uri.path).to eq('') + end + + it "returns '' for #normalized_path" do + expect(@uri.normalized_path).to eq('') + end + + it "returns nil for #query" do + expect(@uri.query).to eq(nil) + end + + it "returns nil for #normalized_query" do + expect(@uri.normalized_query).to eq(nil) + end + + it "returns nil for #fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "returns nil for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq(nil) + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('') + end + + it "should be empty" do + expect(@uri).to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive operations" do + 
expect { @uri.normalize! }.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when frozen" do + before do + @uri = Addressable::URI.parse( + "HTTP://example.com.:%38%30/%70a%74%68?a=%31#1%323" + ).freeze + end + + it "returns 'HTTP' for #scheme" do + expect(@uri.scheme).to eq("HTTP") + end + + it "returns 'http' for #normalized_scheme" do + expect(@uri.normalized_scheme).to eq("http") + expect(@uri.normalize.scheme).to eq("http") + end + + it "returns nil for #user" do + expect(@uri.user).to eq(nil) + end + + it "returns nil for #normalized_user" do + expect(@uri.normalized_user).to eq(nil) + end + + it "returns nil for #password" do + expect(@uri.password).to eq(nil) + end + + it "returns nil for #normalized_password" do + expect(@uri.normalized_password).to eq(nil) + end + + it "returns nil for #userinfo" do + expect(@uri.userinfo).to eq(nil) + end + + it "returns nil for #normalized_userinfo" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "returns 'example.com.' for #host" do + expect(@uri.host).to eq("example.com.") + end + + it "returns 'example.com' for #normalized_host" do + expect(@uri.normalized_host).to eq("example.com") + expect(@uri.normalize.host).to eq("example.com") + end + + it "returns 'example.com.:80' for #authority" do + expect(@uri.authority).to eq("example.com.:80") + end + + it "returns 'example.com' for #normalized_authority" do + expect(@uri.normalized_authority).to eq("example.com") + expect(@uri.normalize.authority).to eq("example.com") + end + + it "returns 80 for #port" do + expect(@uri.port).to eq(80) + end + + it "returns nil for #normalized_port" do + expect(@uri.normalized_port).to eq(nil) + expect(@uri.normalize.port).to eq(nil) + end + + it "returns 80 for #default_port" do + expect(@uri.default_port).to eq(80) + end + + it "returns 'HTTP://example.com.:80' for #site" do + expect(@uri.site).to eq("HTTP://example.com.:80") + end + + it "returns 'http://example.com' for #normalized_site" do + expect(@uri.normalized_site).to eq("http://example.com") + expect(@uri.normalize.site).to eq("http://example.com") + end + + it "returns '/%70a%74%68' for #path" do + expect(@uri.path).to eq("/%70a%74%68") + end + + it "returns '/path' for #normalized_path" do + expect(@uri.normalized_path).to eq("/path") + expect(@uri.normalize.path).to eq("/path") + end + + it "returns 'a=%31' for #query" do + expect(@uri.query).to eq("a=%31") + end + + it "returns 'a=1' for #normalized_query" do + expect(@uri.normalized_query).to eq("a=1") + expect(@uri.normalize.query).to eq("a=1") + end + + it "returns '/%70a%74%68?a=%31' for #request_uri" do + expect(@uri.request_uri).to eq("/%70a%74%68?a=%31") + end + + it "returns '1%323' for #fragment" do + expect(@uri.fragment).to eq("1%323") + end + + it "returns '123' for #normalized_fragment" do + expect(@uri.normalized_fragment).to eq("123") + expect(@uri.normalize.fragment).to eq("123") + end + + it "returns #hash" do + expect(@uri.hash).not_to be nil + end + + it "returns #to_s" do + expect(@uri.to_s).to eq('HTTP://example.com.:80/%70a%74%68?a=%31#1%323') + expect(@uri.normalize.to_s).to eq('http://example.com/path?a=1#123') + end + + it "should not be empty" do + expect(@uri).not_to be_empty + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not be frozen after duping" do + expect(@uri.dup).not_to be_frozen + end + + it "should not allow destructive 
operations" do + expect { @uri.normalize! }.to raise_error { |error| + expect(error.message).to match(/can't modify frozen/) + expect(error).to satisfy { |e| RuntimeError === e || TypeError === e } + } + end +end + +describe Addressable::URI, "when normalized and then deeply frozen" do + before do + @uri = Addressable::URI.parse( + "http://user:password@example.com:8080/path?query=value#fragment" + ).normalize! + + @uri.instance_variables.each do |var| + @uri.instance_variable_set(var, @uri.instance_variable_get(var).freeze) + end + + @uri.freeze + end + + it "#normalized_scheme should not error" do + expect { @uri.normalized_scheme }.not_to raise_error + end + + it "#normalized_user should not error" do + expect { @uri.normalized_user }.not_to raise_error + end + + it "#normalized_password should not error" do + expect { @uri.normalized_password }.not_to raise_error + end + + it "#normalized_userinfo should not error" do + expect { @uri.normalized_userinfo }.not_to raise_error + end + + it "#normalized_host should not error" do + expect { @uri.normalized_host }.not_to raise_error + end + + it "#normalized_authority should not error" do + expect { @uri.normalized_authority }.not_to raise_error + end + + it "#normalized_port should not error" do + expect { @uri.normalized_port }.not_to raise_error + end + + it "#normalized_site should not error" do + expect { @uri.normalized_site }.not_to raise_error + end + + it "#normalized_path should not error" do + expect { @uri.normalized_path }.not_to raise_error + end + + it "#normalized_query should not error" do + expect { @uri.normalized_query }.not_to raise_error + end + + it "#normalized_fragment should not error" do + expect { @uri.normalized_fragment }.not_to raise_error + end + + it "should be frozen" do + expect(@uri).to be_frozen + end + + it "should not allow destructive operations" do + expect { @uri.normalize! 
}.to raise_error(RuntimeError) + end +end + +describe Addressable::URI, "when created from string components" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com" + ) + end + + it "should have a site value of 'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should be equal to the equivalent parsed URI" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should raise an error if invalid components are omitted" do + expect do + @uri.omit(:bogus) + end.to raise_error(ArgumentError) + expect do + @uri.omit(:scheme, :bogus, :path) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a nil host but " + + "non-nil authority components" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :password => "pass", :port => 80) + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both an authority and a user" do + it "should raise an error" do + expect do + Addressable::URI.new( + :user => "user", :authority => "user@example.com:80" + ) + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with an authority and no port" do + before do + @uri = Addressable::URI.new(:authority => "user@example.com") + end + + it "should not infer a port" do + expect(@uri.port).to eq(nil) + expect(@uri.default_port).to eq(nil) + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a site value of '//user@example.com'" do + expect(@uri.site).to eq("//user@example.com") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when created with a host with trailing dots" do + before do + @uri = Addressable::URI.new(:authority => "example...") + end + + it "should have a stable normalized form" do + expect(@uri.normalize.normalize.normalize.host).to eq( + @uri.normalize.host + ) + end +end + +describe Addressable::URI, "when created with a host with a backslash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example\\example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a slash" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example/example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with a host with a space" do + it "should raise an error" do + expect do + Addressable::URI.new(:authority => "example example") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when created with both a userinfo and a user" do + it "should raise an error" do + expect do + Addressable::URI.new(:user => "user", :userinfo => "user:pass") + end.to raise_error(ArgumentError) + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but a host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :host => "example.com", :path => "path" + ) + end + + it "should prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/path")) + end + + it "should have a site value of 'http://example.com'" do + expect(@uri.site).to eq("http://example.com") + end + + it "should have an origin of 'http://example.com'" do + 
expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when created with a path that hasn't been " + + "prefixed with a '/' but no host specified" do + before do + @uri = Addressable::URI.new( + :scheme => "http", :path => "path" + ) + end + + it "should not prefix a '/' to the path" do + expect(@uri).to eq(Addressable::URI.parse("http:path")) + end + + it "should have a site value of 'http:'" do + expect(@uri.site).to eq("http:") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from an Addressable::URI object" do + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.host = 'www.example.com' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('http://www.example.com/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end + + it "should not have unexpected side-effects" do + original_uri = Addressable::URI.parse("http://example.com/") + new_uri = Addressable::URI.heuristic_parse(original_uri) + new_uri.origin = 'https://www.example.com:8080' + expect(new_uri.host).to eq('www.example.com') + expect(new_uri.to_s).to eq('https://www.example.com:8080/') + expect(original_uri.host).to eq('example.com') + expect(original_uri.to_s).to eq('http://example.com/') + end +end + +describe Addressable::URI, "when parsed from something that looks " + + "like a URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(Fake::URI::HTTP.new("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from a standard library URI object" do + it "should parse without error" do + uri = Addressable::URI.parse(URI.parse("http://example.com/")) + expect do + Addressable::URI.parse(uri) + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from ''" do + before do + @uri = Addressable::URI.parse("") + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 
'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ftp://ftp.is.co.za/rfc/rfc1808.txt'" do + before do + @uri = Addressable::URI.parse("ftp://ftp.is.co.za/rfc/rfc1808.txt") + end + + it "should use the 'ftp' scheme" do + expect(@uri.scheme).to eq("ftp") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'ftp.is.co.za'" do + expect(@uri.host).to eq("ftp.is.co.za") + end + + it "should have inferred_port of 21" do + expect(@uri.inferred_port).to eq(21) + end + + it "should have a path of '/rfc/rfc1808.txt'" do + expect(@uri.path).to eq("/rfc/rfc1808.txt") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'ftp://ftp.is.co.za'" do + expect(@uri.origin).to eq('ftp://ftp.is.co.za') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'http://www.ietf.org/rfc/rfc2396.txt'" do + before do + @uri = Addressable::URI.parse("http://www.ietf.org/rfc/rfc2396.txt") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of 'www.ietf.org'" do + expect(@uri.host).to eq("www.ietf.org") + end + + it "should have inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/rfc/rfc2396.txt'" do + expect(@uri.path).to eq("/rfc/rfc2396.txt") + end + + it "should have a request URI of '/rfc/rfc2396.txt'" do + expect(@uri.request_uri).to eq("/rfc/rfc2396.txt") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + expect(@uri.omit(:scheme).to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + expect(@uri.omit(:path).to_s).to eq("http://www.ietf.org") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme) + expect(@uri.to_s).to eq("//www.ietf.org/rfc/rfc2396.txt") + end + + it "should have an origin of 'http://www.ietf.org'" do + expect(@uri.origin).to eq('http://www.ietf.org') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'ldap://[2001:db8::7]/c=GB?objectClass?one'" do + before do + @uri = Addressable::URI.parse("ldap://[2001:db8::7]/c=GB?objectClass?one") + end + + it "should use the 'ldap' scheme" do + expect(@uri.scheme).to eq("ldap") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a host of '[2001:db8::7]'" do + expect(@uri.host).to eq("[2001:db8::7]") + end + + it "should have inferred_port of 389" do + expect(@uri.inferred_port).to eq(389) + end + + it "should have a path of '/c=GB'" do + expect(@uri.path).to eq("/c=GB") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should not allow request URI assignment" do + expect do + @uri.request_uri = "/" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have a query of 'objectClass?one'" do + expect(@uri.query).to eq("objectClass?one") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should correctly omit components" do + 
expect(@uri.omit(:scheme, :authority).to_s).to eq("/c=GB?objectClass?one") + expect(@uri.omit(:path).to_s).to eq("ldap://[2001:db8::7]?objectClass?one") + end + + it "should correctly omit components destructively" do + @uri.omit!(:scheme, :authority) + expect(@uri.to_s).to eq("/c=GB?objectClass?one") + end + + it "should raise an error if omission would create an invalid URI" do + expect do + @uri.omit(:authority, :path) + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should have an origin of 'ldap://[2001:db8::7]'" do + expect(@uri.origin).to eq('ldap://[2001:db8::7]') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'mailto:John.Doe@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:John.Doe@example.com") + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of 'John.Doe@example.com'" do + expect(@uri.path).to eq("John.Doe@example.com") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 2 of RFC 6068 +describe Addressable::URI, "when parsed from " + + "'mailto:?to=addr1@an.example,addr2@an.example'" do + before do + @uri = Addressable::URI.parse( + "mailto:?to=addr1@an.example,addr2@an.example" + ) + end + + it "should use the 'mailto' scheme" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should have the To: field value parameterized" do + expect(@uri.query_values(Hash)["to"]).to eq( + "addr1@an.example,addr2@an.example" + ) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'news:comp.infosystems.www.servers.unix'" do + before do + @uri = Addressable::URI.parse("news:comp.infosystems.www.servers.unix") + end + + it "should use the 'news' scheme" do + expect(@uri.scheme).to eq("news") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'comp.infosystems.www.servers.unix'" do + expect(@uri.path).to eq("comp.infosystems.www.servers.unix") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'tel:+1-816-555-1212'" do + before do + @uri = Addressable::URI.parse("tel:+1-816-555-1212") + end 
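+ # tel: is a non-hierarchical scheme: the telephone number is carried in + # #path, there is no authority to infer a port from, and the origin + # serializes as the literal string 'null' (the serialization RFC 6454 + # assigns to URIs without a scheme/host/port triple), as verified below.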
+ + it "should use the 'tel' scheme" do + expect(@uri.scheme).to eq("tel") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should have a path of '+1-816-555-1212'" do + expect(@uri.path).to eq("+1-816-555-1212") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'telnet://192.0.2.16:80/'" do + before do + @uri = Addressable::URI.parse("telnet://192.0.2.16:80/") + end + + it "should use the 'telnet' scheme" do + expect(@uri.scheme).to eq("telnet") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of 80" do + expect(@uri.port).to eq(80) + end + + it "should have an inferred_port of 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a default_port of 23" do + expect(@uri.default_port).to eq(23) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'telnet://192.0.2.16:80'" do + expect(@uri.origin).to eq('telnet://192.0.2.16:80') + end +end + +# Section 1.1.2 of RFC 3986 +describe Addressable::URI, "when parsed from " + + "'urn:oasis:names:specification:docbook:dtd:xml:4.1.2'" do + before do + @uri = Addressable::URI.parse( + "urn:oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should use the 'urn' scheme" do + expect(@uri.scheme).to eq("urn") + end + + it "should not have an inferred_port" do + expect(@uri.inferred_port).to eq(nil) + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'oasis:names:specification:docbook:dtd:xml:4.1.2'" do + expect(@uri.path).to eq("oasis:names:specification:docbook:dtd:xml:4.1.2") + end + + it "should not have a request URI" do + expect(@uri.request_uri).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when heuristically parsed from " + + "'192.0.2.16:8000/path'" do + before do + @uri = Addressable::URI.heuristic_parse("192.0.2.16:8000/path") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of '192.0.2.16'" do + expect(@uri.host).to eq("192.0.2.16") + end + + it "should have a port of 8000" do + expect(@uri.port).to eq(8000) + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/path'" do + expect(@uri.path).to eq("/path") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have an origin of 'http://192.0.2.16:8000'" do + expect(@uri.origin).to eq('http://192.0.2.16:8000') + end +end + +describe 
Addressable::URI, "when parsed from " + + "'http://example.com'" do + before do + @uri = Addressable::URI.parse("http://example.com") + end + + it "when inspected, should have the correct URI" do + expect(@uri.inspect).to include("http://example.com") + end + + it "when inspected, should have the correct class name" do + expect(@uri.inspect).to include("Addressable::URI") + end + + it "when inspected, should have the correct object id" do + expect(@uri.inspect).to include("%#0x" % @uri.object_id) + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should be considered ip-based" do + expect(@uri).to be_ip_based + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not have a specified port" do + expect(@uri.port).to eq(nil) + end + + it "should have an empty path" do + expect(@uri.path).to eq("") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + expect(@uri.query_values).to eq(nil) + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should not be exactly equal to 42" do + expect(@uri.eql?(42)).to eq(false) + end + + it "should not be equal to 42" do + expect(@uri == 42).to eq(false) + end + + it "should not be roughly equal to 42" do + expect(@uri === 42).to eq(false) + end + + it "should be exactly equal to http://example.com" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com") + expect(@uri.join!(@uri).to_s).to eq("http://example.com") + end + + it "should be equivalent to http://EXAMPLE.com" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://EXAMPLE.com after assignment" do + @uri.origin = "http://EXAMPLE.com" + expect(@uri.hash).to eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should have a different hash from http://EXAMPLE.com" do + expect(@uri.hash).not_to 
eq(Addressable::URI.parse("http://EXAMPLE.com").hash) + end + + it "should not allow origin assignment without scheme" do + expect do + @uri.origin = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment without host" do + expect do + @uri.origin = "http://" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow origin assignment with bogus type" do + expect do + @uri.origin = :bogus + end.to raise_error(TypeError) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equivalent to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should have a route of '/path/' to 'http://example.com/path/'" do + expect(@uri.route_to("http://example.com/path/")).to eq( + Addressable::URI.parse("/path/") + ) + end + + it "should have a route of '..' from 'http://example.com/path/'" do + expect(@uri.route_from("http://example.com/path/")).to eq( + Addressable::URI.parse("..") + ) + end + + it "should have a route of '#' to 'http://example.com/'" do + expect(@uri.route_to("http://example.com/")).to eq( + Addressable::URI.parse("#") + ) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "when joined with 'relative/path' should be " + + "'http://example.com/relative/path'" do + expect(@uri.join('relative/path')).to eq( + Addressable::URI.parse("http://example.com/relative/path") + ) + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + @uri.join(42) + end.to raise_error(TypeError) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct username after assignment" do + @uri.user = "user@123!" + expect(@uri.user).to eq("user@123!") + expect(@uri.normalized_user).to eq("user%40123%21") + expect(@uri.password).to eq(nil) + expect(@uri.normalize.to_s).to eq("http://user%40123%21@example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "#secret@123!" 
+ expect(@uri.password).to eq("#secret@123!") + expect(@uri.normalized_password).to eq("%23secret%40123%21") + expect(@uri.user).to eq("") + expect(@uri.normalize.to_s).to eq("http://:%23secret%40123%21@example.com/") + expect(@uri.omit(:password).to_s).to eq("http://example.com") + end + + it "should have the correct user/pass after repeated assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "" + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.user = nil + # Username cannot be nil if the password is set + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct user/pass after userinfo assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +# Section 5.1.2 of RFC 2616 +describe Addressable::URI, "when parsed from " + + "'HTTP://www.w3.org/pub/WWW/TheProject.html'" do + before do + @uri = Addressable::URI.parse("HTTP://www.w3.org/pub/WWW/TheProject.html") + end + + it "should have the correct request URI" do + expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/pub/WWW/TheProject.html?" 
+ expect(@uri.request_uri).to eq("/pub/WWW/TheProject.html?") + expect(@uri.path).to eq("/pub/WWW/TheProject.html") + expect(@uri.query).to eq("") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html" + expect(@uri.request_uri).to eq("/some/where/else.html") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq(nil) + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "/some/where/else.html?query?string" + expect(@uri.request_uri).to eq("/some/where/else.html?query?string") + expect(@uri.path).to eq("/some/where/else.html") + expect(@uri.query).to eq("query?string") + end + + it "should have the correct request URI after assignment" do + @uri.request_uri = "?x=y" + expect(@uri.request_uri).to eq("/?x=y") + expect(@uri.path).to eq("/") + expect(@uri.query).to eq("x=y") + end + + it "should raise an error if the site value is set to something bogus" do + expect do + @uri.site = 42 + end.to raise_error(TypeError) + end + + it "should raise an error if the request URI is set to something bogus" do + expect do + @uri.request_uri = 42 + end.to raise_error(TypeError) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "www.w3.org", + :port => nil, + :path => "/pub/WWW/TheProject.html", + :query => nil, + :fragment => nil + }) + end + + it "should have an origin of 'http://www.w3.org'" do + expect(@uri.origin).to eq('http://www.w3.org') + end +end + +describe Addressable::URI, "when parsing IPv6 addresses" do + it "should not raise an error for " + + "'http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[::1]/'" do + Addressable::URI.parse("http://[::1]/") + end + + it "should not raise an error for " + + "'http://[fe80::1]/'" do + Addressable::URI.parse("http://[fe80::1]/") + end + + it "should raise an error for " + + "'http://[]/'" do + expect do + Addressable::URI.parse("http://[]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPv6 address" do + subject { Addressable::URI.parse("http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") } + its(:host) { should == '[3ffe:1900:4545:3:200:f8ff:fe21:67cf]' } + its(:hostname) { should == '3ffe:1900:4545:3:200:f8ff:fe21:67cf' } +end + +describe Addressable::URI, "when assigning IPv6 address" do + it "should allow to set bare IPv6 address as hostname" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should allow to set bare IPv6 address as hostname with IPAddr object" do + uri = Addressable::URI.parse("http://[::1]/") + uri.hostname = IPAddr.new('3ffe:1900:4545:3:200:f8ff:fe21:67cf') + expect(uri.to_s).to eq('http://[3ffe:1900:4545:3:200:f8ff:fe21:67cf]/') + end + + it "should not allow to set bare IPv6 address as host" do + uri = Addressable::URI.parse("http://[::1]/") + skip "not 
checked" + expect do + uri.host = '3ffe:1900:4545:3:200:f8ff:fe21:67cf' + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsing IPvFuture addresses" do + it "should not raise an error for " + + "'http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v9.3ffe:1900:4545:3:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[vff.fe80:0:0:0:200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[v12.fe80::200:f8ff:fe21:67cf]/'" do + Addressable::URI.parse("http://[v12.fe80::200:f8ff:fe21:67cf]/") + end + + it "should not raise an error for " + + "'http://[va0.::1]/'" do + Addressable::URI.parse("http://[va0.::1]/") + end + + it "should not raise an error for " + + "'http://[v255.fe80::1]/'" do + Addressable::URI.parse("http://[v255.fe80::1]/") + end + + it "should raise an error for " + + "'http://[v0.]/'" do + expect do + Addressable::URI.parse("http://[v0.]/") + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/'" do + before do + @uri = Addressable::URI.parse("http://example.com/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to HTTP://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("HTTP://example.com/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://Example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://Example.com/")) + end + + it "should have the correct username after assignment" do + @uri.user = nil + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have the correct password after assignment" do + @uri.password = nil + expect(@uri.password).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should have a request URI of '/'" do + expect(@uri.request_uri).to eq("/") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have the same hash as its duplicate" do + expect(@uri.hash).to eq(@uri.dup.hash) + end + + it "should have a different hash from its equivalent String value" do + expect(@uri.hash).not_to eq(@uri.to_s.hash) + end + + it "should have the same hash as an equal URI" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should be equivalent to http://EXAMPLE.com" do 
+ expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com")) + end + + it "should be equivalent to http://EXAMPLE.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.com:80/")) + end + + it "should have the same hash as http://example.com/" do + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/").hash) + end + + it "should have the same hash as http://example.com after assignment" do + @uri.path = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/? after assignment" do + @uri.query_values = {} + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/?").hash) + end + + it "should have the same hash as http://example.com/# after assignment" do + @uri.fragment = "" + expect(@uri.hash).to eq(Addressable::URI.parse("http://example.com/#").hash) + end + + it "should have a different hash from http://example.com" do + expect(@uri.hash).not_to eq(Addressable::URI.parse("http://example.com").hash) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com?#'" do + before do + @uri = Addressable::URI.parse("http://example.com?#") + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "", + :query => "", + :fragment => "" + }) + end + + it "should have a request URI of '/?'" do + expect(@uri.request_uri).to eq("/?") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq("http://example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://@example.com/'" do + before do + @uri = Addressable::URI.parse("http://@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => nil, + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com./'" do + before do + @uri = Addressable::URI.parse("http://example.com./") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com/'" do + before do + @uri = 
Addressable::URI.parse("http://:@example.com/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => "", + :password => "", + :host => "example.com", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'HTTP://EXAMPLE.COM/'" do + before do + @uri = Addressable::URI.parse("HTTP://EXAMPLE.COM/") + end + + it "should be equivalent to http://example.com" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "HTTP", + :user => nil, + :password => nil, + :host => "EXAMPLE.COM", + :port => nil, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.co.uk/'" do + before do + @uri = Addressable::URI.parse("http://www.example.co.uk/") + end + + it "should have an origin of 'http://www.example.co.uk'" do + expect(@uri.origin).to eq('http://www.example.co.uk') + end + + it "should have a tld of 'co.uk'" do + expect(@uri.tld).to eq('co.uk') + end + + it "should have a domain of 'example.co.uk'" do + expect(@uri.domain).to eq('example.co.uk') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://sub_domain.blogspot.com/'" do + before do + @uri = Addressable::URI.parse("http://sub_domain.blogspot.com/") + end + + it "should have an origin of 'http://sub_domain.blogspot.com'" do + expect(@uri.origin).to eq('http://sub_domain.blogspot.com') + end + + it "should have a tld of 'com'" do + expect(@uri.tld).to eq('com') + end + + it "should have a domain of 'blogspot.com'" do + expect(@uri.domain).to eq('blogspot.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/~smith/'" do + before do + @uri = Addressable::URI.parse("http://example.com/~smith/") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7Esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7Esmith/")) + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to http://example.com/%7esmith/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/%7esmith/")) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%E8'" do + before do + @uri = Addressable::URI.parse("http://example.com/%E8") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + 
expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%E8" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%E8" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path%2Fsegment/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path%2Fsegment/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be equal to 'http://example.com/path%2Fsegment/'" do + expect(@uri.normalize).to be_eql( + Addressable::URI.parse("http://example.com/path%2Fsegment/") + ) + end + + it "should not be equal to 'http://example.com/path/segment/'" do + expect(@uri).not_to eq( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end + + it "should not normalize to 'http://example.com/path/segment/'" do + expect(@uri.normalize).not_to be_eql( + Addressable::URI.parse("http://example.com/path/segment/") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/?%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/?%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/?%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/#%F6'" do + before do + @uri = Addressable::URI.parse("http://example.com/#%F6") + end + + it "should not raise an exception when normalized" do + expect do + @uri.normalize + end.not_to raise_error + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/#%F6" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/#%F6" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%C3%87'" do + before do + @uri = Addressable::URI.parse("http://example.com/%C3%87") + end + + # Based on http://intertwingly.net/blog/2004/07/31/URI-Equivalence + it "should be equivalent to 'http://example.com/C%CC%A7'" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/C%CC%A7")) + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com/%C3%87" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com/%C3%87" + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.normalized_encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "if percent encoded should be 'http://example.com/%25C3%2587'" do + expect(Addressable::URI.encode(@uri).to_s).to eq( + "http://example.com/%25C3%2587" + ) + end + + it "if percent encoded should be 'http://example.com/%25C3%2587'" do + expect(Addressable::URI.encode(@uri, Addressable::URI)).to eq( + 
Addressable::URI.parse("http://example.com/%25C3%2587") + ) + end + + it "should raise an error if encoding with an unexpected return type" do + expect do + Addressable::URI.encode(@uri, Integer) + end.to raise_error(TypeError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=string'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=string") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have a query string of 'q=string'" do + expect(@uri.query).to eq("q=string") + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:80/'" do + before do + @uri = Addressable::URI.parse("http://example.com:80/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:80'" do + expect(@uri.authority).to eq("example.com:80") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have explicit port 80" do + expect(@uri.port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:80/" do + expect(@uri.eql?(Addressable::URI.parse("http://example.com:80/"))).to eq(true) + end + + it "should be roughly equal to http://example.com/" do + expect(@uri === Addressable::URI.parse("http://example.com/")).to eq(true) + end + + it "should be roughly equal to the string 'http://example.com/'" do + expect(@uri === "http://example.com/").to eq(true) + end + + it "should not be roughly equal to the string " + + "'http://example.com:bogus/'" do + expect do + expect(@uri === "http://example.com:bogus/").to eq(false) + end.not_to raise_error + end + + it "should result in itself when joined with itself" do + expect(@uri.join(@uri).to_s).to eq("http://example.com:80/") + expect(@uri.join!(@uri).to_s).to 
eq("http://example.com:80/") + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:/")) + end + + # Section 6.2.3 of RFC 3986 + it "should be equal to http://example.com:80/" do + expect(@uri).to eq(Addressable::URI.parse("http://example.com:80/")) + end + + # Section 6.2.2.1 of RFC 3986 + it "should be equal to http://EXAMPLE.COM/" do + expect(@uri).to eq(Addressable::URI.parse("http://EXAMPLE.COM/")) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 80, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:80/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:80/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:8080/'" do + before do + @uri = Addressable::URI.parse("http://example.com:8080/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com:8080'" do + expect(@uri.authority).to eq("example.com:8080") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 8080" do + expect(@uri.inferred_port).to eq(8080) + end + + it "should have explicit port 8080" do + expect(@uri.port).to eq(8080) + end + + it "should have default port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com:8080/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com:8080/"))).to eq(true) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should have a route of '../../' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'http://example.com:8080/' from " + + 
"'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com:8080/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => 8080, + :path => "/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com:8080'" do + expect(@uri.origin).to eq('http://example.com:8080') + end + + it "should not change if encoded with the normalizing algorithm" do + expect(Addressable::URI.normalized_encode(@uri).to_s).to eq( + "http://example.com:8080/" + ) + expect(Addressable::URI.normalized_encode(@uri, Addressable::URI).to_s).to be === + "http://example.com:8080/" + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com:%38%30/'" do + before do + @uri = Addressable::URI.parse("http://example.com:%38%30/") + end + + it "should have the correct port" do + expect(@uri.port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed with empty port" do + subject(:uri) do + Addressable::URI.parse("//example.com:") + end + + it "should not infer a port" do + expect(uri.port).to be(nil) + end + + it "should have a site value of '//example.com'" do + expect(uri.site).to eq("//example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/%2E/'" do + before do + @uri = Addressable::URI.parse("http://example.com/%2E/") + end + + it "should be considered to be in normal form" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + @uri.normalize.should be_eql(@uri) + end + + it "should normalize to 'http://example.com/%2E/'" do + skip( + 'path segment normalization should happen before ' + + 'percent escaping normalization' + ) + expect(@uri.normalize).to eq("http://example.com/%2E/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/../..'" do + before do + @uri = Addressable::URI.parse("http://example.com/../..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/..") + end + + 
it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/..'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/..") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path(/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/path(/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/(path)/../'" do + before do + @uri = Addressable::URI.parse("http://example.com/(path)/../") + end + + it "should have the correct port" do + expect(@uri.inferred_port).to eq(80) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'/..//example.com'" do + before do + @uri = Addressable::URI.parse("/..//example.com") + end + + it "should become invalid when normalized" do + expect do + @uri.normalize + end.to raise_error(Addressable::URI::InvalidURIError, /authority/) + end + + it "should have a path of '/..//example.com'" do + expect(@uri.path).to eq("/..//example.com") + end +end + +describe Addressable::URI, "when parsed from '/a/b/c/./../../g'" do + before do + @uri = Addressable::URI.parse("/a/b/c/./../../g") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to '/a/g'" do + expect(@uri.normalize.to_s).to eq("/a/g") + end +end + +describe Addressable::URI, "when parsed from 'mid/content=5/../6'" do + before do + @uri = Addressable::URI.parse("mid/content=5/../6") + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + # Section 5.2.4 of RFC 3986 + it "should normalize to 'mid/6'" do + expect(@uri.normalize.to_s).to eq("mid/6") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.example.com///../'" do + before do + @uri = Addressable::URI.parse('http://www.example.com///../') + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end + + it "should normalize to 'http://www.example.com//'" do + expect(@uri.normalize.to_s).to eq("http://www.example.com//") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/path/to/resource/'" do + before do + @uri = Addressable::URI.parse("http://example.com/path/to/resource/") + 
end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource/'" do + expect(@uri.path).to eq("/path/to/resource/") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should not be considered relative" do + expect(@uri).not_to be_relative + end + + it "should be exactly equal to http://example.com/path/to/resource/" do + expect(@uri.eql?(Addressable::URI.parse( + "http://example.com/path/to/resource/"))).to eq(true) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/'" do + expect(@uri.route_from("http://example.com/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of '../' from " + + "'http://example.com/path/to/resource/sub'" do + expect(@uri.route_from("http://example.com/path/to/resource/sub")).to eq( + Addressable::URI.parse("../") + ) + end + + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/another'" do + expect(@uri.route_from("http://example.com/path/to/another")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com/path/to/res'" do + expect(@uri.route_from("http://example.com/path/to/res")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'resource/' from " + + "'http://example.com:80/path/to/'" do + expect(@uri.route_from("http://example.com:80/path/to/")).to eq( + Addressable::URI.parse("resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/resource/' from " + + "'http://example.com:8080/path/to/'" do + expect(@uri.route_from("http://example.com:8080/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of 'http://example.com/path/to/resource/' from " + + "'http://user:pass@example.com/path/to/'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/")).to eq( + Addressable::URI.parse("http://example.com/path/to/resource/") + ) + end + + it "should have a route of '../../path/to/resource/' from " + + "'http://example.com/to/resource/'" do + expect(@uri.route_from("http://example.com/to/resource/")).to eq( + Addressable::URI.parse("../../path/to/resource/") + ) + end + + it "should correctly convert to a hash" do + expect(@uri.to_hash).to eq({ + :scheme => "http", + :user => nil, + :password => nil, + :host => "example.com", + :port => nil, + :path => "/path/to/resource/", + :query => nil, + :fragment => nil + }) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative/path/to/resource'" do + before do + @uri = Addressable::URI.parse("relative/path/to/resource") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered 
ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative/path/to/resource'" do + expect(@uri.path).to eq("relative/path/to/resource") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /relative\/path\/to\/resource/) + end + + it "when joined with 'another/relative/path' should be " + + "'relative/path/to/another/relative/path'" do + expect(@uri.join('another/relative/path')).to eq( + Addressable::URI.parse("relative/path/to/another/relative/path") + ) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end +end + +describe Addressable::URI, "when parsed from " + + "'relative_path_with_no_slashes'" do + before do + @uri = Addressable::URI.parse("relative_path_with_no_slashes") + end + + it "should not have a scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should not be considered ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should not have an authority segment" do + expect(@uri.authority).to eq(nil) + end + + it "should not have a host" do + expect(@uri.host).to eq(nil) + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not have a port" do + expect(@uri.port).to eq(nil) + end + + it "should have a path of 'relative_path_with_no_slashes'" do + expect(@uri.path).to eq("relative_path_with_no_slashes") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should not be considered absolute" do + expect(@uri).not_to be_absolute + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "when joined with 'another_relative_path' should be " + + "'another_relative_path'" do + expect(@uri.join('another_relative_path')).to eq( + Addressable::URI.parse("another_relative_path") + ) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + 
expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt'" do + expect(@uri.path).to eq("/file.txt") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;parameter'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;parameter") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;parameter'" do + expect(@uri.path).to eq("/file.txt;parameter") + end + + it "should have a basename of 'file.txt'" do + expect(@uri.basename).to eq("file.txt") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/file.txt;x=y'" do + before do + @uri = Addressable::URI.parse("http://example.com/file.txt;x=y") + end + + it "should have a scheme of 'http'" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'example.com'" do + expect(@uri.authority).to eq("example.com") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have no username" do + expect(@uri.user).to eq(nil) + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/file.txt;x=y'" do + expect(@uri.path).to eq("/file.txt;x=y") + end + + it "should have an extname of '.txt'" do + expect(@uri.extname).to eq(".txt") + end + + it "should have no query string" do + expect(@uri.query).to eq(nil) + end + + it "should have no fragment" do + expect(@uri.fragment).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'svn+ssh://developername@rubyforge.org/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + "svn+ssh://developername@rubyforge.org/var/svn/project" + ) + end + + it "should have a scheme of 'svn+ssh'" do + expect(@uri.scheme).to eq("svn+ssh") + end + + it "should be considered to be ip-based" do + expect(@uri).to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to 
eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'ssh+svn://developername@RUBYFORGE.ORG/var/svn/project'" do + before do + @uri = Addressable::URI.parse( + "ssh+svn://developername@RUBYFORGE.ORG/var/svn/project" + ) + end + + it "should have a scheme of 'ssh+svn'" do + expect(@uri.scheme).to eq("ssh+svn") + end + + it "should have a normalized scheme of 'svn+ssh'" do + expect(@uri.normalized_scheme).to eq("svn+ssh") + end + + it "should have a normalized site of 'svn+ssh://developername@rubyforge.org'" do + expect(@uri.normalized_site).to eq("svn+ssh://developername@rubyforge.org") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of '/var/svn/project'" do + expect(@uri.path).to eq("/var/svn/project") + end + + it "should have a username of 'developername'" do + expect(@uri.user).to eq("developername") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should not be considered to be in normal form" do + expect(@uri.normalize).not_to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'mailto:user@example.com'" do + before do + @uri = Addressable::URI.parse("mailto:user@example.com") + end + + it "should have a scheme of 'mailto'" do + expect(@uri.scheme).to eq("mailto") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of 'user@example.com'" do + expect(@uri.path).to eq("user@example.com") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'tag:example.com,2006-08-18:/path/to/something'" do + before do + @uri = Addressable::URI.parse( + "tag:example.com,2006-08-18:/path/to/something") + end + + it "should have a scheme of 'tag'" do + expect(@uri.scheme).to eq("tag") + end + + it "should not be considered to be ip-based" do + expect(@uri).not_to be_ip_based + end + + it "should have a path of " + + "'example.com,2006-08-18:/path/to/something'" do + expect(@uri.path).to eq("example.com,2006-08-18:/path/to/something") + end + + it "should have no user" do + expect(@uri.user).to eq(nil) + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/x;y/'" do + before do + @uri = Addressable::URI.parse("http://example.com/x;y/") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?x=1&y=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?x=1&y=2") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end +end + +describe Addressable::URI, "when parsed from " + + "'view-source:http://example.com/'" do + before do + @uri = Addressable::URI.parse("view-source:http://example.com/") + end + + it "should have a scheme of 'view-source'" do + expect(@uri.scheme).to eq("view-source") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should be 
considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + before do + @uri = Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have an authority segment of 'user:pass@example.com'" do + expect(@uri.authority).to eq("user:pass@example.com") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/path/to/resource'" do + expect(@uri.path).to eq("/path/to/resource") + end + + it "should have a query string of 'query=x'" do + expect(@uri.query).to eq("query=x") + end + + it "should have a fragment of 'fragment'" do + expect(@uri.fragment).to eq("fragment") + end + + it "should be considered to be in normal form" do + expect(@uri.normalize).to be_eql(@uri) + end + + it "should have a route of '../../' to " + + "'http://user:pass@example.com/path/'" do + expect(@uri.route_to("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("../../") + ) + end + + it "should have a route of 'to/resource?query=x#fragment' " + + "from 'http://user:pass@example.com/path/'" do + expect(@uri.route_from("http://user:pass@example.com/path/")).to eq( + Addressable::URI.parse("to/resource?query=x#fragment") + ) + end + + it "should have a route of '?query=x#fragment' " + + "from 'http://user:pass@example.com/path/to/resource'" do + expect(@uri.route_from("http://user:pass@example.com/path/to/resource")).to eq( + Addressable::URI.parse("?query=x#fragment") + ) + end + + it "should have a route of '#fragment' " + + "from 'http://user:pass@example.com/path/to/resource?query=x'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x")).to eq( + Addressable::URI.parse("#fragment") + ) + end + + it "should have a route of '#fragment' from " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment'" do + expect(@uri.route_from( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + )).to eq(Addressable::URI.parse("#fragment")) + end + + it "should have a route of 'http://elsewhere.com/' to " + + "'http://elsewhere.com/'" do + expect(@uri.route_to("http://elsewhere.com/")).to eq( + Addressable::URI.parse("http://elsewhere.com/") + ) + end + + it "should have a route of " + + "'http://user:pass@example.com/path/to/resource?query=x#fragment' " + + "from 'http://elsewhere.com/path/to/'" do + expect(@uri.route_from("http://elsewhere.com/path/to/")).to eq( + Addressable::URI.parse( + "http://user:pass@example.com/path/to/resource?query=x#fragment") + ) + end + + it "should have the correct scheme after assignment" do + @uri.scheme = "ftp" + expect(@uri.scheme).to eq("ftp") + expect(@uri.to_s).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + expect(@uri.to_str).to eq( + "ftp://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct site segment after assignment" do + @uri.site = 
"https://newuser:newpass@example.com:443" + expect(@uri.scheme).to eq("https") + expect(@uri.authority).to eq("newuser:newpass@example.com:443") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(443) + expect(@uri.inferred_port).to eq(443) + expect(@uri.to_s).to eq( + "https://newuser:newpass@example.com:443" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:newpass@example.com:80" + expect(@uri.authority).to eq("newuser:newpass@example.com:80") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.normalized_userinfo).to eq("newuser:newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(80) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com:80" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.authority).to eq("newuser:newpass@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq( + "http://newuser:newpass@example.com" + + "/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.authority).to eq("newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.authority).to eq("user:newpass@example.com") + end + + it "should have the correct host after assignment" do + @uri.host = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should have the correct host after assignment" do + @uri.hostname = "newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.hostname).to eq("newexample.com") + expect(@uri.authority).to eq("user:pass@newexample.com") + end + + it "should raise an error if assigning a bogus object to the hostname" do + expect do + @uri.hostname = Object.new + end.to raise_error(TypeError) + end + + it "should have the correct port after assignment" do + @uri.port = 8080 + expect(@uri.port).to eq(8080) + expect(@uri.authority).to eq("user:pass@example.com:8080") + end + + it "should have the correct origin after assignment" do + @uri.origin = "http://newexample.com" + expect(@uri.host).to eq("newexample.com") + expect(@uri.authority).to eq("newexample.com") + end + + it "should have the correct path after assignment" do + @uri.path = "/newpath/to/resource" + expect(@uri.path).to eq("/newpath/to/resource") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/newpath/to/resource?query=x#fragment" + ) + end + + it "should have the correct scheme and authority after nil assignment" do + @uri.site = nil + expect(@uri.scheme).to eq(nil) + expect(@uri.authority).to eq(nil) + expect(@uri.to_s).to eq("/path/to/resource?query=x#fragment") + end + + it 
"should have the correct scheme and authority after assignment" do + @uri.site = "file://" + expect(@uri.scheme).to eq("file") + expect(@uri.authority).to eq("") + expect(@uri.to_s).to eq("file:///path/to/resource?query=x#fragment") + end + + it "should have the correct path after nil assignment" do + @uri.path = nil + expect(@uri.path).to eq("") + expect(@uri.to_s).to eq( + "http://user:pass@example.com?query=x#fragment" + ) + end + + it "should have the correct query string after assignment" do + @uri.query = "newquery=x" + expect(@uri.query).to eq("newquery=x") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?newquery=x#fragment" + ) + @uri.query = nil + expect(@uri.query).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource#fragment" + ) + end + + it "should have the correct query string after hash assignment" do + @uri.query_values = {"?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther"} + expect(@uri.query.split("&")).to include("%3Fuestion%20mark=%3Dsign") + expect(@uri.query.split("&")).to include("hello=g%C3%BCnther") + expect(@uri.query_values).to eq({ + "?uestion mark" => "=sign", "hello" => "g\xC3\xBCnther" + }) + end + + it "should have the correct query string after flag hash assignment" do + @uri.query_values = {'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil} + expect(@uri.query.split("&")).to include("flag%3F1") + expect(@uri.query.split("&")).to include("fl%3Dag2") + expect(@uri.query.split("&")).to include("flag3") + expect(@uri.query_values(Array).sort).to eq([["fl=ag2"], ["flag3"], ["flag?1"]]) + expect(@uri.query_values(Hash)).to eq({ + 'flag?1' => nil, 'fl=ag2' => nil, 'flag3' => nil + }) + end + + it "should raise an error if query values are set to a bogus type" do + expect do + @uri.query_values = "bogus" + end.to raise_error(TypeError) + end + + it "should have the correct fragment after assignment" do + @uri.fragment = "newfragment" + expect(@uri.fragment).to eq("newfragment") + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + + @uri.fragment = nil + expect(@uri.fragment).to eq(nil) + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => "newfragment").to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#newfragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:fragment => nil).to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => "newuser:newpass").to_s).to eq( + "http://newuser:newpass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:userinfo => nil).to_s).to eq( + "http://example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:path => "newpath").to_s).to eq( + "http://user:pass@example.com/newpath?query=x#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:port => "42", :path => "newpath", :query => "").to_s).to eq( + "http://user:pass@example.com:42/newpath?#fragment" + ) + end + + it "should have the correct values after a merge" do + expect(@uri.merge(:authority => "foo:bar@baz:42").to_s).to eq( + 
"http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + # Ensure the operation was not destructive + expect(@uri.to_s).to eq( + "http://user:pass@example.com/path/to/resource?query=x#fragment" + ) + end + + it "should have the correct values after a destructive merge" do + @uri.merge!(:authority => "foo:bar@baz:42") + # Ensure the operation was destructive + expect(@uri.to_s).to eq( + "http://foo:bar@baz:42/path/to/resource?query=x#fragment" + ) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:port => "bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus values" do + expect do + @uri.merge(:authority => "bar@baz:bogus") + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge(42) + end.to raise_error(TypeError) + end + + it "should fail to merge with bogus parameters" do + expect do + @uri.merge("http://example.com/") + end.to raise_error(TypeError) + end + + it "should fail to merge with both authority and subcomponents" do + expect do + @uri.merge(:authority => "foo:bar@baz:42", :port => "42") + end.to raise_error(ArgumentError) + end + + it "should fail to merge with both userinfo and subcomponents" do + expect do + @uri.merge(:userinfo => "foo:bar", :user => "foo") + end.to raise_error(ArgumentError) + end + + it "should be identical to its duplicate" do + expect(@uri).to eq(@uri.dup) + end + + it "should have an origin of 'http://example.com'" do + expect(@uri.origin).to eq('http://example.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/search?q=Q%26A'" do + + before do + @uri = Addressable::URI.parse("http://example.com/search?q=Q%26A") + end + + it "should have a query of 'q=Q%26A'" do + expect(@uri.query).to eq("q=Q%26A") + end + + it "should have query_values of {'q' => 'Q&A'}" do + expect(@uri.query_values).to eq({ 'q' => 'Q&A' }) + end + + it "should normalize to the original uri " + + "(with the ampersand properly percent-encoded)" do + expect(@uri.normalize.to_s).to eq("http://example.com/search?q=Q%26A") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&x=b") + end + + it "should have a query of '&x=b'" do + expect(@uri.query).to eq("&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='one;two'&x=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q='one;two'&x=1") + end + + it "should have a query of 'q='one;two'&x=1'" do + expect(@uri.query).to eq("q='one;two'&x=1") + end + + it "should have query_values of {\"q\" => \"'one;two'\", \"x\" => \"1\"}" do + expect(@uri.query_values).to eq({"q" => "'one;two'", "x" => "1"}) + end + + it "should escape the ';' character when normalizing to avoid ambiguity " + + "with the W3C HTML 4.01 specification" do + # HTML 4.01 Section B.2.2 + expect(@uri.normalize.query).to eq("q='one%3Btwo'&x=1") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?&&x=b") + end + + it "should have a query of '&&x=b'" do + expect(@uri.query).to eq("&&x=b") + end + + it "should have query_values of {'x' => 'b'}" do + expect(@uri.query_values).to 
eq({'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a&&x=b") + end + + it "should have a query of 'q=a&&x=b'" do + expect(@uri.query).to eq("q=a&&x=b") + end + + it "should have query_values of {'q' => 'a', 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => 'a', 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q&&x=b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q&&x=b") + end + + it "should have a query of 'q&&x=b'" do + expect(@uri.query).to eq("q&&x=b") + end + + it "should have query_values of {'q' => nil, 'x' => 'b'}" do + expect(@uri.query_values).to eq({'q' => nil, 'x' => 'b'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a+b") + end + + it "should have a query of 'q=a+b'" do + expect(@uri.query).to eq("q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq({'q' => 'a b'}) + end + + it "should have a normalized query of 'q=a+b'" do + expect(@uri.normalized_query).to eq("q=a+b") + end +end + +describe Addressable::URI, "when parsed from 'https://example.com/?q=a+b'" do + before do + @uri = Addressable::URI.parse("https://example.com/?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'example.com?q=a+b'" do + before do + @uri = Addressable::URI.parse("example.com?q=a+b") + end + + it "should have query_values of {'q' => 'a b'}" do + expect(@uri.query_values).to eq("q" => "a b") + end +end + +describe Addressable::URI, "when parsed from 'mailto:?q=a+b'" do + before do + @uri = Addressable::URI.parse("mailto:?q=a+b") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq("q" => "a+b") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q=a%2bb'" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=a%2bb") + end + + it "should have a query of 'q=a%2bb'" do + expect(@uri.query).to eq("q=a%2bb") + end + + it "should have query_values of {'q' => 'a+b'}" do + expect(@uri.query_values).to eq({'q' => 'a+b'}) + end + + it "should have a normalized query of 'q=a%2Bb'" do + expect(@uri.normalized_query).to eq("q=a%2Bb") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=%2B&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=%2B&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=%2B&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7'" do + before do + @uri = Addressable::URI.parse("http://example.com/?v=%7E&w=%&x=%25&y=+&z=C%CC%A7") + end + + it "should have a normalized query of 'v=~&w=%25&x=%25&y=+&z=%C3%87'" do + expect(@uri.normalized_query).to eq("v=~&w=%25&x=%25&y=+&z=%C3%87") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?b=1&a=2&c=3'" do + before do + @uri = Addressable::URI.parse("http://example/?b=1&a=2&c=3") + end + + it "should have a sorted normalized query of 'a=2&b=1&c=3'" do + 
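# The :sorted flag orders query parameters lexicographically during normalization. +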
expect(@uri.normalized_query(:sorted)).to eq("a=2&b=1&c=3") + end +end + +describe Addressable::URI, "when parsed from 'http://example/?&a&&c&'" do + before do + @uri = Addressable::URI.parse("http://example/?&a&&c&") + end + + it "should have a compacted normalized query of 'a&c'" do + expect(@uri.normalized_query(:compacted)).to eq("a&c") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=1'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=1") + end + + it "should have a compacted normalized query of 'a=1'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1") + end +end + +describe Addressable::URI, "when parsed from 'http://example.com/?a=1&a=2'" do + before do + @uri = Addressable::URI.parse("http://example.com/?a=1&a=2") + end + + it "should have a compacted normalized query of 'a=1&a=2'" do + expect(@uri.normalized_query(:compacted)).to eq("a=1&a=2") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/sound%2bvision'" do + before do + @uri = Addressable::URI.parse("http://example.com/sound%2bvision") + end + + it "should have a normalized path of '/sound+vision'" do + expect(@uri.normalized_path).to eq('/sound+vision') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/?q='" do + before do + @uri = Addressable::URI.parse("http://example.com/?q=") + end + + it "should have a query of 'q='" do + expect(@uri.query).to eq("q=") + end + + it "should have query_values of {'q' => ''}" do + expect(@uri.query_values).to eq({'q' => ''}) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user@example.com'" do + before do + @uri = Addressable::URI.parse("http://user@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have no password" do + expect(@uri.password).to eq(nil) + end + + it "should have a userinfo of 'user'" do + expect(@uri.userinfo).to eq("user") + end + + it "should have a normalized userinfo of 'user'" do + expect(@uri.normalized_userinfo).to eq("user") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have default_port 80" do + expect(@uri.default_port).to eq(80) + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct userinfo segment after assignment" do + @uri.userinfo = "newuser:newpass" + expect(@uri.userinfo).to eq("newuser:newpass") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:newpass@example.com") + end + + it "should have the correct userinfo segment after nil assignment" do + @uri.userinfo = nil + expect(@uri.userinfo).to eq(nil) + expect(@uri.user).to eq(nil) + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + 
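# Clearing the userinfo must leave the host and port components untouched. +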
expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser@example.com" + expect(@uri.authority).to eq("newuser@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq(nil) + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser@example.com") + end + + it "should raise an error after nil assignment of authority segment" do + expect do + # This would create an invalid URI + @uri.authority = nil + end.to raise_error(Addressable::URI::InvalidURIError) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://user:@example.com'" do + before do + @uri = Addressable::URI.parse("http://user:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of 'user'" do + expect(@uri.user).to eq("user") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of 'user:'" do + expect(@uri.normalized_userinfo).to eq("user:") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.to_s).to eq("http://user:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = "newuser:@example.com" + expect(@uri.authority).to eq("newuser:@example.com") + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://newuser:@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:pass@example.com'" do + before do + @uri = Addressable::URI.parse("http://:pass@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of 'pass'" do + expect(@uri.password).to eq("pass") + end + + it "should have a userinfo of ':pass'" do + expect(@uri.userinfo).to eq(":pass") + end + + it "should have a normalized userinfo of ':pass'" do + expect(@uri.normalized_userinfo).to eq(":pass") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("pass") + expect(@uri.to_s).to eq("http://newuser:pass@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct 
authority segment after assignment" do + @uri.authority = ":newpass@example.com" + expect(@uri.authority).to eq(":newpass@example.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("newpass") + expect(@uri.host).to eq("example.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:newpass@example.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://:@example.com'" do + before do + @uri = Addressable::URI.parse("http://:@example.com") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a username of ''" do + expect(@uri.user).to eq("") + end + + it "should have a password of ''" do + expect(@uri.password).to eq("") + end + + it "should have a normalized userinfo of nil" do + expect(@uri.normalized_userinfo).to eq(nil) + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have the correct username after assignment" do + @uri.user = "newuser" + expect(@uri.user).to eq("newuser") + expect(@uri.password).to eq("") + expect(@uri.to_s).to eq("http://newuser:@example.com") + end + + it "should have the correct password after assignment" do + @uri.password = "newpass" + expect(@uri.password).to eq("newpass") + expect(@uri.user).to eq("") + expect(@uri.to_s).to eq("http://:newpass@example.com") + end + + it "should have the correct authority segment after assignment" do + @uri.authority = ":@newexample.com" + expect(@uri.authority).to eq(":@newexample.com") + expect(@uri.user).to eq("") + expect(@uri.password).to eq("") + expect(@uri.host).to eq("newexample.com") + expect(@uri.port).to eq(nil) + expect(@uri.inferred_port).to eq(80) + expect(@uri.to_s).to eq("http://:@newexample.com") + end +end + +describe Addressable::URI, "when parsed from " + + "'#example'" do + before do + @uri = Addressable::URI.parse("#example") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of nil" do + expect(@uri.host).to eq(nil) + end + + it "should have a site of nil" do + expect(@uri.site).to eq(nil) + end + + it "should have a normalized_site of nil" do + expect(@uri.normalized_site).to eq(nil) + end + + it "should have a path of ''" do + expect(@uri.path).to eq("") + end + + it "should have a query string of nil" do + expect(@uri.query).to eq(nil) + end + + it "should have a fragment of 'example'" do + expect(@uri.fragment).to eq("example") + end +end + +describe Addressable::URI, "when parsed from " + + "the network-path reference '//example.com/'" do + before do + @uri = Addressable::URI.parse("//example.com/") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should have a path of '/'" do + expect(@uri.path).to eq("/") + end + + it "should raise an error if routing is attempted" do + expect do + @uri.route_to("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + expect do + @uri.route_from("http://example.com/") + end.to raise_error(ArgumentError, /\/\/example.com\//) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'feed://http://example.com/'" do + before do + @uri = 
Addressable::URI.parse("feed://http://example.com/") + end + + it "should have a host of 'http'" do + expect(@uri.host).to eq("http") + end + + it "should have a path of '//example.com/'" do + expect(@uri.path).to eq("//example.com/") + end +end + +describe Addressable::URI, "when parsed from " + + "'feed:http://example.com/'" do + before do + @uri = Addressable::URI.parse("feed:http://example.com/") + end + + it "should have a path of 'http://example.com/'" do + expect(@uri.path).to eq("http://example.com/") + end + + it "should normalize to 'http://example.com/'" do + expect(@uri.normalize.to_s).to eq("http://example.com/") + expect(@uri.normalize!.to_s).to eq("http://example.com/") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'example://a/b/c/%7Bfoo%7D'" do + before do + @uri = Addressable::URI.parse("example://a/b/c/%7Bfoo%7D") + end + + # Section 6.2.2 of RFC 3986 + it "should be equivalent to eXAMPLE://a/./b/../b/%63/%7bfoo%7d" do + expect(@uri).to eq( + Addressable::URI.parse("eXAMPLE://a/./b/../b/%63/%7bfoo%7d") + ) + end + + it "should have an origin of 'example://a'" do + expect(@uri.origin).to eq('example://a') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://example.com/indirect/path/./to/../resource/'" do + before do + @uri = Addressable::URI.parse( + "http://example.com/indirect/path/./to/../resource/") + end + + it "should use the 'http' scheme" do + expect(@uri.scheme).to eq("http") + end + + it "should have a host of 'example.com'" do + expect(@uri.host).to eq("example.com") + end + + it "should use port 80" do + expect(@uri.inferred_port).to eq(80) + end + + it "should have a path of '/indirect/path/./to/../resource/'" do + expect(@uri.path).to eq("/indirect/path/./to/../resource/") + end + + # Section 6.2.2.3 of RFC 3986 + it "should have a normalized path of '/indirect/path/resource/'" do + expect(@uri.normalize.path).to eq("/indirect/path/resource/") + expect(@uri.normalize!.path).to eq("/indirect/path/resource/") + end +end + +describe Addressable::URI, "when parsed from " + + "'http://under_score.example.com/'" do + it "should not cause an error" do + expect do + Addressable::URI.parse("http://under_score.example.com/") + end.not_to raise_error + end +end + +describe Addressable::URI, "when parsed from " + + "'./this:that'" do + before do + @uri = Addressable::URI.parse("./this:that") + end + + it "should be considered relative" do + expect(@uri).to be_relative + end + + it "should have no scheme" do + expect(@uri.scheme).to eq(nil) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from " + + "'this:that'" do + before do + @uri = Addressable::URI.parse("this:that") + end + + it "should be considered absolute" do + expect(@uri).to be_absolute + end + + it "should have a scheme of 'this'" do + expect(@uri.scheme).to eq("this") + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?'" do + before do + @uri = Addressable::URI.parse("?") + end + + it "should normalize to ''" do + expect(@uri.normalize.to_s).to eq("") + end + + it "should have the correct return type" do + expect(@uri.query_values).to eq({}) + expect(@uri.query_values(Hash)).to eq({}) + expect(@uri.query_values(Array)).to eq([]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') 
+ end +end + +describe Addressable::URI, "when parsed from '?one=1&two=2&three=3'" do + before do + @uri = Addressable::URI.parse("?one=1&two=2&three=3") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1", "two" => "2", "three" => "3"}) + end + + it "should raise an error for invalid return type values" do + expect do + @uri.query_values(Integer) + end.to raise_error(ArgumentError) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1"], ["two", "2"], ["three", "3"] + ]) + end + + it "should have a 'null' origin" do + expect(@uri.origin).to eq('null') + end +end + +describe Addressable::URI, "when parsed from '?one=1=uno&two=2=dos'" do + before do + @uri = Addressable::URI.parse("?one=1=uno&two=2=dos") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one" => "1=uno", "two" => "2=dos"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one", "1=uno"], ["two", "2=dos"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one[two][three]=four'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({"one[two][three]" => "four"}) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from '?one.two.three=four'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three]=four&one[two][five]=six'" do + before do + @uri = Addressable::URI.parse("?one[two][three]=four&one[two][five]=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three]" => "four", "one[two][five]" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three]", "four"], ["one[two][five]", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one.two.three=four&one.two.five=six'" do + before do + @uri = Addressable::URI.parse("?one.two.three=four&one.two.five=six") + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one.two.three" => "four", "one.two.five" => "six" + }) + end + + it "should have the correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one.two.three", "four"], ["one.two.five", "six"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one=two&one=three'" do + before do + @uri = Addressable::URI.parse( + "?one=two&one=three&one=four" + ) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq( + [['one', 'two'], ['one', 'three'], ['one', 'four']] + ) + end + + it "should have correct hash query values" do + skip("This is probably more desirable behavior.") + expect(@uri.query_values(Hash)).to eq( + {'one' => ['two', 'three', 'four']} + ) + end + + it "should handle assignment with keys of mixed type" do + 
@uri.query_values = @uri.query_values(Hash).merge({:one => 'three'}) + expect(@uri.query_values(Hash)).to eq({'one' => 'three'}) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][]=four&one[two][three][]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][]=four&one[two][three][]=five" + ) + end + + it "should have correct query values" do + expect(@uri.query_values(Hash)).to eq({"one[two][three][]" => "five"}) + end + + it "should have correct array query values" do + expect(@uri.query_values(Array)).to eq([ + ["one[two][three][]", "four"], ["one[two][three][]", "five"] + ]) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][0]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][0]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][0]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][1]=four&one[two][three][0]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][1]=four&one[two][three][0]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][1]" => "four", "one[two][three][0]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'?one[two][three][2]=four&one[two][three][1]=five'" do + before do + @uri = Addressable::URI.parse( + "?one[two][three][2]=four&one[two][three][1]=five" + ) + end + + it "should have the correct query values" do + expect(@uri.query_values).to eq({ + "one[two][three][2]" => "four", "one[two][three][1]" => "five" + }) + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/") + end + + it "should be equivalent to 'http://www.xn--8ws00zhy3a.com/'" do + expect(@uri).to eq( + Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/ some spaces /'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/ some spaces /") + end + + it "should be equivalent to " + + "'http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/'" do + expect(@uri).to eq( + Addressable::URI.parse( + "http://www.xn--8ws00zhy3a.com/%20some%20spaces%20/") + ) + end + + it "should not have domain name encoded during normalization" do + expect(Addressable::URI.normalized_encode(@uri.to_s)).to eq( + "http://www.詹姆斯.com/%20some%20spaces%20/" + ) + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.xn--8ws00zhy3a.com/'" do + before do + @uri = Addressable::URI.parse("http://www.xn--8ws00zhy3a.com/") + end + + it "should be displayed as http://www.詹姆斯.com/" do + expect(@uri.display_uri.to_s).to eq("http://www.詹姆斯.com/") + end + + it "should properly force the encoding" do + display_string =
@uri.display_uri.to_str + expect(display_string).to eq("http://www.詹姆斯.com/") + if display_string.respond_to?(:encoding) + expect(display_string.encoding.to_s).to eq(Encoding::UTF_8.to_s) + end + end + + it "should have an origin of 'http://www.xn--8ws00zhy3a.com'" do + expect(@uri.origin).to eq('http://www.xn--8ws00zhy3a.com') + end +end + +describe Addressable::URI, "when parsed from " + + "'http://www.詹姆斯.com/atomtests/iri/詹.html'" do + before do + @uri = Addressable::URI.parse("http://www.詹姆斯.com/atomtests/iri/詹.html") + end + + it "should normalize to " + + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--8ws00zhy3a.com/atomtests/iri/%E8%A9%B9.html" + ) + end +end + +describe Addressable::URI, "when parsed from a percent-encoded IRI" do + before do + @uri = Addressable::URI.parse( + "http://www.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA" + + "%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3" + + "%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82" + + "%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0" + + "%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3" + + "%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp" + ) + end + + it "should normalize to something sane" do + expect(@uri.normalize.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + expect(@uri.normalize!.to_s).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/" + ) + end + + it "should have the correct origin" do + expect(@uri.origin).to eq( + "http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3f" + + "g11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp" + ) + end +end + +describe Addressable::URI, "with a base uri of 'http://a/b/c/d;p?q'" do + before do + @uri = Addressable::URI.parse("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g:h' should resolve to g:h" do + expect((@uri + "g:h").to_s).to eq("g:h") + expect(Addressable::URI.join(@uri, "g:h").to_s).to eq("g:h") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g' should resolve to http://a/b/c/g" do + expect((@uri + "g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './g' should resolve to http://a/b/c/g" do + expect((@uri + "./g").to_s).to eq("http://a/b/c/g") + expect(Addressable::URI.join(@uri.to_s, "./g").to_s).to eq("http://a/b/c/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g/' should resolve to http://a/b/c/g/" do + expect((@uri + "g/").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "g/").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '/g' should resolve to http://a/g" do + expect((@uri + "/g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/g").to_s).to eq("http://a/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '//g' should resolve to http://g" do + expect((@uri + "//g").to_s).to eq("http://g") + expect(Addressable::URI.join(@uri.to_s, "//g").to_s).to eq("http://g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '?y' should resolve to http://a/b/c/d;p?y" do +
expect((@uri + "?y").to_s).to eq("http://a/b/c/d;p?y") + expect(Addressable::URI.join(@uri.to_s, "?y").to_s).to eq("http://a/b/c/d;p?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y' should resolve to http://a/b/c/g?y" do + expect((@uri + "g?y").to_s).to eq("http://a/b/c/g?y") + expect(Addressable::URI.join(@uri.to_s, "g?y").to_s).to eq("http://a/b/c/g?y") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '#s' should resolve to http://a/b/c/d;p?q#s" do + expect((@uri + "#s").to_s).to eq("http://a/b/c/d;p?q#s") + expect(Addressable::URI.join(@uri.to_s, "#s").to_s).to eq( + "http://a/b/c/d;p?q#s" + ) + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g#s' should resolve to http://a/b/c/g#s" do + expect((@uri + "g#s").to_s).to eq("http://a/b/c/g#s") + expect(Addressable::URI.join(@uri.to_s, "g#s").to_s).to eq("http://a/b/c/g#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g?y#s' should resolve to http://a/b/c/g?y#s" do + expect((@uri + "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g?y#s").to_s).to eq("http://a/b/c/g?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with ';x' should resolve to http://a/b/c/;x" do + expect((@uri + ";x").to_s).to eq("http://a/b/c/;x") + expect(Addressable::URI.join(@uri.to_s, ";x").to_s).to eq("http://a/b/c/;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x' should resolve to http://a/b/c/g;x" do + expect((@uri + "g;x").to_s).to eq("http://a/b/c/g;x") + expect(Addressable::URI.join(@uri.to_s, "g;x").to_s).to eq("http://a/b/c/g;x") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with 'g;x?y#s' should resolve to http://a/b/c/g;x?y#s" do + expect((@uri + "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + expect(Addressable::URI.join( + @uri.to_s, "g;x?y#s").to_s).to eq("http://a/b/c/g;x?y#s") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '' should resolve to http://a/b/c/d;p?q" do + expect((@uri + "").to_s).to eq("http://a/b/c/d;p?q") + expect(Addressable::URI.join(@uri.to_s, "").to_s).to eq("http://a/b/c/d;p?q") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '.' should resolve to http://a/b/c/" do + expect((@uri + ".").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, ".").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with './' should resolve to http://a/b/c/" do + expect((@uri + "./").to_s).to eq("http://a/b/c/") + expect(Addressable::URI.join(@uri.to_s, "./").to_s).to eq("http://a/b/c/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '..' should resolve to http://a/b/" do + expect((@uri + "..").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "..").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../' should resolve to http://a/b/" do + expect((@uri + "../").to_s).to eq("http://a/b/") + expect(Addressable::URI.join(@uri.to_s, "../").to_s).to eq("http://a/b/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../g' should resolve to http://a/b/g" do + expect((@uri + "../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../..' 
should resolve to http://a/" do + expect((@uri + "../..").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../..").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../' should resolve to http://a/" do + expect((@uri + "../../").to_s).to eq("http://a/") + expect(Addressable::URI.join(@uri.to_s, "../../").to_s).to eq("http://a/") + end + + # Section 5.4.1 of RFC 3986 + it "when joined with '../../g' should resolve to http://a/g" do + expect((@uri + "../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../g' should resolve to http://a/g" do + expect((@uri + "../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../../../g").to_s).to eq("http://a/g") + end + + it "when joined with '../.././../g' should resolve to http://a/g" do + expect((@uri + "../.././../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "../.././../g").to_s).to eq( + "http://a/g" + ) + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '../../../../g' should resolve to http://a/g" do + expect((@uri + "../../../../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join( + @uri.to_s, "../../../../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/./g' should resolve to http://a/g" do + expect((@uri + "/./g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/./g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '/../g' should resolve to http://a/g" do + expect((@uri + "/../g").to_s).to eq("http://a/g") + expect(Addressable::URI.join(@uri.to_s, "/../g").to_s).to eq("http://a/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g.' should resolve to http://a/b/c/g." do + expect((@uri + "g.").to_s).to eq("http://a/b/c/g.") + expect(Addressable::URI.join(@uri.to_s, "g.").to_s).to eq("http://a/b/c/g.") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '.g' should resolve to http://a/b/c/.g" do + expect((@uri + ".g").to_s).to eq("http://a/b/c/.g") + expect(Addressable::URI.join(@uri.to_s, ".g").to_s).to eq("http://a/b/c/.g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g..' should resolve to http://a/b/c/g.." do + expect((@uri + "g..").to_s).to eq("http://a/b/c/g..") + expect(Addressable::URI.join(@uri.to_s, "g..").to_s).to eq("http://a/b/c/g..") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with '..g' should resolve to http://a/b/c/..g" do + expect((@uri + "..g").to_s).to eq("http://a/b/c/..g") + expect(Addressable::URI.join(@uri.to_s, "..g").to_s).to eq("http://a/b/c/..g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './../g' should resolve to http://a/b/g" do + expect((@uri + "./../g").to_s).to eq("http://a/b/g") + expect(Addressable::URI.join(@uri.to_s, "./../g").to_s).to eq("http://a/b/g") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with './g/.' 
should resolve to http://a/b/c/g/" do + expect((@uri + "./g/.").to_s).to eq("http://a/b/c/g/") + expect(Addressable::URI.join(@uri.to_s, "./g/.").to_s).to eq("http://a/b/c/g/") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/./h' should resolve to http://a/b/c/g/h" do + expect((@uri + "g/./h").to_s).to eq("http://a/b/c/g/h") + expect(Addressable::URI.join(@uri.to_s, "g/./h").to_s).to eq("http://a/b/c/g/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g/../h' should resolve to http://a/b/c/h" do + expect((@uri + "g/../h").to_s).to eq("http://a/b/c/h") + expect(Addressable::URI.join(@uri.to_s, "g/../h").to_s).to eq("http://a/b/c/h") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/./y' " + + "should resolve to http://a/b/c/g;x=1/y" do + expect((@uri + "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/./y").to_s).to eq("http://a/b/c/g;x=1/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g;x=1/../y' should resolve to http://a/b/c/y" do + expect((@uri + "g;x=1/../y").to_s).to eq("http://a/b/c/y") + expect(Addressable::URI.join( + @uri.to_s, "g;x=1/../y").to_s).to eq("http://a/b/c/y") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/./x' " + + "should resolve to http://a/b/c/g?y/./x" do + expect((@uri + "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/./x").to_s).to eq("http://a/b/c/g?y/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g?y/../x' " + + "should resolve to http://a/b/c/g?y/../x" do + expect((@uri + "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + expect(Addressable::URI.join( + @uri.to_s, "g?y/../x").to_s).to eq("http://a/b/c/g?y/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/./x' " + + "should resolve to http://a/b/c/g#s/./x" do + expect((@uri + "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/./x").to_s).to eq("http://a/b/c/g#s/./x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'g#s/../x' " + + "should resolve to http://a/b/c/g#s/../x" do + expect((@uri + "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + expect(Addressable::URI.join( + @uri.to_s, "g#s/../x").to_s).to eq("http://a/b/c/g#s/../x") + end + + # Section 5.4.2 of RFC 3986 + it "when joined with 'http:g' should resolve to http:g" do + expect((@uri + "http:g").to_s).to eq("http:g") + expect(Addressable::URI.join(@uri.to_s, "http:g").to_s).to eq("http:g") + end + + # Edge case to be sure + it "when joined with '//example.com/' should " + + "resolve to http://example.com/" do + expect((@uri + "//example.com/").to_s).to eq("http://example.com/") + expect(Addressable::URI.join( + @uri.to_s, "//example.com/").to_s).to eq("http://example.com/") + end + + it "when joined with a bogus object a TypeError should be raised" do + expect do + Addressable::URI.join(@uri, 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when converting the path " + + "'relative/path/to/something'" do + before do + @path = 'relative/path/to/something' + end + + it "should convert to " + + "\'relative/path/to/something\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("relative/path/to/something") + end + + it "should join with an absolute file path correctly" do + @base = Addressable::URI.convert_path("/absolute/path/") + @uri = Addressable::URI.convert_path(@path) + expect((@base + @uri).to_str).to eq( + 
"file:///absolute/path/relative/path/to/something" + ) + end +end + +describe Addressable::URI, "when converting a bogus path" do + it "should raise a TypeError" do + expect do + Addressable::URI.convert_path(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given a UNIX root directory" do + before do + @path = "/" + end + + it "should convert to \'file:///\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given a Windows root directory" do + before do + @path = "C:\\" + end + + it "should convert to \'file:///c:/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path '/one/two/'" do + before do + @path = '/one/two/' + end + + it "should convert to " + + "\'file:///one/two/\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///one/two/") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the tld " do + it "'uk' should have a tld of 'uk'" do + uri = Addressable::URI.parse("http://example.com") + uri.tld = "uk" + + expect(uri.tld).to eq("uk") + end + + context "which " do + let (:uri) { Addressable::URI.parse("http://www.comrade.net/path/to/source/") } + + it "contains a subdomain" do + uri.tld = "co.uk" + + expect(uri.to_s).to eq("http://www.comrade.co.uk/path/to/source/") + end + + it "is part of the domain" do + uri.tld = "com" + + expect(uri.to_s).to eq("http://www.comrade.com/path/to/source/") + end + end +end + +describe Addressable::URI, "when given the path " + + "'c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file://c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file://c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an 
origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:/c:\\windows\\My Documents 100%20\\foo.txt'" do + before do + @path = "file:/c:\\windows\\My Documents 100%20\\foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given the path " + + "'file:///c|/windows/My%20Documents%20100%20/foo.txt'" do + before do + @path = "file:///c|/windows/My%20Documents%20100%20/foo.txt" + end + + it "should convert to " + + "\'file:///c:/windows/My%20Documents%20100%20/foo.txt\'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("file:///c:/windows/My%20Documents%20100%20/foo.txt") + end + + it "should have an origin of 'file://'" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.origin).to eq('file://') + end +end + +describe Addressable::URI, "when given an http protocol URI" do + before do + @path = "http://example.com/" + end + + it "should not do any conversion at all" do + @uri = Addressable::URI.convert_path(@path) + expect(@uri.to_str).to eq("http://example.com/") + end +end + +class SuperString + def initialize(string) + @string = string.to_s + end + + def to_str + return @string + end +end + +describe Addressable::URI, "when parsing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.parse(42) + end.to raise_error(TypeError) + end + + it "should correctly parse heuristically anything with a 'to_str' method" do + Addressable::URI.heuristic_parse(SuperString.new(42)) + end + + it "should raise a TypeError for objects than cannot be converted" do + expect do + Addressable::URI.heuristic_parse(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when form encoding a hash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["&one", "/1"], ["=two", "?2"], [":three", "#3"]] + )).to eq("%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => "one two three"} + )).to eq("q=one+two+three") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"key" => nil} + )).to eq("key=") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode( + {"q" => ["one", "two", "three"]} + )).to eq("q=one&q=two&q=three") + end + + it "should result in correctly encoded newlines" do + expect(Addressable::URI.form_encode( + {"text" => "one\ntwo\rthree\r\nfour\n\r"} + )).to eq("text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A") + end + + it "should result in a sorted percent encoded sequence" do + expect(Addressable::URI.form_encode( + [["a", "1"], ["dup", "3"], ["dup", "2"]], true + )).to eq("a=1&dup=2&dup=3") + end +end + +describe Addressable::URI, "when form encoding a non-Array object" do + it "should raise a TypeError for 
objects that cannot be converted" do + expect do + Addressable::URI.form_encode(42) + end.to raise_error(TypeError) + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form encoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_encode( + {"value" => " %&+£€"} + )).to eq("value=+%25%26%2B%C2%A3%E2%82%AC") + end +end + +# See https://tools.ietf.org/html/rfc6749#appendix-B +describe Addressable::URI, "when form unencoding the example value from OAuth 2" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "value=+%25%26%2B%C2%A3%E2%82%AC" + )).to eq([["value", " %&+£€"]]) + end +end + +describe Addressable::URI, "when form unencoding a string" do + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "%26one=%2F1&%3Dtwo=%3F2&%3Athree=%233" + )).to eq([["&one", "/1"], ["=two", "?2"], [":three", "#3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "q=one+two+three" + )).to eq([["q", "one two three"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "text=one%0D%0Atwo%0D%0Athree%0D%0Afour%0D%0A%0D%0A" + )).to eq([["text", "one\ntwo\nthree\nfour\n\n"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "a=1&dup=2&dup=3" + )).to eq([["a", "1"], ["dup", "2"], ["dup", "3"]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode( + "key" + )).to eq([["key", nil]]) + end + + it "should result in correct values" do + expect(Addressable::URI.form_unencode("GivenName=Ren%C3%A9")).to eq( + [["GivenName", "René"]] + ) + end +end + +describe Addressable::URI, "when form unencoding a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.form_unencode(SuperString.new(42)) + end + + it "should raise a TypeError for objects that cannot be converted" do + expect do + Addressable::URI.form_unencode(42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a non-String object" do + it "should correctly parse anything with a 'to_str' method" do + Addressable::URI.normalize_component(SuperString.new(42)) + end + + it "should raise a TypeError for objects that cannot be converted" do + expect do + Addressable::URI.normalize_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError for objects that cannot be converted" do + expect do + Addressable::URI.normalize_component("component", 42) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when normalizing a path with an encoded slash" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.parse("/path%2Fsegment/").normalize.path).to eq( + "/path%2Fsegment/" + ) + end +end + +describe Addressable::URI, "when normalizing a path with special unicode" do + it "does not stop at or ignore null bytes" do + expect(Addressable::URI.parse("/path%00segment/").normalize.path).to eq( + "/path%00segment/" + ) + end + + it "does apply NFC unicode normalization" do + expect(Addressable::URI.parse("/%E2%84%A6").normalize.path).to eq( + "/%CE%A9" + ) + end + + it "does not apply NFKC unicode normalization" do + expect(Addressable::URI.parse("/%C2%AF%C2%A0").normalize.path).to eq( + "/%C2%AF%C2%A0" + ) + end +end + +describe Addressable::URI, "when normalizing a
partially encoded string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially % encoded%21" + )).to eq("partially%20%25%20encoded!") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "partially %25 encoded!" + )).to eq("partially%20%25%20encoded!") + end +end + +describe Addressable::URI, "when normalizing a unicode sequence" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/C%CC%A7" + )).to eq("/%C3%87") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component( + "/%C3%87" + )).to eq("/%C3%87") + end +end + +describe Addressable::URI, "when normalizing a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("gÃŧnther")).to eq( + "g%C3%BCnther" + ) + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("g%C3%BCnther")).to eq( + "g%C3%BCnther" + ) + end +end + +describe Addressable::URI, "when normalizing a string but leaving some characters encoded" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.normalize_component("%58X%59Y%5AZ", "0-9a-zXY", "Y")).to eq( + "XX%59Y%5A%5A" + ) + end + + it "should not modify the character class" do + character_class = "0-9a-zXY" + + character_class_copy = character_class.dup + + Addressable::URI.normalize_component("%58X%59Y%5AZ", character_class, "Y") + + expect(character_class).to eq(character_class_copy) + end +end + +describe Addressable::URI, "when encoding IP literals" do + it "should work for IPv4" do + input = "http://127.0.0.1/" + expect(Addressable::URI.encode(input)).to eq(input) + end + + it "should work for IPv6" do + input = "http://[fe80::200:f8ff:fe21:67cf]/" + expect(Addressable::URI.encode(input)).to eq(input) + end +end + +describe Addressable::URI, "when encoding a string with existing encodings to upcase" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("JK%4c", "0-9A-IKM-Za-z%", "L")).to eq("%4AK%4C") + end +end + +describe Addressable::URI, "when encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("gÃŧnther")).to eq("g%C3%BCnther") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "gÃŧnther", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("g%C3%BCnther") + end +end + +describe Addressable::URI, "when form encoding a multibyte string" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.form_encode({"GivenName" => "RenÊ"})).to eq( + "GivenName=Ren%C3%A9" + ) + end +end + +describe Addressable::URI, "when encoding a string with ASCII chars 0-15" do + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component("one\ntwo")).to eq("one%0Atwo") + end + + it "should result in correct percent encoded sequence" do + expect(Addressable::URI.encode_component( + "one\ntwo", /[^a-zA-Z0-9\:\/\?\#\[\]\@\!\$\&\'\(\)\*\+\,\;\=\-\.\_\~]/ + )).to eq("one%0Atwo") + end +end + +describe Addressable::URI, "when unencoding a multibyte string" do + it "should result in correct percent encoded sequence" do + 
expect(Addressable::URI.unencode_component("g%C3%BCnther")).to eq("günther") + end + + it "should consistently use UTF-8 internally" do + expect(Addressable::URI.unencode_component("ski=%BA%DAɫ")).to eq("ski=\xBA\xDAɫ") + end + + it "should not fail with UTF-8 incompatible string" do + url = "/M%E9/\xE9?p=\xFC".b + expect(Addressable::URI.unencode_component(url)).to eq("/M\xE9/\xE9?p=\xFC") + end + + it "should result in correct percent encoded sequence as a URI" do + expect(Addressable::URI.unencode( + "/path?g%C3%BCnther", ::Addressable::URI + )).to eq(Addressable::URI.new( + :path => "/path", :query => "günther" + )) + end +end + +describe Addressable::URI, "when partially unencoding a string" do + it "should unencode all characters by default" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String)).to eq('%%~~++') + end + + it "should unencode characters not in leave_encoded" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '~')).to eq('%%~%7e++') + end + + it "should leave characters in leave_encoded alone" do + expect(Addressable::URI.unencode('%%25~%7e+%2b', String, '%~+')).to eq('%%25~%7e+%2b') + end +end + +describe Addressable::URI, "when unencoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.unencode_component(42) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.unencode("/path?g%C3%BCnther", Integer) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when encoding a bogus object" do + it "should raise a TypeError" do + expect do + Addressable::URI.encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.normalized_encode(Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component("günther", Object.new) + end.to raise_error(TypeError) + end + + it "should raise a TypeError" do + expect do + Addressable::URI.encode_component(Object.new) + end.to raise_error(TypeError) + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/'" do + before do + @input = "http://example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should not raise error when frozen" do + expect do + Addressable::URI.heuristic_parse(@input).freeze.to_s + end.not_to raise_error + end +end + +describe Addressable::URI, "when given the input " + + "'https://example.com/'" do + before do + @input = "https://example.com/" + end + + it "should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http:example.com/'" do + before do + @input = "http:example.com/" + end + + it "should heuristically parse to 'http://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/") + end + + it "should heuristically parse to 'http://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'https:example.com/'" do + before do + @input = "https:example.com/" + end + + it
"should heuristically parse to 'https://example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("https://example.com/") + end + + it "should heuristically parse to 'https://example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("https://example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://example.com/example.com/'" do + before do + @input = "http://example.com/example.com/" + end + + it "should heuristically parse to 'http://example.com/example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com/example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix\\.example.com/'" do + before do + @input = "http://prefix\\.example.com/" + end + + it "should heuristically parse to 'http://prefix/.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix") + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end + + it "should heuristically parse to 'http://prefix/.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix/.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p:\\/'" do + before do + @input = "http://p:\\/" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://'" do + before do + @input = "http://p://" + end + + it "should heuristically parse to 'http://p//'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//") + end + + it "should heuristically parse to 'http://p//' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//") + end +end + +describe Addressable::URI, "when given the input " + + "'http://p://p'" do + before do + @input = "http://p://p" + end + + it "should heuristically parse to 'http://p//p'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("p") + expect(@uri.to_s).to eq("http://p//p") + end + + it "should heuristically parse to 'http://p//p' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://p//p") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix .example.com/'" do + before do + @input = "http://prefix .example.com/" + end + + # Justification here being that no browser actually tries to resolve this. + # They all treat this as a web search. 
+ it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%20.example.com") + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end + + it "should heuristically parse to 'http://prefix%20.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%20.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "' http://www.example.com/ '" do + before do + @input = " http://www.example.com/ " + end + + it "should heuristically parse to 'http://prefix%20.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.scheme).to eq("http") + expect(@uri.path).to eq("/") + expect(@uri.to_s).to eq("http://www.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'http://prefix%2F.example.com/'" do + before do + @input = "http://prefix%2F.example.com/" + end + + it "should heuristically parse to 'http://prefix%2F.example.com/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.authority).to eq("prefix%2F.example.com") + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end + + it "should heuristically parse to 'http://prefix%2F.example.com/' " + + "even with a scheme hint of 'ftp'" do + @uri = Addressable::URI.heuristic_parse(@input, {:scheme => 'ftp'}) + expect(@uri.to_s).to eq("http://prefix%2F.example.com/") + end +end + +describe Addressable::URI, "when given the input " + + "'/path/to/resource'" do + before do + @input = "/path/to/resource" + end + + it "should heuristically parse to '/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'relative/path/to/resource'" do + before do + @input = "relative/path/to/resource" + end + + it "should heuristically parse to 'relative/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("relative/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com'" do + before do + @input = "example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com' and a scheme hint of 'ftp'" do + before do + @input = "example.com" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com:21' and a scheme hint of 'ftp'" do + before do + @input = "example.com:21" + @hints = {:scheme => 'ftp'} + end + + it "should heuristically parse to 'http://example.com:21'" do + @uri = Addressable::URI.heuristic_parse(@input, @hints) + expect(@uri.to_s).to eq("ftp://example.com:21") + end +end + +describe Addressable::URI, "when given the input " + + "'example.com/path/to/resource'" do + before do + @input = "example.com/path/to/resource" + end + + it "should heuristically parse to 'http://example.com/path/to/resource'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to 
eq("http://example.com/path/to/resource") + end +end + +describe Addressable::URI, "when given the input " + + "'http:///example.com'" do + before do + @input = "http:///example.com" + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input which "\ + "start with digits and has specified port" do + before do + @input = "7777.example.org:8089" + end + + it "should heuristically parse to 'http://7777.example.org:8089'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("http://7777.example.org:8089") + end +end + +describe Addressable::URI, "when given the input " + + "'feed:///example.com'" do + before do + @input = "feed:///example.com" + end + + it "should heuristically parse to 'feed://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "'file://localhost/path/to/resource/'" do + before do + @input = "file://localhost/path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://path/to/resource/'" do + before do + @input = "file://path/to/resource/" + end + + it "should heuristically parse to 'file:///path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:///path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'file://///path/to/resource/'" do + before do + @input = "file:///////path/to/resource/" + end + + it "should heuristically parse to 'file:////path/to/resource/'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("file:////path/to/resource/") + end +end + +describe Addressable::URI, "when given the input " + + "'feed://http://example.com'" do + before do + @input = "feed://http://example.com" + end + + it "should heuristically parse to 'feed:http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("feed:http://example.com") + end +end + +describe Addressable::URI, "when given the input " + + "::URI.parse('http://example.com')" do + before do + @input = ::URI.parse('http://example.com') + end + + it "should heuristically parse to 'http://example.com'" do + @uri = Addressable::URI.heuristic_parse(@input) + expect(@uri.to_s).to eq("http://example.com") + end +end + +describe Addressable::URI, "when given the input: 'user@domain.com'" do + before do + @input = "user@domain.com" + end + + context "for heuristic parse" do + it "should remain 'mailto:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("mailto:#{@input}") + expect(uri.to_s).to eq("mailto:user@domain.com") + end + + it "should have a scheme of 'mailto'" do + uri = Addressable::URI.heuristic_parse(@input) + expect(uri.to_s).to eq("mailto:user@domain.com") + expect(uri.scheme).to eq("mailto") + end + + it "should remain 'acct:user@domain.com'" do + uri = Addressable::URI.heuristic_parse("acct:#{@input}") + expect(uri.to_s).to eq("acct:user@domain.com") + end + + context "HTTP" do + before do + @uri = Addressable::URI.heuristic_parse("http://#{@input}/") + end + + it "should remain 'http://user@domain.com/'" do + 
expect(@uri.to_s).to eq("http://user@domain.com/") + end + + it "should have the username 'user' for HTTP basic authentication" do + expect(@uri.user).to eq("user") + end + end + end +end + +describe Addressable::URI, "when assigning query values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign {:a => 'a', :b => ['c', 'd', 'e']}" do + @uri.query_values = {:a => "a", :b => ["c", "d", "e"]} + expect(@uri.query).to eq("a=a&b=c&b=d&b=e") + end + + it "should raise an error attempting to assign {'a' => {'b' => ['c']}}" do + expect do + @uri.query_values = { 'a' => {'b' => ['c'] } } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:b => '2', :a => {:c => '1'}}" do + expect do + @uri.query_values = {:b => '2', :a => {:c => '1'}} + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => 'c', :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => "a", :b => [{:c => "c", :d => "d"}, {:e => "e", :f => "f"}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => [{:c => true, :d => 'd'}, " + + "{:e => 'e', :f => 'f'}]}" do + expect do + @uri.query_values = { + :a => 'a', :b => [{:c => true, :d => 'd'}, {:e => 'e', :f => 'f'}] + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should raise an error attempting to assign " + + "{:a => 'a', :b => {:c => true, :d => 'd'}}" do + expect do + @uri.query_values = { + :a => 'a', :b => {:c => true, :d => 'd'} + } + end.to raise_error(TypeError) + end + + it "should correctly assign {:a => 1, :b => 1.5}" do + @uri.query_values = { :a => 1, :b => 1.5 } + expect(@uri.query).to eq("a=1&b=1.5") + end + + it "should raise an error attempting to assign " + + "{:z => 1, :f => [2, {999.1 => [3,'4']}, ['h', 'i']], " + + ":a => {:b => ['c', 'd'], :e => true, :y => 0.5}}" do + expect do + @uri.query_values = { + :z => 1, + :f => [ 2, {999.1 => [3,'4']}, ['h', 'i'] ], + :a => { :b => ['c', 'd'], :e => true, :y => 0.5 } + } + end.to raise_error(TypeError) + end + + it "should correctly assign {}" do + @uri.query_values = {} + expect(@uri.query).to eq('') + end + + it "should correctly assign nil" do + @uri.query_values = nil + expect(@uri.query).to eq(nil) + end + + it "should correctly sort {'ab' => 'c', :ab => 'a', :a => 'x'}" do + @uri.query_values = {'ab' => 'c', :ab => 'a', :a => 'x'} + expect(@uri.query).to eq("a=x&ab=a&ab=c") + end + + it "should correctly assign " + + "[['b', 'c'], ['b', 'a'], ['a', 'a']]" do + # Order can be guaranteed in this format, so preserve it. 
+ @uri.query_values = [['b', 'c'], ['b', 'a'], ['a', 'a']] + expect(@uri.query).to eq("b=c&b=a&a=a") + end + + it "should preserve query string order" do + query_string = (('a'..'z').to_a.reverse.map { |e| "#{e}=#{e}" }).join("&") + @uri.query = query_string + original_uri = @uri.to_s + @uri.query_values = @uri.query_values(Array) + expect(@uri.to_s).to eq(original_uri) + end + + describe 'when a hash with mixed types is assigned to query_values' do + it 'should not raise an error' do + skip 'Issue #94' + expect { subject.query_values = { "page" => "1", :page => 2 } }.to_not raise_error + end + end +end + +describe Addressable::URI, "when assigning path values" do + before do + @uri = Addressable::URI.new + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + expect(@uri.path).to eq("acct:bob@sporkmonger.com") + expect(@uri.normalize.to_str).to eq("acct%2Fbob@sporkmonger.com") + expect { @uri.to_s }.to raise_error( + Addressable::URI::InvalidURIError + ) + end + + it "should correctly assign paths containing colons" do + @uri.path = "/acct:bob@sporkmonger.com" + @uri.authority = "example.com" + expect(@uri.normalize.to_str).to eq("//example.com/acct:bob@sporkmonger.com") + end + + it "should correctly assign paths containing colons" do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "something" + expect(@uri.normalize.to_str).to eq("something:acct:bob@sporkmonger.com") + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.scheme = "http" + @uri.host = "example.com" + @uri.path = "acct:bob@sporkmonger.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should not allow relative paths to be assigned on absolute URIs" do + expect do + @uri.path = "acct:bob@sporkmonger.com" + @uri.scheme = "http" + @uri.host = "example.com" + end.to raise_error(Addressable::URI::InvalidURIError) + end + + it "should allow opaque paths containing colons to be assigned when the scheme is 'urn'" do + expect do + @uri.path = "uuid:0b3ecf60-3f93-11df-a9c3-001f5bfffe12" + @uri.scheme = "urn" + end.not_to raise_error + end +end + +describe Addressable::URI, "when initializing a subclass of Addressable::URI" do + before do + @uri = Class.new(Addressable::URI).new + end + + it "should have the same class after being parsed" do + expect(@uri.class).to eq(Addressable::URI.parse(@uri).class) + end + + it "should have the same class as its duplicate" do + expect(@uri.class).to eq(@uri.dup.class) + end + + it "should have the same class after being normalized" do + expect(@uri.class).to eq(@uri.normalize.class) + end + + it "should have the same class after being merged" do + expect(@uri.class).to eq(@uri.merge(:path => 'path').class) + end + + it "should have the same class after being joined" do + expect(@uri.class).to eq(@uri.join('path').class) + end +end + +describe Addressable::URI, "when initialized in a non-main `Ractor`" do + it "should have the same value as if used in the main `Ractor`" do + pending("Ruby 3.0+ for `Ractor` support") unless defined?(Ractor) + main = Addressable::URI.parse("http://example.com") + expect( + Ractor.new { Addressable::URI.parse("http://example.com") }.take + ).to eq(main) + end +end + +describe Addressable::URI, "when deferring validation" do + subject(:deferred) { uri.instance_variable_get(:@validation_deferred) } + + let(:uri) { Addressable::URI.parse("http://example.com") } + + it "defers validation within the block" do + uri.defer_validation do + expect(deferred).to be 
true + end + end + + it "always resets deferral afterward" do + expect { uri.defer_validation { raise "boom" } }.to raise_error("boom") + expect(deferred).to be false + end + + it "returns nil" do + res = uri.defer_validation {} + expect(res).to be nil + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/spec_helper.rb new file mode 100644 index 0000000..bd8e395 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/spec/spec_helper.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'bundler/setup' +require 'rspec/its' + +begin + require 'coveralls' + Coveralls.wear! do + add_filter "spec/" + add_filter "vendor/" + end +rescue LoadError + warn "warning: coveralls gem not found; skipping Coveralls" + require 'simplecov' + SimpleCov.start do + add_filter "spec/" + add_filter "vendor/" + end +end if Gem.loaded_specs.key?("simplecov") + +class TestHelper + def self.native_supported? + mri = RUBY_ENGINE == "ruby" + windows = RUBY_PLATFORM.include?("mingw") + + mri && !windows + end +end + +RSpec.configure do |config| + config.warnings = true + config.filter_run_when_matching :focus +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/clobber.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/clobber.rake new file mode 100644 index 0000000..a9e32b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/clobber.rake @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +desc "Remove all build products" +task "clobber" diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/gem.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/gem.rake new file mode 100644 index 0000000..24d9714 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/gem.rake @@ -0,0 +1,95 @@ +# frozen_string_literal: true + +require "rubygems/package_task" + +namespace :gem do + GEM_SPEC = Gem::Specification.new do |s| + s.name = PKG_NAME + s.version = PKG_VERSION + s.summary = PKG_SUMMARY + s.description = PKG_DESCRIPTION + + s.files = PKG_FILES.to_a + + s.extra_rdoc_files = %w( README.md ) + s.rdoc_options.concat ["--main", "README.md"] + + if !s.respond_to?(:add_development_dependency) + puts "Cannot build Gem with this version of RubyGems." 
+ exit(1) + end + + s.required_ruby_version = ">= 2.2" + + s.add_runtime_dependency "public_suffix", ">= 2.0.2", "< 6.0" + s.add_development_dependency "bundler", ">= 1.0", "< 3.0" + + s.require_path = "lib" + + s.author = "Bob Aman" + s.email = "bob@sporkmonger.com" + s.homepage = "https://github.com/sporkmonger/addressable" + s.license = "Apache-2.0" + s.metadata = { + "changelog_uri" => "https://github.com/sporkmonger/addressable/blob/main/CHANGELOG.md" + } + end + + Gem::PackageTask.new(GEM_SPEC) do |p| + p.gem_spec = GEM_SPEC + p.need_tar = true + p.need_zip = true + end + + desc "Generates .gemspec file" + task :gemspec do + spec_string = GEM_SPEC.to_ruby + File.open("#{GEM_SPEC.name}.gemspec", "w") do |file| + file.write spec_string + end + end + + desc "Show information about the gem" + task :debug do + puts GEM_SPEC.to_ruby + end + + desc "Install the gem" + task :install => ["clobber", "gem:package"] do + sh "#{SUDO} gem install --local pkg/#{GEM_SPEC.full_name}" + end + + desc "Uninstall the gem" + task :uninstall do + installed_list = Gem.source_index.find_name(PKG_NAME) + if installed_list && + (installed_list.collect { |s| s.version.to_s}.include?(PKG_VERSION)) + sh( + "#{SUDO} gem uninstall --version '#{PKG_VERSION}' " + + "--ignore-dependencies --executables #{PKG_NAME}" + ) + end + end + + desc "Reinstall the gem" + task :reinstall => [:uninstall, :install] + + desc "Package for release" + task :release => ["gem:package", "gem:gemspec"] do |t| + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PROJ.version}" if v != PKG_VERSION + pkg = "pkg/#{GEM_SPEC.full_name}" + + changelog = File.open("CHANGELOG.md") { |file| file.read } + + puts "Releasing #{PKG_NAME} v. #{PKG_VERSION}" + Rake::Task["git:tag:create"].invoke + end +end + +desc "Alias to gem:package" +task "gem" => "gem:package" + +task "gem:release" => "gem:gemspec" + +task "clobber" => ["gem:clobber_package"] diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/git.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/git.rake new file mode 100644 index 0000000..1238c8d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/git.rake @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +namespace :git do + namespace :tag do + desc "List tags from the Git repository" + task :list do + tags = `git tag -l` + tags.gsub!("\r", "") + tags = tags.split("\n").sort {|a, b| b <=> a } + puts tags.join("\n") + end + + desc "Create a new tag in the Git repository" + task :create do + changelog = File.open("CHANGELOG.md", "r") { |file| file.read } + puts "-" * 80 + puts changelog + puts "-" * 80 + puts + + v = ENV["VERSION"] or abort "Must supply VERSION=x.y.z" + abort "Versions don't match #{v} vs #{PKG_VERSION}" if v != PKG_VERSION + + git_status = `git status` + if git_status !~ /^nothing to commit/ + abort "Working directory isn't clean." + end + + tag = "#{PKG_NAME}-#{PKG_VERSION}" + msg = "Release #{PKG_NAME}-#{PKG_VERSION}" + + existing_tags = `git tag -l #{PKG_NAME}-*`.split('\n') + if existing_tags.include?(tag) + warn("Tag already exists, deleting...") + unless system "git tag -d #{tag}" + abort "Tag deletion failed." + end + end + puts "Creating git tag '#{tag}'..." + unless system "git tag -a -m \"#{msg}\" #{tag}" + abort "Tag creation failed." 
+ end + end + end +end + +task "gem:release" => "git:tag:create" diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/metrics.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/metrics.rake new file mode 100644 index 0000000..107cc24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/metrics.rake @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +namespace :metrics do + task :lines do + lines, codelines, total_lines, total_codelines = 0, 0, 0, 0 + for file_name in FileList["lib/**/*.rb"] + f = File.open(file_name) + while line = f.gets + lines += 1 + next if line =~ /^\s*$/ + next if line =~ /^\s*#/ + codelines += 1 + end + puts "L: #{sprintf("%4d", lines)}, " + + "LOC #{sprintf("%4d", codelines)} | #{file_name}" + total_lines += lines + total_codelines += codelines + + lines, codelines = 0, 0 + end + + puts "Total: Lines #{total_lines}, LOC #{total_codelines}" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/profile.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/profile.rake new file mode 100644 index 0000000..b697d48 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/profile.rake @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +namespace :profile do + desc "Profile Template match memory allocations" + task :template_match_memory do + require "memory_profiler" + require "addressable/template" + + start_at = Time.now.to_f + template = Addressable::Template.new("http://example.com/{?one,two,three}") + report = MemoryProfiler.report do + 30_000.times do + template.match( + "http://example.com/?one=one&two=floo&three=me" + ) + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end + + desc "Profile URI parse memory allocations" + task :memory do + require "memory_profiler" + require "addressable/uri" + if ENV["IDNA_MODE"] == "pure" + Addressable.send(:remove_const, :IDNA) + load "addressable/idna/pure.rb" + end + + start_at = Time.now.to_f + report = MemoryProfiler.report do + 30_000.times do + Addressable::URI.parse( + "http://google.com/stuff/../?with_lots=of&params=asdff#!stuff" + ).normalize + end + end + end_at = Time.now.to_f + print_options = { scale_bytes: true, normalize_paths: true } + puts "\n\n" + + if ENV["CI"] + report.pretty_print(**print_options) + else + t_allocated = report.scale_bytes(report.total_allocated_memsize) + t_retained = report.scale_bytes(report.total_retained_memsize) + + puts "Total allocated: #{t_allocated} (#{report.total_allocated} objects)" + puts "Total retained: #{t_retained} (#{report.total_retained} objects)" + puts "Took #{end_at - start_at} seconds" + + FileUtils.mkdir_p("tmp") + report.pretty_print(to_file: "tmp/memprof.txt", **print_options) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/rspec.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/rspec.rake new file mode 100644 index 0000000..e3d9f01 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/rspec.rake @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require "rspec/core/rake_task" + +namespace :spec do + RSpec::Core::RakeTask.new(:simplecov) do |t| + t.pattern = FileList['spec/**/*_spec.rb'] + t.rspec_opts = %w[--color --format documentation] unless ENV["CI"] + end + + namespace :simplecov do + desc "Browse the code coverage report." + task :browse => "spec:simplecov" do + require "launchy" + Launchy.open("coverage/index.html") + end + end +end + +desc "Alias to spec:simplecov" +task "spec" => "spec:simplecov" + +task "clobber" => ["spec:clobber_simplecov"] diff --git a/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/yard.rake b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/yard.rake new file mode 100644 index 0000000..515f960 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/addressable-2.8.4/tasks/yard.rake @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require "rake" + +begin + require "yard" + require "yard/rake/yardoc_task" + + namespace :doc do + desc "Generate Yardoc documentation" + YARD::Rake::YardocTask.new do |yardoc| + yardoc.name = "yard" + yardoc.options = ["--verbose", "--markup", "markdown"] + yardoc.files = FileList[ + "lib/**/*.rb", "ext/**/*.c", + "README.md", "CHANGELOG.md", "LICENSE.txt" + ].exclude(/idna/) + end + end + + task "clobber" => ["doc:clobber_yard"] + + desc "Alias to doc:yard" + task "doc" => "doc:yard" +rescue LoadError + # If yard isn't available, it's not the end of the world + desc "Alias to doc:rdoc" + task "doc" => "doc:rdoc" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.rspec b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.rspec new file mode 100644 index 0000000..397921f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.rspec @@ -0,0 +1,2 @@ +--color +--format d diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.travis.yml new file mode 100644 index 0000000..4c090ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/.travis.yml @@ -0,0 +1,33 @@ +language: ruby + +branches: + only: + - master + +rvm: +- 1.9.3 +- 2.0 +- 2.1 +- 2.2 +- 2.3 +- 2.4 +- 2.5 +- jruby +- rbx-3 + +matrix: + allow_failures: + - rvm: rbx-3 + - rvm: jruby + include: + - rvm: 1.8.7 + dist: precise + + +cache: bundler + +before_script: + - wget https://alg.li/algolia-keys && chmod +x algolia-keys + +script: + - if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [[ ! 
"$TRAVIS_PULL_REQUEST_SLUG" =~ ^algolia\/ ]]; then eval $(./algolia-keys export) && bundle exec rspec --tag ~maintainers_only; else bundle exec rspec; fi diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/CHANGELOG.md new file mode 100644 index 0000000..26969f8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/CHANGELOG.md @@ -0,0 +1,454 @@ +# ChangeLog + +## Unreleased + +## [1.27.4](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.3...1.27.4) (2020-09-16) + +**Fixed** + +* Retrieve all objects when using `copy_index` from `AccountClient` class ([399](https://github.com/algolia/algoliasearch-client-ruby/pull/399)) + +## [1.27.3](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.2...1.27.3) (2020-06-03) + +**Fixed** + +* Replace expired certificate within embedded certificate chain ([9087dd1](https://github.com/algolia/algoliasearch-client-ruby/commit/9087dd14a97bf77c9391a3360c4803edf686086d)) + +## [1.27.2](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.1...1.27.2) (2020-04-28) + +**Fixed** + +* In `search_user_id`, retrieve param `cluster` instead of `clusterName`. [368](https://github.com/algolia/algoliasearch-client-ruby/issues/368) + +## [1.27.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.27.0...1.27.1) (2019-09-26) + +**Fixed** + +* Update `Algolia::Index.exists` method to `Algolia::Index.exists?`. [364](https://github.com/algolia/algoliasearch-client-ruby/issues/364) + +## [1.27.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.27.0) (2019-09-16) + +**Added** + +* Introduce `Algolia::Index.exists` method. [358](https://github.com/algolia/algoliasearch-client-ruby/issues/358) + + Check whether an index exists or not. + +* Introduce `Algolia::Index.find_object` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Find object by the given condition. + +* Introduce `Algolia::Index.get_object_position` method. [359](https://github.com/algolia/algoliasearch-client-ruby/issues/359) + + Retrieve the given object position in a set of results. + +* Introduce `Algolia.get_secured_api_key_remaining_validity` method. [361](https://github.com/algolia/algoliasearch-client-ruby/issues/361) + + Returns the remaining validity time for the given API key in seconds. + + +## [1.26.1](https://github.com/algolia/algoliasearch-client-ruby/compare/1.26.0...1.26.1) (2019-07-31) + +### Chore + +- stop using coveralls because of a GPL-licensed transitive dep ([d2fbe8c](https://github.com/algolia/algoliasearch-client-ruby/commit/d2fbe8c)) + + +[1.26.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.26.0) (2019-02-12) + +**Added** + +* Introduce `Algolia.restore_api_key` method. + + If you delete your API key by mistake, you can now restore it via + this new method. This especially useful if this key is used in a + mobile app or somewhere you can't update easily. 
+ + +## [1.25.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.2) (2018-12-19) + +## [1.25.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.1) (2018-12-19) + +**Fixed** + +* Missing `insights.rb` in gemspec - [7d2f3ab](https://github.com/algolia/algoliasearch-client-ruby/commit/7d2f3abe6e4338f0f7364f6f52ac1d371f066464) + +## [1.25.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.25.0) (2018-12-19) + +**Added** + +* Introduce Insights client to send events to Algolia Insights API - [326](https://github.com/algolia/algoliasearch-client-ruby/issues/326) + +* Add `multiple_get_objects` to retrieve objects by objectID across multiple indices - [329](https://github.com/algolia/algoliasearch-client-ruby/issues/329) + +**Modified** + +* Use the correct `hitsPerPage` key when exporting index resources - [319](https://github.com/algolia/algoliasearch-client-ruby/issues/319) + +## [1.24.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.24.0) (2018-11-28) + +* Feat(adds-account-client-copy-index): adds `copy_index` to account client ([#324](https://github.com/algolia/algoliasearch-client-ruby/pull/324)) +* Feat(replace-all-methods): adds `replace_all_rules`, `replace_all_objects` and `replace_all_synonyms` to search index ([#323](https://github.com/algolia/algoliasearch-client-ruby/pull/323)) +* Feat(scoped-copy-methods): adds `copy_settings`, `copy_synonyms` and `copy_rules` to search client ([#322](https://github.com/algolia/algoliasearch-client-ruby/pull/322)) + +## [1.23.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.2) (2018-06-19) + +* Fix(analytics): gem without new analytics class (#306) + +## [1.23.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.23.0) (2018-06-19) + +* Feat(analytics): introduce new analytics class +* Chore(rake): use unshift to keep compat with older ruby versions +* Ruby version must be after client version in ua +* Fix ua tests with new format +* Rewrite ua +* Feat(ua): add ruby version +* Fix(syntax): this isn't php +* Tests(mcm): use unique userid everytime +* Tests(mcm): introduce auto_retry for more stable tests +* Feat(client): expose waittask in the client (#302) +* Fix(travis): always test on the latest patches (#295) + +## [1.22.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.22.0) (2018-05-30) + +* Rename license file (#297) +* Fix release task (#294) +* Introduce multi cluster management (#285) +* Fix(browse): ensure cursor is passed in the body (#288) +* Chore(md): update contribution-related files (#293) + +## [1.21.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.21.0) (2018-05-24) + +* Fix(tests): fix warning for unspecified exception (#287) +* Fix release task missing github link (#291) +* Api review (#292) + +## [1.20.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.1) (2018-05-15) + +* Fix changelog link in gemspec (#290) +* Utils: move to changelog.md and add rake task for release (#289) + +## [1.20.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.20.0) (2018-05-07) + +* Feat: deprecate api keys methods on index in favor of client ones (#286) +* Chore(gemfile): remove useless dependencies (#280) +* Fix(env): adding default env var (#279) +* Chore(travis): test against Rubinius 3 (#281) +* Fix: prevent saving a rule with an empty `objectID` (#283) + +## 
[1.19.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.2) (2018-04-03) + +* Fix `Algolia.delete_index` wrong number of arguments (#277) + +## [1.19.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.1) (2017-12-18) + +* Fix hard dependency on `hashdiff` (#262) + +## [1.19.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.19.0) (2017-12-15) + +* Add request options to any method using API calls (#213) +* Add `export_synonyms` index method (#260) +* Add `export_rules` index method (#261) + +## [1.18.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.5) (2017-12-07) + +* Fix missing requirement + +## [1.18.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.4) (2017-12-06) + +* Remove remaining unnecessary requirements (#256) +* Remove Gemfile.lock (#257) + +## [1.18.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.3) (2017-12-04) + +* Remove Bundler and RubyGems requirements (#253) + +## [1.18.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.2) (2017-11-28) + +* Add (undocumented) gzip option to disable gzip (#240) + +## [1.18.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.1) (2017-11-15) + +* Fix `get_logs` always returning type `all` (#244) +* New scopes to `copy_index` method (#243) + +## [1.18.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.18.0) (2017-11-02) + +* Allow to reuse the webmocks using `Algolia::WebMock.mock!` (#256) + +## [1.17.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.17.0) (2017-10-10) + +* New `delete_by` method + +## [1.16.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.16.0) (2017-09-14) + +* New Query Rules API + +## [1.15.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.1) (2017-08-17) + +* Fixed regression introduced in 1.15.0 + +## [1.15.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.15.0) (2017-08-17) + +* Make `delete_by_query` not `wait_task` by default (also, make it mockable) +* Add a new `delete_by_query!` doing the last `wait_task` + +## [1.14.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.14.0) (2017-07-31) + +* Ability to override the underlying user-agent + +## [1.13.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.13.0) (2017-03-17) + +* Add a `index.get_task_status(taskID)` method (#199) + +## [1.12.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.7) (2017-03-01) + +* Renamed all `*_user_key` methods to `*_api_key` + +## [1.12.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.6) (2017-01-25) + +* Upgraded `httpclient` to 2.8.3 + +## [1.12.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.5) (2016-12-07) + +* Fixed retry strategy not keeping the `current_host` first (#163) + +## [1.12.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.4) (2016-12-07) + +* Fix DNS tests + +## [1.12.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.3) (2016-12-06) + +* Allow for multiple clients on different app ids on the same thread + +## [1.12.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.2) (2016-12-05) + +* Fix client scoped methods + +## [1.12.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.1) (2016-11-25) + +* Rename `search_facet` to 
`search_for_facet_values` + +## [1.12.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.12.0) (2016-10-31) + +* Add `search_facet` + +## [1.11.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.11.0) (2016-08-21) + +* Upgraded to httpclient 2.8.1 to avoid resetting the keep-alive while changing timeouts + +## [1.10.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.10.0) (2016-07-11) + +* `{get,set}_settings` now take optional custom query parameters + +## [1.9.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.9.0) (2016-06-17) + +* New synonyms API + +## [1.8.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.1) (2016-04-14) + +* Ensure we're using an absolute path for the `ca-bundle.crt` file (could fix some `OpenSSL::X509::StoreError: system lib` errors) + +## [1.8.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.8.0) (2016-04-06) + +* Upgraded to `httpclient` 2.7.1 (includes ruby 2.3.0 deprecation fixes) +* Upgraded WebMock URLs + +## [1.7.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.7.0) (2016-01-09) + +* New `generate_secured_api_key` embedding the filters in the resulting key + +## [1.6.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.1) (2015-08-01) + +* Search queries are now using POST requests + +## [1.6.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.6.0) (2015-07-19) + +* Ability to instantiate multiple API clients in the same process (was using a class variable before). + +## [1.5.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.1) (2015-07-14) + +* Ability to retrieve a single page from a cursor with `browse_from` + +## [1.5.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.5.0) (2015-06-05) + +* New cursor-based `browse()` implementation taking query parameters + +## [1.4.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.3) (2015-05-27) + +* Do not call `WebMock.disable!` in the helper + +## [1.4.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.2) (2015-05-04) + +* Add new methods to add/update api key +* Add batch method to target multiple indices +* Add strategy parameter for the multipleQueries +* Add new method to generate secured api key from query parameters + +## [1.4.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.1) (2015-04-10) + +* Force the default connect/read/write/search/batch timeouts to Algolia-specific values + +## [1.4.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.4.0) (2015-03-17) + +* High-available DNS: search queries are now targeting `APPID-DSN.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. +* High-available DNS: Indexing queries are targeting `APPID.algolia.net` first, then the main cluster using NSOne, then the main cluster using Route53. 
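+
+As an illustration of the secured-key flow from the 1.7.0 entry above, a minimal sketch; the key names, application ID, and the filter value are hypothetical placeholders:
+
+```ruby
+require 'algoliasearch'
+
+# Embed a filter into a derived key; searches made with that key are
+# restricted server-side to records matching the embedded filter.
+secured_key = Algolia.generate_secured_api_key('YourSearchOnlyApiKey', filters: 'user_id:42')
+
+# A client initialized with the secured key only sees the matching records.
+Algolia.init(application_id: 'YourApplicationID', api_key: secured_key)
+results = Algolia::Index.new('contacts').search('essie')
+```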
+ +## [1.3.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.1) (2014-11-29) + +* Fixed wrong deployed version (1.3.0 was based on 1.2.13 instead of 1.2.14) + +## [1.3.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.3.0) (2014-11-29) + +* Use `algolia.net` domain instead of `algolia.io` + +## [1.2.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.14) (2014-11-10) + +* Force the underlying `httpclient` dependency to be >= 2.4 in the gemspec as well +* Ability to force the SSL version + +## [1.2.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.13) (2014-10-22) + +* Fix the loop on hosts to retry when the http code is different than 200, 201, 400, 403, 404 + +## [1.2.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.12) (2014-10-08) + +* Upgrade to `httpclient` 2.4 +* Do not reset the timeout on each request + +## [1.2.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.11) (2014-09-14) + +* Ability to update API keys + +## [1.2.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.10) (2014-08-22) + +* Using Digest to remove "Digest::Digest is deprecated; Use Digest" warning (author: @dglancy) + +## [1.2.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.9) (2014-07-10) + +* Expose `connect_timeout`, `receive_timeout` and `send_timeout` +* Add new `delete_by_query` method to delete all objects matching a specific query +* Add new `get_objects` method to retrieve a list of objects from a single API call +* Add a helper to perform disjunctive faceting + +## [1.2.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.8) (2014-03-27) + +* Catch all exceptions before retrying with another host + +## [1.2.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.7) (2014-03-24) + +* Ruby 1.8 compatibility + +## [1.2.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.6) (2014-03-19) + +* Raise an exception if no `APPLICATION_ID` is provided +* Ability to get last API call errors +* Ability to send multiple queries using a single API call +* Secured API keys generation is now based on secured HMAC-SHA256 + +## [1.2.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.5) (2014-02-24) + +* Ability to generate secured API key from a list of tags + optional `user_token` +* Ability to specify a list of indexes targeted by the user key + +## [1.2.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.4) (2014-02-21) + +* Add `delete_objects` + +## [1.2.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.3) (2014-02-10) + +* `add_object`: POST request if `objectID` is `nil` OR `empty` + +## [1.2.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.2) (2014-01-11) + +* Expose `batch` requests + +## [1.2.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.1) (2014-01-07) + +* Removed `jeweler` since it doesn't support platform specific deps (see https://github.com/technicalpickles/jeweler/issues/170) + +## [1.2.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.2.0) (2014-01-07) + +* Removed `curb` dependency and switched to `httpclient` to avoid fork-safety issue (see issue #5) + +## [1.1.18](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.18) (2014-01-06) + +* Fixed batch request builder (broken since last refactoring) + +## 
[1.1.17](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.17) (2014-01-02) + +* Ability to use IP rate limit behind a proxy forwarding the end-user's IP +* Add documentation for the new `attributeForDistinct` setting and `distinct` search parameter + +## [1.1.16](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.16) (2013-12-16) + +* Add arguments type-checking +* Normalize save_object/partial_update/add_object signatures +* Force dependencies versions + +## [1.1.15](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.15) (2013-12-16) + +* Embed ca-bundle.crt + +## [1.1.14](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.14) (2013-12-11) + +* Added `index.add_user_key(acls, validity, rate_limit, maxApiCalls)` + +## [1.1.13](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.13) (2013-12-10) + +* WebMock integration + +## [1.1.12](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.12) (2013-12-05) + +* Add `browse` command + +## [1.1.11](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.11) (2013-11-29) + +* Remove rubysl (rbx required dependencies) + +## [1.1.10](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.10) (2013-11-29) + +* Fixed gzip handling bug + +## [1.1.9](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.9) (2013-11-28) + +* Added gzip support + +## [1.1.8](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.8) (2013-11-26) + +* Add `partial_update_objects` method + +## [1.1.7](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.7) (2013-11-08) + +* Search params: encode array-based parameters (`tagFilters`, `facetFilters`, ...) 
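+
+A short sketch of the array-based search parameters mentioned in the 1.1.7 entry; the index name and filter values are hypothetical:
+
+```ruby
+require 'algoliasearch'
+
+index = Algolia::Index.new('contacts')
+# The client encodes array-valued parameters such as tagFilters and
+# facetFilters for the wire; each string here is a single filter.
+index.search('essie', { tagFilters: ['public'], facetFilters: ['state:CA'] })
+```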
+ +## [1.1.6](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.6) (2013-11-07) + +* Index: `clear` and `clear!` methods can now be used to delete the whole content of an index +* User keys: plug new `maxQueriesPerIPPerHour` and `maxHitsPerQuery` parameters + +## [1.1.5](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.5) (2013-10-17) + +* Version is now part of the user-agent + +## [1.1.4](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.4) (2013-10-17) + +* Fixed `wait_task` not sleeping at all + +## [1.1.3](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.3) (2013-10-15) + +* Fixed thread-safety issues +* Curl sessions are now thread-local + +## [1.1.2](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.2) (2013-10-02) + +* Fixed instance/class method conflict + +## [1.1.1](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.1) (2013-10-01) + +* Updated documentation +* Plug copy/move index + +## [1.1.0](https://github.com/algolia/algoliasearch-client-ruby/releases/tag/1.1.0) (2013-09-17) + +* Initial import diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile new file mode 100644 index 0000000..e05662e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile @@ -0,0 +1,28 @@ +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Load algoliasearch.gemspec dependencies +gemspec + +# See https://github.com/algolia/algoliasearch-client-ruby/pull/257/files/36bcd0b1c4d05776dcbdb362c15a609c81f41cde +if Gem::Version.new(RUBY_VERSION) <= Gem::Version.new('1.9.3') + gem 'hashdiff', '< 0.3.6' # Hashdiff 0.3.6 no longer supports Ruby 1.8 + gem 'highline', '< 1.7.0' + gem 'mime-types', '< 2.0' + gem 'rubysl', '~> 2.0', :platform => :rbx +else + gem 'rubysl', '~> 2.2', :platform => :rbx +end + +group :development do + gem 'rake' + gem 'rdoc' + gem 'travis' +end + +group :test do + gem 'rspec', '>= 2.5.0' + gem 'webmock' + gem 'simplecov' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile.lock new file mode 100644 index 0000000..998dc7c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Gemfile.lock @@ -0,0 +1,99 @@ +PATH + remote: . 
+ specs: + algoliasearch (1.27.4) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + +GEM + remote: https://rubygems.org/ + specs: + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + backports (3.18.2) + connection_pool (2.2.3) + crack (0.4.3) + safe_yaml (~> 1.0.0) + diff-lcs (1.4.4) + docile (1.3.2) + ethon (0.12.0) + ffi (>= 1.3.0) + faraday (0.17.3) + multipart-post (>= 1.2, < 3) + faraday_middleware (0.14.0) + faraday (>= 0.7.4, < 1.0) + ffi (1.13.1) + gh (0.14.0) + addressable + backports + faraday (~> 0.8) + multi_json (~> 1.0) + net-http-persistent (>= 2.7) + net-http-pipeline + hashdiff (1.0.1) + highline (1.7.10) + httpclient (2.8.3) + json (2.3.1) + launchy (2.5.0) + addressable (~> 2.7) + multi_json (1.15.0) + multipart-post (2.1.1) + net-http-persistent (4.0.0) + connection_pool (~> 2.2) + net-http-pipeline (1.0.1) + public_suffix (4.0.6) + pusher-client (0.6.2) + json + websocket (~> 1.0) + rake (13.0.1) + rdoc (6.2.1) + rspec (3.9.0) + rspec-core (~> 3.9.0) + rspec-expectations (~> 3.9.0) + rspec-mocks (~> 3.9.0) + rspec-core (3.9.2) + rspec-support (~> 3.9.3) + rspec-expectations (3.9.2) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-mocks (3.9.1) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.9.0) + rspec-support (3.9.3) + safe_yaml (1.0.5) + simplecov (0.19.0) + docile (~> 1.1) + simplecov-html (~> 0.11) + simplecov-html (0.12.2) + travis (1.8.13) + backports + faraday (~> 0.9) + faraday_middleware (~> 0.9, >= 0.9.1) + gh (~> 0.13) + highline (~> 1.6) + launchy (~> 2.1) + pusher-client (~> 0.4) + typhoeus (~> 0.6, >= 0.6.8) + typhoeus (0.8.0) + ethon (>= 0.8.0) + webmock (3.8.3) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff (>= 0.4.0, < 2.0.0) + websocket (1.2.8) + +PLATFORMS + ruby + +DEPENDENCIES + algoliasearch! + rake + rdoc + rspec (>= 2.5.0) + rubysl (~> 2.2) + simplecov + travis + webmock + +BUNDLED WITH + 1.17.2 diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/LICENSE b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/LICENSE new file mode 100644 index 0000000..fddf416 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-Present Algolia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/README.md b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/README.md new file mode 100644 index 0000000..a19b944 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/README.md @@ -0,0 +1,61 @@ +

+Algolia for Ruby
+
+The perfect starting point to integrate Algolia within your Ruby project
+
+Build Status • Gem Version • License
+
+Documentation • Rails • Community Forum • Stack Overflow • Report a bug • FAQ • Support
+
+ +## ✨ Features + +- Thin & minimal low-level HTTP client to interact with Algolia's API +- Supports Ruby `^1.8.7`. + +## 💡 Getting Started + +First, install the Algolia Ruby API Client via the [RubyGems](https://rubygems.org/) package manager: +```bash +gem install algoliasearch +``` + +Then, create objects on your index: + + +```ruby +Algolia.init(application_id: 'YourApplicationID', + api_key: 'YourAPIKey') +index = Algolia::Index.new('your_index_name') + +index.save_objects([objectID: 1, name: 'Foo']) +``` + +Finally, you may begin searching for an object using the `search` method: +```ruby +objects = index.search('Fo') +``` + +For full documentation, visit the **[Algolia Ruby API Client](https://www.algolia.com/doc/api-client/getting-started/install/ruby/)**. + +## ❓ Troubleshooting + +Encountering an issue? Before reaching out to support, we recommend heading to our [FAQ](https://www.algolia.com/doc/api-client/troubleshooting/faq/ruby/) where you will find answers for the most common issues and gotchas with the client. + +## 📄 License + +Algolia Ruby API Client is open-sourced software licensed under the [MIT license](LICENSE.md). diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Rakefile b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Rakefile new file mode 100644 index 0000000..b328a8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/Rakefile @@ -0,0 +1,111 @@ +# encoding: utf-8 + +require 'bundler/gem_tasks' + +begin + Bundler.setup(:default, :development) +rescue Bundler::BundlerError => e + $stderr.puts e.message + $stderr.puts "Run `bundle install` to install missing gems" + exit e.status_code +end +require 'rake' + +require File.expand_path('../lib/algolia/version', __FILE__) + +require 'rake/testtask' +Rake::TestTask.new(:test) do |test| + test.libs << 'lib' << 'test' + test.pattern = 'test/**/test_*.rb' + test.verbose = true +end + +require 'rdoc/task' +Rake::RDocTask.new do |rdoc| + version = Algolia::VERSION + rdoc.rdoc_dir = 'rdoc' + rdoc.title = "algoliasearch #{version}" + rdoc.rdoc_files.include('README*') + rdoc.rdoc_files.include('lib/**/*.rb') +end + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new(:spec) + +task :default => :spec + +namespace :algolia do + GEM_VERSION_FILE = File.expand_path('../lib/algolia/version.rb', __FILE__) + GIT_TAG_URL = 'https://github.com/algolia/algoliasearch-client-ruby/releases/tag/' + + def last_commit_date + `git log -1 --date=short --format=%cd`.chomp + end + + def latest_tag + `git describe --tags --abbrev=0`.chomp + end + + def changelog(git_start = latest_tag(), git_end = 'HEAD', format = '%s') + `git log --no-decorate --no-merges --pretty=format:#{format} #{git_start}..#{git_end}` + end + + desc 'Write latest changes to CHANGELOG.md' + task :changelog, [:version] do |t, args| + # Filters-out commits containing some keywords and adds header + exceptions_regexp = Regexp.union(['README']) + title = "## [%s](%s%s) (%s)\n\n" % [args[:version], GIT_TAG_URL, args[:version], last_commit_date] + changes = changelog.each_line + .map { |line| (exceptions_regexp === line) ? nil : "* #{line.capitalize}" } + .unshift(title) + .append("\n\n") + .join + + puts changes + puts "\n\e[31mDo you want to update the CHANGELOG.md with the text above? 
[y/N]\e[0m" + exit if STDIN.gets.chomp.downcase != 'y' + + # Rewrite CHANGELOG.md + old_changes = File.readlines('CHANGELOG.md', 'r').join + File.open('CHANGELOG.md', 'w') { |file| file.write(changes, old_changes) } + + puts 'CHANGELOG.md successfully updated' + end + + desc 'Bump gem version' + task :semver, [:version] do |t, args| + + File.open(GEM_VERSION_FILE, 'w') do |file| + file.write <<~SEMVER + module Algolia + VERSION = "#{args[:version]}" + end + SEMVER + end + + # This force to reload the file with the newest version. + # Hence, `gemspec.version` invoked by Bundler later on will be correct. + load GEM_VERSION_FILE + + puts "Bumped gem version from #{latest_tag} to #{args[:version]}" + end + + desc 'Release a new version of this gem' + task :release, [:version] => [:changelog, :semver] do |t, args| + `git add #{File.expand_path('../lib/algolia/version.rb', __FILE__)} CHANGELOG.md` + `git commit -m "Bump to version #{args[:version]}"` + + # Invoke Bundler :release task + # https://github.com/bundler/bundler/blob/master/lib/bundler/gem_helper.rb + # + Rake::Task[:release].invoke + end +end + +module Bundler + class GemHelper + def version_tag + "#{version}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec new file mode 100644 index 0000000..eeb620c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/algoliasearch.gemspec @@ -0,0 +1,86 @@ +# -*- encoding: utf-8 -*- + +require 'date' +require File.join(File.dirname(__FILE__), 'lib', 'algolia', 'version') + +Gem::Specification.new do |s| + s.name = "algoliasearch" + s.version = Algolia::VERSION + s.authors = ["Algolia"] + s.email = "contact@algolia.com" + + s.date = Date.today + s.licenses = ["MIT"] + s.summary = "A simple Ruby client for the algolia.com REST API" + s.description = "A simple Ruby client for the algolia.com REST API" + s.homepage = "https://github.com/algolia/algoliasearch-client-ruby" + + s.metadata = { + "bug_tracker_uri" => "https://github.com/algolia/algoliasearch-client-ruby/issues", + "changelog_uri" => "https://github.com/algolia/algoliasearch-client-ruby/blob/master/CHANGELOG.md", + "documentation_uri" => "http://www.rubydoc.info/gems/algoliasearch", + "homepage_uri" => "https://www.algolia.com/doc/api-client/ruby/getting-started/", + "source_code_uri" => "https://github.com/algolia/algoliasearch-client-ruby" + } + + s.post_install_message = "A new major version is available for Algolia! Please now use the https://rubygems.org/gems/algolia gem to get the latest features." + + s.require_paths = ["lib"] + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + + s.extra_rdoc_files = [ + "CHANGELOG.md", + "LICENSE", + "README.md" + ] + s.files = [ + ".rspec", + ".travis.yml", + "CHANGELOG.md", + "Gemfile", + "Gemfile.lock", + "LICENSE", + "README.md", + "Rakefile", + "algoliasearch.gemspec", + "contacts.json", + "lib/algolia/analytics.rb", + "lib/algolia/account_client.rb", + "lib/algolia/client.rb", + "lib/algolia/error.rb", + "lib/algolia/index.rb", + "lib/algolia/insights.rb", + "lib/algolia/protocol.rb", + "lib/algolia/version.rb", + "lib/algolia/webmock.rb", + "lib/algoliasearch.rb", + "resources/ca-bundle.crt", + "spec/account_client_spec.rb", + "spec/client_spec.rb", + "spec/mock_spec.rb", + "spec/spec_helper.rb", + "spec/stub_spec.rb" + ] + + if s.respond_to? 
:specification_version then + s.specification_version = 4 + + if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then + if defined?(RUBY_VERSION) && RUBY_VERSION < '2.0' + s.add_runtime_dependency 'json', '>= 1.5.1', '< 2.3' + else + s.add_runtime_dependency 'json', '>= 1.5.1' + end + s.add_runtime_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_development_dependency 'travis', '~> 0' + s.add_development_dependency 'rake', '~> 0' + s.add_development_dependency 'rdoc', '~> 0' + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end + else + s.add_dependency 'httpclient', '~> 2.8', '>= 2.8.3' + s.add_dependency 'json', '>= 1.5.1', '< 2.3' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/contacts.json b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/contacts.json new file mode 100644 index 0000000..8ba54fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/contacts.json @@ -0,0 +1,7504 @@ +[ + { + "firstname": "Essie", + "lastname": "Vaill", + "company": "Litronic Industries", + "address": "14225 Hancock Dr", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-345-0962", + "fax": "907-345-1215", + "email": "essie@vaill.com", + "web": "http://www.essievaill.com", + "followers": 3574 + }, + { + "firstname": "Cruz", + "lastname": "Roudabush", + "company": "Meridian Products", + "address": "2202 S Central Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85004", + "phone": "602-252-4827", + "fax": "602-252-4009", + "email": "cruz@roudabush.com", + "web": "http://www.cruzroudabush.com", + "followers": 6548 + }, + { + "firstname": "Billie", + "lastname": "Tinnes", + "company": "D & M Plywood Inc", + "address": "28 W 27th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10001", + "phone": "212-889-5775", + "fax": "212-889-5764", + "email": "billie@tinnes.com", + "web": "http://www.billietinnes.com", + "followers": 3536 + }, + { + "firstname": "Zackary", + "lastname": "Mockus", + "company": "Metropolitan Elevator Co", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-0638", + "fax": "732-442-5218", + "email": "zackary@mockus.com", + "web": "http://www.zackarymockus.com", + "followers": 1497 + }, + { + "firstname": "Rosemarie", + "lastname": "Fifield", + "company": "Technology Services", + "address": "3131 N Nimitz Hwy #-105", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-836-8966", + "fax": "808-836-6008", + "email": "rosemarie@fifield.com", + "web": "http://www.rosemariefifield.com", + "followers": 4812 + }, + { + "firstname": "Bernard", + "lastname": "Laboy", + "company": "Century 21 Keewaydin Prop", + "address": "22661 S Frontage Rd", + "city": "Channahon", + "county": "Will", + "state": "IL", + "zip": "60410", + "phone": "815-467-0487", + "fax": "815-467-1244", + "email": "bernard@laboy.com", + "web": "http://www.bernardlaboy.com", + "followers": 6891 + }, + { + "firstname": "Sue", + "lastname": "Haakinson", + "company": "Kim Peacock Beringhause", + "address": "9617 N Metro Pky W", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85051", + "phone": "602-953-2753", + "fax": "602-953-0355", + "email": "sue@haakinson.com", + "web": "http://www.suehaakinson.com", + "followers": 5787 + }, + { + "firstname": "Valerie", + 
"lastname": "Pou", + "company": "Sea Port Record One Stop Inc", + "address": "7475 Hamilton Blvd", + "city": "Trexlertown", + "county": "Lehigh", + "state": "PA", + "zip": "18087", + "phone": "610-395-8743", + "fax": "610-395-6995", + "email": "valerie@pou.com", + "web": "http://www.valeriepou.com", + "followers": 8990 + }, + { + "firstname": "Lashawn", + "lastname": "Hasty", + "company": "Kpff Consulting Engineers", + "address": "815 S Glendora Ave", + "city": "West Covina", + "county": "Los Angeles", + "state": "CA", + "zip": "91790", + "phone": "626-960-6738", + "fax": "626-960-1503", + "email": "lashawn@hasty.com", + "web": "http://www.lashawnhasty.com", + "followers": 2131 + }, + { + "firstname": "Marianne", + "lastname": "Earman", + "company": "Albers Technologies Corp", + "address": "6220 S Orange Blossom Trl", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32809", + "phone": "407-857-0431", + "fax": "407-857-2506", + "email": "marianne@earman.com", + "web": "http://www.marianneearman.com", + "followers": 1992 + }, + { + "firstname": "Justina", + "lastname": "Dragaj", + "company": "Uchner, David D Esq", + "address": "2552 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38112", + "phone": "901-327-5336", + "fax": "901-327-2911", + "email": "justina@dragaj.com", + "web": "http://www.justinadragaj.com", + "followers": 7149 + }, + { + "firstname": "Mandy", + "lastname": "Mcdonnell", + "company": "Southern Vermont Surveys", + "address": "343 Bush St Se", + "city": "Salem", + "county": "Marion", + "state": "OR", + "zip": "97302", + "phone": "503-371-8219", + "fax": "503-371-1118", + "email": "mandy@mcdonnell.com", + "web": "http://www.mandymcdonnell.com", + "followers": 1329 + }, + { + "firstname": "Conrad", + "lastname": "Lanfear", + "company": "Kahler, Karen T Esq", + "address": "49 Roche Way", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-758-0314", + "fax": "330-758-3536", + "email": "conrad@lanfear.com", + "web": "http://www.conradlanfear.com", + "followers": 2906 + }, + { + "firstname": "Cyril", + "lastname": "Behen", + "company": "National Paper & Envelope Corp", + "address": "1650 S Harbor Blvd", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92802", + "phone": "714-772-5050", + "fax": "714-772-3859", + "email": "cyril@behen.com", + "web": "http://www.cyrilbehen.com", + "followers": 7784 + }, + { + "firstname": "Shelley", + "lastname": "Groden", + "company": "Norton, Robert L Esq", + "address": "110 Broadway St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-229-3017", + "fax": "210-229-9757", + "email": "shelley@groden.com", + "web": "http://www.shelleygroden.com", + "followers": 2012 + }, + { + "firstname": "Rosalind", + "lastname": "Krenzke", + "company": "Waldein Manufacturing", + "address": "7000 Bass Lake Rd #-200", + "city": "Minneapolis", + "county": "Hennepin", + "state": "MN", + "zip": "55428", + "phone": "763-537-4194", + "fax": "763-537-3885", + "email": "rosalind@krenzke.com", + "web": "http://www.rosalindkrenzke.com", + "followers": 5547 + }, + { + "firstname": "Davis", + "lastname": "Brevard", + "company": "Transit Trading Corp", + "address": "6715 Tippecanoe Rd", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-6346", + "fax": "330-533-8211", + "email": "davis@brevard.com", + "web": "http://www.davisbrevard.com", + "followers": 4259 
+ }, + { + "firstname": "Winnie", + "lastname": "Reich", + "company": "Pacific Seating Co", + "address": "1535 Hawkins Blvd", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79925", + "phone": "915-771-2730", + "fax": "915-771-5729", + "email": "winnie@reich.com", + "web": "http://www.winniereich.com", + "followers": 6621 + }, + { + "firstname": "Trudy", + "lastname": "Worlds", + "company": "Shapiro, Mark R Esq", + "address": "24907 Tibbitts Aven #-b", + "city": "Valencia", + "county": "Los Angeles", + "state": "CA", + "zip": "91355", + "phone": "661-257-3083", + "fax": "661-257-0924", + "email": "trudy@worlds.com", + "web": "http://www.trudyworlds.com", + "followers": 5782 + }, + { + "firstname": "Deshawn", + "lastname": "Inafuku", + "company": "Telair Div Of Teleflex Inc", + "address": "3508 Leopard St", + "city": "Corpus Christi", + "county": "Nueces", + "state": "TX", + "zip": "78408", + "phone": "361-884-8433", + "fax": "361-884-3985", + "email": "deshawn@inafuku.com", + "web": "http://www.deshawninafuku.com", + "followers": 1195 + }, + { + "firstname": "Claudio", + "lastname": "Loose", + "company": "Audiotek Electronics", + "address": "286 State St", + "city": "Perth Amboy", + "county": "Middlesex", + "state": "NJ", + "zip": "08861", + "phone": "732-442-8514", + "fax": "732-442-1775", + "email": "claudio@loose.com", + "web": "http://www.claudioloose.com", + "followers": 6043 + }, + { + "firstname": "Sal", + "lastname": "Pindell", + "company": "Wrigley, Robert I Esq", + "address": "1112 Se 1st St", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47713", + "phone": "812-421-4804", + "fax": "812-421-7625", + "email": "sal@pindell.com", + "web": "http://www.salpindell.com", + "followers": 4359 + }, + { + "firstname": "Cristina", + "lastname": "Sharper", + "company": "Tax Office", + "address": "111 W 40th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10018", + "phone": "212-719-3952", + "fax": "212-719-0754", + "email": "cristina@sharper.com", + "web": "http://www.cristinasharper.com", + "followers": 4823 + }, + { + "firstname": "Betty Jane", + "lastname": "Mccamey", + "company": "Vita Foods Inc", + "address": "100 E Broad St", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-225-0900", + "fax": "614-225-1612", + "email": "cary@mccamey.com", + "web": "http://www.carymccamey.com", + "followers": 8863 + }, + { + "firstname": "Haley", + "lastname": "Rocheford", + "company": "Davis, Robert L Esq", + "address": "6030 Greenwood Plaza Blvd", + "city": "Englewood", + "county": "Arapahoe", + "state": "CO", + "zip": "80111", + "phone": "303-689-7729", + "fax": "303-689-5219", + "email": "haley@rocheford.com", + "web": "http://www.haleyrocheford.com", + "followers": 9872 + }, + { + "firstname": "Dorothea", + "lastname": "Sweem", + "company": "Ehrmann, Rolfe F Esq", + "address": "100 Thanet Circ", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08648", + "phone": "609-896-5871", + "fax": "609-896-2099", + "email": "dorothea@sweem.com", + "web": "http://www.dorotheasweem.com", + "followers": 8897 + }, + { + "firstname": "Fannie", + "lastname": "Steese", + "company": "Chiapete, W Richard Esq", + "address": "926 E Park Ave", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32301", + "phone": "850-222-8103", + "fax": "850-222-0105", + "email": "fannie@steese.com", + "web": "http://www.fanniesteese.com", + "followers": 7140 + }, + 
{ + "firstname": "Allyson", + "lastname": "Gillispie", + "company": "De Friese Theo & Sons", + "address": "1722 White Horse Mercerville R", + "city": "Trenton", + "county": "Mercer", + "state": "NJ", + "zip": "08619", + "phone": "609-584-1794", + "fax": "609-584-0645", + "email": "allyson@gillispie.com", + "web": "http://www.allysongillispie.com", + "followers": 1414 + }, + { + "firstname": "Roger", + "lastname": "Seid", + "company": "Yoshida, Gerald C Esq", + "address": "3738 N Monroe St", + "city": "Tallahassee", + "county": "Leon", + "state": "FL", + "zip": "32303", + "phone": "850-422-1535", + "fax": "850-422-8152", + "email": "roger@seid.com", + "web": "http://www.rogerseid.com", + "followers": 6661 + }, + { + "firstname": "Dollie", + "lastname": "Daquino", + "company": "Jd Edwards & Co", + "address": "1005 Congress Ave", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-9636", + "fax": "512-478-9874", + "email": "dollie@daquino.com", + "web": "http://www.dolliedaquino.com", + "followers": 5262 + }, + { + "firstname": "Eva", + "lastname": "Seahorn", + "company": "Saunders Appraisal Inc", + "address": "3 Northern Blvd", + "city": "Amherst", + "county": "Hillsborough", + "state": "NH", + "zip": "03031", + "phone": "603-673-6072", + "fax": "603-673-5009", + "email": "eva@seahorn.com", + "web": "http://www.evaseahorn.com", + "followers": 9192 + }, + { + "firstname": "Mamie", + "lastname": "Mcintee", + "company": "Jacobs, Brian Realtor", + "address": "2810 Jacobs Ave", + "city": "Eureka", + "county": "Humboldt", + "state": "CA", + "zip": "95501", + "phone": "707-443-0621", + "fax": "707-443-9147", + "email": "mamie@mcintee.com", + "web": "http://www.mamiemcintee.com", + "followers": 6954 + }, + { + "firstname": "Lyndon", + "lastname": "Bellerdine", + "company": "A B C Lock & Key", + "address": "200 California St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94111", + "phone": "415-705-1956", + "fax": "415-705-2887", + "email": "lyndon@bellerdine.com", + "web": "http://www.lyndonbellerdine.com", + "followers": 146 + }, + { + "firstname": "Lashonda", + "lastname": "Derouen", + "company": "Travel Agent Magazine", + "address": "101 Royal St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-3394", + "fax": "703-684-0307", + "email": "lashonda@derouen.com", + "web": "http://www.lashondaderouen.com", + "followers": 3792 + }, + { + "firstname": "Jacklyn", + "lastname": "Emayo", + "company": "Super 8 Motel Temple", + "address": "101 Us Highway 46", + "city": "Fairfield", + "county": "Essex", + "state": "NJ", + "zip": "07004", + "phone": "973-882-3960", + "fax": "973-882-1908", + "email": "jacklyn@emayo.com", + "web": "http://www.jacklynemayo.com", + "followers": 4575 + }, + { + "firstname": "Rubin", + "lastname": "Crotts", + "company": "Misko, Charles G Esq", + "address": "303 N Indian Canyon Dr", + "city": "Palm Springs", + "county": "Riverside", + "state": "CA", + "zip": "92262", + "phone": "760-327-0337", + "fax": "760-327-0929", + "email": "rubin@crotts.com", + "web": "http://www.rubincrotts.com", + "followers": 4736 + }, + { + "firstname": "Boris", + "lastname": "Catino", + "company": "Dream Homes Usa Inc", + "address": "645 Church St", + "city": "Norfolk", + "county": "Norfolk City", + "state": "VA", + "zip": "23510", + "phone": "757-627-8408", + "fax": "757-627-6195", + "email": "boris@catino.com", + "web": "http://www.boriscatino.com", + 
"followers": 2330 + }, + { + "firstname": "Jannie", + "lastname": "Bowditch", + "company": "Lindsays Landing Rv Pk & Mrna", + "address": "1102 Main St", + "city": "Grandview", + "county": "Jackson", + "state": "MO", + "zip": "64030", + "phone": "816-765-0961", + "fax": "816-765-3469", + "email": "jannie@bowditch.com", + "web": "http://www.janniebowditch.com", + "followers": 7302 + }, + { + "firstname": "Colin", + "lastname": "Altonen", + "company": "Smith, Arthur D Esq", + "address": "1201 18th St", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80202", + "phone": "303-292-5477", + "fax": "303-292-4239", + "email": "colin@altonen.com", + "web": "http://www.colinaltonen.com", + "followers": 2587 + }, + { + "firstname": "Kerry", + "lastname": "Evertt", + "company": "Washington Crossing Inn", + "address": "337 S North Lake Blvd", + "city": "Altamonte Springs", + "county": "Seminole", + "state": "FL", + "zip": "32701", + "phone": "407-332-9851", + "fax": "407-332-1718", + "email": "kerry@evertt.com", + "web": "http://www.kerryevertt.com", + "followers": 6663 + }, + { + "firstname": "Kathie", + "lastname": "Argenti", + "company": "Microtel Franchise & Dev Corp", + "address": "410 Front St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-9253", + "fax": "218-828-1401", + "email": "kathie@argenti.com", + "web": "http://www.kathieargenti.com", + "followers": 6260 + }, + { + "firstname": "Henrietta", + "lastname": "Cintora", + "company": "Keyes, Judith Droz Esq", + "address": "1063 Fm Wzzw", + "city": "Milton", + "county": "Cabell", + "state": "WV", + "zip": "25541", + "phone": "304-743-5440", + "fax": "304-743-7475", + "email": "henrietta@cintora.com", + "web": "http://www.henriettacintora.com", + "followers": 9622 + }, + { + "firstname": "Mariano", + "lastname": "Maury", + "company": "Donut & Sandwich Shop", + "address": "1092 Saint Georges Ave", + "city": "Rahway", + "county": "Union", + "state": "NJ", + "zip": "07065", + "phone": "732-388-1579", + "fax": "732-388-9355", + "email": "mariano@maury.com", + "web": "http://www.marianomaury.com", + "followers": 6254 + }, + { + "firstname": "Lottie", + "lastname": "Voll", + "company": "Mason, Joseph G Esq", + "address": "55 E 10th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97401", + "phone": "541-342-7282", + "fax": "541-342-4657", + "email": "lottie@voll.com", + "web": "http://www.lottievoll.com", + "followers": 1092 + }, + { + "firstname": "Ofelia", + "lastname": "Sheffler", + "company": "Rimpsey Agency", + "address": "628 Pacific Ave", + "city": "Oxnard", + "county": "Ventura", + "state": "CA", + "zip": "93030", + "phone": "805-483-7161", + "fax": "805-483-5693", + "email": "ofelia@sheffler.com", + "web": "http://www.ofeliasheffler.com", + "followers": 1096 + }, + { + "firstname": "Gaston", + "lastname": "Cieloszyk", + "company": "Center For Hope Hospice Inc", + "address": "1160 Wccs", + "city": "Homer City", + "county": "Indiana", + "state": "PA", + "zip": "15748", + "phone": "724-479-0355", + "fax": "724-479-7077", + "email": "gaston@cieloszyk.com", + "web": "http://www.gastoncieloszyk.com", + "followers": 7409 + }, + { + "firstname": "Karla", + "lastname": "Ken", + "company": "Nicollet Process Engineering", + "address": "2135 11th St", + "city": "Rockford", + "county": "Winnebago", + "state": "IL", + "zip": "61104", + "phone": "815-968-0369", + "fax": "815-968-7904", + "email": "karla@ken.com", + "web": "http://www.karlaken.com", + 
"followers": 1296 + }, + { + "firstname": "Avery", + "lastname": "Parbol", + "company": "Schlackman, William H", + "address": "22343 Se Stark St", + "city": "Gresham", + "county": "Multnomah", + "state": "OR", + "zip": "97030", + "phone": "503-666-1948", + "fax": "503-666-9241", + "email": "avery@parbol.com", + "web": "http://www.averyparbol.com", + "followers": 3515 + }, + { + "firstname": "John", + "lastname": "Chipley", + "company": "Manpower Temporary Services", + "address": "2 Horizon Rd #-2", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-224-7741", + "fax": "201-224-7282", + "email": "john@chipley.com", + "web": "http://www.johnchipley.com", + "followers": 7710 + }, + { + "firstname": "Luella", + "lastname": "Pliner", + "company": "United Waste Systems", + "address": "3437 N 12th Ave", + "city": "Pensacola", + "county": "Escambia", + "state": "FL", + "zip": "32503", + "phone": "850-434-2521", + "fax": "850-434-5228", + "email": "luella@pliner.com", + "web": "http://www.luellapliner.com", + "followers": 5191 + }, + { + "firstname": "Elvira", + "lastname": "Blumenthal", + "company": "Stell Mortgage Investors", + "address": "108 Washington St", + "city": "Keokuk", + "county": "Lee", + "state": "IA", + "zip": "52632", + "phone": "319-524-6237", + "fax": "319-524-9435", + "email": "elvira@blumenthal.com", + "web": "http://www.elvirablumenthal.com", + "followers": 6616 + }, + { + "firstname": "Tyree", + "lastname": "Courrege", + "company": "Stitch Craft", + "address": "13201 Northwest Fwy", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77040", + "phone": "713-690-9216", + "fax": "713-690-4043", + "email": "tyree@courrege.com", + "web": "http://www.tyreecourrege.com", + "followers": 5210 + }, + { + "firstname": "Ramon", + "lastname": "Amaral", + "company": "Air Academy Federal Credit Un", + "address": "700 W Highway 287", + "city": "Lander", + "county": "Fremont", + "state": "WY", + "zip": "82520", + "phone": "307-332-2667", + "fax": "307-332-3893", + "email": "ramon@amaral.com", + "web": "http://www.ramonamaral.com", + "followers": 8659 + }, + { + "firstname": "Wilfredo", + "lastname": "Gidley", + "company": "Mclaughlin, John F Esq", + "address": "2255 Kuhio Ave #-1203", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-924-2610", + "fax": "808-924-7666", + "email": "wilfredo@gidley.com", + "web": "http://www.wilfredogidley.com", + "followers": 8860 + }, + { + "firstname": "Gracie", + "lastname": "Ehn", + "company": "P C Systems", + "address": "Kahala", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96816", + "phone": "808-247-8624", + "fax": "808-247-7982", + "email": "gracie@ehn.com", + "web": "http://www.gracieehn.com", + "followers": 2870 + }, + { + "firstname": "Dorthy", + "lastname": "Alexy", + "company": "Frank Siviglia & Co", + "address": "Pearlridge", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-247-4421", + "fax": "808-247-7192", + "email": "dorthy@alexy.com", + "web": "http://www.dorthyalexy.com", + "followers": 1029 + }, + { + "firstname": "Bertie", + "lastname": "Luby", + "company": "Puckett, Dennis L Esq", + "address": "Windward", + "city": "Kaneohe", + "county": "Honolulu", + "state": "HI", + "zip": "96744", + "phone": "808-247-8062", + "fax": "808-247-2529", + "email": "bertie@luby.com", + "web": "http://www.bertieluby.com", + "followers": 2660 + }, + { + "firstname": "Rudy", + 
"lastname": "Kuhle", + "company": "General Insurcorp Inc", + "address": "1418 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-628-9987", + "fax": "212-628-1234", + "email": "rudy@kuhle.com", + "web": "http://www.rudykuhle.com", + "followers": 7201 + }, + { + "firstname": "Gale", + "lastname": "Nolau", + "company": "Lust, C James Esq", + "address": "104 N Aurora St", + "city": "Ithaca", + "county": "Tompkins", + "state": "NY", + "zip": "14850", + "phone": "607-277-1567", + "fax": "607-277-6524", + "email": "gale@nolau.com", + "web": "http://www.galenolau.com", + "followers": 6842 + }, + { + "firstname": "Kenya", + "lastname": "Bruni", + "company": "Hurley, Thomas J Jr", + "address": "280 N Midland Ave", + "city": "Saddle Brook", + "county": "Bergen", + "state": "NJ", + "zip": "07663", + "phone": "201-646-9077", + "fax": "201-646-8526", + "email": "kenya@bruni.com", + "web": "http://www.kenyabruni.com", + "followers": 9368 + }, + { + "firstname": "Tricia", + "lastname": "Kruss", + "company": "Edwards, Elwood L", + "address": "4685 Ne 14th St", + "city": "Des Moines", + "county": "Polk", + "state": "IA", + "zip": "50313", + "phone": "515-262-3267", + "fax": "515-262-6264", + "email": "tricia@kruss.com", + "web": "http://www.triciakruss.com", + "followers": 9671 + }, + { + "firstname": "Mack", + "lastname": "Jurasin", + "company": "Sherman, Michael D Esq", + "address": "1180 Dora Hwy", + "city": "Pulaski", + "county": "Pulaski", + "state": "VA", + "zip": "24301", + "phone": "540-980-4958", + "fax": "540-980-2978", + "email": "mack@jurasin.com", + "web": "http://www.mackjurasin.com", + "followers": 4557 + }, + { + "firstname": "Margarito", + "lastname": "Kornbau", + "company": "Acker Knitting Mills Inc", + "address": "303 W 15th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78701", + "phone": "512-478-0371", + "fax": "512-478-4449", + "email": "margarito@kornbau.com", + "web": "http://www.margaritokornbau.com", + "followers": 2072 + }, + { + "firstname": "Lucien", + "lastname": "Iurato", + "company": "Anderson Consulting", + "address": "3918 16th Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11218", + "phone": "718-871-7952", + "fax": "718-871-3483", + "email": "lucien@iurato.com", + "web": "http://www.lucieniurato.com", + "followers": 9434 + }, + { + "firstname": "Jarvis", + "lastname": "Galas", + "company": "Younghans & Burke", + "address": "307 E President St", + "city": "Savannah", + "county": "Chatham", + "state": "GA", + "zip": "31401", + "phone": "912-236-8524", + "fax": "912-236-8705", + "email": "jarvis@galas.com", + "web": "http://www.jarvisgalas.com", + "followers": 2359 + }, + { + "firstname": "Billie", + "lastname": "Cowley", + "company": "Spears, Robert M Esq", + "address": "1700 Street Rd", + "city": "Warrington", + "county": "Bucks", + "state": "PA", + "zip": "18976", + "phone": "215-548-0842", + "fax": "215-548-4706", + "email": "billie@cowley.com", + "web": "http://www.billiecowley.com", + "followers": 2416 + }, + { + "firstname": "Jacinto", + "lastname": "Gawron", + "company": "Matt Kokkonen Insurance Agency", + "address": "1740 House", + "city": "Lumberville", + "county": "Bucks", + "state": "PA", + "zip": "18933", + "phone": "215-297-0120", + "fax": "215-297-5442", + "email": "jacinto@gawron.com", + "web": "http://www.jacintogawron.com", + "followers": 310 + }, + { + "firstname": "Randall", + "lastname": "Kluemper", + "company": "Lifestyles Organization", + 
"address": "Rt 16", + "city": "North Conway", + "county": "Carroll", + "state": "NH", + "zip": "03860", + "phone": "603-356-3217", + "fax": "603-356-6174", + "email": "randall@kluemper.com", + "web": "http://www.randallkluemper.com", + "followers": 5669 + }, + { + "firstname": "Enrique", + "lastname": "Oroark", + "company": "Callaghan, Kathleen M Esq", + "address": "34 W 17th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10011", + "phone": "212-366-5568", + "fax": "212-366-6877", + "email": "enrique@oroark.com", + "web": "http://www.enriqueoroark.com", + "followers": 3911 + }, + { + "firstname": "Alva", + "lastname": "Pennigton", + "company": "Citizens Savings Bank", + "address": "1275 County Road 210 W", + "city": "Jacksonville", + "county": "Saint Johns", + "state": "FL", + "zip": "32259", + "phone": "904-260-2345", + "fax": "904-260-3735", + "email": "alva@pennigton.com", + "web": "http://www.alvapennigton.com", + "followers": 7564 + }, + { + "firstname": "Socorro", + "lastname": "Balandran", + "company": "Mooring", + "address": "401 S Main St", + "city": "Greensburg", + "county": "Westmoreland", + "state": "PA", + "zip": "15601", + "phone": "724-834-6908", + "fax": "724-834-8831", + "email": "socorro@balandran.com", + "web": "http://www.socorrobalandran.com", + "followers": 7056 + }, + { + "firstname": "Nadia", + "lastname": "Wilshire", + "company": "Midwest Undercar Distributors", + "address": "1801 West Ave S", + "city": "La Crosse", + "county": "La Crosse", + "state": "WI", + "zip": "54601", + "phone": "608-788-4965", + "fax": "608-788-5946", + "email": "nadia@wilshire.com", + "web": "http://www.nadiawilshire.com", + "followers": 9311 + }, + { + "firstname": "Reginald", + "lastname": "Humes", + "company": "Cowley & Chidester", + "address": "44 N Main St", + "city": "Wolfeboro", + "county": "Carroll", + "state": "NH", + "zip": "03894", + "phone": "603-569-7730", + "fax": "603-569-8142", + "email": "reginald@humes.com", + "web": "http://www.reginaldhumes.com", + "followers": 8347 + }, + { + "firstname": "Lynda", + "lastname": "Caraway", + "company": "Lowe Art Museum", + "address": "1822 Spring Garden St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19130", + "phone": "215-564-3171", + "fax": "215-564-2241", + "email": "lynda@caraway.com", + "web": "http://www.lyndacaraway.com", + "followers": 3853 + }, + { + "firstname": "Saundra", + "lastname": "Mcaulay", + "company": "Rcf Inc", + "address": "2401 Cleveland Rd W", + "city": "Huron", + "county": "Erie", + "state": "OH", + "zip": "44839", + "phone": "419-433-5558", + "fax": "419-433-9756", + "email": "saundra@mcaulay.com", + "web": "http://www.saundramcaulay.com", + "followers": 1620 + }, + { + "firstname": "Allan", + "lastname": "Schwantd", + "company": "Micro Wire Products", + "address": "406 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-9666", + "fax": "503-434-3863", + "email": "allan@schwantd.com", + "web": "http://www.allanschwantd.com", + "followers": 6069 + }, + { + "firstname": "Wilmer", + "lastname": "Constantineau", + "company": "Nutra Source", + "address": "1745 W 18th Ave", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-345-4729", + "fax": "541-345-4884", + "email": "wilmer@constantineau.com", + "web": "http://www.wilmerconstantineau.com", + "followers": 1648 + }, + { + "firstname": "Savannah", + "lastname": "Kesich", + "company": "Wbnd Am", + 
"address": "221 Main", + "city": "Park City", + "county": "Summit", + "state": "UT", + "zip": "84060", + "phone": "435-645-0986", + "fax": "435-645-9504", + "email": "savannah@kesich.com", + "web": "http://www.savannahkesich.com", + "followers": 7325 + }, + { + "firstname": "Dwain", + "lastname": "Cuttitta", + "company": "Kintech Stamping Inc", + "address": "1919 Connecticut Ave Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20009", + "phone": "202-265-7854", + "fax": "202-265-9475", + "email": "dwain@cuttitta.com", + "web": "http://www.dwaincuttitta.com", + "followers": 8300 + }, + { + "firstname": "Krystle", + "lastname": "Stika", + "company": "Signature Inn", + "address": "3730 Fm", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77068", + "phone": "281-537-5324", + "fax": "281-537-3235", + "email": "krystle@stika.com", + "web": "http://www.krystlestika.com", + "followers": 2603 + }, + { + "firstname": "Felipe", + "lastname": "Gould", + "company": "Black, Ronald H", + "address": "2308 Bienville Blvd", + "city": "Ocean Springs", + "county": "Jackson", + "state": "MS", + "zip": "39564", + "phone": "228-875-2811", + "fax": "228-875-6402", + "email": "felipe@gould.com", + "web": "http://www.felipegould.com", + "followers": 9237 + }, + { + "firstname": "Steve", + "lastname": "Schorr", + "company": "Midwest Fire Protection Inc", + "address": "1810 N King St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-842-7045", + "fax": "808-842-7338", + "email": "steve@schorr.com", + "web": "http://www.steveschorr.com", + "followers": 1468 + }, + { + "firstname": "Naomi", + "lastname": "Caetano", + "company": "Bashlin Industries Inc", + "address": "50 Spring St #-1", + "city": "Cresskill", + "county": "Bergen", + "state": "NJ", + "zip": "07626", + "phone": "201-569-3572", + "fax": "201-569-5795", + "email": "naomi@caetano.com", + "web": "http://www.naomicaetano.com", + "followers": 1743 + }, + { + "firstname": "Melody", + "lastname": "Saddat", + "company": "Richards, Edward W Esq", + "address": "3540 S 84th St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68124", + "phone": "402-397-0581", + "fax": "402-397-8391", + "email": "melody@saddat.com", + "web": "http://www.melodysaddat.com", + "followers": 2442 + }, + { + "firstname": "Mitchel", + "lastname": "Harnar", + "company": "Copycat Quick Print", + "address": "1810 Pioneer Ave", + "city": "Cheyenne", + "county": "Laramie", + "state": "WY", + "zip": "82001", + "phone": "307-632-0256", + "fax": "307-632-2516", + "email": "mitchel@harnar.com", + "web": "http://www.mitchelharnar.com", + "followers": 4662 + }, + { + "firstname": "Sharlene", + "lastname": "Circelli", + "company": "Calibron Systems Inc", + "address": "4018 W Clearwater Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-783-5167", + "fax": "509-783-7346", + "email": "sharlene@circelli.com", + "web": "http://www.sharlenecircelli.com", + "followers": 6539 + }, + { + "firstname": "Sean", + "lastname": "Bonnet", + "company": "Corporate Alternatives Inc", + "address": "3043 Ridge Rd", + "city": "Lansing", + "county": "Cook", + "state": "IL", + "zip": "60438", + "phone": "708-474-4766", + "fax": "708-474-0011", + "email": "sean@bonnet.com", + "web": "http://www.seanbonnet.com", + "followers": 867 + }, + { + "firstname": "Travis", + "lastname": "Brockert", + "company": "Santa Cruz Title Co", + "address": "7828 
N 19th Ave", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85021", + "phone": "602-995-1362", + "fax": "602-995-0966", + "email": "travis@brockert.com", + "web": "http://www.travisbrockert.com", + "followers": 7421 + }, + { + "firstname": "Candice", + "lastname": "Bruckman", + "company": "Fernando Foods Inc", + "address": "611 1st Ave N", + "city": "Humboldt", + "county": "Humboldt", + "state": "IA", + "zip": "50548", + "phone": "515-332-0809", + "fax": "515-332-9083", + "email": "candice@bruckman.com", + "web": "http://www.candicebruckman.com", + "followers": 7084 + }, + { + "firstname": "Mabel", + "lastname": "Weeden", + "company": "Pepsi Cola Gen Bottlers Inc", + "address": "300 E Phillips St", + "city": "Richardson", + "county": "Dallas", + "state": "TX", + "zip": "75081", + "phone": "972-235-5619", + "fax": "972-235-1843", + "email": "mabel@weeden.com", + "web": "http://www.mabelweeden.com", + "followers": 2674 + }, + { + "firstname": "Armando", + "lastname": "Papik", + "company": "Cryogenic Society Of America", + "address": "615 W Markham St", + "city": "Little Rock", + "county": "Pulaski", + "state": "AR", + "zip": "72201", + "phone": "501-376-4154", + "fax": "501-376-0608", + "email": "armando@papik.com", + "web": "http://www.armandopapik.com", + "followers": 7152 + }, + { + "firstname": "Kevin", + "lastname": "Edd", + "company": "Peebles, William J Esq", + "address": "64 Dyerville Ave", + "city": "Johnston", + "county": "Providence", + "state": "RI", + "zip": "02919", + "phone": "401-453-8514", + "fax": "401-453-7085", + "email": "kevin@edd.com", + "web": "http://www.kevinedd.com", + "followers": 3568 + }, + { + "firstname": "Raphael", + "lastname": "Bickel", + "company": "S Shamash & Sons Inc", + "address": "550 N Brand Blvd #-800", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-246-1195", + "fax": "818-246-4734", + "email": "raphael@bickel.com", + "web": "http://www.raphaelbickel.com", + "followers": 1365 + }, + { + "firstname": "Darren", + "lastname": "Merlin", + "company": "Pozzuolo & Perkiss Pc", + "address": "550 N Edward St", + "city": "Decatur", + "county": "Macon", + "state": "IL", + "zip": "62522", + "phone": "217-428-0453", + "fax": "217-428-1491", + "email": "darren@merlin.com", + "web": "http://www.darrenmerlin.com", + "followers": 7653 + }, + { + "firstname": "Francis", + "lastname": "Soo", + "company": "Allen Industrial Supply", + "address": "218 W Main St", + "city": "Sparta", + "county": "Monroe", + "state": "WI", + "zip": "54656", + "phone": "608-269-7306", + "fax": "608-269-3359", + "email": "francis@soo.com", + "web": "http://www.francissoo.com", + "followers": 2482 + }, + { + "firstname": "Nelly", + "lastname": "Jakuboski", + "company": "Hammerman, Stanley M Esq", + "address": "103 Main St", + "city": "Ridgefield", + "county": "Fairfield", + "state": "CT", + "zip": "06877", + "phone": "203-438-9250", + "fax": "203-438-5109", + "email": "nelly@jakuboski.com", + "web": "http://www.nellyjakuboski.com", + "followers": 5338 + }, + { + "firstname": "Mitzi", + "lastname": "Ihenyen", + "company": "Helm, Norman O", + "address": "979 3rd Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-838-8303", + "fax": "212-838-3221", + "email": "mitzi@ihenyen.com", + "web": "http://www.mitziihenyen.com", + "followers": 9264 + }, + { + "firstname": "Kathleen", + "lastname": "Beresnyak", + "company": "R & E Associates", + "address": "100 W 25th Ave", + 
"city": "San Mateo", + "county": "San Mateo", + "state": "CA", + "zip": "94403", + "phone": "650-349-6809", + "fax": "650-349-5962", + "email": "kathleen@beresnyak.com", + "web": "http://www.kathleenberesnyak.com", + "followers": 2853 + }, + { + "firstname": "Adela", + "lastname": "Cervantsz", + "company": "Arizona Awards", + "address": "102 5th St N", + "city": "Clanton", + "county": "Chilton", + "state": "AL", + "zip": "35045", + "phone": "205-755-4137", + "fax": "205-755-1034", + "email": "adela@cervantsz.com", + "web": "http://www.adelacervantsz.com", + "followers": 9876 + }, + { + "firstname": "Randal", + "lastname": "Gansen", + "company": "Quik Print", + "address": "1 First Federal Plz", + "city": "Rochester", + "county": "Monroe", + "state": "NY", + "zip": "14614", + "phone": "585-238-8558", + "fax": "585-238-7764", + "email": "randal@gansen.com", + "web": "http://www.randalgansen.com", + "followers": 4019 + }, + { + "firstname": "Alyssa", + "lastname": "Biasotti", + "company": "Johnson Hardware Co", + "address": "22 James St", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-1878", + "fax": "845-343-5354", + "email": "alyssa@biasotti.com", + "web": "http://www.alyssabiasotti.com", + "followers": 3684 + }, + { + "firstname": "Janet", + "lastname": "Schaffter", + "company": "Hall, Camden M Esq", + "address": "131 Rimbach St", + "city": "Hammond", + "county": "Lake", + "state": "IN", + "zip": "46320", + "phone": "219-853-9283", + "fax": "219-853-9329", + "email": "janet@schaffter.com", + "web": "http://www.janetschaffter.com", + "followers": 2431 + }, + { + "firstname": "Armando", + "lastname": "Kolm", + "company": "Cooper & Cooper Cpas", + "address": "201 N Main St", + "city": "Anderson", + "county": "Anderson", + "state": "SC", + "zip": "29621", + "phone": "864-260-3642", + "fax": "864-260-9205", + "email": "armando@kolm.com", + "web": "http://www.armandokolm.com", + "followers": 4357 + }, + { + "firstname": "Gil", + "lastname": "Scarpa", + "company": "Hughes, James D Esq", + "address": "12 E Broad St", + "city": "Hazleton", + "county": "Luzerne", + "state": "PA", + "zip": "18201", + "phone": "570-459-9281", + "fax": "570-459-5191", + "email": "gil@scarpa.com", + "web": "http://www.gilscarpa.com", + "followers": 7691 + }, + { + "firstname": "Vanessa", + "lastname": "Lewallen", + "company": "Fargo Glass & Paint Co", + "address": "5 E Main", + "city": "Centerburg", + "county": "Knox", + "state": "OH", + "zip": "43011", + "phone": "740-625-8098", + "fax": "740-625-1696", + "email": "vanessa@lewallen.com", + "web": "http://www.vanessalewallen.com", + "followers": 2710 + }, + { + "firstname": "Burton", + "lastname": "Brining", + "company": "Corcoran Machine Company", + "address": "135 E Liberty St", + "city": "Wooster", + "county": "Wayne", + "state": "OH", + "zip": "44691", + "phone": "330-262-5481", + "fax": "330-262-7555", + "email": "burton@brining.com", + "web": "http://www.burtonbrining.com", + "followers": 8158 + }, + { + "firstname": "Rosalie", + "lastname": "Krigger", + "company": "Aaron, William Esq", + "address": "330 Route 211 E", + "city": "Middletown", + "county": "Orange", + "state": "NY", + "zip": "10940", + "phone": "845-343-2313", + "fax": "845-343-2979", + "email": "rosalie@krigger.com", + "web": "http://www.rosaliekrigger.com", + "followers": 1411 + }, + { + "firstname": "Tammie", + "lastname": "Schwartzwalde", + "company": "Cox, Thomas E", + "address": "415 Center St", + "city": "Ironton", + "county": "Lawrence", + 
"state": "OH", + "zip": "45638", + "phone": "740-532-5488", + "fax": "740-532-0319", + "email": "tammie@schwartzwalde.com", + "web": "http://www.tammieschwartzwalde.com", + "followers": 1367 + }, + { + "firstname": "Darrin", + "lastname": "Neiss", + "company": "Delaney, James J Jr", + "address": "101 W Central Blvd", + "city": "Kewanee", + "county": "Henry", + "state": "IL", + "zip": "61443", + "phone": "309-852-5127", + "fax": "309-852-8638", + "email": "darrin@neiss.com", + "web": "http://www.darrinneiss.com", + "followers": 5748 + }, + { + "firstname": "Rosalia", + "lastname": "Kennemur", + "company": "Reagan, Thomas J Esq", + "address": "222 S 10th St", + "city": "Oakdale", + "county": "Allen", + "state": "LA", + "zip": "71463", + "phone": "318-335-5586", + "fax": "318-335-1873", + "email": "rosalia@kennemur.com", + "web": "http://www.rosaliakennemur.com", + "followers": 5984 + }, + { + "firstname": "Callie", + "lastname": "Leboeuf", + "company": "Town Motors", + "address": "100 S 2nd Ave", + "city": "Alpena", + "county": "Alpena", + "state": "MI", + "zip": "49707", + "phone": "989-354-3344", + "fax": "989-354-3712", + "email": "callie@leboeuf.com", + "web": "http://www.callieleboeuf.com", + "followers": 3607 + }, + { + "firstname": "Patty", + "lastname": "Bernasconi", + "company": "Porter Wright Morris & Arthur", + "address": "851 Fort Street Mall", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-531-2621", + "fax": "808-531-6234", + "email": "patty@bernasconi.com", + "web": "http://www.pattybernasconi.com", + "followers": 3012 + }, + { + "firstname": "Elmo", + "lastname": "Gabouer", + "company": "Conduit & Foundation Corp", + "address": "275 W Bridge St", + "city": "New Hope", + "county": "Bucks", + "state": "PA", + "zip": "18938", + "phone": "215-862-6538", + "fax": "215-862-7006", + "email": "elmo@gabouer.com", + "web": "http://www.elmogabouer.com", + "followers": 9593 + }, + { + "firstname": "Logan", + "lastname": "Muhl", + "company": "Brown, Phillip C Esq", + "address": "126 S Bellevue Ave", + "city": "Langhorne", + "county": "Bucks", + "state": "PA", + "zip": "19047", + "phone": "215-757-6124", + "fax": "215-757-2785", + "email": "logan@muhl.com", + "web": "http://www.loganmuhl.com", + "followers": 741 + }, + { + "firstname": "Vivian", + "lastname": "Brzostowski", + "company": "Savage, Philip M Iii", + "address": "118 Mill St", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-2791", + "fax": "215-788-3902", + "email": "vivian@brzostowski.com", + "web": "http://www.vivianbrzostowski.com", + "followers": 1121 + }, + { + "firstname": "Efren", + "lastname": "Baucher", + "company": "R O Binson Inc", + "address": "Rts 232 & 413", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-598-4644", + "fax": "215-598-5929", + "email": "efren@baucher.com", + "web": "http://www.efrenbaucher.com", + "followers": 8199 + }, + { + "firstname": "Kurtis", + "lastname": "Mcbay", + "company": "P C Enterprises Ltd", + "address": "737 Levittown Ctr", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": "19055", + "phone": "215-946-6048", + "fax": "215-946-6458", + "email": "kurtis@mcbay.com", + "web": "http://www.kurtismcbay.com", + "followers": 8315 + }, + { + "firstname": "Guillermo", + "lastname": "Tsang", + "company": "Gillis, Donald W Esq", + "address": "16 Highland Park Way", + "city": "Levittown", + "county": "Bucks", + "state": "PA", + "zip": 
"19056", + "phone": "215-949-7912", + "fax": "215-949-8919", + "email": "guillermo@tsang.com", + "web": "http://www.guillermotsang.com", + "followers": 4007 + }, + { + "firstname": "Milton", + "lastname": "Kuhlman", + "company": "Imag Corp", + "address": "237 Jackson St Sw", + "city": "Camden", + "county": "Ouachita", + "state": "AR", + "zip": "71701", + "phone": "870-836-9021", + "fax": "870-836-2283", + "email": "milton@kuhlman.com", + "web": "http://www.miltonkuhlman.com", + "followers": 7034 + }, + { + "firstname": "Naomi", + "lastname": "Greenly", + "company": "Kpmg Peat Marwick Llp", + "address": "1400 Gault Ave N", + "city": "Fort Payne", + "county": "De Kalb", + "state": "AL", + "zip": "35967", + "phone": "256-845-1216", + "fax": "256-845-2469", + "email": "naomi@greenly.com", + "web": "http://www.naomigreenly.com", + "followers": 916 + }, + { + "firstname": "Mary", + "lastname": "Maurizio", + "company": "Carey Filter White & Boland", + "address": "404 Main St", + "city": "Delta", + "county": "Fulton", + "state": "OH", + "zip": "43515", + "phone": "419-822-7176", + "fax": "419-822-0591", + "email": "mary@maurizio.com", + "web": "http://www.marymaurizio.com", + "followers": 6083 + }, + { + "firstname": "Caitlin", + "lastname": "Reiniger", + "company": "White, Lawrence R Esq", + "address": "140 N Columbus St", + "city": "Galion", + "county": "Crawford", + "state": "OH", + "zip": "44833", + "phone": "419-468-6910", + "fax": "419-468-9018", + "email": "caitlin@reiniger.com", + "web": "http://www.caitlinreiniger.com", + "followers": 641 + }, + { + "firstname": "Coleman", + "lastname": "Cuneo", + "company": "M & M Mars", + "address": "25 E High St", + "city": "Waynesburg", + "county": "Greene", + "state": "PA", + "zip": "15370", + "phone": "724-627-4378", + "fax": "724-627-2305", + "email": "coleman@cuneo.com", + "web": "http://www.colemancuneo.com", + "followers": 8657 + }, + { + "firstname": "Rachel", + "lastname": "Larrison", + "company": "Ipa The Editing House", + "address": "3721 Oberlin Ave", + "city": "Lorain", + "county": "Lorain", + "state": "OH", + "zip": "44053", + "phone": "440-282-3729", + "fax": "440-282-6918", + "email": "rachel@larrison.com", + "web": "http://www.rachellarrison.com", + "followers": 4562 + }, + { + "firstname": "Dwayne", + "lastname": "Maddalena", + "company": "Ebbeson, James O Esq", + "address": "532 Court St", + "city": "Pekin", + "county": "Tazewell", + "state": "IL", + "zip": "61554", + "phone": "309-347-1137", + "fax": "309-347-9282", + "email": "dwayne@maddalena.com", + "web": "http://www.dwaynemaddalena.com", + "followers": 7384 + }, + { + "firstname": "Angelique", + "lastname": "Schermerhorn", + "company": "Safety Direct Inc", + "address": "511 Saint Johns Ave", + "city": "Palatka", + "county": "Putnam", + "state": "FL", + "zip": "32177", + "phone": "386-328-7869", + "fax": "386-328-1499", + "email": "angelique@schermerhorn.com", + "web": "http://www.angeliqueschermerhorn.com", + "followers": 6181 + }, + { + "firstname": "Junior", + "lastname": "Wadlinger", + "company": "Sonos Music", + "address": "185 E Market St", + "city": "Warren", + "county": "Trumbull", + "state": "OH", + "zip": "44481", + "phone": "330-393-9794", + "fax": "330-393-6808", + "email": "junior@wadlinger.com", + "web": "http://www.juniorwadlinger.com", + "followers": 7690 + }, + { + "firstname": "Darrel", + "lastname": "Tork", + "company": "S & T Machining", + "address": "2121 S Mannheim Rd", + "city": "Westchester", + "county": "Cook", + "state": "IL", + "zip": "60154", + 
"phone": "708-865-8091", + "fax": "708-865-8984", + "email": "darrel@tork.com", + "web": "http://www.darreltork.com", + "followers": 9708 + }, + { + "firstname": "Lana", + "lastname": "Garrigus", + "company": "Russell Builders & Hardware", + "address": "118 Ne 3rd St", + "city": "McMinnville", + "county": "Yamhill", + "state": "OR", + "zip": "97128", + "phone": "503-434-2642", + "fax": "503-434-8121", + "email": "lana@garrigus.com", + "web": "http://www.lanagarrigus.com", + "followers": 3048 + }, + { + "firstname": "Jonathon", + "lastname": "Waldall", + "company": "Mission Hills Escrow", + "address": "300 Hampton St", + "city": "Walterboro", + "county": "Colleton", + "state": "SC", + "zip": "29488", + "phone": "843-549-9461", + "fax": "843-549-0125", + "email": "jonathon@waldall.com", + "web": "http://www.jonathonwaldall.com", + "followers": 8039 + }, + { + "firstname": "Kristine", + "lastname": "Paker", + "company": "Chagrin Valley Massotherapy", + "address": "301 N Pine St", + "city": "Creston", + "county": "Union", + "state": "IA", + "zip": "50801", + "phone": "641-782-7169", + "fax": "641-782-7962", + "email": "kristine@paker.com", + "web": "http://www.kristinepaker.com", + "followers": 7977 + }, + { + "firstname": "Dwain", + "lastname": "Agricola", + "company": "Beatty Satchell Everngam & Co", + "address": "211 N Main St", + "city": "Leitchfield", + "county": "Grayson", + "state": "KY", + "zip": "42754", + "phone": "270-259-5194", + "fax": "270-259-0821", + "email": "dwain@agricola.com", + "web": "http://www.dwainagricola.com", + "followers": 8410 + }, + { + "firstname": "Jewel", + "lastname": "Agresta", + "company": "Md Assn Cert Pub Accts Inc", + "address": "4565 Harrison St", + "city": "Hillside", + "county": "Cook", + "state": "IL", + "zip": "60162", + "phone": "708-449-7139", + "fax": "708-449-2963", + "email": "jewel@agresta.com", + "web": "http://www.jewelagresta.com", + "followers": 293 + }, + { + "firstname": "Georgette", + "lastname": "Bandyk", + "company": "Specialty Alumn Castings Inc", + "address": "1965 Wakefield Ave", + "city": "Petersburg", + "county": "Petersburg City", + "state": "VA", + "zip": "23805", + "phone": "804-796-2746", + "fax": "804-796-5351", + "email": "georgette@bandyk.com", + "web": "http://www.georgettebandyk.com", + "followers": 9865 + }, + { + "firstname": "Geri", + "lastname": "Forness", + "company": "Quality Dynamics Group", + "address": "Capitol Ave", + "city": "Corydon", + "county": "Harrison", + "state": "IN", + "zip": "47112", + "phone": "812-738-9416", + "fax": "812-738-4816", + "email": "geri@forness.com", + "web": "http://www.geriforness.com", + "followers": 7788 + }, + { + "firstname": "Modesto", + "lastname": "Scroggie", + "company": "Bulloch, Bruce Cpa", + "address": "300 Orlando Dr", + "city": "Raritan", + "county": "Somerset", + "state": "NJ", + "zip": "08869", + "phone": "908-980-5621", + "fax": "908-980-9842", + "email": "modesto@scroggie.com", + "web": "http://www.modestoscroggie.com", + "followers": 5671 + }, + { + "firstname": "Curt", + "lastname": "Menedez", + "company": "J & J Machinery Repair Inc", + "address": "207 Yukon", + "city": "Tampa", + "county": "Hillsborough", + "state": "FL", + "zip": "33604", + "phone": "813-932-8602", + "fax": "813-932-4548", + "email": "curt@menedez.com", + "web": "http://www.curtmenedez.com", + "followers": 1311 + }, + { + "firstname": "Karen", + "lastname": "Zombo", + "company": "Healthcare Family Credit Union", + "address": "3112 W Kennedy Blvd", + "city": "Tampa", + "county": "Hillsborough", + 
"state": "FL", + "zip": "33609", + "phone": "813-872-4288", + "fax": "813-872-8262", + "email": "karen@zombo.com", + "web": "http://www.karenzombo.com", + "followers": 2543 + }, + { + "firstname": "Lora", + "lastname": "Lendor", + "company": "Advanced Electromagnetics Inc", + "address": "7 W Darlington Ave", + "city": "Kissimmee", + "county": "Osceola", + "state": "FL", + "zip": "34741", + "phone": "407-870-0382", + "fax": "407-870-6229", + "email": "lora@lendor.com", + "web": "http://www.loralendor.com", + "followers": 5947 + }, + { + "firstname": "Felipe", + "lastname": "Mahone", + "company": "Apartment Mart", + "address": "1001 Bishop St #-2850", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96813", + "phone": "808-536-3239", + "fax": "808-536-1231", + "email": "felipe@mahone.com", + "web": "http://www.felipemahone.com", + "followers": 4427 + }, + { + "firstname": "Rosalyn", + "lastname": "Daulton", + "company": "Rodgard Corp", + "address": "300 Broadacres Dr", + "city": "Bloomfield", + "county": "Essex", + "state": "NJ", + "zip": "07003", + "phone": "973-338-8552", + "fax": "973-338-1603", + "email": "rosalyn@daulton.com", + "web": "http://www.rosalyndaulton.com", + "followers": 2667 + }, + { + "firstname": "Marquita", + "lastname": "Bousman", + "company": "Constantine, Katherine A Esq", + "address": "30 Highland Ave", + "city": "Warwick", + "county": "Orange", + "state": "NY", + "zip": "10990", + "phone": "845-986-0909", + "fax": "845-986-2447", + "email": "marquita@bousman.com", + "web": "http://www.marquitabousman.com", + "followers": 4315 + }, + { + "firstname": "Carla", + "lastname": "Sirbaugh", + "company": "Urso, Natale L Esq", + "address": "110 S La Brea Ave #-22", + "city": "Inglewood", + "county": "Los Angeles", + "state": "CA", + "zip": "90301", + "phone": "310-412-6653", + "fax": "310-412-1067", + "email": "carla@sirbaugh.com", + "web": "http://www.carlasirbaugh.com", + "followers": 9701 + }, + { + "firstname": "Wes", + "lastname": "Fontanella", + "company": "Woodside Travel Trust", + "address": "1369 W Redondo Beach Blvd", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90247", + "phone": "310-515-3065", + "fax": "310-515-2515", + "email": "wes@fontanella.com", + "web": "http://www.wesfontanella.com", + "followers": 1717 + }, + { + "firstname": "Meredith", + "lastname": "Ivrin", + "company": "Hamilton Financial Corp", + "address": "323 N Gilbert St", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-446-7172", + "fax": "217-446-2369", + "email": "meredith@ivrin.com", + "web": "http://www.meredithivrin.com", + "followers": 7827 + }, + { + "firstname": "Laurie", + "lastname": "Bigg", + "company": "Essc Inc", + "address": "14500 Lakeside Cir", + "city": "Sterling Heights", + "county": "Macomb", + "state": "MI", + "zip": "48313", + "phone": "586-247-6171", + "fax": "586-247-9791", + "email": "laurie@bigg.com", + "web": "http://www.lauriebigg.com", + "followers": 8684 + }, + { + "firstname": "Barton", + "lastname": "Friesner", + "company": "Optical Supply", + "address": "1 Summit Ct", + "city": "Fishkill", + "county": "Dutchess", + "state": "NY", + "zip": "12524", + "phone": "845-896-6652", + "fax": "845-896-1692", + "email": "barton@friesner.com", + "web": "http://www.bartonfriesner.com", + "followers": 4889 + }, + { + "firstname": "Sophie", + "lastname": "Langner", + "company": "Kapetanakis, Alexander Esq", + "address": "535 Ward Ave #-204", + "city": "Honolulu", + "county": 
"Honolulu", + "state": "HI", + "zip": "96814", + "phone": "808-545-7695", + "fax": "808-545-8636", + "email": "sophie@langner.com", + "web": "http://www.sophielangner.com", + "followers": 1596 + }, + { + "firstname": "Garfield", + "lastname": "Lijewski", + "company": "Denker, Aaron Esq", + "address": "6401 N Lincoln Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60645", + "phone": "773-976-3827", + "fax": "773-976-5586", + "email": "garfield@lijewski.com", + "web": "http://www.garfieldlijewski.com", + "followers": 5955 + }, + { + "firstname": "Warren", + "lastname": "Speach", + "company": "E Norwalk Crmc Tile & Mrbl Co", + "address": "361 Park Ave", + "city": "Scotch Plains", + "county": "Union", + "state": "NJ", + "zip": "07076", + "phone": "908-322-3846", + "fax": "908-322-6744", + "email": "warren@speach.com", + "web": "http://www.warrenspeach.com", + "followers": 6741 + }, + { + "firstname": "Madonna", + "lastname": "Cosby", + "company": "Emanuel Reider Architects Inc", + "address": "135 Main St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94105", + "phone": "415-956-4437", + "fax": "415-956-5134", + "email": "madonna@cosby.com", + "web": "http://www.madonnacosby.com", + "followers": 3985 + }, + { + "firstname": "Valeria", + "lastname": "Lingbeek", + "company": "Recreation Director", + "address": "state", + "city": "Newtown", + "county": "Bucks", + "state": "PA", + "zip": "18940", + "phone": "215-968-8421", + "fax": "215-968-1567", + "email": "valeria@lingbeek.com", + "web": "http://www.valerialingbeek.com", + "followers": 8824 + }, + { + "firstname": "Heath", + "lastname": "Vanalphen", + "company": "California Stat Min & Mnrl Mus", + "address": "227 Commercial St", + "city": "Provincetown", + "county": "Barnstable", + "state": "MA", + "zip": "02657", + "phone": "508-487-6010", + "fax": "508-487-0597", + "email": "heath@vanalphen.com", + "web": "http://www.heathvanalphen.com", + "followers": 6846 + }, + { + "firstname": "Marisa", + "lastname": "Woldridge", + "company": "Wegner, Tim Esq", + "address": "153 Baltimore St", + "city": "Cumberland", + "county": "Allegany", + "state": "MD", + "zip": "21502", + "phone": "301-759-7421", + "fax": "301-759-9676", + "email": "marisa@woldridge.com", + "web": "http://www.marisawoldridge.com", + "followers": 6009 + }, + { + "firstname": "Rene", + "lastname": "Dummermuth", + "company": "Super 8 Motel", + "address": "2 Ridgedale Ave", + "city": "Cedar Knolls", + "county": "Morris", + "state": "NJ", + "zip": "07927", + "phone": "973-292-7918", + "fax": "973-292-5898", + "email": "rene@dummermuth.com", + "web": "http://www.renedummermuth.com", + "followers": 1687 + }, + { + "firstname": "Helga", + "lastname": "Windle", + "company": "Loew, Andrea H Esq", + "address": "99185 Moanalua Rd #-101", + "city": "Aiea", + "county": "Honolulu", + "state": "HI", + "zip": "96701", + "phone": "808-487-7779", + "fax": "808-487-6258", + "email": "helga@windle.com", + "web": "http://www.helgawindle.com", + "followers": 56 + }, + { + "firstname": "Margot", + "lastname": "Arenburg", + "company": "Mcivor, Carolyn Md", + "address": "736 N Mills Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32803", + "phone": "407-896-1593", + "fax": "407-896-6679", + "email": "margot@arenburg.com", + "web": "http://www.margotarenburg.com", + "followers": 7445 + }, + { + "firstname": "Sheila", + "lastname": "Holloran", + "company": "Warehouse On Wheels", + "address": "126 S Main St", + "city": 
"Clyde", + "county": "Sandusky", + "state": "OH", + "zip": "43410", + "phone": "419-547-9428", + "fax": "419-547-4835", + "email": "sheila@holloran.com", + "web": "http://www.sheilaholloran.com", + "followers": 9682 + }, + { + "firstname": "Melinda", + "lastname": "Carleton", + "company": "Cenol Co", + "address": "395 Revilo Ave", + "city": "Shirley", + "county": "Suffolk", + "state": "NY", + "zip": "11967", + "phone": "631-399-1636", + "fax": "631-399-6025", + "email": "melinda@carleton.com", + "web": "http://www.melindacarleton.com", + "followers": 7154 + }, + { + "firstname": "Ike", + "lastname": "Zeolla", + "company": "Halpin, Irene A Esq", + "address": "1900 L St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-331-1409", + "fax": "202-331-7781", + "email": "ike@zeolla.com", + "web": "http://www.ikezeolla.com", + "followers": 7418 + }, + { + "firstname": "Elmo", + "lastname": "Dagenais", + "company": "P C Routing Inc", + "address": "12914 Old Stage Rd", + "city": "Chester", + "county": "Chesterfield", + "state": "VA", + "zip": "23831", + "phone": "804-796-5647", + "fax": "804-796-9493", + "email": "elmo@dagenais.com", + "web": "http://www.elmodagenais.com", + "followers": 7355 + }, + { + "firstname": "Valentine", + "lastname": "Granberry", + "company": "Sunnyvale Travel", + "address": "1019 Shadick Dr", + "city": "Orange City", + "county": "Volusia", + "state": "FL", + "zip": "32763", + "phone": "407-775-4269", + "fax": "407-775-0598", + "email": "valentine@granberry.com", + "web": "http://www.valentinegranberry.com", + "followers": 7021 + }, + { + "firstname": "Waldo", + "lastname": "Sisk", + "company": "Muller Drugs Inc", + "address": "2211 Us Highway 19", + "city": "Holiday", + "county": "Pasco", + "state": "FL", + "zip": "34691", + "phone": "727-934-3827", + "fax": "727-934-7181", + "email": "waldo@sisk.com", + "web": "http://www.waldosisk.com", + "followers": 2109 + }, + { + "firstname": "Robt", + "lastname": "Braithwaite", + "company": "Meyer, Janet Md", + "address": "320 W Mclane St", + "city": "Osceola", + "county": "Clarke", + "state": "IA", + "zip": "50213", + "phone": "641-342-1276", + "fax": "641-342-6031", + "email": "robt@braithwaite.com", + "web": "http://www.robtbraithwaite.com", + "followers": 1336 + }, + { + "firstname": "Corinne", + "lastname": "Cowan", + "company": "Ward Equipment Co", + "address": "20 Montana Ave", + "city": "Laurel", + "county": "Yellowstone", + "state": "MT", + "zip": "59044", + "phone": "406-628-4030", + "fax": "406-628-9418", + "email": "corinne@cowan.com", + "web": "http://www.corinnecowan.com", + "followers": 7049 + }, + { + "firstname": "Rebeca", + "lastname": "Brumet", + "company": "Kingston Office Supplies Inc", + "address": "936 N Western Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60622", + "phone": "773-772-4015", + "fax": "773-772-1603", + "email": "rebeca@brumet.com", + "web": "http://www.rebecabrumet.com", + "followers": 202 + }, + { + "firstname": "Lynn", + "lastname": "Saulsberry", + "company": "Printing Factory Inc", + "address": "2725 W Mcdowell Rd", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85009", + "phone": "602-272-8326", + "fax": "602-272-3143", + "email": "lynn@saulsberry.com", + "web": "http://www.lynnsaulsberry.com", + "followers": 5265 + }, + { + "firstname": "Hannah", + "lastname": "Facio", + "company": "Cmptr Pros For Scl", + "address": "115 E Church St", + "city": "Elberton", + "county": 
"Elbert", + "state": "GA", + "zip": "30635", + "phone": "706-283-8280", + "fax": "706-283-6916", + "email": "hannah@facio.com", + "web": "http://www.hannahfacio.com", + "followers": 4321 + }, + { + "firstname": "Benjamin", + "lastname": "Schkade", + "company": "Port Brownsville Pub Scale Inc", + "address": "1636 E 1st Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-278-8687", + "fax": "907-278-7166", + "email": "benjamin@schkade.com", + "web": "http://www.benjaminschkade.com", + "followers": 5846 + }, + { + "firstname": "Athena", + "lastname": "Fontanilla", + "company": "Willamette Hobbies", + "address": "5020 Germantown Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19144", + "phone": "215-438-9675", + "fax": "215-438-1716", + "email": "athena@fontanilla.com", + "web": "http://www.athenafontanilla.com", + "followers": 5342 + }, + { + "firstname": "Alene", + "lastname": "Rabeck", + "company": "Bucks County Of", + "address": "475 E 162nd St", + "city": "South Holland", + "county": "Cook", + "state": "IL", + "zip": "60473", + "phone": "708-333-8056", + "fax": "708-333-2125", + "email": "alene@rabeck.com", + "web": "http://www.alenerabeck.com", + "followers": 2815 + }, + { + "firstname": "Yvette", + "lastname": "Kokoska", + "company": "Automation Products Inc", + "address": "200 Valley Dr", + "city": "Brisbane", + "county": "San Mateo", + "state": "CA", + "zip": "94005", + "phone": "650-468-3592", + "fax": "650-468-7716", + "email": "yvette@kokoska.com", + "web": "http://www.yvettekokoska.com", + "followers": 6175 + }, + { + "firstname": "Petra", + "lastname": "Clemmens", + "company": "Belton Industries Inc", + "address": "980 N Federal Hwy", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33432", + "phone": "561-394-2152", + "fax": "561-394-1574", + "email": "petra@clemmens.com", + "web": "http://www.petraclemmens.com", + "followers": 5263 + }, + { + "firstname": "Carmel", + "lastname": "Overfelt", + "company": "Woodworkers Supply Inc", + "address": "6801 Lake Worth Rd", + "city": "Lake Worth", + "county": "Palm Beach", + "state": "FL", + "zip": "33467", + "phone": "561-965-5167", + "fax": "561-965-1433", + "email": "carmel@overfelt.com", + "web": "http://www.carmeloverfelt.com", + "followers": 5868 + }, + { + "firstname": "Danette", + "lastname": "Fostervold", + "company": "Flach, Douglas Esq", + "address": "6920 Santa Teresa Blvd", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95119", + "phone": "408-225-1319", + "fax": "408-225-5205", + "email": "danette@fostervold.com", + "web": "http://www.danettefostervold.com", + "followers": 1315 + }, + { + "firstname": "Vince", + "lastname": "Ettel", + "company": "Breen Trucking", + "address": "408 Main St", + "city": "Springfield", + "county": "Sarpy", + "state": "NE", + "zip": "68059", + "phone": "402-399-6999", + "fax": "402-399-6478", + "email": "vince@ettel.com", + "web": "http://www.vinceettel.com", + "followers": 7780 + }, + { + "firstname": "Davis", + "lastname": "Heideman", + "company": "Dennis J Wall Atty At Law Pa", + "address": "801 W 5th St", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76102", + "phone": "817-332-7902", + "fax": "817-332-5439", + "email": "davis@heideman.com", + "web": "http://www.davisheideman.com", + "followers": 4778 + }, + { + "firstname": "Bradly", + "lastname": "Hasselvander", + "company": "Public Works Department Office", + 
"address": "2302 Artesia Blvd", + "city": "Redondo Beach", + "county": "Los Angeles", + "state": "CA", + "zip": "90278", + "phone": "310-374-2374", + "fax": "310-374-2363", + "email": "bradly@hasselvander.com", + "web": "http://www.bradlyhasselvander.com", + "followers": 7831 + }, + { + "firstname": "Nathanial", + "lastname": "Phoenix", + "company": "Precision Steel Rule Die Co", + "address": "1000 Nw 105th St", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73114", + "phone": "405-748-7637", + "fax": "405-748-1856", + "email": "nathanial@phoenix.com", + "web": "http://www.nathanialphoenix.com", + "followers": 8308 + }, + { + "firstname": "Lamar", + "lastname": "Mckibben", + "company": "Battaglia, Jack M Esq", + "address": "1620 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-864-7338", + "fax": "415-864-7623", + "email": "lamar@mckibben.com", + "web": "http://www.lamarmckibben.com", + "followers": 4193 + }, + { + "firstname": "Shanna", + "lastname": "Numkena", + "company": "Anderson Independent Mail", + "address": "1426 5th Pl Nw", + "city": "Rochester", + "county": "Olmsted", + "state": "MN", + "zip": "55901", + "phone": "507-280-1856", + "fax": "507-280-6844", + "email": "shanna@numkena.com", + "web": "http://www.shannanumkena.com", + "followers": 1364 + }, + { + "firstname": "Helena", + "lastname": "Suermann", + "company": "Stubenberge, James A Esq", + "address": "897 Independence Ave", + "city": "Mountain View", + "county": "Santa Clara", + "state": "CA", + "zip": "94043", + "phone": "650-965-0255", + "fax": "650-965-3368", + "email": "helena@suermann.com", + "web": "http://www.helenasuermann.com", + "followers": 4536 + }, + { + "firstname": "Delphine", + "lastname": "Helmich", + "company": "Friends Hospital", + "address": "50 Aviation Way", + "city": "Watsonville", + "county": "Santa Cruz", + "state": "CA", + "zip": "95076", + "phone": "831-763-4348", + "fax": "831-763-0923", + "email": "delphine@helmich.com", + "web": "http://www.delphinehelmich.com", + "followers": 7383 + }, + { + "firstname": "Barbara", + "lastname": "Hindley", + "company": "Kirin Amgen", + "address": "904 N Lake St", + "city": "Burbank", + "county": "Los Angeles", + "state": "CA", + "zip": "91502", + "phone": "818-841-8886", + "fax": "818-841-8221", + "email": "barbara@hindley.com", + "web": "http://www.barbarahindley.com", + "followers": 9155 + }, + { + "firstname": "Sheryl", + "lastname": "Sisofo", + "company": "Thrifty Sign Stop", + "address": "1049 S Mccord Rd", + "city": "Holland", + "county": "Lucas", + "state": "OH", + "zip": "43528", + "phone": "419-865-8702", + "fax": "419-865-1836", + "email": "sheryl@sisofo.com", + "web": "http://www.sherylsisofo.com", + "followers": 5693 + }, + { + "firstname": "Robyn", + "lastname": "Christophel", + "company": "Woodward, John C Esq", + "address": "3420 E Flamingo Rd", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89121", + "phone": "702-458-1072", + "fax": "702-458-2093", + "email": "robyn@christophel.com", + "web": "http://www.robynchristophel.com", + "followers": 3971 + }, + { + "firstname": "Gayla", + "lastname": "Geimer", + "company": "Ortman Mccain Co", + "address": "1280 Price Ave", + "city": "Pomona", + "county": "Los Angeles", + "state": "CA", + "zip": "91767", + "phone": "909-620-6453", + "fax": "909-620-2768", + "email": "gayla@geimer.com", + "web": "http://www.gaylageimer.com", + "followers": 8969 + }, + { + "firstname": "Evan", + 
"lastname": "Pyfrom", + "company": "Nevada Baking Co", + "address": "5430 Alpha Rd", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-661-4625", + "fax": "214-661-8804", + "email": "evan@pyfrom.com", + "web": "http://www.evanpyfrom.com", + "followers": 2516 + }, + { + "firstname": "Chad", + "lastname": "Miklas", + "company": "Red Carpet Inn", + "address": "31 S Grove St", + "city": "East Aurora", + "county": "Erie", + "state": "NY", + "zip": "14052", + "phone": "716-655-2736", + "fax": "716-655-2749", + "email": "chad@miklas.com", + "web": "http://www.chadmiklas.com", + "followers": 5357 + }, + { + "firstname": "Trey", + "lastname": "Tout", + "company": "Breen, Sean E Esq", + "address": "100 Mbc Dr", + "city": "Shawano", + "county": "Shawano", + "state": "WI", + "zip": "54166", + "phone": "715-526-6806", + "fax": "715-526-2421", + "email": "trey@tout.com", + "web": "http://www.treytout.com", + "followers": 205 + }, + { + "firstname": "Isabell", + "lastname": "Armout", + "company": "True Electric Corp", + "address": "7895 S Cessna Ave", + "city": "Gaithersburg", + "county": "Montgomery", + "state": "MD", + "zip": "20879", + "phone": "301-921-0406", + "fax": "301-921-1251", + "email": "isabell@armout.com", + "web": "http://www.isabellarmout.com", + "followers": 4878 + }, + { + "firstname": "Alejandro", + "lastname": "Mascall", + "company": "Railway Educational Bureau", + "address": "2350 Duke St", + "city": "Alexandria", + "county": "Alexandria City", + "state": "VA", + "zip": "22314", + "phone": "703-684-2882", + "fax": "703-684-8561", + "email": "alejandro@mascall.com", + "web": "http://www.alejandromascall.com", + "followers": 3512 + }, + { + "firstname": "Kennith", + "lastname": "Kirklin", + "company": "Sears Roebuck And Co", + "address": "2303 21st Ave S", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37212", + "phone": "615-385-1598", + "fax": "615-385-6946", + "email": "kennith@kirklin.com", + "web": "http://www.kennithkirklin.com", + "followers": 5087 + }, + { + "firstname": "Ike", + "lastname": "Benthin", + "company": "Lee, Harry Esq", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-5277", + "fax": "415-255-6543", + "email": "ike@benthin.com", + "web": "http://www.ikebenthin.com", + "followers": 8473 + }, + { + "firstname": "Donald", + "lastname": "Sherretts", + "company": "Nylonge Corporation", + "address": "1062 Folsom St", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94103", + "phone": "415-255-7718", + "fax": "415-255-7088", + "email": "donald@sherretts.com", + "web": "http://www.donaldsherretts.com", + "followers": 2332 + }, + { + "firstname": "Lina", + "lastname": "Hybarger", + "company": "L & H Central Office", + "address": "1828 Jefferson Pl Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20036", + "phone": "202-833-4983", + "fax": "202-833-3174", + "email": "lina@hybarger.com", + "web": "http://www.linahybarger.com", + "followers": 9793 + }, + { + "firstname": "Rebekah", + "lastname": "Padley", + "company": "Reed Engineering Inc", + "address": "200 E Delawr Pl", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60611", + "phone": "312-944-1877", + "fax": "312-944-1477", + "email": "rebekah@padley.com", + "web": "http://www.rebekahpadley.com", + "followers": 3839 + }, + { + "firstname": "Marion", + "lastname": 
"Gaulden", + "company": "Madden, John H Jr", + "address": "200 W South St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-9335", + "fax": "434-979-2694", + "email": "marion@gaulden.com", + "web": "http://www.mariongaulden.com", + "followers": 5625 + }, + { + "firstname": "Maurine", + "lastname": "Monroy", + "company": "Central Distribution System", + "address": "2000 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-947-8922", + "fax": "201-947-4235", + "email": "maurine@monroy.com", + "web": "http://www.maurinemonroy.com", + "followers": 5828 + }, + { + "firstname": "Rosanna", + "lastname": "Sandrock", + "company": "Computer X Consulting", + "address": "1797 Lakewood Ter Se", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30315", + "phone": "404-627-4604", + "fax": "404-627-4276", + "email": "rosanna@sandrock.com", + "web": "http://www.rosannasandrock.com", + "followers": 3044 + }, + { + "firstname": "Marcelino", + "lastname": "Maggs", + "company": "Rascher & Betzold Inc", + "address": "201 E Pine St", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32801", + "phone": "407-420-1152", + "fax": "407-420-7195", + "email": "marcelino@maggs.com", + "web": "http://www.marcelinomaggs.com", + "followers": 5320 + }, + { + "firstname": "Florine", + "lastname": "Willardson", + "company": "Lunt, Donald C Esq", + "address": "5605 Ne 105th Ave", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97220", + "phone": "503-256-6559", + "fax": "503-256-8982", + "email": "florine@willardson.com", + "web": "http://www.florinewillardson.com", + "followers": 2336 + }, + { + "firstname": "Jude", + "lastname": "Haza", + "company": "Howard Fabrication", + "address": "1348 Liberty Pike", + "city": "Franklin", + "county": "Williamson", + "state": "TN", + "zip": "37067", + "phone": "615-790-3984", + "fax": "615-790-3042", + "email": "jude@haza.com", + "web": "http://www.judehaza.com", + "followers": 7311 + }, + { + "firstname": "Eldon", + "lastname": "Sutch", + "company": "Friesen And Kane Public Accts", + "address": "1818 E Atlantic St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-743-2414", + "fax": "215-743-2529", + "email": "eldon@sutch.com", + "web": "http://www.eldonsutch.com", + "followers": 6895 + }, + { + "firstname": "Lashonda", + "lastname": "Enote", + "company": "Nichols Village The Inn", + "address": "6301 Owensmouth Ave", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91367", + "phone": "818-704-8490", + "fax": "818-704-7539", + "email": "lashonda@enote.com", + "web": "http://www.lashondaenote.com", + "Note": "Ancien Dailymotion, recontrÊ à LeWeb London 2012", + "followers": 6383 + }, + { + "firstname": "Marla", + "lastname": "Folz", + "company": "Odonoghue C Kevin", + "address": "201 Electronics Blvd Sw", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35824", + "phone": "256-464-3329", + "fax": "256-464-6964", + "email": "marla@folz.com", + "web": "http://www.marlafolz.com", + "Note": "Product Manager at Sage France & WebMaster of ConseilsMarketing.Fr Interview at LEWeb", + "followers": 5861 + }, + { + "firstname": "Reginald", + "lastname": "Lunan", + "company": "Healey Chevy Olds Buick Geo", + "address": "985 Parker Ct", + "city": "Santa Clara", + "county": "Santa Clara", + "state": 
"CA", + "zip": "95050", + "phone": "408-727-1747", + "fax": "408-727-0884", + "email": "reginald@lunan.com", + "web": "http://www.reginaldlunan.com", + "followers": 7075 + }, + { + "firstname": "Kyle", + "lastname": "Lindauer", + "company": "Gem Tec Inc", + "address": "2000 E Jefferson St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-258-5196", + "fax": "602-258-8609", + "email": "kyle@lindauer.com", + "web": "http://www.kylelindauer.com", + "followers": 6277 + }, + { + "firstname": "Son", + "lastname": "Marschke", + "company": "Evenings Dlght Fireplaces", + "address": "1119 Wheeler Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18510", + "phone": "570-969-0886", + "fax": "570-969-8176", + "email": "son@marschke.com", + "web": "http://www.sonmarschke.com", + "followers": 3481 + }, + { + "firstname": "Johnie", + "lastname": "Minaai", + "company": "Darling, Pamela E", + "address": "2100 Linwood Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-592-4771", + "fax": "201-592-8423", + "email": "johnie@minaai.com", + "web": "http://www.johnieminaai.com", + "followers": 5903 + }, + { + "firstname": "Kelli", + "lastname": "Varrato", + "company": "Frances Meyer Inc", + "address": "2505 Congress St", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92110", + "phone": "858-298-3969", + "fax": "858-298-6695", + "email": "kelli@varrato.com", + "web": "http://www.kellivarrato.com", + "followers": 9891 + }, + { + "firstname": "Neva", + "lastname": "Marsell", + "company": "Comfort Inn Wilshire", + "address": "1312 W Lincoln Ave", + "city": "Olivia", + "county": "Renville", + "state": "MN", + "zip": "56277", + "phone": "320-523-4975", + "fax": "320-523-8378", + "email": "neva@marsell.com", + "web": "http://www.nevamarsell.com", + "followers": 4114 + }, + { + "firstname": "Brice", + "lastname": "Hedglin", + "company": "Cupkovic, Walter D Esq", + "address": "2809 Granny White Pike", + "city": "Nashville", + "county": "Davidson", + "state": "TN", + "zip": "37204", + "phone": "615-292-9016", + "fax": "615-292-9027", + "email": "brice@hedglin.com", + "web": "http://www.bricehedglin.com", + "followers": 7730 + }, + { + "firstname": "Terrance", + "lastname": "Nimmer", + "company": "C D Short Foods Inc", + "address": "1400 N Woodward Ave", + "city": "Bloomfield Hills", + "county": "Oakland", + "state": "MI", + "zip": "48304", + "phone": "248-647-0653", + "fax": "248-647-1999", + "email": "terrance@nimmer.com", + "web": "http://www.terrancenimmer.com", + "followers": 7388 + }, + { + "firstname": "Carol", + "lastname": "Krisman", + "company": "Uniglobe Transeas Travel", + "address": "100 E 85th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10028", + "phone": "212-472-7877", + "fax": "212-472-9579", + "email": "carol@krisman.com", + "web": "http://www.carolkrisman.com", + "followers": 5985 + }, + { + "firstname": "Dollie", + "lastname": "Pillitteri", + "company": "Jiffy Moving & Storage Company", + "address": "4024 Merchant Rd", + "city": "Fort Wayne", + "county": "Allen", + "state": "IN", + "zip": "46818", + "phone": "260-489-3094", + "fax": "260-489-4697", + "email": "dollie@pillitteri.com", + "web": "http://www.dolliepillitteri.com", + "followers": 2624 + }, + { + "firstname": "Mellissa", + "lastname": "Sule", + "company": "Dowse, Geoffrey Esq", + "address": "92 Argonaut #-270", + "city": "Aliso Viejo", + "county": 
"Orange", + "state": "CA", + "zip": "92656", + "phone": "949-768-6176", + "fax": "949-768-8107", + "email": "mellissa@sule.com", + "web": "http://www.mellissasule.com", + "followers": 2709 + }, + { + "firstname": "Antony", + "lastname": "Thierauf", + "company": "Gutzwiller, Robert H Esq", + "address": "4915 Industrial Way", + "city": "Coeur d Alene", + "county": "Kootenai", + "state": "ID", + "zip": "83814", + "phone": "208-667-5252", + "fax": "208-667-5935", + "email": "antony@thierauf.com", + "web": "http://www.antonythierauf.com", + "followers": 1044 + }, + { + "firstname": "Reina", + "lastname": "Reisenauer", + "company": "Terrance Fox", + "address": "207 N Main St", + "city": "Hutchins", + "county": "Dallas", + "state": "TX", + "zip": "75141", + "phone": "972-225-9930", + "fax": "972-225-9569", + "email": "reina@reisenauer.com", + "web": "http://www.reinareisenauer.com", + "followers": 2953 + }, + { + "firstname": "Zane", + "lastname": "Sulikowski", + "company": "Meijer Associates Credit Union", + "address": "2375 3rd St", + "city": "Riverside", + "county": "Riverside", + "state": "CA", + "zip": "92507", + "phone": "951-683-4479", + "fax": "951-683-9932", + "email": "zane@sulikowski.com", + "web": "http://www.zanesulikowski.com", + "followers": 7275 + }, + { + "firstname": "Hilario", + "lastname": "Cassa", + "company": "Independence Assocaites Inc", + "address": "2222 Santa Monica Blvd", + "city": "Santa Monica", + "county": "Los Angeles", + "state": "CA", + "zip": "90404", + "phone": "310-828-6710", + "fax": "310-828-1895", + "email": "hilario@cassa.com", + "web": "http://www.hilariocassa.com", + "followers": 994 + }, + { + "firstname": "Veronica", + "lastname": "Radman", + "company": "Martin, Anthony D Esq", + "address": "235 W Main St", + "city": "Charlottesville", + "county": "Charlottesville City", + "state": "VA", + "zip": "22902", + "phone": "434-979-3306", + "fax": "434-979-9777", + "email": "veronica@radman.com", + "web": "http://www.veronicaradman.com", + "followers": 7568 + }, + { + "firstname": "Teri", + "lastname": "Erlewine", + "company": "League Of Kans Municipalities", + "address": "370 34th St St", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33711", + "phone": "727-327-3850", + "fax": "727-327-8494", + "email": "teri@erlewine.com", + "web": "http://www.terierlewine.com", + "followers": 6077 + }, + { + "firstname": "Alissa", + "lastname": "Mountjoy", + "company": "Technical & Mgmt Svc Corp", + "address": "6585 Commerce Blvd", + "city": "Rohnert Park", + "county": "Sonoma", + "state": "CA", + "zip": "94928", + "phone": "707-585-9715", + "fax": "707-585-7011", + "email": "alissa@mountjoy.com", + "web": "http://www.alissamountjoy.com", + "followers": 4886 + }, + { + "firstname": "Helene", + "lastname": "Iberg", + "company": "Spec Check Inc", + "address": "24800 Rockside Rd", + "city": "Bedford", + "county": "Cuyahoga", + "state": "OH", + "zip": "44146", + "phone": "440-786-6052", + "fax": "440-786-9246", + "email": "helene@iberg.com", + "web": "http://www.heleneiberg.com", + "followers": 716 + }, + { + "firstname": "Lona", + "lastname": "Scronce", + "company": "L & L Builders", + "address": "Rte 6 & 209", + "city": "Matamoras", + "county": "Pike", + "state": "PA", + "zip": "18336", + "phone": "570-296-4820", + "fax": "570-296-2054", + "email": "lona@scronce.com", + "web": "http://www.lonascronce.com", + "followers": 4687 + }, + { + "firstname": "Jeremy", + "lastname": "Lampi", + "company": "E Henderson Inc", + "address": "150 Sawkill 
Ave", + "city": "Milford", + "county": "Pike", + "state": "PA", + "zip": "18337", + "phone": "570-296-7797", + "fax": "570-296-4647", + "email": "jeremy@lampi.com", + "web": "http://www.jeremylampi.com", + "followers": 4714 + }, + { + "firstname": "Mitch", + "lastname": "Schattner", + "company": "Cosgrove Eisenberg & Kiley Pc", + "address": "3001 Geary Blvd", + "city": "San Francisco", + "county": "San Francisco", + "state": "CA", + "zip": "94118", + "phone": "415-668-8105", + "fax": "415-668-5841", + "email": "mitch@schattner.com", + "web": "http://www.mitchschattner.com", + "followers": 4388 + }, + { + "firstname": "Hans", + "lastname": "Carlan", + "company": "Midlen & Guillot Chartered", + "address": "509 W 4th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-276-2956", + "fax": "907-276-6002", + "email": "hans@carlan.com", + "web": "http://www.hanscarlan.com", + "followers": 985 + }, + { + "firstname": "Concetta", + "lastname": "Sarchett", + "company": "Barco/chromatics Inc", + "address": "2405 Grand Blvd", + "city": "Kansas City", + "county": "Jackson", + "state": "MO", + "zip": "64108", + "phone": "816-274-3833", + "fax": "816-274-6897", + "email": "concetta@sarchett.com", + "web": "http://www.concettasarchett.com", + "followers": 3086 + }, + { + "firstname": "Isaac", + "lastname": "Zackery", + "company": "Holiday Inn Of Issaquah", + "address": "321 Palmer Rd", + "city": "Denville", + "county": "Morris", + "state": "NJ", + "zip": "07834", + "phone": "973-328-5943", + "fax": "973-328-1903", + "email": "isaac@zackery.com", + "web": "http://www.isaaczackery.com", + "followers": 501 + }, + { + "firstname": "Doug", + "lastname": "Matrisciano", + "company": "Cefpi", + "address": "12500 Ne 10th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-451-5906", + "fax": "425-451-1273", + "email": "doug@matrisciano.com", + "web": "http://www.dougmatrisciano.com", + "followers": 9054 + }, + { + "firstname": "Devon", + "lastname": "Samrah", + "company": "Software Pursuits Inc", + "address": "1219 Pine Ave", + "city": "Orlando", + "county": "Orange", + "state": "FL", + "zip": "32824", + "phone": "407-240-2401", + "fax": "407-240-8312", + "email": "devon@samrah.com", + "web": "http://www.devonsamrah.com", + "followers": 6795 + }, + { + "firstname": "Amos", + "lastname": "Linnan", + "company": "Quincy, Jim", + "address": "3960 W 26th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60623", + "phone": "773-277-8332", + "fax": "773-277-4756", + "email": "amos@linnan.com", + "web": "http://www.amoslinnan.com", + "followers": 6297 + }, + { + "firstname": "Manuel", + "lastname": "Dienhart", + "company": "Bohning Co Ltd", + "address": "40 E Mcmicken Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45210", + "phone": "513-357-4669", + "fax": "513-357-7989", + "email": "manuel@dienhart.com", + "web": "http://www.manueldienhart.com", + "followers": 6771 + }, + { + "firstname": "Audra", + "lastname": "Cantu", + "company": "Chesapeake Telephone Systems", + "address": "935 S 2nd St", + "city": "Plainfield", + "county": "Union", + "state": "NJ", + "zip": "07063", + "phone": "908-756-1816", + "fax": "908-756-5441", + "email": "audra@cantu.com", + "web": "http://www.audracantu.com", + "followers": 3088 + }, + { + "firstname": "Keisha", + "lastname": "Ransonet", + "company": "Hsk Decker", + "address": "1049 Lakloey Dr", + "city": "North Pole", + "county": 
"Fairbanks North Star", + "state": "AK", + "zip": "99705", + "phone": "907-488-6897", + "fax": "907-488-2093", + "email": "keisha@ransonet.com", + "web": "http://www.keisharansonet.com", + "followers": 5340 + }, + { + "firstname": "Rolando", + "lastname": "Baumann", + "company": "Erie Brush Co", + "address": "921 Sw Washington St #-321", + "city": "Portland", + "county": "Multnomah", + "state": "OR", + "zip": "97205", + "phone": "503-241-6723", + "fax": "503-241-7691", + "email": "rolando@baumann.com", + "web": "http://www.rolandobaumann.com", + "followers": 9754 + }, + { + "firstname": "Maryanne", + "lastname": "Whyman", + "company": "Walls, Robert E Esq", + "address": "1008 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-1137", + "fax": "213-748-0447", + "email": "maryanne@whyman.com", + "web": "http://www.maryannewhyman.com", + "followers": 844 + }, + { + "firstname": "Kurtis", + "lastname": "Asberry", + "company": "Budgetel Inns", + "address": "Box #-37223", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79937", + "phone": "915-591-1621", + "fax": "915-591-3614", + "email": "kurtis@asberry.com", + "web": "http://www.kurtisasberry.com", + "followers": 8502 + }, + { + "firstname": "Ed", + "lastname": "Gompf", + "company": "Corro Therm Inc", + "address": "3 B Floor Care & Hskpg Svc", + "city": "Catawissa", + "county": "Columbia", + "state": "PA", + "zip": "17820", + "phone": "570-799-2838", + "fax": "570-799-4583", + "email": "ed@gompf.com", + "web": "http://www.edgompf.com", + "followers": 8705 + }, + { + "firstname": "Norman", + "lastname": "Betance", + "company": "Precision Electric Co Inc", + "address": "7949 E Acoma Dr", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85260", + "phone": "480-991-7884", + "fax": "480-991-6547", + "email": "norman@betance.com", + "web": "http://www.normanbetance.com", + "followers": 4602 + }, + { + "firstname": "Berta", + "lastname": "Karczewski", + "company": "Sather Eng Inc", + "address": "1035 N Mcqueen Rd #-133", + "city": "Gilbert", + "county": "Maricopa", + "state": "AZ", + "zip": "85233", + "phone": "480-926-0770", + "fax": "480-926-7533", + "email": "berta@karczewski.com", + "web": "http://www.bertakarczewski.com", + "followers": 1093 + }, + { + "firstname": "Mac", + "lastname": "Marksberry", + "company": "Pursell, David B Esq", + "address": "112 W Plum", + "city": "Doniphan", + "county": "Hall", + "state": "NE", + "zip": "68832", + "phone": "402-845-4275", + "fax": "402-845-8229", + "email": "mac@marksberry.com", + "web": "http://www.macmarksberry.com", + "followers": 6081 + }, + { + "firstname": "Sandra", + "lastname": "Graen", + "company": "Action Remediation Co", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-3844", + "fax": "320-587-7201", + "email": "sandra@graen.com", + "web": "http://www.sandragraen.com", + "followers": 4844 + }, + { + "firstname": "Lee", + "lastname": "Javens", + "company": "Dyer, James R Esq", + "address": "6086 N Lyons Rd", + "city": "Burlington", + "county": "Racine", + "state": "WI", + "zip": "53105", + "phone": "262-763-9582", + "fax": "262-763-3845", + "email": "lee@javens.com", + "web": "http://www.leejavens.com", + "followers": 2954 + }, + { + "firstname": "Fran", + "lastname": "Zanders", + "company": "River City Body Co", + "address": "6312 S Yellowstone Hwy", + "city": "Idaho Falls", + "county": 
"Bonneville", + "state": "ID", + "zip": "83402", + "phone": "208-525-6418", + "fax": "208-525-5501", + "email": "fran@zanders.com", + "web": "http://www.franzanders.com", + "followers": 9590 + }, + { + "firstname": "Lane", + "lastname": "Brantz", + "company": "Kings Inn", + "address": "106 Erie St", + "city": "Hutchinson", + "county": "McLeod", + "state": "MN", + "zip": "55350", + "phone": "320-587-2903", + "fax": "320-587-3448", + "email": "lane@brantz.com", + "web": "http://www.lanebrantz.com", + "followers": 361 + }, + { + "firstname": "Bess", + "lastname": "Marso", + "company": "Lesher Printers Inc", + "address": "15542 Chemical Ln", + "city": "Huntington Beach", + "county": "Orange", + "state": "CA", + "zip": "92649", + "phone": "714-895-4582", + "fax": "714-895-8188", + "email": "bess@marso.com", + "web": "http://www.bessmarso.com", + "followers": 9552 + }, + { + "firstname": "Tamara", + "lastname": "Declue", + "company": "Glen Burnie The Bank Of", + "address": "1900 W Loop S #-600", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-3958", + "fax": "713-871-6355", + "email": "tamara@declue.com", + "web": "http://www.tamaradeclue.com", + "followers": 3229 + }, + { + "firstname": "Denise", + "lastname": "Speegle", + "company": "Shipley Oil Company", + "address": "2078 Foster Ave", + "city": "Wheeling", + "county": "Cook", + "state": "IL", + "zip": "60090", + "phone": "847-870-8743", + "fax": "847-870-6026", + "email": "denise@speegle.com", + "web": "http://www.denisespeegle.com", + "followers": 1139 + }, + { + "firstname": "Lynda", + "lastname": "Youtsey", + "company": "Accurate Color Inc", + "address": "1370 S Bertelsen Rd", + "city": "Eugene", + "county": "Lane", + "state": "OR", + "zip": "97402", + "phone": "541-342-0606", + "fax": "541-342-0655", + "email": "lynda@youtsey.com", + "web": "http://www.lyndayoutsey.com", + "followers": 378 + }, + { + "firstname": "Diann", + "lastname": "Burigsay", + "company": "Snyder, Stephen E Esq", + "address": "13026 S Normandie Ave", + "city": "Gardena", + "county": "Los Angeles", + "state": "CA", + "zip": "90249", + "phone": "310-321-8278", + "fax": "310-321-0564", + "email": "diann@burigsay.com", + "web": "http://www.diannburigsay.com", + "followers": 6193 + }, + { + "firstname": "Mari", + "lastname": "Hwang", + "company": "Play Craft Pontoon Co", + "address": "23352 El Toro Rd", + "city": "Lake Forest", + "county": "Orange", + "state": "CA", + "zip": "92630", + "phone": "949-583-6901", + "fax": "949-583-7758", + "email": "mari@hwang.com", + "web": "http://www.marihwang.com", + "followers": 6719 + }, + { + "firstname": "Shanna", + "lastname": "Neundorfer", + "company": "Door Systems", + "address": "833 E Allegheny Ave", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19134", + "phone": "215-426-9722", + "fax": "215-426-8416", + "email": "shanna@neundorfer.com", + "web": "http://www.shannaneundorfer.com", + "followers": 9759 + }, + { + "firstname": "Sherwood", + "lastname": "Detillier", + "company": "Minteq International", + "address": "Box #-851", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91365", + "phone": "818-703-9160", + "fax": "818-703-0447", + "email": "sherwood@detillier.com", + "web": "http://www.sherwooddetillier.com", + "followers": 6816 + }, + { + "firstname": "Walton", + "lastname": "Schwallie", + "company": "Nielsen, Laura W Md", + "address": "154 Main St", + "city": "Upton", + "county": "Worcester", + "state": 
"MA", + "zip": "01568", + "phone": "508-529-8783", + "fax": "508-529-8368", + "email": "walton@schwallie.com", + "web": "http://www.waltonschwallie.com", + "followers": 4498 + }, + { + "firstname": "Judy", + "lastname": "Gartenmayer", + "company": "Pro Infusion Pharm Inc", + "address": "3260 W New Haven Ave", + "city": "Melbourne", + "county": "Brevard", + "state": "FL", + "zip": "32904", + "phone": "321-676-3091", + "fax": "321-676-3378", + "email": "judy@gartenmayer.com", + "web": "http://www.judygartenmayer.com", + "followers": 5730 + }, + { + "firstname": "Antione", + "lastname": "Mccleary", + "company": "Cerberus Pyrotronics", + "address": "1110 25th Ave N", + "city": "Fargo", + "county": "Cass", + "state": "ND", + "zip": "58102", + "phone": "701-293-8410", + "fax": "701-293-7439", + "email": "antione@mccleary.com", + "web": "http://www.antionemccleary.com", + "followers": 5273 + }, + { + "firstname": "Kay", + "lastname": "Ganguli", + "company": "Hanson, Bruce Esq", + "address": "3763 Scripps Dr", + "city": "Las Vegas", + "county": "Clark", + "state": "NV", + "zip": "89103", + "phone": "702-876-3089", + "fax": "702-876-9367", + "email": "kay@ganguli.com", + "web": "http://www.kayganguli.com", + "followers": 6368 + }, + { + "firstname": "Oma", + "lastname": "Duffy", + "company": "Laun Law Offices", + "address": "255 Industrial Dr", + "city": "Franklin", + "county": "Warren", + "state": "OH", + "zip": "45005", + "phone": "937-746-7537", + "fax": "937-746-4129", + "email": "oma@duffy.com", + "web": "http://www.omaduffy.com", + "followers": 9375 + }, + { + "firstname": "Devon", + "lastname": "Teston", + "company": "Bilton, Dean H Esq", + "address": "1900 W Loop S", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77027", + "phone": "713-871-9773", + "fax": "713-871-0838", + "email": "devon@teston.com", + "web": "http://www.devonteston.com", + "followers": 3426 + }, + { + "firstname": "Jade", + "lastname": "Erlebach", + "company": "Vickery Tape & Label Co Inc", + "address": "975 Flynn Rd", + "city": "Camarillo", + "county": "Ventura", + "state": "CA", + "zip": "93012", + "phone": "805-445-8331", + "fax": "805-445-9961", + "email": "jade@erlebach.com", + "web": "http://www.jadeerlebach.com", + "followers": 4178 + }, + { + "firstname": "Roseann", + "lastname": "Jerko", + "company": "Larry Farmer Appraisal Co Inc", + "address": "850 Glen Ave", + "city": "Moorestown", + "county": "Burlington", + "state": "NJ", + "zip": "08057", + "phone": "856-866-4945", + "fax": "856-866-1542", + "email": "roseann@jerko.com", + "web": "http://www.roseannjerko.com", + "followers": 5397 + }, + { + "firstname": "Ruthie", + "lastname": "Zortman", + "company": "Review Monterey Peninsula", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-1410", + "fax": "510-651-1242", + "email": "ruthie@zortman.com", + "web": "http://www.ruthiezortman.com", + "followers": 6079 + }, + { + "firstname": "Leif", + "lastname": "Arguin", + "company": "Cmplt Cmptg Solutions Ne", + "address": "4508 Enterprise St", + "city": "Fremont", + "county": "Alameda", + "state": "CA", + "zip": "94538", + "phone": "510-651-4937", + "fax": "510-651-8302", + "email": "leif@arguin.com", + "web": "http://www.leifarguin.com", + "followers": 6571 + }, + { + "firstname": "Millicent", + "lastname": "Ekstrom", + "company": "Personal Creations Inc", + "address": "151 Brown St #-b", + "city": "Lawrenceburg", + "county": "Dearborn", + "state": "IN", + "zip": 
"47025", + "phone": "812-537-7287", + "fax": "812-537-5442", + "email": "millicent@ekstrom.com", + "web": "http://www.millicentekstrom.com", + "followers": 5739 + }, + { + "firstname": "Val", + "lastname": "Oborne", + "company": "Stone Container Corporation", + "address": "7846 Clybourn Ave", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-767-1347", + "fax": "818-767-5123", + "email": "val@oborne.com", + "web": "http://www.valoborne.com", + "followers": 6746 + }, + { + "firstname": "Bridgett", + "lastname": "Retort", + "company": "Womens Resource & Refrl Ntwrk", + "address": "6747 Signat Dr", + "city": "Houston", + "county": "Harris", + "state": "TX", + "zip": "77041", + "phone": "713-466-7259", + "fax": "713-466-3278", + "email": "bridgett@retort.com", + "web": "http://www.bridgettretort.com", + "followers": 3060 + }, + { + "firstname": "Tia", + "lastname": "Lino", + "company": "Superior Gundrilling", + "address": "35375 Highway 228", + "city": "Brownsville", + "county": "Linn", + "state": "OR", + "zip": "97327", + "phone": "541-466-2483", + "fax": "541-466-1661", + "email": "tia@lino.com", + "web": "http://www.tialino.com", + "followers": 8942 + }, + { + "firstname": "Jarrett", + "lastname": "Kenzie", + "company": "Young Men Christian Assn Cnty", + "address": "11551 Riverpark Way", + "city": "Chesterfield", + "county": "Chesterfield", + "state": "VA", + "zip": "23838", + "phone": "804-739-3007", + "fax": "804-739-7905", + "email": "jarrett@kenzie.com", + "web": "http://www.jarrettkenzie.com", + "followers": 9459 + }, + { + "firstname": "Mara", + "lastname": "Vanderzwaag", + "company": "Brown & Brown Law Office", + "address": "2550 E Lucas Dr", + "city": "Beaumont", + "county": "Jefferson", + "state": "TX", + "zip": "77703", + "phone": "409-892-1231", + "fax": "409-892-8492", + "email": "mara@vanderzwaag.com", + "web": "http://www.maravanderzwaag.com", + "followers": 2331 + }, + { + "firstname": "Tiffany", + "lastname": "Knust", + "company": "Vantage Products", + "address": "1425 Koll Cir #-107", + "city": "San Jose", + "county": "Santa Clara", + "state": "CA", + "zip": "95112", + "phone": "408-453-0357", + "fax": "408-453-1525", + "email": "tiffany@knust.com", + "web": "http://www.tiffanyknust.com", + "followers": 2896 + }, + { + "firstname": "Fabian", + "lastname": "Mcshaw", + "company": "Bodik, Michael G Esq", + "address": "6023 Garfield Ave", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90040", + "phone": "323-726-5319", + "fax": "323-726-8499", + "email": "fabian@mcshaw.com", + "web": "http://www.fabianmcshaw.com", + "followers": 4892 + }, + { + "firstname": "Annabelle", + "lastname": "Coger", + "company": "Center For Resource Management", + "address": "1200 Shreveport Barksdale Hwy", + "city": "Shreveport", + "county": "Caddo", + "state": "LA", + "zip": "71105", + "phone": "318-865-8418", + "fax": "318-865-7381", + "email": "annabelle@coger.com", + "web": "http://www.annabellecoger.com", + "followers": 2623 + }, + { + "firstname": "Marisa", + "lastname": "Smiler", + "company": "Star Systems Inc", + "address": "200 Broadhollow Rd", + "city": "Melville", + "county": "Suffolk", + "state": "NY", + "zip": "11747", + "phone": "631-673-3339", + "fax": "631-673-1556", + "email": "marisa@smiler.com", + "web": "http://www.marisasmiler.com", + "followers": 4693 + }, + { + "firstname": "Samantha", + "lastname": "Bordwell", + "company": "Interwest Freight System Inc", + "address": "23405 Sw 152nd Ct", + 
"city": "Homestead", + "county": "Miami-Dade", + "state": "FL", + "zip": "33032", + "phone": "305-247-8402", + "fax": "305-247-4599", + "email": "samantha@bordwell.com", + "web": "http://www.samanthabordwell.com", + "followers": 6109 + }, + { + "firstname": "Felecia", + "lastname": "Riedl", + "company": "Benson, John S", + "address": "333 Andrew Ave", + "city": "Salt Lake City", + "county": "Salt Lake", + "state": "UT", + "zip": "84115", + "phone": "801-486-6484", + "fax": "801-486-6755", + "email": "felecia@riedl.com", + "web": "http://www.feleciariedl.com", + "followers": 7849 + }, + { + "firstname": "Kris", + "lastname": "Persson", + "company": "Tweel, Ronald R Esq", + "address": "1765 Sw Highway 97", + "city": "Madras", + "county": "Jefferson", + "state": "OR", + "zip": "97741", + "phone": "541-475-8404", + "fax": "541-475-0021", + "email": "kris@persson.com", + "web": "http://www.krispersson.com", + "followers": 232 + }, + { + "firstname": "Kylie", + "lastname": "Bridgeman", + "company": "Thomas & Libowitz Pa", + "address": "118 Lenzner Ct", + "city": "Sewickley", + "county": "Allegheny", + "state": "PA", + "zip": "15143", + "phone": "412-741-4604", + "fax": "412-741-4236", + "email": "kylie@bridgeman.com", + "web": "http://www.kyliebridgeman.com", + "followers": 9868 + }, + { + "firstname": "Eduardo", + "lastname": "Bellendir", + "company": "Powers & Assocs", + "address": "1800 Pine Run Rd", + "city": "Wilkes Barre", + "county": "Luzerne", + "state": "PA", + "zip": "18702", + "phone": "570-822-0721", + "fax": "570-822-6267", + "email": "eduardo@bellendir.com", + "web": "http://www.eduardobellendir.com", + "followers": 597 + }, + { + "firstname": "Waldo", + "lastname": "Edberg", + "company": "Bush Building Corporation", + "address": "2017 W Jackson St", + "city": "Tupelo", + "county": "Lee", + "state": "MS", + "zip": "38801", + "phone": "662-842-4133", + "fax": "662-842-8645", + "email": "waldo@edberg.com", + "web": "http://www.waldoedberg.com", + "followers": 6609 + }, + { + "firstname": "Brent", + "lastname": "Vaidya", + "company": "Crain Industries", + "address": "45 Church St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06906", + "phone": "203-359-2824", + "fax": "203-359-6466", + "email": "brent@vaidya.com", + "web": "http://www.brentvaidya.com", + "followers": 3729 + }, + { + "firstname": "Bette", + "lastname": "Barcelona", + "company": "Fischer, William R Esq", + "address": "432 Lignite Ave", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-6738", + "fax": "907-456-4144", + "email": "bette@barcelona.com", + "web": "http://www.bettebarcelona.com", + "followers": 5857 + }, + { + "firstname": "Rich", + "lastname": "Gleave", + "company": "Finkelstein, Bernard A Cpa", + "address": "827 E 10th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-277-9294", + "fax": "907-277-2227", + "email": "rich@gleave.com", + "web": "http://www.richgleave.com", + "followers": 488 + }, + { + "firstname": "Lyman", + "lastname": "Whittley", + "company": "Berry, Robert A Esq", + "address": "3030 Bridgeway", + "city": "Sausalito", + "county": "Marin", + "state": "CA", + "zip": "94965", + "phone": "415-332-9570", + "fax": "415-332-7303", + "email": "lyman@whittley.com", + "web": "http://www.lymanwhittley.com", + "followers": 7950 + }, + { + "firstname": "Maryann", + "lastname": "Garnette", + "company": "Catholic University Of", + "address": "582 Centerville 
Rd", + "city": "Lancaster", + "county": "Lancaster", + "state": "PA", + "zip": "17601", + "phone": "717-560-6671", + "fax": "717-560-5625", + "email": "maryann@garnette.com", + "web": "http://www.maryanngarnette.com", + "followers": 5412 + }, + { + "firstname": "Jimmie", + "lastname": "Zarzycki", + "company": "John C Auth", + "address": "215 E Pikes Peak Ave", + "city": "Colorado Springs", + "county": "El Paso", + "state": "CO", + "zip": "80903", + "phone": "719-632-0667", + "fax": "719-632-5612", + "email": "jimmie@zarzycki.com", + "web": "http://www.jimmiezarzycki.com", + "followers": 4291 + }, + { + "firstname": "Gisela", + "lastname": "Kosicki", + "company": "Lisher, John L Esq", + "address": "22140 Ventura Blvd #-4", + "city": "Woodland Hills", + "county": "Los Angeles", + "state": "CA", + "zip": "91364", + "phone": "818-713-6306", + "fax": "818-713-8346", + "email": "gisela@kosicki.com", + "web": "http://www.giselakosicki.com", + "followers": 377 + }, + { + "firstname": "Marlene", + "lastname": "Hammeren", + "company": "Chamot, Philip S Esq", + "address": "1000 Monte Sano Blvd Se", + "city": "Huntsville", + "county": "Madison", + "state": "AL", + "zip": "35801", + "phone": "256-533-8674", + "fax": "256-533-1176", + "email": "marlene@hammeren.com", + "web": "http://www.marlenehammeren.com", + "followers": 428 + }, + { + "firstname": "Kris", + "lastname": "Stanzak", + "company": "Cenref Labs", + "address": "3100 Dodge St", + "city": "Dubuque", + "county": "Dubuque", + "state": "IA", + "zip": "52003", + "phone": "563-557-2588", + "fax": "563-557-6308", + "email": "kris@stanzak.com", + "web": "http://www.krisstanzak.com", + "followers": 368 + }, + { + "firstname": "Roman", + "lastname": "Simone", + "company": "Spottswood, William B Esq", + "address": "610 W Main St", + "city": "Batavia", + "county": "Clermont", + "state": "OH", + "zip": "45103", + "phone": "513-732-3089", + "fax": "513-732-2547", + "email": "roman@simone.com", + "web": "http://www.romansimone.com", + "followers": 3460 + }, + { + "firstname": "Cathryn", + "lastname": "Nicolaus", + "company": "Perkins Photo/graphics Inc", + "address": "2575 State Highway 32", + "city": "Chico", + "county": "Butte", + "state": "CA", + "zip": "95973", + "phone": "530-345-4627", + "fax": "530-345-8372", + "email": "cathryn@nicolaus.com", + "web": "http://www.cathrynnicolaus.com", + "followers": 9494 + }, + { + "firstname": "Lana", + "lastname": "Keels", + "company": "Veron, J Michael Esq", + "address": "711 Park Ave", + "city": "Freehold", + "county": "Monmouth", + "state": "NJ", + "zip": "07728", + "phone": "732-462-1106", + "fax": "732-462-3575", + "email": "lana@keels.com", + "web": "http://www.lanakeels.com", + "followers": 2796 + }, + { + "firstname": "Malissa", + "lastname": "Ziesemer", + "company": "Electron Rentals Inc", + "address": "330 S Ocean Blvd", + "city": "Palm Beach", + "county": "Palm Beach", + "state": "FL", + "zip": "33480", + "phone": "561-655-6443", + "fax": "561-655-9129", + "email": "malissa@ziesemer.com", + "web": "http://www.malissaziesemer.com", + "followers": 7525 + }, + { + "firstname": "Pamala", + "lastname": "Brodtmann", + "company": "Gagen, William E Jr", + "address": "342 Seaside Ave", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-926-0776", + "fax": "808-926-6173", + "email": "pamala@brodtmann.com", + "web": "http://www.pamalabrodtmann.com", + "followers": 134 + }, + { + "firstname": "Heriberto", + "lastname": "Tivis", + "company": "Newood", + "address": 
"1135 Kildaire Farm Rd", + "city": "Cary", + "county": "Wake", + "state": "NC", + "zip": "27511", + "phone": "919-460-8104", + "fax": "919-460-4304", + "email": "heriberto@tivis.com", + "web": "http://www.heribertotivis.com", + "followers": 2418 + }, + { + "firstname": "Edgardo", + "lastname": "Prudente", + "company": "R A Hamilton Corp", + "address": "1110 N Highway 360", + "city": "Grand Prairie", + "county": "Dallas", + "state": "TX", + "zip": "75050", + "phone": "972-660-3960", + "fax": "972-660-0934", + "email": "edgardo@prudente.com", + "web": "http://www.edgardoprudente.com", + "followers": 3269 + }, + { + "firstname": "Fred", + "lastname": "Kunde", + "company": "Swanson, Victoria C Esq", + "address": "5321 Sterling Center Dr", + "city": "Westlake Village", + "county": "Ventura", + "state": "CA", + "zip": "91361", + "phone": "805-991-9740", + "fax": "805-991-4665", + "email": "fred@kunde.com", + "web": "http://www.fredkunde.com", + "followers": 7741 + }, + { + "firstname": "Pilar", + "lastname": "Suddeth", + "company": "Slant Fin Corp", + "address": "3700 Campus Dr", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92660", + "phone": "949-852-5463", + "fax": "949-852-9027", + "email": "pilar@suddeth.com", + "web": "http://www.pilarsuddeth.com", + "followers": 4094 + }, + { + "firstname": "Eliseo", + "lastname": "Wice", + "company": "A Limousine Service", + "address": "711 W 38th St", + "city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78705", + "phone": "512-458-0034", + "fax": "512-458-7226", + "email": "eliseo@wice.com", + "web": "http://www.eliseowice.com", + "followers": 7508 + }, + { + "firstname": "Bridget", + "lastname": "Knightly", + "company": "Teamsters Union Local 20", + "address": "66 Flint St", + "city": "Asheville", + "county": "Buncombe", + "state": "NC", + "zip": "28801", + "phone": "828-251-0817", + "fax": "828-251-4242", + "email": "bridget@knightly.com", + "web": "http://www.bridgetknightly.com", + "followers": 1212 + }, + { + "firstname": "Tori", + "lastname": "Villaescusa", + "company": "Church Point Whol Gr Co Inc", + "address": "1006 S San Pedro St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-748-7617", + "fax": "213-748-5668", + "email": "tori@villaescusa.com", + "web": "http://www.torivillaescusa.com", + "followers": 9093 + }, + { + "firstname": "Claire", + "lastname": "Moyerman", + "company": "Grt Nthrn Shoe Rpr & Dry Clnrs", + "address": "2280 S Xanadu Way #-300", + "city": "Aurora", + "county": "Arapahoe", + "state": "CO", + "zip": "80014", + "phone": "303-337-5701", + "fax": "303-337-5796", + "email": "claire@moyerman.com", + "web": "http://www.clairemoyerman.com", + "followers": 5959 + }, + { + "firstname": "Judi", + "lastname": "Kivel", + "company": "Postal Place At 111th Square", + "address": "125 N Emporia St", + "city": "Wichita", + "county": "Sedgwick", + "state": "KS", + "zip": "67202", + "phone": "316-267-2178", + "fax": "316-267-5183", + "email": "judi@kivel.com", + "web": "http://www.judikivel.com", + "followers": 6139 + }, + { + "firstname": "Terrell", + "lastname": "Rodda", + "company": "Woodland Village Nursing Home", + "address": "200 Cottontail Ln", + "city": "Somerset", + "county": "Somerset", + "state": "NJ", + "zip": "08873", + "phone": "732-563-5361", + "fax": "732-563-1293", + "email": "terrell@rodda.com", + "web": "http://www.terrellrodda.com", + "followers": 3803 + }, + { + "firstname": "Jasmin", + "lastname": "Gum", + "company": 
"White, Samuel I Esq", + "address": "43 Parmenter Rd", + "city": "Hudson", + "county": "Middlesex", + "state": "MA", + "zip": "01749", + "phone": "978-562-2524", + "fax": "978-562-5062", + "email": "jasmin@gum.com", + "web": "http://www.jasmingum.com", + "followers": 9074 + }, + { + "firstname": "Bridget", + "lastname": "Bottella", + "company": "Mid State Construction Prod", + "address": "112 E Pecan St", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78205", + "phone": "210-224-9708", + "fax": "210-224-4881", + "email": "bridget@bottella.com", + "web": "http://www.bridgetbottella.com", + "followers": 5951 + }, + { + "firstname": "Tami", + "lastname": "Trybus", + "company": "Seawright, Rodney", + "address": "3963 Virginia Ave", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45227", + "phone": "513-561-1096", + "fax": "513-561-7531", + "email": "tami@trybus.com", + "web": "http://www.tamitrybus.com", + "followers": 370 + }, + { + "firstname": "Maynard", + "lastname": "Kaewprasert", + "company": "Decatur Studio Inc", + "address": "1224 S Hope St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90015", + "phone": "213-747-6026", + "fax": "213-747-3088", + "email": "maynard@kaewprasert.com", + "web": "http://www.maynardkaewprasert.com", + "followers": 711 + }, + { + "firstname": "Viola", + "lastname": "Mcsorley", + "company": "First Bank Of Brunswick", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-7428", + "fax": "708-496-8587", + "email": "viola@mcsorley.com", + "web": "http://www.violamcsorley.com", + "followers": 7687 + }, + { + "firstname": "Reggie", + "lastname": "Streu", + "company": "Als Motors Inc", + "address": "15 Henderson Dr", + "city": "Caldwell", + "county": "Essex", + "state": "NJ", + "zip": "07006", + "phone": "973-575-5898", + "fax": "973-575-9328", + "email": "reggie@streu.com", + "web": "http://www.reggiestreu.com", + "followers": 971 + }, + { + "firstname": "Rena", + "lastname": "Griffeth", + "company": "Reliable Optical", + "address": "19901 Nordhoff St", + "city": "Northridge", + "county": "Los Angeles", + "state": "CA", + "zip": "91324", + "phone": "818-709-9165", + "fax": "818-709-2649", + "email": "rena@griffeth.com", + "web": "http://www.renagriffeth.com", + "followers": 6336 + }, + { + "firstname": "Pierre", + "lastname": "Salera", + "company": "Ridley Ridley & Burnette", + "address": "1601 S Shamrock Ave", + "city": "Monrovia", + "county": "Los Angeles", + "state": "CA", + "zip": "91016", + "phone": "626-303-9233", + "fax": "626-303-2569", + "email": "pierre@salera.com", + "web": "http://www.pierresalera.com", + "followers": 6542 + }, + { + "firstname": "Carolina", + "lastname": "Kinlaw", + "company": "Loftus, Daniel B Esq", + "address": "3m County", + "city": "Belle Mead", + "county": "Somerset", + "state": "NJ", + "zip": "08502", + "phone": "908-874-0864", + "fax": "908-874-4873", + "email": "carolina@kinlaw.com", + "web": "http://www.carolinakinlaw.com", + "followers": 8944 + }, + { + "firstname": "Alejandra", + "lastname": "Prenatt", + "company": "Mitchell De Burring Co", + "address": "6850 S Harlem Ave", + "city": "Summit Argo", + "county": "Cook", + "state": "IL", + "zip": "60501", + "phone": "708-496-6958", + "fax": "708-496-4617", + "email": "alejandra@prenatt.com", + "web": "http://www.alejandraprenatt.com", + "followers": 4118 + }, + { + "firstname": "Quintin", + "lastname": "Isacson", 
+ "company": "Spectrum Constrctn & Dev Corp", + "address": "1015 N Cahuenga Blvd", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90038", + "phone": "323-469-0643", + "fax": "323-469-3082", + "email": "quintin@isacson.com", + "web": "http://www.quintinisacson.com", + "followers": 94 + }, + { + "firstname": "Robin", + "lastname": "Grotz", + "company": "Hackett, Peter J", + "address": "2601 Summerhill Rd", + "city": "Texarkana", + "county": "Bowie", + "state": "TX", + "zip": "75503", + "phone": "903-792-2081", + "fax": "903-792-5309", + "email": "robin@grotz.com", + "web": "http://www.robingrotz.com", + "followers": 2532 + }, + { + "firstname": "Lacy", + "lastname": "Woodfin", + "company": "Enviro Dynamics", + "address": "130 Wyoming Ave", + "city": "Scranton", + "county": "Lackawanna", + "state": "PA", + "zip": "18503", + "phone": "570-348-3754", + "fax": "570-348-4204", + "email": "lacy@woodfin.com", + "web": "http://www.lacywoodfin.com", + "followers": 4644 + }, + { + "firstname": "Daniel", + "lastname": "Zill", + "company": "Safety Team Inc", + "address": "550 N Brand Blvd #-1940", + "city": "Glendale", + "county": "Los Angeles", + "state": "CA", + "zip": "91203", + "phone": "818-507-7207", + "fax": "818-507-1925", + "email": "daniel@zill.com", + "web": "http://www.danielzill.com", + "followers": 2051 + }, + { + "firstname": "Reina", + "lastname": "Wolchesky", + "company": "Highland Management Group", + "address": "305 W Washington St", + "city": "Brainerd", + "county": "Crow Wing", + "state": "MN", + "zip": "56401", + "phone": "218-828-7281", + "fax": "218-828-3231", + "email": "reina@wolchesky.com", + "web": "http://www.reinawolchesky.com", + "followers": 9240 + }, + { + "firstname": "Marc", + "lastname": "Wanger", + "company": "Welsh Company", + "address": "33 Harrison Ave", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02111", + "phone": "617-426-6393", + "fax": "617-426-1114", + "email": "marc@wanger.com", + "web": "http://www.marcwanger.com", + "followers": 2846 + }, + { + "firstname": "Damion", + "lastname": "Matkin", + "company": "Data Ware Development Inc", + "address": "5830 Downing St #-d", + "city": "Denver", + "county": "Denver", + "state": "CO", + "zip": "80216", + "phone": "303-295-4797", + "fax": "303-295-3867", + "email": "damion@matkin.com", + "web": "http://www.damionmatkin.com", + "followers": 5752 + }, + { + "firstname": "Lucius", + "lastname": "Winchester", + "company": "Barrett Paving Materials Inc", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-9458", + "fax": "630-289-9033", + "email": "lucius@winchester.com", + "web": "http://www.luciuswinchester.com", + "followers": 8815 + }, + { + "firstname": "Petra", + "lastname": "Mcnichol", + "company": "Hyacinth Foundation Aids Proj", + "address": "670 S Barrington Rd", + "city": "Streamwood", + "county": "Cook", + "state": "IL", + "zip": "60107", + "phone": "630-289-8190", + "fax": "630-289-8985", + "email": "petra@mcnichol.com", + "web": "http://www.petramcnichol.com", + "followers": 9459 + }, + { + "firstname": "Katina", + "lastname": "Ramano", + "company": "Fuhrmann, Chris C Esq", + "address": "580 Fountain Ave", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11208", + "phone": "718-272-2553", + "fax": "718-272-8498", + "email": "katina@ramano.com", + "web": "http://www.katinaramano.com", + "followers": 924 + }, + { + "firstname": "Leslie", + 
"lastname": "Cackowski", + "company": "Re, Matthew R Esq", + "address": "2103 W Main St", + "city": "Farmington", + "county": "San Juan", + "state": "NM", + "zip": "87401", + "phone": "505-325-3933", + "fax": "505-325-9042", + "email": "leslie@cackowski.com", + "web": "http://www.lesliecackowski.com", + "followers": 3028 + }, + { + "firstname": "Cristopher", + "lastname": "Wiget", + "company": "Roche, Patrick Esq", + "address": "4354 Highway 64", + "city": "Kirtland", + "county": "San Juan", + "state": "NM", + "zip": "87417", + "phone": "505-598-9742", + "fax": "505-598-3063", + "email": "cristopher@wiget.com", + "web": "http://www.cristopherwiget.com", + "followers": 2445 + }, + { + "firstname": "Garth", + "lastname": "Skiffington", + "company": "Fox & Killbride", + "address": "22 Mill St", + "city": "Paterson", + "county": "Passaic", + "state": "NJ", + "zip": "07501", + "phone": "973-684-7654", + "fax": "973-684-6309", + "email": "garth@skiffington.com", + "web": "http://www.garthskiffington.com", + "followers": 1725 + }, + { + "firstname": "Brendan", + "lastname": "Qin", + "company": "Allegro Copy & Print", + "address": "305 Griffin Ave Sw", + "city": "Eastman", + "county": "Dodge", + "state": "GA", + "zip": "31023", + "phone": "478-374-5686", + "fax": "478-374-6992", + "email": "brendan@qin.com", + "web": "http://www.brendanqin.com", + "followers": 8035 + }, + { + "firstname": "Chase", + "lastname": "Furler", + "company": "Hostetler & Kowalik", + "address": "3333 Se 21st St", + "city": "Topeka", + "county": "Shawnee", + "state": "KS", + "zip": "66607", + "phone": "785-354-7091", + "fax": "785-354-3042", + "email": "chase@furler.com", + "web": "http://www.chasefurler.com", + "followers": 2977 + }, + { + "firstname": "Marietta", + "lastname": "Bjornberg", + "company": "Consolidated Mechanical Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-0501", + "fax": "406-728-5507", + "email": "marietta@bjornberg.com", + "web": "http://www.mariettabjornberg.com", + "followers": 54 + }, + { + "firstname": "Carmella", + "lastname": "Wishman", + "company": "Meicher Cpa", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-6772", + "fax": "406-728-7668", + "email": "carmella@wishman.com", + "web": "http://www.carmellawishman.com", + "followers": 624 + }, + { + "firstname": "Erica", + "lastname": "Eyrich", + "company": "C & I Computer Services Inc", + "address": "1515 Wyoming St", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59801", + "phone": "406-728-7293", + "fax": "406-728-4789", + "email": "erica@eyrich.com", + "web": "http://www.ericaeyrich.com", + "followers": 8217 + }, + { + "firstname": "Lucius", + "lastname": "Bagnoli", + "company": "American Music Teacher", + "address": "404 W Boonville New Harmony Rd", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-867-2916", + "fax": "812-867-2593", + "email": "lucius@bagnoli.com", + "web": "http://www.luciusbagnoli.com", + "followers": 8666 + }, + { + "firstname": "Bart", + "lastname": "Hachey", + "company": "Pip Printing", + "address": "490 S Broad St", + "city": "Canfield", + "county": "Mahoning", + "state": "OH", + "zip": "44406", + "phone": "330-533-9769", + "fax": "330-533-6543", + "email": "bart@hachey.com", + "web": "http://www.barthachey.com", + "followers": 8457 + }, + { + "firstname": "Isiah", + 
"lastname": "Phernetton", + "company": "Wyckoff Florist & Gifts", + "address": "246 Griffing Ave", + "city": "Riverhead", + "county": "Suffolk", + "state": "NY", + "zip": "11901", + "phone": "631-727-0917", + "fax": "631-727-2147", + "email": "isiah@phernetton.com", + "web": "http://www.isiahphernetton.com", + "followers": 6662 + }, + { + "firstname": "Morton", + "lastname": "Crummell", + "company": "Boykin Management Co", + "address": "5485 Conestoga Ct", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80301", + "phone": "303-546-3698", + "fax": "303-546-1589", + "email": "morton@crummell.com", + "web": "http://www.mortoncrummell.com", + "followers": 8143 + }, + { + "firstname": "Prince", + "lastname": "Kauk", + "company": "Mckesson Drug Co", + "address": "2320 W Louise Dr", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85027", + "phone": "623-581-7435", + "fax": "623-581-2472", + "email": "prince@kauk.com", + "web": "http://www.princekauk.com", + "followers": 177 + }, + { + "firstname": "Marta", + "lastname": "Horner", + "company": "Scisci, Pasquale M", + "address": "4694 Alvarado Canyon Rd #-f", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92120", + "phone": "858-265-2270", + "fax": "858-265-6604", + "email": "marta@horner.com", + "web": "http://www.martahorner.com", + "followers": 5749 + }, + { + "firstname": "Teodoro", + "lastname": "Gaboury", + "company": "Bostek, Eva M Dvm", + "address": "4251 Glenwood Ave", + "city": "Youngstown", + "county": "Mahoning", + "state": "OH", + "zip": "44512", + "phone": "330-783-8123", + "fax": "330-783-9674", + "email": "teodoro@gaboury.com", + "web": "http://www.teodorogaboury.com", + "followers": 3663 + }, + { + "firstname": "Jess", + "lastname": "Assad", + "company": "Livingston City", + "address": "9100 F St", + "city": "Omaha", + "county": "Douglas", + "state": "NE", + "zip": "68127", + "phone": "402-331-0470", + "fax": "402-331-4974", + "email": "jess@assad.com", + "web": "http://www.jessassad.com", + "followers": 6985 + }, + { + "firstname": "Freeman", + "lastname": "Soula", + "company": "Henson, Claudia", + "address": "Hwy 8e E", + "city": "Mena", + "county": "Polk", + "state": "AR", + "zip": "71953", + "phone": "479-394-6308", + "fax": "479-394-7509", + "email": "freeman@soula.com", + "web": "http://www.freemansoula.com", + "followers": 7262 + }, + { + "firstname": "Rita", + "lastname": "Center", + "company": "R D Playman Co", + "address": "4013 Cameron St #-b", + "city": "Lafayette", + "county": "Lafayette", + "state": "LA", + "zip": "70506", + "phone": "337-269-8825", + "fax": "337-269-7011", + "email": "rita@center.com", + "web": "http://www.ritacenter.com", + "followers": 4484 + }, + { + "firstname": "Kira", + "lastname": "Papen", + "company": "Tile City & Carpet Of Pa", + "address": "8387 University Ave", + "city": "La Mesa", + "county": "San Diego", + "state": "CA", + "zip": "91941", + "phone": "619-464-3649", + "fax": "619-464-8499", + "email": "kira@papen.com", + "web": "http://www.kirapapen.com", + "followers": 5711 + }, + { + "firstname": "Miquel", + "lastname": "Demicco", + "company": "Welk Resort Center", + "address": "5921 S Middlefield Rd", + "city": "Littleton", + "county": "Jefferson", + "state": "CO", + "zip": "80123", + "phone": "303-730-8080", + "fax": "303-730-8087", + "email": "miquel@demicco.com", + "web": "http://www.miqueldemicco.com", + "followers": 6863 + }, + { + "firstname": "William", + "lastname": "Mahmud", + "company": "Campbell Inn", 
+ "address": "1003 Northern Blvd", + "city": "Manhasset", + "county": "Nassau", + "state": "NY", + "zip": "11030", + "phone": "516-365-3496", + "fax": "516-365-0493", + "email": "william@mahmud.com", + "web": "http://www.williammahmud.com", + "followers": 3591 + }, + { + "firstname": "Lacy", + "lastname": "Belmont", + "company": "House Boat Rentals Inc", + "address": "585 Bedford Rd", + "city": "Bedford Hills", + "county": "Westchester", + "state": "NY", + "zip": "10507", + "phone": "914-241-8888", + "fax": "914-241-2272", + "email": "lacy@belmont.com", + "web": "http://www.lacybelmont.com", + "followers": 1399 + }, + { + "firstname": "Van", + "lastname": "Leanen", + "company": "Mail Boxes Etc", + "address": "Bus Rt 54 & Hh", + "city": "Lake Ozark", + "county": "Camden", + "state": "MO", + "zip": "65049", + "phone": "573-365-0319", + "fax": "573-365-1055", + "email": "van@leanen.com", + "web": "http://www.vanleanen.com", + "followers": 1949 + }, + { + "firstname": "Mayme", + "lastname": "Staub", + "company": "River House Hotel", + "address": "2470 Lamington Rd", + "city": "Bedminster", + "county": "Somerset", + "state": "NJ", + "zip": "07921", + "phone": "908-234-9338", + "fax": "908-234-9433", + "email": "mayme@staub.com", + "web": "http://www.maymestaub.com", + "followers": 2216 + }, + { + "firstname": "Gregg", + "lastname": "Guevarra", + "company": "Arbor Center", + "address": "221 S Kerr Ave", + "city": "Wilmington", + "county": "New Hanover", + "state": "NC", + "zip": "28403", + "phone": "910-799-9811", + "fax": "910-799-1965", + "email": "gregg@guevarra.com", + "web": "http://www.greggguevarra.com", + "followers": 2704 + }, + { + "firstname": "Minh", + "lastname": "Leclare", + "company": "Osach, Ronald C Esq", + "address": "106 S 4th St", + "city": "Forest City", + "county": "Winnebago", + "state": "IA", + "zip": "50436", + "phone": "641-582-0973", + "fax": "641-582-0424", + "email": "minh@leclare.com", + "web": "http://www.minhleclare.com", + "followers": 7681 + }, + { + "firstname": "Joey", + "lastname": "Sedore", + "company": "Goodwill Speciality Co", + "address": "1647 E Palmdale Blvd", + "city": "Palmdale", + "county": "Los Angeles", + "state": "CA", + "zip": "93550", + "phone": "661-273-4188", + "fax": "661-273-6263", + "email": "joey@sedore.com", + "web": "http://www.joeysedore.com", + "followers": 273 + }, + { + "firstname": "Jeanie", + "lastname": "Dalen", + "company": "Ivs Media Inc", + "address": "2900 Ford Rd", + "city": "Bristol", + "county": "Bucks", + "state": "PA", + "zip": "19007", + "phone": "215-788-5062", + "fax": "215-788-7666", + "email": "jeanie@dalen.com", + "web": "http://www.jeaniedalen.com", + "followers": 290 + }, + { + "firstname": "Eddie", + "lastname": "Gauer", + "company": "Easy Mail", + "address": "506 Kellogg Ave", + "city": "Ames", + "county": "Story", + "state": "IA", + "zip": "50010", + "phone": "515-233-2381", + "fax": "515-233-6551", + "email": "eddie@gauer.com", + "web": "http://www.eddiegauer.com", + "followers": 2350 + }, + { + "firstname": "Jessie", + "lastname": "Barkle", + "company": "Days Inn Airport By Mall Amer", + "address": "1002 S Treadaway Blvd", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79602", + "phone": "325-677-1190", + "fax": "325-677-2343", + "email": "jessie@barkle.com", + "web": "http://www.jessiebarkle.com", + "followers": 5427 + }, + { + "firstname": "Deandre", + "lastname": "Resendiz", + "company": "Sav Mart", + "address": "4 Trvl Svc Carlson Trvl Ways", + "city": "Twin Falls", + "county": 
"Twin Falls", + "state": "ID", + "zip": "83301", + "phone": "208-733-8306", + "fax": "208-733-3476", + "email": "deandre@resendiz.com", + "web": "http://www.deandreresendiz.com", + "followers": 1120 + }, + { + "firstname": "Janet", + "lastname": "Rathrock", + "company": "Ryder, Edward A Esq", + "address": "2181 Harlem Rd", + "city": "Loves Park", + "county": "Winnebago", + "state": "IL", + "zip": "61111", + "phone": "815-877-4376", + "fax": "815-877-9538", + "email": "janet@rathrock.com", + "web": "http://www.janetrathrock.com", + "followers": 2725 + }, + { + "firstname": "Denice", + "lastname": "Nordlinger", + "company": "Bickel, Daniel R Cpa", + "address": "7210 Gateway Blvd E", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79915", + "phone": "915-593-2344", + "fax": "915-593-8069", + "email": "denice@nordlinger.com", + "web": "http://www.denicenordlinger.com", + "followers": 2003 + }, + { + "firstname": "Danny", + "lastname": "Dales", + "company": "Barrett Bindery Co", + "address": "3530 E Washington St", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85034", + "phone": "602-225-9543", + "fax": "602-225-9028", + "email": "danny@dales.com", + "web": "http://www.dannydales.com", + "followers": 2843 + }, + { + "firstname": "Robbie", + "lastname": "Deshay", + "company": "Cornhusker Press", + "address": "1700 Terminal St", + "city": "West Sacramento", + "county": "Yolo", + "state": "CA", + "zip": "95691", + "phone": "916-372-5032", + "fax": "916-372-1333", + "email": "robbie@deshay.com", + "web": "http://www.robbiedeshay.com", + "followers": 3460 + }, + { + "firstname": "Carla", + "lastname": "Humble", + "company": "Express Printing Center", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-0662", + "fax": "818-768-1832", + "email": "carla@humble.com", + "web": "http://www.carlahumble.com", + "followers": 4639 + }, + { + "firstname": "Ashley", + "lastname": "Leonesio", + "company": "Martinique Resort Hotel", + "address": "15 Park Row", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10038", + "phone": "212-227-3681", + "fax": "212-227-4343", + "email": "ashley@leonesio.com", + "web": "http://www.ashleyleonesio.com", + "followers": 4801 + }, + { + "firstname": "Josephine", + "lastname": "Sotlar", + "company": "Shipp Storage", + "address": "400 1st St Nw", + "city": "Washington", + "county": "District of Columbia", + "state": "DC", + "zip": "20001", + "phone": "202-783-2772", + "fax": "202-783-8805", + "email": "josephine@sotlar.com", + "web": "http://www.josephinesotlar.com", + "followers": 515 + }, + { + "firstname": "Derek", + "lastname": "Kreutzbender", + "company": "Recycle Metals Corp", + "address": "411 E Wisconsin Ave", + "city": "Milwaukee", + "county": "Milwaukee", + "state": "WI", + "zip": "53202", + "phone": "414-271-5253", + "fax": "414-271-6234", + "email": "derek@kreutzbender.com", + "web": "http://www.derekkreutzbender.com", + "followers": 2430 + }, + { + "firstname": "Kira", + "lastname": "Staffon", + "company": "International Management Assoc", + "address": "933 Wiliwili St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96826", + "phone": "808-949-0941", + "fax": "808-949-8257", + "email": "kira@staffon.com", + "web": "http://www.kirastaffon.com", + "followers": 5769 + }, + { + "firstname": "Isaac", + "lastname": "Davensizer", + "company": "Dillon Measurement Instruments", + "address": 
"444 Nahua St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96815", + "phone": "808-544-5794", + "fax": "808-544-6357", + "email": "isaac@davensizer.com", + "web": "http://www.isaacdavensizer.com", + "followers": 3453 + }, + { + "firstname": "Reva", + "lastname": "Bayer", + "company": "Hirt, Stanley Esq", + "address": "4011 Wallingford Ave N", + "city": "Seattle", + "county": "King", + "state": "WA", + "zip": "98103", + "phone": "206-634-9998", + "fax": "206-634-2589", + "email": "reva@bayer.com", + "web": "http://www.revabayer.com", + "followers": 7415 + }, + { + "firstname": "Melvin", + "lastname": "Auteri", + "company": "Interstate Unltd Fed Crdt Un", + "address": "463 Beacon St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02115", + "phone": "617-247-8022", + "fax": "617-247-6002", + "email": "melvin@auteri.com", + "web": "http://www.melvinauteri.com", + "followers": 2705 + }, + { + "firstname": "Stephen", + "lastname": "Seiters", + "company": "Us Mortgage Corp", + "address": "814 Blue Mound Rd", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76131", + "phone": "817-947-3102", + "fax": "817-947-6272", + "email": "stephen@seiters.com", + "web": "http://www.stephenseiters.com", + "followers": 9568 + }, + { + "firstname": "Lucas", + "lastname": "Santellana", + "company": "Constantino, James P Esq", + "address": "4820 E Mcdowell Rd #-300", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85008", + "phone": "602-225-3469", + "fax": "602-225-8897", + "email": "lucas@santellana.com", + "web": "http://www.lucassantellana.com", + "followers": 8420 + }, + { + "firstname": "Traci", + "lastname": "Toomey", + "company": "Pea Press", + "address": "Box #-1948", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-9708", + "fax": "307-733-6525", + "email": "traci@toomey.com", + "web": "http://www.tracitoomey.com", + "followers": 7718 + }, + { + "firstname": "Vernice", + "lastname": "Resendes", + "company": "Sorrento Cheese Co Inc", + "address": "11796 Sheldon St", + "city": "Sun Valley", + "county": "Los Angeles", + "state": "CA", + "zip": "91352", + "phone": "818-768-6234", + "fax": "818-768-7585", + "email": "vernice@resendes.com", + "web": "http://www.verniceresendes.com", + "followers": 912 + }, + { + "firstname": "Hillary", + "lastname": "Holmes", + "company": "Dennis N Brager Law Offices Of", + "address": "7565 Green Valley Rd", + "city": "Placerville", + "county": "El Dorado", + "state": "CA", + "zip": "95667", + "phone": "530-626-1934", + "fax": "530-626-6128", + "email": "hillary@holmes.com", + "web": "http://www.hillaryholmes.com", + "followers": 7918 + }, + { + "firstname": "Robin", + "lastname": "Schartz", + "company": "Fairmount Country Club", + "address": "10175 Joerschke Dr", + "city": "Grass Valley", + "county": "Nevada", + "state": "CA", + "zip": "95945", + "phone": "530-477-9983", + "fax": "530-477-7396", + "email": "robin@schartz.com", + "web": "http://www.robinschartz.com", + "followers": 2169 + }, + { + "firstname": "Sabrina", + "lastname": "Deppert", + "company": "Americold", + "address": "2701 E Thomas Rd #-j", + "city": "Phoenix", + "county": "Maricopa", + "state": "AZ", + "zip": "85016", + "phone": "602-954-4343", + "fax": "602-954-6266", + "email": "sabrina@deppert.com", + "web": "http://www.sabrinadeppert.com", + "followers": 5488 + }, + { + "firstname": "Luciano", + "lastname": "Truiolo", + "company": "Currier Gallery Of Art", 
+ "address": "435 Mira Vista Ter", + "city": "Pasadena", + "county": "Los Angeles", + "state": "CA", + "zip": "91105", + "phone": "626-792-6850", + "fax": "626-792-5166", + "email": "luciano@truiolo.com", + "web": "http://www.lucianotruiolo.com", + "followers": 9047 + }, + { + "firstname": "Ezekiel", + "lastname": "Mildon", + "company": "Ace Pro Pest Cntrl Inc", + "address": "10 Rogers St", + "city": "Cambridge", + "county": "Middlesex", + "state": "MA", + "zip": "02142", + "phone": "617-494-5618", + "fax": "617-494-3365", + "email": "ezekiel@mildon.com", + "web": "http://www.ezekielmildon.com", + "followers": 399 + }, + { + "firstname": "Hanna", + "lastname": "Cinkan", + "company": "Northbrook Flowers", + "address": "1150 Nw 72nd Ave #-333", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-477-7869", + "fax": "305-477-7089", + "email": "hanna@cinkan.com", + "web": "http://www.hannacinkan.com", + "followers": 4636 + }, + { + "firstname": "Kory", + "lastname": "Wooldridge", + "company": "Paper Stock Dealers Inc", + "address": "1525 E 53rd St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60615", + "phone": "312-753-6734", + "fax": "312-753-2693", + "email": "kory@wooldridge.com", + "web": "http://www.korywooldridge.com", + "followers": 4528 + }, + { + "firstname": "Darrel", + "lastname": "Ruffins", + "company": "Brickman, Arthur Cpa", + "address": "2800 4th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33704", + "phone": "727-821-8502", + "fax": "727-821-5104", + "email": "darrel@ruffins.com", + "web": "http://www.darrelruffins.com", + "followers": 6468 + }, + { + "firstname": "Miranda", + "lastname": "Hammitt", + "company": "Steindel, Carl R Md", + "address": "964 E Saint Francis St", + "city": "Brownsville", + "county": "Cameron", + "state": "TX", + "zip": "78520", + "phone": "956-541-5918", + "fax": "956-541-9021", + "email": "miranda@hammitt.com", + "web": "http://www.mirandahammitt.com", + "followers": 8847 + }, + { + "firstname": "Sadie", + "lastname": "Rowlett", + "company": "Port City Taxi Inc", + "address": "2659 Webster Ave", + "city": "Bronx", + "county": "Bronx", + "state": "NY", + "zip": "10458", + "phone": "718-365-1753", + "fax": "718-365-5353", + "email": "sadie@rowlett.com", + "web": "http://www.sadierowlett.com", + "followers": 9836 + }, + { + "firstname": "Deanna", + "lastname": "Gerbi", + "company": "Centerville Historical Society", + "address": "264 Broadway", + "city": "Jersey City", + "county": "Hudson", + "state": "NJ", + "zip": "07306", + "phone": "201-433-0391", + "fax": "201-433-3619", + "email": "deanna@gerbi.com", + "web": "http://www.deannagerbi.com", + "followers": 9112 + }, + { + "firstname": "Alfonso", + "lastname": "Griglen", + "company": "Paul D Friedman", + "address": "577 Township Road #-30s", + "city": "Ada", + "county": "Hardin", + "state": "OH", + "zip": "45810", + "phone": "419-634-3513", + "fax": "419-634-5733", + "email": "alfonso@griglen.com", + "web": "http://www.alfonsogriglen.com", + "followers": 7361 + }, + { + "firstname": "Vernon", + "lastname": "Engelman", + "company": "Atlas Metal Cutting Inc", + "address": "321 Watson St", + "city": "Ripon", + "county": "Fond du Lac", + "state": "WI", + "zip": "54971", + "phone": "920-748-1387", + "fax": "920-748-3703", + "email": "vernon@engelman.com", + "web": "http://www.vernonengelman.com", + "followers": 656 + }, + { + "firstname": "Johnnie", + "lastname": "Rheaves", + "company": "Roberts, 
James A Cpa", + "address": "2910 E La Cresta Ave", + "city": "Anaheim", + "county": "Orange", + "state": "CA", + "zip": "92806", + "phone": "714-632-1291", + "fax": "714-632-7337", + "email": "johnnie@rheaves.com", + "web": "http://www.johnnierheaves.com", + "followers": 2812 + }, + { + "firstname": "Ella", + "lastname": "Pahnke", + "company": "Davis, Randle S Esq", + "address": "2640 Junction Hwy", + "city": "Kerrville", + "county": "Kerr", + "state": "TX", + "zip": "78028", + "phone": "830-367-8513", + "fax": "830-367-9231", + "email": "ella@pahnke.com", + "web": "http://www.ellapahnke.com", + "followers": 4762 + }, + { + "firstname": "Veronica", + "lastname": "Achorn", + "company": "Guy Spradling", + "address": "5201 Hanawalt Dr", + "city": "El Paso", + "county": "El Paso", + "state": "TX", + "zip": "79903", + "phone": "915-772-3217", + "fax": "915-772-2346", + "email": "veronica@achorn.com", + "web": "http://www.veronicaachorn.com", + "followers": 9145 + }, + { + "firstname": "Kasey", + "lastname": "Nguyen", + "company": "Pettine, Paul A Iii", + "address": "5603 Arapahoe Ave", + "city": "Boulder", + "county": "Boulder", + "state": "CO", + "zip": "80303", + "phone": "303-440-3916", + "fax": "303-440-2452", + "email": "kasey@nguyen.com", + "web": "http://www.kaseynguyen.com", + "followers": 2867 + }, + { + "firstname": "Frankie", + "lastname": "Morein", + "company": "Oliver, Jerrold B Esq", + "address": "104 North St", + "city": "Stamford", + "county": "Fairfield", + "state": "CT", + "zip": "06902", + "phone": "203-975-3712", + "fax": "203-975-4688", + "email": "frankie@morein.com", + "web": "http://www.frankiemorein.com", + "followers": 5663 + }, + { + "firstname": "Elaine", + "lastname": "Renzi", + "company": "Delvel Chem Co", + "address": "221 E 59th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-826-7966", + "fax": "212-826-2043", + "email": "elaine@renzi.com", + "web": "http://www.elainerenzi.com", + "followers": 9525 + }, + { + "firstname": "Timothy", + "lastname": "Janski", + "company": "Snyder Chevrolet Olds Geo Co", + "address": "800 E Dimond Blvd", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99515", + "phone": "907-344-4330", + "fax": "907-344-4086", + "email": "timothy@janski.com", + "web": "http://www.timothyjanski.com", + "followers": 1000 + }, + { + "firstname": "Warren", + "lastname": "Hacher", + "company": "Josel, Stephen C Esq", + "address": "4600 S 1st St", + "city": "Abilene", + "county": "Taylor", + "state": "TX", + "zip": "79605", + "phone": "325-691-7220", + "fax": "325-691-7394", + "email": "warren@hacher.com", + "web": "http://www.warrenhacher.com", + "followers": 6287 + }, + { + "firstname": "Brant", + "lastname": "Darnel", + "company": "Cody, Daniel S Esq", + "address": "8250 Tyler Blvd", + "city": "Mentor", + "county": "Lake", + "state": "OH", + "zip": "44060", + "phone": "440-974-8416", + "fax": "440-974-7476", + "email": "brant@darnel.com", + "web": "http://www.brantdarnel.com", + "followers": 5217 + }, + { + "firstname": "Mara", + "lastname": "Rineheart", + "company": "Berry Naturipe Growers", + "address": "39 W 21st St", + "city": "Northampton", + "county": "Northampton", + "state": "PA", + "zip": "18067", + "phone": "610-262-2444", + "fax": "610-262-6836", + "email": "mara@rineheart.com", + "web": "http://www.mararineheart.com", + "followers": 9213 + }, + { + "firstname": "Karen", + "lastname": "Flierl", + "company": "Cook Vetter Doerhoff", + "address": "501 N I #-h35", + 
"city": "Austin", + "county": "Travis", + "state": "TX", + "zip": "78702", + "phone": "512-477-1826", + "fax": "512-477-1407", + "email": "karen@flierl.com", + "web": "http://www.karenflierl.com", + "followers": 754 + }, + { + "firstname": "Virgil", + "lastname": "Chinni", + "company": "Dorfman Abrams Music & Co", + "address": "248 Libby St", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-9811", + "fax": "808-841-9646", + "email": "virgil@chinni.com", + "web": "http://www.virgilchinni.com", + "followers": 6740 + }, + { + "firstname": "Jimmie", + "lastname": "Kertzman", + "company": "Oregon Pacific Trading Co", + "address": "279 Puuhale Rd", + "city": "Honolulu", + "county": "Honolulu", + "state": "HI", + "zip": "96819", + "phone": "808-841-2883", + "fax": "808-841-1772", + "email": "jimmie@kertzman.com", + "web": "http://www.jimmiekertzman.com", + "followers": 4795 + }, + { + "firstname": "Leif", + "lastname": "Bachta", + "company": "Hislop, Lorna Brumfield Esq", + "address": "91246 Oihana St", + "city": "Kapolei", + "county": "Honolulu", + "state": "HI", + "zip": "96707", + "phone": "808-682-8942", + "fax": "808-682-2789", + "email": "leif@bachta.com", + "web": "http://www.leifbachta.com", + "followers": 6016 + }, + { + "firstname": "Ione", + "lastname": "Kucera", + "company": "Tweedy Penney & Crawford", + "address": "94210 Pupukahi St #-201a", + "city": "Waipahu", + "county": "Honolulu", + "state": "HI", + "zip": "96797", + "phone": "808-671-5253", + "fax": "808-671-3048", + "email": "ione@kucera.com", + "web": "http://www.ionekucera.com", + "followers": 17 + }, + { + "firstname": "Doreen", + "lastname": "Sakurai", + "company": "Airlifter", + "address": "211 E 50th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10022", + "phone": "212-759-4757", + "fax": "212-759-7548", + "email": "doreen@sakurai.com", + "web": "http://www.doreensakurai.com", + "followers": 8051 + }, + { + "firstname": "Joel", + "lastname": "Nardo", + "company": "New Era Canning Co", + "address": "5150 Town Center Cir", + "city": "Boca Raton", + "county": "Palm Beach", + "state": "FL", + "zip": "33486", + "phone": "561-395-2277", + "fax": "561-395-7825", + "email": "joel@nardo.com", + "web": "http://www.joelnardo.com", + "followers": 9041 + }, + { + "firstname": "Neil", + "lastname": "Backus", + "company": "Smith Capital Management", + "address": "521 5th Ave", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10175", + "phone": "212-537-4955", + "fax": "212-537-1181", + "email": "neil@backus.com", + "web": "http://www.neilbackus.com", + "followers": 9529 + }, + { + "firstname": "Fausto", + "lastname": "Marks", + "company": "Donohue, Brian C Esq", + "address": "200 Stuart St", + "city": "Boston", + "county": "Suffolk", + "state": "MA", + "zip": "02116", + "phone": "617-451-9353", + "fax": "617-451-2136", + "email": "fausto@marks.com", + "web": "http://www.faustomarks.com", + "followers": 9763 + }, + { + "firstname": "Christa", + "lastname": "Bodenschatz", + "company": "Stationers Inc", + "address": "Tilton Rd", + "city": "Danville", + "county": "Vermilion", + "state": "IL", + "zip": "61832", + "phone": "217-443-6280", + "fax": "217-443-6382", + "email": "christa@bodenschatz.com", + "web": "http://www.christabodenschatz.com", + "followers": 1744 + }, + { + "firstname": "Chi", + "lastname": "Greenlaw", + "company": "E & T Screw Machine Products", + "address": "4300 N Miller Rd #-143", + "city": "Scottsdale", + 
"county": "Maricopa", + "state": "AZ", + "zip": "85251", + "phone": "480-946-1537", + "fax": "480-946-1657", + "email": "chi@greenlaw.com", + "web": "http://www.chigreenlaw.com", + "followers": 2707 + }, + { + "firstname": "Kyle", + "lastname": "Ferri", + "company": "Ames Plumbing & Heating", + "address": "1801 E 5th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-2216", + "fax": "907-272-7109", + "email": "kyle@ferri.com", + "web": "http://www.kyleferri.com", + "followers": 7533 + }, + { + "firstname": "Freida", + "lastname": "Michelfelder", + "company": "Fun Discovery Inc", + "address": "117 Martin Luther King Jr Dr S", + "city": "Atlanta", + "county": "Fulton", + "state": "GA", + "zip": "30303", + "phone": "404-521-3372", + "fax": "404-521-3223", + "email": "freida@michelfelder.com", + "web": "http://www.freidamichelfelder.com", + "followers": 4298 + }, + { + "firstname": "Bryant", + "lastname": "Bouliouris", + "company": "Medlin, Charles K Jr", + "address": "480 W Pearl Ave", + "city": "Jackson", + "county": "Teton", + "state": "WY", + "zip": "83001", + "phone": "307-733-8286", + "fax": "307-733-6041", + "email": "bryant@bouliouris.com", + "web": "http://www.bryantbouliouris.com", + "followers": 9520 + }, + { + "firstname": "Emilia", + "lastname": "Oxley", + "company": "Buckeye Reserve Title", + "address": "14651 Dallas Pky", + "city": "Dallas", + "county": "Dallas", + "state": "TX", + "zip": "75240", + "phone": "214-702-8125", + "fax": "214-702-4766", + "email": "emilia@oxley.com", + "web": "http://www.emiliaoxley.com", + "followers": 6873 + }, + { + "firstname": "Naomi", + "lastname": "Mcraven", + "company": "Oak Brook Capital Corp", + "address": "13456 Se 27th Pl", + "city": "Bellevue", + "county": "King", + "state": "WA", + "zip": "98005", + "phone": "425-641-5463", + "fax": "425-641-5923", + "email": "naomi@mcraven.com", + "web": "http://www.naomimcraven.com", + "followers": 7029 + }, + { + "firstname": "Dionne", + "lastname": "Borycz", + "company": "Pepsi Cola Dr Pepper Bottling", + "address": "77 S Washington St #-207", + "city": "Rockville", + "county": "Montgomery", + "state": "MD", + "zip": "20850", + "phone": "301-294-0154", + "fax": "301-294-7523", + "email": "dionne@borycz.com", + "web": "http://www.dionneborycz.com", + "followers": 6973 + }, + { + "firstname": "Jimmy", + "lastname": "Hrobsky", + "company": "Howard Johnson", + "address": "8253 Ronson Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-268-4663", + "fax": "858-268-4964", + "email": "jimmy@hrobsky.com", + "web": "http://www.jimmyhrobsky.com", + "followers": 544 + }, + { + "firstname": "Peggy", + "lastname": "Hohlstein", + "company": "Steritek Inc", + "address": "Rt 20", + "city": "Westfield", + "county": "Hampden", + "state": "MA", + "zip": "01085", + "phone": "413-543-2933", + "fax": "413-543-3805", + "email": "peggy@hohlstein.com", + "web": "http://www.peggyhohlstein.com", + "followers": 8885 + }, + { + "firstname": "Genevieve", + "lastname": "Kekiwi", + "company": "Lawson, John F Esq", + "address": "8300 Bell Ter", + "city": "Newburgh", + "county": "Warrick", + "state": "IN", + "zip": "47630", + "phone": "812-477-3620", + "fax": "812-477-3646", + "email": "genevieve@kekiwi.com", + "web": "http://www.genevievekekiwi.com", + "followers": 8134 + }, + { + "firstname": "Terra", + "lastname": "Plagge", + "company": "Beach, Jeffrey E", + "address": "60 Minute Photo Colormax", + "city": "Evansville", + 
"county": "Vanderburgh", + "state": "IN", + "zip": "47715", + "phone": "812-477-9524", + "fax": "812-477-9617", + "email": "terra@plagge.com", + "web": "http://www.terraplagge.com", + "followers": 4323 + }, + { + "firstname": "Allie", + "lastname": "Pumphrey", + "company": "Asher, Ronald L Md", + "address": "501 N Weinbach Ave", + "city": "Evansville", + "county": "Vanderburgh", + "state": "IN", + "zip": "47711", + "phone": "812-477-0753", + "fax": "812-477-4604", + "email": "allie@pumphrey.com", + "web": "http://www.alliepumphrey.com", + "followers": 3985 + }, + { + "firstname": "Katina", + "lastname": "Survant", + "company": "Kgtv Channel 10", + "address": "590 N 2nd E", + "city": "Mountain Home", + "county": "Elmore", + "state": "ID", + "zip": "83647", + "phone": "208-587-3734", + "fax": "208-587-5574", + "email": "katina@survant.com", + "web": "http://www.katinasurvant.com", + "followers": 3440 + }, + { + "firstname": "Marta", + "lastname": "Warran", + "company": "Wirth, John T Esq", + "address": "2929 W Kennewick Ave", + "city": "Kennewick", + "county": "Benton", + "state": "WA", + "zip": "99336", + "phone": "509-735-8388", + "fax": "509-735-9193", + "email": "marta@warran.com", + "web": "http://www.martawarran.com", + "followers": 9891 + }, + { + "firstname": "Rebekah", + "lastname": "Lindboe", + "company": "Granite Corporation", + "address": "600 Las Colinas Blvd E", + "city": "Irving", + "county": "Dallas", + "state": "TX", + "zip": "75039", + "phone": "972-556-1121", + "fax": "972-556-0801", + "email": "rebekah@lindboe.com", + "web": "http://www.rebekahlindboe.com", + "followers": 180 + }, + { + "firstname": "Roxie", + "lastname": "Varenhorst", + "company": "Good Neighbor Real Estate", + "address": "1301 Dublin Rd", + "city": "Columbus", + "county": "Franklin", + "state": "OH", + "zip": "43215", + "phone": "614-487-2917", + "fax": "614-487-4227", + "email": "roxie@varenhorst.com", + "web": "http://www.roxievarenhorst.com", + "followers": 1352 + }, + { + "firstname": "Kennith", + "lastname": "Peto", + "company": "Kirschbaum, Thomas A Esq", + "address": "2080 Peachtree Industrial Ct", + "city": "Atlanta", + "county": "Dekalb", + "state": "GA", + "zip": "30341", + "phone": "770-455-4277", + "fax": "770-455-6746", + "email": "kennith@peto.com", + "web": "http://www.kennithpeto.com", + "followers": 8164 + }, + { + "firstname": "Darrell", + "lastname": "Amrich", + "company": "Harris, James P Iii", + "address": "64 W Convenient", + "city": "Apex", + "county": "Wake", + "state": "NC", + "zip": "27502", + "phone": "919-362-8201", + "fax": "919-362-7475", + "email": "darrell@amrich.com", + "web": "http://www.darrellamrich.com", + "followers": 9098 + }, + { + "firstname": "Savannah", + "lastname": "Loffier", + "company": "Saint Charles Catv", + "address": "501 S Johnstone Ave", + "city": "Bartlesville", + "county": "Washington", + "state": "OK", + "zip": "74003", + "phone": "918-337-3201", + "fax": "918-337-4947", + "email": "savannah@loffier.com", + "web": "http://www.savannahloffier.com", + "followers": 7227 + }, + { + "firstname": "Martin", + "lastname": "Carley", + "company": "Heil, John P Esq", + "address": "680 Country W", + "city": "Sylva", + "county": "Jackson", + "state": "NC", + "zip": "28779", + "phone": "828-586-3914", + "fax": "828-586-8059", + "email": "martin@carley.com", + "web": "http://www.martincarley.com", + "followers": 7412 + }, + { + "firstname": "Lacy", + "lastname": "Hyten", + "company": "Buy & Sell Press", + "address": "1 Palmer Sq", + "city": "Princeton", + 
"county": "Mercer", + "state": "NJ", + "zip": "08542", + "phone": "609-683-3558", + "fax": "609-683-0649", + "email": "lacy@hyten.com", + "web": "http://www.lacyhyten.com", + "followers": 2184 + }, + { + "firstname": "Forest", + "lastname": "Orea", + "company": "Clark, Francis J", + "address": "6858 S Ashland Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60636", + "phone": "773-436-4531", + "fax": "773-436-2636", + "email": "forest@orea.com", + "web": "http://www.forestorea.com", + "followers": 8308 + }, + { + "firstname": "Courtney", + "lastname": "Shishido", + "company": "Beymers Jewelry", + "address": "145 W 6th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-279-2737", + "fax": "907-279-2025", + "email": "courtney@shishido.com", + "web": "http://www.courtneyshishido.com", + "followers": 6286 + }, + { + "firstname": "Annette", + "lastname": "Frietas", + "company": "Monsanto Chemical Company", + "address": "45 W 46th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10036", + "phone": "212-944-8670", + "fax": "212-944-9464", + "email": "annette@frietas.com", + "web": "http://www.annettefrietas.com", + "followers": 9185 + }, + { + "firstname": "Karyn", + "lastname": "Jinks", + "company": "Maslen, David Esq", + "address": "2318 N Galloway Ave", + "city": "Mesquite", + "county": "Dallas", + "state": "TX", + "zip": "75150", + "phone": "972-289-4090", + "fax": "972-289-3319", + "email": "karyn@jinks.com", + "web": "http://www.karynjinks.com", + "followers": 3549 + }, + { + "firstname": "Edwin", + "lastname": "Lavelli", + "company": "Letterguide Co", + "address": "W 1st St", + "city": "East Liverpool", + "county": "Columbiana", + "state": "OH", + "zip": "43920", + "phone": "330-385-4581", + "fax": "330-385-9959", + "email": "edwin@lavelli.com", + "web": "http://www.edwinlavelli.com", + "followers": 1645 + }, + { + "firstname": "Jimmie", + "lastname": "Barninger", + "company": "California Paint & Wlpaper Str", + "address": "Box #-4038", + "city": "Modesto", + "county": "Stanislaus", + "state": "CA", + "zip": "95352", + "phone": "209-525-7568", + "fax": "209-525-4389", + "email": "jimmie@barninger.com", + "web": "http://www.jimmiebarninger.com", + "followers": 3947 + }, + { + "firstname": "Merle", + "lastname": "Wyrosdick", + "company": "Keil, James J Esq", + "address": "1350 Campus Pky", + "city": "Neptune", + "county": "Monmouth", + "state": "NJ", + "zip": "07753", + "phone": "732-938-7301", + "fax": "732-938-7237", + "email": "merle@wyrosdick.com", + "web": "http://www.merlewyrosdick.com", + "followers": 5762 + }, + { + "firstname": "Amelia", + "lastname": "Caputo", + "company": "Security Marketing Agency", + "address": "1800 Airport Way", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-1748", + "fax": "907-456-7535", + "email": "amelia@caputo.com", + "web": "http://www.ameliacaputo.com", + "followers": 9583 + }, + { + "firstname": "Germaine", + "lastname": "Bruski", + "company": "Alloy Founders", + "address": "2301 S Cushman St", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99701", + "phone": "907-456-8225", + "fax": "907-456-9261", + "email": "germaine@bruski.com", + "web": "http://www.germainebruski.com", + "followers": 5075 + }, + { + "firstname": "Willa", + "lastname": "Dutt", + "company": "Gutmann Leather Co Inc", + "address": "2110 Peger Rd", + "city": "Fairbanks", + 
"county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-2885", + "fax": "907-456-2187", + "email": "willa@dutt.com", + "web": "http://www.willadutt.com", + "followers": 2775 + }, + { + "firstname": "Cherie", + "lastname": "Fuhri", + "company": "Continental Baking Co", + "address": "3679 College Rd", + "city": "Fairbanks", + "county": "Fairbanks North Star", + "state": "AK", + "zip": "99709", + "phone": "907-456-9072", + "fax": "907-456-8467", + "email": "cherie@fuhri.com", + "web": "http://www.cheriefuhri.com", + "followers": 5839 + }, + { + "firstname": "Tyron", + "lastname": "Quillman", + "company": "Analysts International Corp", + "address": "5300 Shawnee Rd", + "city": "Alexandria", + "county": "Fairfax", + "state": "VA", + "zip": "22312", + "phone": "703-354-9266", + "fax": "703-354-2554", + "email": "tyron@quillman.com", + "web": "http://www.tyronquillman.com", + "followers": 9439 + }, + { + "firstname": "Charity", + "lastname": "Dyckman", + "company": "Marriott, Frank Jr", + "address": "6927 Old Seward Hwy", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99518", + "phone": "907-349-9880", + "fax": "907-349-4449", + "email": "charity@dyckman.com", + "web": "http://www.charitydyckman.com", + "followers": 2058 + }, + { + "firstname": "Nanette", + "lastname": "Turansky", + "company": "Sheraton Society Hill Hotel", + "address": "52 S 2nd St", + "city": "Easton", + "county": "Northampton", + "state": "PA", + "zip": "18042", + "phone": "610-250-6188", + "fax": "610-250-4334", + "email": "nanette@turansky.com", + "web": "http://www.nanetteturansky.com", + "followers": 5905 + }, + { + "firstname": "Cherie", + "lastname": "Schronce", + "company": "Varda, John Duncan Esq", + "address": "21603 Devonshire St", + "city": "Chatsworth", + "county": "Los Angeles", + "state": "CA", + "zip": "91311", + "phone": "818-718-2001", + "fax": "818-718-7339", + "email": "cherie@schronce.com", + "web": "http://www.cherieschronce.com", + "followers": 9409 + }, + { + "firstname": "Theron", + "lastname": "Hambright", + "company": "Sensor Oil And Gas Inc", + "address": "905 Brooks Ave", + "city": "Holland", + "county": "Ottawa", + "state": "MI", + "zip": "49423", + "phone": "616-392-2074", + "fax": "616-392-0226", + "email": "theron@hambright.com", + "web": "http://www.theronhambright.com", + "followers": 6532 + }, + { + "firstname": "Laurie", + "lastname": "Bibbs", + "company": "Action Nursing Care Llc", + "address": "2101 Claremont Ave Ne", + "city": "Albuquerque", + "county": "Bernalillo", + "state": "NM", + "zip": "87107", + "phone": "505-881-2899", + "fax": "505-881-3771", + "email": "laurie@bibbs.com", + "web": "http://www.lauriebibbs.com", + "followers": 5422 + }, + { + "firstname": "Angelo", + "lastname": "Ferentz", + "company": "Reagan, William L Esq", + "address": "3220 E 26th St", + "city": "Los Angeles", + "county": "Los Angeles", + "state": "CA", + "zip": "90023", + "phone": "323-262-5047", + "fax": "323-262-8693", + "email": "angelo@ferentz.com", + "web": "http://www.angeloferentz.com", + "followers": 6580 + }, + { + "firstname": "Denver", + "lastname": "Topete", + "company": "Insight Cablevision", + "address": "5770 Morehouse Dr", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92121", + "phone": "858-457-3538", + "fax": "858-457-0465", + "email": "denver@topete.com", + "web": "http://www.denvertopete.com", + "followers": 6542 + }, + { + "firstname": "Tommie", + "lastname": "Reuland", + "company": "Schaff, 
Michael D Esq", + "address": "1166 Arroyo St", + "city": "San Fernando", + "county": "Los Angeles", + "state": "CA", + "zip": "91340", + "phone": "818-361-4035", + "fax": "818-361-7493", + "email": "tommie@reuland.com", + "web": "http://www.tommiereuland.com", + "followers": 7735 + }, + { + "firstname": "Delmer", + "lastname": "Delucas", + "company": "American Processing Co Inc", + "address": "2770 Walden Ave", + "city": "Buffalo", + "county": "Erie", + "state": "NY", + "zip": "14225", + "phone": "716-874-1439", + "fax": "716-874-3467", + "email": "delmer@delucas.com", + "web": "http://www.delmerdelucas.com", + "followers": 8605 + }, + { + "firstname": "Latisha", + "lastname": "Bahls", + "company": "Search South Inc", + "address": "3 E 4th St", + "city": "Cincinnati", + "county": "Hamilton", + "state": "OH", + "zip": "45202", + "phone": "513-784-5007", + "fax": "513-784-5275", + "email": "latisha@bahls.com", + "web": "http://www.latishabahls.com", + "followers": 8504 + }, + { + "firstname": "Simone", + "lastname": "Lundie", + "company": "Casebolt, Victor S Esq", + "address": "701 S 17th St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19146", + "phone": "215-732-9026", + "fax": "215-732-8257", + "email": "simone@lundie.com", + "web": "http://www.simonelundie.com", + "followers": 2165 + }, + { + "firstname": "Ross", + "lastname": "Spurger", + "company": "Hoover Group Inc", + "address": "710 S Illinois Ave", + "city": "Carbondale", + "county": "Jackson", + "state": "IL", + "zip": "62901", + "phone": "618-453-9968", + "fax": "618-453-6144", + "email": "ross@spurger.com", + "web": "http://www.rossspurger.com", + "followers": 361 + }, + { + "firstname": "Abel", + "lastname": "Tuter", + "company": "Wernsing Plumbing & Heating", + "address": "2457 Perkiomen Ave", + "city": "Reading", + "county": "Berks", + "state": "PA", + "zip": "19606", + "phone": "610-370-6549", + "fax": "610-370-0856", + "email": "abel@tuter.com", + "web": "http://www.abeltuter.com", + "followers": 6215 + }, + { + "firstname": "Beverley", + "lastname": "Bunche", + "company": "Nurses Organization Vets Affrs", + "address": "1727 Nw 79th Ave", + "city": "Miami", + "county": "Miami-Dade", + "state": "FL", + "zip": "33126", + "phone": "305-591-4141", + "fax": "305-591-7751", + "email": "beverley@bunche.com", + "web": "http://www.beverleybunche.com", + "followers": 8121 + }, + { + "firstname": "Lizzie", + "lastname": "Torregrossa", + "company": "Salerno & Son", + "address": "78 Faunce Corner Rd", + "city": "North Dartmouth", + "county": "Bristol", + "state": "MA", + "zip": "02747", + "phone": "508-997-1409", + "fax": "508-997-9846", + "email": "lizzie@torregrossa.com", + "web": "http://www.lizzietorregrossa.com", + "followers": 1134 + }, + { + "firstname": "Tia", + "lastname": "Neumaier", + "company": "Carterville Mini Storage", + "address": "2500 Maitland Center Pky", + "city": "Maitland", + "county": "Orange", + "state": "FL", + "zip": "32751", + "phone": "407-660-7426", + "fax": "407-660-7628", + "email": "tia@neumaier.com", + "web": "http://www.tianeumaier.com", + "followers": 3010 + }, + { + "firstname": "Lesa", + "lastname": "Chantry", + "company": "Lutz Cichy Selig & Zeronda", + "address": "1119 N Bodine St", + "city": "Philadelphia", + "county": "Philadelphia", + "state": "PA", + "zip": "19123", + "phone": "215-923-0136", + "fax": "215-923-9492", + "email": "lesa@chantry.com", + "web": "http://www.lesachantry.com", + "followers": 5201 + }, + { + "firstname": "Marcelo", + "lastname": 
"Arostegui", + "company": "Moraschs Quality Meats", + "address": "76 Mall Comp", + "city": "Branson", + "county": "Taney", + "state": "MO", + "zip": "65616", + "phone": "417-336-9702", + "fax": "417-336-2664", + "email": "marcelo@arostegui.com", + "web": "http://www.marceloarostegui.com", + "followers": 1195 + }, + { + "firstname": "Jimmie", + "lastname": "Hardgrove", + "company": "Prosthodontic Associates", + "address": "305 E 47th St", + "city": "New York", + "county": "New York", + "state": "NY", + "zip": "10017", + "phone": "212-980-0445", + "fax": "212-980-6914", + "email": "jimmie@hardgrove.com", + "web": "http://www.jimmiehardgrove.com", + "followers": 1002 + }, + { + "firstname": "Renae", + "lastname": "Eldrige", + "company": "Pexco Packaging Corp", + "address": "1716 Rt 77", + "city": "Attica", + "county": "Wyoming", + "state": "NY", + "zip": "14011", + "phone": "585-591-3118", + "fax": "585-591-9104", + "email": "renae@eldrige.com", + "web": "http://www.renaeeldrige.com", + "followers": 4105 + }, + { + "firstname": "Tisha", + "lastname": "Gorder", + "company": "Paroly Rampart Sec Systems", + "address": "77 W Huron St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60610", + "phone": "312-642-2897", + "fax": "312-642-2664", + "email": "tisha@gorder.com", + "web": "http://www.tishagorder.com", + "followers": 3305 + }, + { + "firstname": "Clarice", + "lastname": "Knower", + "company": "Yaffa, Andrew B Esq", + "address": "210 W 79th St", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60620", + "phone": "773-846-1489", + "fax": "773-846-1462", + "email": "clarice@knower.com", + "web": "http://www.clariceknower.com", + "followers": 5497 + }, + { + "firstname": "Sybil", + "lastname": "Marmerchant", + "company": "Blvd Distillers & Importers", + "address": "12 E 7th Ave", + "city": "York", + "county": "York", + "state": "PA", + "zip": "17404", + "phone": "717-845-4718", + "fax": "717-845-4736", + "email": "sybil@marmerchant.com", + "web": "http://www.sybilmarmerchant.com", + "followers": 4656 + }, + { + "firstname": "Floyd", + "lastname": "Veazey", + "company": "Ramada Inn San Fran Arprt N", + "address": "2877 E Florence Ave", + "city": "Huntington Park", + "county": "Los Angeles", + "state": "CA", + "zip": "90255", + "phone": "323-862-9133", + "fax": "323-862-5589", + "email": "floyd@veazey.com", + "web": "http://www.floydveazey.com", + "followers": 4208 + }, + { + "firstname": "Reyna", + "lastname": "Bangle", + "company": "C & R Contractors Inc", + "address": "85 Long Island Expy", + "city": "New Hyde Park", + "county": "Nassau", + "state": "NY", + "zip": "11040", + "phone": "516-627-8715", + "fax": "516-627-9033", + "email": "reyna@bangle.com", + "web": "http://www.reynabangle.com", + "followers": 9261 + }, + { + "firstname": "Owen", + "lastname": "Sparacino", + "company": "Murtha, Thomas D Esq", + "address": "2865 Poplar Ave", + "city": "Memphis", + "county": "Shelby", + "state": "TN", + "zip": "38111", + "phone": "901-324-9274", + "fax": "901-324-4381", + "email": "owen@sparacino.com", + "web": "http://www.owensparacino.com", + "followers": 154 + }, + { + "firstname": "Eli", + "lastname": "Bettner", + "company": "Shea, David J Esq", + "address": "825 W Main Ave", + "city": "Brewster", + "county": "Okanogan", + "state": "WA", + "zip": "98812", + "phone": "509-689-7964", + "fax": "509-689-1394", + "email": "eli@bettner.com", + "web": "http://www.elibettner.com", + "followers": 9176 + }, + { + "firstname": "Taylor", + "lastname": "Fogerty", + 
"company": "Khoury Factory Outlet", + "address": "2000 W 120th Ave", + "city": "Denver", + "county": "Adams", + "state": "CO", + "zip": "80234", + "phone": "303-465-0070", + "fax": "303-465-2109", + "email": "taylor@fogerty.com", + "web": "http://www.taylorfogerty.com", + "followers": 2464 + }, + { + "firstname": "Reva", + "lastname": "Lecates", + "company": "First American Rlty Assoc Inc", + "address": "829 S 14th St", + "city": "Fernandina Beach", + "county": "Nassau", + "state": "FL", + "zip": "32034", + "phone": "904-261-0604", + "fax": "904-261-2123", + "email": "reva@lecates.com", + "web": "http://www.revalecates.com", + "followers": 8963 + }, + { + "firstname": "Rodrigo", + "lastname": "Wildrick", + "company": "Midwest Wrecking Company Inc", + "address": "1201 34th St N", + "city": "Saint Petersburg", + "county": "Pinellas", + "state": "FL", + "zip": "33713", + "phone": "727-323-5060", + "fax": "727-323-5982", + "email": "rodrigo@wildrick.com", + "web": "http://www.rodrigowildrick.com", + "followers": 1406 + }, + { + "firstname": "George", + "lastname": "Tukis", + "company": "Andow Personnel Services", + "address": "8 Inn Of Americus", + "city": "Americus", + "county": "Sumter", + "state": "GA", + "zip": "31709", + "phone": "229-924-0263", + "fax": "229-924-4251", + "email": "george@tukis.com", + "web": "http://www.georgetukis.com", + "followers": 5243 + }, + { + "firstname": "Titus", + "lastname": "Rodreguez", + "company": "Vasquez & Co", + "address": "7677 Engineer Rd", + "city": "San Diego", + "county": "San Diego", + "state": "CA", + "zip": "92111", + "phone": "858-571-0819", + "fax": "858-571-9047", + "email": "titus@rodreguez.com", + "web": "http://www.titusrodreguez.com", + "followers": 9258 + }, + { + "firstname": "Emilio", + "lastname": "Lampkin", + "company": "Hendricks, Kenneth J Cpa", + "address": "834 Lois Dr", + "city": "Williamstown", + "county": "Gloucester", + "state": "NJ", + "zip": "08094", + "phone": "856-629-8933", + "fax": "856-629-3337", + "email": "emilio@lampkin.com", + "web": "http://www.emiliolampkin.com", + "followers": 5810 + }, + { + "firstname": "Maryjane", + "lastname": "Arata", + "company": "Larson, John R Esq", + "address": "342 Wolverine Way", + "city": "Sparks", + "county": "Washoe", + "state": "NV", + "zip": "89431", + "phone": "775-352-5822", + "fax": "775-352-1557", + "email": "maryjane@arata.com", + "web": "http://www.maryjanearata.com", + "followers": 5044 + }, + { + "firstname": "Marcie", + "lastname": "Shulz", + "company": "United Printers Inc", + "address": "Blanco Rd", + "city": "San Antonio", + "county": "Bexar", + "state": "TX", + "zip": "78216", + "phone": "210-524-6711", + "fax": "210-524-1693", + "email": "marcie@shulz.com", + "web": "http://www.marcieshulz.com", + "followers": 4807 + }, + { + "firstname": "Celia", + "lastname": "Slavin", + "company": "Acra Aerospace Inc", + "address": "8750 W Bryn Mawr Ave", + "city": "Chicago", + "county": "Cook", + "state": "IL", + "zip": "60631", + "phone": "773-256-3550", + "fax": "773-256-2162", + "email": "celia@slavin.com", + "web": "http://www.celiaslavin.com", + "followers": 3493 + }, + { + "firstname": "Suzette", + "lastname": "Devaughan", + "company": "Anchor Graphics Inc", + "address": "385 Prospect Ave", + "city": "Hackensack", + "county": "Bergen", + "state": "NJ", + "zip": "07601", + "phone": "201-342-8964", + "fax": "201-342-9862", + "email": "suzette@devaughan.com", + "web": "http://www.suzettedevaughan.com", + "followers": 6410 + }, + { + "firstname": "Christian", + "lastname": 
"Marnell", + "company": "Wenick, George D Esq", + "address": "2750 Springboro Rd", + "city": "Dayton", + "county": "Montgomery", + "state": "OH", + "zip": "45439", + "phone": "937-293-9728", + "fax": "937-293-6782", + "email": "christian@marnell.com", + "web": "http://www.christianmarnell.com", + "followers": 3071 + }, + { + "firstname": "Misty", + "lastname": "Ericksen", + "company": "Graham & Associates Inc", + "address": "1808 2nd", + "city": "Cheney", + "county": "Spokane", + "state": "WA", + "zip": "99004", + "phone": "509-235-8873", + "fax": "509-235-1856", + "email": "misty@ericksen.com", + "web": "http://www.mistyericksen.com", + "followers": 1779 + }, + { + "firstname": "Bert", + "lastname": "Schadle", + "company": "Guaranty Chevrolet Geo", + "address": "500 Sw Loop #-820", + "city": "Fort Worth", + "county": "Tarrant", + "state": "TX", + "zip": "76115", + "phone": "817-921-5560", + "fax": "817-921-5913", + "email": "bert@schadle.com", + "web": "http://www.bertschadle.com", + "followers": 9753 + }, + { + "firstname": "Bertram", + "lastname": "Quertermous", + "company": "Florida Mining & Materials", + "address": "222 Delaware Ave", + "city": "Wilmington", + "county": "New Castle", + "state": "DE", + "zip": "19801", + "phone": "302-655-8039", + "fax": "302-655-4522", + "email": "bertram@quertermous.com", + "web": "http://www.bertramquertermous.com", + "followers": 4349 + }, + { + "firstname": "Buster", + "lastname": "Wubbel", + "company": "Collins, Joseph B Esq", + "address": "315 Us Rt 1", + "city": "Fairless Hills", + "county": "Bucks", + "state": "PA", + "zip": "19030", + "phone": "215-943-3689", + "fax": "215-943-6049", + "email": "buster@wubbel.com", + "web": "http://www.busterwubbel.com", + "followers": 6911 + }, + { + "firstname": "Mildred", + "lastname": "Gallegas", + "company": "Rogers, William A Jr", + "address": "310 Ridge Rd", + "city": "Claymont", + "county": "New Castle", + "state": "DE", + "zip": "19703", + "phone": "302-792-8044", + "fax": "302-792-1282", + "email": "mildred@gallegas.com", + "web": "http://www.mildredgallegas.com", + "followers": 8618 + }, + { + "firstname": "Pat", + "lastname": "Hoshaw", + "company": "Jorgensen, James L Esq", + "address": "30 Matthews Rd", + "city": "Malvern", + "county": "Chester", + "state": "PA", + "zip": "19355", + "phone": "610-644-7836", + "fax": "610-644-3252", + "email": "pat@hoshaw.com", + "web": "http://www.pathoshaw.com", + "followers": 6030 + }, + { + "firstname": "Marshall", + "lastname": "Hutch", + "company": "Nako, Joy Y", + "address": "Route 519", + "city": "Eighty Four", + "county": "Washington", + "state": "PA", + "zip": "15330", + "phone": "724-225-1729", + "fax": "724-225-7064", + "email": "marshall@hutch.com", + "web": "http://www.marshallhutch.com", + "followers": 6225 + }, + { + "firstname": "Don", + "lastname": "Mestler", + "company": "Coldwell Bnkr Hearthside Rltrs", + "address": "State Hwy #-31", + "city": "Pennington", + "county": "Mercer", + "state": "NJ", + "zip": "08534", + "phone": "609-737-2033", + "fax": "609-737-2374", + "email": "don@mestler.com", + "web": "http://www.donmestler.com", + "followers": 6967 + }, + { + "firstname": "Emery", + "lastname": "Reek", + "company": "Metri Tech Engineering Inc", + "address": "85 S Beachview Dr", + "city": "Jekyll Island", + "county": "Glynn", + "state": "GA", + "zip": "31527", + "phone": "912-635-3866", + "fax": "912-635-4039", + "email": "emery@reek.com", + "web": "http://www.emeryreek.com", + "followers": 2476 + }, + { + "firstname": "Ray", + "lastname": 
"Srock", + "company": "Tilt Lock Inc", + "address": "8700 E Pinnacle Peak Rd", + "city": "Scottsdale", + "county": "Maricopa", + "state": "AZ", + "zip": "85255", + "phone": "480-585-6138", + "fax": "480-585-4983", + "email": "ray@srock.com", + "web": "http://www.raysrock.com", + "followers": 2387 + }, + { + "firstname": "Nickolas", + "lastname": "Khosravi", + "company": "Brennan, Mary V Esq", + "address": "120 Tustin Ave", + "city": "Newport Beach", + "county": "Orange", + "state": "CA", + "zip": "92663", + "phone": "949-646-6578", + "fax": "949-646-0043", + "email": "nickolas@khosravi.com", + "web": "http://www.nickolaskhosravi.com", + "followers": 3074 + }, + { + "firstname": "Aileen", + "lastname": "Mottern", + "company": "Bennett Hallmark Cards", + "address": "2053 Lemoine Ave", + "city": "Fort Lee", + "county": "Bergen", + "state": "NJ", + "zip": "07024", + "phone": "201-944-1664", + "fax": "201-944-3382", + "email": "aileen@mottern.com", + "web": "http://www.aileenmottern.com", + "followers": 6519 + }, + { + "firstname": "Chad", + "lastname": "Araiza", + "company": "Christiansen, David L Cpa", + "address": "536 Grand Ave", + "city": "Schofield", + "county": "Marathon", + "state": "WI", + "zip": "54476", + "phone": "715-359-8700", + "fax": "715-359-3579", + "email": "chad@araiza.com", + "web": "http://www.chadaraiza.com", + "followers": 4865 + }, + { + "firstname": "Beverly", + "lastname": "Cambel", + "company": "Mcintyre Mcintyre & Mcintyre", + "address": "630 W 8th Ave", + "city": "Anchorage", + "county": "Anchorage", + "state": "AK", + "zip": "99501", + "phone": "907-272-3953", + "fax": "907-272-3618", + "email": "beverly@cambel.com", + "web": "http://www.beverlycambel.com", + "followers": 2393 + }, + { + "firstname": "Janice", + "lastname": "Twiet", + "company": "Henley, Cal Pa", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-6739", + "fax": "909-874-2594", + "email": "janice@twiet.com", + "web": "http://www.janicetwiet.com", + "followers": 7340 + }, + { + "firstname": "Byron", + "lastname": "Fortuna", + "company": "Jackson & Collins Pa", + "address": "700 Sw Higgins Ave", + "city": "Missoula", + "county": "Missoula", + "state": "MT", + "zip": "59803", + "phone": "406-549-8320", + "fax": "406-549-4641", + "email": "byron@fortuna.com", + "web": "http://www.byronfortuna.com", + "followers": 8913 + }, + { + "firstname": "Lynette", + "lastname": "Setlock", + "company": "George S Olive & Co", + "address": "614 W Superior Ave", + "city": "Cleveland", + "county": "Cuyahoga", + "state": "OH", + "zip": "44113", + "phone": "216-566-2265", + "fax": "216-566-2299", + "email": "lynette@setlock.com", + "web": "http://www.lynettesetlock.com", + "followers": 8002 + }, + { + "firstname": "Willard", + "lastname": "Roughen", + "company": "Nakamura Oyama & Assocs Inc", + "address": "9 5officce Product Cent", + "city": "Arlington", + "county": "Tarrant", + "state": "TX", + "zip": "76012", + "phone": "817-265-1847", + "fax": "817-265-0322", + "email": "willard@roughen.com", + "web": "http://www.willardroughen.com", + "followers": 861 + }, + { + "firstname": "Elisa", + "lastname": "Gracely", + "company": "Alexander, Christine T Esq", + "address": "1805 Kings Hwy", + "city": "Brooklyn", + "county": "Kings", + "state": "NY", + "zip": "11229", + "phone": "718-627-1421", + "fax": "718-627-9346", + "email": "elisa@gracely.com", + "web": "http://www.elisagracely.com", + "followers": 5321 + }, + { + "firstname": 
"Jeri", + "lastname": "Farstvedt", + "company": "Regan, Denis J Esq", + "address": "16133 Ventura Blvd #-700", + "city": "Encino", + "county": "Los Angeles", + "state": "CA", + "zip": "91436", + "phone": "818-986-8843", + "fax": "818-986-6786", + "email": "jeri@farstvedt.com", + "web": "http://www.jerifarstvedt.com", + "followers": 9529 + }, + { + "firstname": "Stacey", + "lastname": "Blow", + "company": "Schechter, Jeffrey S Esq", + "address": "136 S Riverside Ave", + "city": "Rialto", + "county": "San Bernardino", + "state": "CA", + "zip": "92376", + "phone": "909-874-0274", + "fax": "909-874-8538", + "email": "stacey@blow.com", + "web": "http://www.staceyblow.com", + "followers": 6685 + }, + { + "firstname": "Bryan", + "lastname": "Rovell", + "company": "All N All Shop", + "address": "90 Hackensack St", + "city": "East Rutherford", + "county": "Bergen", + "state": "NJ", + "zip": "07073", + "phone": "201-939-2788", + "fax": "201-939-9079", + "email": "bryan@rovell.com", + "web": "http://www.bryanrovell.com", + "followers": 2687 + }, + { + "firstname": "Joey", + "lastname": "Bolick", + "company": "Utility Trailer Sales", + "address": "7700 N Council Rd", + "city": "Oklahoma City", + "county": "Oklahoma", + "state": "OK", + "zip": "73132", + "phone": "405-728-5972", + "fax": "405-728-5244", + "email": "joey@bolick.com", + "web": "http://www.joeybolick.com", + "followers": 8465 + } +] diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb new file mode 100644 index 0000000..2039b59 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/account_client.rb @@ -0,0 +1,99 @@ +require 'algolia/error' + +module Algolia + # + # A class which encapsulates the HTTPS communication with the Algolia + # API server for cross-app operations. + # + class AccountClient + class << self + # + # Copies settings, synonyms, rules and objects from the source index to the + # destination index. The replicas of the source index should not be copied. + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index(source_index, destination_index, request_options = {}) + raise AlgoliaError.new('The indexes are in the same application. Use Algolia::Client.copy_index instead.') if source_index.client.application_id == destination_index.client.application_id + + begin + settings = destination_index.get_settings() + rescue AlgoliaError + # Destination index does not exists. We can proceed. + else + raise AlgoliaError.new("Destination index already exists. 
Please delete it before copying index across applications.") + end + + responses = [] + + settings = source_index.get_settings() + responses << destination_index.set_settings(settings, {}, request_options) + + synonyms = [] + source_index.export_synonyms(100, request_options) do |synonym| + synonym.delete('_highlightResult') + synonyms << synonym + end + + responses << destination_index.batch_synonyms(synonyms, false, false, request_options) + + rules = [] + source_index.export_rules(100, request_options) do |rule| + rule.delete('_highlightResult') + rules << rule + end + responses << destination_index.batch_rules(rules, false, false, request_options) + + # Copy objects + responses = [] + batch = [] + batch_size = 1000 + count = 0 + + source_index.browse do |obj| + batch << obj + count += 1 + + if count == batch_size + responses << destination_index.save_objects(batch, request_options) + batch = [] + count = 0 + end + end + + if batch.any? + responses << destination_index.save_objects(batch, request_options) + end + + responses + end + + # + # The method copy settings, synonyms, rules and objects from the source index + # to the destination index and wait end of indexing. The replicas of the + # source index should not be copied + # + # Throw an exception if the destination index already exists + # Throw an exception if the indices are on the same application + # + # @param source_index the source index object + # @param destination_index the destination index object + # @param request_options contains extra parameters to send with your query + # + def copy_index!(source_index, destination_index, request_options = {}) + responses = self.copy_index(source_index, destination_index, request_options) + + responses.each do |res| + destination_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb new file mode 100644 index 0000000..5c9c8b2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb @@ -0,0 +1,75 @@ +module Algolia + + class Analytics + attr_reader :ssl, :ssl_version, :headers + API_URL='https://analytics.algolia.com' + + def initialize(client, params) + @client = client + @headers = params[:headers] + end + + def get_ab_tests(params = {}) + params = { + :offset => 0, + :limit => 10, + }.merge(params) + + perform_request(:GET, Protocol.ab_tests_uri, params) + end + + def get_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:GET, Protocol.ab_tests_uri(ab_test_id)) + end + + def add_ab_test(ab_test) + perform_request(:POST, Protocol.ab_tests_uri, {}, ab_test.to_json) + end + + def stop_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == '' + + perform_request(:POST, Protocol.ab_tests_stop_uri(ab_test_id)) + end + + def delete_ab_test(ab_test_id) + raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? 
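As a reading aid between the vendored files: a minimal sketch of how AccountClient is typically driven. The application IDs, API keys, and index names below are placeholders, not values from this repo; copy_index raises when both indices live on the same application (use Algolia::Client.copy_index for that case) and streams objects in batches of 1000 via browse.

    require 'algoliasearch'

    # Placeholder credentials for two *different* Algolia applications.
    src = Algolia::Client.new(:application_id => 'SRC_APP_ID', :api_key => 'SRC_ADMIN_KEY').init_index('contacts')
    dst = Algolia::Client.new(:application_id => 'DST_APP_ID', :api_key => 'DST_ADMIN_KEY').init_index('contacts')

    # copy_index! performs the copy, then waits on each taskID it collected.
    Algolia::AccountClient.copy_index!(src, dst)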
diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb
new file mode 100644
index 0000000..5c9c8b2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/analytics.rb
@@ -0,0 +1,75 @@
+module Algolia
+
+  class Analytics
+    attr_reader :ssl, :ssl_version, :headers
+    API_URL = 'https://analytics.algolia.com'
+
+    def initialize(client, params)
+      @client = client
+      @headers = params[:headers]
+    end
+
+    def get_ab_tests(params = {})
+      params = {
+        :offset => 0,
+        :limit => 10,
+      }.merge(params)
+
+      perform_request(:GET, Protocol.ab_tests_uri, params)
+    end
+
+    def get_ab_test(ab_test_id)
+      raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == ''
+
+      perform_request(:GET, Protocol.ab_tests_uri(ab_test_id))
+    end
+
+    def add_ab_test(ab_test)
+      perform_request(:POST, Protocol.ab_tests_uri, {}, ab_test.to_json)
+    end
+
+    def stop_ab_test(ab_test_id)
+      raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == ''
+
+      perform_request(:POST, Protocol.ab_tests_stop_uri(ab_test_id))
+    end
+
+    def delete_ab_test(ab_test_id)
+      raise ArgumentError.new('ab_test_id cannot be empty') if ab_test_id.nil? || ab_test_id == ''
+
+      perform_request(:DELETE, Protocol.ab_tests_uri(ab_test_id))
+    end
+
+    def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
+      @client.wait_task(index_name, taskID, time_before_retry, request_options)
+    end
+
+    private
+
+    def perform_request(method, url, params = {}, data = {})
+      http = HTTPClient.new
+
+      url = API_URL + url
+
+      encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
+      url << "?" + Protocol.to_query(encoded_params)
+
+      response = case method
+      when :GET
+        http.get(url, { :header => @headers })
+      when :POST
+        http.post(url, { :body => data, :header => @headers })
+      when :DELETE
+        http.delete(url, { :header => @headers })
+      end
+
+      if response.code / 100 != 2
+        raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}")
+      end
+
+      JSON.parse(response.content)
+    end
+
+  end
+
+end
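Likewise, a hypothetical round trip through this Analytics wrapper (init_analytics is defined on the Client added below; the index names and end date are invented, and the payload fields follow Algolia's public A/B-testing API):

    analytics = client.init_analytics

    # Create a test splitting traffic 90/10 between two indices.
    analytics.add_ab_test(
      'name'     => 'ranking tweak',
      'variants' => [
        { 'index' => 'contacts',     'trafficPercentage' => 90 },
        { 'index' => 'contacts_alt', 'trafficPercentage' => 10 }
      ],
      'endAt'    => '2030-01-01T00:00:00Z'
    )

    # List tests, paginated through the :offset / :limit defaults above.
    tests = analytics.get_ab_tests(:limit => 20)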
diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb
new file mode 100644
index 0000000..08c72f5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/client.rb
@@ -0,0 +1,1131 @@
+require 'algolia/protocol'
+require 'algolia/error'
+require 'algolia/version'
+require 'json'
+require 'zlib'
+require 'openssl'
+require 'base64'
+
+module Algolia
+  WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY = 100
+
+  #
+  # A class which encapsulates the HTTPS communication with the Algolia
+  # API server. Uses the HTTPClient library for low-level HTTP communication.
+  #
+  class Client
+    attr_reader :ssl, :ssl_version, :hosts, :search_hosts, :application_id, :api_key, :headers, :connect_timeout, :send_timeout, :receive_timeout, :search_timeout, :batch_timeout
+
+    DEFAULT_CONNECT_TIMEOUT = 2
+    DEFAULT_RECEIVE_TIMEOUT = 30
+    DEFAULT_SEND_TIMEOUT = 30
+    DEFAULT_BATCH_TIMEOUT = 120
+    DEFAULT_SEARCH_TIMEOUT = 5
+    DEFAULT_USER_AGENT = ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"]
+
+    def initialize(data = {})
+      raise ArgumentError.new('No APPLICATION_ID provided, please set :application_id') if data[:application_id].nil?
+
+      @ssl = data[:ssl].nil? ? true : data[:ssl]
+      @ssl_version = data[:ssl_version].nil? ? nil : data[:ssl_version]
+      @gzip = data[:gzip].nil? ? true : data[:gzip]
+      @application_id = data[:application_id]
+      @api_key = data[:api_key]
+      @hosts = data[:hosts] || (["#{@application_id}.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle)
+      @search_hosts = data[:search_hosts] || data[:hosts] || (["#{@application_id}-dsn.algolia.net"] + 1.upto(3).map { |i| "#{@application_id}-#{i}.algolianet.com" }.shuffle)
+      @connect_timeout = data[:connect_timeout] || DEFAULT_CONNECT_TIMEOUT
+      @send_timeout = data[:send_timeout] || DEFAULT_SEND_TIMEOUT
+      @batch_timeout = data[:batch_timeout] || DEFAULT_BATCH_TIMEOUT
+      @receive_timeout = data[:receive_timeout] || DEFAULT_RECEIVE_TIMEOUT
+      @search_timeout = data[:search_timeout] || DEFAULT_SEARCH_TIMEOUT
+      @headers = {
+        Protocol::HEADER_API_KEY => api_key,
+        Protocol::HEADER_APP_ID => application_id,
+        'Content-Type' => 'application/json; charset=utf-8',
+        'User-Agent' => DEFAULT_USER_AGENT.push(data[:user_agent]).compact.join('; ')
+      }
+    end
+
+    def destroy
+      Thread.current["algolia_search_hosts_#{application_id}"] = nil
+      Thread.current["algolia_hosts_#{application_id}"] = nil
+      Thread.current["algolia_host_index_#{application_id}"] = nil
+      Thread.current["algolia_search_host_index_#{application_id}"] = nil
+    end
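To make the constructor above concrete, a small sketch (all credentials are placeholders): the host arrays are derived from the application ID unless :hosts / :search_hosts are passed, with the three *.algolianet.com fallbacks shuffled per client instance.

    client = Algolia::Client.new(
      :application_id  => 'YourApplicationID',  # required; ArgumentError otherwise
      :api_key         => 'YourAPIKey',
      :connect_timeout => 1                     # overrides DEFAULT_CONNECT_TIMEOUT (2s)
    )

    client.hosts        # => ["YourApplicationID.algolia.net", ...three shuffled *.algolianet.com hosts]
    client.search_hosts # => ["YourApplicationID-dsn.algolia.net", ...the same style of shuffled fallbacks]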
+
+    #
+    # Initialize a new index
+    #
+    def init_index(name)
+      Index.new(name, self)
+    end
+
+    #
+    # Initialize the analytics helper
+    #
+    def init_analytics()
+      Analytics.new(self, { :headers => @headers })
+    end
+
+    #
+    # Allows setting custom headers
+    #
+    def set_extra_header(key, value)
+      headers[key] = value
+    end
+
+    #
+    # Allows use of the IP rate limit when you have a proxy between the end user and Algolia.
+    # This option will set the X-Forwarded-For HTTP header with the client IP and the X-Forwarded-API-Key with the API Key having rate limits.
+    #
+    # @param admin_api_key the admin API Key you can find in your dashboard
+    # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax)
+    # @param rate_limit_api_key the API key on which you have a rate limit
+    #
+    def enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key)
+      headers[Protocol::HEADER_API_KEY] = admin_api_key
+      headers[Protocol::HEADER_FORWARDED_IP] = end_user_ip
+      headers[Protocol::HEADER_FORWARDED_API_KEY] = rate_limit_api_key
+    end
+
+    #
+    # Disable the IP rate limit enabled with the enable_rate_limit_forward() function
+    #
+    def disable_rate_limit_forward
+      headers[Protocol::HEADER_API_KEY] = api_key
+      headers.delete(Protocol::HEADER_FORWARDED_IP)
+      headers.delete(Protocol::HEADER_FORWARDED_API_KEY)
+    end
+
+    #
+    # Convenience method that wraps enable_rate_limit_forward/disable_rate_limit_forward
+    #
+    def with_rate_limits(end_user_ip, rate_limit_api_key, &block)
+      enable_rate_limit_forward(api_key, end_user_ip, rate_limit_api_key)
+      begin
+        yield
+      ensure
+        disable_rate_limit_forward
+      end
+    end
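A short sketch of the rate-limit helpers above (the IP and key are stand-ins): with_rate_limits swaps the forwarding headers in, yields, and restores them in an ensure block, so the headers are cleaned up even if the block raises.

    client.with_rate_limits('203.0.113.7', 'rate-limited-search-key') do
      # Requests inside the block carry X-Forwarded-For / X-Forwarded-API-Key.
      client.init_index('contacts').search('jeri')
    end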
+    # @param dst_index the new index name that will contain a copy of src_index (the destination is overwritten if it already exists).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def move_index(src_index, dst_index, request_options = {})
+      request = { 'operation' => 'move', 'destination' => dst_index }
+      post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options)
+    end
+
+    #
+    # Move an existing index and wait until the move has been processed
+    #
+    # @param src_index the name of the index to move.
+    # @param dst_index the new index name that will contain a copy of src_index (the destination is overwritten if it already exists).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def move_index!(src_index, dst_index, request_options = {})
+      res = move_index(src_index, dst_index, request_options)
+      wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Copy an existing index.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index (the destination is overwritten if it already exists).
+    # @param scope the optional list of scopes to copy (all if not specified).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_index(src_index, dst_index, scope = nil, request_options = {})
+      request = { 'operation' => 'copy', 'destination' => dst_index }
+      request['scope'] = scope unless scope.nil?
+      post(Protocol.index_operation_uri(src_index), request.to_json, :write, request_options)
+    end
+
+    #
+    # Copy an existing index and wait until the copy has been processed.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index (the destination is overwritten if it already exists).
+    # @param scope the optional list of scopes to copy (all if not specified).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_index!(src_index, dst_index, scope = nil, request_options = {})
+      res = copy_index(src_index, dst_index, scope, request_options)
+      wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Copy an existing index's settings.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's settings (the destination's settings are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_settings(src_index, dst_index, request_options = {})
+      copy_index(src_index, dst_index, ['settings'], request_options)
+    end
+
+    #
+    # Copy an existing index's settings and wait until the copy has been processed.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's settings (the destination's settings are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_settings!(src_index, dst_index, request_options = {})
+      res = copy_settings(src_index, dst_index, request_options)
+      wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Copy an existing index's synonyms.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's synonyms (the destination's synonyms are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_synonyms(src_index, dst_index, request_options = {})
+      copy_index(src_index, dst_index, ['synonyms'], request_options)
+    end
+
+    #
+    # Copy an existing index's synonyms and wait until the copy has been processed.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's synonyms (the destination's synonyms are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_synonyms!(src_index, dst_index, request_options = {})
+      res = copy_synonyms(src_index, dst_index, request_options)
+      wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Copy an existing index's rules.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's rules (the destination's rules are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_rules(src_index, dst_index, request_options = {})
+      copy_index(src_index, dst_index, ['rules'], request_options)
+    end
+
+    #
+    # Copy an existing index's rules and wait until the copy has been processed.
+    #
+    # @param src_index the name of the index to copy.
+    # @param dst_index the new index name that will contain a copy of src_index's rules (the destination's rules are overwritten if they already exist).
+    # @param request_options contains extra parameters to send with your query
+    #
+    def copy_rules!(src_index, dst_index, request_options = {})
+      res = copy_rules(src_index, dst_index, request_options)
+      wait_task(dst_index, res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Delete an index
+    # @param name the name of the index to delete
+    # @param request_options contains extra parameters to send with your query
+    #
+    def delete_index(name, request_options = {})
+      init_index(name).delete(request_options)
+    end
+
+    #
+    # Delete an index and wait until the deletion has been processed.
+    # @param name the name of the index to delete
+    # @param request_options contains extra parameters to send with your query
+    #
+    def delete_index!(name, request_options = {})
+      init_index(name).delete!(request_options)
+    end
+
+    #
+    # Return the last log entries.
+    #
+    # @param options - accepts those keys:
+    #   - offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry) - Default = 0
+    #   - length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000 - Default = 10
+    #   - type Type of log entries to retrieve ("all", "query", "build" or "error") - Default = 'all'
+    #   - request_options contains extra parameters to send with your query
+    #
+    def get_logs(options = nil, length = nil, type = nil)
+      if options.is_a?(Hash)
+        offset = options.delete('offset') || options.delete(:offset)
+        length = options.delete('length') || options.delete(:length)
+        type = options.delete('type') || options.delete(:type)
+        request_options = options.delete('request_options') || options.delete(:request_options)
+      else
+        # Deprecated signature: get_logs(offset, length, type)
+        offset = options
+      end
+      length ||= 10
+      type = 'all' if type.nil?
+      type = type ? 
'error' : 'all' if type.is_a?(true.class) + request_options ||= {} + + get(Protocol.logs(offset, length, type), :write, request_options) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def list_api_keys(request_options = {}) + get(Protocol.keys_uri, :read, request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def get_api_key(key, request_options = {}) + get(Protocol.key_uri(key), :read, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, maxQueriesPerIPPerHour, maxHitsPerQuery, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object The list of parameters for this key. + # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + + post(Protocol.keys_uri, params.to_json, :write, request_options) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - maxHitsPerQuery: integer + # - queryParameters: string + # - maxQueriesPerIPPerHour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + if object.instance_of?(Array) + params = { :acl => object } + else + params = object + end + + validity = 0 + unless request_options.is_a?(Hash) + validity = request_options + request_options = {} + end + + params[:indexes] = indexes if indexes + params['validity'] = validity.to_i if validity != 0 + params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0 + params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0 + + put(Protocol.key_uri(key), params.to_json, :write, request_options) + end + + # + # Delete an existing user key + # + def delete_api_key(key, request_options = {}) + delete(Protocol.key_uri(key), :write, request_options) + end + + # + # Restore a deleted api key + # + def restore_api_key(key, request_options = {}) + post(Protocol.restore_key_uri(key), :write, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def batch(operations, request_options = {}) + post(Protocol.batch_uri, { 'requests' => operations }.to_json, :batch, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def batch!(operations, request_options = {}) + res = batch(operations, request_options) + res['taskID'].each do |index, taskID| + wait_task(index, taskID, WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(index_name, taskID, request_options = {}) + get(Protocol.task_uri(index_name, taskID), :read, request_options)['status'] + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
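+    # A hedged sketch of the polling pattern (the index name and record are
+    # placeholders):
+    #
+    #   res = client.init_index('contacts').add_object({ :name => 'Jane' })
+    #   client.wait_task('contacts', res['taskID'])   # returns once the status is 'published'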
+ # + # @param index_name the index name owning the taskID + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + loop do + status = get_task_status(index_name, taskID, request_options) + if status == 'published' + return + end + sleep(time_before_retry.to_f / 1000) + end + end + + def get_personalization_strategy(request_options = {}) + get(Protocol.personalization_strategy_uri, :read, request_options) + end + + def set_personalization_strategy(strategy, request_options = {}) + post(Protocol.personalization_strategy_uri, strategy.to_json, :write, request_options) + end + + # + # Multicluster management + # + def list_clusters(request_options = {}) + get(Protocol.clusters_uri, :read, request_options) + end + + def list_user_ids(page = 0, hits_per_page = 20, request_options = {}) + get(Protocol.list_ids_uri(page, hits_per_page), :read, request_options) + end + + def get_top_user_ids(request_options = {}) + get(Protocol.cluster_top_user_uri, :read, request_options) + end + + def assign_user_id(user_id, cluster_name, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + body = { :cluster => cluster_name } + post(Protocol.cluster_mapping_uri, body.to_json, :write, request_options) + end + + def get_user_id(user_id, request_options = {}) + get(Protocol.cluster_mapping_uri(user_id), :read, request_options) + end + + def remove_user_id(user_id, request_options = {}) + request_options = add_header_to_request_options(request_options, { :'X-Algolia-User-ID' => user_id}) + + delete(Protocol.cluster_mapping_uri, :write, request_options) + end + + def search_user_id(query, cluster_name = nil, page = nil, hits_per_page = nil, request_options = {}) + body = { :query => query } + body[:cluster] = cluster_name unless cluster_name.nil? + body[:page] = page unless page.nil? + body[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + post(Protocol.search_user_id_uri, body.to_json, :read, request_options) + end + + # Perform an HTTP request for the given uri and method + # with common basic response handling. Will raise a + # AlgoliaProtocolError if the response has an error status code, + # and will return the parsed JSON body on success, if there is one. + # + def request(uri, method, data = nil, type = :write, request_options = {}) + exceptions = [] + + connect_timeout = @connect_timeout + send_timeout = if type == :search + @search_timeout + elsif type == :batch + type = :write + @batch_timeout + else + @send_timeout + end + receive_timeout = type == :search ? @search_timeout : @receive_timeout + + thread_local_hosts(type != :write).each_with_index do |host, i| + connect_timeout += 2 if i == 2 + send_timeout += 10 if i == 2 + receive_timeout += 10 if i == 2 + + thread_index_key = type != :write ? 
"algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + Thread.current[thread_index_key] = host[:index] + host[:last_call] = Time.now.to_i + + host[:session].connect_timeout = connect_timeout + host[:session].send_timeout = send_timeout + host[:session].receive_timeout = receive_timeout + begin + return perform_request(host[:session], host[:base_url] + uri, method, data, request_options) + rescue AlgoliaProtocolError => e + raise if e.code / 100 == 4 + exceptions << e + rescue => e + exceptions << e + end + host[:session].reset_all + end + raise AlgoliaProtocolError.new(0, "Cannot reach any host: #{exceptions.map { |e| e.to_s }.join(', ')}") + end + + def get(uri, type = :write, request_options = {}) + request(uri, :GET, nil, type, request_options) + end + + def post(uri, body = {}, type = :write, request_options = {}) + request(uri, :POST, body, type, request_options) + end + + def put(uri, body = {}, type = :write, request_options = {}) + request(uri, :PUT, body, type, request_options) + end + + def delete(uri, type = :write, request_options = {}) + request(uri, :DELETE, nil, type, request_options) + end + + private + + # + # This method returns a thread-local array of sessions + # + def thread_local_hosts(read) + thread_hosts_key = read ? "algolia_search_hosts_#{application_id}" : "algolia_hosts_#{application_id}" + Thread.current[thread_hosts_key] ||= (read ? search_hosts : hosts).each_with_index.map do |host, i| + client = HTTPClient.new + client.ssl_config.ssl_version = @ssl_version if @ssl && @ssl_version + client.transparent_gzip_decompression = @gzip + client.ssl_config.add_trust_ca File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'resources', 'ca-bundle.crt')) + { + :index => i, + :base_url => "http#{@ssl ? 's' : ''}://#{host}", + :session => client, + :last_call => nil + } + end + hosts = Thread.current[thread_hosts_key] + thread_index_key = read ? "algolia_search_host_index_#{application_id}" : "algolia_host_index_#{application_id}" + current_host = Thread.current[thread_index_key].to_i # `to_i` to ensure first call is 0 + # we want to always target host 0 first + # if the current host is not 0, then we want to use it first only if (we never used it OR we're using it since less than 1 minute) + if current_host != 0 && (hosts[current_host][:last_call].nil? 
|| hosts[current_host][:last_call] > Time.now.to_i - 60) + # first host will be `current_host` + first = hosts[current_host] + [first] + hosts.reject { |h| h[:index] == 0 || h == first } + hosts.select { |h| h[:index] == 0 } + else + # first host will be `0` + hosts + end + end + + def perform_request(session, url, method, data, request_options) + hs = {} + extra_headers = request_options[:headers] || request_options['headers'] || {} + @headers.each { |key, val| hs[key.to_s] = val } + extra_headers.each { |key, val| hs[key.to_s] = val } + response = case method + when :GET + session.get(url, { :header => hs }) + when :POST + session.post(url, { :body => data, :header => hs }) + when :PUT + session.put(url, { :body => data, :header => hs }) + when :DELETE + session.delete(url, { :header => hs }) + end + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content} (#{response.code})") + end + return JSON.parse(response.content) + end + + def add_header_to_request_options(request_options, headers_to_add) + if !request_options['headers'].is_a?(Hash) + if request_options[:headers].is_a?(Hash) + request_options['headers'] = request_options[:headers] + request_options.delete(:headers) + else + request_options['headers'] = {} + end + end + + request_options['headers'].merge!(headers_to_add) + request_options + end + + # Deprecated + alias_method :list_user_keys, :list_api_keys + alias_method :get_user_key, :get_api_key + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + end + + # Module methods + # ------------------------------------------------------------ + + # A singleton client + # Always use Algolia.client to retrieve the client object. + @@client = nil + + # + # Initialize the singleton instance of Client which is used by all API methods + # + def Algolia.init(options = {}) + application_id = ENV['ALGOLIA_APP_ID'] || ENV['ALGOLIA_API_ID'] || ENV['ALGOLIA_APPLICATION_ID'] + api_key = ENV['ALGOLIA_REST_API_KEY'] || ENV['ALGOLIA_API_KEY'] + + defaulted = { :application_id => application_id, :api_key => api_key } + defaulted.merge!(options) + + @@client = Client.new(defaulted) + end + + # + # Allow to set custom headers + # + def Algolia.set_extra_header(key, value) + Algolia.client.set_extra_header(key, value) + end + + # + # Allow to use IP rate limit when you have a proxy between end-user and Algolia. + # This option will set the X-Forwarded-For HTTP header with the client IP and the + # X-Forwarded-API-Key with the API Key having rate limits. 
+ # + # @param admin_api_key the admin API Key you can find in your dashboard + # @param end_user_ip the end user IP (you can use both IPV4 or IPV6 syntax) + # @param rate_limit_api_key the API key on which you have a rate limit + # + def Algolia.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + Algolia.client.enable_rate_limit_forward(admin_api_key, end_user_ip, rate_limit_api_key) + end + + # + # Disable IP rate limit enabled with enableRateLimitForward() function + # + def Algolia.disable_rate_limit_forward + Algolia.client.disable_rate_limit_forward + end + + # + # Convenience method thats wraps enable_rate_limit_forward/disable_rate_limit_forward + # + def Algolia.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + Algolia.client.with_rate_limits(end_user_ip, rate_limit_api_key, &block) + end + + # + # Generate a secured and public API Key from a list of tagFilters and an + # optional user token identifying the current user + # + # @param private_api_key your private API Key + # @param tag_filters the list of tags applied to the query (used as security) + # @param user_token an optional token identifying the current user + # + def Algolia.generate_secured_api_key(private_api_key, tag_filters_or_params, user_token = nil) + if tag_filters_or_params.is_a?(Hash) && user_token.nil? + encoded_params = Hash[tag_filters_or_params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + query_str = Protocol.to_query(encoded_params) + hmac = OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, query_str) + Base64.encode64("#{hmac}#{query_str}").gsub("\n", '') + else + tag_filters = if tag_filters_or_params.is_a?(Array) + tag_filters = tag_filters_or_params.map { |t| t.is_a?(Array) ? "(#{t.join(',')})" : t }.join(',') + else + tag_filters_or_params + end + raise ArgumentError.new('Attribute "tag_filters" must be a list of tags') if !tag_filters.is_a?(String) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), private_api_key, "#{tag_filters}#{user_token.to_s}") + end + end + + # + # Returns the remaining validity time for the given API key in seconds + # + # @param [String] secured_api_key the secured API key to check + # + # @return [Integer] remaining validity in seconds + # + def Algolia.get_secured_api_key_remaining_validity(secured_api_key) + now = Time.now.to_i + decoded_key = Base64.decode64(secured_api_key) + regex = 'validUntil=(\d+)' + matches = decoded_key.match(regex) + + if matches === nil + raise ValidUntilNotFoundError.new('The SecuredAPIKey doesn\'t have a validUntil parameter.') + end + + valid_until = matches[1].to_i + + valid_until - now + end + + # + # This method allows to query multiple indexes with one API call + # + def Algolia.multiple_queries(queries, options = nil, strategy = nil) + Algolia.client.multiple_queries(queries, options, strategy) + end + + # + # This method allows to get objects (records) via objectID across + # multiple indexes with one API call + # + def Algolia.multiple_get_objects(requests, request_options = {}) + Algolia.client.multiple_get_objects(requests, request_options) + end + + # + # List all existing indexes + # return an Answer object with answer in the form + # {"items": [{ "name": "contacts", "createdAt": "2013-01-18T15:33:13.556Z"}, + # {"name": "notes", "createdAt": "2013-01-18T15:33:13.556Z"}]} + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_indexes(request_options = {}) + Algolia.client.list_indexes(request_options) + end + 
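+  #
+  # Editor's sketch of the module-level flow above; the application id, keys,
+  # and filter value are placeholders, not values shipped with this gem:
+  #
+  #   Algolia.init(:application_id => 'YourAppID', :api_key => 'YourAdminAPIKey')
+  #   public_key = Algolia.generate_secured_api_key('YourSearchAPIKey', :tagFilters => ['user_42'])
+  #   Algolia.list_indexes['items'].each { |index| puts index['name'] }
+  #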
+ # + # Move an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index(src_index, dst_index, request_options = {}) + Algolia.client.move_index(src_index, dst_index, request_options) + end + + # + # Move an existing index and wait until the move has been processed + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.move_index!(src_index, dst_index, request_options = {}) + Algolia.client.move_index!(src_index, dst_index, request_options) + end + + # + # Copy an existing index. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name (destination will be overriten if it already exist). + # @param scope the optional list of scopes to copy (all if not specified). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_index!(src_index, dst_index, scope = nil, request_options = {}) + Algolia.client.copy_index!(src_index, dst_index, scope, request_options) + end + + # + # Copy an existing index settings. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings(src_index, dst_index, request_options) + end + + # + # Copy an existing index settings and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name settings (destination settings will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_settings!(src_index, dst_index, request_options = {}) + Algolia.client.copy_settings!(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). 
+ # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms(src_index, dst_index, request_options) + end + + # + # Copy an existing index synonyms and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name synonyms (destination synonyms will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_synonyms!(src_index, dst_index, request_options = {}) + Algolia.client.copy_synonyms!(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules(src_index, dst_index, request_options) + end + + # + # Copy an existing index rules and wait until the copy has been processed. + # + # @param src_index the name of index to copy. + # @param dst_index the new index name that will contains a copy of src_index_name rules (destination rules will be overriten if it already exist). + # @param request_options contains extra parameters to send with your query + # + def Algolia.copy_rules!(src_index, dst_index, request_options = {}) + Algolia.client.copy_rules!(src_index, dst_index, request_options) + end + + # + # Delete an index + # + def Algolia.delete_index(name, request_options = {}) + Algolia.client.delete_index(name, request_options) + end + + # + # Delete an index and wait until the deletion has been processed. + # + def Algolia.delete_index!(name, request_options = {}) + Algolia.client.delete_index!(name, request_options) + end + + # + # Return last logs entries. + # + # @param offset Specify the first entry to retrieve (0-based, 0 is the most recent log entry). + # @param length Specify the maximum number of entries to retrieve starting at offset. Maximum allowed value: 1000. 
+ # @param type Specify the type of entries you want to retrieve - default: "all" + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_logs(options = nil, length = nil, type = nil) + Algolia.client.get_logs(options, length, type) + end + + # + # List all existing user keys with their associated ACLs + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.list_api_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Deprecated + # + def Algolia.list_user_keys(request_options = {}) + Algolia.client.list_api_keys(request_options) + end + + # + # Get ACL of a user key + # + # @param request_options contains extra parameters to send with your query + # + def Algolia.get_api_key(key, request_options = {}) + Algolia.client.get_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.get_user_key(key, request_options = {}) + Algolia.client.get_user_key(key, request_options) + end + + # + # Create a new user key + # + # Deprecated call was add_api_key(acl, validity, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param object can be two different parameters: + # The list of parameters for this key. Defined by a NSDictionary that + # can contains the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.add_api_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.add_user_key(object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.add_api_key(object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Update a user key + # + # Deprecated call was update_api_key(key, acl, validity, maxQueriesPerIPPerHour, max_hits_per_query, indexes) + # + # ACL can contain an array with those strings: + # - search: allow to search (https and http) + # - addObject: allows to add/update an object in the index (https only) + # - deleteObject : allows to delete an existing object (https only) + # - deleteIndex : allows to delete index content (https only) + # - settings : allows to get index settings (https only) + # - editSettings : allows to change index settings (https only) + # + # @param key API Key to update + # @param object The list of parameters for this key. 
+ # Defined by a Hash that can contain the following values: + # - acl: array of string + # - indexes: array of string + # - validity: int + # - referers: array of string + # - description: string + # - max_hits_per_query: integer + # - queryParameters: string + # - max_queries_per_IP_per_hour: integer + # @param request_options contains extra parameters to send with your query - Default = {} + # + def Algolia.update_api_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Deprecated + # + def Algolia.update_user_key(key, object, request_options = {}, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, indexes = nil) + Algolia.client.update_api_key(key, object, request_options, max_queries_per_IP_per_hour, max_hits_per_query, indexes) + end + + # + # Delete an existing user key + # + def Algolia.delete_api_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Restore an existing api key + # + def Algolia.restore_api_key(key, request_options = {}) + Algolia.client.restore_api_key(key, request_options) + end + + # + # Deprecated + # + def Algolia.delete_user_key(key, request_options = {}) + Algolia.client.delete_api_key(key, request_options) + end + + # + # Send a batch request targeting multiple indices + # + def Algolia.batch(requests, request_options = {}) + Algolia.client.batch(requests, request_options) + end + + # + # Send a batch request targeting multiple indices and wait the end of the indexing + # + def Algolia.batch!(requests, request_options = {}) + Algolia.client.batch!(requests, request_options) + end + + # + # Wait until task is completed by the engine + # + def Algolia.wait_task(index_name, taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + Algolia.client.wait_task(index_name, taskID, time_before_retry, request_options) + end + + def Algolia.get_task_status(index_name, taskID, request_options = {}) + Algolia.client.get_task_status(index_name, taskID, request_options = {}) + end + # + # Used mostly for testing. Lets you delete the api key global vars. + # + def Algolia.destroy + @@client.destroy unless @@client.nil? + @@client = nil + self + end + + def Algolia.client + if !@@client + raise AlgoliaError, 'API not initialized' + end + @@client + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb new file mode 100644 index 0000000..4b76b7b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/error.rb @@ -0,0 +1,31 @@ +module Algolia + + # Base exception class for errors thrown by the Algolia + # client library. AlgoliaError will be raised by any + # network operation if Algolia.init() has not been called. + class AlgoliaError < StandardError #Exception ... why? A:http://www.skorks.com/2009/09/ruby-exceptions-and-exception-handling/ + end + + # An exception class raised when the REST API returns an error. + # The error code and message will be parsed out of the HTTP response, + # which is also included in the response attribute. 
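+  # A hedged handling sketch (the 4xx check mirrors the retry logic in
+  # Client#request; `retry_or_log` is a hypothetical helper, and the index
+  # name is a placeholder):
+  #
+  #   begin
+  #     Algolia.client.init_index('contacts').search('jane')
+  #   rescue Algolia::AlgoliaProtocolError => e
+  #     raise if e.code / 100 == 4   # 4xx responses are not retried by the client either
+  #     retry_or_log(e)
+  #   end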
+ class AlgoliaProtocolError < AlgoliaError + attr_accessor :code + attr_accessor :message + + def initialize(code, message) + self.code = code + self.message = message + super("#{self.code}: #{self.message}") + end + end + + # An exception class raised when the given object was not found. + class AlgoliaObjectNotFoundError < AlgoliaError + end + + # An exception class raised when the validUntil parameter is not found + class ValidUntilNotFoundError < AlgoliaError + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb new file mode 100644 index 0000000..dbc64d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/index.rb @@ -0,0 +1,1355 @@ +require 'algolia/client' +require 'algolia/error' + +module Algolia + + class Index + attr_accessor :name, :client + + def initialize(name, client = nil) + self.name = name + self.client = client || Algolia.client + end + + # + # Delete an index + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete(request_options = {}) + client.delete(Protocol.index_uri(name), :write, request_options) + end + alias_method :delete_index, :delete + + # + # Delete an index and wait until the deletion has been processed + # + # @param request_options contains extra parameters to send with your query + # + # return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" } + # + def delete!(request_options = {}) + res = delete(request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + alias_method :delete_index!, :delete! + + # + # Add an object in this index + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param request_options contains extra parameters to send with your query + # + def add_object(object, objectID = nil, request_options = {}) + check_object(object) + if objectID.nil? || objectID.to_s.empty? + client.post(Protocol.index_uri(name), object.to_json, :write, request_options) + else + client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options) + end + end + + # + # Add an object in this index and wait end of indexing + # + # @param object the object to add to the index. + # The object is represented by an associative array + # @param objectID (optional) an objectID you want to attribute to this object + # (if the attribute already exist the old object will be overridden) + # @param Request options object. Contains extra URL parameters or headers + # + def add_object!(object, objectID = nil, request_options = {}) + res = add_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add several objects in this index + # + # @param objects the array of objects to add inside the index. 
+ # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects(objects, request_options = {}) + batch(build_batch('addObject', objects, false), request_options) + end + + # + # Add several objects in this index and wait end of indexing + # + # @param objects the array of objects to add inside the index. + # Each object is represented by an associative array + # @param request_options contains extra parameters to send with your query + # + def add_objects!(objects, request_options = {}) + res = add_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Search inside the index + # + # @param query the full text query + # @param args (optional) if set, contains an associative array with query parameters: + # - page: (integer) Pagination parameter used to select the page to retrieve. + # Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9 + # - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20. + # - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size). + # Attributes are separated with a comma (for example "name,address"). + # You can also use a string array encoding (for example ["name","address"]). + # By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index. + # - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query. + # Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]). + # If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted. + # You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted. + # A matchLevel is returned for each highlighted attribute and can contain: + # - full: if all the query terms were found in the attribute, + # - partial: if only some of the query terms were found, + # - none: if none of the query terms were found. + # - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`). + # Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10). + # You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed. + # - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3. + # - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7. + # - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute. + # - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma). + # For example aroundLatLng=47.316669,5.016670). 
+ # You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision + # (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng). + # For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201). + # At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}}) + # - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma. + # The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`. + # You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000. + # You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]). + # - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas. + # To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3). + # You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3). + # At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}). + # - facetFilters: filter the query by a list of facets. + # Facets are separated by commas and each facet is encoded as `attributeName:value`. + # For example: `facetFilters=category:Book,author:John%20Doe`. + # You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`). + # - facets: List of object attributes that you want to use for faceting. + # Attributes are separated with a comma (for example `"category,author"` ). + # You can also use a JSON string array encoding (for example ["category","author"]). + # Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter. + # You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**. + # - queryType: select how the query words are interpreted, it can be one of the following value: + # - prefixAll: all query words are interpreted as prefixes, + # - prefixLast: only the last word is interpreted as a prefix (default behavior), + # - prefixNone: no query word is interpreted as a prefix. This option is not recommended. + # - optionalWords: a string that contains the list of words that should be considered as optional when found in the query. + # The list of words is comma separated. + # - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set. + # This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter, + # all hits containing a duplicate value for the attributeForDistinct attribute are removed from results. + # For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best + # one is kept and others are removed. 
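+    # Usage sketch for a few of the parameters above (the index and values are placeholders):
+    #
+    #   index.search('david', { :hitsPerPage => 5, :attributesToRetrieve => ['firstname', 'lastname'], :getRankingInfo => 1 })
+    #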
+ # @param request_options contains extra parameters to send with your query + # + def search(query, params = {}, request_options = {}) + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + encoded_params[:query] = query + client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options) + end + + class IndexBrowser + def initialize(client, name, params) + @client = client + @name = name + @params = params + @cursor = params[:cursor] || params['cursor'] || nil + end + + def browse(request_options = {}, &block) + loop do + answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options) + answer['hits'].each do |hit| + if block.arity == 2 + yield hit, @cursor + else + yield hit + end + end + @cursor = answer['cursor'] + break if @cursor.nil? + end + end + end + + # + # Browse all index content + # + # @param queryParameters The hash of query parameters to use to browse + # To browse from a specific cursor, just add a ":cursor" parameters + # @param queryParameters An optional second parameters hash here for backward-compatibility (which will be merged with the first) + # @param request_options contains extra parameters to send with your query + # + # @DEPRECATED: + # @param page Pagination parameter used to select the page to retrieve. + # @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000. + # + def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block) + params = {} + if page_or_query_parameters.is_a?(Hash) + params.merge!(page_or_query_parameters) + else + params[:page] = page_or_query_parameters unless page_or_query_parameters.nil? + end + if hits_per_page.is_a?(Hash) + params.merge!(hits_per_page) + else + params[:hitsPerPage] = hits_per_page unless hits_per_page.nil? + end + + if block_given? + IndexBrowser.new(client, name, params).browse(request_options, &block) + else + params[:page] ||= 0 + params[:hitsPerPage] ||= 1000 + client.get(Protocol.browse_uri(name, params), :read, request_options) + end + end + + # + # Browse a single page from a specific cursor + # + # @param request_options contains extra parameters to send with your query + # + def browse_from(cursor, hits_per_page = 1000, request_options = {}) + client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options) + end + + # + # Get an object from this index + # + # @param objectID the unique identifier of the object to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_object(objectID, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + if attributes_to_retrieve.nil? 
+ client.get(Protocol.object_uri(name, objectID, nil), :read, request_options) + else + client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options) + end + end + + # + # Get a list of objects from this index + # + # @param objectIDs the array of unique identifier of the objects to retrieve + # @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by "," + # @param request_options contains extra parameters to send with your query + # + def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {}) + attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array) + requests = objectIDs.map do |objectID| + req = { :indexName => name, :objectID => objectID.to_s } + req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil? + req + end + client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results'] + end + + # + # Find object by the given condition. + # + # Options can be passed in request_options body: + # - query (string): pass a query + # - paginate (bool): choose if you want to iterate through all the + # documents (true) or only the first page (false). Default is true. + # The function takes a block to filter the results from search query + # Usage example: + # index.find_object({'query' => '', 'paginate' => true}) {|obj| obj.key?('company') and obj['company'] == 'Apple'} + # + # @param request_options contains extra parameters to send with your query + # + # @return [Hash] the matching object and its position in the result set + # + def find_object(request_options = {}) + paginate = true + page = 0 + + query = request_options[:query] || request_options['query'] || '' + request_options.delete(:query) + request_options.delete('query') + + if request_options.has_key? :paginate + paginate = request_options[:paginate] + end + + if request_options.has_key? 'paginate' + paginate = request_options['paginate'] + end + + request_options.delete(:paginate) + request_options.delete('paginate') + + while true + request_options['page'] = page + res = search(query, request_options) + + res['hits'].each_with_index do |hit, i| + if yield(hit) + return { + 'object' => hit, + 'position' => i, + 'page' => page, + } + end + end if block_given? + + has_next_page = page + 1 < res['nbPages'] + if !paginate || !has_next_page + raise AlgoliaObjectNotFoundError.new('Object not found') + end + + page += 1 + end + end + + # + # Retrieve the given object position in a set of results. + # + # @param [Array] objects the result set to browse + # @param [String] object_id the object to look for + # + # @return [Integer] position of the object, or -1 if it's not in the array + # + def self.get_object_position(objects, object_id) + objects['hits'].find_index { |hit| hit['objectID'] == object_id } || -1 + end + + # + # Check the status of a task on the server. + # All server task are asynchronous and you can check the status of a task with this method. + # + # @param taskID the id of the task returned by server + # @param request_options contains extra parameters to send with your query + # + def get_task_status(taskID, request_options = {}) + client.get_task_status(name, taskID, request_options) + end + + # + # Wait the publication of a task on the server. + # All server task are asynchronous and you can check with this method that the task is published. 
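+    # For example (hedged sketch; the record is a placeholder):
+    #
+    #   res = index.save_objects([{ 'objectID' => '1', 'name' => 'Jane' }])
+    #   index.wait_task(res['taskID'])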
+ # + # @param taskID the id of the task returned by server + # @param time_before_retry the time in milliseconds before retry (default = 100ms) + # @param request_options contains extra parameters to send with your query + # + def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {}) + client.wait_task(name, taskID, time_before_retry, request_options) + end + + # + # Override the content of an object + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object(object, objectID = nil, request_options = {}) + client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options) + end + + # + # Override the content of object and wait end of indexing + # + # @param object the object to save + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_object!(object, objectID = nil, request_options = {}) + res = save_object(object, objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the content of several objects + # + # @param objects the array of objects to save, each object must contain an 'objectID' key + # @param request_options contains extra parameters to send with your query + # + def save_objects(objects, request_options = {}) + batch(build_batch('updateObject', objects, true), request_options) + end + + # + # Override the content of several objects and wait end of indexing + # + # @param objects the array of objects to save, each object must contain an objectID attribute + # @param request_options contains extra parameters to send with your query + # + def save_objects!(objects, request_options = {}) + res = save_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Override the current objects by the given array of objects and wait end of indexing. Settings, + # synonyms and query rules are untouched. The objects are replaced without any downtime. + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects(objects, request_options = {}) + safe = request_options[:safe] || request_options['safe'] || false + request_options.delete(:safe) + request_options.delete('safe') + + tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s) + + responses = [] + + scope = ['settings', 'synonyms', 'rules'] + res = @client.copy_index(@name, tmp_index.name, scope, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + batch = [] + batch_size = 1000 + count = 0 + + objects.each do |object| + batch << object + count += 1 + if count == batch_size + res = tmp_index.add_objects(batch, request_options) + responses << res + batch = [] + count = 0 + end + end + + if batch.any? 
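+        # editor's note: the loop above ships full batches of batch_size (1000)
+        # objects; anything left over is sent here as one final, smaller batch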
+ res = tmp_index.add_objects(batch, request_options) + responses << res + end + + if safe + responses.each do |res| + tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + end + + res = @client.move_index(tmp_index.name, @name, request_options) + responses << res + + if safe + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + end + + responses + end + + # + # Override the current objects by the given array of objects and wait end of indexing + # + # @param objects the array of objects to save + # @param request_options contains extra parameters to send with your query + # + def replace_all_objects!(objects, request_options = {}) + replace_all_objects(objects, request_options.merge(:safe => true)) + end + + # + # Update partially an object (only update attributes passed in argument) + # + # @param object the object attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {}) + client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options) + end + + # + # Partially override the content of several objects + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects(objects, create_if_not_exits = true, request_options = {}) + if create_if_not_exits + batch(build_batch('partialUpdateObject', objects, true), request_options) + else + batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options) + end + end + + # + # Partially override the content of several objects and wait end of indexing + # + # @param objects an array of objects to update (each object must contains a objectID attribute) + # @param create_if_not_exits a boolean, if true create the objects if they don't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_objects!(objects, create_if_not_exits = true, request_options = {}) + res = partial_update_objects(objects, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Update partially an object (only update attributes passed in argument) and wait indexing + # + # @param object the attributes to override + # @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key + # @param create_if_not_exits a boolean, if true creates the object if this one doesn't exist + # @param request_options contains extra parameters to send with your query + # + def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {}) + res = partial_update_object(object, objectID, create_if_not_exits, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete an object from the index + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send 
with your query + # + def delete_object(objectID, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == '' + client.delete(Protocol.object_uri(name, objectID), :write, request_options) + end + + # + # Delete an object from the index and wait end of indexing + # + # @param objectID the unique identifier of object to delete + # @param request_options contains extra parameters to send with your query + # + def delete_object!(objectID, request_options = {}) + res = delete_object(objectID, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete several objects + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects(objects, request_options = {}) + check_array(objects) + batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options) + end + + # + # Delete several objects and wait end of indexing + # + # @param objects an array of objectIDs + # @param request_options contains extra parameters to send with your query + # + def delete_objects!(objects, request_options = {}) + res = delete_objects(objects, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Delete all objects matching a query + # This method retrieves all objects synchronously but deletes in batch + # asynchronously + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query(query, params = nil, request_options = {}) + raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil? + params = sanitized_delete_by_query_params(params) + + params[:query] = query + params[:hitsPerPage] = 1000 + params[:distinct] = false + params[:attributesToRetrieve] = ['objectID'] + params[:cursor] = '' + ids = [] + + while params[:cursor] != nil + result = browse(params, nil, request_options) + + params[:cursor] = result['cursor'] + + hits = result['hits'] + break if hits.empty? + + ids += hits.map { |hit| hit['objectID'] } + end + + delete_objects(ids, request_options) + end + + # + # Delete all objects matching a query and wait end of indexing + # + # @param query the query string + # @param params the optional query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by_query!(query, params = nil, request_options = {}) + res = delete_by_query(query, params, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res + res + end + + # + # Delete all objects matching a query (doesn't work with actual text queries) + # This method deletes every record matching the filters provided + # + # @param params query parameters + # @param request_options contains extra parameters to send with your query + # + def delete_by(params, request_options = {}) + raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil? 
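+      # unlike delete_by_query above, this posts the filter parameters straight
+      # to the deleteByQuery endpoint and never browses the index client-side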
+      params = sanitized_delete_by_query_params(params)
+      client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
+    end
+
+    #
+    # Delete all objects matching a query (doesn't work with actual text queries)
+    # This method deletes every record matching the filters provided and waits for the end of indexing
+    # @param params query parameters
+    # @param request_options contains extra parameters to send with your query
+    #
+    def delete_by!(params, request_options = {})
+      res = delete_by(params, request_options)
+      wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
+      res
+    end
+
+    #
+    # Delete the index content
+    #
+    # @param request_options contains extra parameters to send with your query
+    #
+    def clear(request_options = {})
+      client.post(Protocol.clear_uri(name), {}, :write, request_options)
+    end
+    alias_method :clear_index, :clear
+
+    #
+    # Delete the index content and wait end of indexing
+    #
+    def clear!(request_options = {})
+      res = clear(request_options)
+      wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+    alias_method :clear_index!, :clear!
+
+    #
+    # Set settings for this index
+    #
+    def set_settings(new_settings, options = {}, request_options = {})
+      client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
+    end
+
+    #
+    # Set settings for this index and wait end of indexing
+    #
+    def set_settings!(new_settings, options = {}, request_options = {})
+      res = set_settings(new_settings, options, request_options)
+      wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Get settings of this index
+    #
+    def get_settings(options = {}, request_options = {})
+      options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
+      client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
+    end
+
+    #
+    # List all existing user keys with their associated ACLs
+    #
+    # Deprecated: Please use `client.list_api_keys` instead.
+    def list_api_keys(request_options = {})
+      client.get(Protocol.index_keys_uri(name), :read, request_options)
+    end
+
+    #
+    # Get ACL of a user key
+    #
+    # Deprecated: Please use `client.get_api_key` instead.
+    def get_api_key(key, request_options = {})
+      client.get(Protocol.index_key_uri(name, key), :read, request_options)
+    end
+
+    #
+    # Create a new user key
+    #
+    # @param object can be two different parameters:
+    #        The list of parameters for this key. Defined by a Hash that can
+    #        contain the following values:
+    #        - acl: array of string
+    #        - validity: int
+    #        - referers: array of string
+    #        - description: string
+    #        - maxHitsPerQuery: integer
+    #        - queryParameters: string
+    #        - maxQueriesPerIPPerHour: integer
+    #        Or the list of ACL for this key. Defined by an array of String that
+    #        can contain the following values:
+    #          - search: allows to search (https and http)
+    #          - addObject: allows to add/update an object in the index (https only)
+    #          - deleteObject : allows to delete an existing object (https only)
+    #          - deleteIndex : allows to delete index content (https only)
+    #          - settings : allows to get index settings (https only)
+    #          - editSettings : allows to change index settings (https only)
+    # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
+    # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
+    # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
+    # @param request_options contains extra parameters to send with your query
+    #
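+    # Illustrative example (hypothetical receiver and values; keys created this
+    # way are scoped to this index):
+    #   index.add_api_key(['search'], 300)  # search-only key valid for 300 seconds
+    #   index.add_api_key({ :acl => ['search'], :maxHitsPerQuery => 20 })
+    #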
+    # Deprecated: Please use `client.add_api_key` instead
+    def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
+      if object.instance_of?(Array)
+        params = { :acl => object }
+      else
+        params = object
+      end
+
+      params['validity'] = validity.to_i if validity != 0
+      params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
+      params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
+
+      client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
+    end
+
+    #
+    # Update a user key
+    #
+    # @param object can be two different parameters:
+    #        The list of parameters for this key. Defined by a Hash that
+    #        can contain the following values:
+    #        - acl: array of string
+    #        - validity: int
+    #        - referers: array of string
+    #        - description: string
+    #        - maxHitsPerQuery: integer
+    #        - queryParameters: string
+    #        - maxQueriesPerIPPerHour: integer
+    #        Or the list of ACL for this key. Defined by an array of String that
+    #        can contain the following values:
+    #          - search: allows to search (https and http)
+    #          - addObject: allows to add/update an object in the index (https only)
+    #          - deleteObject : allows to delete an existing object (https only)
+    #          - deleteIndex : allows to delete index content (https only)
+    #          - settings : allows to get index settings (https only)
+    #          - editSettings : allows to change index settings (https only)
+    # @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
+    # @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
+    # @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
+    # @param request_options contains extra parameters to send with your query
+    #
+    # Deprecated: Please use `client.update_api_key` instead
+    def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
+      if object.instance_of?(Array)
+        params = { :acl => object }
+      else
+        params = object
+      end
+
+      params['validity'] = validity.to_i if validity != 0
+      params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
+      params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
+
+      client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
+    end
+
+    #
+    # Delete an existing user key
+    #
+    # Deprecated: Please use `client.delete_api_key` instead
+    def delete_api_key(key, request_options = {})
+      client.delete(Protocol.index_key_uri(name, key), :write, request_options)
+    end
+
+    #
+    # Send a batch request
+    #
+    def batch(request, request_options = {})
+      client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
+    end
+
+    #
+    # Send a batch request and wait the end of the indexing
+    #
+    def batch!(request, request_options = {})
+      res = batch(request, request_options)
+      wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
+      res
+    end
+
+    #
+    # Search for facet values
+    #
+    # @param facet_name Name of the facet to search. It must have been declared in the
+    #       index's `attributesForFaceting` setting with the `searchable()` modifier.
+    # @param facet_query Text to search for in the facet's values
+    # @param search_parameters An optional query to take extra search parameters into account.
+    #       These parameters apply to index objects like in a regular search query.
+    #       Only facet values contained in the matched objects will be returned.
+    # @param request_options contains extra parameters to send with your query
+    #
+    def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
+      params = search_parameters.clone
+      params['facetQuery'] = facet_query
+      client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
+    end
+
+    # deprecated
+    alias_method :search_facet, :search_for_facet_values
+
+    #
+    # Perform a search with disjunctive facets, generating as many queries as there are disjunctive facets
+    #
+    # @param query the query
+    # @param disjunctive_facets the array of disjunctive facets
+    # @param params a hash representing the regular query parameters
+    # @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
+    #     ex: { "my_facet1" => ["my_value1"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
+    # @param request_options contains extra parameters to send with your query
+    #
+    def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
+      raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
+      raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
+
+      # extract disjunctive facets & associated refinements
+      disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
+      disjunctive_refinements = {}
+      refinements.each do |k, v|
+        disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
+      end
+
+      # build queries
+      queries = []
+      ## hits + regular facets query
+      filters = []
+      refinements.to_a.each do |k, values|
+        r = values.map { |v| "#{k}:#{v}" }
+        if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
+          # disjunctive refinements are ORed
+          filters << r
+        else
+          # regular refinements are ANDed
+          filters += r
+        end
+      end
+      queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
+      ## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
+      disjunctive_facets.each do |disjunctive_facet|
+        filters = []
+        refinements.each do |k, values|
+          if k.to_s != disjunctive_facet.to_s
+            r = values.map { |v| "#{k}:#{v}" }
+            if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
+              # disjunctive refinements are ORed
+              filters << r
+            else
+              # regular refinements are ANDed
+              filters += r
+            end
+          end
+        end
+        queries << params.merge({
+          :index_name => self.name,
+          :query => query,
+          :page => 0,
+          :hitsPerPage => 1,
+          :attributesToRetrieve => [],
+          :attributesToHighlight => [],
+          :attributesToSnippet => [],
+          :facets => disjunctive_facet,
+          :facetFilters => filters,
+          :analytics => false
+        })
+      end
+      answers = client.multiple_queries(queries, { :request_options => request_options })
+
+      # aggregate answers
+      ## first answer stores the hits + regular facets
+      aggregated_answer = answers['results'][0]
+      ## others store the disjunctive facets
+      aggregated_answer['disjunctiveFacets'] = {}
+      answers['results'].each_with_index do |a, i|
+        next if i == 0
+        a['facets'].each do |facet, values|
+          ## add the
facet to the disjunctive facet hash + aggregated_answer['disjunctiveFacets'][facet] = values + ## concatenate missing refinements + (disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r| + if aggregated_answer['disjunctiveFacets'][facet][r].nil? + aggregated_answer['disjunctiveFacets'][facet][r] = 0 + end + end + end + end + + aggregated_answer + end + + # + # Alias of Algolia.list_indexes + # + # @param request_options contains extra parameters to send with your query + # + def Index.all(request_options = {}) + Algolia.list_indexes(request_options) + end + + # + # Search synonyms + # + # @param query the query + # @param params an optional hash of :type, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_synonyms(query, params = {}, request_options = {}) + type = params[:type] || params['type'] + type = type.join(',') if type.is_a?(Array) + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :type => type.to_s, + :page => page, + :hitsPerPage => hits_per_page + } + client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options) + end + + # + # Get a synonym + # + # @param objectID the synonym objectID + # @param request_options contains extra parameters to send with your query + def get_synonym(objectID, request_options = {}) + client.get(Protocol.synonym_uri(name, objectID), :read, request_options) + end + + # + # Delete a synonym + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_synonym!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_synonym(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Save a synonym + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {}) + client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options) + end + + # + # Save a synonym and wait the end of indexing + # + # @param objectID the synonym objectID + # @param synonym the synonym + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {}) + res = save_synonym(objectID, synonym, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + 
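+    # Illustrative usage (hypothetical objectID and values):
+    #   index.save_synonym!('color-gray', {
+    #     :objectID => 'color-gray',
+    #     :type     => 'synonym',
+    #     :synonyms => ['gray', 'grey']
+    #   }, true) # forward to replicas, then wait for indexing
+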
+ # + # Clear all synonyms + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all synonyms and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_synonyms!(forward_to_replicas = false, request_options = {}) + res = clear_synonyms(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Add/Update an array of synonyms + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options) + end + + # + # Add/Update an array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {}) + res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Replace synonyms in the index by the given array of synonyms + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms(synonyms, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_synonyms(synonyms, forward_to_replicas, true, request_options) + end + + # + # Replace synonyms in the index by the given array of synonyms and wait the end of indexing + # + # @param synonyms the array of synonyms to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_synonyms!(synonyms, request_options = {}) + res = replace_all_synonyms(synonyms, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of synonyms + # Accepts an optional block to which it will pass each synonym + # Also returns an array with all the synonyms + # + # @param hits_per_page Amount of synonyms to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_synonyms(hits_per_page = 100, request_options = {}, &_block) + 
res = [] + page = 0 + loop do + curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |synonym| + res << synonym + yield synonym if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Search rules + # + # @param query the query + # @param params an optional hash of :anchoring, :context, :page, :hitsPerPage + # @param request_options contains extra parameters to send with your query + # + def search_rules(query, params = {}, request_options = {}) + anchoring = params[:anchoring] + context = params[:context] + page = params[:page] || params['page'] || 0 + hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20 + params = { + :query => query, + :page => page, + :hitsPerPage => hits_per_page + } + params[:anchoring] = anchoring unless anchoring.nil? + params[:context] = context unless context.nil? + client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options) + end + + # + # Get a rule + # + # @param objectID the rule objectID + # @param request_options contains extra parameters to send with your query + # + def get_rule(objectID, request_options = {}) + client.get(Protocol.rule_uri(name, objectID), :read, request_options) + end + + # + # Delete a rule + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule(objectID, forward_to_replicas = false, request_options = {}) + client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options) + end + + # + # Delete a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def delete_rule!(objectID, forward_to_replicas = false, request_options = {}) + res = delete_rule(objectID, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Save a rule + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule(objectID, rule, forward_to_replicas = false, request_options = {}) + raise ArgumentError.new('objectID must not be blank') if objectID.nil? 
|| objectID == '' + client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options) + end + + # + # Save a rule and wait the end of indexing + # + # @param objectID the rule objectID + # @param rule the rule + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {}) + res = save_rule(objectID, rule, forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Clear all rules + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules(forward_to_replicas = false, request_options = {}) + client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options) + end + + # + # Clear all rules and wait the end of indexing + # + # @param forward_to_replicas should we forward the delete to replica indices + # @param request_options contains extra parameters to send with your query + # + def clear_rules!(forward_to_replicas = false, request_options = {}) + res = clear_rules(forward_to_replicas, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Add/Update an array of rules + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options) + end + + # + # Add/Update an array of rules and wait the end of indexing + # + # @param rules the array of rules to add/update + # @param forward_to_replicas should we forward the delete to replica indices + # @param clear_existing_rules should we clear the existing rules before adding the new ones + # @param request_options contains extra parameters to send with your query + # + def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {}) + res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + return res + end + + # + # Replace rules in the index by the given array of rules + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules(rules, request_options = {}) + forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false + batch_rules(rules, forward_to_replicas, true, request_options) + end + + # + # Replace rules in the index by the given array of rules and wait the end of indexing + # + # @param rules the array of rules to add + # @param request_options contains extra parameters to send with your query + # + def replace_all_rules!(rules, request_options = {}) + res = 
replace_all_rules(rules, request_options) + wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) + res + end + + # + # Export the full list of rules + # Accepts an optional block to which it will pass each rule + # Also returns an array with all the rules + # + # @param hits_per_page Amount of rules to retrieve on each internal request - Optional - Default: 100 + # @param request_options contains extra parameters to send with your query - Optional + # + def export_rules(hits_per_page = 100, request_options = {}, &_block) + res = [] + page = 0 + loop do + curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits'] + curr.each do |rule| + res << rule + yield rule if block_given? + end + break if curr.size < hits_per_page + page += 1 + end + res + end + + # + # Check whether an index exists or not + # + # @return [Boolean] + # + def exists + begin + get_settings + rescue AlgoliaProtocolError => e + if e.code === 404 + return false + end + + raise e + end + return true + end + + # + # Aliases the exists method + # + alias :exists? :exists + + # Deprecated + alias_method :get_user_key, :get_api_key + alias_method :list_user_keys, :list_api_keys + alias_method :add_user_key, :add_api_key + alias_method :update_user_key, :update_api_key + alias_method :delete_user_key, :delete_api_key + + private + + def check_array(object) + raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array) + end + + def check_object(object, in_array = false) + case object + when Array + raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array') + when String, Integer, Float, TrueClass, FalseClass, NilClass + raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}") + else + # ok + end + end + + def get_objectID(object, objectID = nil) + check_object(object) + objectID ||= object[:objectID] || object['objectID'] + raise ArgumentError.new("Missing 'objectID'") if objectID.nil? 
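+      # at this point objectID is either the explicit argument or the key
+      # carried by the object itself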
+ return objectID + end + + def build_batch(action, objects, with_object_id = false) + check_array(objects) + { + :requests => objects.map { |object| + check_object(object, true) + h = { :action => action, :body => object } + h[:objectID] = get_objectID(object).to_s if with_object_id + h + } + } + end + + def sanitized_delete_by_query_params(params) + params ||= {} + params.delete(:hitsPerPage) + params.delete('hitsPerPage') + params.delete(:attributesToRetrieve) + params.delete('attributesToRetrieve') + params + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb new file mode 100644 index 0000000..a3d5883 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/insights.rb @@ -0,0 +1,131 @@ +module Algolia + + class Insights + MIN_RUBY_VERSION = '1.9.0' + + def initialize(app_id, api_key, region = 'us', params = {}) + headers = params[:headers] || {} + @app_id = app_id + @api_key = api_key + @url = "https://insights.#{region}.algolia.io" + @headers = headers.merge({ + Protocol::HEADER_APP_ID => app_id, + Protocol::HEADER_API_KEY => api_key, + 'Content-Type' => 'application/json; charset=utf-8', + 'User-Agent' => ["Algolia for Ruby (#{::Algolia::VERSION})", "Ruby (#{RUBY_VERSION})"].join('; ') + }) + end + + def user(user_token) + UserInsights.new(self, user_token) + end + + def send_event(event) + send_events([event]) + end + + def send_events(events) + perform_request(:POST, '/1/events', {}, { 'events' => events }.to_json) + end + + private + + def perform_request(method, path, params = {}, data = {}) + http = HTTPClient.new + + url = @url + path + + encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }] + url << "?" 
+ Protocol.to_query(encoded_params) + + response = case method + when :POST + http.post(url, { :body => data, :header => @headers }) + end + + if response.code / 100 != 2 + raise AlgoliaProtocolError.new(response.code, "Cannot #{method} to #{url}: #{response.content}") + end + + JSON.parse(response.content) + end + end + + class UserInsights + def initialize(insights, user_token) + @insights = insights + @user_token = user_token + end + + def clicked_object_ids(event_name, index_name, object_ids, request_options = {}) + clicked({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def clicked_object_ids_after_search(event_name, index_name, object_ids, positions, query_id, request_options = {}) + clicked({ + 'objectIDs' => object_ids, + 'positions' => positions, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def clicked_filters(event_name, index_name, filters, request_options = {}) + clicked({ 'filters' => filters }, event_name, index_name, request_options) + end + + def converted_object_ids(event_name, index_name, object_ids, request_options = {}) + converted({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def converted_object_ids_after_search(event_name, index_name, object_ids, query_id, request_options = {}) + converted({ + 'objectIDs' => object_ids, + 'queryID' => query_id, + }, event_name, index_name, request_options) + end + + def converted_filters(event_name, index_name, filters, request_options = {}) + converted({ 'filters' => filters }, event_name, index_name, request_options) + end + + def viewed_object_ids(event_name, index_name, object_ids, request_options = {}) + viewed({ 'objectIDs' => object_ids }, event_name, index_name, request_options) + end + + def viewed_filters(event_name, index_name, filters, request_options = {}) + viewed({ 'filters' => filters }, event_name, index_name, request_options) + end + + private + + def clicked(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'click', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def converted(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'conversion', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def viewed(event, event_name, index_name, request_options = {}) + send_event(event.merge({ + 'eventType' => 'view', + 'eventName' => event_name, + 'index' => index_name, + })) + end + + def send_event(event) + @insights.send_event(event.merge({ 'userToken' => @user_token})) + end + + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb new file mode 100644 index 0000000..849ee17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/protocol.rb @@ -0,0 +1,211 @@ +require 'cgi' + +module Algolia + # A module which encapsulates the specifics of Algolia's REST API. + module Protocol + + # Basics + + # The version of the REST API implemented by this module. + VERSION = 1 + + # HTTP Headers + # ---------------------------------------- + + # The HTTP header used for passing your application ID to the Algolia API. + HEADER_APP_ID = "X-Algolia-Application-Id" + + # The HTTP header used for passing your API key to the Algolia API. 
+ HEADER_API_KEY = "X-Algolia-API-Key" + HEADER_FORWARDED_IP = "X-Forwarded-For" + HEADER_FORWARDED_API_KEY = "X-Forwarded-API-Key" + + # HTTP ERROR CODES + # ---------------------------------------- + + ERROR_BAD_REQUEST = 400 + ERROR_FORBIDDEN = 403 + ERROR_NOT_FOUND = 404 + + # URI Helpers + # ---------------------------------------- + + # Construct a uri to list available indexes + def Protocol.indexes_uri + "/#{VERSION}/indexes" + end + + def Protocol.multiple_queries_uri(strategy = 'none') + "/#{VERSION}/indexes/*/queries?strategy=#{strategy}" + end + + def Protocol.objects_uri + "/#{VERSION}/indexes/*/objects" + end + + # Construct a uri referencing a given Algolia index + def Protocol.index_uri(index) + "/#{VERSION}/indexes/#{CGI.escape(index)}" + end + + def Protocol.batch_uri(index = nil) + "#{index.nil? ? "/#{VERSION}/indexes/*" : index_uri(index)}/batch" + end + + def Protocol.index_operation_uri(index) + "#{index_uri(index)}/operation" + end + + def Protocol.task_uri(index, task_id) + "#{index_uri(index)}/task/#{task_id}" + end + + def Protocol.object_uri(index, object_id, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}#{params}" + end + + def Protocol.search_uri(index, query, params = {}) + params = params.nil? || params.size == 0 ? '' : "&#{to_query(params)}" + "#{index_uri(index)}?query=#{CGI.escape(query)}&#{params}" + end + + def Protocol.search_post_uri(index) + "#{index_uri(index)}/query" + end + + def Protocol.browse_uri(index, params = {}) + params = params.nil? || params.size == 0 ? '' : "?#{to_query(params)}" + "#{index_uri(index)}/browse#{params}" + end + + def Protocol.search_facet_uri(index, facet) + "#{index_uri(index)}/facets/#{CGI.escape(facet)}/query" + end + + def Protocol.partial_object_uri(index, object_id, create_if_not_exits = true) + params = create_if_not_exits ? '' : '?createIfNotExists=false' + "#{index_uri(index)}/#{CGI.escape(object_id.to_s)}/partial#{params}" + end + + def Protocol.settings_uri(index, params = {}) + params = params.nil? || params.size == 0 ? 
'' : "?#{to_query(params)}" + "#{index_uri(index)}/settings#{params}" + end + + def Protocol.clear_uri(index) + "#{index_uri(index)}/clear" + end + + def Protocol.logs(offset, length, type) + "/#{VERSION}/logs?offset=#{offset}&length=#{length}&type=#{type}" + end + + def Protocol.keys_uri + "/#{VERSION}/keys" + end + + def Protocol.key_uri(key) + "/#{VERSION}/keys/#{key}" + end + + def Protocol.restore_key_uri(key) + "/#{VERSION}/keys/#{key}/restore" + end + + def Protocol.index_key_uri(index, key) + "#{index_uri(index)}/keys/#{key}" + end + + def Protocol.index_keys_uri(index) + "#{index_uri(index)}/keys" + end + + def Protocol.to_query(params) + params.map do |k, v| + "#{CGI.escape(k.to_s)}=#{CGI.escape(v.to_s)}" + end.join('&') + end + + def Protocol.synonyms_uri(index) + "#{index_uri(index)}/synonyms" + end + + def Protocol.synonym_uri(index, object_id) + "#{synonyms_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_synonyms_uri(index) + "#{synonyms_uri(index)}/search" + end + + def Protocol.clear_synonyms_uri(index) + "#{synonyms_uri(index)}/clear" + end + + def Protocol.batch_synonyms_uri(index) + "#{synonyms_uri(index)}/batch" + end + + def Protocol.rules_uri(index) + "#{index_uri(index)}/rules" + end + + def Protocol.rule_uri(index, object_id) + "#{rules_uri(index)}/#{CGI.escape(object_id.to_s)}" + end + + def Protocol.search_rules_uri(index) + "#{rules_uri(index)}/search" + end + + def Protocol.clear_rules_uri(index) + "#{rules_uri(index)}/clear" + end + + def Protocol.batch_rules_uri(index) + "#{rules_uri(index)}/batch" + end + + def Protocol.delete_by_uri(index) + "#{index_uri(index)}/deleteByQuery" + end + + def Protocol.personalization_strategy_uri + "/1/recommendation/personalization/strategy" + end + + def Protocol.clusters_uri + "/#{VERSION}/clusters" + end + + def Protocol.cluster_mapping_uri(user_id = nil) + user_id = "/#{CGI.escape(user_id)}" if user_id + + "/#{VERSION}/clusters/mapping" + user_id.to_s + end + + def Protocol.list_ids_uri(page, hits_per_page) + Protocol.cluster_mapping_uri+"?page=#{CGI.escape(page.to_s)}&hitsPerPage=#{CGI.escape(hits_per_page.to_s)}" + end + + def Protocol.cluster_top_user_uri + "/#{VERSION}/clusters/mapping/top" + end + + def Protocol.search_user_id_uri + "/#{VERSION}/clusters/mapping/search" + end + + def Protocol.ab_tests_uri(ab_test = nil) + ab_test = "/#{ab_test}" if ab_test + + "/2/abtests" + ab_test.to_s + end + + def Protocol.ab_tests_stop_uri(ab_test) + "/2/abtests/#{ab_test}/stop" + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb new file mode 100644 index 0000000..3926b62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/version.rb @@ -0,0 +1,3 @@ +module Algolia + VERSION = "1.27.5" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb new file mode 100644 index 0000000..e3d112f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algolia/webmock.rb @@ -0,0 +1,54 @@ +begin + require 'webmock' +rescue LoadError + puts 'WebMock was not found, please add "gem \'webmock\'" to your Gemfile.' + exit 1 +end + +module Algolia + class WebMock + def self.mock! 
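+      # every stub below returns canned JSON so the test suite never performs
+      # a live HTTP call; taskID 42 stands in for a real task id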
+ # list indexes + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/).to_return(:body => '{ "items": [] }') + # query index + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + # delete index + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # clear index + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/clear/).to_return(:body => '{ "taskID": 42 }') + # add object + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # save object + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # partial update + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+\/partial/).to_return(:body => '{ "taskID": 42 }') + # get object + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{}') + # delete object + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/[^\/]+/).to_return(:body => '{ "taskID": 42 }') + # batch + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/batch/).to_return(:body => '{ "taskID": 42 }') + # settings + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{}') + ::WebMock.stub_request(:put, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/settings/).to_return(:body => '{ "taskID": 42 }') + # browse + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/browse/).to_return(:body => '{ "hits": [] }') + # operations + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/operation/).to_return(:body => '{ "taskID": 42 }') + # tasks + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/task\/[^\/]+/).to_return(:body => '{ "status": "published" }') + # index keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/keys/).to_return(:body => '{ "keys": [] }') + # global keys + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys/).to_return(:body => '{ "keys": [] }') + ::WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + ::WebMock.stub_request(:delete, /.*\.algolia(net\.com|\.net)\/1\/keys\/[^\/]+/).to_return(:body => '{ }') + # query POST + ::WebMock.stub_request(:post, /.*\.algolia(net\.com|\.net)\/1\/indexes\/[^\/]+\/query/).to_return(:body => '{ "hits": [ { "objectID": 42 } ], "page": 1, "hitsPerPage": 1, "nbHits": 1, "nbPages": 1 }') + end + end +end + +Algolia::WebMock.mock! 
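+
+# Typical usage in a spec helper (illustrative; requires the webmock gem):
+#   require 'algolia/webmock'   # defines the stubs above and installs them
+#   Algolia::WebMock.mock!      # call again to re-arm the stubs after WebMock.reset!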
diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb new file mode 100644 index 0000000..f000b23 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/lib/algoliasearch.rb @@ -0,0 +1,26 @@ +## ---------------------------------------------------------------------- +## +## Ruby client for algolia.com +## A quick library for playing with algolia.com's REST API for object storage. +## Thanks to Sylvain Utard for the initial version of the library +## ---------------------------------------------------------------------- +require 'json' +if !defined?(RUBY_ENGINE) && defined?(RUBY_VERSION) && RUBY_VERSION == '1.8.7' + # work-around a limitation from nahi/httpclient, using the undefined RUBY_ENGINE constant + RUBY_ENGINE = 'ruby1.8' + require 'httpclient' + Object.send(:remove_const, :RUBY_ENGINE) +else + require 'httpclient' +end +require 'date' +require 'cgi' +require 'pathname' + +cwd = Pathname(__FILE__).dirname +$:.unshift(cwd.to_s) unless $:.include?(cwd.to_s) || $:.include?(cwd.expand_path.to_s) + +require 'algolia/index' +require 'algolia/analytics' +require 'algolia/insights' +require 'algolia/account_client' diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt new file mode 100644 index 0000000..52e0c4e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/resources/ca-bundle.crt @@ -0,0 +1,3908 @@ +## +## ca-bundle.crt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Sat Dec 29 20:03:40 2012 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1 +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. 
+## + +# @(#) $RCSfile: certdata.txt,v $ $Revision: 1.87 $ $Date: 2012/12/29 16:32:45 $ + +USERTrust RSA root CA +===================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE 
+AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Digital Signature Trust Co. 
Global CA 1 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENnAVljANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMTAeFw05ODEy +MTAxODEwMjNaFw0xODEyMTAxODQwMjNaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUxMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQCgbIGpzzQeJN3+hijM3oMv+V7UQtLodGBmE5gGHKlREmlvMVW5SXIACH7TpWJE +NySZj9mDSI+ZbZUTu0M7LklOiDfBu1h//uG9+LthzfNHwJmm8fOR6Hh8AMthyUQncWlVSn5JTe2i +o74CTADKAqjuAQIxZA9SLRN0dja1erQtcQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTExDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMTAxODEwMjNagQ8yMDE4MTIxMDE4MTAyM1owCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFGp5fpFpRhgTCgJ3pVlbYJglDqL4MB0GA1UdDgQWBBRqeX6RaUYYEwoCd6VZW2CYJQ6i+DAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +ACIS2Hod3IEGtgllsofIH160L+nEHvI8wbsEkBFKg05+k7lNQseSJqBcNJo4cvj9axY+IO6CizEq +kzaFI4iKPANo08kJD038bKTaKHKTDomAsH3+gG9lbRgzl4vCa4nuYD3Im+9/KzJic5PLPON74nZ4 +RbyhkwS7hp86W0N6w4pl +-----END CERTIFICATE----- + +Digital Signature Trust Co. Global CA 3 +======================================= +-----BEGIN CERTIFICATE----- +MIIDKTCCApKgAwIBAgIENm7TzjANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEkMCIGA1UE +ChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMREwDwYDVQQLEwhEU1RDQSBFMjAeFw05ODEy +MDkxOTE3MjZaFw0xODEyMDkxOTQ3MjZaMEYxCzAJBgNVBAYTAlVTMSQwIgYDVQQKExtEaWdpdGFs +IFNpZ25hdHVyZSBUcnVzdCBDby4xETAPBgNVBAsTCERTVENBIEUyMIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQC/k48Xku8zExjrEH9OFr//Bo8qhbxe+SSmJIi2A7fBw18DW9Fvrn5C6mYjuGOD +VvsoLeE4i7TuqAHhzhy2iCoiRoX7n6dwqUcUP87eZfCocfdPJmyMvMa1795JJ/9IKn3oTQPMx7JS +xhcxEzu1TdvIxPbDDyQq2gyd55FbgM2UnQIBA6OCASQwggEgMBEGCWCGSAGG+EIBAQQEAwIABzBo +BgNVHR8EYTBfMF2gW6BZpFcwVTELMAkGA1UEBhMCVVMxJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0 +dXJlIFRydXN0IENvLjERMA8GA1UECxMIRFNUQ0EgRTIxDTALBgNVBAMTBENSTDEwKwYDVR0QBCQw +IoAPMTk5ODEyMDkxOTE3MjZagQ8yMDE4MTIwOTE5MTcyNlowCwYDVR0PBAQDAgEGMB8GA1UdIwQY +MBaAFB6CTShlgDzJQW6sNS5ay97u+DlbMB0GA1UdDgQWBBQegk0oZYA8yUFurDUuWsve7vg5WzAM +BgNVHRMEBTADAQH/MBkGCSqGSIb2fQdBAAQMMAobBFY0LjADAgSQMA0GCSqGSIb3DQEBBQUAA4GB +AEeNg61i8tuwnkUiBbmi1gMOOHLnnvx75pO2mqWilMg0HZHRxdf0CiUPPXiBng+xZ8SQTGPdXqfi +up/1902lMXucKS1M/mQ+7LZT/uqb7YLbdHVLB3luHtgZg3Pe9T7Qtd7nS2h9Qy4qIOF+oHhEngj1 +mPnHfxsb1gYgAlihw6ID +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G2 
+============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEEzH6qqYPnHTkxD4PTqJkZIwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMSBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCq0Lq+Fi24g9TK0g+8djHKlNgd +k4xWArzZbxpvUjZudVYKVdPfQ4chEWWKfo+9Id5rMj8bhDSVBZ1BNeuS65bdqlk/AVNtmU/t5eIq +WpDBucSmFc/IReumXY6cPvBkJHalzasab7bYe1FhbqZ/h8jit+U03EGI6glAvnOSPWvndQIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAKlPww3HZ74sy9mozS11534Vnjty637rXC0Jh9ZrbWB85a7FkCMM +XErQr7Fd88e2CtvgFZMN3QO8x3aKtd1Pw5sTdbgBwObJW2uluIncrKTdcu1OofdPvAbT6shkdHvC +lUGcZXNY8ZCaPGqxmMnEh7zPRW1F4m4iP/68DzFc6PLZ +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAzCCAmwCEQC5L2DMiJ+hekYJuFtwbIqvMA0GCSqGSIb3DQEBBQUAMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazAeFw05ODA1MTgwMDAwMDBaFw0yODA4MDEyMzU5NTlaMIHBMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xPDA6BgNVBAsTM0NsYXNzIDIgUHVibGljIFByaW1h +cnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjE6MDgGA1UECxMxKGMpIDE5OTggVmVyaVNp +Z24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAp4gBIXQs5xoD8JjhlzwPIQjx +nNuX6Zr8wgQGE75fUsjMHiwSViy4AWkszJkfrbCWrnkE8hM5wXuYuggs6MKEEyyqaekJ9MepAqRC +wiNPStjwDqL7MWzJ5m+ZJwf15vRMeJ5t60aG+rmGyVTyssSv1EYcWskVMP8NbPUtDm3Of3cCAwEA +ATANBgkqhkiG9w0BAQUFAAOBgQByLvl/0fFx+8Se9sVeUYpAmLho+Jscg9jinb3/7aHmZuovCfTK +1+qlK5X2JGCGTUQug6XELaDTrnhpb3LabK4I8GOSN+a7xDAXrXfMSTWqz9iP0b63GJZHc2pUIjRk +LbYWm1lbtFFZOrMLFPQS32eg9K0yZF6xRnInjBJ7xUS0rg== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT 
+1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 
+LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDEgUHVibGljIFByaW1hcnkg 
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAN2E1Lm0+afY8wR4nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/E +bRrsC+MO8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjVojYJ +rKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjbPG7PoBMAGrgnoeS+ +Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP26KbqxzcSXKMpHgLZ2x87tNcPVkeB +FQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vrn5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +q2aN17O6x5q25lXQBfGfMY1aqtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/N +y9Sn2WCVhDr4wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrspSCAaWihT37h +a88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4E1Z5T21Q6huwtVexN2ZYI/Pc +D98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- + +Verisign Class 2 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1c3QgTmV0d29y +azE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ug +b25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNpZ24gVHJ1 +c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIEluYy4gLSBGb3IgYXV0aG9y +aXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArwoNwtUs22e5LeWUJ92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6 +tW8UvxDOJxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUYwZF7 +C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9okoqQHgiBVrKtaaNS +0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjNqWm6o+sdDZykIKbBoMXRRkwXbdKs +Zj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/ESrg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0 +JhU8wI1NQ0kdvekhktdmnLfexbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf +0xwLRtxyID+u7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RIsH/7NiXaldDx +JBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTPcjnhsUPgKM+351psE2tJs//j +GHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc 
+cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl 
+cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0xOTEyMjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo3QwcjARBglghkgBhvhC +AQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGAvtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdER +gL7YibkIozH5oSQJFrlwMB0GCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0B +AQUFAAOCAQEAWUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQh7A6tcOdBTcS +o8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18f3v/rxzP5tsHrV7bhZ3QKw0z +2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfNB/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjX +OP/swNlQ8C5LWK5Gb9Auw2DaclVyvUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp 
+-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 2 +============================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEXMBUGA1UE +ChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0y +MB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoT +DkVxdWlmYXggU2VjdXJlMSYwJAYDVQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn +2Z0GvxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/BPO3QSQ5 +BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0CAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUx +JjAkBgNVBAsTHUVxdWlmYXggU2VjdXJlIGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTkwNjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9e +uSBIplBqy/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAAyGgq3oThr1 +jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia +78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUm +V+GRMOrN +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU 
+cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 
+KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E 
+FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t 
+JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + 
+UTN-USER First-Network Applications +=================================== +-----BEGIN CERTIFICATE----- +MIIEZDCCA0ygAwIBAgIQRL4Mi1AAJLQR0zYwS8AzdzANBgkqhkiG9w0BAQUFADCBozELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzAp +BgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBBcHBsaWNhdGlvbnMwHhcNOTkwNzA5MTg0ODM5 +WhcNMTkwNzA5MTg1NzQ5WjCBozELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5T +YWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xKzApBgNVBAMTIlVUTi1VU0VSRmlyc3QtTmV0d29yayBB +cHBsaWNhdGlvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCz+5Gh5DZVhawGNFug +mliy+LUPBXeDrjKxdpJo7CNKyXY/45y2N3kDuatpjQclthln5LAbGHNhSuh+zdMvZOOmfAz6F4Cj +DUeJT1FxL+78P/m4FoCHiZMlIJpDgmkkdihZNaEdwH+DBmQWICzTSaSFtMBhf1EI+GgVkYDLpdXu +Ozr0hAReYFmnjDRy7rh4xdE7EkpvfmUnuaRVxblvQ6TFHSyZwFKkeEwVs0CYCGtDxgGwenv1axwi +P8vv/6jQOkt2FZ7S0cYu49tXGzKiuG/ohqY/cKvlcJKrRB5AUPuco2LkbG6gyN7igEL66S/ozjIE +j3yNtxyjNTwV3Z7DrpelAgMBAAGjgZEwgY4wCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8w +HQYDVR0OBBYEFPqGydvguul49Uuo1hXf8NPhahQ8ME8GA1UdHwRIMEYwRKBCoECGPmh0dHA6Ly9j +cmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LU5ldHdvcmtBcHBsaWNhdGlvbnMuY3JsMA0G +CSqGSIb3DQEBBQUAA4IBAQCk8yXM0dSRgyLQzDKrm5ZONJFUICU0YV8qAhXhi6r/fWRRzwr/vH3Y +IWp4yy9Rb/hCHTO967V7lMPDqaAt39EpHx3+jz+7qEUqf9FuVSTiuwL7MT++6LzsQCv4AdRWOOTK +RIK1YSAhZ2X28AvnNPilwpyjXEAfhZOVBt5P1CeptqX8Fs1zMT+4ZSfP1FMa8Kxun08FDAOBp4Qp +xFq9ZFdyrTvPNximmMatBrTcCKME1SmklpoSZ0qMYEWd8SOasACcaLWYUNPvji6SZbFIPiG+FTAq +DbUMo2s/rn9X9R+WfN9v3YIwLGUbQErNaLly7HF27FSOH4UMAWr6pjisH8SE +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg 
+T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL 
+kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm 
+aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu 
+BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp 
+p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 1 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBJDANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MxIENBMB4XDTAxMDQwNjEwNDkxM1oXDTIxMDQw +NjEwNDkxM1owOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALWJHytPZwp5/8Ue+H88 +7dF+2rDNbS82rDTG29lkFwhjMDMiikzujrsPDUJVyZ0upe/3p4zDq7mXy47vPxVnqIJyY1MPQYx9 +EJUkoVqlBvqSV536pQHydekfvFYmUk54GWVYVQNYwBSujHxVX3BbdyMGNpfzJLWaRpXk3w0LBUXl 
+0fIdgrvGE+D+qnr9aTCU89JFhfzyMlsy3uhsXR/LpCJ0sICOXZT3BgBLqdReLjVQCfOAl/QMF645 +2F/NM8EcyonCIvdFEu1eEpOdY6uCLrnrQkFEy0oaAIINnvmLVz5MxxftLItyM19yejhW1ebZrgUa +HXVFsculJRwSVzb9IjcCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQIR+IMi/ZT +iFIwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCLGrLJXWG04bkruVPRsoWdd44W7hE9 +28Jj2VuXZfsSZ9gqXLar5V7DtxYvyOirHYr9qxp81V9jz9yw3Xe5qObSIjiHBxTZ/75Wtf0HDjxV +yhbMp6Z3N/vbXB9OWQaHowND9Rart4S9Tu+fMTfwRvFAttEMpWT4Y14h21VOTzF2nBBhjrZTOqMR +vq9tfB69ri3iDGnHhVNoomG6xT60eVR4ngrHAr5i0RGCS2UvkVrCqIexVmiUefkl98HVrhq4uz2P +qYo4Ffdz0Fpg0YCw8NzVUM1O7pJIae2yIx4wzMiUyLb1O4Z/P6Yun/Y+LLWSlj7fLJOK/4GMDw9Z +IRlXvVWa +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx 
+NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +TDC OCES Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFGTCCBAGgAwIBAgIEPki9xDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJESzEMMAoGA1UE +ChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTAeFw0wMzAyMTEwODM5MzBaFw0zNzAyMTEwOTA5 +MzBaMDExCzAJBgNVBAYTAkRLMQwwCgYDVQQKEwNUREMxFDASBgNVBAMTC1REQyBPQ0VTIENBMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGL2YSCyz8DGhdfjeebM7fI5kqSXLmSjhFuH +nEz9pPPEXyG9VhDr2y5h7JNp46PMvZnDBfwGuMo2HP6QjklMxFaaL1a8z3sM8W9Hpg1DTeLpHTk0 +zY0s2RKY+ePhwUp8hjjEqcRhiNJerxomTdXkoCJHhNlktxmW/OwZ5LKXJk5KTMuPJItUGBxIYXvV +iGjaXbXqzRowwYCDdlCqT9HU3Tjw7xb04QxQBr/q+3pJoSgrHPb8FTKjdGqPqcNiKXEx5TukYBde +dObaE+3pHx8b0bJoc8YQNHVGEBDjkAB2QMuLt0MJIf+rTpPGWOmlgtt3xDqZsXKVSQTwtyv6e1mO +3QIDAQABo4ICNzCCAjMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgewGA1UdIASB +5DCB4TCB3gYIKoFQgSkBAQEwgdEwLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuY2VydGlmaWthdC5k +ay9yZXBvc2l0b3J5MIGdBggrBgEFBQcCAjCBkDAKFgNUREMwAwIBARqBgUNlcnRpZmlrYXRlciBm +cmEgZGVubmUgQ0EgdWRzdGVkZXMgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4xLiBDZXJ0aWZp +Y2F0ZXMgZnJvbSB0aGlzIENBIGFyZSBpc3N1ZWQgdW5kZXIgT0lEIDEuMi4yMDguMTY5LjEuMS4x +LjARBglghkgBhvhCAQEEBAMCAAcwgYEGA1UdHwR6MHgwSKBGoESkQjBAMQswCQYDVQQGEwJESzEM +MAoGA1UEChMDVERDMRQwEgYDVQQDEwtUREMgT0NFUyBDQTENMAsGA1UEAxMEQ1JMMTAsoCqgKIYm +aHR0cDovL2NybC5vY2VzLmNlcnRpZmlrYXQuZGsvb2Nlcy5jcmwwKwYDVR0QBCQwIoAPMjAwMzAy +MTEwODM5MzBagQ8yMDM3MDIxMTA5MDkzMFowHwYDVR0jBBgwFoAUYLWF7FZkfhIZJ2cdUBVLc647 ++RIwHQYDVR0OBBYEFGC1hexWZH4SGSdnHVAVS3OuO/kSMB0GCSqGSIb2fQdBAAQQMA4bCFY2LjA6 +NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEACromJkbTc6gJ82sLMJn9iuFXehHTuJTXCRBuo7E4 +A9G28kNBKWKnctj7fAXmMXAnVBhOinxO5dHKjHiIzxvTkIvmI/gLDjNDfZziChmPyQE+dF10yYsc +A+UYyAFMP8uXBV2YcaaYb7Z8vTd/vuGTJW1v8AqtFxjhA7wHKcitJuj4YfD9IQl+mo6paH1IYnK9 +AOoBmbgGglGBTvH1tJFUuSN6AJqfXY3gPGS5GhKSKseCRHI53OI8xthV9RVOyAUO28bQYqbsFbS1 +AoLbrIyigfCbmTH1ICCoiGEKB5+U/NDXG8wuF/MEJ3Zn61SD/aSQfgY9BKNDLdr8C2LqL19iUw== +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ 
+BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Email Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0 +BgNVBAMTLVVUTi1VU0VSRmlyc3QtQ2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05 +OTA3MDkxNzI4NTBaFw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQx +FzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsx +ITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UEAxMtVVROLVVTRVJGaXJz +dC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWlsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3BYHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIx +B8dOtINknS4p1aJkxIW9hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8 +om+rWV6lL8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLmSGHG +TPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM1tZUOt4KpLoDd7Nl +yP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws6wIDAQABo4G5MIG2MAsGA1UdDwQE +AwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNV +HR8EUTBPME2gS6BJhkdodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGll +bnRBdXRoZW50aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u7mFVbwQ+zzne +xRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0xtcgBEXkzYABurorbs6q15L+ +5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQrfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarV +NZ1yQAOJujEdxRBoUp7fooXFXAimeOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZ +w7JHpsIyYdfHb0gkUSeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx 
+OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +UTN USERFirst Object Root CA +============================ +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAb +BgNVBAMTFFVUTi1VU0VSRmlyc3QtT2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAz +NlowgZUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkx +HjAcBgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3dy51c2Vy +dHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicPHxzfOpuCaDDASmEd8S8O+r5596Uj71VR +loTN2+O5bj4x2AogZ8f02b+U60cEPgLOKqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQ +w5ujm9M89RKZd7G3CeBo5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vu +lBe3/IW+pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehbkkj7 +RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUCAwEAAaOBrzCBrDAL +BgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU2u1kdBScFDyr3ZmpvVsoTYs8 +ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtT2JqZWN0LmNybDApBgNVHSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQw +DQYJKoZIhvcNAQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXBmMiKVl0+7kNO +PmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU4U3GDZlDAQ0Slox4nb9QorFE +qmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK581OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCG +hU3IfdeLA/5u1fedFqySLKAj5ZyRUh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC 
+AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Qualified (Class QA) Root +================================= +-----BEGIN CERTIFICATE----- +MIIG0TCCBbmgAwIBAgIBezANBgkqhkiG9w0BAQUFADCByTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMUIwQAYDVQQDEzlOZXRMb2NrIE1pbm9zaXRldHQgS296amVn +eXpvaSAoQ2xhc3MgUUEpIFRhbnVzaXR2YW55a2lhZG8xHjAcBgkqhkiG9w0BCQEWD2luZm9AbmV0 +bG9jay5odTAeFw0wMzAzMzAwMTQ3MTFaFw0yMjEyMTUwMTQ3MTFaMIHJMQswCQYDVQQGEwJIVTER 
+MA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRvbnNhZ2kgS2Z0 +LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxQjBABgNVBAMTOU5ldExvY2sgTWlub3NpdGV0 +dCBLb3pqZWd5em9pIChDbGFzcyBRQSkgVGFudXNpdHZhbnlraWFkbzEeMBwGCSqGSIb3DQEJARYP +aW5mb0BuZXRsb2NrLmh1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1Ilstg91IRV +CacbvWy5FPSKAtt2/GoqeKvld/Bu4IwjZ9ulZJm53QE+b+8tmjwi8F3JV6BVQX/yQ15YglMxZc4e +8ia6AFQer7C8HORSjKAyr7c3sVNnaHRnUPYtLmTeriZ539+Zhqurf4XsoPuAzPS4DB6TRWO53Lhb +m+1bOdRfYrCnjnxmOCyqsQhjF2d9zL2z8cM/z1A57dEZgxXbhxInlrfa6uWdvLrqOU+L73Sa58XQ +0uqGURzk/mQIKAR5BevKxXEOC++r6uwSEaEYBTJp0QwsGj0lmT+1fMptsK6ZmfoIYOcZwvK9UdPM +0wKswREMgM6r3JSda6M5UzrWhQIDAMV9o4ICwDCCArwwEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAQYwggJ1BglghkgBhvhCAQ0EggJmFoICYkZJR1lFTEVNISBFemVuIHRhbnVzaXR2 +YW55IGEgTmV0TG9jayBLZnQuIE1pbm9zaXRldHQgU3pvbGdhbHRhdGFzaSBTemFiYWx5emF0YWJh +biBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBBIG1pbm9zaXRldHQgZWxla3Ryb25p +a3VzIGFsYWlyYXMgam9naGF0YXMgZXJ2ZW55ZXN1bGVzZW5laywgdmFsYW1pbnQgZWxmb2dhZGFz +YW5hayBmZWx0ZXRlbGUgYSBNaW5vc2l0ZXR0IFN6b2xnYWx0YXRhc2kgU3phYmFseXphdGJhbiwg +YXogQWx0YWxhbm9zIFN6ZXJ6b2Rlc2kgRmVsdGV0ZWxla2JlbiBlbG9pcnQgZWxsZW5vcnplc2kg +ZWxqYXJhcyBtZWd0ZXRlbGUuIEEgZG9rdW1lbnR1bW9rIG1lZ3RhbGFsaGF0b2sgYSBodHRwczov +L3d3dy5uZXRsb2NrLmh1L2RvY3MvIGNpbWVuIHZhZ3kga2VyaGV0b2sgYXogaW5mb0BuZXRsb2Nr +Lm5ldCBlLW1haWwgY2ltZW4uIFdBUk5JTkchIFRoZSBpc3N1YW5jZSBhbmQgdGhlIHVzZSBvZiB0 +aGlzIGNlcnRpZmljYXRlIGFyZSBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIFF1YWxpZmllZCBDUFMg +YXZhaWxhYmxlIGF0IGh0dHBzOi8vd3d3Lm5ldGxvY2suaHUvZG9jcy8gb3IgYnkgZS1tYWlsIGF0 +IGluZm9AbmV0bG9jay5uZXQwHQYDVR0OBBYEFAlqYhaSsFq7VQ7LdTI6MuWyIckoMA0GCSqGSIb3 +DQEBBQUAA4IBAQCRalCc23iBmz+LQuM7/KbD7kPgz/PigDVJRXYC4uMvBcXxKufAQTPGtpvQMznN +wNuhrWw3AkxYQTvyl5LGSKjN5Yo5iWH5Upfpvfb5lHTocQ68d4bDBsxafEp+NFAwLvt/MpqNPfMg +W/hqyobzMUwsWYACff44yTB1HLdV47yfuqhthCgFdbOLDcCRVCHnpgu0mfVRQdzNo0ci2ccBgcTc +R08m6h/t280NmPSjnLRzMkqWmf68f8glWPhY83ZmiVSkpj7EUFy6iRiCdUgh0k8T6GB+B3bbELVR +5qq5aKrN9p2QdRLqOBrKROi3macqaJVmlaut74nLYKkGEsaUR+ko +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu +IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 
+ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB 
+jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 
+qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ 
+untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk 
+7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Firmaprofesional Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEVzCCAz+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBnTELMAkGA1UEBhMCRVMxIjAgBgNVBAcT +GUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMTOUF1dG9yaWRhZCBkZSBDZXJ0aWZp +Y2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODEmMCQGCSqGSIb3DQEJARYXY2FA +ZmlybWFwcm9mZXNpb25hbC5jb20wHhcNMDExMDI0MjIwMDAwWhcNMTMxMDI0MjIwMDAwWjCBnTEL +MAkGA1UEBhMCRVMxIjAgBgNVBAcTGUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMT +OUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2 +ODEmMCQGCSqGSIb3DQEJARYXY2FAZmlybWFwcm9mZXNpb25hbC5jb20wggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDnIwNvbyOlXnjOlSztlB5uCp4Bx+ow0Syd3Tfom5h5VtP8c9/Qit5V +j1H5WuretXDE7aTt/6MNbg9kUDGvASdYrv5sp0ovFy3Tc9UTHI9ZpTQsHVQERc1ouKDAA6XPhUJH +lShbz++AbOCQl4oBPB3zhxAwJkh91/zpnZFx/0GaqUC1N5wpIE8fUuOgfRNtVLcK3ulqTgesrBlf +3H5idPayBQC6haD9HThuy1q7hryUZzM1gywfI834yJFxzJeL764P3CkDG8A563DtwW4O2GcLiam8 +NeTvtjS0pbbELaW+0MOUJEjb35bTALVmGotmBQ/dPz/LP6pemkr4tErvlTcbAgMBAAGjgZ8wgZww +KgYDVR0RBCMwIYYfaHR0cDovL3d3dy5maXJtYXByb2Zlc2lvbmFsLmNvbTASBgNVHRMBAf8ECDAG +AQH/AgEBMCsGA1UdEAQkMCKADzIwMDExMDI0MjIwMDAwWoEPMjAxMzEwMjQyMjAwMDBaMA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUMwugZtHq2s7eYpMEKFK1FH84aLcwDQYJKoZIhvcNAQEFBQAD +ggEBAEdz/o0nVPD11HecJ3lXV7cVVuzH2Fi3AQL0M+2TUIiefEaxvT8Ub/GzR0iLjJcG1+p+o1wq +u00vR+L4OQbJnC4xGgN49Lw4xiKLMzHwFgQEffl25EvXwOaD7FnMP97/T2u3Z36mhoEyIwOdyPdf +wUpgpZKpsaSgYMN4h7Mi8yrrW6ntBas3D7Hi05V2Y1Z0jFhyGzflZKG+TQyTmAyX9odtsz/ny4Cm +7YjHX1BiAuiZdBbQ5rQ58SfLyEDW44YQqSMSkuBpQWOnryULwMWSyx6Yo1q6xTMPoJcB3X/ge9YG +VM+h4k0460tQtcsm9MracEpqoeJ5quGnM/b9Sh/22WA= +-----END CERTIFICATE----- + +Wells Fargo Root CA +=================== +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIEOeSXnjANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC1dlbGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTEvMC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDAxMDExMTY0MTI4WhcNMjEwMTE0MTY0MTI4WjCBgjELMAkGA1UEBhMCVVMxFDASBgNVBAoTC1dl +bGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEv +MC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVqDM7Jvk0/82bfuUER84A4n135zHCLielTWi5MbqNQ1mX +x3Oqfz1cQJ4F5aHiidlMuD+b+Qy0yGIZLEWukR5zcUHESxP9cMIlrCL1dQu3U+SlK93OvRw6esP3 +E48mVJwWa2uv+9iWsWCaSOAlIiR5NM4OJgALTqv9i86C1y8IcGjBqAr5dE8Hq6T54oN+J3N0Prj5 +OEL8pahbSCOz6+MlsoCultQKnMJ4msZoGK43YjdeUXWoWGPAUe5AeH6orxqg4bB4nVCMe+ez/I4j +sNtlAHCEAQgAFG5Uhpq6zPk3EPbg3oQtnaSFN9OH4xXQwReQfhkhahKpdv0SAulPIV4XAgMBAAGj +YTBfMA8GA1UdEwEB/wQFMAMBAf8wTAYDVR0gBEUwQzBBBgtghkgBhvt7hwcBCzAyMDAGCCsGAQUF +BwIBFiRodHRwOi8vd3d3LndlbGxzZmFyZ28uY29tL2NlcnRwb2xpY3kwDQYJKoZIhvcNAQEFBQAD +ggEBANIn3ZwKdyu7IvICtUpKkfnRLb7kuxpo7w6kAOnu5+/u9vnldKTC2FJYxHT7zmu1Oyl5GFrv +m+0fazbuSCUlFLZWohDo7qd/0D+j0MNdJu4HzMPBJCGHHt8qElNvQRbn7a6U+oxy+hNH8Dx+rn0R +OhPs7fpvcmR7nX1/Jv16+yWt6j4pf0zjAFcysLPp7VMX2YuyFA4w6OXVE8Zkr8QA1dhYJPz1j+zx +x32l2w8n0cbyQIjmH/ZhqPRCyLk306m+LFZ4wnKbWV01QIroTmMatukgalHizqSQ33ZwmVxwQ023 +tqcZZE6St8WRPH9IFmV7Fv3L/PvZ1dZPIWU7Sn9Ho/s= +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG 
+EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- 
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e 
+e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- 
+MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Platinum CA - G2 +========================== +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWduIFBsYXRpbnVtIENBIC0gRzIw +HhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAwWjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMM +U3dpc3NTaWduIEFHMSMwIQYDVQQDExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu 
+669yIIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2HtnIuJpX+UF +eNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+6ixuEFGSzH7VozPY1kne +WCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5objM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIo +j5+saCB9bzuohTEJfwvH6GXp43gOCWcwizSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/6 +8++QHkwFix7qepF6w9fl+zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34T +aNhxKFrYzt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaPpZjy +domyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtFKwH3HBqi7Ri6Cr2D ++m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuWae5ogObnmLo2t/5u7Su9IPhlGdpV +CX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMBAAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCv +zAeHFUdvOMW0ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW +IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUAA4ICAQAIhab1 +Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0uMoI3LQwnkAHFmtllXcBrqS3 +NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4 +U99REJNi54Av4tHgvI42Rncz7Lj7jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8 +KV2LwUvJ4ooTHbG/u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl +9x8DYSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1puEa+S1B +aYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXaicYwu+uPyyIIoK6q8QNs +OktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbGDI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSY +Mdp08YSTcU1f+2BY0fvEwW2JorsgH51xkcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAci +IfNAChs0B0QTwoRqjt8ZWr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf 
+KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN 
+NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC 
+Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB 
+AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc 
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +S-TRUST Authentication and Encryption Root CA 2005 PN +===================================================== +-----BEGIN CERTIFICATE----- +MIIEezCCA2OgAwIBAgIQNxkY5lNUfBq1uMtZWts1tzANBgkqhkiG9w0BAQUFADCBrjELMAkGA1UE +BhMCREUxIDAeBgNVBAgTF0JhZGVuLVd1ZXJ0dGVtYmVyZyAoQlcpMRIwEAYDVQQHEwlTdHV0dGdh +cnQxKTAnBgNVBAoTIERldXRzY2hlciBTcGFya2Fzc2VuIFZlcmxhZyBHbWJIMT4wPAYDVQQDEzVT +LVRSVVNUIEF1dGhlbnRpY2F0aW9uIGFuZCBFbmNyeXB0aW9uIFJvb3QgQ0EgMjAwNTpQTjAeFw0w +NTA2MjIwMDAwMDBaFw0zMDA2MjEyMzU5NTlaMIGuMQswCQYDVQQGEwJERTEgMB4GA1UECBMXQmFk +ZW4tV3VlcnR0ZW1iZXJnIChCVykxEjAQBgNVBAcTCVN0dXR0Z2FydDEpMCcGA1UEChMgRGV1dHNj +aGVyIFNwYXJrYXNzZW4gVmVybGFnIEdtYkgxPjA8BgNVBAMTNVMtVFJVU1QgQXV0aGVudGljYXRp +b24gYW5kIEVuY3J5cHRpb24gUm9vdCBDQSAyMDA1OlBOMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA2bVKwdMz6tNGs9HiTNL1toPQb9UY6ZOvJ44TzbUlNlA0EmQpoVXhOmCTnijJ4/Ob 
+4QSwI7+Vio5bG0F/WsPoTUzVJBY+h0jUJ67m91MduwwA7z5hca2/OnpYH5Q9XIHV1W/fuJvS9eXL +g3KSwlOyggLrra1fFi2SU3bxibYs9cEv4KdKb6AwajLrmnQDaHgTncovmwsdvs91DSaXm8f1Xgqf +eN+zvOyauu9VjxuapgdjKRdZYgkqeQd3peDRF2npW932kKvimAoA0SVtnteFhy+S8dF2g08LOlk3 +KC8zpxdQ1iALCvQm+Z845y2kuJuJja2tyWp9iRe79n+Ag3rm7QIDAQABo4GSMIGPMBIGA1UdEwEB +/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgEGMCkGA1UdEQQiMCCkHjAcMRowGAYDVQQDExFTVFJv +bmxpbmUxLTIwNDgtNTAdBgNVHQ4EFgQUD8oeXHngovMpttKFswtKtWXsa1IwHwYDVR0jBBgwFoAU +D8oeXHngovMpttKFswtKtWXsa1IwDQYJKoZIhvcNAQEFBQADggEBAK8B8O0ZPCjoTVy7pWMciDMD +pwCHpB8gq9Yc4wYfl35UvbfRssnV2oDsF9eK9XvCAPbpEW+EoFolMeKJ+aQAPzFoLtU96G7m1R08 +P7K9n3frndOMusDXtk3sU5wPBG7qNWdX4wple5A64U8+wwCSersFiXOMy6ZNwPv2AtawB6MDwidA +nwzkhYItr5pCHdDHjfhA7p0GVxzZotiAFP7hYy0yh9WUUpY6RsZxlj33mA6ykaqP2vROJAA5Veit +F7nTNCtKqUDMFypVZUF0Qn71wK/Ik63yGFs9iQzbRzkk+OBM8h+wPQrKBU6JIRrjKpms/H+h8Q8b +Hz2eBIPdltkdOpQ= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a 
+86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. +====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 
+MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i 
+SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign CA +========== +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0MRMwEQYDVQQD 
+EwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTMy +MThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMTCkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNp +Z24xCzAJBgNVBAYTAklMMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49q +ROR+WCf4C9DklBKK8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTy +P2Q298CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb2CEJKHxN +GGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxCejVb7Us6eva1jsz/D3zk +YDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7KpiXd3DTKaCQeQzC6zJMw9kglcq/QytNuEM +rkvF7zuZ2SOzW120V+x0cAwqTwIDAQABo4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAy +oDCgLoYsaHR0cDovL2ZlZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0P +AQH/BAQDAgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRLAZs+ +VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWdfoPPbrxHbvUanlR2 +QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0McXS6hMTXcpuEfDhOZAYnKuGntewI +mbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb +/627HOkthIDYIb6FUtnUdLlphbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VG +zT2ouvDzuFYkRes3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G 
+A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF 
+n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 
+htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt 
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI 
+hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 
+============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC +VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU 
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo
+b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5
+IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8
+Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz
+rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw
+HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u
+Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD
+A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx
+AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+NetLock Arany (Class Gold) Főtanúsítvány
+============================================
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
+A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
+dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
+cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO
+ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6
+c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu
+0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw
+/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk
+H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw
+fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1
+neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW
+qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta
+YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna
+NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu
+dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+Staat der Nederlanden Root CA - G2
+==================================
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
+Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC
+TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l
+ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ
+5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn
+vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj
+CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR
+OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI
+CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65
+48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi
+trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737
+qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh 
+AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD 
+T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 1 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCED9pHoGc8JpK83P/uUii5N0wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAxIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDlGb9to1ZhLZlIcfZn3rmN67eehoAKkQ76OCWvRoiC5XOooJskXQ0fzGVuDLDQ +VoQYh5oGmxChc9+0WDlrbsH2FdWoqD+qEgaNMax/sDTXjzRniAnNFBHiTkVWaR94AoDa3EeRKbs2 +yWNcxeDXLYd7obcysHswuiovMaruo2fa2wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFgVKTk8d6Pa +XCUDfGD67gmZPCcQcMgMCeazh88K4hiWNWLMv5sneYlfycQJ9M61Hd8qveXbhpxoJeUwfLaJFf5n +0a3hUKw8fGJLj7qE1xIVGx/KXQ/BUpQqEZnae88MNhPVNdwQGVnqlMEAv3WP2fr9dgTbYruQagPZ +RjXZ+Hxb +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 
+f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv 
+YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +TC TrustCenter Universal CA III +=============================== +-----BEGIN CERTIFICATE----- +MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAe +Fw0wOTA5MDkwODE1MjdaFw0yOTEyMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNU +QyBUcnVzdENlbnRlciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0Ex +KDAmBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF5+cvAqBNLaT6hdqbJYUt +QCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYvDIRlzg9uwliT6CwLOunBjvvya8o84pxO +juT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8vzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+Eut +CHnNaYlAJ/Uqwa1D7KRTyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1 +M4BDj5yjdipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBhMB8G +A1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI4jANBgkqhkiG9w0BAQUFAAOCAQEA +g8ev6n9NCjw5sWi+e22JLumzCecYV42FmhfzdkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+ +KGwWaODIl0YgoGhnYIg5IFHYaAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhK +BgePxLcHsU0GDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV +CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPHLQNjO9Po5KIq +woIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI 
+plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z 
+WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH +3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf 
+VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- 
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV 
+C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK 
+4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV
+BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg
+IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60
+g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb
+zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+AffirmTrust Premium ECC
+=======================
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV
+BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx
+MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U
+cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ
+N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW
+BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK
+BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X
+57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM
+eQ==
+-----END CERTIFICATE-----
+
+Certum Trusted Network CA
+=========================
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK
+ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy
+MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU
+ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC
+l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J
+J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4
+fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0
+cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw
+DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj
+jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1
+mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj
+Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+Certinomis - Autorité Racine
+=============================
+-----BEGIN CERTIFICATE-----
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK
+Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg
+LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG
+A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw
+JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa
+wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly
+Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw
+2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N
+jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q
+c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n +dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH 
+iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ 
++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 +Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI 
+1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ 
+RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ +Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib 
+D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI 
+osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA 
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb new file mode 100644 index 0000000..26a1f58 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/account_client_spec.rb @@ -0,0 +1,89 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'securerandom' + +describe 'Account client' do + before(:all) do + client_1 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_1 = client_1.init_index(index_name('account_client_1')) + + client_2 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_1'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_1'] + }) + + @index_2 = client_2.init_index(index_name('account_client_2')) + + client_3 = Algolia::Client.new({ + :application_id => ENV['ALGOLIA_APPLICATION_ID_2'], + :api_key => ENV['ALGOLIA_ADMIN_KEY_2'] + }) + + @index_3 = client_3.init_index(index_name('account_client_3')) + + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + after(:all) do + @index_1.delete_index rescue 'not fatal' + @index_2.delete_index rescue 'not fatal' + @index_3.delete_index rescue 'not fatal' + end + + it 'should not allow operations in the same application' do + expect { + 
Algolia::AccountClient.copy_index!(@index_1, @index_2) + }.to raise_exception( + Algolia::AlgoliaError, + 'The indexes are in the same application. Use Algolia::Client.copy_index instead.' + ) + end + + it 'should perform a cross app copy index and assert that destination must not exist' do + + @index_1.save_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + + @index_1.batch_rules! ([ + { + :objectID => 'one', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + ]) + + @index_1.batch_synonyms! ([ + {:objectID => 'one', :type => 'synonym', :synonyms => ['San Francisco', 'SF']} + ]) + + @index_1.set_settings! ({:searchableAttributes => ['objectID']}) + + Algolia::AccountClient.copy_index!(@index_1, @index_3) + + res = @index_3.search('') + res['nbHits'].should eq(1500) + + res = @index_3.search_rules('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + res = @index_3.search_synonyms('')['hits'] + res.size.should eq(1) + res[0]['objectID'].should eq('one') + + @index_3.get_settings['searchableAttributes'].should eq(['objectID']) + + expect { + Algolia::AccountClient.copy_index!(@index_1, @index_3) + }.to raise_exception( + Algolia::AlgoliaError, + 'Destination index already exists. Please delete it before copying index across applications.' + ) + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/client_spec.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/client_spec.rb new file mode 100644 index 0000000..fb664ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/client_spec.rb @@ -0,0 +1,1426 @@ +# encoding: UTF-8 +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) +require 'base64' + +def is_include(array, attr, value) + array.each do |elt| + if elt[attr] == value + return true + end + end + return false +end + +describe 'API keys', :maintainers_only => true do + before(:all) do + @index = Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + def wait_key(index, key, &block) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + if block_given? + return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_key_missing(index, key) + 1.upto(60) do # do not wait too long + begin + k = index.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + def wait_global_key(key, &block) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + if block_given? 
+ return if yield k + # not found + sleep 1 + next + end + return + rescue + # not found + sleep 1 + end + end + end + + def wait_global_key_missing(key) + 1.upto(60) do # do not wait too long + begin + k = Algolia.get_api_key(key) + sleep 1 + rescue + # not found + return + end + end + end + + it "should test index keys" do + @index.set_settings!({}) # ensure the index exists + + resIndex = @index.list_api_keys + newIndexKey = @index.add_api_key(['search']) + newIndexKey['key'].should_not eq("") + wait_key(@index, newIndexKey['key']) + resIndexAfter = @index.list_api_keys + is_include(resIndex['keys'], 'value', newIndexKey['key']).should eq(false) + is_include(resIndexAfter['keys'], 'value', newIndexKey['key']).should eq(true) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('search') + @index.update_api_key(newIndexKey['key'], ['addObject']) + wait_key(@index, newIndexKey['key']) do |key| + key['acl'] == ['addObject'] + end + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey['acl'][0].should eq('addObject') + @index.delete_api_key(newIndexKey['key']) + wait_key_missing(@index, newIndexKey['key']) + resIndexEnd = @index.list_api_keys + is_include(resIndexEnd['keys'], 'value', newIndexKey['key']).should eq(false) + end + + it "should test global keys" do + res = Algolia.list_api_keys + newKey = Algolia.add_api_key(['search']) + newKey['key'].should_not eq("") + wait_global_key(newKey['key']) + resAfter = Algolia.list_api_keys + is_include(res['keys'], 'value', newKey['key']).should eq(false) + is_include(resAfter['keys'], 'value', newKey['key']).should eq(true) + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('search') + Algolia.update_api_key(newKey['key'], ['addObject']) + wait_global_key(newKey['key']) do |key| + key['acl'] == ['addObject'] + end + key = Algolia.get_api_key(newKey['key']) + key['acl'][0].should eq('addObject') + Algolia.delete_api_key(newKey['key']) + wait_global_key_missing(newKey['key']) + resEnd = Algolia.list_api_keys + is_include(resEnd['keys'], 'value', newKey['key']).should eq(false) + + # Restore the deleted key + Algolia.restore_api_key(newKey['key']) + wait_global_key(newKey['key']) + key_end = Algolia.list_api_keys + is_include(key_end['keys'], 'value', newKey['key']).should eq(true) + + # Re-delete the key + Algolia.delete_api_key(newKey['key']) + end + + it "Check add keys" do + newIndexKey = @index.add_api_key(['search']) + newIndexKey.should have_key('key') + newIndexKey['key'].should be_a(String) + newIndexKey.should have_key('createdAt') + newIndexKey['createdAt'].should be_a(String) + sleep 5 # no task ID here + resIndex = @index.list_api_keys + resIndex.should have_key('keys') + resIndex['keys'].should be_a(Array) + resIndex['keys'][0].should have_key('value') + resIndex['keys'][0]['value'].should be_a(String) + resIndex['keys'][0].should have_key('acl') + resIndex['keys'][0]['acl'].should be_a(Array) + resIndex['keys'][0].should have_key('validity') + resIndex['keys'][0]['validity'].should be_a(Integer) + indexKey = @index.get_api_key(newIndexKey['key']) + indexKey.should have_key('value') + indexKey['value'].should be_a(String) + indexKey.should have_key('acl') + indexKey['acl'].should be_a(Array) + indexKey.should have_key('validity') + indexKey['validity'].should be_a(Integer) + task = @index.delete_api_key(newIndexKey['key']) + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + end +end + +describe 'Client' do + before(:all) do + @index = 
Algolia::Index.new(safe_index_name("àlgol?a")) + @index.delete_index rescue "not fatal" + end + + after(:all) do + @index.delete_index rescue "not fatal" + end + + it "should tell if index exists" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + expect(@index.exists?).to be true + end + + it "should tell if index does not exist" do + index = Algolia::Index.new('nonexistent_index') + expect(index.exists?).to be false + end + + it "should add a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = @index.search("john") + res["hits"].length.should eq(1) + @index.partial_update_object!({ :name => "Robert Doe"}, "1") + res = @index.search("robert") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, or add it if it doesn't exist" do + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(0) + @index.partial_update_object!({ :email => "tonny@parker.org" }, "1") + res = @index.search("tonny@parker.org") + res["hits"].length.should eq(1) + end + + it "should partial update a simple object, but don't add it if it doesn't exist" do + @index.partial_update_object!({ :email => "alex@boom.org" }, "51", false) + res = @index.search("alex@boom.org") + res["hits"].length.should eq(0) + end + + it "should partial update a batch of objects, and add them if they don't exist" do + batch = [ + { :objectID => "1", :email => "john@wanna.org" }, + { :objectID => "2", :email => "robert@wanna.org" } + ] + @index.partial_update_objects!(batch) + res = @index.search("@wanna.org") + res["hits"].length.should eq(2) + end + + it "should partial update a batch of objects, but don't add them if they don't exist" do + create_if_not_exists = false + batch = [ + { :objectID => "11", :email => "john@be.org" }, + { :objectID => "22", :email => "robert@be.org" } + ] + @index.partial_update_objects!(batch, create_if_not_exists) + res = @index.search("@be.org") + res["hits"].length.should eq(0) + end + + it "should add a set of objects" do + @index.add_objects!([ + { :name => "Another", :email => "another1@example.org" }, + { :name => "Another", :email => "another2@example.org" } + ]) + res = @index.search("another") + res["hits"].length.should eq(2) + end + + it "should partial update a simple object" do + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "2") + res = @index.search("john") + res["hits"].length.should eq(2) + @index.partial_update_objects!([{ :name => "Robert Doe", :objectID => "1"}, { :name => "Robert Doe", :objectID => "2"}]) + res = @index.search("robert") + res["hits"].length.should eq(2) + end + + it "should save a set of objects with their ids" do + @index.save_object!({ :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }) + res = @index.search("objectid") + res["hits"].length.should eq(1) + end + + it "should save a set of objects with their ids" do + @index.save_objects!([ + { :name => "objectid", :email => "objectid1@example.org", :objectID => 101 }, + { :name => "objectid", :email => "objectid2@example.org", :objectID => 102 } + ]) + res = @index.search("objectid") + res["hits"].length.should eq(2) + end + + it "should replace all objects" do + 
@index.save_objects!([{:objectID => '1'}, {:objectID => '2'}]) + @index.replace_all_objects!([{'color' => 'black'}, {:objectID => '4', 'color' => 'green'}]) + + res = @index.search('') + res["hits"].length.should eq(2) + res = @index.search('black') + res["hits"][0]['color'].should eq('black') + @index.get_object('4')['color'].should eq('green') + end + + it "should throw an exception if invalid argument" do + expect { @index.add_object!([ {:name => "test"} ]) }.to raise_error(ArgumentError) + expect { @index.add_objects!([ [ {:name => "test"} ] ]) }.to raise_error(ArgumentError) + expect { @index.save_object(1) }.to raise_error(ArgumentError) + expect { @index.save_object("test") }.to raise_error(ArgumentError) + expect { @index.save_object({ :objectID => 42 }.to_json) }.to raise_error(ArgumentError) + expect { @index.save_objects([{}, ""]) }.to raise_error(ArgumentError) + expect { @index.save_objects([1]) }.to raise_error(ArgumentError) + expect { @index.save_objects!([1]) }.to raise_error(ArgumentError) + expect { @index.save_object({ :foo => 42 }) }.to raise_error(ArgumentError) # missing objectID + end + + it "should be thread safe" do + @index.clear! + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + + threads = [] + 64.times do + t = Thread.new do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + threads << t + end + threads.each { |t| t.join } + end + + if !defined?(RUBY_ENGINE) || RUBY_ENGINE != 'jruby' + it "should be fork safe" do + 8.times do + Process.fork do + 10.times do + res = @index.search("john") + res["hits"].length.should eq(2) + end + end + end + Process.waitall + end + end + + it "should clear the index" do + @index.clear! + @index.search("")["hits"].length.should eq(0) + end + + it "should have another index after" do + index = Algolia::Index.new(safe_index_name("àlgol?a")) + begin + index.delete_index! 
+ rescue + # friends_2 does not exist + end + res = Algolia.list_indexes + is_include(res['items'], 'name', safe_index_name('àlgol?a')).should eq(false) + index.add_object!({ :name => "Robert" }) + resAfter = Algolia.list_indexes + is_include(resAfter['items'], 'name', safe_index_name('àlgol?a')).should eq(true) + end + + it "should get an object" do + @index.clear_index + @index.add_object!({:firstname => "Robert"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + res["nbHits"].should eq(2) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq(res['hits'][0]['firstname']) + + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq(res['hits'][0]['firstname']) + + objects = @index.get_objects([ res['hits'][0]['objectID'], res['hits'][1]['objectID'] ]) + objects.size.should eq(2) + end + + it "should restrict attributesToRetrieve" do + @index.clear_index + @index.add_object({:firstname => "Robert", :lastname => "foo", :objectID => 1}) + @index.add_object!({:firstname => "Robert2", :lastname => "bar", :objectID => 2}) + objects = @index.get_objects([1, 2], ['firstname']) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects([1, 2], [:firstname]) + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "objectID"=>"2"}) + + objects = @index.get_objects(["1", "2"], 'firstname,lastname') + objects.size.should eq(2) + objects[0].should eq({"firstname"=>"Robert", "lastname"=>"foo", "objectID"=>"1"}) + objects[1].should eq({"firstname"=>"Robert2", "lastname"=>"bar", "objectID"=>"2"}) + end + + it "should delete the object" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + @index.delete_object!(res['hits'][0]['objectID']) + @index.search('')['nbHits'].should eq(0) + end + + it "should not delete the index because the objectID is blank" do + @index.clear + @index.add_object!({:firstname => "Robert"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(1) + expect { @index.delete_object('') }.to raise_error(ArgumentError) + expect { @index.delete_object!(nil) }.to raise_error(ArgumentError) + @index.search('')['nbHits'].should eq(1) + end + + it "should delete several objects" do + @index.clear + @index.add_object!({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + res = @index.search('') + @index.search('')['nbHits'].should eq(2) + @index.delete_objects!(res['hits'].map { |h| h['objectID'] }) + @index.search('')['nbHits'].should eq(0) + end + + it "should delete several objects by query" do + @index.clear + @index.add_object({:firstname => "Robert1"}) + @index.add_object!({:firstname => "Robert2"}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by_query!('rob') + @index.search('')['nbHits'].should eq(0) + end + + it "should not wipe the entire index with delete_by_query" do + expect { @index.delete_by_query(nil) }.to raise_error(ArgumentError) + end + + context 'delete_by' do + it 'should not wipe the entire index' do + expect { @index.delete_by(nil) }.to raise_error(ArgumentError) + end + + it 'should fail with query passed' do + @index.clear + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + 
@index.search('')['nbHits'].should eq(2) + expect { @index.delete_by({ 'query' => 'abc' }) }.to raise_error(Algolia::AlgoliaProtocolError) + @index.search('')['nbHits'].should eq(2) + end + + it 'should work with filters' do + @index.clear + @index.set_settings!({:attributesForFaceting => ['firstname']}) + @index.add_object({:firstname => 'Robert1'}) + @index.add_object!({:firstname => 'Robert2'}) + @index.search('')['nbHits'].should eq(2) + @index.delete_by!({ 'filters' => 'firstname:Robert1' }) + @index.search('')['nbHits'].should eq(1) + end + end + + it 'should find objects when needed' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + + index.save_objects!([ + {:company => 'Algolia', :name => 'Julien Lemoine', :objectID => 'julien-lemoine'}, + {:company => 'Algolia', :name => 'Nicolas Dessaigne', :objectID => 'nicolas-dessaigne'}, + {:company => 'Amazon', :name => 'Jeff Bezos', :objectID => '162590850'}, + {:company => 'Apple', :name => 'Steve Jobs', :objectID => '162590860'}, + {:company => 'Apple', :name => 'Steve Wozniak', :objectID => '162590870'}, + {:company => 'Arista Networks', :name => 'Jayshree Ullal', :objectID => '162590880'}, + {:company => 'Google', :name => 'Larry Page', :objectID => '162590890'}, + {:company => 'Google', :name => 'Rob Pike', :objectID => '162590900'}, + {:company => 'Google', :name => 'Sergueï Brin', :objectID => '162590910'}, + {:company => 'Microsoft', :name => 'Bill Gates', :objectID => '162590920'}, + {:company => 'SpaceX', :name => 'Elon Musk', :objectID => '162590930'}, + {:company => 'Tesla', :name => 'Elon Musk', :objectID => '162590940'}, + {:company => 'Yahoo', :name => 'Marissa Mayer', :objectID => '162590950'}, + ]) + + res = index.search('algolia') + Algolia::Index.get_object_position(res, 'nicolas-dessaigne').should eq(0) + Algolia::Index.get_object_position(res, 'julien-lemoine').should eq(1) + Algolia::Index.get_object_position(res, '').should eq(-1) + + expect { + index.find_object({'query' => '', 'paginate' => true}) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => true}) { false } + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true}) { true } + obj['position'].should eq(0) + obj['page'].should eq(0) + + # we use a lambda and convert it to a block with `&` + # so as not to repeat the condition + condition = lambda do |obj| + obj.key?('company') and obj['company'] == 'Apple' + end + + expect { + index.find_object({'query' => 'algolia', 'paginate' => true}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + expect { + index.find_object({'query' => '', 'paginate' => false, 'hitsPerPage' => 5}, &condition) + }.to raise_exception( + Algolia::AlgoliaObjectNotFoundError, + 'Object not found' + ) + + obj = index.find_object({'query' => '', 'paginate' => true, 'hitsPerPage' => 5}, &condition) + obj['position'].should eq(0) + obj['page'].should eq(2) + end + + it "should copy the index" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + @index.delete_index! 
+ + index.search('')['nbHits'].should eq(1) + index.delete_index! + end + + it "should copy only settings" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + res = @index.set_settings!({ + 'searchableAttributes' => ['one'], + }) + + @index.wait_task(res['taskID']) + Algolia.copy_settings!(@index.name, index.name) + @index.delete_index! + + index.get_settings['searchableAttributes'].should eq(['one']) + index.delete_index! + end + + it "should copy only synonyms" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_synonym!('foo', { + :objectID => 'foo', :synonyms => ['car', 'vehicle', 'auto'], :type => 'synonym', + }) + + Algolia.copy_synonyms!(@index.name, index.name) + @index.delete_index! + + index.get_synonym('foo')['objectID'].should eq('foo') + index.delete_index! + end + + it "should copy only rules" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index index.name + rescue + end + + @index.save_rule!('bar', { + :objectID => 'bar', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + }) + + Algolia.copy_rules!(@index.name, index.name) + @index.delete_index! + + index.get_rule('bar')['objectID'].should eq('bar') + index.delete_index! + end + + it "should copy parts of the index only" do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + @index.clear_index + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + @index.search('')['nbHits'].should eq(1) + @index.search_synonyms('')['nbHits'].should eq(2) + + res = Algolia.copy_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à"), ["synonyms"]) + + @index.delete_index! + + index.search_synonyms('')['nbHits'].should eq(2) + index.delete_index! + end + + it "should move the index" do + @index.clear_index rescue "friends does not exist" + index = Algolia::Index.new(safe_index_name("àlgol?à")) + begin + Algolia.delete_index! index.name + rescue + # friends_2 does not exist + end + + @index.add_object!({:firstname => "Robert"}) + @index.search('')['nbHits'].should eq(1) + + Algolia.move_index!(safe_index_name("àlgol?a"), safe_index_name("àlgol?à")) + + index.search('')['nbHits'].should eq(1) + index.delete_index + end + + it "should retrieve the object" do + @index.clear_index rescue "friends does not exist" + @index.add_object!({:firstname => "Robert"}) + + res = @index.browse + + res['hits'].size.should eq(1) + res['hits'][0]['firstname'].should eq("Robert") + end + + it "should get logs" do + + expect { + Algolia::Index.new(safe_index_name('thisdefinitelyshouldntexist')).get_settings + }.to raise_error(Algolia::AlgoliaProtocolError) + res = Algolia.get_logs(0, 20, true) + + res['logs'].size.should > 0 + (res['logs'][0]['answer_code'].to_i / 100).should eq(4) + end + + it "should search on multipleIndex" do + @index.clear_index! 
rescue "Not fatal" + @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res["results"][0]["hits"].length.should eq(1) + + res = Algolia.multiple_queries([{"indexName" => safe_index_name("àlgol?a"), "query" => ""}], "indexName") + res["results"][0]["hits"].length.should eq(1) + end + + it "should get multiple objectIDs" do + index_name_1 = safe_index_name("àlgol?a-multi") + index_1 = Algolia::Index.new(index_name_1) + index_1.save_object!({:objectID => "obj1-multi-get", :name => 'test'}) + + index_name_2 = safe_index_name("àlgol?a-multi") + index_2 = Algolia::Index.new(index_name_2) + index_2.save_object!({:objectID => "obj2-multi-get", :name => 'another index'}) + + requests = [ + { "indexName" => index_name_1, "objectID" => "obj1-multi-get" }, + { "indexName" => index_name_2, "objectID" => "obj2-multi-get" } + ] + + response = Algolia.multiple_get_objects(requests) + + response['results'].count.should eq(2) + + index_1.delete_index rescue "not fatal" + index_2.delete_index rescue "not fatal" + end + + it "should throw if the index_name is missing in multiple_queries" do + expect { Algolia.multiple_queries([{"query" => ""}]) }.to raise_error(ArgumentError) + end + + it "should accept custom batch" do + @index.clear_index! rescue "Not fatal" + request = { "requests" => [ + { + "action" => "addObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger"} + }, + { + "action" => "addObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Jimmie", + "lastname" => "Barninger", + "objectID" => "43"} + }, + { + "action" => "updateObject", + "body" => {"firstname" => "Warren", + "lastname" => "Speach"}, + "objectID" => "42" + } + ]} + res = @index.batch!(request) + @index.search('')['nbHits'].should eq(4) + end + + it "should allow an array of tags" do + @index.add_object!({ :name => "P1", :_tags => "t1" }) + @index.add_object!({ :name => "P2", :_tags => "t1" }) + @index.add_object!({ :name => "P3", :_tags => "t2" }) + @index.add_object!({ :name => "P4", :_tags => "t3" }) + @index.add_object!({ :name => "P5", :_tags => ["t3", "t4"] }) + + @index.search("", { :tagFilters => ["t1"] })['hits'].length.should eq(2) # t1 + @index.search("", { :tagFilters => ["t1", "t2"] })['hits'].length.should eq(0) # t1 AND t2 + @index.search("", { :tagFilters => ["t3", "t4"] })['hits'].length.should eq(1) # t3 AND t4 + @index.search("", { :tagFilters => [["t1", "t2"]] })['hits'].length.should eq(3) # t1 OR t2 + end + + it "should be facetable" do + @index.clear! 
+ @index.set_settings( { :attributesForFaceting => ["f", "g"] }) + @index.add_object!({ :name => "P1", :f => "f1", :g => "g1" }) + @index.add_object!({ :name => "P2", :f => "f1", :g => "g2" }) + @index.add_object!({ :name => "P3", :f => "f2", :g => "g2" }) + @index.add_object!({ :name => "P4", :f => "f3", :g => "g2" }) + + res = @index.search("", { :facets => "f" }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1"] }) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f", :facetFilters => ["f:f1", "g:g2"] }) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"]] }) + res['nbHits'].should eq(4) + res['facets']['f']['f1'].should eq(2) + res['facets']['f']['f2'].should eq(1) + res['facets']['f']['f3'].should eq(1) + + res = @index.search("", { :facets => "f,g", :facetFilters => [["f:f1", "g:g2"], "g:g1"] }) + res['nbHits'].should eq(1) + res['facets']['f']['f1'].should eq(1) + res['facets']['f']['f2'].should be_nil + res['facets']['f']['f3'].should be_nil + res['facets']['g']['g1'].should eq(1) + res['facets']['g']['g2'].should be_nil + end + + it "should handle slash in objectId" do + @index.clear_index!() + @index.add_object!({:firstname => "Robert", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Robert') + object = @index.get_object(res['hits'][0]['objectID'], 'firstname') + object['firstname'].should eq('Robert') + + @index.save_object!({:firstname => "George", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('George') + + @index.partial_update_object!({:firstname => "Sylvain", :objectID => "A/go/?a"}) + res = @index.search('') + @index.search("")["nbHits"].should eq(1) + object = @index.get_object(res['hits'][0]['objectID']) + object['firstname'].should eq('Sylvain') + + end + + it "Check attributes list_indexes:" do + res = Algolia::Index.all + res.should have_key('items') + res['items'][0].should have_key('name') + res['items'][0]['name'].should be_a(String) + res['items'][0].should have_key('createdAt') + res['items'][0]['createdAt'].should be_a(String) + res['items'][0].should have_key('updatedAt') + res['items'][0]['updatedAt'].should be_a(String) + res['items'][0].should have_key('entries') + res['items'][0]['entries'].should be_a(Integer) + res['items'][0].should have_key('pendingTask') + [true, false].should include(res['items'][0]['pendingTask']) + end + + it 'Check attributes search : ' do + res = @index.search('') + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + 
res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes delete_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à2")) + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = index.delete_index() + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes clear_index : ' do + task = @index.clear_index + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add object : ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }) + task.should have_key('createdAt') + task['createdAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + end + + it 'Check attributes add object id: ' do + task = @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + #task.to_s.should eq("") + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes partial update: ' do + task = @index.partial_update_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectID') + task['objectID'].should be_a(String) + task['objectID'].should eq("1") + end + + it 'Check attributes delete object: ' do + @index.add_object({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = @index.delete_object("1") + task.should have_key('deletedAt') + task['deletedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + end + + it 'Check attributes add objects: ' do + task = @index.add_objects([{ :name => "John Doe", :email => "john@doe.org", :objectID => "1" }]) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('objectIDs') + task['objectIDs'].should be_a(Array) + end + + it 'Check attributes browse: ' do + res = @index.browse() + res.should have_key('hits') + res['hits'].should be_a(Array) + res.should have_key('page') + res['page'].should be_a(Integer) + res.should have_key('nbHits') + res['nbHits'].should be_a(Integer) + res.should have_key('nbPages') + res['nbPages'].should be_a(Integer) + res.should have_key('hitsPerPage') + res['hitsPerPage'].should be_a(Integer) + res.should have_key('processingTimeMS') + res['processingTimeMS'].should be_a(Integer) + res.should have_key('query') + res['query'].should be_a(String) + res.should have_key('params') + res['params'].should be_a(String) + end + + it 'Check attributes get settings: ' do + task = @index.set_settings({}) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + end + + it 'Check attributes move_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = 
Algolia.move_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + end + + it 'Check attributes copy_index : ' do + index = Algolia::Index.new(safe_index_name("àlgol?à")) + index2 = Algolia::Index.new(safe_index_name("àlgol?à2")) + index2.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.copy_index!(safe_index_name("àlgol?à2"), safe_index_name("àlgol?à")) + task.should have_key('updatedAt') + task['updatedAt'].should be_a(String) + task.should have_key('taskID') + task['taskID'].should be_a(Integer) + index.delete_index + index2.delete_index + end + + it 'Check attributes wait_task : ' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + task = Algolia.client.get(Algolia::Protocol.task_uri(safe_index_name("àlgol?a"), task['objectID'])) + task.should have_key('status') + task['status'].should be_a(String) + task.should have_key('pendingTask') + [true, false].should include(task['pendingTask']) + end + + it 'Check attributes get_task_status' do + task = @index.add_object!({ :name => "John Doe", :email => "john@doe.org" }, "1") + status = @index.get_task_status(task["taskID"]) + status.should be_a(String) + end + + it 'Check attributes log : ' do + logs = Algolia.get_logs() + logs.should have_key('logs') + logs['logs'].should be_a(Array) + logs['logs'][0].should have_key('timestamp') + logs['logs'][0]['timestamp'].should be_a(String) + logs['logs'][0].should have_key('method') + logs['logs'][0]['method'].should be_a(String) + logs['logs'][0].should have_key('answer_code') + logs['logs'][0]['answer_code'].should be_a(String) + logs['logs'][0].should have_key('query_body') + logs['logs'][0]['query_body'].should be_a(String) + logs['logs'][0].should have_key('answer') + logs['logs'][0]['answer'].should be_a(String) + logs['logs'][0].should have_key('url') + logs['logs'][0]['url'].should be_a(String) + logs['logs'][0].should have_key('ip') + logs['logs'][0]['ip'].should be_a(String) + logs['logs'][0].should have_key('query_headers') + logs['logs'][0]['query_headers'].should be_a(String) + logs['logs'][0].should have_key('sha1') + logs['logs'][0]['sha1'].should be_a(String) + end + + it 'should generate secured api keys (old syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)') + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)')) + key = Algolia.generate_secured_api_key('my_api_key', '(public,user1)', 42) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', '(public,user1)42')) + key = Algolia.generate_secured_api_key('my_api_key', ['public']) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public')) + key = Algolia.generate_secured_api_key('my_api_key', ['public', ['premium','vip']]) + key.should eq(OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'public,(premium,vip)')) + end + + it 'should generate secured api keys (new syntax)' do + key = Algolia.generate_secured_api_key('my_api_key', :tagFilters => '(public,user1)') + key.should eq(Base64.encode64("#{OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), 'my_api_key', 'tagFilters=%28public%2Cuser1%29')}tagFilters=%28public%2Cuser1%29").gsub("\n", '')) + key = Algolia.generate_secured_api_key('182634d8894831d5dbce3b3185c50881', 
:tagFilters => '(public,user1)', :userToken => 42) + # in ruby 1.8.7, the map iteration doesn't have the same ordering, + # making the hash slightly different + expected_keys = [ + 'ZDU0N2YzZjA3NGZkZGM2OTUxNzY3NzhkZDI3YWFkMjhhNzU5OTBiOGIyYTgyYzFmMjFjZTY4NTA0ODNiN2I1ZnVzZXJUb2tlbj00MiZ0YWdGaWx0ZXJzPSUyOHB1YmxpYyUyQ3VzZXIxJTI5', + 'OGYwN2NlNTdlOGM2ZmM4MjA5NGM0ZmYwNTk3MDBkNzMzZjQ0MDI3MWZjNTNjM2Y3YTAzMWM4NTBkMzRiNTM5YnRhZ0ZpbHRlcnM9JTI4cHVibGljJTJDdXNlcjElMjkmdXNlclRva2VuPTQy' + ] + expected_keys.include?(key).should eq(true) + end + + it 'Check attributes multipleQueries' do + res = Algolia.multiple_queries([{:index_name => safe_index_name("àlgol?a"), "query" => ""}]) + res.should have_key('results') + res['results'].should be_a(Array) + res['results'][0].should have_key('hits') + res['results'][0]['hits'].should be_a(Array) + res['results'][0].should have_key('page') + res['results'][0]['page'].should be_a(Integer) + res['results'][0].should have_key('nbHits') + res['results'][0]['nbHits'].should be_a(Integer) + res['results'][0].should have_key('nbPages') + res['results'][0]['nbPages'].should be_a(Integer) + res['results'][0].should have_key('hitsPerPage') + res['results'][0]['hitsPerPage'].should be_a(Integer) + res['results'][0].should have_key('processingTimeMS') + res['results'][0]['processingTimeMS'].should be_a(Integer) + res['results'][0].should have_key('query') + res['results'][0]['query'].should be_a(String) + res['results'][0].should have_key('params') + res['results'][0]['params'].should be_a(String) + end + + it 'should handle facet search' do + objects = { + :snoopy => { + :objectID => '1', + 'name' => 'Snoopy', + :kind => ['dog', 'animal'], + :born => 1950, + :series => 'Peanuts' + }, + :woodstock => { + :objectID => '2', + :name => 'Woodstock', + :kind => ['bird', 'animal'], + :born => 1960, + :series => 'Peanuts' + }, + :charlie => { + :objectID => '3', + :name => 'Charlie Brown', + :kind => ['human'], + :born => 1950, + :series => 'Peanuts' + }, + :hobbes => { + :objectID => '4', + :name => 'Hobbes', + :kind => ['tiger', 'animal', 'teddy'], + :born => 1985, + :series => 'Calvin & Hobbes' + }, + :calvin => { + :objectID => '5', + :name => 'Calvin', + :kind => ['human'], + :born => 1985, + :series => 'Calvin & Hobbes' + } + } + + index = Algolia::Index.new(safe_index_name('test_facet_search')) + index.set_settings({ + :attributesForFaceting => [ + 'searchable(series)', + 'kind' + ] + }) + index.add_objects! objects.values + + query = { + :facetFilters => ['kind:animal'], + :numericFilters => ['born >= 1955'] + } + answer = index.search_for_facet_values 'series', 'Peanutz', query + expect(answer['facetHits'].size).to eq(1) + expect(answer['facetHits'].first['value']).to eq('Peanuts') + expect(answer['facetHits'].first['count']).to eq(1) + end + + it 'should handle disjunctive faceting' do + index = Algolia::Index.new(safe_index_name("test_hotels")) + index.set_settings :attributesForFaceting => ['city', 'stars', 'facilities'] + index.clear_index rescue nil + index.add_objects! 
[ + { :name => 'Hotel A', :stars => '*', :facilities => ['wifi', 'bath', 'spa'], :city => 'Paris' }, + { :name => 'Hotel B', :stars => '*', :facilities => ['wifi'], :city => 'Paris' }, + { :name => 'Hotel C', :stars => '**', :facilities => ['bath'], :city => 'San Francisco' }, + { :name => 'Hotel D', :stars => '****', :facilities => ['spa'], :city => 'Paris' }, + { :name => 'Hotel E', :stars => '****', :facilities => ['spa'], :city => 'New York' }, + ] + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }) + answer['nbHits'].should eq(5) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['**'].should eq(1) + answer['disjunctiveFacets']['stars']['****'].should eq(2) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*'], :city => ['Paris'] }) + answer['nbHits'].should eq(2) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + + answer = index.search_disjunctive_faceting('h', ['stars', 'facilities'], { :facets => 'city' }, { :stars => ['*', '****'], :city => ['Paris'] }) + answer['nbHits'].should eq(3) + answer['facets'].size.should eq(1) + answer['disjunctiveFacets'].size.should eq(2) + answer['disjunctiveFacets']['stars']['*'].should eq(2) + answer['disjunctiveFacets']['stars']['****'].should eq(1) + end + + it 'should apply jobs one after another if synchronous' do + index = Algolia::Index.new(safe_index_name("sync")) + begin + index.add_object! :objectID => 1 + answer = index.search('') + answer['nbHits'].should eq(1) + answer['hits'][0]['objectID'].to_i.should eq(1) + index.clear_index! + index.add_object! :objectID => 2 + index.add_object! 
:objectID => 3 + answer = index.search('') + answer['nbHits'].should eq(2) + answer['hits'][0]['objectID'].to_i.should_not eq(1) + ensure + index.delete_index + end + end + + it "should send a custom batch" do + batch = [ + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "11", :email => "john@be.org" }}, + {:action => "addObject", :indexName => @index.name, :body => { :objectID => "22", :email => "robert@be.org" }} + ] + Algolia.batch!(batch) + res = @index.search("@be.org") + res["hits"].length.should eq(2) + end + + def test_browse(expected, *args) + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + hits = {} + @index.browse(*args) do |hit| + hits[hit['objectID']] = true + end + hits.size.should eq(expected) + end + + it "should browse the index using cursors" do + test_browse(1500) + test_browse(500, 1, 1000) + test_browse(0, 2, 1000) + end + + it "should browse the index using cursors specifying hitsPerPage" do + test_browse(1500, { :hitsPerPage => 500 }) + end + + it "should browse the index using cursors specifying params" do + test_browse(1, { :hitsPerPage => 500, :numericFilters => 'i=42' }) + test_browse(42, { :numericFilters => 'i<=42' }) + end + + it "should browse the index using cursors from a cursor" do + @index.clear + @index.add_objects!(1.upto(1500).map { |i| { :objectID => i, :i => i } }) + answer = @index.browse(0, 1000) + + hits = {} + @index.browse(:cursor => answer['cursor']) do |hit, cursor| + hits[hit['objectID']] = true + cursor.should eq(answer['cursor']) + end + hits.size.should eq(500) + + @index.browse_from(answer['cursor'])['hits'].size.should eq(500) + end + + it "should test synonyms" do + @index.add_object! :name => '589 Howard St., San Francisco' + @index.search('Howard St San Francisco')['nbHits'].should eq(1) + @index.batch_synonyms! [ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + @index.search('Howard St SF')['nbHits'].should eq(1) + + synonym = @index.get_synonym('city') + synonym['objectID'].should eq('city') + synonym['type'].should eq('synonym') + + @index.search('Howard Street')['nbHits'].should eq(1) + + synonyms_block = [] + synonyms_ret = @index.export_synonyms(1) do |s| + synonyms_block << s + end + + s0 = synonyms_search.map { |s| s['objectID'] }.sort + s1 = synonyms_block.map { |s| s['objectID'] }.sort + s2 = synonyms_ret.map { |s| s['objectID'] }.sort + + s0.should eq(s1) + s1.should eq(s2) + + @index.delete_synonym! 'city' + @index.search('Howard Street SF')['nbHits'].should eq(0) + + @index.clear_synonyms! + @index.search_synonyms('')['nbHits'].should eq(0) + end + + it "should replace all synonyms" do + @index.batch_synonyms! ([ + {:objectID => '1', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '2', :type => 'altCorrection1', :word => 'foo', :corrections => ['st']} + ]) + + @index.replace_all_synonyms! ([ + {:objectID => '3', :type => 'synonym', :synonyms => ['San Francisco', 'SF']}, + {:objectID => '4', :type => 'altCorrection1', :word => 'bar', :corrections => ['st']} + ]) + + synonym = @index.get_synonym('4')['objectID'].should eq('4') + synonyms_search = @index.search_synonyms('')['hits'] + synonyms_search.size.should eq(2) + end + + it 'should test synonyms Export Query' do + @index.batch_synonyms! 
[ + { :objectID => 'city', :type => 'synonym', :synonyms => ['San Francisco', 'SF'] }, + { :objectID => 'us', :type => 'synonym', :synonyms => ['US', 'USA', 'United States of America'] }, + { :objectID => 'ie', :type => 'synonym', :synonyms => ['IE', 'IRL', 'Ireland'] }, + { :objectID => 'street', :type => 'altCorrection1', :word => 'street', :corrections => ['st'] } + ] + + expect(@index).to receive(:search_synonyms).and_call_original.at_least(4) + @index.export_synonyms(1) + + @index.clear_synonyms! + end + + it 'should test Query Rules' do + rule_1 = { + :objectID => '42', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + rule_2 = { + :objectID => '2', + :condition => { :pattern => 'Pura', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'Pura Vida' } } + } + + result = @index.save_rule!(rule_1[:objectID], rule_1) + result.should have_key('taskID') + result.should have_key('updatedAt') + + @index.get_rule(rule_1[:objectID])['objectID'].should eq(rule_1[:objectID]) + + @index.search_rules('better')['nbHits'].should eq(1) + @index.search_rules('', { :anchoring => 'contains' })['nbHits'].should eq(1) + + @index.delete_rule!(rule_1[:objectID]) + @index.search_rules('')['nbHits'].should eq(0) + + @index.batch_rules!([rule_1, rule_2]) + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + + rules_block = [] + rules_ret = @index.export_rules(1) do |r| + rules_block << r + end + + r0 = rules_search.map { |r| r['objectID'] }.sort + r1 = rules_block.map { |r| r['objectID'] }.sort + r2 = rules_ret.map { |r| r['objectID'] }.sort + + r0.should eq(r1) + r1.should eq(r2) + + @index.clear_rules! + @index.search_rules('')['nbHits'].should eq(0) + end + + it "should replace all rules" do + rule_1 = { + :objectID => '1', + :condition => {:pattern => 'test', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'this is better'}} + } + rule_2 = { + :objectID => '2', + :condition => {:pattern => 'Pura', :anchoring => 'contains'}, + :consequence => {:params => {:query => 'Pura Vida'}} + } + + @index.batch_rules! 
[rule_1, rule_2] + + rule_1[:objectID] = '3' + rule_2[:objectID] = '4' + @index.replace_all_rules!([rule_1, rule_2]) + + @index.get_rule('4')['objectID'].should eq('4') + rules_search = @index.search_rules('')['hits'] + rules_search.size.should eq(2) + end + + it 'should not save a query rule with an empty objectID' do + rule = { + :objectID => '', + :condition => { :pattern => 'test', :anchoring => 'contains' }, + :consequence => { :params => { :query => 'this is better' } } + } + + expect { @index.save_rule!(nil, rule) }.to raise_error(ArgumentError) + expect { @index.save_rule!(rule[:objectID], rule) }.to raise_error(ArgumentError) + end + + it "should use request options" do + expect{Algolia.list_indexes}.to_not raise_error + + expect{Algolia.list_indexes('headers' => { 'X-Algolia-API-Key' => 'NotExistentAPIKey' })}.to raise_error(Algolia::AlgoliaProtocolError) + end + + it 'should retrieve the remaining validity time in seconds' do + now = Time.now.to_i + + key = Algolia.generate_secured_api_key('foo', :validUntil => now - (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be < 0 + + key = Algolia.generate_secured_api_key('foo', :validUntil => now + (10 * 60)) + expect(Algolia.get_secured_api_key_remaining_validity(key)).to be > 0 + + key = Algolia.generate_secured_api_key('foo', []) + expect { Algolia.get_secured_api_key_remaining_validity(key) }.to raise_error(Algolia::ValidUntilNotFoundError) + end + + context 'DNS timeout' do + before(:each) do + @client = Algolia::Client.new :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'], + :hosts => [ + "10.0.0.1", # this will timeout + "#{ENV['ALGOLIA_APPLICATION_ID']}.algolia.net", + "#{ENV['ALGOLIA_APPLICATION_ID']}-1.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-2.algolianet.com", + "#{ENV['ALGOLIA_APPLICATION_ID']}-3.algolianet.com" + ], + :connect_timeout => 5 + @client.destroy # make sure the thread-local vars are reset + end + + it "should fall back to the 2nd host after a few seconds" do + start_time = Time.now + @client.list_indexes # fall back on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + end + + it "should re-use the working (2nd) host after the 1st one failed" do + start_time = Time.now + @client.list_indexes # fall back on the second host after 5 sec (connection timeout) + expect(start_time.to_i + 5).to be <= Time.now.to_i + 1 + start_time = Time.now + @client.list_indexes # re-use the 2nd one + expect(start_time.to_i).to be <= Time.now.to_i + 1 + end + end + + context 'Custom User Agent' do + before(:all) do + WebMock.enable! + end + + before(:each) do + @client = Algolia::Client.new( + :application_id => ENV['ALGOLIA_APPLICATION_ID'], + :api_key => ENV['ALGOLIA_API_KEY'], + :user_agent => 'test agent' + ) + @client.destroy # make sure the thread-local vars are reset + end + + it "should use a custom user-agent" do + WebMock.stub_request(:get, /.*\.algolia(net\.com|\.net)\/1\/indexes/). + to_return(:status => 200, :body => '{}') + @client.list_indexes + expect(WebMock).to have_requested(:get, /https:\/\/.+-dsn.algolia(net\.com|\.net)\/1\/indexes/). + with(:headers => { 'User-Agent' => "Algolia for Ruby (#{::Algolia::VERSION}); Ruby (#{RUBY_VERSION}); test agent" }) + end + + after(:all) do + WebMock.disable! 
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb new file mode 100644 index 0000000..8dae58a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/mock_spec.rb @@ -0,0 +1,31 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +describe 'With a mocked client' do + + before(:all) do + WebMock.enable! + Algolia::WebMock.mock! + # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should add a simple object" do + index = Algolia::Index.new("friends") + index.add_object!({ :name => "John Doe", :email => "john@doe.org" }) + index.search('').should == { "hits" => [ { "objectID" => 42 } ], "page" => 1, "hitsPerPage" => 1, "nbHits"=>1, "nbPages"=>1 } # mocked + index.list_api_keys + index.browse + index.clear + index.delete + index.delete_by_query 'test' + end + + after(:all) do + WebMock.disable! + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb new file mode 100644 index 0000000..e317c1e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/spec_helper.rb @@ -0,0 +1,69 @@ + +if ENV['COVERAGE'] + require 'simplecov' + SimpleCov.start +end + +require 'bundler/setup' + +Bundler.setup :test + +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) + +require 'algoliasearch' +require 'rspec' +require 'webmock/rspec' +require 'algolia/webmock' +require 'time' + +raise 'missing ALGOLIA_APPLICATION_ID or ALGOLIA_API_KEY environment variables' if ENV['ALGOLIA_APPLICATION_ID'].nil? || ENV['ALGOLIA_API_KEY'].nil? +Algolia.init :application_id => ENV['ALGOLIA_APPLICATION_ID'], :api_key => ENV['ALGOLIA_API_KEY'] + +RSpec.configure do |config| + config.mock_with :rspec + + config.before(:suite) do + WebMock.disable! + end + + config.after(:suite) do + WebMock.disable! + end +end + +# avoid concurrent access to the same index +def safe_index_name(name) + return name if ENV['TRAVIS'].to_s != "true" + id = ENV['TRAVIS_JOB_NUMBER'] + "TRAVIS_RUBY_#{name}-#{id}" +end + +# avoid concurrent access to the same index and follow the CTS standards. +def index_name(name) + date = DateTime.now.strftime('%Y-%m-%d_%H:%M:%S') + + instance = ENV['TRAVIS'].to_s == 'true' ? ENV['TRAVIS_JOB_NUMBER'] : 'unknown' + + 'ruby_%s_%s_%s' % [date, instance, name] +end + +def auto_retry(options = {}) + return if !block_given? + + max_retry = options[:max_retry] || 10 + retry_count = 0 + + loop do + begin + return yield + rescue => e + retry_count += 1 + if retry_count >= max_retry + raise e + else + sleep retry_count + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb new file mode 100644 index 0000000..9c92efa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/algoliasearch-1.27.5/spec/stub_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper')) + +require 'webmock' + +describe 'With a rate limited client' do + + before(:each) do + WebMock.enable! 
+ # reset session objects + app_id = Algolia.client.application_id + Thread.current["algolia_hosts_#{app_id}"] = nil + Thread.current["algolia_search_hosts_#{app_id}"] = nil + Thread.current["algolia_host_index_#{app_id}"] = nil + Thread.current["algolia_search_host_index_#{app_id}"] = nil + end + + it "should pass the right headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.enable_rate_limit_forward ENV['ALGOLIA_API_KEY'], "1.2.3.4", "ratelimitapikey" + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + + it "should use original headers" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'] }). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 2 }", :headers => {}) + Algolia.disable_rate_limit_forward + index = Algolia::Index.new("friends") + index.search('bar')['fakeAttribute'].should == 2 + end + + it "should pass the right headers in the scope" do + WebMock.stub_request(:post, %r{https://.*\.algolia\.(io|net)/1/indexes/friends/query}). + with(:headers => {'Content-Type'=>'application/json; charset=utf-8', 'User-Agent'=>"Algolia for Ruby (#{Algolia::VERSION}); Ruby (#{RUBY_VERSION})", 'X-Algolia-Api-Key'=>ENV['ALGOLIA_API_KEY'], 'X-Algolia-Application-Id'=>ENV['ALGOLIA_APPLICATION_ID'], 'X-Forwarded-Api-Key'=>'ratelimitapikey', 'X-Forwarded-For'=>'1.2.3.4'}). + to_return(:status => 200, :body => "{ \"hits\": [], \"fakeAttribute\": 1 }", :headers => {}) + Algolia.with_rate_limits "1.2.3.4", "ratelimitapikey" do + index = Algolia::Index.new("friends") + index.search('foo')['fakeAttribute'].should == 1 + index.search('bar')['fakeAttribute'].should == 1 + end + end + + after(:each) do + WebMock.disable! + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/LICENSE b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory.rb new file mode 100644 index 0000000..76c868f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory.rb @@ -0,0 +1,100 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "pathname" +require_relative "artifactory/version" + +module Artifactory + autoload :Client, "artifactory/client" + autoload :Configurable, "artifactory/configurable" + autoload :Defaults, "artifactory/defaults" + autoload :Error, "artifactory/errors" + autoload :Util, "artifactory/util" + + module Collection + autoload :Artifact, "artifactory/collections/artifact" + autoload :Base, "artifactory/collections/base" + autoload :Build, "artifactory/collections/build" + end + + module Resource + autoload :Artifact, "artifactory/resources/artifact" + autoload :Backup, "artifactory/resources/backup" + autoload :Base, "artifactory/resources/base" + autoload :Build, "artifactory/resources/build" + autoload :BuildComponent, "artifactory/resources/build_component" + autoload :Certificate, "artifactory/resources/certificate" + autoload :Group, "artifactory/resources/group" + autoload :Layout, "artifactory/resources/layout" + autoload :LDAPSetting, "artifactory/resources/ldap_setting" + autoload :MailServer, "artifactory/resources/mail_server" + autoload :PermissionTarget, "artifactory/resources/permission_target" + autoload :Plugin, "artifactory/resources/plugin" + autoload :Repository, "artifactory/resources/repository" + autoload :System, "artifactory/resources/system" + autoload :URLBase, "artifactory/resources/url_base" + autoload :User, "artifactory/resources/user" + end + + class << self + include Artifactory::Configurable + + # + # The root of the Artifactory gem. This method is useful for finding files + # relative to the root of the repository. + # + # @return [Pathname] + # + def root + @root ||= Pathname.new(File.expand_path("../../", __FILE__)) + end + + # + # API client object based off the configured options in {Configurable}. + # + # @return [Artifactory::Client] + # + def client + unless defined?(@client) && @client.same_options?(options) + @client = Artifactory::Client.new(options) + end + + @client + end + + # + # Delegate all methods to the client object, essentially making the module + # object behave like a {Client}. + # + def method_missing(m, *args, &block) + if client.respond_to?(m) + client.send(m, *args, &block) + else + super + end + end + + # + # Delegating +respond_to+ to the {Client}. 
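+ # + # @example Module-level delegation (an illustrative sketch; the endpoint + # below is a placeholder) + # Artifactory.configure do |config| + # config.endpoint = 'https://repo.example.com/artifactory' + # end + # Artifactory.get('/api/system/ping') # forwarded to Artifactory.client.get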
+ # + def respond_to_missing?(m, include_private = false) + client.respond_to?(m) || super + end + end +end + +# Load the initial default values +Artifactory.setup diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/client.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/client.rb new file mode 100644 index 0000000..0a2be3f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/client.rb @@ -0,0 +1,414 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "cgi" +require "json" +require "net/http" +require "uri" + +module Artifactory + # + # Client for the Artifactory API. + # + # @see http://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API + # + class Client + class << self + # + # @private + # + def proxy(klass) + namespace = klass.name.split("::").last.downcase + klass.singleton_methods(false).each do |name| + define_method("#{namespace}_#{name}") do |*args| + if args.last.is_a?(Hash) + args.last[:client] = self + else + args << { client: self } + end + + klass.send(name, *args) + end + end + end + end + + include Artifactory::Configurable + + proxy Resource::Artifact + proxy Resource::Backup + proxy Resource::Certificate + proxy Resource::Layout + proxy Resource::LDAPSetting + proxy Resource::MailServer + proxy Resource::PermissionTarget + proxy Resource::Repository + proxy Resource::System + proxy Resource::URLBase + proxy Resource::User + + # + # Create a new Artifactory Client with the given options. Any options + # given take precedence over the default options. + # + # @return [Artifactory::Client] + # + def initialize(options = {}) + # Use any options given, but fall back to the defaults set on the module + Artifactory::Configurable.keys.each do |key| + value = if options[key].nil? + Artifactory.instance_variable_get(:"@#{key}") + else + options[key] + end + + instance_variable_set(:"@#{key}", value) + end + end + + # + # Determine if the given options are the same as ours. + # + # @return [Boolean] + # + def same_options?(opts) + opts.hash == options.hash + end + + # + # Make an HTTP GET request + # + # If a block is provided the response body is yielded in chunks/fragments + # as it is read from the underlying socket.
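+ # + # @example Stream a download to a local file (an illustrative sketch; the + # paths are placeholders) + # File.open('artifact.deb', 'wb') do |file| + # client.get('/libs-release-local/org/acme/artifact.deb') do |chunk| + # file.write(chunk) + # end + # end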
+ # + # @param path (see Client#request) + # @param [Hash] params + # the list of query params + # @param headers (see Client#request) + # + # @yield [chunk] Partial piece of response body + # + # @raise (see Client#request) + # @return (see Client#request) + # + def get(path, params = {}, headers = {}, &block) + request(:get, path, params, headers, &block) + end + + # + # Make an HTTP POST request + # + # @param path (see Client#request) + # @param [String, #read] data + # the body to use for the request + # @param headers (see Client#request) + # + # @raise (see Client#request) + # @return (see Client#request) + # + def post(path, data, headers = {}) + request(:post, path, data, headers) + end + + # + # Make an HTTP PUT request + # + # @param path (see Client#request) + # @param data (see Client#post) + # @param headers (see Client#request) + # + # @raise (see Client#request) + # @return (see Client#request) + # + def put(path, data, headers = {}) + request(:put, path, data, headers) + end + + # + # Make an HTTP PATCH request + # + # @param path (see Client#request) + # @param data (see Client#post) + # @param headers (see Client#request) + # + # @raise (see Client#request) + # @return (see Client#request) + # + def patch(path, data, headers = {}) + request(:patch, path, data, headers) + end + + # + # Make an HTTP DELETE request + # + # @param path (see Client#request) + # @param params (see Client#get) + # @param headers (see Client#request) + # + # @raise (see Client#request) + # @return (see Client#request) + # + def delete(path, params = {}, headers = {}) + request(:delete, path, params, headers) + end + + # + # Make an HTTP request with the given verb, data, params, and headers. If + # the response has a return type of JSON, the JSON is automatically parsed + # and returned as a hash; otherwise it is returned as a string. If a block + # is provided the response body is yielded in chunks/fragments as it is + # read from the underlying socket. + # + # @raise [Error::HTTPError] + # if the request is not an HTTP 200 OK + # + # @param [Symbol] verb + # the lowercase symbol of the HTTP verb (e.g. :get, :delete) + # @param [String] path + # the absolute or relative path from {Defaults.endpoint} to make the + # request against + # @param [#read, Hash, nil] data + # the data to use (varies based on the +verb+) + # @param [Hash] headers + # the list of headers to use + # + # @yield [chunk] Partial piece of response body + # + # @return [String, Hash] + # the response body + # + def request(verb, path, data = {}, headers = {}, &block) + # Build the URI and request object from the given information + uri = build_uri(verb, path, data) + request = class_for_request(verb).new(uri.request_uri) + + # Add headers + default_headers.merge(headers).each do |key, value| + request.add_field(key, value) + end + + # Add basic authentication + if username && password + request.basic_auth(username, password) + elsif api_key + request.add_field("X-JFrog-Art-Api", api_key) + end + + # Set up PATCH/POST/PUT + if %i{patch post put}.include?(verb) + if data.respond_to?(:read) + request.content_length = data.size + request.body_stream = data + elsif data.is_a?(Hash) + request.form_data = data + else + request.body = data + end + end + + # Create the HTTP connection object - since the proxy information defaults + # to +nil+, we can just pass it to the initializer method instead of doing + # crazy strange conditionals.
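+ # (Net::HTTP treats nil proxy arguments as "no proxy", so unset proxy + # options simply fall through here.)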
connection = Net::HTTP.new(uri.host, uri.port, + proxy_address, proxy_port, proxy_username, proxy_password) + + # The artifacts being uploaded might be large, so there's a good chance + # we'll need to bump this higher than the `Net::HTTP` default of 60 + # seconds. + connection.read_timeout = read_timeout + + # Apply SSL, if applicable + if uri.scheme == "https" + require "net/https" unless defined?(Net::HTTPS) + + # Turn on SSL + connection.use_ssl = true + + # Custom pem files, no problem! + if ssl_pem_file + pem = File.read(ssl_pem_file) + connection.cert = OpenSSL::X509::Certificate.new(pem) + connection.key = OpenSSL::PKey::RSA.new(pem) + connection.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + + # Naughty, naughty, naughty! Don't blame me when someone hops in + # and executes a MITM attack! + unless ssl_verify + connection.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + end + + # Create a connection using the block form, which will ensure the socket + # is properly closed in the event of an error. + connection.start do |http| + + if block_given? + http.request(request) do |response| + case response + when Net::HTTPRedirection + redirect = response["location"] + request(verb, redirect, data, headers, &block) + when Net::HTTPSuccess + response.read_body do |chunk| + yield chunk + end + else + error(response) + end + end + else + response = http.request(request) + + case response + when Net::HTTPRedirection + redirect = response["location"] + request(verb, redirect, data, headers) + when Net::HTTPSuccess + success(response) + else + error(response) + end + end + end + rescue SocketError, Errno::ECONNREFUSED, EOFError + raise Error::ConnectionError.new(endpoint) + end + + # + # The list of default headers (such as Keep-Alive and User-Agent) for the + # client object. + # + # @return [Hash] + # + def default_headers + { + "Connection" => "keep-alive", + "Keep-Alive" => "30", + "User-Agent" => user_agent, + } + end + + # + # Construct a URL from the given verb and path. If the request is a GET or + # DELETE request, the params are assumed to be query params and are + # converted as such using {Client#to_query_string}. + # + # If the path is relative, it is merged with the {Defaults.endpoint} + # attribute. If the path is absolute, it is converted to a URI object and + # returned. + # + # @param [Symbol] verb + # the lowercase HTTP verb (e.g. :+get+) + # @param [String] path + # the absolute or relative HTTP path (url) to get + # @param [Hash] params + # the list of params to build the URI with (for GET and DELETE requests) + # + # @return [URI] + # + def build_uri(verb, path, params = {}) + # Add any query string parameters + if %i{delete get}.include?(verb) + path = [path, to_query_string(params)].compact.join("?") + end + + # Parse the URI + uri = URI.parse(path) + + # Don't merge absolute URLs + uri = URI.parse(File.join(endpoint, path)) unless uri.absolute? + + # Return the URI object + uri + end + + # + # Helper method to get the corresponding {Net::HTTP} class from the given + # HTTP verb. + # + # @param [#to_s] verb + # the HTTP verb to create a class from + # + # @return [Class] + # + def class_for_request(verb) + Net::HTTP.const_get(verb.to_s.capitalize) + end + + # + # Convert the given hash to a list of query string parameters. Each key and + # value in the hash is URI-escaped for safety.
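+ # + # @example (illustrative) + # to_query_string(deep: 1, list: 'all') #=> "deep=1&list=all" + # to_query_string({}) #=> nil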
+ # + # @param [Hash] hash + # the hash to create the query string from + # + # @return [String, nil] + # the query string as a string, or +nil+ if there are no params + # + def to_query_string(hash) + hash.map do |key, value| + "#{CGI.escape(key.to_s)}=#{CGI.escape(value.to_s)}" + end.join("&")[/.+/] + end + + # + # Parse the response object and manipulate the result based on the given + # +Content-Type+ header. For now, this method only parses JSON, but it + # could be expanded in the future to accept other content types. + # + # @param [HTTP::Message] response + # the response object from the request + # + # @return [String, Hash] + # the parsed response, as an object + # + def success(response) + if (response.content_type || "").include?("json") + JSON.parse(response.body || "{}") + else + response.body || "" + end + end + + # + # Raise a response error, extracting as much information from the server's + # response as possible. + # + # @raise [Error::HTTPError] + # + # @param [HTTP::Message] response + # the response object from the request + # + def error(response) + if (response.content_type || "").include?("json") + # Attempt to parse the error as JSON + begin + json = JSON.parse(response.body) + + if json["errors"] && json["errors"].first + raise Error::HTTPError.new(json["errors"].first) + end + rescue JSON::ParserError; end + end + + raise Error::HTTPError.new( + "status" => response.code, + "message" => response.body + ) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/artifact.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/artifact.rb new file mode 100644 index 0000000..a15781e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/artifact.rb @@ -0,0 +1,28 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Collection::Artifact < Collection::Base + # + # Create a new artifact collection. + # + # @param (see Collection::Base#initialize) + # + def initialize(parent, options = {}, &block) + super(Resource::Artifact, parent, options, &block) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/base.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/base.rb new file mode 100644 index 0000000..c32edfe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/base.rb @@ -0,0 +1,65 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Collection::Base + # + # Create a new collection object (proxy). + # + # @param [Class] klass + # the child class object + # @param [Object] parent + # the parent object who created the collection + # @param [Hash] options + # the list of options given by the parent + # @param [Proc] block + # the block to evaluate for the instance + # + def initialize(klass, parent, options = {}, &block) + @klass = klass + @parent = parent + @options = options + @block = block + end + + # + # Use method missing to delegate methods to the class object or instance + # object. + # + def method_missing(m, *args, &block) + if klass.respond_to?(m) + if args.last.is_a?(Hash) + args.last.merge(options) + end + + klass.send(m, *args, &block) + else + instance.send(m, *args, &block) + end + end + + private + + attr_reader :klass + attr_reader :parent + attr_reader :options + attr_reader :block + + def instance + @instance ||= parent.instance_eval(&block) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/build.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/build.rb new file mode 100644 index 0000000..04c3356 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/collections/build.rb @@ -0,0 +1,28 @@ +# +# Copyright 2015-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Collection::Build < Collection::Base + # + # Create a new build collection. + # + # @param (see Collection::Base#initialize) + # + def initialize(parent, options = {}, &block) + super(Resource::Build, parent, options, &block) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/configurable.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/configurable.rb new file mode 100644 index 0000000..a76ba9a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/configurable.rb @@ -0,0 +1,96 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + # + # A re-usable class containing configuration information for the {Client}. See + # {Defaults} for a list of default values. + # + module Configurable + class << self + # + # The list of configurable keys. + # + # @return [Array] + # + def keys + @keys ||= %i{ + endpoint + username + password + api_key + proxy_address + proxy_password + proxy_port + proxy_username + ssl_pem_file + ssl_verify + user_agent + read_timeout + } + end + end + + # + # Create one attribute getter and setter for each key. + # + Artifactory::Configurable.keys.each do |key| + attr_accessor key + end + + # + # Set the configuration for this config, using a block. + # + # @example Configure the API endpoint + # Artifactory.configure do |config| + # config.endpoint = "http://www.my-artifactory-server.com/artifactory" + # end + # + def configure + yield self + end + + # + # Reset all configuration options to their default values. + # + # @example Reset all settings + # Artifactory.reset! + # + # @return [self] + # + def reset! + Artifactory::Configurable.keys.each do |key| + instance_variable_set(:"@#{key}", Defaults.options[key]) + end + self + end + alias_method :setup, :reset! + + private + + # + # The list of configurable keys, as an options hash. + # + # @return [Hash] + # + def options + map = Artifactory::Configurable.keys.map do |key| + [key, instance_variable_get(:"@#{key}")] + end + Hash[map] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/defaults.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/defaults.rb new file mode 100644 index 0000000..01199ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/defaults.rb @@ -0,0 +1,154 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require_relative "version" + +module Artifactory + module Defaults + # Default API endpoint + ENDPOINT = "http://localhost:8080/artifactory".freeze + + # Default User Agent header string + USER_AGENT = "Artifactory Ruby Gem #{Artifactory::VERSION}".freeze + + class << self + # + # The list of calculated default options for the configuration. 
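+ # + # @example (illustrative; the URL is a placeholder) + # ENV['ARTIFACTORY_ENDPOINT'] = 'https://repo.example.com/artifactory' + # Artifactory::Defaults.options[:endpoint] #=> "https://repo.example.com/artifactory"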
+ # + # @return [Hash] + # + def options + Hash[Configurable.keys.map { |key| [key, send(key)] }] + end + + # + # The endpoint where artifactory lives + # + # @return [String] + # + def endpoint + ENV["ARTIFACTORY_ENDPOINT"] || ENDPOINT + end + + # + # The User Agent header to send along + # + # @return [String] + # + def user_agent + ENV["ARTIFACTORY_USER_AGENT"] || USER_AGENT + end + + # + # The HTTP Basic Authentication username + # + # @return [String, nil] + # + def username + ENV["ARTIFACTORY_USERNAME"] + end + + # + # The HTTP Basic Authentication password + # + # @return [String, nil] + # + def password + ENV["ARTIFACTORY_PASSWORD"] + end + + # + # The API Key for authentication + # + # @return [String, nil] + # + def api_key + ENV["ARTIFACTORY_API_KEY"] + end + + # + # The HTTP Proxy server address as a string + # + # @return [String, nil] + # + def proxy_address + ENV["ARTIFACTORY_PROXY_ADDRESS"] + end + + # + # The HTTP Proxy user password as a string + # + # @return [String, nil] + # + def proxy_password + ENV["ARTIFACTORY_PROXY_PASSWORD"] + end + + # + # The HTTP Proxy server port as a string + # + # @return [String, nil] + # + def proxy_port + ENV["ARTIFACTORY_PROXY_PORT"] + end + + # + # The HTTP Proxy server username as a string + # + # @return [String, nil] + # + def proxy_username + ENV["ARTIFACTORY_PROXY_USERNAME"] + end + + # + # The path to a pem file on disk for use with a custom SSL verification + # + # @return [String, nil] + # + def ssl_pem_file + ENV["ARTIFACTORY_SSL_PEM_FILE"] + end + + # + # Verify SSL requests (default: true) + # + # @return [true, false] + # + def ssl_verify + if ENV["ARTIFACTORY_SSL_VERIFY"].nil? + true + else + %w{t y}.include?(ENV["ARTIFACTORY_SSL_VERIFY"].downcase[0]) + end + end + + # + # Number of seconds to wait for a response from Artifactory + # + # @return [Integer] + # + def read_timeout + if ENV["ARTIFACTORY_READ_TIMEOUT"] + ENV["ARTIFACTORY_READ_TIMEOUT"].to_i + else + 120 + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/errors.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/errors.rb new file mode 100644 index 0000000..22179d8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/errors.rb @@ -0,0 +1,58 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + module Error + # Base class for all errors + class ArtifactoryError < StandardError; end + + # Class for all HTTP errors + class HTTPError < ArtifactoryError + attr_reader :code + + def initialize(hash = {}) + @code = hash["status"].to_i + @http = hash["message"].to_s + + super "The Artifactory server responded with an HTTP Error " \ + "#{@code}: `#{@http}'" + end + end + + # A general connection error with a more informative message + class ConnectionError < ArtifactoryError + def initialize(endpoint) + super "The Artifactory server at `#{endpoint}' is not currently " \ + "accepting connections. 
Please ensure that the server is " \ + "running and that your authentication information is correct." + end + end + + # Raised when the given build type is not a recognized build type + class InvalidBuildType < ArtifactoryError + def initialize(given_type) + super <<~EOH + '#{given_type}' is not a valid build type. + + Valid build types include: + + #{Resource::Build::BUILD_TYPES.join("\n ")} + + EOH + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/artifact.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/artifact.rb new file mode 100644 index 0000000..4c23151 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/artifact.rb @@ -0,0 +1,723 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "tempfile" +require "time" + +module Artifactory + class Resource::Artifact < Resource::Base + class << self + # + # Search for an artifact by the full or partial filename. + # + # @example Search for all artifacts with the name "artifact" + # Artifact.search(name: 'artifact') + # + # @example Search for all artifacts named "artifact" in a specific repo + # Artifact.search(name: 'artifact', repos: 'libs-release-local') + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String] :name + # the name of the artifact to search (it can be a regular expression) + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def search(options = {}) + client = extract_client!(options) + params = Util.slice(options, :name, :repos) + format_repos!(params) + + client.get("/api/search/artifact", params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Search for an artifact by Maven coordinates: +Group ID+, +Artifact ID+, + # +Version+ and +Classifier+.
+ # + # @example Search for all artifacts with the given gavc + # Artifact.gavc_search( + # group: 'org.acme', + # name: 'artifact', + # version: '1.0', + # classifier: 'sources', + # ) + # + # @example Search for all artifacts with the given gavc in a specific repo + # Artifact.gavc_search( + # group: 'org.acme', + # name: 'artifact', + # version: '1.0', + # classifier: 'sources', + # repos: 'libs-release-local', + # ) + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String] :group + # the group id to search for + # @option options [String] :name + # the artifact id to search for + # @option options [String] :version + # the version of the artifact to search for + # @option options [String] :classifier + # the classifier to search for + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def gavc_search(options = {}) + client = extract_client!(options) + options = Util.rename_keys(options, + group: :g, + name: :a, + version: :v, + classifier: :c) + params = Util.slice(options, :g, :a, :v, :c, :repos) + format_repos!(params) + + client.get("/api/search/gavc", params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Search for an artifact by the given properties. These are arbitrary + # properties defined by the user on the artifact, so the search uses a free- + # form schema. + # + # @example Search for all artifacts with the given properties + # Artifact.property_search( + # branch: 'master', + # author: 'sethvargo', + # ) + # + # @example Search for all artifacts with the given properties in a specific repo + # Artifact.property_search( + # branch: 'master', + # author: 'sethvargo', + # repos: 'libs-release-local', + # ) + # + # @param [Hash] options + # the free-form list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def property_search(options = {}) + client = extract_client!(options) + params = options.dup + format_repos!(params) + + client.get("/api/search/prop", params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Search for an artifact by its checksum + # + # @example Search for all artifacts with the given MD5 checksum + # Artifact.checksum_search( + # md5: 'abcd1234...', + # ) + # + # @example Search for all artifacts with the given SHA1 checksum in a repo + # Artifact.checksum_search( + # sha1: 'abcdef123456....', + # repos: 'libs-release-local', + # ) + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String] :md5 + # the MD5 checksum of the artifact to search for + # @option options [String] :sha1 + # the SHA1 checksum of the artifact to search for + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def checksum_search(options = {}) + client = extract_client!(options) + params = Util.slice(options, :md5, :sha1, :repos) + format_repos!(params) + + client.get("/api/search/checksum",
params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Search for an artifact by its usage + # + # @example Search for all artifacts with the given usage statistics + # Artifact.usage_search( + # notUsedSince: 1388534400000, + # createdBefore: 1388534400000, + # ) + # + # @example Search for all artifacts with the given usage statistics in a repo + # Artifact.usage_search( + # notUsedSince: 1388534400000, + # createdBefore: 1388534400000, + # repos: 'libs-release-local', + # ) + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [Long] :notUsedSince + # the last downloaded cutoff date of the artifact to search for (millis since epoch) + # @option options [Long] :createdBefore + # the creation cutoff date of the artifact to search for (millis since epoch) + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def usage_search(options = {}) + client = extract_client!(options) + params = Util.slice(options, :notUsedSince, :createdBefore, :repos) + format_repos!(params) + + client.get("/api/search/usage", params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Search for an artifact by its creation date + # + # @example Search for all artifacts with the given creation date range + # Artifact.creation_search( + # from: 1414800000000, + # to: 1414871200000, + # ) + # + # @example Search for all artifacts with the given creation date range in a repo + # Artifact.creation_search( + # from: 1414800000000, + # to: 1414871200000, + # repos: 'libs-release-local', + # ) + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [Long] :from + # the creation start date of the artifact to search for (millis since epoch) + # @option options [Long] :to + # the creation end date of the artifact to search for (millis since epoch) + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [Array] + # a list of artifacts that match the query + # + def creation_search(options = {}) + client = extract_client!(options) + params = Util.slice(options, :from, :to, :repos) + format_repos!(params) + + client.get("/api/search/creation", params)["results"].map do |artifact| + from_url(artifact["uri"], client: client) + end + end + + # + # Get all versions of an artifact.
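+ # + # (Note: a 404 from the server means nothing matched; the rescue below + # converts that case into an empty array instead of raising.)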
+ # + # @example Get all versions of a given artifact + # Artifact.versions(name: 'artifact') + # @example Get all versions of a given artifact in a specific repo + # Artifact.versions(name: 'artifact', repos: 'libs-release-local') + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String] :group + # the group id to search for + # @option options [String] :name + # the artifact id to search for + # @option options [String] :version + # the version of the artifact to search for + # @option options [String, Array] :repos + # the list of repos to search + # + def versions(options = {}) + client = extract_client!(options) + options = Util.rename_keys(options, + group: :g, + name: :a, + version: :v) + params = Util.slice(options, :g, :a, :v, :repos) + format_repos!(params) + + client.get("/api/search/versions", params)["results"] + rescue Error::HTTPError => e + raise unless e.code == 404 + + [] + end + + # + # Get the latest version of an artifact. + # + # @example Find the latest version of an artifact + # Artifact.latest_version(name: 'artifact') + # @example Find the latest version of an artifact in a repo + # Artifact.latest_version( + # name: 'artifact', + # repos: 'libs-release-local', + # ) + # @example Find the latest snapshot version of an artifact + # Artifact.latest_version(name: 'artifact', version: '1.0-SNAPSHOT') + # @example Find the latest version of an artifact in a group + # Artifact.latest_version(name: 'artifact', group: 'org.acme') + # + # @param [Hash] options + # the list of options to search with + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # @option options [String] :group + # the group id to search for + # @option options [String] :name + # the artifact id to search for + # @option options [String] :version + # the version of the artifact to search for + # @option options [Boolean] :remote + # search remote repos (default: +false+) + # @option options [String, Array] :repos + # the list of repos to search + # + # @return [String, nil] + # the latest version as a string (e.g. +1.0-201203131455-2+), or +nil+ + # if no artifact matches the given query + # + def latest_version(options = {}) + client = extract_client!(options) + options = Util.rename_keys(options, + group: :g, + name: :a, + version: :v) + params = Util.slice(options, :g, :a, :v, :repos, :remote) + format_repos!(params) + + # For whatever reason, Artifactory won't accept "true" - they want a + # literal "1"... + params[:remote] = 1 if options[:remote] + + client.get("/api/search/latestVersion", params) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + + # + # @see Artifactory::Resource::Base.from_hash + # + def from_hash(hash, options = {}) + super.tap do |instance| + instance.created = Time.parse(instance.created) rescue nil + instance.last_modified = Time.parse(instance.last_modified) rescue nil + instance.last_updated = Time.parse(instance.last_updated) rescue nil + instance.size = instance.size.to_i + end + end + end + + attribute :uri, -> { raise "API path missing!" } + attribute :checksums + attribute :created + attribute :download_uri, -> { raise "Download URI missing!" } + attribute :key + attribute :last_modified + attribute :last_updated + attribute :local_path, -> { raise "Local destination missing!" } + attribute :mime_type + attribute :repo + attribute :size + + # + # The SHA1 of this artifact.
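+ # + # @example (illustrative; assumes the artifact was fetched from the API so + # +checksums+ is populated) + # artifact.sha1 #=> "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"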
+ # + # @return [String] + # + def sha1 + checksums && checksums["sha1"] + end + + # + # The MD5 of this artifact. + # + # @return [String] + # + def md5 + checksums && checksums["md5"] + end + + # + # @see Artifact#copy_or_move + # + def copy(destination, options = {}) + copy_or_move(:copy, destination, options) + end + + # + # Delete this artifact from the repository, suppressing any + # +ResourceNotFound+ exceptions that might occur. + # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete + !!client.delete(download_uri) + rescue Error::HTTPError + false + end + + # + # @see {Artifact#copy_or_move} + # + def move(destination, options = {}) + copy_or_move(:move, destination, options) + end + + # + # Set properties for this object. If no properties are given, it lists the properties for this object. + # + # @example List all properties for an artifact + # artifact.properties #=> { 'licenses'=>['Apache-2.0'] } + # + # @example Set new properties for an artifact + # artifact.properties(maintainer: 'SuperStartup01') #=> { 'licenses'=>['Apache-2.0'], 'maintainer'=>'SuperStartup01' } + # + # @param [Hash] props (default: +nil+) + # A hash of properties and corresponding values to set for the artifact + # + # @return [Hash] + # the list of properties + # + def properties(props = nil) + if props.nil? || props.empty? + get_properties + else + set_properties(props) + get_properties(true) + end + end + + # + # Get compliance info for a given artifact path. The result includes + # license and vulnerabilities, if any. + # + # **This requires the Black Duck addon to be enabled!** + # + # @example Get compliance info for an artifact + # artifact.compliance #=> { 'licenses' => [{ 'name' => 'LGPL v3' }] } + # + # @return [Hash] + # + def compliance + @compliance ||= client.get(File.join("/api/compliance", relative_path)) + end + + # + # Download the artifact onto the local disk. + # + # @example Download an artifact + # artifact.download #=> /tmp/cache/000adad0-bac/artifact.deb + # + # @example Download a remote artifact into a specific target + # artifact.download('~/Desktop') #=> ~/Desktop/artifact.deb + # + # @param [String] target + # the target directory where the artifact should be downloaded to + # (defaults to a temporary directory). **It is the user's responsibility + # to clean up the temporary directory when finished!** + # @param [Hash] options + # @option options [String] filename + # the name of the file when downloaded to disk (defaults to the basename + # of the file on the server) + # + # @return [String] + # the path where the file was downloaded on disk + # + def download(target = Dir.mktmpdir, options = {}) + target = File.expand_path(target) + + # Make the directory if it doesn't yet exist + FileUtils.mkdir_p(target) unless File.exist?(target) + + # Use the server artifact's filename if one wasn't given + filename = options[:filename] || File.basename(download_uri) + + # Construct the full path for the file + destination = File.join(target, filename) + + File.open(destination, "wb") do |file| + client.get(download_uri) do |chunk| + file.write chunk + end + end + + destination + end + + # + # Upload an artifact into the repository. If the first parameter is a File + # object, that file descriptor is passed to the uploader. If the first + # parameter is a string, it is assumed to be the path to a local file on + # disk. This method will automatically construct the File object from the + # given path.
+ # + # @see bit.ly/1dhJRMO Artifactory Matrix Properties + # + # @example Upload an artifact from a File instance + # artifact = Artifact.new(local_path: '/local/path/to/file.deb') + # artifact.upload('libs-release-local', '/remote/path') + # + # @example Upload an artifact with matrix properties + # artifact = Artifact.new(local_path: '/local/path/to/file.deb') + # artifact.upload('libs-release-local', '/remote/path', { + # status: 'DEV', + # rating: 5, + # branch: 'master' + # }) + # + # @param [String] repo + # the key of the repository to which to upload the file + # @param [String] remote_path + # the path where this resource will live in the remote artifactory + # repository, relative to the repository key + # @param [Hash] properties + # a list of matrix properties + # @param [Hash] headers + # the list of headers to send with the request + # + # @return [Resource::Artifact] + # + def upload(repo, remote_path, properties = {}, headers = {}) + file = File.new(File.expand_path(local_path)) + matrix = to_matrix_properties(properties) + endpoint = File.join("#{url_safe(repo)}#{matrix}", remote_path) + + # Include checksums in headers if given. + headers["X-Checksum-Md5"] = md5 if md5 + headers["X-Checksum-Sha1"] = sha1 if sha1 + + response = client.put(endpoint, file, headers) + + return unless response.is_a?(Hash) + + self.class.from_hash(response) + end + + # + # Upload the checksum for this artifact. **The artifact must already be + # uploaded or Artifactory will throw an exception!** + # + # @example Set an artifact's md5 + # artifact = Artifact.new(local_path: '/local/path/to/file.deb') + # artifact.upload_checksum('libs-release-local', '/remote/path', :md5, 'ABCD1234') + # + # @param (see Artifact#upload) + # @param [Symbol] type + # the type of checksum to write (+md5+ or +sha1+) + # @param [String] value + # the actual checksum + # + # @return [true] + # + def upload_checksum(repo, remote_path, type, value) + file = Tempfile.new("checksum.#{type}") + file.write(value) + file.rewind + + endpoint = File.join(url_safe(repo), "#{remote_path}.#{type}") + + client.put(endpoint, file) + true + ensure + if file + file.close + file.unlink + end + end + + # + # Upload an artifact with the given SHA checksum. Consult the artifactory + # documentation for the possible responses when the checksums fail to + # match. + # + # @see Artifact#upload More syntax examples + # + # @example Upload an artifact with a checksum + # artifact = Artifact.new(local_path: '/local/path/to/file.deb') + # artifact.upload_with_checksum('libs-release-local', '/remote/path', 'ABCD1234') + # + # @param (see Artifact#upload) + # @param [String] checksum + # the SHA1 checksum of the artifact to upload + # + def upload_with_checksum(repo, remote_path, checksum, properties = {}) + upload(repo, remote_path, properties, + "X-Checksum-Deploy" => true, + "X-Checksum-Sha1" => checksum) + end + + # + # Upload an artifact with the given archive. Consult the artifactory + # documentation for the format of the archive to upload.
+ #
+ # @see Artifact#upload More syntax examples
+ #
+ # @example Upload an artifact from an archive
+ # artifact = Artifact.new(local_path: '/local/path/to/archive.zip')
+ # artifact.upload_from_archive('libs-release-local', '/remote/path')
+ #
+ # @param (see Repository#upload)
+ #
+ def upload_from_archive(repo, remote_path, properties = {})
+ upload(repo, remote_path, properties,
+ "X-Explode-Archive" => true)
+ end
+
+ private
+
+ #
+ # Helper method for reading artifact properties
+ #
+ # @example List all properties for an artifact
+ # artifact.get_properties #=> { 'artifactory.licenses'=>['Apache-2.0'] }
+ #
+ # @param [TrueClass, FalseClass] refresh_cache (default: +false+)
+ # whether or not to use the locally cached value if it exists and is not nil
+ #
+ # @return [Hash]
+ # the list of properties
+ #
+ def get_properties(refresh_cache = false)
+ if refresh_cache || @properties.nil?
+ @properties = client.get(File.join("/api/storage", relative_path), properties: nil)["properties"]
+ end
+
+ @properties
+ end
+
+ #
+ # Helper method for setting artifact properties
+ #
+ # @example Set properties for an artifact
+ # artifact.set_properties({ prop1: 'value1', 'prop2' => 'value2' })
+ #
+ # @param [Hash] properties
+ # A hash of properties and corresponding values to set for the artifact
+ #
+ # @return [Hash]
+ # the parsed JSON response from the server
+ #
+ def set_properties(properties)
+ matrix = to_matrix_properties(properties)
+ endpoint = File.join("/api/storage", relative_path) + "?properties=#{matrix}"
+
+ client.put(endpoint, nil)
+ end
+
+ #
+ # Helper method for extracting the relative (repo) path, since it's not
+ # returned as part of the API.
+ #
+ # @example Get the relative URI from the resource
+ # /libs-release-local/org/acme/artifact.deb
+ #
+ # @return [String]
+ #
+ def relative_path
+ @relative_path ||= uri.split("/api/storage", 2).last
+ end
+
+ #
+ # Copy or move current artifact to a new destination.
+ #
+ # @example Move the current artifact to +ext-releases-local+
+ # artifact.move('/ext-releases-local/org/acme')
+ # @example Copy the current artifact to +ext-releases-local+
+ # artifact.copy('/ext-releases-local/org/acme')
+ #
+ # @param [Symbol] action
+ # the action (+:move+ or +:copy+)
+ # @param [String] destination
+ # the server-side destination to move or copy the artifact
+ # @param [Hash] options
+ # the list of options to pass
+ #
+ # @option options [Boolean] :fail_fast (default: +false+)
+ # fail on the first failure
+ # @option options [Boolean] :suppress_layouts (default: +false+)
+ # suppress cross-layout module path translation during copying or moving
+ # @option options [Boolean] :dry_run (default: +false+)
+ # pretend to do the copy or move
+ #
+ # @return [Hash]
+ # the parsed JSON response from the server
+ #
+ def copy_or_move(action, destination, options = {})
+ params = {}.tap do |param|
+ param[:to] = destination
+ param[:failFast] = 1 if options[:fail_fast]
+ param[:suppressLayouts] = 1 if options[:suppress_layouts]
+ param[:dry] = 1 if options[:dry_run]
+ end
+
+ endpoint = File.join("/api", action.to_s, relative_path) + "?#{to_query_string_parameters(params)}"
+
+ client.post(endpoint, {})
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/backup.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/backup.rb
new file mode 100644
index 0000000..ee71d6c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/backup.rb
@@ -0,0 +1,122 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require "rexml/document"
+
+module Artifactory
+ class Resource::Backup < Resource::Base
+ class << self
+ #
+ # Get a list of all backup jobs in the system.
+ #
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Array<Resource::Backup>]
+ # the list of backup jobs
+ #
+ def all(options = {})
+ config = Resource::System.configuration(options)
+ list_from_config("config/backups/backup", config, options)
+ end
+
+ #
+ # Find (fetch) a backup job by its key.
+ #
+ # @example Find a Backup by its key.
+ # backup.find('backup-daily') #=> # + # + # @param [String] key + # the name of the backup job to find + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::Backup, nil] + # an instance of the backup job that matches the given key, or +nil+ + # if one does not exist + # + def find(key, options = {}) + config = Resource::System.configuration(options) + find_from_config("config/backups/backup/key[text()='#{key}']", config, options) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + + private + + # + # List all the child text elements in the Artifactory configuration file + # of a node matching the specified xpath + # + # @param [String] xpath + # xpath expression for the parent element whose children are to be listed + # + # @param [REXML] config + # Artifactory config as an REXML file + # + # @param [Hash] options + # the list of options + # + # @return [~Resource::Base] + # + def list_from_config(xpath, config, options = {}) + REXML::XPath.match(config, xpath).map do |r| + hash = Util.xml_to_hash(r, "excludedRepositories", false) + from_hash(hash, options) + end + end + + # + # Find all the sibling text elements in the Artifactory configuration file + # of a node matching the specified xpath + # + # @param [String] xpath + # xpath expression for the element whose siblings are to be found + # + # @param [REXML] config + # Artifactory configuration file as an REXML doc + # + # @param [Hash] options + # the list of options + # + def find_from_config(xpath, config, options = {}) + name_node = REXML::XPath.match(config, xpath) + return nil if name_node.empty? + + properties = Util.xml_to_hash(name_node[0].parent, "excludedRepositories", false) + from_hash(properties, options) + end + end + + attribute :key, -> { raise "name missing!" } + attribute :enabled, true + attribute :dir + attribute :cron_exp + attribute :retention_period_hours + attribute :create_archive + attribute :excluded_repositories + attribute :send_mail_on_error + attribute :exclude_builds + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/base.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/base.rb new file mode 100644 index 0000000..b0df902 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/base.rb @@ -0,0 +1,399 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "cgi" +require "json" +require "uri" + +module Artifactory + class Resource::Base + class << self + # + # @macro attribute + # @method $1 + # Return this object's +$1+ + # + # @return [Object] + # + # + # @method $1=(value) + # Set this object's +$1+ + # + # @param [Object] value + # the value to set for +$1+ + # @param [Object] default + # the default value for this attribute + # + # @method $1? 
+ # Determines if the +$1+ value exists and is truthy + # + # @return [Boolean] + # + def attribute(key, default = nil) + key = key.to_sym unless key.is_a?(Symbol) + + # Set this attribute in the top-level hash + attributes[key] = nil + + define_method(key) do + value = attributes[key] + return value unless value.nil? + + if default.nil? + value + elsif default.is_a?(Proc) + default.call + else + default + end + end + + define_method("#{key}?") do + !!attributes[key] + end + + define_method("#{key}=") do |value| + set(key, value) + end + end + + # + # The list of attributes defined by this class. + # + # @return [Array] + # + def attributes + @attributes ||= {} + end + + # + # Determine if this class has a given attribute. + # + # @param [#to_sym] key + # the key to check as an attribute + # + # @return [true, false] + # + def has_attribute?(key) + attributes.key?(key.to_sym) + end + + # + # Construct a new object from the given URL. + # + # @param [String] url + # the URL to find the user from + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [~Resource::Base] + # + def from_url(url, options = {}) + # Parse the URL and only use the path so the configured + # endpoint/proxy/SSL settings are used in the GET request. + path = URI.parse(url_safe(url)).path + client = extract_client!(options) + # If the endpoint contains a path part, we must remove the + # endpoint path part from path, because the client uses + # endpoint + path as its full URI. + endpoint_path = URI.parse(client.endpoint).path + path.slice!(endpoint_path) + from_hash(client.get(path), client: client) + end + + # + # List all the child text elements in the Artifactory configuration file + # of a node matching the specified xpath + # + # @param [String] xpath + # xpath expression for the parent element whose children are to be listed + # + # @param [REXML] config + # Artifactory config as an REXML file + # + # @param [Hash] options + # the list of options + # + def list_from_config(xpath, config, options = {}) + REXML::XPath.match(config, xpath).map do |r| + hash = {} + + r.each_element_with_text do |l| + hash[l.name] = l.get_text + end + from_hash(hash, options) + end + end + + # + # Find the text elements matching a giving xpath + # + # @param [String] xpath + # xpath expression + # + # @param [REXML] config + # Artifactory configuration file as an REXML doc + # + # @param [Hash] options + # the list of options + # + def find_from_config(xpath, config, options = {}) + name_node = REXML::XPath.match(config, xpath) + return nil if name_node.empty? + + properties = {} + name_node[0].parent.each_element_with_text do |e| + properties[e.name] = Util.to_type(e.text) + end + + from_hash(properties, options) + end + + # + # Construct a new object from the hash. + # + # @param [Hash] hash + # the hash to create the object with + # @param [Hash] options + # the list options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [~Resource::Base] + # + def from_hash(hash, options = {}) + instance = new + instance.client = extract_client!(options) + + hash.inject(instance) do |instance, (key, value)| + method = :"#{Util.underscore(key)}=" + + if instance.respond_to?(method) + instance.send(method, value) + end + + instance + end + end + + # + # Get the client (connection) object from the given options. 
If the
+ # +:client+ key is present in the hash, it is assumed to contain the
+ # connection object to use for the request. If the +:client+ key is not
+ # present, the default {Artifactory.client} is used.
+ #
+ # Warning, the value of {Artifactory.client} is **not** threadsafe! If
+ # multiple threads or processes are modifying the connection information,
+ # the same request _could_ use a different client object. If you use the
+ # {Artifactory::Client} proxy methods, this is handled for you.
+ #
+ # Warning, this method will **remove** the +:client+ key from the hash if
+ # it exists.
+ #
+ # @param [Hash] options
+ # the list of options passed to the method
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to use for requests
+ #
+ def extract_client!(options)
+ options.delete(:client) || Artifactory.client
+ end
+
+ #
+ # Format the repos list from the given options. This method will modify
+ # the given Hash parameter!
+ #
+ # Warning, this method will modify the given hash if it exists.
+ #
+ # @param [Hash] options
+ # the list of options to extract the repos from
+ #
+ def format_repos!(options)
+ return options if options[:repos].nil? || options[:repos].empty?
+
+ options[:repos] = Array(options[:repos]).compact.join(",")
+ options
+ end
+
+ #
+ # Generate a URL-safe string from the given value.
+ #
+ # @param [#to_s] value
+ # the value to sanitize
+ #
+ # @return [String]
+ # the URL-safe version of the string
+ #
+ def url_safe(value)
+ uri_parser.escape(uri_parser.unescape(value.to_s))
+ end
+
+ #
+ # Generate a URI parser
+ #
+ # @return [URI::Parser]
+ def uri_parser
+ @uri_parser ||= URI::Parser.new
+ end
+ end
+
+ attribute :client, -> { Artifactory.client }
+
+ #
+ # Create a new instance
+ #
+ def initialize(attributes = {})
+ attributes.each do |key, value|
+ set(key, value)
+ end
+ end
+
+ #
+ # The list of attributes for this resource.
+ #
+ # @return [Hash]
+ #
+ def attributes
+ @attributes ||= self.class.attributes.dup
+ end
+
+ #
+ # Set a given attribute on this resource.
+ #
+ # @param [#to_sym] key
+ # the attribute to set
+ # @param [Object] value
+ # the value to set
+ #
+ # @return [Object]
+ # the set value
+ #
+ def set(key, value)
+ attributes[key.to_sym] = value
+ end
+
+ # @see Resource::Base.extract_client!
+ def extract_client!(options)
+ self.class.extract_client!(options)
+ end
+
+ # @see Resource::Base.format_repos!
+ def format_repos!(options)
+ self.class.format_repos!(options)
+ end
+
+ # @see Resource::Base.url_safe
+ def url_safe(value)
+ self.class.url_safe(value)
+ end
+
+ #
+ # The hash representation
+ #
+ # @example An example hash response
+ # { 'key' => 'local-repo1', 'includesPattern' => '**/*' }
+ #
+ # @return [Hash]
+ #
+ def to_hash
+ attributes.inject({}) do |hash, (key, value)|
+ unless Resource::Base.has_attribute?(key)
+ hash[Util.camelize(key, true)] = send(key.to_sym)
+ end
+
+ hash
+ end
+ end
+
+ #
+ # The JSON representation of this object.
+ #
+ # @see Artifactory::Resource::Base#to_json
+ #
+ # @return [String]
+ #
+ def to_json
+ JSON.fast_generate(to_hash)
+ end
+
+ #
+ # Create CGI-escaped string from matrix properties
+ #
+ # @see http://bit.ly/1qeVYQl
+ #
+ def to_matrix_properties(hash = {})
+ properties = hash.map do |k, v|
+ key = CGI.escape(k.to_s)
+ value = CGI.escape(v.to_s)
+
+ "#{key}=#{value}"
+ end
+
+ if properties.empty?
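+ # No properties at all: return nil (not an empty string) so that no
+ # matrix suffix is appended to the endpoint.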
+ nil + else + ";#{properties.join(";")}" + end + end + + # + # Create URI-escaped querystring parameters + # + # @see http://bit.ly/1qeVYQl + # + def to_query_string_parameters(hash = {}) + properties = hash.map do |k, v| + key = self.class.uri_parser.escape(k.to_s) + value = self.class.uri_parser.escape(v.to_s) + + "#{key}=#{value}" + end + + if properties.empty? + nil + else + properties.join("&") + end + end + + # @private + def to_s + "#<#{short_classname}>" + end + + # @private + def inspect + list = attributes.collect do |key, value| + unless Resource::Base.has_attribute?(key) + "#{key}: #{value.inspect}" + end + end.compact + + "#<#{short_classname} #{list.join(", ")}>" + end + + private + + def short_classname + @short_classname ||= self.class.name.split("::").last + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build.rb new file mode 100644 index 0000000..9dec0ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build.rb @@ -0,0 +1,230 @@ +# +# Copyright 2015-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "time" + +module Artifactory + class Resource::Build < Resource::Base + BUILD_SCHEMA_VERSION = "1.0.1".freeze + # valid build types as dictated by the Artifactory API + BUILD_TYPES = %w{ ANT IVY MAVEN GENERIC GRADLE }.freeze + + class << self + # + # Search for all builds in the system. + # + # @param [String] name + # the name of the build component + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of builds + # + def all(name, options = {}) + client = extract_client!(options) + client.get("/api/build/#{url_safe(name)}")["buildsNumbers"].map do |build_number| + # Remove the leading / from the `uri` value. Converts `/484` to `484`. + number = build_number["uri"].slice(1..-1) + find(name, number, client: client) + end.compact.flatten + rescue Error::HTTPError => e + # Artifactory returns a 404 instead of an empty list when there are no + # builds. Whoever decided that was a good idea clearly doesn't + # understand the point of REST interfaces... 
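+ # (Illustrative aside, not additional gem behavior: the net effect is
+ # that a component with no builds yields an empty array,
+ # Build.all('component-with-no-builds') #=> []
+ # while any non-404 error is still re-raised to the caller.)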
+ raise unless e.code == 404
+
+ []
+ end
+
+ #
+ # Find (fetch) data for a particular build of a component
+ #
+ # @example Find data for a build of a component
+ # Build.find('wicket', 25) #=> #<Resource::Build ...>
+ #
+ # @param [String] name
+ # the name of the build component
+ # @param [String] number
+ # the number of the build
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Resource::Build, nil]
+ # an instance of the build that matches the given name/number
+ # combination, or +nil+ if one does not exist
+ #
+ def find(name, number, options = {})
+ client = extract_client!(options)
+ response = client.get("/api/build/#{url_safe(name)}/#{url_safe(number)}")
+ from_hash(response["buildInfo"], client: client)
+ rescue Error::HTTPError => e
+ raise unless e.code == 404
+
+ nil
+ end
+
+ #
+ # @see Artifactory::Resource::Base.from_hash
+ #
+ def from_hash(hash, options = {})
+ super.tap do |instance|
+ instance.started = Time.parse(instance.started) rescue nil
+ instance.duration_millis = instance.duration_millis.to_i
+ end
+ end
+ end
+
+ # Based on https://github.com/JFrogDev/build-info/blob/master/README.md#build-info-json-format
+ attribute :properties, {}
+ attribute :version, BUILD_SCHEMA_VERSION
+ attribute :name, -> { raise "Build component missing!" }
+ attribute :number, -> { raise "Build number missing!" }
+ attribute :type, "GENERIC"
+ attribute :build_agent, {}
+ attribute :agent, {}
+ attribute :started, Time.now.utc.iso8601(3)
+ attribute :duration_millis
+ attribute :artifactory_principal
+ attribute :url
+ attribute :vcs_revision
+ attribute :vcs_url
+ attribute :license_control, {}
+ attribute :build_retention, {}
+ attribute :modules, []
+ attribute :governance
+ attribute :statuses, []
+
+ #
+ # Compare a build's artifacts/dependencies/environment with an older
+ # build to see what has changed (new artifacts added, old dependencies
+ # deleted, etc.).
+ #
+ # @example Compare a build against an older build of the same component
+ # build.diff(35) #=> { 'artifacts'=>{}, 'dependencies'=>{}, 'properties'=>{} }
+ #
+ # @param [String] previous_build_number
+ # the number of the previous build to compare against
+ #
+ # @return [Hash]
+ # the parsed JSON response from the server
+ #
+ def diff(previous_build_number)
+ endpoint = api_path + "?" "diff=#{url_safe(previous_build_number)}"
+ client.get(endpoint, {})
+ end
+
+ #
+ # Move a build's artifacts to a new repository optionally moving or
+ # copying the build's dependencies to the target repository
+ # and setting properties on promoted artifacts.
+ #
+ # @example promote the build to 'omnibus-stable-local'
+ # build.promote('omnibus-stable-local')
+ # @example promote a build attaching some new properties
+ # build.promote('omnibus-stable-local',
+ # properties: {
+ # 'promoted_by' => 'hipchat:schisamo@chef.io'
+ # }
+ # )
+ #
+ # @param [String] target_repo
+ # repository to move or copy the build's artifacts and/or dependencies
+ # @param [Hash] options
+ # the list of options to pass
+ #
+ # @option options [String] :status (default: 'promoted')
+ # new build status (any string)
+ # @option options [String] :comment (default: '')
+ # an optional comment describing the reason for promotion
+ # @option options [String] :user (default: +Artifactory.username+)
+ # the user that invoked promotion
+ # @option options [Boolean] :dry_run (default: +false+)
+ # pretend to do the promotion
+ # @option options [Boolean] :copy (default: +false+)
+ # whether to copy instead of move
+ # @option options [Boolean] :dependencies (default: +false+)
+ # whether to move/copy the build's dependencies
+ # @option options [Array<String>] :scopes (default: [])
+ # an array of dependency scopes to include when "dependencies" is true
+ # @option options [Hash<String, Array<String>>] :properties (default: {})
+ # a list of properties to attach to the build's artifacts
+ # @option options [Boolean] :fail_fast (default: +true+)
+ # fail and abort the operation upon receiving an error
+ #
+ # @return [Hash]
+ # the parsed JSON response from the server
+ #
+ def promote(target_repo, options = {})
+ request_body = {}.tap do |body|
+ body[:status] = options[:status] || "promoted"
+ body[:comment] = options[:comment] || ""
+ body[:ciUser] = options[:user] || Artifactory.username
+ body[:dryRun] = options[:dry_run] || false
+ body[:targetRepo] = target_repo
+ body[:copy] = options[:copy] || false
+ body[:artifacts] = true # always move/copy the build's artifacts
+ body[:dependencies] = options[:dependencies] || false
+ body[:scopes] = options[:scopes] || []
+ body[:properties] = options[:properties] || {}
+ # Use fetch so an explicit `fail_fast: false` is honored; `|| true`
+ # would coerce it back to true.
+ body[:failFast] = options.fetch(:fail_fast, true)
+ end
+
+ endpoint = "/api/build/promote/#{url_safe(name)}/#{url_safe(number)}"
+ client.post(endpoint, JSON.fast_generate(request_body),
+ "Content-Type" => "application/json")
+ end
+
+ #
+ # Creates data about a build.
+ #
+ # @return [Boolean]
+ #
+ def save
+ raise Error::InvalidBuildType.new(type) unless BUILD_TYPES.include?(type)
+
+ file = Tempfile.new("build.json")
+ file.write(to_json)
+ file.rewind
+
+ client.put("/api/build", file,
+ "Content-Type" => "application/json")
+ true
+ ensure
+ if file
+ file.close
+ file.unlink
+ end
+ end
+
+ private
+
+ #
+ # The path to this build on the server.
+ #
+ # @return [String]
+ #
+ def api_path
+ "/api/build/#{url_safe(name)}/#{url_safe(number)}"
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build_component.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build_component.rb
new file mode 100644
index 0000000..2b7c1a9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/build_component.rb
@@ -0,0 +1,161 @@
+#
+# Copyright 2015 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require "time"
+
+module Artifactory
+ class Resource::BuildComponent < Resource::Base
+ class << self
+ #
+ # Search for all components for which build data exists.
+ #
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Array<Resource::BuildComponent>]
+ # the list of builds
+ #
+ def all(options = {})
+ client = extract_client!(options)
+ client.get("/api/build")["builds"].map do |component|
+ from_hash(component, client: client)
+ end.compact.flatten
+ rescue Error::HTTPError => e
+ # Artifactory returns a 404 instead of an empty list when there are no
+ # builds. Whoever decided that was a good idea clearly doesn't
+ # understand the point of REST interfaces...
+ raise unless e.code == 404
+
+ []
+ end
+
+ #
+ # Find (fetch) data for a particular build component
+ #
+ # @example Find a particular build component
+ # BuildComponent.find('wicket') #=> #<Resource::BuildComponent ...>
+ #
+ # @param [String] name
+ # the name of the build component
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Resource::BuildComponent, nil]
+ # an instance of the build component that matches the given name,
+ # or +nil+ if one does not exist
+ #
+ def find(name, options = {})
+ client = extract_client!(options)
+ all.find do |component|
+ component.name == name
+ end
+ end
+
+ #
+ # @see Artifactory::Resource::Base.from_hash
+ #
+ def from_hash(hash, options = {})
+ super.tap do |instance|
+ # Remove the leading / from the `uri` value. Converts `/foo` to `foo`.
+ instance.name = instance.uri.slice(1..-1)
+ instance.last_started = Time.parse(instance.last_started) rescue nil
+ end
+ end
+ end
+
+ attribute :uri
+ attribute :name, -> { raise "Name missing!" }
+ attribute :last_started
+
+ #
+ # The list of build data for this component.
+ #
+ # @example Get the list of builds for a component
+ # component = BuildComponent.new(name: 'wicket')
+ # component.builds #=> [#<Resource::Build ...>, ...]
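+ #
+ # @example Iterate over a component's builds (an illustrative sketch;
+ # assumes a 'wicket' component exists on the server)
+ # BuildComponent.find('wicket').builds.each do |build|
+ # puts build.number
+ # end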
+ # + # @return [Collection::Build] + # the list of builds + # + def builds + @builds ||= Collection::Build.new(self, name: name) do + Resource::Build.all(name) + end + end + + # + # Remove this component's build data stored in Artifactory + # + # @option options [Array] :build_numbers (default: nil) + # an array of build numbers that should be deleted; if not given + # all builds (for this component) are deleted + # @option options [Boolean] :artifacts (default: +false+) + # if true the component's artifacts are also removed + # @option options [Boolean] :delete_all (default: +false+) + # if true the entire component is removed + # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete(options = {}) + params = {}.tap do |param| + param[:buildNumbers] = options[:build_numbers].join(",") if options[:build_numbers] + param[:artifacts] = 1 if options[:artifacts] + param[:deleteAll] = 1 if options[:delete_all] + end + + endpoint = api_path + "?#{to_query_string_parameters(params)}" + client.delete(endpoint, {}) + true + rescue Error::HTTPError => e + false + end + + # + # Rename a build component. + # + # @param [String] new_name + # new name for the component + # + # @return [Boolean] + # true if the object was renamed successfully, false otherwise + # + def rename(new_name, options = {}) + endpoint = "/api/build/rename/#{url_safe(name)}" + "?to=#{new_name}" + client.post(endpoint, {}) + true + rescue Error::HTTPError => e + false + end + + private + + # + # The path to this build component on the server. + # + # @return [String] + # + def api_path + "/api/build/#{url_safe(name)}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/certificate.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/certificate.rb new file mode 100644 index 0000000..76b2c75 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/certificate.rb @@ -0,0 +1,90 @@ +module Artifactory + class Resource::Certificate < Resource::Base + class << self + # + # Get a list of all certificates in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of builds + # + def all(options = {}) + client = extract_client!(options) + client.get("/api/system/security/certificates").map do |cert| + from_hash(cert, client: client) + end.compact + end + + # + # @see Artifactory::Resource::Base.from_hash + # + def from_hash(hash, options = {}) + super.tap do |instance| + instance.issued_on = Time.parse(instance.issued_on) rescue nil + instance.valid_until = Time.parse(instance.valid_until) rescue nil + end + end + end + + attribute :certificate_alias, -> { raise "Certificate alias missing!" } + attribute :fingerprint + attribute :issued_by + attribute :issued_on + attribute :issued_to + attribute :local_path, -> { raise "Local destination missing!" } + attribute :valid_until + + # + # Delete this certificate from artifactory, suppressing any +ResourceNotFound+ + # exceptions might occur. + # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete + client.delete(api_path) + true + rescue Error::HTTPError + false + end + + # + # Upload a certificate. If the first parameter is a File object, that file + # descriptor is passed to the uploader. 
If the first parameter is a string,
+ # it is assumed to be a path to a local file on disk. This method will
+ # automatically construct the File object from the given path.
+ #
+ # @example Upload a certificate from a File instance
+ # certificate = Certificate.new(local_path: '/path/to/cert.pem', certificate_alias: 'test')
+ # certificate.upload
+ #
+ # @return [Resource::Certificate]
+ #
+ def upload
+ file = File.new(File.expand_path(local_path))
+ headers = { "Content-Type" => "application/text" }
+
+ response = client.post(api_path, file, headers)
+
+ return unless response.is_a?(Hash)
+
+ self.class.all.select { |x| x.certificate_alias.eql?(certificate_alias) }.first
+ end
+
+ private
+
+ #
+ # The path to this certificate on the server.
+ #
+ # @return [String]
+ #
+ def api_path
+ "/api/system/security/certificates/#{url_safe(certificate_alias)}"
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/group.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/group.rb
new file mode 100644
index 0000000..ad4c02f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/group.rb
@@ -0,0 +1,126 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Artifactory
+ class Resource::Group < Resource::Base
+ class << self
+ #
+ # Get a list of all groups in the system.
+ #
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Array<Resource::Group>]
+ # the list of groups
+ #
+ def all(options = {})
+ client = extract_client!(options)
+ client.get("/api/security/groups").map do |hash|
+ from_url(hash["uri"], client: client)
+ end
+ end
+
+ #
+ # Find (fetch) a group by its name.
+ #
+ # @example Find a group by its name
+ # Group.find('readers') #=> #<Resource::Group ...>
+ #
+ # @param [String] name
+ # the name of the group to find
+ # @param [Hash] options
+ # the list of options
+ #
+ # @option options [Artifactory::Client] :client
+ # the client object to make the request with
+ #
+ # @return [Resource::Group, nil]
+ # an instance of the group that matches the given name, or +nil+
+ # if one does not exist
+ #
+ def find(name, options = {})
+ client = extract_client!(options)
+
+ response = client.get("/api/security/groups/#{url_safe(name)}")
+ from_hash(response, client: client)
+ rescue Error::HTTPError => e
+ raise unless e.code == 404
+
+ nil
+ end
+ end
+
+ attribute :auto_join
+ attribute :description
+ attribute :name, -> { raise "Name missing!" }
+ attribute :realm
+ attribute :realm_attributes
+
+ #
+ # Delete this group from artifactory, suppressing any +ResourceNotFound+
+ # exceptions that might occur.
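+ #
+ # @example Delete a group (an illustrative sketch; assumes a 'readers'
+ # group exists)
+ # Group.find('readers').delete #=> true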
+ # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete + client.delete(api_path) + true + rescue Error::HTTPError + false + end + + # + # Creates or updates a group configuration depending on if the + # group configuration previously existed. + # + # @return [Boolean] + # + def save + if self.class.find(name, client: client) + client.post(api_path, to_json, headers) + else + client.put(api_path, to_json, headers) + end + true + end + + private + + # + # The path to this group on the server. + # + # @return [String] + # + def api_path + @api_path ||= "/api/security/groups/#{url_safe(name)}" + end + + # + # The default headers for this object. This includes the +Content-Type+. + # + # @return [Hash] + # + def headers + @headers ||= { + "Content-Type" => "application/vnd.org.jfrog.artifactory.security.Group+json", + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/layout.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/layout.rb new file mode 100644 index 0000000..5358850 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/layout.rb @@ -0,0 +1,74 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "rexml/document" + +module Artifactory + class Resource::Layout < Resource::Base + class << self + # + # Get a list of all repository layouts in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of layouts + # + def all(options = {}) + config = Resource::System.configuration(options) + list_from_config("config/repoLayouts/repoLayout", config, options) + end + + # + # Find (fetch) a layout by its name. + # + # @example Find a layout by its name + # Layout.find('maven-2-default') #=> # + # + # @param [String] name + # the name of the layout to find + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::Layout, nil] + # an instance of the layout that matches the given name, or +nil+ + # if one does not exist + # + def find(name, options = {}) + config = Resource::System.configuration(options) + find_from_config("config/repoLayouts/repoLayout/name[text()='#{name}']", config, options) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + end + + attribute :name, -> { raise "Name missing!" 
} + attribute :artifact_path_pattern + attribute :distinctive_descriptor_path_pattern, true + attribute :descriptor_path_pattern + attribute :folder_integration_revision_reg_exp + attribute :file_integration_revision_reg_exp + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/ldap_setting.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/ldap_setting.rb new file mode 100644 index 0000000..adebf76 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/ldap_setting.rb @@ -0,0 +1,124 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "rexml/document" + +module Artifactory + class Resource::LDAPSetting < Resource::Base + class << self + # + # Get a list of all ldap settings in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of layouts + # + def all(options = {}) + config = Resource::System.configuration(options) + list_from_config("config/security/ldapSettings/ldapSetting", config, options) + end + + # + # Find (fetch) an ldap setting by its name. + # + # @example Find an LDAPSetting by its name. 
+ # ldap_config.find('ldap.example.com') #=> # + # + # @param [String] name + # the name of the ldap config setting to find + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::LDAPSetting, nil] + # an instance of the ldap setting that matches the given name, or +nil+ + # if one does not exist + # + def find(name, options = {}) + config = Resource::System.configuration(options) + find_from_config("config/security/ldapSettings/ldapSetting/key[text()='#{name}']", config, options) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + + private + + # + # List all the child text elements in the Artifactory configuration file + # of a node matching the specified xpath + # + # @param [String] xpath + # xpath expression for the parent element whose children are to be listed + # + # @param [REXML] config + # Artifactory config as an REXML file + # + # @param [Hash] options + # the list of options + # + def list_from_config(xpath, config, options = {}) + REXML::XPath.match(config, xpath).map do |r| + hash = Util.xml_to_hash(r, "search") + from_hash(hash, options) + end + end + + # + # Find all the sibling text elements in the Artifactory configuration file + # of a node matching the specified xpath + # + # @param [String] xpath + # xpath expression for the element whose siblings are to be found + # + # @param [REXML] config + # Artifactory configuration file as an REXML doc + # + # @param [Hash] options + # the list of options + # + def find_from_config(xpath, config, options = {}) + name_node = REXML::XPath.match(config, xpath) + return nil if name_node.empty? + + properties = Util.xml_to_hash(name_node[0].parent, "search") + from_hash(properties, options) + end + end + + # Ordered to match the artifactory xsd to make consuming the attributes + # hash when writing artifactory xml more convenient. + # http://bit.ly/UHMrHc + attribute :key, -> { raise "name missing!" } + attribute :enabled, true + attribute :ldap_url + attribute :search_filter + attribute :search_base + attribute :search_sub_tree + attribute :manager_dn + attribute :manager_password + attribute :auto_create_user + attribute :email_attribute, "mail" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/mail_server.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/mail_server.rb new file mode 100644 index 0000000..15d84d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/mail_server.rb @@ -0,0 +1,78 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require "rexml/document" + +module Artifactory + class Resource::MailServer < Resource::Base + class << self + # + # Get a list of all mail servers in the system. 
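+ #
+ # @example List the hosts of all configured mail servers (an
+ # illustrative sketch; the host value is an assumption)
+ # MailServer.all.map(&:host) #=> ['smtp.gmail.com']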
+ # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of layouts + # + def all(options = {}) + config = Resource::System.configuration(options) + list_from_config("config/mailServer", config, options) + end + + # + # Find (fetch) a mail server by its host. + # + # @example Find a MailServer by its host. + # mail_server.find('smtp.gmail.com') #=> # + # + # @param [String] host + # the host of the mail server to find + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::MailServer, nil] + # an instance of the mail server that matches the given host, or +nil+ + # if one does not exist + # + def find(host, options = {}) + config = Resource::System.configuration(options) + find_from_config("config/mailServer/host[text()='#{host}']", config, options) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + end + + attribute :enabled + attribute :host, -> { raise "host missing!" } + attribute :port + attribute :username + attribute :password + attribute :from + attribute :subject_prefix + attribute :tls + attribute :ssl + attribute :artifactory_url + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/permission_target.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/permission_target.rb new file mode 100644 index 0000000..3a3fba1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/permission_target.rb @@ -0,0 +1,222 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Resource::PermissionTarget < Resource::Base + VERBOSE_PERMS = { + "d" => "delete", + "m" => "admin", + "n" => "annotate", + "r" => "read", + "w" => "deploy", + }.freeze + class << self + # + # Get a list of all PermissionTargets in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of PermissionTargets + # + def all(options = {}) + client = extract_client!(options) + client.get("/api/security/permissions").map do |hash| + from_url(hash["uri"], client: client) + end + end + + # + # Find (fetch) a permission target by its name. 
+ # + # @example Find a permission target by its name + # PermissionTarget.find('readers') #=> # + # + # @param [String] name + # the name of the permission target to find + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::PermissionTarget, nil] + # an instance of the permission target that matches the given name, or +nil+ + # if one does not exist + # + def find(name, options = {}) + client = extract_client!(options) + + response = client.get("/api/security/permissions/#{url_safe(name)}") + from_hash(response, client: client) + rescue Error::HTTPError => e + raise unless e.code == 404 + + nil + end + + # + # @see Resource::Base.from_hash + # Additionally use verbose names for permissions (e.g. 'read' for 'r') + # + def from_hash(hash, options = {}) + super.tap do |instance| + %w{users groups}.each do |key| + if instance.principals[key] && !instance.principals[key].nil? + instance.principals[key] = Hash[instance.principals[key].map { |k, v| [k, verbose(v)] } ] + end + end + end + end + + private + + # + # Replace an array of permissions with one using verbose permission names + # + def verbose(array) + array.map { |elt| VERBOSE_PERMS[elt] }.sort + end + end + + class Principal + attr_accessor :users, :groups + + def initialize(users = {}, groups = {}) + @users = users + @groups = groups + end + + # + # Converts the user-friendly form of the principals hash to one suitable + # for posting to Artifactory. + # @return [Hash] + # + def to_abbreviated + { "users" => abbreviate_principal(@users), "groups" => abbreviate_principal(@groups) } + end + + private + + # + # Replace an array of verbose permission names with an equivalent array of abbreviated permission names. + # + def abbreviate_permissions(array) + inverse = VERBOSE_PERMS.invert + if (inverse.keys & array).sort != array.sort + raise "One of your principals contains an invalid permission. Valid permissions are #{inverse.keys.join(", ")}" + end + + array.map { |elt| inverse[elt] }.sort + end + + # + # Replace a principal with verbose permissions with an equivalent one with abbreviated permissions. + # + def abbreviate_principal(principal_hash) + Hash[principal_hash.map { |k, v| [k, abbreviate_permissions(v)] } ] + end + end + + attribute :name, -> { raise "Name missing!" } + attribute :includes_pattern, "**" + attribute :excludes_pattern, "" + attribute :repositories + attribute :principals, { "users" => {}, "groups" => {} } + + def client_principal + @client_principal ||= Principal.new(principals["users"], principals["groups"]) + end + + # + # Delete this PermissionTarget from artifactory, suppressing any +ResourceNotFound+ + # exceptions might occur. + # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete + client.delete(api_path) + true + rescue Error::HTTPError + false + end + + # + # Save the PermissionTarget to the artifactory server. 
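+ #
+ # @example Create a permission target (an illustrative sketch; the
+ # name, repository key, and principals are assumptions)
+ # target = PermissionTarget.new(name: 'deployers')
+ # target.repositories = ['libs-release-local']
+ # target.users = { 'jenkins' => ['read', 'deploy'] }
+ # target.save #=> true
+ #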
+ # See http://bit.ly/1qMOw0L + # + # @return [Boolean] + # + def save + send("principals=", client_principal.to_abbreviated) + client.put(api_path, to_json, headers) + true + end + + # + # Getter for groups + # + def groups + client_principal.groups + end + + # + # Setter for groups (groups_hash expected to be friendly) + # + def groups=(groups_hash) + client_principal.groups = Hash[groups_hash.map { |k, v| [k, v.sort] } ] + end + + # + # Getter for users + # + def users + client_principal.users + end + + # + # Setter for users (expecting users_hash to be friendly) + # + def users=(users_hash) + client_principal.users = Hash[users_hash.map { |k, v| [k, v.sort] } ] + end + + private + + # + # The path to this PermissionTarget on the server. + # + # @return [String] + # + def api_path + @api_path ||= "/api/security/permissions/#{url_safe(name)}" + end + + # + # The default headers for this object. This includes the +Content-Type+. + # + # @return [Hash] + # + def headers + @headers ||= { + "Content-Type" => "application/vnd.org.jfrog.artifactory.security.PermissionTarget+json", + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/plugin.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/plugin.rb new file mode 100644 index 0000000..e869624 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/plugin.rb @@ -0,0 +1,38 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Resource::Plugin < Resource::Base + class << self + # + # Get a list of all plugins in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of builds + # + def all(options = {}) + client = extract_client!(options) + client.get("/api/plugins") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/repository.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/repository.rb new file mode 100644 index 0000000..8dec3cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/repository.rb @@ -0,0 +1,234 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +module Artifactory + class Resource::Repository < Resource::Base + class << self + # + # Get a list of all repositories in the system. + # + # @param [Hash] options + # the list of options + # + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Array] + # the list of builds + # + def all(options = {}) + client = extract_client!(options) + client.get("/api/repositories").map do |hash| + find(hash["key"], client: client) + end.compact + end + + # + # Find (fetch) a repository by name. + # + # @example Find a repository by named key + # Repository.find(name: 'libs-release-local') #=> # + # + # @param [Hash] options + # the list of options + # + # @option options [String] :name + # the name of the repository to find + # @option options [Artifactory::Client] :client + # the client object to make the request with + # + # @return [Resource::Repository, nil] + # an instance of the repository that matches the given name, or +nil+ + # if one does not exist + # + def find(name, options = {}) + client = extract_client!(options) + + response = client.get("/api/repositories/#{url_safe(name)}") + from_hash(response, client: client) + rescue Error::HTTPError => e + raise unless e.code == 400 + + nil + end + end + + attribute :blacked_out, false + attribute :description + attribute :checksum_policy_type, "client-checksums" + attribute :excludes_pattern, "" + attribute :handle_releases, true + attribute :handle_snapshots, true + attribute :includes_pattern, "**/*" + attribute :key, -> { raise "Key is missing!" } + attribute :max_unique_snapshots, 0 + attribute :notes + attribute :package_type, "generic" + attribute :property_sets, [] + attribute :repo_layout_ref, "simple-default" + attribute :rclass, "local" + attribute :snapshot_version_behavior, "non-unique" + attribute :suppress_pom_consistency_checks, false + attribute :url, "" + attribute :yum_root_depth, 0 + attribute :calculate_yum_metadata, false + attribute :repositories, [] + attribute :external_dependencies_enabled, false + attribute :client_tls_certificate, "" + + # + # Creates or updates a repository configuration depending on if the + # repository configuration previously existed. This method also works + # around Artifactory's dangerous default behavior: + # + # > An existing repository with the same key are removed from the + # > configuration and its content is removed! + # + # @return [Boolean] + # + def save + if self.class.find(key, client: client) + client.post(api_path, to_json, headers) + else + client.put(api_path, to_json, headers) + end + true + end + + # + # Upload to a given repository + # + # @see Artifact#upload Upload syntax examples + # + # @return [Resource::Artifact] + # + def upload(local_path, remote_path, properties = {}, headers = {}) + artifact = Resource::Artifact.new(local_path: local_path) + artifact.upload(key, remote_path, properties, headers) + end + + # + # Upload an artifact with the given SHA checksum. Consult the artifactory + # documentation for the possible responses when the checksums fail to + # match. + # + # @see Artifact#upload_with_checksum More syntax examples + # + def upload_with_checksum(local_path, remote_path, checksum, properties = {}) + artifact = Resource::Artifact.new(local_path: local_path) + artifact.upload_with_checksum(key, remote_path, checksum, properties) + end + + # + # Upload an artifact with the given archive. Consult the artifactory + # documentation for the format of the archive to upload. 
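+ #
+ # @example Explode an archive into this repository (an illustrative
+ # sketch; the paths are assumptions)
+ # repo = Repository.find('libs-release-local')
+ # repo.upload_from_archive('/tmp/bundle.zip', '/target/dir')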
+ # + # @see Artifact#upload_from_archive More syntax examples + # + def upload_from_archive(local_path, remote_path, properties = {}) + artifact = Resource::Artifact.new(local_path: local_path) + artifact.upload_from_archive(key, remote_path, properties) + end + + # + # The list of artifacts in this repository on the remote artifactory + # server. + # + # @see Artifact.search Search syntax examples + # + # @example Get the list of artifacts for a repository + # repo = Repository.new('libs-release-local') + # repo.artifacts #=> [#, ...] + # + # @return [Collection::Artifact] + # the list of artifacts + # + def artifacts + @artifacts ||= Collection::Artifact.new(self, repos: key) do + Resource::Artifact.search(name: ".*", repos: key) + end + end + + # + # + # + def files + response = client.get("/api/storage/#{url_safe(key)}", { + deep: 0, + listFolders: 0, + mdTimestamps: 0, + includeRootPath: 0, + }) + + response["children"] + end + + # + # Delete this repository from artifactory, suppressing any +ResourceNotFound+ + # exceptions might occur. + # + # @return [Boolean] + # true if the object was deleted successfully, false otherwise + # + def delete + client.delete(api_path) + true + rescue Error::HTTPError => e + false + end + + private + + # + # The path to this repository on the server. + # + # @return [String] + # + def api_path + "/api/repositories/#{url_safe(key)}" + end + + # + # The default headers for this object. This includes the +Content-Type+. + # + # @return [Hash] + # + def headers + @headers ||= { + "Content-Type" => content_type, + } + end + + # + # The default Content-Type for this repository. It varies based on the + # repository type. + # + # @return [String] + # + def content_type + case rclass.to_s.downcase + when "local" + "application/vnd.org.jfrog.artifactory.repositories.LocalRepositoryConfiguration+json" + when "remote" + "application/vnd.org.jfrog.artifactory.repositories.RemoteRepositoryConfiguration+json" + when "virtual" + "application/vnd.org.jfrog.artifactory.repositories.VirtualRepositoryConfiguration+json" + else + raise "Unknown Repository type `#{rclass}'!" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/system.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/system.rb new file mode 100644 index 0000000..5e9f16f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/system.rb @@ -0,0 +1,134 @@ +# +# Copyright 2014-2018 Chef Software, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Artifactory + class Resource::System < Resource::Base + class << self + # + # Get general system information. + # + # @example Get the system information + # System.info #=> "..." 
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/url_base.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/url_base.rb
new file mode 100644
index 0000000..728a752
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/url_base.rb
@@ -0,0 +1,92 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require "rexml/document"
+
+module Artifactory
+  class Resource::URLBase < Resource::Base
+    class << self
+      #
+      # List the URL bases in the system configuration.
+      #
+      # @param [Hash] options
+      #   the list of options
+      #
+      # @option options [Artifactory::Client] :client
+      #   the client object to make the request with
+      #
+      # @return [Array]
+      #   the list of URL bases
+      #
+      def all(options = {})
+        config = Resource::System.configuration(options)
+        simple_text_from_config("config/urlBase", config, options)
+      end
+
+      #
+      # Find (fetch) the url base.
+      #
+      # @example Find a URLBase by its url
+      #   URLBase.find('http://33.33.33.20/artifactory') #=> #<Resource::URLBase ...>
+      #
+      # @param [String] url
+      #   the base url to find
+      # @param [Hash] options
+      #   the list of options
+      #
+      # @option options [Artifactory::Client] :client
+      #   the client object to make the request with
+      #
+      # @return [Resource::URLBase, nil]
+      #   an instance of the URL base that matches the given url, or +nil+
+      #   if one does not exist
+      #
+      def find(url, options = {})
+        config = Resource::System.configuration(options)
+        find_from_config("config/urlBase[text()='#{url}']", config, options)
+      rescue Error::HTTPError => e
+        raise unless e.code == 404
+
+        nil
+      end
+
+      private
+
+      #
+      # List all the text elements in the Artifactory configuration file
+      # matching the given xpath. Ignore any children of elements that match
+      # the xpath.
+      #
+      # @param [String] xpath
+      #   xpath expression for which matches are to be listed
+      #
+      # @param [REXML] config
+      #   Artifactory config as an REXML file
+      #
+      # @param [Hash] options
+      #   the list of options
+      #
+      def simple_text_from_config(xpath, config, options = {})
+        REXML::XPath.match(config, xpath).map do |r|
+          hash = {}
+          hash[r.name] = r.text
+          from_hash(hash, options)
+        end
+      end
+    end
+
+    attribute :url_base, -> { raise "URL base missing!" }
+  end
+end
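Because URLBase is read out of the configuration document rather than a dedicated endpoint, usage is just the two class methods. A sketch; the URL is the same illustrative value as in the docs above:

```ruby
require "artifactory"

# Every <urlBase> text node in the system configuration (usually one).
Artifactory::Resource::URLBase.all.map(&:url_base)

# Exact-match XPath lookup; returns nil when that base URL is not configured.
base = Artifactory::Resource::URLBase.find("http://33.33.33.20/artifactory")
base&.url_base
```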
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/user.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/user.rb
new file mode 100644
index 0000000..40deaa5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/resources/user.rb
@@ -0,0 +1,130 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Artifactory
+  class Resource::User < Resource::Base
+    class << self
+      #
+      # Get a list of all users in the system.
+      #
+      # @param [Hash] options
+      #   the list of options
+      #
+      # @option options [Artifactory::Client] :client
+      #   the client object to make the request with
+      #
+      # @return [Array]
+      #   the list of users
+      #
+      def all(options = {})
+        client = extract_client!(options)
+        client.get("/api/security/users").map do |hash|
+          from_url(hash["uri"], client: client)
+        end
+      end
+
+      #
+      # Find (fetch) a user by name.
+      #
+      # @example Find a user by name
+      #   User.find('admin') #=> #<Resource::User ...>
+      #
+      # @param [String] name
+      #   the name of the user to find
+      # @param [Hash] options
+      #   the list of options
+      #
+      # @option options [Artifactory::Client] :client
+      #   the client object to make the request with
+      #
+      # @return [Resource::User, nil]
+      #   an instance of the user that matches the given name, or +nil+
+      #   if one does not exist
+      #
+      def find(name, options = {})
+        client = extract_client!(options)
+
+        response = client.get("/api/security/users/#{url_safe(name)}")
+        from_hash(response, client: client)
+      rescue Error::HTTPError => e
+        raise unless e.code == 404
+
+        nil
+      end
+    end
+
+    attribute :admin, false
+    attribute :email
+    attribute :groups, []
+    attribute :internal_password_disabled, false
+    attribute :last_logged_in
+    attribute :name, -> { raise "Name missing" }
+    attribute :password # write-only, never returned
+    attribute :profile_updatable, true
+    attribute :realm
+
+    #
+    # Delete this user from artifactory, suppressing any
+    # +ResourceNotFound+ exceptions that might occur.
+    #
+    # @return [Boolean]
+    #   true if the object was deleted successfully, false otherwise
+    #
+    def delete
+      client.delete(api_path)
+      true
+    rescue Error::HTTPError
+      false
+    end
+
+    #
+    # Creates or updates a user configuration depending on if the
+    # user configuration previously existed.
+    #
+    # @return [Boolean]
+    #
+    def save
+      if self.class.find(name, client: client)
+        client.post(api_path, to_json, headers)
+      else
+        client.put(api_path, to_json, headers)
+      end
+      true
+    end
+
+    private
+
+    #
+    # The path to this user on the server.
+    #
+    # @return [String]
+    #
+    def api_path
+      @api_path ||= "/api/security/users/#{url_safe(name)}"
+    end
+
+    #
+    # The default headers for this object. This includes the +Content-Type+.
+    #
+    # @return [Hash]
+    #
+    def headers
+      @headers ||= {
+        "Content-Type" => "application/vnd.org.jfrog.artifactory.security.User+json",
+      }
+    end
+  end
+end
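A sketch of the create/find/delete cycle for users, using only the attributes declared above; the account name, email, and group are illustrative:

```ruby
require "artifactory"

# Create-or-update: #save POSTs when the name exists and PUTs otherwise.
user = Artifactory::Resource::User.new(
  name:     "deployer",             # illustrative account name
  email:    "deployer@example.com",
  password: "s3cret!",              # write-only, never returned by the API
  groups:   ["deployers"]
)
user.save

# Lookup returns nil rather than raising when the user does not exist.
Artifactory::Resource::User.find("deployer")&.email

# Delete swallows HTTP errors and reports the outcome as a boolean.
user.delete #=> true or false
```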
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/util.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/util.rb
new file mode 100644
index 0000000..77340b6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/util.rb
@@ -0,0 +1,157 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Artifactory
+  module Util
+    extend self
+
+    #
+    # Convert the given CaMelCaSeD string to under_score. Graciously borrowed
+    # from http://stackoverflow.com/questions/1509915.
+    #
+    # @param [String] string
+    #   the string to use for transformation
+    #
+    # @return [String]
+    #
+    def underscore(string)
+      string
+        .to_s
+        .gsub(/::/, "/")
+        .gsub(/([A-Z]+)([A-Z][a-z])/, '\1_\2')
+        .gsub(/([a-z\d])([A-Z])/, '\1_\2')
+        .tr("-", "_")
+        .downcase
+    end
+
+    #
+    # Convert an underscored string to its camelcase equivalent constant.
+    #
+    # @param [String] string
+    #   the string to convert
+    #
+    # @return [String]
+    #
+    def camelize(string, lowercase = false)
+      result = string
+        .to_s
+        .split("_")
+        .map(&:capitalize)
+        .join
+
+      if lowercase
+        result[0, 1].downcase + result[1..-1]
+      else
+        result
+      end
+    end
+
+    #
+    # Truncate the given string to a certain number of characters.
+    #
+    # @param [String] string
+    #   the string to truncate
+    # @param [Hash] options
+    #   the list of options (such as +length+)
+    #
+    def truncate(string, options = {})
+      length = options[:length] || 30
+
+      if string.length > length
+        string[0, length - 3] + "..." # leave room for the ellipsis
+      else
+        string
+      end
+    end
+
+    #
+    # Rename a list of keys to the given map.
+    #
+    # @example Rename the given keys
+    #   rename_keys(hash, foo: :bar, zip: :zap)
+    #
+    # @param [Hash] options
+    #   the options to map
+    # @param [Hash] map
+    #   the map of keys to map
+    #
+    # @return [Hash]
+    #
+    def rename_keys(options, map = {})
+      Hash[options.map { |k, v| [map[k] || k, v] }]
+    end
+
+    #
+    # Slice the given list of options with the given keys.
+    #
+    # @param [Hash] options
+    #   the list of options to slice
+    # @param [Array] keys
+    #   the keys to slice
+    #
+    # @return [Hash]
+    #   the sliced hash
+    #
+    def slice(options, *keys)
+      keys.inject({}) do |hash, key|
+        hash[key] = options[key] if options[key]
+        hash
+      end
+    end
+
+    #
+    # Flatten an XML element, with at most one child node with children,
+    # into a hash.
+    #
+    # @param [REXML] element
+    #   xml element
+    #
+    def xml_to_hash(element, child_with_children = "", unique_children = true)
+      properties = {}
+      element.each_element_with_text do |e|
+        if e.name.eql?(child_with_children)
+          if unique_children
+            e.each_element_with_text do |t|
+              properties[t.name] = to_type(t.text)
+            end
+          else
+            children = []
+            e.each_element_with_text do |t|
+              properties[t.name] = children.push(to_type(t.text))
+            end
+          end
+        else
+          properties[e.name] = to_type(e.text)
+        end
+      end
+      properties
+    end
+
+    def to_type(string)
+      return true if string.eql?("true")
+      return false if string.eql?("false")
+      return string.to_i if numeric?(string)
+
+      string
+    end
+
+    private
+
+    def numeric?(string)
+      string.to_i.to_s == string || string.to_f.to_s == string
+    end
+  end
+end
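The helpers are module functions (via extend self), so they can be called directly. A few illustrative calls; the truncate output assumes the off-by-one fix applied above, so results come out at exactly +length+ characters:

```ruby
require "artifactory"

Artifactory::Util.underscore("Resource::PermissionTarget")
#=> "resource/permission_target"

Artifactory::Util.camelize("permission_target")        #=> "PermissionTarget"
Artifactory::Util.camelize("permission_target", true)  #=> "permissionTarget"

Artifactory::Util.truncate("a" * 40, length: 10)       #=> "aaaaaaa..."

Artifactory::Util.rename_keys({ foo: 1, zip: 2 }, foo: :bar)
#=> { bar: 1, zip: 2 }

Artifactory::Util.slice({ a: 1, b: 2, c: 3 }, :a, :c)  #=> { a: 1, c: 3 }
```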
diff --git a/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/version.rb b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/version.rb
new file mode 100644
index 0000000..5fe5d15
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/artifactory-3.0.15/lib/artifactory/version.rb
@@ -0,0 +1,19 @@
+#
+# Copyright 2014-2018 Chef Software, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +module Artifactory + VERSION = "3.0.15".freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.gitignore b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.gitignore new file mode 100644 index 0000000..b04a8c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.gitignore @@ -0,0 +1,11 @@ +/.bundle/ +/.yardoc +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ + +# rspec failure tracking +.rspec_status diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rspec b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rspec new file mode 100644 index 0000000..34c5164 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rspec @@ -0,0 +1,3 @@ +--format documentation +--color +--require spec_helper diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop.yml new file mode 100644 index 0000000..3ffe2b0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop.yml @@ -0,0 +1,2 @@ +inherit_from: .rubocop_todo.yml + diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop_todo.yml b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop_todo.yml new file mode 100644 index 0000000..826a7a2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.rubocop_todo.yml @@ -0,0 +1,32 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2018-02-02 08:32:23 -0800 using RuboCop version 0.52.1. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +# Configuration parameters: Include. +# Include: **/*.gemspec +Gemspec/RequiredRubyVersion: + Exclude: + - 'atomos.gemspec' + +# Offense count: 1 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 14 + +# Offense count: 1 +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + - 'lib/atomos.rb' + +# Offense count: 7 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 97 diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.travis.yml new file mode 100644 index 0000000..6a8e36f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/.travis.yml @@ -0,0 +1,5 @@ +sudo: false +language: ruby +rvm: + - 2.5.0 +before_install: gem install bundler -v 1.16.1 diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..3399e24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at segiddins@squareup.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile new file mode 100644 index 0000000..2d1d7e6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +# Specify your gem's dependencies in atomos.gemspec +gemspec diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile.lock new file mode 100644 index 0000000..edfb2e5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Gemfile.lock @@ -0,0 +1,51 @@ +PATH + remote: . + specs: + atomos (0.1.3) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.3.0) + diff-lcs (1.3) + parallel (1.12.1) + parser (2.4.0.2) + ast (~> 2.3) + powerpack (0.1.1) + rainbow (3.0.0) + rake (10.5.0) + rspec (3.7.0) + rspec-core (~> 3.7.0) + rspec-expectations (~> 3.7.0) + rspec-mocks (~> 3.7.0) + rspec-core (3.7.1) + rspec-support (~> 3.7.0) + rspec-expectations (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-mocks (3.7.0) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.7.0) + rspec-support (3.7.0) + rubocop (0.52.1) + parallel (~> 1.10) + parser (>= 2.4.0.2, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 4.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-progressbar (1.9.0) + unicode-display_width (1.3.0) + +PLATFORMS + ruby + +DEPENDENCIES + atomos! + bundler (~> 1.16) + rake (~> 10.0) + rspec (~> 3.0) + rubocop + +BUNDLED WITH + 1.16.3 diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/LICENSE.txt new file mode 100644 index 0000000..7a54c62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Samuel Giddins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/README.md b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/README.md new file mode 100644 index 0000000..de832a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/README.md @@ -0,0 +1,43 @@ +# Atomos + +Welcome to your new gem! In this directory, you'll find the files you need to be able to package up your Ruby library into a gem. Put your Ruby code in the file `lib/atomos`. To experiment with that code, run `bin/console` for an interactive prompt. + +TODO: Delete this and the text above, and describe your gem + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'atomos' +``` + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install atomos + +## Usage + +TODO: Write usage instructions here + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/atomos. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. + +## License + +The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). + +## Code of Conduct + +Everyone interacting in the Atomos project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/[USERNAME]/atomos/blob/master/CODE_OF_CONDUCT.md). 
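The Usage section above is still the gem template's TODO; going by lib/atomos.rb further down in this diff, the public surface is a single Atomos.atomic_write call. A sketch with illustrative paths:

```ruby
require 'atomos'

# Contents are written to a tempfile first and File.rename'd over the
# destination, so readers never observe a partially written file.
Atomos.atomic_write('/tmp/config.json', '{"ok":true}')

# Alternatively, stream into the tempfile via a block instead of contents.
Atomos.atomic_write('/tmp/report.txt') do |file|
  file << "line 1\n"
  file << "line 2\n"
end
```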
diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Rakefile b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Rakefile new file mode 100644 index 0000000..8ce173e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/Rakefile @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require 'bundler/gem_tasks' + +require 'rspec/core/rake_task' +require 'rubocop/rake_task' + +RSpec::Core::RakeTask.new +RuboCop::RakeTask.new + +task default: %i[rubocop spec] diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/VERSION b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/VERSION new file mode 100644 index 0000000..b1e80bb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/VERSION @@ -0,0 +1 @@ +0.1.3 diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/atomos.gemspec b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/atomos.gemspec new file mode 100644 index 0000000..7ad4922 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/atomos.gemspec @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +Gem::Specification.new do |spec| + spec.name = 'atomos' + spec.version = File.read(File.expand_path('../VERSION', __FILE__)) + spec.authors = ['Samuel Giddins'] + spec.email = ['segiddins@segiddins.me'] + + spec.summary = 'A simple gem to atomically write files' + spec.homepage = 'https://github.com/segiddins/atomos' + spec.license = 'MIT' + + spec.files = `git ls-files -z`.split("\x0").reject do |f| + f.match(%r{^(test|spec|features)/}) + end + spec.bindir = 'exe' + spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } + spec.require_paths = ['lib'] + + spec.required_ruby_version = '>= 2.0' + + spec.add_development_dependency 'bundler', '~> 1.16' + spec.add_development_dependency 'rake', '~> 10.0' + spec.add_development_dependency 'rspec', '~> 3.0' + spec.add_development_dependency 'rubocop' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/console b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/console new file mode 100644 index 0000000..535613d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/console @@ -0,0 +1,15 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require 'bundler/setup' +require 'atomos' + +# You can add fixtures and/or initialization code here to make experimenting +# with your gem easier. You can also use a different console, if you like. + +# (If you use this, don't forget to add pry to your Gemfile!) +# require "pry" +# Pry.start + +require 'irb' +IRB.start(__FILE__) diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rake b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rake new file mode 100644 index 0000000..8226b57 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rake @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rake' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. 
+Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rake', 'rake') diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rspec b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rspec new file mode 100644 index 0000000..d086973 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rspec @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rspec' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rspec-core', 'rspec') diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rubocop b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rubocop new file mode 100644 index 0000000..8424d87 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/rubocop @@ -0,0 +1,29 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'rubocop' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require 'pathname' +ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', + Pathname.new(__FILE__).realpath) + +bundle_binstub = File.expand_path('../bundle', __FILE__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 150).match?(/This file was generated by Bundler/) + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require 'rubygems' +require 'bundler/setup' + +load Gem.bin_path('rubocop', 'rubocop') diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/setup b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/setup new file mode 100644 index 0000000..dce67d8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/bin/setup @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' +set -vx + +bundle install + +# Do any other automated setup that you need to do here diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos.rb b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos.rb new file mode 100644 index 0000000..4b56d05 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +require 'atomos/version' + +module Atomos + module_function + + # rubocop:disable Metrics/MethodLength + def atomic_write(dest, contents = nil, tmpdir: nil, &block) + unless contents.nil? ^ block.nil? 
+ raise ArgumentError, 'must provide either contents or a block' + end + + tmpdir = Atomos.default_tmpdir_for_file(dest, tmpdir) + + require 'tempfile' + Tempfile.open(".atomos.#{File.basename(dest)}", tmpdir) do |tmpfile| + if contents + tmpfile << contents + else + retval = yield tmpfile + end + + tmpfile.close + + File.rename(tmpfile.path, dest) + + retval + end + end + # rubocop:enable Metrics/MethodLength + + def self.default_tmpdir_for_file(dest, tmpdir) + tmpdir ||= begin + require 'tmpdir' + Dir.tmpdir + end + + # Ensure the destination is on the same device as tmpdir + if File.stat(tmpdir).dev != File.stat(File.dirname(dest)).dev + # If not, use the directory of the destination as the tmpdir. + tmpdir = File.dirname(dest) + end + + tmpdir + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos/version.rb b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos/version.rb new file mode 100644 index 0000000..f52f703 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/atomos-0.1.3/lib/atomos/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Atomos + VERSION = File.read(File.expand_path('../../../VERSION', __FILE__)) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/CHANGELOG.md new file mode 100644 index 0000000..bab0747 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/CHANGELOG.md @@ -0,0 +1,37 @@ +Unreleased Changes +------------------ + +1.2.0 (2021-09-01) +------------------ + +* Feature - AWS SDK for Ruby no longer supports Ruby runtime versions 1.9, 2.0, 2.1, and 2.2. + +1.1.1 (2021-03-04) +------------------ + +* Issue - Include LICENSE, CHANGELOG, and VERSION files with this gem. + +1.1.0 (2020-04-08) +------------------ + +* Feature - Remove internal ByteBuffer and replace with String to remove dup and string mutation. + +1.0.3 (2019-04-24) +------------------ + +* Issue - Use single quotes for string where interpolation is not done. + +1.0.2 (2019-03-11) +------------------ + +* Issue - public #encode_headers method + +1.0.1 (2018-06-15) +------------------ + +* Issue - #decode_chunk buffers insufficient prelude message + +1.0.0 (2018-05-10) +------------------ + +* Feature - Initial release of `aws-eventstream` gem. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/VERSION b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/VERSION
new file mode 100644
index 0000000..26aaba0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/VERSION
@@ -0,0 +1 @@
+1.2.0
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream.rb
new file mode 100644
index 0000000..6ac5b23
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream.rb
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+require_relative 'aws-eventstream/decoder'
+require_relative 'aws-eventstream/encoder'
+
+require_relative 'aws-eventstream/message'
+require_relative 'aws-eventstream/header_value'
+require_relative 'aws-eventstream/types'
+require_relative 'aws-eventstream/errors'
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/decoder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/decoder.rb
new file mode 100644
index 0000000..f89838e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/decoder.rb
@@ -0,0 +1,220 @@
+# frozen_string_literal: true
+
+require 'stringio'
+require 'tempfile'
+require 'zlib'
+
+module Aws
+  module EventStream
+
+    # This class provides methods for decoding binary input into
+    # single or multiple messages (Aws::EventStream::Message).
+    #
+    # * {#decode} - decodes messages from an IO-like object that responds
+    #   to #read and contains binary data, yielding each decoded
+    #   Aws::EventStream::Message along the way or returning them wrapped
+    #   in an enumerator
+    #
+    # ## Examples
+    #
+    #   decoder = Aws::EventStream::Decoder.new
+    #
+    #   # decoding from IO
+    #   decoder.decode(io) do |message|
+    #     message.headers
+    #     # => { ... }
+    #     message.payload
+    #     # => StringIO / Tempfile
+    #   end
+    #
+    #   # alternatively
+    #   message_pool = decoder.decode(io)
+    #   message_pool.next
+    #   # => Aws::EventStream::Message
+    #
+    # * {#decode_chunk} - decodes a single message from a chunk of data,
+    #   returning the message object followed by a boolean (indicating the
+    #   eof status of the data) in an array
+    #
+    # ## Examples
+    #
+    #   # chunk containing exactly one message of data
+    #   message, chunk_eof = decoder.decode_chunk(chunk_str)
+    #   message
+    #   # => Aws::EventStream::Message
+    #   chunk_eof
+    #   # => true
+    #
+    #   # chunk containing a partial message
+    #   message, chunk_eof = decoder.decode_chunk(chunk_str)
+    #   message
+    #   # => nil
+    #   chunk_eof
+    #   # => true
+    #   # chunk data is saved in the decoder's message_buffer
+    #
+    #   # chunk containing more than one message of data
+    #   message, chunk_eof = decoder.decode_chunk(chunk_str)
+    #   message
+    #   # => Aws::EventStream::Message
+    #   chunk_eof
+    #   # => false
+    #   # extra chunk data is saved in the message_buffer of the decoder
+    #
+    class Decoder
+
+      include Enumerable
+
+      ONE_MEGABYTE = 1024 * 1024
+      private_constant :ONE_MEGABYTE
+
+      # bytes of the prelude part: 4 bytes each for the total message
+      # length and the headers length, plus the crc checksum of the prelude
+      PRELUDE_LENGTH = 12
+      private_constant :PRELUDE_LENGTH
+
+      # 4 bytes message crc checksum
+      CRC32_LENGTH = 4
+      private_constant :CRC32_LENGTH
+
+      # @param [Hash] options The initialization options.
+      # @option options [Boolean] :format (true) When `false` it
+      #   disables user-friendly formatting for message header values,
+      #   including timestamp and uuid
+      def initialize(options = {})
+        @format = options.fetch(:format, true)
+        @message_buffer = ''
+      end
+
+      # Decodes messages from a binary stream
+      #
+      # @param [IO#read] io An IO-like object
+      #   that responds to `#read`
+      #
+      # @yieldparam [Message] message
+      # @return [Enumerable, nil] Returns a new Enumerable
+      #   containing decoded messages if no block is given
+      def decode(io, &block)
+        raw_message = io.read
+        decoded_message = decode_message(raw_message)
+        return wrap_as_enumerator(decoded_message) unless block_given?
+        # fetch message only
+        raw_event, _eof = decoded_message
+        block.call(raw_event)
+      end
+
+      # Decodes a single message from a chunk of string
+      #
+      # @param [String] chunk A chunk of string to be decoded; a chunk can
+      #   contain anything from a partial event message to multiple event
+      #   messages. When not provided, data is decoded from #message_buffer
+      #
+      # @return [Array] Returns a pair of the single decoded message and a
+      #   boolean; the boolean flag indicates whether this chunk has been
+      #   fully consumed. Unused data is tracked in #message_buffer
+      def decode_chunk(chunk = nil)
+        @message_buffer = [@message_buffer, chunk].pack('a*a*') if chunk
+        decode_message(@message_buffer)
+      end
+
+      private
+
+      # exposed via object.send for testing
+      attr_reader :message_buffer
+
+      def wrap_as_enumerator(decoded_message)
+        Enumerator.new do |yielder|
+          yielder << decoded_message
+        end
+      end
+
+      def decode_message(raw_message)
+        # incomplete message prelude received
+        return [nil, true] if raw_message.bytesize < PRELUDE_LENGTH
+
+        prelude, content = raw_message.unpack("a#{PRELUDE_LENGTH}a*")
+
+        # decode prelude
+        total_length, header_length = decode_prelude(prelude)
+
+        # incomplete message received, leave it in the buffer
+        return [nil, true] if raw_message.bytesize < total_length
+
+        content, checksum, remaining = content.unpack("a#{total_length - PRELUDE_LENGTH - CRC32_LENGTH}Na*")
+        unless Zlib.crc32([prelude, content].pack('a*a*')) == checksum
+          raise Errors::MessageChecksumError
+        end
+
+        # decode headers and payload
+        headers, payload = decode_context(content, header_length)
+
+        @message_buffer = remaining
+
+        [Message.new(headers: headers, payload: payload), remaining.empty?]
+      end
+
+      def decode_prelude(prelude)
+        # the prelude contains the lengths of the message and headers,
+        # followed by a CRC checksum of itself
+        content, checksum = prelude.unpack("a#{PRELUDE_LENGTH - CRC32_LENGTH}N")
+        raise Errors::PreludeChecksumError unless Zlib.crc32(content) == checksum
+        content.unpack('N*')
+      end
+
+      def decode_context(content, header_length)
+        encoded_header, encoded_payload = content.unpack("a#{header_length}a*")
+        [
+          extract_headers(encoded_header),
+          extract_payload(encoded_payload)
+        ]
+      end
+
+      def extract_headers(buffer)
+        scanner = buffer
+        headers = {}
+        until scanner.bytesize == 0
+          # header key
+          key_length, scanner = scanner.unpack('Ca*')
+          key, scanner = scanner.unpack("a#{key_length}a*")
+
+          # header value
+          type_index, scanner = scanner.unpack('Ca*')
+          value_type = Types.types[type_index]
+          unpack_pattern, value_length = Types.pattern[value_type]
+          value = if !!unpack_pattern == unpack_pattern
+                    # boolean types won't have a value specified
+                    unpack_pattern
+                  else
+                    value_length, scanner = scanner.unpack('S>a*') unless value_length
+                    unpacked_value, scanner = scanner.unpack("#{unpack_pattern || "a#{value_length}"}a*")
+                    unpacked_value
+                  end
+
+          headers[key] = HeaderValue.new(
+            format: @format,
+            value: value,
+            type: value_type
+          )
+        end
+        headers
+      end
+
+      def extract_payload(encoded)
+        encoded.bytesize <= ONE_MEGABYTE ?
+          payload_stringio(encoded) :
+          payload_tempfile(encoded)
+      end
+
+      def payload_stringio(encoded)
+        StringIO.new(encoded)
+      end
+
+      def payload_tempfile(encoded)
+        payload = Tempfile.new
+        payload.binmode
+        payload.write(encoded)
+        payload.rewind
+        payload
+      end
+    end
+  end
+end
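Because decode_chunk buffers any leftover bytes in message_buffer, a streaming consumer can feed it arbitrary network slices and drain complete messages as they appear. A sketch of that loop; `chunks` and `handle` are hypothetical stand-ins for the transport and the application callback:

```ruby
require 'aws-eventstream'

decoder = Aws::EventStream::Decoder.new

chunks.each do |chunk|              # hypothetical source of binary slices
  message, chunk_eof = decoder.decode_chunk(chunk)
  until message.nil?
    handle(message)                 # hypothetical application callback
    break if chunk_eof
    # More complete messages may still be sitting in the buffer.
    message, chunk_eof = decoder.decode_chunk
  end
end
```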
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/encoder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/encoder.rb
new file mode 100644
index 0000000..24e564b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/encoder.rb
@@ -0,0 +1,142 @@
+# frozen_string_literal: true
+
+require 'zlib'
+
+module Aws
+  module EventStream
+
+    # This class provides an #encode method for encoding
+    # Aws::EventStream::Message into binary.
+    #
+    # * {#encode} - encodes an Aws::EventStream::Message into binary.
+    #   When an output IO-like object is provided, the binary string is
+    #   written to that IO; otherwise, the encoded binary string is
+    #   returned directly.
+    #
+    # ## Examples
+    #
+    #   message = Aws::EventStream::Message.new(
+    #     headers: {
+    #       "foo" => Aws::EventStream::HeaderValue.new(
+    #         value: "bar", type: "string"
+    #       )
+    #     },
+    #     payload: "payload"
+    #   )
+    #   encoder = Aws::EventStream::Encoder.new
+    #   file = Tempfile.new
+    #
+    #   # encode into IO output
+    #   encoder.encode(message, file)
+    #
+    #   # get the encoded binary string
+    #   encoded_message = encoder.encode(message)
+    #
+    #   file.read == encoded_message
+    #   # => true
+    #
+    class Encoder
+
+      # bytes of total overhead in a message, including the prelude
+      # and the 4-byte total message crc checksum
+      OVERHEAD_LENGTH = 16
+
+      # Maximum header length allowed (after encode): 128kb
+      MAX_HEADERS_LENGTH = 131072
+
+      # Maximum payload length allowed (after encode): 16mb
+      MAX_PAYLOAD_LENGTH = 16777216
+
+      # Encodes an Aws::EventStream::Message to the output IO when
+      # provided, else returns the encoded binary string
+      #
+      # @param [Aws::EventStream::Message] message
+      #
+      # @param [IO#write, nil] io An IO-like object that
+      #   responds to `#write`; the encoded message will be
+      #   written to this IO when provided
+      #
+      # @return [nil, String] when an output IO is provided, the
+      #   encoded message is written to that IO and nil is
+      #   returned; otherwise, the encoded binary string is
+      #   returned.
+      def encode(message, io = nil)
+        encoded = encode_message(message)
+        if io
+          io.write(encoded)
+          io.close
+        else
+          encoded
+        end
+      end
+
+      # Encodes an Aws::EventStream::Message
+      # into a String
+      #
+      # @param [Aws::EventStream::Message] message
+      #
+      # @return [String]
+      def encode_message(message)
+        # create the content buffer with encoded headers
+        encoded_header = encode_headers(message)
+        header_length = encoded_header.bytesize
+        # encode the payload
+        if message.payload.length > MAX_PAYLOAD_LENGTH
+          raise Aws::EventStream::Errors::EventPayloadLengthExceedError.new
+        end
+        encoded_payload = message.payload.read
+        total_length = header_length + encoded_payload.bytesize + OVERHEAD_LENGTH
+
+        # create the message buffer with the prelude section
+        encoded_prelude = encode_prelude(total_length, header_length)
+
+        # append the message content (headers, payload)
+        encoded_content = [
+          encoded_prelude,
+          encoded_header,
+          encoded_payload,
+        ].pack('a*a*a*')
+        # append the message checksum
+        message_checksum = Zlib.crc32(encoded_content)
+        [encoded_content, message_checksum].pack('a*N')
+      end
+
+      # Encodes the headers part of an Aws::EventStream::Message
+      # into a String
+      #
+      # @param [Aws::EventStream::Message] message
+      #
+      # @return [String]
+      def encode_headers(message)
+        header_entries = message.headers.map do |key, value|
+          encoded_key = [key.bytesize, key].pack('Ca*')
+
+          # header value
+          pattern, value_length, type_index = Types.pattern[value.type]
+          encoded_value = [type_index].pack('C')
+          # boolean types don't need to specify a value
+          next [encoded_key, encoded_value].pack('a*a*') if !!pattern == pattern
+          encoded_value = [encoded_value, value.value.bytesize].pack('a*S>') unless value_length
+
+          [
+            encoded_key,
+            encoded_value,
+            pattern ? [value.value].pack(pattern) : value.value,
+          ].pack('a*a*a*')
+        end
+        header_entries.join.tap do |encoded_header|
+          break encoded_header if encoded_header.bytesize <= MAX_HEADERS_LENGTH
+          raise Aws::EventStream::Errors::EventHeadersLengthExceedError.new
+        end
+      end
+
+      private
+
+      def encode_prelude(total_length, headers_length)
+        prelude_body = [total_length, headers_length].pack('NN')
+        checksum = Zlib.crc32(prelude_body)
+        [prelude_body, checksum].pack('a*N')
+      end
+    end
+  end
+end
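Pairing this encoder with the decoder from earlier in the diff gives a quick self-check that a message survives the binary round trip; a sketch built from the documented examples:

```ruby
require 'aws-eventstream'
require 'stringio'

message = Aws::EventStream::Message.new(
  headers: {
    'foo' => Aws::EventStream::HeaderValue.new(value: 'bar', type: 'string')
  },
  payload: StringIO.new('payload')
)

# With no IO argument, #encode returns the binary string directly.
binary = Aws::EventStream::Encoder.new.encode(message)

Aws::EventStream::Decoder.new.decode(StringIO.new(binary)) do |msg|
  msg.headers['foo'].value #=> "bar"
  msg.payload.read         #=> "payload"
end
```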
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/errors.rb
new file mode 100644
index 0000000..a0cd941
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/errors.rb
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+module Aws
+  module EventStream
+    module Errors
+
+      # Raised when the bytes to be read exceed the buffer's total bytes
+      class ReadBytesExceedLengthError < RuntimeError
+        def initialize(target_byte, total_len)
+          msg = "Attempting reading bytes to offset #{target_byte} exceeds"\
+            " buffer length of #{total_len}"
+          super(msg)
+        end
+      end
+
+      # Raised when insufficient bytes of a message are received
+      class IncompleteMessageError < RuntimeError
+        def initialize(*args)
+          super('Not enough bytes for event message')
+        end
+      end
+
+      class PreludeChecksumError < RuntimeError
+        def initialize(*args)
+          super('Prelude checksum mismatch')
+        end
+      end
+
+      class MessageChecksumError < RuntimeError
+        def initialize(*args)
+          super('Message checksum mismatch')
+        end
+      end
+
+      class EventPayloadLengthExceedError < RuntimeError
+        def initialize(*args)
+          super("Payload length of a message should be under 16mb.")
+        end
+      end
+
+      class EventHeadersLengthExceedError < RuntimeError
+        def initialize(*args)
+          super("Encoded headers length of a message should be under 128kb.")
+        end
+      end
+
+    end
+  end
+end
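These are the classes a consumer should rescue around decode calls: corrupted input surfaces as a checksum error rather than silent garbage. A sketch, with `corrupted_bytes` standing in for bad input:

```ruby
require 'aws-eventstream'

begin
  message, _eof = Aws::EventStream::Decoder.new.decode_chunk(corrupted_bytes)
rescue Aws::EventStream::Errors::PreludeChecksumError,
       Aws::EventStream::Errors::MessageChecksumError => e
  # A flipped bit in the prelude or body fails CRC32 validation.
  warn "dropping corrupt event: #{e.message}"
end
```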
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/message.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/message.rb
new file mode 100644
index 0000000..0f1a2d2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/message.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Aws
+  module EventStream
+    class Message
+
+      def initialize(options)
+        @headers = options[:headers] || {}
+        @payload = options[:payload] || StringIO.new
+      end
+
+      # @return [Hash] headers of a message
+      attr_reader :headers
+
+      # @return [IO] payload of a message; size does not exceed 16MB.
+      #   StringIO is returned for a <= 1MB payload,
+      #   Tempfile is returned for a > 1MB payload
+      attr_reader :payload
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/types.rb
new file mode 100644
index 0000000..19c4280
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-eventstream-1.2.0/lib/aws-eventstream/types.rb
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+module Aws
+  module EventStream
+
+    # Message Header Value Types
+    module Types
+
+      def self.types
+        [
+          'bool_true',
+          'bool_false',
+          'byte',
+          'short',
+          'integer',
+          'long',
+          'bytes',
+          'string',
+          'timestamp',
+          'uuid'
+        ]
+      end
+
+      # pack/unpack pattern, byte size, type idx
+      def self.pattern
+        {
+          'bool_true' => [true, 0, 0],
+          'bool_false' => [false, 0, 1],
+          'byte' => ["c", 1, 2],
+          'short' => ["s>", 2, 3],
+          'integer' => ["l>", 4, 4],
+          'long' => ["q>", 8, 5],
+          'bytes' => [nil, nil, 6],
+          'string' => [nil, nil, 7],
+          'timestamp' => ["q>", 8, 8],
+          'uuid' => [nil, 16, 9]
+        }
+      end
+
+    end
+  end
+end
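Putting Types.pattern together with encode_headers from encoder.rb, the on-wire shape of a single header can be sketched by hand. encode_header below is a hypothetical helper written only for this illustration (type index 4 is 'integer' with pack pattern 'l>'; index 7 is 'string', which carries a big-endian uint16 length):

    # Hypothetical helper, for illustration only: lay out one header as
    # [key_len:1][key][type_index:1][(value_len:2)][value]
    def encode_header(key, value, type_index, pattern = nil)
      bytes = [key.bytesize, key].pack('Ca*') + [type_index].pack('C')
      if pattern
        bytes + [value].pack(pattern)                  # fixed-size numeric value
      else
        bytes + [value.bytesize, value].pack('S>a*')   # length-prefixed bytes
      end
    end

    p encode_header('count', 42, 4, 'l>').unpack1('H*')
    # => "05636f756e74040000002a"
    p encode_header('foo', 'bar', 7).unpack1('H*')
    # => "03666f6f070003626172"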
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/CHANGELOG.md
new file mode 100644
index 0000000..31e0ae3
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/CHANGELOG.md
@@ -0,0 +1,4016 @@
+Unreleased Changes
+------------------
+
+1.751.0 (2023-04-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.750.0 (2023-04-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.749.0 (2023-04-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.748.0 (2023-04-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.747.0 (2023-04-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.746.0 (2023-04-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.745.0 (2023-04-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.744.0 (2023-04-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.743.0 (2023-04-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.742.0 (2023-04-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.741.0 (2023-04-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.740.0 (2023-04-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.739.0 (2023-03-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.738.0 (2023-03-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::VPCLattice`.
+
+1.737.0 (2023-03-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.736.0 (2023-03-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.735.0 (2023-03-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.734.0 (2023-03-23)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IVSRealTime`.
+
+1.733.0 (2023-03-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.732.0 (2023-03-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.731.0 (2023-03-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.730.0 (2023-03-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.729.0 (2023-03-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.728.0 (2023-03-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.727.0 (2023-03-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.726.0 (2023-03-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.725.0 (2023-03-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.724.0 (2023-03-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.723.0 (2023-03-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.722.0 (2023-03-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.721.0 (2023-03-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.720.0 (2023-03-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.719.0 (2023-03-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.718.0 (2023-02-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.717.0 (2023-02-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::InternetMonitor`.
+
+1.716.0 (2023-02-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.715.0 (2023-02-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Tnb`.
+
+1.714.0 (2023-02-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.713.0 (2023-02-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.712.0 (2023-02-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.711.0 (2023-02-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.710.0 (2023-02-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.709.0 (2023-02-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.708.0 (2023-02-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.707.0 (2023-02-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.706.0 (2023-02-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.705.0 (2023-02-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.704.0 (2023-02-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.703.0 (2023-02-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.702.0 (2023-01-31)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CloudTrailData`.
+
+1.701.0 (2023-01-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.700.0 (2023-01-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.699.0 (2023-01-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.698.0 (2023-01-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.697.0 (2023-01-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.696.0 (2023-01-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.695.0 (2023-01-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.694.0 (2023-01-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.693.0 (2023-01-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.692.0 (2023-01-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CleanRooms`.
+
+1.691.0 (2023-01-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.690.0 (2023-01-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::KendraRanking`.
+
+1.689.0 (2023-01-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.688.0 (2023-01-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.687.0 (2023-01-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.686.0 (2022-12-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.685.0 (2022-12-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.684.0 (2022-12-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.683.0 (2022-12-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.682.0 (2022-12-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LicenseManagerLinuxSubscriptions`.
+
+1.681.0 (2022-12-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.680.0 (2022-12-19)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::KinesisVideoWebRTCStorage`.
+
+1.679.0 (2022-12-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.678.0 (2022-12-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.677.0 (2022-12-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.676.0 (2022-12-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.675.0 (2022-12-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SageMakerMetrics`.
+
+1.674.0 (2022-12-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.673.0 (2022-12-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.672.0 (2022-12-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.671.0 (2022-12-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.670.0 (2022-12-01)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Pipes`.
+
+* Feature - Added support for enumerating regions for `Aws::CodeCatalyst`.
+
+1.669.0 (2022-11-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SageMakerGeospatial`.
+
+* Feature - Added support for enumerating regions for `Aws::DocDBElastic`.
+
+1.668.0 (2022-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SimSpaceWeaver`.
+
+* Feature - Added support for enumerating regions for `Aws::SecurityLake`.
+
+* Feature - Added support for enumerating regions for `Aws::OpenSearchServerless`.
+
+* Feature - Added support for enumerating regions for `Aws::Omics`.
+
+1.667.0 (2022-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ARCZonalShift`.
+
+1.666.0 (2022-11-28)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::OAM`.
+
+1.665.0 (2022-11-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.664.0 (2022-11-18)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTRoboRunner`.
+
+* Feature - Added support for enumerating regions for `Aws::ChimeSDKVoice`.
+
+1.663.0 (2022-11-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.662.0 (2022-11-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.661.0 (2022-11-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SsmSap`.
+
+1.660.0 (2022-11-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.659.0 (2022-11-10)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Scheduler`.
+
+1.658.0 (2022-11-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ResourceExplorer2`.
+
+1.657.0 (2022-11-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.656.0 (2022-11-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.655.0 (2022-11-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.654.0 (2022-10-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.653.0 (2022-10-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.652.0 (2022-10-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.651.0 (2022-10-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Feature - Add a metadata method to `Partition` to supplement endpoint generation in service gems.
+
+1.650.0 (2022-10-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.649.0 (2022-10-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.648.0 (2022-10-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.647.0 (2022-10-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.646.0 (2022-10-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.645.0 (2022-10-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.644.0 (2022-10-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.643.0 (2022-10-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.642.0 (2022-10-04)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ConnectCases`.
+
+1.641.0 (2022-10-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.640.0 (2022-09-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.639.0 (2022-09-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MigrationHubOrchestrator`.
+
+1.638.0 (2022-09-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.637.0 (2022-09-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTFleetWise`.
+
+1.636.0 (2022-09-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.635.0 (2022-09-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.634.0 (2022-09-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.633.0 (2022-09-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.632.0 (2022-09-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.631.0 (2022-09-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.630.0 (2022-09-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.629.0 (2022-09-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.628.0 (2022-09-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.627.0 (2022-09-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.626.0 (2022-09-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.625.0 (2022-09-01)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ControlTower`.
+
+1.624.0 (2022-08-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.623.0 (2022-08-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.622.0 (2022-08-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.621.0 (2022-08-22)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SupportApp`.
+
+1.620.0 (2022-08-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.619.0 (2022-08-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.618.0 (2022-08-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.617.0 (2022-08-11)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PrivateNetworks`.
+
+* Feature - Added support for enumerating regions for `Aws::BackupStorage`.
+
+1.616.0 (2022-08-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.615.0 (2022-08-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.614.0 (2022-08-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LicenseManagerUserSubscriptions`.
+
+1.613.0 (2022-07-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.612.0 (2022-07-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.611.0 (2022-07-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.610.0 (2022-07-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.609.0 (2022-07-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.608.0 (2022-07-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.607.0 (2022-07-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.606.0 (2022-07-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.605.0 (2022-07-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.604.0 (2022-07-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.603.0 (2022-07-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RolesAnywhere`.
+
+1.602.0 (2022-06-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.601.0 (2022-06-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.600.0 (2022-06-17)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ConnectCampaignService`.
+
+1.599.0 (2022-06-16)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RedshiftServerless`.
+
+1.598.0 (2022-06-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RedshiftServerless`.
+
+* Feature - Added support for enumerating regions for `Aws::MainframeModernization`.
+
+1.597.0 (2022-06-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.596.0 (2022-06-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.595.0 (2022-05-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.594.0 (2022-05-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EMRServerless`.
+
+1.593.0 (2022-05-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EMRServerlessWebService`.
+
+1.592.0 (2022-05-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.591.0 (2022-05-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.590.0 (2022-05-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.589.0 (2022-05-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.588.0 (2022-05-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.587.0 (2022-05-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.586.0 (2022-05-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.585.0 (2022-05-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.584.0 (2022-05-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.583.0 (2022-05-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.582.0 (2022-05-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.581.0 (2022-04-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ChimeSDKMediaPipelines`.
+
+1.580.0 (2022-04-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Ivschat`.
+
+1.579.0 (2022-04-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.578.0 (2022-04-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.577.0 (2022-04-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.576.0 (2022-04-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.575.0 (2022-04-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.574.0 (2022-04-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.573.0 (2022-04-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.572.0 (2022-03-31)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PinpointSMSVoiceV2`.
+
+1.571.0 (2022-03-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.570.0 (2022-03-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.569.0 (2022-03-23)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::GameSparks`.
+
+1.568.0 (2022-03-16)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::BillingConductor`.
+
+1.567.0 (2022-03-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.566.0 (2022-03-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.565.0 (2022-03-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.564.0 (2022-03-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.563.0 (2022-03-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.562.0 (2022-03-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Keyspaces`.
+
+1.561.0 (2022-03-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.560.0 (2022-02-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.559.0 (2022-02-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.558.0 (2022-02-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.557.0 (2022-02-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.556.0 (2022-02-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.555.0 (2022-02-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.554.0 (2022-02-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.553.0 (2022-02-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.552.0 (2022-02-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.551.0 (2022-01-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.550.0 (2022-01-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.549.0 (2022-01-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.548.0 (2022-01-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.547.0 (2022-01-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.546.0 (2022-01-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.545.0 (2022-01-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.544.0 (2022-01-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.543.0 (2021-12-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.542.0 (2021-12-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.541.0 (2021-12-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.540.0 (2021-12-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.539.0 (2021-12-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AmplifyUIBuilder`.
+
+1.538.0 (2021-12-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.537.0 (2021-11-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WorkSpacesWeb`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTTwinMaker`.
+
+* Feature - Added support for enumerating regions for `Aws::BackupGateway`.
+
+1.536.0 (2021-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RecycleBin`.
+
+* Feature - Added support for enumerating regions for `Aws::Inspector2`.
+
+* Feature - Added support for enumerating regions for `Aws::CloudWatchRUM`.
+
+* Feature - Added support for enumerating regions for `Aws::CloudWatchEvidently`.
+
+1.535.0 (2021-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MigrationHubRefactorSpaces`.
+
+1.534.0 (2021-11-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.533.0 (2021-11-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.532.0 (2021-11-17)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Drs`.
+
+* Feature - Added support for enumerating regions for `Aws::AppConfigData`.
+
+1.531.0 (2021-11-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.530.0 (2021-11-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MigrationHubStrategyRecommendations`.
+
+1.529.0 (2021-11-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.528.0 (2021-11-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.527.0 (2021-11-10)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ResilienceHub`.
+
+1.526.0 (2021-11-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.525.0 (2021-11-04)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ChimeSDKMeetings`.
+
+* Feature - Support modeled dualstack and fips endpoints in `Aws::Partitions::EndpointProvider`.
+
+1.524.0 (2021-11-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.523.0 (2021-11-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.522.0 (2021-11-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.521.0 (2021-10-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.520.1 (2021-10-28)
+------------------
+
+* Issue - Add `signing_service` method and resolve `credentialScope` correctly for global services/service defaults.
+
+1.520.0 (2021-10-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.519.0 (2021-10-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.518.0 (2021-10-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.517.0 (2021-10-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Panorama`.
+
+1.516.0 (2021-10-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.515.0 (2021-10-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.514.0 (2021-10-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.513.0 (2021-10-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.512.0 (2021-10-07)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ManagedGrafana`.
+
+1.511.0 (2021-10-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.510.0 (2021-10-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.509.0 (2021-09-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CloudControlApi`.
+
+* Feature - Added support for enumerating regions for `Aws::Account`.
+
+1.508.0 (2021-09-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.507.0 (2021-09-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::VoiceID`.
+
+* Feature - Added support for enumerating regions for `Aws::ConnectWisdomService`.
+
+1.506.0 (2021-09-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.505.0 (2021-09-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.504.0 (2021-09-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.503.0 (2021-09-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.502.0 (2021-09-16)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::KafkaConnect`.
+
+1.501.0 (2021-09-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.500.0 (2021-09-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.499.0 (2021-09-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.498.0 (2021-09-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::OpenSearchService`.
+
+1.497.0 (2021-09-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.496.0 (2021-09-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.495.0 (2021-09-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.494.0 (2021-09-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Feature - AWS SDK for Ruby no longer supports Ruby runtime versions 1.9, 2.0, 2.1, and 2.2.
+
+1.493.0 (2021-08-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.492.0 (2021-08-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.491.0 (2021-08-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.490.0 (2021-08-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.489.0 (2021-08-19)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MemoryDB`.
+
+1.488.0 (2021-08-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.487.0 (2021-08-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.486.0 (2021-08-11)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SnowDeviceManagement`.
+
+1.485.0 (2021-08-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.484.0 (2021-08-06)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ChimeSDKMessaging`.
+
+* Feature - Added support for enumerating regions for `Aws::ChimeSDKIdentity`.
+
+1.483.0 (2021-08-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.482.0 (2021-07-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.481.0 (2021-07-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.480.0 (2021-07-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Route53RecoveryReadiness`.
+
+* Feature - Added support for enumerating regions for `Aws::Route53RecoveryControlConfig`.
+
+* Feature - Added support for enumerating regions for `Aws::Route53RecoveryCluster`.
+
+1.479.0 (2021-07-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.478.0 (2021-07-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.477.0 (2021-07-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.476.0 (2021-07-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.475.0 (2021-07-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.474.0 (2021-07-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.473.0 (2021-07-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.472.0 (2021-06-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.471.0 (2021-06-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.470.0 (2021-06-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.469.0 (2021-06-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.468.0 (2021-06-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.467.0 (2021-06-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.466.0 (2021-06-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Proton`.
+
+1.465.0 (2021-05-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.464.0 (2021-05-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.463.0 (2021-05-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.462.0 (2021-05-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.461.0 (2021-05-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.460.0 (2021-05-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.459.0 (2021-05-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.458.0 (2021-05-18)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AppRunner`.
+
+1.457.0 (2021-05-17)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ApplicationCostProfiler`.
+
+1.456.0 (2021-05-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.455.0 (2021-05-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.454.0 (2021-05-11)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SSMIncidents`.
+
+* Feature - Added support for enumerating regions for `Aws::SSMContacts`.
+
+1.453.0 (2021-05-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.452.0 (2021-05-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.451.0 (2021-05-03)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::FinSpaceData`.
+
+1.450.0 (2021-05-03)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Finspace`.
+
+1.449.0 (2021-04-28)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::NimbleStudio`.
+
+1.448.0 (2021-04-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.447.0 (2021-04-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.446.0 (2021-04-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.445.0 (2021-04-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.444.0 (2021-04-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.443.0 (2021-04-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LookoutEquipment`.
+
+1.442.0 (2021-04-07)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Mgn`.
+
+1.441.0 (2021-04-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.440.0 (2021-04-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.439.0 (2021-03-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.438.0 (2021-03-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.437.0 (2021-03-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.436.0 (2021-03-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.435.0 (2021-03-25)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LookoutMetrics`.
+
+1.434.0 (2021-03-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.433.0 (2021-03-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::FIS`.
+
+1.432.0 (2021-03-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.431.1 (2021-03-05)
+------------------
+
+* Issue - Fix an issue where services without regionalized endpoints do not resolve to a provided FIPS global region.
+
+1.431.0 (2021-03-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Include LICENSE, CHANGELOG, and VERSION files with this gem.
+
+1.430.0 (2021-03-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+
+1.429.0 (2021-02-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.428.0 (2021-02-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.427.0 (2021-02-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.426.0 (2021-02-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Fix incorrect use of `JSON.load` breaking Ruby <= 2.2.
+
+1.425.0 (2021-02-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Reduce memory usage by de-duplicating `partitions.json`.
+
+1.424.0 (2021-02-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.423.0 (2021-02-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.422.0 (2021-01-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.421.0 (2021-01-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.420.0 (2021-01-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.419.0 (2021-01-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.418.0 (2021-01-22)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LexRuntimeV2`.
+
+* Feature - Added support for enumerating regions for `Aws::LexModelsV2`.
+
+1.417.0 (2021-01-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.416.0 (2021-01-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.415.0 (2021-01-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.414.0 (2020-12-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.413.0 (2020-12-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.412.0 (2020-12-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.411.0 (2020-12-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.410.0 (2020-12-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.409.0 (2020-12-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.408.0 (2020-12-16)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WellArchitected`.
+
+* Feature - Added support for enumerating regions for `Aws::LocationService`.
+
+1.407.0 (2020-12-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PrometheusService`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTWireless`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTFleetHub`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTDeviceAdvisor`.
+
+* Feature - Added support for enumerating regions for `Aws::GreengrassV2`.
+
+1.406.0 (2020-12-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.405.0 (2020-12-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.404.0 (2020-12-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SagemakerEdgeManager`.
+
+* Feature - Added support for enumerating regions for `Aws::HealthLake`.
+
+* Feature - Added support for enumerating regions for `Aws::EMRContainers`.
+
+* Feature - Added support for enumerating regions for `Aws::AuditManager`.
+
+1.403.0 (2020-12-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.402.0 (2020-12-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CustomerProfiles`.
+
+1.401.0 (2020-12-01)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SageMakerFeatureStoreRuntime`.
+
+* Feature - Added support for enumerating regions for `Aws::Profile`.
+
+* Feature - Added support for enumerating regions for `Aws::LookoutforVision`.
+
+* Feature - Added support for enumerating regions for `Aws::ECRPublic`.
+
+* Feature - Added support for enumerating regions for `Aws::DevOpsGuru`.
+
+* Feature - Added support for enumerating regions for `Aws::ConnectContactLens`.
+
+* Feature - Added support for enumerating regions for `Aws::AppIntegrationsService`.
+
+* Feature - Added support for enumerating regions for `Aws::AmplifyBackend`.
+
+1.400.0 (2020-12-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.399.0 (2020-11-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MWAA`.
+
+1.398.0 (2020-11-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.397.0 (2020-11-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.396.0 (2020-11-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.395.0 (2020-11-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.394.0 (2020-11-17)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::NetworkFirewall`.
+
+1.393.0 (2020-11-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.392.0 (2020-11-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AppRegistry`.
+
+1.391.0 (2020-11-11)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::GlueDataBrew`.
+
+1.390.0 (2020-11-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.389.0 (2020-11-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.388.0 (2020-10-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.387.0 (2020-10-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.386.0 (2020-10-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.385.0 (2020-10-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.384.0 (2020-10-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.383.0 (2020-10-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.382.0 (2020-10-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.381.0 (2020-10-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.380.0 (2020-10-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.379.0 (2020-10-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.378.0 (2020-09-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::S3Outposts`.
+
+1.377.0 (2020-09-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::TimestreamWrite`.
+
+* Feature - Added support for enumerating regions for `Aws::TimestreamQuery`.
+
+1.376.0 (2020-09-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.375.0 (2020-09-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.374.0 (2020-09-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.373.0 (2020-09-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.372.0 (2020-09-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.371.0 (2020-09-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.370.0 (2020-09-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.369.0 (2020-09-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.368.0 (2020-09-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.367.0 (2020-09-10)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SSOAdmin`.
+
+1.366.0 (2020-09-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RedshiftDataAPIService`.
+
+1.365.0 (2020-09-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.364.0 (2020-09-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.363.0 (2020-08-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.362.0 (2020-08-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.361.0 (2020-08-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.360.0 (2020-08-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Appflow`.
+
+1.359.0 (2020-08-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.358.0 (2020-08-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.357.0 (2020-08-18)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IdentityStore`.
+
+1.356.0 (2020-08-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.355.0 (2020-08-13)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Braket`.
+
+1.354.0 (2020-08-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.353.0 (2020-08-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.352.0 (2020-08-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.351.0 (2020-08-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.350.0 (2020-08-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.349.0 (2020-07-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.348.0 (2020-07-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.347.0 (2020-07-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.346.0 (2020-07-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.345.0 (2020-07-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.344.0 (2020-07-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.343.0 (2020-07-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.342.0 (2020-07-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IVS`.
+
+1.341.0 (2020-07-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.340.0 (2020-07-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.339.0 (2020-07-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.338.0 (2020-07-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.337.0 (2020-06-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.336.0 (2020-06-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.335.0 (2020-06-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.334.0 (2020-06-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Honeycode`.
+
+1.333.0 (2020-06-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.332.0 (2020-06-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.331.0 (2020-06-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.330.0 (2020-06-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.329.0 (2020-06-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Republish previous version with correct dependency on `aws-sdk-core`.
+
+1.328.0 (2020-06-10)
+------------------
+
+* Issue - This version has been yanked. (#2327).
+* Feature - Added support for enumerating regions for `Aws::CodeArtifact`.
+
+1.327.0 (2020-06-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.326.0 (2020-06-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.325.0 (2020-06-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.324.0 (2020-06-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.323.0 (2020-06-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.322.0 (2020-05-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.321.0 (2020-05-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.320.0 (2020-05-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.319.0 (2020-05-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.318.0 (2020-05-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.317.0 (2020-05-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.316.0 (2020-05-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.315.0 (2020-05-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.314.0 (2020-05-13)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Macie2`.
+
+1.313.0 (2020-05-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.312.0 (2020-05-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.311.0 (2020-05-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.310.0 (2020-05-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.309.0 (2020-05-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.308.0 (2020-05-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.307.0 (2020-04-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.306.0 (2020-04-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTSiteWise`.
+
+1.305.0 (2020-04-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.304.0 (2020-04-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.303.0 (2020-04-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ElasticInference`.
+
+1.302.0 (2020-04-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.301.0 (2020-04-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.300.0 (2020-04-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.299.0 (2020-04-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Synthetics`.
+
+1.298.0 (2020-04-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.297.0 (2020-04-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.296.0 (2020-04-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.295.0 (2020-04-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.294.0 (2020-04-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.293.0 (2020-04-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.292.0 (2020-03-31)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ElasticInference`.
+
+1.291.0 (2020-03-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.290.0 (2020-03-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.289.0 (2020-03-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.288.0 (2020-03-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.287.0 (2020-03-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.286.0 (2020-03-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Change the default of sts_regional_endpoints from 'legacy' to 'regional'.
+
+1.285.0 (2020-03-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.284.0 (2020-03-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.283.0 (2020-03-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.282.0 (2020-03-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.281.0 (2020-03-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.280.0 (2020-03-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.279.0 (2020-03-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.278.0 (2020-02-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.277.0 (2020-02-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.276.0 (2020-02-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.275.0 (2020-02-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.274.0 (2020-02-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.273.0 (2020-02-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.272.0 (2020-02-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.271.0 (2020-02-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.270.0 (2020-02-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.269.0 (2020-01-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.268.0 (2020-01-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.267.0 (2020-01-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.266.0 (2020-01-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.265.0 (2020-01-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.264.0 (2020-01-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.263.0 (2020-01-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.262.0 (2020-01-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.261.0 (2020-01-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.260.0 (2019-12-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.259.0 (2019-12-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.258.0 (2019-12-19)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CodeStarconnections`.
+
+1.257.0 (2019-12-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.256.0 (2019-12-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.255.0 (2019-12-13)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Detective`.
+
+1.254.0 (2019-12-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.253.0 (2019-12-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.252.0 (2019-12-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.251.0 (2019-12-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::KinesisVideoSignalingChannels`.
+
+1.250.0 (2019-12-04)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EBS`.
+
+1.249.0 (2019-12-03)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Outposts`.
+
+* Feature - Added support for enumerating regions for `Aws::NetworkManager`.
+
+* Feature - Added support for enumerating regions for `Aws::Kendra`.
+
+* Feature - Added support for enumerating regions for `Aws::FraudDetector`.
+
+* Feature - Added support for enumerating regions for `Aws::ComputeOptimizer`.
+
+* Feature - Added support for enumerating regions for `Aws::CodeGuruReviewer`.
+
+* Feature - Added support for enumerating regions for `Aws::CodeGuruProfiler`.
+
+* Feature - Added support for enumerating regions for `Aws::AugmentedAIRuntime`.
+
+1.248.0 (2019-12-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AccessAnalyzer`.
+
+1.247.0 (2019-12-02)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Schemas`.
+
+* Feature - Added support for enumerating regions for `Aws::Imagebuilder`.
+
+1.246.0 (2019-11-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ElasticInference`.
+
+1.245.0 (2019-11-25)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WAFV2`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTSecureTunneling`.
+
+* Feature - Added support for enumerating regions for `Aws::AppConfig`.
+
+1.244.0 (2019-11-22)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AutoScalingPlans`.
+
+* Feature - Added `Partition#region?` and `Partition#service?` methods.
+
+1.243.0 (2019-11-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ConnectParticipant`.
+
+1.242.0 (2019-11-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MigrationHubConfig`.
+
+1.241.0 (2019-11-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.240.0 (2019-11-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.239.0 (2019-11-13)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SESV2`.
+
+* Feature - Added support for enumerating regions for `Aws::DataExchange`.
+
+* Feature - Added support for S3 IAD regional endpoint.
+
+1.238.0 (2019-11-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MarketplaceCatalog`.
+
+1.237.0 (2019-11-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.236.0 (2019-11-07)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SSOOIDC`.
+
+* Feature - Added support for enumerating regions for `Aws::SSO`.
+
+1.235.0 (2019-11-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.234.0 (2019-11-06)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SavingsPlans`.
+
+1.233.0 (2019-11-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CodeStarNotifications`.
+
+1.232.0 (2019-10-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.231.0 (2019-10-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.230.0 (2019-10-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.229.0 (2019-10-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.228.0 (2019-10-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.227.0 (2019-10-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.226.0 (2019-10-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.225.0 (2019-10-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.224.0 (2019-10-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.223.0 (2019-10-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.222.0 (2019-10-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.221.0 (2019-10-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.220.0 (2019-09-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.219.0 (2019-09-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.218.0 (2019-09-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.217.0 (2019-09-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.216.0 (2019-09-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.215.0 (2019-09-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.214.0 (2019-09-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.213.0 (2019-09-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.212.0 (2019-09-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WorkMailMessageFlow`.
+
+1.211.0 (2019-09-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::QLDBSession`.
+
+* Feature - Added support for enumerating regions for `Aws::QLDB`.
+
+1.210.0 (2019-09-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.209.0 (2019-09-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.208.0 (2019-09-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.207.0 (2019-08-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.206.0 (2019-08-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.205.0 (2019-08-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.204.0 (2019-08-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ForecastService`.
+
+* Feature - Added support for enumerating regions for `Aws::ForecastQueryService`.
+
+1.203.0 (2019-08-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.202.0 (2019-08-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.201.0 (2019-08-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.200.0 (2019-08-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.199.0 (2019-08-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.198.0 (2019-08-08)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LakeFormation`.
+
+1.197.0 (2019-08-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.196.0 (2019-08-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.195.0 (2019-07-30)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.194.0 (2019-07-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.193.0 (2019-07-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.192.0 (2019-07-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.191.0 (2019-07-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.190.0 (2019-07-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.189.0 (2019-07-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.188.0 (2019-07-11)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EventBridge`.
+
+1.187.0 (2019-07-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.186.0 (2019-07-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.185.0 (2019-07-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.184.0 (2019-07-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.183.0 (2019-07-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.182.0 (2019-06-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.181.0 (2019-06-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EC2InstanceConnect`.
+
+1.180.0 (2019-06-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.179.0 (2019-06-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.178.0 (2019-06-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ServiceQuotas`.
+
+* Feature - Added support for enumerating regions for `Aws::ApplicationInsights`.
+
+1.177.0 (2019-06-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.176.0 (2019-06-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.175.0 (2019-06-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.174.0 (2019-06-10)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PersonalizeRuntime`.
+
+* Feature - Added support for enumerating regions for `Aws::PersonalizeEvents`.
+
+* Feature - Added support for enumerating regions for `Aws::Personalize`.
+
+1.173.0 (2019-06-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.172.0 (2019-06-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.171.0 (2019-06-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.170.0 (2019-05-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTEventsData`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTEvents`.
+
+1.169.0 (2019-05-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTThingsGraph`.
+
+1.168.0 (2019-05-28)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::GroundStation`.
+
+1.167.0 (2019-05-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.166.0 (2019-05-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.165.0 (2019-05-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.164.0 (2019-05-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MediaPackageVod`.
+
+1.163.0 (2019-05-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.162.0 (2019-05-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.161.0 (2019-05-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.160.0 (2019-05-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.159.0 (2019-05-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.158.0 (2019-05-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.157.0 (2019-05-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.156.0 (2019-04-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ManagedBlockchain`.
+
+1.155.0 (2019-04-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.154.0 (2019-04-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.153.0 (2019-04-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.152.0 (2019-04-24)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.151.0 (2019-04-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.150.0 (2019-04-16)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.149.0 (2019-04-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.148.0 (2019-03-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.147.0 (2019-03-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.146.0 (2019-03-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.145.0 (2019-03-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::TranscribeStreamingService`.
+
+1.144.0 (2019-03-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.143.0 (2019-03-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.142.0 (2019-03-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Textract`.
+
+1.141.0 (2019-02-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.140.0 (2019-02-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.139.0 (2019-02-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.138.0 (2019-02-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.137.0 (2019-02-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.136.0 (2019-01-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.135.0 (2019-01-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ECR`.
+
+1.134.0 (2019-01-23)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WorkLink`.
+
+1.133.0 (2019-01-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.132.0 (2019-01-16)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Backup`.
+
+1.131.0 (2019-01-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.130.0 (2019-01-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::DocDB`.
+
+1.129.0 (2019-01-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.128.0 (2019-01-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.127.0 (2018-12-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.126.0 (2018-12-18)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ApiGatewayV2`.
+
+* Feature - Added support for enumerating regions for `Aws::ApiGatewayManagementApi`.
+
+1.125.0 (2018-12-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.124.0 (2018-12-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.123.0 (2018-12-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.122.0 (2018-12-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.121.0 (2018-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Kafka`.
+
+1.120.0 (2018-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::LicenseManager`.
+
+* Feature - Added support for enumerating regions for `Aws::AppMesh`.
+
+1.119.0 (2018-11-28)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SecurityHub`.
+
+* Feature - Added support for enumerating regions for `Aws::FSx`.
+
+1.118.0 (2018-11-28)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MediaConnect`.
+
+* Feature - Added support for enumerating regions for `Aws::KinesisAnalyticsV2`.
+
+* Feature - Added support for enumerating regions for `Aws::ComprehendMedical`.
+
+1.117.0 (2018-11-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::GlobalAccelerator`.
+
+1.116.0 (2018-11-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Transfer`.
+
+* Feature - Added support for enumerating regions for `Aws::RoboMaker`.
+
+* Feature - Added support for enumerating regions for `Aws::DataSync`.
+
+* Feature - Added support for enumerating regions for `Aws::Amplify`.
+
+1.115.0 (2018-11-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::RDSDataService`.
+
+* Feature - Added support for enumerating regions for `Aws::QuickSight`.
+
+1.114.0 (2018-11-15)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::S3Control`.
+
+* Feature - Added support for enumerating regions for `Aws::Route53Resolver`.
+
+* Feature - Added support for enumerating regions for `Aws::RAM`.
+
+* Feature - Added support for enumerating regions for `Aws::PinpointSMSVoice`.
+
+1.113.0 (2018-11-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.112.0 (2018-11-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.111.0 (2018-11-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.110.0 (2018-11-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.109.0 (2018-11-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.108.0 (2018-11-06)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PinpointEmail`.
+
+1.107.0 (2018-10-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Chime`.
+
+1.106.0 (2018-10-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.105.0 (2018-09-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.104.0 (2018-09-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.103.0 (2018-09-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.102.0 (2018-08-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Signer`.
+
+1.101.0 (2018-08-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::signer`.
+
+1.100.0 (2018-08-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.99.0 (2018-08-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.98.0 (2018-08-13)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SageMaker`.
+
+1.97.0 (2018-08-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.96.0 (2018-07-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::DLM`.
+
+1.95.0 (2018-07-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.94.0 (2018-06-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.93.0 (2018-06-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Macie`.
+
+1.92.0 (2018-06-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.91.0 (2018-06-04)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::EKS`.
+
+1.90.0 (2018-06-01)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MediaTailor`.
+
+1.89.1 (2018-05-31)
+------------------
+
+* Issue - Revert a few improperly configured endpoints.
+
+1.89.0 (2018-05-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Neptune`.
+
+1.88.0 (2018-05-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::PI`.
+
+1.87.0 (2018-05-14)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoT1ClickProjects`.
+
+* Feature - Added support for enumerating regions for `Aws::IoT1ClickDevicesService`.
+
+1.86.0 (2018-05-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.85.0 (2018-05-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.84.0 (2018-05-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.83.0 (2018-05-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.82.0 (2018-04-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.81.0 (2018-04-23)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::IoTAnalytics`.
+
+1.80.0 (2018-04-10)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.79.0 (2018-04-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.78.0 (2018-04-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.77.0 (2018-04-04)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::SecretsManager`.
+
+* Feature - Added support for enumerating regions for `Aws::FMS`.
+
+* Feature - Added support for enumerating regions for `Aws::ACMPCA`.
+
+1.76.0 (2018-03-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Connect`.
+
+1.75.0 (2018-03-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.74.0 (2018-03-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.73.0 (2018-03-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.72.0 (2018-03-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.71.0 (2018-03-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.70.0 (2018-03-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.69.0 (2018-03-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.68.0 (2018-02-28)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.67.0 (2018-02-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.66.0 (2018-02-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.65.0 (2018-02-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.64.0 (2018-02-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.63.0 (2018-02-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.62.0 (2018-02-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.61.0 (2018-02-13)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.60.0 (2018-02-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.59.0 (2018-02-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.58.0 (2018-02-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.57.0 (2018-01-25)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.56.0 (2018-01-19)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::TranscribeService`.
+
+1.55.0 (2018-01-17)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::AutoScalingPlans`.
+
+1.54.0 (2018-01-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.53.0 (2017-12-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.52.0 (2017-12-21)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.51.0 (2017-12-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.50.0 (2017-12-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.49.0 (2017-12-19)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.48.0 (2017-12-14)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.47.0 (2017-12-12)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WorkMail`.
+
+1.46.0 (2017-12-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.45.0 (2017-12-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ServiceDiscovery`.
+
+1.44.0 (2017-11-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ServerlessApplicationRepository`.
+
+* Feature - Added support for enumerating regions for `Aws::Cloud9`.
+
+* Feature - Added support for enumerating regions for `Aws::AlexaForBusiness`.
+
+1.43.0 (2017-11-30)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ResourceGroups`.
+
+1.42.0 (2017-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Translate`.
+
+* Feature - Added support for enumerating regions for `Aws::SageMakerRuntime`.
+
+* Feature - Added support for enumerating regions for `Aws::SageMaker`.
+
+* Feature - Added support for enumerating regions for `Aws::KinesisVideoMedia`.
+
+* Feature - Added support for enumerating regions for `Aws::KinesisVideoArchivedMedia`.
+
+* Feature - Added support for enumerating regions for `Aws::KinesisVideo`.
+
+* Feature - Added support for enumerating regions for `Aws::IoTJobsDataPlane`.
+
+* Feature - Added support for enumerating regions for `Aws::Comprehend`.
+
+1.41.0 (2017-11-29)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MQ`.
+
+* Feature - Added support for enumerating regions for `Aws::GuardDuty`.
+
+* Feature - Added support for enumerating regions for `Aws::AppSync`.
+
+1.40.0 (2017-11-27)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MediaStoreData`.
+
+* Feature - Added support for enumerating regions for `Aws::MediaStore`.
+
+* Feature - Added support for enumerating regions for `Aws::MediaPackage`.
+
+* Feature - Added support for enumerating regions for `Aws::MediaLive`.
+
+* Feature - Added support for enumerating regions for `Aws::MediaConvert`.
+
+1.39.0 (2017-11-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.38.0 (2017-11-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.37.0 (2017-11-20)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::CostExplorer`.
+
+1.36.0 (2017-11-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.35.0 (2017-11-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.34.0 (2017-11-09)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.33.0 (2017-11-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.32.0 (2017-11-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.31.0 (2017-11-07)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Pricing`.
+
+1.30.0 (2017-11-03)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.29.0 (2017-11-02)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.28.0 (2017-11-01)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.27.0 (2017-10-26)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Handle service identifier with empty value.
+
+1.26.0 (2017-10-17)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.25.0 (2017-10-11)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.24.0 (2017-09-27)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.23.0 (2017-09-22)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.22.0 (2017-09-20)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.21.0 (2017-09-12)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.20.0 (2017-09-07)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.19.0 (2017-09-05)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.18.0 (2017-09-01)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Mobile`.
+
+1.17.0 (2017-08-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+* Issue - Update the `aws-partitions` gemspec metadata.
+
+1.16.0 (2017-08-23)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.15.0 (2017-08-18)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.14.0 (2017-08-15)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.13.0 (2017-08-14)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::MigrationHub`.
+
+* Feature - Added support for enumerating regions for `Aws::Glue`.
+
+* Feature - Added support for enumerating regions for `Aws::CloudHSMV2`.
+
+1.12.0 (2017-07-31)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.11.0 (2017-07-06)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.10.0 (2017-06-29)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.9.0 (2017-06-26)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Greengrass`.
+
+* Feature - Added support for enumerating regions for `Aws::DAX`.
+
+1.8.0 (2017-05-23)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Athena`.
+
+1.7.0 (2017-05-05)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::States`.
+
+* Feature - Added support for enumerating regions for `Aws::MarketplaceEntitlementService`.
+
+* Feature - Added support for enumerating regions for `Aws::Lex`.
+
+1.6.0 (2017-04-21)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::ResourceGroupsTaggingAPI`.
+
+* Feature - Added support for enumerating regions for `Aws::LexModelBuildingService`.
+
+* Feature - Added support for enumerating regions for `Aws::CodeStar`.
+
+1.5.0 (2017-03-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::WorkDocs`.
+
+1.4.0 (2017-03-08)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
+1.3.0 (2017-03-07)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Organizations`.
+
+* Feature - Added support for enumerating regions for `Aws::MTurk`.
+
+* Feature - Added support for enumerating regions for `Aws::LexRuntimeService`.
+
+* Feature - Added support for enumerating regions for `Aws::CloudDirectory`.
+
+1.2.0 (2017-01-24)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::Batch`.
+
+1.1.0 (2016-12-09)
+------------------
+
+* Feature - Added support for enumerating regions for `Aws::XRay`.
+
+* Feature - Added support for enumerating regions for `Aws::WAFRegional`.
+
+* Feature - Added support for enumerating regions for `Aws::Shield`.
+
+* Feature - Added support for enumerating regions for `Aws::SFN`.
+ +* Feature - Added support for enumerating regions for `Aws::Rekognition`. + +* Feature - Added support for enumerating regions for `Aws::Polly`. + +* Feature - Added support for enumerating regions for `Aws::Pinpoint`. + +* Feature - Added support for enumerating regions for `Aws::OpsWorksCM`. + +* Feature - Added support for enumerating regions for `Aws::Lightsail`. + +* Feature - Added support for enumerating regions for `Aws::Health`. + +* Feature - Added support for enumerating regions for `Aws::CodeBuild`. + +* Feature - Added support for enumerating regions for `Aws::AppStream`. + +1.0.0 (2016-12-05) +------------------ + +* Feature - Initial release of the `aws-partitions` gem. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/VERSION b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/VERSION
new file mode 100644
index 0000000..f46b9ab
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/VERSION
@@ -0,0 +1 @@
+1.751.0
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions.rb
new file mode 100644
index 0000000..0030b94
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions.rb
@@ -0,0 +1,597 @@
+# frozen_string_literal: true
+
+require_relative 'aws-partitions/endpoint_provider'
+require_relative 'aws-partitions/partition'
+require_relative 'aws-partitions/partition_list'
+require_relative 'aws-partitions/region'
+require_relative 'aws-partitions/service'
+
+require 'json'
+
+module Aws
+
+  # A {Partition} is a group of AWS {Region} and {Service} objects. You
+  # can use a partition to determine what services are available in a region,
+  # or what regions a service is available in.
+  #
+  # ## Partitions
+  #
+  # **AWS accounts are scoped to a single partition**. You can get a partition
+  # by name. Valid partition names include:
+  #
+  # * `"aws"` - Public AWS partition
+  # * `"aws-cn"` - AWS China
+  # * `"aws-us-gov"` - AWS GovCloud
+  #
+  # To get a partition by name:
+  #
+  #     aws = Aws::Partitions.partition('aws')
+  #
+  # You can also enumerate all partitions:
+  #
+  #     Aws::Partitions.each do |partition|
+  #       puts partition.name
+  #     end
+  #
+  # ## Regions
+  #
+  # A {Partition} is divided into one or more regions. For example, the
+  # "aws" partition contains "us-east-1", "us-west-1", etc. You can get
+  # a region by name. Calling {Partition#region} will return an instance
+  # of {Region}.
+  #
+  #     region = Aws::Partitions.partition('aws').region('us-west-2')
+  #     region.name
+  #     #=> "us-west-2"
+  #
+  # You can also enumerate all regions within a partition:
+  #
+  #     Aws::Partitions.partition('aws').regions.each do |region|
+  #       puts region.name
+  #     end
+  #
+  # Each {Region} object has a name, description and a list of services
+  # available to that region:
+  #
+  #     us_west_2 = Aws::Partitions.partition('aws').region('us-west-2')
+  #
+  #     us_west_2.name #=> "us-west-2"
+  #     us_west_2.description #=> "US West (Oregon)"
+  #     us_west_2.partition_name #=> "aws"
+  #     us_west_2.services #=> #<Set: {"APIGateway", "AutoScaling", ... }
+  #
+  # To check whether a service is available in a region, call `#include?`
+  # on the set of service names:
+  #
+  #     us_west_2.services.include?('DynamoDB') #=> true/false
+  #
+  # The service name should be the service's module name as used by
+  # the AWS SDK for Ruby. To find the complete list of supported
+  # service names, see {Partition#services}.
+  #
+  # It's also possible to enumerate every service for every region in
+  # every partition.
+  #
+  #     Aws::Partitions.partitions.each do |partition|
+  #       partition.regions.each do |region|
+  #         region.services.each do |service_name|
+  #           puts "#{partition.name} -> #{region.name} -> #{service_name}"
+  #         end
+  #       end
+  #     end
+  #
+  # ## Services
+  #
+  # A {Partition} has a list of services available. You can get a
+  # single {Service} by name:
+  #
+  #     Aws::Partitions.partition('aws').service('DynamoDB')
+  #
+  # You can also enumerate all services in a partition:
+  #
+  #     Aws::Partitions.partition('aws').services.each do |service|
+  #       puts service.name
+  #     end
+  #
+  # Each {Service} object has a name, and information about regions
+  # that service is available in.
+  #
+  #     service.name #=> "DynamoDB"
+  #     service.partition_name #=> "aws"
+  #     service.regions #=> #<Set: {"us-east-1", "us-west-1", ... }
+  #     service.regionalized? #=> false
+  #     service.partition_region #=> "aws-global"
+  #
+  # It's also possible to enumerate every region for every service in
+  # every partition.
+  #
+  #     Aws::Partitions.partitions.each do |partition|
+  #       partition.services.each do |service|
+  #         service.regions.each do |region_name|
+  #           puts "#{partition.name} -> #{region_name} -> #{service.name}"
+  #         end
+  #       end
+  #     end
+  #
+  # ## Service Names
+  #
+  # {Service} names are those used by the AWS SDK for Ruby. They
+  # correspond to the service's module.
+  #
+  module Partitions
+
+    class << self
+
+      include Enumerable
+
+      # @return [Enumerable<Partition>]
+      def each(&block)
+        default_partition_list.each(&block)
+      end
+
+      # Return the partition with the given name. A partition describes
+      # the services and regions available in that partition.
+      #
+      #     aws = Aws::Partitions.partition('aws')
+      #
+      #     puts "Regions available in the aws partition:\n"
+      #     aws.regions.each do |region|
+      #       puts region.name
+      #     end
+      #
+      #     puts "Services available in the aws partition:\n"
+      #     aws.services.each do |service|
+      #       puts service.name
+      #     end
+      #
+      # @param [String] name The name of the partition to return.
+      #   Valid names include "aws", "aws-cn", and "aws-us-gov".
+      #
+      # @return [Partition]
+      #
+      # @raise [ArgumentError] Raises an `ArgumentError` if a partition is
+      #   not found with the given name. The error message contains a list
+      #   of valid partition names.
+      def partition(name)
+        default_partition_list.partition(name)
+      end
+
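+      # A usage sketch (editorial addition, not upstream documentation):
+      # unknown names raise the `ArgumentError` described above, so callers
+      # can rescue it to validate partition names supplied at runtime:
+      #
+      #     begin
+      #       Aws::Partitions.partition('aws-unknown')
+      #     rescue ArgumentError => e
+      #       puts e.message # lists the valid partition names
+      #     end
+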
+      # Returns an array with every partition. A partition describes
+      # the services and regions available in that partition.
+      #
+      #     Aws::Partitions.partitions.each do |partition|
+      #
+      #       puts "Regions available in #{partition.name}:\n"
+      #       partition.regions.each do |region|
+      #         puts region.name
+      #       end
+      #
+      #       puts "Services available in #{partition.name}:\n"
+      #       partition.services.each do |service|
+      #         puts service.name
+      #       end
+      #     end
+      #
+      # @return [Enumerable<Partition>] Returns an enumerable of all
+      #   known partitions.
+      def partitions
+        default_partition_list
+      end
+
+      # @param [Hash] new_partitions
+      # @api private For internal use only.
+      def add(new_partitions)
+        new_partitions['partitions'].each do |partition|
+          default_partition_list.add_partition(Partition.build(partition))
+          defaults['partitions'] << partition
+        end
+      end
+
+      # @param [Hash] partition_metadata
+      # @api private For internal use only.
+      def merge_metadata(partition_metadata)
+        default_partition_list.merge_metadata(partition_metadata)
+      end
+
+      # @api private For internal use only.
+      def clear
+        default_partition_list.clear
+        defaults['partitions'].clear
+      end
+
+      # @return [PartitionList]
+      # @api private
+      def default_partition_list
+        @default_partition_list ||= begin
+          partitions = PartitionList.build(defaults)
+          partitions.merge_metadata(default_metadata)
+          partitions
+        end
+      end
+
+      # @return [Hash]
+      # @api private
+      def defaults
+        @defaults ||= begin
+          path = File.expand_path('../../partitions.json', __FILE__)
+          defaults = JSON.parse(File.read(path), freeze: true)
+          defaults.merge('partitions' => defaults['partitions'].dup)
+        end
+      end
+
+      # @return [Hash]
+      # @api private
+      def default_metadata
+        @default_metadata ||= begin
+          path = File.expand_path('../../partitions-metadata.json', __FILE__)
+          defaults = JSON.parse(File.read(path), freeze: true)
+          defaults.merge('partitions' => defaults['partitions'].dup)
+        end
+      end
+
+      # @return [Hash<String,String>] Returns a map of service module names
+      #   to their id as used in the endpoints.json document.
+      # @api private For internal use only.
+ def service_ids + @service_ids ||= begin + # service ids + { + 'ACM' => 'acm', + 'ACMPCA' => 'acm-pca', + 'APIGateway' => 'apigateway', + 'ARCZonalShift' => 'arc-zonal-shift', + 'AccessAnalyzer' => 'access-analyzer', + 'Account' => 'account', + 'AlexaForBusiness' => 'a4b', + 'Amplify' => 'amplify', + 'AmplifyBackend' => 'amplifybackend', + 'AmplifyUIBuilder' => 'amplifyuibuilder', + 'ApiGatewayManagementApi' => 'execute-api', + 'ApiGatewayV2' => 'apigateway', + 'AppConfig' => 'appconfig', + 'AppConfigData' => 'appconfigdata', + 'AppIntegrationsService' => 'app-integrations', + 'AppMesh' => 'appmesh', + 'AppRegistry' => 'servicecatalog-appregistry', + 'AppRunner' => 'apprunner', + 'AppStream' => 'appstream2', + 'AppSync' => 'appsync', + 'Appflow' => 'appflow', + 'ApplicationAutoScaling' => 'application-autoscaling', + 'ApplicationCostProfiler' => 'application-cost-profiler', + 'ApplicationDiscoveryService' => 'discovery', + 'ApplicationInsights' => 'applicationinsights', + 'Athena' => 'athena', + 'AuditManager' => 'auditmanager', + 'AugmentedAIRuntime' => 'a2i-runtime.sagemaker', + 'AutoScaling' => 'autoscaling', + 'AutoScalingPlans' => 'autoscaling-plans', + 'Backup' => 'backup', + 'BackupGateway' => 'backup-gateway', + 'BackupStorage' => 'backupstorage', + 'Batch' => 'batch', + 'BillingConductor' => 'billingconductor', + 'Braket' => 'braket', + 'Budgets' => 'budgets', + 'Chime' => 'chime', + 'ChimeSDKIdentity' => 'identity-chime', + 'ChimeSDKMediaPipelines' => 'media-pipelines-chime', + 'ChimeSDKMeetings' => 'meetings-chime', + 'ChimeSDKMessaging' => 'messaging-chime', + 'ChimeSDKVoice' => 'voice-chime', + 'CleanRooms' => 'cleanrooms', + 'Cloud9' => 'cloud9', + 'CloudControlApi' => 'cloudcontrolapi', + 'CloudDirectory' => 'clouddirectory', + 'CloudFormation' => 'cloudformation', + 'CloudFront' => 'cloudfront', + 'CloudHSM' => 'cloudhsm', + 'CloudHSMV2' => 'cloudhsmv2', + 'CloudSearch' => 'cloudsearch', + 'CloudTrail' => 'cloudtrail', + 'CloudTrailData' => 'cloudtrail-data', + 'CloudWatch' => 'monitoring', + 'CloudWatchEvents' => 'events', + 'CloudWatchEvidently' => 'evidently', + 'CloudWatchLogs' => 'logs', + 'CloudWatchRUM' => 'rum', + 'CodeArtifact' => 'codeartifact', + 'CodeBuild' => 'codebuild', + 'CodeCatalyst' => 'codecatalyst', + 'CodeCommit' => 'codecommit', + 'CodeDeploy' => 'codedeploy', + 'CodeGuruProfiler' => 'codeguru-profiler', + 'CodeGuruReviewer' => 'codeguru-reviewer', + 'CodePipeline' => 'codepipeline', + 'CodeStar' => 'codestar', + 'CodeStarNotifications' => 'codestar-notifications', + 'CodeStarconnections' => 'codestar-connections', + 'CognitoIdentity' => 'cognito-identity', + 'CognitoIdentityProvider' => 'cognito-idp', + 'CognitoSync' => 'cognito-sync', + 'Comprehend' => 'comprehend', + 'ComprehendMedical' => 'comprehendmedical', + 'ComputeOptimizer' => 'compute-optimizer', + 'ConfigService' => 'config', + 'Connect' => 'connect', + 'ConnectCampaignService' => 'connect-campaigns', + 'ConnectCases' => 'cases', + 'ConnectContactLens' => 'contact-lens', + 'ConnectParticipant' => 'participant.connect', + 'ConnectWisdomService' => 'wisdom', + 'ControlTower' => 'controltower', + 'CostExplorer' => 'ce', + 'CostandUsageReportService' => 'cur', + 'CustomerProfiles' => 'profile', + 'DAX' => 'dax', + 'DLM' => 'dlm', + 'DataExchange' => 'dataexchange', + 'DataPipeline' => 'datapipeline', + 'DataSync' => 'datasync', + 'DatabaseMigrationService' => 'dms', + 'Detective' => 'api.detective', + 'DevOpsGuru' => 'devops-guru', + 'DeviceFarm' => 'devicefarm', + 'DirectConnect' => 
'directconnect', + 'DirectoryService' => 'ds', + 'DocDB' => 'rds', + 'DocDBElastic' => 'docdb-elastic', + 'Drs' => 'drs', + 'DynamoDB' => 'dynamodb', + 'DynamoDBStreams' => 'streams.dynamodb', + 'EBS' => 'ebs', + 'EC2' => 'ec2', + 'EC2InstanceConnect' => 'ec2-instance-connect', + 'ECR' => 'api.ecr', + 'ECRPublic' => 'api.ecr-public', + 'ECS' => 'ecs', + 'EFS' => 'elasticfilesystem', + 'EKS' => 'eks', + 'EMR' => 'elasticmapreduce', + 'EMRContainers' => 'emr-containers', + 'EMRServerless' => 'emr-serverless', + 'ElastiCache' => 'elasticache', + 'ElasticBeanstalk' => 'elasticbeanstalk', + 'ElasticInference' => 'api.elastic-inference', + 'ElasticLoadBalancing' => 'elasticloadbalancing', + 'ElasticLoadBalancingV2' => 'elasticloadbalancing', + 'ElasticTranscoder' => 'elastictranscoder', + 'ElasticsearchService' => 'es', + 'EventBridge' => 'events', + 'FIS' => 'fis', + 'FMS' => 'fms', + 'FSx' => 'fsx', + 'FinSpaceData' => 'finspace-api', + 'Finspace' => 'finspace', + 'Firehose' => 'firehose', + 'ForecastQueryService' => 'forecastquery', + 'ForecastService' => 'forecast', + 'FraudDetector' => 'frauddetector', + 'GameLift' => 'gamelift', + 'GameSparks' => 'gamesparks', + 'Glacier' => 'glacier', + 'GlobalAccelerator' => 'globalaccelerator', + 'Glue' => 'glue', + 'GlueDataBrew' => 'databrew', + 'Greengrass' => 'greengrass', + 'GreengrassV2' => 'greengrass', + 'GroundStation' => 'groundstation', + 'GuardDuty' => 'guardduty', + 'Health' => 'health', + 'HealthLake' => 'healthlake', + 'Honeycode' => 'honeycode', + 'IAM' => 'iam', + 'IVS' => 'ivs', + 'IVSRealTime' => 'ivsrealtime', + 'IdentityStore' => 'identitystore', + 'Imagebuilder' => 'imagebuilder', + 'ImportExport' => 'importexport', + 'Inspector' => 'inspector', + 'Inspector2' => 'inspector2', + 'InternetMonitor' => 'internetmonitor', + 'IoT' => 'iot', + 'IoT1ClickDevicesService' => 'devices.iot1click', + 'IoT1ClickProjects' => 'projects.iot1click', + 'IoTAnalytics' => 'iotanalytics', + 'IoTDeviceAdvisor' => 'api.iotdeviceadvisor', + 'IoTEvents' => 'iotevents', + 'IoTEventsData' => 'data.iotevents', + 'IoTFleetHub' => 'api.fleethub.iot', + 'IoTFleetWise' => 'iotfleetwise', + 'IoTJobsDataPlane' => 'data.jobs.iot', + 'IoTRoboRunner' => 'iotroborunner', + 'IoTSecureTunneling' => 'api.tunneling.iot', + 'IoTSiteWise' => 'iotsitewise', + 'IoTThingsGraph' => 'iotthingsgraph', + 'IoTTwinMaker' => 'iottwinmaker', + 'IoTWireless' => 'api.iotwireless', + 'Ivschat' => 'ivschat', + 'KMS' => 'kms', + 'Kafka' => 'kafka', + 'KafkaConnect' => 'kafkaconnect', + 'Kendra' => 'kendra', + 'KendraRanking' => 'kendra-ranking', + 'Keyspaces' => 'cassandra', + 'Kinesis' => 'kinesis', + 'KinesisAnalytics' => 'kinesisanalytics', + 'KinesisAnalyticsV2' => 'kinesisanalytics', + 'KinesisVideo' => 'kinesisvideo', + 'KinesisVideoArchivedMedia' => 'kinesisvideo', + 'KinesisVideoMedia' => 'kinesisvideo', + 'KinesisVideoSignalingChannels' => 'kinesisvideo', + 'KinesisVideoWebRTCStorage' => 'kinesisvideo', + 'LakeFormation' => 'lakeformation', + 'Lambda' => 'lambda', + 'LambdaPreview' => 'lambda', + 'Lex' => 'runtime.lex', + 'LexModelBuildingService' => 'models.lex', + 'LexModelsV2' => 'models-v2-lex', + 'LexRuntimeV2' => 'runtime-v2-lex', + 'LicenseManager' => 'license-manager', + 'LicenseManagerLinuxSubscriptions' => 'license-manager-linux-subscriptions', + 'LicenseManagerUserSubscriptions' => 'license-manager-user-subscriptions', + 'Lightsail' => 'lightsail', + 'LocationService' => 'geo', + 'LookoutEquipment' => 'lookoutequipment', + 'LookoutMetrics' => 'lookoutmetrics', + 
'LookoutforVision' => 'lookoutvision', + 'MQ' => 'mq', + 'MTurk' => 'mturk-requester', + 'MWAA' => 'airflow', + 'MachineLearning' => 'machinelearning', + 'Macie' => 'macie', + 'Macie2' => 'macie2', + 'MainframeModernization' => 'm2', + 'ManagedBlockchain' => 'managedblockchain', + 'ManagedGrafana' => 'grafana', + 'MarketplaceCatalog' => 'catalog.marketplace', + 'MarketplaceCommerceAnalytics' => 'marketplacecommerceanalytics', + 'MarketplaceEntitlementService' => 'entitlement.marketplace', + 'MarketplaceMetering' => 'metering.marketplace', + 'MediaConnect' => 'mediaconnect', + 'MediaConvert' => 'mediaconvert', + 'MediaLive' => 'medialive', + 'MediaPackage' => 'mediapackage', + 'MediaPackageVod' => 'mediapackage-vod', + 'MediaStore' => 'mediastore', + 'MediaStoreData' => 'data.mediastore', + 'MediaTailor' => 'api.mediatailor', + 'MemoryDB' => 'memory-db', + 'Mgn' => 'mgn', + 'MigrationHub' => 'mgh', + 'MigrationHubConfig' => 'migrationhub-config', + 'MigrationHubOrchestrator' => 'migrationhub-orchestrator', + 'MigrationHubRefactorSpaces' => 'refactor-spaces', + 'MigrationHubStrategyRecommendations' => 'migrationhub-strategy', + 'Mobile' => 'mobile', + 'Neptune' => 'rds', + 'NetworkFirewall' => 'network-firewall', + 'NetworkManager' => 'networkmanager', + 'NimbleStudio' => 'nimble', + 'OAM' => 'oam', + 'Omics' => 'omics', + 'OpenSearchServerless' => 'aoss', + 'OpenSearchService' => 'es', + 'OpsWorks' => 'opsworks', + 'OpsWorksCM' => 'opsworks-cm', + 'Organizations' => 'organizations', + 'Outposts' => 'outposts', + 'PI' => 'pi', + 'Panorama' => 'panorama', + 'Personalize' => 'personalize', + 'PersonalizeEvents' => 'personalize-events', + 'PersonalizeRuntime' => 'personalize-runtime', + 'Pinpoint' => 'pinpoint', + 'PinpointEmail' => 'email', + 'PinpointSMSVoice' => 'sms-voice.pinpoint', + 'PinpointSMSVoiceV2' => 'sms-voice', + 'Pipes' => 'pipes', + 'Polly' => 'polly', + 'Pricing' => 'api.pricing', + 'PrivateNetworks' => 'private-networks', + 'PrometheusService' => 'aps', + 'Proton' => 'proton', + 'QLDB' => 'qldb', + 'QLDBSession' => 'session.qldb', + 'QuickSight' => 'quicksight', + 'RAM' => 'ram', + 'RDS' => 'rds', + 'RDSDataService' => 'rds-data', + 'RecycleBin' => 'rbin', + 'Redshift' => 'redshift', + 'RedshiftDataAPIService' => 'redshift-data', + 'RedshiftServerless' => 'redshift-serverless', + 'Rekognition' => 'rekognition', + 'ResilienceHub' => 'resiliencehub', + 'ResourceExplorer2' => 'resource-explorer-2', + 'ResourceGroups' => 'resource-groups', + 'ResourceGroupsTaggingAPI' => 'tagging', + 'RoboMaker' => 'robomaker', + 'RolesAnywhere' => 'rolesanywhere', + 'Route53' => 'route53', + 'Route53Domains' => 'route53domains', + 'Route53RecoveryCluster' => 'route53-recovery-cluster', + 'Route53RecoveryControlConfig' => 'route53-recovery-control-config', + 'Route53RecoveryReadiness' => 'route53-recovery-readiness', + 'Route53Resolver' => 'route53resolver', + 'S3' => 's3', + 'S3Control' => 's3-control', + 'S3Outposts' => 's3-outposts', + 'SES' => 'email', + 'SESV2' => 'email', + 'SMS' => 'sms', + 'SNS' => 'sns', + 'SQS' => 'sqs', + 'SSM' => 'ssm', + 'SSMContacts' => 'ssm-contacts', + 'SSMIncidents' => 'ssm-incidents', + 'SSO' => 'portal.sso', + 'SSOAdmin' => 'sso', + 'SSOOIDC' => 'oidc', + 'STS' => 'sts', + 'SWF' => 'swf', + 'SageMaker' => 'api.sagemaker', + 'SageMakerFeatureStoreRuntime' => 'featurestore-runtime.sagemaker', + 'SageMakerGeospatial' => 'sagemaker-geospatial', + 'SageMakerMetrics' => 'metrics.sagemaker', + 'SageMakerRuntime' => 'runtime.sagemaker', + 'SagemakerEdgeManager' => 
'edge.sagemaker', + 'SavingsPlans' => 'savingsplans', + 'Scheduler' => 'scheduler', + 'Schemas' => 'schemas', + 'SecretsManager' => 'secretsmanager', + 'SecurityHub' => 'securityhub', + 'SecurityLake' => 'securitylake', + 'ServerlessApplicationRepository' => 'serverlessrepo', + 'ServiceCatalog' => 'servicecatalog', + 'ServiceDiscovery' => 'servicediscovery', + 'ServiceQuotas' => 'servicequotas', + 'Shield' => 'shield', + 'Signer' => 'signer', + 'SimSpaceWeaver' => 'simspaceweaver', + 'SimpleDB' => 'sdb', + 'SnowDeviceManagement' => 'snow-device-management', + 'Snowball' => 'snowball', + 'SsmSap' => 'ssm-sap', + 'States' => 'states', + 'StorageGateway' => 'storagegateway', + 'Support' => 'support', + 'SupportApp' => 'supportapp', + 'Synthetics' => 'synthetics', + 'Textract' => 'textract', + 'TimestreamQuery' => 'query.timestream', + 'TimestreamWrite' => 'ingest.timestream', + 'Tnb' => 'tnb', + 'TranscribeService' => 'transcribe', + 'TranscribeStreamingService' => 'transcribestreaming', + 'Transfer' => 'transfer', + 'Translate' => 'translate', + 'VPCLattice' => 'vpc-lattice', + 'VoiceID' => 'voiceid', + 'WAF' => 'waf', + 'WAFRegional' => 'waf-regional', + 'WAFV2' => 'wafv2', + 'WellArchitected' => 'wellarchitected', + 'WorkDocs' => 'workdocs', + 'WorkLink' => 'worklink', + 'WorkMail' => 'workmail', + 'WorkMailMessageFlow' => 'workmailmessageflow', + 'WorkSpaces' => 'workspaces', + 'WorkSpacesWeb' => 'workspaces-web', + 'XRay' => 'xray', + } + # end service ids + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/endpoint_provider.rb new file mode 100644 index 0000000..b0c3eca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/endpoint_provider.rb @@ -0,0 +1,268 @@ +# frozen_string_literal: true + +module Aws + module Partitions + # @api private + class EndpointProvider + # When sts_regional_endpoint is set to `legacy`, the endpoint + # pattern stays global for the following regions: + STS_LEGACY_REGIONS = %w[ + ap-northeast-1 + ap-south-1 + ap-southeast-1 + ap-southeast-2 + aws-global + ca-central-1 + eu-central-1 + eu-north-1 + eu-west-1 + eu-west-2 + eu-west-3 + sa-east-1 + us-east-1 + us-east-2 + us-west-1 + us-west-2 + ].freeze + + # Intentionally marked private. The format of the endpoint rules + # is an implementation detail. + # @api private + def initialize(rules) + @rules = rules + end + + # @param [String] region The region for the client. + # @param [String] service The endpoint prefix for the service, e.g. + # "monitoring" for cloudwatch. + # @param [String] sts_regional_endpoints [STS only] Whether to use + # `legacy` (global endpoint for legacy regions) or `regional` mode for + # using regional endpoint for supported regions except 'aws-global' + # @param [Hash] variants Endpoint variants such as 'fips' or 'dualstack' + # @option variants [Boolean] :dualstack When true, resolve a dualstack + # endpoint. + # @option variants [Boolean] :fips When true, resolve a FIPS endpoint. + # @api private Use the static class methods instead. + def resolve(region, service, sts_regional_endpoints, variants) + 'https://' + endpoint_for(region, service, build_is_global_fn(sts_regional_endpoints), variants) + end + + # @api private Use the static class methods instead. 
+ def signing_region(region, service, sts_regional_endpoints) + credential_scope(region, service, build_is_global_fn(sts_regional_endpoints)) + .fetch('region', region) + end + + # @api private Use the static class methods instead. + def signing_service(region, service) + # don't default to the service name + # signers should prefer the api metadata's signingName + # if no service is set in the credentialScope + credential_scope(region, service, build_is_global_fn('regional')) + .fetch('service', nil) + end + + # @param [String] region The region used to fetch the partition. + # @param [String] service Used only if dualstack is true. Used to find a + # DNS suffix for a specific service. + # @param [Hash] variants Endpoint variants such as 'fips' or 'dualstack' + # @option variants [Boolean] :dualstack When true, resolve a dualstack + # endpoint. + # @option variants [Boolean] :fips When true, resolve a FIPS endpoint. + # @api private Use the static class methods instead. + def dns_suffix_for(region, service, variants) + if configured_variants?(variants) + resolve_variant(region, service, variants)['dnsSuffix'] + else + get_partition(region)['dnsSuffix'] + end + end + + private + + def configured_variants?(variants) + variants.values.any? + end + + def fetch_variant(cfg, tags) + variants = cfg.fetch('variants', []) + variants.find { |v| tags == Set.new(v['tags']) } || {} + end + + def resolve_variant(region, service, config_variants) + tags = Set.new(config_variants.select { |_k,v| v == true }.map { |k,_v| k.to_s }) + is_global_fn = build_is_global_fn # ignore legacy STS config for variants + + partition_cfg = get_partition(region) + service_cfg = partition_cfg.fetch('services', {}) + .fetch(service, {}) + + endpoints_cfg = service_cfg.fetch('endpoints', {}) + + if is_global_fn.call(service, region, endpoints_cfg, service_cfg) + region = service_cfg.fetch('partitionEndpoint', region) + end + + region_cfg = endpoints_cfg.fetch(region, {}) + warn_deprecation(service, region) if region_cfg['deprecated'] + + partition_defaults = fetch_variant(partition_cfg.fetch('defaults', {}), tags) + service_defaults = fetch_variant(service_cfg.fetch('defaults', {}), tags) + endpoint_cfg = fetch_variant(region_cfg, tags) + + # merge upwards, preferring values from endpoint > service > partition + partition_defaults.merge(service_defaults.merge(endpoint_cfg)) + end + + def validate_variant!(config_variants, resolved_variant) + unless resolved_variant['hostname'] && resolved_variant['dnsSuffix'] + enabled_variants = config_variants.select { |_k, v| v}.map { |k, _v| k.to_s }.join(', ') + raise ArgumentError, + "#{enabled_variants} not supported for this region and partition." 
+        end
+      end
+
+      def endpoint_for(region, service, is_global_fn, variants)
+        if configured_variants?(variants)
+          endpoint_with_variants_for(region, service, variants)
+        else
+          endpoint_no_variants_for(region, service, is_global_fn)
+        end
+      end
+
+      def endpoint_with_variants_for(region, service, variants)
+        variant = resolve_variant(region, service, variants)
+        validate_variant!(variants, variant)
+        variant['hostname'].sub('{region}', region)
+                           .sub('{service}', service)
+                           .sub('{dnsSuffix}', variant['dnsSuffix'])
+      end
+
+      def endpoint_no_variants_for(region, service, is_global_fn)
+        partition = get_partition(region)
+        service_cfg = partition.fetch('services', {}).fetch(service, {})
+
+        # Find the default endpoint
+        default_endpoint = service_cfg
+                           .fetch('defaults', {})
+                           .fetch('hostname', partition['defaults']['hostname'])
+
+        endpoints = service_cfg.fetch('endpoints', {})
+
+        # Check for global endpoint.
+        if is_global_fn.call(service, region, endpoints, service_cfg)
+          region = service_cfg.fetch('partitionEndpoint', region)
+        end
+
+        # Check for service/region level endpoint.
+        region_cfg = endpoints
+                     .fetch(region, {})
+        endpoint = region_cfg
+                   .fetch('hostname', default_endpoint)
+
+        warn_deprecation(service, region) if region_cfg['deprecated']
+
+        # Replace placeholders from the endpoints
+        endpoint.sub('{region}', region)
+                .sub('{service}', service)
+                .sub('{dnsSuffix}', partition['dnsSuffix'])
+      end
+
+      def warn_deprecation(service, region)
+        warn("The endpoint for service: #{service}, region: #{region}"\
+             ' is deprecated.')
+      end
+
+      # Returns a lambda that takes (service, region, endpoints, service_cfg)
+      # and returns true if the service should resolve to a global
+      # (partition-wide) endpoint for that region.
+      def build_is_global_fn(sts_regional_endpoints = 'regional')
+        lambda do |service, region, endpoints, service_cfg|
+          # Check for sts legacy behavior
+          sts_legacy = service == 'sts' &&
+                       sts_regional_endpoints == 'legacy' &&
+                       STS_LEGACY_REGIONS.include?(region)
+
+          is_global = !endpoints.key?(region) &&
+                      service_cfg['isRegionalized'] == false
+
+          sts_legacy || is_global
+        end
+      end
+
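+      # An editorial sketch (not upstream code) of what this lambda controls:
+      # non-regionalized services such as IAM collapse onto their partition
+      # endpoint, and STS stays on the legacy global endpoint when
+      # `sts_regional_endpoints` is `legacy`:
+      #
+      #     Aws::Partitions::EndpointProvider.resolve('eu-west-1', 'iam')
+      #     #=> "https://iam.amazonaws.com"
+      #
+      #     Aws::Partitions::EndpointProvider.resolve('eu-west-1', 'sts', 'legacy')
+      #     #=> "https://sts.amazonaws.com"
+      #
+      #     Aws::Partitions::EndpointProvider.resolve('eu-west-1', 'sts', 'regional')
+      #     #=> "https://sts.eu-west-1.amazonaws.com"
+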
+      def credential_scope(region, service, is_global_fn)
+        partition = get_partition(region)
+        service_cfg = partition.fetch('services', {})
+                               .fetch(service, {})
+        endpoints = service_cfg.fetch('endpoints', {})
+
+        # Check for global endpoint.
+        if is_global_fn.call(service, region, endpoints, service_cfg)
+          region = service_cfg.fetch('partitionEndpoint', region)
+        end
+
+        default_credential_scope = service_cfg
+                                   .fetch('defaults', {})
+                                   .fetch('credentialScope', {})
+
+        endpoints
+          .fetch(region, {})
+          .fetch('credentialScope', default_credential_scope)
+      end
+
+      def get_partition(region_or_partition)
+        partition_containing_region(region_or_partition) ||
+          partition_matching_region(region_or_partition) ||
+          partition_matching_name(region_or_partition) ||
+          default_partition
+      end
+
+      def partition_containing_region(region)
+        @rules['partitions'].find do |p|
+          p['regions'].key?(region)
+        end
+      end
+
+      def partition_matching_region(region)
+        @rules['partitions'].find do |p|
+          p['regionRegex'] && region.match(p['regionRegex']) ||
+            p['services'].values.find do |svc|
+              svc['endpoints'].key?(region) if svc.key?('endpoints')
+            end
+        end
+      end
+
+      def partition_matching_name(partition_name)
+        @rules['partitions'].find { |p| p['partition'] == partition_name }
+      end
+
+      def default_partition
+        @rules['partitions'].find { |p| p['partition'] == 'aws' } ||
+          @rules['partitions'].first
+      end
+
+      class << self
+        def resolve(region, service, sts_endpoint = 'regional', variants = {})
+          default_provider.resolve(region, service, sts_endpoint, variants)
+        end
+
+        def signing_region(region, service, sts_regional_endpoints = 'regional')
+          default_provider.signing_region(region, service, sts_regional_endpoints)
+        end
+
+        def signing_service(region, service)
+          default_provider.signing_service(region, service)
+        end
+
+        def dns_suffix_for(region, service = nil, variants = {})
+          default_provider.dns_suffix_for(region, service, variants)
+        end
+
+        private
+
+        def default_provider
+          @default_provider ||= EndpointProvider.new(Partitions.defaults)
+        end
+      end
+    end
+  end
+end
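Although `EndpointProvider` is marked `@api private`, the class-level helpers above are the whole resolution interface, and a short usage sketch makes the variant handling concrete. This is an editorial example, not part of the diff; the hostnames follow the `{service}.{region}.{dnsSuffix}` templates defined in the partitions.json data shown later, and actual output depends on the bundled data:

    require 'aws-partitions'

    # Plain regional endpoint, from the partition's default template.
    Aws::Partitions::EndpointProvider.resolve('us-west-2', 'dynamodb')
    #=> "https://dynamodb.us-west-2.amazonaws.com"

    # Variant flags select the fips/dualstack hostname templates instead.
    Aws::Partitions::EndpointProvider.resolve('us-west-2', 'dynamodb', 'regional', fips: true)
    #=> "https://dynamodb-fips.us-west-2.amazonaws.com"

    Aws::Partitions::EndpointProvider.resolve('us-west-2', 'dynamodb', 'regional', dualstack: true)
    #=> "https://dynamodb.us-west-2.api.aws"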
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition.rb
new file mode 100644
index 0000000..f0e8752
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+module Aws
+  module Partitions
+    class Partition
+      # @option options [required, String] :name
+      # @option options [required, Hash<String,Region>] :regions
+      # @option options [required, Hash<String,Service>] :services
+      # @api private
+      def initialize(options = {})
+        @name = options[:name]
+        @regions = options[:regions]
+        @region_regex = options[:region_regex]
+        @services = options[:services]
+        @metadata = options[:metadata]
+      end
+
+      # @return [String] The partition name, e.g. "aws", "aws-cn", "aws-us-gov".
+      attr_reader :name
+
+      # @return [String] The regex representing the region format.
+      attr_reader :region_regex
+
+      # @return [Metadata] The metadata for the partition.
+      attr_reader :metadata
+
+      # @param [String] region_name The name of the region, e.g. "us-east-1".
+      # @return [Region]
+      # @raise [ArgumentError] Raises `ArgumentError` for unknown region name.
+      def region(region_name)
+        if @regions.key?(region_name)
+          @regions[region_name]
+        else
+          msg = "invalid region name #{region_name.inspect}; valid region "\
+                "names include #{@regions.keys.join(', ')}"
+          raise ArgumentError, msg
+        end
+      end
+
+      # @return [Array<Region>]
+      def regions
+        @regions.values
+      end
+
+      # @param [String] region_name The name of the region, e.g. "us-east-1".
+      # @return [Boolean] true if the region is in the partition.
+      def region?(region_name)
+        @regions.key?(region_name)
+      end
+
+      # @param [String] service_name The service module name.
+      # @return [Service]
+      # @raise [ArgumentError] Raises `ArgumentError` for unknown service name.
+      def service(service_name)
+        if @services.key?(service_name)
+          @services[service_name]
+        else
+          msg = "invalid service name #{service_name.inspect}; valid service "\
+                "names include #{@services.keys.join(', ')}"
+          raise ArgumentError, msg
+        end
+      end
+
+      # @return [Array<Service>]
+      def services
+        @services.values
+      end
+
+      # @param [String] service_name The service module name.
+      # @return [Boolean] true if the service is in the partition.
+      def service?(service_name)
+        @services.key?(service_name)
+      end
+
+      class << self
+        # @api private
+        def build(partition)
+          Partition.new(
+            name: partition['partition'],
+            regions: build_regions(partition),
+            region_regex: partition['regionRegex'],
+            services: build_services(partition)
+          )
+        end
+
+        private
+
+        # @param [Hash] partition
+        # @return [Hash<String,Region>]
+        def build_regions(partition)
+          partition['regions'].each_with_object({}) do |(region_name, region), regions|
+            next if region_name == "#{partition['partition']}-global"
+
+            regions[region_name] = Region.build(
+              region_name, region, partition
+            )
+          end
+        end
+
+        # @param [Hash] partition
+        # @return [Hash<String,Service>]
+        def build_services(partition)
+          Partitions.service_ids.each_with_object({}) do
+            |(service_name, service), services|
+            service_data = partition['services'].fetch(
+              service, 'endpoints' => {}
+            )
+            services[service_name] = Service.build(
+              service_name, service_data, partition
+            )
+          end
+        end
+      end
+    end
+  end
+end
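`Partition#region?` and `#service?` give cheap membership checks, while `#region` and `#service` raise an `ArgumentError` listing the valid names. An editorial sketch of navigating a partition (availability of any given region or service depends on the bundled data files):

    require 'aws-partitions'

    aws = Aws::Partitions.partition('aws')

    aws.region?('us-east-1') #=> true
    aws.service?('DynamoDB') #=> true

    # Guard lookups that may come from user input, since #region raises
    # for unknown names:
    name = 'eu-west-9'
    puts aws.region(name).description if aws.region?(name)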
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition_list.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition_list.rb
new file mode 100644
index 0000000..a43a8b7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/partition_list.rb
@@ -0,0 +1,114 @@
+# frozen_string_literal: true
+
+module Aws
+  module Partitions
+    class PartitionList
+
+      include Enumerable
+
+      def initialize
+        @partitions = {}
+      end
+
+      # @return [Enumerator<Partition>]
+      def each(&block)
+        @partitions.each_value(&block)
+      end
+
+      # @param [String] partition_name
+      # @return [Partition]
+      def partition(partition_name)
+        if @partitions.key?(partition_name)
+          @partitions[partition_name]
+        else
+          msg = "invalid partition name #{partition_name.inspect}; valid "\
+                "partition names include %s" % [@partitions.keys.join(', ')]
+          raise ArgumentError, msg
+        end
+      end
+
+      # @return [Array<Partition>]
+      def partitions
+        @partitions.values
+      end
+
+      # @param [Partition] partition
+      # @api private
+      def add_partition(partition)
+        if Partition === partition
+          @partitions[partition.name] = partition
+        else
+          raise ArgumentError, "expected Partition, got #{partition.class}"
+        end
+      end
+
+      # @param [Hash] partitions_metadata
+      # @api private
+      def merge_metadata(partitions_metadata)
+        partitions_metadata['partitions'].each do |partition_metadata|
+          outputs = partition_metadata['outputs']
+
+          if existing = @partitions[partition_metadata['id']]
+            @partitions[partition_metadata['id']] = Partition.new(
+              name: existing.name,
+              regions: build_metadata_regions(
+                partition_metadata['id'],
+                partition_metadata['regions'],
+                existing),
+              region_regex: partition_metadata['regionRegex'],
+              services: existing.services.each_with_object({}) do |s, services|
+                services[s.name] = s
+              end,
+              metadata: outputs
+            )
+          else
+            @partitions[partition_metadata['id']] = Partition.new(
+              name: partition_metadata['id'],
+              regions: build_metadata_regions(
+                partition_metadata['id'], partition_metadata['regions']
+              ),
+              region_regex: partition_metadata['regionRegex'],
+              services: {},
+              metadata: outputs
+            )
+          end
+        end
+      end
+
+      # Removes all partitions.
+      # @api private
+      def clear
+        @partitions = {}
+      end
+
+      private
+
+      def build_metadata_regions(partition_name, metadata_regions, existing = nil)
+        metadata_regions.each_with_object({}) do |(region_name, region), regions|
+          if existing && existing.region?(region_name)
+            regions[region_name] = existing.region(region_name)
+          else
+            regions[region_name] = Region.new(
+              name: region_name,
+              description: region['description'],
+              partition_name: partition_name,
+              services: Set.new
+            )
+          end
+        end
+      end
+
+      class << self
+
+        # @api private
+        def build(partitions)
+          partitions['partitions'].inject(PartitionList.new) do |list, partition|
+            list.add_partition(Partition.build(partition))
+            list
+          end
+        end
+
+      end
+    end
+  end
+end
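`PartitionList.build` and `#merge_metadata` are both `@api private`, but wiring them together shows how the two bundled JSON documents combine into the default list. An editorial sketch using the equally private `Aws::Partitions.defaults` and `.default_metadata` loaders (this mirrors what `Aws::Partitions.default_partition_list` does internally):

    require 'aws-partitions'

    # Build a list from partitions.json, then overlay partitions-metadata.json.
    list = Aws::Partitions::PartitionList.build(Aws::Partitions.defaults)
    list.merge_metadata(Aws::Partitions.default_metadata)

    list.partitions.map(&:name) #=> ["aws", "aws-cn", "aws-us-gov", ...]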
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/region.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/region.rb
new file mode 100644
index 0000000..6b30fa0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/region.rb
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+require 'set'
+
+module Aws
+  module Partitions
+    class Region
+
+      # @option options [required, String] :name
+      # @option options [required, String] :description
+      # @option options [required, String] :partition_name
+      # @option options [required, Set<String>] :services
+      # @api private
+      def initialize(options = {})
+        @name = options[:name]
+        @description = options[:description]
+        @partition_name = options[:partition_name]
+        @services = options[:services]
+      end
+
+      # @return [String] The name of this region, e.g. "us-east-1".
+      attr_reader :name
+
+      # @return [String] A short description of this region.
+      attr_reader :description
+
+      # @return [String] The partition this region exists in, e.g. "aws",
+      #   "aws-cn", "aws-us-gov".
+      attr_reader :partition_name
+
+      # @return [Set<String>] The list of services available in this region.
+      #   Service names are the module names as used by the AWS SDK
+      #   for Ruby.
+      attr_reader :services
+
+      class << self
+
+        # @api private
+        def build(region_name, region, partition)
+          Region.new(
+            name: region_name,
+            description: region['description'],
+            partition_name: partition['partition'],
+            services: region_services(region_name, partition)
+          )
+        end
+
+        private
+
+        def region_services(region_name, partition)
+          Partitions.service_ids.inject(Set.new) do |services, (svc_name, svc_id)|
+            if svc = partition['services'][svc_id]
+              services << svc_name if service_in_region?(svc, region_name)
+            else
+              #raise "missing endpoints for #{svc_name} / #{svc_id}"
+            end
+            services
+          end
+        end
+
+        def service_in_region?(svc, region_name)
+          svc.key?('endpoints') && svc['endpoints'].key?(region_name)
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/service.rb b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/service.rb
new file mode 100644
index 0000000..05f8dca
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/lib/aws-partitions/service.rb
@@ -0,0 +1,104 @@
+# frozen_string_literal: true
+
+require 'set'
+
+module Aws
+  module Partitions
+    class Service
+
+      # @option options [required, String] :name
+      # @option options [required, String] :partition_name
+      # @option options [required, Set<String>] :regions
+      # @option options [required, Boolean] :regionalized
+      # @option options [String] :partition_region
+      # @api private
+      def initialize(options = {})
+        @name = options[:name]
+        @partition_name = options[:partition_name]
+        @regions = options[:regions]
+        @fips_regions = options[:fips_regions]
+        @dualstack_regions = options[:dualstack_regions]
+        @regionalized = options[:regionalized]
+        @partition_region = options[:partition_region]
+      end
+
+      # @return [String] The name of this service. The name is the module
+      #   name as used by the AWS SDK for Ruby.
+      attr_reader :name
+
+      # @return [String] The partition name, e.g. "aws", "aws-cn", "aws-us-gov".
+      attr_reader :partition_name
+
+      # @return [Set<String>] The regions this service is available in.
+      #   Regions are scoped to the partition.
+      attr_reader :regions
+
+      # @return [Set<String>] The FIPS compatible regions this service is
+      #   available in. Regions are scoped to the partition.
+      attr_reader :fips_regions
+
+      # @return [Set<String>] The Dualstack compatible regions this service is
+      #   available in. Regions are scoped to the partition.
+      attr_reader :dualstack_regions
+
+      # @return [String,nil] The global partition endpoint for this service.
+      #   May be `nil`.
+      attr_reader :partition_region
+
+      # Returns `false` if the service operates with a single global
+      # endpoint for the current partition, returns `true` if the service
+      # is available in multiple regions.
+      #
+      # Some services have both a partition endpoint and regional endpoints.
+      #
+      # @return [Boolean]
+      def regionalized?
+        @regionalized
+      end
+
+      class << self
+
+        # @api private
+        def build(service_name, service, partition)
+          Service.new(
+            name: service_name,
+            partition_name: partition['partition'],
+            regions: regions(service, partition),
+            fips_regions: variant_regions('fips', service, partition),
+            dualstack_regions: variant_regions('dualstack', service, partition),
+            regionalized: service['isRegionalized'] != false,
+            partition_region: partition_region(service)
+          )
+        end
+
+        private
+
+        def regions(service, partition)
+          svc_endpoints = service.key?('endpoints') ? 
service['endpoints'].keys : [] + names = Set.new(partition['regions'].keys & svc_endpoints) + names - ["#{partition['partition']}-global"] + end + + def variant_regions(variant_name, service, partition) + svc_endpoints = service.fetch('endpoints', {}) + names = Set.new + svc_endpoints.each do |key, value| + variants = value.fetch('variants', []) + variants.each do |variant| + tags = variant.fetch('tags', []) + if tags.include?(variant_name) && partition['regions'].key?(key) + names << key + end + end + end + names - ["#{partition['partition']}-global"] + end + + def partition_region(service) + service['partitionEndpoint'] + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions-metadata.json b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions-metadata.json new file mode 100644 index 0000000..169589a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions-metadata.json @@ -0,0 +1,181 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. 
California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + "description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions.json b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions.json new file mode 100644 index 0000000..d1d5d7f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-partitions-1.751.0/partitions.json @@ -0,0 +1,23832 @@ +{ + "partitions" : [ { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "{service}.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "dnsSuffix" : "amazonaws.com", + "partition" : "aws", + "partitionName" : "AWS Standard", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia 
Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + }, + "services" : { + "a4b" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "access-analyzer" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "account" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "account.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + 
}, + "acm" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "acm-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "acm-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "acm-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "acm-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "acm-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "acm-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "acm-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "acm-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "acm-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "acm-fips.us-west-2.amazonaws.com" + } + } + }, + "acm-pca" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "acm-pca-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : 
"acm-pca-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "acm-pca-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "acm-pca-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "acm-pca-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "airflow" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "amplify" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "amplifybackend" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "amplifyuibuilder" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "aoss" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "api.detective" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api.detective-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api.detective-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : 
"api.detective-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api.detective-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-west-2.amazonaws.com" + } + } + }, + "api.ecr" : { + "defaults" : { + "variants" : [ { + "hostname" : "ecr-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "api.ecr.af-south-1.amazonaws.com" + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "api.ecr.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "api.ecr.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "api.ecr.ap-northeast-2.amazonaws.com" + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "api.ecr.ap-northeast-3.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "api.ecr.ap-south-1.amazonaws.com" + }, + "ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "hostname" : "api.ecr.ap-south-2.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "api.ecr.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "api.ecr.ap-southeast-2.amazonaws.com" + }, + "ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "hostname" : "api.ecr.ap-southeast-3.amazonaws.com" + }, + "ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "hostname" : "api.ecr.ap-southeast-4.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "api.ecr.ca-central-1.amazonaws.com" + }, + "dkr-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dkr-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dkr-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dkr-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "api.ecr.eu-central-1.amazonaws.com" + }, + "eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "hostname" : "api.ecr.eu-central-2.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "api.ecr.eu-north-1.amazonaws.com" + }, + 
"eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "api.ecr.eu-south-1.amazonaws.com" + }, + "eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "hostname" : "api.ecr.eu-south-2.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "api.ecr.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "api.ecr.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "api.ecr.eu-west-3.amazonaws.com" + }, + "fips-dkr-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-east-1.amazonaws.com" + }, + "fips-dkr-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-east-2.amazonaws.com" + }, + "fips-dkr-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-west-1.amazonaws.com" + }, + "fips-dkr-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-west-2.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "hostname" : "api.ecr.me-central-1.amazonaws.com" + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "api.ecr.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "api.ecr.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api.ecr.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "api.ecr.us-east-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "api.ecr.us-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.ecr.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "api.ecr-public" : { + "endpoints" : { + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api.ecr-public.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.ecr-public.us-west-2.amazonaws.com" + } + } + }, + "api.elastic-inference" : { + "endpoints" : { + 
"ap-northeast-1" : { + "hostname" : "api.elastic-inference.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "hostname" : "api.elastic-inference.ap-northeast-2.amazonaws.com" + }, + "eu-west-1" : { + "hostname" : "api.elastic-inference.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "hostname" : "api.elastic-inference.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "hostname" : "api.elastic-inference.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "hostname" : "api.elastic-inference.us-west-2.amazonaws.com" + } + } + }, + "api.fleethub.iot" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "api.fleethub.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api.fleethub.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api.fleethub.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api.fleethub.iot-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api.fleethub.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api.fleethub.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api.fleethub.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "api.iotdeviceadvisor" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "api.iotdeviceadvisor.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api.iotdeviceadvisor.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.iotdeviceadvisor.us-west-2.amazonaws.com" + } + } + }, + "api.iotwireless" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "api.iotwireless.ap-northeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "api.iotwireless.ap-southeast-2.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "api.iotwireless.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "api.iotwireless.eu-west-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "api.iotwireless.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api.iotwireless.us-east-1.amazonaws.com" + }, + "us-west-2" 
: { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.iotwireless.us-west-2.amazonaws.com" + } + } + }, + "api.mediatailor" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "api.pricing" : { + "defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "ap-south-1" : { }, + "us-east-1" : { } + } + }, + "api.sagemaker" : { + "defaults" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api-fips.sagemaker.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api-fips.sagemaker.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "api-fips.sagemaker.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api-fips.sagemaker.us-west-2.amazonaws.com" + } + } + }, + "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + 
"credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "apigateway" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "apigateway-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "apigateway-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "apigateway-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "apigateway-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "apigateway-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "apigateway-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "apigateway-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "apigateway-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "apigateway-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "apigateway-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "app-integrations" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "appconfig" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + 
"eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "appflow" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "applicationinsights" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "appmesh" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "appmesh.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "appmesh.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "appmesh.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "appmesh.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "appmesh.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "appmesh.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : 
"appmesh.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "appmesh.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "appmesh.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "appmesh-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "appmesh-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "appmesh.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "appmesh-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "appmesh.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "appmesh.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "appmesh.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "appmesh.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "appmesh.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "appmesh.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "appmesh.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "appmesh.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "appmesh-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "appmesh-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "appmesh.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "appmesh-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "appmesh-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "appmesh-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "appmesh.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "appmesh-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "appmesh-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "appmesh-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "appmesh.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "appmesh-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "appmesh-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "appmesh-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "appmesh.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : 
"appmesh-fips.us-west-2.amazonaws.com" + } + } + }, + "apprunner" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "apprunner-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "apprunner-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "apprunner-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "apprunner-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "apprunner-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "apprunner-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "appstream2" : { + "defaults" : { + "credentialScope" : { + "service" : "appstream" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "appstream2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "appstream2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-west-2.amazonaws.com" + } + } + }, + "appsync" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "aps" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "arc-zonal-shift" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : 
{ }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "athena" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "athena.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "athena.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "athena.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "athena.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "athena.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "athena.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "athena.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "athena.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "athena.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "athena.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "athena.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "athena.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "athena.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "athena.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "athena.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "athena-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "athena-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + 
"us-west-1" : { + "variants" : [ { + "hostname" : "athena-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "athena-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "auditmanager" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "backup" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "backup-gateway" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "backupstorage" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "batch" : { + "defaults" : { + "variants" : [ { + "hostname" : "fips.batch.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, 
+ "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fips.batch.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fips.batch.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "fips.batch.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fips.batch.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "fips.batch.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "fips.batch.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "fips.batch.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "fips.batch.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "billingconductor" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "billingconductor.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "braket" : { + "endpoints" : { + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "budgets" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "budgets.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "cases" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "deprecated" : true + }, + "fips-us-west-2" : { + "deprecated" : true + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, + "cassandra" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cassandra-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cassandra-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cassandra-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cassandra-fips.us-west-2.amazonaws.com", + "tags" : 
[ "fips" ] + } ] + } + } + }, + "catalog.marketplace" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "ce" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ce.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "chime" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "chime.us-east-1.amazonaws.com", + "protocols" : [ "https" ] + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "cleanrooms" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "cloud9" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "cloudcontrolapi" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "clouddirectory" 
: { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cloudformation-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cloudformation-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cloudformation-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cloudformation-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cloudformation-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cloudformation-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cloudformation-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cloudformation-fips.us-west-2.amazonaws.com" + } + } + }, + "cloudfront" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "cloudfront.amazonaws.com", + "protocols" : [ "http", "https" ] + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "cloudhsm" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "cloudhsmv2" : { + "defaults" : { + "credentialScope" : { + "service" : "cloudhsm" + } + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "cloudsearch" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + 
"ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cloudtrail-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cloudtrail-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cloudtrail-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cloudtrail-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cloudtrail-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cloudtrail-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cloudtrail-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cloudtrail-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cloudtrail-data" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "codeartifact" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "codebuild" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "codebuild-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "codebuild-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : 
"codebuild-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "codebuild-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-west-2.amazonaws.com" + } + } + }, + "codecatalyst" : { + "endpoints" : { + "aws-global" : { + "hostname" : "codecatalyst.global.api.aws" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "codecommit" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "codecommit-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.ca-central-1.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-west-2.amazonaws.com" + } + } + }, + "codedeploy" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + 
"credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-west-2.amazonaws.com" + } + } + }, + "codeguru-reviewer" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "codepipeline" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "codepipeline-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "codepipeline-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "codepipeline-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "codepipeline-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "codepipeline-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "codestar" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + 
"codestar-connections" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "codestar-notifications" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "cognito-identity" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cognito-idp" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com", + 
"tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cognito-sync" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "comprehend" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "comprehend-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "comprehend-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "comprehend-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "comprehend-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "comprehend-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "comprehend-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "comprehendmedical" : { + "endpoints" : { + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "comprehendmedical-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "comprehendmedical-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "comprehendmedical-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "comprehendmedical-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "comprehendmedical-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "comprehendmedical-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "compute-optimizer" : { + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "compute-optimizer.af-south-1.amazonaws.com" + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "compute-optimizer.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "compute-optimizer.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "compute-optimizer.ap-northeast-2.amazonaws.com" + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "compute-optimizer.ap-northeast-3.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + 
"hostname" : "compute-optimizer.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "compute-optimizer.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "compute-optimizer.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "compute-optimizer.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "compute-optimizer.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "compute-optimizer.eu-north-1.amazonaws.com" + }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "compute-optimizer.eu-south-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "compute-optimizer.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "compute-optimizer.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "compute-optimizer.eu-west-3.amazonaws.com" + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "compute-optimizer.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "compute-optimizer.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "compute-optimizer.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "compute-optimizer.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "compute-optimizer.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "compute-optimizer.us-west-2.amazonaws.com" + } + } + }, + "config" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "config-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "config-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "config-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "config-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "config-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : 
"config-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "config-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "config-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "connect" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "connect-campaigns" : { + "endpoints" : { + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "connect-campaigns-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "connect-campaigns-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "connect-campaigns-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "connect-campaigns-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "contact-lens" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "controltower" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "controltower-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "controltower-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "controltower-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "controltower-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "controltower-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "controltower-fips.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "controltower-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "controltower-fips.us-west-2.amazonaws.com" + } + } + }, + "cur" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "data-ats.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "data.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "data.jobs.iot" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { 
}, + "ca-central-1" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "data.mediastore" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "databrew" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "databrew-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "databrew-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "databrew-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "databrew-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "databrew-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "databrew-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "databrew-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "databrew-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "dataexchange" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + 
"ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "datapipeline" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "datasync" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "datasync-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "datasync-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "datasync-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "dax" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "devicefarm" : { + "endpoints" : { + "us-west-2" : { } + } + }, + "devops-guru" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "devops-guru-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "devops-guru-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : 
"devops-guru-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "devops-guru-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "devops-guru-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "devops-guru-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "directconnect" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "directconnect-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "discovery" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "dlm" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "dms" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "dms" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : 
"dms-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dms-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "dms-fips.us-west-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "dms-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "dms-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "dms-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "dms-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "dms-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "dms-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "dms-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "dms-fips.us-west-2.amazonaws.com" + } + } + }, + "docdb" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "rds.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "rds.ap-northeast-2.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "rds.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "rds.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "rds.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "rds.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "rds.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "rds.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "rds.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "rds.eu-west-3.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "rds.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "rds.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rds.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rds.us-west-2.amazonaws.com" + } + } + }, + "drs" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, 
+ "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "ds" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ds-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ds-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ds-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "dynamodb" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "dynamodb-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "dynamodb-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "local" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "localhost:8000", + "protocols" : [ "http" ] + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "dynamodb-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : 
"us-east-1" + }, + "deprecated" : true, + "hostname" : "dynamodb-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "dynamodb-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "dynamodb-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "dynamodb-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "dynamodb-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "dynamodb-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "dynamodb-fips.us-west-2.amazonaws.com" + } + } + }, + "ebs" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ebs-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ebs-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ebs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ebs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ebs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ebs-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ebs-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ebs-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ebs-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ebs-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ec2" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "ec2.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ec2-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" 
: { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "ec2.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ec2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ec2-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "ec2.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ec2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ec2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ec2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ec2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "ecs" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ecs-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ecs-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ecs-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ecs-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + 
"edge.sagemaker" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "fips.eks.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fips.eks.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fips.eks.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "fips.eks.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fips.eks.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "fips.eks.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "fips.eks.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "fips.eks.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "fips.eks.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticache" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "elasticache-fips.us-west-1.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "elasticache-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "elasticache-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "elasticache-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "elasticache-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "elasticache-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, 
+ "hostname" : "elasticache-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "elasticache-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "elasticache-fips.us-west-2.amazonaws.com" + } + } + }, + "elasticbeanstalk" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "elasticbeanstalk-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "elasticbeanstalk-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "elasticbeanstalk-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "elasticbeanstalk-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + 
"hostname" : "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-central-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-north-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-northeast-3.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-south-2.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-3.amazonaws.com" + }, + "fips-ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-4.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : 
"eu-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-central-2.amazonaws.com" + }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-south-1.amazonaws.com" + }, + "fips-eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-south-2.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-west-3.amazonaws.com" + }, + "fips-me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.me-central-1.amazonaws.com" + }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticloadbalancing" : { + "defaults" : { + "protocols" : [ "https" ] + }, + 
"endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "elasticloadbalancing-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "elasticloadbalancing-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "elasticloadbalancing-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "elasticloadbalancing-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticmapreduce" : { + "defaults" : { + "protocols" : [ "https" ], + "sslCommonName" : "{region}.{service}.{dnsSuffix}" + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "elasticmapreduce-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "sslCommonName" : "{service}.{region}.{dnsSuffix}" + }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "sslCommonName" : 
"{service}.{region}.{dnsSuffix}", + "variants" : [ { + "hostname" : "elasticmapreduce-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "elasticmapreduce-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "elasticmapreduce-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "elasticmapreduce-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elastictranscoder" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "email" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "email-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "email-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "email-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "email-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "emr-containers" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "emr-containers-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "emr-containers-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "emr-containers-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "emr-containers-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "emr-containers-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "emr-containers-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "emr-containers-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "emr-containers-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "emr-containers-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"us-west-2" : { + "variants" : [ { + "hostname" : "emr-containers-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "emr-serverless" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "emr-serverless-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "emr-serverless-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "emr-serverless-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "emr-serverless-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "emr-serverless-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "emr-serverless-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "emr-serverless-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "emr-serverless-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "emr-serverless-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "emr-serverless-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "entitlement.marketplace" : { + "defaults" : { + "credentialScope" : { + "service" : "aws-marketplace" + } + }, + "endpoints" : { + "us-east-1" : { } + } + }, + "es" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-west-1.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "es-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "es-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "es-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "es-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : 
"us-west-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "es-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "es-fips.us-west-2.amazonaws.com" + } + } + }, + "events" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "events-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "events-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "events-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "events-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "events-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "events-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "events-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "events-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "evidently" : { + "endpoints" : { + "ap-northeast-1" : { + "hostname" : "evidently.ap-northeast-1.amazonaws.com" + }, + "ap-southeast-1" : { + "hostname" : "evidently.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "hostname" : "evidently.ap-southeast-2.amazonaws.com" + }, + "eu-central-1" : { + "hostname" : "evidently.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "hostname" : "evidently.eu-north-1.amazonaws.com" + }, + "eu-west-1" : { + "hostname" : "evidently.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "hostname" : "evidently.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "hostname" : "evidently.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "hostname" : "evidently.us-west-2.amazonaws.com" + } + } + }, + "finspace" : { + "endpoints" : { + "ca-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "finspace-api" : { + "endpoints" : { + "ca-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "firehose" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { 
}, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "firehose-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "firehose-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "firehose-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "firehose-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "fms" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "fms-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "fms-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "fms-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "fms-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3" : { }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "fms-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-2" : { }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "fms-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "fms-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "fms-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "fms-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "fms-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-2" : { }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "fms-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "fms-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "fms-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" 
+ }, + "deprecated" : true, + "hostname" : "fms-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "fms-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "fms-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.eu-south-1.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + "hostname" : "fms-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "fms-fips.eu-west-3.amazonaws.com" + }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.me-south-1.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { + "variants" : [ { + "hostname" : "fms-fips.me-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "fms-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "fms-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "fms-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "fms-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "fms-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "forecast" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" 
: { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "forecast-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "forecast-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "forecast-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "forecast-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "forecast-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "forecast-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "forecastquery" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "forecastquery-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "forecastquery-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "forecastquery-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "forecastquery-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "forecastquery-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "forecastquery-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "frauddetector" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "fsx" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "fsx-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.ca-central-1.amazonaws.com" + }, + "fips-prod-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.ca-central-1.amazonaws.com" + }, + "fips-prod-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-east-1.amazonaws.com" + }, + "fips-prod-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-east-2.amazonaws.com" + }, + "fips-prod-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + 
"hostname" : "fsx-fips.us-west-1.amazonaws.com" + }, + "fips-prod-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-west-2.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "prod-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "prod-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "prod-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "prod-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "prod-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "fsx-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "fsx-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "fsx-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "fsx-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "gamelift" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "gamesparks" : { + "endpoints" : { + "ap-northeast-1" : { }, + "us-east-1" : { } + } + }, + "geo" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "glacier" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + 
"ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "glacier-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "glacier-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "glacier-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "glue" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "glue-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "glue-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "glue-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "glue-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "grafana" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "grafana.ap-northeast-1.amazonaws.com" + 
}, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "grafana.ap-northeast-2.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "grafana.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "grafana.ap-southeast-2.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "grafana.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "grafana.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "grafana.eu-west-2.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "grafana.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "grafana.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "grafana.us-west-2.amazonaws.com" + } + } + }, + "greengrass" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "greengrass-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "greengrass-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "greengrass-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "greengrass-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "greengrass-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "greengrass-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "greengrass-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "greengrass-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + }, + "isRegionalized" : true + }, + "groundstation" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "groundstation-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "groundstation-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "groundstation-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "groundstation-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + 
} ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "groundstation-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "groundstation-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "guardduty" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "guardduty-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "guardduty-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "guardduty-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "guardduty-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "guardduty-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "guardduty-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "guardduty-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "guardduty-fips.us-west-2.amazonaws.com" + } + }, + "isRegionalized" : true + }, + "health" : { + "defaults" : { + "protocols" : [ "https" ], + "sslCommonName" : "health.us-east-1.amazonaws.com" + }, + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "global.health.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "health-fips.us-east-2.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "health-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "healthlake" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-south-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "honeycode" : { + "endpoints" : { + "us-west-2" : { } + } + }, + "iam" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "iam.amazonaws.com", + "variants" : [ { + "hostname" : "iam-fips.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "aws-global-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "iam-fips.amazonaws.com" + }, + "iam" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "iam-fips.amazonaws.com", 
+ "tags" : [ "fips" ] + } ] + }, + "iam-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "iam-fips.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "identity-chime" : { + "endpoints" : { + "eu-central-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "identity-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "identity-chime-fips.us-east-1.amazonaws.com" + } + } + }, + "identitystore" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "importexport" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1", + "service" : "IngestionService" + }, + "hostname" : "importexport.amazonaws.com", + "signatureVersions" : [ "v2", "v4" ] + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "ingest.timestream" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "ingest-fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ingest.timestream-fips.us-east-1.amazonaws.com" + }, + "ingest-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ingest.timestream-fips.us-east-2.amazonaws.com" + }, + "ingest-fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ingest.timestream-fips.us-west-2.amazonaws.com" + }, + "ingest-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ingest.timestream-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ingest-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ingest.timestream-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ingest-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ingest.timestream-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "inspector" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + 
"credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "inspector-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "inspector-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "inspector-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "inspector-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "inspector2" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "internetmonitor" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "hostname" : "internetmonitor.af-south-1.api.aws" + }, + "ap-east-1" : { + "hostname" : "internetmonitor.ap-east-1.api.aws" + }, + "ap-northeast-1" : { + "hostname" : "internetmonitor.ap-northeast-1.api.aws" + }, + "ap-northeast-2" : { + "hostname" : "internetmonitor.ap-northeast-2.api.aws" + }, + "ap-northeast-3" : { + "hostname" : "internetmonitor.ap-northeast-3.api.aws" + }, + "ap-south-1" : { + "hostname" : "internetmonitor.ap-south-1.api.aws" + }, + "ap-south-2" : { + "hostname" : "internetmonitor.ap-south-2.api.aws" + }, + "ap-southeast-1" : { + "hostname" : "internetmonitor.ap-southeast-1.api.aws" + }, + "ap-southeast-2" : { + "hostname" : "internetmonitor.ap-southeast-2.api.aws" + }, + "ap-southeast-3" : { + "hostname" : "internetmonitor.ap-southeast-3.api.aws" + }, + "ap-southeast-4" : { + "hostname" : "internetmonitor.ap-southeast-4.api.aws" + }, + "ca-central-1" : { + "hostname" : "internetmonitor.ca-central-1.api.aws" + }, + "eu-central-1" : { + "hostname" : "internetmonitor.eu-central-1.api.aws" + }, + "eu-central-2" : { + "hostname" : "internetmonitor.eu-central-2.api.aws" + }, + "eu-north-1" : { + "hostname" : "internetmonitor.eu-north-1.api.aws" + }, + "eu-south-1" : { + "hostname" : "internetmonitor.eu-south-1.api.aws" + }, + "eu-south-2" : { + "hostname" : "internetmonitor.eu-south-2.api.aws" + }, + "eu-west-1" : { + "hostname" : "internetmonitor.eu-west-1.api.aws" + }, + "eu-west-2" : { + "hostname" : "internetmonitor.eu-west-2.api.aws" + }, + "eu-west-3" : { + "hostname" : "internetmonitor.eu-west-3.api.aws" + }, + "me-central-1" : { + "hostname" : "internetmonitor.me-central-1.api.aws" + }, + "me-south-1" : { + "hostname" : "internetmonitor.me-south-1.api.aws" + }, + "sa-east-1" : { + "hostname" : "internetmonitor.sa-east-1.api.aws" + }, + "us-east-1" : { + "hostname" : "internetmonitor.us-east-1.api.aws" + }, + "us-east-2" : { + "hostname" : "internetmonitor.us-east-2.api.aws" + }, + "us-west-1" : { + "hostname" : "internetmonitor.us-west-1.api.aws" + }, + "us-west-2" : { + "hostname" : "internetmonitor.us-west-2.api.aws" + } + } + }, + "iot" : { + "defaults" : { + "credentialScope" : { + "service" : "execute-api" + } + }, + 
"endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotanalytics" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "iotevents" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "iotevents-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "iotevents-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "iotevents-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "iotevents-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "iotevents-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "iotevents-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "iotevents-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "iotevents-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ioteventsdata" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : 
"data.iotevents.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "data.iotevents.ap-northeast-2.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "data.iotevents.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "data.iotevents.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "data.iotevents.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "data.iotevents.ca-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "data.iotevents-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "data.iotevents.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "data.iotevents.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "data.iotevents.eu-west-2.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "data.iotevents-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "data.iotevents-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "data.iotevents-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "data.iotevents-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "data.iotevents.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "data.iotevents-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "data.iotevents.us-east-2.amazonaws.com", + "variants" : [ { + "hostname" : "data.iotevents-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "data.iotevents.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "data.iotevents-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotfleetwise" : { + "endpoints" : { + "eu-central-1" : { }, + "us-east-1" : { } + } + }, + "iotroborunner" : { + "endpoints" : { + "eu-central-1" : { }, + "us-east-1" : { } + } + }, + "iotsecuredtunneling" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + 
"deprecated" : true, + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotsitewise" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "iotsitewise-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "iotsitewise-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "iotsitewise-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "iotsitewise-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "iotsitewise-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "iotsitewise-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "iotsitewise-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "iotsitewise-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotthingsgraph" : { + "defaults" : { + "credentialScope" : { + "service" : "iotthingsgraph" + } + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "iottwinmaker" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "iottwinmaker-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "iottwinmaker-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : 
"iottwinmaker-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "iottwinmaker-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotwireless" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "api.iotwireless.ap-northeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "api.iotwireless.ap-southeast-2.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "api.iotwireless.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "api.iotwireless.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "api.iotwireless.us-west-2.amazonaws.com" + } + } + }, + "ivs" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "ivschat" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "ivsrealtime" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "kafka" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "kafkaconnect" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "kendra" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kendra-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kendra-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kendra-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "kendra-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kendra-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { 
+ "hostname" : "kendra-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kendra-ranking" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "hostname" : "kendra-ranking.af-south-1.api.aws" + }, + "ap-east-1" : { + "hostname" : "kendra-ranking.ap-east-1.api.aws" + }, + "ap-northeast-1" : { + "hostname" : "kendra-ranking.ap-northeast-1.api.aws" + }, + "ap-northeast-2" : { + "hostname" : "kendra-ranking.ap-northeast-2.api.aws" + }, + "ap-northeast-3" : { + "hostname" : "kendra-ranking.ap-northeast-3.api.aws" + }, + "ap-south-1" : { + "hostname" : "kendra-ranking.ap-south-1.api.aws" + }, + "ap-south-2" : { + "hostname" : "kendra-ranking.ap-south-2.api.aws" + }, + "ap-southeast-1" : { + "hostname" : "kendra-ranking.ap-southeast-1.api.aws" + }, + "ap-southeast-2" : { + "hostname" : "kendra-ranking.ap-southeast-2.api.aws" + }, + "ap-southeast-3" : { + "hostname" : "kendra-ranking.ap-southeast-3.api.aws" + }, + "ap-southeast-4" : { + "hostname" : "kendra-ranking.ap-southeast-4.api.aws" + }, + "ca-central-1" : { + "hostname" : "kendra-ranking.ca-central-1.api.aws" + }, + "eu-central-2" : { + "hostname" : "kendra-ranking.eu-central-2.api.aws" + }, + "eu-north-1" : { + "hostname" : "kendra-ranking.eu-north-1.api.aws" + }, + "eu-south-1" : { + "hostname" : "kendra-ranking.eu-south-1.api.aws" + }, + "eu-south-2" : { + "hostname" : "kendra-ranking.eu-south-2.api.aws" + }, + "eu-west-1" : { + "hostname" : "kendra-ranking.eu-west-1.api.aws" + }, + "eu-west-3" : { + "hostname" : "kendra-ranking.eu-west-3.api.aws" + }, + "me-central-1" : { + "hostname" : "kendra-ranking.me-central-1.api.aws" + }, + "me-south-1" : { + "hostname" : "kendra-ranking.me-south-1.api.aws" + }, + "sa-east-1" : { + "hostname" : "kendra-ranking.sa-east-1.api.aws" + }, + "us-east-1" : { + "hostname" : "kendra-ranking.us-east-1.api.aws" + }, + "us-east-2" : { + "hostname" : "kendra-ranking.us-east-2.api.aws" + }, + "us-west-1" : { + "hostname" : "kendra-ranking.us-west-1.api.aws" + }, + "us-west-2" : { + "hostname" : "kendra-ranking.us-west-2.api.aws" + } + } + }, + "kinesis" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kinesis-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kinesis-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "kinesis-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kinesis-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "kinesis-fips.us-east-1.amazonaws.com", + 
"tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kinesis-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "kinesis-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "kinesis-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kinesisanalytics" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "kinesisvideo" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-central-2.amazonaws.com" + }, + "af-south-1" : { + "variants" : [ { + "hostname" : "kms-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "af-south-1-fips" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.af-south-1.amazonaws.com" + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-east-1-fips" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "kms-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-1-fips" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "kms-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2-fips" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-northeast-2.amazonaws.com" + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "kms-fips.ap-northeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3-fips" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-northeast-3.amazonaws.com" + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "kms-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-1-fips" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-south-1.amazonaws.com" + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "kms-fips.ap-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-2-fips" : { + 
"credentialScope" : { + "region" : "ap-south-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-south-2.amazonaws.com" + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "kms-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-1-fips" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "kms-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2-fips" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-2.amazonaws.com" + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "kms-fips.ap-southeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3-fips" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-3.amazonaws.com" + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "kms-fips.ap-southeast-4.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-4-fips" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-4.amazonaws.com" + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1-fips" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-central-1.amazonaws.com" + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "kms-fips.eu-central-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-2-fips" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-central-2.amazonaws.com" + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "kms-fips.eu-north-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-north-1-fips" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-north-1.amazonaws.com" + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "kms-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-1-fips" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-south-1.amazonaws.com" + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "kms-fips.eu-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-2-fips" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-south-2.amazonaws.com" + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "kms-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-1-fips" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "kms-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2-fips" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + 
"hostname" : "kms-fips.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "kms-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3-fips" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-west-3.amazonaws.com" + }, + "il-central-1-fips" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "kms-fips.il-central-1.amazonaws.com" + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "me-central-1-fips" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.me-central-1.amazonaws.com" + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "kms-fips.me-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "me-south-1-fips" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1-fips" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "kms-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "kms-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-west-2.amazonaws.com" + } + } + }, + "lakeformation" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" + }, + 
"me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "lambda" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "lambda.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "lambda.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "lambda.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "lambda.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "lambda.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "lambda.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "lambda.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "lambda.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "lambda.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "lambda.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "lambda.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "lambda.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "lambda.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "lambda.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "lambda.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "lambda.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "lambda.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "lambda.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "lambda.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "lambda.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "lambda-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "lambda-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : 
"lambda-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "lambda-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "lambda.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "lambda.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "lambda.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "lambda-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "lambda-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "lambda-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "lambda-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "license-manager" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "license-manager-linux-subscriptions" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, 
+ "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "license-manager-user-subscriptions" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "lightsail" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + 
"eu-west-3" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "logs" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "logs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "logs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "logs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "logs-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "logs-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "logs-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "logs-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "logs-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "lookoutequipment" : { + "endpoints" : { + "ap-northeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { } + } + }, + "lookoutmetrics" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "lookoutvision" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "m2" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "deprecated" : true + }, + "fips-us-east-1" : { + "deprecated" : true + }, + "fips-us-east-2" : { + "deprecated" : true + }, + "fips-us-west-1" : { + "deprecated" : true + }, + "fips-us-west-2" : { + "deprecated" : true + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, + "machinelearning" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { } + } + }, + "macie" : { + "endpoints" : { + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "macie-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + 
"credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "macie-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "macie-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "macie-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "macie2" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "macie2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "macie2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "macie2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "macie2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "managedblockchain" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { } + } + }, + "marketplacecommerceanalytics" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "media-pipelines-chime" : { + "endpoints" : { + "ap-southeast-1" : { }, + "eu-central-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "media-pipelines-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "media-pipelines-chime-fips.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "media-pipelines-chime-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "media-pipelines-chime-fips.us-west-2.amazonaws.com" + } + } + }, + "mediaconnect" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "mediaconvert" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + 
"ca-central-1" : { + "variants" : [ { + "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "mediaconvert-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "mediaconvert-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "mediaconvert-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "mediaconvert-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "mediaconvert-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "mediaconvert-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "medialive" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "medialive-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "medialive-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "medialive-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "medialive-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "medialive-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "medialive-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "mediapackage" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "mediapackage-vod" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } 
+ } + }, + "mediastore" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "meetings-chime" : { + "endpoints" : { + "ap-southeast-1" : { }, + "eu-central-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-west-2.amazonaws.com" + } + } + }, + "memory-db" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "memory-db-fips.us-west-1.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "messaging-chime" : { + "endpoints" : { + "eu-central-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "messaging-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "messaging-chime-fips.us-east-1.amazonaws.com" + } + } + }, + "metering.marketplace" : { + "defaults" : { + "credentialScope" : { + "service" : "aws-marketplace" + } + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "mgh" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "mgn" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + 
"ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "mgn-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "mgn-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "migrationhub-orchestrator" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "migrationhub-strategy" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "mobileanalytics" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "models-v2-lex" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "models.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" + }, + "variants" : [ { + "hostname" : "models-fips.lex.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "models-fips.lex.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "models-fips.lex.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "models-fips.lex.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "models-fips.lex.us-west-2.amazonaws.com" + } + } + }, + "monitoring" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + 
"ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "monitoring-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "monitoring-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "monitoring-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "monitoring-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "monitoring-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "monitoring-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "monitoring-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "monitoring-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "mq" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "mq-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "mq-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "mq-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "mq-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "mturk-requester" : { + "endpoints" : { + "sandbox" : { + "hostname" : "mturk-requester-sandbox.us-east-1.amazonaws.com" + }, + "us-east-1" : { } + }, + "isRegionalized" : false + }, + "neptune" : { + "endpoints" : { + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "rds.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : 
"rds.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "rds.ap-northeast-2.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "rds.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "rds.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "rds.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "rds.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "rds.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "rds.eu-north-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "rds.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "rds.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "rds.eu-west-3.amazonaws.com" + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "rds.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "rds.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "rds.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rds.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "rds.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rds.us-west-2.amazonaws.com" + } + } + }, + "network-firewall" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" 
: [ { + "hostname" : "network-firewall-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "network-firewall-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "network-firewall-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "network-firewall-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "networkmanager" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "networkmanager.us-west-2.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "nimble" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "oam" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "oidc" : { + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "oidc.af-south-1.amazonaws.com" + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "oidc.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "oidc.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "oidc.ap-northeast-2.amazonaws.com" + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "oidc.ap-northeast-3.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "oidc.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "oidc.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "oidc.ap-southeast-2.amazonaws.com" + }, + "ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "hostname" : "oidc.ap-southeast-3.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "oidc.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "oidc.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "oidc.eu-north-1.amazonaws.com" + }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "oidc.eu-south-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "oidc.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + 
"hostname" : "oidc.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "oidc.eu-west-3.amazonaws.com" + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "oidc.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "oidc.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "oidc.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "oidc.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "oidc.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "oidc.us-west-2.amazonaws.com" + } + } + }, + "omics" : { + "endpoints" : { + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "omics.ap-southeast-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "omics.eu-central-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "omics.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "omics.eu-west-2.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "omics-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "omics-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "omics.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "omics-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "omics.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "omics-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "opsworks" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "opsworks-cm" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "organizations" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "organizations.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "organizations-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "organizations-fips.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "outposts" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" 
: { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "outposts-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "outposts-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "outposts-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "outposts-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "outposts-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "outposts-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "outposts-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "outposts-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "outposts-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "outposts-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "participant.connect" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "participant.connect-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "participant.connect-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "participant.connect-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "participant.connect-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "personalize" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "pi" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + 
} + }, + "pinpoint" : { + "defaults" : { + "credentialScope" : { + "service" : "mobiletargeting" + } + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "pinpoint.ca-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "pinpoint-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "pinpoint-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "pinpoint-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "pinpoint-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "pinpoint-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "pinpoint.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "pinpoint-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "pinpoint.us-east-2.amazonaws.com", + "variants" : [ { + "hostname" : "pinpoint-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "pinpoint.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "pinpoint-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "pipes" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "polly" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "polly-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "polly-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "polly-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "polly-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + 
"hostname" : "polly-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "polly-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "polly-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "polly-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "portal.sso" : { + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "portal.sso.af-south-1.amazonaws.com" + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "portal.sso.ap-east-1.amazonaws.com" + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "portal.sso.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "portal.sso.ap-northeast-2.amazonaws.com" + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "portal.sso.ap-northeast-3.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "portal.sso.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "portal.sso.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "portal.sso.ap-southeast-2.amazonaws.com" + }, + "ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "hostname" : "portal.sso.ap-southeast-3.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "portal.sso.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "portal.sso.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "portal.sso.eu-north-1.amazonaws.com" + }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "portal.sso.eu-south-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "portal.sso.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "portal.sso.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "portal.sso.eu-west-3.amazonaws.com" + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "portal.sso.me-south-1.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "portal.sso.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "portal.sso.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "portal.sso.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "portal.sso.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "portal.sso.us-west-2.amazonaws.com" + } + } + }, + "profile" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + 
"ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "projects.iot1click" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "proton" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "qldb" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "qldb-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "qldb-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "qldb-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "qldb-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "qldb-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "qldb-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "qldb-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "qldb-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "quicksight" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "ram" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ram-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ram-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ram-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ram-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + 
"region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ram-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ram-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ram-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ram-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ram-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ram-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "rbin" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "rbin-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "rbin-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "rbin-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "rds" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "rds-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + 
"me-central-1" : { }, + "me-south-1" : { }, + "rds-fips.ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.ca-central-1.amazonaws.com" + }, + "rds-fips.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-east-1.amazonaws.com" + }, + "rds-fips.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-east-2.amazonaws.com" + }, + "rds-fips.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-west-1.amazonaws.com" + }, + "rds-fips.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-west-2.amazonaws.com" + }, + "rds.ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rds-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rds.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rds-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rds.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rds-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rds.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rds-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rds.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rds-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { }, + "us-east-1" : { + "sslCommonName" : "{service}.{dnsSuffix}", + "variants" : [ { + "hostname" : "rds-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "rds-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "rds-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "rds-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rds-fips.us-west-2.amazonaws.com" + } + } + }, + "rds-data" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rds-data-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, 
+ "deprecated" : true, + "hostname" : "rds-data-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rds-data-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rds-data-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "rds-data-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "rds-data-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "rds-data-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "rds-data-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "redshift" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "redshift-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "redshift-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "redshift-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "redshift-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "redshift-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "redshift-serverless" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "rekognition" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : 
"rekognition-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "rekognition-fips.ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.ca-central-1.amazonaws.com" + }, + "rekognition-fips.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-east-1.amazonaws.com" + }, + "rekognition-fips.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-east-2.amazonaws.com" + }, + "rekognition-fips.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-west-1.amazonaws.com" + }, + "rekognition-fips.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-west-2.amazonaws.com" + }, + "rekognition.ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rekognition.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rekognition.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rekognition.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "rekognition.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "rekognition-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "rekognition-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "rekognition-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "rekognition-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-west-2.amazonaws.com" + } + } + }, + "resiliencehub" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : 
{ }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "resource-explorer-2" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "hostname" : "resource-explorer-2.af-south-1.api.aws" + }, + "ap-east-1" : { + "hostname" : "resource-explorer-2.ap-east-1.api.aws" + }, + "ap-northeast-1" : { + "hostname" : "resource-explorer-2.ap-northeast-1.api.aws" + }, + "ap-northeast-2" : { + "hostname" : "resource-explorer-2.ap-northeast-2.api.aws" + }, + "ap-northeast-3" : { + "hostname" : "resource-explorer-2.ap-northeast-3.api.aws" + }, + "ap-south-1" : { + "hostname" : "resource-explorer-2.ap-south-1.api.aws" + }, + "ap-south-2" : { + "hostname" : "resource-explorer-2.ap-south-2.api.aws" + }, + "ap-southeast-1" : { + "hostname" : "resource-explorer-2.ap-southeast-1.api.aws" + }, + "ap-southeast-2" : { + "hostname" : "resource-explorer-2.ap-southeast-2.api.aws" + }, + "ap-southeast-4" : { + "hostname" : "resource-explorer-2.ap-southeast-4.api.aws" + }, + "ca-central-1" : { + "hostname" : "resource-explorer-2.ca-central-1.api.aws" + }, + "eu-central-1" : { + "hostname" : "resource-explorer-2.eu-central-1.api.aws" + }, + "eu-central-2" : { + "hostname" : "resource-explorer-2.eu-central-2.api.aws" + }, + "eu-north-1" : { + "hostname" : "resource-explorer-2.eu-north-1.api.aws" + }, + "eu-west-1" : { + "hostname" : "resource-explorer-2.eu-west-1.api.aws" + }, + "eu-west-2" : { + "hostname" : "resource-explorer-2.eu-west-2.api.aws" + }, + "eu-west-3" : { + "hostname" : "resource-explorer-2.eu-west-3.api.aws" + }, + "sa-east-1" : { + "hostname" : "resource-explorer-2.sa-east-1.api.aws" + }, + "us-east-1" : { + "hostname" : "resource-explorer-2.us-east-1.api.aws" + }, + "us-east-2" : { + "hostname" : "resource-explorer-2.us-east-2.api.aws" + }, + "us-west-1" : { + "hostname" : "resource-explorer-2.us-west-1.api.aws" + }, + "us-west-2" : { + "hostname" : "resource-explorer-2.us-west-2.api.aws" + } + } + }, + "resource-groups" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "resource-groups-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "resource-groups-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "resource-groups-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + 
"deprecated" : true, + "hostname" : "resource-groups-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "resource-groups-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "resource-groups-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "resource-groups-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "resource-groups-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "robomaker" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "rolesanywhere" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "route53.amazonaws.com", + "variants" : [ { + "hostname" : "route53-fips.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "route53-fips.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "route53-recovery-control-config" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "route53-recovery-control-config.us-west-2.amazonaws.com" + } + } + }, + "route53domains" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "route53resolver" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "rum" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "runtime-v2-lex" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "runtime.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" 
+ }, + "variants" : [ { + "hostname" : "runtime-fips.lex.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "runtime-fips.lex.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "runtime-fips.lex.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "runtime-fips.lex.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "runtime-fips.lex.us-west-2.amazonaws.com" + } + } + }, + "runtime.sagemaker" : { + "defaults" : { + "variants" : [ { + "hostname" : "runtime-fips.sagemaker.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "runtime-fips.sagemaker.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "runtime-fips.sagemaker.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "runtime-fips.sagemaker.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "runtime-fips.sagemaker.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "runtime-fips.sagemaker.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "runtime-fips.sagemaker.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "runtime-fips.sagemaker.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "runtime-fips.sagemaker.us-west-2.amazonaws.com" + } + } + }, + "s3" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.af-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "hostname" : "s3.ap-northeast-1.amazonaws.com", + "signatureVersions" : [ "s3", 
"s3v4" ], + "variants" : [ { + "hostname" : "s3.dualstack.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "hostname" : "s3.ap-southeast-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "hostname" : "s3.ap-southeast-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-4.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "s3.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "s3-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-fips.dualstack.ca-central-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3.dualstack.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-central-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "hostname" : "s3.eu-west-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3.dualstack.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "s3.dualstack.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "s3-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { 
+ "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.me-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.me-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "s3-external-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "s3-external-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, + "sa-east-1" : { + "hostname" : "s3.sa-east-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3.dualstack.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "hostname" : "s3.us-east-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3-fips.dualstack.us-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "s3-fips.dualstack.us-east-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "hostname" : "s3.us-west-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3-fips.dualstack.us-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "hostname" : "s3.us-west-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "hostname" : "s3-fips.dualstack.us-west-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + } + }, + "isRegionalized" : true, + "partitionEndpoint" : "aws-global" + }, + "s3-control" : { + "defaults" : { + "protocols" : [ "https" ], + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "s3-control.ap-northeast-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "s3-control.ap-northeast-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "credentialScope" : { + 
"region" : "ap-northeast-3" + }, + "hostname" : "s3-control.ap-northeast-3.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "s3-control.ap-south-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "s3-control.ap-southeast-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "s3-control.ap-southeast-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "s3-control.ca-central-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control-fips.dualstack.ca-central-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control.dualstack.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.ca-central-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "s3-control.eu-central-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "s3-control.eu-north-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "s3-control.eu-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "s3-control.eu-west-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "s3-control.eu-west-3.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "s3-control.sa-east-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + 
"us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "s3-control.us-east-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-east-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "s3-control.us-east-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-east-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-east-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "s3-control.us-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "s3-control.us-west-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-west-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-west-2.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + } + } + }, + "s3-outposts" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "deprecated" : true + }, + "fips-us-east-1" : { + "deprecated" : true + }, + "fips-us-east-2" : { + "deprecated" : true + }, + "fips-us-west-1" : { + "deprecated" : true + }, + "fips-us-west-2" : { + "deprecated" : true + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : 
{ + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, + "sagemaker-geospatial" : { + "endpoints" : { + "us-west-2" : { } + } + }, + "savingsplans" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "savingsplans.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "scheduler" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "schemas" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "sdb" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "v2" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "hostname" : "sdb.amazonaws.com" + }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "secretsmanager" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-west-2.amazonaws.com" + } + } + }, + "securityhub" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "securitylake" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "serverlessrepo" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-east-1" : { + "protocols" : [ "https" ] + }, + "ap-northeast-1" : { + "protocols" : [ "https" ] + }, + "ap-northeast-2" : { + "protocols" : [ "https" ] + }, + "ap-south-1" : { + "protocols" : [ "https" ] + }, + "ap-southeast-1" : { + "protocols" : [ "https" ] + }, + "ap-southeast-2" : { + "protocols" : [ "https" ] + }, + "ca-central-1" : { + "protocols" : [ "https" ] + }, + "eu-central-1" : { + "protocols" : [ "https" ] + }, + "eu-north-1" : { + "protocols" : [ "https" ] + }, + "eu-west-1" : { + "protocols" : [ "https" ] + }, + "eu-west-2" : { + "protocols" : [ "https" ] + }, + "eu-west-3" : { + "protocols" : [ "https" ] + }, + "me-south-1" : { + "protocols" : [ "https" ] + }, + "sa-east-1" : { + "protocols" : [ "https" ] + }, + "us-east-1" : { + "protocols" : [ "https" ] + }, + "us-east-2" : { + "protocols" : [ "https" ] + }, + "us-west-1" : { + "protocols" : [ "https" ] + }, + "us-west-2" : { + "protocols" : [ "https" ] + } + } + }, + "servicecatalog" : { + "endpoints" : 
{ + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-west-2.amazonaws.com" + } + } + }, + "servicecatalog-appregistry" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-appregistry-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "servicecatalog-appregistry-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-appregistry-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "servicecatalog-appregistry-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"us-west-1" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "servicediscovery" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "servicediscovery.af-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-southeast-4.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-central-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "servicediscovery.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.me-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + 
"variants" : [ { + "hostname" : "servicediscovery.me-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "servicediscovery.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "servicediscovery" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "servicediscovery-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-west-2.amazonaws.com" + } + } + }, + "servicequotas" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "session.qldb" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "session.qldb-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "session.qldb-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + 
"credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "session.qldb-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "session.qldb-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "session.qldb-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "session.qldb-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "shield" : { + "defaults" : { + "protocols" : [ "https" ], + "sslCommonName" : "shield.us-east-1.amazonaws.com" + }, + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "shield.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "shield-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "shield-fips.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "simspaceweaver" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "sms" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "sms-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "sms-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "sms-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "sms-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sms-voice" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "sms-voice-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-east-1.amazonaws.com" + }, + 
"fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "snowball" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-northeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-northeast-3.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-west-1.amazonaws.com" + }, 
+ "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-west-3.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "snowball-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "snowball-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "snowball-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "snowball-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "snowball-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sns" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "sns-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "sns-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "sns-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "sns-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "sns-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "sns-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "sns-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "sns-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sqs" : { + "defaults" : { + 
"protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "sqs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "sqs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "sqs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "sqs-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "sslCommonName" : "queue.{dnsSuffix}", + "variants" : [ { + "hostname" : "sqs-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "sqs-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "sqs-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "sqs-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ssm" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ssm-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ssm-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ssm-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ssm-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ssm-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ssm-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ssm-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ssm-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : 
"ssm-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ssm-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ssm-incidents" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "ssm-sap" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ssm-sap-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ssm-sap-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ssm-sap-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ssm-sap-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ssm-sap-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ssm-sap-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ssm-sap-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ssm-sap-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ssm-sap-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ssm-sap-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sso" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "states" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + 
"credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "states-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "states-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "states-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "states-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "states-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "states-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "states-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "states-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "storagegateway" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-west-2.amazonaws.com" + } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + 
"endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "local" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "localhost:8000", + "protocols" : [ "http" ] + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "sts" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "sts.amazonaws.com" + }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "sts-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "sts-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "sts-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "sts-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "sts-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "sts-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "sts-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "sts-fips.us-west-2.amazonaws.com" + } + }, + "partitionEndpoint" : "aws-global" + }, + "support" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "support.us-east-1.amazonaws.com" + } + }, + "partitionEndpoint" : "aws-global" + }, + "supportapp" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "swf" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : 
"swf-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "swf-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "swf-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "swf-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "swf-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "swf-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "swf-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "swf-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "synthetics" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "tagging" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "textract" : { + "endpoints" : { + "ap-northeast-2" : { }, + "ap-south-1" : { }, + 
"ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "textract-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "textract-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "textract-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "textract-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "textract-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "textract-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "transcribe" : { + "defaults" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "fips.transcribe.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-west-2.amazonaws.com", + "tags" : [ "fips" ] 
+ } ] + } + } + }, + "transcribestreaming" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "sa-east-1" : { }, + "transcribestreaming-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "transcribestreaming-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com" + }, + "transcribestreaming-fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com" + }, + "transcribestreaming-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "transcribestreaming-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "transcribestreaming-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "transfer" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "transfer-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "transfer-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + 
} ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "transfer-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "transfer-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "transfer-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "translate" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "translate-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "translate-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { }, + "us-west-2" : { + "variants" : [ { + "hostname" : "translate-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-west-2.amazonaws.com" + } + } + }, + "voice-chime" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "voice-chime-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "voice-chime-fips.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "voice-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "voice-chime-fips.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "voice-chime-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "voice-chime-fips.us-west-2.amazonaws.com" + } + } + }, + "voiceid" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "voiceid-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "voiceid-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "voiceid-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "voiceid-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + 
"hostname" : "voiceid-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "voiceid-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "vpc-lattice" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "waf" : { + "endpoints" : { + "aws" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "waf-fips.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "aws-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "waf-fips.amazonaws.com" + }, + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "waf.amazonaws.com", + "variants" : [ { + "hostname" : "waf-fips.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "aws-global-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "waf-fips.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, + "waf-regional" : { + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "waf-regional.af-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "waf-regional.ap-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "waf-regional.ap-northeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "waf-regional.ap-northeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "waf-regional.ap-northeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-northeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "waf-regional.ap-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "hostname" : "waf-regional.ap-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "waf-regional.ap-southeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "waf-regional.ap-southeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3" : { + "credentialScope" : { + 
"region" : "ap-southeast-3" + }, + "hostname" : "waf-regional.ap-southeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-southeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "hostname" : "waf-regional.ap-southeast-4.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ap-southeast-4.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "waf-regional.ca-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "waf-regional.eu-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "hostname" : "waf-regional.eu-central-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-central-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "waf-regional.eu-north-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-north-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "waf-regional.eu-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "hostname" : "waf-regional.eu-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "waf-regional.eu-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "waf-regional.eu-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "waf-regional.eu-west-3.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-northeast-3.amazonaws.com" + }, + 
"fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-south-2.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-southeast-3.amazonaws.com" + }, + "fips-ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ap-southeast-4.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-central-2.amazonaws.com" + }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-south-1.amazonaws.com" + }, + "fips-eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-south-2.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.eu-west-3.amazonaws.com" + }, + "fips-me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.me-central-1.amazonaws.com" + }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.me-south-1.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-west-1.amazonaws.com" + 
}, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "hostname" : "waf-regional.me-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "waf-regional.me-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.me-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "waf-regional.sa-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "waf-regional.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "waf-regional.us-east-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "waf-regional.us-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "waf-regional.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "wafv2" : { + "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "wafv2.af-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "wafv2.ap-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "wafv2.ap-northeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-northeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "wafv2.ap-northeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-northeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "wafv2.ap-northeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-northeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "wafv2.ap-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "hostname" : "wafv2.ap-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "wafv2.ap-southeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-southeast-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "wafv2.ap-southeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-southeast-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "hostname" : "wafv2.ap-southeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-southeast-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "hostname" : "wafv2.ap-southeast-4.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-southeast-4.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "wafv2.ca-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "wafv2.eu-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "hostname" : "wafv2.eu-central-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-central-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "wafv2.eu-north-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-north-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "wafv2.eu-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "hostname" : "wafv2.eu-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-south-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "wafv2.eu-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "wafv2.eu-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "wafv2.eu-west-3.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-northeast-1.amazonaws.com" + }, + 
"fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-northeast-3.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-south-2" : { + "credentialScope" : { + "region" : "ap-south-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-south-2.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-southeast-3.amazonaws.com" + }, + "fips-ap-southeast-4" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-southeast-4.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-central-2" : { + "credentialScope" : { + "region" : "eu-central-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-central-2.amazonaws.com" + }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-south-1.amazonaws.com" + }, + "fips-eu-south-2" : { + "credentialScope" : { + "region" : "eu-south-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-south-2.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.eu-west-3.amazonaws.com" + }, + "fips-me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.me-central-1.amazonaws.com" + }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.me-south-1.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : 
"wafv2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "hostname" : "wafv2.me-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "wafv2.me-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.me-south-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "wafv2.sa-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.sa-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "wafv2.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "wafv2.us-east-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "wafv2.us-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "wafv2.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "wellarchitected" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "wisdom" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "deprecated" : true + }, + "fips-us-west-2" : { + "deprecated" : true + }, + "ui-ap-northeast-1" : { }, + "ui-ap-southeast-2" : { }, + "ui-eu-central-1" : { }, + "ui-eu-west-2" : { }, + "ui-us-east-1" : { }, + "ui-us-west-2" : { }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, + "workdocs" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "workdocs-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "workdocs-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "workdocs-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + 
"variants" : [ { + "hostname" : "workdocs-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "workmail" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "workspaces" : { + "endpoints" : { + "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "workspaces-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "workspaces-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "workspaces-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "workspaces-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "workspaces-web" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "xray" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-west-2.amazonaws.com" + }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "xray-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "xray-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "xray-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "xray-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + } + } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + 
"dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "{service}.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "dnsSuffix" : "amazonaws.com.cn", + "partition" : "aws-cn", + "partitionName" : "AWS China", + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + }, + "services" : { + "access-analyzer" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "account" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "account.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "acm" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "api.ecr" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "api.ecr.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "api.sagemaker" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "api.tunneling.iot" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "apigateway" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "appconfig" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "applicationinsights" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "appmesh" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "appmesh.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "appsync" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "athena" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "athena.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "athena.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "backup" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "batch" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "budgets" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "budgets.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "cassandra" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "ce" : { + "endpoints" : { + "aws-cn-global" : { + 
"credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "ce.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "cloudcontrolapi" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "cloudfront" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "cloudfront.cn-northwest-1.amazonaws.com.cn", + "protocols" : [ "http", "https" ] + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "cloudtrail" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "codebuild" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "codecommit" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "codepipeline" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "cognito-identity" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "compute-optimizer" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "compute-optimizer.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "compute-optimizer.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "config" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "cur" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, + "data-ats.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { + "hostname" : "data.ats.iot.cn-north-1.amazonaws.com.cn", + "protocols" : [ "https" ] + }, + "cn-northwest-1" : { } + } + }, + "data.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "data.jobs.iot" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "databrew" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "datasync" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "dax" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "directconnect" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "dlm" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "dms" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "docdb" : { + "endpoints" : { + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "rds.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "ds" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "dynamodb" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "ebs" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "ec2" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { 
} + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "elasticbeanstalk" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "fips-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "elasticloadbalancing" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "elasticmapreduce" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "emr-containers" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "es" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "events" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "firehose" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "firehose.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "fms" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "fsx" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "gamelift" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "glacier" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "glue" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "greengrass" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { } + }, + "isRegionalized" : true + }, + "guardduty" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + }, + "isRegionalized" : true + }, + "health" : { + "defaults" : { + "protocols" : [ "https" ], + "sslCommonName" : "health.cn-northwest-1.amazonaws.com.cn" + }, + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "global.health.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "iam" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "iam.cn-north-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "internetmonitor" : { + "defaults" : { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "variants" : [ { + "dnsSuffix" : 
"api.amazonwebservices.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "hostname" : "internetmonitor.cn-north-1.api.amazonwebservices.com.cn" + }, + "cn-northwest-1" : { + "hostname" : "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn" + } + } + }, + "iot" : { + "defaults" : { + "credentialScope" : { + "service" : "execute-api" + } + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "iotanalytics" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "iotevents" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "ioteventsdata" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "data.iotevents.cn-north-1.amazonaws.com.cn" + } + } + }, + "iotsecuredtunneling" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "iotsitewise" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "kafka" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "kendra-ranking" : { + "defaults" : { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "variants" : [ { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "hostname" : "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn" + }, + "cn-northwest-1" : { + "hostname" : "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn" + } + } + }, + "kinesis" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "kinesisanalytics" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "kinesisvideo" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "kms" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "lakeformation" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "lambda" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "lambda.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "license-manager" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "logs" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "mediaconvert" : { + "endpoints" : { + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "memory-db" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "monitoring" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "mq" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "neptune" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "rds.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "rds.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "oam" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + 
"organizations" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "organizations.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "personalize" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "pi" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "polly" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, + "ram" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "rbin" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "rds" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "resource-explorer-2" : { + "defaults" : { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "variants" : [ { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "hostname" : "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn" + }, + "cn-northwest-1" : { + "hostname" : "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn" + } + } + }, + "resource-groups" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "rolesanywhere" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "route53.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "route53resolver" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "runtime.sagemaker" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "s3" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com.cn", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.cn-north-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "s3-control" : { + "defaults" : { + "protocols" : [ "https" ], + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com.cn", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "s3-control.cn-north-1.amazonaws.com.cn", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "s3-control.cn-northwest-1.amazonaws.com.cn", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "secretsmanager" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "securityhub" : { + 
"endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "serverlessrepo" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { + "protocols" : [ "https" ] + }, + "cn-northwest-1" : { + "protocols" : [ "https" ] + } + } + }, + "servicecatalog" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "servicediscovery" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "servicediscovery.cn-north-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "servicediscovery.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "servicequotas" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "sms" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "snowball" : { + "endpoints" : { + "cn-north-1" : { + "variants" : [ { + "hostname" : "snowball-fips.cn-north-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "snowball-fips.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "fips-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "sns" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "sqs" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "ssm" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "states" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "storagegateway" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "sts" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "support" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "support.cn-north-1.amazonaws.com.cn" + } + }, + "partitionEndpoint" : "aws-cn-global" + }, + "swf" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "synthetics" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "tagging" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "transcribe" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "cn.transcribe.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "cn.transcribe.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "transcribestreaming" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, + "transfer" : { + "endpoints" : { + "cn-north-1" : { }, + 
"cn-northwest-1" : { } + } + }, + "waf-regional" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "waf-regional.cn-north-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "waf-regional-fips.cn-north-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "waf-regional.cn-northwest-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "fips-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "wafv2" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "wafv2.cn-north-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "wafv2-fips.cn-north-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "wafv2.cn-northwest-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + "tags" : [ "fips" ] + } ] + }, + "fips-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.cn-northwest-1.amazonaws.com.cn" + } + } + }, + "workspaces" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, + "xray" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + } + } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "{service}.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "dnsSuffix" : "amazonaws.com", + "partition" : "aws-us-gov", + "partitionName" : "AWS GovCloud (US)", + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + }, + "services" : { + "access-analyzer" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "access-analyzer.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "access-analyzer.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "access-analyzer.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "access-analyzer.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer.us-gov-west-1.amazonaws.com" + } + } + }, + "acm" : { + "defaults" : { + "variants" : [ { + "hostname" : "acm.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "acm.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "acm.us-gov-west-1.amazonaws.com" + } + } + }, + "acm-pca" : { + "defaults" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "acm-pca.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "acm-pca.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "acm-pca.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "acm-pca.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "acm-pca.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "api.detective" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "api.detective-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "api.detective-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api.detective-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "api.ecr" : { + "defaults" : { + "variants" : [ { + "hostname" : "ecr-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "dkr-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dkr-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-dkr-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com" + }, + "fips-dkr-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "api.ecr.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : 
"ecr-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api.ecr.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "api.sagemaker" : { + "defaults" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api-fips.sagemaker.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1-fips-secondary" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api.sagemaker.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1-secondary" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "api.sagemaker.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "apigateway" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "appconfig" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "appconfig.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "appconfig.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "appconfig.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "appconfig.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "appconfigdata" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { + "hostname" : "application-autoscaling.us-gov-east-1.amazonaws.com", + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "application-autoscaling.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "deprecated" : true, + "hostname" : "application-autoscaling.us-gov-east-1.amazonaws.com", + "protocols" : [ "http", "https" ] + }, + "us-gov-west-1" : { + "hostname" : "application-autoscaling.us-gov-west-1.amazonaws.com", + "protocols" : [ "http", "https" 
], + "variants" : [ { + "hostname" : "application-autoscaling.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "deprecated" : true, + "hostname" : "application-autoscaling.us-gov-west-1.amazonaws.com", + "protocols" : [ "http", "https" ] + } + } + }, + "applicationinsights" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "applicationinsights.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "applicationinsights.us-gov-west-1.amazonaws.com" + } + } + }, + "appstream2" : { + "defaults" : { + "credentialScope" : { + "service" : "appstream" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "appstream2-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "appstream2-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "appstream2-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "athena" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "athena-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "athena-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "athena-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "athena.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "autoscaling" : { + "defaults" : { + "variants" : [ { + "hostname" : "autoscaling.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ] + } + } + }, + "backup" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "backup-gateway" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "batch" : { + "defaults" : { + "variants" : [ { + "hostname" : "batch.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "batch.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : 
"batch.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "batch.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "batch.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cassandra" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "cassandra.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "cassandra.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "cassandra.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cassandra.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "cassandra.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cassandra.us-gov-west-1.amazonaws.com" + } + } + }, + "cloudcontrolapi" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "clouddirectory" : { + "endpoints" : { + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "clouddirectory.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "clouddirectory.us-gov-west-1.amazonaws.com" + } + } + }, + "cloudformation" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "cloudformation.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "cloudformation.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "cloudformation.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cloudformation.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "cloudformation.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cloudformation.us-gov-west-1.amazonaws.com" + } + } + }, + "cloudhsm" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "cloudhsmv2" : { + "defaults" : { + "credentialScope" : { + "service" : "cloudhsm" + } + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "cloudtrail" : { + "defaults" : { + "variants" : [ { + "hostname" : "cloudtrail.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + 
"credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "cloudtrail.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cloudtrail.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "cloudtrail.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "cloudtrail.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "codebuild" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "codebuild-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "codebuild-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "codebuild-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "codecommit" : { + "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "codecommit-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "codecommit-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "codedeploy" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "codedeploy-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "codedeploy-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "codepipeline" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "codepipeline-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "codepipeline-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cognito-identity" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "cognito-idp" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + 
"region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "comprehend" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "comprehend-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "comprehend-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "comprehendmedical" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "comprehendmedical-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "compute-optimizer" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "compute-optimizer-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "compute-optimizer-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "config" : { + "defaults" : { + "variants" : [ { + "hostname" : "config.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "config.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "config.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "config.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "config.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "connect" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "controltower" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "data-ats.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "data.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "service" : "iotdata" + }, + "deprecated" : true, + "hostname" : "data.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : 
{ + "variants" : [ { + "hostname" : "data.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "data.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "data.jobs.iot" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "data.jobs.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "databrew" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "databrew.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "databrew.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "datasync" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "directconnect" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "directconnect.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "directconnect.us-gov-west-1.amazonaws.com" + } + } + }, + "dlm" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "dlm.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "dlm.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "dlm.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "dlm.us-gov-west-1.amazonaws.com" + } + } + }, + "dms" : { + "defaults" : { + "variants" : [ { + "hostname" : "dms.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "dms" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "dms.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "dms-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "dms.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "dms.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : 
"us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "dms.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "dms.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "dms.us-gov-west-1.amazonaws.com" + } + } + }, + "docdb" : { + "endpoints" : { + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + } + } + }, + "ds" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "dynamodb" : { + "defaults" : { + "variants" : [ { + "hostname" : "dynamodb.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "dynamodb.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "dynamodb.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "dynamodb.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "dynamodb.us-gov-west-1.amazonaws.com" + } + } + }, + "ebs" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "ec2" : { + "defaults" : { + "variants" : [ { + "hostname" : "ec2.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ec2.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ec2.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ec2.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ec2.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "ecs" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ecs-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "ecs-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "ecs-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "eks.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : 
true, + "hostname" : "eks.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "eks.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "eks.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "eks.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticache" : { + "defaults" : { + "variants" : [ { + "hostname" : "elasticache.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticache.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "elasticache.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticache.us-gov-west-1.amazonaws.com" + } + } + }, + "elasticbeanstalk" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "elasticbeanstalk.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "elasticbeanstalk.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "elasticbeanstalk.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "elasticbeanstalk.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticbeanstalk.us-gov-west-1.amazonaws.com" + } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticloadbalancing" : { + "defaults" : { + "variants" : [ { + "hostname" : "elasticloadbalancing.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticloadbalancing.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "elasticloadbalancing.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : 
"elasticloadbalancing.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticmapreduce" : { + "defaults" : { + "variants" : [ { + "hostname" : "elasticmapreduce.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "elasticmapreduce.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "elasticmapreduce.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "elasticmapreduce.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "email" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "email-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "email-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "emr-containers" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "es" : { + "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "es-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "es-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "es-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "events" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "events.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "events.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "events.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "events.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "firehose" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "firehose-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "firehose-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "firehose-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "fms" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + 
"region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "fms-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "fms-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "fsx" : { + "endpoints" : { + "fips-prod-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-gov-east-1.amazonaws.com" + }, + "fips-prod-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "fsx-fips.us-gov-west-1.amazonaws.com" + }, + "prod-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "prod-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "fsx-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "fsx-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "fsx-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "glacier" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "glacier.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "glacier.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "glacier.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "glacier.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "glue" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "glue-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "glue-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "glue-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "greengrass" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "dataplane-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "greengrass-ats.iot.us-gov-east-1.amazonaws.com" + }, + "dataplane-us-gov-west-1" : { + "credentialScope" : { 
+ "region" : "us-gov-west-1" + }, + "hostname" : "greengrass-ats.iot.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "greengrass.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "greengrass.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "greengrass.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "greengrass.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + }, + "isRegionalized" : true + }, + "guardduty" : { + "defaults" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "guardduty.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "guardduty.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "guardduty.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "guardduty.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "guardduty.us-gov-west-1.amazonaws.com" + } + }, + "isRegionalized" : true + }, + "health" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "health-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "health-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iam" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "iam.us-gov.amazonaws.com", + "variants" : [ { + "hostname" : "iam.us-gov.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "aws-us-gov-global-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "iam.us-gov.amazonaws.com" + }, + "iam-govcloud" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "iam.us-gov.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "iam-govcloud-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "iam.us-gov.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, + "identitystore" : { + "defaults" : { + "variants" : [ { + "hostname" : "identitystore.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "identitystore.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "identitystore.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "identitystore.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : 
"identitystore.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ingest.timestream" : { + "endpoints" : { + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "ingest.timestream.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ingest.timestream.us-gov-west-1.amazonaws.com" + } + } + }, + "inspector" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "inspector-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "inspector-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "inspector-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "inspector2" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "internetmonitor" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "hostname" : "internetmonitor.us-gov-east-1.api.aws" + }, + "us-gov-west-1" : { + "hostname" : "internetmonitor.us-gov-west-1.api.aws" + } + } + }, + "iot" : { + "defaults" : { + "credentialScope" : { + "service" : "execute-api" + } + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "service" : "execute-api" + }, + "deprecated" : true, + "hostname" : "iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotevents" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "iotevents-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "iotevents-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "ioteventsdata" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "data.iotevents-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "data.iotevents.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "data.iotevents-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotsecuredtunneling" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + 
"region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iotsitewise" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "iotsitewise-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "iotsitewise-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "iottwinmaker" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "iottwinmaker-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kafka" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "kendra" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kendra-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "kendra-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kendra-ranking" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "hostname" : "kendra-ranking.us-gov-east-1.api.aws" + }, + "us-gov-west-1" : { + "hostname" : "kendra-ranking.us-gov-west-1.api.aws" + } + } + }, + "kinesis" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesis.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesis.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "kinesis.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "kinesis.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kinesis.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "kinesis.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "kinesisanalytics" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : 
{ + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "lakeformation" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "lakeformation-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "lakeformation-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "lambda" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "lambda-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "lambda-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "lambda-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "lambda-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "lambda.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "license-manager" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "license-manager-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "license-manager-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "logs" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "logs.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "logs.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "logs.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "logs.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "managedblockchain" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "mediaconvert" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "mediaconvert.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "mediaconvert.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "meetings-chime" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + 
"us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "metering.marketplace" : { + "defaults" : { + "credentialScope" : { + "service" : "aws-marketplace" + } + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "models.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" + }, + "variants" : [ { + "hostname" : "models-fips.lex.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "models-fips.lex.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "models-fips.lex.us-gov-west-1.amazonaws.com" + } + } + }, + "monitoring" : { + "defaults" : { + "variants" : [ { + "hostname" : "monitoring.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "monitoring.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "monitoring.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "monitoring.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "monitoring.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "mq" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "mq-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "mq-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "mq-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "neptune" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "rds.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + } + } + }, + "network-firewall" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "network-firewall-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "network-firewall-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + 
} ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "network-firewall-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "networkmanager" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "networkmanager.us-gov-west-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, + "oidc" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "oidc.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "oidc.us-gov-west-1.amazonaws.com" + } + } + }, + "organizations" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "organizations.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "organizations.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "organizations.us-gov-west-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, + "outposts" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "outposts.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "outposts.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "outposts.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "outposts.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "participant.connect" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "participant.connect.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "participant.connect.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "pi" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "pinpoint" : { + "defaults" : { + "credentialScope" : { + "service" : "mobiletargeting" + } + }, + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "pinpoint-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "pinpoint.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "pinpoint-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "polly" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "polly-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "polly-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "portal.sso" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "portal.sso.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : 
"portal.sso.us-gov-west-1.amazonaws.com" + } + } + }, + "quicksight" : { + "endpoints" : { + "api" : { }, + "us-gov-west-1" : { } + } + }, + "ram" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ram.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ram.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ram.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ram.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ram.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ram.us-gov-west-1.amazonaws.com" + } + } + }, + "rbin" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "rds" : { + "defaults" : { + "variants" : [ { + "hostname" : "rds.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "rds.us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "rds.us-gov-east-1.amazonaws.com" + }, + "rds.us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "rds.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "rds.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "rds.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + } + } + }, + "redshift" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "redshift.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "redshift.us-gov-west-1.amazonaws.com" + } + } + }, + "rekognition" : { + "endpoints" : { + "rekognition-fips.us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com" + }, + "rekognition.us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + 
}, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "resource-explorer-2" : { + "defaults" : { + "dnsSuffix" : "api.aws", + "variants" : [ { + "dnsSuffix" : "api.aws", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "hostname" : "resource-explorer-2.us-gov-east-1.api.aws" + }, + "us-gov-west-1" : { + "hostname" : "resource-explorer-2.us-gov-west-1.api.aws" + } + } + }, + "resource-groups" : { + "defaults" : { + "variants" : [ { + "hostname" : "resource-groups.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "resource-groups.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "resource-groups.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "resource-groups.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "resource-groups.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "robomaker" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "route53.us-gov.amazonaws.com", + "variants" : [ { + "hostname" : "route53.us-gov.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "route53.us-gov.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, + "route53resolver" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "runtime.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" + }, + "variants" : [ { + "hostname" : "runtime-fips.lex.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "runtime-fips.lex.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "runtime-fips.lex.us-gov-west-1.amazonaws.com" + } + } + }, + "runtime.sagemaker" : { + "defaults" : { + "variants" : [ { + "hostname" : "runtime.sagemaker.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "runtime.sagemaker.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "runtime.sagemaker.us-gov-west-1.amazonaws.com" + } + } + }, + "s3" : { + "defaults" : { + "signatureVersions" : [ "s3", "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : 
"us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "s3-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "hostname" : "s3.us-gov-east-1.amazonaws.com", + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "s3-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "hostname" : "s3.us-gov-west-1.amazonaws.com", + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "s3-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3.dualstack.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "s3-control" : { + "defaults" : { + "protocols" : [ "https" ], + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}-fips.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "amazonaws.com", + "hostname" : "{service}.dualstack.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "s3-control.us-gov-east-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-gov-east-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "s3-control.us-gov-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control.dualstack.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.us-gov-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + } + } + }, + "s3-outposts" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "deprecated" : true + }, + "fips-us-gov-west-1" : { + "deprecated" : true + }, + "us-gov-east-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + } + } + }, + "secretsmanager" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "secretsmanager-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : 
{ + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "secretsmanager-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "securityhub" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "securityhub-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "securityhub-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "serverlessrepo" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-east-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "serverlessrepo.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "serverlessrepo.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "serverlessrepo.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "serverlessrepo.us-gov-west-1.amazonaws.com" + } + } + }, + "servicecatalog" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "servicecatalog-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "servicecatalog-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "servicecatalog-appregistry" : { + "defaults" : { + "variants" : [ { + "hostname" : "servicecatalog-appregistry.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "servicediscovery" : { + "endpoints" : { + "servicediscovery" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "servicediscovery-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "servicediscovery.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + 
"hostname" : "servicediscovery.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "servicequotas" : { + "defaults" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "servicequotas.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "servicequotas.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "servicequotas.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "servicequotas.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "servicequotas.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sms" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "sms-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "sms-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "sms-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sms-voice" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "snowball" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "snowball-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "snowball-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sns" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "sns.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "sns.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "sns.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "sns.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sqs" : { + "defaults" : { + "variants" : [ { + "hostname" : "sqs.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + 
"credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sqs.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sqs.us-gov-west-1.amazonaws.com", + "protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + } + } + }, + "ssm" : { + "defaults" : { + "variants" : [ { + "hostname" : "ssm.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "ssm.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "ssm.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "ssm.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "ssm.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "sso" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sso.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sso.us-gov-west-1.amazonaws.com" + } + } + }, + "states" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "states-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "states.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "states-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "states.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "storagegateway" : { + "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "variants" : [ { + "hostname" : "streams.dynamodb.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + 
"credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb.us-gov-west-1.amazonaws.com" + } + } + }, + "sts" : { + "defaults" : { + "variants" : [ { + "hostname" : "sts.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "sts.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "sts.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "sts.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "sts.us-gov-west-1.amazonaws.com" + } + } + }, + "support" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "support.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "support.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "support.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + }, + "partitionEndpoint" : "aws-us-gov-global" + }, + "swf" : { + "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "swf.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "swf.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "swf.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "swf.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "swf.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "swf.us-gov-west-1.amazonaws.com" + } + } + }, + "synthetics" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "synthetics-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "synthetics-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "tagging" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "textract" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "textract-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "textract-fips.us-gov-east-1.amazonaws.com", + "tags" : 
[ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "textract-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "transcribe" : { + "defaults" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "fips.transcribe.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "transcribestreaming" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "transfer" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "transfer-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "transfer-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "translate" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "translate-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "translate-fips.us-gov-west-1.amazonaws.com" + } + } + }, + "waf-regional" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "waf-regional-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "waf-regional.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "waf-regional.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "wafv2" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "wafv2.us-gov-east-1.amazonaws.com", 
+ "variants" : [ { + "hostname" : "wafv2-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "wafv2.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "wellarchitected" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "workspaces" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "workspaces-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "workspaces-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "xray" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "xray-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "xray-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "xray-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + } + } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "c2s.ic.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "c2s.ic.gov", + "partition" : "aws-iso", + "partitionName" : "AWS ISO (US)", + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + }, + "services" : { + "api.ecr" : { + "endpoints" : { + "us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "api.ecr.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "hostname" : "api.ecr.us-iso-west-1.c2s.ic.gov" + } + } + }, + "api.sagemaker" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "apigateway" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "appconfig" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "autoscaling" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "comprehend" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "config" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "datapipeline" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "directconnect" 
: { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "dms" : { + "defaults" : { + "variants" : [ { + "hostname" : "dms.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "dms" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "dms.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "dms-fips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "dms.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "dms.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-east-1-fips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "dms.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "dms.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1-fips" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "dms.us-iso-west-1.c2s.ic.gov" + } + } + }, + "ds" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "dynamodb" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "ebs" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "ec2" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov" + }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticloadbalancing" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "https" ] + }, + "us-iso-west-1" : { } + } + }, + "es" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "events" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "firehose" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "glacier" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "glue" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "health" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "iam" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "iam.us-iso-east-1.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, + "kinesis" : { 
+ "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-east-1-fips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1-fips" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-iso-west-1.c2s.ic.gov" + } + } + }, + "lambda" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "license-manager" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "logs" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "medialive" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "mediapackage" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "outposts" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "ram" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "rds" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "route53.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, + "route53resolver" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "runtime.sagemaker" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "s3" : { + "defaults" : { + "signatureVersions" : [ "s3v4" ] + }, + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ] + }, + "us-iso-west-1" : { } + } + }, + "secretsmanager" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "snowball" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "sns" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "sqs" : { + "endpoints" : { + "us-iso-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-iso-west-1" : { } + } + }, + "ssm" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "states" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + } + }, + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "sts" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "support" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "support.us-iso-east-1.c2s.ic.gov" + } + }, + "partitionEndpoint" : "aws-iso-global" + }, + "swf" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "synthetics" : { + 
"endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, + "tagging" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "transcribe" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "transcribestreaming" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "translate" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, + "workspaces" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + } + } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "sc2s.sgov.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "sc2s.sgov.gov", + "partition" : "aws-iso-b", + "partitionName" : "AWS ISOB (US)", + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + }, + "services" : { + "api.ecr" : { + "endpoints" : { + "us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "api.ecr.us-isob-east-1.sc2s.sgov.gov" + } + } + }, + "appconfig" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "config" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "directconnect" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "dlm" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "dms" : { + "defaults" : { + "variants" : [ { + "hostname" : "dms.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "dms" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "variants" : [ { + "hostname" : "dms.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + }, + "dms-fips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "dms.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "dms.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + }, + "us-isob-east-1-fips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "dms.us-isob-east-1.sc2s.sgov.gov" + } + } + }, + "ds" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "dynamodb" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ebs" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ec2" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "elasticfilesystem" : 
{ + "endpoints" : { + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } + } + }, + "elasticloadbalancing" : { + "endpoints" : { + "us-isob-east-1" : { + "protocols" : [ "https" ] + } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "es" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "events" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "glacier" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "health" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "iam" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "iam.us-isob-east-1.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, + "kinesis" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "kms" : { + "endpoints" : { + "ProdFips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "kms-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + }, + "us-isob-east-1-fips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.us-isob-east-1.sc2s.sgov.gov" + } + } + }, + "lambda" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "license-manager" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "logs" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "metering.marketplace" : { + "defaults" : { + "credentialScope" : { + "service" : "aws-marketplace" + } + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ram" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "rds" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "resource-groups" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "route53.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, + "route53resolver" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "s3" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "snowball" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "sns" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "sqs" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "ssm" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "states" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + 
"us-isob-east-1" : { } + } + }, + "sts" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "support" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "support.us-isob-east-1.sc2s.sgov.gov" + } + }, + "partitionEndpoint" : "aws-iso-b-global" + }, + "swf" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "synthetics" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "tagging" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, + "workspaces" : { + "endpoints" : { + "us-isob-east-1" : { } + } + } + } + } ], + "version" : 3 +} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/CHANGELOG.md new file mode 100644 index 0000000..5534fc7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/CHANGELOG.md @@ -0,0 +1,1599 @@ +Unreleased Changes +------------------ + +3.171.0 (2023-03-22) +------------------ + +* Feature - Add support for `AWS_CONTAINER_CREDENTIALS_FULL_URI` and `AWS_CONTAINER_AUTHORIZATION_TOKEN` environment variables to `ECSCredentials`. + +3.170.1 (2023-03-17) +------------------ + +* Issue - Reduce memory usage in H2::Connection when `http_wire_log` is not set. + +3.170.0 (2023-01-25) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.169.0 (2023-01-18) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Issue - Replace runtime endpoint resolution approach with generated ruby code for STS, SSO, and SSOOIDC. + +3.168.4 (2022-12-08) +------------------ + +* Issue - Fix Sign to not sign Sigv2 requests to S3. + +3.168.3 (2022-12-02) +------------------ + +* Issue - Retry S3's `BadDigest` error + +3.168.2 (2022-11-29) +------------------ + +* Issue - Allow region resolution in `AssumeRoleCredentials` from `CredentialProviderChain`. + +3.168.1 (2022-11-18) +------------------ + +* Issue - Fix initialization of SSOTokenProvider when `AWS_PROFILE` is specified. + +3.168.0 (2022-11-17) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.167.0 (2022-11-09) +------------------ + +* Issue - Ensure the stream_thread is not killed before H2 connection status is updated (#2779). + +* Feature - Add token refresh support to `SSOCredentialProvider`. + +3.166.0 (2022-10-26) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.165.1 (2022-10-25) +------------------ + +* Issue - Require the SignatureV4 plugin to fix compatability with older `aws-sdk-s3` versions (#2774). + +3.165.0 (2022-10-25) +------------------ + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Add support for service gems to dynamically determine their own endpoints via modeling. Service gems now generate a plugin called "Endpoints" that defines configuration for EndpointProvider, a new public type, and any client config related to endpoints. Endpoint providers will resolve values using another new public type, Endpoint Parameters, generated for each service. 
The plugin will use the endpoint provider to resolve an endpoint and then apply it to the request prior to serialization. Endpoint providers can be composed to change endpoint resolution logic, i.e. for testing. In addition to endpoints, the endpoint provider may also override the authentication scheme (auth scheme) which details how the request should be signed for the endpoint. A new "Sign" plugin in core replaces the SignatureV4 plugin that will generically sign any type of auth scheme that a service might have. + +3.164.0 (2022-10-21) +------------------ + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +3.163.0 (2022-10-20) +------------------ + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +3.162.0 (2022-10-19) +------------------ + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +3.161.0 (2022-10-18) +------------------ + +* Feature - Support AwsQueryCompatible trait to read error code from x-amzn-query-error header. + +3.160.0 (2022-10-13) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.159.0 (2022-10-07) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.158.1 (2022-10-06) +------------------ + +* Issue - Ensure that the ReadCallbackIO is always unwrapped (#2761). + +3.158.0 (2022-09-30) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.157.0 (2022-09-29) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.156.0 (2022-09-27) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.155.0 (2022-09-26) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.154.0 (2022-09-23) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.153.0 (2022-09-22) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.152.0 (2022-09-21) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.151.0 (2022-09-20) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.150.0 (2022-09-19) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.149.0 (2022-09-16) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.148.0 (2022-09-15) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.147.0 (2022-09-14) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.146.0 (2022-09-13) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.145.0 (2022-09-12) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.144.0 (2022-09-09) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.143.0 (2022-09-08) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.142.0 (2022-09-07) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.141.0 (2022-09-06) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.140.0 (2022-09-02) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. 
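+
+As a rough sketch of the endpoint provider plugin described under 3.165.0 above (illustrative only; the parameter names follow the generated STS types, and the printed URL is an assumption rather than a documented value):
+
+```ruby
+require 'aws-sdk-core' # aws-sdk-core bundles the STS client and its generated endpoint types
+
+# Resolve an endpoint without sending a request. EndpointParameters and
+# EndpointProvider are the public types generated for each service.
+params = Aws::STS::EndpointParameters.new(region: 'us-west-2', use_fips: true)
+endpoint = Aws::STS::EndpointProvider.new.resolve_endpoint(params)
+puts endpoint.url # e.g. "https://sts-fips.us-west-2.amazonaws.com"
+```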
+ +3.139.0 (2022-09-01) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.138.0 (2022-08-31) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.137.0 (2022-08-30) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Issue - Fix errors in recursion detection when `_X_AMZN_TRACE_ID` is unset (#2748). + +3.136.0 (2022-08-25) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Updated Aws::SSOOIDC::Client with the latest API changes. + +3.135.0 (2022-08-24) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.134.0 (2022-08-23) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Add support for Bearer Token Authentication and TokenProviders. + +* Issue - Validate that `_X_AMZN_TRACE_ID` ENV value contains only valid, non-control characters. + +3.133.0 (2022-08-22) +------------------ + +* Feature - Moved functionality from `aws-sdk-ssooidc` into core. + +3.132.0 (2022-08-08) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.131.6 (2022-08-03) +------------------ + +* Issue - Fix typo in `RecursionDetection`, change amz to amzn in header and env name. + +3.131.5 (2022-07-28) +------------------ + +* Issue - Fix `to_json` usage in nested hashes by defining `as_json` (#2733). + +3.131.4 (2022-07-27) +------------------ + +* Issue - Fix `to_json` usage on pageable responses when using Rails (#2733). +* Issue - Use `expand_path` on credential/config paths in SharedConfig (#2735). + +3.131.3 (2022-07-18) +------------------ + +* Issue - Add support for serializing shapes on the body with `jsonvalue` members. + +3.131.2 (2022-06-20) +------------------ + +* Issue - Populate context :request_id for XML error responses. + +3.131.1 (2022-05-20) +------------------ + +* Issue - Bump the minimum version of `jmespath` dependency. + +3.131.0 (2022-05-16) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.130.2 (2022-04-22) +------------------ + +* Issue - Don't pass `:before_refresh` to Client constructors in RefreshingCredential implementations (#2690). + +3.130.1 (2022-04-12) +------------------ + +* Issue - Don't call `refresh!` on non-refreshable `Credentials` when retrying errors (#2685). + +3.130.0 (2022-03-11) +------------------ + +* Feature - Asynchronously refresh AWS credentials (#2641). + +* Issue - Add x-amz-region-set to list of headers deleted for re-sign. + +3.129.1 (2022-03-10) +------------------ + +* Issue - Make stubs thread safe by creating new responses for each operation call (#2675). + +3.129.0 (2022-03-08) +------------------ + +* Feature - Add support for cases when `InstanceProfileCredentials` (IMDS) is unable to refresh credentials. + +3.128.1 (2022-03-07) +------------------ + +* Issue - Fixed `Aws::PageableResponse` invalidating Ruby's global constant cache. + +3.128.0 (2022-03-04) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.127.0 (2022-02-24) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Support `HttpChecksum` trait for requests and responses. 
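+
+A minimal sketch of the bearer token support added in 3.134.0 above (illustrative; the token string is a placeholder and the provider behavior is summarized from that entry):
+
+```ruby
+require 'aws-sdk-core'
+
+# StaticTokenProvider wraps a fixed bearer token. Clients for services
+# that model bearer auth accept a :token_provider option.
+provider = Aws::StaticTokenProvider.new('my-bearer-token') # placeholder token
+puts provider.token.token # => "my-bearer-token"
+```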
+ +3.126.2 (2022-02-16) +------------------ + +* Issue - Add a before_refresh callback to AssumeRoleCredentials (#2529). +* Issue - Raise a `NoSuchProfileError` when config and credentials files don't exist. + +3.126.1 (2022-02-14) +------------------ + +* Issue - Set `create_time` on IMDS tokens before fetch to reduce chance of using expired tokens and retry failures due to using expired tokens. + +3.126.0 (2022-02-03) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Add support for recursion detection. + +3.125.6 (2022-02-02) +------------------ + +* Issue - Ensure default message for ServiceError is a string (#2643). + +3.125.5 (2022-01-19) +------------------ + +* Issue - Correctly serialize empty header lists. + +3.125.4 (2022-01-18) +------------------ + +* Issue - Add `InternalError` to `ErrorInspector` for S3 errors. + + +3.125.3 (2022-01-12) +------------------ + +* Issue - Add `ExpiredTokenException` to `ErrorInspector` for Kinesis errors. + +3.125.2 (2022-01-10) +------------------ + +* Issue - Correctly serialize lists of strings in headers with quotes and commas. + +3.125.1 (2022-01-04) +------------------ + +* Issue - Parse a response with consecutive spaces correctly when ox is used as the XML parser. + +3.125.0 (2021-12-21) +------------------ + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Feature - Add `:defaults_mode` configuration - that determines how certain default configuration options are resolved in the SDK. + +3.124.0 (2021-11-30) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.123.0 (2021-11-23) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.122.1 (2021-11-09) +------------------ + +* Issue - Correctly serialize/deserialize header lists. + +3.122.0 (2021-11-04) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +* Issue - Fix parsing of ISO8601 timestamps with millisecond precision in headers. + +* Feature - Support modeled dualstack endpoints. It can be configured with shared configuration (`use_dualstack_endpoint`), an ENV variable (`AWS_USE_DUALSTACK_ENDPOINT`), and a constructor option (`:use_dualstack_endpoint`). Requests made to services without a dualstack endpoint will fail. + +* Feature - Support modeled fips endpoints. It can be configured with shared configuration (`use_fips_endpoint`), an ENV variable (`AWS_USE_FIPS_ENDPOINT`), and a constructor option (`:use_fips_endpoint`). Requests made to services without a fips endpoint will fail. + +3.121.6 (2021-11-02) +------------------ + +* Issue - Improve `SSOCredentials` error handling when profile file does not exist (#2605) + +3.121.5 (2021-10-29) +------------------ + +* Issue - bump minimum version of `aws-partitions` (#2603). + +3.121.4 (2021-10-28) +------------------ + +* Issue - This version has been yanked. (#2603). + +* Issue - use the `EndpointProvider` to lookup signing region and name. + +3.121.3 (2021-10-20) +------------------ + +* Issue - Use endpointPrefix when looking up the `signing_region` from the `EndpointProvider`. + +3.121.2 (2021-10-18) +------------------ + +* Issue - Fix an issue where Rest JSON services do not have a `Content-Type` header. 
+ +* Issue - Remove blank `Content-Type` header from Net::HTTP handler, and prevent a default from being set. + +* Issue - Set `Content-Length` only for HTTP methods that take a body. + +3.121.1 (2021-09-24) +------------------ + +* Issue - Fix error in finding union member for boolean shapes with `false` values. + +3.121.0 (2021-09-02) +------------------ + +* Feature - Add support for S3 Multi-region access point configuration. + +3.120.0 (2021-09-01) +------------------ + +* Feature - AWS SDK for Ruby no longer supports Ruby runtime versions 1.9, 2.0, 2.1, and 2.2. + +3.119.1 (2021-08-20) +------------------ + +* Issue - Refactored `Aws::Json::Engine` to remove dead code and replaced usage of `JSON.load` with `JSON.parse`. + +3.119.0 (2021-07-30) +------------------ + +* Feature - Support Document Types. Document types are used to carry open content. A document type value is serialized using the same format as its surroundings and requires no additional encoding or escaping.(#2523) + +3.118.0 (2021-07-28) +------------------ + +* Feature - Add support for Tagged Unions using a "sealed" classes like approach where each union member has a corresponding subclass. + +3.117.0 (2021-07-12) +------------------ + +* Feature - Support IPv6 endpoints for `Aws::InstanceProfileCredentials`. It supports two shared configuration options (`ec2_metadata_service_endpoint` & `ec2_metadata_service_endpoint_mode`), two ENV variables (`AWS_EC2_METADATA_SERVICE_ENDPOINT` & `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE`), and two constructor options (`:endpoint` & `:endpoint_mode`). + +* Feature - Support IPv6 endpoint for `Aws::EC2Metadata` client. It can be configured with `:endpoint` or `:endpoint_mode`. + +3.116.0 (2021-07-07) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.115.0 (2021-06-23) +------------------ + +* Feature - Add support for Assume Role Chaining in profiles. (#2531) +* Issue - Fixed an issue with `Seahorse::Client::H2::Connection` for non-https endpoints. (#2542) + +3.114.3 (2021-06-15) +------------------ + +* Issue - Fixed an issue with `Aws::PageableResponse` where it was modifying original params hash, causing frozen hashes to fail. + +3.114.2 (2021-06-09) +------------------ + +* Issue - Fixed an issue with `Aws::PageableResponse` where intentionally nil tokens were not merged into the params for the next call. + +3.114.1 (2021-06-02) +------------------ + +* Issue - Change XML Builder to not indent by default + +3.114.0 (2021-04-13) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.113.1 (2021-03-29) +------------------ + +* Issue - Ensure end of line characters are correctly encoded in XML. + +3.113.0 (2021-03-10) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::SSO::Client with the latest API changes. + +3.112.1 (2021-03-04) +------------------ + +* Issue - Include LICENSE, CHANGELOG, and VERSION files with this gem. + +3.112.0 (2021-02-02) +------------------ + +* Feature - The `hostPrefix` trait will now be applied to any customer provided `:endpoint`. This bug fix is a minor behavioral change for clients using custom endpoints for `s3control`, `iotsitewise`, and `servicediscovery`. This behavior can be disabled by configuring `:disable_host_prefix_injection` to `true`. + +3.111.2 (2021-01-19) +------------------ + +* Issue - Fix a loading issue with SSO and STS gem aliases using `require_relative` instead of `require`. 
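+
+A minimal sketch of the dualstack and FIPS endpoint options from 3.122.0 above (illustrative; the credentials are placeholders and the resolved endpoint depends on the partition data):
+
+```ruby
+require 'aws-sdk-core'
+
+# Both options can also be set via AWS_USE_DUALSTACK_ENDPOINT /
+# AWS_USE_FIPS_ENDPOINT or the shared config file.
+sts = Aws::STS::Client.new(
+  region: 'us-west-2',
+  credentials: Aws::Credentials.new('akid', 'secret'), # placeholder credentials
+  use_dualstack_endpoint: true,
+  use_fips_endpoint: false
+)
+puts sts.config.endpoint
+```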
+
+3.111.1 (2021-01-15)
+------------------
+
+* Issue - Fix an issue with `max_attempts` validation raising incorrectly.
+
+3.111.0 (2021-01-11)
+------------------
+
+* Feature - Adds an IMDSv2 client as `Aws::EC2Metadata`.
+
+3.110.0 (2020-12-03)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Issue - Support `aws-sdk-sts` alias gem.
+
+* Issue - Retry when `Net::HTTPFatalError` is thrown by the `Net::HTTP` library. This can occur when proxy connections are configured. (#2439)
+
+3.109.3 (2020-11-17)
+------------------
+
+* Issue - Use full namespace for SSO Client when creating `SSOCredentials`
+
+3.109.2 (2020-11-04)
+------------------
+
+* Issue - Check for flattened on ref for lists when serializing.
+
+3.109.1 (2020-10-05)
+------------------
+
+* Issue - For errors without a message, default to the error class. (#2388)
+
+3.109.0 (2020-09-30)
+------------------
+
+* Feature - Add `Seahorse::Util.host_label?` to check strings for valid RFC-3986 host labels.
+* Feature - Add `Aws::ARN#to_h`.
+
+3.108.0 (2020-09-25)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.107.0 (2020-09-15)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Issue - Fix circular dependency of `aws-sdk-sso` and `aws-sdk-core` (#2405).
+
+3.106.0 (2020-09-14)
+------------------
+
+* Feature - Support `AWS_CA_BUNDLE` ENV variable and `ca_bundle` shared configuration options. The `:ssl_ca_bundle` client option will override either of these options. (#1907)
+
+3.105.0 (2020-08-25)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated Aws::SSO::Client with the latest API changes.
+
+* Feature - Add `SSOCredentials`. Moved functionality from `aws-sdk-sso` into core.
+
+3.104.4 (2020-08-19)
+------------------
+
+* Issue - Use Aws::Json for parsing instead of JSON
+
+3.104.3 (2020-07-23)
+------------------
+
+* Issue - Revert duplication of params. Ensure code that relied on internal modification of parameters is not broken.
+
+3.104.2 (2020-07-22)
+------------------
+
+* Issue - Validate IO-like objects support read, rewind, and size unless streaming. Fixes #2364
+
+3.104.1 (2020-07-20)
+------------------
+
+* Issue - Duplicate params to ensure user provided params are not modified. Fixes #2366
+
+3.104.0 (2020-07-15)
+------------------
+
+* Feature - Add headers to the `ResponseTarget` callback. A block passed as the response target on a streaming method will be called with the `chunk` and `headers`.
+* Feature - Added the `RequestCallback` plugin which allows clients and methods to set `on_chunk_sent` to a `Proc` which will be called as each chunk of the request body is sent.
+
+3.103.0 (2020-07-01)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.102.1 (2020-06-25)
+------------------
+
+* Issue - Set the `response_target` on the context when deleting it from the parameters.
+
+3.102.0 (2020-06-24)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.101.0 (2020-06-23)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Added sensitive params to request and response Types instead of just on a large list.
+* Feature - Provide an option `:filter_sensitive_params` for `Aws::Log::Formatter` to allow disabling of the sensitive param filter (#2312, #2105, #2082).
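+
+A quick sketch of the IMDSv2 client added in 3.111.0 above (illustrative; it only works on an EC2 instance, and the path shown is a standard IMDS path rather than anything specific to this changelog):
+
+```ruby
+require 'aws-sdk-core'
+
+# Aws::EC2Metadata negotiates an IMDSv2 session token internally,
+# then fetches metadata with plain GETs.
+imds = Aws::EC2Metadata.new
+puts imds.get('/latest/meta-data/instance-id')
+```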
+ +3.100.0 (2020-06-15) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.99.2 (2020-06-12) +------------------ + +* Issue - Don't retry streaming requests with blocks (#2311) + +3.99.1 (2020-06-11) +------------------ + +* Issue - Republish after incorrect yank. + +3.99.0 (2020-06-10) +------------------ + +* Issue - This version has been yanked. (#2327). +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +* Feature - Support `httpChecksumRequired` on operations that require Content MD5 validation. +* Issue - Validate `:region` as a valid DNS host label. + +3.98.0 (2020-06-05) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.97.1 (2020-06-01) +------------------ + +* Issue - Convert ENV['AWS_MAX_ATTEMPTS'] String value to Integer when set. (#2319) +* Issue - Handle unknown and unmodeled events from event streams by ignoring them and providing a new callback rather than raising an error. + +3.97.0 (2020-05-28) +------------------ +* Feature - Default endpoint_discovery to `true` for services with at least one operation that requires it. +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.96.1 (2020-05-18) +------------------ + +* Issue - Raise `ArgumentError` for XML services when required URI elements are not included. + +3.96.0 (2020-05-15) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.95.0 (2020-05-07) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.94.1 (2020-05-04) +------------------ + +* Issue - When handling errors in XML responses, don't set a new error on the response if one is already set. + +3.94.0 (2020-04-08) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +* Issue - Update dependency on aws-eventstream + +3.93.0 (2020-04-06) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.92.0 (2020-03-20) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Issue - Change the default of `sts_regional_endpoints` from 'legacy' to 'regional'. + +3.91.1 (2020-03-10) +------------------ + +* Issue - Rescue from `JSON::ParserError` when using `Oj.mimic_JSON`. (#2247) + +3.91.0 (2020-03-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. +* Feature - Add `standard` and `adaptive` retry modes. + +3.90.1 (2020-02-14) +------------------ + +* Issue - Perform a case-insensitive comparison when filtering sensitive parameters from logs +* Issue - Add passthrough of region from client to STS when using `assume_role_web_identity_credentials`. + +3.90.0 (2020-02-12) +------------------ + +* Issue - Updated the list of parameters to filter when logging. +* Issue - Parse all values from shared credentials file when using `Aws.shared_config`. +* Issue - Honor explicit profile in client config when credentials from AWS_ environment variables are present. +* Issue - Fixed a bug where `Transfer-Encoding` could never be set to `chunked` in streaming operations because all body objects (`String`, `StringIO`) would respond to `#size`. 
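+
+A minimal sketch of the `standard` and `adaptive` retry modes from 3.91.0 above (illustrative; the credentials are placeholders, and the mode can equally come from AWS_RETRY_MODE or the shared config file):
+
+```ruby
+require 'aws-sdk-core'
+
+# 'adaptive' layers client-side rate limiting on top of the
+# 'standard' token-bucket retry behavior.
+sts = Aws::STS::Client.new(
+  region: 'us-east-1',
+  credentials: Aws::Credentials.new('akid', 'secret'), # placeholder credentials
+  retry_mode: 'adaptive',
+  max_attempts: 5
+)
+```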
+ +3.89.1 (2020-01-14) +------------------ + +* Issue - Fix erroneously reaped sessions from `Seahorse::Client::NetHttp::ConnectionPool` due to bad `last_used` time calculation +* Issue - Use monotonic clocks when reaping sessions in `Seahorse::Client::NetHttp::ConnectionPool` +* Issue - Fix "Conn close because of keep_alive_timeout" when reusing `Seahorse::Client::NetHttp::ConnectionPool` sessions + +3.89.0 (2020-01-13) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.88.0 (2020-01-10) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.87.0 (2020-01-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Issue - Reuse connections even if `http_wire_trace` is true. + +3.86.0 (2019-12-13) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.85.1 (2019-12-11) +------------------ + +* Issue - Change default timeout to 1 and number of retries to 1 for `InstanceProfileCredentials`. + +3.85.0 (2019-12-09) +------------------ + +* Feature - Add STS Presigner module with a method to generate a presigned EKS token. + +* Issue - Fix issue for log formatters in clients where http_response_body does not respond to `rewind` when using a block. + +3.84.0 (2019-12-04) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.83.0 (2019-12-03) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.82.0 (2019-11-25) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.81.0 (2019-11-22) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.80.0 (2019-11-20) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.79.0 (2019-11-19) +------------------ + +* Feature - Support EC2 IMDS updates. + +3.78.0 (2019-11-15) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.77.0 (2019-11-13) +------------------ + +* Feature - Support `s3_us_east_1_regional_endpoint` from `SharedConfig` + +3.76.0 (2019-11-07) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.75.0 (2019-11-06) +------------------ + +* Feature - Remove deprecated `access_key_id`, `secret_access_key`, and `session_token` methods in credential providers. + +3.74.0 (2019-11-05) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.73.0 (2019-11-04) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.72.1 (2019-10-31) +------------------ + +* Issue - Fix `EndpointCache#key?` to be thread safe. + +3.72.0 (2019-10-24) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +* Issue - Update minimum `aws-partition` gem dependency version + +3.71.0 (2019-10-23) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Support enable STS regional endpoints by `sts_regional_endpoints: 'regional'` + +3.70.0 (2019-10-22) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.69.1 (2019-10-18) +------------------ + +* Issue - Fix method redefinition warnings + +3.69.0 (2019-10-17) +------------------ + +* Feature - Updated the list of parameters to filter when logging. 
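+
+A short sketch of the regional STS endpoint option from 3.71.0 above (illustrative; the credentials are placeholders; per the 3.92.0 entry, 'regional' later became the default):
+
+```ruby
+require 'aws-sdk-core'
+
+# With 'regional', requests go to sts.us-west-2.amazonaws.com rather
+# than the legacy global sts.amazonaws.com endpoint.
+sts = Aws::STS::Client.new(
+  region: 'us-west-2',
+  credentials: Aws::Credentials.new('akid', 'secret'), # placeholder credentials
+  sts_regional_endpoints: 'regional'
+)
+```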
+
+3.68.1 (2019-10-02)
+------------------
+
+* Issue - Add final deprecation warnings to `access_key_id`, `secret_access_key`, and `session_token` in credential providers.
+
+* Issue - Remove misleading IO documentation from `BlobShape` error output.
+
+3.68.0 (2019-09-16)
+------------------
+
+* Feature - Support assuming a role with `:source_profile` from a profile that can be resolved from a `ProcessCredentials` provider.
+
+3.67.0 (2019-09-09)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.66.0 (2019-09-04)
+------------------
+
+* Feature - Support CLI AWS_DEFAULT_PROFILE environment variable [Github Issue](https://github.com/aws/aws-sdk-ruby/issues/1452).
+
+3.65.1 (2019-08-28)
+------------------
+
+* Issue - Auto refresh credentials for Route53 `ExpiredToken` errors.
+
+3.65.0 (2019-08-27)
+------------------
+
+* Feature - Support assuming a role `:source_profile` profile with `AssumeRoleWebIdentityCredentials`.
+
+3.64.0 (2019-08-20)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.63.0 (2019-08-15)
+------------------
+
+* Feature - Support passing AssumeRole `duration_seconds` from shared credentials/config file.
+
+3.62.0 (2019-08-02)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.61.2 (2019-07-29)
+------------------
+
+* Issue - Add `Aws::STS::InvalidIdentityToken` and `Aws::Errors::NoSuchEndpointError` error for retry.
+
+3.61.1 (2019-07-25)
+------------------
+
+* Issue - Fix default STS Client credential sourcing in `Aws::AssumeRoleWebIdentityCredentialsProvider`.
+
+3.61.0 (2019-07-24)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.60.0 (2019-07-23)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Issue - Handle `EncodingError` when using Oj gem [Github Issue](https://github.com/aws/aws-sdk-ruby/issues/1831)
+
+3.59.0 (2019-07-03)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.58.0 (2019-07-01)
+------------------
+
+* Feature - Support `Aws::AssumeRoleWebIdentityCredentials` provider
+
+3.57.0 (2019-06-28)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.56.0 (2019-06-17)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Support `:client_side_monitoring_host` configuration for CSM
+
+3.55.0 (2019-06-14)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.54.2 (2019-06-03)
+------------------
+
+* Issue - Migrate Proc.new without a block usage #2058.
+
+3.54.1 (2019-05-30)
+------------------
+
+* Issue - Improved exception messages in credential providers to exclude detailed parse errors that may contain sensitive information.
+
+3.54.0 (2019-05-28)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.53.1 (2019-05-22)
+------------------
+
+* Issue - Support #to_hash for Struct with `:members` member #2053
+
+3.53.0 (2019-05-21)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Feature - Adding support for modeled exceptions
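+
+A minimal sketch of the `Aws::AssumeRoleWebIdentityCredentials` provider from 3.58.0 above (illustrative; the role ARN, token path, and session name are placeholders):
+
+```ruby
+require 'aws-sdk-core'
+
+# The token file holds an OIDC token, e.g. a Kubernetes projected
+# service account token; credentials are refreshed automatically.
+creds = Aws::AssumeRoleWebIdentityCredentials.new(
+  role_arn: 'arn:aws:iam::123456789012:role/my-role',     # placeholder
+  web_identity_token_file: '/var/run/secrets/oidc/token', # placeholder
+  role_session_name: 'my-session'                         # placeholder
+)
+sts = Aws::STS::Client.new(region: 'us-east-1', credentials: creds)
+```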
+
+* Feature - Adding support for modeled exceptions
+
+3.52.1 (2019-05-15)
+------------------
+
+* Issue - Handle paginator stubs with expression #2040
+
+3.52.0 (2019-05-14)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Feature - Support transfer encoding and the `requiresLength` trait
+
+3.51.0 (2019-05-10)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.50.0 (2019-05-06)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.49.0 (2019-04-30)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.48.6 (2019-04-26)
+------------------
+
+* Issue - Call the RefreshingCredentials initialize method in ProcessCredentials to set the mutex.
+
+3.48.5 (2019-04-24)
+------------------
+
+* Issue - Add PriorRequestNotComplete to throttling errors.
+
+3.48.4 (2019-04-18)
+------------------
+
+* Issue - Small memory retention reduction.
+
+3.48.3 (2019-03-26)
+------------------
+
+* Issue - Event header ":event-type" uses the member name instead of the shape name
+
+3.48.2 (2019-03-20)
+------------------
+
+* Issue - Support signal events after request only [HTTP2]
+
+3.48.1 (2019-03-19)
+------------------
+
+* Issue - Clean up unnecessary error output when the 'http-2' gem is not present.
+
+3.48.0 (2019-03-18)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Feature - Fix the http-2 dependency for old Ruby versions (Github Issue #1994)
+
+3.47.0 (2019-03-14)
+------------------
+
+* Feature - Support HTTP/2 based AWS event stream operations
+
+3.46.2 (2019-02-19)
+------------------
+
+* Issue - Update NetHttp patches per Ruby version (Github Issue: #1979)
+
+3.46.1 (2019-02-12)
+------------------
+
+* Issue - Fix an issue where the APIG SDK was missing regional endpoint related plugins.
+
+3.46.0 (2019-01-16)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.45.0 (2019-01-11)
+------------------
+
+* Feature - Improve Query protocol handling of empty responses, ensuring the response is an instance of `Aws::EmptyStructure` rather than the class `Aws::EmptyStructure` itself.
+* Issue - Plugin updates to support client-side monitoring.
+
+3.44.2 (2019-01-04)
+------------------
+
+* Issue - Update to code paths and plugins for future SDK instrumentation and telemetry.
+
+3.44.1 (2018-12-17)
+------------------
+
+* Issue - Update sensitive filtering logic to include `#to_s` calls of shapes.
+
+3.44.0 (2018-12-07)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.43.0 (2018-12-04)
+------------------
+
+* Feature - Update user agent structure.
+
+3.42.0 (2018-11-29)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.41.0 (2018-11-28)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.40.0 (2018-11-27)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.39.0 (2018-11-20)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Feature - Adding support for the endpoint trait (host prefix) per operation; to disable this feature, set `:disable_host_prefix_injection` to `true` for the client.
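+
+A hedged sketch of the `:disable_host_prefix_injection` option from 3.39.0 above; the option is accepted by any generated client, and STS is used here only as an example:
+
+```ruby
+require 'aws-sdk-core'
+
+# When true, operation-level host prefixes (the endpoint trait) are not
+# prepended to the client endpoint; by default injection stays enabled.
+client = Aws::STS::Client.new(
+  region: 'us-east-1',                 # illustrative region
+  disable_host_prefix_injection: true  # opt out of host prefix injection
+)
+```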
+
+3.38.0 (2018-11-12)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Feature - Adding `TransactionInProgressException` for throttling retry
+
+3.37.0 (2018-11-08)
+------------------
+
+* Feature - Adding support for endpoint discovery per operation; to enable this feature, set `:endpoint_discovery` to `true` for the client. Note: only available for services with endpoint discovery support.
+
+3.36.0 (2018-10-30)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.35.0 (2018-10-24)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.34.0 (2018-10-23)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Allow a 429 response code to trigger throttle detection
+
+3.33.0 (2018-10-22)
+------------------
+
+* Feature - Update to code paths and plugins for future SDK instrumentation and telemetry.
+
+3.32.0 (2018-10-18)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.31.0 (2018-10-16)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.30.0 (2018-10-04)
+------------------
+
+* Feature - Adds to code paths and plugins for future SDK instrumentation and telemetry.
+
+3.29.0 (2018-09-28)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.28.0 (2018-09-25)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.27.1 (2018-09-21)
+------------------
+
+* Issue - Fixes a bug in the `:response_target` plugin error callback. Under certain circumstances a special body object can be removed before its error callback is triggered, breaking retry logic.
+
+3.27.0 (2018-09-06)
+------------------
+
+* Feature - Adds code paths and plugins for future SDK instrumentation and telemetry to aws-sdk-sts.
+
+3.26.0 (2018-09-05)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Adds code paths and plugins for future SDK instrumentation and telemetry.
+
+3.25.0 (2018-08-29)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+* Issue - Add an `:exclude_presign` option for `#api_requests` in client stubbing to allow excluding unsent presigned-URL requests (Github Issue #1866)
+
+3.24.1 (2018-08-13)
+------------------
+
+* Issue - Update the `ca-bundle.crt` file with newer root certificate authorities.
+
+3.24.0 (2018-08-03)
+------------------
+
+* Feature - Extensible credential providers: declare an executable to be run that outputs the credentials as a JSON payload, allowing you to develop custom credential providers and easily add them to the credential resolution chain. [Docs](https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#sourcing-credentials-from-external-processes)
+
+3.23.0 (2018-07-31)
+------------------
+
+* Feature - Add a Logged API Requests interface to stubbed clients
+
+3.22.1 (2018-06-28)
+------------------
+
+* Issue - Performance enhancement to instance credential providers: use a more precisely scoped Time parsing method.
+
+3.22.0 (2018-06-26)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Allows you to set custom paths to shared config and shared credential files via the `AWS_CONFIG_FILE` and `AWS_SHARED_CREDENTIALS_FILE` environment variables.
+
+* Feature - Flexible retry strategies. Provides the `:retry_max_delay`, `:retry_base_delay`, and `:retry_jitter` options, which modify the default backoff strategy without the need to define a full retry lambda from scratch (see the usage sketch after the 3.9.0 entry below).
+
+3.21.3 (2018-06-20)
+------------------
+
+* Issue - Fix to support URI-encoded characters in `http_proxy`
+
+3.21.2 (2018-05-22)
+------------------
+
+* Issue - Update `EventEmitter` to `Aws::EventEmitter` [Github Issue](https://github.com/aws/aws-sdk-ruby/issues/1791)
+
+3.21.1 (2018-05-18)
+------------------
+
+* Issue - Remove `raw_stream` tracking, [Github Issue](https://github.com/aws/aws-sdk-ruby/issues/1786)
+
+3.21.0 (2018-05-17)
+------------------
+
+* Feature - Support the `vnd.amazon.event-stream` binary stream protocol over HTTP/1.1
+
+3.20.2 (2018-04-26)
+------------------
+
+* Issue - Avoid Net::HTTP patching for Ruby 2.5
+
+3.20.1 (2018-04-24)
+------------------
+
+* Issue - Fix parsing flattened XML shape from shape reference for S3 https://github.com/aws/aws-sdk-ruby/issues/1764
+
+3.20.0 (2018-04-23)
+------------------
+
+* Feature - Aws::InstanceProfileCredentials - Send the User-Agent `aws-sdk-ruby3/` instead of the default Ruby User-Agent, to allow protection against Server Side Request Forgery (SSRF) credential theft vectors that rely on a metadata proxy.
+
+3.19.0 (2018-04-04)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.18.1 (2018-03-29)
+------------------
+
+* Issue - Fix undefined method `each`/`next` for `Enumerable::Enumerator` when this class exists in the environment
+
+3.18.0 (2018-03-28)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.17.1 (2018-03-20)
+------------------
+
+* Issue - Support the timestamp shape in the querystring
+
+3.17.0 (2018-02-27)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Issue - Fix Ruby warnings: shadowed local variables "parts" and "headers"
+
+3.16.0 (2018-02-20)
+------------------
+
+* Feature - Aws::InstanceProfileCredentials - When the `AWS_EC2_METADATA_DISABLED` environment variable is present with the value `true` (not case sensitive), the `Aws::InstanceProfileCredentials` credential provider will not be used.
+
+3.15.0 (2018-02-06)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.14.0 (2018-01-15)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.13.1 (2018-01-12)
+------------------
+
+* Issue - Fix Ruby 2.5 warnings.
+
+3.13.0 (2017-12-21)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+3.12.0 (2017-12-20)
+------------------
+
+* Feature - Adds support for `credential_source` when assuming a role via shared configuration.
+
+* Issue - Update the APIGateway SDK user agent pattern
+
+3.11.0 (2017-11-29)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.10.0 (2017-11-29)
+------------------
+
+* Feature - Updated the list of parameters to filter when logging.
+
+3.9.0 (2017-11-20)
+------------------
+
+* Feature - Updated Aws::STS::Client with the latest API changes.
+
+* Feature - Updated the list of parameters to filter when logging.
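+
+An illustrative sketch of the flexible retry options introduced in 3.22.0 above; the option names are the documented ones, while the region and numeric values are only examples:
+
+```ruby
+require 'aws-sdk-core'
+
+# Customize the default exponential backoff without writing a retry lambda:
+# a 0.5s base delay, each sleep capped at 10s, and full jitter (each delay
+# is randomized between 0 and the computed backoff value).
+client = Aws::STS::Client.new(
+  region: 'us-east-1',   # illustrative region
+  retry_limit: 5,
+  retry_base_delay: 0.5,
+  retry_max_delay: 10,
+  retry_jitter: :full
+)
+```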
+ +3.8.0 (2017-11-19) +------------------ + +* Feature - Add support for APIGateway protocol and custom service build. + +3.7.0 (2017-11-07) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +3.6.1 (2017-11-07) +------------------ + +* Issue - Update empty struct stubbing shape + +3.6.0 (2017-09-20) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.5.0 (2017-09-13) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.4.0 (2017-09-12) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.3.1 (2017-09-11) +------------------ + +* Issue - Fix core util deep copy issue #1603 + +3.3.0 (2017-09-07) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.2.1 (2017-09-06) +------------------ + +* Issue - Remove redundant version file. + +3.2.0 (2017-08-31) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +* Issue - Update `aws-sdk-core` gemspec metadata. + +* Issue - Update `aws-sdk-core` gemspec metadata + +3.1.0 (2017-08-30) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0 (2017-08-29) +------------------ + +3.0.0.rc20 (2017-08-14) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc19 (2017-07-31) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc18 (2017-07-24) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc17 (2017-07-12) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc16 (2017-07-06) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc15 (2017-07-06) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc14 (2017-06-29) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc13 (2017-06-26) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +* Issue - Aws::CredentialProviderChain - Fetching `AWS_PROFILE` environment variable before using `default` profile. + +3.0.0.rc12 (2017-05-23) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Update throttling error pool of retry + +* Feature - Update `User-Agent` format + +3.0.0.rc11 (2017-05-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc10 (2017-05-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc9 (2017-05-05) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Added support for Ruby 2.4 + +* Issue - Revert 'cgi/util' change that breaks Ruby 2.4 + +3.0.0.rc8 (2017-04-21) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. 
+ +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc7 (2017-03-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc6 (2017-03-08) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc5 (2017-03-07) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +3.0.0.rc4 (2017-03-07) +------------------ + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc3 (2017-01-24) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc2 (2016-12-09) +------------------ + +* Feature - Updated Aws::STS::Client with the latest API changes. + +* Feature - Updated the list of parameters to filter when logging. + +3.0.0.rc1 (2016-12-05) +------------------ + +* Feature - Initial preview release of the `aws-sdk-core` gem. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/VERSION b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/VERSION new file mode 100644 index 0000000..430def6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/VERSION @@ -0,0 +1 @@ +3.171.0 diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/ca-bundle.crt b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/ca-bundle.crt new file mode 100644 index 0000000..bdba614 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/ca-bundle.crt @@ -0,0 +1,3628 @@ +# C=ES,O=ACCV,OU=PKIACCV,CN=ACCVRAIZ1 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- +# OU=AC RAIZ FNMT-RCM,O=FNMT-RCM,C=ES +-----BEGIN CERTIFICATE----- 
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- +# CN=Actalis Authentication Root CA,O=Actalis S.p.A./03358520967,L=Milan,C=IT +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz 
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- +# CN=AddTrust External CA Root,OU=AddTrust External TTP Network,O=AddTrust AB,C=SE +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- +# CN=AffirmTrust Commercial,O=AffirmTrust,C=US +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- +# CN=AffirmTrust Networking,O=AffirmTrust,C=US 
+-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- +# CN=AffirmTrust Premium,O=AffirmTrust,C=US +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- +# CN=AffirmTrust Premium ECC,O=AffirmTrust,C=US +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt 
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- +# CN=Amazon Root CA 1,O=Amazon,C=US +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj +ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- +# CN=Amazon Root CA 2,O=Amazon,C=US +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE 
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- +# CN=Amazon Root CA 3,O=Amazon,C=US +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- +# CN=Amazon Root CA 4,O=Amazon,C=US +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- +# C=DE,O=Atos,CN=Atos TrustedRoot 2011 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- +# CN=Autoridad de Certificacion Firmaprofesional CIF A62634068,C=ES +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM 
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- +# CN=Baltimore CyberTrust Root,OU=CyberTrust,O=Baltimore,C=IE +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- +# CN=Buypass Class 2 Root CA,O=Buypass AS-983163327,C=NO +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr 
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- +# CN=Buypass Class 3 Root CA,O=Buypass AS-983163327,C=NO +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- +# CN=CA Disig Root 
R1,O=Disig a.s.,L=Bratislava,C=SK +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy +MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk +D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o +OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A +fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe +IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n +oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK +/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj +rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD +3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE +7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC +yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd +qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI +hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA +SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo +HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB +emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC +AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb +7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x +DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk +F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF +a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT +Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- +# CN=CA Disig Root R2,O=Disig a.s.,L=Bratislava,C=SK +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka 
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- +# CN=Chambers of Commerce Root,OU=http://www.chambersign.org,O=AC Camerfirma SA CIF A82743287,C=EU +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg +b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa +MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB +ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw +IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B +AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb +unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d +BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq +7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 +0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX +roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG +A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j +aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p +26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA +BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud +EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN +BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB +AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd +p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi +1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc +XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 +eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu +tGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- +# CN=Global Chambersign Root,OU=http://www.chambersign.org,O=AC Camerfirma SA CIF A82743287,C=EU +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo +YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 +MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy +NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G +A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA +A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 +Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s +QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV +eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 +B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh +z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T +AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i +ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w +TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH 
+MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD +VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE +VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B +AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM +bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi +ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG +VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c +ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ +AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- +# CN=Certigna,O=Dhimyotis,C=FR +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- +# CN=Certinomis - Root CA,OU=0002 433998903,O=Certinomis,C=FR +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw 
+F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- +# CN=Class 2 Primary CA,O=Certplus,C=FR +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 +HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- +# CN=Certplus Root CA G1,O=Certplus,C=FR +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a +iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt +6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP +0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f +6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE +EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN +1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc +h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT +mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV +4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO +WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd +Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq +hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh +66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7 +/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS +S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j 
+2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R +Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr +RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy +6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV +V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5 +g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl +++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo= +-----END CERTIFICATE----- +# CN=Certplus Root CA G2,O=Certplus,C=FR +-----BEGIN CERTIFICATE----- +MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat +93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x +Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj +FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG +SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch +p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal +U5ORGpOucGpnutee5WEaXw== +-----END CERTIFICATE----- +# OU=certSIGN ROOT CA,O=certSIGN,C=RO +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- +# CN=Certum Trusted Network CA,OU=Certum Certification Authority,O=Unizeto Technologies S.A.,C=PL +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x 
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- +# CN=Certum Trusted Network CA 2,OU=Certum Certification Authority,O=Unizeto Technologies S.A.,C=PL +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- +# CN=CFCA EV ROOT,O=China Financial Certification Authority,C=CN +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz 
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- +# CN=Chambers of Commerce Root - 2008,O=AC Camerfirma S.A.,serialNumber=A82743287,L=Madrid (see current address at www.camerfirma.com/address),C=EU +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- +# CN=AAA Certificate Services,O=Comodo CA Limited,L=Salford,ST=Greater Manchester,C=GB +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- +# CN=COMODO Certification Authority,O=COMODO CA Limited,L=Salford,ST=Greater Manchester,C=GB +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv 
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- +# CN=COMODO ECC Certification Authority,O=COMODO CA Limited,L=Salford,ST=Greater Manchester,C=GB +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- +# CN=COMODO RSA Certification Authority,O=COMODO CA Limited,L=Salford,ST=Greater Manchester,C=GB +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END 
CERTIFICATE----- +# CN=Cybertrust Global Root,O=Cybertrust\, Inc +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE +ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- +# CN=Deutsche Telekom Root CA 2,OU=T-TeleSec Trust Center,O=Deutsche Telekom AG,C=DE +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- +# CN=DigiCert Assured ID Root CA,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ 
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- +# CN=DigiCert Assured ID Root G2,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- +# CN=DigiCert Assured ID Root G3,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- +# CN=DigiCert Global Root CA,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- +# CN=DigiCert Global Root G2,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- +# CN=DigiCert Global Root G3,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- +# CN=DigiCert High Assurance EV Root CA,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- +# CN=DigiCert Trusted Root G4,OU=www.digicert.com,O=DigiCert Inc,C=US +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- +# CN=DST ACES CA X6,OU=DST ACES,O=Digital Signature Trust,C=US +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx 
+ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w +MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD +VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx +FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu +ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 +gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH +fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a +ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT +ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk +c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto +dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt +aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI +hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk +QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ +h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR +rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 +9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= +-----END CERTIFICATE----- +# CN=DST Root CA X3,O=Digital Signature Trust Co. +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- +# CN=D-TRUST Root Class 3 CA 2 2009,O=D-Trust GmbH,C=DE +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj 
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +# CN=D-TRUST Root Class 3 CA 2 EV 2009,O=D-Trust GmbH,C=DE +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- +# CN=EC-ACC,OU=Jerarquia Entitats de Certificacio Catalanes,OU=Vegeu https://www.catcert.net/verarrel (c)03,OU=Serveis Publics de Certificacio,O=Agencia Catalana de Certificacio (NIF Q-0801176-I),C=ES +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB +8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy +dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 +YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 +dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh +IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD +LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG +EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g +KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD +ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu +bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg +ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R +85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm +4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV 
+HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd +QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t +lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB +o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 +opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo +dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW +ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN +AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y +/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k +SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy +Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS +Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl +nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= +-----END CERTIFICATE----- +# emailAddress=pki@sk.ee,CN=EE Certification Centre Root CA,O=AS Sertifitseerimiskeskus,C=EE +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- +# CN=Entrust.net Certification Authority (2048),OU=(c) 1999 Entrust.net Limited,OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.),O=Entrust.net +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- +# CN=Entrust Root Certification Authority,OU=(c) 2006 Entrust\, Inc.,OU=www.entrust.net/CPS is incorporated by reference,O=Entrust\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- +# CN=Entrust Root Certification Authority - EC1,OU=(c) 2012 Entrust\, Inc. 
- for authorized use only,OU=See www.entrust.net/legal-terms,O=Entrust\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- +# CN=Entrust Root Certification Authority - G2,OU=(c) 2009 Entrust\, Inc. - for authorized use only,OU=See www.entrust.net/legal-terms,O=Entrust\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- +# OU=ePKI Root Certification Authority,O=Chunghwa Telecom Co.\, Ltd.,C=TW +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh 
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- +# CN=E-Tugra Certification Authority,OU=E-Tugra Sertifikasyon Merkezi,O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş.,L=Ankara,C=TR +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK 
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- +# CN=GDCA TrustAUTH R5 ROOT,O=GUANG DONG CERTIFICATE AUTHORITY CO.\,LTD.,C=CN +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- +# CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS 
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- +# CN=GeoTrust Primary Certification Authority,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- +# CN=GeoTrust Primary Certification Authority - G2,OU=(c) 2007 GeoTrust Inc. - For authorized use only,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- +# CN=GeoTrust Primary Certification Authority - G3,OU=(c) 2008 GeoTrust Inc. 
- For authorized use only,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- +# CN=GeoTrust Universal CA,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN +Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW +koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- +# CN=GeoTrust Universal 
CA 2,O=GeoTrust Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- +# CN=Global Chambersign Root - 2008,O=AC Camerfirma S.A.,serialNumber=A82743287,L=Madrid (see current address at www.camerfirma.com/address),C=EU +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG +HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog 
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- +# CN=GlobalSign,O=GlobalSign,OU=GlobalSign ECC Root CA - R4 +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- +# CN=GlobalSign,O=GlobalSign,OU=GlobalSign ECC Root CA - R5 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- +# CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp 
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- +# CN=GlobalSign,O=GlobalSign,OU=GlobalSign Root CA - R2 +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- +# CN=GlobalSign,O=GlobalSign,OU=GlobalSign Root CA - R3 +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- +# OU=Go Daddy Class 2 Certification Authority,O=The Go Daddy Group\, Inc.,C=US 
+-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- +# CN=Go Daddy Root Certificate Authority - G2,O=GoDaddy.com\, Inc.,L=Scottsdale,ST=Arizona,C=US +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- +# CN=Hellenic Academic and Research Institutions ECC RootCA 2015,O=Hellenic Academic and Research Institutions Cert. 
Authority,L=Athens,C=GR +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- +# CN=Hellenic Academic and Research Institutions RootCA 2011,O=Hellenic Academic and Research Institutions Cert. Authority,C=GR +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl +cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- +# CN=Hellenic Academic and Research Institutions RootCA 2015,O=Hellenic Academic and Research Institutions Cert. 
Authority,L=Athens,C=GR +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- +# CN=Hongkong Post Root CA 1,O=Hongkong Post,C=HK +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- +# CN=IdenTrust Commercial Root CA 1,O=IdenTrust,C=US +-----BEGIN CERTIFICATE----- 
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- +# CN=IdenTrust Public Sector Root CA 1,O=IdenTrust,C=US +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW 
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- +# CN=ISRG Root X1,O=Internet Security Research Group,C=US +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- +# CN=Izenpe.com,O=IZENPE S.A.,C=ES +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ 
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- +# CN=LuxTrust Global Root 2,O=LuxTrust S.A.,C=LU +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m +j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ +x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- +# emailAddress=info@e-szigno.hu,CN=Microsec e-Szigno Root CA 2009,O=Microsec Ltd.,L=Budapest,C=HU +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- +# CN=NetLock Arany (Class Gold) Főtanúsítvány,OU=Tanúsítványkiadók (Certification Services),O=NetLock Kft.,L=Budapest,C=HU +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- +# CN=Network Solutions Certificate Authority,O=Network Solutions L.L.C.,C=US +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- +# CN=OISTE WISeKey Global Root GA CA,OU=OISTE Foundation Endorsed,OU=Copyright (c) 2005,O=WISeKey,C=CH +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- +# CN=OISTE WISeKey Global Root GB CA,OU=OISTE Foundation Endorsed,O=WISeKey,C=CH +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB 
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- +# CN=OpenTrust Root CA G1,O=OpenTrust,C=FR +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b +wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX +/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0 +77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP +uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx +p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx +Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2 +TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W +G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw +vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY +EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1 +2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw +DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E +PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf +gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS +FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0 +V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P +XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I +i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t +TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91 +09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky +Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ +AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj +1oxx +-----END CERTIFICATE----- +# CN=OpenTrust Root CA G2,O=OpenTrust,C=FR +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh +/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e +CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6 +1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE +FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS +gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X +G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy +YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH +vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4 +t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/ 
+gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3 +5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w +DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz +Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0 +nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT +RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT +wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2 +t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa +TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2 +o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU +3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA +iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f +WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM +S1IK +-----END CERTIFICATE----- +# CN=OpenTrust Root CA G3,O=OpenTrust,C=FR +-----BEGIN CERTIFICATE----- +MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx +CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U +cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow +QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl +blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm +3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d +oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5 +DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK +BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q +j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx +4nxp5V2a+EEfOzmTk51V6s2N8fvB +-----END CERTIFICATE----- +# CN=PSCProcert,C=VE,O=Sistema Nacional de Certificacion Electronica,OU=Proveedor de Certificados PROCERT,ST=Miranda,L=Chacao,emailAddress=contacto@procert.net.ve +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1 +dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s +YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz +dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 +aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh +IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ +KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw +MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy +b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx +KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG +A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u +aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9 +7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74 +BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G +ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9 +JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0 +PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2 +0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/ +6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m +v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7 +K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev 
+bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw +MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w +MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD +gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0 +b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh +bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0 +cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp +ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg +ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq +hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD +AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w +MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag +RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t +UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl +cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG +AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN +AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS +1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB +3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv +Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh +HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm +pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz +sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE +qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb +mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9 +opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H +YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- +# CN=QuoVadis Root Certification Authority,OU=Root Certification Authority,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR +dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p 
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- +# CN=QuoVadis Root CA 1 G3,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- +# CN=QuoVadis Root CA 2,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og 
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- +# CN=QuoVadis Root CA 2 G3,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- +# CN=QuoVadis Root CA 3,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W 
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- +# CN=QuoVadis Root CA 3 G3,O=QuoVadis Limited,C=BM +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI 
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- +# CN=Secure Global CA,O=SecureTrust Corporation,C=US +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- +# CN=SecureSign RootCA11,O=Japan Certification Services\, Inc.,C=JP +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- +# CN=SecureTrust CA,O=SecureTrust Corporation,C=US +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI 
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- +# OU=Security Communication EV RootCA1,O=SECOM Trust Systems CO.\,LTD.,C=JP +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz +MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N +IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 +bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE +RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO +zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 +bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF +MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 +VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC +OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW +tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ +q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb +EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ +Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O +VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- +# OU=Security Communication RootCA1,O=SECOM Trust.net,C=JP +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T 
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- +# OU=Security Communication RootCA2,O=SECOM Trust Systems CO.\,LTD.,C=JP +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- +# CN=Sonera Class2 CA,O=Sonera,C=FI +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- +# CN=Staat der Nederlanden EV Root CA,O=Staat der Nederlanden,C=NL +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC 
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- +# CN=Staat der Nederlanden Root CA - G2,O=Staat der Nederlanden,C=NL +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN +GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK +eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC 
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- +# CN=Staat der Nederlanden Root CA - G3,O=Staat der Nederlanden,C=NL +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- +# OU=Starfield Class 2 Certification Authority,O=Starfield Technologies\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D 
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +# CN=Starfield Root Certificate Authority - G2,O=Starfield Technologies\, Inc.,L=Scottsdale,ST=Arizona,C=US +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- +# CN=Starfield Services Root Certificate Authority - G2,O=Starfield Technologies\, Inc.,L=Scottsdale,ST=Arizona,C=US +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- +# CN=SwissSign Gold CA - G2,O=SwissSign AG,C=CH +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV 
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC +bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- +# CN=SwissSign Silver CA - G2,O=SwissSign AG,C=CH +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv +Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- +# CN=SZAFIR ROOT CA2,O=Krajowa Izba Rozliczeniowa S.A.,C=PL +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- +# O=Government Root Certification Authority,C=TW +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/ +MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR +IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q +gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy +yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts +F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2 +jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx +ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC +VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK +YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH +EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN +Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud +DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE +MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK +UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf +qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK +ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE +JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7 
+hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1 +EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm +nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX +udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz +ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe +LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl +pYYsfPQS +-----END CERTIFICATE----- +# CN=TeliaSonera Root CA v1,O=TeliaSonera +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- +# CN=thawte Primary Root CA,OU=(c) 2006 thawte\, Inc. 
- For authorized use only,OU=Certification Services Division,O=thawte\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- +# CN=thawte Primary Root CA - G2,OU=(c) 2007 thawte\, Inc. - For authorized use only,O=thawte\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi +BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- +# CN=thawte Primary Root CA - G3,OU=(c) 2008 thawte\, Inc. 
- For authorized use only,OU=Certification Services Division,O=thawte\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- +# CN=TrustCor ECA-1,OU=TrustCor Certificate Authority,O=TrustCor Systems S. de R.L.,L=Panama City,ST=Panama,C=PA +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- +# CN=TrustCor RootCert CA-1,OU=TrustCor Certificate Authority,O=TrustCor Systems S. 
de R.L.,L=Panama City,ST=Panama,C=PA +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- +# CN=TrustCor RootCert CA-2,OU=TrustCor Certificate Authority,O=TrustCor Systems S. de R.L.,L=Panama City,ST=Panama,C=PA +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa 
+ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- +# OU=Trustis FPS Root CA,O=Trustis Limited,C=GB +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ +BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- +# CN=T-TeleSec GlobalRoot Class 2,OU=T-Systems Trust Center,O=T-Systems Enterprise Services GmbH,C=DE +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- +# CN=T-TeleSec GlobalRoot Class 3,OU=T-Systems Trust Center,O=T-Systems Enterprise Services GmbH,C=DE +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd 
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- +# CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1,OU=Kamu Sertifikasyon Merkezi - Kamu SM,O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK,L=Gebze - Kocaeli,C=TR +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr +N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- +# CN=TÜRKTRUST Elektronik Sertifika Hizmet SağlayÄącÄąsÄą H5,O=TÜRKTRUST Bilgi Ä°letişim ve Bilişim GÃŧvenliği Hizmetleri A.Ş.,L=Ankara,C=TR +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE +BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn +aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg +QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg +SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0 
+MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD +VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 +dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom +/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR +Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3 +4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z +5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0 +hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID +AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX +SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l +VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf +peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF +Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW ++qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- +# CN=TWCA Global Root CA,OU=Root CA,O=TAIWAN-CA,C=TW +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- +# CN=TWCA Root Certification Authority,OU=Root CA,O=TAIWAN-CA,C=TW +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO 
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- +# CN=USERTrust ECC Certification Authority,O=The USERTRUST Network,L=Jersey City,ST=New Jersey,C=US +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +# CN=USERTrust RSA Certification Authority,O=The USERTRUST Network,L=Jersey City,ST=New Jersey,C=US +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G 
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- +# CN=VeriSign Class 3 Public Primary Certification Authority - G3,OU=(c) 1999 VeriSign\, Inc. - For authorized use only,OU=VeriSign Trust Network,O=VeriSign\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- +# CN=VeriSign Class 3 Public Primary Certification Authority - G4,OU=(c) 2007 VeriSign\, Inc. 
- For authorized use only,OU=VeriSign Trust Network,O=VeriSign\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- +# CN=VeriSign Class 3 Public Primary Certification Authority - G5,OU=(c) 2006 VeriSign\, Inc. - For authorized use only,OU=VeriSign Trust Network,O=VeriSign\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- +# CN=VeriSign Universal Root Certification Authority,OU=(c) 2008 VeriSign\, Inc. 
- For authorized use only,OU=VeriSign Trust Network,O=VeriSign\, Inc.,C=US +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H +LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- +# CN=Visa eCommerce Root,OU=Visa International Service Association,O=VISA,C=US +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- +# CN=XRamp Global Certification Authority,O=XRamp Security Services Inc,OU=www.xrampsecurity.com,C=US +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk 
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults.rb new file mode 100644 index 0000000..9405082 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +require_relative 'aws-defaults/default_configuration' \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/default_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/default_configuration.rb new file mode 100644 index 0000000..6aaaa78 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/default_configuration.rb @@ -0,0 +1,153 @@ +# frozen_string_literal: true + +require_relative 'defaults_mode_config_resolver' + +module Aws + + # A defaults mode determines how certain default configuration options are resolved in the SDK. + # + # *Note*: For any mode other than `'legacy'` the vended default values might change as best practices may + # evolve. As a result, it is encouraged to perform testing when upgrading the SDK if you are using a mode other than + # `'legacy'`. While the `'legacy'` defaults mode is specific to Ruby, + # other modes are standardized across all of the AWS SDKs. + # + # The defaults mode can be configured: + # + # * Directly on a client via `:defaults_mode` + # + # * On a configuration profile via the "defaults_mode" profile file property. + # + # * Globally via the "AWS_DEFAULTS_MODE" environment variable. + # + # + # @code_generation START - documentation + # The following `:default_mode` values are supported: + # + # * `'standard'` - + # The STANDARD mode provides the latest recommended default values + # that should be safe to run in most scenarios + # + # Note that the default values vended from this mode might change as + # best practices may evolve. 
As a result, it is encouraged to perform
+  #   tests when upgrading the SDK
+  #
+  # * `'in-region'` -
+  #   The IN\_REGION mode builds on the standard mode and includes
+  #   optimization tailored for applications which call AWS services from
+  #   within the same AWS region
+  #
+  #   Note that the default values vended from this mode might change as
+  #   best practices may evolve. As a result, it is encouraged to perform
+  #   tests when upgrading the SDK
+  #
+  # * `'cross-region'` -
+  #   The CROSS\_REGION mode builds on the standard mode and includes
+  #   optimization tailored for applications which call AWS services in a
+  #   different region
+  #
+  #   Note that the default values vended from this mode might change as
+  #   best practices may evolve. As a result, it is encouraged to perform
+  #   tests when upgrading the SDK
+  #
+  # * `'mobile'` -
+  #   The MOBILE mode builds on the standard mode and includes
+  #   optimization tailored for mobile applications
+  #
+  #   Note that the default values vended from this mode might change as
+  #   best practices may evolve. As a result, it is encouraged to perform
+  #   tests when upgrading the SDK
+  #
+  # * `'auto'` -
+  #   The AUTO mode is an experimental mode that builds on the standard
+  #   mode. The SDK will attempt to discover the execution environment to
+  #   determine the appropriate settings automatically.
+  #
+  #   Note that the auto detection is heuristics-based and does not
+  #   guarantee 100% accuracy. STANDARD mode will be used if the execution
+  #   environment cannot be determined. The auto detection might query
+  #   [EC2 Instance Metadata service][1], which might introduce latency.
+  #   Therefore we recommend choosing an explicit defaults\_mode instead
+  #   if startup latency is critical to your application
+  #
+  # * `'legacy'` -
+  #   The LEGACY mode provides default settings that vary per SDK and were
+  #   used prior to establishment of defaults\_mode
+  #
+  # Based on the provided mode, the SDK will vend sensible default values
+  # tailored to the mode for the following settings:
+  #
+  # * `:retry_mode` -
+  #   A retry mode specifies how the SDK attempts retries. See [Retry
+  #   Mode][2]
+  #
+  # * `:sts_regional_endpoints` -
+  #   Specifies how the SDK determines the AWS service endpoint that it
+  #   uses to talk to the AWS Security Token Service (AWS STS). See
+  #   [Setting STS Regional endpoints][3]
+  #
+  # * `:s3_us_east_1_regional_endpoint` -
+  #   Specifies how the SDK determines the AWS service endpoint that it
+  #   uses to talk to Amazon S3 for the us-east-1 region
+  #
+  # * `:http_open_timeout` -
+  #   The amount of time after making an initial connection attempt on a
+  #   socket, where if the client does not receive a completion of the
+  #   connect handshake, the client gives up and fails the operation
+  #
+  # * `:ssl_timeout` -
+  #   The maximum amount of time that a TLS handshake is allowed to take
+  #   from the time the CLIENT HELLO message is sent to the time the
+  #   client and server have fully negotiated ciphers and exchanged keys
+  #
+  # All options above can be configured by users, and the overridden value will take precedence.
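For illustration, a minimal sketch of opting into a mode on a single client. The service and region are arbitrary choices; `:defaults_mode` is the documented client option, and the expected value follows from the configuration table further below:

    require 'aws-sdk-sts'  # bundled with the aws-sdk-core gem

    # Vend the 'standard' defaults instead of the legacy per-SDK values.
    sts = Aws::STS::Client.new(
      region: 'us-east-1',
      defaults_mode: 'standard'
    )

    # Standard mode overrides connectTimeoutInMillis to 3100, which the
    # resolver scales to seconds for :http_open_timeout, so this should
    # print 3.1 (assuming no explicit override elsewhere).
    puts sts.config.http_open_timeout

The same mode could equally come from the `AWS_DEFAULTS_MODE` environment variable or a profile's `defaults_mode` property, as listed above.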
+ # + # [1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + # [2]: https://docs.aws.amazon.com/sdkref/latest/guide/setting-global-retry_mode.html + # [3]: https://docs.aws.amazon.com/sdkref/latest/guide/setting-global-sts_regional_endpoints.html + # + # @code_generation END - documentation + module DefaultsModeConfiguration + # @api private + # @code_generation START - configuration + SDK_DEFAULT_CONFIGURATION = + { + "version" => 1, + "base" => { + "retryMode" => "standard", + "stsRegionalEndpoints" => "regional", + "s3UsEast1RegionalEndpoints" => "regional", + "connectTimeoutInMillis" => 1100, + "tlsNegotiationTimeoutInMillis" => 1100 + }, + "modes" => { + "standard" => { + "connectTimeoutInMillis" => { + "override" => 3100 + }, + "tlsNegotiationTimeoutInMillis" => { + "override" => 3100 + } + }, + "in-region" => { + }, + "cross-region" => { + "connectTimeoutInMillis" => { + "override" => 3100 + }, + "tlsNegotiationTimeoutInMillis" => { + "override" => 3100 + } + }, + "mobile" => { + "connectTimeoutInMillis" => { + "override" => 30000 + }, + "tlsNegotiationTimeoutInMillis" => { + "override" => 30000 + } + } + } + } + # @code_generation END - configuration + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/defaults_mode_config_resolver.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/defaults_mode_config_resolver.rb new file mode 100644 index 0000000..df868c2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-defaults/defaults_mode_config_resolver.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true + +module Aws + #@api private + class DefaultsModeConfigResolver + + @@application_region = nil + @@application_region_mutex = Mutex.new + @@imds_client = EC2Metadata.new(retries: 0, http_open_timeout: 0.01) + + # mappings from Ruby SDK configuration names to the + # sdk defaults option names and (optional) scale modifiers + CFG_OPTIONS = { + retry_mode: { name: "retryMode" }, + sts_regional_endpoints: { name: "stsRegionalEndpoints" }, + s3_us_east_1_regional_endpoint: { name: "s3UsEast1RegionalEndpoints" }, + http_open_timeout: { name: "connectTimeoutInMillis", scale: 0.001 }, + http_read_timeout: { name: "timeToFirstByteTimeoutInMillis", scale: 0.001 }, + ssl_timeout: { name: "tlsNegotiationTimeoutInMillis", scale: 0.001 } + }.freeze + + def initialize(sdk_defaults, cfg) + @sdk_defaults = sdk_defaults + @cfg = cfg + @resolved_mode = nil + @mutex = Mutex.new + end + + # option_name should be the symbolized ruby name to resolve + # returns the ruby appropriate value or nil if none are resolved + def resolve(option_name) + return unless (std_option = CFG_OPTIONS[option_name]) + mode = resolved_mode.downcase + + return nil if mode == 'legacy' + + value = resolve_for_mode(std_option[:name], mode) + value = value * std_option[:scale] if value && std_option[:scale] + + value + end + + private + def resolved_mode + @mutex.synchronize do + return @resolved_mode unless @resolved_mode.nil? + + @resolved_mode = @cfg.defaults_mode == 'auto' ? resolve_auto_mode : @cfg.defaults_mode + end + end + + def resolve_auto_mode + return "mobile" if env_mobile? + + region = application_current_region + + if region + @cfg.region == region ? "in-region": "cross-region" + else + # We don't seem to be mobile, and we couldn't determine whether we're running within an AWS region. Fall back to standard. 
+ 'standard' + end + end + + def application_current_region + resolved_region = @@application_region_mutex.synchronize do + return @@application_region unless @@application_region.nil? + + region = nil + if ENV['AWS_EXECUTION_ENV'] + region = ENV['AWS_REGION'] || ENV['AWS_DEFAULT_REGION'] + end + + if region.nil? && ENV['AWS_EC2_METADATA_DISABLED']&.downcase != "true" + begin + region = @@imds_client.get('/latest/meta-data/placement/region') + rescue + # unable to get region, leave it unset + end + end + + # required so that we cache the unknown/nil result + @@application_region = region || :unknown + end + resolved_region == :unknown ? nil : resolved_region + end + + def resolve_for_mode(name, mode) + base_value = @sdk_defaults['base'][name] + mode_value = @sdk_defaults['modes'].fetch(mode, {})[name] + + if mode_value.nil? + return base_value + end + + return mode_value['override'] unless mode_value['override'].nil? + return base_value + mode_value['add'] unless mode_value['add'].nil? + return base_value * mode_value['multiply'] unless mode_value['multiply'].nil? + return base_value + end + + def env_mobile? + false + end + + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core.rb new file mode 100644 index 0000000..28cecc4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core.rb @@ -0,0 +1,202 @@ +# frozen_string_literal: true + +require 'aws-partitions' +require 'seahorse' +require 'jmespath' + +require_relative 'aws-sdk-core/deprecations' + +# credential providers + +require_relative 'aws-sdk-core/credential_provider' +require_relative 'aws-sdk-core/refreshing_credentials' +require_relative 'aws-sdk-core/assume_role_credentials' +require_relative 'aws-sdk-core/assume_role_web_identity_credentials' +require_relative 'aws-sdk-core/credentials' +require_relative 'aws-sdk-core/credential_provider_chain' +require_relative 'aws-sdk-core/ecs_credentials' +require_relative 'aws-sdk-core/instance_profile_credentials' +require_relative 'aws-sdk-core/shared_credentials' +require_relative 'aws-sdk-core/process_credentials' +require_relative 'aws-sdk-core/sso_credentials' + +# tokens and token providers +require_relative 'aws-sdk-core/token' +require_relative 'aws-sdk-core/token_provider' +require_relative 'aws-sdk-core/static_token_provider' +require_relative 'aws-sdk-core/refreshing_token' +require_relative 'aws-sdk-core/sso_token_provider' +require_relative 'aws-sdk-core/token_provider_chain' +require_relative 'aws-sdk-core/plugins/bearer_authorization' + +# client modules + +require_relative 'aws-sdk-core/client_stubs' +require_relative 'aws-sdk-core/async_client_stubs' +require_relative 'aws-sdk-core/eager_loader' +require_relative 'aws-sdk-core/errors' +require_relative 'aws-sdk-core/pageable_response' +require_relative 'aws-sdk-core/pager' +require_relative 'aws-sdk-core/param_converter' +require_relative 'aws-sdk-core/param_validator' +require_relative 'aws-sdk-core/shared_config' +require_relative 'aws-sdk-core/structure' +require_relative 'aws-sdk-core/type_builder' +require_relative 'aws-sdk-core/util' + +# resource classes + +require_relative 'aws-sdk-core/resources/collection' + +# logging + +require_relative 'aws-sdk-core/log/formatter' +require_relative 'aws-sdk-core/log/param_filter' +require_relative 'aws-sdk-core/log/param_formatter' + +# stubbing + +require_relative 'aws-sdk-core/stubbing/empty_stub' 
+require_relative 'aws-sdk-core/stubbing/data_applicator' +require_relative 'aws-sdk-core/stubbing/stub_data' +require_relative 'aws-sdk-core/stubbing/xml_error' + +# stubbing protocols + +require_relative 'aws-sdk-core/stubbing/protocols/ec2' +require_relative 'aws-sdk-core/stubbing/protocols/json' +require_relative 'aws-sdk-core/stubbing/protocols/query' +require_relative 'aws-sdk-core/stubbing/protocols/rest' +require_relative 'aws-sdk-core/stubbing/protocols/rest_json' +require_relative 'aws-sdk-core/stubbing/protocols/rest_xml' +require_relative 'aws-sdk-core/stubbing/protocols/api_gateway' + +# protocols + +require_relative 'aws-sdk-core/rest' +require_relative 'aws-sdk-core/xml' +require_relative 'aws-sdk-core/json' + +# event stream + +require_relative 'aws-sdk-core/binary' +require_relative 'aws-sdk-core/event_emitter' + +# endpoint discovery + +require_relative 'aws-sdk-core/endpoint_cache' + +# client metrics + +require_relative 'aws-sdk-core/client_side_monitoring/request_metrics' +require_relative 'aws-sdk-core/client_side_monitoring/publisher' + +# utilities + +require_relative 'aws-sdk-core/arn' +require_relative 'aws-sdk-core/arn_parser' +require_relative 'aws-sdk-core/ec2_metadata' + +# dynamic endpoints +require_relative 'aws-sdk-core/endpoints' +require_relative 'aws-sdk-core/plugins/signature_v4' + +# defaults +require_relative 'aws-defaults' + +# plugins +# loaded through building STS or SSO .. + +# aws-sdk-sts is included to support Aws::AssumeRoleCredentials +require_relative 'aws-sdk-sts' + +# aws-sdk-sso is included to support Aws::SSOCredentials +require_relative 'aws-sdk-sso' +require_relative 'aws-sdk-ssooidc' + +module Aws + + CORE_GEM_VERSION = File.read(File.expand_path('../../VERSION', __FILE__)).strip + + @config = {} + + class << self + + # @api private + def shared_config + enabled = ENV["AWS_SDK_CONFIG_OPT_OUT"] ? false : true + @shared_config ||= SharedConfig.new(config_enabled: enabled) + end + + # @return [Hash] Returns a hash of default configuration options shared + # by all constructed clients. + attr_reader :config + + # @param [Hash] config + def config=(config) + if Hash === config + @config = config + else + raise ArgumentError, 'configuration object must be a hash' + end + end + + # @see (Aws::Partitions.partition) + def partition(partition_name) + Aws::Partitions.partition(partition_name) + end + + # @see (Aws::Partitions.partitions) + def partitions + Aws::Partitions.partitions + end + + # The SDK ships with a ca certificate bundle to use when verifying SSL + # peer certificates. By default, this cert bundle is *NOT* used. The + # SDK will rely on the default cert available to OpenSSL. This ensures + # the cert provided by your OS is used. + # + # For cases where the default cert is unavailable, e.g. Windows, you + # can call this method. + # + # Aws.use_bundled_cert! + # + # @return [String] Returns the path to the bundled cert. + def use_bundled_cert! + config.delete(:ssl_ca_directory) + config.delete(:ssl_ca_store) + config[:ssl_ca_bundle] = File.expand_path(File.join( + File.dirname(__FILE__), + '..', + 'ca-bundle.crt' + )) + end + + # Close any long-lived connections maintained by the SDK's internal + # connection pool. + # + # Applications that rely heavily on the `fork()` system call on POSIX systems + # should call this method in the child process directly after fork to ensure + # there are no race conditions between the parent + # process and its children + # for the pooled TCP connections. 
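A concrete sketch of that fork pattern before the doc comment continues; the child's workload is a placeholder:

    require 'aws-sdk-core'

    pid = fork do
      # Drop pooled TCP connections inherited from the parent so the two
      # processes do not share sockets.
      Aws.empty_connection_pools!
      # ... SDK calls in the child go here ...
    end
    Process.wait(pid)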
+    #
+    # Child processes that make multi-threaded calls to the SDK should block on
+    # this call before beginning work.
+    #
+    # @return [nil]
+    def empty_connection_pools!
+      Seahorse::Client::NetHttp::ConnectionPool.pools.each do |pool|
+        pool.empty!
+      end
+    end
+
+    # @api private
+    def eager_autoload!(*args)
+      msg = 'Aws.eager_autoload is no longer needed, usage of '\
+            'autoload has been replaced with require statements'
+      warn(msg)
+    end
+
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn.rb
new file mode 100644
index 0000000..651a1ca
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn.rb
@@ -0,0 +1,105 @@
+# frozen_string_literal: true
+
+module Aws
+  # Create and provide access to components of Amazon Resource Names (ARN).
+  #
+  # You can create an ARN and access its components like the following:
+  #
+  #     arn = Aws::ARN.new(
+  #       partition: 'aws',
+  #       service: 's3',
+  #       region: 'us-west-2',
+  #       account_id: '12345678910',
+  #       resource: 'foo/bar'
+  #     )
+  #     # => #
+  #
+  #     arn.to_s
+  #     # => "arn:aws:s3:us-west-2:12345678910:foo/bar"
+  #
+  #     arn.partition
+  #     # => 'aws'
+  #     arn.service
+  #     # => 's3'
+  #     arn.resource
+  #     # => foo/bar
+  #
+  # # Note: parser available for parsing resource details
+  # @see Aws::ARNParser#parse_resource
+  #
+  # @see https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns
+  class ARN
+
+    # @param [Hash] options
+    # @option options [String] :partition
+    # @option options [String] :service
+    # @option options [String] :region
+    # @option options [String] :account_id
+    # @option options [String] :resource
+    def initialize(options = {})
+      @partition = options[:partition]
+      @service = options[:service]
+      @region = options[:region]
+      @account_id = options[:account_id]
+      @resource = options[:resource]
+    end
+
+    # @return [String]
+    attr_reader :partition
+
+    # @return [String]
+    attr_reader :service
+
+    # @return [String]
+    attr_reader :region
+
+    # @return [String]
+    attr_reader :account_id
+
+    # @return [String]
+    attr_reader :resource
+
+    # Validates that the ARN contains non-empty required components.
+    # Region and account_id can be optional.
+    #
+    # @return [Boolean]
+    def valid?
+      !partition.nil? && !partition.empty? &&
+        !service.nil? && !service.empty? &&
+        !resource.nil? && !resource.empty?
+    end
+
+    # Return the ARN as a string
+    #
+    # @return [String]
+    def to_s
+      "arn:#{partition}:#{service}:#{region}:#{account_id}:#{resource}"
+    end
+
+    # Return the ARN as a hash
+    #
+    # @return [Hash]
+    def to_h
+      {
+        partition: @partition,
+        service: @service,
+        region: @region,
+        account_id: @account_id,
+        resource: @resource
+      }
+    end
+
+    # Return the ARN as JSON
+    #
+    # @return [Hash]
+    def as_json(_options = nil)
+      {
+        'partition' => @partition,
+        'service' => @service,
+        'region' => @region,
+        'accountId' => @account_id,
+        'resource' => @resource
+      }
+    end
+  end
+end
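A quick usage sketch of the class above; the component values are arbitrary:

    require 'aws-sdk-core'

    arn = Aws::ARN.new(
      partition: 'aws',
      service: 's3',
      region: 'us-west-2',
      account_id: '123456789012',
      resource: 'my-bucket/my-key'
    )
    arn.valid?  # => true
    arn.to_s    # => "arn:aws:s3:us-west-2:123456789012:my-bucket/my-key"
    arn.to_h    # => {:partition=>"aws", :service=>"s3", ...}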
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn_parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn_parser.rb
new file mode 100644
index 0000000..a7717e9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/arn_parser.rb
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+module Aws
+  module ARNParser
+    # Parse a string with an ARN format into an {Aws::ARN} object.
+    # `InvalidARNError` is raised when a parsing error is encountered or the
+    # resulting ARN object contains invalid components (nil/empty).
+    #
+    # @param [String] arn_str
+    #
+    # @return [Aws::ARN]
+    # @see https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns
+    def self.parse(arn_str)
+      parts = arn_str.nil? ? [] : arn_str.split(':', 6)
+      raise Aws::Errors::InvalidARNError if parts.size < 6
+
+      # part[0] is "arn"
+      arn = ARN.new(
+        partition: parts[1],
+        service: parts[2],
+        region: parts[3],
+        account_id: parts[4],
+        resource: parts[5]
+      )
+      raise Aws::Errors::InvalidARNError unless arn.valid?
+
+      arn
+    end
+
+    # Checks whether a String could be an ARN or not. An ARN starts with 'arn:'
+    # and has at least 6 segments separated by a colon (:).
+    #
+    # @param [String] str
+    #
+    # @return [Boolean]
+    def self.arn?(str)
+      !str.nil? && str.start_with?('arn:') && str.scan(/:/).length >= 5
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_credentials.rb
new file mode 100644
index 0000000..7f7ecc0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_credentials.rb
@@ -0,0 +1,86 @@
+# frozen_string_literal: true
+
+require 'set'
+
+module Aws
+  # An auto-refreshing credential provider that assumes a role via
+  # {Aws::STS::Client#assume_role}.
+  #
+  #     role_credentials = Aws::AssumeRoleCredentials.new(
+  #       client: Aws::STS::Client.new(...),
+  #       role_arn: "linked::account::arn",
+  #       role_session_name: "session-name"
+  #     )
+  #     ec2 = Aws::EC2::Client.new(credentials: role_credentials)
+  #
+  # If you omit `:client` option, a new {Aws::STS::Client} object will be
+  # constructed with additional options that were provided.
+  #
+  # @see Aws::STS::Client#assume_role
+  class AssumeRoleCredentials
+
+    include CredentialProvider
+    include RefreshingCredentials
+
+    # @option options [required, String] :role_arn
+    # @option options [required, String] :role_session_name
+    # @option options [String] :policy
+    # @option options [Integer] :duration_seconds
+    # @option options [String] :external_id
+    # @option options [STS::Client] :client
+    # @option options [Callable] before_refresh Proc called before
+    #   credentials are refreshed. Useful for updating tokens.
+    #   `before_refresh` is called when AWS credentials are
+    #   required and need to be refreshed. Tokens can be refreshed using
+    #   the following example:
+    #
+    #     before_refresh = Proc.new do |assume_role_credentials|
+    #       assume_role_credentials.assume_role_params['token_code'] = update_token
+    #     end
+    #
+    def initialize(options = {})
+      client_opts = {}
+      @assume_role_params = {}
+      options.each_pair do |key, value|
+        if self.class.assume_role_options.include?(key)
+          @assume_role_params[key] = value
+        elsif !CLIENT_EXCLUDE_OPTIONS.include?(key)
+          client_opts[key] = value
+        end
+      end
+      @client = client_opts[:client] || STS::Client.new(client_opts)
+      @async_refresh = true
+      super
+    end
+
+    # @return [STS::Client]
+    attr_reader :client
+
+    # @return [Hash]
+    attr_reader :assume_role_params
+
+    private
+
+    def refresh
+      c = @client.assume_role(@assume_role_params).credentials
+      @credentials = Credentials.new(
+        c.access_key_id,
+        c.secret_access_key,
+        c.session_token
+      )
+      @expiration = c.expiration
+    end
+
+    class << self
+
+      # @api private
+      def assume_role_options
+        @aro ||= begin
+          input = STS::Client.api.operation(:assume_role).input
+          Set.new(input.shape.member_names)
+        end
+      end
+
+    end
+  end
+end
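A hedged sketch of the provider defined above. The role ARN is a placeholder and `fetch_mfa_token` is a hypothetical helper, but the option names come straight from the documentation:

    require 'aws-sdk-core'

    role_credentials = Aws::AssumeRoleCredentials.new(
      client: Aws::STS::Client.new(region: 'us-east-1'),
      role_arn: 'arn:aws:iam::123456789012:role/demo',  # placeholder ARN
      role_session_name: 'demo-session',
      before_refresh: proc { |provider|
        # fetch_mfa_token is a hypothetical helper; refresh a token
        # before each assume-role call.
        provider.assume_role_params[:token_code] = fetch_mfa_token
      }
    )

    # Any client built on aws-sdk-core accepts the provider, e.g. with the
    # aws-sdk-ec2 gem installed:
    #   ec2 = Aws::EC2::Client.new(credentials: role_credentials)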
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_web_identity_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_web_identity_credentials.rb
new file mode 100644
index 0000000..9d9c44c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/assume_role_web_identity_credentials.rb
@@ -0,0 +1,108 @@
+# frozen_string_literal: true
+
+require 'set'
+require 'securerandom'
+require 'base64'
+
+module Aws
+  # An auto-refreshing credential provider that assumes a role via
+  # {Aws::STS::Client#assume_role_with_web_identity}.
+  #
+  #     role_credentials = Aws::AssumeRoleWebIdentityCredentials.new(
+  #       client: Aws::STS::Client.new(...),
+  #       role_arn: "linked::account::arn",
+  #       web_identity_token_file: "/path/to/token/file",
+  #       role_session_name: "session-name"
+  #       ...
+  #     )
+  #     ec2 = Aws::EC2::Client.new(credentials: role_credentials)
+  #
+  # If you omit `:client` option, a new {Aws::STS::Client} object will be
+  # constructed with additional options that were provided.
+  #
+  # @see Aws::STS::Client#assume_role_with_web_identity
+  class AssumeRoleWebIdentityCredentials
+
+    include CredentialProvider
+    include RefreshingCredentials
+
+    # @param [Hash] options
+    # @option options [required, String] :role_arn the IAM role
+    #   to be assumed
+    #
+    # @option options [required, String] :web_identity_token_file
+    #   absolute path to the file on disk containing OIDC token
+    #
+    # @option options [String] :role_session_name the IAM session
+    #   name used to distinguish session, when not provided, base64
+    #   encoded UUID is generated as the session name
+    #
+    # @option options [STS::Client] :client
+    #
+    # @option options [Callable] before_refresh Proc called before
+    #   credentials are refreshed. `before_refresh` is called
+    #   with an instance of this object when
+    #   AWS credentials are required and need to be refreshed.
+    def initialize(options = {})
+      client_opts = {}
+      @assume_role_web_identity_params = {}
+      @token_file = options.delete(:web_identity_token_file)
+      @async_refresh = true
+      options.each_pair do |key, value|
+        if self.class.assume_role_web_identity_options.include?(key)
+          @assume_role_web_identity_params[key] = value
+        elsif !CLIENT_EXCLUDE_OPTIONS.include?(key)
+          client_opts[key] = value
+        end
+      end
+
+      unless @assume_role_web_identity_params[:role_session_name]
+        # not provided, generate encoded UUID as session name
+        @assume_role_web_identity_params[:role_session_name] = _session_name
+      end
+      @client = client_opts[:client] || STS::Client.new(client_opts.merge(credentials: false))
+      super
+    end
+
+    # @return [STS::Client]
+    attr_reader :client
+
+    private
+
+    def refresh
+      # read from token file every time it refreshes
+      @assume_role_web_identity_params[:web_identity_token] = _token_from_file(@token_file)
+
+      c = @client.assume_role_with_web_identity(
+        @assume_role_web_identity_params).credentials
+      @credentials = Credentials.new(
+        c.access_key_id,
+        c.secret_access_key,
+        c.session_token
+      )
+      @expiration = c.expiration
+    end
+
+    def _token_from_file(path)
+      unless path && File.exist?(path)
+        raise Aws::Errors::MissingWebIdentityTokenFile.new
+      end
+      File.read(path)
+    end
+
+    def _session_name
+      Base64.strict_encode64(SecureRandom.uuid)
+    end
+
+    class << self
+
+      # @api private
+      def assume_role_web_identity_options
+        @arwio ||= begin
+          input = Aws::STS::Client.api.operation(:assume_role_with_web_identity).input
+          Set.new(input.shape.member_names)
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/async_client_stubs.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/async_client_stubs.rb
new file mode 100644
index 0000000..f27632b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/async_client_stubs.rb
@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+
+module Aws
+  module AsyncClientStubs
+
+    include Aws::ClientStubs
+
+    # @api private
+    def setup_stubbing
+      @stubs = {}
+      @stub_mutex = Mutex.new
+      if Hash === @config.stub_responses
+        @config.stub_responses.each do |operation_name, stubs|
+          apply_stubs(operation_name, Array === stubs ?
stubs : [stubs]) + end + end + + # When a client is stubbed allow the user to access the requests made + @api_requests = [] + + # allow to access signaled events when client is stubbed + @send_events = [] + + requests = @api_requests + send_events = @send_events + + self.handle do |context| + if input_stream = context[:input_event_stream_handler] + stub_stream = StubStream.new + stub_stream.send_events = send_events + input_stream.event_emitter.stream = stub_stream + input_stream.event_emitter.validate_event = context.config.validate_params + end + requests << { + operation_name: context.operation_name, + params: context.params, + context: context + } + @handler.call(context) + end + end + + def send_events + if config.stub_responses + @send_events + else + msg = 'This method is only implemented for stubbed clients, and is '\ + 'available when you enable stubbing in the constructor with `stub_responses: true`' + raise NotImplementedError.new(msg) + end + end + + class StubStream + + def initialize + @state = :open + end + + attr_accessor :send_events + + attr_reader :state + + def data(bytes, options = {}) + if options[:end_stream] + @state = :closed + else + decoder = Aws::EventStream::Decoder.new + event = decoder.decode_chunk(bytes).first + @send_events << decoder.decode_chunk(event.payload.read).first + end + end + + def closed? + @state == :closed + end + + def close + @state = :closed + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary.rb new file mode 100644 index 0000000..ba7e807 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +require_relative 'binary/decode_handler' +require_relative 'binary/encode_handler' +require_relative 'binary/event_stream_decoder' +require_relative 'binary/event_stream_encoder' +require_relative 'binary/event_builder' +require_relative 'binary/event_parser' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/decode_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/decode_handler.rb new file mode 100644 index 0000000..f4bbe50 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/decode_handler.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Aws + module Binary + + # @api private + class DecodeHandler < Seahorse::Client::Handler + + def call(context) + if eventstream_member = eventstream?(context) + attach_eventstream_listeners(context, eventstream_member) + end + @handler.call(context) + end + + private + + def eventstream?(ctx) + ctx.operation.output.shape.members.each do |_, ref| + return ref if ref.eventstream + end + end + + def attach_eventstream_listeners(context, rules) + + context.http_response.on_headers(200) do + protocol = context.config.api.metadata['protocol'] + output_handler = context[:output_event_stream_handler] || context[:event_stream_handler] + context.http_response.body = EventStreamDecoder.new( + protocol, + rules, + context.operation.output, + context.operation.errors, + context.http_response.body, + output_handler) + if input_emitter = context[:input_event_emitter] + # #emit will be blocked until 200 success + # see Aws::EventEmitter#emit + input_emitter.signal_queue << "ready" + end + end + + context.http_response.on_success(200) do + context.http_response.body = 
context.http_response.body.events
+        end
+
+        context.http_response.on_error do
+          # Potential enhancement to be made
+          # since we don't want to track raw bytes in memory
+          context.http_response.body = StringIO.new
+        end
+
+      end
+
+    end
+
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/encode_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/encode_handler.rb
new file mode 100644
index 0000000..59bf2e1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/encode_handler.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module Aws
+  module Binary
+
+    # @api private
+    class EncodeHandler < Seahorse::Client::Handler
+
+      def call(context)
+        if eventstream_member = eventstream_input?(context)
+          input_es_handler = context[:input_event_stream_handler]
+          input_es_handler.event_emitter.encoder = EventStreamEncoder.new(
+            context.config.api.metadata['protocol'],
+            eventstream_member,
+            context.operation.input,
+            signer_for(context)
+          )
+          context[:input_event_emitter] = input_es_handler.event_emitter
+        end
+        @handler.call(context)
+      end
+
+      private
+
+      def signer_for(context)
+        # New endpoint/signing logic, use the auth scheme to make a signer
+        if context[:auth_scheme]
+          Aws::Plugins::Sign.signer_for(context[:auth_scheme], context.config)
+        else
+          # Previous implementation always assumed sigv4_signer from config.
+          # Relies only on sigv4 signing (and plugin) for event stream services
+          context.config.sigv4_signer
+        end
+      end
+
+      def eventstream_input?(ctx)
+        ctx.operation.input.shape.members.each do |_, ref|
+          return ref if ref.eventstream
+        end
+      end
+
+    end
+
+  end
+end
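Before the builder and parser below, it may help to see the wire-level objects these handlers exchange. A small sketch using the aws-eventstream gem directly; the event name and payload are arbitrary, and `Aws::EventStream::Encoder` is assumed to be the encoding counterpart of the `Decoder` used above:

    require 'aws-eventstream'
    require 'stringio'

    # HeaderValue and Message are exactly the classes EventBuilder constructs.
    headers = {
      ':message-type' => Aws::EventStream::HeaderValue.new(type: 'string', value: 'event'),
      ':event-type'   => Aws::EventStream::HeaderValue.new(type: 'string', value: 'chunk')
    }
    message = Aws::EventStream::Message.new(headers: headers, payload: StringIO.new('{}'))

    bytes = Aws::EventStream::Encoder.new.encode(message)     # binary framing
    decoded, _eof = Aws::EventStream::Decoder.new.decode_chunk(bytes)
    decoded.headers[':event-type'].value                      # => "chunk"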
+ if implicit_payload_members.size > 1 + payload_shape = Shapes::StructureShape.new + implicit_payload_members.each do |m_name, m_ref| + payload_shape.add_member(m_name, m_ref) + end + payload_ref = Shapes::ShapeRef.new(shape: payload_shape) + + payload = build_payload_members(payload_ref, params) + else + m_name, m_ref = implicit_payload_members.first + streaming, content_type = _content_type(m_ref.shape) + + es_headers[":content-type"] = Aws::EventStream::HeaderValue.new( + type: "string", value: content_type) + payload = _build_payload(streaming, m_ref, params[m_name]) + end + end + + + event_ref.shape.members.each do |member_name, member_ref| + if member_ref.eventheader && params[member_name] + header_value = params[member_name] + es_headers[member_ref.shape.name] = Aws::EventStream::HeaderValue.new( + type: _header_value_type(member_ref.shape, header_value), + value: header_value + ) + elsif member_ref.eventpayload && params[member_name] + # explicit payload + streaming, content_type = _content_type(member_ref.shape) + + es_headers[":content-type"] = Aws::EventStream::HeaderValue.new( + type: "string", value: content_type) + payload = _build_payload(streaming, member_ref, params[member_name]) + end + end + + Aws::EventStream::Message.new( + headers: es_headers, + payload: StringIO.new(payload) + ) + end + + def _content_type(shape) + case shape + when BlobShape then [true, "application/octet-stream"] + when StringShape then [true, "text/plain"] + when StructureShape then + if @serializer_class.name.include?('Xml') + [false, "text/xml"] + elsif @serializer_class.name.include?('Json') + [false, "application/json"] + end + else + raise Aws::Errors::EventStreamBuilderError.new( + "Unsupport eventpayload shape: #{shape.name}") + end + end + + def _header_value_type(shape, value) + case shape + when StringShape then "string" + when IntegerShape then "integer" + when TimestampShape then "timestamp" + when BlobShape then "bytes" + when BooleanShape then !!value ? "bool_true" : "bool_false" + else + raise Aws::Errors::EventStreamBuilderError.new( + "Unsupported eventheader shape: #{shape.name}") + end + end + + def _build_payload(streaming, ref, value) + streaming ? 
value : @serializer_class.new(ref).serialize(value) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_parser.rb new file mode 100644 index 0000000..1561ca4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_parser.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +module Aws + module Binary + # @api private + class EventParser + + include Seahorse::Model::Shapes + + # @param [Class] parser_class + # @param [Seahorse::Model::ShapeRef] rules (of eventstream member) + # @param [Array] error_refs array of error ShapeRefs + # @param [Seahorse::Model::ShapeRef] output_ref + def initialize(parser_class, rules, error_refs, output_ref) + @parser_class = parser_class + @rules = rules + @error_refs = error_refs + @output_ref = output_ref + end + + # Parse raw event message into event struct + # based on its ShapeRef + # + # @return [Struct] Event Struct + def apply(raw_event) + parse(raw_event) + end + + private + + def parse(raw_event) + message_type = raw_event.headers.delete(":message-type") + if message_type + case message_type.value + when 'error' + parse_error_event(raw_event) + when 'event' + parse_event(raw_event) + when 'exception' + parse_exception(raw_event) + else + raise Aws::Errors::EventStreamParserError.new( + 'Unrecognized :message-type value for the event') + end + else + # no :message-type header, regular event by default + parse_event(raw_event) + end + end + + def parse_exception(raw_event) + exception_type = raw_event.headers.delete(":exception-type").value + name, ref = @rules.shape.member_by_location_name(exception_type) + # exception lives in payload implicitly + exception = parse_payload(raw_event.payload.read, ref) + exception.event_type = name + exception + end + + def parse_error_event(raw_event) + error_code = raw_event.headers.delete(":error-code") + error_message = raw_event.headers.delete(":error-message") + Aws::Errors::EventError.new( + :error, + error_code ? error_code.value : error_code, + error_message ? error_message.value : error_message + ) + end + + def parse_event(raw_event) + event_type = raw_event.headers.delete(":event-type").value + # content_type = raw_event.headers.delete(":content-type").value + + if event_type == 'initial-response' + event = Struct.new(:event_type, :response).new + event.event_type = :initial_response + event.response = parse_payload(raw_event.payload.read, @output_ref) + return event + end + + # locate event from eventstream + name, ref = @rules.shape.member_by_location_name(event_type) + unless ref && ref.event + return Struct.new(:event_type, :raw_event_type, :raw_event) + .new(:unknown_event, event_type, raw_event) + end + + event = ref.shape.struct_class.new + + explicit_payload = false + implicit_payload_members = {} + ref.shape.members.each do |member_name, member_ref| + unless member_ref.eventheader + if member_ref.eventpayload + explicit_payload = true + else + implicit_payload_members[member_name] = member_ref + end + end + end + + # implicit payload + if !explicit_payload && !implicit_payload_members.empty?
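+ # Mirrors the builder's implicit-payload convention above: with no + # explicit eventpayload member, the whole payload is parsed into the + # event struct.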
+ event = parse_payload(raw_event.payload.read, ref) + end + event.event_type = name + + # locate payload and headers in the event + ref.shape.members.each do |member_name, member_ref| + if member_ref.eventheader + # allow incomplete event members in response + if raw_event.headers.key?(member_ref.location_name) + event.send("#{member_name}=", raw_event.headers[member_ref.location_name].value) + end + elsif member_ref.eventpayload + # explicit payload + eventpayload_streaming?(member_ref) ? + event.send("#{member_name}=", raw_event.payload) : + event.send("#{member_name}=", parse_payload(raw_event.payload.read, member_ref)) + end + end + event + end + + def eventpayload_streaming?(ref) + BlobShape === ref.shape || StringShape === ref.shape + end + + def parse_payload(body, rules) + @parser_class.new(rules).parse(body) if body.size > 0 + end + + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_decoder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_decoder.rb new file mode 100644 index 0000000..509235b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_decoder.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require 'aws-eventstream' + +module Aws + module Binary + # @api private + class EventStreamDecoder + + # @param [String] protocol + # @param [ShapeRef] rules ShapeRef of the eventstream member + # @param [ShapeRef] output_ref ShapeRef of output shape + # @param [Array] error_refs array of ShapeRefs for errors + # @param [EventStream|nil] event_stream_handler A Service EventStream object + # that registered with callbacks for processing events when they arrive + def initialize(protocol, rules, output_ref, error_refs, io, event_stream_handler = nil) + @decoder = Aws::EventStream::Decoder.new + @event_parser = EventParser.new(parser_class(protocol), rules, error_refs, output_ref) + @stream_class = extract_stream_class(rules.shape.struct_class) + @emitter = event_stream_handler.event_emitter + @events = [] + end + + # @return [Array] events Array of arrived event objects + attr_reader :events + + def write(chunk) + raw_event, eof = @decoder.decode_chunk(chunk) + emit_event(raw_event) if raw_event + while !eof + # exhaust message_buffer data + raw_event, eof = @decoder.decode_chunk + emit_event(raw_event) if raw_event + end + end + + private + + def emit_event(raw_event) + event = @event_parser.apply(raw_event) + @events << event + @emitter.signal(event.event_type, event) unless @emitter.nil? + end + + def parser_class(protocol) + case protocol + when 'rest-xml' then Aws::Xml::Parser + when 'rest-json' then Aws::Json::Parser + when 'json' then Aws::Json::Parser + else raise "unsupported protocol #{protocol} for event stream" + end + end + + def extract_stream_class(type_class) + parts = type_class.to_s.split('::') + parts.inject(Kernel) do |const, part_name| + part_name == 'Types' ? 
const.const_get('EventStreams') + : const.const_get(part_name) + end + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_encoder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_encoder.rb new file mode 100644 index 0000000..8b5afbb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/binary/event_stream_encoder.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require 'aws-eventstream' + +module Aws + module Binary + # @api private + class EventStreamEncoder + + # @param [String] protocol + # @param [ShapeRef] rules ShapeRef of the eventstream member + # @param [ShapeRef] input_ref ShapeRef of the input shape + # @param [Aws::Sigv4::Signer] signer + def initialize(protocol, rules, input_ref, signer) + @encoder = Aws::EventStream::Encoder.new + @event_builder = EventBuilder.new(serializer_class(protocol), rules) + @input_ref = input_ref + @rules = rules + @signer = signer + @prior_signature = nil + end + + attr_reader :rules + + attr_accessor :prior_signature + + def encode(event_type, params) + if event_type == :end_stream + payload = '' + else + payload = @encoder.encode(@event_builder.apply(event_type, params)) + end + headers, signature = @signer.sign_event(@prior_signature, payload, @encoder) + @prior_signature = signature + message = Aws::EventStream::Message.new( + headers: headers, + payload: StringIO.new(payload) + ) + @encoder.encode(message) + end + + private + + def serializer_class(protocol) + case protocol + when 'rest-xml' then Xml::Builder + when 'rest-json' then Json::Builder + when 'json' then Json::Builder + else raise "unsupported protocol #{protocol} for event stream" + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/publisher.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/publisher.rb new file mode 100644 index 0000000..04bd4ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/publisher.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +require 'thread' +require 'socket' + +module Aws + module ClientSideMonitoring + # @api private + class Publisher + attr_reader :agent_port + attr_reader :agent_host + + def initialize(opts = {}) + @agent_host = opts[:agent_host] || "127.0.0.1" + @agent_port = opts[:agent_port] + @mutex = Mutex.new + end + + def agent_port=(value) + @mutex.synchronize do + @agent_port = value + end + end + + def agent_host=(value) + @mutex.synchronize do + @agent_host = value + end + end + + def publish(request_metrics) + send_datagram(request_metrics.api_call.to_json) + request_metrics.api_call_attempts.each do |attempt| + send_datagram(attempt.to_json) + end + end + + def send_datagram(msg) + if @agent_port + socket = UDPSocket.new + begin + socket.connect(@agent_host, @agent_port) + socket.send(msg, 0) + rescue Errno::ECONNREFUSED + # Drop on the floor + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/request_metrics.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/request_metrics.rb new file mode 100644 index 0000000..53db907 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_side_monitoring/request_metrics.rb @@ -0,0 
+1,199 @@ +# frozen_string_literal: true + +module Aws + module ClientSideMonitoring + # @api private + class RequestMetrics + attr_reader :api_call, :api_call_attempts + + FIELD_MAX_LENGTH = { + "ClientId" => 255, + "UserAgent" => 256, + "SdkException" => 128, + "SdkExceptionMessage" => 512, + "AwsException" => 128, + "AwsExceptionMessage" => 512, + "FinalAwsException" => 128, + "FinalAwsExceptionMessage" => 512, + "FinalSdkException" => 128, + "FinalSdkExceptionMessage" => 512, + } + + def initialize(opts = {}) + @service = opts[:service] + @api = opts[:operation] + @client_id = opts[:client_id] + @timestamp = opts[:timestamp] # In epoch milliseconds + @region = opts[:region] + @version = 1 + @api_call = ApiCall.new(@service, @api, @client_id, @version, @timestamp, @region) + @api_call_attempts = [] + end + + def build_call_attempt(opts = {}) + timestamp = opts[:timestamp] + fqdn = opts[:fqdn] + region = opts[:region] + user_agent = opts[:user_agent] + access_key = opts[:access_key] + session_token = opts[:session_token] + ApiCallAttempt.new( + @service, + @api, + @client_id, + @version, + timestamp, + fqdn, + region, + user_agent, + access_key, + session_token + ) + end + + def add_call_attempt(attempt) + @api_call_attempts << attempt + end + + class ApiCall + attr_reader :service, :api, :client_id, :timestamp, :version, + :attempt_count, :latency, :region, :max_retries_exceeded, + :final_http_status_code, :user_agent, :final_aws_exception, + :final_aws_exception_message, :final_sdk_exception, + :final_sdk_exception_message + + def initialize(service, api, client_id, version, timestamp, region) + @service = service + @api = api + @client_id = client_id + @version = version + @timestamp = timestamp + @region = region + end + + def complete(opts = {}) + @latency = opts[:latency] + @attempt_count = opts[:attempt_count] + @user_agent = opts[:user_agent] + if opts[:final_error_retryable] + @max_retries_exceeded = 1 + else + @max_retries_exceeded = 0 + end + @final_http_status_code = opts[:final_http_status_code] + @final_aws_exception = opts[:final_aws_exception] + @final_aws_exception_message = opts[:final_aws_exception_message] + @final_sdk_exception = opts[:final_sdk_exception] + @final_sdk_exception_message = opts[:final_sdk_exception_message] + @region = opts[:region] if opts[:region] # in case region changes + end + + def to_json(*a) + document = { + "Type" => "ApiCall", + "Service" => @service, + "Api" => @api, + "ClientId" => @client_id, + "Timestamp" => @timestamp, + "Version" => @version, + "AttemptCount" => @attempt_count, + "Latency" => @latency, + "Region" => @region, + "MaxRetriesExceeded" => @max_retries_exceeded, + "UserAgent" => @user_agent, + "FinalHttpStatusCode" => @final_http_status_code, + } + document["FinalSdkException"] = @final_sdk_exception if @final_sdk_exception + document["FinalSdkExceptionMessage"] = @final_sdk_exception_message if @final_sdk_exception_message + document["FinalAwsException"] = @final_aws_exception if @final_aws_exception + document["FinalAwsExceptionMessage"] = @final_aws_exception_message if @final_aws_exception_message + document = _truncate(document) + document.to_json + end + + private + def _truncate(document) + document.each do |key, value| + limit = FIELD_MAX_LENGTH[key] + if limit && value.to_s.length > limit + document[key] = value.to_s.slice(0...limit) + end + end + document + end + end + + class ApiCallAttempt + attr_reader :service, :api, :client_id, :version, :timestamp, + :user_agent, :access_key, :session_token + attr_accessor 
:region, :fqdn, :request_latency, :http_status_code, + :aws_exception_msg, :x_amz_request_id, :x_amz_id_2, + :x_amzn_request_id, :sdk_exception, :aws_exception, :sdk_exception_msg + + def initialize( + service, + api, + client_id, + version, + timestamp, + fqdn, + region, + user_agent, + access_key, + session_token + ) + @service = service + @api = api + @client_id = client_id + @version = version + @timestamp = timestamp + @fqdn = fqdn + @region = region + @user_agent = user_agent + @access_key = access_key + @session_token = session_token + end + + def to_json(*a) + json = { + "Type" => "ApiCallAttempt", + "Service" => @service, + "Api" => @api, + "ClientId" => @client_id, + "Timestamp" => @timestamp, + "Version" => @version, + "Fqdn" => @fqdn, + "Region" => @region, + "UserAgent" => @user_agent, + "AccessKey" => @access_key + } + # Optional Fields + json["SessionToken"] = @session_token if @session_token + json["HttpStatusCode"] = @http_status_code if @http_status_code + json["AwsException"] = @aws_exception if @aws_exception + json["AwsExceptionMessage"] = @aws_exception_msg if @aws_exception_msg + json["XAmznRequestId"] = @x_amzn_request_id if @x_amzn_request_id + json["XAmzRequestId"] = @x_amz_request_id if @x_amz_request_id + json["XAmzId2"] = @x_amz_id_2 if @x_amz_id_2 + json["AttemptLatency"] = @request_latency if @request_latency + json["SdkException"] = @sdk_exception if @sdk_exception + json["SdkExceptionMessage"] = @sdk_exception_msg if @sdk_exception_msg + json = _truncate(json) + json.to_json + end + + private + def _truncate(document) + document.each do |key, value| + limit = FIELD_MAX_LENGTH[key] + if limit && value.to_s.length > limit + document[key] = value.to_s.slice(0...limit) + end + end + document + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_stubs.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_stubs.rb new file mode 100644 index 0000000..c875909 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/client_stubs.rb @@ -0,0 +1,317 @@ +# frozen_string_literal: true + +require 'thread' + +module Aws + + # This module provides the ability to specify the data and/or errors to + # return when a client is using stubbed responses. Pass + # `:stub_responses => true` to a client constructor to enable this + # behavior. + # + # Also allows you to see the requests made by the client by reading the + # api_requests instance variable. + module ClientStubs + + # @api private + def setup_stubbing + @stubs = {} + @stub_mutex = Mutex.new + if Hash === @config.stub_responses + @config.stub_responses.each do |operation_name, stubs| + apply_stubs(operation_name, Array === stubs ? stubs : [stubs]) + end + end + + # When a client is stubbed allow the user to access the requests made + @api_requests = [] + + requests = @api_requests + self.handle do |context| + requests << { + operation_name: context.operation_name, + params: context.params, + context: context + } + @handler.call(context) + end + end + + # Configures what data / errors should be returned from the named operation + # when response stubbing is enabled. + # + # ## Basic usage + # + # When you enable response stubbing, the client will generate fake + # responses and will not make any HTTP requests. + # + # client = Aws::S3::Client.new(stub_responses: true) + # client.list_buckets + # #=> #<struct Aws::S3::Types::ListBucketsOutput buckets=[], owner=nil> + # + # You can provide stub data that will be returned by the client.
+ # + # # stub data in the constructor + # client = Aws::S3::Client.new(stub_responses: { + # list_buckets: { buckets: [{name: 'my-bucket' }] }, + # get_object: { body: 'data' }, + # }) + # + # client.list_buckets.buckets.map(&:name) #=> ['my-bucket'] + # client.get_object(bucket:'name', key:'key').body.read #=> 'data' + # + # You can also specify the stub data using {#stub_responses}: + # + # client = Aws::S3::Client.new(stub_responses: true) + # client.stub_responses(:list_buckets, { + # buckets: [{ name: 'my-bucket' }] + # }) + # + # client.list_buckets.buckets.map(&:name) + # #=> ['my-bucket'] + # + # With a Resource class, call {#stub_responses} on the corresponding client: + # + # s3 = Aws::S3::Resource.new(stub_responses: true) + # s3.client.stub_responses(:list_buckets, { + # buckets: [{ name: 'my-bucket' }] + # }) + # + # s3.buckets.map(&:name) + # #=> ['my-bucket'] + # + # Lastly, default stubs can be configured via `Aws.config`: + # + # Aws.config[:s3] = { + # stub_responses: { + # list_buckets: { buckets: [{name: 'my-bucket' }] } + # } + # } + # + # Aws::S3::Client.new.list_buckets.buckets.map(&:name) + # #=> ['my-bucket'] + # + # Aws::S3::Resource.new.buckets.map(&:name) + # #=> ['my-bucket'] + # + # ## Dynamic Stubbing + # + # In addition to creating static stubs, it's also possible to generate + # stubs dynamically based on the parameters with which operations were + # called, by passing a `Proc` object: + # + # s3 = Aws::S3::Resource.new(stub_responses: true) + # s3.client.stub_responses(:put_object, -> (context) { + # s3.client.stub_responses(:get_object, content_type: context.params[:content_type]) + # }) + # + # The yielded object is an instance of {Seahorse::Client::RequestContext}. + # + # ## Stubbing Errors + # + # When stubbing is enabled, the SDK defaults to generating + # fake responses with placeholder values. You can override the data + # returned. You can also specify errors it should raise. + # + # # simulate service errors, give the error code + # client.stub_responses(:get_object, 'NotFound') + # client.get_object(bucket:'aws-sdk', key:'foo') + # #=> raises Aws::S3::Errors::NotFound + # + # # to simulate other errors, give the error class, you must + # # be able to construct an instance with `.new` + # client.stub_responses(:get_object, Timeout::Error) + # client.get_object(bucket:'aws-sdk', key:'foo') + # #=> raises new Timeout::Error + # + # # or you can give an instance of an error class + # client.stub_responses(:get_object, RuntimeError.new('custom message')) + # client.get_object(bucket:'aws-sdk', key:'foo') + # #=> raises the given runtime error object + # + # ## Stubbing HTTP Responses + # + # As an alternative to providing the response data, you can provide + # an HTTP response. + # + # client.stub_responses(:get_object, { + # status_code: 200, + # headers: { 'header-name' => 'header-value' }, + # body: "...", + # }) + # + # To stub an HTTP response, pass a Hash with all three of the following + # keys set: + # + # * **`:status_code`** - <Integer> - The HTTP status code + # * **`:headers`** - Hash<String, String> - A hash of HTTP header keys and values + # * **`:body`** - <String, IO> - The HTTP response body. + # + # ## Stubbing Multiple Responses + # + # Calling an operation multiple times will return similar responses. + # You can configure multiple stubs and they will be returned in sequence.
+ # + # client.stub_responses(:head_object, [ + # 'NotFound', + # { content_length: 150 }, + # ]) + # + # client.head_object(bucket:'aws-sdk', key:'foo') + # #=> raises Aws::S3::Errors::NotFound + # + # resp = client.head_object(bucket:'aws-sdk', key:'foo') + # resp.content_length #=> 150 + # + # @param [Symbol] operation_name + # + # @param [Mixed] stubs One or more responses to return from the named + # operation. + # + # @return [void] + # + # @raise [RuntimeError] Raises a runtime error when called + # on a client that has not enabled response stubbing via + # `:stub_responses => true`. + def stub_responses(operation_name, *stubs) + if config.stub_responses + apply_stubs(operation_name, stubs.flatten) + else + msg = 'stubbing is not enabled; enable stubbing in the constructor '\ + 'with `:stub_responses => true`' + raise msg + end + end + + # Allows you to access all of the requests that the stubbed client has made. + # + # @param [Hash] options The options for the api requests. + # @option options [Boolean] :exclude_presign (false) Set to true to filter + # out unsent requests from generated presigned urls. + # @return [Array] Returns an array of the api requests made. Each request + # object contains the :operation_name, :params, and :context. + # @raise [NotImplementedError] Raises `NotImplementedError` when the client + # is not stubbed. + def api_requests(options = {}) + if config.stub_responses + if options[:exclude_presign] + @api_requests.reject {|req| req[:context][:presigned_url] } + else + @api_requests + end + else + msg = 'This method is only implemented for stubbed clients, and is '\ + 'available when you enable stubbing in the constructor with `stub_responses: true`' + raise NotImplementedError.new(msg) + end + end + + # Generates and returns stubbed response data from the named operation. + # + # s3 = Aws::S3::Client.new + # s3.stub_data(:list_buckets) + # #=> #<struct Aws::S3::Types::ListBucketsOutput buckets=[], owner=#<struct Aws::S3::Types::Owner display_name=nil, id=nil>> + # + # In addition to generating default stubs, you can provide data to + # apply to the response stub. + # + # s3.stub_data(:list_buckets, buckets:[{name:'aws-sdk'}]) + # #=> #<struct Aws::S3::Types::ListBucketsOutput buckets=[#<struct Aws::S3::Types::Bucket name="aws-sdk", creation_date=nil>], + # owner=#<struct Aws::S3::Types::Owner display_name=nil, id=nil>> + # + # @param [Symbol] operation_name + # @param [Hash] data + # @return [Structure] Returns a stubbed response data structure. The + # actual class returned will depend on the given `operation_name`. + def stub_data(operation_name, data = {}) + Stubbing::StubData.new(config.api.operation(operation_name)).stub(data) + end + + # @api private + def next_stub(context) + operation_name = context.operation_name.to_sym + stub = @stub_mutex.synchronize do + stubs = @stubs[operation_name] || [] + case stubs.length + when 0 then default_stub(operation_name) + when 1 then stubs.first + else stubs.shift + end + end + Proc === stub ? convert_stub(operation_name, stub.call(context)) : stub + end + + private + + def default_stub(operation_name) + stub = stub_data(operation_name) + http_response_stub(operation_name, stub) + end + + # This method converts the given stub data into an HTTP + # response (when possible). This enables the response stubbing + # plugin to provide an HTTP response that triggers all normal events + # during response handling.
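+ # An editor's sketch (bucket and key names are illustrative) showing how + # sequenced stubs combine with the request log exposed by `#api_requests`: + # + # client = Aws::S3::Client.new(stub_responses: true) + # client.stub_responses(:head_object, [{ content_length: 150 }]) + # client.head_object(bucket: 'aws-sdk', key: 'foo') + # req = client.api_requests.first + # req[:operation_name] #=> :head_object + # req[:params] #=> {:bucket=>"aws-sdk", :key=>"foo"} + #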
+ def apply_stubs(operation_name, stubs) + @stub_mutex.synchronize do + @stubs[operation_name.to_sym] = stubs.map do |stub| + convert_stub(operation_name, stub) + end + end + end + + def convert_stub(operation_name, stub) + stub = case stub + when Proc then stub + when Exception, Class then { error: stub } + when String then service_error_stub(stub) + when Hash then http_response_stub(operation_name, stub) + else { data: stub } + end + if Hash === stub + stub[:mutex] = Mutex.new + end + stub + end + + def service_error_stub(error_code) + { http: protocol_helper.stub_error(error_code) } + end + + def http_response_stub(operation_name, data) + if Hash === data && data.keys.sort == [:body, :headers, :status_code] + { http: hash_to_http_resp(data) } + else + { http: data_to_http_resp(operation_name, data) } + end + end + + def hash_to_http_resp(data) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = data[:status_code] + http_resp.headers.update(data[:headers]) + http_resp.body = data[:body] + http_resp + end + + def data_to_http_resp(operation_name, data) + api = config.api + operation = api.operation(operation_name) + ParamValidator.new(operation.output, input: false).validate!(data) + protocol_helper.stub_data(api, operation, data) + end + + def protocol_helper + case config.api.metadata['protocol'] + when 'json' then Stubbing::Protocols::Json + when 'query' then Stubbing::Protocols::Query + when 'ec2' then Stubbing::Protocols::EC2 + when 'rest-json' then Stubbing::Protocols::RestJson + when 'rest-xml' then Stubbing::Protocols::RestXml + when 'api-gateway' then Stubbing::Protocols::ApiGateway + else raise "unsupported protocol" + end.new + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider.rb new file mode 100644 index 0000000..1c8d30a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module Aws + module CredentialProvider + + # @return [Credentials] + attr_reader :credentials + + # @return [Boolean] + def set? + !!credentials && credentials.set? + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider_chain.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider_chain.rb new file mode 100644 index 0000000..7574659 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credential_provider_chain.rb @@ -0,0 +1,183 @@ +# frozen_string_literal: true + +module Aws + # @api private + class CredentialProviderChain + def initialize(config = nil) + @config = config + end + + # @return [CredentialProvider, nil] + def resolve + providers.each do |method_name, options| + provider = send(method_name, options.merge(config: @config)) + return provider if provider && provider.set? 
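+ # No credentials from this provider; fall through and try the next one in the chain.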
+ end + nil + end + + private + + def providers + [ + [:static_credentials, {}], + [:static_profile_assume_role_web_identity_credentials, {}], + [:static_profile_sso_credentials, {}], + [:static_profile_assume_role_credentials, {}], + [:static_profile_credentials, {}], + [:static_profile_process_credentials, {}], + [:env_credentials, {}], + [:assume_role_web_identity_credentials, {}], + [:sso_credentials, {}], + [:assume_role_credentials, {}], + [:shared_credentials, {}], + [:process_credentials, {}], + [:instance_profile_credentials, { + retries: @config ? @config.instance_profile_credentials_retries : 0, + http_open_timeout: @config ? @config.instance_profile_credentials_timeout : 1, + http_read_timeout: @config ? @config.instance_profile_credentials_timeout : 1 + }] + ] + end + + def static_credentials(options) + if options[:config] + Credentials.new( + options[:config].access_key_id, + options[:config].secret_access_key, + options[:config].session_token + ) + end + end + + def static_profile_assume_role_web_identity_credentials(options) + if Aws.shared_config.config_enabled? && options[:config] && options[:config].profile + Aws.shared_config.assume_role_web_identity_credentials_from_config( + profile: options[:config].profile, + region: options[:config].region + ) + end + end + + def static_profile_sso_credentials(options) + if Aws.shared_config.config_enabled? && options[:config] && options[:config].profile + Aws.shared_config.sso_credentials_from_config( + profile: options[:config].profile + ) + end + end + + def static_profile_assume_role_credentials(options) + if Aws.shared_config.config_enabled? && options[:config] && options[:config].profile + assume_role_with_profile(options, options[:config].profile) + end + end + + def static_profile_credentials(options) + if options[:config] && options[:config].profile + SharedCredentials.new(profile_name: options[:config].profile) + end + rescue Errors::NoSuchProfileError + nil + end + + def static_profile_process_credentials(options) + if Aws.shared_config.config_enabled? && options[:config] && options[:config].profile + process_provider = Aws.shared_config.credential_process(profile: options[:config].profile) + ProcessCredentials.new(process_provider) if process_provider + end + rescue Errors::NoSuchProfileError + nil + end + + def env_credentials(_options) + key = %w[AWS_ACCESS_KEY_ID AMAZON_ACCESS_KEY_ID AWS_ACCESS_KEY] + secret = %w[AWS_SECRET_ACCESS_KEY AMAZON_SECRET_ACCESS_KEY AWS_SECRET_KEY] + token = %w[AWS_SESSION_TOKEN AMAZON_SESSION_TOKEN] + Credentials.new(envar(key), envar(secret), envar(token)) + end + + def envar(keys) + keys.each do |key| + return ENV[key] if ENV.key?(key) + end + nil + end + + def determine_profile_name(options) + (options[:config] && options[:config].profile) || ENV['AWS_PROFILE'] || ENV['AWS_DEFAULT_PROFILE'] || 'default' + end + + def shared_credentials(options) + profile_name = determine_profile_name(options) + SharedCredentials.new(profile_name: profile_name) + rescue Errors::NoSuchProfileError + nil + end + + def process_credentials(options) + profile_name = determine_profile_name(options) + if Aws.shared_config.config_enabled? && + (process_provider = Aws.shared_config.credential_process(profile: profile_name)) + ProcessCredentials.new(process_provider) + end + rescue Errors::NoSuchProfileError + nil + end + + def sso_credentials(options) + profile_name = determine_profile_name(options) + if Aws.shared_config.config_enabled? 
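+ # Resolve SSO credentials for the active profile from the shared config file.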
+ Aws.shared_config.sso_credentials_from_config(profile: profile_name) + end + rescue Errors::NoSuchProfileError + nil + end + + def assume_role_credentials(options) + if Aws.shared_config.config_enabled? + assume_role_with_profile(options, determine_profile_name(options)) + end + end + + def assume_role_web_identity_credentials(options) + region = options[:config].region if options[:config] + if (role_arn = ENV['AWS_ROLE_ARN']) && (token_file = ENV['AWS_WEB_IDENTITY_TOKEN_FILE']) + cfg = { + role_arn: role_arn, + web_identity_token_file: token_file, + role_session_name: ENV['AWS_ROLE_SESSION_NAME'] + } + cfg[:region] = region if region + AssumeRoleWebIdentityCredentials.new(cfg) + elsif Aws.shared_config.config_enabled? + profile = options[:config].profile if options[:config] + Aws.shared_config.assume_role_web_identity_credentials_from_config( + profile: profile, + region: region + ) + end + end + + def instance_profile_credentials(options) + profile_name = determine_profile_name(options) + if ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] || + ENV['AWS_CONTAINER_CREDENTIALS_FULL_URI'] + ECSCredentials.new(options) + else + InstanceProfileCredentials.new(options.merge(profile: profile_name)) + end + end + + def assume_role_with_profile(options, profile_name) + assume_opts = { + profile: profile_name, + chain_config: @config + } + if options[:config] && options[:config].region + assume_opts[:region] = options[:config].region + end + Aws.shared_config.assume_role_credentials_from_config(assume_opts) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credentials.rb new file mode 100644 index 0000000..964074f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/credentials.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Aws + class Credentials + + # @param [String] access_key_id + # @param [String] secret_access_key + # @param [String] session_token (nil) + def initialize(access_key_id, secret_access_key, session_token = nil) + @access_key_id = access_key_id + @secret_access_key = secret_access_key + @session_token = session_token + end + + # @return [String, nil] + attr_reader :access_key_id + + # @return [String, nil] + attr_reader :secret_access_key + + # @return [String, nil] + attr_reader :session_token + + # @return [Credentials] + def credentials + self + end + + # @return [Boolean] Returns `true` if the access key id and secret + # access key are both set. + def set? + !access_key_id.nil? && + !access_key_id.empty? && + !secret_access_key.nil? && + !secret_access_key.empty? + end + + # Removing the secret access key from the default inspect string. + # @api private + def inspect + "#<#{self.class.name} access_key_id=#{access_key_id.inspect}>" + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/deprecations.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/deprecations.rb new file mode 100644 index 0000000..167d403 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/deprecations.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module Aws + + # A utility module that provides a class method that wraps + # a method such that it generates a deprecation warning when called. 
+ # Given the following class: + # + # class Example + # + # def do_something + # end + # + # end + # + # If you want to deprecate the `#do_something` method, you can extend + # this module and then call `deprecated` on the method (after it + # has been defined). + # + # class Example + # + # extend Aws::Deprecations + # + # def do_something + # end + # + # def do_something_else + # end + # + # deprecated :do_something + # + # end + # + # The `#do_something` method will continue to function, but will + # generate a deprecation warning when called. + # + # @api private + module Deprecations + + # @param [Symbol] method The name of the deprecated method. + # + # @option options [String] :message The warning message to issue + # when the deprecated method is called. + # + # @option options [String] :use The name of a method that should be used. + # + # @option options [String] :version The version that will remove the + # deprecated method. + # + def deprecated(method, options = {}) + + deprecation_msg = options[:message] || begin + "#################### DEPRECATION WARNING ####################\n"\ + "Called deprecated method `#{method}` of #{self}."\ + "#{" Use `#{options[:use]}` instead.\n" if options[:use]}"\ + "#{"Method `#{method}` will be removed in #{options[:version]}."\ + if options[:version]}"\ + "\n#############################################################" + end + + alias_method(:"deprecated_#{method}", method) + + warned = false # we only want to issue this warning once + + define_method(method) do |*args, &block| + unless warned + warned = true + warn(deprecation_msg + "\n" + caller.join("\n")) + end + send("deprecated_#{method}", *args, &block) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/eager_loader.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/eager_loader.rb new file mode 100644 index 0000000..df7508d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/eager_loader.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +require 'set' + +module Aws + # @api private + class EagerLoader + + def initialize + @loaded = Set.new + end + + # @return [Set] + attr_reader :loaded + + # @param [Module] klass_or_module + # @return [self] + def load(klass_or_module) + @loaded << klass_or_module + klass_or_module.constants.each do |const_name| + path = klass_or_module.autoload?(const_name) + begin + require(path) if path + const = klass_or_module.const_get(const_name) + self.load(const) if Module === const && !@loaded.include?(const) + rescue LoadError + end + end + self + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ec2_metadata.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ec2_metadata.rb new file mode 100644 index 0000000..8f4a6aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ec2_metadata.rb @@ -0,0 +1,238 @@ +# frozen_string_literal: true + +require 'time' +require 'net/http' + +module Aws + # A client that can query version 2 of the EC2 Instance Metadata + class EC2Metadata + # Path for PUT request for token + # @api private + METADATA_TOKEN_PATH = '/latest/api/token'.freeze + + # Raised when the PUT request is not valid. This would be thrown if + # `token_ttl` is not an Integer. + # @api private + class TokenRetrievalError < RuntimeError; end + + # Token has expired, and the request can be retried with a new token. 
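+ # (Corresponds to a 401 response from IMDS; see #http_get below.)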
+ # @api private + class TokenExpiredError < RuntimeError; end + + # The requested metadata path does not exist. + # @api private + class MetadataNotFoundError < RuntimeError; end + + # The request is not allowed or IMDS is turned off. + # @api private + class RequestForbiddenError < RuntimeError; end + + # Creates a client that can query version 2 of the EC2 Instance Metadata + # service (IMDS). + # + # @note Customers using containers may need to increase their hop limit + # to access IMDSv2. + # @see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#instance-metadata-transition-to-version-2 + # + # @param [Hash] options + # @option options [Integer] :token_ttl (21600) The session token's TTL, + # defaulting to 6 hours. + # @option options [Integer] :retries (3) The number of retries for failed + # requests. + # @option options [String] :endpoint ('http://169.254.169.254') The IMDS + # endpoint. This option has precedence over the :endpoint_mode. + # @option options [String] :endpoint_mode ('IPv4') The endpoint mode for + # the instance metadata service. This is either 'IPv4' + # ('http://169.254.169.254') or 'IPv6' ('http://[fd00:ec2::254]'). + # @option options [Integer] :port (80) The IMDS endpoint port. + # @option options [Integer] :http_open_timeout (1) The number of seconds to + # wait for the connection to open. + # @option options [Integer] :http_read_timeout (1) The number of seconds for + # one chunk of data to be read. + # @option options [IO] :http_debug_output An output stream for debugging. Do + # not use this in production. + # @option options [Integer,Proc] :backoff A backoff used for retryable + # requests. When given an Integer, it sleeps that amount. When given a + # Proc, it is called with the current number of failed retries. + def initialize(options = {}) + @token_ttl = options[:token_ttl] || 21_600 + @retries = options[:retries] || 3 + @backoff = backoff(options[:backoff]) + + endpoint_mode = options[:endpoint_mode] || 'IPv4' + @endpoint = resolve_endpoint(options[:endpoint], endpoint_mode) + @port = options[:port] || 80 + + @http_open_timeout = options[:http_open_timeout] || 1 + @http_read_timeout = options[:http_read_timeout] || 1 + @http_debug_output = options[:http_debug_output] + + @token = nil + @mutex = Mutex.new + end + + # Fetches a given metadata category using a String path, and returns the + # result as a String. A path starts with the API version (usually + # "/latest/"). See the instance data categories for possible paths. + # + # @example Fetching the instance ID + # + # ec2_metadata = Aws::EC2Metadata.new + # ec2_metadata.get('/latest/meta-data/instance-id') + # => "i-023a25f10a73a0f79" + # + # @note This implementation always returns a String and will not parse any + # responses. Parsable responses may include JSON objects or directory + # listings, which are strings separated by line feeds (ASCII 10). + # + # @example Fetching and parsing JSON meta-data + # + # require 'json' + # data = ec2_metadata.get('/latest/dynamic/instance-identity/document') + # JSON.parse(data) + # => {"accountId"=>"012345678912", ... } + # + # @example Fetching and parsing directory listings + # + # listing = ec2_metadata.get('/latest/meta-data') + # listing.split(10.chr) + # => ["ami-id", "ami-launch-index", ...] + # + # @note Unlike other services, IMDS does not have a service API model. This + # means that we cannot confidently generate code with methods and + # response structures. 
This implementation ensures that new IMDS features + # are always supported by being deployed to the instance and does not + # require code changes. + # + # @see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html + # @see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html + # @param [String] path The full path to the metadata. + def get(path) + retry_errors(max_retries: @retries) do + @mutex.synchronize do + fetch_token unless @token && !@token.expired? + end + + open_connection do |conn| + http_get(conn, path, @token.value) + end + end + end + + private + + def resolve_endpoint(endpoint, endpoint_mode) + return endpoint if endpoint + + case endpoint_mode.downcase + when 'ipv4' then 'http://169.254.169.254' + when 'ipv6' then 'http://[fd00:ec2::254]' + else + raise ArgumentError, + ':endpoint_mode is not valid, expected IPv4 or IPv6, '\ + "got: #{endpoint_mode}" + end + end + + def fetch_token + open_connection do |conn| + created_time = Time.now + token_value, token_ttl = http_put(conn, @token_ttl) + @token = Token.new(value: token_value, ttl: token_ttl, created_time: created_time) + end + end + + def http_get(connection, path, token) + headers = { + 'User-Agent' => "aws-sdk-ruby3/#{CORE_GEM_VERSION}", + 'x-aws-ec2-metadata-token' => token + } + request = Net::HTTP::Get.new(path, headers) + response = connection.request(request) + + case response.code.to_i + when 200 + response.body + when 401 + raise TokenExpiredError + when 404 + raise MetadataNotFoundError + end + end + + def http_put(connection, ttl) + headers = { + 'User-Agent' => "aws-sdk-ruby3/#{CORE_GEM_VERSION}", + 'x-aws-ec2-metadata-token-ttl-seconds' => ttl.to_s + } + request = Net::HTTP::Put.new(METADATA_TOKEN_PATH, headers) + response = connection.request(request) + + case response.code.to_i + when 200 + [ + response.body, + response.header['x-aws-ec2-metadata-token-ttl-seconds'].to_i + ] + when 400 + raise TokenRetrievalError + when 403 + raise RequestForbiddenError + end + end + + def open_connection + uri = URI.parse(@endpoint) + http = Net::HTTP.new(uri.hostname || @endpoint, @port || uri.port) + http.open_timeout = @http_open_timeout + http.read_timeout = @http_read_timeout + http.set_debug_output(@http_debug_output) if @http_debug_output + http.start + yield(http).tap { http.finish } + end + + def retry_errors(options = {}, &_block) + max_retries = options[:max_retries] + retries = 0 + begin + yield + # These errors should not be retried. + rescue TokenRetrievalError, MetadataNotFoundError, RequestForbiddenError + raise + # StandardError is not ideal but it covers Net::HTTP errors. + # https://gist.github.com/tenderlove/245188 + rescue StandardError, TokenExpiredError + raise unless retries < max_retries + + @backoff.call(retries) + retries += 1 + retry + end + end + + def backoff(backoff) + case backoff + when Proc then backoff + when Numeric then ->(_) { Kernel.sleep(backoff) } + else ->(num_failures) { Kernel.sleep(1.2**num_failures) } + end + end + + # @api private + class Token + def initialize(options = {}) + @ttl = options[:ttl] + @value = options[:value] + @created_time = options[:created_time] || Time.now + end + + # [String] Returns the token value. + attr_reader :value + + # [Boolean] Returns true if the token expired. + def expired? 
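+ # A token is expired once its age exceeds the TTL (in seconds).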
+ Time.now - @created_time > @ttl + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ecs_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ecs_credentials.rb new file mode 100644 index 0000000..f0f5d36 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ecs_credentials.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +require 'time' +require 'net/http' +require 'resolv' + +module Aws + # An auto-refreshing credential provider that loads credentials from + # instances running in ECS. + # + # ecs_credentials = Aws::ECSCredentials.new(retries: 3) + # ec2 = Aws::EC2::Client.new(credentials: ecs_credentials) + class ECSCredentials + include CredentialProvider + include RefreshingCredentials + + # @api private + class Non200Response < RuntimeError; end + + # These are the errors we trap when attempting to talk to the + # instance metadata service. Any of these imply the service + # is not present, not responding, or in some other non-recoverable + # error state. + # @api private + NETWORK_ERRORS = [ + Errno::EHOSTUNREACH, + Errno::ECONNREFUSED, + Errno::EHOSTDOWN, + Errno::ENETUNREACH, + SocketError, + Timeout::Error, + Non200Response + ].freeze + + # @param [Hash] options + # @option options [Integer] :retries (5) Number of times to retry + # when retrieving credentials. + # @option options [String] :ip_address ('169.254.170.2') This value is + # ignored if `endpoint` is set and `credential_path` is not set. + # @option options [Integer] :port (80) This value is ignored if `endpoint` + # is set and `credential_path` is not set. + # @option options [String] :credential_path By default, the value of the + # AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable. + # @option options [String] :endpoint The ECS credential endpoint. + # By default, this is the value of the AWS_CONTAINER_CREDENTIALS_FULL_URI + # environment variable. This value is ignored if `credential_path` or + # ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] is set. + # @option options [Float] :http_open_timeout (5) + # @option options [Float] :http_read_timeout (5) + # @option options [Numeric, Proc] :delay By default, failures are retried + # with exponential back-off, i.e. `sleep(1.2 ** num_failures)`. You can + # pass a number of seconds to sleep between failed attempts, or + # a Proc that accepts the number of failures. + # @option options [IO] :http_debug_output (nil) HTTP wire + # traces are sent to this object. You can specify something + # like $stdout. + # @option options [Callable] before_refresh Proc called before + # credentials are refreshed. `before_refresh` is called + # with an instance of this object when + # AWS credentials are required and need to be refreshed. + def initialize(options = {}) + credential_path = options[:credential_path] || + ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] + endpoint = options[:endpoint] || + ENV['AWS_CONTAINER_CREDENTIALS_FULL_URI'] + initialize_uri(options, credential_path, endpoint) + @authorization_token = ENV['AWS_CONTAINER_AUTHORIZATION_TOKEN'] + + @retries = options[:retries] || 5 + @http_open_timeout = options[:http_open_timeout] || 5 + @http_read_timeout = options[:http_read_timeout] || 5 + @http_debug_output = options[:http_debug_output] + @backoff = backoff(options[:backoff]) + @async_refresh = false + super + end + + # @return [Integer] The number of times to retry failed attempts to + # fetch credentials from the instance metadata service.
Defaults to 5. + attr_reader :retries + + private + + def initialize_uri(options, credential_path, endpoint) + if credential_path + initialize_relative_uri(options, credential_path) + # Use FULL_URI/endpoint only if RELATIVE_URI/path is not set + elsif endpoint + initialize_full_uri(endpoint) + else + raise ArgumentError, + 'Cannot instantiate an ECS Credential Provider '\ + 'without a credential path or endpoint.' + end + end + + def initialize_relative_uri(options, path) + @host = options[:ip_address] || '169.254.170.2' + @port = options[:port] || 80 + @scheme = 'http' + @credential_path = path + end + + def initialize_full_uri(endpoint) + uri = URI.parse(endpoint) + validate_full_uri!(uri) + @host = uri.host + @port = uri.port + @scheme = uri.scheme + @credential_path = uri.path + end + + # Validate that the full URI is using a loopback address if scheme is http. + def validate_full_uri!(full_uri) + return unless full_uri.scheme == 'http' + + begin + return if ip_loopback?(IPAddr.new(full_uri.host)) + rescue IPAddr::InvalidAddressError + addresses = Resolv.getaddresses(full_uri.host) + return if addresses.all? { |addr| ip_loopback?(IPAddr.new(addr)) } + end + + raise ArgumentError, + 'AWS_CONTAINER_CREDENTIALS_FULL_URI must use a loopback '\ + 'address when using the http scheme.' + end + + # loopback? method is available in Ruby 2.5+ + # Replicate the logic here. + def ip_loopback?(ip_address) + case ip_address.family + when Socket::AF_INET + ip_address & 0xff000000 == 0x7f000000 + when Socket::AF_INET6 + ip_address == 1 + else + false + end + end + + def backoff(backoff) + case backoff + when Proc then backoff + when Numeric then ->(_) { sleep(backoff) } + else ->(num_failures) { Kernel.sleep(1.2**num_failures) } + end + end + + def refresh + # Retry loading credentials up to 3 times if the instance metadata + # service is responding but is returning invalid JSON documents + # in response to the GET profile credentials call. + + retry_errors([Aws::Json::ParseError, StandardError], max_retries: 3) do + c = Aws::Json.load(get_credentials.to_s) + @credentials = Credentials.new( + c['AccessKeyId'], + c['SecretAccessKey'], + c['Token'] + ) + @expiration = c['Expiration'] ? Time.iso8601(c['Expiration']) : nil + end + rescue Aws::Json::ParseError + raise Aws::Errors::MetadataParserError + end + + def get_credentials + # Retry loading credentials a configurable number of times if + # the instance metadata service is not responding.
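+ # Transient network failures are retried with the configured backoff + # (exponential `1.2 ** num_failures` sleeps by default).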
+ + retry_errors(NETWORK_ERRORS, max_retries: @retries) do + open_connection do |conn| + http_get(conn, @credential_path) + end + end + rescue StandardError + '{}' + end + + def open_connection + http = Net::HTTP.new(@host, @port, nil) + http.open_timeout = @http_open_timeout + http.read_timeout = @http_read_timeout + http.set_debug_output(@http_debug_output) if @http_debug_output + http.use_ssl = @scheme == 'https' + http.start + yield(http).tap { http.finish } + end + + def http_get(connection, path) + request = Net::HTTP::Get.new(path) + request['Authorization'] = @authorization_token if @authorization_token + response = connection.request(request) + raise Non200Response unless response.code.to_i == 200 + + response.body + end + + def retry_errors(error_classes, options = {}) + max_retries = options[:max_retries] + retries = 0 + begin + yield + rescue *error_classes => _e + raise unless retries < max_retries + + @backoff.call(retries) + retries += 1 + retry + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoint_cache.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoint_cache.rb new file mode 100644 index 0000000..8970fd9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoint_cache.rb @@ -0,0 +1,193 @@ +# frozen_string_literal: true + +module Aws + # @api private + # an LRU cache for endpoint data + class EndpointCache + + # default cache entries limit + MAX_ENTRIES = 1000 + + # default max threads pool size + MAX_THREADS = 10 + + def initialize(options = {}) + @max_entries = options[:max_entries] || MAX_ENTRIES + @entries = {} # store endpoints + @max_threads = options[:max_threads] || MAX_THREADS + @pool = {} # store polling threads + @mutex = Mutex.new + @require_identifier = nil # whether the endpoint operation supports identifiers + end + + # @return [Integer] Max size limit of cache + attr_reader :max_entries + + # @return [Integer] Max count of polling threads + attr_reader :max_threads + + # @return [Hash] Polling threads pool + attr_reader :pool + + # @param [String] key + # @return [Endpoint] + def [](key) + @mutex.synchronize do + # fetching an existing endpoint deletes it and then re-appends it (LRU order) + endpoint = @entries[key] + if endpoint + @entries.delete(key) + @entries[key] = endpoint + end + endpoint + end + end + + # @param [String] key + # @param [Hash] value + def []=(key, value) + @mutex.synchronize do + # delete the least recently used endpoint when the cache is full + unless @entries.size < @max_entries + old_key, = @entries.shift + delete_polling_thread(old_key) + end + # delete old value if exists + @entries.delete(key) + @entries[key] = Endpoint.new(value.to_hash) + end + end + + # checking whether an unexpired endpoint key exists in cache + # @param [String] key + # @return [Boolean] + def key?(key) + @mutex.synchronize do + if @entries.key?(key) && (@entries[key].nil? || @entries[key].expired?)
+ @entries.delete(key) + end + @entries.key?(key) + end + end + + # checking whether a polling thread exists for the key + # @param [String] key + # @return [Boolean] + def threads_key?(key) + @pool.key?(key) + end + + # remove entry only + # @param [String] key + def delete(key) + @mutex.synchronize do + @entries.delete(key) + end + end + + # kill the old polling thread and remove it from pool + # @param [String] key + def delete_polling_thread(key) + Thread.kill(@pool[key]) if threads_key?(key) + @pool.delete(key) + end + + # update cache with requests (using service endpoint operation) + # to fetch endpoint list (with identifiers when available) + # @param [String] key + # @param [RequestContext] ctx + def update(key, ctx) + resp = _request_endpoint(ctx) + if resp && resp.endpoints + resp.endpoints.each { |e| self[key] = e } + end + end + + # extract the key to be used in the cache from request context + # @param [RequestContext] ctx + # @return [String] + def extract_key(ctx) + parts = [] + # fetching from cred provider directly gives warnings + parts << ctx.config.credentials.credentials.access_key_id + if _endpoint_operation_identifier(ctx) + parts << ctx.operation_name + ctx.operation.input.shape.members.inject(parts) do |p, (name, ref)| + p << ctx.params[name] if ref['endpointdiscoveryid'] + p + end + end + parts.join('_') + end + + # update polling threads pool + # @param [String] key + # @param [Thread] thread + def update_polling_pool(key, thread) + unless @pool.size < @max_threads + _, thread = @pool.shift + Thread.kill(thread) + end + @pool[key] = thread + end + + # kill all polling threads + def stop_polling! + @pool.each { |_, t| Thread.kill(t) } + @pool = {} + end + + private + + def _request_endpoint(ctx) + params = {} + if _endpoint_operation_identifier(ctx) + # build identifier params when available + params[:operation] = ctx.operation.name + ctx.operation.input.shape.members.inject(params) do |p, (name, ref)| + if ref['endpointdiscoveryid'] + p[:identifiers] ||= {} + p[:identifiers][ref.location_name] = ctx.params[name] + end + p + end + end + + begin + endpoint_operation_name = ctx.config.api.endpoint_operation + ctx.client.send(endpoint_operation_name, params) + rescue Aws::Errors::ServiceError + nil + end + end + + def _endpoint_operation_identifier(ctx) + return @require_identifier unless @require_identifier.nil? + + operation_name = ctx.config.api.endpoint_operation + operation = ctx.config.api.operation(operation_name) + @require_identifier = operation.input.shape.members.any? + end + + class Endpoint + + # default endpoint cache time, 1 minute + CACHE_PERIOD = 1 + + def initialize(options) + @address = options.fetch(:address) + @cache_period = options[:cache_period_in_minutes] || CACHE_PERIOD + @created_time = Time.now + end + + # [String] valid URI address (with path) + attr_reader :address + + def expired?
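+ # An entry goes stale once its age exceeds the cache period (minutes).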
+ Time.now - @created_time > @cache_period * 60 + end + + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints.rb new file mode 100644 index 0000000..d56b31e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +require_relative 'endpoints/rule' +require_relative 'endpoints/condition' +require_relative 'endpoints/endpoint_rule' +require_relative 'endpoints/endpoint' +require_relative 'endpoints/error_rule' +require_relative 'endpoints/function' +require_relative 'endpoints/matchers' +require_relative 'endpoints/reference' +require_relative 'endpoints/rules_provider' +require_relative 'endpoints/rule_set' +require_relative 'endpoints/templater' +require_relative 'endpoints/tree_rule' +require_relative 'endpoints/url' + +module Aws + # @api private + module Endpoints + class << self + def resolve_auth_scheme(context, endpoint) + if endpoint && (auth_schemes = endpoint.properties['authSchemes']) + auth_scheme = auth_schemes.find do |scheme| + Aws::Plugins::Sign::SUPPORTED_AUTH_TYPES.include?(scheme['name']) + end + raise 'No supported auth scheme for this endpoint.' unless auth_scheme + + merge_signing_defaults(auth_scheme, context.config) + else + default_auth_scheme(context) + end + end + + private + + def default_auth_scheme(context) + case default_api_authtype(context) + when 'v4', 'v4-unsigned-body' + auth_scheme = { 'name' => 'sigv4' } + merge_signing_defaults(auth_scheme, context.config) + when 's3', 's3v4' + auth_scheme = { 'name' => 'sigv4', 'disableDoubleEncoding' => true } + merge_signing_defaults(auth_scheme, context.config) + when 'bearer' + { 'name' => 'bearer' } + when 'none', nil + { 'name' => 'none' } + end + end + + def merge_signing_defaults(auth_scheme, config) + if %w[sigv4 sigv4a].include?(auth_scheme['name']) + auth_scheme['signingName'] ||= sigv4_name(config) + if auth_scheme['name'] == 'sigv4a' + auth_scheme['signingRegionSet'] ||= ['*'] + else + auth_scheme['signingRegion'] ||= config.region + end + end + auth_scheme + end + + def default_api_authtype(context) + context.config.api.operation(context.operation_name)['authtype'] || + context.config.api.metadata['signatureVersion'] + end + + def sigv4_name(config) + config.api.metadata['signingName'] || + config.api.metadata['endpointPrefix'] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/condition.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/condition.rb new file mode 100644 index 0000000..b7c1ef2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/condition.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. 
+ # @api private + class Condition + def initialize(fn:, argv:, assign: nil) + @fn = Function.new(fn: fn, argv: argv) + @assign = assign + @assigned = {} + end + + attr_reader :fn + attr_reader :argv + attr_reader :assign + + attr_reader :assigned + + def match?(parameters, assigns) + output = @fn.call(parameters, assigns) + @assigned = @assigned.merge({ @assign => output }) if @assign + output + end + + def self.from_json(conditions_json) + conditions_json.each.with_object([]) do |condition, conditions| + conditions << new( + fn: condition['fn'], + argv: condition['argv'], + assign: condition['assign'] + ) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint.rb new file mode 100644 index 0000000..09feec8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + class Endpoint + def initialize(url:, properties: {}, headers: {}) + @url = url + @properties = properties + @headers = headers + end + + attr_reader :url + attr_reader :properties + attr_reader :headers + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint_rule.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint_rule.rb new file mode 100644 index 0000000..4f10009 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/endpoint_rule.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. # @api private + class EndpointRule < Rule + def initialize(type: 'endpoint', conditions:, endpoint:, + documentation: nil) + @type = type + @conditions = Condition.from_json(conditions) + @endpoint = endpoint + @documentation = documentation + end + + attr_reader :type + attr_reader :conditions + attr_reader :endpoint + attr_reader :documentation + + def match(parameters, assigned = {}) + assigns = assigned.dup + matched = conditions.all? 
do |condition| + output = condition.match?(parameters, assigns) + assigns = assigns.merge(condition.assigned) if condition.assign + output + end + resolved_endpoint(parameters, assigns) if matched + end + + def resolved_endpoint(parameters, assigns) + Endpoint.new( + url: resolve_value(@endpoint['url'], parameters, assigns), + properties: resolve_properties( + @endpoint['properties'] || {}, + parameters, + assigns + ), + headers: resolve_headers(parameters, assigns) + ) + end + + private + + def resolve_headers(parameters, assigns) + (@endpoint['headers'] || {}).each.with_object({}) do |(key, arr), headers| + headers[key] = [] + arr.each do |value| + headers[key] << resolve_value(value, parameters, assigns) + end + end + end + + def resolve_properties(obj, parameters, assigns) + case obj + when Hash + obj.each.with_object({}) do |(key, value), hash| + hash[key] = resolve_properties(value, parameters, assigns) + end + when Array + obj.collect { |value| resolve_properties(value, parameters, assigns) } + else + if obj.is_a?(String) + Templater.resolve(obj, parameters, assigns) + else + obj + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/error_rule.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/error_rule.rb new file mode 100644 index 0000000..f3f5bac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/error_rule.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. + # @api private + class ErrorRule < Rule + def initialize(type: 'error', conditions:, error: nil, documentation: nil) + @type = type + @conditions = Condition.from_json(conditions) + @error = error + @documentation = documentation + end + + attr_reader :type + attr_reader :conditions + attr_reader :error + attr_reader :documentation + + def match(parameters, assigned = {}) + assigns = assigned.dup + matched = conditions.all? do |condition| + output = condition.match?(parameters, assigns) + assigns = assigns.merge(condition.assigned) if condition.assign + output + end + resolved_error(parameters, assigns) if matched + end + + private + + def resolved_error(parameters, assigns) + error = resolve_value(@error, parameters, assigns) + ArgumentError.new(error) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/function.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/function.rb new file mode 100644 index 0000000..cb0a05b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/function.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. 
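+ # Editorial sketch: EndpointRule#match (endpoint_rule.rb above) returns an
+ # Endpoint with every {template} substituted once all conditions match;
+ # the values below are illustrative only:
+ #
+ #   rule.match(params)
+ #   # => #<Aws::Endpoints::Endpoint
+ #   #      url="https://svc.us-west-2.amazonaws.com",
+ #   #      properties={"authSchemes"=>[...]}, headers={}>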
+ # @api private + class Function + def initialize(fn:, argv:) + @fn = fn + @argv = build_argv(argv) + end + + attr_reader :fn + attr_reader :argv + + def call(parameters, assigns) + args = [] + @argv.each do |arg| + if arg.is_a?(Reference) + args << arg.resolve(parameters, assigns) + elsif arg.is_a?(Function) + args << arg.call(parameters, assigns) + else + if arg.is_a?(String) + arg = Templater.resolve(arg, parameters, assigns) + end + args << arg + end + end + + case @fn + when 'isSet' + Matchers.set?(*args) + when 'not' + Matchers.not(*args) + when 'getAttr' + Matchers.attr(*args) + when 'substring' + Matchers.substring(*args) + when 'stringEquals' + Matchers.string_equals?(*args) + when 'booleanEquals' + Matchers.boolean_equals?(*args) + when 'uriEncode' + Matchers.uri_encode(*args) + when 'parseURL' + Matchers.parse_url(*args) + when 'isValidHostLabel' + Matchers.valid_host_label?(*args) + when 'aws.partition' + Matchers.aws_partition(*args) + when 'aws.parseArn' + Matchers.aws_parse_arn(*args) + when 'aws.isVirtualHostableS3Bucket' + Matchers.aws_virtual_hostable_s3_bucket?(*args) + else + raise "Function not found: #{@fn}" + end + end + + private + + def build_argv(argv_json) + argv_json.each.with_object([]) do |arg, argv| + argv << if arg.is_a?(Hash) && arg['ref'] + Reference.new(ref: arg['ref']) + elsif arg.is_a?(Hash) && arg['fn'] + Function.new(fn: arg['fn'], argv: arg['argv']) + else + arg + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/matchers.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/matchers.rb new file mode 100644 index 0000000..5c6bf2c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/matchers.rb @@ -0,0 +1,127 @@ +# frozen_string_literal: true + +require 'cgi' + +module Aws + module Endpoints + # generic matcher functions for service endpoints + # @api private + module Matchers + # Regex that extracts anything in square brackets + BRACKET_REGEX = /\[(.*?)\]/.freeze + + # CORE + + # isSet(value: Option) bool + def self.set?(value) + !value.nil? + end + + # not(value: bool) bool + def self.not(bool) + !bool + end + + # getAttr(value: Object | Array, path: string) Document + def self.attr(value, path) + parts = path.split('.') + + val = if (index = parts.first[BRACKET_REGEX, 1]) + # remove brackets and index from part before indexing + value[parts.first.gsub(BRACKET_REGEX, '')][index.to_i] + else + value[parts.first] + end + + if parts.size == 1 + val + else + attr(val, parts.slice(1..-1).join('.')) + end + end + + def self.substring(input, start, stop, reverse) + return nil if start >= stop || input.size < stop + + return nil if input.chars.any? 
{ |c| c.ord > 127 } + + return input[start...stop] unless reverse + + r_start = input.size - stop + r_stop = input.size - start + input[r_start...r_stop] + end + + # stringEquals(value1: string, value2: string) bool + def self.string_equals?(value1, value2) + value1 == value2 + end + + # booleanEquals(value1: bool, value2: bool) bool + def self.boolean_equals?(value1, value2) + value1 == value2 + end + + # uriEncode(value: string) string + def self.uri_encode(value) + CGI.escape(value.encode('UTF-8')).gsub('+', '%20').gsub('%7E', '~') + end + + # parseUrl(value: string) Option<URL> + def self.parse_url(value) + URL.new(value).as_json + rescue ArgumentError, URI::InvalidURIError + nil + end + + # isValidHostLabel(value: string, allowSubDomains: bool) bool + def self.valid_host_label?(value, allow_sub_domains = false) + return false if value.empty? + + if allow_sub_domains + labels = value.split('.') + return labels.all? { |l| valid_host_label?(l) } + end + + value =~ /\A(?!-)[a-zA-Z0-9-]{1,63}(?<!-)\z/ + end + + # AWS + + # aws.partition(value: string) Option<Partition> + def self.aws_partition(value) + partition = + Aws::Partitions.find { |p| p.region?(value) } || + Aws::Partitions.find { |p| value.match(p.region_regex) } || + Aws::Partitions.find { |p| p.name == 'aws' } + + return nil unless partition + + partition.metadata + end + + # aws.parseArn(value: string) Option<ARN> + def self.aws_parse_arn(value) + arn = Aws::ARNParser.parse(value) + json = arn.as_json + # HACK: because of poor naming and also requirement of splitting + resource = json.delete('resource') + json['resourceId'] = resource.split(%r{[:\/]}, -1) + json + rescue Aws::Errors::InvalidARNError + nil + end + + # aws.isVirtualHostableS3Bucket(value: string, allowSubDomains: bool) bool + def self.aws_virtual_hostable_s3_bucket?(value, allow_sub_domains = false) + !!(value.size < 64 && + # regular naming rules + value =~ /^[a-z0-9][a-z0-9\-#{'.' if allow_sub_domains}]+[a-z0-9]$/ && + # not an IP address + value !~ /(\d+\.){3}\d+/ && + # no consecutive dots or dashes + value !~ /[.-]{2}/) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/reference.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/reference.rb new file mode 100644 index 0000000..f176786 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/reference.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. + # @api private + class Reference + def initialize(ref:) + @ref = ref + end + + attr_reader :ref + + def resolve(parameters, assigns) + if parameters.class.singleton_class::PARAM_MAP.key?(@ref) + member_name = parameters.class.singleton_class::PARAM_MAP[@ref] + parameters[member_name] + elsif assigns.key?(@ref) + assigns[@ref] + else + raise ArgumentError, + "Reference #{@ref} is not a param or an assigned value."
+ end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule.rb new file mode 100644 index 0000000..04ca810 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. + # @api private + class Rule + # Resolves a value that is a function, reference, or template string. + def resolve_value(value, parameters, assigns) + if value.is_a?(Hash) && value['fn'] + Function.new(fn: value['fn'], argv: value['argv']) + .call(parameters, assigns) + elsif value.is_a?(Hash) && value['ref'] + Reference.new(ref: value['ref']).resolve(parameters, assigns) + else + Templater.resolve(value, parameters, assigns) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule_set.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule_set.rb new file mode 100644 index 0000000..93b9441 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rule_set.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. + # @api private + class RuleSet + def initialize(version:, service_id:, parameters:, rules:) + @version = version + @service_id = service_id + @parameters = parameters + @rules = RuleSet.rules_from_json(rules || []) + end + + attr_reader :version + attr_reader :service_id + attr_reader :parameters + attr_reader :rules + + def self.rules_from_json(rules_json) + rules_json.each.with_object([]) do |rule, rules| + if rule['type'] == 'endpoint' + rules << EndpointRule.new( + conditions: rule['conditions'], + endpoint: rule['endpoint'], + documentation: rule['documentation'] + ) + elsif rule['type'] == 'error' + rules << ErrorRule.new( + conditions: rule['conditions'], + error: rule['error'], + documentation: rule['documentation'] + ) + elsif rule['type'] == 'tree' + rules << TreeRule.new( + conditions: rule['conditions'], + rules: rule['rules'], + documentation: rule['documentation'] + ) + else + # should not happen + raise "Unknown endpoint rule type: #{rule}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rules_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rules_provider.rb new file mode 100644 index 0000000..95c15de --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/rules_provider.rb @@ -0,0 +1,37 @@ +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. 
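+ # Editorial sketch of RuleSet.rules_from_json (rule_set.rb above); the
+ # JSON mirrors the modeled endpoint rules, and the contents here are
+ # illustrative:
+ #
+ #   RuleSet.rules_from_json([
+ #     { 'type' => 'endpoint', 'conditions' => [],
+ #       'endpoint' => { 'url' => 'https://{Region}.example.com' } },
+ #     { 'type' => 'error', 'conditions' => [], 'error' => 'unsupported' }
+ #   ])
+ #   # => [#<EndpointRule>, #<ErrorRule>]; unknown types raise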
It has to exist because + # old service gems can use a new core version. + # @api private + class RulesProvider + def initialize(rule_set) + @rule_set = rule_set + end + + def resolve_endpoint(parameters) + obj = resolve_rules(parameters) + case obj + when Endpoint + obj + when ArgumentError + raise obj + else + raise ArgumentError, 'No endpoint could be resolved' + end + end + + private + + def resolve_rules(parameters) + @rule_set.rules.each do |rule| + output = rule.match(parameters) + return output if output + end + nil + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/templater.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/templater.rb new file mode 100644 index 0000000..720cfa2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/templater.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # Does substitutions for templated endpoint strings + + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. + # @api private + module Templater + class << self + def resolve(string, parameters, assigns) + # scans for strings in curly brackets {} + string.scan(/\{.+?\}/).each do |capture| + value = capture[1..-2] # strips curly brackets + string = string.gsub(capture, replace(value, parameters, assigns)) + end + string + end + + private + + # Replaces the captured value with values from parameters or assign + def replace(capture, parameters, assigns) + # Pound sigil is used for getAttr calls + indexes = capture.split('#') + + # no sigil found, just do substitution + if indexes.size == 1 + extract_value(capture, parameters, assigns) + # sigil was found, need to call getAttr + elsif indexes.size == 2 + ref, property = indexes + param = extract_value(ref, parameters, assigns) + Matchers.attr(param, property) + else + raise "Invalid templatable value: #{capture}" + end + end + + # Checks both parameters and assigns hash for the referenced value + def extract_value(key, parameters, assigns) + if assigns.key?(key) + assigns[key] + elsif parameters.class.singleton_class::PARAM_MAP.key?(key) + member_name = parameters.class.singleton_class::PARAM_MAP[key] + parameters[member_name] + else + raise "Templatable value not found: #{key}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/tree_rule.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/tree_rule.rb new file mode 100644 index 0000000..930a940 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/tree_rule.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Aws + module Endpoints + # This class is deprecated. It is used by the Runtime endpoint + # resolution approach. It has been replaced by a code generated + # approach in each service gem. It can be removed in a new + # major version. It has to exist because + # old service gems can use a new core version. 
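+ # Editorial sketch of Templater.resolve (templater.rb above): '{Region}'
+ # is looked up through the parameters' PARAM_MAP, while '{name#attr}'
+ # resolves the reference and then applies Matchers.attr. Values are
+ # illustrative; `parsed_url_hash` stands in for a URL#as_json hash:
+ #
+ #   Templater.resolve('https://{Region}.amazonaws.com', params, {})
+ #   # => "https://us-west-2.amazonaws.com"
+ #   Templater.resolve('{url#authority}', params, 'url' => parsed_url_hash)
+ #   # => "example.com"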
+ # @api private + class TreeRule + def initialize(type: 'tree', conditions:, rules:, documentation: nil) + @type = type + @conditions = Condition.from_json(conditions) + @rules = RuleSet.rules_from_json(rules) + @documentation = documentation + end + + attr_reader :type + attr_reader :conditions + attr_reader :error + attr_reader :documentation + + def match(parameters, assigned = {}) + assigns = assigned.dup + matched = conditions.all? do |condition| + output = condition.match?(parameters, assigns) + assigns = assigns.merge(condition.assigned) if condition.assign + output + end + resolve_rules(parameters, assigns) if matched + end + + private + + def resolve_rules(parameters, assigns) + @rules.each do |rule| + output = rule.match(parameters, assigns) + return output if output + end + nil + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/url.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/url.rb new file mode 100644 index 0000000..3c156dc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/endpoints/url.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require 'ipaddr' + +module Aws + module Endpoints + + # @api private + class URL + def initialize(url) + uri = URI(url) + @scheme = uri.scheme + # only support http and https schemes + raise ArgumentError unless %w[https http].include?(@scheme) + + # do not support query + raise ArgumentError if uri.query + + @authority = _authority(url, uri) + @path = uri.path + @normalized_path = uri.path + (uri.path[-1] == '/' ? '' : '/') + @is_ip = _is_ip(uri.host) + end + + attr_reader :scheme + attr_reader :authority + attr_reader :path + attr_reader :normalized_path + attr_reader :is_ip + + def as_json(_options = {}) + { + 'scheme' => scheme, + 'authority' => authority, + 'path' => path, + 'normalizedPath' => normalized_path, + 'isIp' => is_ip + } + end + + private + + def _authority(url, uri) + # don't include port if it's default and not parsed originally + if uri.default_port == uri.port && !url.include?(":#{uri.port}") + uri.host + else + "#{uri.host}:#{uri.port}" + end + end + + def _is_ip(authority) + IPAddr.new(authority) + true + rescue IPAddr::InvalidAddressError + false + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/errors.rb new file mode 100644 index 0000000..66c765b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/errors.rb @@ -0,0 +1,406 @@ +# frozen_string_literal: true + +module Aws + module Errors + + class NonSupportedRubyVersionError < RuntimeError; end + + # The base class for all errors returned by an Amazon Web Service. + # All ~400 level client errors and ~500 level server errors are raised + # as service errors. This indicates it was an error returned from the + # service and not one generated by the client. + class ServiceError < RuntimeError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::Structure] data + def initialize(context, message, data = Aws::EmptyStructure.new) + @code = self.class.code + @context = context + @data = data + @message = message && !message.empty? ? 
message : self.class.to_s + super(@message) + end + + # @return [String] + attr_reader :code + + # @return [Seahorse::Client::RequestContext] The context of the request + # that triggered the remote service to return this error. + attr_reader :context + + # @return [Aws::Structure] + attr_reader :data + + class << self + + # @return [String] + attr_accessor :code + + end + + # @api private undocumented + def retryable? + false + end + + # @api private undocumented + def throttling? + false + end + end + + # Raised when InstanceProfileCredentialsProvider or + # EcsCredentialsProvider fails to parse the metadata response after retries + class MetadataParserError < RuntimeError + def initialize(*args) + msg = 'Failed to parse metadata service response.' + super(msg) + end + end + + # Raised when a `streaming` operation has the `requiresLength` trait + # enabled but the request payload size/length cannot be calculated + class MissingContentLength < RuntimeError + def initialize(*args) + msg = 'Required `Content-Length` value missing for the request.' + super(msg) + end + end + + # Raised when endpoint discovery failed for operations + # that require endpoints from endpoint discovery + class EndpointDiscoveryError < RuntimeError + def initialize(*args) + msg = 'Endpoint discovery failed for the operation or discovered endpoint is not working, '\ + 'request will keep failing until endpoint discovery succeeds or :endpoint option is provided.' + super(msg) + end + end + + # Raised when the hostLabel member is not provided + # at operation input when the endpoint trait is available + # with the 'hostPrefix' requirement + class MissingEndpointHostLabelValue < RuntimeError + + def initialize(name) + msg = "Missing required parameter #{name} to construct"\ + ' endpoint host prefix. You can disable host prefix by'\ + ' setting :disable_host_prefix_injection to `true`.' + super(msg) + end + + end + + # Raised when attempting to #signal an event before + # making an async request + class SignalEventError < RuntimeError; end + + # Raised when the EventStream Parser fails to parse + # a raw event message + class EventStreamParserError < RuntimeError; end + + # Raised when the EventStream Builder fails to build + # an event message with the parameters provided + class EventStreamBuilderError < RuntimeError; end + + # Error event in an event stream which has event_type :error; + # the error code and error message can be retrieved when available. + # + # example usage: + # + # client.stream_foo(name: 'bar') do |event| + # stream.on_error_event do |event| + # puts "Error #{event.error_code}: #{event.error_message}" + # raise event + # end + # end + # + class EventError < RuntimeError + + def initialize(event_type, code, message) + @event_type = event_type + @error_code = code + @error_message = message + end + + # @return [Symbol] + attr_reader :event_type + + # @return [String] + attr_reader :error_code + + # @return [String] + attr_reader :error_message + + end + + # Raised when an ARN string input doesn't follow the standard: + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns + class InvalidARNError < RuntimeError; end + + # Raised when the region from the ARN string is different from the :region + # configured on the service client. + class InvalidARNRegionError < RuntimeError + def initialize(*args) + msg = 'ARN region is different from the configured client region.'
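+ # Editorial sketch: the errors defined in this file descend from
+ # RuntimeError or ServiceError, so callers can rescue narrowly or
+ # broadly. The client and bucket below are assumptions for illustration:
+ #
+ #   begin
+ #     s3.head_bucket(bucket: 'no-such-bucket')
+ #   rescue Aws::S3::Errors::NotFound => e
+ #     puts "#{e.code}: #{e.message}"
+ #   rescue Aws::Errors::ServiceError => e
+ #     raise unless e.retryable?
+ #   end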
+ super(msg) + end + end + + # Raised when the partition of the ARN region is different than the + # partition of the :region configured on the service client. + class InvalidARNPartitionError < RuntimeError + def initialize(*args) + msg = 'ARN region partition is different from the configured '\ + 'client region partition.' + super(msg) + end + end + + # Various plugins perform client-side checksums of responses. + # This error indicates a checksum failed. + class ChecksumError < RuntimeError; end + + # Raised when a client is constructed and the specified shared + # credentials profile does not exist. + class NoSuchProfileError < RuntimeError; end + + # Raised when a client is constructed, where Assume Role credentials are + # expected, and there is no source profile specified. + class NoSourceProfileError < RuntimeError; end + + # Raised when a client is constructed with Assume Role credentials using + # a credential_source, and that source type is unsupported. + class InvalidCredentialSourceError < RuntimeError; end + + # Raised when a client is constructed with Assume Role credentials, but + # the profile has both source_profile and credential_source. + class CredentialSourceConflictError < RuntimeError; end + + # Raised when a client is constructed with Assume Role credentials using + # a credential_source, and that source doesn't provide credentials. + class NoSourceCredentialsError < RuntimeError; end + + # Raised when a client is constructed and credentials are not + # set, or the set credentials are empty. + class MissingCredentialsError < RuntimeError + def initialize(*args) + msg = 'unable to sign request without credentials set' + super(msg) + end + end + + # Raised when the :web_identity_token_file parameter is not + # provided or the file doesn't exist when initializing the + # AssumeRoleWebIdentityCredentials credential provider + class MissingWebIdentityTokenFile < RuntimeError + def initialize(*args) + msg = 'Missing :web_identity_token_file parameter or'\ + ' invalid file path provided for'\ + ' Aws::AssumeRoleWebIdentityCredentials provider' + super(msg) + end + end + + # Raised when a credentials provider process returns a JSON + # payload with either an invalid version number or malformed contents + class InvalidProcessCredentialsPayload < RuntimeError; end + + # Raised when SSO Credentials are invalid + class InvalidSSOCredentials < RuntimeError; end + + # Raised when the SSO Token is invalid + class InvalidSSOToken < RuntimeError; end + + # Raised when a client is unable to sign a request because + # the bearer token is not configured or available + class MissingBearerTokenError < RuntimeError + def initialize(*args) + msg = 'unable to sign request without token set' + super(msg) + end + end + + # Raised when there is a circular reference in chained + # source_profiles + class SourceProfileCircularReferenceError < RuntimeError; end + + # Raised when a client is constructed and region is not specified. + class MissingRegionError < ArgumentError + def initialize(*args) + msg = 'No region was provided. Configure the `:region` option or '\ + "export the region name to ENV['AWS_REGION']" + super(msg) + end + end + + # Raised when a client is constructed and the region is not valid. + class InvalidRegionError < ArgumentError + def initialize(*args) + super(<<-MSG) +Invalid `:region` option was provided. + +* Not every service is available in every region. + +* Never suffix region names with availability zones.
+ Use "us-east-1", not "us-east-1a" + +Known AWS regions include (not specific to this service): + +#{possible_regions} + MSG + end + + private + + def possible_regions + Aws.partitions.each_with_object([]) do |partition, region_names| + partition.regions.each do |region| + region_names << region.name + end + end.join("\n") + end + end + + # Raised when attempting to connect to an endpoint and a `SocketError` + # is received from the HTTP client. This error is typically the result + # of configuring an invalid `:region`. + class NoSuchEndpointError < RuntimeError + + def initialize(options = {}) + @context = options[:context] + @endpoint = @context.http_request.endpoint + @original_error = options[:original_error] + super(<<-MSG) +Encountered a `SocketError` while attempting to connect to: + + #{endpoint} + +This is typically the result of an invalid `:region` option or a +poorly formatted `:endpoint` option. + +* Avoid configuring the `:endpoint` option directly. Endpoints are constructed + from the `:region`. The `:endpoint` option is reserved for certain services + or for connecting to non-standard test endpoints. + +* Not every service is available in every region. + +* Never suffix region names with availability zones. + Use "us-east-1", not "us-east-1a" + +Known AWS regions include (not specific to this service): + +#{possible_regions} + MSG + end + + attr_reader :context + + attr_reader :endpoint + + attr_reader :original_error + + private + + def possible_regions + Aws.partitions.each_with_object([]) do |partition, region_names| + partition.regions.each do |region| + region_names << region.name + end + end.join("\n") + end + end + + # Raised when attempting to retry a request + # and no capacity is available to retry (See adaptive retry_mode) + class RetryCapacityNotAvailableError < RuntimeError + def initialize(*args) + msg = 'Insufficient client side capacity available to retry request.' + super(msg) + end + end + + # This module is mixed into another module, providing dynamic + # error classes. Error classes all inherit from {ServiceError}. + # + # # creates and returns the class + # Aws::S3::Errors::MyNewErrorClass + # + # Since the complete list of possible AWS errors returned by services + # is not known, this allows us to create them as needed. This also + # allows users to rescue errors by class without them being concrete + # classes beforehand. + # + # @api private + module DynamicErrors + + def self.extended(submodule) + submodule.instance_variable_set('@const_set_mutex', Mutex.new) + submodule.const_set(:ServiceError, Class.new(ServiceError)) + end + + def const_missing(constant) + set_error_constant(constant) + end + + # Given the name of a service and an error code, this method + # returns an error class (that extends {ServiceError}. + # + # Aws::S3::Errors.error_class('NoSuchBucket').new + # #=> # + # + # @api private + def error_class(error_code) + constant = error_class_constant(error_code) + if error_const_set?(constant) + # modeled error class exist + # set code attribute + err_class = const_get(constant) + err_class.code = constant.to_s + err_class + else + set_error_constant(constant) + end + end + + private + + # Convert an error code to an error class name/constant. + # This requires filtering non-safe characters from the constant + # name and ensuring it begins with an uppercase letter. + # @param [String] error_code + # @return [Symbol] Returns a symbolized constant name for the given + # `error_code`. 
+ def error_class_constant(error_code) + constant = error_code.to_s + constant = constant.gsub(/https?:.*$/, '') + constant = constant.gsub(/[^a-zA-Z0-9]/, '') + constant = 'Error' + constant unless constant.match(/^[a-z]/i) + constant = constant[0].upcase + constant[1..-1] + constant.to_sym + end + + def set_error_constant(constant) + @const_set_mutex.synchronize do + # Ensure the const was not defined while blocked by the mutex + if error_const_set?(constant) + const_get(constant) + else + error_class = Class.new(const_get(:ServiceError)) + error_class.code = constant.to_s + const_set(constant, error_class) + end + end + end + + def error_const_set?(constant) + # Purposefully not using #const_defined? as that method returns true + # for constants not defined directly in the current module. + constants.include?(constant.to_sym) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/event_emitter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/event_emitter.rb new file mode 100644 index 0000000..fdf8b08 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/event_emitter.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module Aws + class EventEmitter + + def initialize + @listeners = {} + @validate_event = true + @status = :sleep + @signal_queue = Queue.new + end + + attr_accessor :stream + + attr_accessor :encoder + + attr_accessor :validate_event + + attr_accessor :signal_queue + + def on(type, callback) + (@listeners[type] ||= []) << callback + end + + def signal(type, event) + return unless @listeners[type] + @listeners[type].each do |listener| + listener.call(event) if event.event_type == type + end + end + + def emit(type, params) + unless @stream + raise Aws::Errors::SignalEventError.new( + "Signaling events before making async request"\ + " is not allowed." + ) + end + if @validate_event && type != :end_stream + Aws::ParamValidator.validate!( + @encoder.rules.shape.member(type), params) + end + _ready_for_events? + @stream.data( + @encoder.encode(type, params), + end_stream: type == :end_stream + ) + end + + private + + def _ready_for_events? + return true if @status == :ready + + # blocked until the initial 200 response is received; + # the signal will be available in @signal_queue + # and this check will no longer be blocked + @signal_queue.pop + @status = :ready + true + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ini_parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ini_parser.rb new file mode 100644 index 0000000..4fcadb3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/ini_parser.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module Aws + # @api private + class IniParser + class << self + + def ini_parse(raw) + current_profile = nil + current_prefix = nil + raw.lines.inject({}) do |acc, line| + line = line.split(/^|\s;/).first # remove comments + profile = line.match(/^\[([^\[\]]+)\]\s*(#.+)?$/) unless line.nil? + if profile + current_profile = profile[1] + named_profile = current_profile.match(/^profile\s+(.+?)$/) + current_profile = named_profile[1] if named_profile + elsif current_profile + unless line.nil? + item = line.match(/^(.+?)\s*=\s*(.+?)\s*$/) + prefix = line.match(/^(.+?)\s*=\s*$/) + end + if item && item[1].match(/^\s+/) + # Need to add lines to a nested configuration.
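+ # Editorial sketch of the nesting handled here (keys illustrative):
+ #
+ #   [profile dev]
+ #   region = us-west-2
+ #   s3 =
+ #     addressing_style = path
+ #
+ # parses to:
+ #   { 'dev' => { 'region' => 'us-west-2',
+ #                's3' => { 'addressing_style' => 'path' } } }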
+ inner_item = line.match(/^\s*(.+?)\s*=\s*(.+?)\s*$/) + acc[current_profile] ||= {} + acc[current_profile][current_prefix] ||= {} + acc[current_profile][current_prefix][inner_item[1]] = inner_item[2] + elsif item + current_prefix = nil + acc[current_profile] ||= {} + acc[current_profile][item[1]] = item[2] + elsif prefix + current_prefix = prefix[1] + end + end + acc + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/instance_profile_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/instance_profile_credentials.rb new file mode 100644 index 0000000..e97ab4d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/instance_profile_credentials.rb @@ -0,0 +1,332 @@ +# frozen_string_literal: true + +require 'time' +require 'net/http' + +module Aws + # An auto-refreshing credential provider that loads credentials from + # EC2 instances. + # + # instance_credentials = Aws::InstanceProfileCredentials.new + # ec2 = Aws::EC2::Client.new(credentials: instance_credentials) + class InstanceProfileCredentials + include CredentialProvider + include RefreshingCredentials + + # @api private + class Non200Response < RuntimeError; end + + # @api private + class TokenRetrivalError < RuntimeError; end + + # @api private + class TokenExpiredError < RuntimeError; end + + # These are the errors we trap when attempting to talk to the + # instance metadata service. Any of these imply the service + # is not present, not responding, or some other non-recoverable + # error. + # @api private + NETWORK_ERRORS = [ + Errno::EHOSTUNREACH, + Errno::ECONNREFUSED, + Errno::EHOSTDOWN, + Errno::ENETUNREACH, + SocketError, + Timeout::Error, + Non200Response + ].freeze + + # Path base for GET request for profile and credentials + # @api private + METADATA_PATH_BASE = '/latest/meta-data/iam/security-credentials/'.freeze + + # Path for PUT request for token + # @api private + METADATA_TOKEN_PATH = '/latest/api/token'.freeze + + # @param [Hash] options + # @option options [Integer] :retries (1) Number of times to retry + # when retrieving credentials. + # @option options [String] :endpoint ('http://169.254.169.254') The IMDS + # endpoint. This option has precedence over the :endpoint_mode. + # @option options [String] :endpoint_mode ('IPv4') The endpoint mode for + # the instance metadata service. This is either 'IPv4' ('169.254.169.254') + # or 'IPv6' ('[fd00:ec2::254]'). + # @option options [String] :ip_address ('169.254.169.254') Deprecated. Use + # :endpoint instead. The IP address for the endpoint. + # @option options [Integer] :port (80) + # @option options [Float] :http_open_timeout (1) + # @option options [Float] :http_read_timeout (1) + # @option options [Numeric, Proc] :delay By default, failures are retried + # with exponential back-off, i.e. `sleep(1.2 ** num_failures)`. You can + # pass a number of seconds to sleep between failed attempts, or + # a Proc that accepts the number of failures. + # @option options [IO] :http_debug_output (nil) HTTP wire + # traces are sent to this object. You can specify something + # like $stdout. + # @option options [Integer] :token_ttl Time-to-Live in seconds for EC2 + # Metadata Token used for fetching Metadata Profile Credentials, defaults + # to 21600 seconds + # @option options [Callable] before_refresh Proc called before + # credentials are refreshed.
`before_refresh` is called + # with an instance of this object when + # AWS credentials are required and need to be refreshed. + def initialize(options = {}) + @retries = options[:retries] || 1 + endpoint_mode = resolve_endpoint_mode(options) + @endpoint = resolve_endpoint(options, endpoint_mode) + @port = options[:port] || 80 + @http_open_timeout = options[:http_open_timeout] || 1 + @http_read_timeout = options[:http_read_timeout] || 1 + @http_debug_output = options[:http_debug_output] + @backoff = backoff(options[:backoff]) + @token_ttl = options[:token_ttl] || 21_600 + @token = nil + @no_refresh_until = nil + @async_refresh = false + super + end + + # @return [Integer] Number of times to retry when retrieving credentials + # from the instance metadata service. Defaults to 0 when resolving from + # the default credential chain ({Aws::CredentialProviderChain}). + attr_reader :retries + + private + + def resolve_endpoint_mode(options) + value = options[:endpoint_mode] + value ||= ENV['AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE'] + value ||= Aws.shared_config.ec2_metadata_service_endpoint_mode( + profile: options[:profile] + ) + value || 'IPv4' + end + + def resolve_endpoint(options, endpoint_mode) + value = options[:endpoint] || options[:ip_address] + value ||= ENV['AWS_EC2_METADATA_SERVICE_ENDPOINT'] + value ||= Aws.shared_config.ec2_metadata_service_endpoint( + profile: options[:profile] + ) + + return value if value + + case endpoint_mode.downcase + when 'ipv4' then 'http://169.254.169.254' + when 'ipv6' then 'http://[fd00:ec2::254]' + else + raise ArgumentError, + ':endpoint_mode is not valid, expected IPv4 or IPv6, '\ + "got: #{endpoint_mode}" + end + end + + def backoff(backoff) + case backoff + when Proc then backoff + when Numeric then ->(_) { sleep(backoff) } + else ->(num_failures) { Kernel.sleep(1.2**num_failures) } + end + end + + def refresh + if @no_refresh_until && @no_refresh_until > Time.now + warn_expired_credentials + return + end + + # Retry loading credentials up to 3 times if the instance metadata + # service is responding but is returning invalid JSON documents + # in response to the GET profile credentials call. + begin + retry_errors([Aws::Json::ParseError, StandardError], max_retries: 3) do + c = Aws::Json.load(get_credentials.to_s) + if empty_credentials?(@credentials) + @credentials = Credentials.new( + c['AccessKeyId'], + c['SecretAccessKey'], + c['Token'] + ) + @expiration = c['Expiration'] ? Time.iso8601(c['Expiration']) : nil + if @expiration && @expiration < Time.now + @no_refresh_until = Time.now + refresh_offset + warn_expired_credentials + end + else + # credentials are already set, update them only if the new ones are not empty + if !c['AccessKeyId'] || c['AccessKeyId'].empty? + # error getting new credentials + @no_refresh_until = Time.now + refresh_offset + warn_expired_credentials + else + @credentials = Credentials.new( + c['AccessKeyId'], + c['SecretAccessKey'], + c['Token'] + ) + @expiration = c['Expiration'] ? Time.iso8601(c['Expiration']) : nil + if @expiration && @expiration < Time.now + @no_refresh_until = Time.now + refresh_offset + warn_expired_credentials + end + end + end + + end + rescue Aws::Json::ParseError + raise Aws::Errors::MetadataParserError + end + end + + def get_credentials + # Retry loading credentials a configurable number of times if + # the instance metadata service is not responding. + if _metadata_disabled?
+ '{}' + else + begin + retry_errors(NETWORK_ERRORS, max_retries: @retries) do + open_connection do |conn| + # attempt to fetch token to start secure flow first + # and rescue to failover + begin + retry_errors(NETWORK_ERRORS, max_retries: @retries) do + unless token_set? + created_time = Time.now + token_value, ttl = http_put( + conn, METADATA_TOKEN_PATH, @token_ttl + ) + @token = Token.new(token_value, ttl, created_time) if token_value && ttl + end + end + rescue *NETWORK_ERRORS + # token attempt failed, reset token + # fallback to non-token mode + @token = nil + end + + token = @token.value if token_set? + + begin + metadata = http_get(conn, METADATA_PATH_BASE, token) + profile_name = metadata.lines.first.strip + http_get(conn, METADATA_PATH_BASE + profile_name, token) + rescue TokenExpiredError + # Token has expired, reset it + # The next retry should fetch it + @token = nil + raise Non200Response + end + end + end + rescue + '{}' + end + end + end + + def token_set? + @token && !@token.expired? + end + + def _metadata_disabled? + ENV.fetch('AWS_EC2_METADATA_DISABLED', 'false').downcase == 'true' + end + + def open_connection + uri = URI.parse(@endpoint) + http = Net::HTTP.new(uri.hostname || @endpoint, @port || uri.port) + http.open_timeout = @http_open_timeout + http.read_timeout = @http_read_timeout + http.set_debug_output(@http_debug_output) if @http_debug_output + http.start + yield(http).tap { http.finish } + end + + # GET request fetch profile and credentials + def http_get(connection, path, token = nil) + headers = { 'User-Agent' => "aws-sdk-ruby3/#{CORE_GEM_VERSION}" } + headers['x-aws-ec2-metadata-token'] = token if token + response = connection.request(Net::HTTP::Get.new(path, headers)) + + case response.code.to_i + when 200 + response.body + when 401 + raise TokenExpiredError + else + raise Non200Response + end + end + + # PUT request fetch token with ttl + def http_put(connection, path, ttl) + headers = { + 'User-Agent' => "aws-sdk-ruby3/#{CORE_GEM_VERSION}", + 'x-aws-ec2-metadata-token-ttl-seconds' => ttl.to_s + } + response = connection.request(Net::HTTP::Put.new(path, headers)) + case response.code.to_i + when 200 + [ + response.body, + response.header['x-aws-ec2-metadata-token-ttl-seconds'].to_i + ] + when 400 + raise TokenRetrivalError + when 401 + raise TokenExpiredError + else + raise Non200Response + end + end + + def retry_errors(error_classes, options = {}, &_block) + max_retries = options[:max_retries] + retries = 0 + begin + yield + rescue *error_classes + raise unless retries < max_retries + + @backoff.call(retries) + retries += 1 + retry + end + end + + def warn_expired_credentials + warn("Attempting credential expiration extension due to a credential "\ + "service availability issue. A refresh of these credentials "\ + "will be attempted again in 5 minutes.") + end + + def empty_credentials?(creds) + !creds || !creds.access_key_id || creds.access_key_id.empty? + end + + # Compute an offset for refresh with jitter + def refresh_offset + 300 + rand(0..60) + end + + # @api private + # Token used to fetch IMDS profile and credentials + class Token + def initialize(value, ttl, created_time = Time.now) + @ttl = ttl + @value = value + @created_time = created_time + end + + # [String] token value + attr_reader :value + + def expired? 
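+ # Editorial sketch of the IMDSv2 exchange implemented in get_credentials
+ # above (the paths are the real IMDS paths from this file; the flow falls
+ # back to unauthenticated GETs if the PUT fails):
+ #
+ #   PUT /latest/api/token       (x-aws-ec2-metadata-token-ttl-seconds: 21600)
+ #   GET /latest/meta-data/iam/security-credentials/         -> profile name
+ #   GET /latest/meta-data/iam/security-credentials/PROFILE  -> JSON creds
+ #
+ # A 401 raises TokenExpiredError, which clears the token so the next
+ # attempt refetches it.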
+ Time.now - @created_time > @ttl + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json.rb new file mode 100644 index 0000000..8c8fabe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require 'json' +require_relative 'json/builder' +require_relative 'json/error_handler' +require_relative 'json/handler' +require_relative 'json/parser' +require_relative 'json/json_engine' +require_relative 'json/oj_engine' + +module Aws + # @api private + module Json + class ParseError < StandardError + def initialize(error) + @error = error + super(error.message) + end + + attr_reader :error + end + + class << self + def load(json) + ENGINE.load(json) + end + + def load_file(path) + load(File.open(path, 'r', encoding: 'UTF-8', &:read)) + end + + def dump(value) + ENGINE.dump(value) + end + + private + + def select_engine + require 'oj' + OjEngine + rescue LoadError + JSONEngine + end + end + + # @api private + ENGINE = select_engine + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/builder.rb new file mode 100644 index 0000000..52c4918 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/builder.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module Json + class Builder + + include Seahorse::Model::Shapes + + def initialize(rules) + @rules = rules + end + + def to_json(params) + Json.dump(format(@rules, params)) + end + alias serialize to_json + + private + + def structure(ref, values) + shape = ref.shape + values.each_pair.with_object({}) do |(key, value), data| + if shape.member?(key) && !value.nil? + member_ref = shape.member(key) + member_name = member_ref.location_name || key + data[member_name] = format(member_ref, value) + end + end + end + + def list(ref, values) + member_ref = ref.shape.member + values.collect { |value| format(member_ref, value) } + end + + def map(ref, values) + value_ref = ref.shape.value + values.each.with_object({}) do |(key, value), data| + data[key] = format(value_ref, value) + end + end + + def format(ref, value) + case ref.shape + when StructureShape then structure(ref, value) + when ListShape then list(ref, value) + when MapShape then map(ref, value) + when TimestampShape then timestamp(ref, value) + when BlobShape then encode(value) + else value + end + end + + def encode(blob) + Base64.strict_encode64(String === blob ? 
blob : blob.read) + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'iso8601' then value.utc.iso8601 + when 'rfc822' then value.utc.httpdate + else + # rest-json and jsonrpc default to unixTimestamp + value.to_i + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/error_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/error_handler.rb new file mode 100644 index 0000000..f0eee26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/error_handler.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +module Aws + module Json + class ErrorHandler < Xml::ErrorHandler + + # @param [Seahorse::Client::RequestContext] context + # @return [Seahorse::Client::Response] + def call(context) + @handler.call(context).on(300..599) do |response| + response.error = error(context) + response.data = nil + end + end + + private + + def extract_error(body, context) + json = Json.load(body) + code = error_code(json, context) + message = error_message(code, json) + data = parse_error_data(context, code) + [code, message, data] + rescue Json::ParseError + [http_status_error_code(context), '', EmptyStructure.new] + end + + def error_code(json, context) + code = if aws_query_error?(context) + context.http_response.headers['x-amzn-query-error'].split(';')[0] + else + json['__type'] + end + code ||= json['code'] + code ||= context.http_response.headers['x-amzn-errortype'] + if code + code.split('#').last + else + http_status_error_code(context) + end + end + + def aws_query_error?(context) + context.config.api.metadata['awsQueryCompatible'] && + context.http_response.headers['x-amzn-query-error'] + end + + def error_message(code, json) + if code == 'RequestEntityTooLarge' + 'Request body must be less than 1 MB' + else + json['message'] || json['Message'] || '' + end + end + + def parse_error_data(context, code) + data = EmptyStructure.new + if error_rules = context.operation.errors + error_rules.each do |rule| + # match modeled shape name with the type(code) only + # some type(code) values might contain invalid characters + # such as ':' (efs) etc. + match = rule.shape.name == code.gsub(/[^^a-zA-Z0-9]/, '') + if match && rule.shape.members.any?
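+ # Editorial sketch of error_code above; header and body values are
+ # illustrative:
+ #
+ #   x-amzn-query-error: 'Throttling;Sender'     -> 'Throttling'
+ #   { "__type": "namespace#ResourceNotFound" }  -> 'ResourceNotFound'
+ #   neither present -> a code derived from the HTTP status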
+ data = Parser.new(rule).parse(context.http_response.body_contents) + end + end + end + data + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/handler.rb new file mode 100644 index 0000000..dd67ea6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/handler.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +module Aws + module Json + class Handler < Seahorse::Client::Handler + + CONTENT_TYPE = 'application/x-amz-json-%s' + + # @param [Seahorse::Client::RequestContext] context + # @return [Seahorse::Client::Response] + def call(context) + build_request(context) + response = @handler.call(context) + response.on(200..299) { |resp| parse_response(resp) } + response.on(200..599) { |resp| apply_request_id(context) } + response + end + + private + + def build_request(context) + context.http_request.http_method = 'POST' + context.http_request.headers['Content-Type'] = content_type(context) + context.http_request.headers['X-Amz-Target'] = target(context) + context.http_request.body = build_body(context) + end + + def build_body(context) + if simple_json?(context) + Json.dump(context.params) + else + Builder.new(context.operation.input).serialize(context.params) + end + end + + def parse_response(response) + response.data = parse_body(response.context) + end + + def parse_body(context) + if simple_json?(context) + Json.load(context.http_response.body_contents) + elsif rules = context.operation.output + json = context.http_response.body_contents + if json.is_a?(Array) + # an array of emitted events + if json[0].respond_to?(:response) + # initial response exists + # it must be the first event arrived + resp_struct = json.shift.response + else + resp_struct = context.operation.output.shape.struct_class.new + end + + rules.shape.members.each do |name, ref| + if ref.eventstream + resp_struct.send("#{name}=", json.to_enum) + end + end + resp_struct + else + Parser.new(rules).parse(json == '' ? 
'{}' : json) + end + else + EmptyStructure.new + end + end + + def content_type(context) + CONTENT_TYPE % [context.config.api.metadata['jsonVersion']] + end + + def target(context) + prefix = context.config.api.metadata['targetPrefix'] + "#{prefix}.#{context.operation.name}" + end + + def apply_request_id(context) + context[:request_id] = context.http_response.headers['x-amzn-requestid'] + end + + def simple_json?(context) + context.config.simple_json + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/json_engine.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/json_engine.rb new file mode 100644 index 0000000..aa1bffc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/json_engine.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +module Aws + module Json + module JSONEngine + class << self + def load(json) + JSON.parse(json) + rescue JSON::ParserError => e + raise ParseError.new(e) + end + + def dump(value) + JSON.dump(value) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/oj_engine.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/oj_engine.rb new file mode 100644 index 0000000..6f57b55 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/oj_engine.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module Aws + module Json + module OjEngine + # @api private + LOAD_OPTIONS = { mode: :compat, symbol_keys: false, empty_string: false }.freeze + + # @api private + DUMP_OPTIONS = { mode: :compat }.freeze + + class << self + def load(json) + Oj.load(json, LOAD_OPTIONS) + rescue *PARSE_ERRORS => e + raise ParseError.new(e) + end + + def dump(value) + Oj.dump(value, DUMP_OPTIONS) + end + + private + + # Oj before 1.4.0 does not define Oj::ParseError and instead raises + # SyntaxError on failure + def detect_oj_parse_errors + require 'oj' + + if Oj.const_defined?(:ParseError) + [Oj::ParseError, EncodingError, JSON::ParserError] + else + [SyntaxError] + end + rescue LoadError + nil + end + end + + # @api private + PARSE_ERRORS = detect_oj_parse_errors + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/parser.rb new file mode 100644 index 0000000..bd47974 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/json/parser.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +require 'base64' +require 'time' + +module Aws + module Json + class Parser + + include Seahorse::Model::Shapes + + # @param [Seahorse::Model::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @param [String] json + def parse(json, target = nil) + parse_ref(@rules, Json.load(json), target) + end + + private + + def structure(ref, values, target = nil) + shape = ref.shape + target = ref.shape.struct_class.new if target.nil? 
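+ # Editorial sketch: Aws::Json::ENGINE is chosen once at load time by
+ # select_engine (json.rb above); the outcome can be confirmed with:
+ #
+ #   Aws::Json::ENGINE # => Aws::Json::OjEngine   when the oj gem loads
+ #                     # => Aws::Json::JSONEngine otherwise (stdlib JSON)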
+ values.each do |key, value| + member_name, member_ref = shape.member_by_location_name(key) + if member_ref + target[member_name] = parse_ref(member_ref, value) + elsif shape.union + target[:unknown] = { 'name' => key, 'value' => value } + end + end + if shape.union + # convert to subclass + member_subclass = shape.member_subclass(target.member).new + member_subclass[target.member] = target.value + target = member_subclass + end + target + end + + def list(ref, values, target = nil) + target = [] if target.nil? + values.each do |value| + target << parse_ref(ref.shape.member, value) + end + target + end + + def map(ref, values, target = nil) + target = {} if target.nil? + values.each do |key, value| + target[key] = parse_ref(ref.shape.value, value) + end + target + end + + def parse_ref(ref, value, target = nil) + if value.nil? + nil + else + case ref.shape + when StructureShape then structure(ref, value, target) + when ListShape then list(ref, value, target) + when MapShape then map(ref, value, target) + when TimestampShape then time(value) + when BlobShape then Base64.decode64(value) + when BooleanShape then value.to_s == 'true' + else value + end + end + end + + # @param [String, Integer] value + # @return [Time] + def time(value) + value.is_a?(Numeric) ? Time.at(value) : Time.parse(value) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/formatter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/formatter.rb new file mode 100644 index 0000000..011d218 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/formatter.rb @@ -0,0 +1,261 @@ +# frozen_string_literal: true + +require 'pathname' + +module Aws + module Log + + # A log formatter generates a string for logging from a response. This + # is accomplished with a log pattern string: + # + # pattern = ':operation :http_response_status_code :time' + # formatter = Aws::Log::Formatter.new(pattern) + # formatter.format(response) + # #=> 'get_bucket 200 0.0352' + # + # # Canned Formatters + # + # Instead of providing your own pattern, you can choose a canned log + # formatter. + # + # * {Formatter.default} + # * {Formatter.colored} + # * {Formatter.short} + # + # # Pattern Substitutions + # + # You can put any of these placeholders into your pattern. + # + # * `:client_class` - The name of the client class. + # + # * `:operation` - The name of the client request method. + # + # * `:request_params` - The user provided request parameters. Long + # strings are truncated/summarized if they exceed the + # `:max_string_size`. Other objects are inspected. + # + # * `:time` - The total time in seconds spent on the + # request. This includes client side time spent building + # the request and parsing the response. + # + # * `:retries` - The number of times a client request was retried. + # + # * `:http_request_method` - The http request verb, e.g., `POST`, + # `PUT`, `GET`, etc. + # + # * `:http_request_endpoint` - The request endpoint. This includes + # the scheme, host and port, but not the path. + # + # * `:http_request_scheme` - This is replaced by `http` or `https`. + # + # * `:http_request_host` - The host name of the http request + # endpoint (e.g. 's3.amazon.com'). + # + # * `:http_request_port` - The port number (e.g. '443' or '80'). + # + # * `:http_request_headers` - The http request headers, inspected. + # + # * `:http_request_body` - The http request payload.
+ # + # * `:http_response_status_code` - The http response status + # code, e.g., `200`, `404`, `500`, etc. + # + # * `:http_response_headers` - The http response headers, inspected. + # + # * `:http_response_body` - The http response body contents. + # + # * `:error_class` + # + # * `:error_message` + # + class Formatter + + # @param [String] pattern The log format pattern should be a string + # and may contain substitutions. + # + # @option options [Integer] :max_string_size (1000) When summarizing + # request parameters, strings longer than this value will be + # truncated. + # + # @option options [Array] :filter A list of parameter + # names that should be filtered when logging `:request_params`. + # + # Formatter.new(pattern, filter: [:password]) + # + # The default list of filtered parameters is documented on the + # {ParamFilter} class. + # + # @option options [Boolean] :filter_sensitive_params (true) Set to false + # to disable the sensitive parameter filtering when logging + # `:request_params`. + def initialize(pattern, options = {}) + @pattern = pattern + @param_formatter = ParamFormatter.new(options) + @param_filter = ParamFilter.new(options) + end + + # @return [String] + attr_reader :pattern + + # Given a response, this will format a log message and return it as a + # string according to {#pattern}. + # @param [Seahorse::Client::Response] response + # @return [String] + def format(response) + pattern.gsub(/:(\w+)/) { |sym| send("_#{sym[1..-1]}", response) } + end + + # @api private + def method_missing(method_name, *args) + if method_name.to_s.chars.first == '_' + ":#{method_name.to_s[1..-1]}" + else + super + end + end + + private + + def _client_class(response) + response.context.client.class.name + end + + def _operation(response) + response.context.operation_name + end + + def _request_params(response) + params = response.context.params + type = response.context.operation.input.shape.struct_class + @param_formatter.summarize(@param_filter.filter(params, type)) + end + + def _time(response) + duration = response.context[:logging_completed_at] - + response.context[:logging_started_at] + ("%.06f" % duration).sub(/0+$/, '') + end + + def _retries(response) + response.context.retries + end + + def _http_request_endpoint(response) + response.context.http_request.endpoint.to_s + end + + def _http_request_scheme(response) + response.context.http_request.endpoint.scheme + end + + def _http_request_host(response) + response.context.http_request.endpoint.host + end + + def _http_request_port(response) + response.context.http_request.endpoint.port.to_s + end + + def _http_request_method(response) + response.context.http_request.http_method + end + + def _http_request_headers(response) + response.context.http_request.headers.inspect + end + + def _http_request_body(response) + @param_formatter.summarize(response.context.http_request.body_contents) + end + + def _http_response_status_code(response) + response.context.http_response.status_code.to_s + end + + def _http_response_headers(response) + response.context.http_response.headers.inspect + end + + def _http_response_body(response) + if response.context.http_response.body.respond_to?(:rewind) + @param_formatter.summarize( + response.context.http_response.body_contents + ) + else + '' + end + end + + def _error_class(response) + response.error ? response.error.class.name : '' + end + + def _error_message(response) + response.error ? response.error.message : '' + end + + class << self + + # The default log format. 
+ # @option (see #initialize) + # @example A sample of the default format. + # + # [ClientClass 200 0.580066 0 retries] list_objects(:bucket_name => 'bucket') + # + # @return [Formatter] + def default(options = {}) + pattern = [] + pattern << "[:client_class" + pattern << ":http_response_status_code" + pattern << ":time" + pattern << ":retries retries]" + pattern << ":operation(:request_params)" + pattern << ":error_class" + pattern << ":error_message" + Formatter.new(pattern.join(' ') + "\n", options) + end + + # The short log format. Similar to default, but it does not + # inspect the request params or report on retries. + # @option (see #initialize) + # @example A sample of the short format + # + # [ClientClass 200 0.494532] list_buckets + # + # @return [Formatter] + def short(options = {}) + pattern = [] + pattern << "[:client_class" + pattern << ":http_response_status_code" + pattern << ":time]" + pattern << ":operation" + pattern << ":error_class" + Formatter.new(pattern.join(' ') + "\n", options) + end + + # The default log format with ANSI colors. + # @option (see #initialize) + # @example A sample of the colored format (sans the ansi colors). + # + # [ClientClass 200 0.580066 0 retries] list_objects(:bucket_name => 'bucket') + # + # @return [Formatter] + def colored(options = {}) + bold = "\x1b[1m" + color = "\x1b[34m" + reset = "\x1b[0m" + pattern = [] + pattern << "#{bold}#{color}[:client_class" + pattern << ":http_response_status_code" + pattern << ":time" + pattern << ":retries retries]#{reset}#{bold}" + pattern << ":operation(:request_params)" + pattern << ":error_class" + pattern << ":error_message#{reset}" + Formatter.new(pattern.join(' ') + "\n", options) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/handler.rb new file mode 100644 index 0000000..1c485e1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/handler.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Logging + class Handler < Client::Handler + + # @param [RequestContext] context + # @return [Response] + def call(context) + context[:logging_started_at] = Time.now + @handler.call(context).tap do |response| + context[:logging_completed_at] = Time.now + log(context.config, response) + end + end + + private + + # @param [Configuration] config + # @param [Response] response + # @return [void] + def log(config, response) + config.logger.send(config.log_level, format(config, response)) + end + + # @param [Configuration] config + # @param [Response] response + # @return [String] + def format(config, response) + config.log_formatter.format(response) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_filter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_filter.rb new file mode 100644 index 0000000..91a54ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_filter.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require 'pathname' +require 'set' + +module Aws + module Log + class ParamFilter + # DEPRECATED - This must exist for backwards compatibility. Sensitive + # members are now computed for each request/response type. This can be + # removed in a new major version. This list is no longer updated. 
+ # + # A managed list of sensitive parameters that should be filtered from + # logs. This is updated automatically as part of each release. See the + # `tasks/update-sensitive-params.rake` for more information. + # + # @api private + # begin + SENSITIVE = [:access_token, :account_name, :account_password, :address, :admin_contact, :admin_password, :alexa_for_business_room_arn, :artifact_credentials, :auth_code, :auth_parameters, :authentication_token, :authorization_result, :backup_plan_tags, :backup_vault_tags, :base_32_string_seed, :basic_auth_credentials, :block, :block_address, :block_data, :blocks, :body, :bot_configuration, :bot_email, :calling_name, :cause, :client_id, :client_request_token, :client_secret, :comment, :configuration, :content, :copy_source_sse_customer_key, :credentials, :current_password, :custom_attributes, :custom_private_key, :db_password, :default_phone_number, :definition, :description, :destination_access_token, :digest_tip_address, :display_name, :domain_signing_private_key, :e164_phone_number, :email, :email_address, :email_message, :embed_url, :emergency_phone_number, :error, :external_meeting_id, :external_model_endpoint_data_blobs, :external_user_id, :fall_back_phone_number, :feedback_token, :file, :filter_expression, :first_name, :full_name, :host_key, :id, :id_token, :input, :input_text, :ion_text, :join_token, :key, :key_id, :key_material, :key_store_password, :kms_key_id, :kms_master_key_id, :lambda_function_arn, :last_name, :local_console_password, :master_account_email, :master_user_name, :master_user_password, :meeting_host_id, :message, :metadata, :name, :new_password, :next_password, :notes, :number, :oauth_token, :old_password, :outbound_events_https_endpoint, :output, :owner_information, :parameters, :passphrase, :password, :payload, :phone_number, :plaintext, :previous_password, :primary_email, :primary_provisioned_number, :private_key, :private_key_plaintext, :proof, :proposed_password, :proxy_phone_number, :public_key, :qr_code_png, :query, :random_password, :recovery_point_tags, :refresh_token, :registrant_contact, :request_attributes, :resource_arn, :restore_metadata, :revision, :saml_assertion, :search_query, :secret_access_key, :secret_binary, :secret_code, :secret_hash, :secret_string, :secret_to_authenticate_initiator, :secret_to_authenticate_target, :security_token, :service_password, :session_attributes, :session_token, :share_notes, :shared_secret, :slots, :sns_topic_arn, :source_access_token, :sqs_queue_arn, :sse_customer_key, :ssekms_encryption_context, :ssekms_key_id, :status_message, :tag_key_list, :tags, :target_address, :task_parameters, :tech_contact, :temporary_password, :test_phone_number, :text, :token, :trust_password, :type, :upload_credentials, :upload_url, :uri, :user_data, :user_email, :user_name, :user_password, :username, :value, :values, :variables, :vpn_psk, :web_identity_token, :zip_file] + # end + + def initialize(options = {}) + @enabled = options[:filter_sensitive_params] != false + @additional_filters = options[:filter] || [] + end + + def filter(values, type) + case values + when Struct then filter_struct(values, type) + when Hash then filter_hash(values, type) + when Array then filter_array(values, type) + else values + end + end + + private + + def filter_struct(values, type) + if values.class.include? 
Aws::Structure::Union + values = { values.member => values.value } + end + filter_hash(values, type) + end + + def filter_hash(values, type) + if type.const_defined?('SENSITIVE') + filters = type::SENSITIVE + @additional_filters + else + # Support backwards compatibility (new core + old service) + filters = SENSITIVE + @additional_filters + end + + filtered = {} + values.each_pair do |key, value| + filtered[key] = if @enabled && filters.include?(key) + '[FILTERED]' + else + filter(value, type) + end + end + filtered + end + + def filter_array(values, type) + values.map { |value| filter(value, type) } + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_formatter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_formatter.rb new file mode 100644 index 0000000..e437dcd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/log/param_formatter.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require 'pathname' + +module Aws + module Log + # @api private + class ParamFormatter + + # Strings longer than the max string size are truncated + MAX_STRING_SIZE = 1000 + + def initialize(options = {}) + @max_string_size = options[:max_string_size] || MAX_STRING_SIZE + end + + def summarize(value) + Hash === value ? summarize_hash(value) : summarize_value(value) + end + + private + + def summarize_hash(hash) + hash.keys.first.is_a?(String) ? + summarize_string_hash(hash) : + summarize_symbol_hash(hash) + end + + def summarize_symbol_hash(hash) + hash.map do |key,v| + "#{key}:#{summarize_value(v)}" + end.join(",") + end + + def summarize_string_hash(hash) + hash.map do |key,v| + "#{key.inspect}=>#{summarize_value(v)}" + end.join(",") + end + + def summarize_string(str) + if str.size > @max_string_size + "#<String #{str[0, @max_string_size].inspect} ... (#{str.size} bytes)>" + else + str.inspect + end + end + + def summarize_value(value) + case value + when String then summarize_string(value) + when Hash then '{' + summarize_hash(value) + '}' + when Array then summarize_array(value) + when File then summarize_file(value.path) + when Pathname then summarize_file(value) + else value.inspect + end + end + + def summarize_file(path) + "#<File:#{path} (#{File.size(path)} bytes)>" + end + + def summarize_array(array) + "[" + array.map{|v| summarize_value(v) }.join(",") + "]" + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pageable_response.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pageable_response.rb new file mode 100644 index 0000000..4b1d26c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pageable_response.rb @@ -0,0 +1,219 @@ +# frozen_string_literal: true + +module Aws + # Decorates a {Seahorse::Client::Response} with paging convenience methods. + # Some AWS calls provide paged responses to limit the amount of data returned + # with each response. To optimize for latency, some APIs may return an + # inconsistent number of responses per page. You should rely on the values of + # the `next_page?` method or use enumerable methods such as `each` rather + # than the number of items returned to iterate through results. See below for + # examples. + # + # @note Methods such as `to_json` will enumerate all of the responses before + # returning the full response as JSON. + # + # # Paged Responses Are Enumerable + # The simplest way to handle paged response data is to use the built-in + # enumerator in the response object, as shown in the following example.
+ # + # s3 = Aws::S3::Client.new + # + # s3.list_objects(bucket:'aws-sdk').each do |response| + # puts response.contents.map(&:key) + # end + # + # This yields one response object per API call made, and enumerates objects + # in the named bucket. The SDK retrieves additional pages of data to + # complete the request. + # + # # Handling Paged Responses Manually + # To handle paging yourself, use the response’s `next_page?` method to verify + # there are more pages to retrieve, or use the `last_page?` method to verify + # there are no more pages to retrieve. + # + # If there are more pages, use the `next_page` method to retrieve the + # next page of results, as shown in the following example. + # + # s3 = Aws::S3::Client.new + # + # # Get the first page of data + # response = s3.list_objects(bucket:'aws-sdk') + # + # # Get additional pages + # while response.next_page? do + # response = response.next_page + # # Use the response data here... + # puts response.contents.map(&:key) + # end + # + module PageableResponse + + def self.apply(base) + base.extend Extension + base.instance_variable_set(:@last_page, nil) + base.instance_variable_set(:@more_results, nil) + base + end + + # @return [Paging::Pager] + attr_accessor :pager + + # Returns `true` if there are no more results. Calling {#next_page} + # when this method returns `true` will raise an error. + # @return [Boolean] + def last_page? + # Actual implementation is in PageableResponse::Extension + end + + # Returns `true` if there are more results. Calling {#next_page} will + # return the next response. + # @return [Boolean] + def next_page? + # Actual implementation is in PageableResponse::Extension + end + + # @return [Seahorse::Client::Response] + def next_page(params = {}) + # Actual implementation is in PageableResponse::Extension + end + + # Yields the current and each following response to the given block. + # @yieldparam [Response] response + # @return [Enumerable,nil] Returns a new Enumerable if no block is given. + def each(&block) + # Actual implementation is in PageableResponse::Extension + end + alias each_page each + + private + + # @param [Hash] params A hash of additional request params to + # merge into the next page request. + # @return [Seahorse::Client::Response] Returns the next page of + # results. + def next_response(params) + # Actual implementation is in PageableResponse::Extension + end + + # @param [Hash] params A hash of additional request params to + # merge into the next page request. + # @return [Hash] Returns the hash of request parameters for the + # next page, merging any given params. + def next_page_params(params) + # Actual implementation is in PageableResponse::Extension + end + + # Raised when calling {PageableResponse#next_page} on a pager that + # is on the last page of results. You can call {PageableResponse#last_page?} + # or {PageableResponse#next_page?} to know if there are more pages. + class LastPageError < RuntimeError + + # @param [Seahorse::Client::Response] response + def initialize(response) + @response = response + super("unable to fetch next page, end of results reached") + end + + # @return [Seahorse::Client::Response] + attr_reader :response + + end + + # A handful of Enumerable methods, such as #count, are not safe + # to call on a pageable response, as this would trigger n API calls + # simply to count the number of response pages, when what is likely + # wanted is to access count on the data. Same for #to_h.
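+ # + # To count items across all pages, iterate explicitly so the extra API + # calls are intentional (a sketch, assuming the S3 client from the + # examples above): + # + # total = 0 + # s3.list_objects(bucket:'aws-sdk').each_page do |page| + # total += page.contents.size + # end + #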
+ # @api private + module UnsafeEnumerableMethods + + def count + if data.respond_to?(:count) + data.count + else + raise NoMethodError, "undefined method `count'" + end + end + + def respond_to?(method_name, *args) + if method_name == :count + data.respond_to?(:count) + else + super + end + end + + def to_h + data.to_h + end + + def as_json(_options = {}) + data.to_h(data, as_json: true) + end + + def to_json(options = {}) + as_json.to_json(options) + end + end + + # The actual decorator module implementation. It is in a distinct module + # so that it can be used to extend objects without busting Ruby's constant cache. + # object.extend(mod) busts the constant cache only if `mod` contains constants of its own. + # @api private + module Extension + + include Enumerable + include UnsafeEnumerableMethods + + attr_accessor :pager + + def last_page? + if @last_page.nil? + @last_page = !@pager.truncated?(self) + end + @last_page + end + + def next_page? + !last_page? + end + + def next_page(params = {}) + if last_page? + raise LastPageError.new(self) + else + next_response(params) + end + end + + def each(&block) + return enum_for(:each_page) unless block_given? + response = self + yield(response) + until response.last_page? + response = response.next_page + yield(response) + end + end + alias each_page each + + private + + def next_response(params) + params = next_page_params(params) + request = context.client.build_request(context.operation_name, params) + request.send_request + end + + def next_page_params(params) + # Remove all previous tokens from the original params. + # Sometimes a token can be nil and merge would not include it. + tokens = @pager.tokens.values.map(&:to_sym) + + params_without_tokens = context[:original_params].reject { |k, _v| tokens.include?(k) } + params_without_tokens.merge!(@pager.next_tokens(self).merge(params)) + params_without_tokens + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pager.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pager.rb new file mode 100644 index 0000000..c5e50ce --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/pager.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +require 'jmespath' + +module Aws + # @api private + class Pager + + # @option options [required, Hash] :tokens + # @option options [String] :limit_key + # @option options [String] :more_results + def initialize(options) + @tokens = options.fetch(:tokens) + @limit_key = options.fetch(:limit_key, nil) + @more_results = options.fetch(:more_results, nil) + end + + # @return [Symbol, nil] + attr_reader :limit_key + + # @return [Hash, nil] + attr_reader :tokens + + # @param [Seahorse::Client::Response] response + # @return [Hash] + def next_tokens(response) + @tokens.each.with_object({}) do |(source, target), next_tokens| + value = JMESPath.search(source, response.data) + next_tokens[target.to_sym] = value unless empty_value?(value) + end + end + + # @api private + def prev_tokens(response) + @tokens.each.with_object({}) do |(_, target), tokens| + value = JMESPath.search(target, response.context.params) + tokens[target.to_sym] = value unless empty_value?(value) + end + end + + # @param [Seahorse::Client::Response] response + # @return [Boolean] + def truncated?(response) + if @more_results + JMESPath.search(@more_results, response.data) + else + next_t = next_tokens(response) + prev_t = prev_tokens(response) + !(next_t.empty?
|| next_t == prev_t) + end + end + + private + + def empty_value?(value) + value.nil? || value == '' || value == [] || value == {} + end + + class NullPager + + # @return [nil] + attr_reader :limit_key + + def next_tokens + {} + end + + def prev_tokens + {} + end + + def truncated?(response) + false + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_converter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_converter.rb new file mode 100644 index 0000000..234c7d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_converter.rb @@ -0,0 +1,229 @@ +# frozen_string_literal: true + +require 'stringio' +require 'date' +require 'time' +require 'tempfile' +require 'thread' + +module Aws + # @api private + class ParamConverter + + include Seahorse::Model::Shapes + + @mutex = Mutex.new + @converters = Hash.new { |h,k| h[k] = {} } + + def initialize(rules) + @rules = rules + @opened_files = [] + end + + # @api private + attr_reader :opened_files + + # @param [Hash] params + # @return [Hash] + def convert(params) + if @rules + structure(@rules, params) + else + params + end + end + + def close_opened_files + @opened_files.each(&:close) + @opened_files = [] + end + + private + + def structure(ref, values) + values = c(ref, values) + if ::Struct === values || Hash === values + values.each_pair do |k, v| + unless v.nil? + if ref.shape.member?(k) + values[k] = member(ref.shape.member(k), v) + end + end + end + end + values + end + + def list(ref, values) + values = c(ref, values) + if values.is_a?(Array) + values.map { |v| member(ref.shape.member, v) } + else + values + end + end + + def map(ref, values) + values = c(ref, values) + if values.is_a?(Hash) + values.each.with_object({}) do |(key, value), hash| + hash[member(ref.shape.key, key)] = member(ref.shape.value, value) + end + else + values + end + end + + def member(ref, value) + case ref.shape + when StructureShape then structure(ref, value) + when ListShape then list(ref, value) + when MapShape then map(ref, value) + else c(ref, value) + end + end + + def c(ref, value) + self.class.c(ref.shape.class, value, self) + end + + class << self + + def convert(shape, params) + new(shape).convert(params) + end + + # Registers a new value converter. Converters run in the context + # of a shape and value class. + # + # # add a converter that stringifies integers + # shape_class = Seahorse::Model::Shapes::StringShape + # ParamConverter.add(shape_class, Integer) { |i| i.to_s } + # + # @param [Class] shape_class + # @param [Class] value_class + # @param [#call] converter (nil) An object that responds to `#call` + # accepting a single argument. This function should perform + # the value conversion if possible, returning the result. + # If the conversion is not possible, the original value should + # be returned. + # @return [void] + def add(shape_class, value_class, converter = nil, &block) + @converters[shape_class][value_class] = converter || block + end + + def ensure_open(file, converter) + if file.closed? 
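+ # Reopen the file in binary mode and track the new handle so that + # #close_opened_files can close it once the request has been built.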
+ new_file = File.open(file.path, 'rb') + converter.opened_files << new_file + new_file + else + file + end + end + + # @api private + def c(shape, value, instance = nil) + if converter = converter_for(shape, value) + converter.call(value, instance) + else + value + end + end + + private + + def converter_for(shape_class, value) + unless @converters[shape_class].key?(value.class) + @mutex.synchronize { + unless @converters[shape_class].key?(value.class) + @converters[shape_class][value.class] = find(shape_class, value) + end + } + end + @converters[shape_class][value.class] + end + + def find(shape_class, value) + converter = nil + each_base_class(shape_class) do |klass| + @converters[klass].each do |value_class, block| + if value_class === value + converter = block + break + end + end + break if converter + end + converter + end + + def each_base_class(shape_class, &block) + shape_class.ancestors.each do |ancestor| + yield(ancestor) if @converters.key?(ancestor) + end + end + + end + + add(StructureShape, Hash) { |h| h.dup } + add(StructureShape, ::Struct) + + add(MapShape, Hash) { |h| h.dup } + add(MapShape, ::Struct) do |s| + s.members.each.with_object({}) {|k,h| h[k] = s[k] } + end + + add(ListShape, Array) { |a| a.dup } + add(ListShape, Enumerable) { |value| value.to_a } + + add(StringShape, String) + add(StringShape, Symbol) { |sym| sym.to_s } + + add(IntegerShape, Integer) + add(IntegerShape, Float) { |f| f.to_i } + add(IntegerShape, String) do |str| + begin + Integer(str) + rescue ArgumentError + str + end + end + + add(FloatShape, Float) + add(FloatShape, Integer) { |i| i.to_f } + add(FloatShape, String) do |str| + begin + Float(str) + rescue ArgumentError + str + end + end + + add(TimestampShape, Time) + add(TimestampShape, Date) { |d| d.to_time } + add(TimestampShape, DateTime) { |dt| dt.to_time } + add(TimestampShape, Integer) { |i| Time.at(i) } + add(TimestampShape, Float) { |f| Time.at(f) } + add(TimestampShape, String) do |str| + begin + Time.parse(str) + rescue ArgumentError + str + end + end + + add(BooleanShape, TrueClass) + add(BooleanShape, FalseClass) + add(BooleanShape, String) do |str| + { 'true' => true, 'false' => false }[str] + end + + add(BlobShape, IO) + add(BlobShape, File) { |file, converter| ensure_open(file, converter) } + add(BlobShape, Tempfile) { |tmpfile, converter| ensure_open(tmpfile, converter) } + add(BlobShape, StringIO) + add(BlobShape, String) + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_validator.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_validator.rb new file mode 100644 index 0000000..b3cce6f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/param_validator.rb @@ -0,0 +1,237 @@ +# frozen_string_literal: true + +module Aws + # @api private + class ParamValidator + + include Seahorse::Model::Shapes + + EXPECTED_GOT = "expected %s to be %s, got value %s (class: %s) instead." + + # @param [Seahorse::Model::Shapes::ShapeRef] rules + # @param [Hash] params + # @return [void] + def self.validate!(rules, params) + new(rules).validate!(params) + end + + # @param [Seahorse::Model::Shapes::ShapeRef] rules + # @option options [Boolean] :validate_required (true) + def initialize(rules, options = {}) + @rules = rules || begin + shape = StructureShape.new + shape.struct_class = EmptyStructure + ShapeRef.new(shape: shape) + end + @validate_required = options[:validate_required] != false + @input = options[:input].nil? ? 
true : !!options[:input] + end + + # @param [Hash] params + # @return [void] + def validate!(params) + errors = [] + structure(@rules, params, errors, 'params') if @rules + raise ArgumentError, error_messages(errors) unless errors.empty? + end + + private + + def structure(ref, values, errors, context) + # ensure the value is hash like + return unless correct_type?(ref, values, errors, context) + + if ref.eventstream + # input eventstream is provided from event signals + values.each do |value| + # each event is structure type + case value[:message_type] + when 'event' + val = value.dup + val.delete(:message_type) + structure(ref.shape.member(val[:event_type]), val, errors, context) + when 'error' # Error is unmodeled + when 'exception' # Pending + raise Aws::Errors::EventStreamParserError.new( + ':exception event validation is not supported') + end + end + else + shape = ref.shape + + # ensure required members are present + if @validate_required + shape.required.each do |member_name| + input_eventstream = ref.shape.member(member_name).eventstream && @input + if values[member_name].nil? && !input_eventstream + param = "#{context}[#{member_name.inspect}]" + errors << "missing required parameter #{param}" + end + end + end + + if @validate_required && shape.union + if values.length > 1 + errors << "multiple values provided to union at #{context} - must contain exactly one of the supported types: #{shape.member_names.join(', ')}" + elsif values.length == 0 + errors << "No values provided to union at #{context} - must contain exactly one of the supported types: #{shape.member_names.join(', ')}" + end + end + + # validate non-nil members + values.each_pair do |name, value| + unless value.nil? + # :event_type is not modeled + # and also needed when construct body + next if name == :event_type + if shape.member?(name) + member_ref = shape.member(name) + shape(member_ref, value, errors, context + "[#{name.inspect}]") + else + errors << "unexpected value at #{context}[#{name.inspect}]" + end + end + end + + end + end + + def list(ref, values, errors, context) + # ensure the value is an array + unless values.is_a?(Array) + errors << expected_got(context, "an Array", values) + return + end + + # validate members + member_ref = ref.shape.member + values.each.with_index do |value, index| + shape(member_ref, value, errors, context + "[#{index}]") + end + end + + def map(ref, values, errors, context) + unless Hash === values + errors << expected_got(context, "a hash", values) + return + end + + key_ref = ref.shape.key + value_ref = ref.shape.value + + values.each do |key, value| + shape(key_ref, key, errors, "#{context} #{key.inspect} key") + shape(value_ref, value, errors, context + "[#{key.inspect}]") + end + end + + def document(ref, value, errors, context) + document_types = [Hash, Array, Numeric, String, TrueClass, FalseClass, NilClass] + unless document_types.any? 
{ |t| value.is_a?(t) } + errors << expected_got(context, "one of #{document_types.join(', ')}", value) + end + + # recursively validate types for aggregated types + case value + when Hash + value.each do |k, v| + document(ref, v, errors, context + "[#{k}]") + end + when Array + value.each do |v| + document(ref, v, errors, context) + end + end + + end + + def shape(ref, value, errors, context) + case ref.shape + when StructureShape then structure(ref, value, errors, context) + when ListShape then list(ref, value, errors, context) + when MapShape then map(ref, value, errors, context) + when DocumentShape then document(ref, value, errors, context) + when StringShape + unless value.is_a?(String) + errors << expected_got(context, "a String", value) + end + when IntegerShape + unless value.is_a?(Integer) + errors << expected_got(context, "an Integer", value) + end + when FloatShape + unless value.is_a?(Float) + errors << expected_got(context, "a Float", value) + end + when TimestampShape + unless value.is_a?(Time) + errors << expected_got(context, "a Time object", value) + end + when BooleanShape + unless [true, false].include?(value) + errors << expected_got(context, "true or false", value) + end + when BlobShape + unless value.is_a?(String) + if streaming_input?(ref) + unless io_like?(value, _require_size = false) + errors << expected_got( + context, + "a String or IO like object that supports read and rewind", + value + ) + end + elsif !io_like?(value, _require_size = true) + errors << expected_got( + context, + "a String or IO like object that supports read, rewind, and size", + value + ) + end + end + else + raise "unhandled shape type: #{ref.shape.class.name}" + end + end + + def correct_type?(ref, value, errors, context) + if ref.eventstream && @input + errors << "instead of providing value directly for eventstreams at input,"\ + " expected to use #signal events per stream" + return false + end + case value + when Hash then true + when ref.shape.struct_class then true + when Enumerator then ref.eventstream && value.respond_to?(:event_types) + else + errors << expected_got(context, "a hash", value) + false + end + end + + def io_like?(value, require_size = true) + value.respond_to?(:read) && value.respond_to?(:rewind) && + (!require_size || value.respond_to?(:size)) + end + + def streaming_input?(ref) + (ref["streaming"] || ref.shape["streaming"]) + end + + def error_messages(errors) + if errors.size == 1 + errors.first + else + prefix = "\n - " + "parameter validator found #{errors.size} errors:" + + prefix + errors.join(prefix) + end + end + + def expected_got(context, expected, got) + EXPECTED_GOT % [context, expected, got.inspect, got.class.name] + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/api_key.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/api_key.rb new file mode 100644 index 0000000..d459e24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/api_key.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module Aws + module Plugins + + # Provide support for `api_key` parameter for `api-gateway` protocol + # specific `api-gateway` protocol gems' user-agent + class ApiKey < Seahorse::Client::Plugin + + option(:api_key, + default: nil, + doc_type: 'String', + docstring: <<-DOCS) +When provided, `x-api-key` header will be injected with the value provided. 
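+A value passed as the `:api_key` request parameter on an individual call takes precedence over this configured default (see `OptionHandler` below).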
+ DOCS + + def add_handlers(handlers, config) + handlers.add(OptionHandler, step: :initialize) + handlers.add(ApiKeyHandler, step: :build, priority: 0) + end + + # @api private + class OptionHandler < Seahorse::Client::Handler + def call(context) + if context.operation.require_apikey + if context.params.is_a?(Hash) && context.params[:api_key] + api_key = context.params.delete(:api_key) + end + api_key = context.config.api_key if api_key.nil? + context[:api_key] = api_key + end + + @handler.call(context) + end + + end + + # @api private + class ApiKeyHandler < Seahorse::Client::Handler + + def call(context) + if context[:api_key] + apply_api_key(context) + end + @handler.call(context) + end + + private + + def apply_api_key(context) + context.http_request.headers['x-api-key'] = context[:api_key] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_authorizer_token.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_authorizer_token.rb new file mode 100644 index 0000000..9799a09 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_authorizer_token.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +module Aws + module Plugins + + # apply APIG custom authorizer token to + # operations with 'authtype' of 'custom' only + class APIGAuthorizerToken < Seahorse::Client::Plugin + + option(:authorizer_token, default: nil) + + def add_handlers(handlers, config) + handlers.add(AuthTokenHandler, step: :sign) + end + + # @api private + class AuthTokenHandler < Seahorse::Client::Handler + + def call(context) + if context.operation['authtype'] == 'custom' && + context.config.authorizer_token && + context.authorizer.placement[:location] == 'header' + + header = context.authorizer.placement[:name] + context.http_request.headers[header] = context.config.authorizer_token + end + @handler.call(context) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_credentials_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_credentials_configuration.rb new file mode 100644 index 0000000..35a95d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_credentials_configuration.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Plugins + # @api private + # Used for APIGateway generated SDKs credentials config + class APIGCredentialsConfiguration < Seahorse::Client::Plugin + + option(:access_key_id, doc_type: String, docstring: '') + + option(:secret_access_key, doc_type: String, docstring: '') + + option(:session_token, doc_type: String, docstring: '') + + option(:profile, doc_type: String, docstring: '') + + option(:credentials, + required: false, + doc_type: 'Aws::CredentialProvider', + docstring: <<-DOCS +AWS Credentials options is only required when your API uses +[AWS Signature Version 4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html), +more AWS Credentials Configuration Options are available [here](https://github.com/aws/aws-sdk-ruby#configuration). 
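+For example, static credentials can be supplied directly with `Aws::Credentials.new('access_key_id', 'secret_access_key')`.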
+ DOCS + ) do |config| + CredentialProviderChain.new(config).resolve + end + + option(:instance_profile_credentials_retries, 0) + + option(:instance_profile_credentials_timeout, 1) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_user_agent.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_user_agent.rb new file mode 100644 index 0000000..76fb04a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/apig_user_agent.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class APIGUserAgent < Seahorse::Client::Plugin + + option(:user_agent_suffix) + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + set_user_agent(context) + @handler.call(context) + end + + def set_user_agent(context) + ua = "aws-apig-ruby/#{CORE_GEM_VERSION}" + + begin + ua += " #{RUBY_ENGINE}/#{RUBY_VERSION}" + rescue + ua += " RUBY_ENGINE_NA/#{RUBY_VERSION}" + end + + ua += " #{RUBY_PLATFORM}" + ua += " #{context.config.user_agent_suffix}" if context.config.user_agent_suffix + + context.http_request.headers['User-Agent'] = ua.strip + end + + end + + handler(Handler) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/bearer_authorization.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/bearer_authorization.rb new file mode 100644 index 0000000..4de0a71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/bearer_authorization.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Plugins + # @api private + class BearerAuthorization < Seahorse::Client::Plugin + + option(:token_provider, + required: false, + doc_type: 'Aws::TokenProvider', + docstring: <<-DOCS +A Bearer Token Provider. This can be an instance of any one of the +following classes: + +* `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing + tokens. + +* `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an + access token generated from `aws login`. + +When `:token_provider` is not configured directly, the `Aws::TokenProviderChain` +will be used to search for tokens configured for your profile in shared configuration files. + DOCS + ) do |config| + if config.stub_responses + StaticTokenProvider.new('token') + else + TokenProviderChain.new(config).resolve + end + end + + + def add_handlers(handlers, cfg) + bearer_operations = + if cfg.api.metadata['signatureVersion'] == 'bearer' + # select operations where authtype is either not set or is bearer + cfg.api.operation_names.select do |o| + !cfg.api.operation(o)['authtype'] || cfg.api.operation(o)['authtype'] == 'bearer' + end + else # service is not bearer auth + # select only operations where authtype is explicitly bearer + cfg.api.operation_names.select do |o| + cfg.api.operation(o)['authtype'] == 'bearer' + end + end + handlers.add(Handler, step: :sign, operations: bearer_operations) + end + + class Handler < Seahorse::Client::Handler + def call(context) + if context.http_request.endpoint.scheme != 'https' + raise ArgumentError, 'Unable to use bearer authorization on non https endpoint.' + end + + token_provider = context.config.token_provider + if token_provider && token_provider.set? 
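+ # Attach the Authorization header only when the resolved provider holds a + # usable token; otherwise fail fast instead of sending an unauthenticated + # request.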
+ context.http_request.headers['Authorization'] = "Bearer #{token_provider.token.token}" + else + raise Errors::MissingBearerTokenError + end + @handler.call(context) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/checksum_algorithm.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/checksum_algorithm.rb new file mode 100644 index 0000000..da6f2aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/checksum_algorithm.rb @@ -0,0 +1,340 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class ChecksumAlgorithm < Seahorse::Client::Plugin + CHUNK_SIZE = 1 * 1024 * 1024 # one MB + + # determine the set of supported client side checksum algorithms + # CRC32c requires aws-crt (optional sdk dependency) for support + CLIENT_ALGORITHMS = begin + supported = %w[SHA256 SHA1 CRC32] + begin + require 'aws-crt' + supported << 'CRC32C' + rescue LoadError + end + supported + end.freeze + + # priority order of checksum algorithms to validate responses against + # Remove any algorithms not supported by client (ie, depending on CRT availability) + CHECKSUM_ALGORITHM_PRIORITIES = %w[CRC32C SHA1 CRC32 SHA256] & CLIENT_ALGORITHMS + + # byte size of checksums, used in computing the trailer length + CHECKSUM_SIZE = { + 'CRC32' => 16, + 'CRC32C' => 16, + 'SHA1' => 36, + 'SHA256' => 52 + } + + # Interface for computing digests on request/response bodies + # which may be files, strings or IO like objects + # Applies only to digest functions that produce 32 bit integer checksums + # (eg CRC32) + class Digest32 + + attr_reader :value + + # @param [Object] digest_fn + def initialize(digest_fn) + @digest_fn = digest_fn + @value = 0 + end + + def update(chunk) + @value = @digest_fn.call(chunk, @value) + end + + def base64digest + Base64.encode64([@value].pack('N')).chomp + end + end + + def add_handlers(handlers, _config) + handlers.add(OptionHandler, step: :initialize) + # priority set low to ensure checksum is computed AFTER the request is + # built but before it is signed + handlers.add(ChecksumHandler, priority: 15, step: :build) + end + + private + + def self.request_algorithm_selection(context) + return unless context.operation.http_checksum + + input_member = context.operation.http_checksum['requestAlgorithmMember'] + context.params[input_member.to_sym]&.upcase if input_member + end + + def self.request_validation_mode(context) + return unless context.operation.http_checksum + + input_member = context.operation.http_checksum['requestValidationModeMember'] + context.params[input_member.to_sym] if input_member + end + + def self.operation_response_algorithms(context) + return unless context.operation.http_checksum + + context.operation.http_checksum['responseAlgorithms'] + end + + + # @api private + class OptionHandler < Seahorse::Client::Handler + def call(context) + context[:http_checksum] ||= {} + + # validate request configuration + if (request_input = ChecksumAlgorithm.request_algorithm_selection(context)) + unless CLIENT_ALGORITHMS.include? request_input + if (request_input == 'CRC32C') + raise ArgumentError, "CRC32C requires crt support - install the aws-crt gem for support." + else + raise ArgumentError, "#{request_input} is not a supported checksum algorithm." 
+ end + end + end + + # validate response configuration + if (ChecksumAlgorithm.request_validation_mode(context)) + # Compute an ordered list as the union between priority supported and the + # operation's modeled response algorithms. + validation_list = CHECKSUM_ALGORITHM_PRIORITIES & + ChecksumAlgorithm.operation_response_algorithms(context) + context[:http_checksum][:validation_list] = validation_list + end + + @handler.call(context) + end + end + + # @api private + class ChecksumHandler < Seahorse::Client::Handler + + def call(context) + if should_calculate_request_checksum?(context) + request_algorithm_input = ChecksumAlgorithm.request_algorithm_selection(context) + context[:checksum_algorithms] = request_algorithm_input + + request_checksum_property = { + 'algorithm' => request_algorithm_input, + 'in' => checksum_request_in(context), + 'name' => "x-amz-checksum-#{request_algorithm_input.downcase}" + } + + calculate_request_checksum(context, request_checksum_property) + end + + if should_verify_response_checksum?(context) + add_verify_response_checksum_handlers(context) + end + + @handler.call(context) + end + + private + + def should_calculate_request_checksum?(context) + context.operation.http_checksum && + ChecksumAlgorithm.request_algorithm_selection(context) + end + + def should_verify_response_checksum?(context) + context[:http_checksum][:validation_list] && !context[:http_checksum][:validation_list].empty? + end + + def calculate_request_checksum(context, checksum_properties) + case checksum_properties['in'] + when 'header' + header_name = checksum_properties['name'] + body = context.http_request.body_contents + if body + context.http_request.headers[header_name] ||= + ChecksumAlgorithm.calculate_checksum(checksum_properties['algorithm'], body) + end + when 'trailer' + apply_request_trailer_checksum(context, checksum_properties) + end + end + + def apply_request_trailer_checksum(context, checksum_properties) + location_name = checksum_properties['name'] + + # set required headers + headers = context.http_request.headers + headers['Content-Encoding'] = 'aws-chunked' + headers['X-Amz-Content-Sha256'] = 'STREAMING-UNSIGNED-PAYLOAD-TRAILER' + headers['X-Amz-Trailer'] = location_name + + # We currently always compute the size in the modified body wrapper - allowing us + # to set the Content-Length header (set by content_length plugin). 
+ # This means we cannot use Transfer-Encoding=chunked + + if !context.http_request.body.respond_to?(:size) + raise Aws::Errors::ChecksumError, 'Could not determine length of the body' + end + headers['X-Amz-Decoded-Content-Length'] = context.http_request.body.size + + context.http_request.body = AwsChunkedTrailerDigestIO.new( + context.http_request.body, + checksum_properties['algorithm'], + location_name + ) + end + + # Add events to the http_response to verify the checksum as its read + # This prevents the body from being read multiple times + # verification is done only once a successful response has completed + def add_verify_response_checksum_handlers(context) + http_response = context.http_response + checksum_context = { } + http_response.on_headers do |_status, headers| + header_name, algorithm = response_header_to_verify(headers, context[:http_checksum][:validation_list]) + if header_name + expected = headers[header_name] + + unless context[:http_checksum][:skip_on_suffix] && /-[\d]+$/.match(expected) + checksum_context[:algorithm] = algorithm + checksum_context[:header_name] = header_name + checksum_context[:digest] = ChecksumAlgorithm.digest_for_algorithm(algorithm) + checksum_context[:expected] = expected + end + end + end + + http_response.on_data do |chunk| + checksum_context[:digest].update(chunk) if checksum_context[:digest] + end + + http_response.on_success do + if checksum_context[:digest] && + (computed = checksum_context[:digest].base64digest) + + if computed != checksum_context[:expected] + raise Aws::Errors::ChecksumError, + "Checksum validation failed on #{checksum_context[:header_name]} "\ + "computed: #{computed}, expected: #{checksum_context[:expected]}" + end + + context[:http_checksum][:validated] = checksum_context[:algorithm] + end + end + end + + # returns nil if no headers to verify + def response_header_to_verify(headers, validation_list) + validation_list.each do |algorithm| + header_name = "x-amz-checksum-#{algorithm}" + return [header_name, algorithm] if headers[header_name] + end + nil + end + + # determine where (header vs trailer) a request checksum should be added + def checksum_request_in(context) + if context.operation['authtype'].eql?('v4-unsigned-body') + 'trailer' + else + 'header' + end + end + + end + + def self.calculate_checksum(algorithm, body) + digest = ChecksumAlgorithm.digest_for_algorithm(algorithm) + if body.respond_to?(:read) + ChecksumAlgorithm.update_in_chunks(digest, body) + else + digest.update(body) + end + digest.base64digest + end + + def self.digest_for_algorithm(algorithm) + case algorithm + when 'CRC32' + Digest32.new(Zlib.method(:crc32)) + when 'CRC32C' + # this will only be used if input algorithm is CRC32C AND client supports it (crt available) + Digest32.new(Aws::Crt::Checksums.method(:crc32c)) + when 'SHA1' + Digest::SHA1.new + when 'SHA256' + Digest::SHA256.new + end + end + + # The trailer size (in bytes) is the overhead + the trailer name + + # the length of the base64 encoded checksum + def self.trailer_length(algorithm, location_name) + CHECKSUM_SIZE[algorithm] + location_name.size + end + + def self.update_in_chunks(digest, io) + loop do + chunk = io.read(CHUNK_SIZE) + break unless chunk + digest.update(chunk) + end + io.rewind + end + + # Wrapper for request body that implements application-layer + # chunking with Digest computed on chunks + added as a trailer + class AwsChunkedTrailerDigestIO + CHUNK_SIZE = 16384 + + def initialize(io, algorithm, location_name) + @io = io + @location_name = location_name + 
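+ # The wrapped IO is re-emitted as aws-chunked frames; the digest is updated + # as chunks stream through and is appended as a trailer named by + # @location_name after the final zero-length chunk.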
@algorithm = algorithm + @digest = ChecksumAlgorithm.digest_for_algorithm(algorithm) + @trailer_io = nil + end + + # the size of the application layer aws-chunked + trailer body + def size + # compute the number of chunks + # a full chunk has 4 + 4 bytes overhead, a partial chunk is len.to_s(16).size + 4 + orig_body_size = @io.size + n_full_chunks = orig_body_size / CHUNK_SIZE + partial_bytes = orig_body_size % CHUNK_SIZE + chunked_body_size = n_full_chunks * (CHUNK_SIZE + 8) + chunked_body_size += partial_bytes.to_s(16).size + partial_bytes + 4 unless partial_bytes.zero? + trailer_size = ChecksumAlgorithm.trailer_length(@algorithm, @location_name) + chunked_body_size + trailer_size + end + + def rewind + @io.rewind + end + + def read(length, buf) + # account for possible leftover bytes at the end, if we have trailer bytes, send them + if @trailer_io + return @trailer_io.read(length, buf) + end + + chunk = @io.read(length) + if chunk + @digest.update(chunk) + application_chunked = "#{chunk.bytesize.to_s(16)}\r\n#{chunk}\r\n" + return StringIO.new(application_chunked).read(application_chunked.size, buf) + else + trailers = {} + trailers[@location_name] = @digest.base64digest + trailers = trailers.map { |k,v| "#{k}:#{v}"}.join("\r\n") + @trailer_io = StringIO.new("0\r\n#{trailers}\r\n\r\n") + chunk = @trailer_io.read(length, buf) + end + chunk + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_plugin.rb new file mode 100644 index 0000000..66df6ea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_plugin.rb @@ -0,0 +1,283 @@ +# frozen_string_literal: true + +require 'date' +require_relative 'retries/error_inspector' + +module Aws + module Plugins + class ClientMetricsPlugin < Seahorse::Client::Plugin + + option(:client_side_monitoring, + default: false, + doc_type: 'Boolean', + docstring: <<-DOCS) do |cfg| +When `true`, client-side metrics will be collected for all API requests from +this client. + DOCS + resolve_client_side_monitoring(cfg) + end + + option(:client_side_monitoring_port, + default: 31000, + doc_type: Integer, + docstring: <<-DOCS) do |cfg| +Required for publishing client metrics. The port that the client side monitoring +agent is running on, where client metrics will be published via UDP. + DOCS + resolve_client_side_monitoring_port(cfg) + end + + option(:client_side_monitoring_host, + default: "127.0.0.1", + doc_type: String, + docstring: <<-DOCS) do |cfg| +Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client +side monitoring agent is running on, where client metrics will be published via UDP. + DOCS + resolve_client_side_monitoring_host(cfg) + end + + option(:client_side_monitoring_publisher, + default: ClientSideMonitoring::Publisher, + doc_type: Aws::ClientSideMonitoring::Publisher, + docstring: <<-DOCS) do |cfg| +Allows you to provide a custom client-side monitoring publisher class. By default, +will use the Client Side Monitoring Agent Publisher. + DOCS + resolve_publisher(cfg) + end + + option(:client_side_monitoring_client_id, + default: "", + doc_type: String, + docstring: <<-DOCS) do |cfg| +Allows you to provide an identifier for this client which will be attached to +all generated client side metrics. Defaults to an empty string. 
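+The value may also be sourced from the `AWS_CSM_CLIENT_ID` environment variable or from the shared configuration file (see `resolve_client_id` below).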
+ DOCS + resolve_client_id(cfg) + end + + def add_handlers(handlers, config) + if config.client_side_monitoring && config.client_side_monitoring_port + handlers.add(Handler, step: :initialize) + publisher = config.client_side_monitoring_publisher + publisher.agent_port = config.client_side_monitoring_port + publisher.agent_host = config.client_side_monitoring_host + end + end + + private + def self.resolve_publisher(cfg) + ClientSideMonitoring::Publisher.new + end + + def self.resolve_client_side_monitoring_port(cfg) + env_source = ENV["AWS_CSM_PORT"] + env_source = nil if env_source == "" + cfg_source = Aws.shared_config.csm_port(profile: cfg.profile) + if env_source + env_source.to_i + elsif cfg_source + cfg_source.to_i + else + 31000 + end + end + + def self.resolve_client_side_monitoring_host(cfg) + env_source = ENV["AWS_CSM_HOST"] + env_source = nil if env_source == "" + cfg_source = Aws.shared_config.csm_host(profile: cfg.profile) + if env_source + env_source + elsif cfg_source + cfg_source + else + "127.0.0.1" + end + end + + def self.resolve_client_side_monitoring(cfg) + env_source = ENV["AWS_CSM_ENABLED"] + env_source = nil if env_source == "" + if env_source.is_a?(String) && (env_source.downcase == "false" || env_source.downcase == "f") + env_source = false + end + cfg_source = Aws.shared_config.csm_enabled(profile: cfg.profile) + if env_source || cfg_source + true + else + false + end + end + + def self.resolve_client_id(cfg) + default = "" + env_source = ENV["AWS_CSM_CLIENT_ID"] + env_source = nil if env_source == "" + cfg_source = Aws.shared_config.csm_client_id(profile: cfg.profile) + env_source || cfg_source || default + end + + class Handler < Seahorse::Client::Handler + def call(context) + publisher = context.config.client_side_monitoring_publisher + service_id = context.config.api.metadata["serviceId"] + # serviceId not present in all versions, need a fallback + service_id ||= _calculate_service_id(context) + + request_metrics = ClientSideMonitoring::RequestMetrics.new( + service: service_id, + operation: context.operation.name, + client_id: context.config.client_side_monitoring_client_id, + region: context.config.region, + timestamp: DateTime.now.strftime('%Q').to_i, + ) + context.metadata[:client_metrics] = request_metrics + start_time = Aws::Util.monotonic_milliseconds + final_error_retryable = false + final_aws_exception = nil + final_aws_exception_message = nil + final_sdk_exception = nil + final_sdk_exception_message = nil + begin + @handler.call(context) + rescue StandardError => e + # Handle SDK Exceptions + inspector = Retries::ErrorInspector.new( + e, + context.http_response.status_code + ) + if inspector.retryable?(context) + final_error_retryable = true + end + if request_metrics.api_call_attempts.empty? + attempt = request_metrics.build_call_attempt + attempt.sdk_exception = e.class.to_s + attempt.sdk_exception_msg = e.message + request_metrics.add_call_attempt(attempt) + elsif request_metrics.api_call_attempts.last.aws_exception.nil? 
+ # Handle exceptions during response handlers + attempt = request_metrics.api_call_attempts.last + attempt.sdk_exception = e.class.to_s + attempt.sdk_exception_msg = e.message + elsif !e.class.to_s.match(request_metrics.api_call_attempts.last.aws_exception) + # Handle response handling exceptions that happened in addition to + # an AWS exception + attempt = request_metrics.api_call_attempts.last + attempt.sdk_exception = e.class.to_s + attempt.sdk_exception_msg = e.message + end # Else we don't have an SDK exception and are done. + final_attempt = request_metrics.api_call_attempts.last + final_aws_exception = final_attempt.aws_exception + final_aws_exception_message = final_attempt.aws_exception_msg + final_sdk_exception = final_attempt.sdk_exception + final_sdk_exception_message = final_attempt.sdk_exception_msg + raise e + ensure + end_time = Aws::Util.monotonic_milliseconds + complete_opts = { + latency: end_time - start_time, + attempt_count: context.retries + 1, + user_agent: context.http_request.headers["user-agent"], + final_error_retryable: final_error_retryable, + final_http_status_code: context.http_response.status_code, + final_aws_exception: final_aws_exception, + final_aws_exception_message: final_aws_exception_message, + final_sdk_exception: final_sdk_exception, + final_sdk_exception_message: final_sdk_exception_message + } + if context.metadata[:redirect_region] + complete_opts[:region] = context.metadata[:redirect_region] + end + request_metrics.api_call.complete(complete_opts) + # Report the metrics by passing the complete RequestMetrics object + if publisher + publisher.publish(request_metrics) + end # Else we drop all this on the floor. + end + end + + private + def _calculate_service_id(context) + class_name = context.client.class.to_s.match(/(.+)::Client/)[1] + class_name.sub!(/^Aws::/, '') + _fallback_service_id(class_name) + end + + def _fallback_service_id(id) + # Need hard-coded exceptions since information needed to + # reverse-engineer serviceId is not present in older versions. + # This list should not need to grow. 
+ exceptions = { + "ACMPCA" => "ACM PCA", + "APIGateway" => "API Gateway", + "AlexaForBusiness" => "Alexa For Business", + "ApplicationAutoScaling" => "Application Auto Scaling", + "ApplicationDiscoveryService" => "Application Discovery Service", + "AutoScaling" => "Auto Scaling", + "AutoScalingPlans" => "Auto Scaling Plans", + "CloudHSMV2" => "CloudHSM V2", + "CloudSearchDomain" => "CloudSearch Domain", + "CloudWatchEvents" => "CloudWatch Events", + "CloudWatchLogs" => "CloudWatch Logs", + "CognitoIdentity" => "Cognito Identity", + "CognitoIdentityProvider" => "Cognito Identity Provider", + "CognitoSync" => "Cognito Sync", + "ConfigService" => "Config Service", + "CostExplorer" => "Cost Explorer", + "CostandUsageReportService" => "Cost and Usage Report Service", + "DataPipeline" => "Data Pipeline", + "DatabaseMigrationService" => "Database Migration Service", + "DeviceFarm" => "Device Farm", + "DirectConnect" => "Direct Connect", + "DirectoryService" => "Directory Service", + "DynamoDBStreams" => "DynamoDB Streams", + "ElasticBeanstalk" => "Elastic Beanstalk", + "ElasticLoadBalancing" => "Elastic Load Balancing", + "ElasticLoadBalancingV2" => "Elastic Load Balancing v2", + "ElasticTranscoder" => "Elastic Transcoder", + "ElasticsearchService" => "Elasticsearch Service", + "IoTDataPlane" => "IoT Data Plane", + "IoTJobsDataPlane" => "IoT Jobs Data Plane", + "IoT1ClickDevicesService" => "IoT 1Click Devices Service", + "IoT1ClickProjects" => "IoT 1Click Projects", + "KinesisAnalytics" => "Kinesis Analytics", + "KinesisVideo" => "Kinesis Video", + "KinesisVideoArchivedMedia" => "Kinesis Video Archived Media", + "KinesisVideoMedia" => "Kinesis Video Media", + "LambdaPreview" => "Lambda", + "Lex" => "Lex Runtime Service", + "LexModelBuildingService" => "Lex Model Building Service", + "Lightsail" => "Lightsail", + "MQ" => "mq", + "MachineLearning" => "Machine Learning", + "MarketplaceCommerceAnalytics" => "Marketplace Commerce Analytics", + "MarketplaceEntitlementService" => "Marketplace Entitlement Service", + "MarketplaceMetering" => "Marketplace Metering", + "MediaStoreData" => "MediaStore Data", + "MigrationHub" => "Migration Hub", + "ResourceGroups" => "Resource Groups", + "ResourceGroupsTaggingAPI" => "Resource Groups Tagging API", + "Route53" => "Route 53", + "Route53Domains" => "Route 53 Domains", + "SecretsManager" => "Secrets Manager", + "SageMakerRuntime" => "SageMaker Runtime", + "ServiceCatalog" => "Service Catalog", + "ServiceDiscovery" => "ServiceDiscovery", + "Signer" => "signer", + "States" => "SFN", + "StorageGateway" => "Storage Gateway", + "TranscribeService" => "Transcribe Service", + "WAFRegional" => "WAF Regional", + } + if exceptions[id] + exceptions[id] + else + id + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_send_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_send_plugin.rb new file mode 100644 index 0000000..87cedc2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/client_metrics_send_plugin.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +require 'date' + +module Aws + module Plugins + class ClientMetricsSendPlugin < Seahorse::Client::Plugin + + def add_handlers(handlers, config) + if config.client_side_monitoring && config.client_side_monitoring_port + # AttemptHandler comes just before we would retry an error. + # Or before we would follow redirects. 
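+ # Within a handler step, higher priority runs earlier, so
+ # AttemptHandler (39) wraps LatencyHandler (0): the latency timer is
+ # innermost and measures only the transmission of each attempt.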
+ handlers.add(AttemptHandler, step: :sign, priority: 39) + # LatencyHandler is as close to sending as possible. + handlers.add(LatencyHandler, step: :sign, priority: 0) + end + end + + class LatencyHandler < Seahorse::Client::Handler + def call(context) + start_time = Aws::Util.monotonic_milliseconds + resp = @handler.call(context) + end_time = Aws::Util.monotonic_milliseconds + latency = end_time - start_time + context.metadata[:current_call_attempt].request_latency = latency + resp + end + end + + class AttemptHandler < Seahorse::Client::Handler + def call(context) + request_metrics = context.metadata[:client_metrics] + attempt_opts = { + timestamp: DateTime.now.strftime('%Q').to_i, + fqdn: context.http_request.endpoint.host, + region: context.config.region, + user_agent: context.http_request.headers["user-agent"], + } + # It will generally cause an error, but it is semantically valid for + # credentials to not exist. + if context.config.credentials + attempt_opts[:access_key] = + context.config.credentials.credentials.access_key_id + attempt_opts[:session_token] = + context.config.credentials.credentials.session_token + end + call_attempt = request_metrics.build_call_attempt(attempt_opts) + context.metadata[:current_call_attempt] = call_attempt + + resp = @handler.call(context) + if context.metadata[:redirect_region] + call_attempt.region = context.metadata[:redirect_region] + end + headers = context.http_response.headers + if headers.include?("x-amz-id-2") + call_attempt.x_amz_id_2 = headers["x-amz-id-2"] + end + if headers.include?("x-amz-request-id") + call_attempt.x_amz_request_id = headers["x-amz-request-id"] + end + if headers.include?("x-amzn-request-id") + call_attempt.x_amzn_request_id = headers["x-amzn-request-id"] + end + call_attempt.http_status_code = context.http_response.status_code + if e = resp.error + e_name = _extract_error_name(e) + e_msg = e.message + call_attempt.aws_exception = "#{e_name}" + call_attempt.aws_exception_msg = "#{e_msg}" + end + request_metrics.add_call_attempt(call_attempt) + resp + end + + private + def _extract_error_name(error) + if error.is_a?(Aws::Errors::ServiceError) + error.class.code + else + error.class.name.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/credentials_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/credentials_configuration.rb new file mode 100644 index 0000000..b532731 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/credentials_configuration.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Plugins + # @api private + class CredentialsConfiguration < Seahorse::Client::Plugin + + option(:access_key_id, doc_type: String, docstring: '') + + option(:secret_access_key, doc_type: String, docstring: '') + + option(:session_token, doc_type: String, docstring: '') + + option(:profile, + doc_default: 'default', + doc_type: String, + docstring: <<-DOCS) +Used when loading credentials from the shared credentials file +at HOME/.aws/credentials. When not specified, 'default' is used. + DOCS + + option(:credentials, + required: true, + doc_type: 'Aws::CredentialProvider', + docstring: <<-DOCS +Your AWS credentials. This can be an instance of any one of the +following classes: + +* `Aws::Credentials` - Used for configuring static, non-refreshing + credentials. 
+
+* `Aws::SharedCredentials` - Used for loading static credentials from a
+ shared file, such as `~/.aws/config`.
+
+* `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+
+* `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
+ assume a role after providing credentials via the web.
+
+* `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
+ access token generated from `aws login`.
+
+* `Aws::ProcessCredentials` - Used for loading credentials from a
+ process that outputs to stdout.
+
+* `Aws::InstanceProfileCredentials` - Used for loading credentials
+ from an EC2 IMDS on an EC2 instance.
+
+* `Aws::ECSCredentials` - Used for loading credentials from
+ instances running in ECS.
+
+* `Aws::CognitoIdentityCredentials` - Used for loading credentials
+ from the Cognito Identity service.
+
+When `:credentials` are not configured directly, the following
+locations will be searched for credentials:
+
+* `Aws.config[:credentials]`
+* The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+* ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+* `~/.aws/credentials`
+* `~/.aws/config`
+* EC2/ECS IMDS instance profile - When used by default, the timeouts
+ are very aggressive. Construct and pass an instance of
+ `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
+ enable retries and extended timeouts. Instance profile credential
+ fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
+ to true.
+ DOCS
+ ) do |config|
+ CredentialProviderChain.new(config).resolve
+ end
+
+ option(:instance_profile_credentials_retries, 0)
+
+ option(:instance_profile_credentials_timeout, 1)
+
+ option(:token_provider,
+ required: false,
+ doc_type: 'Aws::TokenProvider',
+ docstring: <<-DOCS
+A Bearer Token Provider. This can be an instance of any one of the
+following classes:
+
+* `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
+ tokens.
+
+* `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
+ access token generated from `aws login`.
+
+When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
+will be used to search for tokens configured for your profile in shared configuration files.
+ DOCS
+ ) do |config|
+ if config.stub_responses
+ StaticTokenProvider.new('token')
+ else
+ TokenProviderChain.new(config).resolve
+ end
+ end
+
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/defaults_mode.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/defaults_mode.rb
new file mode 100644
index 0000000..9f4f5a9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/defaults_mode.rb
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+module Aws
+ # @api private
+ module Plugins
+ # @api private
+ class DefaultsMode < Seahorse::Client::Plugin
+
+ option(:defaults_mode,
+ default: 'legacy',
+ doc_type: String,
+ docstring: <<-DOCS
+See {Aws::DefaultsModeConfiguration} for a list of the
+accepted modes and the configuration defaults that are included.
+ DOCS + ) do |cfg| + resolve_defaults_mode(cfg) + end + + option(:defaults_mode_config_resolver, + doc_type: 'Aws::DefaultsModeConfigResolver') do |cfg| + Aws::DefaultsModeConfigResolver.new( + Aws::DefaultsModeConfiguration::SDK_DEFAULT_CONFIGURATION, cfg) + end + + class << self + private + + def resolve_defaults_mode(cfg) + value = ENV['AWS_DEFAULTS_MODE'] + value ||= Aws.shared_config.defaults_mode( + profile: cfg.profile + ) + value&.downcase || "legacy" + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_discovery.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_discovery.rb new file mode 100644 index 0000000..4ce6b09 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_discovery.rb @@ -0,0 +1,172 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class EndpointDiscovery < Seahorse::Client::Plugin + + option(:endpoint_discovery, + doc_default: Proc.new { |options| options[:require_endpoint_discovery] }, + doc_type: 'Boolean', + docstring: <<-DOCS) do |cfg| +When set to `true`, endpoint discovery will be enabled for operations when available. + DOCS + resolve_endpoint_discovery(cfg) + end + + option(:endpoint_cache_max_entries, + default: 1000, + doc_type: Integer, + docstring: <<-DOCS +Used for the maximum size limit of the LRU cache storing endpoints data +for endpoint discovery enabled operations. Defaults to 1000. + DOCS + ) + + option(:endpoint_cache_max_threads, + default: 10, + doc_type: Integer, + docstring: <<-DOCS +Used for the maximum threads in use for polling endpoints to be cached, defaults to 10. + DOCS + ) + + option(:endpoint_cache_poll_interval, + default: 60, + doc_type: Integer, + docstring: <<-DOCS +When :endpoint_discovery and :active_endpoint_cache is enabled, +Use this option to config the time interval in seconds for making +requests fetching endpoints information. Defaults to 60 sec. + DOCS + ) + + option(:endpoint_cache) do |cfg| + Aws::EndpointCache.new( + max_entries: cfg.endpoint_cache_max_entries, + max_threads: cfg.endpoint_cache_max_threads + ) + end + + option(:active_endpoint_cache, + default: false, + doc_type: 'Boolean', + docstring: <<-DOCS +When set to `true`, a thread polling for endpoints will be running in +the background every 60 secs (default). Defaults to `false`. 
+ DOCS
+ )
+
+ def add_handlers(handlers, config)
+ handlers.add(Handler, priority: 90) if config.regional_endpoint
+ end
+
+ class Handler < Seahorse::Client::Handler
+
+ def call(context)
+ if context.operation.endpoint_operation
+ context.http_request.headers['x-amz-api-version'] = context.config.api.version
+ _apply_endpoint_discovery_user_agent(context)
+ elsif discovery_cfg = context.operation.endpoint_discovery
+ endpoint = _discover_endpoint(
+ context,
+ Aws::Util.str_2_bool(discovery_cfg["required"])
+ )
+ if endpoint
+ context.http_request.endpoint = _valid_uri(endpoint.address)
+ # Skips dynamic endpoint usage, use this endpoint instead
+ context[:discovered_endpoint] = true
+ end
+ if endpoint || context.config.endpoint_discovery
+ _apply_endpoint_discovery_user_agent(context)
+ end
+ end
+ @handler.call(context)
+ end
+
+ private
+
+ def _valid_uri(address)
+ # returned address can be missing scheme
+ if address.start_with?('http')
+ URI.parse(address)
+ else
+ URI.parse("https://" + address)
+ end
+ end
+
+ def _apply_endpoint_discovery_user_agent(ctx)
+ if ctx.config.user_agent_suffix.nil?
+ ctx.config.user_agent_suffix = "endpoint-discovery"
+ elsif !ctx.config.user_agent_suffix.include? "endpoint-discovery"
+ ctx.config.user_agent_suffix += "endpoint-discovery"
+ end
+ end
+
+ def _discover_endpoint(ctx, required)
+ cache = ctx.config.endpoint_cache
+ key = cache.extract_key(ctx)
+
+ if required
+ unless ctx.config.endpoint_discovery
+ raise ArgumentError, "Operation #{ctx.operation.name} requires "\
+ 'endpoint_discovery to be enabled.'
+ end
+ # required for the operation
+ unless cache.key?(key)
+ cache.update(key, ctx)
+ end
+ endpoint = cache[key]
+ # hard fail if endpoint is not discovered
+ raise Aws::Errors::EndpointDiscoveryError.new unless endpoint
+ endpoint
+ elsif ctx.config.endpoint_discovery
+ # not required for the operation
+ # but enabled
+ if cache.key?(key)
+ cache[key]
+ elsif ctx.config.active_endpoint_cache
+ # enabled active cache pull
+ interval = ctx.config.endpoint_cache_poll_interval
+ if key.include?('_')
+ # identifier related, kill the previous polling thread by key
+ # because endpoint req params might be changed
+ cache.delete_polling_thread(key)
+ end
+
+ # start a thread for polling endpoints when none exists
+ unless cache.threads_key?(key)
+ thread = Thread.new do
+ while !cache.key?(key) do
+ cache.update(key, ctx)
+ sleep(interval)
+ end
+ end
+ cache.update_polling_pool(key, thread)
+ end
+
+ cache[key]
+ else
+ # disabled active cache pull
+ # attempt, but fail soft
+ cache.update(key, ctx)
+ cache[key]
+ end
+ end
+ end
+
+ end
+
+ private
+
+ def self.resolve_endpoint_discovery(cfg)
+ env = ENV['AWS_ENABLE_ENDPOINT_DISCOVERY']
+ default = cfg.api.require_endpoint_discovery
+ shared_cfg = Aws.shared_config.endpoint_discovery_enabled(profile: cfg.profile)
+ resolved = Aws::Util.str_2_bool(env) || Aws::Util.str_2_bool(shared_cfg)
+ env.nil? && shared_cfg.nil? ?
default : !!resolved + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_pattern.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_pattern.rb new file mode 100644 index 0000000..4d16f1b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/endpoint_pattern.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class EndpointPattern < Seahorse::Client::Plugin + + option(:disable_host_prefix_injection, + default: false, + doc_type: 'Boolean', + docstring: <<-DOCS +Set to true to disable SDK automatically adding host prefix +to default service endpoint when available. + DOCS + ) + + def add_handlers(handlers, config) + handlers.add(Handler, priority: 10) + end + + class Handler < Seahorse::Client::Handler + + def call(context) + if !context.config.disable_host_prefix_injection + endpoint_trait = context.operation.endpoint_pattern + if endpoint_trait && !endpoint_trait.empty? + _apply_endpoint_trait(context, endpoint_trait) + end + end + @handler.call(context) + end + + private + + def _apply_endpoint_trait(context, trait) + # currently only support host pattern + ori_host = context.http_request.endpoint.host + if pattern = trait['hostPrefix'] + host_prefix = pattern.gsub(/\{.+?\}/) do |label| + label = label.delete("{}") + _replace_label_value( + ori_host, label, context.operation.input, context.params) + end + context.http_request.endpoint.host = host_prefix + context.http_request.endpoint.host + end + end + + def _replace_label_value(ori, label, input_ref, params) + name = nil + input_ref.shape.members.each do |m_name, ref| + if ref['hostLabel'] && ref['hostLabelName'] == label + name = m_name + end + end + if name.nil? || params[name].nil? + raise Errors::MissingEndpointHostLabelValue.new(name) + end + params[name] + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/event_stream_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/event_stream_configuration.rb new file mode 100644 index 0000000..e9b689d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/event_stream_configuration.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +module Aws + module Plugins + + class EventStreamConfiguration < Seahorse::Client::Plugin + + option(:event_stream_handler, + default: nil, + doc_type: 'Proc', + docstring: <<-DOCS) +When an EventStream or Proc object is provided, it will be used as callback for each chunk of event stream response received along the way. + DOCS + + option(:input_event_stream_handler, + default: nil, + doc_type: 'Proc', + docstring: <<-DOCS) +When an EventStream or Proc object is provided, it can be used for sending events for the event stream. + DOCS + + option(:output_event_stream_handler, + default: nil, + doc_type: 'Proc', + docstring: <<-DOCS) +When an EventStream or Proc object is provided, it will be used as callback for each chunk of event stream response received along the way. 
+ DOCS + + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/global_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/global_configuration.rb new file mode 100644 index 0000000..feeed59 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/global_configuration.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require 'set' + +module Aws + module Plugins + + # This plugin provides the ability to provide global configuration for + # all AWS classes or specific ones. + # + # ## Global AWS configuration + # + # You can specify global configuration defaults via `Aws.config` + # + # Aws.config[:region] = 'us-west-2' + # + # Options applied to `Aws.config` are merged with constructed + # service interfaces. + # + # # uses the global configuration + # Aws::EC2.new.config.region #=> 'us-west-2' + # + # # constructor args have priority over global configuration + # Aws::EC2.new(region: 'us-east-1').config.region #=> 'us-east-1' + # + # ## Service Specific Global Configuration + # + # Some services have very specific configuration options that are not + # shared by other services. + # + # # oops, this option is only recognized by Aws::S3 + # Aws.config[:force_path_style] = true + # Aws::EC2.new + # #=> raises ArgumentError: invalid configuration option `:force_path_style' + # + # To avoid this issue, you can nest service specific options + # + # Aws.config[:s3] = { force_path_style: true } + # + # Aws::EC2.new # no error this time + # Aws::S3.new.config.force_path_style #=> true + # + # @api private + class GlobalConfiguration < Seahorse::Client::Plugin + + @identifiers = Set.new() + + # @api private + def before_initialize(client_class, options) + # apply service specific defaults before the global aws defaults + apply_service_defaults(client_class, options) + apply_aws_defaults(client_class, options) + end + + private + + def apply_service_defaults(client_class, options) + if defaults = Aws.config[client_class.identifier] + defaults.each do |option_name, default| + options[option_name] = default unless options.key?(option_name) + end + end + end + + def apply_aws_defaults(client_class, options) + Aws.config.each do |option_name, default| + next if self.class.identifiers.include?(option_name) + next if options.key?(option_name) + options[option_name] = default + end + end + + class << self + + # Registers an additional service identifier. 
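+ # As a sketch: each generated service gem registers itself roughly as
+ # `Aws::Plugins::GlobalConfiguration.add_identifier(:s3)`, which is
+ # what makes nested defaults like `Aws.config[:s3] = {...}` resolvable.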
+ # @api private + def add_identifier(identifier) + @identifiers << identifier + end + + # @return [Set] + # @api private + def identifiers + @identifiers + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/helpful_socket_errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/helpful_socket_errors.rb new file mode 100644 index 0000000..ca21ce9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/helpful_socket_errors.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class HelpfulSocketErrors < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + # Wrap `SocketError` errors with `Aws::Errors::NoSuchEndpointError` + def call(context) + response = @handler.call(context) + response.context.http_response.on_error do |error| + if socket_endpoint_error?(error) + response.error = no_such_endpoint_error(context, error) + end + end + response + end + + private + + def socket_endpoint_error?(error) + Seahorse::Client::NetworkingError === error && + SocketError === error.original_error && + error.original_error.message.match(/failed to open tcp connection/i) && + error.original_error.message.match(/getaddrinfo: nodename nor servname provided, or not known/i) + end + + def no_such_endpoint_error(context, error) + Errors::NoSuchEndpointError.new({ + context: context, + original_error: error.original_error, + }) + end + + end + + handle(Handler, step: :sign) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/http_checksum.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/http_checksum.rb new file mode 100644 index 0000000..1ee9995 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/http_checksum.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require 'openssl' + +module Aws + module Plugins + # @api private + class HttpChecksum < Seahorse::Client::Plugin + # @api private + class Handler < Seahorse::Client::Handler + CHUNK_SIZE = 1 * 1024 * 1024 # one MB + + def call(context) + if checksum_required?(context) && + !context[:checksum_algorithms] # skip in favor of flexible checksum + body = context.http_request.body + context.http_request.headers['Content-Md5'] ||= md5(body) + end + @handler.call(context) + end + + private + + def checksum_required?(context) + context.operation.http_checksum_required || + (context.operation.http_checksum && + context.operation.http_checksum['requestChecksumRequired']) + end + + # @param [File, Tempfile, IO#read, String] value + # @return [String] + def md5(value) + if (value.is_a?(File) || value.is_a?(Tempfile)) && + !value.path.nil? 
&& File.exist?(value.path) + OpenSSL::Digest::MD5.file(value).base64digest + elsif value.respond_to?(:read) + md5 = OpenSSL::Digest::MD5.new + update_in_chunks(md5, value) + md5.base64digest + else + OpenSSL::Digest::MD5.digest(value).base64digest + end + end + + def update_in_chunks(digest, io) + loop do + chunk = io.read(CHUNK_SIZE) + break unless chunk + digest.update(chunk) + end + io.rewind + end + + end + + def add_handlers(handlers, _config) + # priority set low to ensure checksum is computed AFTER the request is + # built but before it is signed + handlers.add(Handler, priority: 10, step: :build) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/idempotency_token.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/idempotency_token.rb new file mode 100644 index 0000000..34dfb3a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/idempotency_token.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require 'securerandom' + +module Aws + module Plugins + + # Provides support for auto filling operation parameters + # that enabled with `idempotencyToken` trait with random UUID v4 + # when no value is provided + # @api private + class IdempotencyToken < Seahorse::Client::Plugin + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + auto_fill(context.params, context.operation.input) + @handler.call(context) + end + + private + + def auto_fill(params, ref) + ref.shape.members.each do |name, member_ref| + if member_ref['idempotencyToken'] + params[name] ||= SecureRandom.uuid + end + end + end + + end + + handler(Handler, step: :initialize) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/invocation_id.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/invocation_id.rb new file mode 100644 index 0000000..18ac053 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/invocation_id.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require 'securerandom' + +module Aws + module Plugins + + # @api private + class InvocationId < Seahorse::Client::Plugin + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + apply_invocation_id(context) + @handler.call(context) + end + + private + + def apply_invocation_id(context) + context.http_request.headers['amz-sdk-invocation-id'] = SecureRandom.uuid + if context[:input_event_emitter] + # only used for eventstreaming at input + context.http_request.headers['x-amz-content-sha256'] = 'STREAMING-AWS4-HMAC-SHA256-EVENTS' + end + end + + end + + handler(Handler, step: :initialize) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/jsonvalue_converter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/jsonvalue_converter.rb new file mode 100644 index 0000000..d821505 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/jsonvalue_converter.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module Aws + module Plugins + + # Converts input value to JSON Syntax for members with jsonvalue trait + class JsonvalueConverter < Seahorse::Client::Plugin + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + context.operation.input.shape.members.each do |m, ref| + 
convert_jsonvalue(m, ref, context.params, 'params') + end + @handler.call(context) + end + + def convert_jsonvalue(m, ref, params, context) + return if params.nil? || !params.key?(m) + + if ref['jsonvalue'] + params[m] = serialize_jsonvalue(params[m], "#{context}[#{m}]") + else + case ref.shape + when Seahorse::Model::Shapes::StructureShape + ref.shape.members.each do |member_m, ref| + convert_jsonvalue(member_m, ref, params[m], "#{context}[#{m}]") + end + when Seahorse::Model::Shapes::ListShape + if ref.shape.member['jsonvalue'] + params[m] = params[m].each_with_index.map do |v, i| + serialize_jsonvalue(v, "#{context}[#{m}][#{i}]") + end + end + when Seahorse::Model::Shapes::MapShape + if ref.shape.value['jsonvalue'] + params[m].each do |k, v| + params[m][k] = serialize_jsonvalue(v, "#{context}[#{m}][#{k}]") + end + end + end + end + end + + def serialize_jsonvalue(v, context) + unless v.respond_to?(:to_json) + raise ArgumentError, "The value of #{context} is not JSON serializable." + end + v.to_json + end + + end + + handler(Handler, step: :initialize) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/logging.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/logging.rb new file mode 100644 index 0000000..1680232 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/logging.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @see Log::Formatter + # @api private + class Logging < Seahorse::Client::Plugin + + option(:logger, + doc_type: 'Logger', + docstring: <<-DOCS +The Logger instance to send log messages to. If this option +is not set, logging will be disabled. + DOCS + ) + + option(:log_level, + default: :info, + doc_type: Symbol, + docstring: 'The log level to send messages to the `:logger` at.' + ) + + option(:log_formatter, + doc_type: 'Aws::Log::Formatter', + doc_default: literal('Aws::Log::Formatter.default'), + docstring: 'The log formatter.' 
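+ # A minimal usage sketch, assuming the standard library Logger:
+ #   client = Aws::S3::Client.new(
+ #     logger: Logger.new($stdout),
+ #     log_level: :info
+ #   )
+ # Each operation is then logged through Log::Formatter.default.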
+ ) do |config| + Log::Formatter.default if config.logger + end + + def add_handlers(handlers, config) + handlers.add(Handler, step: :validate) if config.logger + end + + class Handler < Seahorse::Client::Handler + + # @param [RequestContext] context + # @return [Response] + def call(context) + context[:logging_started_at] = Time.now + @handler.call(context).tap do |response| + context[:logging_completed_at] = Time.now + log(context.config, response) + end + end + + private + + # @param [Configuration] config + # @param [Response] response + # @return [void] + def log(config, response) + config.logger.send(config.log_level, format(config, response)) + end + + # @param [Configuration] config + # @param [Response] response + # @return [String] + def format(config, response) + config.log_formatter.format(response) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_converter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_converter.rb new file mode 100644 index 0000000..6c827f8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_converter.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class ParamConverter < Seahorse::Client::Plugin + + option(:convert_params, + default: true, + doc_type: 'Boolean', + docstring: <<-DOCS +When `true`, an attempt is made to coerce request parameters into +the required types. + DOCS + ) + + def add_handlers(handlers, config) + handlers.add(Handler, step: :initialize) if config.convert_params + end + + class Handler < Seahorse::Client::Handler + + def call(context) + converter = Aws::ParamConverter.new(context.operation.input) + context.params = converter.convert(context.params) + @handler.call(context).on_complete do |resp| + converter.close_opened_files + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_validator.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_validator.rb new file mode 100644 index 0000000..d7985ad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/param_validator.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class ParamValidator < Seahorse::Client::Plugin + + option(:validate_params, + default: true, + doc_type: 'Boolean', + docstring: <<-DOCS) +When `true`, request parameters are validated before +sending the request. 
+ DOCS + + def add_handlers(handlers, config) + if config.validate_params + handlers.add(Handler, step: :validate, priority: 50) + end + end + + class Handler < Seahorse::Client::Handler + + def call(context) + Aws::ParamValidator.validate!(context.operation.input, context.params) + @handler.call(context) + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/api_gateway.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/api_gateway.rb new file mode 100644 index 0000000..a4dbb5f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/api_gateway.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Protocols + class ApiGateway < Seahorse::Client::Plugin + + class ContentTypeHandler < Seahorse::Client::Handler + def call(context) + body = context.http_request.body + # Rest::Handler will set a default JSON body, so size can be checked + # if this handler is run after serialization. + if !body.respond_to?(:size) || + (body.respond_to?(:size) && body.size > 0) + context.http_request.headers['Content-Type'] ||= + 'application/json' + end + @handler.call(context) + end + end + + handler(Rest::Handler) + handler(ContentTypeHandler, priority: 30) + handler(Json::ErrorHandler, step: :sign) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/ec2.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/ec2.rb new file mode 100644 index 0000000..953d97b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/ec2.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require_relative '../../query' + +module Aws + module Plugins + module Protocols + class EC2 < Seahorse::Client::Plugin + + class Handler < Aws::Query::Handler + + def apply_params(param_list, params, rules) + Aws::Query::EC2ParamBuilder.new(param_list).apply(rules, params) + end + + def parse_xml(context) + if rules = context.operation.output + parser = Xml::Parser.new(rules) + data = parser.parse(xml(context)) do |path, value| + if path.size == 2 && path.last == 'requestId' + context.metadata[:request_id] = value + end + end + data + else + EmptyStructure.new + end + end + + end + + handler(Handler) + handler(Xml::ErrorHandler, step: :sign) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/json_rpc.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/json_rpc.rb new file mode 100644 index 0000000..fd3c530 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/json_rpc.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Protocols + class JsonRpc < Seahorse::Client::Plugin + + option(:simple_json, + default: false, + doc_type: 'Boolean', + docstring: <<-DOCS) +Disables request parameter conversion, validation, and formatting. +Also disable response data type conversions. This option is useful +when you want to ensure the highest level of performance by +avoiding overhead of walking request parameters and response data +structures. + +When `:simple_json` is enabled, the request parameters hash must +be formatted exactly as the DynamoDB API expects. 
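+
+For example (an illustrative DynamoDB call, with the caller supplying
+the raw attribute-value formatting):
+
+    ddb = Aws::DynamoDB::Client.new(simple_json: true)
+    ddb.put_item(table_name: 'Table', item: { 'id' => { 'S' => 'abc' } })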
+ DOCS + + option(:validate_params) { |config| !config.simple_json } + + option(:convert_params) { |config| !config.simple_json } + + handler(Json::Handler) + + handler(Json::ErrorHandler, step: :sign) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/query.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/query.rb new file mode 100644 index 0000000..9ac8d92 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/query.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require_relative '../../query' + +module Aws + module Plugins + module Protocols + class Query < Seahorse::Client::Plugin + handler(Aws::Query::Handler) + handler(Xml::ErrorHandler, step: :sign) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_json.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_json.rb new file mode 100644 index 0000000..a5b9f12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_json.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Protocols + class RestJson < Seahorse::Client::Plugin + + class ContentTypeHandler < Seahorse::Client::Handler + def call(context) + body = context.http_request.body + # Rest::Handler will set a default JSON body, so size can be checked + # if this handler is run after serialization. + if !body.respond_to?(:size) || + (body.respond_to?(:size) && body.size > 0) + context.http_request.headers['Content-Type'] ||= + 'application/json' + end + @handler.call(context) + end + end + + handler(Rest::Handler) + handler(ContentTypeHandler, priority: 30) + handler(Json::ErrorHandler, step: :sign) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_xml.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_xml.rb new file mode 100644 index 0000000..1a361ac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/protocols/rest_xml.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Protocols + class RestXml < Seahorse::Client::Plugin + + handler(Rest::Handler) + handler(Xml::ErrorHandler, step: :sign) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/recursion_detection.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/recursion_detection.rb new file mode 100644 index 0000000..08dcdca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/recursion_detection.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class RecursionDetection < Seahorse::Client::Plugin + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + + unless context.http_request.headers.key?('x-amzn-trace-id') + if ENV['AWS_LAMBDA_FUNCTION_NAME'] && + (trace_id = validate_header(ENV['_X_AMZN_TRACE_ID'])) + context.http_request.headers['x-amzn-trace-id'] = trace_id + end + end + @handler.call(context) + end + + private + def validate_header(header_value) + return unless header_value + + if (header_value.chars & (0..31).map(&:chr)).any? 
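+ # Array#& intersects the header's characters with ASCII 0x00-0x1F,
+ # so a non-empty result means the value contains a control character.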
+ raise ArgumentError, 'Invalid _X_AMZN_TRACE_ID value: '\ + 'contains ASCII control characters' + end + header_value + end + end + + # should be at the end of build so that + # modeled traits / service customizations apply first + handler(Handler, step: :build, order: 99) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/regional_endpoint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/regional_endpoint.rb new file mode 100644 index 0000000..729578e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/regional_endpoint.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class RegionalEndpoint < Seahorse::Client::Plugin + option(:profile) + + option(:region, + required: true, + doc_type: String, + docstring: <<-DOCS) do |cfg| +The AWS region to connect to. The configured `:region` is +used to determine the service `:endpoint`. When not passed, +a default `:region` is searched for in the following locations: + +* `Aws.config[:region]` +* `ENV['AWS_REGION']` +* `ENV['AMAZON_REGION']` +* `ENV['AWS_DEFAULT_REGION']` +* `~/.aws/credentials` +* `~/.aws/config` + DOCS + resolve_region(cfg) + end + + option(:use_dualstack_endpoint, + doc_type: 'Boolean', + docstring: <<-DOCS) do |cfg| +When set to `true`, dualstack enabled endpoints (with `.aws` TLD) +will be used if available. + DOCS + resolve_use_dualstack_endpoint(cfg) + end + + option(:use_fips_endpoint, + doc_type: 'Boolean', + docstring: <<-DOCS) do |cfg| +When set to `true`, fips compatible endpoints will be used if available. +When a `fips` region is used, the region is normalized and this config +is set to `true`. + DOCS + resolve_use_fips_endpoint(cfg) + end + + # This option signals whether :endpoint was provided or not. + # Legacy endpoints must continue to be generated at client time. + option(:regional_endpoint, false) + + # NOTE: All of the defaults block code is effectively deprecated. + # Because old services can depend on this new core version, we must + # retain it. + option(:endpoint, doc_type: String, docstring: <<-DOCS) do |cfg| +The client endpoint is normally constructed from the `:region` +option. You should only configure an `:endpoint` when connecting +to test or custom endpoints. This should be a valid HTTP(S) URI. + DOCS + endpoint_prefix = cfg.api.metadata['endpointPrefix'] + if cfg.region && endpoint_prefix + if cfg.respond_to?(:sts_regional_endpoints) + sts_regional = cfg.sts_regional_endpoints + end + + # check region is a valid RFC host label + unless Seahorse::Util.host_label?(cfg.region) + raise Errors::InvalidRegionError + end + + region = cfg.region + new_region = region.gsub('fips-', '').gsub('-fips', '') + if region != new_region + warn("Legacy region #{region} was transformed to #{new_region}."\ + '`use_fips_endpoint` config was set to true.') + cfg.override_config(:use_fips_endpoint, true) + cfg.override_config(:region, new_region) + end + + Aws::Partitions::EndpointProvider.resolve( + cfg.region, + endpoint_prefix, + sts_regional, + { + dualstack: cfg.use_dualstack_endpoint, + fips: cfg.use_fips_endpoint + } + ) + end + end + + def after_initialize(client) + if client.config.region.nil? 
|| client.config.region == '' + raise Errors::MissingRegionError + end + end + + class << self + private + + def resolve_region(cfg) + keys = %w[AWS_REGION AMAZON_REGION AWS_DEFAULT_REGION] + env_region = ENV.values_at(*keys).compact.first + env_region = nil if env_region == '' + cfg_region = Aws.shared_config.region(profile: cfg.profile) + env_region || cfg_region + end + + def resolve_use_dualstack_endpoint(cfg) + value = ENV['AWS_USE_DUALSTACK_ENDPOINT'] + value ||= Aws.shared_config.use_dualstack_endpoint( + profile: cfg.profile + ) + Aws::Util.str_2_bool(value) || false + end + + def resolve_use_fips_endpoint(cfg) + value = ENV['AWS_USE_FIPS_ENDPOINT'] + value ||= Aws.shared_config.use_fips_endpoint(profile: cfg.profile) + Aws::Util.str_2_bool(value) || false + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/response_paging.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/response_paging.rb new file mode 100644 index 0000000..2796bad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/response_paging.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + class ResponsePaging < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + context[:original_params] = context.params + resp = @handler.call(context) + PageableResponse.apply(resp) + resp.pager = context.operation[:pager] || Aws::Pager::NullPager.new + resp + end + + end + + handle(Handler, step: :initialize, priority: 90) + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/client_rate_limiter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/client_rate_limiter.rb new file mode 100644 index 0000000..e347c16 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/client_rate_limiter.rb @@ -0,0 +1,139 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Retries + # @api private + # Used only in 'adaptive' retry mode + class ClientRateLimiter + MIN_CAPACITY = 1 + MIN_FILL_RATE = 0.5 + SMOOTH = 0.8 + # How much to scale back after a throttling response + BETA = 0.7 + # Controls how aggressively we scale up after being throttled + SCALE_CONSTANT = 0.4 + + def initialize + @mutex = Mutex.new + @fill_rate = nil + @max_capacity = nil + @current_capacity = 0 + @last_timestamp = nil + @enabled = false + @measured_tx_rate = 0 + @last_tx_rate_bucket = Aws::Util.monotonic_seconds + @request_count = 0 + @last_max_rate = 0 + @last_throttle_time = Aws::Util.monotonic_seconds + @calculated_rate = nil + end + + def token_bucket_acquire(amount, wait_to_fill = true) + # Client side throttling is not enabled until we see a + # throttling error + return unless @enabled + + @mutex.synchronize do + token_bucket_refill + + # Next see if we have enough capacity for the requested amount + while @current_capacity < amount + raise Aws::Errors::RetryCapacityNotAvailableError unless wait_to_fill + @mutex.sleep((amount - @current_capacity) / @fill_rate) + token_bucket_refill + end + @current_capacity -= amount + end + end + + def update_sending_rate(is_throttling_error) + @mutex.synchronize do + update_measured_rate + + if is_throttling_error + rate_to_use = if @enabled + [@measured_tx_rate, @fill_rate].min + else + @measured_tx_rate + end + + # The fill_rate is from 
the token bucket + @last_max_rate = rate_to_use + calculate_time_window + @last_throttle_time = Aws::Util.monotonic_seconds + @calculated_rate = cubic_throttle(rate_to_use) + enable_token_bucket + else + calculate_time_window + @calculated_rate = cubic_success(Aws::Util.monotonic_seconds) + end + + new_rate = [@calculated_rate, 2 * @measured_tx_rate].min + token_bucket_update_rate(new_rate) + end + end + + private + + def token_bucket_refill + timestamp = Aws::Util.monotonic_seconds + unless @last_timestamp + @last_timestamp = timestamp + return + end + + fill_amount = (timestamp - @last_timestamp) * @fill_rate + @current_capacity = [ + @max_capacity, @current_capacity + fill_amount + ].min + + @last_timestamp = timestamp + end + + def token_bucket_update_rate(new_rps) + # Refill based on our current rate before we update to the + # new fill rate + token_bucket_refill + @fill_rate = [new_rps, MIN_FILL_RATE].max + @max_capacity = [new_rps, MIN_CAPACITY].max + # When we scale down we can't have a current capacity that exceeds our + # max_capacity. + @current_capacity = [@current_capacity, @max_capacity].min + end + + def enable_token_bucket + @enabled = true + end + + def update_measured_rate + t = Aws::Util.monotonic_seconds + time_bucket = (t * 2).floor / 2.0 + @request_count += 1 + if time_bucket > @last_tx_rate_bucket + current_rate = @request_count / (time_bucket - @last_tx_rate_bucket) + @measured_tx_rate = (current_rate * SMOOTH) + + (@measured_tx_rate * (1 - SMOOTH)) + @request_count = 0 + @last_tx_rate_bucket = time_bucket + end + end + + def calculate_time_window + # This is broken out into a separate calculation because it only + # gets updated when @last_max_rate changes so it can be cached. + @time_window = ((@last_max_rate * (1 - BETA)) / SCALE_CONSTANT)**(1.0 / 3) + end + + def cubic_success(timestamp) + dt = timestamp - @last_throttle_time + (SCALE_CONSTANT * ((dt - @time_window)**3)) + @last_max_rate + end + + def cubic_throttle(rate_to_use) + rate_to_use * BETA + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/clock_skew.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/clock_skew.rb new file mode 100644 index 0000000..877d0e3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/clock_skew.rb @@ -0,0 +1,100 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Retries + + # @api private + class ClockSkew + + CLOCK_SKEW_THRESHOLD = 5 * 60 # five minutes + + def initialize + @mutex = Mutex.new + # clock_corrections are recorded only on errors + # and only when time difference is greater than the + # CLOCK_SKEW_THRESHOLD + @endpoint_clock_corrections = Hash.new(0) + + # estimated_skew is calculated on every request + # and is used to estimate a TTL for requests + @endpoint_estimated_skews = Hash.new(nil) + end + + # Gets the clock_correction in seconds to apply to a given endpoint + # @param endpoint [URI / String] + def clock_correction(endpoint) + @mutex.synchronize { @endpoint_clock_corrections[endpoint.to_s] } + end + + # The estimated skew factors in any clock skew from + # the service along with any network latency. + # This provides a more accurate value for the ttl, + # which should represent when the client will stop + # waiting for a request. 
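+ # (Illustratively, such a TTL could be computed as roughly the
+ # current time plus the transport timeout plus estimated_skew.)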
+ # Estimated Skew should not be used to correct clock skew errors + # it should only be used to estimate TTL for a request + def estimated_skew(endpoint) + @mutex.synchronize { @endpoint_estimated_skews[endpoint.to_s] } + end + + # Determines whether a request has clock skew by comparing + # the current time against the server's time in the response + # @param context [Seahorse::Client::RequestContext] + def clock_skewed?(context) + server_time = server_time(context.http_response) + !!server_time && + (Time.now.utc - server_time).abs > CLOCK_SKEW_THRESHOLD + end + + # Called only on clock skew related errors + # Update the stored clock skew correction value for an endpoint + # from the server's time in the response + # @param context [Seahorse::Client::RequestContext] + def update_clock_correction(context) + endpoint = context.http_request.endpoint + now_utc = Time.now.utc + server_time = server_time(context.http_response) + if server_time && (now_utc - server_time).abs > CLOCK_SKEW_THRESHOLD + set_clock_correction(endpoint, server_time - now_utc) + end + end + + # Called for every request + # Update our estimated clock skew for the endpoint + # from the servers time in the response + # @param context [Seahorse::Client::RequestContext] + def update_estimated_skew(context) + endpoint = context.http_request.endpoint + now_utc = Time.now.utc + server_time = server_time(context.http_response) + return unless server_time + @mutex.synchronize do + @endpoint_estimated_skews[endpoint.to_s] = server_time - now_utc + end + end + + private + + # @param response [Seahorse::Client::Http::Response:] + def server_time(response) + begin + Time.parse(response.headers['date']).utc + rescue + nil + end + end + + # Sets the clock correction for an endpoint + # @param endpoint [URI / String] + # @param correction [Number] + def set_clock_correction(endpoint, correction) + @mutex.synchronize do + @endpoint_clock_corrections[endpoint.to_s] = correction + end + end + end + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/error_inspector.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/error_inspector.rb new file mode 100644 index 0000000..f799bc7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/error_inspector.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Retries + # @api private + # This class will be obsolete when APIs contain modeled exceptions + class ErrorInspector + EXPIRED_CREDS = Set.new( + [ + 'InvalidClientTokenId', # query services + 'UnrecognizedClientException', # json services + 'InvalidAccessKeyId', # s3 + 'AuthFailure', # ec2 + 'InvalidIdentityToken', # sts + 'ExpiredToken', # route53 + 'ExpiredTokenException' # kinesis + ] + ) + + THROTTLING_ERRORS = Set.new( + [ + 'Throttling', # query services + 'ThrottlingException', # json services + 'ThrottledException', # sns + 'RequestThrottled', # sqs + 'RequestThrottledException', # generic service + 'ProvisionedThroughputExceededException', # dynamodb + 'TransactionInProgressException', # dynamodb + 'RequestLimitExceeded', # ec2 + 'BandwidthLimitExceeded', # cloud search + 'LimitExceededException', # kinesis + 'TooManyRequestsException', # batch + 'PriorRequestNotComplete', # route53 + 'SlowDown', # s3 + 'EC2ThrottledException' # ec2 + ] + ) + + CHECKSUM_ERRORS = Set.new( + [ + 'CRC32CheckFailed', # dynamodb + 'BadDigest' # s3 + ] + ) + + 
NETWORKING_ERRORS = Set.new( + [ + 'RequestTimeout', # s3 + 'InternalError', # s3 + 'RequestTimeoutException', # glacier + 'IDPCommunicationError' # sts + ] + ) + + # See: https://github.com/aws/aws-sdk-net/blob/5810dfe401e0eac2e59d02276d4b479224b4538e/sdk/src/Core/Amazon.Runtime/Pipeline/RetryHandler/RetryPolicy.cs#L78 + CLOCK_SKEW_ERRORS = Set.new( + [ + 'RequestTimeTooSkewed', + 'RequestExpired', + 'InvalidSignatureException', + 'SignatureDoesNotMatch', + 'AuthFailure', + 'RequestInTheFuture' + ] + ) + + def initialize(error, http_status_code) + @error = error + @name = extract_name(@error) + @http_status_code = http_status_code + end + + def expired_credentials? + !!(EXPIRED_CREDS.include?(@name) || @name.match(/expired/i)) + end + + def throttling_error? + !!(THROTTLING_ERRORS.include?(@name) || + @name.match(/throttl/i) || + @http_status_code == 429) || + modeled_throttling? + end + + def checksum? + CHECKSUM_ERRORS.include?(@name) + end + + def networking? + @error.is_a?(Seahorse::Client::NetworkingError) || + @error.is_a?(Errors::NoSuchEndpointError) || + NETWORKING_ERRORS.include?(@name) + end + + def server? + (500..599).cover?(@http_status_code) + end + + def endpoint_discovery?(context) + return false unless context.operation.endpoint_discovery + + @http_status_code == 421 || + @name == 'InvalidEndpointException' || + @error.is_a?(Errors::EndpointDiscoveryError) + end + + def modeled_retryable? + @error.is_a?(Errors::ServiceError) && @error.retryable? + end + + def modeled_throttling? + @error.is_a?(Errors::ServiceError) && @error.throttling? + end + + def clock_skew?(context) + CLOCK_SKEW_ERRORS.include?(@name) && + context.config.clock_skew.clock_skewed?(context) + end + + def retryable?(context) + server? || + modeled_retryable? || + throttling_error? || + networking? || + checksum? || + endpoint_discovery?(context) || + (expired_credentials? && refreshable_credentials?(context)) || + clock_skew?(context) + end + + private + + def refreshable_credentials?(context) + context.config.credentials.respond_to?(:refresh!) + end + + def extract_name(error) + if error.is_a?(Errors::ServiceError) + error.class.code || error.class.name.to_s + else + error.class.name.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/retry_quota.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/retry_quota.rb new file mode 100644 index 0000000..63a2db2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retries/retry_quota.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module Aws + module Plugins + module Retries + + # @api private + # Used in 'standard' and 'adaptive' retry modes. + class RetryQuota + INITIAL_RETRY_TOKENS = 500 + RETRY_COST = 5 + NO_RETRY_INCREMENT = 1 + TIMEOUT_RETRY_COST = 10 + + def initialize(opts = {}) + @mutex = Mutex.new + @max_capacity = opts.fetch(:max_capacity, INITIAL_RETRY_TOKENS) + @available_capacity = @max_capacity + end + + # check if there is sufficient capacity to retry + # and return it. If there is insufficient capacity + # return 0 + # @return [Integer] The amount of capacity checked out + def checkout_capacity(error_inspector) + @mutex.synchronize do + capacity_amount = if error_inspector.networking? 
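+ # Networking/timeout failures check out more capacity (10) than
+ # other retryable errors (5); successful calls pay back 1 each, so
+ # a run of failures drains the quota and halts further retries.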
+ TIMEOUT_RETRY_COST + else + RETRY_COST + end + + # unable to acquire capacity + return 0 if capacity_amount > @available_capacity + + @available_capacity -= capacity_amount + capacity_amount + end + end + + # capacity_amount refers to the amount of capacity requested from + # the last retry. It can either be RETRY_COST, TIMEOUT_RETRY_COST, + # or unset. + def release(capacity_amount) + # Implementation note: The release() method is called for + # every API call. In the common case where the request is + # successful and we're at full capacity, we can avoid locking. + # We can't exceed max capacity so there's no work we have to do. + return if @available_capacity == @max_capacity + + @mutex.synchronize do + @available_capacity += capacity_amount || NO_RETRY_INCREMENT + @available_capacity = [@available_capacity, @max_capacity].min + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retry_errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retry_errors.rb new file mode 100644 index 0000000..e7bb4f5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/retry_errors.rb @@ -0,0 +1,432 @@ +# frozen_string_literal: true + +require 'set' +require_relative 'retries/error_inspector' +require_relative 'retries/retry_quota' +require_relative 'retries/client_rate_limiter' +require_relative 'retries/clock_skew' + +module Aws + module Plugins + # @api private + class RetryErrors < Seahorse::Client::Plugin + # BEGIN LEGACY OPTIONS + EQUAL_JITTER = ->(delay) { (delay / 2) + Kernel.rand(0..(delay / 2)) } + FULL_JITTER = ->(delay) { Kernel.rand(0..delay) } + NO_JITTER = ->(delay) { delay } + + JITTERS = { + none: NO_JITTER, + equal: EQUAL_JITTER, + full: FULL_JITTER + } + + JITTERS.default_proc = lambda { |h, k| + raise KeyError, + "#{k} is not a named jitter function. Must be one of #{h.keys}" + } + + DEFAULT_BACKOFF = lambda do |c| + delay = 2**c.retries * c.config.retry_base_delay + if (c.config.retry_max_delay || 0) > 0 + delay = [delay, c.config.retry_max_delay].min + end + jitter = c.config.retry_jitter + jitter = JITTERS[jitter] if jitter.is_a?(Symbol) + delay = jitter.call(delay) if jitter + Kernel.sleep(delay) + end + + option( + :retry_limit, + default: 3, + doc_type: Integer, + docstring: <<-DOCS) +The maximum number of times to retry failed requests. Only +~ 500 level server errors and certain ~ 400 level client errors +are retried. Generally, these are throttling errors, data +checksum errors, networking errors, timeout errors, auth errors, +endpoint discovery, and errors from expired credentials. +This option is only used in the `legacy` retry mode. + DOCS + + option( + :retry_max_delay, + default: 0, + doc_type: Integer, + docstring: <<-DOCS) +The maximum number of seconds to delay between retries (0 for no limit) +used by the default backoff function. This option is only used in the +`legacy` retry mode. + DOCS + + option( + :retry_base_delay, + default: 0.3, + doc_type: Float, + docstring: <<-DOCS) +The base delay in seconds used by the default backoff function. This option +is only used in the `legacy` retry mode. + DOCS + + option( + :retry_jitter, + default: :none, + doc_type: Symbol, + docstring: <<-DOCS) +A delay randomiser function used by the default backoff function. +Some predefined functions can be referenced by name - :none, :equal, :full, +otherwise a Proc that takes and returns a number. 
This option is only used
+in the `legacy` retry mode.
+
+@see https://www.awsarchitectureblog.com/2015/03/backoff.html
+      DOCS
+
+      option(
+        :retry_backoff,
+        default: DEFAULT_BACKOFF,
+        doc_type: Proc,
+        docstring: <<-DOCS)
+A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+This option is only used in the `legacy` retry mode.
+      DOCS
+
+      # END LEGACY OPTIONS
+
+      option(
+        :retry_mode,
+        default: 'legacy',
+        doc_type: String,
+        docstring: <<-DOCS) do |cfg|
+Specifies which retry algorithm to use. Values are:
+
+* `legacy` - The pre-existing retry behavior. This is the default value if
+  no retry mode is provided.
+
+* `standard` - A standardized set of retry rules across the AWS SDKs.
+  This includes support for retry quotas, which limit the number of
+  unsuccessful retries a client can make.
+
+* `adaptive` - An experimental retry mode that includes all the
+  functionality of `standard` mode along with automatic client side
+  throttling. This is a provisional mode that may change behavior
+  in the future.
+
+      DOCS
+        resolve_retry_mode(cfg)
+      end
+
+      option(
+        :max_attempts,
+        default: 3,
+        doc_type: Integer,
+        docstring: <<-DOCS) do |cfg|
+An integer representing the maximum number of attempts that will be made for
+a single request, including the initial attempt. For example,
+setting this value to 5 will result in a request being retried up to
+4 times. Used in `standard` and `adaptive` retry modes.
+      DOCS
+        resolve_max_attempts(cfg)
+      end
+
+      option(
+        :adaptive_retry_wait_to_fill,
+        default: true,
+        doc_type: 'Boolean',
+        docstring: <<-DOCS) do |cfg|
+Used only in `adaptive` retry mode. When true, the request will sleep
+until there is sufficient client side capacity to retry the request.
+When false, the request will raise a `RetryCapacityNotAvailableError`
+instead of sleeping, and will not be retried.
+      DOCS
+        resolve_adaptive_retry_wait_to_fill(cfg)
+      end
+
+      option(
+        :correct_clock_skew,
+        default: true,
+        doc_type: 'Boolean',
+        docstring: <<-DOCS) do |cfg|
+Used only in `standard` and `adaptive` retry modes. Specifies whether to
+apply a clock skew correction and retry requests with skewed client clocks.
+ DOCS + resolve_correct_clock_skew(cfg) + end + + # @api private undocumented + option(:client_rate_limiter) { Retries::ClientRateLimiter.new } + + # @api private undocumented + option(:retry_quota) { Retries::RetryQuota.new } + + # @api private undocumented + option(:clock_skew) { Retries::ClockSkew.new } + + def self.resolve_retry_mode(cfg) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + cfg.defaults_mode_config_resolver.resolve(:retry_mode) + end + + value = ENV['AWS_RETRY_MODE'] || + Aws.shared_config.retry_mode(profile: cfg.profile) || + default_mode_value || + 'legacy' + # Raise if provided value is not one of the retry modes + if value != 'legacy' && value != 'standard' && value != 'adaptive' + raise ArgumentError, + 'Must provide either `legacy`, `standard`, or `adaptive` for '\ + 'retry_mode profile option or for ENV[\'AWS_RETRY_MODE\']' + end + value + end + + def self.resolve_max_attempts(cfg) + value = (ENV['AWS_MAX_ATTEMPTS']) || + Aws.shared_config.max_attempts(profile: cfg.profile) || + '3' + value = value.to_i + # Raise if provided value is not a positive integer + if value <= 0 + raise ArgumentError, + 'Must provide a positive integer for max_attempts profile '\ + 'option or for ENV[\'AWS_MAX_ATTEMPTS\']' + end + value + end + + def self.resolve_adaptive_retry_wait_to_fill(cfg) + value = ENV['AWS_ADAPTIVE_RETRY_WAIT_TO_FILL'] || + Aws.shared_config.adaptive_retry_wait_to_fill(profile: cfg.profile) || + 'true' + # Raise if provided value is not true or false + if value != 'true' && value != 'false' + raise ArgumentError, + 'Must provide either `true` or `false` for '\ + 'adaptive_retry_wait_to_fill profile option or for '\ + 'ENV[\'AWS_ADAPTIVE_RETRY_WAIT_TO_FILL\']' + end + value == 'true' + end + + def self.resolve_correct_clock_skew(cfg) + value = ENV['AWS_CORRECT_CLOCK_SKEW'] || + Aws.shared_config.correct_clock_skew(profile: cfg.profile) || + 'true' + # Raise if provided value is not true or false + if value != 'true' && value != 'false' + raise ArgumentError, + 'Must provide either `true` or `false` for '\ + 'correct_clock_skew profile option or for '\ + 'ENV[\'AWS_CORRECT_CLOCK_SKEW\']' + end + value == 'true' + end + + class Handler < Seahorse::Client::Handler + # Max backoff (in seconds) + MAX_BACKOFF = 20 + + def call(context) + context.metadata[:retries] ||= {} + config = context.config + + get_send_token(config) + add_retry_headers(context) + response = @handler.call(context) + error_inspector = Retries::ErrorInspector.new( + response.error, response.context.http_response.status_code + ) + + request_bookkeeping(context, response, error_inspector) + + if error_inspector.endpoint_discovery?(context) + key = config.endpoint_cache.extract_key(context) + config.endpoint_cache.delete(key) + end + + # Clock correction needs to be updated from the response even when + # the request is not retryable but should only be updated + # in the case of clock skew errors + if error_inspector.clock_skew?(context) + config.clock_skew.update_clock_correction(context) + end + + # Estimated skew needs to be updated on every request + config.clock_skew.update_estimated_skew(context) + + return response unless retryable?(context, response, error_inspector) + + return response if context.retries >= config.max_attempts - 1 + + context.metadata[:retries][:capacity_amount] = + config.retry_quota.checkout_capacity(error_inspector) + return response unless context.metadata[:retries][:capacity_amount] > 0 + + delay = exponential_backoff(context.retries) + 
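+          # A rough worked example of the full-jitter delay just computed
+          # (see #exponential_backoff below), assuming Kernel.rand
+          # returned 0.5:
+          #   retries = 0  =>  min(0.5 * 1,  20)  =>  0.5s
+          #   retries = 3  =>  min(0.5 * 8,  20)  =>  4.0s
+          #   retries = 6  =>  min(0.5 * 64, 20)  =>  20s (MAX_BACKOFF cap)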
Kernel.sleep(delay)
+          retry_request(context, error_inspector)
+        end
+
+        private
+
+        def get_send_token(config)
+          # Either fail fast or block until a token becomes available;
+          # this must be configurable. The maximum rate at which we can
+          # send requests (max_send_rate) is unset until a throttle is
+          # seen.
+          if config.retry_mode == 'adaptive'
+            config.client_rate_limiter.token_bucket_acquire(
+              1,
+              config.adaptive_retry_wait_to_fill
+            )
+          end
+        end
+
+        # max_send_rate is updated when in adaptive mode, based on the
+        # response; the retry quota is updated if the request is
+        # successful (both modes).
+        def request_bookkeeping(context, response, error_inspector)
+          config = context.config
+          if response.successful?
+            config.retry_quota.release(
+              context.metadata[:retries][:capacity_amount]
+            )
+          end
+
+          if config.retry_mode == 'adaptive'
+            is_throttling_error = error_inspector.throttling_error?
+            config.client_rate_limiter.update_sending_rate(is_throttling_error)
+          end
+        end
+
+        def retryable?(context, response, error_inspector)
+          return false if response.successful?
+
+          error_inspector.retryable?(context) &&
+            context.http_response.body.respond_to?(:truncate)
+        end
+
+        def exponential_backoff(retries)
+          # for a transient error, use backoff
+          [Kernel.rand * 2**retries, MAX_BACKOFF].min
+        end
+
+        def retry_request(context, error)
+          context.retries += 1
+          context.config.credentials.refresh! if refresh_credentials?(context, error)
+          context.http_request.body.rewind
+          context.http_response.reset
+          call(context)
+        end
+
+        def refresh_credentials?(context, error)
+          error.expired_credentials? &&
+            context.config.credentials.respond_to?(:refresh!)
+        end
+
+        def add_retry_headers(context)
+          request_pairs = {
+            'attempt' => context.retries,
+            'max' => context.config.max_attempts
+          }
+          if (ttl = compute_request_ttl(context))
+            request_pairs['ttl'] = ttl
+          end
+
+          # create the request header
+          formatted_header = request_pairs.map { |k, v| "#{k}=#{v}" }.join('; ')
+          context.http_request.headers['amz-sdk-request'] = formatted_header
+        end
+
+        def compute_request_ttl(context)
+          return if context.operation.async
+
+          endpoint = context.http_request.endpoint
+          estimated_skew = context.config.clock_skew.estimated_skew(endpoint)
+          if context.config.respond_to?(:http_read_timeout)
+            read_timeout = context.config.http_read_timeout
+          end
+
+          if estimated_skew && read_timeout
+            (Time.now.utc + read_timeout + estimated_skew)
+              .strftime('%Y%m%dT%H%M%SZ')
+          end
+        end
+      end
+
+      class LegacyHandler < Seahorse::Client::Handler
+
+        def call(context)
+          response = @handler.call(context)
+          if response.error
+            error_inspector = Retries::ErrorInspector.new(
+              response.error, response.context.http_response.status_code
+            )
+
+            if error_inspector.endpoint_discovery?(context)
+              key = context.config.endpoint_cache.extract_key(context)
+              context.config.endpoint_cache.delete(key)
+            end
+
+            retry_if_possible(response, error_inspector)
+          else
+            response
+          end
+        end
+
+        private
+
+        def retry_if_possible(response, error_inspector)
+          context = response.context
+          if should_retry?(context, error_inspector)
+            retry_request(context, error_inspector)
+          else
+            response
+          end
+        end
+
+        def retry_request(context, error)
+          delay_retry(context)
+          context.retries += 1
+          context.config.credentials.refresh!
if refresh_credentials?(context, error) + context.http_request.body.rewind + context.http_response.reset + call(context) + end + + def delay_retry(context) + context.config.retry_backoff.call(context) + end + + def should_retry?(context, error) + error.retryable?(context) && + context.retries < retry_limit(context) && + response_truncatable?(context) + end + + def refresh_credentials?(context, error) + error.expired_credentials? && + context.config.credentials.respond_to?(:refresh!) + end + + def retry_limit(context) + context.config.retry_limit + end + + def response_truncatable?(context) + context.http_response.body.respond_to?(:truncate) + end + end + + def add_handlers(handlers, config) + if config.retry_mode == 'legacy' + if config.retry_limit > 0 + handlers.add(LegacyHandler, step: :sign, priority: 99) + end + else + handlers.add(Handler, step: :sign, priority: 99) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/sign.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/sign.rb new file mode 100644 index 0000000..6ce5a78 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/sign.rb @@ -0,0 +1,200 @@ +# frozen_string_literal: true + +require 'aws-sigv4' + +module Aws + module Plugins + # @api private + class Sign < Seahorse::Client::Plugin + # These once had defaults. But now they are used as overrides to + # new endpoint and auth resolution. + option(:sigv4_signer) + option(:sigv4_name) + option(:sigv4_region) + option(:unsigned_operations, default: []) + + supported_auth_types = %w[sigv4 bearer none] + supported_auth_types += ['sigv4a'] if Aws::Sigv4::Signer.use_crt? + SUPPORTED_AUTH_TYPES = supported_auth_types.freeze + + def add_handlers(handlers, cfg) + operations = cfg.api.operation_names - cfg.unsigned_operations + handlers.add(Handler, step: :sign, operations: operations) + end + + # @api private + # Return a signer with the `sign(context)` method + def self.signer_for(auth_scheme, config, region_override = nil) + case auth_scheme['name'] + when 'sigv4', 'sigv4a' + SignatureV4.new(auth_scheme, config, region_override) + when 'bearer' + Bearer.new + else + NullSigner.new + end + end + + class Handler < Seahorse::Client::Handler + def call(context) + # Skip signing if using sigv2 signing from s3_signer in S3 + unless v2_signing?(context.config) + signer = Sign.signer_for( + context[:auth_scheme], + context.config, + context[:sigv4_region] + ) + signer.sign(context) + end + @handler.call(context) + end + + private + + def v2_signing?(config) + # 's3' is legacy signing, 'v4' is default + config.respond_to?(:signature_version) && + config.signature_version == 's3' + end + end + + # @api private + class Bearer + def initialize + end + + def sign(context) + if context.http_request.endpoint.scheme != 'https' + raise ArgumentError, + 'Unable to use bearer authorization on non https endpoint.' + end + + token_provider = context.config.token_provider + + raise Errors::MissingBearerTokenError unless token_provider&.set? 
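+
+        # A rough sketch of the outcome, assuming a provider whose token
+        # value is "my-token" (names hypothetical):
+        #   Bearer.new.sign(context)
+        #   context.http_request.headers['Authorization']
+        #   # => "Bearer my-token"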
+ + context.http_request.headers['Authorization'] = + "Bearer #{token_provider.token.token}" + end + + def presign_url(*args) + raise ArgumentError, 'Bearer auth does not support presigned urls' + end + + def sign_event(*args) + raise ArgumentError, 'Bearer auth does not support event signing' + end + end + + # @api private + class SignatureV4 + def initialize(auth_scheme, config, region_override = nil) + scheme_name = auth_scheme['name'] + + unless %w[sigv4 sigv4a].include?(scheme_name) + raise ArgumentError, + "Expected sigv4 or sigv4a auth scheme, got #{scheme_name}" + end + + region = if scheme_name == 'sigv4a' + auth_scheme['signingRegionSet'].first + else + auth_scheme['signingRegion'] + end + begin + @signer = Aws::Sigv4::Signer.new( + service: config.sigv4_name || auth_scheme['signingName'], + region: region_override || config.sigv4_region || region, + credentials_provider: config.credentials, + signing_algorithm: scheme_name.to_sym, + uri_escape_path: !!!auth_scheme['disableDoubleEncoding'], + unsigned_headers: %w[content-length user-agent x-amzn-trace-id] + ) + rescue Aws::Sigv4::Errors::MissingCredentialsError + raise Aws::Errors::MissingCredentialsError + end + end + + def sign(context) + req = context.http_request + + apply_authtype(context, req) + reset_signature(req) + apply_clock_skew(context, req) + + # compute the signature + begin + signature = @signer.sign_request( + http_method: req.http_method, + url: req.endpoint, + headers: req.headers, + body: req.body + ) + rescue Aws::Sigv4::Errors::MissingCredentialsError + # Necessary for when credentials is explicitly set to nil + raise Aws::Errors::MissingCredentialsError + end + # apply signature headers + req.headers.update(signature.headers) + + # add request metadata with signature components for debugging + context[:canonical_request] = signature.canonical_request + context[:string_to_sign] = signature.string_to_sign + end + + def presign_url(*args) + @signer.presign_url(*args) + end + + def sign_event(*args) + @signer.sign_event(*args) + end + + private + + def apply_authtype(context, req) + if context.operation['authtype'].eql?('v4-unsigned-body') && + req.endpoint.scheme.eql?('https') + req.headers['X-Amz-Content-Sha256'] ||= 'UNSIGNED-PAYLOAD' + end + end + + def reset_signature(req) + # in case this request is being re-signed + req.headers.delete('Authorization') + req.headers.delete('X-Amz-Security-Token') + req.headers.delete('X-Amz-Date') + req.headers.delete('x-Amz-Region-Set') + end + + def apply_clock_skew(context, req) + if context.config.respond_to?(:clock_skew) && + context.config.clock_skew && + context.config.correct_clock_skew + + endpoint = context.http_request.endpoint + skew = context.config.clock_skew.clock_correction(endpoint) + if skew.abs.positive? 
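+            # For example, with a client clock running 300 seconds behind
+            # the service, skew would be roughly +300 and the re-signed
+            # X-Amz-Date becomes (Time.now.utc + 300), approximating the
+            # service's view of "now" so signature validation can pass.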
+ req.headers['X-Amz-Date'] = + (Time.now.utc + skew).strftime('%Y%m%dT%H%M%SZ') + end + end + end + + end + + # @api private + class NullSigner + + def sign(context) + end + + def presign_url(*args) + end + + def sign_event(*args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v2.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v2.rb new file mode 100644 index 0000000..2d82dcc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v2.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Aws + module Plugins + # @api private + # Necessary to keep after Endpoints 2.0 + class SignatureV2 < Seahorse::Client::Plugin + + option(:v2_signer) do |cfg| + Aws::Sigv2::Signer.new(credentials_provider: cfg.credentials) + end + + def add_handlers(handlers, _) + handlers.add(Handler, step: :sign) + end + + class Handler < Seahorse::Client::Handler + + def call(context) + apply_signature( + context.http_request, + context.config.v2_signer + ) + @handler.call(context) + end + + private + + def apply_signature(req, signer) + + param_list = req.body.param_list + param_list.delete('Timestamp') # in case of re-signing + + signature = signer.sign_request( + http_method: req.http_method, + url: req.endpoint, + params: param_list.inject({}) do |hash, param| + hash[param.name] = param.value + hash + end + ) + + # apply signature + signature.each_pair do |param_name, param_value| + param_list.set(param_name, param_value) + end + + req.body = param_list.to_io + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v4.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v4.rb new file mode 100644 index 0000000..1233933 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/signature_v4.rb @@ -0,0 +1,151 @@ +# frozen_string_literal: true + +require 'aws-sigv4' + +module Aws + module Plugins + # @api private + # Necessary to exist after endpoints 2.0 + class SignatureV4 < Seahorse::Client::Plugin + + V4_AUTH = %w[v4 v4-unsigned-payload v4-unsigned-body] + + option(:sigv4_signer) do |cfg| + SignatureV4.build_signer(cfg) + end + + option(:sigv4_name) do |cfg| + signingName = if cfg.region + Aws::Partitions::EndpointProvider.signing_service( + cfg.region, cfg.api.metadata['endpointPrefix'] + ) + end + signingName || cfg.api.metadata['signingName'] || cfg.api.metadata['endpointPrefix'] + end + + option(:sigv4_region) do |cfg| + if cfg.region + if cfg.respond_to?(:sts_regional_endpoints) + sts_regional = cfg.sts_regional_endpoints + end + Aws::Partitions::EndpointProvider.signing_region( + cfg.region, cfg.api.metadata['endpointPrefix'], sts_regional + ) + end + end + + option(:unsigned_operations) do |cfg| + if cfg.api.metadata['signatureVersion'] == 'v4' + # select operations where authtype is set and is not v4 + cfg.api.operation_names.select do |o| + cfg.api.operation(o)['authtype'] && !V4_AUTH.include?(cfg.api.operation(o)['authtype']) + end + else # service is not v4 auth + # select all operations where authtype is not v4 + # (includes operations with no explicit authtype) + cfg.api.operation_names.select do |o| + !V4_AUTH.include?(cfg.api.operation(o)['authtype']) + end + end + end + + def add_handlers(handlers, cfg) + if cfg.unsigned_operations.empty? 
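+        # When nothing is unsigned, every operation gets the sign handler;
+        # otherwise the handler is scoped to the signed subset. E.g. for a
+        # hypothetical API with operations [:get_thing, :put_thing] where
+        # :put_thing has an "authtype" of "none", only :get_thing is signed.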
+          handlers.add(Handler, step: :sign)
+        else
+          operations = cfg.api.operation_names - cfg.unsigned_operations
+          handlers.add(Handler, step: :sign, operations: operations)
+        end
+      end
+
+      class Handler < Seahorse::Client::Handler
+        def call(context)
+          SignatureV4.apply_signature(context: context)
+          @handler.call(context)
+        end
+      end
+
+      class MissingCredentialsSigner
+        def sign_request(*args)
+          raise Errors::MissingCredentialsError
+        end
+      end
+
+      class << self
+
+        # @api private
+        def build_signer(cfg)
+          if cfg.credentials && cfg.sigv4_region
+            Aws::Sigv4::Signer.new(
+              service: cfg.sigv4_name,
+              region: cfg.sigv4_region,
+              credentials_provider: cfg.credentials,
+              unsigned_headers: ['content-length', 'user-agent', 'x-amzn-trace-id']
+            )
+          elsif cfg.credentials
+            raise Errors::MissingRegionError
+          elsif cfg.sigv4_region
+            # Instead of raising now, we return a signer that raises only
+            # if you attempt to sign a request. Some services have unsigned
+            # operations and it is okay to initialize clients for these
+            # services without credentials. Unsigned operations have an
+            # "authtype" trait of "none".
+            MissingCredentialsSigner.new
+          end
+        end
+
+        # @api private
+        def apply_signature(options = {})
+          context = apply_authtype(options[:context])
+          signer = options[:signer] || context.config.sigv4_signer
+          req = context.http_request
+
+          # in case this request is being re-signed
+          req.headers.delete('Authorization')
+          req.headers.delete('X-Amz-Security-Token')
+          req.headers.delete('X-Amz-Date')
+          req.headers.delete('x-Amz-Region-Set')
+
+          if context.config.respond_to?(:clock_skew) &&
+             context.config.clock_skew &&
+             context.config.correct_clock_skew
+
+            endpoint = context.http_request.endpoint
+            skew = context.config.clock_skew.clock_correction(endpoint)
+            if skew.abs > 0
+              req.headers['X-Amz-Date'] = (Time.now.utc + skew).strftime("%Y%m%dT%H%M%SZ")
+            end
+          end
+
+          # compute the signature
+          begin
+            signature = signer.sign_request(
+              http_method: req.http_method,
+              url: req.endpoint,
+              headers: req.headers,
+              body: req.body
+            )
+          rescue Aws::Sigv4::Errors::MissingCredentialsError
+            raise Aws::Errors::MissingCredentialsError
+          end
+
+          # apply signature headers
+          req.headers.update(signature.headers)
+
+          # add request metadata with signature components for debugging
+          context[:canonical_request] = signature.canonical_request
+          context[:string_to_sign] = signature.string_to_sign
+        end
+
+        # @api private
+        def apply_authtype(context)
+          if context.operation['authtype'].eql?('v4-unsigned-body') &&
+             context.http_request.endpoint.scheme.eql?('https')
+            context.http_request.headers['X-Amz-Content-Sha256'] ||= 'UNSIGNED-PAYLOAD'
+          end
+          context
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/stub_responses.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/stub_responses.rb
new file mode 100644
index 0000000..fa8cd7f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/stub_responses.rb
@@ -0,0 +1,104 @@
+# frozen_string_literal: true
+
+module Aws
+  module Plugins
+    # @api private
+    class StubResponses < Seahorse::Client::Plugin
+
+      option(:stub_responses,
+        default: false,
+        doc_type: 'Boolean',
+        docstring: <<-DOCS)
+Causes the client to return stubbed responses. By default
+fake responses are generated and returned. You can specify
+the response data to return or errors to raise by calling
+{ClientStubs#stub_responses}. See {ClientStubs} for more information.
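+For example, a client constructed with `stub_responses: true` (e.g. a
+hypothetical `Aws::S3::Client.new(stub_responses: true)`) makes no network
+calls and returns generated data for every operation.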
+
+** Please note ** When response stubbing is enabled, no HTTP
+requests are made, and retries are disabled.
+      DOCS
+
+      option(:region) do |config|
+        'us-stubbed-1' if config.stub_responses
+      end
+
+      option(:credentials) do |config|
+        if config.stub_responses
+          Credentials.new('stubbed-akid', 'stubbed-secret')
+        end
+      end
+
+      def add_handlers(handlers, config)
+        handlers.add(Handler, step: :send) if config.stub_responses
+      end
+
+      def after_initialize(client)
+        if client.config.stub_responses
+          client.setup_stubbing
+          client.handlers.remove(RetryErrors::Handler)
+          client.handlers.remove(RetryErrors::LegacyHandler)
+          client.handlers.remove(ClientMetricsPlugin::Handler)
+          client.handlers.remove(ClientMetricsSendPlugin::LatencyHandler)
+          client.handlers.remove(ClientMetricsSendPlugin::AttemptHandler)
+          client.handlers.remove(Seahorse::Client::Plugins::RequestCallback::OptionHandler)
+          client.handlers.remove(Seahorse::Client::Plugins::RequestCallback::ReadCallbackHandler)
+        end
+      end
+
+      class Handler < Seahorse::Client::Handler
+
+        def call(context)
+          stub = context.client.next_stub(context)
+          resp = Seahorse::Client::Response.new(context: context)
+          async_mode = context.client.is_a? Seahorse::Client::AsyncBase
+          if Hash === stub && stub[:mutex]
+            stub[:mutex].synchronize { apply_stub(stub, resp, async_mode) }
+          else
+            apply_stub(stub, resp, async_mode)
+          end
+
+          async_mode ? Seahorse::Client::AsyncResponse.new(
+            context: context, stream: context[:input_event_stream_handler].event_emitter.stream, sync_queue: Queue.new) : resp
+        end
+
+        def apply_stub(stub, response, async_mode = false)
+          http_resp = response.context.http_response
+          case
+          when stub[:error] then signal_error(stub[:error], http_resp)
+          when stub[:http] then signal_http(stub[:http], http_resp, async_mode)
+          when stub[:data] then response.data = stub[:data]
+          end
+        end
+
+        def signal_error(error, http_resp)
+          if Exception === error
+            http_resp.signal_error(error)
+          else
+            http_resp.signal_error(error.new)
+          end
+        end
+
+        # @param [Seahorse::Client::Http::Response] stub
+        # @param [Seahorse::Client::Http::Response | Seahorse::Client::Http::AsyncResponse] http_resp
+        # @param [Boolean] async_mode
+        def signal_http(stub, http_resp, async_mode = false)
+          if async_mode
+            h2_headers = stub.headers.to_h.inject([]) do |arr, (k, v)|
+              arr << [k, v]
+            end
+            h2_headers << [":status", stub.status_code]
+            http_resp.signal_headers(h2_headers)
+          else
+            http_resp.signal_headers(stub.status_code, stub.headers.to_h)
+          end
+          while chunk = stub.body.read(1024 * 1024)
+            http_resp.signal_data(chunk)
+          end
+          stub.body.rewind
+          http_resp.signal_done
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/transfer_encoding.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/transfer_encoding.rb
new file mode 100644
index 0000000..718db72
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/transfer_encoding.rb
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Aws
+  module Plugins
+
+    # For streaming input operations: when `requiresLength` is enabled,
+    # checks whether a `Content-Length` header can be set; for
+    # `v4-unsigned-body` operations, sets the `Transfer-Encoding` header
+    # instead.
+    class TransferEncoding < Seahorse::Client::Plugin
+
+      # @api private
+      class Handler < Seahorse::Client::Handler
+        def call(context)
+          if streaming?(context.operation.input)
+            # If it's an IO object and not a File / String / StringIO
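+            # (i.e. it exposes no usable #size). Roughly, the two
+            # outcomes below are:
+            #   requiresLength && no size   => raise MissingContentLength
+            #   v4-unsigned-body && no size => Transfer-Encoding: chunked
+            # Bodies with a known size are left alone and get a normal
+            # Content-Length.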
+            unless context.http_request.body.respond_to?(:size)
+              if requires_length?(context.operation.input)
+                # if size of the IO is not available but required
+                raise Aws::Errors::MissingContentLength.new
+              elsif context.operation['authtype'] == "v4-unsigned-body"
+                context.http_request.headers['Transfer-Encoding'] = 'chunked'
+              end
+            end
+          end
+
+          @handler.call(context)
+        end
+
+        private
+
+        def streaming?(ref)
+          if payload = ref[:payload_member]
+            payload["streaming"] || # checking ref and shape
+              payload.shape["streaming"]
+          else
+            false
+          end
+        end
+
+        def requires_length?(ref)
+          payload = ref[:payload_member]
+          payload["requiresLength"] || # checking ref and shape
+            payload.shape["requiresLength"]
+        end
+
+      end
+
+      handler(Handler, step: :sign)
+
+    end
+
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/user_agent.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/user_agent.rb
new file mode 100644
index 0000000..ae7937a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/plugins/user_agent.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+module Aws
+  module Plugins
+    # @api private
+    class UserAgent < Seahorse::Client::Plugin
+      option(:user_agent_suffix)
+
+      # @api private
+      class Handler < Seahorse::Client::Handler
+        def call(context)
+          set_user_agent(context)
+          @handler.call(context)
+        end
+
+        def set_user_agent(context)
+          ua = "aws-sdk-ruby3/#{CORE_GEM_VERSION}"
+
+          begin
+            ua += " #{RUBY_ENGINE}/#{RUBY_VERSION}"
+          rescue
+            ua += " RUBY_ENGINE_NA/#{RUBY_VERSION}"
+          end
+
+          ua += " #{RUBY_PLATFORM}"
+
+          if context[:gem_name] && context[:gem_version]
+            ua += " #{context[:gem_name]}/#{context[:gem_version]}"
+          end
+
+          if (execution_env = ENV['AWS_EXECUTION_ENV'])
+            ua += " exec-env/#{execution_env}"
+          end
+
+          if context.config.user_agent_suffix
+            ua += " #{context.config.user_agent_suffix}"
+          end
+
+          context.http_request.headers['User-Agent'] = ua.strip
+        end
+      end
+
+      handler(Handler)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/process_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/process_credentials.rb
new file mode 100644
index 0000000..0a778ef
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/process_credentials.rb
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module Aws
+  # A credential provider that executes a given process and attempts
+  # to read its stdout to receive a JSON payload containing the credentials.
+  #
+  #     credentials = Aws::ProcessCredentials.new('/usr/bin/credential_proc')
+  #     ec2 = Aws::EC2::Client.new(credentials: credentials)
+  #
+  # Automatically handles refreshing credentials if an Expiration time is
+  # provided in the credentials payload.
+  #
+  # @see https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#sourcing-credentials-from-external-processes
+  class ProcessCredentials
+
+    include CredentialProvider
+    include RefreshingCredentials
+
+    # Creates a new ProcessCredentials object, which allows an
+    # external process to be used as a credential provider.
+    #
+    # @param [String] process Invocation string for process
+    #   credentials provider.
+    def initialize(process)
+      @process = process
+      @credentials = credentials_from_process(@process)
+      @async_refresh = false
+
+      super
+    end
+
+    private
+
+    def credentials_from_process(proc_invocation)
+      begin
+        raw_out = `#{proc_invocation}`
+        process_status = $?
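+        # The process is expected to print a version-1 JSON payload on
+        # stdout, along these lines (values hypothetical):
+        #   {
+        #     "Version": 1,
+        #     "AccessKeyId": "AKID...",
+        #     "SecretAccessKey": "secret...",
+        #     "SessionToken": "token...",
+        #     "Expiration": "2023-01-01T00:00:00Z"
+        #   }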
+ rescue Errno::ENOENT + raise Errors::InvalidProcessCredentialsPayload.new("Could not find process #{proc_invocation}") + end + + if process_status.success? + begin + creds_json = Aws::Json.load(raw_out) + rescue Aws::Json::ParseError + raise Errors::InvalidProcessCredentialsPayload.new("Invalid JSON response") + end + payload_version = creds_json['Version'] + if payload_version == 1 + _parse_payload_format_v1(creds_json) + else + raise Errors::InvalidProcessCredentialsPayload.new("Invalid version #{payload_version} for credentials payload") + end + else + raise Errors::InvalidProcessCredentialsPayload.new('credential_process provider failure, the credential process had non zero exit status and failed to provide credentials') + end + end + + def _parse_payload_format_v1(creds_json) + creds = Credentials.new( + creds_json['AccessKeyId'], + creds_json['SecretAccessKey'], + creds_json['SessionToken'] + ) + + @expiration = creds_json['Expiration'] ? Time.iso8601(creds_json['Expiration']) : nil + return creds if creds.set? + raise Errors::InvalidProcessCredentialsPayload.new("Invalid payload for JSON credentials version 1") + end + + def refresh + @credentials = credentials_from_process(@process) + end + + def near_expiration?(expiration_length) + # are we within 5 minutes of expiration? + @expiration && (Time.now.to_i + expiration_length) > @expiration.to_i + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query.rb new file mode 100644 index 0000000..e1c2d95 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require_relative 'query/ec2_param_builder' +require_relative 'query/handler' +require_relative 'query/param' +require_relative 'query/param_builder' +require_relative 'query/param_list' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/ec2_param_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/ec2_param_builder.rb new file mode 100644 index 0000000..3cbbd04 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/ec2_param_builder.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module Query + class EC2ParamBuilder + + include Seahorse::Model::Shapes + + def initialize(param_list) + @params = param_list + end + + attr_reader :params + + def apply(ref, params) + structure(ref, params, '') + end + + private + + def structure(ref, values, prefix) + shape = ref.shape + values.each_pair do |name, value| + unless value.nil? + member_ref = shape.member(name) + format(member_ref, value, prefix + query_name(member_ref)) + end + end + end + + def list(ref, values, prefix) + if values.empty? 
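+          # An empty list still serializes its key (e.g. "Values="), while
+          # a non-empty list flattens to 1-based indices, e.g. a
+          # hypothetical ["a", "b"] under prefix "Values" becomes
+          # "Values.1=a&Values.2=b".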
+ set(prefix, '') + else + member_ref = ref.shape.member + values.each.with_index do |value, n| + format(member_ref, value, "#{prefix}.#{n+1}") + end + end + end + + def format(ref, value, prefix) + case ref.shape + when StructureShape then structure(ref, value, prefix + '.') + when ListShape then list(ref, value, prefix) + when MapShape then raise NotImplementedError + when BlobShape then set(prefix, blob(value)) + when TimestampShape then set(prefix, timestamp(ref, value)) + else + set(prefix, value.to_s) + end + end + + def query_name(ref) + ref['queryName'] || ucfirst(ref.location_name) + end + + def set(name, value) + params.set(name, value) + end + + def ucfirst(str) + str[0].upcase + str[1..-1] + end + + def blob(value) + value = value.read unless String === value + Base64.strict_encode64(value) + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'unixTimestamp' then value.to_i + when 'rfc822' then value.utc.httpdate + else + # ec2 defaults to iso8601 + value.utc.iso8601 + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/handler.rb new file mode 100644 index 0000000..2b6cf8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/handler.rb @@ -0,0 +1,94 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Query + class Handler < Seahorse::Client::Handler + + include Seahorse::Model::Shapes + + CONTENT_TYPE = 'application/x-www-form-urlencoded; charset=utf-8' + + WRAPPER_STRUCT = ::Struct.new(:result, :response_metadata) + + METADATA_STRUCT = ::Struct.new(:request_id) + + METADATA_REF = begin + request_id = ShapeRef.new( + shape: StringShape.new, + location_name: 'RequestId') + response_metadata = StructureShape.new + response_metadata.struct_class = METADATA_STRUCT + response_metadata.add_member(:request_id, request_id) + ShapeRef.new(shape: response_metadata, location_name: 'ResponseMetadata') + end + + # @param [Seahorse::Client::RequestContext] context + # @return [Seahorse::Client::Response] + def call(context) + build_request(context) + @handler.call(context).on_success do |response| + response.error = nil + parsed = parse_xml(context) + if parsed.nil? 
|| parsed == EmptyStructure + response.data = EmptyStructure.new + else + response.data = parsed + end + end + end + + private + + def build_request(context) + context.http_request.http_method = 'POST' + context.http_request.headers['Content-Type'] = CONTENT_TYPE + param_list = ParamList.new + param_list.set('Version', context.config.api.version) + param_list.set('Action', context.operation.name) + if input_shape = context.operation.input + apply_params(param_list, context.params, input_shape) + end + context.http_request.body = param_list.to_io + end + + def apply_params(param_list, params, rules) + ParamBuilder.new(param_list).apply(rules, params) + end + + def parse_xml(context) + data = Xml::Parser.new(rules(context)).parse(xml(context)) + remove_wrapper(data, context) + end + + def xml(context) + context.http_response.body_contents + end + + def rules(context) + shape = Seahorse::Model::Shapes::StructureShape.new + if context.operation.output + shape.add_member(:result, ShapeRef.new( + shape: context.operation.output.shape, + location_name: context.operation.name + 'Result' + )) + end + shape.struct_class = WRAPPER_STRUCT + shape.add_member(:response_metadata, METADATA_REF) + ShapeRef.new(shape: shape) + end + + def remove_wrapper(data, context) + if context.operation.output + if data.response_metadata + context[:request_id] = data.response_metadata.request_id + end + data.result || Structure.new(*context.operation.output.shape.member_names) + else + data + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param.rb new file mode 100644 index 0000000..dfb6bfc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Aws + module Query + class Param + + # @param [String] name + # @param [String, nil] value (nil) + def initialize(name, value = nil) + @name = name.to_s + @value = value + end + + # @return [String] + attr_reader :name + + # @return [String, nil] + attr_reader :value + + # @return [String] + def to_s + value ? "#{escape(name)}=#{escape(value)}" : "#{escape(name)}=" + end + + # @api private + def ==(other) + other.kind_of?(Param) && + other.name == name && + other.value == value + end + + # @api private + def <=> other + name <=> other.name + end + + private + + def escape(str) + Seahorse::Util.uri_escape(str) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_builder.rb new file mode 100644 index 0000000..6f952bb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_builder.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module Query + class ParamBuilder + + include Seahorse::Model::Shapes + + def initialize(param_list) + @params = param_list + end + + attr_reader :params + + def apply(ref, params) + structure(ref, params, '') + end + + private + + def structure(ref, values, prefix) + shape = ref.shape + values.each_pair do |name, value| + next if value.nil? + member_ref = shape.member(name) + format(member_ref, value, prefix + query_name(member_ref)) + end + end + + def list(ref, values, prefix) + member_ref = ref.shape.member + if values.empty? 
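+          # Query lists serialize as either flat ("Name.1=a") or
+          # member-wrapped ("Name.member.1=a") keys depending on the
+          # shape's "flattened" trait, handled just below.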
+ set(prefix, '') + return + end + if flat?(ref) + if name = query_name(member_ref) + parts = prefix.split('.') + parts.pop + parts.push(name) + prefix = parts.join('.') + end + else + prefix += '.' + (member_ref.location_name || 'member') + end + values.each.with_index do |value, n| + format(member_ref, value, "#{prefix}.#{n+1}") + end + end + + def map(ref, values, prefix) + key_ref = ref.shape.key + value_ref = ref.shape.value + prefix += '.entry' unless flat?(ref) + key_name = "%s.%d.#{query_name(key_ref, 'key')}" + value_name = "%s.%d.#{query_name(value_ref, 'value')}" + values.each.with_index do |(key, value), n| + format(key_ref, key, key_name % [prefix, n + 1]) + format(value_ref, value, value_name % [prefix, n + 1]) + end + end + + def format(ref, value, prefix) + case ref.shape + when StructureShape then structure(ref, value, prefix + '.') + when ListShape then list(ref, value, prefix) + when MapShape then map(ref, value, prefix) + when BlobShape then set(prefix, blob(value)) + when TimestampShape then set(prefix, timestamp(ref, value)) + else set(prefix, value.to_s) + end + end + + def query_name(ref, default = nil) + ref.location_name || default + end + + def set(name, value) + params.set(name, value) + end + + def flat?(ref) + ref.shape.flattened + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'unixTimestamp' then value.to_i + when 'rfc822' then value.utc.httpdate + else + # query defaults to iso8601 + value.utc.iso8601 + end + end + + def blob(value) + value = value.read unless String === value + Base64.strict_encode64(value) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_list.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_list.rb new file mode 100644 index 0000000..58a49cb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/query/param_list.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require 'stringio' + +module Aws + module Query + class ParamList + + include Enumerable + + # @api private + def initialize + @params = {} + end + + # @param [String] param_name + # @param [String, nil] param_value + # @return [Param] + def set(param_name, param_value = nil) + param = Param.new(param_name, param_value) + @params[param.name] = param + param + end + alias []= set + + # @return [Param, nil] + def [](param_name) + @params[param_name.to_s] + end + + # @param [String] param_name + # @return [Param, nil] + def delete(param_name) + @params.delete(param_name) + end + + # @return [Enumerable] + def each(&block) + to_a.each(&block) + end + + # @return [Boolean] + def empty? + @params.empty? + end + + # @return [Array] Returns an array of sorted {Param} objects. 
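+      # @example A rough usage sketch (action name hypothetical):
+      #   params = ParamList.new
+      #   params.set('Action', 'DescribeThings')
+      #   params.set('Version', '2016-01-01')
+      #   params.to_s #=> "Action=DescribeThings&Version=2016-01-01"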
+ def to_a + @params.values.sort + end + + # @return [String] + def to_s + to_a.map(&:to_s).join('&') + end + + # @return [#read, #rewind, #size] + def to_io + IoWrapper.new(self) + end + + # @api private + class IoWrapper + + # @param [ParamList] param_list + def initialize(param_list) + @param_list = param_list + @io = StringIO.new(param_list.to_s) + end + + # @return [ParamList] + attr_reader :param_list + + # @return [Integer] + def size + @io.size + end + + # @return [void] + def rewind + @io.rewind + end + + # @return [String, nil] + def read(bytes = nil, output_buffer = nil) + @io.read(bytes, output_buffer) + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_credentials.rb new file mode 100644 index 0000000..4b19f65 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_credentials.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +require 'thread' + +module Aws + + # Base class used credential classes that can be refreshed. This + # provides basic refresh logic in a thread-safe manner. Classes mixing in + # this module are expected to implement a #refresh method that populates + # the following instance variables: + # + # * `@access_key_id` + # * `@secret_access_key` + # * `@session_token` + # * `@expiration` + # + # @api private + module RefreshingCredentials + + SYNC_EXPIRATION_LENGTH = 300 # 5 minutes + ASYNC_EXPIRATION_LENGTH = 600 # 10 minutes + + CLIENT_EXCLUDE_OPTIONS = Set.new([:before_refresh]).freeze + + def initialize(options = {}) + @mutex = Mutex.new + @before_refresh = options.delete(:before_refresh) if Hash === options + + @before_refresh.call(self) if @before_refresh + refresh + end + + # @return [Credentials] + def credentials + refresh_if_near_expiration! + @credentials + end + + # @return [Time,nil] + def expiration + refresh_if_near_expiration! + @expiration + end + + # Refresh credentials. + # @return [void] + def refresh! + @mutex.synchronize do + @before_refresh.call(self) if @before_refresh + + refresh + end + end + + private + + # Refreshes credentials asynchronously and synchronously. + # If we are near to expiration, block while getting new credentials. + # Otherwise, if we're approaching expiration, use the existing credentials + # but attempt a refresh in the background. + def refresh_if_near_expiration! + # Note: This check is an optimization. Rather than acquire the mutex on every #refresh_if_near_expiration + # call, we check before doing so, and then we check within the mutex to avoid a race condition. + # See issue: https://github.com/aws/aws-sdk-ruby/issues/2641 for more info. + if near_expiration?(SYNC_EXPIRATION_LENGTH) + @mutex.synchronize do + if near_expiration?(SYNC_EXPIRATION_LENGTH) + @before_refresh.call(self) if @before_refresh + refresh + end + end + elsif @async_refresh && near_expiration?(ASYNC_EXPIRATION_LENGTH) + unless @mutex.locked? + Thread.new do + @mutex.synchronize do + if near_expiration?(ASYNC_EXPIRATION_LENGTH) + @before_refresh.call(self) if @before_refresh + refresh + end + end + end + end + end + end + + def near_expiration?(expiration_length) + if @expiration + # Are we within expiration? 
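+          # With SYNC_EXPIRATION_LENGTH (300s) this is true within five
+          # minutes of expiry, triggering a blocking refresh; the async
+          # path passes the wider 600s ASYNC_EXPIRATION_LENGTH instead.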
+ (Time.now.to_i + expiration_length) > @expiration.to_i + else + true + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_token.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_token.rb new file mode 100644 index 0000000..a654201 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/refreshing_token.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'thread' + +module Aws + + # Module/mixin used by token provider classes that can be refreshed. This + # provides basic refresh logic in a thread-safe manner. Classes mixing in + # this module are expected to implement a #refresh method that populates + # the following instance variable: + # + # * `@token` [Token] - {Aws::Token} object with the `expiration` and `token` + # fields set. + # + # @api private + module RefreshingToken + + def initialize(options = {}) + @mutex = Mutex.new + @before_refresh = options.delete(:before_refresh) if Hash === options + + @before_refresh.call(self) if @before_refresh + refresh + end + + # @return [Token] + def token + refresh_if_near_expiration + @token + end + + # @return [Time,nil] + def expiration + refresh_if_near_expiration + @expiration + end + + # Refresh token. + # @return [void] + def refresh! + @mutex.synchronize do + @before_refresh.call(self) if @before_refresh + refresh + end + end + + private + + # Refreshes token if it is within + # 5 minutes of expiration. + def refresh_if_near_expiration + if near_expiration? + @mutex.synchronize do + if near_expiration? + @before_refresh.call(self) if @before_refresh + refresh + end + end + end + end + + def near_expiration? + if @token && @token.expiration + # are we within 5 minutes of expiration? + (Time.now.to_i + 5 * 60) > @token.expiration.to_i + else + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/resources/collection.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/resources/collection.rb new file mode 100644 index 0000000..2bed5d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/resources/collection.rb @@ -0,0 +1,123 @@ +# frozen_string_literal: true + +module Aws + module Resources + class Collection + + extend Aws::Deprecations + include Enumerable + + # @param [Enumerator] batches + # @option options [Integer] :limit + # @option options [Integer] :size + # @api private + def initialize(batches, options = {}) + @batches = batches + @limit = options[:limit] + @size = options[:size] + end + + # @return [Integer,nil] + # Returns the size of this collection if known, returns `nil` when + # an API call is necessary to enumerate items in this collection. 
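+      # @example A rough usage sketch (collection contents hypothetical):
+      #   collection.limit(10).each { |item| use(item) } # at most 10 yields
+      #   collection.first(3) # => a Collection of up to 3 items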
+ def size + @size + end + alias :length :size + + # @deprecated + # @api private + def batches + ::Enumerator.new do |y| + batch_enum.each do |batch| + y << self.class.new([batch], size: batch.size) + end + end + end + + # @deprecated + # @api private + def [](index) + if @size + @batches[0][index] + else + raise "unable to index into a lazy loaded collection" + end + end + deprecated :[] + + # @return [Enumerator] + def each(&block) + enum = ::Enumerator.new do |y| + batch_enum.each do |batch| + batch.each do |band| + y.yield(band) + end + end + end + enum.each(&block) if block + enum + end + + # @param [Integer] count + # @return [Resource, Collection] + def first(count = nil) + if count + items = limit(count).to_a + self.class.new([items], size: items.size) + else + begin + each.next + rescue StopIteration + nil + end + end + end + + # Returns a new collection that will enumerate a limited number of items. + # + # collection.limit(10).each do |band| + # # yields at most 10 times + # end + # + # @return [Collection] + # @param [Integer] limit + def limit(limit) + Collection.new(@batches, limit: limit) + end + + private + + def batch_enum + case @limit + when 0 then [] + when nil then non_empty_batches + else limited_batches + end + end + + def non_empty_batches + ::Enumerator.new do |y| + @batches.each do |batch| + y.yield(batch) if batch.size > 0 + end + end + end + + def limited_batches + ::Enumerator.new do |y| + yielded = 0 + @batches.each do |batch| + batch = batch.take(@limit - yielded) + if batch.size > 0 + y.yield(batch) + yielded += batch.size + end + break if yielded == @limit + end + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest.rb new file mode 100644 index 0000000..c2387ae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require_relative 'rest/handler' +require_relative 'rest/request/body' +require_relative 'rest/request/builder' +require_relative 'rest/request/endpoint' +require_relative 'rest/request/headers' +require_relative 'rest/request/querystring_builder' +require_relative 'rest/response/body' +require_relative 'rest/response/headers' +require_relative 'rest/response/parser' +require_relative 'rest/response/status_code' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/handler.rb new file mode 100644 index 0000000..3089ffa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/handler.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Rest + class Handler < Seahorse::Client::Handler + + def call(context) + Rest::Request::Builder.new.apply(context) + resp = @handler.call(context) + resp.on(200..299) { |response| Response::Parser.new.apply(response) } + resp.on(200..599) { |response| apply_request_id(context) } + resp + end + + private + + def apply_request_id(context) + h = context.http_response.headers + context[:request_id] ||= h['x-amz-request-id'] || h['x-amzn-requestid'] + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/body.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/body.rb new file mode 100644 index 
0000000..805b8f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/body.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Request + class Body + + include Seahorse::Model::Shapes + + # @param [Class] serializer_class + # @param [Seahorse::Model::ShapeRef] rules + def initialize(serializer_class, rules) + @serializer_class = serializer_class + @rules = rules + end + + # @param [Seahorse::Client::Http::Request] http_req + # @param [Hash] params + def apply(http_req, params) + body = build_body(params) + # for rest-json, ensure we send at least an empty object + # don't send an empty object for streaming? case. + if body.nil? && @serializer_class == Json::Builder && + modeled_body? && !streaming? + body = '{}' + end + http_req.body = body + end + + private + + # operation is modeled for body when it is modeled for a payload + # either with payload trait or normal members. + def modeled_body? + return true if @rules[:payload] + @rules.shape.members.each do |member| + _name, shape = member + return true if shape.location.nil? + end + false + end + + def build_body(params) + if streaming? + params[@rules[:payload]] + elsif @rules[:payload] + params = params[@rules[:payload]] + serialize(@rules[:payload_member], params) if params + else + params = body_params(params) + serialize(@rules, params) unless params.empty? + end + end + + def streaming? + @rules[:payload] && ( + BlobShape === @rules[:payload_member].shape || + StringShape === @rules[:payload_member].shape + ) + end + + def serialize(rules, params) + @serializer_class.new(rules).serialize(params) + end + + def body_params(params) + @rules.shape.members.inject({}) do |hash, (member_name, member_ref)| + if !member_ref.location && params.key?(member_name) + hash[member_name] = params[member_name] + end + hash + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/builder.rb new file mode 100644 index 0000000..842a9f0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/builder.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Request + class Builder + + def apply(context) + populate_http_method(context) + populate_endpoint(context) + populate_headers(context) + populate_body(context) + end + + private + + def populate_http_method(context) + context.http_request.http_method = context.operation.http_method + end + + def populate_endpoint(context) + context.http_request.endpoint = Endpoint.new( + context.operation.input, + context.operation.http_request_uri, + ).uri(context.http_request.endpoint, context.params) + end + + def populate_headers(context) + headers = Headers.new(context.operation.input) + headers.apply(context.http_request, context.params) + end + + def populate_body(context) + Body.new( + serializer_class(context), + context.operation.input + ).apply(context.http_request, context.params) + end + + def serializer_class(context) + protocol = context.config.api.metadata['protocol'] + case protocol + when 'rest-xml' then Xml::Builder + when 'rest-json' then Json::Builder + when 'api-gateway' then Json::Builder + else raise "unsupported protocol #{protocol}" + end + end + + end + end + end +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/endpoint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/endpoint.rb new file mode 100644 index 0000000..540edb1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/endpoint.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +require 'uri' + +module Aws + module Rest + module Request + class Endpoint + + # @param [Seahorse::Model::Shapes::ShapeRef] rules + # @param [String] request_uri_pattern + def initialize(rules, request_uri_pattern) + @rules = rules + request_uri_pattern.split('?').tap do |path_part, query_part| + @path_pattern = path_part + @query_prefix = query_part + end + end + + # @param [URI::HTTPS,URI::HTTP] base_uri + # @param [Hash,Struct] params + # @return [URI::HTTPS,URI::HTTP] + def uri(base_uri, params) + uri = URI.parse(base_uri.to_s) + apply_path_params(uri, params) + apply_querystring_params(uri, params) + uri + end + + private + + def apply_path_params(uri, params) + path = uri.path.sub(/\/$/, '') + @path_pattern.split('?')[0] + uri.path = path.gsub(/{.+?}/) do |placeholder| + param_value_for_placeholder(placeholder, params) + end + end + + def param_value_for_placeholder(placeholder, params) + name = param_name(placeholder) + value = params[name].to_s + raise ArgumentError, ":#{name} must not be blank" if value.empty? + + if placeholder.include?('+') + value.gsub(/[^\/]+/) { |v| escape(v) } + else + escape(value) + end + end + + def param_name(placeholder) + location_name = placeholder.gsub(/[{}+]/,'') + param_name, _ = @rules.shape.member_by_location_name(location_name) + param_name + end + + def apply_querystring_params(uri, params) + # collect params that are supposed to be part of the query string + parts = @rules.shape.members.inject([]) do |prts, (member_name, member_ref)| + if member_ref.location == 'querystring' && !params[member_name].nil? + prts << [member_ref, params[member_name]] + end + prts + end + querystring = QuerystringBuilder.new.build(parts) + querystring = [@query_prefix, querystring == '' ? nil : querystring].compact.join('&') + querystring = nil if querystring == '' + uri.query = querystring + end + + def escape(string) + Seahorse::Util.uri_escape(string) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/headers.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/headers.rb new file mode 100644 index 0000000..50bda30 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/headers.rb @@ -0,0 +1,77 @@ +# frozen_string_literal: true + +require 'time' +require 'base64' + +module Aws + module Rest + module Request + class Headers + + include Seahorse::Model::Shapes + + # @param [Seahorse::Model::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @param [Seahorse::Client::Http::Request] http_req + # @param [Hash] params + def apply(http_req, params) + @rules.shape.members.each do |name, ref| + value = params[name] + next if value.nil? 
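+            # For example, a member with location 'header' and
+            # location_name 'X-Foo' (hypothetical) serializes to a single
+            # header, while a 'headers' map member whose location_name is
+            # 'x-amz-meta-' expands to one header per hash key.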
+ case ref.location + when 'header' then apply_header_value(http_req.headers, ref, value) + when 'headers' then apply_header_map(http_req.headers, ref, value) + end + end + end + + private + + def apply_header_value(headers, ref, value) + value = apply_json_trait(value) if ref['jsonvalue'] + case ref.shape + when TimestampShape then headers[ref.location_name] = timestamp(ref, value) + when ListShape then list(headers, ref, value) + else headers[ref.location_name] = value.to_s + end + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'unixTimestamp' then value.to_i + when 'iso8601' then value.utc.iso8601 + else + # header default to rfc822 + value.utc.httpdate + end + end + + def list(headers, ref, value) + return if !value || value.empty? + headers[ref.location_name] = value + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + end + + def apply_header_map(headers, ref, values) + prefix = ref.location_name || '' + values.each_pair do |name, value| + headers["#{prefix}#{name}"] = value.to_s + end + end + + # With complex headers value in json syntax, + # base64 encodes value to avoid weird characters + # causing potential issues in headers + def apply_json_trait(value) + Base64.strict_encode64(value) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/querystring_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/querystring_builder.rb new file mode 100644 index 0000000..3e01351 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/request/querystring_builder.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Request + class QuerystringBuilder + + include Seahorse::Model::Shapes + + # Provide shape references and param values: + # + # [ + # [shape_ref1, 123], + # [shape_ref2, "text"] + # ] + # + # Returns a querystring: + # + # "Count=123&Words=text" + # + # @param [Array>] params An array of + # model shape references and request parameter value pairs. 
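The QuerystringBuilder documented here expands scalar, map, and list members into `key=value` parts joined with `&`. A hypothetical, self-contained mirror of the map-of-string-list case (escaping omitted for brevity; keys and values invented):

```ruby
# Each value in a list gets its own 'key=value' part, so the key repeats.
params = { 'tag' => %w[a b], 'x-id' => %w[GetObject] }
parts = params.flat_map { |key, values| values.map { |v| "#{key}=#{v}" } }
parts.join('&') # => "tag=a&tag=b&x-id=GetObject"
```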
+ # + # @return [String] Returns a built querystring + def build(params) + params.map do |(shape_ref, param_value)| + build_part(shape_ref, param_value) + end.join('&') + end + + private + + def build_part(shape_ref, param_value) + case shape_ref.shape + # supported scalar types + when StringShape, BooleanShape, FloatShape, IntegerShape, StringShape + param_name = shape_ref.location_name + "#{param_name}=#{escape(param_value.to_s)}" + when TimestampShape + param_name = shape_ref.location_name + "#{param_name}=#{escape(timestamp(shape_ref, param_value))}" + when MapShape + if StringShape === shape_ref.shape.value.shape + query_map_of_string(param_value) + elsif ListShape === shape_ref.shape.value.shape + query_map_of_string_list(param_value) + else + msg = "only map of string and string list supported" + raise NotImplementedError, msg + end + when ListShape + if StringShape === shape_ref.shape.member.shape + list_of_strings(shape_ref.location_name, param_value) + else + msg = "Only list of strings supported, got "\ + "#{shape_ref.shape.member.shape.class.name}" + raise NotImplementedError, msg + end + else + raise NotImplementedError + end + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'unixTimestamp' then value.to_i + when 'rfc822' then value.utc.httpdate + else + # querystring defaults to iso8601 + value.utc.iso8601 + end + end + + def query_map_of_string(hash) + list = [] + hash.each_pair do |key, value| + list << "#{escape(key)}=#{escape(value)}" + end + list + end + + def query_map_of_string_list(hash) + list = [] + hash.each_pair do |key, values| + values.each do |value| + list << "#{escape(key)}=#{escape(value)}" + end + end + list + end + + def list_of_strings(name, values) + values.map do |value| + "#{name}=#{escape(value)}" + end + end + + def escape(string) + Seahorse::Util.uri_escape(string) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/body.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/body.rb new file mode 100644 index 0000000..ea4bf60 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/body.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Response + class Body + + include Seahorse::Model::Shapes + + # @param [Class] parser_class + # @param [Seahorse::Model::ShapeRef] rules + def initialize(parser_class, rules) + @parser_class = parser_class + @rules = rules + end + + # @param [IO] body + # @param [Hash, Struct] data + def apply(body, data) + if event_stream? + data[@rules[:payload]] = parse_eventstream(body) + elsif streaming? + data[@rules[:payload]] = body + elsif @rules[:payload] + data[@rules[:payload]] = parse(body.read, @rules[:payload_member]) + elsif !@rules.shape.member_names.empty? + parse(body.read, @rules, data) + end + end + + private + + def event_stream? + @rules[:payload] && @rules[:payload_member].eventstream + end + + def streaming? 
+ @rules[:payload] && ( + BlobShape === @rules[:payload_member].shape || + StringShape === @rules[:payload_member].shape + ) + end + + def parse(body, rules, target = nil) + @parser_class.new(rules).parse(body, target) if body.size > 0 + end + + def parse_eventstream(body) + # body contains an array of parsed event when they arrive + @rules[:payload_member].shape.struct_class.new do |payload| + body.each { |event| payload << event } + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/headers.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/headers.rb new file mode 100644 index 0000000..40a4402 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/headers.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +require 'time' +require 'base64' + +module Aws + module Rest + module Response + class Headers + + include Seahorse::Model::Shapes + + # @param [Seahorse::Model::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @param [Seahorse::Client::Http::Response] http_resp + # @param [Hash, Struct] target + def apply(http_resp, target) + headers = http_resp.headers + @rules.shape.members.each do |name, ref| + case ref.location + when 'header' then extract_header_value(headers, name, ref, target) + when 'headers' then extract_header_map(headers, name, ref, target) + end + end + end + + def extract_header_value(headers, name, ref, data) + if headers.key?(ref.location_name) + data[name] = cast_value(ref, headers[ref.location_name]) + end + end + + def cast_value(ref, value) + value = extract_json_trait(value) if ref['jsonvalue'] + case ref.shape + when StringShape then value + when IntegerShape then value.to_i + when FloatShape then value.to_f + when BooleanShape then value == 'true' + when ListShape then + value.split(",").map { |v| cast_value(ref.shape.member, v) } + when TimestampShape + if value =~ /^\d+(\.\d*)/ + Time.at(value.to_f) + elsif value =~ /^\d+$/ + Time.at(value.to_i) + else + begin + Time.parse(value) + rescue + nil + end + end + else raise "unsupported shape #{ref.shape.class}" + end + end + + def extract_header_map(headers, name, ref, data) + data[name] = {} + prefix = ref.location_name || '' + headers.each do |header_name, header_value| + if match = header_name.match(/^#{prefix}(.+)/i) + data[name][match[1]] = header_value + end + end + end + + def extract_json_trait(value) + Aws::Json.load(Base64.decode64(value)) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/parser.rb new file mode 100644 index 0000000..0e6ad68 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/parser.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Response + class Parser + + def apply(response) + # TODO : remove this unless check once response stubbing is fixed + if rules = response.context.operation.output + response.data = rules.shape.struct_class.new + extract_status_code(rules, response) + extract_headers(rules, response) + extract_body(rules, response) + else + response.data = EmptyStructure.new + end + end + + private + + def extract_status_code(rules, response) + status_code = StatusCode.new(rules) + status_code.apply(response.context.http_response, 
response.data) + end + + def extract_headers(rules, response) + headers = Headers.new(rules) + headers.apply(response.context.http_response, response.data) + end + + def extract_body(rules, response) + Body.new(parser_class(response), rules). + apply( + response.context.http_response.body, + response.data + ) + end + + def parser_class(response) + protocol = response.context.config.api.metadata['protocol'] + case protocol + when 'rest-xml' then Xml::Parser + when 'rest-json' then Json::Parser + when 'api-gateway' then Json::Parser + else raise "unsupported protocol #{protocol}" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/status_code.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/status_code.rb new file mode 100644 index 0000000..f61b7c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/rest/response/status_code.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module Aws + module Rest + module Response + class StatusCode + + # @param [Seahorse::Model::Shapes::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @param [Seahorse::Client::Http::Response] http_resp + # @param [Hash, Struct] data + def apply(http_resp, data) + @rules.shape.members.each do |member_name, member_ref| + if member_ref.location == 'statusCode' + data[member_name] = http_resp.status_code + end + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_config.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_config.rb new file mode 100644 index 0000000..acad19c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_config.rb @@ -0,0 +1,449 @@ +# frozen_string_literal: true + +module Aws + # @api private + class SharedConfig + SSO_CREDENTIAL_PROFILE_KEYS = %w[sso_account_id sso_role_name].freeze + SSO_PROFILE_KEYS = %w[sso_session sso_start_url sso_region sso_account_id sso_role_name].freeze + SSO_TOKEN_PROFILE_KEYS = %w[sso_session].freeze + SSO_SESSION_KEYS = %w[sso_region sso_start_url].freeze + + + # @return [String] + attr_reader :credentials_path + + # @return [String] + attr_reader :config_path + + # @return [String] + attr_reader :profile_name + + # Constructs a new SharedConfig provider object. This will load the shared + # credentials file, and optionally the shared configuration file, as ini + # files which support profiles. + # + # By default, the shared credential file (the default path for which is + # `~/.aws/credentials`) and the shared config file (the default path for + # which is `~/.aws/config`) are loaded. However, if you set the + # `ENV['AWS_SDK_CONFIG_OPT_OUT']` environment variable, only the shared + # credential file will be loaded. You can specify the shared credential + # file path with the `ENV['AWS_SHARED_CREDENTIALS_FILE']` environment + # variable or with the `:credentials_path` option. Similarly, you can + # specify the shared config file path with the `ENV['AWS_CONFIG_FILE']` + # environment variable or with the `:config_path` option. + # + # The default profile name is 'default'. You can specify the profile name + # with the `ENV['AWS_PROFILE']` environment variable or with the + # `:profile_name` option. + # + # @param [Hash] options + # @option options [String] :credentials_path Path to the shared credentials + # file. 
If not specified, will check `ENV['AWS_SHARED_CREDENTIALS_FILE']` + # before using the default value of "#{Dir.home}/.aws/credentials". + # @option options [String] :config_path Path to the shared config file. + # If not specified, will check `ENV['AWS_CONFIG_FILE']` before using the + # default value of "#{Dir.home}/.aws/config". + # @option options [String] :profile_name The credential/config profile name + # to use. If not specified, will check `ENV['AWS_PROFILE']` before using + # the fixed default value of 'default'. + # @option options [Boolean] :config_enabled If true, loads the shared config + # file and enables new config values outside of the old shared credential + # spec. + def initialize(options = {}) + @parsed_config = nil + @profile_name = determine_profile(options) + @config_enabled = options[:config_enabled] + @credentials_path = options[:credentials_path] || + determine_credentials_path + @credentials_path = File.expand_path(@credentials_path) if @credentials_path + @parsed_credentials = {} + load_credentials_file if loadable?(@credentials_path) + if @config_enabled + @config_path = options[:config_path] || determine_config_path + @config_path = File.expand_path(@config_path) if @config_path + load_config_file if loadable?(@config_path) + end + end + + # @api private + def fresh(options = {}) + @profile_name = nil + @credentials_path = nil + @config_path = nil + @parsed_credentials = {} + @parsed_config = nil + @config_enabled = options[:config_enabled] ? true : false + @profile_name = determine_profile(options) + @credentials_path = options[:credentials_path] || + determine_credentials_path + load_credentials_file if loadable?(@credentials_path) + if @config_enabled + @config_path = options[:config_path] || determine_config_path + load_config_file if loadable?(@config_path) + end + end + + # @return [Boolean] Returns `true` if a credential file + # exists and has appropriate read permissions at {#path}. + # @note This method does not indicate if the file found at {#path} + # will be parsable, only if it can be read. + def loadable?(path) + !path.nil? && File.exist?(path) && File.readable?(path) + end + + # @return [Boolean] returns `true` if use of the shared config file is + # enabled. + def config_enabled? + @config_enabled ? true : false + end + + # Sources static credentials from shared credential/config files. + # + # @param [Hash] opts + # @option options [String] :profile the name of the profile from + # which credentials are being sourced. + # @return [Aws::Credentials] credentials sourced from configuration values, + # or `nil` if no valid credentials were found. + def credentials(opts = {}) + p = opts[:profile] || @profile_name + validate_profile_exists(p) + if (credentials = credentials_from_shared(p, opts)) + credentials + elsif (credentials = credentials_from_config(p, opts)) + credentials + end + end + + # Attempts to assume a role from shared config or shared credentials file. + # Will always attempt first to assume a role from the shared credentials + # file, if present.
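For context, a hypothetical ~/.aws/config that the assume-role resolution below would act on (profile names, keys, and ARN invented):

```ruby
# [profile base]                      ; static credentials
# aws_access_key_id     = AKIAEXAMPLE
# aws_secret_access_key = wJalrEXAMPLE
#
# [profile app-role]                  ; role chained onto 'base'
# role_arn       = arn:aws:iam::123456789012:role/app
# source_profile = base
#
# With config_enabled, resolving 'app-role' first resolves the static
# credentials from 'base', then constructs AssumeRoleCredentials with the
# role_arn above (role_session_name falls back to 'default_session').
```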
+ def assume_role_credentials_from_config(opts = {}) + p = opts.delete(:profile) || @profile_name + chain_config = opts.delete(:chain_config) + credentials = assume_role_from_profile(@parsed_credentials, p, opts, chain_config) + if @parsed_config + credentials ||= assume_role_from_profile(@parsed_config, p, opts, chain_config) + end + credentials + end + + def assume_role_web_identity_credentials_from_config(opts = {}) + p = opts[:profile] || @profile_name + if @config_enabled && @parsed_config + entry = @parsed_config.fetch(p, {}) + if entry['web_identity_token_file'] && entry['role_arn'] + cfg = { + role_arn: entry['role_arn'], + web_identity_token_file: entry['web_identity_token_file'], + role_session_name: entry['role_session_name'] + } + cfg[:region] = opts[:region] if opts[:region] + AssumeRoleWebIdentityCredentials.new(cfg) + end + end + end + + # Attempts to load from shared config or shared credentials file. + # Will always attempt first to load from the shared credentials + # file, if present. + def sso_credentials_from_config(opts = {}) + p = opts[:profile] || @profile_name + credentials = sso_credentials_from_profile(@parsed_credentials, p) + if @parsed_config + credentials ||= sso_credentials_from_profile(@parsed_config, p) + end + credentials + end + + # Attempts to load from shared config or shared credentials file. + # Will always attempt first to load from the shared credentials + # file, if present. + def sso_token_from_config(opts = {}) + p = opts[:profile] || @profile_name + token = sso_token_from_profile(@parsed_credentials, p) + if @parsed_config + token ||= sso_token_from_profile(@parsed_config, p) + end + token + end + + # Add an accessor method (similar to attr_reader) to return a configuration value + # Uses the get_config_value below to control where + # values are loaded from + def self.config_reader(*attrs) + attrs.each do |attr| + define_method(attr) { |opts = {}| get_config_value(attr.to_s, opts) } + end + end + + config_reader( + :region, + :ca_bundle, + :credential_process, + :endpoint_discovery_enabled, + :use_dualstack_endpoint, + :use_fips_endpoint, + :ec2_metadata_service_endpoint, + :ec2_metadata_service_endpoint_mode, + :max_attempts, + :retry_mode, + :adaptive_retry_wait_to_fill, + :correct_clock_skew, + :csm_client_id, + :csm_enabled, + :csm_host, + :csm_port, + :sts_regional_endpoints, + :s3_use_arn_region, + :s3_us_east_1_regional_endpoint, + :s3_disable_multiregion_access_points, + :defaults_mode + ) + + private + + # Get a config value from shared credential/config files. + # Only loads a value when config_enabled is true + # Return a value from credentials preferentially over config + def get_config_value(key, opts) + p = opts[:profile] || @profile_name + + value = @parsed_credentials.fetch(p, {})[key] if @parsed_credentials + value ||= @parsed_config.fetch(p, {})[key] if @config_enabled && @parsed_config + value + end + + def assume_role_from_profile(cfg, profile, opts, chain_config) + if cfg && prof_cfg = cfg[profile] + opts[:source_profile] ||= prof_cfg['source_profile'] + credential_source = opts.delete(:credential_source) + credential_source ||= prof_cfg['credential_source'] + if opts[:source_profile] && credential_source + raise Errors::CredentialSourceConflictError, + "Profile #{profile} has a source_profile, and "\ + 'a credential_source. For assume role credentials, must '\ + 'provide only source_profile or credential_source, not both.'
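A profile may set `source_profile` or `credential_source`, never both; the `credential_source` branch (handled by `credentials_from_source` further down) pulls credentials from the instance or container runtime rather than from another profile. A hedged sketch of what each source resolves to, with option values mirroring the defaults used when no chain config is given:

```ruby
# credential_source = Ec2InstanceMetadata resolves via IMDS:
creds = Aws::InstanceProfileCredentials.new(
  retries: 0,
  http_open_timeout: 1,
  http_read_timeout: 1
)

# credential_source = EcsContainer resolves via the ECS credential endpoint:
creds = Aws::ECSCredentials.new
```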
+ elsif opts[:source_profile] + opts[:visited_profiles] ||= Set.new + opts[:credentials] = resolve_source_profile(opts[:source_profile], opts) + if opts[:credentials] + opts[:role_session_name] ||= prof_cfg['role_session_name'] + opts[:role_session_name] ||= 'default_session' + opts[:role_arn] ||= prof_cfg['role_arn'] + opts[:duration_seconds] ||= prof_cfg['duration_seconds'] + opts[:external_id] ||= prof_cfg['external_id'] + opts[:serial_number] ||= prof_cfg['mfa_serial'] + opts[:profile] = opts.delete(:source_profile) + opts.delete(:visited_profiles) + AssumeRoleCredentials.new(opts) + else + raise Errors::NoSourceProfileError, + "Profile #{profile} has a role_arn, and source_profile, but the"\ + ' source_profile does not have credentials.' + end + elsif credential_source + opts[:credentials] = credentials_from_source( + credential_source, + chain_config + ) + if opts[:credentials] + opts[:role_session_name] ||= prof_cfg['role_session_name'] + opts[:role_session_name] ||= 'default_session' + opts[:role_arn] ||= prof_cfg['role_arn'] + opts[:duration_seconds] ||= prof_cfg['duration_seconds'] + opts[:external_id] ||= prof_cfg['external_id'] + opts[:serial_number] ||= prof_cfg['mfa_serial'] + opts.delete(:source_profile) # Cleanup + AssumeRoleCredentials.new(opts) + else + raise Errors::NoSourceCredentials, + "Profile #{profile} could not get source credentials from"\ + " provider #{credential_source}" + end + elsif prof_cfg['role_arn'] + raise Errors::NoSourceProfileError, "Profile #{profile} has a role_arn, but no source_profile." + end + end + end + + def resolve_source_profile(profile, opts = {}) + if opts[:visited_profiles] && opts[:visited_profiles].include?(profile) + raise Errors::SourceProfileCircularReferenceError + end + opts[:visited_profiles].add(profile) if opts[:visited_profiles] + + profile_config = @parsed_credentials[profile] + if @config_enabled + profile_config ||= @parsed_config[profile] + end + + if (creds = credentials(profile: profile)) + creds # static credentials + elsif profile_config && profile_config['source_profile'] + opts.delete(:source_profile) + assume_role_credentials_from_config(opts.merge(profile: profile)) + elsif (provider = assume_role_web_identity_credentials_from_config(opts.merge(profile: profile))) + provider.credentials if provider.credentials.set? + elsif (provider = assume_role_process_credentials_from_config(profile)) + provider.credentials if provider.credentials.set? + elsif (provider = sso_credentials_from_config(profile: profile)) + provider.credentials if provider.credentials.set? + end + end + + def credentials_from_source(credential_source, config) + case credential_source + when 'Ec2InstanceMetadata' + InstanceProfileCredentials.new( + retries: config ? config.instance_profile_credentials_retries : 0, + http_open_timeout: config ? config.instance_profile_credentials_timeout : 1, + http_read_timeout: config ? 
config.instance_profile_credentials_timeout : 1 + ) + when 'EcsContainer' + ECSCredentials.new + else + raise Errors::InvalidCredentialSourceError, "Unsupported credential_source: #{credential_source}" + end + end + + def assume_role_process_credentials_from_config(profile) + validate_profile_exists(profile) + credential_process = @parsed_credentials.fetch(profile, {})['credential_process'] + if @parsed_config + credential_process ||= @parsed_config.fetch(profile, {})['credential_process'] + end + ProcessCredentials.new(credential_process) if credential_process + end + + def credentials_from_shared(profile, _opts) + if @parsed_credentials && prof_config = @parsed_credentials[profile] + credentials_from_profile(prof_config) + end + end + + def credentials_from_config(profile, _opts) + if @parsed_config && prof_config = @parsed_config[profile] + credentials_from_profile(prof_config) + end + end + + # If any of the sso_ profile values are present, attempt to construct + # SSOCredentials + def sso_credentials_from_profile(cfg, profile) + if @parsed_config && + (prof_config = cfg[profile]) && + !(prof_config.keys & SSO_CREDENTIAL_PROFILE_KEYS).empty? + + if sso_session_name = prof_config['sso_session'] + sso_session = cfg["sso-session #{sso_session_name}"] + unless sso_session + raise ArgumentError, + "sso-session #{sso_session_name} must be defined in the config file. " \ + "Referenced by profile #{profile}" + end + sso_region = sso_session['sso_region'] + sso_start_url = sso_session['sso_start_url'] + + # validate sso_region and sso_start_url don't conflict if set on profile and session + if prof_config['sso_region'] && prof_config['sso_region'] != sso_region + raise ArgumentError, + "sso-session #{sso_session_name}'s sso_region (#{sso_region}) " \ + "does not match the profile #{profile}'s sso_region (#{prof_config['sso_region']})" + end + if prof_config['sso_start_url'] && prof_config['sso_start_url'] != sso_start_url + raise ArgumentError, + "sso-session #{sso_session_name}'s sso_start_url (#{sso_start_url}) " \ + "does not match the profile #{profile}'s sso_start_url (#{prof_config['sso_start_url']})" + end + else + sso_region = prof_config['sso_region'] + sso_start_url = prof_config['sso_start_url'] + end + + SSOCredentials.new( + sso_account_id: prof_config['sso_account_id'], + sso_role_name: prof_config['sso_role_name'], + sso_session: prof_config['sso_session'], + sso_region: sso_region, + sso_start_url: sso_start_url + ) + end + end + + # If the required sso_ profile values are present, attempt to construct + # SSOTokenProvider + def sso_token_from_profile(cfg, profile) + if @parsed_config && + (prof_config = cfg[profile]) && + !(prof_config.keys & SSO_TOKEN_PROFILE_KEYS).empty? + + sso_session_name = prof_config['sso_session'] + sso_session = cfg["sso-session #{sso_session_name}"] + unless sso_session + raise ArgumentError, + "sso-session #{sso_session_name} must be defined in the config file. " \ + "Referenced by profile #{profile}" + end + + unless sso_session['sso_region'] + raise ArgumentError, "sso-session #{sso_session_name} missing required parameter: sso_region" + end + + SSOTokenProvider.new( + sso_session: sso_session_name, + sso_region: sso_session['sso_region'] + ) + end + end + + def credentials_from_profile(prof_config) + creds = Credentials.new( + prof_config['aws_access_key_id'], + prof_config['aws_secret_access_key'], + prof_config['aws_session_token'] + ) + creds if creds.set?
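The sso-session handling just shown expects profile and session sections like the following hypothetical ~/.aws/config (all names and URLs invented); an sso_region or sso_start_url set on both the profile and the session must match, otherwise the ArgumentError above is raised:

```ruby
# [profile dev]
# sso_session    = my-sso
# sso_account_id = 123456789012
# sso_role_name  = ReadOnlyAccess
#
# [sso-session my-sso]
# sso_region    = us-east-1
# sso_start_url = https://my-org.awsapps.com/start
```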
+ end + + def load_credentials_file + @parsed_credentials = IniParser.ini_parse( + File.read(@credentials_path) + ) + end + + def load_config_file + @parsed_config = IniParser.ini_parse(File.read(@config_path)) + end + + def determine_credentials_path + ENV['AWS_SHARED_CREDENTIALS_FILE'] || default_shared_config_path('credentials') + end + + def determine_config_path + ENV['AWS_CONFIG_FILE'] || default_shared_config_path('config') + end + + def default_shared_config_path(file) + File.join(Dir.home, '.aws', file) + rescue ArgumentError + # Dir.home raises ArgumentError when ENV['home'] is not set + nil + end + + def validate_profile_exists(profile) + unless (@parsed_credentials && @parsed_credentials[profile]) || + (@parsed_config && @parsed_config[profile]) + msg = "Profile `#{profile}' not found in #{@credentials_path}"\ + "#{" or #{@config_path}" if @config_path}" + raise Errors::NoSuchProfileError, msg + end + end + + def determine_profile(options) + ret = options[:profile_name] + ret ||= ENV['AWS_PROFILE'] + ret ||= 'default' + ret + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_credentials.rb new file mode 100644 index 0000000..836dd3f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/shared_credentials.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +require_relative 'ini_parser' + +module Aws + class SharedCredentials + + include CredentialProvider + + # @api private + KEY_MAP = { + 'aws_access_key_id' => 'access_key_id', + 'aws_secret_access_key' => 'secret_access_key', + 'aws_session_token' => 'session_token', + } + + # Constructs a new SharedCredentials object. This will load static + # (access_key_id, secret_access_key and session_token) AWS access + # credentials from an ini file, which supports profiles. The default + # profile name is 'default'. You can specify the profile name with the + # `ENV['AWS_PROFILE']` or with the `:profile_name` option. + # + # To use credentials from the default credential resolution chain + # create a client without the credential option specified. + # You may access the resolved credentials through + # `client.config.credentials`. + # + # @option [String] :path Path to the shared file. Defaults + # to "#{Dir.home}/.aws/credentials". + # + # @option [String] :profile_name Defaults to 'default' or + # `ENV['AWS_PROFILE']`. + # + def initialize(options = {}) + shared_config = Aws.shared_config + @path = options[:path] + @path ||= shared_config.credentials_path + @profile_name = options[:profile_name] + @profile_name ||= ENV['AWS_PROFILE'] + @profile_name ||= shared_config.profile_name + if @path && @path == shared_config.credentials_path + @credentials = shared_config.credentials(profile: @profile_name) + else + config = SharedConfig.new( + credentials_path: @path, + profile_name: @profile_name + ) + @credentials = config.credentials(profile: @profile_name) + end + end + + # @return [String] + attr_reader :path + + # @return [String] + attr_reader :profile_name + + # @return [Credentials] + attr_reader :credentials + + # @api private + def inspect + parts = [ + self.class.name, + "profile_name=#{profile_name.inspect}", + "path=#{path.inspect}", + ] + "#<#{parts.join(' ')}>" + end + + # @deprecated This method is no longer used. + # @return [Boolean] Returns `true` if a credential file + # exists and has appropriate read permissions at {#path}. 
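A minimal usage sketch of the SharedCredentials class being defined here (profile name invented; per validate_profile_exists above, the constructor raises Errors::NoSuchProfileError when the profile cannot be found):

```ruby
creds = Aws::SharedCredentials.new(profile_name: 'prod')
creds.set?        # => true when the profile defines static keys
creds.credentials # => #<Aws::Credentials ...>
```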
+ # @note This method does not indicate if the file found at {#path} + # will be parsable, only if it can be read. + def loadable? + !path.nil? && File.exist?(path) && File.readable?(path) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_credentials.rb new file mode 100644 index 0000000..8557a84 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_credentials.rb @@ -0,0 +1,172 @@ +# frozen_string_literal: true + +module Aws + # An auto-refreshing credential provider that assumes a role via + # {Aws::SSO::Client#get_role_credentials} using a cached access + # token. When `sso_session` is specified, token refresh logic from + # {Aws::SSOTokenProvider} will be used to refresh the token if possible. + # This class does NOT implement the SSO login token flow - tokens + # must be generated separately by running `aws sso login` from the + # AWS CLI with the correct profile. The `SSOCredentials` will + # auto-refresh the AWS credentials from SSO. + # + # # You must first run aws sso login --profile your-sso-profile + # sso_credentials = Aws::SSOCredentials.new( + # sso_account_id: '123456789', + # sso_role_name: "role_name", + # sso_region: "us-east-1", + # sso_session: 'my_sso_session' + # ) + # ec2 = Aws::EC2::Client.new(credentials: sso_credentials) + # + # If you omit the `:client` option, a new {Aws::SSO::Client} object will be + # constructed with additional options that were provided. + # + # @see Aws::SSO::Client#get_role_credentials + # @see https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html + class SSOCredentials + + include CredentialProvider + include RefreshingCredentials + + # @api private + LEGACY_REQUIRED_OPTS = [:sso_start_url, :sso_account_id, :sso_region, :sso_role_name].freeze + TOKEN_PROVIDER_REQUIRED_OPTS = [:sso_session, :sso_account_id, :sso_region, :sso_role_name].freeze + + # @api private + SSO_LOGIN_GUIDANCE = 'The SSO session associated with this profile has '\ 'expired or is otherwise invalid. To refresh this SSO session run '\ 'aws sso login with the corresponding profile.'.freeze + + # @option options [required, String] :sso_account_id The AWS account ID + # that temporary AWS credentials will be resolved for. + # + # @option options [required, String] :sso_role_name The corresponding + # IAM role in the AWS account that temporary AWS credentials + # will be resolved for. + # + # @option options [required, String] :sso_region The AWS region where the + # SSO directory for the given sso_start_url is hosted. + # + # @option options [String] :sso_session The SSO session used for fetching + # the token. If provided, refresh logic from the {Aws::SSOTokenProvider} + # will be used. + # + # @option options [String] :sso_start_url (legacy profiles) If provided, + # legacy token fetch behavior will be used, which does not support + # token refreshing. The start URL is provided by the SSO + # service via the console and is the URL used to + # log in to the SSO directory. This is also sometimes referred to as + # the "User Portal URL". + # + # @option options [SSO::Client] :client Optional `SSO::Client`. If not + # provided, a client will be constructed. + # + # @option options [Callable] before_refresh Proc called before + # credentials are refreshed. `before_refresh` is called + # with an instance of this object when + # AWS credentials are required and need to be refreshed.
+ def initialize(options = {}) + options = options.select {|k, v| !v.nil? } + if (options[:sso_session]) + missing_keys = TOKEN_PROVIDER_REQUIRED_OPTS.select { |k| options[k].nil? } + unless missing_keys.empty? + raise ArgumentError, "Missing required keys: #{missing_keys}" + end + @legacy = false + @sso_role_name = options.delete(:sso_role_name) + @sso_account_id = options.delete(:sso_account_id) + + # if client has been passed, don't pass through to SSOTokenProvider + @client = options.delete(:client) + options.delete(:sso_start_url) + @token_provider = Aws::SSOTokenProvider.new(options.dup) + @sso_session = options.delete(:sso_session) + @sso_region = options.delete(:sso_region) + + unless @client + client_opts = {} + options.each_pair { |k,v| client_opts[k] = v unless CLIENT_EXCLUDE_OPTIONS.include?(k) } + client_opts[:region] = @sso_region + client_opts[:credentials] = nil + @client = Aws::SSO::Client.new(client_opts) + end + else # legacy behavior + missing_keys = LEGACY_REQUIRED_OPTS.select { |k| options[k].nil? } + unless missing_keys.empty? + raise ArgumentError, "Missing required keys: #{missing_keys}" + end + @legacy = true + @sso_start_url = options.delete(:sso_start_url) + @sso_region = options.delete(:sso_region) + @sso_role_name = options.delete(:sso_role_name) + @sso_account_id = options.delete(:sso_account_id) + + # validate we can read the token file + read_cached_token + + client_opts = {} + options.each_pair { |k,v| client_opts[k] = v unless CLIENT_EXCLUDE_OPTIONS.include?(k) } + client_opts[:region] = @sso_region + client_opts[:credentials] = nil + + @client = options[:client] || Aws::SSO::Client.new(client_opts) + end + + @async_refresh = true + super + end + + # @return [SSO::Client] + attr_reader :client + + private + + def read_cached_token + cached_token = Json.load(File.read(sso_cache_file)) + # validation + unless cached_token['accessToken'] && cached_token['expiresAt'] + raise ArgumentError, 'Missing required field(s)' + end + expires_at = DateTime.parse(cached_token['expiresAt']) + if expires_at < DateTime.now + raise ArgumentError, 'Cached SSO Token is expired.' + end + cached_token + rescue Errno::ENOENT, Aws::Json::ParseError, ArgumentError + raise Errors::InvalidSSOCredentials, SSO_LOGIN_GUIDANCE + end + + def refresh + c = if @legacy + cached_token = read_cached_token + @client.get_role_credentials( + account_id: @sso_account_id, + role_name: @sso_role_name, + access_token: cached_token['accessToken'] + ).role_credentials + else + @client.get_role_credentials( + account_id: @sso_account_id, + role_name: @sso_role_name, + access_token: @token_provider.token.token + ).role_credentials + end + + @credentials = Credentials.new( + c.access_key_id, + c.secret_access_key, + c.session_token + ) + @expiration = c.expiration + end + + def sso_cache_file + start_url_sha1 = OpenSSL::Digest::SHA1.hexdigest(@sso_start_url.encode('utf-8')) + File.join(Dir.home, '.aws', 'sso', 'cache', "#{start_url_sha1}.json") + rescue ArgumentError + # Dir.home raises ArgumentError when ENV['home'] is not set + raise ArgumentError, "Unable to load sso_cache_file: ENV['HOME'] is not set." 
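The legacy cache lookup above derives the cache file name from a SHA1 digest of the start URL; this standalone snippet reproduces the path computation (start URL invented):

```ruby
require 'openssl'

start_url = 'https://my-org.awsapps.com/start' # hypothetical
sha1 = OpenSSL::Digest::SHA1.hexdigest(start_url)
File.join(Dir.home, '.aws', 'sso', 'cache', "#{sha1}.json")
# => "#{Dir.home}/.aws/sso/cache/<40-char-sha1>.json"
```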
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_token_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_token_provider.rb new file mode 100644 index 0000000..e858c30 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/sso_token_provider.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +module Aws + class SSOTokenProvider + + include TokenProvider + include RefreshingToken + + # @api private + SSO_REQUIRED_OPTS = [:sso_region, :sso_session].freeze + + # @api private + SSO_LOGIN_GUIDANCE = 'The SSO session associated with this profile has '\ 'expired or is otherwise invalid. To refresh this SSO session run '\ 'aws sso login with the corresponding profile.'.freeze + + # @option options [required, String] :sso_region The AWS region where the + # SSO directory for the given sso_start_url is hosted. + # + # @option options [required, String] :sso_session The SSO session used + # for fetching this token. + # + # @option options [SSOOIDC::Client] :client Optional `SSOOIDC::Client`. If not + # provided, a client will be constructed. + # + # @option options [Callable] before_refresh Proc called before + # the token is refreshed. `before_refresh` is called + # with an instance of this object when + # a token is required and needs to be refreshed. + def initialize(options = {}) + + missing_keys = SSO_REQUIRED_OPTS.select { |k| options[k].nil? } + unless missing_keys.empty? + raise ArgumentError, "Missing required keys: #{missing_keys}" + end + + @sso_session = options.delete(:sso_session) + @sso_region = options.delete(:sso_region) + + options[:region] = @sso_region + options[:credentials] = nil + options[:token_provider] = nil + @client = options[:client] || Aws::SSOOIDC::Client.new(options) + + super + end + + # @return [SSOOIDC::Client] + attr_reader :client + + private + + def refresh + # token is valid and not in refresh window - do not refresh it. + return if @token && @token.expiration && !near_expiration? + + # token may not exist or is out of the expiration window + # attempt to refresh from disk first (another process/application may have refreshed already) + token_json = read_cached_token + @token = Token.new(token_json['accessToken'], token_json['expiresAt']) + return if @token && @token.expiration && !near_expiration? + + # The token is expired and needs to be refreshed + if can_refresh_token?(token_json) + begin + current_time = Time.now + resp = @client.create_token( + grant_type: 'refresh_token', + client_id: token_json['clientId'], + client_secret: token_json['clientSecret'], + refresh_token: token_json['refreshToken'] + ) + token_json['accessToken'] = resp.access_token + token_json['expiresAt'] = current_time + resp.expires_in + @token = Token.new(token_json['accessToken'], token_json['expiresAt']) + + if resp.refresh_token + token_json['refreshToken'] = resp.refresh_token + else + token_json.delete('refreshToken') + end + + update_token_cache(token_json) + rescue + # refresh has failed, continue attempting to use the token if it's not hard expired + end + end + + if !@token.expiration || @token.expiration < Time.now + # Token is hard expired, raise an exception + raise Errors::InvalidSSOToken, 'Token is invalid and failed to refresh.'
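refresh above reads and rewrites the JSON cache file that `aws sso login` maintains. Its expected shape, reconstructed from read_cached_token and can_refresh_token? below (all values invented):

```ruby
{
  'accessToken'           => 'eyJ...',               # required
  'expiresAt'             => '2023-01-01T00:00:00Z', # required, parsed with Time.parse
  'clientId'              => 'client-id',            # all three needed
  'clientSecret'          => 'client-secret',        #   to attempt a
  'refreshToken'          => 'refresh-token',        #   refresh
  'registrationExpiresAt' => '2023-06-01T00:00:00Z'  # refresh allowed only while in the future
}
```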
+ end + end + + def read_cached_token + cached_token = Json.load(File.read(sso_cache_file)) + # validation + unless cached_token['accessToken'] && cached_token['expiresAt'] + raise ArgumentError, 'Missing required field(s)' + end + cached_token['expiresAt'] = Time.parse(cached_token['expiresAt']) + cached_token + rescue Errno::ENOENT, Aws::Json::ParseError, ArgumentError + raise Errors::InvalidSSOToken, SSO_LOGIN_GUIDANCE + end + + def update_token_cache(token_json) + cached_token = token_json.dup + cached_token['expiresAt'] = cached_token['expiresAt'].iso8601 + File.write(sso_cache_file, Json.dump(cached_token)) + end + + def sso_cache_file + sso_session_sha1 = OpenSSL::Digest::SHA1.hexdigest(@sso_session.encode('utf-8')) + File.join(Dir.home, '.aws', 'sso', 'cache', "#{sso_session_sha1}.json") + rescue ArgumentError + # Dir.home raises ArgumentError when ENV['home'] is not set + raise ArgumentError, "Unable to load sso_cache_file: ENV['HOME'] is not set." + end + + # return true if all required fields are present + # return false if registrationExpiresAt exists and is in the past + def can_refresh_token?(token_json) + if token_json['clientId'] && + token_json['clientSecret'] && + token_json['refreshToken'] + + return !token_json['registrationExpiresAt'] || + Time.parse(token_json['registrationExpiresAt']) > Time.now + else + false + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/static_token_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/static_token_provider.rb new file mode 100644 index 0000000..7786d1a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/static_token_provider.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module Aws + class StaticTokenProvider + + include TokenProvider + + # @param [String] token + # @param [Time] expiration + def initialize(token, expiration=nil) + @token = Token.new(token, expiration) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/structure.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/structure.rb new file mode 100644 index 0000000..2a42d42 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/structure.rb @@ -0,0 +1,91 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Structure + + def initialize(values = {}) + values.each do |k, v| + self[k] = v + end + end + + # @return [Boolean] Returns `true` if this structure has a value + # set for the given member. + def key?(member_name) + !self[member_name].nil? + end + + # @return [Boolean] Returns `true` if all of the member values are `nil`. + def empty? + values.compact == [] + end + + # Deeply converts the Structure into a hash. Structure members that + # are `nil` are omitted from the resultant hash. + # + # You can call #orig_to_h to get vanilla #to_h behavior as defined + # in stdlib Struct. + # + # @return [Hash] + def to_h(obj = self, options = {}) + case obj + when Struct + obj.each_pair.with_object({}) do |(member, value), hash| + member = member.to_s if options[:as_json] + hash[member] = to_hash(value, options) unless value.nil?
+ end + when Hash + obj.each.with_object({}) do |(key, value), hash| + key = key.to_s if options[:as_json] + hash[key] = to_hash(value, options) + end + when Array + obj.collect { |value| to_hash(value, options) } + else + obj + end + end + alias to_hash to_h + + # Wraps the default #to_s logic with filtering of sensitive parameters. + def to_s(obj = self) + Aws::Log::ParamFilter.new.filter(obj, obj.class).to_s + end + + class << self + + # @api private + def new(*args) + if args.empty? + Aws::EmptyStructure + else + struct = Struct.new(*args) + struct.send(:include, Aws::Structure) + struct + end + end + + # @api private + def included(base_class) + base_class.send(:undef_method, :each) + end + + end + + module Union + def member + self.members.select { |k| self[k] != nil }.first + end + + def value + self[member] if member + end + end + end + + # @api private + class EmptyStructure < Struct.new('AwsEmptyStructure') + include(Aws::Structure) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/data_applicator.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/data_applicator.rb new file mode 100644 index 0000000..3e7b494 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/data_applicator.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + class DataApplicator + + include Seahorse::Model::Shapes + + # @param [Seahorse::Models::Shapes::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @param [Hash] data + # @param [Structure] stub + def apply_data(data, stub) + apply_data_to_struct(@rules, data, stub) + end + + private + + def apply_data_to_struct(ref, data, struct) + data.each do |key, value| + struct[key] = member_value(ref.shape.member(key), value) + end + struct + end + + def member_value(ref, value) + case ref.shape + when StructureShape + apply_data_to_struct(ref, value, ref.shape.struct_class.new) + when ListShape + value.inject([]) do |list, v| + list << member_value(ref.shape.member, v) + end + when MapShape + value.inject({}) do |map, (k,v)| + map[k.to_s] = member_value(ref.shape.value, v) + map + end + else + value + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/empty_stub.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/empty_stub.rb new file mode 100644 index 0000000..1074736 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/empty_stub.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + class EmptyStub + + include Seahorse::Model::Shapes + + # @param [Seahorse::Models::Shapes::ShapeRef] rules + def initialize(rules) + @rules = rules + end + + # @return [Structure] + def stub + if @rules + stub_ref(@rules) + else + EmptyStructure.new + end + end + + private + + def stub_ref(ref, visited = []) + if visited.include?(ref.shape) + return nil + else + visited = visited + [ref.shape] + end + case ref.shape + when StructureShape then stub_structure(ref, visited) + when ListShape then [] + when MapShape then {} + else stub_scalar(ref) + end + end + + def stub_structure(ref, visited) + ref.shape.members.inject(ref.shape.struct_class.new) do |struct, (mname, mref)| + # For eventstream shape, it returns an Enumerator + unless mref.eventstream + struct[mname] = stub_ref(mref, visited) + end + struct + end + end + + def
stub_scalar(ref) + case ref.shape + when StringShape then ref.shape.name || 'string' + when IntegerShape then 0 + when FloatShape then 0.0 + when BooleanShape then false + when TimestampShape then Time.now + else nil + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/api_gateway.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/api_gateway.rb new file mode 100644 index 0000000..2dafc6b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/api_gateway.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class ApiGateway < RestJson + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/ec2.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/ec2.rb new file mode 100644 index 0000000..67bd663 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/ec2.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class EC2 + + def stub_data(api, operation, data) + resp = Seahorse::Client::Http::Response.new + resp.status_code = 200 + resp.body = build_body(api, operation, data) if operation.output + resp.headers['Content-Length'] = resp.body.size + resp.headers['Content-Type'] = 'text/xml;charset=UTF-8' + resp.headers['Server'] = 'AmazonEC2' + resp + end + + def stub_error(error_code) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = 400 + http_resp.body = <<-XML.strip +<ErrorResponse> + <Error> + <Code>#{error_code}</Code> + <Message>stubbed-response-error-message</Message> + </Error> +</ErrorResponse> + XML + http_resp + end + + private + + def build_body(api, operation, data) + xml = [] + Xml::Builder.new(operation.output, target:xml).to_xml(data) + xml.shift + xml.pop + xmlns = "http://ec2.amazonaws.com/doc/#{api.version}/".inspect + xml.unshift("  <requestId>stubbed-request-id</requestId>\n") + xml.unshift("<#{operation.name}Response xmlns=#{xmlns}>\n") + xml.push("</#{operation.name}Response>\n") + xml.join + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/json.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/json.rb new file mode 100644 index 0000000..e32017f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/json.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class Json + + def stub_data(api, operation, data) + resp = Seahorse::Client::Http::Response.new + resp.status_code = 200 + resp.headers["Content-Type"] = content_type(api) + resp.headers["x-amzn-RequestId"] = "stubbed-request-id" + resp.body = build_body(operation, data) + resp + end + + def stub_error(error_code) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = 400 + http_resp.body = <<-JSON.strip +{ + "code": #{error_code.inspect}, + "message": "stubbed-response-error-message" +} + JSON + http_resp + end + + private + + def content_type(api) + "application/x-amz-json-#{api.metadata['jsonVersion']}" + end + + def build_body(operation, data) + Aws::Json::Builder.new(operation.output).to_json(data) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/query.rb
b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/query.rb new file mode 100644 index 0000000..3af866a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/query.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class Query + + def stub_data(api, operation, data) + resp = Seahorse::Client::Http::Response.new + resp.status_code = 200 + resp.body = build_body(api, operation, data) + resp + end + + def stub_error(error_code) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = 400 + http_resp.body = XmlError.new(error_code).to_xml + http_resp + end + + private + + def build_body(api, operation, data) + xml = [] + builder = Aws::Xml::DocBuilder.new(target: xml, indent: ' ') + builder.node(operation.name + 'Response', xmlns: xmlns(api)) do + if (rules = operation.output) + rules.location_name = operation.name + 'Result' + Xml::Builder.new(rules, target: xml, pad:' ').to_xml(data) + end + builder.node('ResponseMetadata') do + builder.node('RequestId', 'stubbed-request-id') + end + end + xml.join + end + + def xmlns(api) + api.metadata['xmlNamespace'] + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest.rb new file mode 100644 index 0000000..c3ef688 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +require 'aws-eventstream' + +module Aws + module Stubbing + module Protocols + class Rest + + include Seahorse::Model::Shapes + + def stub_data(api, operation, data) + resp = new_http_response + apply_status_code(operation, resp, data) + apply_headers(operation, resp, data) + apply_body(api, operation, resp, data) + resp + end + + private + + def new_http_response + resp = Seahorse::Client::Http::Response.new + resp.status_code = 200 + resp.headers["x-amzn-RequestId"] = "stubbed-request-id" + resp + end + + def apply_status_code(operation, resp, data) + operation.output.shape.members.each do |member_name, member_ref| + if member_ref.location == 'statusCode' + resp.status_code = data[member_name] if data.key?(member_name) + end + end + end + + def apply_headers(operation, resp, data) + Aws::Rest::Request::Headers.new(operation.output).apply(resp, data) + end + + def apply_body(api, operation, resp, data) + resp.body = build_body(api, operation, data) + end + + def build_body(api, operation, data) + rules = operation.output + if head_operation(operation) + '' + elsif streaming?(rules) + data[rules[:payload]] + elsif rules[:payload] + body_for(api, operation, rules[:payload_member], data[rules[:payload]]) + else + filtered = Seahorse::Model::Shapes::ShapeRef.new( + shape: Seahorse::Model::Shapes::StructureShape.new.tap do |s| + rules.shape.members.each do |member_name, member_ref| + s.add_member(member_name, member_ref) if member_ref.location.nil? 
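These stubbing protocol classes back the client-level `stub_responses` feature. Typical use, shown with S3 (requires the aws-sdk-s3 gem; bucket name invented):

```ruby
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(stub_responses: true)
s3.stub_responses(:list_buckets, buckets: [{ name: 'my-bucket' }])
s3.list_buckets.buckets.map(&:name) # => ["my-bucket"]
```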
+ end + end + ) + body_for(api, operation, filtered, data) + end + end + + def streaming?(ref) + if ref[:payload] + case ref[:payload_member].shape + when StringShape then true + when BlobShape then true + else false + end + else + false + end + end + + def head_operation(operation) + operation.http_method == 'HEAD' + end + + def eventstream?(rules) + rules.eventstream + end + + def encode_eventstream_response(rules, data, builder) + data.inject('') do |stream, event_data| + # construct message headers and payload + opts = {headers: {}} + case event_data.delete(:message_type) + when 'event' + encode_event(opts, rules, event_data, builder) + when 'error' + # errors are unmodeled + encode_error(opts, event_data) + when 'exception' + # Pending + raise 'Stubbing :exception event is not supported' + end + [stream, Aws::EventStream::Encoder.new.encode( + Aws::EventStream::Message.new(opts) + )].pack('a*a*') + end + end + + def encode_error(opts, event_data) + opts[:headers][':error-message'] = Aws::EventStream::HeaderValue.new( + value: event_data[:error_message], + type: 'string' + ) + opts[:headers][':error-code'] = Aws::EventStream::HeaderValue.new( + value: event_data[:error_code], + type: 'string' + ) + opts[:headers][':message-type'] = Aws::EventStream::HeaderValue.new( + value: 'error', + type: 'string' + ) + opts + end + + def encode_unknown_event(opts, event_type, event_data) + # right now h2 events are only rest_json + opts[:payload] = StringIO.new(Aws::Json.dump(event_data)) + opts[:headers][':event-type'] = Aws::EventStream::HeaderValue.new( + value: event_type.to_s, + type: 'string' + ) + opts[:headers][':message-type'] = Aws::EventStream::HeaderValue.new( + value: 'event', + type: 'string' + ) + opts + end + + def encode_modeled_event(opts, rules, event_type, event_data, builder) + event_ref = rules.shape.member(event_type) + explicit_payload = false + implicit_payload_members = {} + event_ref.shape.members.each do |name, ref| + if ref.eventpayload + explicit_payload = true + else + implicit_payload_members[name] = ref + end + end + + if !explicit_payload && !implicit_payload_members.empty? 
+ unless implicit_payload_members.size > 1 + m_name, _ = implicit_payload_members.first + value = {} + value[m_name] = event_data[m_name] + opts[:payload] = StringIO.new(builder.new(event_ref).serialize(value)) + end + end + + event_data.each do |k, v| + member_ref = event_ref.shape.member(k) + if member_ref.eventheader + opts[:headers][member_ref.location_name] = Aws::EventStream::HeaderValue.new( + value: v, + type: member_ref.eventheader_type + ) + elsif member_ref.eventpayload + case member_ref.eventpayload_type + when 'string' + opts[:payload] = StringIO.new(v) + when 'blob' + opts[:payload] = v + when 'structure' + opts[:payload] = StringIO.new(builder.new(member_ref).serialize(v)) + end + end + end + opts[:headers][':event-type'] = Aws::EventStream::HeaderValue.new( + value: event_ref.location_name, + type: 'string' + ) + opts[:headers][':message-type'] = Aws::EventStream::HeaderValue.new( + value: 'event', + type: 'string' + ) + opts + end + + def encode_event(opts, rules, event_data, builder) + event_type = event_data.delete(:event_type) + + if rules.shape.member?(event_type) + encode_modeled_event(opts, rules, event_type, event_data, builder) + else + encode_unknown_event(opts, event_type, event_data) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_json.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_json.rb new file mode 100644 index 0000000..b4b19ec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_json.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class RestJson < Rest + + def body_for(_a, _b, rules, data) + if eventstream?(rules) + encode_eventstream_response(rules, data, Aws::Json::Builder) + else + Aws::Json::Builder.new(rules).serialize(data) + end + end + + def stub_error(error_code) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = 400 + http_resp.body = <<-JSON.strip +{ + "code": #{error_code.inspect}, + "message": "stubbed-response-error-message" +} + JSON + http_resp + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_xml.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_xml.rb new file mode 100644 index 0000000..c8d9d73 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/protocols/rest_xml.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + module Protocols + class RestXml < Rest + + def body_for(api, operation, rules, data) + if eventstream?(rules) + encode_eventstream_response(rules, data, Xml::Builder) + else + xml = [] + rules.location_name = operation.name + 'Result' + rules['xmlNamespace'] = { 'uri' => api.metadata['xmlNamespace'] } + Xml::Builder.new(rules, target:xml).to_xml(data) + xml.join + end + end + + def stub_error(error_code) + http_resp = Seahorse::Client::Http::Response.new + http_resp.status_code = 400 + http_resp.body = XmlError.new(error_code).to_xml + http_resp + end + + def xmlns(api) + api.metadata['xmlNamespace'] + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/stub_data.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/stub_data.rb new file mode 
100644 index 0000000..398444b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/stub_data.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Stubbing + class StubData + + def initialize(operation) + @rules = operation.output + @pager = operation[:pager] + end + + def stub(data = {}) + stub = EmptyStub.new(@rules).stub + remove_paging_tokens(stub) + apply_data(data, stub) + stub + end + + private + + def remove_paging_tokens(stub) + if @pager + @pager.instance_variable_get("@tokens").keys.each do |path| + if divide = (path[' || '] || path[' or ']) + path = path.split(divide)[0] + end + parts = path.split(/\b/) + # if nested struct/expression, EmptyStub auto-pop "string" + # currently not support remove "string" for nested/expression + # as it requires reverse JMESPATH search + stub[parts[0]] = nil if parts.size == 1 + end + if more_results = @pager.instance_variable_get('@more_results') + parts = more_results.split(/\b/) + # if nested struct/expression, EmptyStub auto-pop false value + # no further work needed + stub[parts[0]] = false if parts.size == 1 + end + end + end + + def apply_data(data, stub) + ParamValidator.new(@rules, validate_required: false, input: false).validate!(data) + DataApplicator.new(@rules).apply_data(data, stub) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/xml_error.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/xml_error.rb new file mode 100644 index 0000000..268ae7d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/stubbing/xml_error.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module Aws + module Stubbing + class XmlError + + def initialize(error_code) + @error_code = error_code + end + + def to_xml + <<-XML.strip + <ErrorResponse> + <Error> + <Code>#{@error_code}</Code> + <Message>stubbed-response-error-message</Message> + </Error> + </ErrorResponse> + XML + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token.rb new file mode 100644 index 0000000..5126005 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + class Token + + # @param [String] token + # @param [Time] expiration + def initialize(token, expiration=nil) + @token = token + @expiration = expiration + end + + # @return [String, nil] + attr_reader :token + + # @return [Time, nil] + attr_reader :expiration + + # @return [Boolean] Returns `true` if token is set + def set? + !token.nil? && !token.empty? + end + + # Removing the token from the default inspect string. + # @api private + def inspect + "#<#{self.class.name} token=[FILTERED] expiration=#{expiration}>" + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider.rb new file mode 100644 index 0000000..643012f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module Aws + module TokenProvider + + # @return [Token] + attr_reader :token + + # @return [Boolean] + def set? + !!token && token.set?
+ end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider_chain.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider_chain.rb new file mode 100644 index 0000000..dfebea1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/token_provider_chain.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module Aws + # @api private + class TokenProviderChain + def initialize(config = nil) + @config = config + end + + # @return [TokenProvider, nil] + def resolve + providers.each do |method_name, options| + provider = send(method_name, options.merge(config: @config)) + return provider if provider && provider.set? + end + nil + end + + private + + def providers + [ + [:static_profile_sso_token, {}], + [:sso_token, {}] + ] + end + + def static_profile_sso_token(options) + if Aws.shared_config.config_enabled? && options[:config] && options[:config].profile + Aws.shared_config.sso_token_from_config( + profile: options[:config].profile + ) + end + end + + + def sso_token(options) + profile_name = determine_profile_name(options) + if Aws.shared_config.config_enabled? + Aws.shared_config.sso_token_from_config(profile: profile_name) + end + rescue Errors::NoSuchProfileError + nil + end + + def determine_profile_name(options) + (options[:config] && options[:config].profile) || ENV['AWS_PROFILE'] || ENV['AWS_DEFAULT_PROFILE'] || 'default' + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/type_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/type_builder.rb new file mode 100644 index 0000000..209558f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/type_builder.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module Aws + # @api private + class TypeBuilder + + def initialize(svc_module) + @types_module = svc_module.const_set(:Types, Module.new) + end + + def build_type(shape, shapes) + @types_module.const_set(shape.name, Structure.new(*shape.member_names)) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/util.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/util.rb new file mode 100644 index 0000000..294eb12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/util.rb @@ -0,0 +1,72 @@ +# frozen_string_literal: true + +require 'cgi' + +module Aws + # @api private + module Util + class << self + + def deep_merge(left, right) + case left + when Hash then left.merge(right) { |key, v1, v2| deep_merge(v1, v2) } + when Array then right + left + else right + end + end + + def copy_hash(hash) + if Hash === hash + deep_copy(hash) + else + raise ArgumentError, "expected hash, got `#{hash.class}`" + end + end + + def deep_copy(obj) + case obj + when nil then nil + when true then true + when false then false + when Hash + obj.inject({}) do |h, (k,v)| + h[k] = deep_copy(v) + h + end + when Array + obj.map { |v| deep_copy(v) } + else + if obj.respond_to?(:dup) + obj.dup + elsif obj.respond_to?(:clone) + obj.clone + else + obj + end + end + end + + def monotonic_milliseconds + if defined?(Process::CLOCK_MONOTONIC) + Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond) + else + DateTime.now.strftime('%Q').to_i + end + end + + def monotonic_seconds + monotonic_milliseconds / 1000.0 + end + + def str_2_bool(str) + case str.to_s + when "true" then true + when 
"false" then false + else + nil + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters.rb new file mode 100644 index 0000000..0ea0c7d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require_relative 'waiters/errors' +require_relative 'waiters/poller' +require_relative 'waiters/waiter' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/errors.rb new file mode 100644 index 0000000..d2261f5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/errors.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module Aws + module Waiters + module Errors + + # Raised when a waiter detects a condition where the waiter can never + # succeed. + class WaiterFailed < StandardError; end + + class FailureStateError < WaiterFailed + + MSG = "stopped waiting, encountered a failure state" + + def initialize(response) + @response = response + super(MSG) + end + + # @return [Seahorse::Client::Response] The response that matched + # the failure state. + attr_reader :response + + end + + class TooManyAttemptsError < WaiterFailed + + MSG = "stopped waiting after %d attempts without success" + + def initialize(attempts) + @attempts = attempts + super(MSG % [attempts]) + end + + # @return [Integer] + attr_reader :attempts + + end + + class UnexpectedError < WaiterFailed + + MSG = "stopped waiting due to an unexpected error: %s" + + def initialize(error) + @error = error + super(MSG % [error.message]) + end + + # @return [Exception] The unexpected error. + attr_reader :error + + end + + # Raised when attempting to get a waiter by name and the waiter has not + # been defined. + class NoSuchWaiterError < ArgumentError + + MSG = "no such waiter %s; valid waiter names are: %s" + + def initialize(waiter_name, waiter_names) + waiter_names = waiter_names.map(&:inspect).join(', ') + super(MSG % [waiter_name.inspect, waiter_names]) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/poller.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/poller.rb new file mode 100644 index 0000000..345cab9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/poller.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: true + +module Aws + module Waiters + + # Polls a single API operation inspecting the response data and/or error + # for states matching one of its acceptors. + # @api private + class Poller + + # @api private + RAISE_HANDLER = Seahorse::Client::Plugins::RaiseResponseErrors::Handler + + # @option options [required, String] :operation_name + # @option options [required, Array] :acceptors + # @api private + def initialize(options = {}) + @operation_name = options.fetch(:operation_name) + @acceptors = options.fetch(:acceptors) + end + + # @return [Symbol] + attr_reader :operation_name + + # Makes an API call, returning the resultant state and the response. + # + # * `:success` - A success state has been matched. + # * `:failure` - A terminate failure state has been matched. + # * `:retry` - The waiter may be retried. + # * `:error` - The waiter encountered an un-expected error. 
+ # + # @example A trivial (bad) example of a waiter that polls indefinitely. + # + # loop do + # + # state, resp = poller.call(client:client, params:{}) + # + # case state + # when :success then return true + # when :failure then return false + # when :retry then next + # when :error then raise 'oops' + # end + # + # end + # + # @option options [required,Client] :client + # @option options [required,Hash] :params + # @return [Array] + def call(options = {}) + response = send_request(options) + @acceptors.each do |acceptor| + if acceptor_matches?(acceptor, response) + return [acceptor['state'].to_sym, response] + end + end + [response.error ? :error : :retry, response] + end + + private + + def send_request(options) + req = options[:client].build_request(@operation_name, options[:params]) + req.handlers.remove(RAISE_HANDLER) + req.send_request + end + + def acceptor_matches?(acceptor, response) + send("matches_#{acceptor['matcher']}?", acceptor, response) + end + + def matches_path?(acceptor, response) + if response.data + JMESPath.search(path(acceptor), response.data) == acceptor['expected'] + else + false + end + end + + def matches_pathAll?(acceptor, response) + non_empty_array(acceptor, response) do |values| + values.all? { |value| value == acceptor['expected'] } + end + end + + def matches_pathAny?(acceptor, response) + non_empty_array(acceptor, response) do |values| + values.any? { |value| value == acceptor['expected'] } + end + end + + def matches_status?(acceptor, response) + response.context.http_response.status_code == acceptor['expected'] + end + + def matches_error?(acceptor, response) + Aws::Errors::ServiceError === response.error && + response.error.code == acceptor['expected'].gsub('.', '') + end + + def path(acceptor) + acceptor['argument'] + end + + def non_empty_array(acceptor, response, &block) + if response.data + values = JMESPath.search(path(acceptor), response.data) + Array === values && values.count > 0 ? yield(values) : false + else + false + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/waiter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/waiter.rb new file mode 100644 index 0000000..1def3d8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/waiters/waiter.rb @@ -0,0 +1,134 @@ +# frozen_string_literal: true + +module Aws + module Waiters + # @api private + class Waiter + + # @api private + RAISE_HANDLER = Seahorse::Client::Plugins::RaiseResponseErrors::Handler + + # @api private + def initialize(options = {}) + @poller = options[:poller] + @max_attempts = options[:max_attempts] + @delay = options[:delay] + @before_attempt = Array(options[:before_attempt]) + @before_wait = Array(options[:before_wait]) + end + + # @api private + attr_reader :poller + + # @return [Integer] + attr_accessor :max_attempts + + # @return [Float] + attr_accessor :delay + + alias interval delay + alias interval= delay= + + # Register a callback that is invoked before every polling attempt. + # Yields the number of attempts made so far. + # + # waiter.before_attempt do |attempts| + # puts "#{attempts} made, about to make attempt #{attempts + 1}" + # end + # + # Throwing `:success` or `:failure` from the given block will stop + # the waiter and return or raise. 
You can pass a custom message to the + # throw: + # + # # raises Aws::Waiters::Errors::WaiterFailed + # waiter.before_attempt do |attempts| + # throw :failure, 'custom-error-message' + # end + # + # # cause the waiter to stop polling and return + # waiter.before_attempt do |attempts| + # throw :success + # end + # + # @yieldparam [Integer] attempts The number of attempts made. + def before_attempt(&block) + @before_attempt << block if block_given? + end + + # Register a callback that is invoked after an attempt but before + # sleeping. Yields the number of attempts made and the previous response. + # + # waiter.before_wait do |attempts, response| + # puts "#{attempts} made" + # puts response.error.inspect + # puts response.data.inspect + # end + # + # Throwing `:success` or `:failure` from the given block will stop + # the waiter and return or raise. You can pass a custom message to the + # throw: + # + # # raises Aws::Waiters::Errors::WaiterFailed + # waiter.before_attempt do |attempts| + # throw :failure, 'custom-error-message' + # end + # + # # cause the waiter to stop polling and return + # waiter.before_attempt do |attempts| + # throw :success + # end + # + # + # @yieldparam [Integer] attempts The number of attempts already made. + # @yieldparam [Seahorse::Client::Response] response The response from + # the previous polling attempts. + def before_wait(&block) + @before_wait << block if block_given? + end + + # @option options [Client] :client + # @option options [Hash] :params + def wait(options) + catch(:success) do + failure_msg = catch(:failure) do + return poll(options) + end + raise Errors::WaiterFailed.new(failure_msg || 'waiter failed') + end || true + end + + private + + def poll(options) + n = 0 + loop do + trigger_before_attempt(n) + + state, resp = @poller.call(options) + n += 1 + + case state + when :retry + when :success then return resp + when :failure then raise Errors::FailureStateError.new(resp) + when :error then raise Errors::UnexpectedError.new(resp.error) + end + + raise Errors::TooManyAttemptsError.new(n) if n == @max_attempts + + trigger_before_wait(n, resp) + sleep(@delay) + end + end + + def trigger_before_attempt(attempts) + @before_attempt.each { |block| block.call(attempts) } + end + + def trigger_before_wait(attempts, response) + @before_wait.each { |block| block.call(attempts, response) } + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml.rb new file mode 100644 index 0000000..bcd359e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative 'xml/builder' +require_relative 'xml/default_list' +require_relative 'xml/default_map' +require_relative 'xml/doc_builder' +require_relative 'xml/error_handler' +require_relative 'xml/parser' +require_relative 'xml/parser/stack' +require_relative 'xml/parser/frame' +require_relative 'xml/parser/parsing_error' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/builder.rb new file mode 100644 index 0000000..3f5b41b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/builder.rb @@ -0,0 +1,149 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module Xml + class Builder + + include Seahorse::Model::Shapes + + 
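# A hedged usage sketch, mirroring how the stubbing protocols earlier in
# this diff drive the builder: `rules` is assumed to be a
# Seahorse::Model::ShapeRef for an output shape, and `params` a matching
# hash of values.
#
#   xml = []
#   Aws::Xml::Builder.new(rules, target: xml).to_xml(params)
#   xml.join # => the serialized XML document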
def initialize(rules, options = {}) + @rules = rules + @xml = options[:target] || [] + indent = options[:indent] || '' + pad = options[:pad] || '' + @builder = DocBuilder.new(target: @xml, indent: indent, pad: pad) + end + + def to_xml(params) + structure(@rules.location_name, @rules, params) + @xml.join + end + alias serialize to_xml + + private + + def structure(name, ref, values) + if values.empty? + node(name, ref) + else + node(name, ref, structure_attrs(ref, values)) do + ref.shape.members.each do |member_name, member_ref| + next if values[member_name].nil? + next if xml_attribute?(member_ref) + member(member_ref.location_name, member_ref, values[member_name]) + end + end + end + end + + def structure_attrs(ref, values) + ref.shape.members.inject({}) do |attrs, (member_name, member_ref)| + if xml_attribute?(member_ref) && values.key?(member_name) + attrs[member_ref.location_name] = values[member_name] + end + attrs + end + end + + def list(name, ref, values) + if ref[:flattened] || ref.shape.flattened + values.each do |value| + member(ref.shape.member.location_name || name, ref.shape.member, value) + end + else + node(name, ref) do + values.each do |value| + mname = ref.shape.member.location_name || 'member' + member(mname, ref.shape.member, value) + end + end + end + end + + def map(name, ref, hash) + key_ref = ref.shape.key + value_ref = ref.shape.value + if ref.shape.flattened + hash.each do |key, value| + node(name, ref) do + member(key_ref.location_name || 'key', key_ref, key) + member(value_ref.location_name || 'value', value_ref, value) + end + end + else + node(name, ref) do + hash.each do |key, value| + node('entry', ref) do + member(key_ref.location_name || 'key', key_ref, key) + member(value_ref.location_name || 'value', value_ref, value) + end + end + end + end + end + + def member(name, ref, value) + case ref.shape + when StructureShape then structure(name, ref, value) + when ListShape then list(name, ref, value) + when MapShape then map(name, ref, value) + when TimestampShape then node(name, ref, timestamp(ref, value)) + when BlobShape then node(name, ref, blob(value)) + else + node(name, ref, value.to_s) + end + end + + def blob(value) + value = value.read unless String === value + Base64.strict_encode64(value) + end + + def timestamp(ref, value) + case ref['timestampFormat'] || ref.shape['timestampFormat'] + when 'unixTimestamp' then value.to_i + when 'rfc822' then value.utc.httpdate + else + # xml defaults to iso8601 + value.utc.iso8601 + end + end + + # The `args` list may contain: + # + # * [] - empty, no value or attributes + # * [value] - inline element, no attributes + # * [value, attributes_hash] - inline element with attributes + # * [attributes_hash] - self closing element with attributes + # + # Pass a block if you want to nest XML nodes inside. When doing this, + # you may *not* pass a value to the `args` list. + # + def node(name, ref, *args, &block) + attrs = args.last.is_a?(Hash) ? 
args.pop : {} + attrs = shape_attrs(ref).merge(attrs) + args << attrs + @builder.node(name, *args, &block) + end + + def shape_attrs(ref) + if xmlns = ref['xmlNamespace'] + if prefix = xmlns['prefix'] + { 'xmlns:' + prefix => xmlns['uri'] } + else + { 'xmlns' => xmlns['uri'] } + end + else + {} + end + end + + def xml_attribute?(ref) + !!ref['xmlAttribute'] + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_list.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_list.rb new file mode 100644 index 0000000..32b633b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_list.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +module Aws + module Xml + # @api private + class DefaultList < Array + + alias nil? empty? + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_map.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_map.rb new file mode 100644 index 0000000..be178c2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/default_map.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +module Aws + module Xml + # @api private + class DefaultMap < Hash + + alias nil? empty? + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/doc_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/doc_builder.rb new file mode 100644 index 0000000..9ed056b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/doc_builder.rb @@ -0,0 +1,97 @@ +# frozen_string_literal: true + +module Aws + module Xml + class DocBuilder + + # @option options [#<<] :target ('') + # @option options [String] :pad ('') + # @option options [String] :indent ('') + def initialize(options = {}) + @target = options[:target] || ( + # The String has to be mutable + # because @target implements `<<` method. + String.new + ) + @indent = options[:indent] || '' + @pad = options[:pad] || '' + @end_of_line = @indent == '' ? '' : "\n" + end + + attr_reader :target + + # @overload node(name, attributes = {}) + # Adds a self closing element without any content. + # + # @overload node(name, value, attributes = {}) + # Adds an element that opens and closes on the same line with + # simple text content. + # + # @overload node(name, attributes = {}, &block) + # Adds a wrapping element. Calling {#node} from inside + # the yielded block creates nested elements. + # + # @return [void] + # + def node(name, *args, &block) + attrs = args.last.is_a?(Hash) ? args.pop : {} + if block_given? + @target << open_el(name, attrs) + @target << @end_of_line + increase_pad(&block) + @target << @pad + @target << close_el(name) + elsif args.empty? 
+ @target << empty_element(name, attrs) + else + @target << inline_element(name, args.first, attrs) + end + end + + private + + def empty_element(name, attrs) + "#{@pad}<#{name}#{attributes(attrs)}/>#{@end_of_line}" + end + + def inline_element(name, value, attrs) + "#{open_el(name, attrs)}#{escape(value, :text)}#{close_el(name)}" + end + + def open_el(name, attrs) + "#{@pad}<#{name}#{attributes(attrs)}>" + end + + def close_el(name) + "</#{name}>#{@end_of_line}" + end + + def escape(string, text_or_attr) + string.to_s + .encode(:xml => text_or_attr) + .gsub("\u{000D}", '&#13;') # Carriage Return + .gsub("\u{000A}", '&#10;') # Line Feed + .gsub("\u{0085}", '&#133;') # Next Line + .gsub("\u{2028}", '&#8232;') # Line Separator + end + + def attributes(attr) + if attr.empty? + '' + else + ' ' + attr.map do |key, value| + "#{key}=#{escape(value, :attr)}" + end.join(' ') + end + end + + def increase_pad(&block) + pre_increase = @pad + @pad = @pad + @indent + block.call + @pad = pre_increase + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/error_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/error_handler.rb new file mode 100644 index 0000000..0f76c41 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/error_handler.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +require 'cgi' + +module Aws + module Xml + class ErrorHandler < Seahorse::Client::Handler + + def call(context) + @handler.call(context).on(300..599) do |response| + response.error = error(context) unless response.error + response.data = nil + end + end + + private + + def error(context) + body = context.http_response.body_contents + if body.empty? + code = http_status_error_code(context) + message = '' + data = EmptyStructure.new + else + code, message, data = extract_error(body, context) + end + context[:request_id] = request_id(body) + errors_module = context.client.class.errors_module + error_class = errors_module.error_class(code).new(context, message, data) + error_class + end + + def extract_error(body, context) + code = error_code(body, context) + [ + code, + error_message(body), + error_data(context, code) + ] + end + + def error_data(context, code) + data = EmptyStructure.new + if error_rules = context.operation.errors + error_rules.each do |rule| + # for modeled shape with error trait + # match `code` in the error trait before + # match modeled shape name + error_shape_code = rule.shape['error']['code'] if rule.shape['error'] + match = (code == error_shape_code || code == rule.shape.name) + if match && rule.shape.members.any?
+ data = Parser.new(rule).parse(context.http_response.body_contents) + end + end + end + data + rescue Xml::Parser::ParsingError + EmptyStructure.new + end + + def error_code(body, context) + if matches = body.match(/<Code>(.+?)<\/Code>/) + remove_prefix(unescape(matches[1]), context) + else + http_status_error_code(context) + end + end + + def http_status_error_code(context) + status_code = context.http_response.status_code + { + 302 => 'MovedTemporarily', + 304 => 'NotModified', + 400 => 'BadRequest', + 403 => 'Forbidden', + 404 => 'NotFound', + 412 => 'PreconditionFailed', + 413 => 'RequestEntityTooLarge', + }[status_code] || "Http#{status_code}Error" + end + + def remove_prefix(error_code, context) + if prefix = context.config.api.metadata['errorPrefix'] + error_code.sub(/^#{prefix}/, '') + else + error_code + end + end + + def error_message(body) + if matches = body.match(/<Message>(.+?)<\/Message>/m) + unescape(matches[1]) + else + '' + end + end + + def request_id(body) + if matches = body.match(/<RequestId>(.+?)<\/RequestId>/m) + matches[1] + end + end + + def unescape(str) + CGI.unescapeHTML(str) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser.rb new file mode 100644 index 0000000..ebe7aa6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser.rb @@ -0,0 +1,100 @@ +# frozen_string_literal: true + +module Aws + # @api private + module Xml + # A SAX-style XML parser that uses a shape context to handle types. + class Parser + + # @param [Seahorse::Model::ShapeRef] rules + def initialize(rules, options = {}) + @rules = rules + @engine = options[:engine] || self.class.engine + end + + # Parses the XML document, returning a parsed structure. + # + # If you pass a block, this will yield for XML + # elements that are not modeled in the rules given + # to the constructor. + # + # parser.parse(xml) do |path, value| + # puts "unhandled: #{path.join('/')} - #{value}" + # end + # + # The purpose of the unhandled callback block is to + # allow callers to access values such as the EC2 + # request ID that are part of the XML body but not + # part of the operation result. + # + # @param [String] xml An XML document string to parse. + # @param [Structure] target (nil) + # @return [Structure] + def parse(xml, target = nil, &unhandled_callback) + xml = '' if xml.nil? or xml.empty? + stack = Stack.new(@rules, target, &unhandled_callback) + @engine.new(stack).parse(xml.to_s) + stack.result + end + + class << self + + # @param [Symbol,Class] engine + # Must be one of the following values: + # + # * :ox + # * :oga + # * :libxml + # * :nokogiri + # * :rexml + # + def engine= engine + @engine = Class === engine ? engine : load_engine(engine) + end + + # @return [Class] Returns the default parsing engine. + # One of: + # + # * {OxEngine} + # * {OgaEngine} + # * {LibxmlEngine} + # * {NokogiriEngine} + # * {RexmlEngine} + # + def engine + set_default_engine unless @engine + @engine + end + + def set_default_engine + [:ox, :oga, :libxml, :nokogiri, :rexml].each do |name| + @engine ||= try_load_engine(name) + end + unless @engine + raise 'Unable to find a compatible xml library. 
' \ + 'Ensure that you have installed or added to your Gemfile one of ' \ + 'ox, oga, libxml, nokogiri or rexml' + end + end + + private + + def load_engine(name) + require "aws-sdk-core/xml/parser/engines/#{name}" + const_name = name[0].upcase + name[1..-1] + 'Engine' + const_get(const_name) + end + + def try_load_engine(name) + load_engine(name) + rescue LoadError + false + end + + end + + set_default_engine + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/libxml.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/libxml.rb new file mode 100644 index 0000000..de3c13b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/libxml.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +require 'libxml' + +module Aws + module Xml + class Parser + class LibxmlEngine + + include LibXML::XML::SaxParser::Callbacks + + def initialize(stack) + @stack = stack + end + + def parse(xml) + parser = ::LibXML::XML::SaxParser.string(xml) + parser.callbacks = self + parser.parse + end + + def on_start_element_ns(element_name, attributes, prefix = nil, uri = nil, ns = {}) + # libxml-ruby does not provide a mapping from element attribute + # names to their qname prefixes. The following code line assumes + # that if a attribute ns is defined it applies to all attributes. + # This is necessary to support parsing S3 Object ACL Grantees. + # qnames are not used by any other AWS attribute. Also, new + # services are using JSON, limiting the possible blast radius + # of this patch. + attr_ns_prefix = ns.keys.first + @stack.start_element(element_name) + attributes.each do |attr_name, attr_value| + attr_name = "#{attr_ns_prefix}:#{attr_name}" if attr_ns_prefix + @stack.attr(attr_name, attr_value) + end + end + + def on_end_element_ns(*ignored) + @stack.end_element + end + + def on_characters(chars) + @stack.text(chars) + end + + def on_error(msg) + @stack.error(msg) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/nokogiri.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/nokogiri.rb new file mode 100644 index 0000000..e62d010 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/nokogiri.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +require 'nokogiri' + +module Aws + module Xml + class Parser + class NokogiriEngine + + def initialize(stack) + @stack = stack + end + + def parse(xml) + Nokogiri::XML::SAX::Parser.new(self).parse(xml) + end + + def xmldecl(*args); end + def start_document; end + def end_document; end + def comment(*args); end + + def start_element_namespace(element_name, attributes = [], *ignored) + @stack.start_element(element_name) + attributes.each do |attr| + name = attr.localname + name = "#{attr.prefix}:#{name}" if attr.prefix + @stack.attr(name, attr.value) + end + end + + def characters(chars) + @stack.text(chars) + end + + def end_element_namespace(*ignored) + @stack.end_element + end + + def error(msg) + @stack.error(msg) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/oga.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/oga.rb new file mode 100644 index 0000000..6520901 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/oga.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +# Oga Java requires JRuby.runtime +require 'jruby' if RUBY_PLATFORM == 'java' +require 'oga' + +module Aws + module Xml + class Parser + class OgaEngine + + def initialize(stack) + @stack = stack + @depth = 0 + end + + def parse(xml) + Oga.sax_parse_xml(self, xml, strict:true) + rescue LL::ParserError => error + raise ParsingError.new(error.message, nil, nil) + end + + def on_element(namespace, name, attrs = {}) + @depth += 1 + @stack.start_element(name) + attrs.each do |attr| + @stack.attr(*attr) + end + end + + def on_text(value) + @stack.text(value) if @depth > 0 + end + + def after_element(_, _) + @stack.end_element + @depth -= 1 + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/ox.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/ox.rb new file mode 100644 index 0000000..9759b5b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/ox.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +require 'ox' +require 'stringio' + +module Aws + module Xml + class Parser + class OxEngine + + def initialize(stack) + @stack = stack + end + + def parse(xml) + Ox.sax_parse( + @stack, StringIO.new(xml), + :convert_special => true, + :skip => :skip_return + ) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/rexml.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/rexml.rb new file mode 100644 index 0000000..e491364 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/engines/rexml.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +require 'rexml/document' +require 'rexml/streamlistener' + +module Aws + module Xml + class Parser + class RexmlEngine + + include REXML::StreamListener + + def initialize(stack) + @stack = stack + @depth = 0 + end + + def parse(xml) + begin + mutable_xml = xml.dup # REXML only accepts mutable string + source = REXML::Source.new(mutable_xml) + REXML::Parsers::StreamParser.new(source, self).parse + rescue REXML::ParseException => error + @stack.error(error.message) + end + end + + def tag_start(name, attrs) + @depth += 1 + @stack.start_element(name) + attrs.each do |attr| + @stack.attr(*attr) + end + end + + def text(value) + @stack.text(value) if @depth > 0 + end + + def tag_end(name) + @stack.end_element + @depth -= 1 + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/frame.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/frame.rb new file mode 100644 index 0000000..f712f62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/frame.rb @@ -0,0 +1,333 @@ +# frozen_string_literal: true + +require 'base64' +require 'time' + +module Aws + module Xml + class Parser + class Frame + + include Seahorse::Model::Shapes + + class << self + + def new(path, parent, ref, result = nil) + if self == Frame + frame = frame_class(ref).allocate + frame.send(:initialize, path, parent, ref, result) + frame + else + super + end + end + + private + + def frame_class(ref) + klass = FRAME_CLASSES[ref.shape.class] + if ListFrame == klass && (ref.shape.flattened || 
ref["flattened"]) + FlatListFrame + elsif MapFrame == klass && (ref.shape.flattened || ref["flattened"]) + MapEntryFrame + else + klass + end + end + + end + + def initialize(path, parent, ref, result) + @path = path + @parent = parent + @ref = ref + @result = result + @text = [] + end + + attr_reader :parent + + attr_reader :ref + + attr_reader :result + + def set_text(value) + @text << value + end + + def child_frame(xml_name) + NullFrame.new(xml_name, self) + end + + def consume_child_frame(child); end + + # @api private + def path + if Stack === parent + [@path] + else + parent.path + [@path] + end + end + + # @api private + def yield_unhandled_value(path, value) + parent.yield_unhandled_value(path, value) + end + + end + + class StructureFrame < Frame + + def initialize(xml_name, parent, ref, result = nil) + super + @result ||= ref.shape.struct_class.new + @members = {} + ref.shape.members.each do |member_name, member_ref| + apply_default_value(member_name, member_ref) + @members[xml_name(member_ref)] = { + name: member_name, + ref: member_ref, + } + end + end + + def child_frame(xml_name) + if @member = @members[xml_name] + Frame.new(xml_name, self, @member[:ref]) + elsif @ref.shape.union + UnknownMemberFrame.new(xml_name, self, nil, @result) + else + NullFrame.new(xml_name, self) + end + end + + def consume_child_frame(child) + case child + when MapEntryFrame + @result[@member[:name]][child.key.result] = child.value.result + when FlatListFrame + @result[@member[:name]] << child.result + when UnknownMemberFrame + @result[:unknown] = { 'name' => child.path.last, 'value' => child.result } + when NullFrame + else + @result[@member[:name]] = child.result + end + + if @ref.shape.union + # a union may only have one member set + # convert to the union subclass + # The default Struct created will have defaults set for all values + # This also sets only one of the values leaving everything else nil + # as required for unions + set_member_name = @member ? 
@member[:name] : :unknown + member_subclass = @ref.shape.member_subclass(set_member_name).new # shape.member_subclass(target.member).new + member_subclass[set_member_name] = @result[set_member_name] + @result = member_subclass + end + end + + private + + def apply_default_value(name, ref) + case ref.shape + when ListShape then @result[name] = DefaultList.new + when MapShape then @result[name] = DefaultMap.new + end + end + + def xml_name(ref) + if flattened_list?(ref) + ref.shape.member.location_name || ref.location_name + else + ref.location_name + end + end + + def flattened_list?(ref) + ListShape === ref.shape && (ref.shape.flattened || ref["flattened"]) + end + + end + + class ListFrame < Frame + + def initialize(*args) + super + @result = [] + @member_xml_name = @ref.shape.member.location_name || 'member' + end + + def child_frame(xml_name) + if xml_name == @member_xml_name + Frame.new(xml_name, self, @ref.shape.member) + else + raise NotImplementedError + end + end + + def consume_child_frame(child) + @result << child.result unless NullFrame === child + end + + end + + class FlatListFrame < Frame + + def initialize(xml_name, *args) + super + @member = Frame.new(xml_name, self, @ref.shape.member) + end + + def result + @member.result + end + + def set_text(value) + @member.set_text(value) + end + + def child_frame(xml_name) + @member.child_frame(xml_name) + end + + def consume_child_frame(child) + @result = @member.result + end + + end + + class MapFrame < Frame + + def initialize(*args) + super + @result = {} + end + + def child_frame(xml_name) + if xml_name == 'entry' + MapEntryFrame.new(xml_name, self, @ref) + else + raise NotImplementedError + end + end + + def consume_child_frame(child) + @result[child.key.result] = child.value.result + end + + end + + class MapEntryFrame < Frame + + def initialize(xml_name, *args) + super + @key_name = @ref.shape.key.location_name || 'key' + @key = Frame.new(xml_name, self, @ref.shape.key) + @value_name = @ref.shape.value.location_name || 'value' + @value = Frame.new(xml_name, self, @ref.shape.value) + end + + # @return [StringFrame] + attr_reader :key + + # @return [Frame] + attr_reader :value + + def child_frame(xml_name) + if @key_name == xml_name + @key + elsif @value_name == xml_name + @value + else + NullFrame.new(xml_name, self) + end + end + + end + + class NullFrame < Frame + def self.new(xml_name, parent) + super(xml_name, parent, nil, nil) + end + + def set_text(value) + yield_unhandled_value(path, value) + super + end + end + + class UnknownMemberFrame < Frame + def result + @text.join + end + end + + class BlobFrame < Frame + def result + @text.empty? ? nil : Base64.decode64(@text.join) + end + end + + class BooleanFrame < Frame + def result + @text.empty? ? nil : (@text.join == 'true') + end + end + + class FloatFrame < Frame + def result + @text.empty? ? nil : @text.join.to_f + end + end + + class IntegerFrame < Frame + def result + @text.empty? ? nil : @text.join.to_i + end + end + + class StringFrame < Frame + def result + @text.join + end + end + + class TimestampFrame < Frame + def result + @text.empty? ? 
nil : parse(@text.join) + end + def parse(value) + case value + when nil then nil + when /^\d+$/ then Time.at(value.to_i) + else + begin + Time.parse(value).utc + rescue ArgumentError + raise "unhandled timestamp format `#{value}'" + end + end + end + end + + include Seahorse::Model::Shapes + + FRAME_CLASSES = { + NilClass => NullFrame, + BlobShape => BlobFrame, + BooleanShape => BooleanFrame, + FloatShape => FloatFrame, + IntegerShape => IntegerFrame, + ListShape => ListFrame, + MapShape => MapFrame, + StringShape => StringFrame, + StructureShape => StructureFrame, + UnionShape => StructureFrame, + TimestampShape => TimestampFrame, + } + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/parsing_error.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/parsing_error.rb new file mode 100644 index 0000000..d7e6dfd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/parsing_error.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module Aws + module Xml + class Parser + class ParsingError < RuntimeError + + def initialize(msg, line, column) + super(msg) + end + + # @return [Integer,nil] + attr_reader :line + + # @return [Integer,nil] + attr_reader :column + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/stack.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/stack.rb new file mode 100644 index 0000000..dded343 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-core/xml/parser/stack.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module Aws + module Xml + class Parser + class Stack + + def initialize(ref, result = nil, &unhandled_callback) + @ref = ref + @frame = self + @result = result + @unhandled_callback = unhandled_callback + end + + attr_reader :frame + + attr_reader :result + + def start_element(name) + @frame = @frame.child_frame(name.to_s) + end + + def attr(name, value) + if name.to_s == 'encoding' && value.to_s == 'base64' + @frame = BlobFrame.new(name, @frame.parent, @frame.ref) + else + start_element(name) + text(value) + end_element(name) + end + end + + def text(value) + @frame.set_text(value) + end + + def end_element(*args) + @frame.parent.consume_child_frame(@frame) + if @frame.parent.is_a?(FlatListFrame) + @frame = @frame.parent + @frame.parent.consume_child_frame(@frame) + end + @frame = @frame.parent + end + + def error(msg, line = nil, column = nil) + raise ParsingError.new(msg, line, column) + end + + def child_frame(name) + Frame.new(name, self, @ref, @result) + end + + def consume_child_frame(frame) + @result = frame.result + end + + # @api private + def yield_unhandled_value(path, value) + if @unhandled_callback + @unhandled_callback.call(path, value) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso.rb new file mode 100644 index 0000000..77fe05d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +unless Module.const_defined?(:Aws) + require 'aws-sdk-core' + require 'aws-sigv4' +end + +require_relative 'aws-sdk-sso/types' +require_relative 'aws-sdk-sso/client_api' +require_relative 'aws-sdk-sso/plugins/endpoints.rb' +require_relative 'aws-sdk-sso/client' +require_relative 'aws-sdk-sso/errors' +require_relative 'aws-sdk-sso/resource' +require_relative 'aws-sdk-sso/endpoint_parameters' +require_relative 'aws-sdk-sso/endpoint_provider' +require_relative 'aws-sdk-sso/endpoints' +require_relative 'aws-sdk-sso/customizations' + +# This module provides support for AWS Single Sign-On. This module is available in the +# `aws-sdk-core` gem. +# +# # Client +# +# The {Client} class provides one method for each API operation. Operation +# methods each accept a hash of request parameters and return a response +# structure. +# +# sso = Aws::SSO::Client.new +# resp = sso.get_role_credentials(params) +# +# See {Client} for more information. +# +# # Errors +# +# Errors returned from AWS Single Sign-On are defined in the +# {Errors} module and all extend {Errors::ServiceError}. +# +# begin +# # do stuff +# rescue Aws::SSO::Errors::ServiceError +# # rescues all AWS Single Sign-On API errors +# end +# +# See {Errors} for more information. +# +# @!group service +module Aws::SSO + + GEM_VERSION = '3.171.0' + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client.rb new file mode 100644 index 0000000..0c6cd3b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client.rb @@ -0,0 +1,610 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +require 'seahorse/client/plugins/content_length.rb' +require 'aws-sdk-core/plugins/credentials_configuration.rb' +require 'aws-sdk-core/plugins/logging.rb' +require 'aws-sdk-core/plugins/param_converter.rb' +require 'aws-sdk-core/plugins/param_validator.rb' +require 'aws-sdk-core/plugins/user_agent.rb' +require 'aws-sdk-core/plugins/helpful_socket_errors.rb' +require 'aws-sdk-core/plugins/retry_errors.rb' +require 'aws-sdk-core/plugins/global_configuration.rb' +require 'aws-sdk-core/plugins/regional_endpoint.rb' +require 'aws-sdk-core/plugins/endpoint_discovery.rb' +require 'aws-sdk-core/plugins/endpoint_pattern.rb' +require 'aws-sdk-core/plugins/response_paging.rb' +require 'aws-sdk-core/plugins/stub_responses.rb' +require 'aws-sdk-core/plugins/idempotency_token.rb' +require 'aws-sdk-core/plugins/jsonvalue_converter.rb' +require 'aws-sdk-core/plugins/client_metrics_plugin.rb' +require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb' +require 'aws-sdk-core/plugins/transfer_encoding.rb' +require 'aws-sdk-core/plugins/http_checksum.rb' +require 'aws-sdk-core/plugins/checksum_algorithm.rb' +require 'aws-sdk-core/plugins/defaults_mode.rb' +require 'aws-sdk-core/plugins/recursion_detection.rb' +require 'aws-sdk-core/plugins/sign.rb' +require 'aws-sdk-core/plugins/protocols/rest_json.rb' + +Aws::Plugins::GlobalConfiguration.add_identifier(:sso) + +module Aws::SSO + # An API client for SSO. To construct a client, you need to configure a `:region` and `:credentials`. 
+ # + # client = Aws::SSO::Client.new( + # region: region_name, + # credentials: credentials, + # # ... + # ) + # + # For details on configuring region and credentials see + # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html). + # + # See {#initialize} for a full list of supported configuration options. + class Client < Seahorse::Client::Base + + include Aws::ClientStubs + + @identifier = :sso + + set_api(ClientApi::API) + + add_plugin(Seahorse::Client::Plugins::ContentLength) + add_plugin(Aws::Plugins::CredentialsConfiguration) + add_plugin(Aws::Plugins::Logging) + add_plugin(Aws::Plugins::ParamConverter) + add_plugin(Aws::Plugins::ParamValidator) + add_plugin(Aws::Plugins::UserAgent) + add_plugin(Aws::Plugins::HelpfulSocketErrors) + add_plugin(Aws::Plugins::RetryErrors) + add_plugin(Aws::Plugins::GlobalConfiguration) + add_plugin(Aws::Plugins::RegionalEndpoint) + add_plugin(Aws::Plugins::EndpointDiscovery) + add_plugin(Aws::Plugins::EndpointPattern) + add_plugin(Aws::Plugins::ResponsePaging) + add_plugin(Aws::Plugins::StubResponses) + add_plugin(Aws::Plugins::IdempotencyToken) + add_plugin(Aws::Plugins::JsonvalueConverter) + add_plugin(Aws::Plugins::ClientMetricsPlugin) + add_plugin(Aws::Plugins::ClientMetricsSendPlugin) + add_plugin(Aws::Plugins::TransferEncoding) + add_plugin(Aws::Plugins::HttpChecksum) + add_plugin(Aws::Plugins::ChecksumAlgorithm) + add_plugin(Aws::Plugins::DefaultsMode) + add_plugin(Aws::Plugins::RecursionDetection) + add_plugin(Aws::Plugins::Sign) + add_plugin(Aws::Plugins::Protocols::RestJson) + add_plugin(Aws::SSO::Plugins::Endpoints) + + # @overload initialize(options) + # @param [Hash] options + # @option options [required, Aws::CredentialProvider] :credentials + # Your AWS credentials. This can be an instance of any one of the + # following classes: + # + # * `Aws::Credentials` - Used for configuring static, non-refreshing + # credentials. + # + # * `Aws::SharedCredentials` - Used for loading static credentials from a + # shared file, such as `~/.aws/config`. + # + # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role. + # + # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to + # assume a role after providing credentials via the web. + # + # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an + # access token generated from `aws login`. + # + # * `Aws::ProcessCredentials` - Used for loading credentials from a + # process that outputs to stdout. + # + # * `Aws::InstanceProfileCredentials` - Used for loading credentials + # from an EC2 IMDS on an EC2 instance. + # + # * `Aws::ECSCredentials` - Used for loading credentials from + # instances running in ECS. + # + # * `Aws::CognitoIdentityCredentials` - Used for loading credentials + # from the Cognito Identity service. + # + # When `:credentials` are not configured directly, the following + # locations will be searched for credentials: + # + # * `Aws.config[:credentials]` + # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options. + # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY'] + # * `~/.aws/credentials` + # * `~/.aws/config` + # * EC2/ECS IMDS instance profile - When used by default, the timeouts + # are very aggressive. Construct and pass an instance of + # `Aws::InstanceProfileCredentails` or `Aws::ECSCredentials` to + # enable retries and extended timeouts. Instance profile credential + # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED'] + # to true. 
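# For illustration, a hedged sketch of passing static credentials
# directly (the region and key values are placeholders, not real
# credentials):
#
#   client = Aws::SSO::Client.new(
#     region: 'us-east-1',
#     credentials: Aws::Credentials.new('akid', 'secret')
#   )
#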
+ # + # @option options [required, String] :region + # The AWS region to connect to. The configured `:region` is + # used to determine the service `:endpoint`. When not passed, + # a default `:region` is searched for in the following locations: + # + # * `Aws.config[:region]` + # * `ENV['AWS_REGION']` + # * `ENV['AMAZON_REGION']` + # * `ENV['AWS_DEFAULT_REGION']` + # * `~/.aws/credentials` + # * `~/.aws/config` + # + # @option options [String] :access_key_id + # + # @option options [Boolean] :active_endpoint_cache (false) + # When set to `true`, a thread polling for endpoints will be running in + # the background every 60 secs (default). Defaults to `false`. + # + # @option options [Boolean] :adaptive_retry_wait_to_fill (true) + # Used only in `adaptive` retry mode. When true, the request will sleep + # until there is sufficent client side capacity to retry the request. + # When false, the request will raise a `RetryCapacityNotAvailableError` and will + # not retry instead of sleeping. + # + # @option options [Boolean] :client_side_monitoring (false) + # When `true`, client-side metrics will be collected for all API requests from + # this client. + # + # @option options [String] :client_side_monitoring_client_id ("") + # Allows you to provide an identifier for this client which will be attached to + # all generated client side metrics. Defaults to an empty string. + # + # @option options [String] :client_side_monitoring_host ("127.0.0.1") + # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client + # side monitoring agent is running on, where client metrics will be published via UDP. + # + # @option options [Integer] :client_side_monitoring_port (31000) + # Required for publishing client metrics. The port that the client side monitoring + # agent is running on, where client metrics will be published via UDP. + # + # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher) + # Allows you to provide a custom client-side monitoring publisher class. By default, + # will use the Client Side Monitoring Agent Publisher. + # + # @option options [Boolean] :convert_params (true) + # When `true`, an attempt is made to coerce request parameters into + # the required types. + # + # @option options [Boolean] :correct_clock_skew (true) + # Used only in `standard` and adaptive retry modes. Specifies whether to apply + # a clock skew correction and retry requests with skewed client clocks. + # + # @option options [String] :defaults_mode ("legacy") + # See {Aws::DefaultsModeConfiguration} for a list of the + # accepted modes and the configuration defaults that are included. + # + # @option options [Boolean] :disable_host_prefix_injection (false) + # Set to true to disable SDK automatically adding host prefix + # to default service endpoint when available. + # + # @option options [String] :endpoint + # The client endpoint is normally constructed from the `:region` + # option. You should only configure an `:endpoint` when connecting + # to test or custom endpoints. This should be a valid HTTP(S) URI. + # + # @option options [Integer] :endpoint_cache_max_entries (1000) + # Used for the maximum size limit of the LRU cache storing endpoints data + # for endpoint discovery enabled operations. Defaults to 1000. + # + # @option options [Integer] :endpoint_cache_max_threads (10) + # Used for the maximum threads in use for polling endpoints to be cached, defaults to 10. 
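# For illustration, a hedged sketch of the `:endpoint` override described
# above (the URL is a placeholder for a local test endpoint, not a real
# AWS endpoint):
#
#   client = Aws::SSO::Client.new(
#     region: 'us-east-1',
#     endpoint: 'http://localhost:4566'
#   )
#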
+ # + # @option options [Integer] :endpoint_cache_poll_interval (60) + # When :endpoint_discovery and :active_endpoint_cache is enabled, + # Use this option to config the time interval in seconds for making + # requests fetching endpoints information. Defaults to 60 sec. + # + # @option options [Boolean] :endpoint_discovery (false) + # When set to `true`, endpoint discovery will be enabled for operations when available. + # + # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default) + # The log formatter. + # + # @option options [Symbol] :log_level (:info) + # The log level to send messages to the `:logger` at. + # + # @option options [Logger] :logger + # The Logger instance to send log messages to. If this option + # is not set, logging will be disabled. + # + # @option options [Integer] :max_attempts (3) + # An integer representing the maximum number attempts that will be made for + # a single request, including the initial attempt. For example, + # setting this value to 5 will result in a request being retried up to + # 4 times. Used in `standard` and `adaptive` retry modes. + # + # @option options [String] :profile ("default") + # Used when loading credentials from the shared credentials file + # at HOME/.aws/credentials. When not specified, 'default' is used. + # + # @option options [Proc] :retry_backoff + # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay. + # This option is only used in the `legacy` retry mode. + # + # @option options [Float] :retry_base_delay (0.3) + # The base delay in seconds used by the default backoff function. This option + # is only used in the `legacy` retry mode. + # + # @option options [Symbol] :retry_jitter (:none) + # A delay randomiser function used by the default backoff function. + # Some predefined functions can be referenced by name - :none, :equal, :full, + # otherwise a Proc that takes and returns a number. This option is only used + # in the `legacy` retry mode. + # + # @see https://www.awsarchitectureblog.com/2015/03/backoff.html + # + # @option options [Integer] :retry_limit (3) + # The maximum number of times to retry failed requests. Only + # ~ 500 level server errors and certain ~ 400 level client errors + # are retried. Generally, these are throttling errors, data + # checksum errors, networking errors, timeout errors, auth errors, + # endpoint discovery, and errors from expired credentials. + # This option is only used in the `legacy` retry mode. + # + # @option options [Integer] :retry_max_delay (0) + # The maximum number of seconds to delay between retries (0 for no limit) + # used by the default backoff function. This option is only used in the + # `legacy` retry mode. + # + # @option options [String] :retry_mode ("legacy") + # Specifies which retry algorithm to use. Values are: + # + # * `legacy` - The pre-existing retry behavior. This is default value if + # no retry mode is provided. + # + # * `standard` - A standardized set of retry rules across the AWS SDKs. + # This includes support for retry quotas, which limit the number of + # unsuccessful retries a client can make. + # + # * `adaptive` - An experimental retry mode that includes all the + # functionality of `standard` mode along with automatic client side + # throttling. This is a provisional mode that may change behavior + # in the future. 
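# For illustration, a hedged sketch combining the retry options above
# (the values are examples, not recommendations):
#
#   client = Aws::SSO::Client.new(
#     region: 'us-east-1',
#     retry_mode: 'standard',
#     max_attempts: 5
#   )
#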
+ #
+ #
+ # @option options [String] :secret_access_key
+ #
+ # @option options [String] :session_token
+ #
+ # @option options [Boolean] :stub_responses (false)
+ # Causes the client to return stubbed responses. By default,
+ # fake responses are generated and returned. You can specify
+ # the response data to return or errors to raise by calling
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+ #
+ # ** Please note ** When response stubbing is enabled, no HTTP
+ # requests are made, and retries are disabled.
+ #
+ # @option options [Aws::TokenProvider] :token_provider
+ # A Bearer Token Provider. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
+ # tokens.
+ #
+ # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
+ # access token generated from `aws login`.
+ #
+ # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
+ # will be used to search for tokens configured for your profile in shared configuration files.
+ #
+ # @option options [Boolean] :use_dualstack_endpoint
+ # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
+ # will be used if available.
+ #
+ # @option options [Boolean] :use_fips_endpoint
+ # When set to `true`, fips compatible endpoints will be used if available.
+ # When a `fips` region is used, the region is normalized and this config
+ # is set to `true`.
+ #
+ # @option options [Boolean] :validate_params (true)
+ # When `true`, request parameters are validated before
+ # sending the request.
+ #
+ # @option options [Aws::SSO::EndpointProvider] :endpoint_provider
+ # The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::SSO::EndpointParameters`.
+ #
+ # @option options [URI::HTTP,String] :http_proxy A proxy to send
+ # requests through. Formatted like 'http://proxy.com:123'.
+ #
+ # @option options [Float] :http_open_timeout (15) The number of
+ # seconds to wait when opening an HTTP session before raising a
+ # `Timeout::Error`.
+ #
+ # @option options [Float] :http_read_timeout (60) The default
+ # number of seconds to wait for response data. This value can
+ # safely be set per-request on the session.
+ #
+ # @option options [Float] :http_idle_timeout (5) The number of
+ # seconds a connection is allowed to sit idle before it is
+ # considered stale. Stale connections are closed and removed
+ # from the pool before making a request.
+ #
+ # @option options [Float] :http_continue_timeout (1) The number of
+ # seconds to wait for a 100-continue response before sending the
+ # request body. This option has no effect unless the request has
+ # an "Expect" header set to "100-continue". Setting this option to
+ # `nil` disables this behaviour. This value can safely be set per
+ # request on the session.
+ #
+ # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
+ # in seconds.
+ #
+ # @option options [Boolean] :http_wire_trace (false) When `true`,
+ # HTTP debug output will be sent to the `:logger`.
+ #
+ # @option options [Boolean] :ssl_verify_peer (true) When `true`,
+ # SSL peer certificates are verified when establishing a
+ # connection.
+ #
+ # @option options [String] :ssl_ca_bundle Full path to the SSL
+ # certificate authority bundle file that should be used when
+ # verifying peer certificates. If you do not pass
+ # `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
+ # will be used if available.
+ #
+ # @option options [String] :ssl_ca_directory Full path of the
+ # directory that contains the unbundled SSL certificate
+ # authority files for verifying peer certificates. If you do
+ # not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
+ # system default will be used if available.
+ #
+ def initialize(*args)
+ super
+ end
+
+ # @!group API Operations
+
+ # Returns the STS short-term credentials for a given role name that is
+ # assigned to the user.
+ #
+ # @option params [required, String] :role_name
+ # The friendly name of the role that is assigned to the user.
+ #
+ # @option params [required, String] :account_id
+ # The identifier for the AWS account that is assigned to the user.
+ #
+ # @option params [required, String] :access_token
+ # The token issued by the `CreateToken` API call. For more information,
+ # see [CreateToken][1] in the *IAM Identity Center OIDC API Reference
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ #
+ # @return [Types::GetRoleCredentialsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetRoleCredentialsResponse#role_credentials #role_credentials} => Types::RoleCredentials
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_role_credentials({
+ # role_name: "RoleNameType", # required
+ # account_id: "AccountIdType", # required
+ # access_token: "AccessTokenType", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.role_credentials.access_key_id #=> String
+ # resp.role_credentials.secret_access_key #=> String
+ # resp.role_credentials.session_token #=> String
+ # resp.role_credentials.expiration #=> Integer
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials AWS API Documentation
+ #
+ # @overload get_role_credentials(params = {})
+ # @param [Hash] params ({})
+ def get_role_credentials(params = {}, options = {})
+ req = build_request(:get_role_credentials, params)
+ req.send_request(options)
+ end
+
+ # Lists all roles that are assigned to the user for a given AWS account.
+ #
+ # @option params [String] :next_token
+ # The page token from the previous response output when you request
+ # subsequent pages.
+ #
+ # @option params [Integer] :max_results
+ # The number of items that clients can request per page.
+ #
+ # @option params [required, String] :access_token
+ # The token issued by the `CreateToken` API call. For more information,
+ # see [CreateToken][1] in the *IAM Identity Center OIDC API Reference
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ #
+ # @option params [required, String] :account_id
+ # The identifier for the AWS account that is assigned to the user.
+ #
+ # @return [Types::ListAccountRolesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListAccountRolesResponse#next_token #next_token} => String
+ # * {Types::ListAccountRolesResponse#role_list #role_list} => Array<Types::RoleInfo>
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
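+ #
+ # Because the response is pageable, `each_page` can be used to follow
+ # `next_token` automatically; a minimal sketch (illustrative only; the
+ # account id below is a placeholder value and `access_token` would come
+ # from an SSO OIDC `create_token` call):
+ #
+ # resp = client.list_account_roles(account_id: "123456789012", access_token: access_token)
+ # resp.each_page do |page|
+ # page.role_list.each { |role| puts role.role_name }
+ # end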
+ # + # @example Request syntax with placeholder values + # + # resp = client.list_account_roles({ + # next_token: "NextTokenType", + # max_results: 1, + # access_token: "AccessTokenType", # required + # account_id: "AccountIdType", # required + # }) + # + # @example Response structure + # + # resp.next_token #=> String + # resp.role_list #=> Array + # resp.role_list[0].role_name #=> String + # resp.role_list[0].account_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles AWS API Documentation + # + # @overload list_account_roles(params = {}) + # @param [Hash] params ({}) + def list_account_roles(params = {}, options = {}) + req = build_request(:list_account_roles, params) + req.send_request(options) + end + + # Lists all AWS accounts assigned to the user. These AWS accounts are + # assigned by the administrator of the account. For more information, + # see [Assign User Access][1] in the *IAM Identity Center User Guide*. + # This operation returns a paginated response. + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers + # + # @option params [String] :next_token + # (Optional) When requesting subsequent pages, this is the page token + # from the previous response output. + # + # @option params [Integer] :max_results + # This is the number of items clients can request per page. + # + # @option params [required, String] :access_token + # The token issued by the `CreateToken` API call. For more information, + # see [CreateToken][1] in the *IAM Identity Center OIDC API Reference + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + # + # @return [Types::ListAccountsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListAccountsResponse#next_token #next_token} => String + # * {Types::ListAccountsResponse#account_list #account_list} => Array<Types::AccountInfo> + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # @example Request syntax with placeholder values + # + # resp = client.list_accounts({ + # next_token: "NextTokenType", + # max_results: 1, + # access_token: "AccessTokenType", # required + # }) + # + # @example Response structure + # + # resp.next_token #=> String + # resp.account_list #=> Array + # resp.account_list[0].account_id #=> String + # resp.account_list[0].account_name #=> String + # resp.account_list[0].email_address #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts AWS API Documentation + # + # @overload list_accounts(params = {}) + # @param [Hash] params ({}) + def list_accounts(params = {}, options = {}) + req = build_request(:list_accounts, params) + req.send_request(options) + end + + # Removes the locally stored SSO tokens from the client-side cache and + # sends an API call to the IAM Identity Center service to invalidate the + # corresponding server-side IAM Identity Center sign in session. + # + # If a user uses IAM Identity Center to access the AWS CLI, the user’s + # IAM Identity Center sign in session is used to obtain an IAM session, + # as specified in the corresponding IAM Identity Center permission set. 
+ # More specifically, IAM Identity Center assumes an IAM role in the + # target account on behalf of the user, and the corresponding temporary + # AWS credentials are returned to the client. + # + # After user logout, any existing IAM role sessions that were created by + # using IAM Identity Center permission sets continue based on the + # duration configured in the permission set. For more information, see + # [User authentications][1] in the *IAM Identity Center User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html + # + # @option params [required, String] :access_token + # The token issued by the `CreateToken` API call. For more information, + # see [CreateToken][1] in the *IAM Identity Center OIDC API Reference + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.logout({ + # access_token: "AccessTokenType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout AWS API Documentation + # + # @overload logout(params = {}) + # @param [Hash] params ({}) + def logout(params = {}, options = {}) + req = build_request(:logout, params) + req.send_request(options) + end + + # @!endgroup + + # @param params ({}) + # @api private + def build_request(operation_name, params = {}) + handlers = @handlers.for(operation_name) + context = Seahorse::Client::RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + context[:gem_name] = 'aws-sdk-core' + context[:gem_version] = '3.171.0' + Seahorse::Client::Request.new(handlers, context) + end + + # @api private + # @deprecated + def waiter_names + [] + end + + class << self + + # @api private + attr_reader :identifier + + # @api private + def errors_module + Errors + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client_api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client_api.rb new file mode 100644 index 0000000..e05d5d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/client_api.rb @@ -0,0 +1,190 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + # @api private + module ClientApi + + include Seahorse::Model + + AccessKeyType = Shapes::StringShape.new(name: 'AccessKeyType') + AccessTokenType = Shapes::StringShape.new(name: 'AccessTokenType') + AccountIdType = Shapes::StringShape.new(name: 'AccountIdType') + AccountInfo = Shapes::StructureShape.new(name: 'AccountInfo') + AccountListType = Shapes::ListShape.new(name: 'AccountListType') + AccountNameType = Shapes::StringShape.new(name: 'AccountNameType') + EmailAddressType = Shapes::StringShape.new(name: 'EmailAddressType') + ErrorDescription = Shapes::StringShape.new(name: 'ErrorDescription') + ExpirationTimestampType = Shapes::IntegerShape.new(name: 'ExpirationTimestampType') + GetRoleCredentialsRequest = Shapes::StructureShape.new(name: 'GetRoleCredentialsRequest') + GetRoleCredentialsResponse = Shapes::StructureShape.new(name: 'GetRoleCredentialsResponse') + InvalidRequestException = Shapes::StructureShape.new(name: 'InvalidRequestException') + ListAccountRolesRequest = Shapes::StructureShape.new(name: 'ListAccountRolesRequest') + ListAccountRolesResponse = Shapes::StructureShape.new(name: 'ListAccountRolesResponse') + ListAccountsRequest = Shapes::StructureShape.new(name: 'ListAccountsRequest') + ListAccountsResponse = Shapes::StructureShape.new(name: 'ListAccountsResponse') + LogoutRequest = Shapes::StructureShape.new(name: 'LogoutRequest') + MaxResultType = Shapes::IntegerShape.new(name: 'MaxResultType') + NextTokenType = Shapes::StringShape.new(name: 'NextTokenType') + ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException') + RoleCredentials = Shapes::StructureShape.new(name: 'RoleCredentials') + RoleInfo = Shapes::StructureShape.new(name: 'RoleInfo') + RoleListType = Shapes::ListShape.new(name: 'RoleListType') + RoleNameType = Shapes::StringShape.new(name: 'RoleNameType') + SecretAccessKeyType = Shapes::StringShape.new(name: 'SecretAccessKeyType') + SessionTokenType = Shapes::StringShape.new(name: 'SessionTokenType') + TooManyRequestsException = Shapes::StructureShape.new(name: 'TooManyRequestsException') + UnauthorizedException = Shapes::StructureShape.new(name: 'UnauthorizedException') + + AccountInfo.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountIdType, location_name: "accountId")) + AccountInfo.add_member(:account_name, Shapes::ShapeRef.new(shape: AccountNameType, location_name: "accountName")) + AccountInfo.add_member(:email_address, Shapes::ShapeRef.new(shape: EmailAddressType, location_name: "emailAddress")) + AccountInfo.struct_class = Types::AccountInfo + + AccountListType.member = Shapes::ShapeRef.new(shape: AccountInfo) + + GetRoleCredentialsRequest.add_member(:role_name, Shapes::ShapeRef.new(shape: RoleNameType, required: true, location: "querystring", location_name: "role_name")) + GetRoleCredentialsRequest.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountIdType, required: true, location: "querystring", location_name: "account_id")) + GetRoleCredentialsRequest.add_member(:access_token, Shapes::ShapeRef.new(shape: AccessTokenType, required: true, location: "header", location_name: "x-amz-sso_bearer_token")) + GetRoleCredentialsRequest.struct_class = Types::GetRoleCredentialsRequest + + GetRoleCredentialsResponse.add_member(:role_credentials, Shapes::ShapeRef.new(shape: RoleCredentials, location_name: "roleCredentials")) + 
GetRoleCredentialsResponse.struct_class = Types::GetRoleCredentialsResponse + + InvalidRequestException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "message")) + InvalidRequestException.struct_class = Types::InvalidRequestException + + ListAccountRolesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextTokenType, location: "querystring", location_name: "next_token")) + ListAccountRolesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResultType, location: "querystring", location_name: "max_result")) + ListAccountRolesRequest.add_member(:access_token, Shapes::ShapeRef.new(shape: AccessTokenType, required: true, location: "header", location_name: "x-amz-sso_bearer_token")) + ListAccountRolesRequest.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountIdType, required: true, location: "querystring", location_name: "account_id")) + ListAccountRolesRequest.struct_class = Types::ListAccountRolesRequest + + ListAccountRolesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextTokenType, location_name: "nextToken")) + ListAccountRolesResponse.add_member(:role_list, Shapes::ShapeRef.new(shape: RoleListType, location_name: "roleList")) + ListAccountRolesResponse.struct_class = Types::ListAccountRolesResponse + + ListAccountsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextTokenType, location: "querystring", location_name: "next_token")) + ListAccountsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResultType, location: "querystring", location_name: "max_result")) + ListAccountsRequest.add_member(:access_token, Shapes::ShapeRef.new(shape: AccessTokenType, required: true, location: "header", location_name: "x-amz-sso_bearer_token")) + ListAccountsRequest.struct_class = Types::ListAccountsRequest + + ListAccountsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextTokenType, location_name: "nextToken")) + ListAccountsResponse.add_member(:account_list, Shapes::ShapeRef.new(shape: AccountListType, location_name: "accountList")) + ListAccountsResponse.struct_class = Types::ListAccountsResponse + + LogoutRequest.add_member(:access_token, Shapes::ShapeRef.new(shape: AccessTokenType, required: true, location: "header", location_name: "x-amz-sso_bearer_token")) + LogoutRequest.struct_class = Types::LogoutRequest + + ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "message")) + ResourceNotFoundException.struct_class = Types::ResourceNotFoundException + + RoleCredentials.add_member(:access_key_id, Shapes::ShapeRef.new(shape: AccessKeyType, location_name: "accessKeyId")) + RoleCredentials.add_member(:secret_access_key, Shapes::ShapeRef.new(shape: SecretAccessKeyType, location_name: "secretAccessKey")) + RoleCredentials.add_member(:session_token, Shapes::ShapeRef.new(shape: SessionTokenType, location_name: "sessionToken")) + RoleCredentials.add_member(:expiration, Shapes::ShapeRef.new(shape: ExpirationTimestampType, location_name: "expiration")) + RoleCredentials.struct_class = Types::RoleCredentials + + RoleInfo.add_member(:role_name, Shapes::ShapeRef.new(shape: RoleNameType, location_name: "roleName")) + RoleInfo.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountIdType, location_name: "accountId")) + RoleInfo.struct_class = Types::RoleInfo + + RoleListType.member = Shapes::ShapeRef.new(shape: RoleInfo) + + TooManyRequestsException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: 
"message")) + TooManyRequestsException.struct_class = Types::TooManyRequestsException + + UnauthorizedException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "message")) + UnauthorizedException.struct_class = Types::UnauthorizedException + + + # @api private + API = Seahorse::Model::Api.new.tap do |api| + + api.version = "2019-06-10" + + api.metadata = { + "apiVersion" => "2019-06-10", + "endpointPrefix" => "portal.sso", + "jsonVersion" => "1.1", + "protocol" => "rest-json", + "serviceAbbreviation" => "SSO", + "serviceFullName" => "AWS Single Sign-On", + "serviceId" => "SSO", + "signatureVersion" => "v4", + "signingName" => "awsssoportal", + "uid" => "sso-2019-06-10", + } + + api.add_operation(:get_role_credentials, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetRoleCredentials" + o.http_method = "GET" + o.http_request_uri = "/federation/credentials" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: GetRoleCredentialsRequest) + o.output = Shapes::ShapeRef.new(shape: GetRoleCredentialsResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + + api.add_operation(:list_account_roles, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListAccountRoles" + o.http_method = "GET" + o.http_request_uri = "/assignment/roles" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: ListAccountRolesRequest) + o.output = Shapes::ShapeRef.new(shape: ListAccountRolesResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + + api.add_operation(:list_accounts, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListAccounts" + o.http_method = "GET" + o.http_request_uri = "/assignment/accounts" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: ListAccountsRequest) + o.output = Shapes::ShapeRef.new(shape: ListAccountsResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + + api.add_operation(:logout, Seahorse::Model::Operation.new.tap do |o| + o.name = "Logout" + o.http_method = "POST" + o.http_request_uri = "/logout" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: LogoutRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + end) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/customizations.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/customizations.rb new file mode 100644 index 
0000000..ebd4c26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/customizations.rb @@ -0,0 +1 @@ +# frozen_string_literal: true \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_parameters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_parameters.rb new file mode 100644 index 0000000..d16fc66 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_parameters.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + # Endpoint parameters used to influence endpoints per request. + # + # @!attribute region + # The AWS region used to dispatch the request. + # + # @return [String] + # + # @!attribute use_dual_stack + # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error. + # + # @return [Boolean] + # + # @!attribute use_fips + # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error. + # + # @return [Boolean] + # + # @!attribute endpoint + # Override the endpoint used to send this request + # + # @return [String] + # + EndpointParameters = Struct.new( + :region, + :use_dual_stack, + :use_fips, + :endpoint, + ) do + include Aws::Structure + + # @api private + class << self + PARAM_MAP = { + 'Region' => :region, + 'UseDualStack' => :use_dual_stack, + 'UseFIPS' => :use_fips, + 'Endpoint' => :endpoint, + }.freeze + end + + def initialize(options = {}) + self[:region] = options[:region] + self[:use_dual_stack] = options[:use_dual_stack] + self[:use_dual_stack] = false if self[:use_dual_stack].nil? + if self[:use_dual_stack].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack" + end + self[:use_fips] = options[:use_fips] + self[:use_fips] = false if self[:use_fips].nil? + if self[:use_fips].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_fips" + end + self[:endpoint] = options[:endpoint] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_provider.rb new file mode 100644 index 0000000..c32a807 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoint_provider.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + class EndpointProvider + def resolve_endpoint(parameters) + region = parameters.region + use_dual_stack = parameters.use_dual_stack + use_fips = parameters.use_fips + endpoint = parameters.endpoint + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" + end + return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://portal.sso-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + return Aws::Endpoints::Endpoint.new(url: "https://portal.sso-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://portal.sso.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" + end + return Aws::Endpoints::Endpoint.new(url: "https://portal.sso.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, 'No endpoint could be resolved' + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoints.rb new file mode 100644 index 0000000..fdbb3d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/endpoints.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::SSO + module Endpoints + + class GetRoleCredentials + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSO::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListAccountRoles + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSO::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListAccounts + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSO::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class Logout + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSO::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/errors.rb new file mode 100644 index 0000000..99cdb85 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/errors.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + + # When SSO returns an error response, the Ruby SDK constructs and raises an error. + # These errors all extend Aws::SSO::Errors::ServiceError < {Aws::Errors::ServiceError} + # + # You can rescue all SSO errors using ServiceError: + # + # begin + # # do stuff + # rescue Aws::SSO::Errors::ServiceError + # # rescues all SSO API errors + # end + # + # + # ## Request Context + # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns + # information about the request that generated the error. + # See {Seahorse::Client::RequestContext} for more information. + # + # ## Error Classes + # * {InvalidRequestException} + # * {ResourceNotFoundException} + # * {TooManyRequestsException} + # * {UnauthorizedException} + # + # Additionally, error classes are dynamically generated for service errors based on the error code + # if they are not defined above. 
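+ #
+ # A minimal sketch of rescuing one specific class (illustrative only; the
+ # `client` and `access_token` variables here are placeholders):
+ #
+ # begin
+ # client.list_accounts(access_token: access_token)
+ # rescue Aws::SSO::Errors::UnauthorizedException => e
+ # # the bearer token was missing, expired, or otherwise rejected
+ # warn("SSO token rejected: #{e.message}")
+ # end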
+ module Errors + + extend Aws::Errors::DynamicErrors + + class InvalidRequestException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSO::Types::InvalidRequestException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class ResourceNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSO::Types::ResourceNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class TooManyRequestsException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSO::Types::TooManyRequestsException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class UnauthorizedException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSO::Types::UnauthorizedException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/plugins/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/plugins/endpoints.rb new file mode 100644 index 0000000..d5a341d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/plugins/endpoints.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::SSO + module Plugins + class Endpoints < Seahorse::Client::Plugin + option( + :endpoint_provider, + doc_type: 'Aws::SSO::EndpointProvider', + docstring: 'The endpoint provider used to resolve endpoints. Any '\ + 'object that responds to `#resolve_endpoint(parameters)` '\ + 'where `parameters` is a Struct similar to '\ + '`Aws::SSO::EndpointParameters`' + ) do |cfg| + Aws::SSO::EndpointProvider.new + end + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + # If endpoint was discovered, do not resolve or apply the endpoint. 
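+ # (Endpoint discovery is a separate, opt-in mechanism; when it has
+ # already supplied an endpoint for this request, the rules-based
+ # provider below is skipped and the discovered endpoint is kept.)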
+ unless context[:discovered_endpoint] + params = parameters_for_operation(context) + endpoint = context.config.endpoint_provider.resolve_endpoint(params) + + context.http_request.endpoint = endpoint.url + apply_endpoint_headers(context, endpoint.headers) + end + + context[:endpoint_params] = params + context[:auth_scheme] = + Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + @handler.call(context) + end + + private + + def apply_endpoint_headers(context, headers) + headers.each do |key, values| + value = values + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + + context.http_request.headers[key] = value + end + end + + def parameters_for_operation(context) + case context.operation_name + when :get_role_credentials + Aws::SSO::Endpoints::GetRoleCredentials.build(context) + when :list_account_roles + Aws::SSO::Endpoints::ListAccountRoles.build(context) + when :list_accounts + Aws::SSO::Endpoints::ListAccounts.build(context) + when :logout + Aws::SSO::Endpoints::Logout.build(context) + end + end + end + + def add_handlers(handlers, _config) + handlers.add(Handler, step: :build, priority: 75) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/resource.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/resource.rb new file mode 100644 index 0000000..85d2977 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/resource.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + + class Resource + + # @param options ({}) + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new(options) + end + + # @return [Client] + def client + @client + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/types.rb new file mode 100644 index 0000000..78fa120 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sso/types.rb @@ -0,0 +1,317 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSO + module Types + + # Provides information about your AWS account. + # + # @!attribute [rw] account_id + # The identifier of the AWS account that is assigned to the user. + # @return [String] + # + # @!attribute [rw] account_name + # The display name of the AWS account that is assigned to the user. + # @return [String] + # + # @!attribute [rw] email_address + # The email address of the AWS account that is assigned to the user. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/AccountInfo AWS API Documentation + # + class AccountInfo < Struct.new( + :account_id, + :account_name, + :email_address) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] role_name + # The friendly name of the role that is assigned to the user. + # @return [String] + # + # @!attribute [rw] account_id + # The identifier for the AWS account that is assigned to the user. 
+ # @return [String]
+ #
+ # @!attribute [rw] access_token
+ # The token issued by the `CreateToken` API call. For more
+ # information, see [CreateToken][1] in the *IAM Identity Center OIDC
+ # API Reference Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentialsRequest AWS API Documentation
+ #
+ class GetRoleCredentialsRequest < Struct.new(
+ :role_name,
+ :account_id,
+ :access_token)
+ SENSITIVE = [:access_token]
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] role_credentials
+ # The credentials for the role that is assigned to the user.
+ # @return [Types::RoleCredentials]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentialsResponse AWS API Documentation
+ #
+ class GetRoleCredentialsResponse < Struct.new(
+ :role_credentials)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Indicates that a problem occurred with the input to the request. For
+ # example, a required parameter might be missing or out of range.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/InvalidRequestException AWS API Documentation
+ #
+ class InvalidRequestException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] next_token
+ # The page token from the previous response output when you request
+ # subsequent pages.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # The number of items that clients can request per page.
+ # @return [Integer]
+ #
+ # @!attribute [rw] access_token
+ # The token issued by the `CreateToken` API call. For more
+ # information, see [CreateToken][1] in the *IAM Identity Center OIDC
+ # API Reference Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ # @return [String]
+ #
+ # @!attribute [rw] account_id
+ # The identifier for the AWS account that is assigned to the user.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRolesRequest AWS API Documentation
+ #
+ class ListAccountRolesRequest < Struct.new(
+ :next_token,
+ :max_results,
+ :access_token,
+ :account_id)
+ SENSITIVE = [:access_token]
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] next_token
+ # The page token to use in a subsequent request to retrieve the next
+ # page of results.
+ # @return [String]
+ #
+ # @!attribute [rw] role_list
+ # A paginated response with the list of roles and the next token if
+ # more results are available.
+ # @return [Array<Types::RoleInfo>]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRolesResponse AWS API Documentation
+ #
+ class ListAccountRolesResponse < Struct.new(
+ :next_token,
+ :role_list)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] next_token
+ # (Optional) When requesting subsequent pages, this is the page token
+ # from the previous response output.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # This is the number of items clients can request per page.
+ # @return [Integer]
+ #
+ # @!attribute [rw] access_token
+ # The token issued by the `CreateToken` API call. For more
+ # information, see [CreateToken][1] in the *IAM Identity Center OIDC
+ # API Reference Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountsRequest AWS API Documentation
+ #
+ class ListAccountsRequest < Struct.new(
+ :next_token,
+ :max_results,
+ :access_token)
+ SENSITIVE = [:access_token]
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] next_token
+ # The page token to use in a subsequent request to retrieve the next
+ # page of results.
+ # @return [String]
+ #
+ # @!attribute [rw] account_list
+ # A paginated response with the list of account information and the
+ # next token if more results are available.
+ # @return [Array<Types::AccountInfo>]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountsResponse AWS API Documentation
+ #
+ class ListAccountsResponse < Struct.new(
+ :next_token,
+ :account_list)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] access_token
+ # The token issued by the `CreateToken` API call. For more
+ # information, see [CreateToken][1] in the *IAM Identity Center OIDC
+ # API Reference Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/LogoutRequest AWS API Documentation
+ #
+ class LogoutRequest < Struct.new(
+ :access_token)
+ SENSITIVE = [:access_token]
+ include Aws::Structure
+ end
+
+ # The specified resource doesn't exist.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ResourceNotFoundException AWS API Documentation
+ #
+ class ResourceNotFoundException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Provides information about the role credentials that are assigned to
+ # the user.
+ #
+ # @!attribute [rw] access_key_id
+ # The identifier used for the temporary security credentials. For more
+ # information, see [Using Temporary Security Credentials to Request
+ # Access to AWS Resources][1] in the *AWS IAM User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ # @return [String]
+ #
+ # @!attribute [rw] secret_access_key
+ # The key that is used to sign the request. For more information, see
+ # [Using Temporary Security Credentials to Request Access to AWS
+ # Resources][1] in the *AWS IAM User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ # @return [String]
+ #
+ # @!attribute [rw] session_token
+ # The token used for temporary credentials. For more information, see
+ # [Using Temporary Security Credentials to Request Access to AWS
+ # Resources][1] in the *AWS IAM User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ # @return [String]
+ #
+ # @!attribute [rw] expiration
+ # The date on which temporary security credentials expire.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/RoleCredentials AWS API Documentation
+ #
+ class RoleCredentials < Struct.new(
+ :access_key_id,
+ :secret_access_key,
+ :session_token,
+ :expiration)
+ SENSITIVE = [:secret_access_key, :session_token]
+ include Aws::Structure
+ end
+
+ # Provides information about the role that is assigned to the user.
+ # + # @!attribute [rw] role_name + # The friendly name of the role that is assigned to the user. + # @return [String] + # + # @!attribute [rw] account_id + # The identifier of the AWS account assigned to the user. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/RoleInfo AWS API Documentation + # + class RoleInfo < Struct.new( + :role_name, + :account_id) + SENSITIVE = [] + include Aws::Structure + end + + # Indicates that the request is being made too frequently and is more + # than what the server can handle. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/TooManyRequestsException AWS API Documentation + # + class TooManyRequestsException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # Indicates that the request is not authorized. This can happen due to + # an invalid access token in the request. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/UnauthorizedException AWS API Documentation + # + class UnauthorizedException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc.rb new file mode 100644 index 0000000..29e245c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +unless Module.const_defined?(:Aws) + require 'aws-sdk-core' + require 'aws-sigv4' +end + +require_relative 'aws-sdk-ssooidc/types' +require_relative 'aws-sdk-ssooidc/client_api' +require_relative 'aws-sdk-ssooidc/plugins/endpoints.rb' +require_relative 'aws-sdk-ssooidc/client' +require_relative 'aws-sdk-ssooidc/errors' +require_relative 'aws-sdk-ssooidc/resource' +require_relative 'aws-sdk-ssooidc/endpoint_parameters' +require_relative 'aws-sdk-ssooidc/endpoint_provider' +require_relative 'aws-sdk-ssooidc/endpoints' +require_relative 'aws-sdk-ssooidc/customizations' + +# This module provides support for AWS SSO OIDC. This module is available in the +# `aws-sdk-core` gem. +# +# # Client +# +# The {Client} class provides one method for each API operation. Operation +# methods each accept a hash of request parameters and return a response +# structure. +# +# ssooidc = Aws::SSOOIDC::Client.new +# resp = ssooidc.create_token(params) +# +# See {Client} for more information. +# +# # Errors +# +# Errors returned from AWS SSO OIDC are defined in the +# {Errors} module and all extend {Errors::ServiceError}. +# +# begin +# # do stuff +# rescue Aws::SSOOIDC::Errors::ServiceError +# # rescues all AWS SSO OIDC API errors +# end +# +# See {Errors} for more information. 
+# +# @!group service +module Aws::SSOOIDC + + GEM_VERSION = '3.171.0' + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client.rb new file mode 100644 index 0000000..49afdc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client.rb @@ -0,0 +1,606 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +require 'seahorse/client/plugins/content_length.rb' +require 'aws-sdk-core/plugins/credentials_configuration.rb' +require 'aws-sdk-core/plugins/logging.rb' +require 'aws-sdk-core/plugins/param_converter.rb' +require 'aws-sdk-core/plugins/param_validator.rb' +require 'aws-sdk-core/plugins/user_agent.rb' +require 'aws-sdk-core/plugins/helpful_socket_errors.rb' +require 'aws-sdk-core/plugins/retry_errors.rb' +require 'aws-sdk-core/plugins/global_configuration.rb' +require 'aws-sdk-core/plugins/regional_endpoint.rb' +require 'aws-sdk-core/plugins/endpoint_discovery.rb' +require 'aws-sdk-core/plugins/endpoint_pattern.rb' +require 'aws-sdk-core/plugins/response_paging.rb' +require 'aws-sdk-core/plugins/stub_responses.rb' +require 'aws-sdk-core/plugins/idempotency_token.rb' +require 'aws-sdk-core/plugins/jsonvalue_converter.rb' +require 'aws-sdk-core/plugins/client_metrics_plugin.rb' +require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb' +require 'aws-sdk-core/plugins/transfer_encoding.rb' +require 'aws-sdk-core/plugins/http_checksum.rb' +require 'aws-sdk-core/plugins/checksum_algorithm.rb' +require 'aws-sdk-core/plugins/defaults_mode.rb' +require 'aws-sdk-core/plugins/recursion_detection.rb' +require 'aws-sdk-core/plugins/sign.rb' +require 'aws-sdk-core/plugins/protocols/rest_json.rb' + +Aws::Plugins::GlobalConfiguration.add_identifier(:ssooidc) + +module Aws::SSOOIDC + # An API client for SSOOIDC. To construct a client, you need to configure a `:region` and `:credentials`. + # + # client = Aws::SSOOIDC::Client.new( + # region: region_name, + # credentials: credentials, + # # ... + # ) + # + # For details on configuring region and credentials see + # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html). + # + # See {#initialize} for a full list of supported configuration options. 
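+ #
+ # A minimal device-authorization sketch (illustrative only; `client_id`,
+ # `client_secret`, and `device_code` are placeholders obtained from
+ # prior `register_client` and `start_device_authorization` calls):
+ #
+ # ssooidc = Aws::SSOOIDC::Client.new(region: "us-east-1")
+ # resp = ssooidc.create_token(
+ # client_id: client_id,
+ # client_secret: client_secret,
+ # grant_type: "urn:ietf:params:oauth:grant-type:device_code",
+ # device_code: device_code,
+ # )
+ # access_token = resp.access_token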
+ class Client < Seahorse::Client::Base
+
+ include Aws::ClientStubs
+
+ @identifier = :ssooidc
+
+ set_api(ClientApi::API)
+
+ add_plugin(Seahorse::Client::Plugins::ContentLength)
+ add_plugin(Aws::Plugins::CredentialsConfiguration)
+ add_plugin(Aws::Plugins::Logging)
+ add_plugin(Aws::Plugins::ParamConverter)
+ add_plugin(Aws::Plugins::ParamValidator)
+ add_plugin(Aws::Plugins::UserAgent)
+ add_plugin(Aws::Plugins::HelpfulSocketErrors)
+ add_plugin(Aws::Plugins::RetryErrors)
+ add_plugin(Aws::Plugins::GlobalConfiguration)
+ add_plugin(Aws::Plugins::RegionalEndpoint)
+ add_plugin(Aws::Plugins::EndpointDiscovery)
+ add_plugin(Aws::Plugins::EndpointPattern)
+ add_plugin(Aws::Plugins::ResponsePaging)
+ add_plugin(Aws::Plugins::StubResponses)
+ add_plugin(Aws::Plugins::IdempotencyToken)
+ add_plugin(Aws::Plugins::JsonvalueConverter)
+ add_plugin(Aws::Plugins::ClientMetricsPlugin)
+ add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
+ add_plugin(Aws::Plugins::TransferEncoding)
+ add_plugin(Aws::Plugins::HttpChecksum)
+ add_plugin(Aws::Plugins::ChecksumAlgorithm)
+ add_plugin(Aws::Plugins::DefaultsMode)
+ add_plugin(Aws::Plugins::RecursionDetection)
+ add_plugin(Aws::Plugins::Sign)
+ add_plugin(Aws::Plugins::Protocols::RestJson)
+ add_plugin(Aws::SSOOIDC::Plugins::Endpoints)
+
+ # @overload initialize(options)
+ # @param [Hash] options
+ # @option options [required, Aws::CredentialProvider] :credentials
+ # Your AWS credentials. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
+ # credentials.
+ #
+ # * `Aws::SharedCredentials` - Used for loading static credentials from a
+ # shared file, such as `~/.aws/config`.
+ #
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+ #
+ # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
+ # assume a role after providing credentials via the web.
+ #
+ # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
+ # access token generated from `aws login`.
+ #
+ # * `Aws::ProcessCredentials` - Used for loading credentials from a
+ # process that outputs to stdout.
+ #
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
+ # from an EC2 IMDS on an EC2 instance.
+ #
+ # * `Aws::ECSCredentials` - Used for loading credentials from
+ # instances running in ECS.
+ #
+ # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
+ # from the Cognito Identity service.
+ #
+ # When `:credentials` are not configured directly, the following
+ # locations will be searched for credentials:
+ #
+ # * `Aws.config[:credentials]`
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ # * EC2/ECS IMDS instance profile - When used by default, the timeouts
+ # are very aggressive. Construct and pass an instance of
+ # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
+ # enable retries and extended timeouts. Instance profile credential
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
+ # to true.
+ #
+ # @option options [required, String] :region
+ # The AWS region to connect to. The configured `:region` is
+ # used to determine the service `:endpoint`. When not passed,
+ # a default `:region` is searched for in the following locations:
+ #
+ # * `Aws.config[:region]`
+ # * `ENV['AWS_REGION']`
+ # * `ENV['AMAZON_REGION']`
+ # * `ENV['AWS_DEFAULT_REGION']`
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ #
+ # @option options [String] :access_key_id
+ #
+ # @option options [Boolean] :active_endpoint_cache (false)
+ # When set to `true`, a thread polling for endpoints will be running in
+ # the background every 60 secs (default). Defaults to `false`.
+ #
+ # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
+ # Used only in `adaptive` retry mode. When true, the request will sleep
+ # until there is sufficient client side capacity to retry the request.
+ # When false, the request will raise a `RetryCapacityNotAvailableError` and will
+ # not retry instead of sleeping.
+ #
+ # @option options [Boolean] :client_side_monitoring (false)
+ # When `true`, client-side metrics will be collected for all API requests from
+ # this client.
+ #
+ # @option options [String] :client_side_monitoring_client_id ("")
+ # Allows you to provide an identifier for this client which will be attached to
+ # all generated client side metrics. Defaults to an empty string.
+ #
+ # @option options [String] :client_side_monitoring_host ("127.0.0.1")
+ # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
+ # side monitoring agent is running on, where client metrics will be published via UDP.
+ #
+ # @option options [Integer] :client_side_monitoring_port (31000)
+ # Required for publishing client metrics. The port that the client side monitoring
+ # agent is running on, where client metrics will be published via UDP.
+ #
+ # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
+ # Allows you to provide a custom client-side monitoring publisher class. By default,
+ # will use the Client Side Monitoring Agent Publisher.
+ #
+ # @option options [Boolean] :convert_params (true)
+ # When `true`, an attempt is made to coerce request parameters into
+ # the required types.
+ #
+ # @option options [Boolean] :correct_clock_skew (true)
+ # Used only in `standard` and `adaptive` retry modes. Specifies whether to apply
+ # a clock skew correction and retry requests with skewed client clocks.
+ #
+ # @option options [String] :defaults_mode ("legacy")
+ # See {Aws::DefaultsModeConfiguration} for a list of the
+ # accepted modes and the configuration defaults that are included.
+ #
+ # @option options [Boolean] :disable_host_prefix_injection (false)
+ # Set to `true` to disable the SDK automatically adding a host prefix
+ # to the default service endpoint when available.
+ #
+ # @option options [String] :endpoint
+ # The client endpoint is normally constructed from the `:region`
+ # option. You should only configure an `:endpoint` when connecting
+ # to test or custom endpoints. This should be a valid HTTP(S) URI.
+ #
+ # @option options [Integer] :endpoint_cache_max_entries (1000)
+ # Used for the maximum size limit of the LRU cache storing endpoints data
+ # for endpoint discovery enabled operations. Defaults to 1000.
+ #
+ # @option options [Integer] :endpoint_cache_max_threads (10)
+ # The maximum number of threads in use for polling endpoints to be cached. Defaults to 10.
+ # + # @option options [Integer] :endpoint_cache_poll_interval (60) + # When :endpoint_discovery and :active_endpoint_cache is enabled, + # Use this option to config the time interval in seconds for making + # requests fetching endpoints information. Defaults to 60 sec. + # + # @option options [Boolean] :endpoint_discovery (false) + # When set to `true`, endpoint discovery will be enabled for operations when available. + # + # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default) + # The log formatter. + # + # @option options [Symbol] :log_level (:info) + # The log level to send messages to the `:logger` at. + # + # @option options [Logger] :logger + # The Logger instance to send log messages to. If this option + # is not set, logging will be disabled. + # + # @option options [Integer] :max_attempts (3) + # An integer representing the maximum number attempts that will be made for + # a single request, including the initial attempt. For example, + # setting this value to 5 will result in a request being retried up to + # 4 times. Used in `standard` and `adaptive` retry modes. + # + # @option options [String] :profile ("default") + # Used when loading credentials from the shared credentials file + # at HOME/.aws/credentials. When not specified, 'default' is used. + # + # @option options [Proc] :retry_backoff + # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay. + # This option is only used in the `legacy` retry mode. + # + # @option options [Float] :retry_base_delay (0.3) + # The base delay in seconds used by the default backoff function. This option + # is only used in the `legacy` retry mode. + # + # @option options [Symbol] :retry_jitter (:none) + # A delay randomiser function used by the default backoff function. + # Some predefined functions can be referenced by name - :none, :equal, :full, + # otherwise a Proc that takes and returns a number. This option is only used + # in the `legacy` retry mode. + # + # @see https://www.awsarchitectureblog.com/2015/03/backoff.html + # + # @option options [Integer] :retry_limit (3) + # The maximum number of times to retry failed requests. Only + # ~ 500 level server errors and certain ~ 400 level client errors + # are retried. Generally, these are throttling errors, data + # checksum errors, networking errors, timeout errors, auth errors, + # endpoint discovery, and errors from expired credentials. + # This option is only used in the `legacy` retry mode. + # + # @option options [Integer] :retry_max_delay (0) + # The maximum number of seconds to delay between retries (0 for no limit) + # used by the default backoff function. This option is only used in the + # `legacy` retry mode. + # + # @option options [String] :retry_mode ("legacy") + # Specifies which retry algorithm to use. Values are: + # + # * `legacy` - The pre-existing retry behavior. This is default value if + # no retry mode is provided. + # + # * `standard` - A standardized set of retry rules across the AWS SDKs. + # This includes support for retry quotas, which limit the number of + # unsuccessful retries a client can make. + # + # * `adaptive` - An experimental retry mode that includes all the + # functionality of `standard` mode along with automatic client side + # throttling. This is a provisional mode that may change behavior + # in the future. 
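The retry options above compose at client construction time. A minimal sketch, assuming a hypothetical region and illustrative values rather than anything defined in this file:

    client = Aws::SSOOIDC::Client.new(
      region: 'us-east-1',     # hypothetical region, for illustration only
      retry_mode: 'standard',  # opt out of the 'legacy' default described above
      max_attempts: 5          # one initial attempt plus up to four retries
    )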
+    #
+    #   @option options [String] :secret_access_key
+    #
+    #   @option options [String] :session_token
+    #
+    #   @option options [Boolean] :stub_responses (false)
+    #     Causes the client to return stubbed responses. By default, fake
+    #     responses are generated and returned. You can specify
+    #     the response data to return or errors to raise by calling
+    #     {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+    #
+    #     ** Please note ** When response stubbing is enabled, no HTTP
+    #     requests are made, and retries are disabled.
+    #
+    #   @option options [Aws::TokenProvider] :token_provider
+    #     A Bearer Token Provider. This can be an instance of any one of the
+    #     following classes:
+    #
+    #     * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
+    #       tokens.
+    #
+    #     * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
+    #       access token generated from `aws login`.
+    #
+    #     When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
+    #     will be used to search for tokens configured for your profile in shared configuration files.
+    #
+    #   @option options [Boolean] :use_dualstack_endpoint
+    #     When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
+    #     will be used if available.
+    #
+    #   @option options [Boolean] :use_fips_endpoint
+    #     When set to `true`, fips compatible endpoints will be used if available.
+    #     When a `fips` region is used, the region is normalized and this config
+    #     is set to `true`.
+    #
+    #   @option options [Boolean] :validate_params (true)
+    #     When `true`, request parameters are validated before
+    #     sending the request.
+    #
+    #   @option options [Aws::SSOOIDC::EndpointProvider] :endpoint_provider
+    #     The endpoint provider used to resolve endpoints. Any object that responds to
+    #     `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to
+    #     `Aws::SSOOIDC::EndpointParameters`.
+    #
+    #   @option options [URI::HTTP,String] :http_proxy A proxy to send
+    #     requests through. Formatted like 'http://proxy.com:123'.
+    #
+    #   @option options [Float] :http_open_timeout (15) The number of
+    #     seconds to wait when opening an HTTP session before raising a
+    #     `Timeout::Error`.
+    #
+    #   @option options [Float] :http_read_timeout (60) The default
+    #     number of seconds to wait for response data. This value can
+    #     safely be set per-request on the session.
+    #
+    #   @option options [Float] :http_idle_timeout (5) The number of
+    #     seconds a connection is allowed to sit idle before it is
+    #     considered stale. Stale connections are closed and removed
+    #     from the pool before making a request.
+    #
+    #   @option options [Float] :http_continue_timeout (1) The number of
+    #     seconds to wait for a 100-continue response before sending the
+    #     request body. This option has no effect unless the request has
+    #     the "Expect" header set to "100-continue". Defaults to `nil` which
+    #     disables this behaviour. This value can safely be set per
+    #     request on the session.
+    #
+    #   @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
+    #     in seconds.
+    #
+    #   @option options [Boolean] :http_wire_trace (false) When `true`,
+    #     HTTP debug output will be sent to the `:logger`.
+    #
+    #   @option options [Boolean] :ssl_verify_peer (true) When `true`,
+    #     SSL peer certificates are verified when establishing a
+    #     connection.
+    #
+    #   @option options [String] :ssl_ca_bundle Full path to the SSL
+    #     certificate authority bundle file that should be used when
+    #     verifying peer certificates. If you do not pass
+    #     `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
+    #     will be used if available.
+    #
+    #   @option options [String] :ssl_ca_directory Full path of the
+    #     directory that contains the unbundled SSL certificate
+    #     authority files for verifying peer certificates. If you do
+    #     not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
+    #     system default will be used if available.
+    #
+    def initialize(*args)
+      super
+    end
+
+    # @!group API Operations
+
+    # Creates and returns an access token for the authorized client. The
+    # access token issued will be used to fetch short-term credentials for
+    # the assigned roles in the AWS account.
+    #
+    # @option params [required, String] :client_id
+    #   The unique identifier string for each client. This value should come
+    #   from the persisted result of the RegisterClient API.
+    #
+    # @option params [required, String] :client_secret
+    #   A secret string generated for the client. This value should come from
+    #   the persisted result of the RegisterClient API.
+    #
+    # @option params [required, String] :grant_type
+    #   Supports grant types for the authorization code, refresh token, and
+    #   device code request. For device code requests, specify the following
+    #   value:
+    #
+    #   `urn:ietf:params:oauth:grant-type:device_code`
+    #
+    #   For information about how to obtain the device code, see the
+    #   StartDeviceAuthorization topic.
+    #
+    # @option params [String] :device_code
+    #   Used only when calling this API for the device code grant type. This
+    #   short-term code is used to identify this authentication attempt. This
+    #   should come from an in-memory reference to the result of the
+    #   StartDeviceAuthorization API.
+    #
+    # @option params [String] :code
+    #   The authorization code received from the authorization service. This
+    #   parameter is required to perform an authorization grant request to get
+    #   access to a token.
+    #
+    # @option params [String] :refresh_token
+    #   Currently, `refreshToken` is not yet implemented and is not supported.
+    #   For more information about the features and limitations of the current
+    #   IAM Identity Center OIDC implementation, see *Considerations for Using
+    #   this Guide* in the [IAM Identity Center OIDC API Reference][1].
+    #
+    #   The token used to obtain an access token in the event that the access
+    #   token is invalid or expired.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+    #
+    # @option params [Array<String>] :scope
+    #   The list of scopes that is defined by the client. Upon authorization,
+    #   this list is used to restrict permissions when granting an access
+    #   token.
+    #
+    # @option params [String] :redirect_uri
+    #   The location of the application that will receive the authorization
+    #   code. Users authorize the service to send the request to this
+    #   location.
+ # + # @return [Types::CreateTokenResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateTokenResponse#access_token #access_token} => String + # * {Types::CreateTokenResponse#token_type #token_type} => String + # * {Types::CreateTokenResponse#expires_in #expires_in} => Integer + # * {Types::CreateTokenResponse#refresh_token #refresh_token} => String + # * {Types::CreateTokenResponse#id_token #id_token} => String + # + # @example Request syntax with placeholder values + # + # resp = client.create_token({ + # client_id: "ClientId", # required + # client_secret: "ClientSecret", # required + # grant_type: "GrantType", # required + # device_code: "DeviceCode", + # code: "AuthCode", + # refresh_token: "RefreshToken", + # scope: ["Scope"], + # redirect_uri: "URI", + # }) + # + # @example Response structure + # + # resp.access_token #=> String + # resp.token_type #=> String + # resp.expires_in #=> Integer + # resp.refresh_token #=> String + # resp.id_token #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken AWS API Documentation + # + # @overload create_token(params = {}) + # @param [Hash] params ({}) + def create_token(params = {}, options = {}) + req = build_request(:create_token, params) + req.send_request(options) + end + + # Registers a client with IAM Identity Center. This allows clients to + # initiate device authorization. The output should be persisted for + # reuse through many authentication requests. + # + # @option params [required, String] :client_name + # The friendly name of the client. + # + # @option params [required, String] :client_type + # The type of client. The service supports only `public` as a client + # type. Anything other than public will be rejected by the service. + # + # @option params [Array] :scopes + # The list of scopes that are defined by the client. Upon authorization, + # this list is used to restrict permissions when granting an access + # token. 
+ # + # @return [Types::RegisterClientResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::RegisterClientResponse#client_id #client_id} => String + # * {Types::RegisterClientResponse#client_secret #client_secret} => String + # * {Types::RegisterClientResponse#client_id_issued_at #client_id_issued_at} => Integer + # * {Types::RegisterClientResponse#client_secret_expires_at #client_secret_expires_at} => Integer + # * {Types::RegisterClientResponse#authorization_endpoint #authorization_endpoint} => String + # * {Types::RegisterClientResponse#token_endpoint #token_endpoint} => String + # + # @example Request syntax with placeholder values + # + # resp = client.register_client({ + # client_name: "ClientName", # required + # client_type: "ClientType", # required + # scopes: ["Scope"], + # }) + # + # @example Response structure + # + # resp.client_id #=> String + # resp.client_secret #=> String + # resp.client_id_issued_at #=> Integer + # resp.client_secret_expires_at #=> Integer + # resp.authorization_endpoint #=> String + # resp.token_endpoint #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient AWS API Documentation + # + # @overload register_client(params = {}) + # @param [Hash] params ({}) + def register_client(params = {}, options = {}) + req = build_request(:register_client, params) + req.send_request(options) + end + + # Initiates device authorization by requesting a pair of verification + # codes from the authorization service. + # + # @option params [required, String] :client_id + # The unique identifier string for the client that is registered with + # IAM Identity Center. This value should come from the persisted result + # of the RegisterClient API operation. + # + # @option params [required, String] :client_secret + # A secret string that is generated for the client. This value should + # come from the persisted result of the RegisterClient API operation. + # + # @option params [required, String] :start_url + # The URL for the AWS access portal. For more information, see [Using + # the AWS access portal][1] in the *IAM Identity Center User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + # + # @return [Types::StartDeviceAuthorizationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::StartDeviceAuthorizationResponse#device_code #device_code} => String + # * {Types::StartDeviceAuthorizationResponse#user_code #user_code} => String + # * {Types::StartDeviceAuthorizationResponse#verification_uri #verification_uri} => String + # * {Types::StartDeviceAuthorizationResponse#verification_uri_complete #verification_uri_complete} => String + # * {Types::StartDeviceAuthorizationResponse#expires_in #expires_in} => Integer + # * {Types::StartDeviceAuthorizationResponse#interval #interval} => Integer + # + # @example Request syntax with placeholder values + # + # resp = client.start_device_authorization({ + # client_id: "ClientId", # required + # client_secret: "ClientSecret", # required + # start_url: "URI", # required + # }) + # + # @example Response structure + # + # resp.device_code #=> String + # resp.user_code #=> String + # resp.verification_uri #=> String + # resp.verification_uri_complete #=> String + # resp.expires_in #=> Integer + # resp.interval #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization AWS API Documentation + # + # @overload start_device_authorization(params = {}) + # @param [Hash] params ({}) + def start_device_authorization(params = {}, options = {}) + req = build_request(:start_device_authorization, params) + req.send_request(options) + end + + # @!endgroup + + # @param params ({}) + # @api private + def build_request(operation_name, params = {}) + handlers = @handlers.for(operation_name) + context = Seahorse::Client::RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + context[:gem_name] = 'aws-sdk-core' + context[:gem_version] = '3.171.0' + Seahorse::Client::Request.new(handlers, context) + end + + # @api private + # @deprecated + def waiter_names + [] + end + + class << self + + # @api private + attr_reader :identifier + + # @api private + def errors_module + Errors + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client_api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client_api.rb new file mode 100644 index 0000000..d30024a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/client_api.rb @@ -0,0 +1,216 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
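The three operations above form the OIDC device authorization flow, in order: `register_client`, then `start_device_authorization`, then `create_token`. A minimal happy-path sketch, assuming a hypothetical region, client name, and start URL (error handling is sketched separately after the `Errors` module later in this diff):

    require 'aws-sdk-core'

    client = Aws::SSOOIDC::Client.new(region: 'us-east-1') # hypothetical region

    # Register a public client; the response is meant to be persisted and reused.
    registration = client.register_client(
      client_name: 'example-cli', # hypothetical name
      client_type: 'public'       # the only client type the service supports
    )

    # Request a device/user code pair and send the user to the verification page.
    auth = client.start_device_authorization(
      client_id: registration.client_id,
      client_secret: registration.client_secret,
      start_url: 'https://example.awsapps.com/start' # hypothetical portal URL
    )
    puts "Visit #{auth.verification_uri} and enter code #{auth.user_code}"

    # After the user approves, exchange the device code for an access token.
    token = client.create_token(
      client_id: registration.client_id,
      client_secret: registration.client_secret,
      grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
      device_code: auth.device_code
    )
    puts token.access_token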
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + # @api private + module ClientApi + + include Seahorse::Model + + AccessDeniedException = Shapes::StructureShape.new(name: 'AccessDeniedException') + AccessToken = Shapes::StringShape.new(name: 'AccessToken') + AuthCode = Shapes::StringShape.new(name: 'AuthCode') + AuthorizationPendingException = Shapes::StructureShape.new(name: 'AuthorizationPendingException') + ClientId = Shapes::StringShape.new(name: 'ClientId') + ClientName = Shapes::StringShape.new(name: 'ClientName') + ClientSecret = Shapes::StringShape.new(name: 'ClientSecret') + ClientType = Shapes::StringShape.new(name: 'ClientType') + CreateTokenRequest = Shapes::StructureShape.new(name: 'CreateTokenRequest') + CreateTokenResponse = Shapes::StructureShape.new(name: 'CreateTokenResponse') + DeviceCode = Shapes::StringShape.new(name: 'DeviceCode') + Error = Shapes::StringShape.new(name: 'Error') + ErrorDescription = Shapes::StringShape.new(name: 'ErrorDescription') + ExpirationInSeconds = Shapes::IntegerShape.new(name: 'ExpirationInSeconds') + ExpiredTokenException = Shapes::StructureShape.new(name: 'ExpiredTokenException') + GrantType = Shapes::StringShape.new(name: 'GrantType') + IdToken = Shapes::StringShape.new(name: 'IdToken') + InternalServerException = Shapes::StructureShape.new(name: 'InternalServerException') + IntervalInSeconds = Shapes::IntegerShape.new(name: 'IntervalInSeconds') + InvalidClientException = Shapes::StructureShape.new(name: 'InvalidClientException') + InvalidClientMetadataException = Shapes::StructureShape.new(name: 'InvalidClientMetadataException') + InvalidGrantException = Shapes::StructureShape.new(name: 'InvalidGrantException') + InvalidRequestException = Shapes::StructureShape.new(name: 'InvalidRequestException') + InvalidScopeException = Shapes::StructureShape.new(name: 'InvalidScopeException') + LongTimeStampType = Shapes::IntegerShape.new(name: 'LongTimeStampType') + RefreshToken = Shapes::StringShape.new(name: 'RefreshToken') + RegisterClientRequest = Shapes::StructureShape.new(name: 'RegisterClientRequest') + RegisterClientResponse = Shapes::StructureShape.new(name: 'RegisterClientResponse') + Scope = Shapes::StringShape.new(name: 'Scope') + Scopes = Shapes::ListShape.new(name: 'Scopes') + SlowDownException = Shapes::StructureShape.new(name: 'SlowDownException') + StartDeviceAuthorizationRequest = Shapes::StructureShape.new(name: 'StartDeviceAuthorizationRequest') + StartDeviceAuthorizationResponse = Shapes::StructureShape.new(name: 'StartDeviceAuthorizationResponse') + TokenType = Shapes::StringShape.new(name: 'TokenType') + URI = Shapes::StringShape.new(name: 'URI') + UnauthorizedClientException = Shapes::StructureShape.new(name: 'UnauthorizedClientException') + UnsupportedGrantTypeException = Shapes::StructureShape.new(name: 'UnsupportedGrantTypeException') + UserCode = Shapes::StringShape.new(name: 'UserCode') + + AccessDeniedException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + AccessDeniedException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + AccessDeniedException.struct_class = Types::AccessDeniedException + + AuthorizationPendingException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + AuthorizationPendingException.add_member(:error_description, Shapes::ShapeRef.new(shape: 
ErrorDescription, location_name: "error_description")) + AuthorizationPendingException.struct_class = Types::AuthorizationPendingException + + CreateTokenRequest.add_member(:client_id, Shapes::ShapeRef.new(shape: ClientId, required: true, location_name: "clientId")) + CreateTokenRequest.add_member(:client_secret, Shapes::ShapeRef.new(shape: ClientSecret, required: true, location_name: "clientSecret")) + CreateTokenRequest.add_member(:grant_type, Shapes::ShapeRef.new(shape: GrantType, required: true, location_name: "grantType")) + CreateTokenRequest.add_member(:device_code, Shapes::ShapeRef.new(shape: DeviceCode, location_name: "deviceCode")) + CreateTokenRequest.add_member(:code, Shapes::ShapeRef.new(shape: AuthCode, location_name: "code")) + CreateTokenRequest.add_member(:refresh_token, Shapes::ShapeRef.new(shape: RefreshToken, location_name: "refreshToken")) + CreateTokenRequest.add_member(:scope, Shapes::ShapeRef.new(shape: Scopes, location_name: "scope")) + CreateTokenRequest.add_member(:redirect_uri, Shapes::ShapeRef.new(shape: URI, location_name: "redirectUri")) + CreateTokenRequest.struct_class = Types::CreateTokenRequest + + CreateTokenResponse.add_member(:access_token, Shapes::ShapeRef.new(shape: AccessToken, location_name: "accessToken")) + CreateTokenResponse.add_member(:token_type, Shapes::ShapeRef.new(shape: TokenType, location_name: "tokenType")) + CreateTokenResponse.add_member(:expires_in, Shapes::ShapeRef.new(shape: ExpirationInSeconds, location_name: "expiresIn")) + CreateTokenResponse.add_member(:refresh_token, Shapes::ShapeRef.new(shape: RefreshToken, location_name: "refreshToken")) + CreateTokenResponse.add_member(:id_token, Shapes::ShapeRef.new(shape: IdToken, location_name: "idToken")) + CreateTokenResponse.struct_class = Types::CreateTokenResponse + + ExpiredTokenException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + ExpiredTokenException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + ExpiredTokenException.struct_class = Types::ExpiredTokenException + + InternalServerException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + InternalServerException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InternalServerException.struct_class = Types::InternalServerException + + InvalidClientException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + InvalidClientException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InvalidClientException.struct_class = Types::InvalidClientException + + InvalidClientMetadataException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + InvalidClientMetadataException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InvalidClientMetadataException.struct_class = Types::InvalidClientMetadataException + + InvalidGrantException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + InvalidGrantException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InvalidGrantException.struct_class = Types::InvalidGrantException + + InvalidRequestException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + 
InvalidRequestException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InvalidRequestException.struct_class = Types::InvalidRequestException + + InvalidScopeException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + InvalidScopeException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + InvalidScopeException.struct_class = Types::InvalidScopeException + + RegisterClientRequest.add_member(:client_name, Shapes::ShapeRef.new(shape: ClientName, required: true, location_name: "clientName")) + RegisterClientRequest.add_member(:client_type, Shapes::ShapeRef.new(shape: ClientType, required: true, location_name: "clientType")) + RegisterClientRequest.add_member(:scopes, Shapes::ShapeRef.new(shape: Scopes, location_name: "scopes")) + RegisterClientRequest.struct_class = Types::RegisterClientRequest + + RegisterClientResponse.add_member(:client_id, Shapes::ShapeRef.new(shape: ClientId, location_name: "clientId")) + RegisterClientResponse.add_member(:client_secret, Shapes::ShapeRef.new(shape: ClientSecret, location_name: "clientSecret")) + RegisterClientResponse.add_member(:client_id_issued_at, Shapes::ShapeRef.new(shape: LongTimeStampType, location_name: "clientIdIssuedAt")) + RegisterClientResponse.add_member(:client_secret_expires_at, Shapes::ShapeRef.new(shape: LongTimeStampType, location_name: "clientSecretExpiresAt")) + RegisterClientResponse.add_member(:authorization_endpoint, Shapes::ShapeRef.new(shape: URI, location_name: "authorizationEndpoint")) + RegisterClientResponse.add_member(:token_endpoint, Shapes::ShapeRef.new(shape: URI, location_name: "tokenEndpoint")) + RegisterClientResponse.struct_class = Types::RegisterClientResponse + + Scopes.member = Shapes::ShapeRef.new(shape: Scope) + + SlowDownException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + SlowDownException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + SlowDownException.struct_class = Types::SlowDownException + + StartDeviceAuthorizationRequest.add_member(:client_id, Shapes::ShapeRef.new(shape: ClientId, required: true, location_name: "clientId")) + StartDeviceAuthorizationRequest.add_member(:client_secret, Shapes::ShapeRef.new(shape: ClientSecret, required: true, location_name: "clientSecret")) + StartDeviceAuthorizationRequest.add_member(:start_url, Shapes::ShapeRef.new(shape: URI, required: true, location_name: "startUrl")) + StartDeviceAuthorizationRequest.struct_class = Types::StartDeviceAuthorizationRequest + + StartDeviceAuthorizationResponse.add_member(:device_code, Shapes::ShapeRef.new(shape: DeviceCode, location_name: "deviceCode")) + StartDeviceAuthorizationResponse.add_member(:user_code, Shapes::ShapeRef.new(shape: UserCode, location_name: "userCode")) + StartDeviceAuthorizationResponse.add_member(:verification_uri, Shapes::ShapeRef.new(shape: URI, location_name: "verificationUri")) + StartDeviceAuthorizationResponse.add_member(:verification_uri_complete, Shapes::ShapeRef.new(shape: URI, location_name: "verificationUriComplete")) + StartDeviceAuthorizationResponse.add_member(:expires_in, Shapes::ShapeRef.new(shape: ExpirationInSeconds, location_name: "expiresIn")) + StartDeviceAuthorizationResponse.add_member(:interval, Shapes::ShapeRef.new(shape: IntervalInSeconds, location_name: "interval")) + StartDeviceAuthorizationResponse.struct_class = 
Types::StartDeviceAuthorizationResponse + + UnauthorizedClientException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + UnauthorizedClientException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + UnauthorizedClientException.struct_class = Types::UnauthorizedClientException + + UnsupportedGrantTypeException.add_member(:error, Shapes::ShapeRef.new(shape: Error, location_name: "error")) + UnsupportedGrantTypeException.add_member(:error_description, Shapes::ShapeRef.new(shape: ErrorDescription, location_name: "error_description")) + UnsupportedGrantTypeException.struct_class = Types::UnsupportedGrantTypeException + + + # @api private + API = Seahorse::Model::Api.new.tap do |api| + + api.version = "2019-06-10" + + api.metadata = { + "apiVersion" => "2019-06-10", + "endpointPrefix" => "oidc", + "jsonVersion" => "1.1", + "protocol" => "rest-json", + "serviceAbbreviation" => "SSO OIDC", + "serviceFullName" => "AWS SSO OIDC", + "serviceId" => "SSO OIDC", + "signatureVersion" => "v4", + "signingName" => "awsssooidc", + "uid" => "sso-oidc-2019-06-10", + } + + api.add_operation(:create_token, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateToken" + o.http_method = "POST" + o.http_request_uri = "/token" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: CreateTokenRequest) + o.output = Shapes::ShapeRef.new(shape: CreateTokenResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidClientException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedClientException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedGrantTypeException) + o.errors << Shapes::ShapeRef.new(shape: InvalidScopeException) + o.errors << Shapes::ShapeRef.new(shape: AuthorizationPendingException) + o.errors << Shapes::ShapeRef.new(shape: SlowDownException) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException) + o.errors << Shapes::ShapeRef.new(shape: ExpiredTokenException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + end) + + api.add_operation(:register_client, Seahorse::Model::Operation.new.tap do |o| + o.name = "RegisterClient" + o.http_method = "POST" + o.http_request_uri = "/client/register" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: RegisterClientRequest) + o.output = Shapes::ShapeRef.new(shape: RegisterClientResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidScopeException) + o.errors << Shapes::ShapeRef.new(shape: InvalidClientMetadataException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + end) + + api.add_operation(:start_device_authorization, Seahorse::Model::Operation.new.tap do |o| + o.name = "StartDeviceAuthorization" + o.http_method = "POST" + o.http_request_uri = "/device_authorization" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: StartDeviceAuthorizationRequest) + o.output = Shapes::ShapeRef.new(shape: StartDeviceAuthorizationResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidClientException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedClientException) + o.errors << Shapes::ShapeRef.new(shape: SlowDownException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + end) + end + + end 
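Note that all three operations set `o['authtype'] = "none"`: they are called before any credentials exist, so the requests are not signed. Because the model above is plain data, it can also be inspected at runtime; a small sketch, with the expected output read off the definitions above:

    api = Aws::SSOOIDC::ClientApi::API
    api.operation_names.each do |name|
      op = api.operation(name)
      puts "#{op.name}: #{op.http_method} #{op.http_request_uri}"
    end
    # CreateToken: POST /token
    # RegisterClient: POST /client/register
    # StartDeviceAuthorization: POST /device_authorization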
+end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/customizations.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/customizations.rb new file mode 100644 index 0000000..ebd4c26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/customizations.rb @@ -0,0 +1 @@ +# frozen_string_literal: true \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_parameters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_parameters.rb new file mode 100644 index 0000000..59ac3ec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_parameters.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + # Endpoint parameters used to influence endpoints per request. + # + # @!attribute region + # The AWS region used to dispatch the request. + # + # @return [String] + # + # @!attribute use_dual_stack + # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error. + # + # @return [Boolean] + # + # @!attribute use_fips + # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error. + # + # @return [Boolean] + # + # @!attribute endpoint + # Override the endpoint used to send this request + # + # @return [String] + # + EndpointParameters = Struct.new( + :region, + :use_dual_stack, + :use_fips, + :endpoint, + ) do + include Aws::Structure + + # @api private + class << self + PARAM_MAP = { + 'Region' => :region, + 'UseDualStack' => :use_dual_stack, + 'UseFIPS' => :use_fips, + 'Endpoint' => :endpoint, + }.freeze + end + + def initialize(options = {}) + self[:region] = options[:region] + self[:use_dual_stack] = options[:use_dual_stack] + self[:use_dual_stack] = false if self[:use_dual_stack].nil? + if self[:use_dual_stack].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack" + end + self[:use_fips] = options[:use_fips] + self[:use_fips] = false if self[:use_fips].nil? + if self[:use_fips].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_fips" + end + self[:endpoint] = options[:endpoint] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_provider.rb new file mode 100644 index 0000000..fcc6e13 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoint_provider.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + class EndpointProvider + def resolve_endpoint(parameters) + region = parameters.region + use_dual_stack = parameters.use_dual_stack + use_fips = parameters.use_fips + endpoint = parameters.endpoint + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" + end + return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://oidc-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + return Aws::Endpoints::Endpoint.new(url: "https://oidc-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://oidc.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" + end + return Aws::Endpoints::Endpoint.new(url: "https://oidc.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, 'No endpoint could be resolved' + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoints.rb new file mode 100644 index 0000000..986a27c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/endpoints.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
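The resolver above can be exercised directly. A minimal sketch, assuming an illustrative region; the FIPS hostname follows from the `supportsFIPS` branch shown above:

    provider = Aws::SSOOIDC::EndpointProvider.new
    params = Aws::SSOOIDC::EndpointParameters.new(
      region: 'us-west-2', # illustrative region
      use_fips: true,
      use_dual_stack: false
    )
    puts provider.resolve_endpoint(params).url
    # => https://oidc-fips.us-west-2.amazonaws.com (in the standard aws partition)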
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::SSOOIDC + module Endpoints + + class CreateToken + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSOOIDC::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class RegisterClient + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSOOIDC::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class StartDeviceAuthorization + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SSOOIDC::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/errors.rb new file mode 100644 index 0000000..065df6b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/errors.rb @@ -0,0 +1,290 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + + # When SSOOIDC returns an error response, the Ruby SDK constructs and raises an error. + # These errors all extend Aws::SSOOIDC::Errors::ServiceError < {Aws::Errors::ServiceError} + # + # You can rescue all SSOOIDC errors using ServiceError: + # + # begin + # # do stuff + # rescue Aws::SSOOIDC::Errors::ServiceError + # # rescues all SSOOIDC API errors + # end + # + # + # ## Request Context + # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns + # information about the request that generated the error. + # See {Seahorse::Client::RequestContext} for more information. + # + # ## Error Classes + # * {AccessDeniedException} + # * {AuthorizationPendingException} + # * {ExpiredTokenException} + # * {InternalServerException} + # * {InvalidClientException} + # * {InvalidClientMetadataException} + # * {InvalidGrantException} + # * {InvalidRequestException} + # * {InvalidScopeException} + # * {SlowDownException} + # * {UnauthorizedClientException} + # * {UnsupportedGrantTypeException} + # + # Additionally, error classes are dynamically generated for service errors based on the error code + # if they are not defined above. 
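These error classes are what make the device-code polling loop workable: `AuthorizationPendingException` means the user has not finished approving, and `SlowDownException` asks the client to back off. A minimal polling sketch, reusing the hypothetical `client`, `registration`, and `auth` objects from the flow sketch earlier in this diff:

    interval = auth.interval || 5 # poll spacing suggested by StartDeviceAuthorization
    begin
      token = client.create_token(
        client_id: registration.client_id,
        client_secret: registration.client_secret,
        grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
        device_code: auth.device_code
      )
    rescue Aws::SSOOIDC::Errors::AuthorizationPendingException
      sleep interval # user has not approved yet; try again
      retry
    rescue Aws::SSOOIDC::Errors::SlowDownException
      interval += 5  # the service asked us to poll less often
      sleep interval
      retry
    rescue Aws::SSOOIDC::Errors::ExpiredTokenException
      raise 'device code expired; restart the device authorization flow'
    end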
+ module Errors + + extend Aws::Errors::DynamicErrors + + class AccessDeniedException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::AccessDeniedException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class AuthorizationPendingException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::AuthorizationPendingException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class ExpiredTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::ExpiredTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class InternalServerException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InternalServerException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class InvalidClientException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InvalidClientException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class InvalidClientMetadataException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InvalidClientMetadataException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class InvalidGrantException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InvalidGrantException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class InvalidRequestException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InvalidRequestException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return 
[String] + def error_description + @data[:error_description] + end + end + + class InvalidScopeException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::InvalidScopeException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class SlowDownException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::SlowDownException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class UnauthorizedClientException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::UnauthorizedClientException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + class UnsupportedGrantTypeException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSOOIDC::Types::UnsupportedGrantTypeException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def error + @data[:error] + end + + # @return [String] + def error_description + @data[:error_description] + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/plugins/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/plugins/endpoints.rb new file mode 100644 index 0000000..c45af74 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/plugins/endpoints.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::SSOOIDC + module Plugins + class Endpoints < Seahorse::Client::Plugin + option( + :endpoint_provider, + doc_type: 'Aws::SSOOIDC::EndpointProvider', + docstring: 'The endpoint provider used to resolve endpoints. Any '\ + 'object that responds to `#resolve_endpoint(parameters)` '\ + 'where `parameters` is a Struct similar to '\ + '`Aws::SSOOIDC::EndpointParameters`' + ) do |cfg| + Aws::SSOOIDC::EndpointProvider.new + end + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + # If endpoint was discovered, do not resolve or apply the endpoint. 
+ unless context[:discovered_endpoint] + params = parameters_for_operation(context) + endpoint = context.config.endpoint_provider.resolve_endpoint(params) + + context.http_request.endpoint = endpoint.url + apply_endpoint_headers(context, endpoint.headers) + end + + context[:endpoint_params] = params + context[:auth_scheme] = + Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + @handler.call(context) + end + + private + + def apply_endpoint_headers(context, headers) + headers.each do |key, values| + value = values + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + + context.http_request.headers[key] = value + end + end + + def parameters_for_operation(context) + case context.operation_name + when :create_token + Aws::SSOOIDC::Endpoints::CreateToken.build(context) + when :register_client + Aws::SSOOIDC::Endpoints::RegisterClient.build(context) + when :start_device_authorization + Aws::SSOOIDC::Endpoints::StartDeviceAuthorization.build(context) + end + end + end + + def add_handlers(handlers, _config) + handlers.add(Handler, step: :build, priority: 75) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/resource.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/resource.rb new file mode 100644 index 0000000..293ba26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/resource.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + + class Resource + + # @param options ({}) + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new(options) + end + + # @return [Client] + def client + @client + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/types.rb new file mode 100644 index 0000000..4bcf325 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-ssooidc/types.rb @@ -0,0 +1,502 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::SSOOIDC + module Types + + # You do not have sufficient access to perform this action. + # + # @!attribute [rw] error + # @return [String] + # + # @!attribute [rw] error_description + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/AccessDeniedException AWS API Documentation + # + class AccessDeniedException < Struct.new( + :error, + :error_description) + SENSITIVE = [] + include Aws::Structure + end + + # Indicates that a request to authorize a client with an access user + # session token is pending. 
+ # + # @!attribute [rw] error + # @return [String] + # + # @!attribute [rw] error_description + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/AuthorizationPendingException AWS API Documentation + # + class AuthorizationPendingException < Struct.new( + :error, + :error_description) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] client_id + # The unique identifier string for each client. This value should come + # from the persisted result of the RegisterClient API. + # @return [String] + # + # @!attribute [rw] client_secret + # A secret string generated for the client. This value should come + # from the persisted result of the RegisterClient API. + # @return [String] + # + # @!attribute [rw] grant_type + # Supports grant types for the authorization code, refresh token, and + # device code request. For device code requests, specify the following + # value: + # + # `urn:ietf:params:oauth:grant-type:device_code ` + # + # For information about how to obtain the device code, see the + # StartDeviceAuthorization topic. + # @return [String] + # + # @!attribute [rw] device_code + # Used only when calling this API for the device code grant type. This + # short-term code is used to identify this authentication attempt. + # This should come from an in-memory reference to the result of the + # StartDeviceAuthorization API. + # @return [String] + # + # @!attribute [rw] code + # The authorization code received from the authorization service. This + # parameter is required to perform an authorization grant request to + # get access to a token. + # @return [String] + # + # @!attribute [rw] refresh_token + # Currently, `refreshToken` is not yet implemented and is not + # supported. For more information about the features and limitations + # of the current IAM Identity Center OIDC implementation, see + # *Considerations for Using this Guide* in the [IAM Identity Center + # OIDC API Reference][1]. + # + # The token used to obtain an access token in the event that the + # access token is invalid or expired. + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + # @return [String] + # + # @!attribute [rw] scope + # The list of scopes that is defined by the client. Upon + # authorization, this list is used to restrict permissions when + # granting an access token. + # @return [Array] + # + # @!attribute [rw] redirect_uri + # The location of the application that will receive the authorization + # code. Users authorize the service to send the request to this + # location. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenRequest AWS API Documentation + # + class CreateTokenRequest < Struct.new( + :client_id, + :client_secret, + :grant_type, + :device_code, + :code, + :refresh_token, + :scope, + :redirect_uri) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] access_token + # An opaque token to access IAM Identity Center resources assigned to + # a user. + # @return [String] + # + # @!attribute [rw] token_type + # Used to notify the client that the returned token is an access + # token. The supported type is `BearerToken`. + # @return [String] + # + # @!attribute [rw] expires_in + # Indicates the time in seconds when an access token will expire. + # @return [Integer] + # + # @!attribute [rw] refresh_token + # Currently, `refreshToken` is not yet implemented and is not + # supported. 
+    #   For more information about the features and limitations
+    #   of the current IAM Identity Center OIDC implementation, see
+    #   *Considerations for Using this Guide* in the [IAM Identity Center
+    #   OIDC API Reference][1].
+    #
+    #   A token that, if present, can be used to refresh a previously issued
+    #   access token that might have expired.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+    #   @return [String]
+    #
+    # @!attribute [rw] id_token
+    #   Currently, `idToken` is not yet implemented and is not supported.
+    #   For more information about the features and limitations of the
+    #   current IAM Identity Center OIDC implementation, see *Considerations
+    #   for Using this Guide* in the [IAM Identity Center OIDC API
+    #   Reference][1].
+    #
+    #   The identifier of the user that is associated with the access token,
+    #   if present.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenResponse AWS API Documentation
+    #
+    class CreateTokenResponse < Struct.new(
+      :access_token,
+      :token_type,
+      :expires_in,
+      :refresh_token,
+      :id_token)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that the token issued by the service is expired and is no
+    # longer valid.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/ExpiredTokenException AWS API Documentation
+    #
+    class ExpiredTokenException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that an error from the service occurred while trying to
+    # process a request.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InternalServerException AWS API Documentation
+    #
+    class InternalServerException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that the `clientId` or `clientSecret` in the request is
+    # invalid. For example, this can occur when a client sends an incorrect
+    # `clientId` or an expired `clientSecret`.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InvalidClientException AWS API Documentation
+    #
+    class InvalidClientException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that the client information sent in the request during
+    # registration is invalid.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InvalidClientMetadataException AWS API Documentation
+    #
+    class InvalidClientMetadataException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that a request contains an invalid grant. This can occur if
+    # a client makes a CreateToken request with an invalid grant type.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InvalidGrantException AWS API Documentation
+    #
+    class InvalidGrantException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that something is wrong with the input to the request. For
+    # example, a required parameter might be missing or out of range.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InvalidRequestException AWS API Documentation
+    #
+    class InvalidRequestException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that the scope provided in the request is invalid.
+    #
+    # @!attribute [rw] error
+    #   @return [String]
+    #
+    # @!attribute [rw] error_description
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/InvalidScopeException AWS API Documentation
+    #
+    class InvalidScopeException < Struct.new(
+      :error,
+      :error_description)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] client_name
+    #   The friendly name of the client.
+    #   @return [String]
+    #
+    # @!attribute [rw] client_type
+    #   The type of client. The service supports only `public` as a client
+    #   type. Anything other than `public` will be rejected by the service.
+    #   @return [String]
+    #
+    # @!attribute [rw] scopes
+    #   The list of scopes that are defined by the client. Upon
+    #   authorization, this list is used to restrict permissions when
+    #   granting an access token.
+    #   @return [Array<String>]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClientRequest AWS API Documentation
+    #
+    class RegisterClientRequest < Struct.new(
+      :client_name,
+      :client_type,
+      :scopes)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] client_id
+    #   The unique identifier string for each client. This client uses this
+    #   identifier to get authenticated by the service in subsequent calls.
+    #   @return [String]
+    #
+    # @!attribute [rw] client_secret
+    #   A secret string generated for the client. The client will use this
+    #   string to get authenticated by the service in subsequent calls.
+    #   @return [String]
+    #
+    # @!attribute [rw] client_id_issued_at
+    #   Indicates the time at which the `clientId` and `clientSecret` were
+    #   issued.
+    #   @return [Integer]
+    #
+    # @!attribute [rw] client_secret_expires_at
+    #   Indicates the time at which the `clientId` and `clientSecret` will
+    #   become invalid.
+    #   @return [Integer]
+    #
+    # @!attribute [rw] authorization_endpoint
+    #   The endpoint where the client can request authorization.
+    #   @return [String]
+    #
+    # @!attribute [rw] token_endpoint
+    #   The endpoint where the client can get an access token.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClientResponse AWS API Documentation
+    #
+    class RegisterClientResponse < Struct.new(
+      :client_id,
+      :client_secret,
+      :client_id_issued_at,
+      :client_secret_expires_at,
+      :authorization_endpoint,
+      :token_endpoint)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Indicates that the client is making requests more frequently than the
+    # service can handle.
+ # + # @!attribute [rw] error + # @return [String] + # + # @!attribute [rw] error_description + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/SlowDownException AWS API Documentation + # + class SlowDownException < Struct.new( + :error, + :error_description) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] client_id + # The unique identifier string for the client that is registered with + # IAM Identity Center. This value should come from the persisted + # result of the RegisterClient API operation. + # @return [String] + # + # @!attribute [rw] client_secret + # A secret string that is generated for the client. This value should + # come from the persisted result of the RegisterClient API operation. + # @return [String] + # + # @!attribute [rw] start_url + # The URL for the AWS access portal. For more information, see [Using + # the AWS access portal][1] in the *IAM Identity Center User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorizationRequest AWS API Documentation + # + class StartDeviceAuthorizationRequest < Struct.new( + :client_id, + :client_secret, + :start_url) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] device_code + # The short-lived code that is used by the device when polling for a + # session token. + # @return [String] + # + # @!attribute [rw] user_code + # A one-time user verification code. This is needed to authorize an + # in-use device. + # @return [String] + # + # @!attribute [rw] verification_uri + # The URI of the verification page that takes the `userCode` to + # authorize the device. + # @return [String] + # + # @!attribute [rw] verification_uri_complete + # An alternate URL that the client can use to automatically launch a + # browser. This process skips the manual step in which the user visits + # the verification page and enters their code. + # @return [String] + # + # @!attribute [rw] expires_in + # Indicates the number of seconds in which the verification code will + # become invalid. + # @return [Integer] + # + # @!attribute [rw] interval + # Indicates the number of seconds the client must wait between + # attempts when polling for a session. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorizationResponse AWS API Documentation + # + class StartDeviceAuthorizationResponse < Struct.new( + :device_code, + :user_code, + :verification_uri, + :verification_uri_complete, + :expires_in, + :interval) + SENSITIVE = [] + include Aws::Structure + end + + # Indicates that the client is not currently authorized to make the + # request. This can happen when a `clientId` is not issued for a public + # client. + # + # @!attribute [rw] error + # @return [String] + # + # @!attribute [rw] error_description + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/UnauthorizedClientException AWS API Documentation + # + class UnauthorizedClientException < Struct.new( + :error, + :error_description) + SENSITIVE = [] + include Aws::Structure + end + + # Indicates that the grant type in the request is not supported by the + # service. 
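+ #
+ # For context, a hedged sketch of the device authorization flow in
+ # which a grant type is supplied (the region, client name, start URL,
+ # and polling details are assumptions, not part of this generated
+ # file):
+ #
+ #     oidc = Aws::SSOOIDC::Client.new(region: "us-east-1")
+ #     reg = oidc.register_client(client_name: "my-tool", client_type: "public")
+ #     auth = oidc.start_device_authorization(
+ #       client_id: reg.client_id,
+ #       client_secret: reg.client_secret,
+ #       start_url: "https://my-sso-portal.awsapps.com/start"
+ #     )
+ #     # After the user approves the request at auth.verification_uri_complete:
+ #     token = oidc.create_token(
+ #       client_id: reg.client_id,
+ #       client_secret: reg.client_secret,
+ #       grant_type: "urn:ietf:params:oauth:grant-type:device_code",
+ #       device_code: auth.device_code
+ #     )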
+ # + # @!attribute [rw] error + # @return [String] + # + # @!attribute [rw] error_description + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/UnsupportedGrantTypeException AWS API Documentation + # + class UnsupportedGrantTypeException < Struct.new( + :error, + :error_description) + SENSITIVE = [] + include Aws::Structure + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts.rb new file mode 100644 index 0000000..b079d3f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +unless Module.const_defined?(:Aws) + require 'aws-sdk-core' + require 'aws-sigv4' +end + +require_relative 'aws-sdk-sts/types' +require_relative 'aws-sdk-sts/client_api' +require_relative 'aws-sdk-sts/plugins/endpoints.rb' +require_relative 'aws-sdk-sts/client' +require_relative 'aws-sdk-sts/errors' +require_relative 'aws-sdk-sts/resource' +require_relative 'aws-sdk-sts/endpoint_parameters' +require_relative 'aws-sdk-sts/endpoint_provider' +require_relative 'aws-sdk-sts/endpoints' +require_relative 'aws-sdk-sts/customizations' + +# This module provides support for AWS Security Token Service. This module is available in the +# `aws-sdk-core` gem. +# +# # Client +# +# The {Client} class provides one method for each API operation. Operation +# methods each accept a hash of request parameters and return a response +# structure. +# +# sts = Aws::STS::Client.new +# resp = sts.assume_role(params) +# +# See {Client} for more information. +# +# # Errors +# +# Errors returned from AWS Security Token Service are defined in the +# {Errors} module and all extend {Errors::ServiceError}. +# +# begin +# # do stuff +# rescue Aws::STS::Errors::ServiceError +# # rescues all AWS Security Token Service API errors +# end +# +# See {Errors} for more information. +# +# @!group service +module Aws::STS + + GEM_VERSION = '3.171.0' + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client.rb new file mode 100644 index 0000000..40a2dca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client.rb @@ -0,0 +1,2343 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +require 'seahorse/client/plugins/content_length.rb' +require 'aws-sdk-core/plugins/credentials_configuration.rb' +require 'aws-sdk-core/plugins/logging.rb' +require 'aws-sdk-core/plugins/param_converter.rb' +require 'aws-sdk-core/plugins/param_validator.rb' +require 'aws-sdk-core/plugins/user_agent.rb' +require 'aws-sdk-core/plugins/helpful_socket_errors.rb' +require 'aws-sdk-core/plugins/retry_errors.rb' +require 'aws-sdk-core/plugins/global_configuration.rb' +require 'aws-sdk-core/plugins/regional_endpoint.rb' +require 'aws-sdk-core/plugins/endpoint_discovery.rb' +require 'aws-sdk-core/plugins/endpoint_pattern.rb' +require 'aws-sdk-core/plugins/response_paging.rb' +require 'aws-sdk-core/plugins/stub_responses.rb' +require 'aws-sdk-core/plugins/idempotency_token.rb' +require 'aws-sdk-core/plugins/jsonvalue_converter.rb' +require 'aws-sdk-core/plugins/client_metrics_plugin.rb' +require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb' +require 'aws-sdk-core/plugins/transfer_encoding.rb' +require 'aws-sdk-core/plugins/http_checksum.rb' +require 'aws-sdk-core/plugins/checksum_algorithm.rb' +require 'aws-sdk-core/plugins/defaults_mode.rb' +require 'aws-sdk-core/plugins/recursion_detection.rb' +require 'aws-sdk-core/plugins/sign.rb' +require 'aws-sdk-core/plugins/protocols/query.rb' +require 'aws-sdk-sts/plugins/sts_regional_endpoints.rb' + +Aws::Plugins::GlobalConfiguration.add_identifier(:sts) + +module Aws::STS + # An API client for STS. To construct a client, you need to configure a `:region` and `:credentials`. + # + # client = Aws::STS::Client.new( + # region: region_name, + # credentials: credentials, + # # ... + # ) + # + # For details on configuring region and credentials see + # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html). + # + # See {#initialize} for a full list of supported configuration options. + class Client < Seahorse::Client::Base + + include Aws::ClientStubs + + @identifier = :sts + + set_api(ClientApi::API) + + add_plugin(Seahorse::Client::Plugins::ContentLength) + add_plugin(Aws::Plugins::CredentialsConfiguration) + add_plugin(Aws::Plugins::Logging) + add_plugin(Aws::Plugins::ParamConverter) + add_plugin(Aws::Plugins::ParamValidator) + add_plugin(Aws::Plugins::UserAgent) + add_plugin(Aws::Plugins::HelpfulSocketErrors) + add_plugin(Aws::Plugins::RetryErrors) + add_plugin(Aws::Plugins::GlobalConfiguration) + add_plugin(Aws::Plugins::RegionalEndpoint) + add_plugin(Aws::Plugins::EndpointDiscovery) + add_plugin(Aws::Plugins::EndpointPattern) + add_plugin(Aws::Plugins::ResponsePaging) + add_plugin(Aws::Plugins::StubResponses) + add_plugin(Aws::Plugins::IdempotencyToken) + add_plugin(Aws::Plugins::JsonvalueConverter) + add_plugin(Aws::Plugins::ClientMetricsPlugin) + add_plugin(Aws::Plugins::ClientMetricsSendPlugin) + add_plugin(Aws::Plugins::TransferEncoding) + add_plugin(Aws::Plugins::HttpChecksum) + add_plugin(Aws::Plugins::ChecksumAlgorithm) + add_plugin(Aws::Plugins::DefaultsMode) + add_plugin(Aws::Plugins::RecursionDetection) + add_plugin(Aws::Plugins::Sign) + add_plugin(Aws::Plugins::Protocols::Query) + add_plugin(Aws::STS::Plugins::STSRegionalEndpoints) + add_plugin(Aws::STS::Plugins::Endpoints) + + # @overload initialize(options) + # @param [Hash] options + # @option options [required, Aws::CredentialProvider] :credentials + # Your AWS credentials. 
This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::Credentials` - Used for configuring static, non-refreshing
+ # credentials.
+ #
+ # * `Aws::SharedCredentials` - Used for loading static credentials from a
+ # shared file, such as `~/.aws/config`.
+ #
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
+ #
+ # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
+ # assume a role after providing credentials via the web.
+ #
+ # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
+ # access token generated from `aws login`.
+ #
+ # * `Aws::ProcessCredentials` - Used for loading credentials from a
+ # process that outputs to stdout.
+ #
+ # * `Aws::InstanceProfileCredentials` - Used for loading credentials
+ # from an EC2 IMDS on an EC2 instance.
+ #
+ # * `Aws::ECSCredentials` - Used for loading credentials from
+ # instances running in ECS.
+ #
+ # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
+ # from the Cognito Identity service.
+ #
+ # When `:credentials` are not configured directly, the following
+ # locations will be searched for credentials:
+ #
+ # * `Aws.config[:credentials]`
+ # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
+ # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ # * EC2/ECS IMDS instance profile - When used by default, the timeouts
+ # are very aggressive. Construct and pass an instance of
+ # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
+ # enable retries and extended timeouts. Instance profile credential
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
+ # to true.
+ #
+ # @option options [required, String] :region
+ # The AWS region to connect to. The configured `:region` is
+ # used to determine the service `:endpoint`. When not passed,
+ # a default `:region` is searched for in the following locations:
+ #
+ # * `Aws.config[:region]`
+ # * `ENV['AWS_REGION']`
+ # * `ENV['AMAZON_REGION']`
+ # * `ENV['AWS_DEFAULT_REGION']`
+ # * `~/.aws/credentials`
+ # * `~/.aws/config`
+ #
+ # @option options [String] :access_key_id
+ #
+ # @option options [Boolean] :active_endpoint_cache (false)
+ # When set to `true`, a thread polling for endpoints will be running in
+ # the background every 60 secs (default). Defaults to `false`.
+ #
+ # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
+ # Used only in `adaptive` retry mode. When true, the request will sleep
+ # until there is sufficient client side capacity to retry the request.
+ # When false, the request will raise a `RetryCapacityNotAvailableError` and will
+ # not retry instead of sleeping.
+ #
+ # @option options [Boolean] :client_side_monitoring (false)
+ # When `true`, client-side metrics will be collected for all API requests from
+ # this client.
+ #
+ # @option options [String] :client_side_monitoring_client_id ("")
+ # Allows you to provide an identifier for this client which will be attached to
+ # all generated client side metrics. Defaults to an empty string.
+ #
+ # @option options [String] :client_side_monitoring_host ("127.0.0.1")
+ # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
+ # side monitoring agent is running on, where client metrics will be published via UDP.
+ #
+ # @option options [Integer] :client_side_monitoring_port (31000)
+ # Required for publishing client metrics.
The port that the client side monitoring
+ # agent is running on, where client metrics will be published via UDP.
+ #
+ # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
+ # Allows you to provide a custom client-side monitoring publisher class. By default,
+ # the Client Side Monitoring Agent Publisher is used.
+ #
+ # @option options [Boolean] :convert_params (true)
+ # When `true`, an attempt is made to coerce request parameters into
+ # the required types.
+ #
+ # @option options [Boolean] :correct_clock_skew (true)
+ # Used only in `standard` and adaptive retry modes. Specifies whether to apply
+ # a clock skew correction and retry requests with skewed client clocks.
+ #
+ # @option options [String] :defaults_mode ("legacy")
+ # See {Aws::DefaultsModeConfiguration} for a list of the
+ # accepted modes and the configuration defaults that are included.
+ #
+ # @option options [Boolean] :disable_host_prefix_injection (false)
+ # Set to true to disable SDK automatically adding host prefix
+ # to default service endpoint when available.
+ #
+ # @option options [String] :endpoint
+ # The client endpoint is normally constructed from the `:region`
+ # option. You should only configure an `:endpoint` when connecting
+ # to test or custom endpoints. This should be a valid HTTP(S) URI.
+ #
+ # @option options [Integer] :endpoint_cache_max_entries (1000)
+ # Used for the maximum size limit of the LRU cache storing endpoints data
+ # for endpoint discovery enabled operations. Defaults to 1000.
+ #
+ # @option options [Integer] :endpoint_cache_max_threads (10)
+ # Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
+ #
+ # @option options [Integer] :endpoint_cache_poll_interval (60)
+ # When :endpoint_discovery and :active_endpoint_cache are enabled,
+ # use this option to configure the time interval in seconds for making
+ # requests fetching endpoints information. Defaults to 60 sec.
+ #
+ # @option options [Boolean] :endpoint_discovery (false)
+ # When set to `true`, endpoint discovery will be enabled for operations when available.
+ #
+ # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
+ # The log formatter.
+ #
+ # @option options [Symbol] :log_level (:info)
+ # The log level to send messages to the `:logger` at.
+ #
+ # @option options [Logger] :logger
+ # The Logger instance to send log messages to. If this option
+ # is not set, logging will be disabled.
+ #
+ # @option options [Integer] :max_attempts (3)
+ # An integer representing the maximum number of attempts that will be made for
+ # a single request, including the initial attempt. For example,
+ # setting this value to 5 will result in a request being retried up to
+ # 4 times. Used in `standard` and `adaptive` retry modes.
+ #
+ # @option options [String] :profile ("default")
+ # Used when loading credentials from the shared credentials file
+ # at HOME/.aws/credentials. When not specified, 'default' is used.
+ #
+ # @option options [Proc] :retry_backoff
+ # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
+ # This option is only used in the `legacy` retry mode.
+ #
+ # @option options [Float] :retry_base_delay (0.3)
+ # The base delay in seconds used by the default backoff function. This option
+ # is only used in the `legacy` retry mode.
+ #
+ # @option options [Symbol] :retry_jitter (:none)
+ # A delay randomiser function used by the default backoff function.
+ # Some predefined functions can be referenced by name - :none, :equal, :full,
+ # otherwise a Proc that takes and returns a number. This option is only used
+ # in the `legacy` retry mode.
+ #
+ # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
+ #
+ # @option options [Integer] :retry_limit (3)
+ # The maximum number of times to retry failed requests. Only
+ # ~ 500 level server errors and certain ~ 400 level client errors
+ # are retried. Generally, these are throttling errors, data
+ # checksum errors, networking errors, timeout errors, auth errors,
+ # endpoint discovery, and errors from expired credentials.
+ # This option is only used in the `legacy` retry mode.
+ #
+ # @option options [Integer] :retry_max_delay (0)
+ # The maximum number of seconds to delay between retries (0 for no limit)
+ # used by the default backoff function. This option is only used in the
+ # `legacy` retry mode.
+ #
+ # @option options [String] :retry_mode ("legacy")
+ # Specifies which retry algorithm to use. Values are:
+ #
+ # * `legacy` - The pre-existing retry behavior. This is the default value if
+ # no retry mode is provided.
+ #
+ # * `standard` - A standardized set of retry rules across the AWS SDKs.
+ # This includes support for retry quotas, which limit the number of
+ # unsuccessful retries a client can make.
+ #
+ # * `adaptive` - An experimental retry mode that includes all the
+ # functionality of `standard` mode along with automatic client side
+ # throttling. This is a provisional mode that may change behavior
+ # in the future.
+ #
+ #
+ # @option options [String] :secret_access_key
+ #
+ # @option options [String] :session_token
+ #
+ # @option options [String] :sts_regional_endpoints ("regional")
+ # Passing in 'regional' to enable regional endpoint for STS for all supported
+ # regions (except 'aws-global'). Using 'legacy' mode will force all legacy
+ # regions to resolve to the STS global endpoint.
+ #
+ # @option options [Boolean] :stub_responses (false)
+ # Causes the client to return stubbed responses. By default
+ # fake responses are generated and returned. You can specify
+ # the response data to return or errors to raise by calling
+ # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
+ #
+ # ** Please note ** When response stubbing is enabled, no HTTP
+ # requests are made, and retries are disabled.
+ #
+ # @option options [Aws::TokenProvider] :token_provider
+ # A Bearer Token Provider. This can be an instance of any one of the
+ # following classes:
+ #
+ # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
+ # tokens.
+ #
+ # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
+ # access token generated from `aws login`.
+ #
+ # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
+ # will be used to search for tokens configured for your profile in shared configuration files.
+ #
+ # @option options [Boolean] :use_dualstack_endpoint
+ # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
+ # will be used if available.
+ #
+ # @option options [Boolean] :use_fips_endpoint
+ # When set to `true`, fips compatible endpoints will be used if available.
+ # When a `fips` region is used, the region is normalized and this config
+ # is set to `true`.
+ #
+ # @option options [Boolean] :validate_params (true)
+ # When `true`, request parameters are validated before
+ # sending the request.
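+ #
+ # As an illustration only (the values shown are placeholders and this
+ # example is not part of the generated option list), a client might be
+ # constructed with a handful of these options like so:
+ #
+ #     sts = Aws::STS::Client.new(
+ #       region: "us-east-1",
+ #       credentials: Aws::Credentials.new("akid", "secret"),
+ #       retry_mode: "standard",
+ #       sts_regional_endpoints: "regional"
+ #     )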
+ #
+ # @option options [Aws::STS::EndpointProvider] :endpoint_provider
+ # The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::STS::EndpointParameters`
+ #
+ # @option options [URI::HTTP,String] :http_proxy A proxy to send
+ # requests through. Formatted like 'http://proxy.com:123'.
+ #
+ # @option options [Float] :http_open_timeout (15) The number of
+ # seconds to wait when opening an HTTP session before raising a
+ # `Timeout::Error`.
+ #
+ # @option options [Float] :http_read_timeout (60) The default
+ # number of seconds to wait for response data. This value can
+ # safely be set per-request on the session.
+ #
+ # @option options [Float] :http_idle_timeout (5) The number of
+ # seconds a connection is allowed to sit idle before it is
+ # considered stale. Stale connections are closed and removed
+ # from the pool before making a request.
+ #
+ # @option options [Float] :http_continue_timeout (1) The number of
+ # seconds to wait for a 100-continue response before sending the
+ # request body. This option has no effect unless the request has
+ # "Expect" header set to "100-continue". Defaults to `nil` which
+ # disables this behaviour. This value can safely be set per
+ # request on the session.
+ #
+ # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
+ # in seconds.
+ #
+ # @option options [Boolean] :http_wire_trace (false) When `true`,
+ # HTTP debug output will be sent to the `:logger`.
+ #
+ # @option options [Boolean] :ssl_verify_peer (true) When `true`,
+ # SSL peer certificates are verified when establishing a
+ # connection.
+ #
+ # @option options [String] :ssl_ca_bundle Full path to the SSL
+ # certificate authority bundle file that should be used when
+ # verifying peer certificates. If you do not pass
+ # `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
+ # will be used if available.
+ #
+ # @option options [String] :ssl_ca_directory Full path of the
+ # directory that contains the unbundled SSL certificate
+ # authority files for verifying peer certificates. If you do
+ # not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
+ # system default will be used if available.
+ #
+ def initialize(*args)
+ super
+ end
+
+ # @!group API Operations
+
+ # Returns a set of temporary security credentials that you can use to
+ # access Amazon Web Services resources. These temporary credentials
+ # consist of an access key ID, a secret access key, and a security
+ # token. Typically, you use `AssumeRole` within your account or for
+ # cross-account access. For a comparison of `AssumeRole` with other API
+ # operations that produce temporary credentials, see [Requesting
+ # Temporary Security Credentials][1] and [Comparing the Amazon Web
+ # Services STS API operations][2] in the *IAM User Guide*.
+ #
+ # **Permissions**
+ #
+ # The temporary security credentials created by `AssumeRole` can be used
+ # to make API calls to any Amazon Web Services service with the
+ # following exception: You cannot call the Amazon Web Services STS
+ # `GetFederationToken` or `GetSessionToken` API operations.
+ #
+ # (Optional) You can pass inline or managed [session policies][3] to
+ # this operation. You can pass a single JSON policy document to use as
+ # an inline session policy. You can also specify up to 10 managed policy
+ # Amazon Resource Names (ARNs) to use as managed session policies.
The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based policy + # and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][3] in the *IAM User Guide*. + # + # When you create a role, you create two policies: A role trust policy + # that specifies *who* can assume the role and a permissions policy that + # specifies *what* can be done with the role. You specify the trusted + # principal who is allowed to assume the role in the role trust policy. + # + # To assume a role from a different account, your Amazon Web Services + # account must be trusted by the role. The trust relationship is defined + # in the role's trust policy when the role is created. That trust + # policy states which accounts are allowed to delegate that access to + # users in the account. + # + # A user who wants to access a role in a different account must also + # have permissions that are delegated from the user account + # administrator. The administrator must attach a policy that allows the + # user to call `AssumeRole` for the ARN of the role in the other + # account. + # + # To allow a user to assume a role in the same account, you can do + # either of the following: + # + # * Attach a policy to the user that allows the user to call + # `AssumeRole` (as long as the role's trust policy trusts the + # account). + # + # * Add the user as a principal directly in the role's trust policy. + # + # You can do either because the role’s trust policy acts as an IAM + # resource-based policy. When a resource-based policy grants access to a + # principal in the same account, no additional identity-based policy is + # required. For more information about trust policies and resource-based + # policies, see [IAM Policies][4] in the *IAM User Guide*. + # + # **Tags** + # + # (Optional) You can pass tag key-value pairs to your session. These + # tags are called session tags. For more information about session tags, + # see [Passing Session Tags in STS][5] in the *IAM User Guide*. + # + # An administrator must grant you the permissions necessary to pass + # session tags. The administrator can also create granular permissions + # to allow you to pass only specific session tags. For more information, + # see [Tutorial: Using Tags for Attribute-Based Access Control][6] in + # the *IAM User Guide*. + # + # You can set the session tags as transitive. Transitive tags persist + # during role chaining. For more information, see [Chaining Roles with + # Session Tags][7] in the *IAM User Guide*. + # + # **Using MFA with AssumeRole** + # + # (Optional) You can include multi-factor authentication (MFA) + # information when you call `AssumeRole`. This is useful for + # cross-account scenarios to ensure that the user that assumes the role + # has been authenticated with an Amazon Web Services MFA device. In that + # scenario, the trust policy of the role being assumed includes a + # condition that tests for MFA authentication. 
If the caller does not
+ # include valid MFA information, the request to assume the role is
+ # denied. The condition in a trust policy that tests for MFA
+ # authentication might look like the following example.
+ #
+ # `"Condition": \{"Bool": \{"aws:MultiFactorAuthPresent": true\}\}`
+ #
+ # For more information, see [Configuring MFA-Protected API Access][8] in
+ # the *IAM User Guide*.
+ #
+ # To use MFA with `AssumeRole`, you pass values for the `SerialNumber`
+ # and `TokenCode` parameters. The `SerialNumber` value identifies the
+ # user's hardware or virtual MFA device. The `TokenCode` is the
+ # time-based one-time password (TOTP) that the MFA device produces.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+ # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+ # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ # [6]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+ # [7]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+ # [8]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html
+ #
+ # @option params [required, String] :role_arn
+ # The Amazon Resource Name (ARN) of the role to assume.
+ #
+ # @option params [required, String] :role_session_name
+ # An identifier for the assumed role session.
+ #
+ # Use the role session name to uniquely identify a session when the same
+ # role is assumed by different principals or for different reasons. In
+ # cross-account scenarios, the role session name is visible to, and can
+ # be logged by the account that owns the role. The role session name is
+ # also used in the ARN of the assumed role principal. This means that
+ # subsequent cross-account API requests that use the temporary security
+ # credentials will expose the role session name to the external account
+ # in their CloudTrail logs.
+ #
+ # The regex used to validate this parameter is a string of characters
+ # consisting of upper- and lower-case alphanumeric characters with no
+ # spaces. You can also include underscores or any of the following
+ # characters: =,.@-
+ #
+ # @option params [Array<Types::PolicyDescriptorType>] :policy_arns
+ # The Amazon Resource Names (ARNs) of the IAM managed policies that you
+ # want to use as managed session policies. The policies must exist in
+ # the same account as the role.
+ #
+ # This parameter is optional. You can provide up to 10 managed policy
+ # ARNs. However, the plaintext that you use for both inline and managed
+ # session policies can't exceed 2,048 characters. For more information
+ # about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services
+ # Service Namespaces][1] in the Amazon Web Services General Reference.
+ #
+ # An Amazon Web Services conversion compresses the passed inline session
+ # policy, managed policy ARNs, and session tags into a packed binary
+ # format that has a separate limit. Your request can fail for this limit
+ # even if your plaintext meets the other requirements. The
+ # `PackedPolicySize` response element indicates by percentage how close
+ # the policies and tags for your request are to the upper size limit.
+ # + # + # + # Passing policies to this operation returns new temporary credentials. + # The resulting session's permissions are the intersection of the + # role's identity-based policy and the session policies. You can use + # the role's temporary credentials in subsequent Amazon Web Services + # API calls to access resources in the account that owns the role. You + # cannot use session policies to grant more permissions than those + # allowed by the identity-based policy of the role that is being + # assumed. For more information, see [Session Policies][2] in the *IAM + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [String] :policy + # An IAM policy in JSON format that you want to use as an inline session + # policy. + # + # This parameter is optional. Passing policies to this operation returns + # new temporary credentials. The resulting session's permissions are + # the intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in subsequent + # Amazon Web Services API calls to access resources in the account that + # owns the role. You cannot use session policies to grant more + # permissions than those allowed by the identity-based policy of the + # role that is being assumed. For more information, see [Session + # Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of the + # valid character list (\\u0020 through \\u00FF). It can also include + # the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) + # characters. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [Integer] :duration_seconds + # The duration, in seconds, of the role session. The value specified can + # range from 900 seconds (15 minutes) up to the maximum session duration + # set for the role. The maximum session duration setting can have a + # value from 1 hour to 12 hours. If you specify a value higher than this + # setting or the administrator setting (whichever is lower), the + # operation fails. For example, if you specify a session duration of 12 + # hours, but your administrator set the maximum session duration to 6 + # hours, your operation fails. + # + # Role chaining limits your Amazon Web Services CLI or Amazon Web + # Services API role session to a maximum of one hour. When you use the + # `AssumeRole` API operation to assume a role, you can specify the + # duration of your role session with the `DurationSeconds` parameter. + # You can specify a parameter value of up to 43200 seconds (12 hours), + # depending on the maximum session duration setting for your role. 
+ # However, if you assume a role using role chaining and provide a
+ # `DurationSeconds` parameter value greater than one hour, the operation
+ # fails. To learn how to view the maximum value for your role, see [View
+ # the Maximum Session Duration Setting for a Role][1] in the *IAM User
+ # Guide*.
+ #
+ # By default, the value is set to `3600` seconds.
+ #
+ # The `DurationSeconds` parameter is separate from the duration of a
+ # console session that you might request using the returned credentials.
+ # The request to the federation endpoint for a console sign-in token
+ # takes a `SessionDuration` parameter that specifies the maximum length
+ # of the console session. For more information, see [Creating a URL that
+ # Enables Federated Users to Access the Amazon Web Services Management
+ # Console][2] in the *IAM User Guide*.
+ #
+ #
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+ #
+ # @option params [Array<Types::Tag>] :tags
+ # A list of session tags that you want to pass. Each session tag
+ # consists of a key name and an associated value. For more information
+ # about session tags, see [Tagging Amazon Web Services STS Sessions][1]
+ # in the *IAM User Guide*.
+ #
+ # This parameter is optional. You can pass up to 50 session tags. The
+ # plaintext session tag keys can’t exceed 128 characters, and the values
+ # can’t exceed 256 characters. For these and additional limits, see [IAM
+ # and STS Character Limits][2] in the *IAM User Guide*.
+ #
+ # An Amazon Web Services conversion compresses the passed inline session
+ # policy, managed policy ARNs, and session tags into a packed binary
+ # format that has a separate limit. Your request can fail for this limit
+ # even if your plaintext meets the other requirements. The
+ # `PackedPolicySize` response element indicates by percentage how close
+ # the policies and tags for your request are to the upper size limit.
+ #
+ #
+ #
+ # You can pass a session tag with the same key as a tag that is already
+ # attached to the role. When you do, session tags override a role tag
+ # with the same key.
+ #
+ # Tag key–value pairs are not case sensitive, but case is preserved.
+ # This means that you cannot have separate `Department` and `department`
+ # tag keys. Assume that the role has the `Department`=`Marketing` tag
+ # and you pass the `department`=`engineering` session tag. `Department`
+ # and `department` are not saved as separate tags, and the session tag
+ # passed in the request takes precedence over the role tag.
+ #
+ # Additionally, if you used temporary credentials to perform this
+ # operation, the new session inherits any transitive session tags from
+ # the calling session. If you pass a session tag with the same key as an
+ # inherited tag, the operation fails. To view the inherited tags for a
+ # session, see the CloudTrail logs. For more information, see [Viewing
+ # Session Tags in CloudTrail][3] in the *IAM User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs
+ #
+ # @option params [Array<String>] :transitive_tag_keys
+ # A list of keys for session tags that you want to set as transitive.
If
+ # you set a tag key as transitive, the corresponding key and value
+ # pass to subsequent sessions in a role chain. For more information,
+ # see [Chaining Roles with Session Tags][1] in the *IAM User Guide*.
+ #
+ # This parameter is optional. When you set session tags as transitive,
+ # the session policy and session tags packed binary limit is not
+ # affected.
+ #
+ # If you choose not to specify a transitive tag key, then no tags are
+ # passed from this session to any subsequent sessions.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+ #
+ # @option params [String] :external_id
+ # A unique identifier that might be required when you assume a role in
+ # another account. If the administrator of the account to which the role
+ # belongs provided you with an external ID, then provide that value in
+ # the `ExternalId` parameter. This value can be any string, such as a
+ # passphrase or account number. A cross-account role is usually set up
+ # to trust everyone in an account. Therefore, the administrator of the
+ # trusting account might send an external ID to the administrator of the
+ # trusted account. That way, only someone with the ID can assume the
+ # role, rather than everyone in the account. For more information about
+ # the external ID, see [How to Use an External ID When Granting Access
+ # to Your Amazon Web Services Resources to a Third Party][1] in the *IAM
+ # User Guide*.
+ #
+ # The regex used to validate this parameter is a string of characters
+ # consisting of upper- and lower-case alphanumeric characters with no
+ # spaces. You can also include underscores or any of the following
+ # characters: =,.@:/-
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
+ #
+ # @option params [String] :serial_number
+ # The identification number of the MFA device that is associated with
+ # the user who is making the `AssumeRole` call. Specify this value if
+ # the trust policy of the role being assumed includes a condition that
+ # requires MFA authentication. The value is either the serial number for
+ # a hardware device (such as `GAHT12345678`) or an Amazon Resource Name
+ # (ARN) for a virtual device (such as
+ # `arn:aws:iam::123456789012:mfa/user`).
+ #
+ # The regex used to validate this parameter is a string of characters
+ # consisting of upper- and lower-case alphanumeric characters with no
+ # spaces. You can also include underscores or any of the following
+ # characters: =,.@-
+ #
+ # @option params [String] :token_code
+ # The value provided by the MFA device, if the trust policy of the role
+ # being assumed requires MFA. (In other words, if the policy includes a
+ # condition that tests for MFA). If the role being assumed requires MFA
+ # and if the `TokenCode` value is missing or expired, the `AssumeRole`
+ # call returns an "access denied" error.
+ #
+ # The format for this parameter, as described by its regex pattern, is a
+ # sequence of six numeric digits.
+ #
+ # @option params [String] :source_identity
+ # The source identity specified by the principal that is calling the
+ # `AssumeRole` operation.
+ #
+ # You can require users to specify a source identity when they assume a
+ # role. You do this by using the `sts:SourceIdentity` condition key in a
+ # role trust policy. You can use source identity information in
+ # CloudTrail logs to determine who took actions with a role.
You can use + # the `aws:SourceIdentity` condition key to further control access to + # Amazon Web Services resources based on the value of source identity. + # For more information about using source identity, see [Monitor and + # control actions taken with assumed roles][1] in the *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@-. You cannot use a value that begins with the text + # `aws:`. This prefix is reserved for Amazon Web Services internal use. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + # + # @return [Types::AssumeRoleResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::AssumeRoleResponse#credentials #credentials} => Types::Credentials + # * {Types::AssumeRoleResponse#assumed_role_user #assumed_role_user} => Types::AssumedRoleUser + # * {Types::AssumeRoleResponse#packed_policy_size #packed_policy_size} => Integer + # * {Types::AssumeRoleResponse#source_identity #source_identity} => String + # + # + # @example Example: To assume a role + # + # resp = client.assume_role({ + # external_id: "123ABC", + # policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Stmt1\",\"Effect\":\"Allow\",\"Action\":\"s3:ListAllMyBuckets\",\"Resource\":\"*\"}]}", + # role_arn: "arn:aws:iam::123456789012:role/demo", + # role_session_name: "testAssumeRoleSession", + # tags: [ + # { + # key: "Project", + # value: "Unicorn", + # }, + # { + # key: "Team", + # value: "Automation", + # }, + # { + # key: "Cost-Center", + # value: "12345", + # }, + # ], + # transitive_tag_keys: [ + # "Project", + # "Cost-Center", + # ], + # }) + # + # resp.to_h outputs the following: + # { + # assumed_role_user: { + # arn: "arn:aws:sts::123456789012:assumed-role/demo/Bob", + # assumed_role_id: "ARO123EXAMPLE123:Bob", + # }, + # credentials: { + # access_key_id: "AKIAIOSFODNN7EXAMPLE", + # expiration: Time.parse("2011-07-15T23:28:33.359Z"), + # secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY", + # session_token: "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==", + # }, + # packed_policy_size: 8, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.assume_role({ + # role_arn: "arnType", # required + # role_session_name: "roleSessionNameType", # required + # policy_arns: [ + # { + # arn: "arnType", + # }, + # ], + # policy: "sessionPolicyDocumentType", + # duration_seconds: 1, + # tags: [ + # { + # key: "tagKeyType", # required + # value: "tagValueType", # required + # }, + # ], + # transitive_tag_keys: ["tagKeyType"], + # external_id: "externalIdType", + # serial_number: "serialNumberType", + # token_code: "tokenCodeType", + # source_identity: "sourceIdentityType", + # }) + # + # @example Response structure + # + # resp.credentials.access_key_id #=> String + # resp.credentials.secret_access_key #=> String + # resp.credentials.session_token #=> String + # resp.credentials.expiration #=> Time + # resp.assumed_role_user.assumed_role_id #=> String + # 
resp.assumed_role_user.arn #=> String + # resp.packed_policy_size #=> Integer + # resp.source_identity #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole AWS API Documentation + # + # @overload assume_role(params = {}) + # @param [Hash] params ({}) + def assume_role(params = {}, options = {}) + req = build_request(:assume_role, params) + req.send_request(options) + end + + # Returns a set of temporary security credentials for users who have + # been authenticated via a SAML authentication response. This operation + # provides a mechanism for tying an enterprise identity store or + # directory to role-based Amazon Web Services access without + # user-specific credentials or configuration. For a comparison of + # `AssumeRoleWithSAML` with the other API operations that produce + # temporary credentials, see [Requesting Temporary Security + # Credentials][1] and [Comparing the Amazon Web Services STS API + # operations][2] in the *IAM User Guide*. + # + # The temporary security credentials returned by this operation consist + # of an access key ID, a secret access key, and a security token. + # Applications can use these temporary security credentials to sign + # calls to Amazon Web Services services. + # + # **Session Duration** + # + # By default, the temporary security credentials created by + # `AssumeRoleWithSAML` last for one hour. However, you can use the + # optional `DurationSeconds` parameter to specify the duration of your + # session. Your role session lasts for the duration that you specify, or + # until the time specified in the SAML authentication response's + # `SessionNotOnOrAfter` value, whichever is shorter. You can provide a + # `DurationSeconds` value from 900 seconds (15 minutes) up to the + # maximum session duration setting for the role. This setting can have a + # value from 1 hour to 12 hours. To learn how to view the maximum value + # for your role, see [View the Maximum Session Duration Setting for a + # Role][3] in the *IAM User Guide*. The maximum session duration limit + # applies when you use the `AssumeRole*` API operations or the + # `assume-role*` CLI commands. However the limit does not apply when you + # use those operations to create a console URL. For more information, + # see [Using IAM Roles][4] in the *IAM User Guide*. + # + # [Role chaining][5] limits your CLI or Amazon Web Services API role + # session to a maximum of one hour. When you use the `AssumeRole` API + # operation to assume a role, you can specify the duration of your role + # session with the `DurationSeconds` parameter. You can specify a + # parameter value of up to 43200 seconds (12 hours), depending on the + # maximum session duration setting for your role. However, if you assume + # a role using role chaining and provide a `DurationSeconds` parameter + # value greater than one hour, the operation fails. + # + # + # + # **Permissions** + # + # The temporary security credentials created by `AssumeRoleWithSAML` can + # be used to make API calls to any Amazon Web Services service with the + # following exception: you cannot call the STS `GetFederationToken` or + # `GetSessionToken` API operations. + # + # (Optional) You can pass inline or managed [session policies][6] to + # this operation. You can pass a single JSON policy document to use as + # an inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. 
The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based policy + # and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][6] in the *IAM User Guide*. + # + # Calling `AssumeRoleWithSAML` does not require the use of Amazon Web + # Services security credentials. The identity of the caller is validated + # by using keys in the metadata document that is uploaded for the SAML + # provider entity for your identity provider. + # + # Calling `AssumeRoleWithSAML` can result in an entry in your CloudTrail + # logs. The entry includes the value in the `NameID` element of the SAML + # assertion. We recommend that you use a `NameIDType` that is not + # associated with any personally identifiable information (PII). For + # example, you could instead use the persistent identifier + # (`urn:oasis:names:tc:SAML:2.0:nameid-format:persistent`). + # + # **Tags** + # + # (Optional) You can configure your IdP to pass attributes into your + # SAML assertion as session tags. Each session tag consists of a key + # name and an associated value. For more information about session tags, + # see [Passing Session Tags in STS][7] in the *IAM User Guide*. + # + # You can pass up to 50 session tags. The plaintext session tag keys + # can’t exceed 128 characters and the values can’t exceed 256 + # characters. For these and additional limits, see [IAM and STS + # Character Limits][8] in the *IAM User Guide*. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # You can pass a session tag with the same key as a tag that is attached + # to the role. When you do, session tags override the role's tags with + # the same key. + # + # An administrator must grant you the permissions necessary to pass + # session tags. The administrator can also create granular permissions + # to allow you to pass only specific session tags. For more information, + # see [Tutorial: Using Tags for Attribute-Based Access Control][9] in + # the *IAM User Guide*. + # + # You can set the session tags as transitive. Transitive tags persist + # during role chaining. For more information, see [Chaining Roles with + # Session Tags][10] in the *IAM User Guide*. + # + # **SAML Configuration** + # + # Before your application can call `AssumeRoleWithSAML`, you must + # configure your SAML identity provider (IdP) to issue the claims + # required by Amazon Web Services. Additionally, you must use Identity + # and Access Management (IAM) to create a SAML provider entity in your + # Amazon Web Services account that represents your identity provider. + # You must also create an IAM role that specifies this SAML provider in + # its trust policy. 
+ #
+ # For more information, see the following resources:
+ #
+ # * [About SAML 2.0-based Federation][11] in the *IAM User Guide*.
+ #
+ # * [Creating SAML Identity Providers][12] in the *IAM User Guide*.
+ #
+ # * [Configuring a Relying Party and Claims][13] in the *IAM User
+ # Guide*.
+ #
+ # * [Creating a Role for SAML 2.0 Federation][14] in the *IAM User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+ # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+ # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+ # [6]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ # [7]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ # [8]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ # [9]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+ # [10]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+ # [11]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+ # [12]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+ # [13]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+ # [14]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+ #
+ # @option params [required, String] :role_arn
+ # The Amazon Resource Name (ARN) of the role that the caller is
+ # assuming.
+ #
+ # @option params [required, String] :principal_arn
+ # The Amazon Resource Name (ARN) of the SAML provider in IAM that
+ # describes the IdP.
+ #
+ # @option params [required, String] :saml_assertion
+ # The base64 encoded SAML authentication response provided by the IdP.
+ #
+ # For more information, see [Configuring a Relying Party and Adding
+ # Claims][1] in the *IAM User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html
+ #
+ # @option params [Array<Types::PolicyDescriptorType>] :policy_arns
+ # The Amazon Resource Names (ARNs) of the IAM managed policies that you
+ # want to use as managed session policies. The policies must exist in
+ # the same account as the role.
+ #
+ # This parameter is optional. You can provide up to 10 managed policy
+ # ARNs. However, the plaintext that you use for both inline and managed
+ # session policies can't exceed 2,048 characters. For more information
+ # about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services
+ # Service Namespaces][1] in the Amazon Web Services General Reference.
+ #
+ # An Amazon Web Services conversion compresses the passed inline session
+ # policy, managed policy ARNs, and session tags into a packed binary
+ # format that has a separate limit. Your request can fail for this limit
+ # even if your plaintext meets the other requirements. The
+ # `PackedPolicySize` response element indicates by percentage how close
+ # the policies and tags for your request are to the upper size limit.
+ #
+ #
+ #
+ # Passing policies to this operation returns new temporary credentials.
+ # The resulting session's permissions are the intersection of the + # role's identity-based policy and the session policies. You can use + # the role's temporary credentials in subsequent Amazon Web Services + # API calls to access resources in the account that owns the role. You + # cannot use session policies to grant more permissions than those + # allowed by the identity-based policy of the role that is being + # assumed. For more information, see [Session Policies][2] in the *IAM + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [String] :policy + # An IAM policy in JSON format that you want to use as an inline session + # policy. + # + # This parameter is optional. Passing policies to this operation returns + # new temporary credentials. The resulting session's permissions are + # the intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in subsequent + # Amazon Web Services API calls to access resources in the account that + # owns the role. You cannot use session policies to grant more + # permissions than those allowed by the identity-based policy of the + # role that is being assumed. For more information, see [Session + # Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of the + # valid character list (\\u0020 through \\u00FF). It can also include + # the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) + # characters. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [Integer] :duration_seconds + # The duration, in seconds, of the role session. Your role session lasts + # for the duration that you specify for the `DurationSeconds` parameter, + # or until the time specified in the SAML authentication response's + # `SessionNotOnOrAfter` value, whichever is shorter. You can provide a + # `DurationSeconds` value from 900 seconds (15 minutes) up to the + # maximum session duration setting for the role. This setting can have a + # value from 1 hour to 12 hours. If you specify a value higher than this + # setting, the operation fails. For example, if you specify a session + # duration of 12 hours, but your administrator set the maximum session + # duration to 6 hours, your operation fails. To learn how to view the + # maximum value for your role, see [View the Maximum Session Duration + # Setting for a Role][1] in the *IAM User Guide*. + # + # By default, the value is set to `3600` seconds. + # + # The `DurationSeconds` parameter is separate from the duration of a + # console session that you might request using the returned credentials. 
+ # The request to the federation endpoint for a console sign-in token + # takes a `SessionDuration` parameter that specifies the maximum length + # of the console session. For more information, see [Creating a URL that + # Enables Federated Users to Access the Amazon Web Services Management + # Console][2] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + # + # @return [Types::AssumeRoleWithSAMLResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::AssumeRoleWithSAMLResponse#credentials #credentials} => Types::Credentials + # * {Types::AssumeRoleWithSAMLResponse#assumed_role_user #assumed_role_user} => Types::AssumedRoleUser + # * {Types::AssumeRoleWithSAMLResponse#packed_policy_size #packed_policy_size} => Integer + # * {Types::AssumeRoleWithSAMLResponse#subject #subject} => String + # * {Types::AssumeRoleWithSAMLResponse#subject_type #subject_type} => String + # * {Types::AssumeRoleWithSAMLResponse#issuer #issuer} => String + # * {Types::AssumeRoleWithSAMLResponse#audience #audience} => String + # * {Types::AssumeRoleWithSAMLResponse#name_qualifier #name_qualifier} => String + # * {Types::AssumeRoleWithSAMLResponse#source_identity #source_identity} => String + # + # + # @example Example: To assume a role using a SAML assertion + # + # resp = client.assume_role_with_saml({ + # duration_seconds: 3600, + # principal_arn: "arn:aws:iam::123456789012:saml-provider/SAML-test", + # role_arn: "arn:aws:iam::123456789012:role/TestSaml", + # saml_assertion: "VERYLONGENCODEDASSERTIONEXAMPLExzYW1sOkF1ZGllbmNlPmJsYW5rPC9zYW1sOkF1ZGllbmNlPjwvc2FtbDpBdWRpZW5jZVJlc3RyaWN0aW9uPjwvc2FtbDpDb25kaXRpb25zPjxzYW1sOlN1YmplY3Q+PHNhbWw6TmFtZUlEIEZvcm1hdD0idXJuOm9hc2lzOm5hbWVzOnRjOlNBTUw6Mi4wOm5hbWVpZC1mb3JtYXQ6dHJhbnNpZW50Ij5TYW1sRXhhbXBsZTwvc2FtbDpOYW1lSUQ+PHNhbWw6U3ViamVjdENvbmZpcm1hdGlvbiBNZXRob2Q9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMDpjbTpiZWFyZXIiPjxzYW1sOlN1YmplY3RDb25maXJtYXRpb25EYXRhIE5vdE9uT3JBZnRlcj0iMjAxOS0xMS0wMVQyMDoyNTowNS4xNDVaIiBSZWNpcGllbnQ9Imh0dHBzOi8vc2lnbmluLmF3cy5hbWF6b24uY29tL3NhbWwiLz48L3NhbWw6U3ViamVjdENvbmZpcm1hdGlvbj48L3NhbWw6U3ViamVjdD48c2FtbDpBdXRoblN0YXRlbWVudCBBdXRoPD94bWwgdmpSZXNwb25zZT4=", + # }) + # + # resp.to_h outputs the following: + # { + # assumed_role_user: { + # arn: "arn:aws:sts::123456789012:assumed-role/TestSaml", + # assumed_role_id: "ARO456EXAMPLE789:TestSaml", + # }, + # audience: "https://signin.aws.amazon.com/saml", + # credentials: { + # access_key_id: "ASIAV3ZUEFP6EXAMPLE", + # expiration: Time.parse("2019-11-01T20:26:47Z"), + # secret_access_key: "8P+SQvWIuLnKhh8d++jpw0nNmQRBZvNEXAMPLEKEY", + # session_token: "IQoJb3JpZ2luX2VjEOz////////////////////wEXAMPLEtMSJHMEUCIDoKK3JH9uGQE1z0sINr5M4jk+Na8KHDcCYRVjJCZEvOAiEA3OvJGtw1EcViOleS2vhs8VdCKFJQWPQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==", + # }, + # issuer: "https://integ.example.com/idp/shibboleth", + # name_qualifier: "SbdGOnUkh1i4+EXAMPLExL/jEvs=", + # packed_policy_size: 6, + # subject: "SamlExample", + # subject_type: "transient", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.assume_role_with_saml({ + # role_arn: "arnType", # required + 
# principal_arn: "arnType", # required + # saml_assertion: "SAMLAssertionType", # required + # policy_arns: [ + # { + # arn: "arnType", + # }, + # ], + # policy: "sessionPolicyDocumentType", + # duration_seconds: 1, + # }) + # + # @example Response structure + # + # resp.credentials.access_key_id #=> String + # resp.credentials.secret_access_key #=> String + # resp.credentials.session_token #=> String + # resp.credentials.expiration #=> Time + # resp.assumed_role_user.assumed_role_id #=> String + # resp.assumed_role_user.arn #=> String + # resp.packed_policy_size #=> Integer + # resp.subject #=> String + # resp.subject_type #=> String + # resp.issuer #=> String + # resp.audience #=> String + # resp.name_qualifier #=> String + # resp.source_identity #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML AWS API Documentation + # + # @overload assume_role_with_saml(params = {}) + # @param [Hash] params ({}) + def assume_role_with_saml(params = {}, options = {}) + req = build_request(:assume_role_with_saml, params) + req.send_request(options) + end + + # Returns a set of temporary security credentials for users who have + # been authenticated in a mobile or web application with a web identity + # provider. Example providers include the OAuth 2.0 providers Login with + # Amazon and Facebook, or any OpenID Connect-compatible identity + # provider such as Google or [Amazon Cognito federated identities][1]. + # + # For mobile applications, we recommend that you use Amazon Cognito. You + # can use Amazon Cognito with the [Amazon Web Services SDK for iOS + # Developer Guide][2] and the [Amazon Web Services SDK for Android + # Developer Guide][3] to uniquely identify a user. You can also supply + # the user with a consistent identity throughout the lifetime of an + # application. + # + # To learn more about Amazon Cognito, see [Amazon Cognito Overview][4] + # in *Amazon Web Services SDK for Android Developer Guide* and [Amazon + # Cognito Overview][5] in the *Amazon Web Services SDK for iOS Developer + # Guide*. + # + # + # + # Calling `AssumeRoleWithWebIdentity` does not require the use of Amazon + # Web Services security credentials. Therefore, you can distribute an + # application (for example, on mobile devices) that requests temporary + # security credentials without including long-term Amazon Web Services + # credentials in the application. You also don't need to deploy + # server-based proxy services that use long-term Amazon Web Services + # credentials. Instead, the identity of the caller is validated by using + # a token from the web identity provider. For a comparison of + # `AssumeRoleWithWebIdentity` with the other API operations that produce + # temporary credentials, see [Requesting Temporary Security + # Credentials][6] and [Comparing the Amazon Web Services STS API + # operations][7] in the *IAM User Guide*. + # + # The temporary security credentials returned by this API consist of an + # access key ID, a secret access key, and a security token. Applications + # can use these temporary security credentials to sign calls to Amazon + # Web Services service API operations. + # + # **Session Duration** + # + # By default, the temporary security credentials created by + # `AssumeRoleWithWebIdentity` last for one hour. However, you can use + # the optional `DurationSeconds` parameter to specify the duration of + # your session. You can provide a value from 900 seconds (15 minutes) up + # to the maximum session duration setting for the role. 
This setting can + # have a value from 1 hour to 12 hours. To learn how to view the maximum + # value for your role, see [View the Maximum Session Duration Setting + # for a Role][8] in the *IAM User Guide*. The maximum session duration + # limit applies when you use the `AssumeRole*` API operations or the + # `assume-role*` CLI commands. However, the limit does not apply when you + # use those operations to create a console URL. For more information, + # see [Using IAM Roles][9] in the *IAM User Guide*. + # + # **Permissions** + # + # The temporary security credentials created by + # `AssumeRoleWithWebIdentity` can be used to make API calls to any + # Amazon Web Services service with the following exception: you cannot + # call the STS `GetFederationToken` or `GetSessionToken` API operations. + # + # (Optional) You can pass inline or managed [session policies][10] to + # this operation. You can pass a single JSON policy document to use as + # an inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based policy + # and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][10] in the *IAM User Guide*. + # + # **Tags** + # + # (Optional) You can configure your IdP to pass attributes into your web + # identity token as session tags. Each session tag consists of a key + # name and an associated value. For more information about session tags, + # see [Passing Session Tags in STS][11] in the *IAM User Guide*. + # + # You can pass up to 50 session tags. The plaintext session tag keys + # can’t exceed 128 characters and the values can’t exceed 256 + # characters. For these and additional limits, see [IAM and STS + # Character Limits][12] in the *IAM User Guide*. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # You can pass a session tag with the same key as a tag that is attached + # to the role. When you do, the session tag overrides the role tag with + # the same key. + # + # An administrator must grant you the permissions necessary to pass + # session tags. The administrator can also create granular permissions + # to allow you to pass only specific session tags. For more information, + # see [Tutorial: Using Tags for Attribute-Based Access Control][13] in + # the *IAM User Guide*. + # + # You can set the session tags as transitive. Transitive tags persist + # during role chaining. For more information, see [Chaining Roles with + # Session Tags][14] in the *IAM User Guide*.
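+    #
+    # As a rough illustration of the `PackedPolicySize` guidance above, a
+    # caller could log how close a request came to the packed limit (a
+    # sketch; `params` is assumed to hold the request options, and the 90%
+    # threshold is an arbitrary choice, not an SDK value):
+    #
+    #     resp = client.assume_role_with_web_identity(params)
+    #     if resp.packed_policy_size && resp.packed_policy_size > 90
+    #       warn "policies/tags at #{resp.packed_policy_size}% of the packed limit"
+    #     end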
+ # + # **Identities** + # + # Before your application can call `AssumeRoleWithWebIdentity`, you must + # have an identity token from a supported identity provider and create a + # role that the application can assume. The role that your application + # assumes must trust the identity provider that is associated with the + # identity token. In other words, the identity provider must be + # specified in the role's trust policy. + # + # Calling `AssumeRoleWithWebIdentity` can result in an entry in your + # CloudTrail logs. The entry includes the [Subject][15] of the provided + # web identity token. We recommend that you avoid using any personally + # identifiable information (PII) in this field. For example, you could + # instead use a GUID or a pairwise identifier, as [suggested in the OIDC + # specification][16]. + # + # For more information about how to use web identity federation and the + # `AssumeRoleWithWebIdentity` API, see the following resources: + # + # * [Using Web Identity Federation API Operations for Mobile Apps][17] + # and [Federation Through a Web-based Identity Provider][18]. + # + # * [ Web Identity Federation Playground][19]. Walk through the process + # of authenticating through Login with Amazon, Facebook, or Google, + # getting temporary security credentials, and then using those + # credentials to make a request to Amazon Web Services. + # + # * [Amazon Web Services SDK for iOS Developer Guide][2] and [Amazon Web + # Services SDK for Android Developer Guide][3]. These toolkits contain + # sample apps that show how to invoke the identity providers. The + # toolkits then show how to use the information from these providers + # to get and use temporary security credentials. + # + # * [Web Identity Federation with Mobile Applications][20]. This article + # discusses web identity federation and shows an example of how to use + # web identity federation to get access to content in Amazon S3. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html + # [2]: http://aws.amazon.com/sdkforios/ + # [3]: http://aws.amazon.com/sdkforandroid/ + # [4]: https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840 + # [5]: https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664 + # [6]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html + # [7]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison + # [8]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [9]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html + # [10]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # [11]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [12]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # [13]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html + # [14]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining + # [15]: http://openid.net/specs/openid-connect-core-1_0.html#Claims + # [16]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes + # [17]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html + # [18]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity + # [19]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/ + # [20]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications + # + # @option params [required, String] :role_arn + # The Amazon Resource Name (ARN) of the role that the caller is + # assuming. + # + # @option params [required, String] :role_session_name + # An identifier for the assumed role session. Typically, you pass the + # name or identifier that is associated with the user who is using your + # application. That way, the temporary security credentials that your + # application will use are associated with that user. This session name + # is included as part of the ARN and assumed role ID in the + # `AssumedRoleUser` response element. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # + # @option params [required, String] :web_identity_token + # The OAuth 2.0 access token or OpenID Connect ID token that is provided + # by the identity provider. Your application must get this token by + # authenticating the user who is using your application with a web + # identity provider before the application makes an + # `AssumeRoleWithWebIdentity` call. + # + # @option params [String] :provider_id + # The fully qualified host component of the domain name of the OAuth 2.0 + # identity provider. Do not specify this value for an OpenID Connect + # identity provider. + # + # Currently `www.amazon.com` and `graph.facebook.com` are the only + # supported identity providers for OAuth 2.0 access tokens. Do not + # include URL schemes and port numbers. + # + # Do not specify this value for OpenID Connect ID tokens. 
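+    #
+    #   As a sketch of the distinction above (placeholder values; the token
+    #   is assumed to have been obtained from the IdP beforehand):
+    #
+    #       # OAuth 2.0 access token: pass the bare provider host
+    #       client.assume_role_with_web_identity(
+    #         role_arn: "arn:aws:iam::123456789012:role/FederatedWebIdentityRole",
+    #         role_session_name: "app1",
+    #         provider_id: "www.amazon.com", # no scheme, no port
+    #         web_identity_token: token,
+    #       )
+    #       # OpenID Connect ID token: omit provider_id entirely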
+ # + # @option params [Array<Types::PolicyDescriptorType>] :policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that you + # want to use as managed session policies. The policies must exist in + # the same account as the role. + # + # This parameter is optional. You can provide up to 10 managed policy + # ARNs. However, the plaintext that you use for both inline and managed + # session policies can't exceed 2,048 characters. For more information + # about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services + # Service Namespaces][1] in the Amazon Web Services General Reference. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # Passing policies to this operation returns new temporary credentials. + # The resulting session's permissions are the intersection of the + # role's identity-based policy and the session policies. You can use + # the role's temporary credentials in subsequent Amazon Web Services + # API calls to access resources in the account that owns the role. You + # cannot use session policies to grant more permissions than those + # allowed by the identity-based policy of the role that is being + # assumed. For more information, see [Session Policies][2] in the *IAM + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [String] :policy + # An IAM policy in JSON format that you want to use as an inline session + # policy. + # + # This parameter is optional. Passing policies to this operation returns + # new temporary credentials. The resulting session's permissions are + # the intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in subsequent + # Amazon Web Services API calls to access resources in the account that + # owns the role. You cannot use session policies to grant more + # permissions than those allowed by the identity-based policy of the + # role that is being assumed. For more information, see [Session + # Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of the + # valid character list (\\u0020 through \\u00FF). It can also include + # the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) + # characters. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit.
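+    #
+    #   Rather than hand-escaping the JSON string, an inline session policy
+    #   can be built with the standard library (a sketch; the statement
+    #   mirrors the example request shown below):
+    #
+    #       require "json"
+    #       policy = {
+    #         "Version"   => "2012-10-17",
+    #         "Statement" => [{
+    #           "Sid"      => "Stmt1",
+    #           "Effect"   => "Allow",
+    #           "Action"   => "s3:ListAllMyBuckets",
+    #           "Resource" => "*",
+    #         }],
+    #       }.to_json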
+ # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [Integer] :duration_seconds + # The duration, in seconds, of the role session. The value can range + # from 900 seconds (15 minutes) up to the maximum session duration + # setting for the role. This setting can have a value from 1 hour to 12 + # hours. If you specify a value higher than this setting, the operation + # fails. For example, if you specify a session duration of 12 hours, but + # your administrator set the maximum session duration to 6 hours, your + # operation fails. To learn how to view the maximum value for your role, + # see [View the Maximum Session Duration Setting for a Role][1] in the + # *IAM User Guide*. + # + # By default, the value is set to `3600` seconds. + # + # The `DurationSeconds` parameter is separate from the duration of a + # console session that you might request using the returned credentials. + # The request to the federation endpoint for a console sign-in token + # takes a `SessionDuration` parameter that specifies the maximum length + # of the console session. For more information, see [Creating a URL that + # Enables Federated Users to Access the Amazon Web Services Management + # Console][2] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + # + # @return [Types::AssumeRoleWithWebIdentityResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::AssumeRoleWithWebIdentityResponse#credentials #credentials} => Types::Credentials + # * {Types::AssumeRoleWithWebIdentityResponse#subject_from_web_identity_token #subject_from_web_identity_token} => String + # * {Types::AssumeRoleWithWebIdentityResponse#assumed_role_user #assumed_role_user} => Types::AssumedRoleUser + # * {Types::AssumeRoleWithWebIdentityResponse#packed_policy_size #packed_policy_size} => Integer + # * {Types::AssumeRoleWithWebIdentityResponse#provider #provider} => String + # * {Types::AssumeRoleWithWebIdentityResponse#audience #audience} => String + # * {Types::AssumeRoleWithWebIdentityResponse#source_identity #source_identity} => String + # + # + # @example Example: To assume a role as an OpenID Connect-federated user + # + # resp = client.assume_role_with_web_identity({ + # duration_seconds: 3600, + # policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Stmt1\",\"Effect\":\"Allow\",\"Action\":\"s3:ListAllMyBuckets\",\"Resource\":\"*\"}]}", + # provider_id: "www.amazon.com", + # role_arn: "arn:aws:iam::123456789012:role/FederatedWebIdentityRole", + # role_session_name: "app1", + # web_identity_token: "Atza%7CIQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ", + # }) + # + # resp.to_h outputs the following: + # { + # assumed_role_user: { + # arn: "arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1", + # assumed_role_id: "AROACLKWSDQRAOEXAMPLE:app1", + # }, + # audience: "client.5498841531868486423.1548@apps.example.com", + # credentials: { + # access_key_id: 
"AKIAIOSFODNN7EXAMPLE", + # expiration: Time.parse("2014-10-24T23:00:23Z"), + # secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY", + # session_token: "AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IEXAMPLE", + # }, + # packed_policy_size: 123, + # provider: "www.amazon.com", + # subject_from_web_identity_token: "amzn1.account.AF6RHO7KZU5XRVQJGXK6HEXAMPLE", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.assume_role_with_web_identity({ + # role_arn: "arnType", # required + # role_session_name: "roleSessionNameType", # required + # web_identity_token: "clientTokenType", # required + # provider_id: "urlType", + # policy_arns: [ + # { + # arn: "arnType", + # }, + # ], + # policy: "sessionPolicyDocumentType", + # duration_seconds: 1, + # }) + # + # @example Response structure + # + # resp.credentials.access_key_id #=> String + # resp.credentials.secret_access_key #=> String + # resp.credentials.session_token #=> String + # resp.credentials.expiration #=> Time + # resp.subject_from_web_identity_token #=> String + # resp.assumed_role_user.assumed_role_id #=> String + # resp.assumed_role_user.arn #=> String + # resp.packed_policy_size #=> Integer + # resp.provider #=> String + # resp.audience #=> String + # resp.source_identity #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity AWS API Documentation + # + # @overload assume_role_with_web_identity(params = {}) + # @param [Hash] params ({}) + def assume_role_with_web_identity(params = {}, options = {}) + req = build_request(:assume_role_with_web_identity, params) + req.send_request(options) + end + + # Decodes additional information about the authorization status of a + # request from an encoded message returned in response to an Amazon Web + # Services request. + # + # For example, if a user is not authorized to perform an operation that + # he or she has requested, the request returns a + # `Client.UnauthorizedOperation` response (an HTTP 403 response). Some + # Amazon Web Services operations additionally return an encoded message + # that can provide details about this authorization failure. + # + # Only certain Amazon Web Services operations return an encoded + # authorization message. The documentation for an individual operation + # indicates whether that operation returns an encoded message in + # addition to returning an HTTP code. + # + # + # + # The message is encoded because the details of the authorization status + # can contain privileged information that the user who requested the + # operation should not see. To decode an authorization status message, a + # user must be granted permissions through an IAM [policy][1] to request + # the `DecodeAuthorizationMessage` (`sts:DecodeAuthorizationMessage`) + # action. + # + # The decoded message includes the following type of information: + # + # * Whether the request was denied due to an explicit deny or due to the + # absence of an explicit allow. For more information, see [Determining + # Whether a Request is Allowed or Denied][2] in the *IAM User Guide*. + # + # * The principal who made the request. + # + # * The requested action. + # + # * The requested resource. + # + # * The values of condition keys in the context of the user's request. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow + # + # @option params [required, String] :encoded_message + # The encoded message that was returned with the response. + # + # @return [Types::DecodeAuthorizationMessageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DecodeAuthorizationMessageResponse#decoded_message #decoded_message} => String + # + # + # @example Example: To decode information about an authorization status of a request + # + # resp = client.decode_authorization_message({ + # encoded_message: "", + # }) + # + # resp.to_h outputs the following: + # { + # decoded_message: "{\"allowed\": \"false\",\"explicitDeny\": \"false\",\"matchedStatements\": \"\",\"failures\": \"\",\"context\": {\"principal\": {\"id\": \"AIDACKCEVSQ6C2EXAMPLE\",\"name\": \"Bob\",\"arn\": \"arn:aws:iam::123456789012:user/Bob\"},\"action\": \"ec2:StopInstances\",\"resource\": \"arn:aws:ec2:us-east-1:123456789012:instance/i-dd01c9bd\",\"conditions\": [{\"item\": {\"key\": \"ec2:Tenancy\",\"values\": [\"default\"]},{\"item\": {\"key\": \"ec2:ResourceTag/elasticbeanstalk:environment-name\",\"values\": [\"Default-Environment\"]}},(Additional items ...)]}}", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.decode_authorization_message({ + # encoded_message: "encodedMessageType", # required + # }) + # + # @example Response structure + # + # resp.decoded_message #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage AWS API Documentation + # + # @overload decode_authorization_message(params = {}) + # @param [Hash] params ({}) + def decode_authorization_message(params = {}, options = {}) + req = build_request(:decode_authorization_message, params) + req.send_request(options) + end + + # Returns the account identifier for the specified access key ID. + # + # Access keys consist of two parts: an access key ID (for example, + # `AKIAIOSFODNN7EXAMPLE`) and a secret access key (for example, + # `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`). For more information + # about access keys, see [Managing Access Keys for IAM Users][1] in the + # *IAM User Guide*. + # + # When you pass an access key ID to this operation, it returns the ID of + # the Amazon Web Services account to which the keys belong. Access key + # IDs beginning with `AKIA` are long-term credentials for an IAM user or + # the Amazon Web Services account root user. Access key IDs beginning + # with `ASIA` are temporary credentials that are created using STS + # operations. If the account in the response belongs to you, you can + # sign in as the root user and review your root user access keys. Then, + # you can pull a [credentials report][2] to learn which IAM user owns + # the keys. To learn who requested the temporary credentials for an + # `ASIA` access key, view the STS events in your [CloudTrail logs][3] in + # the *IAM User Guide*. + # + # This operation does not indicate the state of the access key. The key + # might be active, inactive, or deleted. Active keys might not have + # permissions to perform an operation. Providing a deleted access key + # might return an error that the key doesn't exist. 
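+    #
+    # A small sketch of the key-prefix convention described above (assuming
+    # a hypothetical `key_id` string):
+    #
+    #     kind = case key_id
+    #            when /\AAKIA/ then :long_term # IAM user or root user key
+    #            when /\AASIA/ then :temporary # STS-issued credentials
+    #            else :unknown
+    #            end
+    #     owning_account = client.get_access_key_info(access_key_id: key_id).account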
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html + # + # @option params [required, String] :access_key_id + # The identifier of an access key. + # + # This parameter allows (through its regex pattern) a string of + # characters that can consist of any upper- or lowercase letter or + # digit. + # + # @return [Types::GetAccessKeyInfoResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetAccessKeyInfoResponse#account #account} => String + # + # @example Request syntax with placeholder values + # + # resp = client.get_access_key_info({ + # access_key_id: "accessKeyIdType", # required + # }) + # + # @example Response structure + # + # resp.account #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo AWS API Documentation + # + # @overload get_access_key_info(params = {}) + # @param [Hash] params ({}) + def get_access_key_info(params = {}, options = {}) + req = build_request(:get_access_key_info, params) + req.send_request(options) + end + + # Returns details about the IAM user or role whose credentials are used + # to call the operation. + # + # No permissions are required to perform this operation. If an + # administrator adds a policy to your IAM user or role that explicitly + # denies access to the `sts:GetCallerIdentity` action, you can still + # perform this operation. Permissions are not required because the same + # information is returned when an IAM user or role is denied access. To + # view an example response, see [I Am Not Authorized to Perform: + # iam:DeleteVirtualMFADevice][1] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa + # + # @return [Types::GetCallerIdentityResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetCallerIdentityResponse#user_id #user_id} => String + # * {Types::GetCallerIdentityResponse#account #account} => String + # * {Types::GetCallerIdentityResponse#arn #arn} => String + # + # + # @example Example: To get details about a calling IAM user + # + # # This example shows a request and response made with the credentials for a user named Alice in the AWS account + # # 123456789012. + # + # resp = client.get_caller_identity({ + # }) + # + # resp.to_h outputs the following: + # { + # account: "123456789012", + # arn: "arn:aws:iam::123456789012:user/Alice", + # user_id: "AKIAI44QH8DHBEXAMPLE", + # } + # + # @example Example: To get details about a calling user federated with AssumeRole + # + # # This example shows a request and response made with temporary credentials created by AssumeRole. The name of the assumed + # # role is my-role-name, and the RoleSessionName is set to my-role-session-name. 
+ # + # resp = client.get_caller_identity({ + # }) + # + # resp.to_h outputs the following: + # { + # account: "123456789012", + # arn: "arn:aws:sts::123456789012:assumed-role/my-role-name/my-role-session-name", + # user_id: "AKIAI44QH8DHBEXAMPLE:my-role-session-name", + # } + # + # @example Example: To get details about a calling user federated with GetFederationToken + # + # # This example shows a request and response made with temporary credentials created by using GetFederationToken. The Name + # # parameter is set to my-federated-user-name. + # + # resp = client.get_caller_identity({ + # }) + # + # resp.to_h outputs the following: + # { + # account: "123456789012", + # arn: "arn:aws:sts::123456789012:federated-user/my-federated-user-name", + # user_id: "123456789012:my-federated-user-name", + # } + # + # @example Response structure + # + # resp.user_id #=> String + # resp.account #=> String + # resp.arn #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity AWS API Documentation + # + # @overload get_caller_identity(params = {}) + # @param [Hash] params ({}) + def get_caller_identity(params = {}, options = {}) + req = build_request(:get_caller_identity, params) + req.send_request(options) + end + + # Returns a set of temporary security credentials (consisting of an + # access key ID, a secret access key, and a security token) for a + # federated user. A typical use is in a proxy application that gets + # temporary security credentials on behalf of distributed applications + # inside a corporate network. You must call the `GetFederationToken` + # operation using the long-term security credentials of an IAM user. As + # a result, this call is appropriate in contexts where those credentials + # can be safely stored, usually in a server-based application. For a + # comparison of `GetFederationToken` with the other API operations that + # produce temporary credentials, see [Requesting Temporary Security + # Credentials][1] and [Comparing the Amazon Web Services STS API + # operations][2] in the *IAM User Guide*. + # + # You can create a mobile-based or browser-based app that can + # authenticate users using a web identity provider like Login with + # Amazon, Facebook, Google, or an OpenID Connect-compatible identity + # provider. In this case, we recommend that you use [Amazon Cognito][3] + # or `AssumeRoleWithWebIdentity`. For more information, see [Federation + # Through a Web-based Identity Provider][4] in the *IAM User Guide*. + # + # + # + # You can also call `GetFederationToken` using the security credentials + # of an Amazon Web Services account root user, but we do not recommend + # it. Instead, we recommend that you create an IAM user for the purpose + # of the proxy application. Then attach a policy to the IAM user that + # limits federated users to only the actions and resources that they + # need to access. For more information, see [IAM Best Practices][5] in + # the *IAM User Guide*. + # + # **Session duration** + # + # The temporary credentials are valid for the specified duration, from + # 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 + # hours). The default session duration is 43,200 seconds (12 hours). + # Temporary credentials obtained by using the Amazon Web Services + # account root user credentials have a maximum duration of 3,600 seconds + # (1 hour). 
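+    #
+    # For instance (a sketch; the user name and policy variable are
+    # illustrative), an IAM-user caller could request the 36-hour maximum:
+    #
+    #     MAX_FEDERATION_SECONDS = 129_600 # 36 hours, per the limits above
+    #     resp = client.get_federation_token(
+    #       name: "proxy-app-user",
+    #       policy: policy_json, # hypothetical inline session policy
+    #       duration_seconds: MAX_FEDERATION_SECONDS,
+    #     )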
+ # + # **Permissions** + # + # You can use the temporary credentials created by `GetFederationToken` + # in any Amazon Web Services service with the following exceptions: + # + # * You cannot call any IAM operations using the CLI or the Amazon Web + # Services API. This limitation does not apply to console sessions. + # + # * You cannot call any STS operations except `GetCallerIdentity`. + # + # You can use temporary credentials for single sign-on (SSO) to the + # console. + # + # You must pass an inline or managed [session policy][6] to this + # operation. You can pass a single JSON policy document to use as an + # inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. + # + # Though the session policy parameters are optional, if you do not pass + # a policy, then the resulting federated user session has no + # permissions. When you pass session policies, the session permissions + # are the intersection of the IAM user policies and the session policies + # that you pass. This gives you a way to further restrict the + # permissions for a federated user. You cannot use session policies to + # grant more permissions than those that are defined in the permissions + # policy of the IAM user. For more information, see [Session + # Policies][6] in the *IAM User Guide*. For information about using + # `GetFederationToken` to create temporary security credentials, see + # [GetFederationToken—Federation Through a Custom Identity Broker][7]. + # + # You can use the credentials to access a resource that has a + # resource-based policy. If that policy specifically references the + # federated user session in the `Principal` element of the policy, the + # session has the permissions allowed by the policy. These permissions + # are granted in addition to the permissions granted by the session + # policies. + # + # **Tags** + # + # (Optional) You can pass tag key-value pairs to your session. These are + # called session tags. For more information about session tags, see + # [Passing Session Tags in STS][8] in the *IAM User Guide*. + # + # You can create a mobile-based or browser-based app that can + # authenticate users using a web identity provider like Login with + # Amazon, Facebook, Google, or an OpenID Connect-compatible identity + # provider. In this case, we recommend that you use [Amazon Cognito][3] + # or `AssumeRoleWithWebIdentity`. For more information, see [Federation + # Through a Web-based Identity Provider][4] in the *IAM User Guide*. + # + # + # + # An administrator must grant you the permissions necessary to pass + # session tags. The administrator can also create granular permissions + # to allow you to pass only specific session tags. For more information, + # see [Tutorial: Using Tags for Attribute-Based Access Control][9] in + # the *IAM User Guide*. + # + # Tag key–value pairs are not case sensitive, but case is preserved. + # This means that you cannot have separate `Department` and `department` + # tag keys. Assume that the user that you are federating has the + # `Department`=`Marketing` tag and you pass the + # `department`=`engineering` session tag. `Department` and `department` + # are not saved as separate tags, and the session tag passed in the + # request takes precedence over the user tag. 
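+    #
+    # To make that precedence rule concrete (a sketch reusing the values
+    # from the paragraph above):
+    #
+    #     client.get_federation_token(
+    #       name: "Bob",
+    #       policy: policy_json, # hypothetical inline session policy
+    #       tags: [{ key: "department", value: "engineering" }],
+    #     )
+    #     # The session carries department=engineering only; the user's
+    #     # Department=Marketing tag is overridden, not kept alongside it.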
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison + # [3]: http://aws.amazon.com/cognito/ + # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity + # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html + # [6]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # [7]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken + # [8]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [9]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html + # + # @option params [required, String] :name + # The name of the federated user. The name is used as an identifier for + # the temporary security credentials (such as `Bob`). For example, you + # can reference the federated user name in a resource-based policy, such + # as in an Amazon S3 bucket policy. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # + # @option params [String] :policy + # An IAM policy in JSON format that you want to use as an inline session + # policy. + # + # You must pass an inline or managed [session policy][1] to this + # operation. You can pass a single JSON policy document to use as an + # inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. + # + # This parameter is optional. However, if you do not pass any session + # policies, then the resulting federated user session has no + # permissions. + # + # When you pass session policies, the session permissions are the + # intersection of the IAM user policies and the session policies that + # you pass. This gives you a way to further restrict the permissions for + # a federated user. You cannot use session policies to grant more + # permissions than those that are defined in the permissions policy of + # the IAM user. For more information, see [Session Policies][1] in the + # *IAM User Guide*. + # + # The resulting credentials can be used to access a resource that has a + # resource-based policy. If that policy specifically references the + # federated user session in the `Principal` element of the policy, the + # session has the permissions allowed by the policy. These permissions + # are granted in addition to the permissions that are granted by the + # session policies. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of the + # valid character list (\\u0020 through \\u00FF). It can also include + # the tab (\\u0009), linefeed (\\u000A), and carriage return (\\u000D) + # characters. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. 
The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # + # @option params [Array<Types::PolicyDescriptorType>] :policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that you + # want to use as a managed session policy. The policies must exist in + # the same account as the IAM user that is requesting federated access. + # + # You must pass an inline or managed [session policy][1] to this + # operation. You can pass a single JSON policy document to use as an + # inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. You can provide up to 10 managed + # policy ARNs. For more information about ARNs, see [Amazon Resource + # Names (ARNs) and Amazon Web Services Service Namespaces][2] in the + # Amazon Web Services General Reference. + # + # This parameter is optional. However, if you do not pass any session + # policies, then the resulting federated user session has no + # permissions. + # + # When you pass session policies, the session permissions are the + # intersection of the IAM user policies and the session policies that + # you pass. This gives you a way to further restrict the permissions for + # a federated user. You cannot use session policies to grant more + # permissions than those that are defined in the permissions policy of + # the IAM user. For more information, see [Session Policies][1] in the + # *IAM User Guide*. + # + # The resulting credentials can be used to access a resource that has a + # resource-based policy. If that policy specifically references the + # federated user session in the `Principal` element of the policy, the + # session has the permissions allowed by the policy. These permissions + # are granted in addition to the permissions that are granted by the + # session policies. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # [2]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # + # @option params [Integer] :duration_seconds + # The duration, in seconds, that the session should last. Acceptable + # durations for federation sessions range from 900 seconds (15 minutes) + # to 129,600 seconds (36 hours), with 43,200 seconds (12 hours) as the + # default. Sessions obtained using Amazon Web Services account root user + # credentials are restricted to a maximum of 3,600 seconds (one hour). + # If the specified duration is longer than one hour, the session + # obtained by using root user credentials defaults to one hour. + # + # @option params [Array<Types::Tag>] :tags + # A list of session tags. Each session tag consists of a key name and an + # associated value.
For more information about session tags, see + # [Passing Session Tags in STS][1] in the *IAM User Guide*. + # + # This parameter is optional. You can pass up to 50 session tags. The + # plaintext session tag keys can’t exceed 128 characters and the values + # can’t exceed 256 characters. For these and additional limits, see [IAM + # and STS Character Limits][2] in the *IAM User Guide*. + # + # An Amazon Web Services conversion compresses the passed inline session + # policy, managed policy ARNs, and session tags into a packed binary + # format that has a separate limit. Your request can fail for this limit + # even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how close + # the policies and tags for your request are to the upper size limit. + # + # + # + # You can pass a session tag with the same key as a tag that is already + # attached to the user you are federating. When you do, session tags + # override a user tag with the same key. + # + # Tag key–value pairs are not case sensitive, but case is preserved. + # This means that you cannot have separate `Department` and `department` + # tag keys. Assume that the role has the `Department`=`Marketing` tag + # and you pass the `department`=`engineering` session tag. `Department` + # and `department` are not saved as separate tags, and the session tag + # passed in the request takes precedence over the role tag. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # + # @return [Types::GetFederationTokenResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetFederationTokenResponse#credentials #credentials} => Types::Credentials + # * {Types::GetFederationTokenResponse#federated_user #federated_user} => Types::FederatedUser + # * {Types::GetFederationTokenResponse#packed_policy_size #packed_policy_size} => Integer + # + # + # @example Example: To get temporary credentials for a role by using GetFederationToken + # + # resp = client.get_federation_token({ + # duration_seconds: 3600, + # name: "testFedUserSession", + # policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Stmt1\",\"Effect\":\"Allow\",\"Action\":\"s3:ListAllMyBuckets\",\"Resource\":\"*\"}]}", + # tags: [ + # { + # key: "Project", + # value: "Pegasus", + # }, + # { + # key: "Cost-Center", + # value: "98765", + # }, + # ], + # }) + # + # resp.to_h outputs the following: + # { + # credentials: { + # access_key_id: "AKIAIOSFODNN7EXAMPLE", + # expiration: Time.parse("2011-07-15T23:28:33.359Z"), + # secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY", + # session_token: "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==", + # }, + # federated_user: { + # arn: "arn:aws:sts::123456789012:federated-user/Bob", + # federated_user_id: "123456789012:Bob", + # }, + # packed_policy_size: 8, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_federation_token({ + # name: "userNameType", # required + # policy: "sessionPolicyDocumentType", + # policy_arns: [ + # { 
+ # arn: "arnType", + # }, + # ], + # duration_seconds: 1, + # tags: [ + # { + # key: "tagKeyType", # required + # value: "tagValueType", # required + # }, + # ], + # }) + # + # @example Response structure + # + # resp.credentials.access_key_id #=> String + # resp.credentials.secret_access_key #=> String + # resp.credentials.session_token #=> String + # resp.credentials.expiration #=> Time + # resp.federated_user.federated_user_id #=> String + # resp.federated_user.arn #=> String + # resp.packed_policy_size #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken AWS API Documentation + # + # @overload get_federation_token(params = {}) + # @param [Hash] params ({}) + def get_federation_token(params = {}, options = {}) + req = build_request(:get_federation_token, params) + req.send_request(options) + end + + # Returns a set of temporary credentials for an Amazon Web Services + # account or IAM user. The credentials consist of an access key ID, a + # secret access key, and a security token. Typically, you use + # `GetSessionToken` if you want to use MFA to protect programmatic calls + # to specific Amazon Web Services API operations like Amazon EC2 + # `StopInstances`. MFA-enabled IAM users would need to call + # `GetSessionToken` and submit an MFA code that is associated with their + # MFA device. Using the temporary security credentials that are returned + # from the call, IAM users can then make programmatic calls to API + # operations that require MFA authentication. If you do not supply a + # correct MFA code, then the API returns an access denied error. For a + # comparison of `GetSessionToken` with the other API operations that + # produce temporary credentials, see [Requesting Temporary Security + # Credentials][1] and [Comparing the Amazon Web Services STS API + # operations][2] in the *IAM User Guide*. + # + # No permissions are required for users to perform this operation. The + # purpose of the `sts:GetSessionToken` operation is to authenticate the + # user using MFA. You cannot use policies to control authentication + # operations. For more information, see [Permissions for + # GetSessionToken][3] in the *IAM User Guide*. + # + # + # + # **Session Duration** + # + # The `GetSessionToken` operation must be called by using the long-term + # Amazon Web Services security credentials of the Amazon Web Services + # account root user or an IAM user. Credentials that are created by IAM + # users are valid for the duration that you specify. This duration can + # range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds + # (36 hours), with a default of 43,200 seconds (12 hours). Credentials + # based on account credentials can range from 900 seconds (15 minutes) + # up to 3,600 seconds (1 hour), with a default of 1 hour. + # + # **Permissions** + # + # The temporary security credentials created by `GetSessionToken` can be + # used to make API calls to any Amazon Web Services service with the + # following exceptions: + # + # * You cannot call any IAM API operations unless MFA authentication + # information is included in the request. + # + # * You cannot call any STS API *except* `AssumeRole` or + # `GetCallerIdentity`. + # + # We recommend that you do not call `GetSessionToken` with Amazon Web + # Services account root user credentials. Instead, follow our [best + # practices][4] by creating one or more IAM users, giving them the + # necessary permissions, and using IAM users for everyday interaction + # with Amazon Web Services. 
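+    #
+    # The returned credentials are typically fed into a fresh client for
+    # the MFA-protected calls (a sketch; the EC2 client is illustrative,
+    # requires the aws-sdk-ec2 gem, and the MFA inputs are assumed):
+    #
+    #     creds = client.get_session_token(
+    #       serial_number: mfa_serial,
+    #       token_code: code,
+    #     ).credentials
+    #     ec2 = Aws::EC2::Client.new(
+    #       credentials: Aws::Credentials.new(
+    #         creds.access_key_id,
+    #         creds.secret_access_key,
+    #         creds.session_token
+    #       )
+    #     )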
+ # + # + # + # The credentials that are returned by `GetSessionToken` are based on + # permissions associated with the user whose credentials were used to + # call the operation. If `GetSessionToken` is called using Amazon Web + # Services account root user credentials, the temporary credentials have + # root user permissions. Similarly, if `GetSessionToken` is called using + # the credentials of an IAM user, the temporary credentials have the + # same permissions as the IAM user. + # + # For more information about using `GetSessionToken` to create temporary + # credentials, go to [Temporary Credentials for Users in Untrusted + # Environments][5] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html + # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users + # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken + # + # @option params [Integer] :duration_seconds + # The duration, in seconds, that the credentials should remain valid. + # Acceptable durations for IAM user sessions range from 900 seconds (15 + # minutes) to 129,600 seconds (36 hours), with 43,200 seconds (12 hours) + # as the default. Sessions for Amazon Web Services account owners are + # restricted to a maximum of 3,600 seconds (one hour). If the duration + # is longer than one hour, the session for Amazon Web Services account + # owners defaults to one hour. + # + # @option params [String] :serial_number + # The identification number of the MFA device that is associated with + # the IAM user who is making the `GetSessionToken` call. Specify this + # value if the IAM user has a policy that requires MFA authentication. + # The value is either the serial number for a hardware device (such as + # `GAHT12345678`) or an Amazon Resource Name (ARN) for a virtual device + # (such as `arn:aws:iam::123456789012:mfa/user`). You can find the + # device for an IAM user by going to the Amazon Web Services Management + # Console and viewing the user's security credentials. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@:/- + # + # @option params [String] :token_code + # The value provided by the MFA device, if MFA is required. If any + # policy requires the IAM user to submit an MFA code, specify this + # value. If MFA authentication is required, the user must provide a code + # when requesting a set of temporary security credentials. A user who + # fails to provide the code receives an "access denied" response when + # requesting resources that require MFA authentication. + # + # The format for this parameter, as described by its regex pattern, is a + # sequence of six numeric digits. 
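+    #
+    #   A quick local sanity check of that format before making the call (a
+    #   sketch):
+    #
+    #       unless token_code.to_s.match?(/\A\d{6}\z/)
+    #         raise ArgumentError, "MFA token code must be six digits"
+    #       end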
+ # + # @return [Types::GetSessionTokenResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetSessionTokenResponse#credentials #credentials} => Types::Credentials + # + # + # @example Example: To get temporary credentials for an IAM user or an AWS account + # + # resp = client.get_session_token({ + # duration_seconds: 3600, + # serial_number: "YourMFASerialNumber", + # token_code: "123456", + # }) + # + # resp.to_h outputs the following: + # { + # credentials: { + # access_key_id: "AKIAIOSFODNN7EXAMPLE", + # expiration: Time.parse("2011-07-11T19:55:29.611Z"), + # secret_access_key: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY", + # session_token: "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE", + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_session_token({ + # duration_seconds: 1, + # serial_number: "serialNumberType", + # token_code: "tokenCodeType", + # }) + # + # @example Response structure + # + # resp.credentials.access_key_id #=> String + # resp.credentials.secret_access_key #=> String + # resp.credentials.session_token #=> String + # resp.credentials.expiration #=> Time + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken AWS API Documentation + # + # @overload get_session_token(params = {}) + # @param [Hash] params ({}) + def get_session_token(params = {}, options = {}) + req = build_request(:get_session_token, params) + req.send_request(options) + end + + # @!endgroup + + # @param params ({}) + # @api private + def build_request(operation_name, params = {}) + handlers = @handlers.for(operation_name) + context = Seahorse::Client::RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + context[:gem_name] = 'aws-sdk-core' + context[:gem_version] = '3.171.0' + Seahorse::Client::Request.new(handlers, context) + end + + # @api private + # @deprecated + def waiter_names + [] + end + + class << self + + # @api private + attr_reader :identifier + + # @api private + def errors_module + Errors + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client_api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client_api.rb new file mode 100644 index 0000000..6194351 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/client_api.rb @@ -0,0 +1,344 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + # @api private + module ClientApi + + include Seahorse::Model + + AssumeRoleRequest = Shapes::StructureShape.new(name: 'AssumeRoleRequest') + AssumeRoleResponse = Shapes::StructureShape.new(name: 'AssumeRoleResponse') + AssumeRoleWithSAMLRequest = Shapes::StructureShape.new(name: 'AssumeRoleWithSAMLRequest') + AssumeRoleWithSAMLResponse = Shapes::StructureShape.new(name: 'AssumeRoleWithSAMLResponse') + AssumeRoleWithWebIdentityRequest = Shapes::StructureShape.new(name: 'AssumeRoleWithWebIdentityRequest') + AssumeRoleWithWebIdentityResponse = Shapes::StructureShape.new(name: 'AssumeRoleWithWebIdentityResponse') + AssumedRoleUser = Shapes::StructureShape.new(name: 'AssumedRoleUser') + Audience = Shapes::StringShape.new(name: 'Audience') + Credentials = Shapes::StructureShape.new(name: 'Credentials') + DecodeAuthorizationMessageRequest = Shapes::StructureShape.new(name: 'DecodeAuthorizationMessageRequest') + DecodeAuthorizationMessageResponse = Shapes::StructureShape.new(name: 'DecodeAuthorizationMessageResponse') + ExpiredTokenException = Shapes::StructureShape.new(name: 'ExpiredTokenException') + FederatedUser = Shapes::StructureShape.new(name: 'FederatedUser') + GetAccessKeyInfoRequest = Shapes::StructureShape.new(name: 'GetAccessKeyInfoRequest') + GetAccessKeyInfoResponse = Shapes::StructureShape.new(name: 'GetAccessKeyInfoResponse') + GetCallerIdentityRequest = Shapes::StructureShape.new(name: 'GetCallerIdentityRequest') + GetCallerIdentityResponse = Shapes::StructureShape.new(name: 'GetCallerIdentityResponse') + GetFederationTokenRequest = Shapes::StructureShape.new(name: 'GetFederationTokenRequest') + GetFederationTokenResponse = Shapes::StructureShape.new(name: 'GetFederationTokenResponse') + GetSessionTokenRequest = Shapes::StructureShape.new(name: 'GetSessionTokenRequest') + GetSessionTokenResponse = Shapes::StructureShape.new(name: 'GetSessionTokenResponse') + IDPCommunicationErrorException = Shapes::StructureShape.new(name: 'IDPCommunicationErrorException') + IDPRejectedClaimException = Shapes::StructureShape.new(name: 'IDPRejectedClaimException') + InvalidAuthorizationMessageException = Shapes::StructureShape.new(name: 'InvalidAuthorizationMessageException') + InvalidIdentityTokenException = Shapes::StructureShape.new(name: 'InvalidIdentityTokenException') + Issuer = Shapes::StringShape.new(name: 'Issuer') + MalformedPolicyDocumentException = Shapes::StructureShape.new(name: 'MalformedPolicyDocumentException') + NameQualifier = Shapes::StringShape.new(name: 'NameQualifier') + PackedPolicyTooLargeException = Shapes::StructureShape.new(name: 'PackedPolicyTooLargeException') + PolicyDescriptorType = Shapes::StructureShape.new(name: 'PolicyDescriptorType') + RegionDisabledException = Shapes::StructureShape.new(name: 'RegionDisabledException') + SAMLAssertionType = Shapes::StringShape.new(name: 'SAMLAssertionType') + Subject = Shapes::StringShape.new(name: 'Subject') + SubjectType = Shapes::StringShape.new(name: 'SubjectType') + Tag = Shapes::StructureShape.new(name: 'Tag') + accessKeyIdType = Shapes::StringShape.new(name: 'accessKeyIdType') + accessKeySecretType = Shapes::StringShape.new(name: 'accessKeySecretType') + accountType = Shapes::StringShape.new(name: 'accountType') + arnType = Shapes::StringShape.new(name: 'arnType') + assumedRoleIdType = Shapes::StringShape.new(name: 'assumedRoleIdType') + 
clientTokenType = Shapes::StringShape.new(name: 'clientTokenType') + dateType = Shapes::TimestampShape.new(name: 'dateType') + decodedMessageType = Shapes::StringShape.new(name: 'decodedMessageType') + durationSecondsType = Shapes::IntegerShape.new(name: 'durationSecondsType') + encodedMessageType = Shapes::StringShape.new(name: 'encodedMessageType') + expiredIdentityTokenMessage = Shapes::StringShape.new(name: 'expiredIdentityTokenMessage') + externalIdType = Shapes::StringShape.new(name: 'externalIdType') + federatedIdType = Shapes::StringShape.new(name: 'federatedIdType') + idpCommunicationErrorMessage = Shapes::StringShape.new(name: 'idpCommunicationErrorMessage') + idpRejectedClaimMessage = Shapes::StringShape.new(name: 'idpRejectedClaimMessage') + invalidAuthorizationMessage = Shapes::StringShape.new(name: 'invalidAuthorizationMessage') + invalidIdentityTokenMessage = Shapes::StringShape.new(name: 'invalidIdentityTokenMessage') + malformedPolicyDocumentMessage = Shapes::StringShape.new(name: 'malformedPolicyDocumentMessage') + nonNegativeIntegerType = Shapes::IntegerShape.new(name: 'nonNegativeIntegerType') + packedPolicyTooLargeMessage = Shapes::StringShape.new(name: 'packedPolicyTooLargeMessage') + policyDescriptorListType = Shapes::ListShape.new(name: 'policyDescriptorListType') + regionDisabledMessage = Shapes::StringShape.new(name: 'regionDisabledMessage') + roleDurationSecondsType = Shapes::IntegerShape.new(name: 'roleDurationSecondsType') + roleSessionNameType = Shapes::StringShape.new(name: 'roleSessionNameType') + serialNumberType = Shapes::StringShape.new(name: 'serialNumberType') + sessionPolicyDocumentType = Shapes::StringShape.new(name: 'sessionPolicyDocumentType') + sourceIdentityType = Shapes::StringShape.new(name: 'sourceIdentityType') + tagKeyListType = Shapes::ListShape.new(name: 'tagKeyListType') + tagKeyType = Shapes::StringShape.new(name: 'tagKeyType') + tagListType = Shapes::ListShape.new(name: 'tagListType') + tagValueType = Shapes::StringShape.new(name: 'tagValueType') + tokenCodeType = Shapes::StringShape.new(name: 'tokenCodeType') + tokenType = Shapes::StringShape.new(name: 'tokenType') + urlType = Shapes::StringShape.new(name: 'urlType') + userIdType = Shapes::StringShape.new(name: 'userIdType') + userNameType = Shapes::StringShape.new(name: 'userNameType') + webIdentitySubjectType = Shapes::StringShape.new(name: 'webIdentitySubjectType') + + AssumeRoleRequest.add_member(:role_arn, Shapes::ShapeRef.new(shape: arnType, required: true, location_name: "RoleArn")) + AssumeRoleRequest.add_member(:role_session_name, Shapes::ShapeRef.new(shape: roleSessionNameType, required: true, location_name: "RoleSessionName")) + AssumeRoleRequest.add_member(:policy_arns, Shapes::ShapeRef.new(shape: policyDescriptorListType, location_name: "PolicyArns")) + AssumeRoleRequest.add_member(:policy, Shapes::ShapeRef.new(shape: sessionPolicyDocumentType, location_name: "Policy")) + AssumeRoleRequest.add_member(:duration_seconds, Shapes::ShapeRef.new(shape: roleDurationSecondsType, location_name: "DurationSeconds")) + AssumeRoleRequest.add_member(:tags, Shapes::ShapeRef.new(shape: tagListType, location_name: "Tags")) + AssumeRoleRequest.add_member(:transitive_tag_keys, Shapes::ShapeRef.new(shape: tagKeyListType, location_name: "TransitiveTagKeys")) + AssumeRoleRequest.add_member(:external_id, Shapes::ShapeRef.new(shape: externalIdType, location_name: "ExternalId")) + AssumeRoleRequest.add_member(:serial_number, Shapes::ShapeRef.new(shape: serialNumberType, location_name: 
"SerialNumber")) + AssumeRoleRequest.add_member(:token_code, Shapes::ShapeRef.new(shape: tokenCodeType, location_name: "TokenCode")) + AssumeRoleRequest.add_member(:source_identity, Shapes::ShapeRef.new(shape: sourceIdentityType, location_name: "SourceIdentity")) + AssumeRoleRequest.struct_class = Types::AssumeRoleRequest + + AssumeRoleResponse.add_member(:credentials, Shapes::ShapeRef.new(shape: Credentials, location_name: "Credentials")) + AssumeRoleResponse.add_member(:assumed_role_user, Shapes::ShapeRef.new(shape: AssumedRoleUser, location_name: "AssumedRoleUser")) + AssumeRoleResponse.add_member(:packed_policy_size, Shapes::ShapeRef.new(shape: nonNegativeIntegerType, location_name: "PackedPolicySize")) + AssumeRoleResponse.add_member(:source_identity, Shapes::ShapeRef.new(shape: sourceIdentityType, location_name: "SourceIdentity")) + AssumeRoleResponse.struct_class = Types::AssumeRoleResponse + + AssumeRoleWithSAMLRequest.add_member(:role_arn, Shapes::ShapeRef.new(shape: arnType, required: true, location_name: "RoleArn")) + AssumeRoleWithSAMLRequest.add_member(:principal_arn, Shapes::ShapeRef.new(shape: arnType, required: true, location_name: "PrincipalArn")) + AssumeRoleWithSAMLRequest.add_member(:saml_assertion, Shapes::ShapeRef.new(shape: SAMLAssertionType, required: true, location_name: "SAMLAssertion")) + AssumeRoleWithSAMLRequest.add_member(:policy_arns, Shapes::ShapeRef.new(shape: policyDescriptorListType, location_name: "PolicyArns")) + AssumeRoleWithSAMLRequest.add_member(:policy, Shapes::ShapeRef.new(shape: sessionPolicyDocumentType, location_name: "Policy")) + AssumeRoleWithSAMLRequest.add_member(:duration_seconds, Shapes::ShapeRef.new(shape: roleDurationSecondsType, location_name: "DurationSeconds")) + AssumeRoleWithSAMLRequest.struct_class = Types::AssumeRoleWithSAMLRequest + + AssumeRoleWithSAMLResponse.add_member(:credentials, Shapes::ShapeRef.new(shape: Credentials, location_name: "Credentials")) + AssumeRoleWithSAMLResponse.add_member(:assumed_role_user, Shapes::ShapeRef.new(shape: AssumedRoleUser, location_name: "AssumedRoleUser")) + AssumeRoleWithSAMLResponse.add_member(:packed_policy_size, Shapes::ShapeRef.new(shape: nonNegativeIntegerType, location_name: "PackedPolicySize")) + AssumeRoleWithSAMLResponse.add_member(:subject, Shapes::ShapeRef.new(shape: Subject, location_name: "Subject")) + AssumeRoleWithSAMLResponse.add_member(:subject_type, Shapes::ShapeRef.new(shape: SubjectType, location_name: "SubjectType")) + AssumeRoleWithSAMLResponse.add_member(:issuer, Shapes::ShapeRef.new(shape: Issuer, location_name: "Issuer")) + AssumeRoleWithSAMLResponse.add_member(:audience, Shapes::ShapeRef.new(shape: Audience, location_name: "Audience")) + AssumeRoleWithSAMLResponse.add_member(:name_qualifier, Shapes::ShapeRef.new(shape: NameQualifier, location_name: "NameQualifier")) + AssumeRoleWithSAMLResponse.add_member(:source_identity, Shapes::ShapeRef.new(shape: sourceIdentityType, location_name: "SourceIdentity")) + AssumeRoleWithSAMLResponse.struct_class = Types::AssumeRoleWithSAMLResponse + + AssumeRoleWithWebIdentityRequest.add_member(:role_arn, Shapes::ShapeRef.new(shape: arnType, required: true, location_name: "RoleArn")) + AssumeRoleWithWebIdentityRequest.add_member(:role_session_name, Shapes::ShapeRef.new(shape: roleSessionNameType, required: true, location_name: "RoleSessionName")) + AssumeRoleWithWebIdentityRequest.add_member(:web_identity_token, Shapes::ShapeRef.new(shape: clientTokenType, required: true, location_name: "WebIdentityToken")) + 
AssumeRoleWithWebIdentityRequest.add_member(:provider_id, Shapes::ShapeRef.new(shape: urlType, location_name: "ProviderId")) + AssumeRoleWithWebIdentityRequest.add_member(:policy_arns, Shapes::ShapeRef.new(shape: policyDescriptorListType, location_name: "PolicyArns")) + AssumeRoleWithWebIdentityRequest.add_member(:policy, Shapes::ShapeRef.new(shape: sessionPolicyDocumentType, location_name: "Policy")) + AssumeRoleWithWebIdentityRequest.add_member(:duration_seconds, Shapes::ShapeRef.new(shape: roleDurationSecondsType, location_name: "DurationSeconds")) + AssumeRoleWithWebIdentityRequest.struct_class = Types::AssumeRoleWithWebIdentityRequest + + AssumeRoleWithWebIdentityResponse.add_member(:credentials, Shapes::ShapeRef.new(shape: Credentials, location_name: "Credentials")) + AssumeRoleWithWebIdentityResponse.add_member(:subject_from_web_identity_token, Shapes::ShapeRef.new(shape: webIdentitySubjectType, location_name: "SubjectFromWebIdentityToken")) + AssumeRoleWithWebIdentityResponse.add_member(:assumed_role_user, Shapes::ShapeRef.new(shape: AssumedRoleUser, location_name: "AssumedRoleUser")) + AssumeRoleWithWebIdentityResponse.add_member(:packed_policy_size, Shapes::ShapeRef.new(shape: nonNegativeIntegerType, location_name: "PackedPolicySize")) + AssumeRoleWithWebIdentityResponse.add_member(:provider, Shapes::ShapeRef.new(shape: Issuer, location_name: "Provider")) + AssumeRoleWithWebIdentityResponse.add_member(:audience, Shapes::ShapeRef.new(shape: Audience, location_name: "Audience")) + AssumeRoleWithWebIdentityResponse.add_member(:source_identity, Shapes::ShapeRef.new(shape: sourceIdentityType, location_name: "SourceIdentity")) + AssumeRoleWithWebIdentityResponse.struct_class = Types::AssumeRoleWithWebIdentityResponse + + AssumedRoleUser.add_member(:assumed_role_id, Shapes::ShapeRef.new(shape: assumedRoleIdType, required: true, location_name: "AssumedRoleId")) + AssumedRoleUser.add_member(:arn, Shapes::ShapeRef.new(shape: arnType, required: true, location_name: "Arn")) + AssumedRoleUser.struct_class = Types::AssumedRoleUser + + Credentials.add_member(:access_key_id, Shapes::ShapeRef.new(shape: accessKeyIdType, required: true, location_name: "AccessKeyId")) + Credentials.add_member(:secret_access_key, Shapes::ShapeRef.new(shape: accessKeySecretType, required: true, location_name: "SecretAccessKey")) + Credentials.add_member(:session_token, Shapes::ShapeRef.new(shape: tokenType, required: true, location_name: "SessionToken")) + Credentials.add_member(:expiration, Shapes::ShapeRef.new(shape: dateType, required: true, location_name: "Expiration")) + Credentials.struct_class = Types::Credentials + + DecodeAuthorizationMessageRequest.add_member(:encoded_message, Shapes::ShapeRef.new(shape: encodedMessageType, required: true, location_name: "EncodedMessage")) + DecodeAuthorizationMessageRequest.struct_class = Types::DecodeAuthorizationMessageRequest + + DecodeAuthorizationMessageResponse.add_member(:decoded_message, Shapes::ShapeRef.new(shape: decodedMessageType, location_name: "DecodedMessage")) + DecodeAuthorizationMessageResponse.struct_class = Types::DecodeAuthorizationMessageResponse + + ExpiredTokenException.add_member(:message, Shapes::ShapeRef.new(shape: expiredIdentityTokenMessage, location_name: "message")) + ExpiredTokenException.struct_class = Types::ExpiredTokenException + + FederatedUser.add_member(:federated_user_id, Shapes::ShapeRef.new(shape: federatedIdType, required: true, location_name: "FederatedUserId")) + FederatedUser.add_member(:arn, Shapes::ShapeRef.new(shape: 
arnType, required: true, location_name: "Arn")) + FederatedUser.struct_class = Types::FederatedUser + + GetAccessKeyInfoRequest.add_member(:access_key_id, Shapes::ShapeRef.new(shape: accessKeyIdType, required: true, location_name: "AccessKeyId")) + GetAccessKeyInfoRequest.struct_class = Types::GetAccessKeyInfoRequest + + GetAccessKeyInfoResponse.add_member(:account, Shapes::ShapeRef.new(shape: accountType, location_name: "Account")) + GetAccessKeyInfoResponse.struct_class = Types::GetAccessKeyInfoResponse + + GetCallerIdentityRequest.struct_class = Types::GetCallerIdentityRequest + + GetCallerIdentityResponse.add_member(:user_id, Shapes::ShapeRef.new(shape: userIdType, location_name: "UserId")) + GetCallerIdentityResponse.add_member(:account, Shapes::ShapeRef.new(shape: accountType, location_name: "Account")) + GetCallerIdentityResponse.add_member(:arn, Shapes::ShapeRef.new(shape: arnType, location_name: "Arn")) + GetCallerIdentityResponse.struct_class = Types::GetCallerIdentityResponse + + GetFederationTokenRequest.add_member(:name, Shapes::ShapeRef.new(shape: userNameType, required: true, location_name: "Name")) + GetFederationTokenRequest.add_member(:policy, Shapes::ShapeRef.new(shape: sessionPolicyDocumentType, location_name: "Policy")) + GetFederationTokenRequest.add_member(:policy_arns, Shapes::ShapeRef.new(shape: policyDescriptorListType, location_name: "PolicyArns")) + GetFederationTokenRequest.add_member(:duration_seconds, Shapes::ShapeRef.new(shape: durationSecondsType, location_name: "DurationSeconds")) + GetFederationTokenRequest.add_member(:tags, Shapes::ShapeRef.new(shape: tagListType, location_name: "Tags")) + GetFederationTokenRequest.struct_class = Types::GetFederationTokenRequest + + GetFederationTokenResponse.add_member(:credentials, Shapes::ShapeRef.new(shape: Credentials, location_name: "Credentials")) + GetFederationTokenResponse.add_member(:federated_user, Shapes::ShapeRef.new(shape: FederatedUser, location_name: "FederatedUser")) + GetFederationTokenResponse.add_member(:packed_policy_size, Shapes::ShapeRef.new(shape: nonNegativeIntegerType, location_name: "PackedPolicySize")) + GetFederationTokenResponse.struct_class = Types::GetFederationTokenResponse + + GetSessionTokenRequest.add_member(:duration_seconds, Shapes::ShapeRef.new(shape: durationSecondsType, location_name: "DurationSeconds")) + GetSessionTokenRequest.add_member(:serial_number, Shapes::ShapeRef.new(shape: serialNumberType, location_name: "SerialNumber")) + GetSessionTokenRequest.add_member(:token_code, Shapes::ShapeRef.new(shape: tokenCodeType, location_name: "TokenCode")) + GetSessionTokenRequest.struct_class = Types::GetSessionTokenRequest + + GetSessionTokenResponse.add_member(:credentials, Shapes::ShapeRef.new(shape: Credentials, location_name: "Credentials")) + GetSessionTokenResponse.struct_class = Types::GetSessionTokenResponse + + IDPCommunicationErrorException.add_member(:message, Shapes::ShapeRef.new(shape: idpCommunicationErrorMessage, location_name: "message")) + IDPCommunicationErrorException.struct_class = Types::IDPCommunicationErrorException + + IDPRejectedClaimException.add_member(:message, Shapes::ShapeRef.new(shape: idpRejectedClaimMessage, location_name: "message")) + IDPRejectedClaimException.struct_class = Types::IDPRejectedClaimException + + InvalidAuthorizationMessageException.add_member(:message, Shapes::ShapeRef.new(shape: invalidAuthorizationMessage, location_name: "message")) + InvalidAuthorizationMessageException.struct_class = Types::InvalidAuthorizationMessageException + 
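+
+    # A brief illustrative sketch (not part of the generated model): the
+    # struct_class assignments above are what give response data its typed,
+    # Struct-like members at runtime. For the GetCallerIdentityResponse
+    # shape wired up above, for example:
+    #
+    #   resp = Aws::STS::Client.new.get_caller_identity
+    #   resp.data.class #=> Aws::STS::Types::GetCallerIdentityResponse
+    #   resp.arn        #=> String, the caller's ARN (delegated to resp.data)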
+ InvalidIdentityTokenException.add_member(:message, Shapes::ShapeRef.new(shape: invalidIdentityTokenMessage, location_name: "message")) + InvalidIdentityTokenException.struct_class = Types::InvalidIdentityTokenException + + MalformedPolicyDocumentException.add_member(:message, Shapes::ShapeRef.new(shape: malformedPolicyDocumentMessage, location_name: "message")) + MalformedPolicyDocumentException.struct_class = Types::MalformedPolicyDocumentException + + PackedPolicyTooLargeException.add_member(:message, Shapes::ShapeRef.new(shape: packedPolicyTooLargeMessage, location_name: "message")) + PackedPolicyTooLargeException.struct_class = Types::PackedPolicyTooLargeException + + PolicyDescriptorType.add_member(:arn, Shapes::ShapeRef.new(shape: arnType, location_name: "arn")) + PolicyDescriptorType.struct_class = Types::PolicyDescriptorType + + RegionDisabledException.add_member(:message, Shapes::ShapeRef.new(shape: regionDisabledMessage, location_name: "message")) + RegionDisabledException.struct_class = Types::RegionDisabledException + + Tag.add_member(:key, Shapes::ShapeRef.new(shape: tagKeyType, required: true, location_name: "Key")) + Tag.add_member(:value, Shapes::ShapeRef.new(shape: tagValueType, required: true, location_name: "Value")) + Tag.struct_class = Types::Tag + + policyDescriptorListType.member = Shapes::ShapeRef.new(shape: PolicyDescriptorType) + + tagKeyListType.member = Shapes::ShapeRef.new(shape: tagKeyType) + + tagListType.member = Shapes::ShapeRef.new(shape: Tag) + + + # @api private + API = Seahorse::Model::Api.new.tap do |api| + + api.version = "2011-06-15" + + api.metadata = { + "apiVersion" => "2011-06-15", + "endpointPrefix" => "sts", + "globalEndpoint" => "sts.amazonaws.com", + "protocol" => "query", + "serviceAbbreviation" => "AWS STS", + "serviceFullName" => "AWS Security Token Service", + "serviceId" => "STS", + "signatureVersion" => "v4", + "uid" => "sts-2011-06-15", + "xmlNamespace" => "https://sts.amazonaws.com/doc/2011-06-15/", + } + + api.add_operation(:assume_role, Seahorse::Model::Operation.new.tap do |o| + o.name = "AssumeRole" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: AssumeRoleRequest) + o.output = Shapes::ShapeRef.new(shape: AssumeRoleResponse) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: PackedPolicyTooLargeException) + o.errors << Shapes::ShapeRef.new(shape: RegionDisabledException) + o.errors << Shapes::ShapeRef.new(shape: ExpiredTokenException) + end) + + api.add_operation(:assume_role_with_saml, Seahorse::Model::Operation.new.tap do |o| + o.name = "AssumeRoleWithSAML" + o.http_method = "POST" + o.http_request_uri = "/" + o['authtype'] = "none" + o.input = Shapes::ShapeRef.new(shape: AssumeRoleWithSAMLRequest) + o.output = Shapes::ShapeRef.new(shape: AssumeRoleWithSAMLResponse) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: PackedPolicyTooLargeException) + o.errors << Shapes::ShapeRef.new(shape: IDPRejectedClaimException) + o.errors << Shapes::ShapeRef.new(shape: InvalidIdentityTokenException) + o.errors << Shapes::ShapeRef.new(shape: ExpiredTokenException) + o.errors << Shapes::ShapeRef.new(shape: RegionDisabledException) + end) + + api.add_operation(:assume_role_with_web_identity, Seahorse::Model::Operation.new.tap do |o| + o.name = "AssumeRoleWithWebIdentity" + o.http_method = "POST" + o.http_request_uri = "/" + o['authtype'] = "none" + o.input = 
Shapes::ShapeRef.new(shape: AssumeRoleWithWebIdentityRequest) + o.output = Shapes::ShapeRef.new(shape: AssumeRoleWithWebIdentityResponse) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: PackedPolicyTooLargeException) + o.errors << Shapes::ShapeRef.new(shape: IDPRejectedClaimException) + o.errors << Shapes::ShapeRef.new(shape: IDPCommunicationErrorException) + o.errors << Shapes::ShapeRef.new(shape: InvalidIdentityTokenException) + o.errors << Shapes::ShapeRef.new(shape: ExpiredTokenException) + o.errors << Shapes::ShapeRef.new(shape: RegionDisabledException) + end) + + api.add_operation(:decode_authorization_message, Seahorse::Model::Operation.new.tap do |o| + o.name = "DecodeAuthorizationMessage" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DecodeAuthorizationMessageRequest) + o.output = Shapes::ShapeRef.new(shape: DecodeAuthorizationMessageResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidAuthorizationMessageException) + end) + + api.add_operation(:get_access_key_info, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetAccessKeyInfo" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetAccessKeyInfoRequest) + o.output = Shapes::ShapeRef.new(shape: GetAccessKeyInfoResponse) + end) + + api.add_operation(:get_caller_identity, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetCallerIdentity" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetCallerIdentityRequest) + o.output = Shapes::ShapeRef.new(shape: GetCallerIdentityResponse) + end) + + api.add_operation(:get_federation_token, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetFederationToken" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetFederationTokenRequest) + o.output = Shapes::ShapeRef.new(shape: GetFederationTokenResponse) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: PackedPolicyTooLargeException) + o.errors << Shapes::ShapeRef.new(shape: RegionDisabledException) + end) + + api.add_operation(:get_session_token, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetSessionToken" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetSessionTokenRequest) + o.output = Shapes::ShapeRef.new(shape: GetSessionTokenResponse) + o.errors << Shapes::ShapeRef.new(shape: RegionDisabledException) + end) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/customizations.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/customizations.rb new file mode 100644 index 0000000..9f3daa3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/customizations.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +# utility classes +require 'aws-sdk-sts/presigner' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_parameters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_parameters.rb new file mode 100644 index 0000000..1b4cf45 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_parameters.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + # Endpoint parameters used to influence endpoints per request. + # + # @!attribute region + # The AWS region used to dispatch the request. + # + # @return [String] + # + # @!attribute use_dual_stack + # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error. + # + # @return [Boolean] + # + # @!attribute use_fips + # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS-compliant endpoint, dispatching the request will return an error. + # + # @return [Boolean] + # + # @!attribute endpoint + # Override the endpoint used to send this request. + # + # @return [String] + # + # @!attribute use_global_endpoint + # Whether the global endpoint should be used, rather than the regional endpoint for us-east-1. + # + # @return [Boolean] + # + EndpointParameters = Struct.new( + :region, + :use_dual_stack, + :use_fips, + :endpoint, + :use_global_endpoint, + ) do + include Aws::Structure + + # @api private + class << self + PARAM_MAP = { + 'Region' => :region, + 'UseDualStack' => :use_dual_stack, + 'UseFIPS' => :use_fips, + 'Endpoint' => :endpoint, + 'UseGlobalEndpoint' => :use_global_endpoint, + }.freeze + end + + def initialize(options = {}) + self[:region] = options[:region] + self[:use_dual_stack] = options[:use_dual_stack] + self[:use_dual_stack] = false if self[:use_dual_stack].nil? + if self[:use_dual_stack].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack" + end + self[:use_fips] = options[:use_fips] + self[:use_fips] = false if self[:use_fips].nil? + if self[:use_fips].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_fips" + end + self[:endpoint] = options[:endpoint] + self[:use_global_endpoint] = options[:use_global_endpoint] + self[:use_global_endpoint] = false if self[:use_global_endpoint].nil? + if self[:use_global_endpoint].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_global_endpoint" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_provider.rb new file mode 100644 index 0000000..935bc12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoint_provider.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + class EndpointProvider + def resolve_endpoint(parameters) + region = parameters.region + use_dual_stack = parameters.use_dual_stack + use_fips = parameters.use_fips + endpoint = parameters.endpoint + use_global_endpoint = parameters.use_global_endpoint + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) + if Aws::Endpoints::Matchers.string_equals?(region, "ap-northeast-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "ap-south-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "ap-southeast-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "ap-southeast-2") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "ca-central-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "eu-central-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "eu-north-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "eu-west-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "eu-west-2") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "eu-west-3") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: 
{"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "sa-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-2") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "us-west-1") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(region, "us-west-2") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://sts.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"#{region}", "signingName"=>"sts"}]}) + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" + end + return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://sts-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.string_equals?("aws-us-gov", Aws::Endpoints::Matchers.attr(partition_result, "name")) + return Aws::Endpoints::Endpoint.new(url: "https://sts.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + return Aws::Endpoints::Endpoint.new(url: "https://sts-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, 
Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://sts.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" + end + if Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://sts.amazonaws.com", headers: {}, properties: {"authSchemes"=>[{"name"=>"sigv4", "signingRegion"=>"us-east-1", "signingName"=>"sts"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://sts.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, 'No endpoint could be resolved' + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoints.rb new file mode 100644 index 0000000..3442a51 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/endpoints.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::STS + module Endpoints + + class AssumeRole + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class AssumeRoleWithSAML + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class AssumeRoleWithWebIdentity + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class DecodeAuthorizationMessage + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class GetAccessKeyInfo + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + 
end + + class GetCallerIdentity + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class GetFederationToken + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + class GetSessionToken + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy', + ) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/errors.rb new file mode 100644 index 0000000..d5d28ea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/errors.rb @@ -0,0 +1,166 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + + # When STS returns an error response, the Ruby SDK constructs and raises an error. + # These errors all extend Aws::STS::Errors::ServiceError < {Aws::Errors::ServiceError} + # + # You can rescue all STS errors using ServiceError: + # + # begin + # # do stuff + # rescue Aws::STS::Errors::ServiceError + # # rescues all STS API errors + # end + # + # + # ## Request Context + # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns + # information about the request that generated the error. + # See {Seahorse::Client::RequestContext} for more information. + # + # ## Error Classes + # * {ExpiredTokenException} + # * {IDPCommunicationErrorException} + # * {IDPRejectedClaimException} + # * {InvalidAuthorizationMessageException} + # * {InvalidIdentityTokenException} + # * {MalformedPolicyDocumentException} + # * {PackedPolicyTooLargeException} + # * {RegionDisabledException} + # + # Additionally, error classes are dynamically generated for service errors based on the error code + # if they are not defined above. 
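+ #
+ # As a hedged sketch (the operation and error class here are illustrative
+ # only), a specific class can be rescued ahead of the catch-all, and the
+ # originating request inspected through the error's context:
+ #
+ #     begin
+ #       Aws::STS::Client.new.get_caller_identity
+ #     rescue Aws::STS::Errors::ExpiredTokenException => e
+ #       puts e.message                # error message returned by STS
+ #       puts e.context.operation_name #=> :get_caller_identity
+ #     rescue Aws::STS::Errors::ServiceError
+ #       raise # rethrow anything else
+ #     end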
+ module Errors + + extend Aws::Errors::DynamicErrors + + class ExpiredTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::ExpiredTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class IDPCommunicationErrorException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::IDPCommunicationErrorException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class IDPRejectedClaimException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::IDPRejectedClaimException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidAuthorizationMessageException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::InvalidAuthorizationMessageException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidIdentityTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::InvalidIdentityTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class MalformedPolicyDocumentException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::MalformedPolicyDocumentException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class PackedPolicyTooLargeException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::PackedPolicyTooLargeException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class RegionDisabledException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::STS::Types::RegionDisabledException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/endpoints.rb new file mode 100644 index 0000000..70a68dc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/endpoints.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +# 
WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::STS + module Plugins + class Endpoints < Seahorse::Client::Plugin + option( + :endpoint_provider, + doc_type: 'Aws::STS::EndpointProvider', + docstring: 'The endpoint provider used to resolve endpoints. Any '\ + 'object that responds to `#resolve_endpoint(parameters)` '\ + 'where `parameters` is a Struct similar to '\ + '`Aws::STS::EndpointParameters`' + ) do |cfg| + Aws::STS::EndpointProvider.new + end + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + # If endpoint was discovered, do not resolve or apply the endpoint. + unless context[:discovered_endpoint] + params = parameters_for_operation(context) + endpoint = context.config.endpoint_provider.resolve_endpoint(params) + + context.http_request.endpoint = endpoint.url + apply_endpoint_headers(context, endpoint.headers) + end + + context[:endpoint_params] = params + context[:auth_scheme] = + Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + @handler.call(context) + end + + private + + def apply_endpoint_headers(context, headers) + headers.each do |key, values| + value = values + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + + context.http_request.headers[key] = value + end + end + + def parameters_for_operation(context) + case context.operation_name + when :assume_role + Aws::STS::Endpoints::AssumeRole.build(context) + when :assume_role_with_saml + Aws::STS::Endpoints::AssumeRoleWithSAML.build(context) + when :assume_role_with_web_identity + Aws::STS::Endpoints::AssumeRoleWithWebIdentity.build(context) + when :decode_authorization_message + Aws::STS::Endpoints::DecodeAuthorizationMessage.build(context) + when :get_access_key_info + Aws::STS::Endpoints::GetAccessKeyInfo.build(context) + when :get_caller_identity + Aws::STS::Endpoints::GetCallerIdentity.build(context) + when :get_federation_token + Aws::STS::Endpoints::GetFederationToken.build(context) + when :get_session_token + Aws::STS::Endpoints::GetSessionToken.build(context) + end + end + end + + def add_handlers(handlers, _config) + handlers.add(Handler, step: :build, priority: 75) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/sts_regional_endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/sts_regional_endpoints.rb new file mode 100644 index 0000000..b0ca55b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/plugins/sts_regional_endpoints.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module Aws + module STS + module Plugins + + class STSRegionalEndpoints < Seahorse::Client::Plugin + + option(:sts_regional_endpoints, + default: 'regional', + doc_type: String, + docstring: <<-DOCS) do |cfg| +Passing in 'regional' to enable regional endpoint for STS for all supported +regions (except 'aws-global'). Using 'legacy' mode will force all legacy +regions to resolve to the STS global endpoint. 
+ DOCS + resolve_sts_regional_endpoints(cfg) + end + + private + + def self.resolve_sts_regional_endpoints(cfg) + env_mode = ENV['AWS_STS_REGIONAL_ENDPOINTS'] + env_mode = nil if env_mode == '' + cfg_mode = Aws.shared_config.sts_regional_endpoints( + profile: cfg.profile) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + cfg.defaults_mode_config_resolver.resolve(:sts_regional_endpoints) + end + env_mode || cfg_mode || default_mode_value || 'regional' + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/presigner.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/presigner.rb new file mode 100644 index 0000000..4f0097c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/presigner.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +require 'aws-sigv4' + +module Aws + module STS + # Allows you to create presigned URLs for STS operations. + # + # @example + # + # signer = Aws::STS::Presigner.new + # url = signer.get_caller_identity_presigned_url( + # headers: {"X-K8s-Aws-Id" => 'my-eks-cluster'} + # ) + class Presigner + # @option options [Client] :client Optionally provide an existing + # STS client + def initialize(options = {}) + @client = options[:client] || Aws::STS::Client.new + end + + # Returns a presigned url for get_caller_identity. + # + # @option options [Hash] :headers + # Headers that should be signed and sent along with the request. All + # x-amz-* headers must be present during signing. Other headers are + # optional. + # + # @return [String] A presigned url string. + # + # @example + # + # url = signer.get_caller_identity_presigned_url( + # headers: {"X-K8s-Aws-Id" => 'my-eks-cluster'}, + # ) + # + # This can be easily converted to a token used by the EKS service: + # {https://ruby-doc.org/stdlib-2.3.1/libdoc/base64/rdoc/Base64.html#method-i-encode64} + # "k8s-aws-v1." + Base64.urlsafe_encode64(url).chomp("==") + def get_caller_identity_presigned_url(options = {}) + req = @client.build_request(:get_caller_identity, {}) + context = req.context + + param_list = Aws::Query::ParamList.new + param_list.set('Action', 'GetCallerIdentity') + param_list.set('Version', req.context.config.api.version) + Aws::Query::EC2ParamBuilder.new(param_list) + .apply(req.context.operation.input, {}) + + endpoint_params = Aws::STS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + use_global_endpoint: context.config.sts_regional_endpoints == 'legacy' + ) + endpoint = context.config.endpoint_provider + .resolve_endpoint(endpoint_params) + auth_scheme = Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + signer = Aws::Plugins::Sign.signer_for( + auth_scheme, context.config + ) + + signer.presign_url( + http_method: 'GET', + url: "#{endpoint.url}/?#{param_list}", + body: '', + headers: options[:headers] + ).to_s + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/resource.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/resource.rb new file mode 100644 index 0000000..05a0af9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/resource.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + + class Resource + + # @param options ({}) + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new(options) + end + + # @return [Client] + def client + @client + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/types.rb new file mode 100644 index 0000000..47ddcae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/aws-sdk-sts/types.rb @@ -0,0 +1,1566 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::STS + module Types + + # @!attribute [rw] role_arn + # The Amazon Resource Name (ARN) of the role to assume. + # @return [String] + # + # @!attribute [rw] role_session_name + # An identifier for the assumed role session. + # + # Use the role session name to uniquely identify a session when the + # same role is assumed by different principals or for different + # reasons. In cross-account scenarios, the role session name is + # visible to, and can be logged by the account that owns the role. The + # role session name is also used in the ARN of the assumed role + # principal. This means that subsequent cross-account API requests + # that use the temporary security credentials will expose the role + # session name to the external account in their CloudTrail logs. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # @return [String] + # + # @!attribute [rw] policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that + # you want to use as managed session policies. The policies must exist + # in the same account as the role. + # + # This parameter is optional. You can provide up to 10 managed policy + # ARNs. However, the plaintext that you use for both inline and + # managed session policies can't exceed 2,048 characters. For more + # information about ARNs, see [Amazon Resource Names (ARNs) and Amazon + # Web Services Service Namespaces][1] in the Amazon Web Services + # General Reference. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # Passing policies to this operation returns new temporary + # credentials. The resulting session's permissions are the + # intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in + # subsequent Amazon Web Services API calls to access resources in the + # account that owns the role. 
You cannot use session policies to grant + # more permissions than those allowed by the identity-based policy of + # the role that is being assumed. For more information, see [Session + # Policies][2] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [Array] + # + # @!attribute [rw] policy + # An IAM policy in JSON format that you want to use as an inline + # session policy. + # + # This parameter is optional. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based + # policy and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of + # the valid character list (\\u0020 through \\u00FF). It can also + # include the tab (\\u0009), linefeed (\\u000A), and carriage return + # (\\u000D) characters. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [String] + # + # @!attribute [rw] duration_seconds + # The duration, in seconds, of the role session. The value specified + # can range from 900 seconds (15 minutes) up to the maximum session + # duration set for the role. The maximum session duration setting can + # have a value from 1 hour to 12 hours. If you specify a value higher + # than this setting or the administrator setting (whichever is lower), + # the operation fails. For example, if you specify a session duration + # of 12 hours, but your administrator set the maximum session duration + # to 6 hours, your operation fails. + # + # Role chaining limits your Amazon Web Services CLI or Amazon Web + # Services API role session to a maximum of one hour. When you use the + # `AssumeRole` API operation to assume a role, you can specify the + # duration of your role session with the `DurationSeconds` parameter. + # You can specify a parameter value of up to 43200 seconds (12 hours), + # depending on the maximum session duration setting for your role. + # However, if you assume a role using role chaining and provide a + # `DurationSeconds` parameter value greater than one hour, the + # operation fails. To learn how to view the maximum value for your + # role, see [View the Maximum Session Duration Setting for a Role][1] + # in the *IAM User Guide*. + # + # By default, the value is set to `3600` seconds. 
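+ #
+ # As an illustrative sketch (the role ARN is a placeholder), requesting a
+ # two-hour session succeeds only when the role's maximum session duration
+ # allows it:
+ #
+ #     resp = Aws::STS::Client.new.assume_role(
+ #       role_arn: "arn:aws:iam::123456789012:role/example", # placeholder
+ #       role_session_name: "demo-session",
+ #       duration_seconds: 7200 # fails if the role's maximum is lower
+ #     )
+ #     resp.credentials.expiration #=> roughly two hours from now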
+ # + # The `DurationSeconds` parameter is separate from the duration of a + # console session that you might request using the returned + # credentials. The request to the federation endpoint for a console + # sign-in token takes a `SessionDuration` parameter that specifies the + # maximum length of the console session. For more information, see + # [Creating a URL that Enables Federated Users to Access the Amazon + # Web Services Management Console][2] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + # @return [Integer] + # + # @!attribute [rw] tags + # A list of session tags that you want to pass. Each session tag + # consists of a key name and an associated value. For more information + # about session tags, see [Tagging Amazon Web Services STS + # Sessions][1] in the *IAM User Guide*. + # + # This parameter is optional. You can pass up to 50 session tags. The + # plaintext session tag keys can’t exceed 128 characters, and the + # values can’t exceed 256 characters. For these and additional limits, + # see [IAM and STS Character Limits][2] in the *IAM User Guide*. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # You can pass a session tag with the same key as a tag that is + # already attached to the role. When you do, session tags override a + # role tag with the same key. + # + # Tag key–value pairs are not case sensitive, but case is preserved. + # This means that you cannot have separate `Department` and + # `department` tag keys. Assume that the role has the + # `Department`=`Marketing` tag and you pass the + # `department`=`engineering` session tag. `Department` and + # `department` are not saved as separate tags, and the session tag + # passed in the request takes precedence over the role tag. + # + # Additionally, if you used temporary credentials to perform this + # operation, the new session inherits any transitive session tags from + # the calling session. If you pass a session tag with the same key as + # an inherited tag, the operation fails. To view the inherited tags + # for a session, see the CloudTrail logs. For more information, see + # [Viewing Session Tags in CloudTrail][3] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs + # @return [Array] + # + # @!attribute [rw] transitive_tag_keys + # A list of keys for session tags that you want to set as transitive. + # If you set a tag key as transitive, the corresponding key and value + # passes to subsequent sessions in a role chain. For more information, + # see [Chaining Roles with Session Tags][1] in the *IAM User Guide*. + # + # This parameter is optional. 
When you set session tags as transitive, + # the session policy and session tags packed binary limit is not + # affected. + # + # If you choose not to specify a transitive tag key, then no tags are + # passed from this session to any subsequent sessions. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining + # @return [Array] + # + # @!attribute [rw] external_id + # A unique identifier that might be required when you assume a role in + # another account. If the administrator of the account to which the + # role belongs provided you with an external ID, then provide that + # value in the `ExternalId` parameter. This value can be any string, + # such as a passphrase or account number. A cross-account role is + # usually set up to trust everyone in an account. Therefore, the + # administrator of the trusting account might send an external ID to + # the administrator of the trusted account. That way, only someone + # with the ID can assume the role, rather than everyone in the + # account. For more information about the external ID, see [How to Use + # an External ID When Granting Access to Your Amazon Web Services + # Resources to a Third Party][1] in the *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@:/- + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html + # @return [String] + # + # @!attribute [rw] serial_number + # The identification number of the MFA device that is associated with + # the user who is making the `AssumeRole` call. Specify this value if + # the trust policy of the role being assumed includes a condition that + # requires MFA authentication. The value is either the serial number + # for a hardware device (such as `GAHT12345678`) or an Amazon Resource + # Name (ARN) for a virtual device (such as + # `arn:aws:iam::123456789012:mfa/user`). + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # @return [String] + # + # @!attribute [rw] token_code + # The value provided by the MFA device, if the trust policy of the + # role being assumed requires MFA. (In other words, if the policy + # includes a condition that tests for MFA). If the role being assumed + # requires MFA and if the `TokenCode` value is missing or expired, the + # `AssumeRole` call returns an "access denied" error. + # + # The format for this parameter, as described by its regex pattern, is + # a sequence of six numeric digits. + # @return [String] + # + # @!attribute [rw] source_identity + # The source identity specified by the principal that is calling the + # `AssumeRole` operation. + # + # You can require users to specify a source identity when they assume + # a role. You do this by using the `sts:SourceIdentity` condition key + # in a role trust policy. You can use source identity information in + # CloudTrail logs to determine who took actions with a role. You can + # use the `aws:SourceIdentity` condition key to further control access + # to Amazon Web Services resources based on the value of source + # identity. 
For more information about using source identity, see + # [Monitor and control actions taken with assumed roles][1] in the + # *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@-. You cannot use a value that begins with the text + # `aws:`. This prefix is reserved for Amazon Web Services internal + # use. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest AWS API Documentation + # + class AssumeRoleRequest < Struct.new( + :role_arn, + :role_session_name, + :policy_arns, + :policy, + :duration_seconds, + :tags, + :transitive_tag_keys, + :external_id, + :serial_number, + :token_code, + :source_identity) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the response to a successful AssumeRole request, including + # temporary Amazon Web Services credentials that can be used to make + # Amazon Web Services requests. + # + # @!attribute [rw] credentials + # The temporary security credentials, which include an access key ID, + # a secret access key, and a security (or session) token. + # + # The size of the security token that STS API operations return is not + # fixed. We strongly recommend that you make no assumptions about the + # maximum size. + # + # + # @return [Types::Credentials] + # + # @!attribute [rw] assumed_role_user + # The Amazon Resource Name (ARN) and the assumed role ID, which are + # identifiers that you can use to refer to the resulting temporary + # security credentials. For example, you can reference these + # credentials as a principal in a resource-based policy by using the + # ARN or assumed role ID. The ARN and ID include the `RoleSessionName` + # that you specified when you called `AssumeRole`. + # @return [Types::AssumedRoleUser] + # + # @!attribute [rw] packed_policy_size + # A percentage value that indicates the packed size of the session + # policies and session tags combined passed in the request. The + # request fails if the packed size is greater than 100 percent, which + # means the policies and tags exceeded the allowed space. + # @return [Integer] + # + # @!attribute [rw] source_identity + # The source identity specified by the principal that is calling the + # `AssumeRole` operation. + # + # You can require users to specify a source identity when they assume + # a role. You do this by using the `sts:SourceIdentity` condition key + # in a role trust policy. You can use source identity information in + # CloudTrail logs to determine who took actions with a role. You can + # use the `aws:SourceIdentity` condition key to further control access + # to Amazon Web Services resources based on the value of source + # identity. For more information about using source identity, see + # [Monitor and control actions taken with assumed roles][1] in the + # *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. 
You can also include underscores or any of the following + # characters: =,.@- + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse AWS API Documentation + # + class AssumeRoleResponse < Struct.new( + :credentials, + :assumed_role_user, + :packed_policy_size, + :source_identity) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] role_arn + # The Amazon Resource Name (ARN) of the role that the caller is + # assuming. + # @return [String] + # + # @!attribute [rw] principal_arn + # The Amazon Resource Name (ARN) of the SAML provider in IAM that + # describes the IdP. + # @return [String] + # + # @!attribute [rw] saml_assertion + # The base64 encoded SAML authentication response provided by the IdP. + # + # For more information, see [Configuring a Relying Party and Adding + # Claims][1] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html + # @return [String] + # + # @!attribute [rw] policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that + # you want to use as managed session policies. The policies must exist + # in the same account as the role. + # + # This parameter is optional. You can provide up to 10 managed policy + # ARNs. However, the plaintext that you use for both inline and + # managed session policies can't exceed 2,048 characters. For more + # information about ARNs, see [Amazon Resource Names (ARNs) and Amazon + # Web Services Service Namespaces][1] in the Amazon Web Services + # General Reference. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # Passing policies to this operation returns new temporary + # credentials. The resulting session's permissions are the + # intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in + # subsequent Amazon Web Services API calls to access resources in the + # account that owns the role. You cannot use session policies to grant + # more permissions than those allowed by the identity-based policy of + # the role that is being assumed. For more information, see [Session + # Policies][2] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [Array] + # + # @!attribute [rw] policy + # An IAM policy in JSON format that you want to use as an inline + # session policy. + # + # This parameter is optional. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based + # policy and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. 
You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of + # the valid character list (\\u0020 through \\u00FF). It can also + # include the tab (\\u0009), linefeed (\\u000A), and carriage return + # (\\u000D) characters. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [String] + # + # @!attribute [rw] duration_seconds + # The duration, in seconds, of the role session. Your role session + # lasts for the duration that you specify for the `DurationSeconds` + # parameter, or until the time specified in the SAML authentication + # response's `SessionNotOnOrAfter` value, whichever is shorter. You + # can provide a `DurationSeconds` value from 900 seconds (15 minutes) + # up to the maximum session duration setting for the role. This + # setting can have a value from 1 hour to 12 hours. If you specify a + # value higher than this setting, the operation fails. For example, if + # you specify a session duration of 12 hours, but your administrator + # set the maximum session duration to 6 hours, your operation fails. + # To learn how to view the maximum value for your role, see [View the + # Maximum Session Duration Setting for a Role][1] in the *IAM User + # Guide*. + # + # By default, the value is set to `3600` seconds. + # + # The `DurationSeconds` parameter is separate from the duration of a + # console session that you might request using the returned + # credentials. The request to the federation endpoint for a console + # sign-in token takes a `SessionDuration` parameter that specifies the + # maximum length of the console session. For more information, see + # [Creating a URL that Enables Federated Users to Access the Amazon + # Web Services Management Console][2] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest AWS API Documentation + # + class AssumeRoleWithSAMLRequest < Struct.new( + :role_arn, + :principal_arn, + :saml_assertion, + :policy_arns, + :policy, + :duration_seconds) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the response to a successful AssumeRoleWithSAML request, + # including temporary Amazon Web Services credentials that can be used + # to make Amazon Web Services requests. + # + # @!attribute [rw] credentials + # The temporary security credentials, which include an access key ID, + # a secret access key, and a security (or session) token. 
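+ #
+ # For illustration, the returned values can seed an `Aws::Credentials`
+ # object for use with other service clients (a sketch, not part of
+ # this generated file):
+ #
+ #     c = resp.credentials
+ #     creds = Aws::Credentials.new(
+ #       c.access_key_id, c.secret_access_key, c.session_token
+ #     )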
+ # + # The size of the security token that STS API operations return is not + # fixed. We strongly recommend that you make no assumptions about the + # maximum size. + # + # + # @return [Types::Credentials] + # + # @!attribute [rw] assumed_role_user + # The identifiers for the temporary security credentials that the + # operation returns. + # @return [Types::AssumedRoleUser] + # + # @!attribute [rw] packed_policy_size + # A percentage value that indicates the packed size of the session + # policies and session tags combined passed in the request. The + # request fails if the packed size is greater than 100 percent, which + # means the policies and tags exceeded the allowed space. + # @return [Integer] + # + # @!attribute [rw] subject + # The value of the `NameID` element in the `Subject` element of the + # SAML assertion. + # @return [String] + # + # @!attribute [rw] subject_type + # The format of the name ID, as defined by the `Format` attribute in + # the `NameID` element of the SAML assertion. Typical examples of the + # format are `transient` or `persistent`. + # + # If the format includes the prefix + # `urn:oasis:names:tc:SAML:2.0:nameid-format`, that prefix is removed. + # For example, `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` + # is returned as `transient`. If the format includes any other prefix, + # the format is returned with no modifications. + # @return [String] + # + # @!attribute [rw] issuer + # The value of the `Issuer` element of the SAML assertion. + # @return [String] + # + # @!attribute [rw] audience + # The value of the `Recipient` attribute of the + # `SubjectConfirmationData` element of the SAML assertion. + # @return [String] + # + # @!attribute [rw] name_qualifier + # A hash value based on the concatenation of the following: + # + # * The `Issuer` response value. + # + # * The Amazon Web Services account ID. + # + # * The friendly name (the last part of the ARN) of the SAML provider + # in IAM. + # + # The combination of `NameQualifier` and `Subject` can be used to + # uniquely identify a federated user. + # + # The following pseudocode shows how the hash value is calculated: + # + # `BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + + # "/MySAMLIdP" ) )` + # @return [String] + # + # @!attribute [rw] source_identity + # The value in the `SourceIdentity` attribute in the SAML assertion. + # + # You can require users to set a source identity value when they + # assume a role. You do this by using the `sts:SourceIdentity` + # condition key in a role trust policy. That way, actions that are + # taken with the role are associated with that user. After the source + # identity is set, the value cannot be changed. It is present in the + # request for all actions that are taken by the role and persists + # across [chained role][1] sessions. You can configure your SAML + # identity provider to use an attribute associated with your users, + # like user name or email, as the source identity when calling + # `AssumeRoleWithSAML`. You do this by adding an attribute to the SAML + # assertion. For more information about using source identity, see + # [Monitor and control actions taken with assumed roles][2] in the + # *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. 
You can also include underscores or any of the following + # characters: =,.@- + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse AWS API Documentation + # + class AssumeRoleWithSAMLResponse < Struct.new( + :credentials, + :assumed_role_user, + :packed_policy_size, + :subject, + :subject_type, + :issuer, + :audience, + :name_qualifier, + :source_identity) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] role_arn + # The Amazon Resource Name (ARN) of the role that the caller is + # assuming. + # @return [String] + # + # @!attribute [rw] role_session_name + # An identifier for the assumed role session. Typically, you pass the + # name or identifier that is associated with the user who is using + # your application. That way, the temporary security credentials that + # your application will use are associated with that user. This + # session name is included as part of the ARN and assumed role ID in + # the `AssumedRoleUser` response element. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # @return [String] + # + # @!attribute [rw] web_identity_token + # The OAuth 2.0 access token or OpenID Connect ID token that is + # provided by the identity provider. Your application must get this + # token by authenticating the user who is using your application with + # a web identity provider before the application makes an + # `AssumeRoleWithWebIdentity` call. + # @return [String] + # + # @!attribute [rw] provider_id + # The fully qualified host component of the domain name of the OAuth + # 2.0 identity provider. Do not specify this value for an OpenID + # Connect identity provider. + # + # Currently `www.amazon.com` and `graph.facebook.com` are the only + # supported identity providers for OAuth 2.0 access tokens. Do not + # include URL schemes and port numbers. + # + # Do not specify this value for OpenID Connect ID tokens. + # @return [String] + # + # @!attribute [rw] policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that + # you want to use as managed session policies. The policies must exist + # in the same account as the role. + # + # This parameter is optional. You can provide up to 10 managed policy + # ARNs. However, the plaintext that you use for both inline and + # managed session policies can't exceed 2,048 characters. For more + # information about ARNs, see [Amazon Resource Names (ARNs) and Amazon + # Web Services Service Namespaces][1] in the Amazon Web Services + # General Reference. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # Passing policies to this operation returns new temporary + # credentials. 
The resulting session's permissions are the + # intersection of the role's identity-based policy and the session + # policies. You can use the role's temporary credentials in + # subsequent Amazon Web Services API calls to access resources in the + # account that owns the role. You cannot use session policies to grant + # more permissions than those allowed by the identity-based policy of + # the role that is being assumed. For more information, see [Session + # Policies][2] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [Array] + # + # @!attribute [rw] policy + # An IAM policy in JSON format that you want to use as an inline + # session policy. + # + # This parameter is optional. Passing policies to this operation + # returns new temporary credentials. The resulting session's + # permissions are the intersection of the role's identity-based + # policy and the session policies. You can use the role's temporary + # credentials in subsequent Amazon Web Services API calls to access + # resources in the account that owns the role. You cannot use session + # policies to grant more permissions than those allowed by the + # identity-based policy of the role that is being assumed. For more + # information, see [Session Policies][1] in the *IAM User Guide*. + # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of + # the valid character list (\\u0020 through \\u00FF). It can also + # include the tab (\\u0009), linefeed (\\u000A), and carriage return + # (\\u000D) characters. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [String] + # + # @!attribute [rw] duration_seconds + # The duration, in seconds, of the role session. The value can range + # from 900 seconds (15 minutes) up to the maximum session duration + # setting for the role. This setting can have a value from 1 hour to + # 12 hours. If you specify a value higher than this setting, the + # operation fails. For example, if you specify a session duration of + # 12 hours, but your administrator set the maximum session duration to + # 6 hours, your operation fails. To learn how to view the maximum + # value for your role, see [View the Maximum Session Duration Setting + # for a Role][1] in the *IAM User Guide*. + # + # By default, the value is set to `3600` seconds. + # + # The `DurationSeconds` parameter is separate from the duration of a + # console session that you might request using the returned + # credentials. The request to the federation endpoint for a console + # sign-in token takes a `SessionDuration` parameter that specifies the + # maximum length of the console session. 
For more information, see + # [Creating a URL that Enables Federated Users to Access the Amazon + # Web Services Management Console][2] in the *IAM User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest AWS API Documentation + # + class AssumeRoleWithWebIdentityRequest < Struct.new( + :role_arn, + :role_session_name, + :web_identity_token, + :provider_id, + :policy_arns, + :policy, + :duration_seconds) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the response to a successful AssumeRoleWithWebIdentity + # request, including temporary Amazon Web Services credentials that can + # be used to make Amazon Web Services requests. + # + # @!attribute [rw] credentials + # The temporary security credentials, which include an access key ID, + # a secret access key, and a security token. + # + # The size of the security token that STS API operations return is not + # fixed. We strongly recommend that you make no assumptions about the + # maximum size. + # + # + # @return [Types::Credentials] + # + # @!attribute [rw] subject_from_web_identity_token + # The unique user identifier that is returned by the identity + # provider. This identifier is associated with the `WebIdentityToken` + # that was submitted with the `AssumeRoleWithWebIdentity` call. The + # identifier is typically unique to the user and the application that + # acquired the `WebIdentityToken` (pairwise identifier). For OpenID + # Connect ID tokens, this field contains the value returned by the + # identity provider as the token's `sub` (Subject) claim. + # @return [String] + # + # @!attribute [rw] assumed_role_user + # The Amazon Resource Name (ARN) and the assumed role ID, which are + # identifiers that you can use to refer to the resulting temporary + # security credentials. For example, you can reference these + # credentials as a principal in a resource-based policy by using the + # ARN or assumed role ID. The ARN and ID include the `RoleSessionName` + # that you specified when you called `AssumeRole`. + # @return [Types::AssumedRoleUser] + # + # @!attribute [rw] packed_policy_size + # A percentage value that indicates the packed size of the session + # policies and session tags combined passed in the request. The + # request fails if the packed size is greater than 100 percent, which + # means the policies and tags exceeded the allowed space. + # @return [Integer] + # + # @!attribute [rw] provider + # The issuing authority of the web identity token presented. For + # OpenID Connect ID tokens, this contains the value of the `iss` + # field. For OAuth 2.0 access tokens, this contains the value of the + # `ProviderId` parameter that was passed in the + # `AssumeRoleWithWebIdentity` request. + # @return [String] + # + # @!attribute [rw] audience + # The intended audience (also known as client ID) of the web identity + # token. This is traditionally the client identifier issued to the + # application that requested the web identity token. + # @return [String] + # + # @!attribute [rw] source_identity + # The value of the source identity that is returned in the JSON web + # token (JWT) from the identity provider. + # + # You can require users to set a source identity value when they + # assume a role. 
You do this by using the `sts:SourceIdentity` + # condition key in a role trust policy. That way, actions that are + # taken with the role are associated with that user. After the source + # identity is set, the value cannot be changed. It is present in the + # request for all actions that are taken by the role and persists + # across [chained role][1] sessions. You can configure your identity + # provider to use an attribute associated with your users, like user + # name or email, as the source identity when calling + # `AssumeRoleWithWebIdentity`. You do this by adding a claim to the + # JSON web token. To learn more about OIDC tokens and claims, see + # [Using Tokens with User Pools][2] in the *Amazon Cognito Developer + # Guide*. For more information about using source identity, see + # [Monitor and control actions taken with assumed roles][3] in the + # *IAM User Guide*. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + # [2]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse AWS API Documentation + # + class AssumeRoleWithWebIdentityResponse < Struct.new( + :credentials, + :subject_from_web_identity_token, + :assumed_role_user, + :packed_policy_size, + :provider, + :audience, + :source_identity) + SENSITIVE = [] + include Aws::Structure + end + + # The identifiers for the temporary security credentials that the + # operation returns. + # + # @!attribute [rw] assumed_role_id + # A unique identifier that contains the role ID and the role session + # name of the role that is being assumed. The role ID is generated by + # Amazon Web Services when the role is created. + # @return [String] + # + # @!attribute [rw] arn + # The ARN of the temporary security credentials that are returned from + # the AssumeRole action. For more information about ARNs and how to + # use them in policies, see [IAM Identifiers][1] in the *IAM User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser AWS API Documentation + # + class AssumedRoleUser < Struct.new( + :assumed_role_id, + :arn) + SENSITIVE = [] + include Aws::Structure + end + + # Amazon Web Services credentials for API authentication. + # + # @!attribute [rw] access_key_id + # The access key ID that identifies the temporary security + # credentials. + # @return [String] + # + # @!attribute [rw] secret_access_key + # The secret access key that can be used to sign requests. + # @return [String] + # + # @!attribute [rw] session_token + # The token that users must pass to the service API to use the + # temporary credentials. + # @return [String] + # + # @!attribute [rw] expiration + # The date on which the current credentials expire. 
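+ #
+ # For example, a caller holding a Credentials struct `creds` might
+ # choose to refresh shortly before this time (an illustrative sketch):
+ #
+ #     expiring_soon = (creds.expiration - Time.now) < 300 # seconds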
+ # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials AWS API Documentation + # + class Credentials < Struct.new( + :access_key_id, + :secret_access_key, + :session_token, + :expiration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] encoded_message + # The encoded message that was returned with the response. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest AWS API Documentation + # + class DecodeAuthorizationMessageRequest < Struct.new( + :encoded_message) + SENSITIVE = [] + include Aws::Structure + end + + # A document that contains additional information about the + # authorization status of a request from an encoded message that is + # returned in response to an Amazon Web Services request. + # + # @!attribute [rw] decoded_message + # The API returns a response with the decoded message. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse AWS API Documentation + # + class DecodeAuthorizationMessageResponse < Struct.new( + :decoded_message) + SENSITIVE = [] + include Aws::Structure + end + + # The web identity token that was passed is expired or is not valid. Get + # a new identity token from the identity provider and then retry the + # request. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/ExpiredTokenException AWS API Documentation + # + class ExpiredTokenException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # Identifiers for the federated user that is associated with the + # credentials. + # + # @!attribute [rw] federated_user_id + # The string that identifies the federated user associated with the + # credentials, similar to the unique ID of an IAM user. + # @return [String] + # + # @!attribute [rw] arn + # The ARN that specifies the federated user that is associated with + # the credentials. For more information about ARNs and how to use them + # in policies, see [IAM Identifiers][1] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser AWS API Documentation + # + class FederatedUser < Struct.new( + :federated_user_id, + :arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] access_key_id + # The identifier of an access key. + # + # This parameter allows (through its regex pattern) a string of + # characters that can consist of any upper- or lowercase letter or + # digit. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfoRequest AWS API Documentation + # + class GetAccessKeyInfoRequest < Struct.new( + :access_key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] account + # The number used to identify the Amazon Web Services account. 
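+ #
+ # A minimal sketch (the access key ID below is AWS's documented
+ # example value, not a real key):
+ #
+ #     resp = Aws::STS::Client.new.get_access_key_info(
+ #       access_key_id: "AKIAIOSFODNN7EXAMPLE"
+ #     )
+ #     resp.account #=> "123456789012"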
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfoResponse AWS API Documentation + # + class GetAccessKeyInfoResponse < Struct.new( + :account) + SENSITIVE = [] + include Aws::Structure + end + + # @api private + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest AWS API Documentation + # + class GetCallerIdentityRequest < Aws::EmptyStructure; end + + # Contains the response to a successful GetCallerIdentity request, + # including information about the entity making the request. + # + # @!attribute [rw] user_id + # The unique identifier of the calling entity. The exact value depends + # on the type of entity that is making the call. The values returned + # are those listed in the **aws:userid** column in the [Principal + # table][1] found on the **Policy Variables** reference page in the + # *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable + # @return [String] + # + # @!attribute [rw] account + # The Amazon Web Services account ID number of the account that owns + # or contains the calling entity. + # @return [String] + # + # @!attribute [rw] arn + # The Amazon Web Services ARN associated with the calling entity. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse AWS API Documentation + # + class GetCallerIdentityResponse < Struct.new( + :user_id, + :account, + :arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] name + # The name of the federated user. The name is used as an identifier + # for the temporary security credentials (such as `Bob`). For example, + # you can reference the federated user name in a resource-based + # policy, such as in an Amazon S3 bucket policy. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@- + # @return [String] + # + # @!attribute [rw] policy + # An IAM policy in JSON format that you want to use as an inline + # session policy. + # + # You must pass an inline or managed [session policy][1] to this + # operation. You can pass a single JSON policy document to use as an + # inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. + # + # This parameter is optional. However, if you do not pass any session + # policies, then the resulting federated user session has no + # permissions. + # + # When you pass session policies, the session permissions are the + # intersection of the IAM user policies and the session policies that + # you pass. This gives you a way to further restrict the permissions + # for a federated user. You cannot use session policies to grant more + # permissions than those that are defined in the permissions policy of + # the IAM user. For more information, see [Session Policies][1] in the + # *IAM User Guide*. + # + # The resulting credentials can be used to access a resource that has + # a resource-based policy. If that policy specifically references the + # federated user session in the `Principal` element of the policy, the + # session has the permissions allowed by the policy. These permissions + # are granted in addition to the permissions that are granted by the + # session policies. 
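+ #
+ # An illustrative way to build such a policy string (the bucket name
+ # is a placeholder; uses the standard library's `json`):
+ #
+ #     require "json"
+ #     policy = {
+ #       "Version" => "2012-10-17",
+ #       "Statement" => [{
+ #         "Effect" => "Allow",
+ #         "Action" => "s3:GetObject",
+ #         "Resource" => "arn:aws:s3:::example-bucket/*"
+ #       }]
+ #     }.to_json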
+ # + # The plaintext that you use for both inline and managed session + # policies can't exceed 2,048 characters. The JSON policy characters + # can be any ASCII character from the space character to the end of + # the valid character list (\\u0020 through \\u00FF). It can also + # include the tab (\\u0009), linefeed (\\u000A), and carriage return + # (\\u000D) characters. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # @return [String] + # + # @!attribute [rw] policy_arns + # The Amazon Resource Names (ARNs) of the IAM managed policies that + # you want to use as a managed session policy. The policies must exist + # in the same account as the IAM user that is requesting federated + # access. + # + # You must pass an inline or managed [session policy][1] to this + # operation. You can pass a single JSON policy document to use as an + # inline session policy. You can also specify up to 10 managed policy + # Amazon Resource Names (ARNs) to use as managed session policies. The + # plaintext that you use for both inline and managed session policies + # can't exceed 2,048 characters. You can provide up to 10 managed + # policy ARNs. For more information about ARNs, see [Amazon Resource + # Names (ARNs) and Amazon Web Services Service Namespaces][2] in the + # Amazon Web Services General Reference. + # + # This parameter is optional. However, if you do not pass any session + # policies, then the resulting federated user session has no + # permissions. + # + # When you pass session policies, the session permissions are the + # intersection of the IAM user policies and the session policies that + # you pass. This gives you a way to further restrict the permissions + # for a federated user. You cannot use session policies to grant more + # permissions than those that are defined in the permissions policy of + # the IAM user. For more information, see [Session Policies][1] in the + # *IAM User Guide*. + # + # The resulting credentials can be used to access a resource that has + # a resource-based policy. If that policy specifically references the + # federated user session in the `Principal` element of the policy, the + # session has the permissions allowed by the policy. These permissions + # are granted in addition to the permissions that are granted by the + # session policies. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. 
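+ #
+ # In the Ruby SDK, managed policy ARNs are passed as an array of
+ # hashes, for example:
+ #
+ #     policy_arns: [
+ #       { arn: "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" }
+ #     ]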
+ # + # + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + # [2]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # @return [Array] + # + # @!attribute [rw] duration_seconds + # The duration, in seconds, that the session should last. Acceptable + # durations for federation sessions range from 900 seconds (15 + # minutes) to 129,600 seconds (36 hours), with 43,200 seconds (12 + # hours) as the default. Sessions obtained using Amazon Web Services + # account root user credentials are restricted to a maximum of 3,600 + # seconds (one hour). If the specified duration is longer than one + # hour, the session obtained by using root user credentials defaults + # to one hour. + # @return [Integer] + # + # @!attribute [rw] tags + # A list of session tags. Each session tag consists of a key name and + # an associated value. For more information about session tags, see + # [Passing Session Tags in STS][1] in the *IAM User Guide*. + # + # This parameter is optional. You can pass up to 50 session tags. The + # plaintext session tag keys can’t exceed 128 characters and the + # values can’t exceed 256 characters. For these and additional limits, + # see [IAM and STS Character Limits][2] in the *IAM User Guide*. + # + # An Amazon Web Services conversion compresses the passed inline + # session policy, managed policy ARNs, and session tags into a packed + # binary format that has a separate limit. Your request can fail for + # this limit even if your plaintext meets the other requirements. The + # `PackedPolicySize` response element indicates by percentage how + # close the policies and tags for your request are to the upper size + # limit. + # + # + # + # You can pass a session tag with the same key as a tag that is + # already attached to the user you are federating. When you do, + # session tags override a user tag with the same key. + # + # Tag key–value pairs are not case sensitive, but case is preserved. + # This means that you cannot have separate `Department` and + # `department` tag keys. Assume that the role has the + # `Department`=`Marketing` tag and you pass the + # `department`=`engineering` session tag. `Department` and + # `department` are not saved as separate tags, and the session tag + # passed in the request takes precedence over the role tag. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest AWS API Documentation + # + class GetFederationTokenRequest < Struct.new( + :name, + :policy, + :policy_arns, + :duration_seconds, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the response to a successful GetFederationToken request, + # including temporary Amazon Web Services credentials that can be used + # to make Amazon Web Services requests. + # + # @!attribute [rw] credentials + # The temporary security credentials, which include an access key ID, + # a secret access key, and a security (or session) token. + # + # The size of the security token that STS API operations return is not + # fixed. We strongly recommend that you make no assumptions about the + # maximum size. 
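+ #
+ # A hedged end-to-end sketch (the user name and the `policy` variable
+ # are illustrative):
+ #
+ #     resp = sts.get_federation_token(
+ #       name: "Bob",
+ #       policy: policy,
+ #       duration_seconds: 3600
+ #     )
+ #     resp.credentials.session_token
+ #     resp.federated_user.arn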
+ # + # + # @return [Types::Credentials] + # + # @!attribute [rw] federated_user + # Identifiers for the federated user associated with the credentials + # (such as `arn:aws:sts::123456789012:federated-user/Bob` or + # `123456789012:Bob`). You can use the federated user's ARN in your + # resource-based policies, such as an Amazon S3 bucket policy. + # @return [Types::FederatedUser] + # + # @!attribute [rw] packed_policy_size + # A percentage value that indicates the packed size of the session + # policies and session tags combined passed in the request. The + # request fails if the packed size is greater than 100 percent, which + # means the policies and tags exceeded the allowed space. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse AWS API Documentation + # + class GetFederationTokenResponse < Struct.new( + :credentials, + :federated_user, + :packed_policy_size) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] duration_seconds + # The duration, in seconds, that the credentials should remain valid. + # Acceptable durations for IAM user sessions range from 900 seconds + # (15 minutes) to 129,600 seconds (36 hours), with 43,200 seconds (12 + # hours) as the default. Sessions for Amazon Web Services account + # owners are restricted to a maximum of 3,600 seconds (one hour). If + # the duration is longer than one hour, the session for Amazon Web + # Services account owners defaults to one hour. + # @return [Integer] + # + # @!attribute [rw] serial_number + # The identification number of the MFA device that is associated with + # the IAM user who is making the `GetSessionToken` call. Specify this + # value if the IAM user has a policy that requires MFA authentication. + # The value is either the serial number for a hardware device (such as + # `GAHT12345678`) or an Amazon Resource Name (ARN) for a virtual + # device (such as `arn:aws:iam::123456789012:mfa/user`). You can find + # the device for an IAM user by going to the Amazon Web Services + # Management Console and viewing the user's security credentials. + # + # The regex used to validate this parameter is a string of characters + # consisting of upper- and lower-case alphanumeric characters with no + # spaces. You can also include underscores or any of the following + # characters: =,.@:/- + # @return [String] + # + # @!attribute [rw] token_code + # The value provided by the MFA device, if MFA is required. If any + # policy requires the IAM user to submit an MFA code, specify this + # value. If MFA authentication is required, the user must provide a + # code when requesting a set of temporary security credentials. A user + # who fails to provide the code receives an "access denied" response + # when requesting resources that require MFA authentication. + # + # The format for this parameter, as described by its regex pattern, is + # a sequence of six numeric digits. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest AWS API Documentation + # + class GetSessionTokenRequest < Struct.new( + :duration_seconds, + :serial_number, + :token_code) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the response to a successful GetSessionToken request, + # including temporary Amazon Web Services credentials that can be used + # to make Amazon Web Services requests. 
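+ #
+ # A minimal MFA-gated sketch (the serial number and token code are
+ # placeholders):
+ #
+ #     resp = sts.get_session_token(
+ #       serial_number: "arn:aws:iam::123456789012:mfa/user",
+ #       token_code: "123456",
+ #       duration_seconds: 3600
+ #     )
+ #     resp.credentials #=> Types::Credentials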
+ # + # @!attribute [rw] credentials + # The temporary security credentials, which include an access key ID, + # a secret access key, and a security (or session) token. + # + # The size of the security token that STS API operations return is not + # fixed. We strongly recommend that you make no assumptions about the + # maximum size. + # + # + # @return [Types::Credentials] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse AWS API Documentation + # + class GetSessionTokenResponse < Struct.new( + :credentials) + SENSITIVE = [] + include Aws::Structure + end + + # The request could not be fulfilled because the identity provider (IDP) + # that was asked to verify the incoming identity token could not be + # reached. This is often a transient error caused by network conditions. + # Retry the request a limited number of times so that you don't exceed + # the request rate. If the error persists, the identity provider might + # be down or not responding. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/IDPCommunicationErrorException AWS API Documentation + # + class IDPCommunicationErrorException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The identity provider (IdP) reported that authentication failed. This + # might be because the claim is invalid. + # + # If this error is returned for the `AssumeRoleWithWebIdentity` + # operation, it can also mean that the claim has expired or has been + # explicitly revoked. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/IDPRejectedClaimException AWS API Documentation + # + class IDPRejectedClaimException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The error returned if the message passed to + # `DecodeAuthorizationMessage` was invalid. This can happen if the token + # contains invalid characters, such as linebreaks. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/InvalidAuthorizationMessageException AWS API Documentation + # + class InvalidAuthorizationMessageException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The web identity token that was passed could not be validated by + # Amazon Web Services. Get a new identity token from the identity + # provider and then retry the request. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/InvalidIdentityTokenException AWS API Documentation + # + class InvalidIdentityTokenException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the policy document was malformed. + # The error message describes the specific error. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/MalformedPolicyDocumentException AWS API Documentation + # + class MalformedPolicyDocumentException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the total packed size of the session + # policies and session tags combined was too large. An Amazon Web + # Services conversion compresses the session policy document, session + # policy ARNs, and session tags into a packed binary format that has a + # separate limit. 
The error message indicates by percentage how close + # the policies and tags are to the upper size limit. For more + # information, see [Passing Session Tags in STS][1] in the *IAM User + # Guide*. + # + # You could receive this error even though you meet other defined + # session policy and session tag limits. For more information, see [IAM + # and STS Entity Character Limits][2] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/PackedPolicyTooLargeException AWS API Documentation + # + class PackedPolicyTooLargeException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # A reference to the IAM managed policy that is passed as a session + # policy for a role session or a federated user session. + # + # @!attribute [rw] arn + # The Amazon Resource Name (ARN) of the IAM managed policy to use as a + # session policy for the role. For more information about ARNs, see + # [Amazon Resource Names (ARNs) and Amazon Web Services Service + # Namespaces][1] in the *Amazon Web Services General Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/PolicyDescriptorType AWS API Documentation + # + class PolicyDescriptorType < Struct.new( + :arn) + SENSITIVE = [] + include Aws::Structure + end + + # STS is not activated in the requested region for the account that is + # being asked to generate credentials. The account administrator must + # use the IAM console to activate STS in that region. For more + # information, see [Activating and Deactivating Amazon Web Services STS + # in an Amazon Web Services Region][1] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/RegionDisabledException AWS API Documentation + # + class RegionDisabledException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # You can pass custom key-value pair attributes when you assume a role + # or federate a user. These are called session tags. You can then use + # the session tags to control access to resources. For more information, + # see [Tagging Amazon Web Services STS Sessions][1] in the *IAM User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + # + # @!attribute [rw] key + # The key for a session tag. + # + # You can pass up to 50 session tags. The plain text session tag keys + # can’t exceed 128 characters. For these and additional limits, see + # [IAM and STS Character Limits][1] in the *IAM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # @return [String] + # + # @!attribute [rw] value + # The value for a session tag. + # + # You can pass up to 50 session tags. The plain text session tag + # values can’t exceed 256 characters. For these and additional limits, + # see [IAM and STS Character Limits][1] in the *IAM User Guide*. 
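+ #
+ # Session tags are passed to the request operations as an array of
+ # key/value hashes, for example:
+ #
+ #     tags: [
+ #       { key: "Department", value: "Engineering" },
+ #       { key: "CostCenter", value: "12345" }
+ #     ]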
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Tag AWS API Documentation + # + class Tag < Struct.new( + :key, + :value) + SENSITIVE = [] + include Aws::Structure + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse.rb new file mode 100644 index 0000000..3ca8084 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +require_relative 'seahorse/util' + +# client + +require_relative 'seahorse/client/block_io' +require_relative 'seahorse/client/configuration' +require_relative 'seahorse/client/handler' +require_relative 'seahorse/client/handler_builder' +require_relative 'seahorse/client/handler_list' +require_relative 'seahorse/client/handler_list_entry' +require_relative 'seahorse/client/managed_file' +require_relative 'seahorse/client/networking_error' +require_relative 'seahorse/client/plugin' +require_relative 'seahorse/client/plugin_list' +require_relative 'seahorse/client/request' +require_relative 'seahorse/client/request_context' +require_relative 'seahorse/client/response' +require_relative 'seahorse/client/async_response' + +# client http + +require_relative 'seahorse/client/http/headers' +require_relative 'seahorse/client/http/request' +require_relative 'seahorse/client/http/response' +require_relative 'seahorse/client/http/async_response' + +# client logging + +require_relative 'seahorse/client/logging/handler' +require_relative 'seahorse/client/logging/formatter' + +# net http handler + +require_relative 'seahorse/client/net_http/connection_pool' +require_relative 'seahorse/client/net_http/handler' + +# http2 handler + +require_relative 'seahorse/client/h2/connection' +require_relative 'seahorse/client/h2/handler' + +# plugins + +require_relative 'seahorse/client/plugins/content_length' +require_relative 'seahorse/client/plugins/endpoint' +require_relative 'seahorse/client/plugins/logging' +require_relative 'seahorse/client/plugins/net_http' +require_relative 'seahorse/client/plugins/h2' +require_relative 'seahorse/client/plugins/raise_response_errors' +require_relative 'seahorse/client/plugins/response_target' +require_relative 'seahorse/client/plugins/request_callback' + +# model + +require_relative 'seahorse/model/api' +require_relative 'seahorse/model/operation' +require_relative 'seahorse/model/authorizer' +require_relative 'seahorse/model/shapes' + +require_relative 'seahorse/client/base' +require_relative 'seahorse/client/async_base' diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_base.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_base.rb new file mode 100644 index 0000000..af325d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_base.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module Seahorse + module Client + class AsyncBase < Seahorse::Client::Base + + # default H2 plugins + @plugins = PluginList.new([ + Plugins::Endpoint, + Plugins::H2, + Plugins::ResponseTarget + ]) + + def initialize(plugins, options) + super + @connection = H2::Connection.new(options) + @options = options + end + + # @return [H2::Connection] + attr_reader :connection + + # @return [Array] Returns a list of 
valid async request + # operation names. + def operation_names + self.class.api.async_operation_names + end + + # Closes the underlying HTTP2 Connection for the client + # @return [Symbol] Returns the status of the connection (:closed) + def close_connection + @connection.close! + end + + # Creates a new HTTP2 Connection for the client + # @return [Seahorse::Client::H2::Connection] + def new_connection + if @connection.closed? + @connection = H2::Connection.new(@options) + else + @connection + end + end + + def connection_errors + @connection.errors + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_response.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_response.rb new file mode 100644 index 0000000..1d937ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/async_response.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module Seahorse + module Client + class AsyncResponse + + def initialize(options = {}) + @response = Response.new(context: options[:context]) + @stream = options[:stream] + @stream_mutex = options[:stream_mutex] + @close_condition = options[:close_condition] + @sync_queue = options[:sync_queue] + end + + def context + @response.context + end + + def error + @response.error + end + + def on(range, &block) + @response.on(range, &block) + self + end + + def on_complete(&block) + @response.on_complete(&block) + self + end + + def wait + if error && context.config.raise_response_errors + raise error + elsif @stream + # have a sync signal that #signal can be blocked on + # else, if #signal is called before #wait + # will be waiting for a signal never arrives + @sync_queue << "sync_signal" + # now #signal is unlocked for + # signaling close condition when ready + @stream_mutex.synchronize { + @close_condition.wait(@stream_mutex) + } + @response + end + end + + def join! + if error && context.config.raise_response_errors + raise error + elsif @stream + # close callback is waiting + # for the "sync_signal" + @sync_queue << "sync_signal" + @stream.close + @response + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/base.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/base.rb new file mode 100644 index 0000000..93d181c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/base.rb @@ -0,0 +1,228 @@ +# frozen_string_literal: true + +require 'thread' + +module Seahorse + module Client + class Base + + include HandlerBuilder + + # default plugins + @plugins = PluginList.new([ + Plugins::Endpoint, + Plugins::NetHttp, + Plugins::RaiseResponseErrors, + Plugins::ResponseTarget, + Plugins::RequestCallback + ]) + + # @api private + def initialize(plugins, options) + @config = build_config(plugins, options) + @handlers = build_handler_list(plugins) + after_initialize(plugins) + end + + # @return [Configuration] + attr_reader :config + + # @return [HandlerList] + attr_reader :handlers + + # Builds and returns a {Request} for the named operation. The request + # will not have been sent. + # @param [Symbol, String] operation_name + # @return [Request] + def build_request(operation_name, params = {}) + Request.new( + @handlers.for(operation_name), + context_for(operation_name, params)) + end + + # @api private + def inspect + "#<#{self.class.name}>" + end + + # @return [Array] Returns a list of valid request operation + # names. 
These are valid arguments to {#build_request} and are also + # valid methods. + def operation_names + self.class.api.operation_names - self.class.api.async_operation_names + end + + private + + # Constructs a {Configuration} object and gives each plugin the + # opportunity to register options with default values. + def build_config(plugins, options) + config = Configuration.new + config.add_option(:api) + plugins.each do |plugin| + plugin.add_options(config) if plugin.respond_to?(:add_options) + end + config.build!(options.merge(api: self.class.api)) + end + + # Gives each plugin the opportunity to register handlers for this client. + def build_handler_list(plugins) + plugins.inject(HandlerList.new) do |handlers, plugin| + if plugin.respond_to?(:add_handlers) + plugin.add_handlers(handlers, @config) + end + handlers + end + end + + # Gives each plugin the opportunity to modify this client. + def after_initialize(plugins) + plugins.reverse.each do |plugin| + plugin.after_initialize(self) if plugin.respond_to?(:after_initialize) + end + end + + # @return [RequestContext] + def context_for(operation_name, params) + RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + end + + class << self + + def new(options = {}) + plugins = build_plugins + options = options.dup + before_initialize(plugins, options) + client = allocate + client.send(:initialize, plugins, options) + client + end + + # Registers a plugin with this client. + # + # @example Register a plugin + # + # ClientClass.add_plugin(PluginClass) + # + # @example Register a plugin by name + # + # ClientClass.add_plugin('gem-name.PluginClass') + # + # @example Register a plugin with an object + # + # plugin = MyPluginClass.new(options) + # ClientClass.add_plugin(plugin) + # + # @param [Class, Symbol, String, Object] plugin + # @see .clear_plugins + # @see .set_plugins + # @see .remove_plugin + # @see .plugins + # @return [void] + def add_plugin(plugin) + @plugins.add(plugin) + end + + # @see .clear_plugins + # @see .set_plugins + # @see .add_plugin + # @see .plugins + # @return [void] + def remove_plugin(plugin) + @plugins.remove(plugin) + end + + # @see .set_plugins + # @see .add_plugin + # @see .remove_plugin + # @see .plugins + # @return [void] + def clear_plugins + @plugins.set([]) + end + + # @param [Array] plugins + # @see .clear_plugins + # @see .add_plugin + # @see .remove_plugin + # @see .plugins + # @return [void] + def set_plugins(plugins) + @plugins.set(plugins) + end + + # Returns the list of registered plugins for this Client. Plugins are + # inherited from the client super class when the client is defined. + # @see .clear_plugins + # @see .set_plugins + # @see .add_plugin + # @see .remove_plugin + # @return [Array] + def plugins + Array(@plugins).freeze + end + + # @return [Model::Api] + def api + @api ||= Model::Api.new + end + + # @param [Model::Api] api + # @return [Model::Api] + def set_api(api) + @api = api + define_operation_methods + @api + end + + # @option options [Model::Api, Hash] :api ({}) + # @option options [Array] :plugins ([]) A list of plugins to + # add to the client class created. 
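+ #
+ # A rough usage sketch (the client class, API model, and plugin
+ # names below are illustrative, not part of this gem):
+ #
+ #     MyClient = Seahorse::Client::Base.define(
+ #       api: my_api_model,   # assumed to be a Seahorse::Model::Api
+ #       plugins: [MyPlugin]  # appended to the inherited plugin list
+ #     )
+ #     client = MyClient.new(endpoint: 'https://example.com')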
+ # @return [Class] + def define(options = {}) + subclass = Class.new(self) + subclass.set_api(options[:api] || api) + Array(options[:plugins]).each do |plugin| + subclass.add_plugin(plugin) + end + subclass + end + alias extend define + + private + + def define_operation_methods + operations_module = Module.new + @api.operation_names.each do |method_name| + operations_module.send(:define_method, method_name) do |*args, &block| + params = args[0] || {} + options = args[1] || {} + build_request(method_name, params).send_request(options, &block) + end + end + include(operations_module) + end + + def build_plugins + plugins.map { |plugin| plugin.is_a?(Class) ? plugin.new : plugin } + end + + def before_initialize(plugins, options) + plugins.each do |plugin| + plugin.before_initialize(self, options) if plugin.respond_to?(:before_initialize) + end + end + + def inherited(subclass) + subclass.instance_variable_set('@plugins', PluginList.new(@plugins)) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/block_io.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/block_io.rb new file mode 100644 index 0000000..e1315aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/block_io.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module Seahorse + module Client + class BlockIO + + def initialize(headers = nil, &block) + @headers = headers + @block = block + @size = 0 + end + + # @param [String] chunk + # @return [Integer] + def write(chunk) + @block.call(chunk, @headers) + ensure + chunk.bytesize.tap { |chunk_size| @size += chunk_size } + end + + # @param [Integer] bytes (nil) + # @param [String] output_buffer (nil) + # @return [String, nil] + def read(bytes = nil, output_buffer = nil) + data = bytes ? nil : '' + output_buffer ? output_buffer.replace(data || '') : data + end + + # @return [Integer] + def size + @size + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/configuration.rb new file mode 100644 index 0000000..19cc73d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/configuration.rb @@ -0,0 +1,237 @@ +# frozen_string_literal: true + +require 'set' + +module Seahorse + module Client + + # Configuration is used to define possible configuration options and + # then build read-only structures with user-supplied data. + # + # ## Adding Configuration Options + # + # Add configuration options with optional default values. These are used + # when building configuration objects. + # + # configuration = Configuration.new + # + # configuration.add_option(:max_retries, 3) + # configuration.add_option(:use_ssl, true) + # + # cfg = configuration.build! + # #=> # + # + # ## Building Configuration Objects + # + # Calling {#build!} on a {Configuration} object causes it to return + # a read-only (frozen) struct. Options passed to {#build!} are merged + # on top of any default options. + # + # configuration = Configuration.new + # configuration.add_option(:color, 'red') + # + # # default + # cfg1 = configuration.build! + # cfg1.color #=> 'red' + # + # # supplied color + # cfg2 = configuration.build!(color: 'blue') + # cfg2.color #=> 'blue' + # + # ## Accepted Options + # + # If you try to {#build!} a {Configuration} object with an unknown + # option, an `ArgumentError` is raised. 
+ # + # configuration = Configuration.new + # configuration.add_option(:color) + # configuration.add_option(:size) + # configuration.add_option(:category) + # + # configuration.build!(price: 100) + # #=> raises an ArgumentError, :price was not added as an option + # + class Configuration + + # @api private + Defaults = Class.new(Array) do + def each(&block) + reverse.to_a.each(&block) + end + end + + # @api private + class DynamicDefault + attr_accessor :block + + def initialize(block = nil) + @block = block + end + + def call(*args) + @block.call(*args) + end + end + + # @api private + def initialize + @defaults = Hash.new { |h,k| h[k] = Defaults.new } + end + + # Adds a getter method that returns the named option or a default + # value. Default values can be passed as a static positional argument + # or via a block. + # + # # defaults to nil + # configuration.add_option(:name) + # + # # with a string default + # configuration.add_option(:name, 'John Doe') + # + # # with a dynamic default value, evaluated once when calling #build! + # configuration.add_option(:name, 'John Doe') + # configuration.add_option(:username) do |config| + # config.name.gsub(/\W+/, '').downcase + # end + # cfg = configuration.build! + # cfg.name #=> 'John Doe' + # cfg.username #=> 'johndoe' + # + # @param [Symbol] name The name of the configuration option. This will + # be used to define a getter by the same name. + # + # @param default The default value for this option. You can specify + # a default by passing a value, a `Proc` object or a block argument. + # Procs and blocks are evaluated when {#build!} is called. + # + # @return [self] + def add_option(name, default = nil, &block) + default = DynamicDefault.new(block) if block_given? + @defaults[name.to_sym] << default + self + end + + # Constructs and returns a configuration structure. + # Values not present in `options` will default to those supplied via + # add option. + # + # configuration = Configuration.new + # configuration.add_option(:enabled, true) + # + # cfg1 = configuration.build! + # cfg1.enabled #=> true + # + # cfg2 = configuration.build!(enabled: false) + # cfg2.enabled #=> false + # + # If you pass in options to `#build!` that have not been defined, + # then an `ArgumentError` will be raised. + # + # configuration = Configuration.new + # configuration.add_option(:enabled, true) + # + # # oops, spelling error for :enabled + # cfg = configuration.build!(enabld: true) + # #=> raises ArgumentError + # + # The object returned is a frozen `Struct`. + # + # configuration = Configuration.new + # configuration.add_option(:enabled, true) + # + # cfg = configuration.build! + # cfg.enabled #=> true + # cfg[:enabled] #=> true + # cfg['enabled'] #=> true + # + # @param [Hash] options ({}) A hash of configuration options. + # @return [Struct] Returns a frozen configuration `Struct`. 
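+ #
+ # Note that when the same option is registered more than once, the
+ # most recently added default wins (unless it resolves to `nil`),
+ # because defaults are resolved in reverse registration order:
+ #
+ #     configuration = Configuration.new
+ #     configuration.add_option(:retries, 3)
+ #     configuration.add_option(:retries, 5)
+ #
+ #     configuration.build!.retries #=> 5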
+ def build!(options = {}) + struct = empty_struct + apply_options(struct, options) + apply_defaults(struct, options) + struct + end + + private + + def empty_struct + Struct.new(*@defaults.keys.sort).new + end + + def apply_options(struct, options) + options.each do |opt, value| + begin + struct[opt] = value + rescue NameError + msg = "invalid configuration option `#{opt.inspect}'" + raise ArgumentError, msg + end + end + end + + def apply_defaults(struct, options) + @defaults.each do |opt_name, defaults| + unless options.key?(opt_name) + struct[opt_name] = defaults + end + end + DefaultResolver.new(struct).resolve + end + + # @api private + class DefaultResolver + + def initialize(struct) + @struct = struct + @members = Set.new(@struct.members) + end + + def resolve + @members.each { |opt_name| value_at(opt_name) } + end + + def respond_to?(method_name, *args) + @members.include?(method_name) or super + end + + def override_config(k, v) + @struct[k] = v + end + + private + + def value_at(opt_name) + value = @struct[opt_name] + if value.is_a?(Defaults) + # Legacy endpoints must continue to exist. + if opt_name == :endpoint && @struct.members.include?(:regional_endpoint) + @struct[:regional_endpoint] = true + end + resolve_defaults(opt_name, value) + else + value + end + end + + def resolve_defaults(opt_name, defaults) + defaults.each do |default| + default = default.call(self) if default.is_a?(DynamicDefault) + @struct[opt_name] = default + break if !default.nil? + end + @struct[opt_name] + end + + def method_missing(method_name, *args) + if @members.include?(method_name) + value_at(method_name) + else + super + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/events.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/events.rb new file mode 100644 index 0000000..ab94329 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/events.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module EventEmitter + + def initialize(*args) + @listeners = {} + super + end + + def emit(event_name, *args, &block) + @listeners[event_name] ||= [] + @listeners[event_name] << block if block_given? + end + + def signal(event, *args) + @listeners + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/connection.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/connection.rb new file mode 100644 index 0000000..2b0b018 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/connection.rb @@ -0,0 +1,251 @@ +# frozen_string_literal: true + +begin + require 'http/2' +rescue LoadError; end +require 'openssl' +require 'socket' + +module Seahorse + module Client + # @api private + module H2 + + # H2 Connection build on top of `http/2` gem + # (requires Ruby >= 2.1) + # with TLS layer plus ALPN, requires: + # Ruby >= 2.3 and OpenSSL >= 1.0.2 + class Connection + + OPTIONS = { + max_concurrent_streams: 100, + connection_timeout: 60, + connection_read_timeout: 60, + http_wire_trace: false, + logger: nil, + ssl_verify_peer: true, + ssl_ca_bundle: nil, + ssl_ca_directory: nil, + ssl_ca_store: nil, + enable_alpn: false + } + + # chunk read size at socket + CHUNKSIZE = 1024 + + SOCKET_FAMILY = ::Socket::AF_INET + + def initialize(options = {}) + OPTIONS.each_pair do |opt_name, default_value| + value = options[opt_name].nil? ? 
default_value : options[opt_name] + instance_variable_set("@#{opt_name}", value) + end + @h2_client = HTTP2::Client.new( + settings_max_concurrent_streams: max_concurrent_streams + ) + @logger = if @http_wire_trace + options[:logger] || Logger.new($stdout) + end + @chunk_size = options[:read_chunk_size] || CHUNKSIZE + @errors = [] + @status = :ready + @mutex = Mutex.new # connection can be shared across requests + @socket = nil + @socket_thread = nil + end + + OPTIONS.keys.each do |attr_name| + attr_reader(attr_name) + end + + alias ssl_verify_peer? ssl_verify_peer + + attr_reader :errors + + attr_accessor :input_signal_thread + + def new_stream + begin + @h2_client.new_stream + rescue => error + raise Http2StreamInitializeError.new(error) + end + end + + def connect(endpoint) + @mutex.synchronize { + if @status == :ready + tcp, addr = _tcp_socket(endpoint) + debug_output("opening connection to #{endpoint.host}:#{endpoint.port} ...") + _nonblocking_connect(tcp, addr) + debug_output('opened') + + if endpoint.scheme == 'https' + @socket = OpenSSL::SSL::SSLSocket.new(tcp, _tls_context) + @socket.sync_close = true + @socket.hostname = endpoint.host + + debug_output("starting TLS for #{endpoint.host}:#{endpoint.port} ...") + @socket.connect + debug_output('TLS established') + else + @socket = tcp + end + + _register_h2_callbacks + @status = :active + elsif @status == :closed + msg = 'Async Client HTTP2 Connection is closed, you may'\ + ' use #new_connection to create a new HTTP2 Connection for this client' + raise Http2ConnectionClosedError.new(msg) + end + } + end + + def start(stream) + @mutex.synchronize { + return if @socket_thread + @socket_thread = Thread.new do + while @socket && !@socket.closed? + begin + data = @socket.read_nonblock(@chunk_size) + @h2_client << data + rescue IO::WaitReadable + begin + unless IO.select([@socket], nil, nil, connection_read_timeout) + self.debug_output('socket connection read time out') + self.close! + else + # available, retry to start reading + retry + end + rescue + # error can happen when closing the socket + # while it's waiting for read + self.close! + end + rescue EOFError + self.close! + rescue => error + self.debug_output(error.inspect) + @errors << error + self.close! + end + end + @socket_thread = nil + end + @socket_thread.abort_on_exception = true + } + end + + def close! + @mutex.synchronize { + self.debug_output('closing connection ...') + if @socket + @socket.close + @socket = nil + end + @status = :closed + } + end + + def closed? + @status == :closed + end + + def debug_output(msg, type = nil) + prefix = case type + when :send then '-> ' + when :receive then '<- ' + else + '' + end + return unless @logger + _debug_entry(prefix + msg) + end + + private + + def _debug_entry(str) + @logger << str + @logger << "\n" + end + + def _register_h2_callbacks + @h2_client.on(:frame) do |bytes| + if @socket.nil? 
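+ # A nil socket here means #close! already ran, e.g. because the
+ # read loop in #start hit EOF or a socket error, so this frame
+ # can no longer be written.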
+ msg = 'Connection is closed due to errors, '\ + 'you can find errors at async_client.connection.errors' + raise Http2ConnectionClosedError.new(msg) + else + @socket.print(bytes) + @socket.flush + end + end + if @http_wire_trace + @h2_client.on(:frame_sent) do |frame| + debug_output("frame: #{frame.inspect}", :send) + end + @h2_client.on(:frame_received) do |frame| + debug_output("frame: #{frame.inspect}", :receive) + end + end + end + + def _tcp_socket(endpoint) + tcp = ::Socket.new(SOCKET_FAMILY, ::Socket::SOCK_STREAM, 0) + tcp.setsockopt(::Socket::IPPROTO_TCP, ::Socket::TCP_NODELAY, 1) + + address = ::Socket.getaddrinfo(endpoint.host, nil, SOCKET_FAMILY).first[3] + sockaddr = ::Socket.sockaddr_in(endpoint.port, address) + + [tcp, sockaddr] + end + + def _nonblocking_connect(tcp, addr) + begin + tcp.connect_nonblock(addr) + rescue IO::WaitWritable + unless IO.select(nil, [tcp], nil, connection_timeout) + tcp.close + raise + end + begin + tcp.connect_nonblock(addr) + rescue Errno::EISCONN + # tcp socket connected, continue + end + end + end + + def _tls_context + ssl_ctx = OpenSSL::SSL::SSLContext.new(:TLSv1_2) + if ssl_verify_peer? + ssl_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + ssl_ctx.ca_file = ssl_ca_bundle ? ssl_ca_bundle : _default_ca_bundle + ssl_ctx.ca_path = ssl_ca_directory ? ssl_ca_directory : _default_ca_directory + ssl_ctx.cert_store = ssl_ca_store if ssl_ca_store + else + ssl_ctx.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + if enable_alpn + debug_output('enabling ALPN for TLS ...') + ssl_ctx.alpn_protocols = ['h2'] + end + ssl_ctx + end + + def _default_ca_bundle + File.exist?(OpenSSL::X509::DEFAULT_CERT_FILE) ? + OpenSSL::X509::DEFAULT_CERT_FILE : nil + end + + def _default_ca_directory + Dir.exist?(OpenSSL::X509::DEFAULT_CERT_DIR) ? 
+ OpenSSL::X509::DEFAULT_CERT_DIR : nil + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/handler.rb new file mode 100644 index 0000000..9fb49fd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/h2/handler.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true + +begin + require 'http/2' +rescue LoadError; end + +require 'securerandom' + +module Seahorse + module Client + # @api private + module H2 + + NETWORK_ERRORS = [ + SocketError, EOFError, IOError, Timeout::Error, + Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, + Errno::EINVAL, Errno::ETIMEDOUT, OpenSSL::SSL::SSLError, + Errno::EHOSTUNREACH, Errno::ECONNREFUSED,# OpenSSL::SSL::SSLErrorWaitReadable + ] + + # @api private + DNS_ERROR_MESSAGES = [ + 'getaddrinfo: nodename nor servname provided, or not known', # MacOS + 'getaddrinfo: Name or service not known' # GNU + ] + + class Handler < Client::Handler + + def call(context) + stream = nil + begin + conn = context.client.connection + stream = conn.new_stream + + stream_mutex = Mutex.new + close_condition = ConditionVariable.new + sync_queue = Queue.new + + conn.connect(context.http_request.endpoint) + _register_callbacks( + context.http_response, + stream, + stream_mutex, + close_condition, + sync_queue + ) + + conn.debug_output("sending initial request ...") + if input_emitter = context[:input_event_emitter] + _send_initial_headers(context.http_request, stream) + + # prepare for sending events later + input_emitter.stream = stream + # request sigv4 serves as the initial #prior_signature + input_emitter.encoder.prior_signature = + context.http_request.headers['authorization'].split('Signature=').last + input_emitter.validate_event = context.config.validate_params + else + _send_initial_headers(context.http_request, stream) + _send_initial_data(context.http_request, stream) + end + + conn.start(stream) + rescue *NETWORK_ERRORS => error + error = NetworkingError.new( + error, error_message(context.http_request, error)) + context.http_response.signal_error(error) + rescue => error + conn.debug_output(error.inspect) + # not retryable + context.http_response.signal_error(error) + end + + AsyncResponse.new( + context: context, + stream: stream, + stream_mutex: stream_mutex, + close_condition: close_condition, + sync_queue: sync_queue + ) + end + + private + + def _register_callbacks(resp, stream, stream_mutex, close_condition, sync_queue) + stream.on(:headers) do |headers| + resp.signal_headers(headers) + end + + stream.on(:data) do |data| + resp.signal_data(data) + end + + stream.on(:close) do + resp.signal_done + # block until #wait is ready for signal + # else deadlock may happen because #signal happened + # eariler than #wait (see AsyncResponse#wait) + sync_queue.pop + stream_mutex.synchronize { + close_condition.signal + } + end + end + + def _send_initial_headers(req, stream) + begin + headers = _h2_headers(req) + stream.headers(headers, end_stream: false) + rescue => e + raise Http2InitialRequestError.new(e) + end + end + + def _send_initial_data(req, stream) + begin + data = req.body.read + stream.data(data, end_stream: true) + rescue => e + raise Http2InitialRequestError.new(e) + end + data + end + + # H2 pseudo headers + # https://http2.github.io/http2-spec/#rfc.section.8.1.2.3 + def _h2_headers(req) + headers = {} + headers[':method'] = req.http_method.upcase + headers[':scheme'] = 
req.endpoint.scheme + headers[':path'] = req.endpoint.path.empty? ? '/' : req.endpoint.path + if req.endpoint.query && !req.endpoint.query.empty? + headers[':path'] += "?#{req.endpoint.query}" + end + req.headers.each {|k, v| headers[k.downcase] = v } + headers + end + + def error_message(req, error) + if error.is_a?(SocketError) && DNS_ERROR_MESSAGES.include?(error.message) + host = req.endpoint.host + "unable to connect to `#{host}`; SocketError: #{error.message}" + else + error.message + end + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler.rb new file mode 100644 index 0000000..925fff7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module Seahorse + module Client + class Handler + + # @param [Handler] handler (nil) The next handler in the stack that + # should be called from within the {#call} method. This value + # must only be nil for send handlers. + def initialize(handler = nil) + @handler = handler + end + + # @return [Handler, nil] + attr_accessor :handler + + # @param [RequestContext] context + # @return [Response] + def call(context) + @handler.call(context) + end + + def inspect + "#<#{self.class.name||'UnnamedHandler'} @handler=#{@handler.inspect}>" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_builder.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_builder.rb new file mode 100644 index 0000000..47415af --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_builder.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module Seahorse + module Client + + # This module provides the ability to add handlers to a class or + # module. The including class or extending module must respond to + # `#handlers`, returning a {HandlerList}. + module HandlerBuilder + + def handle_request(*args, &block) + handler(*args) do |context| + block.call(context) + @handler.call(context) + end + end + + def handle_response(*args, &block) + handler(*args) do |context| + resp = @handler.call(context) + block.call(resp) if resp.context.http_response.status_code > 0 + resp + end + end + + def handle(*args, &block) + options = args.last.is_a?(Hash) ? args.pop : {} + handler_class = block ? 
handler_for(*args, &block) : args.first + handlers.add(handler_class, options) + end + alias handler handle + + # @api private + def handler_for(name = nil, &block) + if name + const_set(name, new_handler(block)) + else + new_handler(block) + end + end + + # @api private + def new_handler(block) + Class.new(Handler) do + define_method(:call, &block) + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list.rb new file mode 100644 index 0000000..6124c13 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list.rb @@ -0,0 +1,195 @@ +# frozen_string_literal: true + +require 'thread' +require 'set' + +module Seahorse + module Client + class HandlerList + + include Enumerable + + # @api private + def initialize(options = {}) + @index = options[:index] || 0 + @entries = {} + @mutex = Mutex.new + entries = options[:entries] || [] + add_entries(entries) unless entries.empty? + end + + # @return [Array] + def entries + @mutex.synchronize do + @entries.values + end + end + + # Registers a handler. Handlers are used to build a handler stack. + # Handlers default to the `:build` step with default priority of 50. + # The step and priority determine where in the stack a handler + # will be. + # + # ## Handler Stack Ordering + # + # A handler stack is built from the inside-out. The stack is + # seeded with the send handler. Handlers are constructed recursively + # in reverse step and priority order so that the highest priority + # handler is on the outside. + # + # By constructing the stack from the inside-out, this ensures + # that the validate handlers will be called first and the sign handlers + # will be called just before the final and only send handler is called. + # + # ## Steps + # + # Handlers are ordered first by step. These steps represent the + # life-cycle of a request. Valid steps are: + # + # * `:initialize` + # * `:validate` + # * `:build` + # * `:sign` + # * `:send` + # + # Many handlers can be added to the same step, except for `:send`. + # There can be only one `:send` handler. Adding an additional + # `:send` handler replaces the previous one. + # + # ## Priorities + # + # Handlers within a single step are executed in priority order. The + # higher the priority, the earlier in the stack the handler will + # be called. + # + # * Handler priority is an integer between 0 and 99, inclusively. + # * Handler priority defaults to 50. + # * When multiple handlers are added to the same step with the same + # priority, the last one added will have the highest priority and + # the first one added will have the lowest priority. + # + # @param [Class] handler_class This should be a subclass + # of {Handler}. + # + # @option options [Symbol] :step (:build) The request life-cycle + # step the handler should run in. Defaults to `:build`. The + # list of possible steps, in high-to-low priority order are: + # + # * `:initialize` + # * `:validate` + # * `:build` + # * `:sign` + # * `:send` + # + # There can only be one send handler. Registering an additional + # `:send` handler replaces the previous one. + # + # @option options [Integer] :priority (50) The priority of this + # handler within a step. The priority must be between 0 and 99 + # inclusively. It defaults to 50. When two handlers have the + # same `:step` and `:priority`, the handler registered last has + # the highest priority. 
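+ #
+ # For example, to register a handler that runs during the `:sign`
+ # step, ahead of the default-priority sign handlers (the handler
+ # class name here is illustrative):
+ #
+ #     handlers.add(MySigningAuditHandler, step: :sign, priority: 80)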
+ # + # @option options [Array] :operations A list of + # operations names the handler should be applied to. When + # `:operations` is omitted, the handler is applied to all + # operations for the client. + # + # @raise [InvalidStepError] + # @raise [InvalidPriorityError] + # @note There can be only one `:send` handler. Adding an additional + # send handler replaces the previous. + # + # @return [Class] Returns the handler class that was added. + # + def add(handler_class, options = {}) + @mutex.synchronize do + add_entry( + HandlerListEntry.new(options.merge( + handler_class: handler_class, + inserted: next_index + )) + ) + end + handler_class + end + + # @param [Class] handler_class + def remove(handler_class) + @entries.each do |key, entry| + @entries.delete(key) if entry.handler_class == handler_class + end + end + + # Copies handlers from the `source_list` onto the current handler list. + # If a block is given, only the entries that return a `true` value + # from the block will be copied. + # @param [HandlerList] source_list + # @return [void] + def copy_from(source_list, &block) + entries = [] + source_list.entries.each do |entry| + if block_given? + entries << entry.copy(inserted: next_index) if yield(entry) + else + entries << entry.copy(inserted: next_index) + end + end + add_entries(entries) + end + + # Returns a handler list for the given operation. The returned + # will have the operation specific handlers merged with the common + # handlers. + # @param [String] operation The name of an operation. + # @return [HandlerList] + def for(operation) + HandlerList.new(index: @index, entries: filter(operation.to_s)) + end + + # Yields the handlers in stack order, which is reverse priority. + def each(&block) + entries.sort.each do |entry| + yield(entry.handler_class) if entry.operations.nil? + end + end + + # Constructs the handlers recursively, building a handler stack. + # The `:send` handler will be at the top of the stack and the + # `:validate` handlers will be at the bottom. + # @return [Handler] + def to_stack + inject(nil) { |stack, handler| handler.new(stack) } + end + + private + + def add_entries(entries) + @mutex.synchronize do + entries.each { |entry| add_entry(entry) } + end + end + + def add_entry(entry) + key = entry.step == :send ? :send : entry.object_id + @entries[key] = entry + end + + def filter(operation) + entries.inject([]) do |filtered, entry| + if entry.operations.nil? + filtered << entry.copy + elsif entry.operations.include?(operation) + filtered << entry.copy(operations: nil) + end + filtered + end + end + + def next_index + @index += 1 + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list_entry.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list_entry.rb new file mode 100644 index 0000000..1b48742 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/handler_list_entry.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +module Seahorse + module Client + + # A container for an un-constructed handler. A handler entry has the + # handler class, and information about handler priority/order. + # + # This class is an implementation detail of the {HandlerList} class. + # Do not rely on public interfaces of this class. 
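+ #
+ # Entries sort by a computed weight of `STEPS[step] + priority`.
+ # For example, `step: :sign, priority: 80` weighs 100 + 80 = 180,
+ # so it is invoked after every `:build` entry (weight 200 and up)
+ # but ahead of default-priority `:sign` entries (weight 150).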
+ class HandlerListEntry + + STEPS = { + initialize: 400, + validate: 300, + build: 200, + sign: 100, + send: 0, + } + + # @option options [required, Class] :handler_class + # @option options [required, Integer] :inserted The insertion + # order/position. This is used to determine sort order when two + # entries have the same priority. + # @option options [Symbol] :step (:build) + # @option options [Integer] :priority (50) + # @option options [Set] :operations + def initialize(options) + @options = options + @handler_class = option(:handler_class, options) + @inserted = option(:inserted, options) + @operations = options[:operations] + @operations = Set.new(options[:operations]).map(&:to_s) if @operations + set_step(options[:step] || :build) + set_priority(options[:priority] || 50) + compute_weight + end + + # @return [Handler, Class] Returns the handler. This may + # be a constructed handler object or a handler class. + attr_reader :handler_class + + # @return [Integer] The insertion order/position. This is used to + # determine sort order when two entries have the same priority. + # Entries inserted later (with a higher inserted value) have a + # lower priority. + attr_reader :inserted + + # @return [Symbol] + attr_reader :step + + # @return [Integer] + attr_reader :priority + + # @return [Set] + attr_reader :operations + + # @return [Integer] + attr_reader :weight + + # @api private + def <=>(other) + if weight == other.weight + other.inserted <=> inserted + else + weight <=> other.weight + end + end + + # @option options (see #initialize) + # @return [HandlerListEntry] + def copy(options = {}) + HandlerListEntry.new(@options.merge(options)) + end + + private + + def option(name, options) + if options.key?(name) + options[name] + else + msg = "missing option: `%s'" + raise ArgumentError, msg % name.inspect + end + end + + def set_step(step) + if STEPS.key?(step) + @step = step + else + msg = "invalid :step `%s', must be one of :initialize, :validate, "\ + ':build, :sign or :send' + raise ArgumentError, msg % step.inspect + end + end + + def set_priority(priority) + if (0..99).include?(priority) + @priority = priority + else + msg = "invalid :priority `%s', must be between 0 and 99" + raise ArgumentError, msg % priority.inspect + end + end + + def compute_weight + @weight = STEPS[@step] + @priority + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/async_response.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/async_response.rb new file mode 100644 index 0000000..2833eae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/async_response.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Http + class AsyncResponse < Seahorse::Client::Http::Response + + def initialize(options = {}) + super + end + + def signal_headers(headers) + # H2 headers arrive as array of pair + hash = headers.inject({}) do |h, pair| + key, value = pair + h[key] = value + h + end + @status_code = hash[":status"].to_i + @headers = Headers.new(hash) + emit(:headers, @status_code, @headers) + end + + def signal_done(options = {}) + # H2 only has header and body + # ':status' header will be sent back + if options.keys.sort == [:body, :headers] + signal_headers(options[:headers]) + signal_data(options[:body]) + signal_done + elsif options.empty? 
+ @body.rewind if @body.respond_to?(:rewind) + @done = true + emit(:done) + else + msg = "options must be empty or must contain :headers and :body" + raise ArgumentError, msg + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/headers.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/headers.rb new file mode 100644 index 0000000..084f148 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/headers.rb @@ -0,0 +1,120 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Http + + # Provides a Hash-like interface for HTTP headers. Header names + # are treated indifferently as lower-cased strings. Header values + # are cast to strings. + # + # headers = Http::Headers.new + # headers['Content-Length'] = 100 + # headers[:Authorization] = 'Abc' + # + # headers.keys + # #=> ['content-length', 'authorization'] + # + # headers.values + # #=> ['100', 'Abc'] + # + # You can get the header values as a vanilla hash by calling {#to_h}: + # + # headers.to_h + # #=> { 'content-length' => '100', 'authorization' => 'Abc' } + # + class Headers + + include Enumerable + + # @api private + def initialize(headers = {}) + @data = {} + headers.each_pair do |key, value| + self[key] = value + end + end + + # @param [String] key + # @return [String] + def [](key) + @data[key.to_s.downcase] + end + + # @param [String] key + # @param [String] value + def []=(key, value) + @data[key.to_s.downcase] = value.to_s + end + + # @param [Hash] headers + # @return [Headers] + def update(headers) + headers.each_pair do |k, v| + self[k] = v + end + self + end + + # @param [String] key + def delete(key) + @data.delete(key.to_s.downcase) + end + + def clear + @data = {} + end + + # @return [Array] + def keys + @data.keys + end + + # @return [Array] + def values + @data.values + end + + # @return [Array] + def values_at(*keys) + @data.values_at(*keys.map{ |key| key.to_s.downcase }) + end + + # @yield [key, value] + # @yieldparam [String] key + # @yieldparam [String] value + # @return [nil] + def each(&block) + if block_given? + @data.each_pair do |key, value| + yield(key, value) + end + nil + else + @data.enum_for(:each) + end + end + alias each_pair each + + # @return [Boolean] Returns `true` if the header is set. + def key?(key) + @data.key?(key.to_s.downcase) + end + alias has_key? key? + alias include? key? 
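+
+ # Note that only letter case is folded when matching keys;
+ # underscores are not translated to dashes:
+ #
+ # headers['Content-Type'] = 'text/plain'
+ # headers.key?('CONTENT-TYPE') #=> true
+ # headers.key?(:content_type) #=> false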
+ + # @return [Hash] + def to_hash + @data.dup + end + alias to_h to_hash + + # @api private + def inspect + @data.inspect + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/request.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/request.rb new file mode 100644 index 0000000..e1f1fdd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/request.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require 'stringio' +require 'uri' + +module Seahorse + module Client + module Http + class Request + + # @option options [URI::HTTP, URI::HTTPS] :endpoint (nil) + # @option options [String] :http_method ('GET') + # @option options [Headers] :headers (Headers.new) + # @option options [Body] :body (StringIO.new) + def initialize(options = {}) + self.endpoint = options[:endpoint] + self.http_method = options[:http_method] || 'GET' + self.headers = Headers.new(options[:headers] || {}) + self.body = options[:body] + end + + # @return [String] The HTTP request method, e.g. `GET`, `PUT`, etc. + attr_accessor :http_method + + # @return [Headers] The hash of request headers. + attr_accessor :headers + + # @return [URI::HTTP, URI::HTTPS, nil] + def endpoint + @endpoint + end + + # @param [String, URI::HTTP, URI::HTTPS, nil] endpoint + def endpoint=(endpoint) + endpoint = URI.parse(endpoint) if endpoint.is_a?(String) + if endpoint.nil? or URI::HTTP === endpoint or URI::HTTPS === endpoint + @endpoint = endpoint + else + msg = 'invalid endpoint, expected URI::HTTP, URI::HTTPS, or nil, '\ + "got #{endpoint.inspect}" + raise ArgumentError, msg + end + end + + # @return [IO] + def body + @body + end + + # @return [String] + def body_contents + body.rewind + contents = body.read + body.rewind + contents + end + + # @param [#read, #size, #rewind] io + def body=(io) + @body = case io + when nil then StringIO.new('') + when String then StringIO.new(io) + else io + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/response.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/response.rb new file mode 100644 index 0000000..9e1bd98 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/http/response.rb @@ -0,0 +1,188 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Http + class Response + + # @option options [Integer] :status_code (0) + # @option options [Headers] :headers (Headers.new) + # @option options [IO] :body (StringIO.new) + def initialize(options = {}) + @status_code = options[:status_code] || 0 + @headers = options[:headers] || Headers.new + @body = options[:body] || StringIO.new + @listeners = Hash.new { |h,k| h[k] = [] } + @complete = false + @done = nil + @error = nil + end + + # @return [Integer] Returns `0` if the request failed to generate + # any response. 
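+ # A `0` here therefore generally indicates a transport-level
+ # failure (see #error) rather than an HTTP error response.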
+ attr_accessor :status_code + + # @return [Headers] + attr_accessor :headers + + # @return [StandardError, nil] + attr_reader :error + + # @return [IO] + def body + @body + end + + # @param [#read, #size, #rewind] io + def body=(io) + @body = case io + when nil then StringIO.new('') + when String then StringIO.new(io) + else io + end + end + + # @return [String|Array] + def body_contents + if body.is_a?(Array) + # an array of parsed events + body + else + body.rewind + contents = body.read + body.rewind + contents + end + end + + # @param [Integer] status_code + # @param [Hash] headers + def signal_headers(status_code, headers) + @status_code = status_code + @headers = Headers.new(headers) + emit(:headers, @status_code, @headers) + end + + # @param [string] chunk + def signal_data(chunk) + unless chunk == '' + @body.write(chunk) + emit(:data, chunk) + end + end + + # Completes the http response. + # + # @example Completing the response in a single call + # + # http_response.signal_done( + # status_code: 200, + # headers: {}, + # body: '' + # ) + # + # @example Complete the response in parts + # + # # signal headers straight-way + # http_response.signal_headers(200, {}) + # + # # signal data as it is received from the socket + # http_response.signal_data("...") + # http_response.signal_data("...") + # http_response.signal_data("...") + # + # # signal done once the body data is all written + # http_response.signal_done + # + # @overload signal_done() + # + # @overload signal_done(options = {}) + # @option options [required, Integer] :status_code + # @option options [required, Hash] :headers + # @option options [required, String] :body + # + def signal_done(options = {}) + if options.keys.sort == [:body, :headers, :status_code] + signal_headers(options[:status_code], options[:headers]) + signal_data(options[:body]) + signal_done + elsif options.empty? 
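+ # Bare #signal_done: the headers and body were already signalled
+ # incrementally, so just rewind the body for readers and notify
+ # the :done listeners.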
+ @body.rewind if @body.respond_to?(:rewind) + @done = true + emit(:done) + else + msg = 'options must be empty or must contain :status_code, :headers, '\ + 'and :body' + raise ArgumentError, msg + end + end + + # @param [StandardError] networking_error + def signal_error(networking_error) + @error = networking_error + signal_done + end + + def on_headers(status_code_range = nil, &block) + @listeners[:headers] << listener(status_code_range, block) + end + + def on_data(&callback) + @listeners[:data] << callback + end + + def on_done(status_code_range = nil, &callback) + listener = listener(status_code_range, callback) + if @done + listener.call + else + @listeners[:done] << listener + end + end + + def on_success(status_code_range = 200..599, &callback) + on_done(status_code_range) do + unless @error + yield + end + end + end + + def on_error(&callback) + on_done(0..599) do + if @error + yield(@error) + end + end + end + + def reset + @status_code = 0 + @headers.clear + @body.truncate(0) + @error = nil + end + + private + + def listener(range, callback) + range = range..range if Integer === range + if range + lambda do |*args| + if range.include?(@status_code) + callback.call(*args) + end + end + else + callback + end + end + + def emit(event_name, *args) + @listeners[event_name].each { |listener| listener.call(*args) } + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/formatter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/formatter.rb new file mode 100644 index 0000000..4cdfa54 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/formatter.rb @@ -0,0 +1,320 @@ +# frozen_string_literal: true + +require 'pathname' + +module Seahorse + module Client + # @deprecated Use Aws::Logging instead. + # @api private + module Logging + + # A log formatter receives a {Response} object and return + # a log message as a string. When you construct a {Formatter}, you provide + # a pattern string with substitutions. + # + # pattern = ':operation :http_response_status_code :time' + # formatter = Seahorse::Client::Logging::Formatter.new(pattern) + # formatter.format(response) + # #=> 'get_bucket 200 0.0352' + # + # # Canned Formatters + # + # Instead of providing your own pattern, you can choose a canned log + # formatter. + # + # * {Formatter.default} + # * {Formatter.colored} + # * {Formatter.short} + # + # # Pattern Substitutions + # + # You can put any of these placeholders into you pattern. + # + # * `:client_class` - The name of the client class. + # + # * `:operation` - The name of the client request method. + # + # * `:request_params` - The user provided request parameters. Long + # strings are truncated/summarized if they exceed the + # {#max_string_size}. Other objects are inspected. + # + # * `:time` - The total time in seconds spent on the + # request. This includes client side time spent building + # the request and parsing the response. + # + # * `:retries` - The number of times a client request was retried. + # + # * `:http_request_method` - The http request verb, e.g., `POST`, + # `PUT`, `GET`, etc. + # + # * `:http_request_endpoint` - The request endpoint. This includes + # the scheme, host and port, but not the path. + # + # * `:http_request_scheme` - This is replaced by `http` or `https`. + # + # * `:http_request_host` - The host name of the http request + # endpoint (e.g. 's3.amazon.com'). 
+ # + # * `:http_request_port` - The port number (e.g. '443' or '80'). + # + # * `:http_request_headers` - The http request headers, inspected. + # + # * `:http_request_body` - The http request payload. + # + # * `:http_response_status_code` - The http response status + # code, e.g., `200`, `404`, `500`, etc. + # + # * `:http_response_headers` - The http response headers, inspected. + # + # * `:http_response_body` - The http response body contents. + # + # * `:error_class` + # + # * `:error_message` + # + class Formatter + + # @param [String] pattern The log format pattern should be a string + # and may contain substitutions. + # + # @option options [Integer] :max_string_size (1000) When summarizing + # request parameters, strings longer than this value will be + # truncated. + # + def initialize(pattern, options = {}) + @pattern = pattern + @max_string_size = options[:max_string_size] || 1000 + end + + # @return [String] + attr_reader :pattern + + # @return [Integer] + attr_reader :max_string_size + + # Given a {Response}, this will format a log message and return it + # as a string. + # @param [Response] response + # @return [String] + def format(response) + pattern.gsub(/:(\w+)/) {|sym| send("_#{sym[1..-1]}", response) } + end + + # @api private + def eql?(other) + other.is_a?(self.class) and other.pattern == self.pattern + end + alias :== :eql? + + private + + def method_missing(method_name, *args) + if method_name.to_s.chars.first == '_' + ":#{method_name.to_s[1..-1]}" + else + super + end + end + + def _client_class(response) + response.context.client.class.name + end + + def _operation(response) + response.context.operation_name + end + + def _request_params(response) + summarize_hash(response.context.params) + end + + def _time(response) + duration = response.context[:logging_completed_at] - + response.context[:logging_started_at] + ("%.06f" % duration).sub(/0+$/, '') + end + + def _retries(response) + response.context.retries + end + + def _http_request_endpoint(response) + response.context.http_request.endpoint.to_s + end + + def _http_request_scheme(response) + response.context.http_request.endpoint.scheme + end + + def _http_request_host(response) + response.context.http_request.endpoint.host + end + + def _http_request_port(response) + response.context.http_request.endpoint.port.to_s + end + + def _http_request_method(response) + response.context.http_request.http_method + end + + def _http_request_headers(response) + response.context.http_request.headers.inspect + end + + def _http_request_body(response) + summarize_value(response.context.http_request.body_contents) + end + + def _http_response_status_code(response) + response.context.http_response.status_code.to_s + end + + def _http_response_headers(response) + response.context.http_response.headers.inspect + end + + def _http_response_body(response) + if response.context.http_response.body.respond_to?(:rewind) + summarize_value(response.context.http_response.body_contents) + else + '' + end + end + + def _error_class(response) + response.error ? response.error.class.name : '' + end + + def _error_message(response) + response.error ? response.error.message : '' + end + + # @param [Hash] hash + # @return [String] + def summarize_hash(hash) + hash.keys.first.is_a?(String) ? 
+ summarize_string_hash(hash) :
+ summarize_symbol_hash(hash)
+ end
+
+ def summarize_symbol_hash(hash)
+ hash.map do |key,v|
+ "#{key}:#{summarize_value(v)}"
+ end.join(",")
+ end
+
+ def summarize_string_hash(hash)
+ hash.map do |key,v|
+ "#{key.inspect}=>#{summarize_value(v)}"
+ end.join(",")
+ end
+
+ # @param [Object] value
+ # @return [String]
+ def summarize_value value
+ case value
+ when String then summarize_string(value)
+ when Hash then '{' + summarize_hash(value) + '}'
+ when Array then summarize_array(value)
+ when File then summarize_file(value.path)
+ when Pathname then summarize_file(value)
+ else value.inspect
+ end
+ end
+
+ # @param [String] str
+ # @return [String]
+ def summarize_string str
+ max = max_string_size
+ if str.size > max
+ "#<String #{str[0...max].inspect} ... (#{str.size} bytes)>"
+ else
+ str.inspect
+ end
+ end
+
+ # Given the path to a file on disk, this method returns a summarized
+ # inspection string that includes the file size.
+ # @param [String] path
+ # @return [String]
+ def summarize_file path
+ "#<File:#{path} (#{File.size(path)} bytes)>"
+ end
+
+ # @param [Array] array
+ # @return [String]
+ def summarize_array array
+ "[" + array.map{|v| summarize_value(v) }.join(",") + "]"
+ end
+
+ class << self
+
+ # The default log format.
+ #
+ # @example A sample of the default format.
+ #
+ # [ClientClass 200 0.580066 0 retries] list_objects(:bucket_name => 'bucket')
+ #
+ # @return [Formatter]
+ #
+ def default
+ pattern = []
+ pattern << "[:client_class"
+ pattern << ":http_response_status_code"
+ pattern << ":time"
+ pattern << ":retries retries]"
+ pattern << ":operation(:request_params)"
+ pattern << ":error_class"
+ pattern << ":error_message"
+ Formatter.new(pattern.join(' ') + "\n")
+ end
+
+ # The short log format. Similar to default, but it does not
+ # inspect the request params or report on retries.
+ #
+ # @example A sample of the short format
+ #
+ # [ClientClass 200 0.494532] list_buckets
+ #
+ # @return [Formatter]
+ #
+ def short
+ pattern = []
+ pattern << "[:client_class"
+ pattern << ":http_response_status_code"
+ pattern << ":time]"
+ pattern << ":operation"
+ pattern << ":error_class"
+ Formatter.new(pattern.join(' ') + "\n")
+ end
+
+ # The default log format with ANSI colors.
+ #
+ # @example A sample of the colored format (sans the ansi colors).
+ #
+ # [ClientClass 200 0.580066 0 retries] list_objects(:bucket_name => 'bucket')
+ #
+ # @return [Formatter]
+ #
+ def colored
+ bold = "\x1b[1m"
+ color = "\x1b[34m"
+ reset = "\x1b[0m"
+ pattern = []
+ pattern << "#{bold}#{color}[:client_class"
+ pattern << ":http_response_status_code"
+ pattern << ":time"
+ pattern << ":retries retries]#{reset}#{bold}"
+ pattern << ":operation(:request_params)"
+ pattern << ":error_class"
+ pattern << ":error_message#{reset}"
+ Formatter.new(pattern.join(' ') + "\n")
+ end
+
+ end
+
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/handler.rb new file mode 100644 index 0000000..53d0b84 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/logging/handler.rb @@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module Seahorse
+ module Client
+ # @deprecated Use Aws::Logging instead.
+ # @api private + module Logging + class Handler < Client::Handler + + # @param [RequestContext] context + # @return [Response] + def call(context) + context[:logging_started_at] = Time.now + @handler.call(context).tap do |response| + context[:logging_completed_at] = Time.now + log(context.config, response) + end + end + + private + + # @param [Configuration] config + # @param [Response] response + # @return [void] + def log(config, response) + config.logger.send(config.log_level, format(config, response)) + end + + # @param [Configuration] config + # @param [Response] response + # @return [String] + def format(config, response) + config.log_formatter.format(response) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/managed_file.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/managed_file.rb new file mode 100644 index 0000000..0747e32 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/managed_file.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module Seahorse + module Client + # This utility class is used to track files opened by Seahorse. + # This allows Seahorse to know what files it needs to close. + class ManagedFile < File + + # @return [Boolean] + def open? + !closed? + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/connection_pool.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/connection_pool.rb new file mode 100644 index 0000000..46a3d65 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/connection_pool.rb @@ -0,0 +1,363 @@ +# frozen_string_literal: true + +require 'cgi' +require 'net/http' +require 'net/https' +require 'delegate' +require 'thread' +require 'logger' + +require_relative 'patches' + +Seahorse::Client::NetHttp::Patches.apply! + +module Seahorse + module Client + # @api private + module NetHttp + + class ConnectionPool + + @pools_mutex = Mutex.new + @pools = {} + @default_logger = Logger.new($stdout) + + OPTIONS = { + http_proxy: nil, + http_open_timeout: 15, + http_read_timeout: 60, + http_idle_timeout: 5, + http_continue_timeout: 1, + http_wire_trace: false, + logger: nil, + ssl_verify_peer: true, + ssl_ca_bundle: nil, + ssl_ca_directory: nil, + ssl_ca_store: nil, + ssl_timeout: nil + } + + # @api private + def initialize(options = {}) + OPTIONS.each_pair do |opt_name, default_value| + value = options[opt_name].nil? ? default_value : options[opt_name] + instance_variable_set("@#{opt_name}", value) + end + @pool_mutex = Mutex.new + @pool = {} + end + + OPTIONS.keys.each do |attr_name| + attr_reader(attr_name) + end + + alias http_wire_trace? http_wire_trace + alias ssl_verify_peer? ssl_verify_peer + + # Makes an HTTP request, yielding a Net::HTTPResponse object. + # + # pool.request(URI.parse('http://domain'), Net::HTTP::Get.new('/')) do |resp| + # puts resp.code # status code + # puts resp.to_h.inspect # dump the headers + # puts resp.body + # end + # + # @param [URI::HTTP, URI::HTTPS] endpoint The HTTP(S) endpoint + # to connect to (e.g. 'https://domain.com'). + # + # @param [Net::HTTPRequest] request The request to make. This can be + # any request object from Net::HTTP (e.g. Net::HTTP::Get, + # Net::HTTP::POST, etc). 
+      #
+      # @yieldparam [Net::HTTPResponse] net_http_response
+      #
+      # @return (see #session_for)
+      def request(endpoint, request, &block)
+        session_for(endpoint) do |http|
+          yield(http.request(request))
+        end
+      end
+
+      # @param [URI::HTTP, URI::HTTPS] endpoint The HTTP(S) endpoint
+      #   to connect to (e.g. 'https://domain.com').
+      #
+      # @yieldparam [Net::HTTPSession] session
+      #
+      # @return [nil]
+      def session_for(endpoint, &block)
+        endpoint = remove_path_and_query(endpoint)
+        session = nil
+
+        # attempt to recycle an already open session
+        @pool_mutex.synchronize do
+          _clean
+          if @pool.key?(endpoint)
+            session = @pool[endpoint].shift
+          end
+        end
+
+        begin
+          session ||= start_session(endpoint)
+          session.read_timeout = http_read_timeout
+          session.continue_timeout = http_continue_timeout if
+            session.respond_to?(:continue_timeout=)
+          yield(session)
+        rescue
+          session.finish if session
+          raise
+        else
+          # No error raised? Good, check the session into the pool.
+          @pool_mutex.synchronize do
+            @pool[endpoint] = [] unless @pool.key?(endpoint)
+            @pool[endpoint] << session
+          end
+        end
+        nil
+      end
+
+      # @return [Integer] Returns the count of sessions currently in the
+      #   pool, not counting those currently in use.
+      def size
+        @pool_mutex.synchronize do
+          size = 0
+          @pool.each_pair do |endpoint,sessions|
+            size += sessions.size
+          end
+          size
+        end
+      end
+
+      # Removes stale http sessions from the pool (that have exceeded
+      # the idle timeout).
+      # @return [nil]
+      def clean!
+        @pool_mutex.synchronize { _clean }
+        nil
+      end
+
+      # Closes and removes all sessions from the pool.
+      # If empty! is called while there are outstanding requests they may
+      # get checked back into the pool, leaving the pool in a non-empty
+      # state.
+      # @return [nil]
+      def empty!
+        @pool_mutex.synchronize do
+          @pool.each_pair do |endpoint,sessions|
+            sessions.each(&:finish)
+          end
+          @pool.clear
+        end
+        nil
+      end
+
+      private
+
+      def remove_path_and_query(endpoint)
+        endpoint.dup.tap do |e|
+          e.path = ''
+          e.query = nil
+        end.to_s
+      end
+
+      class << self
+
+        # Returns a connection pool constructed from the given options.
+        # Calling this method twice with the same options will return
+        # the same pool.
+        #
+        # @option options [URI::HTTP,String] :http_proxy A proxy to send
+        #   requests through. Formatted like 'http://proxy.com:123'.
+        #
+        # @option options [Float] :http_open_timeout (15) The number of
+        #   seconds to wait when opening an HTTP session before raising a
+        #   `Timeout::Error`.
+        #
+        # @option options [Float] :http_read_timeout (60) The default
+        #   number of seconds to wait for response data. This value can be
+        #   safely set per-request on the session yielded by {#session_for}.
+        #
+        # @option options [Float] :http_idle_timeout (5) The number of
+        #   seconds a connection is allowed to sit idle before it is
+        #   considered stale. Stale connections are closed and removed
+        #   from the pool before making a request.
+        #
+        # @option options [Float] :http_continue_timeout (1) The number of
+        #   seconds to wait for a 100-continue response before sending the
+        #   request body. This option has no effect unless the request has
+        #   an "Expect" header set to "100-continue". Set this value to
+        #   `nil` to disable the wait. This value can safely be set per
+        #   request on the session yielded by {#session_for}.
+        #
+        # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
+        #   in seconds.
+        #
+        # @option options [Boolean] :http_wire_trace (false) When `true`,
+        #   HTTP debug output will be sent to the `:logger`.
+        #
+        # @option options [Logger] :logger Where debug output is sent.
+        #   Defaults to `nil` when `:http_wire_trace` is `false`.
+        #   Defaults to `Logger.new($stdout)` when `:http_wire_trace` is
+        #   `true`.
+        #
+        # @option options [Boolean] :ssl_verify_peer (true) When `true`,
+        #   SSL peer certificates are verified when establishing a
+        #   connection.
+        #
+        # @option options [String] :ssl_ca_bundle Full path to the SSL
+        #   certificate authority bundle file that should be used when
+        #   verifying peer certificates. If you do not pass
+        #   `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
+        #   will be used if available.
+        #
+        # @option options [String] :ssl_ca_directory Full path of the
+        #   directory that contains the unbundled SSL certificate
+        #   authority files for verifying peer certificates. If you do
+        #   not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
+        #   system default will be used if available.
+        #
+        # @return [ConnectionPool]
+        def for options = {}
+          options = pool_options(options)
+          @pools_mutex.synchronize do
+            @pools[options] ||= new(options)
+          end
+        end
+
+        # @return [Array<ConnectionPool>] Returns a list of the
+        #   constructed connection pools.
+        def pools
+          @pools_mutex.synchronize do
+            @pools.values
+          end
+        end
+
+        private
+
+        # Filters an option hash, merging in default values.
+        # @return [Hash]
+        def pool_options options
+          wire_trace = !!options[:http_wire_trace]
+          logger = options[:logger] || @default_logger if wire_trace
+          verify_peer = options.key?(:ssl_verify_peer) ?
+            !!options[:ssl_verify_peer] : true
+          {
+            :http_proxy => URI.parse(options[:http_proxy].to_s),
+            :http_continue_timeout => options[:http_continue_timeout],
+            :http_open_timeout => options[:http_open_timeout] || 15,
+            :http_idle_timeout => options[:http_idle_timeout] || 5,
+            :http_read_timeout => options[:http_read_timeout] || 60,
+            :http_wire_trace => wire_trace,
+            :logger => logger,
+            :ssl_verify_peer => verify_peer,
+            :ssl_ca_bundle => options[:ssl_ca_bundle],
+            :ssl_ca_directory => options[:ssl_ca_directory],
+            :ssl_ca_store => options[:ssl_ca_store],
+            :ssl_timeout => options[:ssl_timeout]
+          }
+        end
+
+      end
+
+      private
+
+      # Extract the parts of the http_proxy URI
+      # @return [Array(String)]
+      def http_proxy_parts
+        return [
+          http_proxy.host,
+          http_proxy.port,
+          (http_proxy.user && CGI::unescape(http_proxy.user)),
+          (http_proxy.password && CGI::unescape(http_proxy.password))
+        ]
+      end
+
+      # Starts and returns a new HTTP(S) session.
+      # @param [String] endpoint
+      # @return [Net::HTTPSession]
+      def start_session endpoint
+
+        endpoint = URI.parse(endpoint)
+
+        args = []
+        args << endpoint.host
+        args << endpoint.port
+        args += http_proxy_parts
+
+        http = ExtendedSession.new(Net::HTTP.new(*args.compact))
+        http.set_debug_output(logger) if http_wire_trace?
+        http.open_timeout = http_open_timeout
+        http.keep_alive_timeout = http_idle_timeout if http.respond_to?(:keep_alive_timeout=)
+
+        if endpoint.scheme == 'https'
+          http.use_ssl = true
+          http.ssl_timeout = ssl_timeout
+
+          if ssl_verify_peer?
+            http.verify_mode = OpenSSL::SSL::VERIFY_PEER
+            http.ca_file = ssl_ca_bundle if ssl_ca_bundle
+            http.ca_path = ssl_ca_directory if ssl_ca_directory
+            http.cert_store = ssl_ca_store if ssl_ca_store
+          else
+            http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+          end
+        else
+          http.use_ssl = false
+        end
+
+        http.start
+        http
+      end
+
+      # Removes stale sessions from the pool.
+      # @note **Must** be called behind a `@pool_mutex` synchronize block.
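Editor's note: before `_clean` is defined below, a usage sketch of the pool API above (`for`, `request`, `size`, `clean!`). The endpoint is a placeholder and the `request` call would open a real connection:

    require 'net/http'
    require 'uri'
    require 'aws-sdk-core'

    pool_class = Seahorse::Client::NetHttp::ConnectionPool

    # Pools are memoized per normalized option set:
    pool = pool_class.for(http_idle_timeout: 10)
    pool.equal?(pool_class.for(http_idle_timeout: 10)) #=> true

    endpoint = URI.parse('https://example.com')
    pool.request(endpoint, Net::HTTP::Get.new('/')) do |resp|
      puts resp.code
    end

    pool.size   # idle sessions checked into the pool
    pool.clean! # evict sessions idle longer than http_idle_timeout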
+ def _clean + now = Aws::Util.monotonic_milliseconds + @pool.each_pair do |endpoint,sessions| + sessions.delete_if do |session| + if session.last_used.nil? or now - session.last_used > http_idle_timeout * 1000 + session.finish + true + end + end + end + end + + # Helper methods extended onto Net::HTTPSession objects opened by the + # connection pool. + # @api private + class ExtendedSession < Delegator + + def initialize(http) + super(http) + @http = http + end + + # @return [Integer,nil] + attr_reader :last_used + + def __getobj__ + @http + end + + def __setobj__(obj) + @http = obj + end + + # Sends the request and tracks that this session has been used. + def request(*args, &block) + @http.request(*args, &block) + @last_used = Aws::Util.monotonic_milliseconds + end + + # Attempts to close/finish the session without raising an error. + def finish + @http.finish + rescue IOError + nil + end + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/handler.rb new file mode 100644 index 0000000..abc233d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/handler.rb @@ -0,0 +1,203 @@ +# frozen_string_literal: true + +require 'net/https' +require 'openssl' + +module Seahorse + module Client + # @api private + module NetHttp + + # The default HTTP handler for Seahorse::Client. This is based on + # the Ruby's `Net::HTTP`. + class Handler < Client::Handler + + # @api private + class TruncatedBodyError < IOError + def initialize(bytes_expected, bytes_received) + msg = "http response body truncated, expected #{bytes_expected} "\ + "bytes, received #{bytes_received} bytes" + super(msg) + end + end + + NETWORK_ERRORS = [ + SocketError, EOFError, IOError, Timeout::Error, + Errno::ECONNABORTED, Errno::ECONNRESET, Errno::EPIPE, + Errno::EINVAL, Errno::ETIMEDOUT, OpenSSL::SSL::SSLError, + Errno::EHOSTUNREACH, Errno::ECONNREFUSED, + Net::HTTPFatalError # for proxy connection failures + ] + + # does not exist in Ruby 1.9.3 + if OpenSSL::SSL.const_defined?(:SSLErrorWaitReadable) + NETWORK_ERRORS << OpenSSL::SSL::SSLErrorWaitReadable + end + + # @api private + DNS_ERROR_MESSAGES = [ + 'getaddrinfo: nodename nor servname provided, or not known', # MacOS + 'getaddrinfo: Name or service not known' # GNU + ] + + # Raised when a {Handler} cannot construct a `Net::HTTP::Request` + # from the given http verb. 
+        class InvalidHttpVerbError < StandardError; end
+
+        # @param [RequestContext] context
+        # @return [Response]
+        def call(context)
+          transmit(context.config, context.http_request, context.http_response)
+          Response.new(context: context)
+        end
+
+        # @param [Configuration] config
+        # @return [ConnectionPool]
+        def pool_for(config)
+          ConnectionPool.for(pool_options(config))
+        end
+
+        private
+
+        def error_message(req, error)
+          if error.is_a?(SocketError) && DNS_ERROR_MESSAGES.include?(error.message)
+            host = req.endpoint.host
+            "unable to connect to `#{host}`; SocketError: #{error.message}"
+          else
+            error.message
+          end
+        end
+
+        # @param [Configuration] config
+        # @param [Http::Request] req
+        # @param [Http::Response] resp
+        # @return [void]
+        def transmit(config, req, resp)
+          session(config, req) do |http|
+            # Monkey patch default content-type set by Net::HTTP
+            Thread.current[:net_http_skip_default_content_type] = true
+            http.request(build_net_request(req)) do |net_resp|
+              status_code = net_resp.code.to_i
+              headers = extract_headers(net_resp)
+
+              bytes_received = 0
+              resp.signal_headers(status_code, headers)
+              net_resp.read_body do |chunk|
+                bytes_received += chunk.bytesize
+                resp.signal_data(chunk)
+              end
+              complete_response(req, resp, bytes_received)
+
+            end
+          end
+        rescue *NETWORK_ERRORS => error
+          # these are retryable
+          error = NetworkingError.new(error, error_message(req, error))
+          resp.signal_error(error)
+        rescue => error
+          # not retryable
+          resp.signal_error(error)
+        ensure
+          # ensure we turn off monkey patch in case of error
+          Thread.current[:net_http_skip_default_content_type] = nil
+        end
+
+        def complete_response(req, resp, bytes_received)
+          if should_verify_bytes?(req, resp)
+            verify_bytes_received(resp, bytes_received)
+          else
+            resp.signal_done
+          end
+        end
+
+        def should_verify_bytes?(req, resp)
+          req.http_method != 'HEAD' && resp.headers['content-length']
+        end
+
+        def verify_bytes_received(resp, bytes_received)
+          bytes_expected = resp.headers['content-length'].to_i
+          if bytes_expected == bytes_received
+            resp.signal_done
+          else
+            error = TruncatedBodyError.new(bytes_expected, bytes_received)
+            resp.signal_error(NetworkingError.new(error, error.message))
+          end
+        end
+
+        def session(config, req, &block)
+          pool_for(config).session_for(req.endpoint) do |http|
+            # Net::HTTP in Ruby 2.5+ retries idempotent requests by default;
+            # disable those retries here rather than patching Net::HTTP.
+            http.max_retries = 0 if http.respond_to?(:max_retries)
+            http.read_timeout = config.http_read_timeout
+            yield(http)
+          end
+        end
+
+        # Extracts the {ConnectionPool} configuration options.
+        # @param [Configuration] config
+        # @return [Hash]
+        def pool_options(config)
+          ConnectionPool::OPTIONS.keys.inject({}) do |opts,opt|
+            opts[opt] = config.send(opt)
+            opts
+          end
+        end
+
+        # Constructs and returns a Net::HTTP::Request object from
+        # a {Http::Request}.
+        # @param [Http::Request] request
+        # @return [Net::HTTP::Request]
+        def build_net_request(request)
+          request_class = net_http_request_class(request)
+          req = request_class.new(request.endpoint.request_uri, headers(request))
+          # Net::HTTP adds a default Content-Type when a body is present.
+          # Set the body stream when it has an unknown size or when it is > 0.
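+          # For example (editor's note, illustrative values):
+          #
+          #   StringIO.new('')     # sized and empty  => body_stream stays nil
+          #   StringIO.new('abc')  # sized, non-empty => body_stream is set
+          #   socket_like_io       # no #size method  => body_stream is set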
+          if !request.body.respond_to?(:size) ||
+             (request.body.respond_to?(:size) && request.body.size > 0)
+            req.body_stream = request.body
+          end
+          req
+        end
+
+        # @param [Http::Request] request
+        # @raise [InvalidHttpVerbError]
+        # @return Returns a base `Net::HTTP::Request` class, e.g.,
+        #   `Net::HTTP::Get`, `Net::HTTP::Post`, etc.
+        def net_http_request_class(request)
+          Net::HTTP.const_get(request.http_method.capitalize)
+        rescue NameError
+          msg = "`#{request.http_method}` is not a valid http verb"
+          raise InvalidHttpVerbError, msg
+        end
+
+        # @param [Http::Request] request
+        # @return [Hash] Returns a vanilla hash of headers to send with the
+        #   HTTP request.
+        def headers(request)
+          # Net::HTTP adds a default header for accept-encoding (2.0.0+).
+          # Setting a default empty value defeats this.
+          #
+          # Removing this is necessary for most services to not break request
+          # signatures as well as dynamodb crc32 checks (these fail if the
+          # response is gzipped).
+          headers = { 'accept-encoding' => '' }
+          request.headers.each_pair do |key, value|
+            headers[key] = value
+          end
+          headers
+        end
+
+        # @param [Net::HTTP::Response] response
+        # @return [Hash]
+        def extract_headers(response)
+          response.to_hash.inject({}) do |headers, (k, v)|
+            headers[k] = v.first
+            headers
+          end
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/patches.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/patches.rb
new file mode 100644
index 0000000..7685f06
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/net_http/patches.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+require 'net/http'
+
+module Seahorse
+  module Client
+    # @api private
+    module NetHttp
+
+      # @api private
+      module Patches
+
+        def self.apply!
+          Net::HTTPGenericRequest.prepend(PatchDefaultContentType)
+          return unless RUBY_VERSION < '2.5'
+
+          Net::HTTP::IDEMPOTENT_METHODS_.clear
+        end
+
+        # For requests with bodies, Net::HTTP sets a default content type of:
+        #   'application/x-www-form-urlencoded'
+        # There are cases where we should not send content type at all.
+        # Even when no body is supplied, Net::HTTP uses a default empty body
+        # and sets it anyway. This patch disables the behavior when a Thread
+        # local variable is set.
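Editor's note: a self-contained sketch of the prepend-plus-thread-local technique this patch relies on; `Greeter` and `:skip_greeting` are invented names:

    module SkipGreeting
      def greet
        return if Thread.current[:skip_greeting]

        super
      end
    end

    class Greeter
      def greet
        puts 'hello'
      end
      prepend SkipGreeting
    end

    Greeter.new.greet                    # prints "hello"
    Thread.current[:skip_greeting] = true
    Greeter.new.greet                    # suppressed; super never runs
    Thread.current[:skip_greeting] = nil # always restore the toggle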
+        module PatchDefaultContentType
+          def supply_default_content_type
+            return if Thread.current[:net_http_skip_default_content_type]
+
+            super
+          end
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/networking_error.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/networking_error.rb
new file mode 100644
index 0000000..6eccf32
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/networking_error.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module Seahorse
+  module Client
+    class NetworkingError < StandardError
+
+      def initialize(error, msg = nil)
+        super(msg || error.message)
+        set_backtrace(error.backtrace)
+        @original_error = error
+      end
+
+      attr_reader :original_error
+
+    end
+
+    # Raised when sending initial headers and data fails
+    # for event stream requests over Http2
+    class Http2InitialRequestError < StandardError
+
+      def initialize(error)
+        @original_error = error
+      end
+
+      # @return [HTTP2::Error]
+      attr_reader :original_error
+
+    end
+
+    # Raised when the connection fails to initialize a new stream
+    class Http2StreamInitializeError < StandardError
+
+      def initialize(error)
+        @original_error = error
+      end
+
+      # @return [HTTP2::Error]
+      attr_reader :original_error
+
+    end
+
+    # Raised when trying to use a closed connection
+    class Http2ConnectionClosedError < StandardError; end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin.rb
new file mode 100644
index 0000000..47f2dc4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin.rb
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+module Seahorse
+  module Client
+    class Plugin
+
+      extend HandlerBuilder
+
+      # @param [Configuration] config
+      # @return [void]
+      def add_options(config)
+        self.class.options.each do |option|
+          if option.default_block
+            config.add_option(option.name, &option.default_block)
+          else
+            config.add_option(option.name, option.default)
+          end
+        end
+      end
+
+      # @param [HandlerList] handlers
+      # @param [Configuration] config
+      # @return [void]
+      def add_handlers(handlers, config)
+        handlers.copy_from(self.class.handlers)
+      end
+
+      # @param [Class] client_class
+      # @param [Hash] options
+      # @return [void]
+      def before_initialize(client_class, options)
+        self.class.before_initialize_hooks.each do |block|
+          block.call(client_class, options)
+        end
+      end
+
+      # @param [Client::Base] client
+      # @return [void]
+      def after_initialize(client)
+        self.class.after_initialize_hooks.each do |block|
+          block.call(client)
+        end
+      end
+
+      class << self
+
+        # @overload option(name, options = {}, &block)
+        #   @option options [Object] :default Can also be set by passing a block.
+        #   @option options [String] :doc_default
+        #   @option options [Boolean] :required
+        #   @option options [String] :doc_type
+        #   @option options [String] :docs
+        #   @return [void]
+        def option(name, default = nil, options = {}, &block)
+          # For backwards-compat reasons, the default value can be passed as 2nd
+          # positional argument (before the options hash) or as the `:default` option
+          # in the options hash.
+          if default.is_a? Hash
+            options = default
+          else
+            options[:default] = default
+          end
+          options[:default_block] = block if block_given?
+          self.options << PluginOption.new(name, options)
+        end
+
+        def before_initialize(&block)
+          before_initialize_hooks << block
+        end
+
+        def after_initialize(&block)
+          after_initialize_hooks << block
+        end
+
+        # @api private
+        def options
+          @options ||= []
+        end
+
+        # @api private
+        def handlers
+          @handlers ||= HandlerList.new
+        end
+
+        # @api private
+        def before_initialize_hooks
+          @before_initialize_hooks ||= []
+        end
+
+        # @api private
+        def after_initialize_hooks
+          @after_initialize_hooks ||= []
+        end
+
+        # @api private
+        def literal(string)
+          CodeLiteral.new(string)
+        end
+
+        # @api private
+        class CodeLiteral < String
+          def inspect
+            to_s
+          end
+        end
+
+      end
+
+      # @api private
+      class PluginOption
+
+        def initialize(name, options = {})
+          @name = name
+          @doc_default = nil
+          options.each_pair do |opt_name, opt_value|
+            self.send("#{opt_name}=", opt_value)
+          end
+        end
+
+        attr_reader :name
+        attr_accessor :default
+        attr_accessor :default_block
+        attr_accessor :required
+        attr_accessor :doc_type
+        attr_writer :doc_default
+        attr_accessor :docstring
+
+        def doc_default(options)
+          if @doc_default.nil? && !default.is_a?(Proc)
+            default
+          else
+            @doc_default.respond_to?(:call) ? @doc_default.call(options) : @doc_default
+          end
+        end
+
+        def documented?
+          !!docstring
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin_list.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin_list.rb
new file mode 100644
index 0000000..b2f6cad
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugin_list.rb
@@ -0,0 +1,146 @@
+# frozen_string_literal: true
+
+require 'set'
+require 'thread'
+
+module Seahorse
+  module Client
+    class PluginList
+
+      include Enumerable
+
+      # @param [Array, Set] plugins
+      # @option options [Mutex] :mutex
+      def initialize(plugins = [], options = {})
+        @mutex = options[:mutex] || Mutex.new
+        @plugins = Set.new
+        if plugins.is_a?(PluginList)
+          plugins.send(:each_plugin) { |plugin| _add(plugin) }
+        else
+          plugins.each { |plugin| _add(plugin) }
+        end
+      end
+
+      # Adds the `plugin`.
+      # @param [Plugin] plugin
+      # @return [void]
+      def add(plugin)
+        @mutex.synchronize do
+          _add(plugin)
+        end
+        nil
+      end
+
+      # Removes the `plugin`.
+      # @param [Plugin] plugin
+      # @return [void]
+      def remove(plugin)
+        @mutex.synchronize do
+          @plugins.delete(PluginWrapper.new(plugin))
+        end
+        nil
+      end
+
+      # Replaces the existing list of plugins.
+      # @param [Array] plugins
+      # @return [void]
+      def set(plugins)
+        @mutex.synchronize do
+          @plugins.clear
+          plugins.each do |plugin|
+            _add(plugin)
+          end
+        end
+        nil
+      end
+
+      # Enumerates the plugins.
+      # @return [Enumerator]
+      def each(&block)
+        each_plugin do |plugin_wrapper|
+          yield(plugin_wrapper.plugin)
+        end
+      end
+
+      private
+
+      # Not safe to call outside the mutex.
+      def _add(plugin)
+        @plugins << PluginWrapper.new(plugin)
+      end
+
+      # Yields each PluginWrapper behind the mutex.
+      def each_plugin(&block)
+        @mutex.synchronize do
+          @plugins.each(&block)
+        end
+      end
+
+      # A utility class that computes the canonical name for a plugin
+      # and defers requiring the plugin until the plugin class is
+      # required.
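Editor's note: before the PluginWrapper class below, a minimal sketch of the option DSL and lifecycle hooks defined in Plugin above; the plugin name and option names are invented:

    class MyTimestampPlugin < Seahorse::Client::Plugin
      # A static default, surfaced in generated docs via the docstring:
      option(:stamp_format,
        default: '%H:%M:%S',
        doc_type: String,
        docstring: 'strftime format used for timestamps.')

      # A block default is stored as default_block and resolved lazily
      # against the built configuration:
      option(:stamp_proc) { |cfg| proc { Time.now.strftime(cfg.stamp_format) } }

      after_initialize do |client|
        # runs once per client; client.config.stamp_proc.call is usable here
      end
    end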
+ # @api private + class PluginWrapper + + # @param [String, Symbol, Module, Class] plugin + def initialize(plugin) + case plugin + when Module + @canonical_name = plugin.name || plugin.object_id + @plugin = plugin + when Symbol, String + words = plugin.to_s.split('.') + @canonical_name = words.pop + @gem_name = words.empty? ? nil : words.join('.') + @plugin = nil + else + @canonical_name = plugin.object_id + @plugin = plugin + end + end + + # @return [String] + attr_reader :canonical_name + + # @return [Class] + def plugin + @plugin ||= require_plugin + end + + # Returns the given plugin if it is already a PluginWrapper. + def self.new(plugin) + if plugin.is_a?(self) + plugin + else + super + end + end + + # @return [Boolean] + # @api private + def eql? other + canonical_name == other.canonical_name + end + + # @return [String] + # @api private + def hash + canonical_name.hash + end + + private + + # @return [Class] + def require_plugin + require(@gem_name) if @gem_name + plugin_class = Kernel + @canonical_name.split('::').each do |const_name| + plugin_class = plugin_class.const_get(const_name) + end + plugin_class + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/content_length.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/content_length.rb new file mode 100644 index 0000000..3fb06d6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/content_length.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Plugins + class ContentLength < Plugin + + # @api private + class Handler < Client::Handler + # https://github.com/ruby/net-http/blob/master/lib/net/http/requests.rb + # Methods without body are forwards compatible, because content-length + # may be set for requests without body but is technically incorrect. + METHODS_WITHOUT_BODY = Set.new( + %w[GET HEAD DELETE OPTIONS TRACE COPY MOVE] + ) + + def call(context) + body = context.http_request.body + method = context.http_request.http_method + # We use Net::HTTP with body_stream which doesn't do this by default + if body.respond_to?(:size) && !METHODS_WITHOUT_BODY.include?(method) + context.http_request.headers['Content-Length'] = body.size + end + @handler.call(context) + end + end + + handler(Handler, step: :sign, priority: 0) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/endpoint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/endpoint.rb new file mode 100644 index 0000000..ffc29e5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/endpoint.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Plugins + class Endpoint < Plugin + + option(:endpoint, + doc_type: 'String, URI::HTTPS, URI::HTTP', + docstring: <<-DOCS) +Normally you should not configure the `:endpoint` option +directly. This is normally constructed from the `:region` +option. Configuring `:endpoint` is normally reserved for +connecting to test or custom endpoints. The endpoint should +be a URI formatted like: + + 'http://example.com' + 'https://example.com' + 'http://example.com:123' + + DOCS + + def add_handlers(handlers, config) + handlers.add(Handler, priority: 90) + end + + def after_initialize(client) + endpoint = client.config.endpoint + if endpoint.nil? 
+ msg = "missing required option `:endpoint'" + raise ArgumentError, msg + end + + endpoint = URI.parse(endpoint.to_s) + if URI::HTTPS === endpoint or URI::HTTP === endpoint + client.config.endpoint = endpoint + else + msg = 'expected :endpoint to be a HTTP or HTTPS endpoint' + raise ArgumentError, msg + end + end + + class Handler < Client::Handler + + def call(context) + context.http_request.endpoint = URI.parse(context.config.endpoint.to_s) + @handler.call(context) + end + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/h2.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/h2.rb new file mode 100644 index 0000000..ec9d6a3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/h2.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require 'seahorse/client/h2/handler' + +module Seahorse + module Client + module Plugins + class H2 < Plugin + + # H2 Client + option(:max_concurrent_streams, default: 100, doc_type: Integer, docstring: <<-DOCS) +Maximum concurrent streams used in HTTP2 connection, defaults to 100. Note that server may send back +:settings_max_concurrent_streams value which will take priority when initializing new streams. + DOCS + + option(:connection_timeout, default: 60, doc_type: Integer, docstring: <<-DOCS) +Connection timeout in seconds, defaults to 60 sec. + DOCS + + option(:connection_read_timeout, default: 60, doc_type: Integer, docstring: <<-DOCS) +Connection read timeout in seconds, defaults to 60 sec. + DOCS + + option(:read_chunk_size, default: 1024, doc_type: Integer, docstring: '') + + option(:raise_response_errors, default: true, doc_type: 'Boolean', docstring: <<-DOCS) +Defaults to `true`, raises errors if exist when #wait or #join! is called upon async response. + DOCS + + # SSL Context + option(:ssl_ca_bundle, default: nil, doc_type: String, docstring: <<-DOCS) do |cfg| +Full path to the SSL certificate authority bundle file that should be used when +verifying peer certificates. If you do not pass `:ssl_ca_directory` or `:ssl_ca_bundle` +the system default will be used if available. + DOCS + ENV['AWS_CA_BUNDLE'] || + Aws.shared_config.ca_bundle(profile: cfg.profile) if cfg.respond_to?(:profile) + end + + option(:ssl_ca_directory, default: nil, doc_type: String, docstring: <<-DOCS) +Full path of the directory that contains the unbundled SSL certificate authority +files for verifying peer certificates. If you do not pass `:ssl_ca_bundle` or +`:ssl_ca_directory` the system default will be used if available. + DOCS + + option(:ssl_ca_store, default: nil, doc_type: String, docstring: '') + + option(:ssl_verify_peer, default: true, doc_type: 'Boolean', docstring: <<-DOCS) +When `true`, SSL peer certificates are verified when establishing a connection. + DOCS + + option(:http_wire_trace, default: false, doc_type: 'Boolean', docstring: <<-DOCS) +When `true`, HTTP2 debug output will be sent to the `:logger`. + DOCS + + option(:enable_alpn, default: false, doc_type: 'Boolean', docstring: <<-DOCS) +Setting to `true` to enable ALPN in HTTP2 over TLS, requires Ruby version >= 2.3 and +Openssl version >= 1.0.2. Defaults to false. Note: not all service HTTP2 operations +supports ALPN on server side, please refer to service documentation. 
+ DOCS + + option(:logger) + + handler(Client::H2::Handler, step: :send) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/logging.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/logging.rb new file mode 100644 index 0000000..9504205 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/logging.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Plugins + # @api private + class Logging < Plugin + + option(:logger, + default: nil, + doc_type: 'Logger', + docstring: <<-DOCS) +The Logger instance to send log messages to. If this option +is not set, logging is disabled. + DOCS + + option(:log_level, + default: :info, + doc_type: Symbol, + docstring: 'The log level to send messages to the logger at.') + + option(:log_formatter, + default: Seahorse::Client::Logging::Formatter.default, + doc_default: 'Aws::Log::Formatter.default', + doc_type: 'Aws::Log::Formatter', + docstring: 'The log formatter.') + + def add_handlers(handlers, config) + if config.logger + handlers.add(Client::Logging::Handler, step: :validate) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/net_http.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/net_http.rb new file mode 100644 index 0000000..93ae591 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/net_http.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'seahorse/client/net_http/handler' + +module Seahorse + module Client + module Plugins + class NetHttp < Plugin + + option(:http_proxy, default: nil, doc_type: String, docstring: '') + + option(:http_open_timeout, default: 15, doc_type: Integer, docstring: '') do |cfg| + resolve_http_open_timeout(cfg) + end + + option(:http_read_timeout, default: 60, doc_type: Integer, docstring: '') do |cfg| + resolve_http_read_timeout(cfg) + end + + option(:http_idle_timeout, default: 5, doc_type: Integer, docstring: '') + + option(:http_continue_timeout, default: 1, doc_type: Integer, docstring: '') + + option(:http_wire_trace, default: false, doc_type: 'Boolean', docstring: '') + + option(:ssl_verify_peer, default: true, doc_type: 'Boolean', docstring: '') + + option(:ssl_ca_bundle, doc_type: String, docstring: '') do |cfg| + ENV['AWS_CA_BUNDLE'] || + Aws.shared_config.ca_bundle(profile: cfg.profile) if cfg.respond_to?(:profile) + end + + option(:ssl_ca_directory, default: nil, doc_type: String, docstring: '') + + option(:ssl_ca_store, default: nil, doc_type: String, docstring: '') + + option(:ssl_timeout, default: nil, doc_type: Float, docstring: '') do |cfg| + resolve_ssl_timeout(cfg) + end + + option(:logger) # for backwards compat + + handler(Client::NetHttp::Handler, step: :send) + + def self.resolve_http_open_timeout(cfg) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + cfg.defaults_mode_config_resolver.resolve(:http_open_timeout) + end + default_mode_value || 15 + end + + def self.resolve_http_read_timeout(cfg) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + cfg.defaults_mode_config_resolver.resolve(:http_read_timeout) + end + default_mode_value || 60 + end + + def self.resolve_ssl_timeout(cfg) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + 
cfg.defaults_mode_config_resolver.resolve(:ssl_timeout) + end + default_mode_value || nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/operation_methods.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/operation_methods.rb new file mode 100644 index 0000000..aaef546 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/operation_methods.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Plugins + + # Defines a helper method for each API operation that builds and + # sends the named request. + # + # # Helper Methods + # + # This plugin adds a helper method that lists the available API + # operations. + # + # client.operation_names + # #=> [:api_operation_name1, :api_operation_name2, ...] + # + # Additionally, it adds a helper method for each operation. This helper + # handles building and sending the appropriate {Request}. + # + # # without OperationMethods plugin + # req = client.build_request(:api_operation_name, request_params) + # resp = req.send_request + # + # # using the helper method defined by OperationMethods + # resp = client.api_operation_name(request_params) + # + class OperationMethods < Plugin + + def after_initialize(client) + unless client.respond_to?(:operation_names) + client.class.mutex.synchronize do + unless client.respond_to?(:operation_names) + add_operation_helpers(client, client.config.api.operation_names) + end + end + end + end + + def add_operation_helpers(client, operations) + operations.each do |name| + client.class.send(:define_method, name) do |*args, &block| + params = args[0] || {} + send_options = args[1] || {} + build_request(name, params).send_request(send_options, &block) + end + end + client.class.send(:define_method, :operation_names) { operations } + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/raise_response_errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/raise_response_errors.rb new file mode 100644 index 0000000..97ffb1b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/raise_response_errors.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Seahorse + module Client + module Plugins + class RaiseResponseErrors < Plugin + + option(:raise_response_errors, + default: true, + doc_type: 'Boolean', + docstring: 'When `true`, response errors are raised.') + + # @api private + class Handler < Client::Handler + def call(context) + response = @handler.call(context) + raise response.error if response.error + response + end + end + + def add_handlers(handlers, config) + if config.raise_response_errors + handlers.add(Handler, step: :validate, priority: 95) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/request_callback.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/request_callback.rb new file mode 100644 index 0000000..593f558 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/request_callback.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +require 'pathname' +require 'forwardable' + +module Seahorse + module Client + module Plugins + + # @api private + class ReadCallbackIO + extend Forwardable + def_delegators :@io, 
+        :size
+
+      def initialize(io, on_read = nil)
+        @io = io
+        @on_read = on_read if on_read.is_a? Proc
+        @bytes_read = 0
+
+        # Some IO objects support readpartial - IO.copy_stream used by the
+        # request will call readpartial if available, so define a wrapper
+        # for it if the underlying IO supports it.
+        if @io.respond_to?(:readpartial)
+          def self.readpartial(*args)
+            @io.readpartial(*args).tap do |chunk|
+              handle_chunk(chunk)
+            end
+          end
+        end
+      end
+
+      attr_reader :io
+
+      def read(*args)
+        @io.read(*args).tap do |chunk|
+          handle_chunk(chunk)
+        end
+      end
+
+      private
+
+      def handle_chunk(chunk)
+        @bytes_read += chunk.bytesize if chunk && chunk.respond_to?(:bytesize)
+        total_size = @io.respond_to?(:size) ? @io.size : nil
+        @on_read.call(chunk, @bytes_read, total_size) if @on_read
+      end
+    end
+
+    # @api private
+    class RequestCallback < Plugin
+
+      option(
+        :on_chunk_sent,
+        default: nil,
+        doc_type: 'Proc',
+        docstring: <<-DOCS)
+When a Proc object is provided, it will be used as callback when each chunk
+of the request body is sent. It provides three arguments: the chunk,
+the number of bytes read from the body, and the total number of
+bytes in the body.
+      DOCS
+
+      # @api private
+      class OptionHandler < Client::Handler
+        def call(context)
+          if context.params.is_a?(Hash) && context.params[:on_chunk_sent]
+            on_chunk_sent = context.params.delete(:on_chunk_sent)
+          end
+          on_chunk_sent = context.config.on_chunk_sent if on_chunk_sent.nil?
+          context[:on_chunk_sent] = on_chunk_sent if on_chunk_sent
+          @handler.call(context)
+        end
+      end
+
+      # @api private
+      class ReadCallbackHandler < Client::Handler
+        def call(context)
+          if (callback = context[:on_chunk_sent])
+            context.http_request.body = ReadCallbackIO.new(
+              context.http_request.body,
+              callback
+            )
+            @handler.call(context).tap do
+              unwrap_callback_body(context)
+            end
+          else
+            @handler.call(context)
+          end
+        end
+
+        def unwrap_callback_body(context)
+          body = context.http_request.body
+          if body.is_a? ReadCallbackIO
+            context.http_request.body = body.io
+          end
+        end
+      end
+
+      # OptionHandler is needed to remove :on_chunk_sent
+      # from the params before build
+      handler(OptionHandler, step: :initialize)
+
+      # ReadCallbackHandler needs to go late in the call stack;
+      # other plugins, including Sigv4 and content_md5, read the request body
+      # and rewind it
+      handler(ReadCallbackHandler, step: :sign, priority: 0)
+    end
+  end
+end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/response_target.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/response_target.rb
new file mode 100644
index 0000000..0c6b4e4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/plugins/response_target.rb
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+require 'pathname'
+
+module Seahorse
+  module Client
+    module Plugins
+      # @api private
+      class ResponseTarget < Plugin
+
+        # This handler is responsible for replacing the HTTP response body IO
+        # object with custom targets, such as a block, or a file. It is important
+        # to not write data to the custom target in the case of a non-success
+        # response. We do not want to write an XML error message to someone's
+        # file.
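Editor's note: stepping back to the RequestCallback plugin above, an illustrative upload-progress callback; the S3 call is hypothetical and commented out:

    on_sent = proc do |_chunk, bytes_read, total_bytes|
      pct = total_bytes ? format(' (%.0f%%)', 100.0 * bytes_read / total_bytes) : ''
      warn "sent #{bytes_read} bytes#{pct}"
    end

    # s3 = Aws::S3::Client.new
    # s3.put_object(bucket: 'my-bucket', key: 'big-object',
    #               body: File.open('big-object', 'rb'),
    #               on_chunk_sent: on_sent)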
+        class Handler < Client::Handler
+
+          def call(context)
+            if context.params.is_a?(Hash) && context.params[:response_target]
+              context[:response_target] = context.params.delete(:response_target)
+            end
+            target = context[:response_target]
+            add_event_listeners(context, target) if target
+            @handler.call(context)
+          end
+
+          private
+
+          def add_event_listeners(context, target)
+            context.http_response.on_headers(200..299) do
+              # In a fresh response, the body will be a StringIO.
+              # However, when a request is retried we may have
+              # an existing ManagedFile or BlockIO and those
+              # should be reused.
+              if context.http_response.body.is_a? StringIO
+                context.http_response.body = io(target, context.http_response.headers)
+              end
+            end
+
+            context.http_response.on_success(200..299) do
+              body = context.http_response.body
+              if body.is_a?(ManagedFile) && body.open?
+                body.close
+              end
+            end
+
+            context.http_response.on_error do
+              body = context.http_response.body
+
+              # When using a file response_target we do not want to write
+              # error messages to the file, so reset the body to a new StringIO.
+              if body.is_a? ManagedFile
+                File.unlink(body)
+                context.http_response.body = StringIO.new
+              end
+
+              # Aws::S3::Encryption::DecryptHandler (with lower priority)
+              # registers callbacks after ResponseTarget::Handler; there
+              # http_response.body is an IODecrypter, which has its own
+              # error-callback handling, so no action is required here.
+            end
+          end
+
+          def io(target, headers)
+            case target
+            when Proc then BlockIO.new(headers, &target)
+            when String, Pathname then ManagedFile.new(target, 'w+b')
+            else target
+            end
+          end
+
+        end
+
+        handler(Handler, step: :initialize, priority: 90)
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request.rb
new file mode 100644
index 0000000..1c5765b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+module Seahorse
+  module Client
+    class Request
+
+      include HandlerBuilder
+
+      # @param [HandlerList] handlers
+      # @param [RequestContext] context
+      def initialize(handlers, context)
+        @handlers = handlers
+        @context = context
+      end
+
+      # @return [HandlerList]
+      attr_reader :handlers
+
+      # @return [RequestContext]
+      attr_reader :context
+
+      # Sends the request, returning a {Response} object.
+      #
+      #     response = request.send_request
+      #
+      # # Streaming Responses
+      #
+      # By default, HTTP responses are buffered into memory. This can be
+      # bad if you are downloading large responses, e.g. large files.
+      # You can avoid this by streaming the response to a block or some other
+      # target.
+      #
+      # ## Streaming to a File
+      #
+      # You can stream the raw HTTP response body to a File, or any IO-like
+      # object, by passing the `:target` option.
+      #
+      #     # create a new file at the given path
+      #     request.send_request(target: '/path/to/target/file')
+      #
+      #     # or provide an IO object to write to
+      #     File.open('photo.jpg', 'wb') do |file|
+      #       request.send_request(target: file)
+      #     end
+      #
+      # **Please Note**: The target IO object may receive `#truncate(0)`
+      # if the request generates a networking error and bytes have already
+      # been written to the target.
+      #
+      # ## Block Streaming
+      #
+      # Pass a block to `#send_request` and the response will be yielded in
+      # chunks to the given block.
+      #
+      #     # stream the response data
+      #     request.send_request do |chunk|
+      #       file.write(chunk)
+      #     end
+      #
+      # **Please Note**: When streaming to a block, it is not possible to
+      # retry failed requests.
+      #
+      # @option options [String, IO] :target When specified, the HTTP response
+      #   body is written to target. This is helpful when you are sending
+      #   a request that may return a large payload that you don't want to
+      #   load into memory.
+      #
+      # @return [Response]
+      #
+      def send_request(options = {}, &block)
+        @context[:response_target] = options[:target] || block
+        @handlers.to_stack.call(@context)
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request_context.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request_context.rb
new file mode 100644
index 0000000..36f81a8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/request_context.rb
@@ -0,0 +1,78 @@
+# frozen_string_literal: true
+
+require 'stringio'
+
+module Seahorse
+  module Client
+    class RequestContext
+
+      # @option options [required,Symbol] :operation_name (nil)
+      # @option options [required,Model::Operation] :operation (nil)
+      # @option options [Model::Authorizer] :authorizer (nil)
+      # @option options [Hash] :params ({})
+      # @option options [Configuration] :config (nil)
+      # @option options [Http::Request] :http_request (Http::Request.new)
+      # @option options [Http::Response] :http_response (Http::Response.new)
+      def initialize(options = {})
+        @operation_name = options[:operation_name]
+        @operation = options[:operation]
+        @authorizer = options[:authorizer]
+        @client = options[:client]
+        @params = options[:params] || {}
+        @config = options[:config]
+        @http_request = options[:http_request] || Http::Request.new
+        @http_response = options[:http_response] || Http::Response.new
+        @retries = 0
+        @metadata = {}
+      end
+
+      # @return [Symbol] Name of the API operation called.
+      attr_accessor :operation_name
+
+      # @return [Model::Operation]
+      attr_accessor :operation
+
+      # @return [Model::Authorizer] APIG SDKs only
+      attr_accessor :authorizer
+
+      # @return [Seahorse::Client::Base]
+      attr_accessor :client
+
+      # @return [Hash] The hash of request parameters.
+      attr_accessor :params
+
+      # @return [Configuration] The client configuration.
+      attr_accessor :config
+
+      # @return [Http::Request]
+      attr_accessor :http_request
+
+      # @return [Http::Response]
+      attr_accessor :http_response
+
+      # @return [Integer]
+      attr_accessor :retries
+
+      # @return [Hash]
+      attr_reader :metadata
+
+      # Returns the metadata for the given `key`.
+      # @param [Symbol] key
+      # @return [Object]
+      def [](key)
+        @metadata[key]
+      end
+
+      # Sets the request context metadata for the given `key`. Request
+      # metadata is useful for handlers that need to keep state on the
+      # request, without sending that data with the request over HTTP.
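Editor's note: before the `[]=` writer below, a sketch of a handler that keeps per-request state in this metadata, mirroring the `:logging_started_at` / `:logging_completed_at` keys used by the logging handler earlier; the class and keys are invented:

    class TimingHandler < Seahorse::Client::Handler
      def call(context)
        context[:my_started_at] = Time.now
        @handler.call(context).tap do
          context[:my_elapsed] = Time.now - context[:my_started_at]
        end
      end
    end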
+      # @param [Symbol] key
+      # @param [Object] value
+      def []=(key, value)
+        @metadata[key] = value
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/response.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/response.rb
new file mode 100644
index 0000000..7277db6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/client/response.rb
@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+
+require 'delegate'
+
+module Seahorse
+  module Client
+    class Response < Delegator
+      # @option options [RequestContext] :context (nil)
+      # @option options [Integer] :status_code (nil)
+      # @option options [Http::Headers] :headers (Http::Headers.new)
+      # @option options [String] :body ('')
+      def initialize(options = {})
+        @context = options[:context] || RequestContext.new
+        @data = options[:data]
+        @error = options[:error]
+        @http_request = @context.http_request
+        @http_response = @context.http_response
+        @http_response.on_error do |error|
+          @error = error
+        end
+      end
+
+      # @return [RequestContext]
+      attr_reader :context
+
+      # @return The response data. This may be `nil` if the response contains
+      #   an {#error}.
+      attr_accessor :data
+
+      # @return [StandardError, nil]
+      attr_accessor :error
+
+      # @overload on(status_code, &block)
+      #   @param [Integer] status_code The block will be
+      #     triggered only for responses with the given status code.
+      #
+      # @overload on(status_code_range, &block)
+      #   @param [Range] status_code_range The block will be
+      #     triggered only for responses with a status code that falls
+      #     within the given range.
+      #
+      # @return [self]
+      def on(range, &_block)
+        response = self
+        @context.http_response.on_success(range) do
+          yield response
+        end
+        self
+      end
+
+      # Yields to the block if the response has a 200 level status code.
+      # @return [self]
+      def on_success(&block)
+        on(200..299, &block)
+      end
+
+      # @return [Boolean] Returns `true` if the response is complete with
+      #   a 200-level HTTP status code.
+      def successful?
+        (200..299).cover?(@context.http_response.status_code) && @error.nil?
+      end
+
+      # @api private
+      def on_complete(&block)
+        @context.http_response.on_done(&block)
+        self
+      end
+
+      # Necessary to define as a subclass of Delegator
+      # @api private
+      def __getobj__
+        @data
+      end
+
+      # Necessary to define as a subclass of Delegator
+      # @api private
+      def __setobj__(obj)
+        @data = obj
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/api.rb
new file mode 100644
index 0000000..a102204
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/api.rb
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+module Seahorse
+  module Model
+    class Api
+
+      def initialize
+        @metadata = {}
+        @operations = {}
+        @authorizers = {}
+        @endpoint_operation = nil
+        @require_endpoint_discovery = false
+      end
+
+      # @return [String, nil]
+      attr_accessor :version
+
+      # @return [Hash]
+      attr_accessor :metadata
+
+      # @return [Symbol, nil]
+      attr_accessor :endpoint_operation
+
+      # @return [Boolean, nil]
+      attr_accessor :require_endpoint_discovery
+
+      def operations(&block)
+        if block_given?
+ @operations.each(&block) + else + @operations.enum_for(:each) + end + end + + def operation(name) + if @operations.key?(name.to_sym) + @operations[name.to_sym] + else + raise ArgumentError, "unknown operation #{name.inspect}" + end + end + + def operation_names + @operations.keys + end + + def async_operation_names + @operations.select {|_, op| op.async }.keys + end + + def add_operation(name, operation) + @operations[name.to_sym] = operation + end + + def authorizers(&block) + if block_given? + @authorizers.each(&block) + else + @authorizers.enum_for(:each) + end + end + + def authorizer(name) + if @authorizers.key?(name.to_sym) + @authorizers[name.to_sym] + else + raise ArgumentError, "unknown authorizer #{name.inspect}" + end + end + + def authorizer_names + @authorizers.keys + end + + def add_authorizer(name, authorizer) + @authorizers[name.to_sym] = authorizer + end + + def inspect(*args) + "#<#{self.class.name}>" + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/authorizer.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/authorizer.rb new file mode 100644 index 0000000..9d8e07b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/authorizer.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module Seahorse + module Model + class Authorizer + + def initialize + @type = 'provided' + @placement = {} + end + + # @return [String] + attr_accessor :name + + # @return [String] + attr_accessor :type + + # @return [Hash] + attr_accessor :placement + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/operation.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/operation.rb new file mode 100644 index 0000000..8434dc0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/operation.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module Seahorse + module Model + class Operation + + def initialize + @http_method = 'POST' + @http_request_uri = '/' + @deprecated = false + @errors = [] + @metadata = {} + @async = false + end + + # @return [String, nil] + attr_accessor :name + + # @return [String] + attr_accessor :http_method + + # @return [String] + attr_accessor :http_request_uri + + # @return [Boolean] + attr_accessor :http_checksum_required + + # @return [Hash] + attr_accessor :http_checksum + + # @return [Boolean] + attr_accessor :deprecated + + # @return [Boolean] + attr_accessor :endpoint_operation + + # @return [Hash] + attr_accessor :endpoint_discovery + + # @return [String, nil] + attr_accessor :documentation + + # @return [Hash, nil] + attr_accessor :endpoint_pattern + + # @return [String, nil] + attr_accessor :authorizer + + # @return [ShapeRef, nil] + attr_accessor :input + + # @return [ShapeRef, nil] + attr_accessor :output + + # @return [Array] + attr_accessor :errors + + # APIG only + # @return [Boolean] + attr_accessor :require_apikey + + # @return [Boolean] + attr_accessor :async + + def [](key) + @metadata[key.to_s] + end + + def []=(key, value) + @metadata[key.to_s] = value + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/shapes.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/shapes.rb new file mode 100644 index 0000000..f4ee53f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/model/shapes.rb @@ -0,0 +1,296 @@ +# 
frozen_string_literal: true
+
+require 'set'
+
+module Seahorse
+  module Model
+    module Shapes
+
+      class ShapeRef
+
+        def initialize(options = {})
+          @metadata = {}
+          @required = false
+          @deprecated = false
+          @location = nil
+          @location_name = nil
+          @event = false
+          @eventstream = false
+          @eventpayload = false
+          @eventpayload_type = ''.freeze
+          @eventheader = false
+          @eventheader_type = ''.freeze
+          options.each do |key, value|
+            if key == :metadata
+              value.each do |k,v|
+                self[k] = v
+              end
+            else
+              send("#{key}=", value)
+            end
+          end
+        end
+
+        # @return [Shape]
+        attr_accessor :shape
+
+        # @return [Boolean]
+        attr_accessor :required
+
+        # @return [String, nil]
+        attr_accessor :documentation
+
+        # @return [Boolean]
+        attr_accessor :deprecated
+
+        # @return [Boolean]
+        attr_accessor :event
+
+        # @return [Boolean]
+        attr_accessor :eventstream
+
+        # @return [Boolean]
+        attr_accessor :eventpayload
+
+        # @return [Boolean]
+        attr_accessor :eventheader
+
+        # @return [String]
+        attr_accessor :eventpayload_type
+
+        # @return [String]
+        attr_accessor :eventheader_type
+
+        # @return [Boolean]
+        attr_accessor :document
+
+        # @return [String, nil]
+        def location
+          @location || (shape && shape[:location])
+        end
+
+        def location= location
+          @location = location
+        end
+
+        # @return [String, nil]
+        def location_name
+          @location_name || (shape && shape[:location_name])
+        end
+
+        def location_name= location_name
+          @location_name = location_name
+        end
+
+        # Gets metadata for the given `key`.
+        def [](key)
+          if @metadata.key?(key.to_s)
+            @metadata[key.to_s]
+          else
+            @shape[key.to_s]
+          end
+        end
+
+        # Sets metadata for the given `key`.
+        def []=(key, value)
+          @metadata[key.to_s] = value
+        end
+
+      end
+
+      class Shape
+
+        def initialize(options = {})
+          @metadata = {}
+          options.each_pair do |key, value|
+            if respond_to?("#{key}=")
+              send("#{key}=", value)
+            else
+              self[key] = value
+            end
+          end
+        end
+
+        # @return [String]
+        attr_accessor :name
+
+        # @return [String, nil]
+        attr_accessor :documentation
+
+        # @return [Boolean]
+        attr_accessor :union
+
+        # Gets metadata for the given `key`.
+        def [](key)
+          @metadata[key.to_s]
+        end
+
+        # Sets metadata for the given `key`.
+ def []=(key, value) + @metadata[key.to_s] = value + end + + end + + class BlobShape < Shape + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + end + + class BooleanShape < Shape; end + + class FloatShape < Shape + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + end + + class IntegerShape < Shape + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + end + + class ListShape < Shape + + # @return [ShapeRef] + attr_accessor :member + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + # @return [Boolean] + attr_accessor :flattened + + end + + class MapShape < Shape + + # @return [ShapeRef] + attr_accessor :key + + # @return [ShapeRef] + attr_accessor :value + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + # @return [Boolean] + attr_accessor :flattened + + end + + class StringShape < Shape + + # @return [Set, nil] + attr_accessor :enum + + # @return [Integer, nil] + attr_accessor :min + + # @return [Integer, nil] + attr_accessor :max + + end + + class StructureShape < Shape + + def initialize(options = {}) + @members = {} + @members_by_location_name = {} + @required = Set.new + super + end + + # @return [Set] + attr_accessor :required + + # @return [Class] + attr_accessor :struct_class + + # @param [Symbol] name + # @param [ShapeRef] shape_ref + def add_member(name, shape_ref) + name = name.to_sym + @required << name if shape_ref.required + @members_by_location_name[shape_ref.location_name] = [name, shape_ref] + @members[name] = shape_ref + end + + # @return [Array] + def member_names + @members.keys + end + + # @param [Symbol] member_name + # @return [Boolean] Returns `true` if there exists a member with + # the given name. + def member?(member_name) + @members.key?(member_name.to_sym) + end + + # @return [Enumerator<[Symbol,ShapeRef]>] + def members + @members.to_enum + end + + # @param [Symbol] name + # @return [ShapeRef] + def member(name) + if member?(name) + @members[name.to_sym] + else + raise ArgumentError, "no such member #{name.inspect}" + end + end + + # @api private + def member_by_location_name(location_name) + @members_by_location_name[location_name] + end + + end + + class UnionShape < StructureShape + def initialize(options = {}) + @member_subclasses = {} + super options.merge(union: true) + end + + # @api private + def member_subclass(member) + @member_subclasses[member] + end + + # @api private + def add_member_subclass(member, subclass) + @member_subclasses[member] = subclass + end + end + + class TimestampShape < Shape; end + + class DocumentShape < Shape; end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/util.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/util.rb new file mode 100644 index 0000000..8c35525 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-core-3.171.0/lib/seahorse/util.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +require 'cgi' + +module Seahorse + # @api private + module Util + class << self + def uri_escape(string) + CGI.escape(string.to_s.encode('UTF-8')).gsub('+', '%20').gsub('%7E', '~') + end + + def uri_path_escape(path) + path.gsub(/[^\/]+/) { |part| uri_escape(part) } + end + + def escape_header_list_string(s) + s.include?('"') || s.include?(',') ? 
"\"#{s.gsub('"', '\"')}\"" : s + end + + # Checks for a valid host label + # @see https://tools.ietf.org/html/rfc3986#section-3.2.2 + # @see https://tools.ietf.org/html/rfc1123#page-13 + def host_label?(str) + str =~ /^(?!-)[a-zA-Z0-9-]{1,63}(? String + # + # + # @example Example: To cancel deletion of a KMS key + # + # # The following example cancels deletion of the specified KMS key. + # + # resp = client.cancel_key_deletion({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose deletion you are canceling. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key whose deletion you canceled. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.cancel_key_deletion({ + # key_id: "KeyIdType", # required + # }) + # + # @example Response structure + # + # resp.key_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion AWS API Documentation + # + # @overload cancel_key_deletion(params = {}) + # @param [Hash] params ({}) + def cancel_key_deletion(params = {}, options = {}) + req = build_request(:cancel_key_deletion, params) + req.send_request(options) + end + + # Connects or reconnects a [custom key store][1] to its backing key + # store. For an CloudHSM key store, `ConnectCustomKeyStore` connects the + # key store to its associated CloudHSM cluster. For an external key + # store, `ConnectCustomKeyStore` connects the key store to the external + # key store proxy that communicates with your external key manager. + # + # The custom key store must be connected before you can create KMS keys + # in the key store or use the KMS keys it contains. You can disconnect + # and reconnect a custom key store at any time. + # + # The connection process for a custom key store can take an extended + # amount of time to complete. This operation starts the connection + # process, but it does not wait for it to complete. When it succeeds, + # this operation quickly returns an HTTP 200 response and a JSON object + # with no properties. However, this response does not indicate that the + # custom key store is connected. To get the connection state of the + # custom key store, use the DescribeCustomKeyStores operation. + # + # This operation is part of the [custom key stores][1] feature in KMS, + # which combines the convenience and extensive integration of KMS with + # the isolation and control of a key store that you own and manage. + # + # The `ConnectCustomKeyStore` operation might fail for various reasons. + # To find the reason, use the DescribeCustomKeyStores operation and see + # the `ConnectionErrorCode` in the response. For help interpreting the + # `ConnectionErrorCode`, see CustomKeyStoresListEntry. + # + # To fix the failure, use the DisconnectCustomKeyStore operation to + # disconnect the custom key store, correct the error, use the + # UpdateCustomKeyStore operation if necessary, and then use + # `ConnectCustomKeyStore` again. + # + # **CloudHSM key store** + # + # During the connection process for an CloudHSM key store, KMS finds the + # CloudHSM cluster that is associated with the custom key store, creates + # the connection infrastructure, connects to the cluster, logs into the + # CloudHSM client as the `kmsuser` CU, and rotates its password. 
+ # + # To connect an CloudHSM key store, its associated CloudHSM cluster must + # have at least one active HSM. To get the number of active HSMs in a + # cluster, use the [DescribeClusters][2] operation. To add HSMs to the + # cluster, use the [CreateHsm][3] operation. Also, the [ `kmsuser` + # crypto user][4] (CU) must not be logged into the cluster. This + # prevents KMS from using this account to log in. + # + # If you are having trouble connecting or disconnecting a CloudHSM key + # store, see [Troubleshooting an CloudHSM key store][5] in the *Key + # Management Service Developer Guide*. + # + # **External key store** + # + # When you connect an external key store that uses public endpoint + # connectivity, KMS tests its ability to communicate with your external + # key manager by sending a request via the external key store proxy. + # + # When you connect to an external key store that uses VPC endpoint + # service connectivity, KMS establishes the networking elements that it + # needs to communicate with your external key manager via the external + # key store proxy. This includes creating an interface endpoint to the + # VPC endpoint service and a private hosted zone for traffic between KMS + # and the VPC endpoint service. + # + # To connect an external key store, KMS must be able to connect to the + # external key store proxy, the external key store proxy must be able to + # communicate with your external key manager, and the external key + # manager must be available for cryptographic operations. + # + # If you are having trouble connecting or disconnecting an external key + # store, see [Troubleshooting an external key store][6] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a + # custom key store in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:ConnectCustomKeyStore][7] (IAM policy) + # + # **Related operations** + # + # * CreateCustomKeyStore + # + # * DeleteCustomKeyStore + # + # * DescribeCustomKeyStores + # + # * DisconnectCustomKeyStore + # + # * UpdateCustomKeyStore + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [2]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # [3]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :custom_key_store_id + # Enter the key store ID of the custom key store that you want to + # connect. To find the ID of a custom key store, use the + # DescribeCustomKeyStores operation. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To connect a custom key store + # + # # This example connects an AWS KMS custom key store to its backing key store. For an AWS CloudHSM key store, it connects + # # the key store to its AWS CloudHSM cluster. For an external key store, it connects the key store to the external key + # # store proxy that communicates with your external key manager. This operation does not return any data. 
To verify that + # # the custom key store is connected, use the DescribeCustomKeyStores operation. + # + # resp = client.connect_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the AWS KMS custom key store. + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.connect_custom_key_store({ + # custom_key_store_id: "CustomKeyStoreIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore AWS API Documentation + # + # @overload connect_custom_key_store(params = {}) + # @param [Hash] params ({}) + def connect_custom_key_store(params = {}, options = {}) + req = build_request(:connect_custom_key_store, params) + req.send_request(options) + end + + # Creates a friendly name for a KMS key. + # + # Adding, deleting, or updating an alias can allow or deny permission to + # the KMS key. For details, see [ABAC for KMS][1] in the *Key Management + # Service Developer Guide*. + # + # + # + # You can use an alias to identify a KMS key in the KMS console, in the + # DescribeKey operation and in [cryptographic operations][2], such as + # Encrypt and GenerateDataKey. You can also change the KMS key that's + # associated with the alias (UpdateAlias) or delete the alias + # (DeleteAlias) at any time. These operations don't affect the + # underlying KMS key. + # + # You can associate the alias with any customer managed key in the same + # Amazon Web Services Region. Each alias is associated with only one KMS + # key at a time, but a KMS key can have multiple aliases. A valid KMS + # key is required. You can't create an alias without a KMS key. + # + # The alias must be unique in the account and Region, but you can have + # aliases with the same name in different Regions. For detailed + # information about aliases, see [Using aliases][3] in the *Key + # Management Service Developer Guide*. + # + # This operation does not return a response. To get the alias that you + # created, use the ListAliases operation. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][4] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on an + # alias in a different Amazon Web Services account. + # + # **Required permissions** + # + # * [kms:CreateAlias][5] on the alias (IAM policy). + # + # * [kms:CreateAlias][5] on the KMS key (key policy). + # + # For details, see [Controlling access to aliases][6] in the *Key + # Management Service Developer Guide*. + # + # **Related operations:** + # + # * DeleteAlias + # + # * ListAliases + # + # * UpdateAlias + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access + # + # @option params [required, String] :alias_name + # Specifies the alias name. This value must begin with `alias/` followed + # by a name, such as `alias/ExampleAlias`. + # + # The `AliasName` value must be a string of 1-256 characters.
It can + # contain only alphanumeric characters, forward slashes (/), underscores + # (\_), and dashes (-). The alias name cannot begin with `alias/aws/`. + # The `alias/aws/` prefix is reserved for [Amazon Web Services managed + # keys][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # + # @option params [required, String] :target_key_id + # Associates the alias with the specified [customer managed key][1]. The + # KMS key must be in the same Amazon Web Services Region. + # + # A valid key ID is required. If you supply a null or empty string + # value, this operation returns an error. + # + # For help finding the key ID and ARN, see [Finding the Key ID and + # ARN][2] in the Key Management Service Developer Guide . + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To create an alias + # + # # The following example creates an alias for the specified KMS key. + # + # resp = client.create_alias({ + # alias_name: "alias/ExampleAlias", # The alias to create. Aliases must begin with 'alias/'. Do not use aliases that begin with 'alias/aws' because they are reserved for use by AWS. + # target_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose alias you are creating. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.create_alias({ + # alias_name: "AliasNameType", # required + # target_key_id: "KeyIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias AWS API Documentation + # + # @overload create_alias(params = {}) + # @param [Hash] params ({}) + def create_alias(params = {}, options = {}) + req = build_request(:create_alias, params) + req.send_request(options) + end + + # Creates a [custom key store][1] backed by a key store that you own and + # manage. When you use a KMS key in a custom key store for a + # cryptographic operation, the cryptographic operation is actually + # performed in your key store using your keys. KMS supports [CloudHSM + # key stores][2] backed by an [CloudHSM cluster][3] and [external key + # stores][4] backed by an external key store proxy and external key + # manager outside of Amazon Web Services. + # + # This operation is part of the [custom key stores][1] feature in KMS, + # which combines the convenience and extensive integration of KMS with + # the isolation and control of a key store that you own and manage. + # + # Before you create the custom key store, the required elements must be + # in place and operational. We recommend that you use the test tools + # that KMS provides to verify the configuration of your external key + # store proxy.
For details about the required elements and verification tests, + # see [Assemble the prerequisites (for CloudHSM key stores)][5] or + # [Assemble the prerequisites (for external key stores)][6] in the *Key + # Management Service Developer Guide*. + # + # To create a custom key store, use the following parameters. + # + # * To create an CloudHSM key store, specify the `CustomKeyStoreName`, + # `CloudHsmClusterId`, `KeyStorePassword`, and + # `TrustAnchorCertificate`. The `CustomKeyStoreType` parameter is + # optional for CloudHSM key stores. If you include it, set it to the + # default value, `AWS_CLOUDHSM`. For help with failures, see + # [Troubleshooting an CloudHSM key store][7] in the *Key Management + # Service Developer Guide*. + # + # * To create an external key store, specify the `CustomKeyStoreName` + # and a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. Also, specify + # values for `XksProxyConnectivity`, + # `XksProxyAuthenticationCredential`, `XksProxyUriEndpoint`, and + # `XksProxyUriPath`. If your `XksProxyConnectivity` value is + # `VPC_ENDPOINT_SERVICE`, specify the `XksProxyVpcEndpointServiceName` + # parameter. For help with failures, see [Troubleshooting an external + # key store][8] in the *Key Management Service Developer Guide*. + # + # For external key stores: + # + # Some external key managers provide a simpler method for creating an + # external key store. For details, see your external key manager + # documentation. + # + # When creating an external key store in the KMS console, you can upload + # a JSON-based proxy configuration file with the desired values. You + # cannot use a proxy configuration with the `CreateCustomKeyStore` + # operation. However, you can use the values in the file to help you + # determine the correct values for the `CreateCustomKeyStore` + # parameters. + # + # + # + # When the operation completes successfully, it returns the ID of the + # new custom key store. Before you can use your new custom key store, + # you need to use the ConnectCustomKeyStore operation to connect a new + # CloudHSM key store to its CloudHSM cluster, or to connect a new + # external key store to the external key store proxy for your external + # key manager. Even if you are not going to use your custom key store + # immediately, you might want to connect it to verify that all settings + # are correct and then disconnect it until you are ready to use it. + # + # For help with failures, see [Troubleshooting a custom key store][7] in + # the *Key Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a + # custom key store in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:CreateCustomKeyStore][9] (IAM policy). 
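+ #
+ # A sketch of that create-then-connect sequence (all values are the
+ # documentation placeholders used in the examples below):
+ #
+ #     store_id = client.create_custom_key_store({
+ #       custom_key_store_name: "ExamplePublicEndpointKeyStore",
+ #       custom_key_store_type: "EXTERNAL_KEY_STORE",
+ #       xks_proxy_connectivity: "PUBLIC_ENDPOINT",
+ #       xks_proxy_uri_endpoint: "https://myproxy.xks.example.com",
+ #       xks_proxy_uri_path: "/kms/xks/v1",
+ #       xks_proxy_authentication_credential: {
+ #         access_key_id: "ABCDE12345670EXAMPLE",
+ #         raw_secret_access_key: "DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo=",
+ #       },
+ #     }).custom_key_store_id
+ #     client.connect_custom_key_store(custom_key_store_id: store_id)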
+ # + # **Related operations:** + # + # * ConnectCustomKeyStore + # + # * DeleteCustomKeyStore + # + # * DescribeCustomKeyStores + # + # * DisconnectCustomKeyStore + # + # * UpdateCustomKeyStore + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html + # [3]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :custom_key_store_name + # Specifies a friendly name for the custom key store. The name must be + # unique in your Amazon Web Services account and Region. This parameter + # is required for all custom key stores. + # + # @option params [String] :cloud_hsm_cluster_id + # Identifies the CloudHSM cluster for an CloudHSM key store. This + # parameter is required for custom key stores with `CustomKeyStoreType` + # of `AWS_CLOUDHSM`. + # + # Enter the cluster ID of any active CloudHSM cluster that is not + # already associated with a custom key store. To find the cluster ID, + # use the [DescribeClusters][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # + # @option params [String] :trust_anchor_certificate + # Specifies the certificate for an CloudHSM key store. This parameter is + # required for custom key stores with a `CustomKeyStoreType` of + # `AWS_CLOUDHSM`. + # + # Enter the content of the trust anchor certificate for the CloudHSM + # cluster. This is the content of the `customerCA.crt` file that you + # created when you [initialized the cluster][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html + # + # @option params [String] :key_store_password + # Specifies the `kmsuser` password for an CloudHSM key store. This + # parameter is required for custom key stores with a + # `CustomKeyStoreType` of `AWS_CLOUDHSM`. + # + # Enter the password of the [ `kmsuser` crypto user (CU) account][1] in + # the specified CloudHSM cluster. KMS logs into the cluster as this user + # to manage key material on your behalf. + # + # The password must be a string of 7 to 32 characters. Its value is case + # sensitive. + # + # This parameter tells KMS the `kmsuser` account password; it does not + # change the password in the CloudHSM cluster. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser + # + # @option params [String] :custom_key_store_type + # Specifies the type of custom key store. The default value is + # `AWS_CLOUDHSM`. + # + # For a custom key store backed by an CloudHSM cluster, omit the + # parameter or enter `AWS_CLOUDHSM`. For a custom key store backed by an + # external key manager outside of Amazon Web Services, enter + # `EXTERNAL_KEY_STORE`. You cannot change this property after the key + # store is created. 
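+ #
+ #   For a CloudHSM-backed store, either form below therefore creates the
+ #   same kind of key store (a sketch; the cluster ID is the documentation
+ #   placeholder and the certificate path is hypothetical):
+ #
+ #       params = {
+ #         custom_key_store_name: "ExampleKeyStore",
+ #         cloud_hsm_cluster_id: "cluster-1a23b4cdefg",
+ #         key_store_password: "kmsPswd",
+ #         trust_anchor_certificate: File.read("customerCA.crt"),
+ #       }
+ #       client.create_custom_key_store(params) # CustomKeyStoreType defaults to AWS_CLOUDHSM
+ #       # adding custom_key_store_type: "AWS_CLOUDHSM" explicitly would be equivalent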
+ # + # @option params [String] :xks_proxy_uri_endpoint + # Specifies the endpoint that KMS uses to send requests to the external + # key store proxy (XKS proxy). This parameter is required for custom key + # stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # The protocol must be HTTPS. KMS communicates on port 443. Do not + # specify the port in the `XksProxyUriEndpoint` value. + # + # For external key stores with `XksProxyConnectivity` value of + # `VPC_ENDPOINT_SERVICE`, specify `https://` followed by the private DNS + # name of the VPC endpoint service. + # + # For external key stores with `PUBLIC_ENDPOINT` connectivity, this + # endpoint must be reachable before you create the custom key store. KMS + # connects to the external key store proxy while creating the custom key + # store. For external key stores with `VPC_ENDPOINT_SERVICE` + # connectivity, KMS connects when you call the ConnectCustomKeyStore + # operation. + # + # The value of this parameter must begin with `https://`. The remainder + # can contain upper and lower case letters (A-Z and a-z), numbers (0-9), + # dots (`.`), and hyphens (`-`). Additional slashes (`/` and `\`) are + # not permitted. + # + # Uniqueness requirements: + # + # * The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must + # be unique in the Amazon Web Services account and Region. + # + # * An external key store with `PUBLIC_ENDPOINT` connectivity cannot use + # the same `XksProxyUriEndpoint` value as an external key store with + # `VPC_ENDPOINT_SERVICE` connectivity in the same Amazon Web Services + # Region. + # + # * Each external key store with `VPC_ENDPOINT_SERVICE` connectivity + # must have its own private DNS name. The `XksProxyUriEndpoint` value + # for external key stores with `VPC_ENDPOINT_SERVICE` connectivity + # (private DNS name) must be unique in the Amazon Web Services account + # and Region. + # + # @option params [String] :xks_proxy_uri_path + # Specifies the base path to the proxy APIs for this external key store. + # To find this value, see the documentation for your external key store + # proxy. This parameter is required for all custom key stores with a + # `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # The value must start with `/` and must end with `/kms/xks/v1` where + # `v1` represents the version of the KMS external key store proxy API. + # This path can include an optional prefix between the required elements + # such as `/prefix/kms/xks/v1`. + # + # Uniqueness requirements: + # + # * The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must + # be unique in the Amazon Web Services account and Region. + # + # ^ + # + # @option params [String] :xks_proxy_vpc_endpoint_service_name + # Specifies the name of the Amazon VPC endpoint service for interface + # endpoints that is used to communicate with your external key store + # proxy (XKS proxy). This parameter is required when the value of + # `CustomKeyStoreType` is `EXTERNAL_KEY_STORE` and the value of + # `XksProxyConnectivity` is `VPC_ENDPOINT_SERVICE`. + # + # The Amazon VPC endpoint service must [fulfill all requirements][1] for + # use with an external key store. + # + # **Uniqueness requirements:** + # + # * External key stores with `VPC_ENDPOINT_SERVICE` connectivity can + # share an Amazon VPC, but each external key store must have its own + # VPC endpoint service and private DNS name.
+ # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements + # + # @option params [Types::XksProxyAuthenticationCredentialType] :xks_proxy_authentication_credential + # Specifies an authentication credential for the external key store + # proxy (XKS proxy). This parameter is required for all custom key + # stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # The `XksProxyAuthenticationCredential` has two required elements: + # `RawSecretAccessKey`, a secret key, and `AccessKeyId`, a unique + # identifier for the `RawSecretAccessKey`. For character requirements, + # see + # [XksProxyAuthenticationCredentialType](https://docs.aws.amazon.com/kms/latest/APIReference/API_XksProxyAuthenticationCredentialType.html). + # + # KMS uses this authentication credential to sign requests to the + # external key store proxy on your behalf. This credential is unrelated + # to Identity and Access Management (IAM) and Amazon Web Services + # credentials. + # + # This parameter doesn't set or change the authentication credentials + # on the XKS proxy. It just tells KMS the credential that you + # established on your external key store proxy. If you rotate your proxy + # authentication credential, use the UpdateCustomKeyStore operation to + # provide the new credential to KMS. + # + # @option params [String] :xks_proxy_connectivity + # Indicates how KMS communicates with the external key store proxy. This + # parameter is required for custom key stores with a + # `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # If the external key store proxy uses a public endpoint, specify + # `PUBLIC_ENDPOINT`. If the external key store proxy uses an Amazon VPC + # endpoint service for communication with KMS, specify + # `VPC_ENDPOINT_SERVICE`. For help making this choice, see [Choosing a + # connectivity option][1] in the *Key Management Service Developer + # Guide*. + # + # An Amazon VPC endpoint service keeps your communication with KMS in a + # private address space entirely within Amazon Web Services, but it + # requires more configuration, including establishing an Amazon VPC with + # multiple subnets, a VPC endpoint service, a network load balancer, and + # a verified private DNS name. A public endpoint is simpler to set up, + # but it might be slower and might not fulfill your security + # requirements. You might consider testing with a public endpoint, and + # then establishing a VPC endpoint service for production tasks. Note + # that this choice does not determine the location of the external key + # store proxy. Even if you choose a VPC endpoint service, the proxy can + # be hosted within the VPC or outside of Amazon Web Services such as in + # your corporate data center. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity + # + # @return [Types::CreateCustomKeyStoreResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateCustomKeyStoreResponse#custom_key_store_id #custom_key_store_id} => String + # + # + # @example Example: To create an AWS CloudHSM key store + # + # # This example creates a custom key store that is associated with an AWS CloudHSM cluster. + # + # resp = client.create_custom_key_store({ + # cloud_hsm_cluster_id: "cluster-1a23b4cdefg", # The ID of the CloudHSM cluster. + # custom_key_store_name: "ExampleKeyStore", # A friendly name for the custom key store.
+ # key_store_password: "kmsPswd", # The password for the kmsuser CU account in the specified cluster. + # trust_anchor_certificate: "", # The content of the customerCA.crt file that you created when you initialized the cluster. + # }) + # + # resp.to_h outputs the following: + # { + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the new custom key store. + # } + # + # @example Example: To create an external key store with VPC endpoint service connectivity + # + # # This example creates an external key store that uses an Amazon VPC endpoint service to communicate with AWS KMS. + # + # resp = client.create_custom_key_store({ + # custom_key_store_name: "ExampleVPCEndpointKeyStore", # A friendly name for the custom key store + # custom_key_store_type: "EXTERNAL_KEY_STORE", # For external key stores, the value must be EXTERNAL_KEY_STORE + # xks_proxy_authentication_credential: { + # access_key_id: "ABCDE12345670EXAMPLE", + # raw_secret_access_key: "DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo=", + # }, # The access key ID and secret access key that KMS uses to authenticate to your external key store proxy + # xks_proxy_connectivity: "VPC_ENDPOINT_SERVICE", # Indicates how AWS KMS communicates with the external key store proxy + # xks_proxy_uri_endpoint: "https://myproxy-private.xks.example.com", # The URI that AWS KMS uses to connect to the external key store proxy + # xks_proxy_uri_path: "/example-prefix/kms/xks/v1", # The URI path to the external key store proxy APIs + # xks_proxy_vpc_endpoint_service_name: "com.amazonaws.vpce.us-east-1.vpce-svc-example1", # The VPC endpoint service that KMS uses to communicate with the external key store proxy + # }) + # + # resp.to_h outputs the following: + # { + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the new custom key store. + # } + # + # @example Example: To create an external key store with public endpoint connectivity + # + # # This example creates an external key store with public endpoint connectivity. + # + # resp = client.create_custom_key_store({ + # custom_key_store_name: "ExamplePublicEndpointKeyStore", # A friendly name for the custom key store + # custom_key_store_type: "EXTERNAL_KEY_STORE", # For external key stores, the value must be EXTERNAL_KEY_STORE + # xks_proxy_authentication_credential: { + # access_key_id: "ABCDE12345670EXAMPLE", + # raw_secret_access_key: "DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo=", + # }, # The access key ID and secret access key that KMS uses to authenticate to your external key store proxy + # xks_proxy_connectivity: "PUBLIC_ENDPOINT", # Indicates how AWS KMS communicates with the external key store proxy + # xks_proxy_uri_endpoint: "https://myproxy.xks.example.com", # The URI that AWS KMS uses to connect to the external key store proxy + # xks_proxy_uri_path: "/kms/xks/v1", # The URI path to your external key store proxy API + # }) + # + # resp.to_h outputs the following: + # { + # custom_key_store_id: "cks-987654321abcdef0", # The ID of the new custom key store. 
+ # } + # + # @example Request syntax with placeholder values + # + # resp = client.create_custom_key_store({ + # custom_key_store_name: "CustomKeyStoreNameType", # required + # cloud_hsm_cluster_id: "CloudHsmClusterIdType", + # trust_anchor_certificate: "TrustAnchorCertificateType", + # key_store_password: "KeyStorePasswordType", + # custom_key_store_type: "AWS_CLOUDHSM", # accepts AWS_CLOUDHSM, EXTERNAL_KEY_STORE + # xks_proxy_uri_endpoint: "XksProxyUriEndpointType", + # xks_proxy_uri_path: "XksProxyUriPathType", + # xks_proxy_vpc_endpoint_service_name: "XksProxyVpcEndpointServiceNameType", + # xks_proxy_authentication_credential: { + # access_key_id: "XksProxyAuthenticationAccessKeyIdType", # required + # raw_secret_access_key: "XksProxyAuthenticationRawSecretAccessKeyType", # required + # }, + # xks_proxy_connectivity: "PUBLIC_ENDPOINT", # accepts PUBLIC_ENDPOINT, VPC_ENDPOINT_SERVICE + # }) + # + # @example Response structure + # + # resp.custom_key_store_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore AWS API Documentation + # + # @overload create_custom_key_store(params = {}) + # @param [Hash] params ({}) + def create_custom_key_store(params = {}, options = {}) + req = build_request(:create_custom_key_store, params) + req.send_request(options) + end + + # Adds a grant to a KMS key. + # + # A *grant* is a policy instrument that allows Amazon Web Services + # principals to use KMS keys in cryptographic operations. It also can + # allow them to view a KMS key (DescribeKey) and create and manage + # grants. When authorizing access to a KMS key, grants are considered + # along with key policies and IAM policies. Grants are often used for + # temporary permissions because you can create one, use its permissions, + # and delete it without changing your key policies or IAM policies. + # + # For detailed information about grants, including grant terminology, + # see [Grants in KMS][1] in the Key Management Service Developer + # Guide . For examples of working with grants in several + # programming languages, see [Programming grants][2]. + # + # The `CreateGrant` operation returns a `GrantToken` and a `GrantId`. + # + # * When you create, retire, or revoke a grant, there might be a brief + # delay, usually less than five minutes, until the grant is available + # throughout KMS. This state is known as *eventual consistency*. Once + # the grant has achieved eventual consistency, the grantee principal + # can use the permissions in the grant without identifying the grant. + # + # However, to use the permissions in the grant immediately, use the + # `GrantToken` that `CreateGrant` returns. For details, see [Using a + # grant token][3] in the Key Management Service Developer + # Guide . + # + # * The `CreateGrant` operation also returns a `GrantId`. You can use + # the `GrantId` and a key identifier to identify the grant in the + # RetireGrant and RevokeGrant operations. To find the grant ID, use + # the ListGrants or ListRetirableGrants operations. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][4] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: Yes. To perform this operation on a KMS key in + # a different Amazon Web Services account, specify the key ARN in the + # value of the `KeyId` parameter. 
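+ #
+ # As described above, the returned grant token lets the grantee use the
+ # new permissions immediately, before the grant achieves eventual
+ # consistency. A sketch (the ARNs are the documentation placeholders,
+ # and the Encrypt call would be made by the grantee principal):
+ #
+ #     grant = client.create_grant({
+ #       key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ #       grantee_principal: "arn:aws:iam::111122223333:role/ExampleRole",
+ #       operations: ["Encrypt"],
+ #     })
+ #     client.encrypt({
+ #       key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ #       plaintext: "example data",
+ #       grant_tokens: [grant.grant_token],
+ #     })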
+ # + # **Required permissions**\: [kms:CreateGrant][5] (key policy) + # + # **Related operations:** + # + # * ListGrants + # + # * ListRetirableGrants + # + # * RetireGrant + # + # * RevokeGrant + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Identifies the KMS key for the grant. The grant gives principals + # permission to use this KMS key. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key in + # a different Amazon Web Services account, you must use the key ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, String] :grantee_principal + # The identity that gets the permissions specified in the grant. + # + # To specify the grantee principal, use the Amazon Resource Name (ARN) + # of an Amazon Web Services principal. Valid principals include Amazon + # Web Services accounts, IAM users, IAM roles, federated users, and + # assumed role users. For help with the ARN syntax for a principal, see + # [IAM ARNs][1] in the Identity and Access Management User + # Guide . + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + # + # @option params [String] :retiring_principal + # The principal that has permission to use the RetireGrant operation to + # retire the grant. + # + # To specify the principal, use the [Amazon Resource Name (ARN)][1] of + # an Amazon Web Services principal. Valid principals include Amazon Web + # Services accounts, IAM users, IAM roles, federated users, and assumed + # role users. For help with the ARN syntax for a principal, see [IAM + # ARNs][2] in the Identity and Access Management User Guide + # . + # + # The grant determines the retiring principal. Other principals might + # have permission to retire the grant or revoke the grant. For details, + # see RevokeGrant and [Retiring and revoking grants][3] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete + # + # @option params [required, Array] :operations + # A list of operations that the grant permits. + # + # This list must include only operations that are permitted in a grant. + # Also, the operation must be supported on the KMS key. For example, you + # cannot create a grant for a symmetric encryption KMS key that allows + # the Sign operation, or a grant for an asymmetric KMS key that allows + # the GenerateDataKey operation. If you try, KMS returns a + # `ValidationError` exception. For details, see [Grant operations][1] in + # the *Key Management Service Developer Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + # + # @option params [Types::GrantConstraints] :constraints + # Specifies a grant constraint. + # + # KMS supports the `EncryptionContextEquals` and + # `EncryptionContextSubset` grant constraints. Each constraint value can + # include up to 8 encryption context pairs. The encryption context value + # in each constraint cannot exceed 384 characters. For information about + # grant constraints, see [Using grant constraints][1] in the *Key + # Management Service Developer Guide*. For more information about + # encryption context, see [Encryption context][2] in the Key + # Management Service Developer Guide . + # + # The encryption context grant constraints allow the permissions in the + # grant only when the encryption context in the request matches + # (`EncryptionContextEquals`) or includes (`EncryptionContextSubset`) + # the encryption context specified in this structure. + # + # The encryption context grant constraints are supported only on [grant + # operations][3] that include an `EncryptionContext` parameter, such as + # cryptographic operations on symmetric encryption KMS keys. Grants with + # grant constraints can include the DescribeKey and RetireGrant + # operations, but the constraint doesn't apply to these operations. If + # a grant with a grant constraint includes the `CreateGrant` operation, + # the constraint requires that any grants created with the `CreateGrant` + # permission have an equally strict or stricter encryption context + # constraint. + # + # You cannot use an encryption context grant constraint for + # cryptographic operations with asymmetric KMS keys or HMAC KMS keys. + # These keys don't support an encryption context. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + # + # @option params [Array] :grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. For + # more information, see [Grant token][1] and [Using a grant token][2] in + # the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # + # @option params [String] :name + # A friendly name for the grant. Use this value to prevent the + # unintended creation of duplicate grants when retrying this request. + # + # When this value is absent, all `CreateGrant` requests result in a new + # grant with a unique `GrantId` even if all the supplied parameters are + # identical. This can result in unintended duplicates when you retry the + # `CreateGrant` request. + # + # When this value is present, you can retry a `CreateGrant` request with + # identical parameters; if the grant already exists, the original + # `GrantId` is returned without creating a new grant. Note that the + # returned grant token is unique with every `CreateGrant` request, even + # when a duplicate `GrantId` is returned. All grant tokens for the same + # grant ID can be used interchangeably. 
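+ #
+ #   A sketch of that retry behavior (values are the documentation
+ #   placeholders): repeating the request with the same name returns the
+ #   original grant ID rather than creating a duplicate grant.
+ #
+ #       args = {
+ #         key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #         grantee_principal: "arn:aws:iam::111122223333:role/ExampleRole",
+ #         operations: ["Decrypt"],
+ #         name: "example-grant",
+ #       }
+ #       first = client.create_grant(args)
+ #       again = client.create_grant(args)
+ #       first.grant_id == again.grant_id #=> true (the grant tokens may differ)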
+ # + # @return [Types::CreateGrantResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateGrantResponse#grant_token #grant_token} => String + # * {Types::CreateGrantResponse#grant_id #grant_id} => String + # + # + # @example Example: To create a grant + # + # # The following example creates a grant that allows the specified IAM role to encrypt data with the specified KMS key. + # + # resp = client.create_grant({ + # grantee_principal: "arn:aws:iam::111122223333:role/ExampleRole", # The identity that is given permission to perform the operations specified in the grant. + # key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to which the grant applies. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # operations: [ + # "Encrypt", + # "Decrypt", + # ], # A list of operations that the grant allows. + # }) + # + # resp.to_h outputs the following: + # { + # grant_id: "0c237476b39f8bc44e45212e08498fbe3151305030726c0590dd8d3e9f3d6a60", # The unique identifier of the grant. + # grant_token: "AQpAM2RhZTk1MGMyNTk2ZmZmMzEyYWVhOWViN2I1MWM4Mzc0MWFiYjc0ZDE1ODkyNGFlNTIzODZhMzgyZjBlNGY3NiKIAgEBAgB4Pa6VDCWW__MSrqnre1HIN0Grt00ViSSuUjhqOC8OT3YAAADfMIHcBgkqhkiG9w0BBwaggc4wgcsCAQAwgcUGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMmqLyBTAegIn9XlK5AgEQgIGXZQjkBcl1dykDdqZBUQ6L1OfUivQy7JVYO2-ZJP7m6f1g8GzV47HX5phdtONAP7K_HQIflcgpkoCqd_fUnE114mSmiagWkbQ5sqAVV3ov-VeqgrvMe5ZFEWLMSluvBAqdjHEdMIkHMlhlj4ENZbzBfo9Wxk8b8SnwP4kc4gGivedzFXo-dwN8fxjjq_ZZ9JFOj2ijIbj5FyogDCN0drOfi8RORSEuCEmPvjFRMFAwcmwFkN2NPp89amA", # The grant token. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.create_grant({ + # key_id: "KeyIdType", # required + # grantee_principal: "PrincipalIdType", # required + # retiring_principal: "PrincipalIdType", + # operations: ["Decrypt"], # required, accepts Decrypt, Encrypt, GenerateDataKey, GenerateDataKeyWithoutPlaintext, ReEncryptFrom, ReEncryptTo, Sign, Verify, GetPublicKey, CreateGrant, RetireGrant, DescribeKey, GenerateDataKeyPair, GenerateDataKeyPairWithoutPlaintext, GenerateMac, VerifyMac + # constraints: { + # encryption_context_subset: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # encryption_context_equals: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # }, + # grant_tokens: ["GrantTokenType"], + # name: "GrantNameType", + # }) + # + # @example Response structure + # + # resp.grant_token #=> String + # resp.grant_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant AWS API Documentation + # + # @overload create_grant(params = {}) + # @param [Hash] params ({}) + def create_grant(params = {}, options = {}) + req = build_request(:create_grant, params) + req.send_request(options) + end + + # Creates a unique customer managed [KMS key][1] in your Amazon Web + # Services account and Region. You can use a KMS key in cryptographic + # operations, such as encryption and signing. Some Amazon Web Services + # services let you use KMS keys that you create and manage to protect + # your service resources. + # + # A KMS key is a logical representation of a cryptographic key. In + # addition to the key material used in cryptographic operations, a KMS + # key includes metadata, such as the key ID, key policy, creation date, + # description, and key state. 
For details, see [Managing keys][2] in the + # *Key Management Service Developer Guide*. + # + # Use the parameters of `CreateKey` to specify the type of KMS key, the + # source of its key material, its key policy, description, tags, and + # other properties. + # + # KMS has replaced the term *customer master key (CMK)* with *KMS key*. + # The concept has not changed. To prevent breaking + # changes, KMS is keeping some variations of this term. + # + # + # + # To create different types of KMS keys, use the following guidance: + # + # Symmetric encryption KMS key + # + # : By default, `CreateKey` creates a symmetric encryption KMS key with + # key material that KMS generates. This is the basic and most widely + # used type of KMS key, and provides the best performance. + # + # To create a symmetric encryption KMS key, you don't need to specify + # any parameters. The default value for `KeySpec`, + # `SYMMETRIC_DEFAULT`, the default value for `KeyUsage`, + # `ENCRYPT_DECRYPT`, and the default value for `Origin`, `AWS_KMS`, + # create a symmetric encryption KMS key with KMS key material. + # + # If you need a key for basic encryption and decryption or you are + # creating a KMS key to protect your resources in an Amazon Web + # Services service, create a symmetric encryption KMS key. The key + # material in a symmetric encryption key never leaves KMS unencrypted. + # You can use a symmetric encryption KMS key to encrypt and decrypt + # data up to 4,096 bytes, but they are typically used to generate data + # keys and data key pairs. For details, see GenerateDataKey and + # GenerateDataKeyPair. + # + # + # + # Asymmetric KMS keys + # + # : To create an asymmetric KMS key, use the `KeySpec` parameter to + # specify the type of key material in the KMS key. Then, use the + # `KeyUsage` parameter to determine whether the KMS key will be used + # to encrypt and decrypt or sign and verify. You can't change these + # properties after the KMS key is created. + # + # Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) + # key pair, or an SM2 key pair (China Regions only). The private key + # in an asymmetric KMS key never leaves KMS unencrypted. However, you + # can use the GetPublicKey operation to download the public key so it + # can be used outside of KMS. KMS keys with RSA or SM2 key pairs can + # be used to encrypt or decrypt data or sign and verify messages (but + # not both). KMS keys with ECC key pairs can be used only to sign and + # verify messages. For information about asymmetric KMS keys, see + # [Asymmetric KMS keys][3] in the *Key Management Service Developer + # Guide*. + # + # + # + # HMAC KMS key + # + # : To create an HMAC KMS key, set the `KeySpec` parameter to a key spec + # value for HMAC KMS keys. Then set the `KeyUsage` parameter to + # `GENERATE_VERIFY_MAC`. You must set the key usage even though + # `GENERATE_VERIFY_MAC` is the only valid key usage value for HMAC KMS + # keys. You can't change these properties after the KMS key is + # created. + # + # HMAC KMS keys are symmetric keys that never leave KMS unencrypted. + # You can use HMAC keys to generate (GenerateMac) and verify + # (VerifyMac) HMAC codes for messages up to 4096 bytes. + # + # HMAC KMS keys are not supported in all Amazon Web Services Regions. + # If you try to create an HMAC KMS key in an Amazon Web Services + # Region in which HMAC keys are not supported, the `CreateKey` + # operation returns an `UnsupportedOperationException`.
For a list of + # Regions in which HMAC KMS keys are supported, see [HMAC keys in + # KMS][4] in the *Key Management Service Developer Guide*. + # + # + # + # Multi-Region primary keys + # + # : To create a multi-Region *primary key* in the local Amazon Web + # Services Region, use the `MultiRegion` parameter with a value of + # `True`. To create a multi-Region *replica key*, that is, a KMS key + # with the same key ID and key material as a primary key, but in a + # different Amazon Web Services Region, use the ReplicateKey + # operation. To change a replica key to a primary key, and its primary + # key to a replica key, use the UpdatePrimaryRegion operation. + # + # You can create multi-Region KMS keys for all supported KMS key + # types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric + # encryption KMS keys, and asymmetric signing KMS keys. You can also + # create multi-Region keys with imported key material. However, you + # can't create multi-Region keys in a custom key store. + # + # This operation supports *multi-Region keys*, a KMS feature that + # lets you create multiple interoperable KMS keys in different Amazon + # Web Services Regions. Because these KMS keys have the same key ID, + # key material, and other metadata, you can use them interchangeably + # to encrypt data in one Amazon Web Services Region and decrypt it in + # a different Amazon Web Services Region without re-encrypting the + # data or making a cross-Region call. For more information about + # multi-Region keys, see [Multi-Region keys in KMS][5] in the *Key + # Management Service Developer Guide*. + # + # + # + # Imported key material + # + # : To import your own key material into a KMS key, begin by creating a + # symmetric encryption KMS key with no key material. To do this, use + # the `Origin` parameter of `CreateKey` with a value of `EXTERNAL`. + # Next, use the GetParametersForImport operation to get a public key and + # import token, and use the public key to encrypt your key material. + # Then, use ImportKeyMaterial with your import token to import the key + # material. For step-by-step instructions, see [Importing Key + # Material][6] in the Key Management Service Developer + # Guide . + # + # This feature supports only symmetric encryption KMS keys, including + # multi-Region symmetric encryption KMS keys. You cannot import key + # material into any other type of KMS key. + # + # To create a multi-Region primary key with imported key material, use + # the `Origin` parameter of `CreateKey` with a value of `EXTERNAL` and + # the `MultiRegion` parameter with a value of `True`. To create + # replicas of the multi-Region primary key, use the ReplicateKey + # operation. For instructions, see [Importing key material into + # multi-Region keys][7]. For more information about multi-Region keys, + # see [Multi-Region keys in KMS][5] in the *Key Management Service + # Developer Guide*. + # + # + # + # Custom key store + # + # : A [custom key store][8] lets you protect your Amazon Web Services + # resources using keys in a backing key store that you own and manage. + # When you request a cryptographic operation with a KMS key in a + # custom key store, the operation is performed in the backing key + # store using its cryptographic keys. + # + # KMS supports [CloudHSM key stores][9] backed by an CloudHSM cluster + # and [external key stores][10] backed by an external key manager + # outside of Amazon Web Services.
When you create a KMS key in an + # CloudHSM key store, KMS generates an encryption key in the CloudHSM + # cluster and associates it with the KMS key. When you create a KMS + # key in an external key store, you specify an existing encryption key + # in the external key manager. + # + # Some external key managers provide a simpler method for creating a + # KMS key in an external key store. For details, see your external key + # manager documentation. + # + # + # + # Before you create a KMS key in a custom key store, the + # `ConnectionState` of the key store must be `CONNECTED`. To connect + # the custom key store, use the ConnectCustomKeyStore operation. To + # find the `ConnectionState`, use the DescribeCustomKeyStores + # operation. + # + # To create a KMS key in a custom key store, use the + # `CustomKeyStoreId`. Use the default `KeySpec` value, + # `SYMMETRIC_DEFAULT`, and the default `KeyUsage` value, + # `ENCRYPT_DECRYPT` to create a symmetric encryption key. No other key + # type is supported in a custom key store. + # + # To create a KMS key in an [CloudHSM key store][9], use the `Origin` + # parameter with a value of `AWS_CLOUDHSM`. The CloudHSM cluster that + # is associated with the custom key store must have at least two + # active HSMs in different Availability Zones in the Amazon Web + # Services Region. + # + # To create a KMS key in an [external key store][10], use the `Origin` + # parameter with a value of `EXTERNAL_KEY_STORE` and an `XksKeyId` + # parameter that identifies an existing external key. + # + # Some external key managers provide a simpler method for creating a + # KMS key in an external key store. For details, see your external key + # manager documentation. + # + # + # + # **Cross-account use**\: No. You cannot use this operation to create a + # KMS key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:CreateKey][11] (IAM policy). To use + # the `Tags` parameter, [kms:TagResource][11] (IAM policy). For examples + # and information about related permissions, see [Allow a user to create + # KMS keys][12] in the *Key Management Service Developer Guide*. + # + # **Related operations:** + # + # * DescribeKey + # + # * ListKeys + # + # * ScheduleKeyDeletion + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [12]: https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key + # + # @option params [String] :policy + # The key policy to attach to the KMS key. 
+ #
+ # If you provide a key policy, it must meet the following criteria:
+ #
+ # * The key policy must allow the calling principal to make a subsequent
+ # `PutKeyPolicy` request on the KMS key. This reduces the risk that
+ # the KMS key becomes unmanageable. For more information, see [Default
+ # key policy][1] in the *Key Management Service Developer Guide*. (To
+ # omit this condition, set `BypassPolicyLockoutSafetyCheck` to true.)
+ #
+ # * Each statement in the key policy must contain one or more
+ # principals. The principals in the key policy must exist and be
+ # visible to KMS. When you create a new Amazon Web Services principal,
+ # you might need to enforce a delay before including the new principal
+ # in a key policy because the new principal might not be immediately
+ # visible to KMS. For more information, see [Changes that I make are
+ # not always immediately visible][2] in the *Amazon Web Services
+ # Identity and Access Management User Guide*.
+ #
+ # If you do not provide a key policy, KMS attaches a default key policy
+ # to the KMS key. For more information, see [Default key policy][3] in
+ # the *Key Management Service Developer Guide*.
+ #
+ # The key policy size quota is 32 kilobytes (32768 bytes).
+ #
+ # For help writing and formatting a JSON policy document, see the [IAM
+ # JSON Policy Reference][4] in the *Identity and Access Management User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ #
+ # @option params [String] :description
+ # A description of the KMS key.
+ #
+ # Use a description that helps you decide whether the KMS key is
+ # appropriate for a task. The default value is an empty string (no
+ # description).
+ #
+ # To set or change the description after the key is created, use
+ # UpdateKeyDescription.
+ #
+ # @option params [String] :key_usage
+ # Determines the [cryptographic operations][1] for which you can use the
+ # KMS key. The default value is `ENCRYPT_DECRYPT`. This parameter is
+ # optional when you are creating a symmetric encryption KMS key;
+ # otherwise, it is required. You can't change the `KeyUsage` value
+ # after the KMS key is created.
+ #
+ # Select only one valid value.
+ #
+ # * For symmetric encryption KMS keys, omit the parameter or specify
+ # `ENCRYPT_DECRYPT`.
+ #
+ # * For HMAC KMS keys (symmetric), specify `GENERATE_VERIFY_MAC`.
+ #
+ # * For asymmetric KMS keys with RSA key material, specify
+ # `ENCRYPT_DECRYPT` or `SIGN_VERIFY`.
+ #
+ # * For asymmetric KMS keys with ECC key material, specify
+ # `SIGN_VERIFY`.
+ #
+ # * For asymmetric KMS keys with SM2 key material (China Regions only),
+ # specify `ENCRYPT_DECRYPT` or `SIGN_VERIFY`.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ #
+ # @option params [String] :customer_master_key_spec
+ # This parameter has been deprecated. Instead, use the `KeySpec`
+ # parameter.
+ #
+ # The `KeySpec` and `CustomerMasterKeySpec` parameters work the same
+ # way. Only the names differ. We recommend that you use the `KeySpec`
+ # parameter in your code. However, to avoid breaking changes, KMS
+ # supports both parameters.
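+ #
+ # For example, migrating to the preferred parameter is a rename (a
+ # minimal, hypothetical sketch; the key spec and key usage shown are
+ # arbitrary):
+ #
+ #     # Deprecated parameter name:
+ #     #   client.create_key(customer_master_key_spec: "RSA_2048", key_usage: "SIGN_VERIFY")
+ #     # Preferred parameter name (same behavior):
+ #     client.create_key(key_spec: "RSA_2048", key_usage: "SIGN_VERIFY")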
+ #
+ # @option params [String] :key_spec
+ # Specifies the type of KMS key to create. The default value,
+ # `SYMMETRIC_DEFAULT`, creates a KMS key with a 256-bit AES-GCM key that
+ # is used for encryption and decryption, except in China Regions, where
+ # it creates a 128-bit symmetric key that uses SM4 encryption. For help
+ # choosing a key spec for your KMS key, see [Choosing a KMS key type][1]
+ # in the *Key Management Service Developer Guide*.
+ #
+ # The `KeySpec` determines whether the KMS key contains a symmetric key
+ # or an asymmetric key pair. It also determines the algorithms that the
+ # KMS key supports. You can't change the `KeySpec` after the KMS key is
+ # created. To further restrict the algorithms that can be used with the
+ # KMS key, use a condition key in its key policy or IAM policy. For more
+ # information, see [kms:EncryptionAlgorithm][2], [kms:MacAlgorithm][3],
+ # or [kms:SigningAlgorithm][4] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # [Amazon Web Services services that are integrated with KMS][5] use
+ # symmetric encryption KMS keys to protect your data. These services do
+ # not support asymmetric KMS keys or HMAC KMS keys.
+ #
+ # KMS supports the following key specs for KMS keys:
+ #
+ # * Symmetric encryption key (default)
+ #
+ # * `SYMMETRIC_DEFAULT`
+ #
+ # ^
+ #
+ # * HMAC keys (symmetric)
+ #
+ # * `HMAC_224`
+ #
+ # * `HMAC_256`
+ #
+ # * `HMAC_384`
+ #
+ # * `HMAC_512`
+ #
+ # * Asymmetric RSA key pairs
+ #
+ # * `RSA_2048`
+ #
+ # * `RSA_3072`
+ #
+ # * `RSA_4096`
+ #
+ # * Asymmetric NIST-recommended elliptic curve key pairs
+ #
+ # * `ECC_NIST_P256` (secp256r1)
+ #
+ # * `ECC_NIST_P384` (secp384r1)
+ #
+ # * `ECC_NIST_P521` (secp521r1)
+ #
+ # * Other asymmetric elliptic curve key pairs
+ #
+ # * `ECC_SECG_P256K1` (secp256k1), commonly used for cryptocurrencies.
+ #
+ # ^
+ #
+ # * SM2 key pairs (China Regions only)
+ #
+ # * `SM2`
+ #
+ # ^
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm
+ # [5]: http://aws.amazon.com/kms/features/#AWS_Service_Integration
+ #
+ # @option params [String] :origin
+ # The source of the key material for the KMS key. You cannot change the
+ # origin after you create the KMS key. The default is `AWS_KMS`, which
+ # means that KMS creates the key material.
+ #
+ # To [create a KMS key with no key material][1] (for imported key
+ # material), set this value to `EXTERNAL`. For more information about
+ # importing key material into KMS, see [Importing Key Material][2] in
+ # the *Key Management Service Developer Guide*. The `EXTERNAL` origin
+ # value is valid only for symmetric KMS keys.
+ #
+ # To [create a KMS key in a CloudHSM key store][3] and create its key
+ # material in the associated CloudHSM cluster, set this value to
+ # `AWS_CLOUDHSM`. You must also use the `CustomKeyStoreId` parameter to
+ # identify the CloudHSM key store. The `KeySpec` value must be
+ # `SYMMETRIC_DEFAULT`.
+ #
+ # To [create a KMS key in an external key store][4], set this value to
+ # `EXTERNAL_KEY_STORE`.
You must also use the `CustomKeyStoreId`
+ # parameter to identify the external key store and the `XksKeyId`
+ # parameter to identify the associated external key. The `KeySpec` value
+ # must be `SYMMETRIC_DEFAULT`.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html
+ #
+ # @option params [String] :custom_key_store_id
+ # Creates the KMS key in the specified [custom key store][1]. The
+ # `ConnectionState` of the custom key store must be `CONNECTED`. To find
+ # the `CustomKeyStoreId` and `ConnectionState`, use the
+ # DescribeCustomKeyStores operation.
+ #
+ # This parameter is valid only for symmetric encryption KMS keys in a
+ # single Region. You cannot create any other type of KMS key in a custom
+ # key store.
+ #
+ # When you create a KMS key in a CloudHSM key store, KMS generates a
+ # non-exportable 256-bit symmetric key in its associated CloudHSM
+ # cluster and associates it with the KMS key. When you create a KMS key
+ # in an external key store, you must use the `XksKeyId` parameter to
+ # specify an external key that serves as key material for the KMS key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ #
+ # @option params [Boolean] :bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The default
+ # value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key becomes
+ # unmanageable. Do not set this value to true indiscriminately.
+ #
+ # For more information, see [Default key policy][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Use this parameter only when you intend to prevent the principal that
+ # is making the request from making a subsequent PutKeyPolicy request on
+ # the KMS key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ #
+ # @option params [Array<Types::Tag>] :tags
+ # Assigns one or more tags to the KMS key. Use this parameter to tag the
+ # KMS key when it is created. To tag an existing KMS key, use the
+ # TagResource operation.
+ #
+ # Tagging or untagging a KMS key can allow or deny permission to the KMS
+ # key. For details, see [ABAC for KMS][1] in the *Key Management Service
+ # Developer Guide*.
+ #
+ #
+ #
+ # To use this parameter, you must have [kms:TagResource][2] permission
+ # in an IAM policy.
+ #
+ # Each tag consists of a tag key and a tag value. Both the tag key and
+ # the tag value are required, but the tag value can be an empty (null)
+ # string. You cannot have more than one tag on a KMS key with the same
+ # tag key. If you specify an existing tag key with a different tag
+ # value, KMS replaces the current tag value with the specified one.
+ #
+ # When you add tags to an Amazon Web Services resource, Amazon Web
+ # Services generates a cost allocation report with usage and costs
+ # aggregated by tags. Tags can also be used to control access to a KMS
+ # key. For details, see [Tagging Keys][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html
+ #
+ # @option params [Boolean] :multi_region
+ # Creates a multi-Region primary key that you can replicate into other
+ # Amazon Web Services Regions. You cannot change this value after you
+ # create the KMS key.
+ #
+ # For a multi-Region key, set this parameter to `True`. For a
+ # single-Region KMS key, omit this parameter or set it to `False`. The
+ # default value is `False`.
+ #
+ # This operation supports *multi-Region keys*, a KMS feature that lets
+ # you create multiple interoperable KMS keys in different Amazon Web
+ # Services Regions. Because these KMS keys have the same key ID, key
+ # material, and other metadata, you can use them interchangeably to
+ # encrypt data in one Amazon Web Services Region and decrypt it in a
+ # different Amazon Web Services Region without re-encrypting the data or
+ # making a cross-Region call. For more information about multi-Region
+ # keys, see [Multi-Region keys in KMS][1] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # This value creates a *primary key*, not a replica. To create a
+ # *replica key*, use the ReplicateKey operation.
+ #
+ # You can create a symmetric or asymmetric multi-Region key, and you can
+ # create a multi-Region key with imported key material. However, you
+ # cannot create a multi-Region key in a custom key store.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html
+ #
+ # @option params [String] :xks_key_id
+ # Identifies the [external key][1] that serves as key material for the
+ # KMS key in an [external key store][2]. Specify the ID that the
+ # [external key store proxy][3] uses to refer to the external key. For
+ # help, see the documentation for your external key store proxy.
+ #
+ # This parameter is required for a KMS key with an `Origin` value of
+ # `EXTERNAL_KEY_STORE`. It is not valid for KMS keys with any other
+ # `Origin` value.
+ #
+ # The external key must be an existing 256-bit AES symmetric encryption
+ # key hosted outside of Amazon Web Services in an external key manager
+ # associated with the external key store specified by the
+ # `CustomKeyStoreId` parameter. This key must be enabled and configured
+ # to perform encryption and decryption. Each KMS key in an external key
+ # store must use a different external key. For details, see
+ # [Requirements for a KMS key in an external key store][4] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Each KMS key in an external key store is associated with two backing
+ # keys. One is key material that KMS generates. The other is the
+ # external key specified by this parameter. When you use the KMS key in
+ # an external key store to encrypt data, the encryption operation is
+ # performed first by KMS using the KMS key material, and then by the
+ # external key manager using the specified external key, a process known
+ # as *double encryption*. For details, see [Double encryption][5] in the
+ # *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html#xks-key-requirements
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption
+ #
+ # @return [Types::CreateKeyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateKeyResponse#key_metadata #key_metadata} => Types::KeyMetadata
+ #
+ #
+ # @example Example: To create a KMS key
+ #
+ # # The following example creates a symmetric KMS key for encryption and decryption. No parameters are required for this
+ # # operation.
+ #
+ # resp = client.create_key({
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2017-07-05T14:04:55-07:00"),
+ # customer_master_key_spec: "SYMMETRIC_DEFAULT",
+ # description: "",
+ # enabled: true,
+ # encryption_algorithms: [
+ # "SYMMETRIC_DEFAULT",
+ # ],
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "SYMMETRIC_DEFAULT",
+ # key_state: "Enabled",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: false,
+ # origin: "AWS_KMS",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create an asymmetric RSA KMS key for encryption and decryption
+ #
+ # # This example creates a KMS key that contains an asymmetric RSA key pair for encryption and decryption. The key spec and
+ # # key usage can't be changed after the key is created.
+ #
+ # resp = client.create_key({
+ # key_spec: "RSA_4096", # Describes the type of key material in the KMS key.
+ # key_usage: "ENCRYPT_DECRYPT", # The cryptographic operations for which you can use the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2021-04-05T14:04:55-07:00"),
+ # customer_master_key_spec: "RSA_4096",
+ # description: "",
+ # enabled: true,
+ # encryption_algorithms: [
+ # "RSAES_OAEP_SHA_1",
+ # "RSAES_OAEP_SHA_256",
+ # ],
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "RSA_4096",
+ # key_state: "Enabled",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: false,
+ # origin: "AWS_KMS",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create an asymmetric elliptic curve KMS key for signing and verification
+ #
+ # # This example creates a KMS key that contains an asymmetric elliptic curve (ECC) key pair for signing and verification.
+ # # The key usage is required even though "SIGN_VERIFY" is the only valid value for ECC KMS keys. The key spec and key usage
+ # # can't be changed after the key is created.
+ #
+ # resp = client.create_key({
+ # key_spec: "ECC_NIST_P521", # Describes the type of key material in the KMS key.
+ # key_usage: "SIGN_VERIFY", # The cryptographic operations for which you can use the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2019-12-02T07:48:55-07:00"),
+ # customer_master_key_spec: "ECC_NIST_P521",
+ # description: "",
+ # enabled: true,
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "ECC_NIST_P521",
+ # key_state: "Enabled",
+ # key_usage: "SIGN_VERIFY",
+ # multi_region: false,
+ # origin: "AWS_KMS",
+ # signing_algorithms: [
+ # "ECDSA_SHA_512",
+ # ],
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create an HMAC KMS key
+ #
+ # # This example creates a 384-bit symmetric HMAC KMS key. The GENERATE_VERIFY_MAC key usage value is required even though
+ # # it's the only valid value for HMAC KMS keys. The key spec and key usage can't be changed after the key is created.
+ #
+ # resp = client.create_key({
+ # key_spec: "HMAC_384", # Describes the type of key material in the KMS key.
+ # key_usage: "GENERATE_VERIFY_MAC", # The cryptographic operations for which you can use the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2022-04-05T14:04:55-07:00"),
+ # customer_master_key_spec: "HMAC_384",
+ # description: "",
+ # enabled: true,
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "HMAC_384",
+ # key_state: "Enabled",
+ # key_usage: "GENERATE_VERIFY_MAC",
+ # mac_algorithms: [
+ # "HMAC_SHA_384",
+ # ],
+ # multi_region: false,
+ # origin: "AWS_KMS",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create a multi-Region primary KMS key
+ #
+ # # This example creates a multi-Region primary symmetric encryption key. Because the default values for all parameters
+ # # create a symmetric encryption key, only the MultiRegion parameter is required for this KMS key.
+ #
+ # resp = client.create_key({
+ # multi_region: true, # Indicates whether the KMS key is a multi-Region (True) or regional (False) key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-west-2:111122223333:key/mrk-1234abcd12ab34cd56ef12345678990ab",
+ # creation_date: Time.parse("2021-09-02T16:15:21-09:00"),
+ # customer_master_key_spec: "SYMMETRIC_DEFAULT",
+ # description: "",
+ # enabled: true,
+ # encryption_algorithms: [
+ # "SYMMETRIC_DEFAULT",
+ # ],
+ # key_id: "mrk-1234abcd12ab34cd56ef12345678990ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "SYMMETRIC_DEFAULT",
+ # key_state: "Enabled",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: true,
+ # multi_region_configuration: {
+ # multi_region_key_type: "PRIMARY",
+ # primary_key: {
+ # arn: "arn:aws:kms:us-west-2:111122223333:key/mrk-1234abcd12ab34cd56ef12345678990ab",
+ # region: "us-west-2",
+ # },
+ # replica_keys: [
+ # ],
+ # },
+ # origin: "AWS_KMS",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create a KMS key for imported key material
+ #
+ # # This example creates a KMS key with no key material. When the operation is complete, you can import your own key
+ # # material into the KMS key.
To create this KMS key, set the Origin parameter to EXTERNAL.
+ #
+ # resp = client.create_key({
+ # origin: "EXTERNAL", # The source of the key material for the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2019-12-02T07:48:55-07:00"),
+ # customer_master_key_spec: "SYMMETRIC_DEFAULT",
+ # description: "",
+ # enabled: false,
+ # encryption_algorithms: [
+ # "SYMMETRIC_DEFAULT",
+ # ],
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "SYMMETRIC_DEFAULT",
+ # key_state: "PendingImport",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: false,
+ # origin: "EXTERNAL",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create a KMS key in an AWS CloudHSM key store
+ #
+ # # This example creates a KMS key in the specified AWS CloudHSM key store. The operation creates the KMS key and its
+ # # metadata in AWS KMS and creates the key material in the AWS CloudHSM cluster associated with the custom key store. This
+ # # example requires the CustomKeyStoreId and Origin parameters.
+ #
+ # resp = client.create_key({
+ # custom_key_store_id: "cks-1234567890abcdef0", # Identifies the custom key store that hosts the KMS key.
+ # origin: "AWS_CLOUDHSM", # Indicates the source of the key material for the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # cloud_hsm_cluster_id: "cluster-1a23b4cdefg",
+ # creation_date: Time.parse("2019-12-02T07:48:55-07:00"),
+ # custom_key_store_id: "cks-1234567890abcdef0",
+ # customer_master_key_spec: "SYMMETRIC_DEFAULT",
+ # description: "",
+ # enabled: true,
+ # encryption_algorithms: [
+ # "SYMMETRIC_DEFAULT",
+ # ],
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "SYMMETRIC_DEFAULT",
+ # key_state: "Enabled",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: false,
+ # origin: "AWS_CLOUDHSM",
+ # }, # Detailed information about the KMS key that this operation creates.
+ # }
+ #
+ # @example Example: To create a KMS key in an external key store
+ #
+ # # This example creates a KMS key in the specified external key store. It uses the XksKeyId parameter to associate the KMS
+ # # key with an existing symmetric encryption key in your external key manager. The CustomKeyStoreId, Origin, and XksKeyId
+ # # parameters are required in this operation.
+ #
+ # resp = client.create_key({
+ # custom_key_store_id: "cks-9876543210fedcba9", # Identifies the custom key store that hosts the KMS key.
+ # origin: "EXTERNAL_KEY_STORE", # Indicates the source of the key material for the KMS key.
+ # xks_key_id: "bb8562717f809024", # Identifies the encryption key in your external key manager that is associated with the KMS key + # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "111122223333", + # arn: "arn:aws:kms:us-east-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", + # creation_date: Time.parse("2022-02-02T07:48:55-07:00"), + # custom_key_store_id: "cks-9876543210fedcba9", + # customer_master_key_spec: "SYMMETRIC_DEFAULT", + # description: "", + # enabled: true, + # encryption_algorithms: [ + # "SYMMETRIC_DEFAULT", + # ], + # key_id: "0987dcba-09fe-87dc-65ba-ab0987654321", + # key_manager: "CUSTOMER", + # key_spec: "SYMMETRIC_DEFAULT", + # key_state: "Enabled", + # key_usage: "ENCRYPT_DECRYPT", + # multi_region: false, + # origin: "EXTERNAL_KEY_STORE", + # xks_key_configuration: { + # id: "bb8562717f809024", + # }, + # }, # Detailed information about the KMS key that this operation creates. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.create_key({ + # policy: "PolicyType", + # description: "DescriptionType", + # key_usage: "SIGN_VERIFY", # accepts SIGN_VERIFY, ENCRYPT_DECRYPT, GENERATE_VERIFY_MAC + # customer_master_key_spec: "RSA_2048", # accepts RSA_2048, RSA_3072, RSA_4096, ECC_NIST_P256, ECC_NIST_P384, ECC_NIST_P521, ECC_SECG_P256K1, SYMMETRIC_DEFAULT, HMAC_224, HMAC_256, HMAC_384, HMAC_512, SM2 + # key_spec: "RSA_2048", # accepts RSA_2048, RSA_3072, RSA_4096, ECC_NIST_P256, ECC_NIST_P384, ECC_NIST_P521, ECC_SECG_P256K1, SYMMETRIC_DEFAULT, HMAC_224, HMAC_256, HMAC_384, HMAC_512, SM2 + # origin: "AWS_KMS", # accepts AWS_KMS, EXTERNAL, AWS_CLOUDHSM, EXTERNAL_KEY_STORE + # custom_key_store_id: "CustomKeyStoreIdType", + # bypass_policy_lockout_safety_check: false, + # tags: [ + # { + # tag_key: "TagKeyType", # required + # tag_value: "TagValueType", # required + # }, + # ], + # multi_region: false, + # xks_key_id: "XksKeyIdType", + # }) + # + # @example Response structure + # + # resp.key_metadata.aws_account_id #=> String + # resp.key_metadata.key_id #=> String + # resp.key_metadata.arn #=> String + # resp.key_metadata.creation_date #=> Time + # resp.key_metadata.enabled #=> Boolean + # resp.key_metadata.description #=> String + # resp.key_metadata.key_usage #=> String, one of "SIGN_VERIFY", "ENCRYPT_DECRYPT", "GENERATE_VERIFY_MAC" + # resp.key_metadata.key_state #=> String, one of "Creating", "Enabled", "Disabled", "PendingDeletion", "PendingImport", "PendingReplicaDeletion", "Unavailable", "Updating" + # resp.key_metadata.deletion_date #=> Time + # resp.key_metadata.valid_to #=> Time + # resp.key_metadata.origin #=> String, one of "AWS_KMS", "EXTERNAL", "AWS_CLOUDHSM", "EXTERNAL_KEY_STORE" + # resp.key_metadata.custom_key_store_id #=> String + # resp.key_metadata.cloud_hsm_cluster_id #=> String + # resp.key_metadata.expiration_model #=> String, one of "KEY_MATERIAL_EXPIRES", "KEY_MATERIAL_DOES_NOT_EXPIRE" + # resp.key_metadata.key_manager #=> String, one of "AWS", "CUSTOMER" + # resp.key_metadata.customer_master_key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.key_metadata.key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # 
resp.key_metadata.encryption_algorithms #=> Array
+ # resp.key_metadata.encryption_algorithms[0] #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE"
+ # resp.key_metadata.signing_algorithms #=> Array
+ # resp.key_metadata.signing_algorithms[0] #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA"
+ # resp.key_metadata.multi_region #=> Boolean
+ # resp.key_metadata.multi_region_configuration.multi_region_key_type #=> String, one of "PRIMARY", "REPLICA"
+ # resp.key_metadata.multi_region_configuration.primary_key.arn #=> String
+ # resp.key_metadata.multi_region_configuration.primary_key.region #=> String
+ # resp.key_metadata.multi_region_configuration.replica_keys #=> Array
+ # resp.key_metadata.multi_region_configuration.replica_keys[0].arn #=> String
+ # resp.key_metadata.multi_region_configuration.replica_keys[0].region #=> String
+ # resp.key_metadata.pending_deletion_window_in_days #=> Integer
+ # resp.key_metadata.mac_algorithms #=> Array
+ # resp.key_metadata.mac_algorithms[0] #=> String, one of "HMAC_SHA_224", "HMAC_SHA_256", "HMAC_SHA_384", "HMAC_SHA_512"
+ # resp.key_metadata.xks_key_configuration.id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey AWS API Documentation
+ #
+ # @overload create_key(params = {})
+ # @param [Hash] params ({})
+ def create_key(params = {}, options = {})
+ req = build_request(:create_key, params)
+ req.send_request(options)
+ end
+
+ # Decrypts ciphertext that was encrypted by a KMS key using any of the
+ # following operations:
+ #
+ # * Encrypt
+ #
+ # * GenerateDataKey
+ #
+ # * GenerateDataKeyPair
+ #
+ # * GenerateDataKeyWithoutPlaintext
+ #
+ # * GenerateDataKeyPairWithoutPlaintext
+ #
+ # You can use this operation to decrypt ciphertext that was encrypted
+ # under a symmetric encryption KMS key or an asymmetric encryption KMS
+ # key. When the KMS key is asymmetric, you must specify the KMS key and
+ # the encryption algorithm that was used to encrypt the ciphertext. For
+ # information about asymmetric KMS keys, see [Asymmetric KMS keys][1] in
+ # the *Key Management Service Developer Guide*.
+ #
+ # The `Decrypt` operation also decrypts ciphertext that was encrypted
+ # outside of KMS by the public key of an asymmetric KMS key.
+ # However, it cannot decrypt symmetric ciphertext produced by other
+ # libraries, such as the [Amazon Web Services Encryption SDK][2] or
+ # [Amazon S3 client-side encryption][3]. These libraries return a
+ # ciphertext format that is incompatible with KMS.
+ #
+ # If the ciphertext was encrypted under a symmetric encryption KMS key,
+ # the `KeyId` parameter is optional. KMS can get this information from
+ # metadata that it adds to the symmetric ciphertext blob. This feature
+ # adds durability to your implementation by ensuring that authorized
+ # users can decrypt ciphertext decades after it was encrypted, even if
+ # they've lost track of the key ID. However, specifying the KMS key is
+ # always recommended as a best practice. When you use the `KeyId`
+ # parameter to specify a KMS key, KMS only uses the KMS key you specify.
+ # If the ciphertext was encrypted under a different KMS key, the
+ # `Decrypt` operation fails. This practice ensures that you use the KMS
+ # key that you intend.
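+ #
+ # As a minimal sketch of that best practice (the key ID below is a
+ # placeholder), pass the same `KeyId` to `Decrypt` that you used to
+ # encrypt:
+ #
+ #     # Hypothetical example; assumes an existing symmetric encryption KMS key.
+ #     key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"
+ #     ciphertext = client.encrypt(key_id: key_id, plaintext: "my secret data").ciphertext_blob
+ #     # Naming the key makes Decrypt fail fast if the ciphertext was
+ #     # encrypted under any other KMS key.
+ #     plaintext = client.decrypt(ciphertext_blob: ciphertext, key_id: key_id).plaintext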
+ #
+ # Whenever possible, use key policies to give users permission to call
+ # the `Decrypt` operation on a particular KMS key, instead of using
+ # IAM policies. Otherwise, you might create an IAM policy
+ # that gives the user `Decrypt` permission on all KMS keys. This user
+ # could decrypt ciphertext that was encrypted by KMS keys in other
+ # accounts if the key policy for the cross-account KMS key permits it.
+ # If you must use an IAM policy for `Decrypt` permissions, limit the
+ # user to particular KMS keys or particular trusted accounts. For
+ # details, see [Best practices for IAM policies][4] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Applications in Amazon Web Services Nitro Enclaves can call this
+ # operation by using the [Amazon Web Services Nitro Enclaves Development
+ # Kit][5]. For information about the supporting parameters, see [How
+ # Amazon Web Services Nitro Enclaves use KMS][6] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][7] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: Yes. If you use the `KeyId` parameter to
+ # identify a KMS key in a different Amazon Web Services account, specify
+ # the key ARN or the alias ARN of the KMS key.
+ #
+ # **Required permissions**\: [kms:Decrypt][8] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * Encrypt
+ #
+ # * GenerateDataKey
+ #
+ # * GenerateDataKeyPair
+ #
+ # * ReEncrypt
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # [2]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices
+ # [5]: https://github.com/aws/aws-nitro-enclaves-sdk-c
+ # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html
+ # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String, StringIO, File] :ciphertext_blob
+ # Ciphertext to be decrypted. The blob includes metadata.
+ #
+ # @option params [Hash<String,String>] :encryption_context
+ # Specifies the encryption context to use when decrypting the data. An
+ # encryption context is valid only for [cryptographic operations][1]
+ # with a symmetric encryption KMS key. The standard asymmetric
+ # encryption algorithms and HMAC algorithms that KMS uses do not support
+ # an encryption context.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][2] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @option params [String] :key_id
+ # Specifies the KMS key that KMS uses to decrypt the ciphertext.
+ #
+ # Enter a key ID of the KMS key that was used to encrypt the ciphertext.
+ # If you identify a different KMS key, the `Decrypt` operation throws an
+ # `IncorrectKeyException`.
+ #
+ # This parameter is required only when the ciphertext was encrypted
+ # under an asymmetric KMS key. If you used a symmetric encryption KMS
+ # key, KMS can get the KMS key from metadata that it adds to the
+ # symmetric ciphertext blob. However, specifying the KMS key is always
+ # recommended as a best practice. This practice ensures that you use the
+ # KMS key that you intend.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [String] :encryption_algorithm
+ # Specifies the encryption algorithm that will be used to decrypt the
+ # ciphertext. Specify the same algorithm that was used to encrypt the
+ # data. If you specify a different algorithm, the `Decrypt` operation
+ # fails.
+ #
+ # This parameter is required only when the ciphertext was encrypted
+ # under an asymmetric KMS key. The default value, `SYMMETRIC_DEFAULT`,
+ # represents the only supported algorithm that is valid for symmetric
+ # encryption KMS keys.
+ #
+ # @return [Types::DecryptResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DecryptResponse#key_id #key_id} => String
+ # * {Types::DecryptResponse#plaintext #plaintext} => String
+ # * {Types::DecryptResponse#encryption_algorithm #encryption_algorithm} => String
+ #
+ #
+ # @example Example: To decrypt data
+ #
+ # # The following example decrypts data that was encrypted with a KMS key.
+ #
+ # resp = client.decrypt({
+ # ciphertext_blob: "", # The encrypted data (ciphertext).
+ # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # A key identifier for the KMS key to use to decrypt the data.
+ # }) + # + # resp.to_h outputs the following: + # { + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The Amazon Resource Name (ARN) of the KMS key that was used to decrypt the data. + # plaintext: "", # The decrypted (plaintext) data. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.decrypt({ + # ciphertext_blob: "data", # required + # encryption_context: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # grant_tokens: ["GrantTokenType"], + # key_id: "KeyIdType", + # encryption_algorithm: "SYMMETRIC_DEFAULT", # accepts SYMMETRIC_DEFAULT, RSAES_OAEP_SHA_1, RSAES_OAEP_SHA_256, SM2PKE + # }) + # + # @example Response structure + # + # resp.key_id #=> String + # resp.plaintext #=> String + # resp.encryption_algorithm #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt AWS API Documentation + # + # @overload decrypt(params = {}) + # @param [Hash] params ({}) + def decrypt(params = {}, options = {}) + req = build_request(:decrypt, params) + req.send_request(options) + end + + # Deletes the specified alias. + # + # Adding, deleting, or updating an alias can allow or deny permission to + # the KMS key. For details, see [ABAC for KMS][1] in the *Key Management + # Service Developer Guide*. + # + # + # + # Because an alias is not a property of a KMS key, you can delete and + # change the aliases of a KMS key without affecting the KMS key. Also, + # aliases do not appear in the response from the DescribeKey operation. + # To get the aliases of all KMS keys, use the ListAliases operation. + # + # Each KMS key can have multiple aliases. To change the alias of a KMS + # key, use DeleteAlias to delete the current alias and CreateAlias to + # create a new alias. To associate an existing alias with a different + # KMS key, call UpdateAlias. + # + # **Cross-account use**\: No. You cannot perform this operation on an + # alias in a different Amazon Web Services account. + # + # **Required permissions** + # + # * [kms:DeleteAlias][2] on the alias (IAM policy). + # + # * [kms:DeleteAlias][2] on the KMS key (key policy). + # + # For details, see [Controlling access to aliases][3] in the *Key + # Management Service Developer Guide*. + # + # **Related operations:** + # + # * CreateAlias + # + # * ListAliases + # + # * UpdateAlias + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access + # + # @option params [required, String] :alias_name + # The alias to be deleted. The alias name must begin with `alias/` + # followed by the alias name, such as `alias/ExampleAlias`. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete an alias + # + # # The following example deletes the specified alias. + # + # resp = client.delete_alias({ + # alias_name: "alias/ExampleAlias", # The alias to delete. 
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.delete_alias({
+ # alias_name: "AliasNameType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias AWS API Documentation
+ #
+ # @overload delete_alias(params = {})
+ # @param [Hash] params ({})
+ def delete_alias(params = {}, options = {})
+ req = build_request(:delete_alias, params)
+ req.send_request(options)
+ end
+
+ # Deletes a [custom key store][1]. This operation does not affect any
+ # backing elements of the custom key store. It does not delete the
+ # CloudHSM cluster that is associated with a CloudHSM key store, or
+ # affect any users or keys in the cluster. For an external key store, it
+ # does not affect the external key store proxy, external key manager, or
+ # any external keys.
+ #
+ # This operation is part of the [custom key stores][1] feature in KMS,
+ # which combines the convenience and extensive integration of KMS with
+ # the isolation and control of a key store that you own and manage.
+ #
+ # The custom key store that you delete cannot contain any [KMS keys][2].
+ # Before deleting the key store, verify that you will never need to use
+ # any of the KMS keys in the key store for any [cryptographic
+ # operations][3]. Then, use ScheduleKeyDeletion to delete the KMS keys
+ # from the key store. After the required waiting period expires and all
+ # KMS keys are deleted from the custom key store, use
+ # DisconnectCustomKeyStore to disconnect the key store from KMS. Then,
+ # you can delete the custom key store. This sequence is sketched below.
+ #
+ # For keys in a CloudHSM key store, the `ScheduleKeyDeletion` operation
+ # makes a best effort to delete the key material from the associated
+ # cluster. However, you might need to manually [delete the orphaned key
+ # material][4] from the cluster and its backups. KMS never creates,
+ # manages, or deletes cryptographic keys in the external key manager
+ # associated with an external key store. You must manage them using your
+ # external key manager tools.
+ #
+ # Instead of deleting the custom key store, consider using the
+ # DisconnectCustomKeyStore operation to disconnect the custom key store
+ # from its backing key store. While the key store is disconnected, you
+ # cannot create or use the KMS keys in the key store. But you do not
+ # need to delete KMS keys, and you can reconnect a disconnected custom
+ # key store at any time.
+ #
+ # If the operation succeeds, it returns a JSON object with no
+ # properties.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a
+ # custom key store in a different Amazon Web Services account.
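+ #
+ # As a minimal sketch of the teardown sequence described above (the key
+ # store ID and key ID are placeholders, and the waiting period is
+ # elided), the calls might look like this:
+ #
+ #     # Hypothetical example; assumes the key store contains only KMS
+ #     # keys that you no longer need.
+ #     store_id = "cks-1234567890abcdef0"
+ #     client.schedule_key_deletion(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", pending_window_in_days: 7)
+ #     # ...after the waiting period expires and the keys are deleted...
+ #     client.disconnect_custom_key_store(custom_key_store_id: store_id)
+ #     client.delete_custom_key_store(custom_key_store_id: store_id)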
+ # + # **Required permissions**\: [kms:DeleteCustomKeyStore][5] (IAM policy) + # + # **Related operations:** + # + # * ConnectCustomKeyStore + # + # * CreateCustomKeyStore + # + # * DescribeCustomKeyStores + # + # * DisconnectCustomKeyStore + # + # * UpdateCustomKeyStore + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :custom_key_store_id + # Enter the ID of the custom key store you want to delete. To find the + # ID of a custom key store, use the DescribeCustomKeyStores operation. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete a custom key store from AWS KMS + # + # # This example deletes a custom key store from AWS KMS. This operation does not affect the backing key store, such as a + # # CloudHSM cluster, external key store proxy, or your external key manager. This operation doesn't return any data. To + # # verify that the operation was successful, use the DescribeCustomKeyStores operation. + # + # resp = client.delete_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store to be deleted. + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.delete_custom_key_store({ + # custom_key_store_id: "CustomKeyStoreIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStore AWS API Documentation + # + # @overload delete_custom_key_store(params = {}) + # @param [Hash] params ({}) + def delete_custom_key_store(params = {}, options = {}) + req = build_request(:delete_custom_key_store, params) + req.send_request(options) + end + + # Deletes key material that you previously imported. This operation + # makes the specified KMS key unusable. For more information about + # importing key material into KMS, see [Importing Key Material][1] in + # the *Key Management Service Developer Guide*. + # + # When the specified KMS key is in the `PendingDeletion` state, this + # operation does not change the KMS key's state. Otherwise, it changes + # the KMS key's state to `PendingImport`. + # + # After you delete key material, you can use ImportKeyMaterial to + # reimport the same key material into the KMS key. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][2] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. 
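+ #
+ # As a minimal sketch of the delete-and-reimport cycle (the key ID is a
+ # placeholder, and wrapping your key material with the public key
+ # happens outside the SDK), the calls might look like this:
+ #
+ #     # Hypothetical example; assumes a KMS key with Origin EXTERNAL.
+ #     key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"
+ #     client.delete_imported_key_material(key_id: key_id) # key state becomes PendingImport
+ #     # To reimport, fetch a fresh public key and import token...
+ #     params = client.get_parameters_for_import(
+ #       key_id: key_id,
+ #       wrapping_algorithm: "RSAES_OAEP_SHA_256",
+ #       wrapping_key_spec: "RSA_2048",
+ #     )
+ #     # ...encrypt the same key material under params.public_key, then:
+ #     client.import_key_material(
+ #       key_id: key_id,
+ #       import_token: params.import_token,
+ #       encrypted_key_material: wrapped_material, # produced out of band
+ #       expiration_model: "KEY_MATERIAL_DOES_NOT_EXPIRE",
+ #     )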
+ #
+ # **Required permissions**\: [kms:DeleteImportedKeyMaterial][3] (key
+ # policy)
+ #
+ # **Related operations:**
+ #
+ # * GetParametersForImport
+ #
+ # * ImportKeyMaterial
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies the KMS key from which you are deleting imported key
+ # material. The `Origin` of the KMS key must be `EXTERNAL`.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To delete imported key material
+ #
+ # # The following example deletes the imported key material from the specified KMS key.
+ #
+ # resp = client.delete_imported_key_material({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose imported key material you are deleting. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.delete_imported_key_material({
+ # key_id: "KeyIdType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial AWS API Documentation
+ #
+ # @overload delete_imported_key_material(params = {})
+ # @param [Hash] params ({})
+ def delete_imported_key_material(params = {}, options = {})
+ req = build_request(:delete_imported_key_material, params)
+ req.send_request(options)
+ end
+
+ # Gets information about [custom key stores][1] in the account and
+ # Region.
+ #
+ # This operation is part of the [custom key stores][1] feature in KMS,
+ # which combines the convenience and extensive integration of KMS with
+ # the isolation and control of a key store that you own and manage.
+ #
+ # By default, this operation returns information about all custom key
+ # stores in the account and Region. To get only information about a
+ # particular custom key store, use either the `CustomKeyStoreName` or
+ # `CustomKeyStoreId` parameter (but not both).
+ #
+ # To determine whether the custom key store is connected to its CloudHSM
+ # cluster or external key store proxy, use the `ConnectionState` element
+ # in the response. If an attempt to connect the custom key store failed,
+ # the `ConnectionState` value is `FAILED` and the `ConnectionErrorCode`
+ # element in the response indicates the cause of the failure. For help
+ # interpreting the `ConnectionErrorCode`, see CustomKeyStoresListEntry.
+ #
+ # Custom key stores have a `DISCONNECTED` connection state if the key
+ # store has never been connected or you used the
+ # DisconnectCustomKeyStore operation to disconnect it. Otherwise, the
+ # connection state is `CONNECTED`. If your custom key store connection
+ # state is `CONNECTED` but you are having trouble using it, verify that
+ # the backing store is active and available. For a CloudHSM key store,
+ # verify that the associated CloudHSM cluster is active and contains the
+ # minimum number of HSMs required for the operation, if any.
For a
+ # external key store, verify that the external key store proxy and its
+ # associated external key manager are reachable and enabled.
+ #
+ # For help repairing your CloudHSM key store, see [Troubleshooting
+ # CloudHSM key stores][2]. For help repairing your external key store,
+ # see [Troubleshooting external key stores][3]. Both topics are in the
+ # *Key Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a
+ # custom key store in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:DescribeCustomKeyStores][4] (IAM
+ # policy)
+ #
+ # **Related operations:**
+ #
+ # * ConnectCustomKeyStore
+ #
+ # * CreateCustomKeyStore
+ #
+ # * DeleteCustomKeyStore
+ #
+ # * DisconnectCustomKeyStore
+ #
+ # * UpdateCustomKeyStore
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [String] :custom_key_store_id
+ # Gets only information about the specified custom key store. Enter the
+ # key store ID.
+ #
+ # By default, this operation gets information about all custom key
+ # stores in the account and Region. To limit the output to a particular
+ # custom key store, provide either the `CustomKeyStoreId` or
+ # `CustomKeyStoreName` parameter, but not both.
+ #
+ # @option params [String] :custom_key_store_name
+ # Gets only information about the specified custom key store. Enter the
+ # friendly name of the custom key store.
+ #
+ # By default, this operation gets information about all custom key
+ # stores in the account and Region. To limit the output to a particular
+ # custom key store, provide either the `CustomKeyStoreId` or
+ # `CustomKeyStoreName` parameter, but not both.
+ #
+ # @option params [Integer] :limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # @option params [String] :marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ #
+ # @return [Types::DescribeCustomKeyStoresResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeCustomKeyStoresResponse#custom_key_stores #custom_key_stores} => Array<Types::CustomKeyStoresListEntry>
+ # * {Types::DescribeCustomKeyStoresResponse#next_marker #next_marker} => String
+ # * {Types::DescribeCustomKeyStoresResponse#truncated #truncated} => Boolean
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To get detailed information about custom key stores in the account and Region
+ #
+ # # This example gets detailed information about all AWS KMS custom key stores in an AWS account and Region. To get all key
+ # # stores, do not enter a custom key store name or ID.
+ #
+ # resp = client.describe_custom_key_stores({
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # custom_key_stores: [
+ # ], # Details about each custom key store in the account and Region.
+ # }
+ #
+ # @example Example: To get detailed information about an AWS CloudHSM key store by specifying its friendly name
+ #
+ # # This example gets detailed information about a particular AWS CloudHSM key store by specifying its friendly name. To
+ # # limit the output to a particular custom key store, provide either the custom key store name or ID.
+ #
+ # resp = client.describe_custom_key_stores({
+ # custom_key_store_name: "ExampleKeyStore", # The friendly name of the custom key store.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # custom_key_stores: [
+ # {
+ # cloud_hsm_cluster_id: "cluster-1a23b4cdefg",
+ # connection_state: "CONNECTED",
+ # creation_date: Time.at(1499288695.918),
+ # custom_key_store_id: "cks-1234567890abcdef0",
+ # custom_key_store_name: "ExampleKeyStore",
+ # custom_key_store_type: "AWS_CLOUDHSM",
+ # trust_anchor_certificate: "",
+ # },
+ # ], # Detailed information about the specified custom key store.
+ # }
+ #
+ # @example Example: To get detailed information about an external key store by specifying its ID
+ #
+ # # This example gets detailed information about an external key store by specifying its ID. The example external key store
+ # # proxy uses public endpoint connectivity.
+ #
+ # resp = client.describe_custom_key_stores({
+ # custom_key_store_id: "cks-9876543210fedcba9", # The ID of the custom key store.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # custom_key_stores: [
+ # {
+ # connection_state: "CONNECTED",
+ # creation_date: Time.at(1599288695.918),
+ # custom_key_store_id: "cks-9876543210fedcba9",
+ # custom_key_store_name: "ExampleExternalKeyStore",
+ # custom_key_store_type: "EXTERNAL_KEY_STORE",
+ # xks_proxy_configuration: {
+ # access_key_id: "ABCDE12345670EXAMPLE",
+ # connectivity: "PUBLIC_ENDPOINT",
+ # uri_endpoint: "https://myproxy.xks.example.com",
+ # uri_path: "/kms/xks/v1",
+ # },
+ # },
+ # ], # Detailed information about the specified custom key store.
+ # }
+ #
+ # @example Example: To get detailed information about an external key store VPC endpoint connectivity by specifying its friendly name
+ #
+ # # This example gets detailed information about a particular external key store by specifying its friendly name. To limit
+ # # the output to a particular custom key store, provide either the custom key store name or ID. The proxy URI path for this
+ # # external key store includes an optional prefix. Also, because this example external key store uses VPC endpoint
+ # # connectivity, the response includes the associated VPC endpoint service name.
+ #
+ # resp = client.describe_custom_key_stores({
+ # custom_key_store_name: "VPCExternalKeystore",
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # custom_key_stores: [
+ # {
+ # connection_state: "CONNECTED",
+ # creation_date: Time.at(1643057863.842),
+ # custom_key_store_id: "cks-876543210fedcba98",
+ # custom_key_store_name: "ExampleVPCExternalKeyStore",
+ # custom_key_store_type: "EXTERNAL_KEY_STORE",
+ # xks_proxy_configuration: {
+ # access_key_id: "ABCDE12345670EXAMPLE",
+ # connectivity: "VPC_ENDPOINT_SERVICE",
+ # uri_endpoint: "https://myproxy-private.xks.example.com",
+ # uri_path: "/example-prefix/kms/xks/v1",
+ # vpc_endpoint_service_name: "com.amazonaws.vpce.us-east-1.vpce-svc-example1",
+ # },
+ # },
+ # ], # Detailed information about the specified custom key store.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_custom_key_stores({
+ # custom_key_store_id: "CustomKeyStoreIdType",
+ # custom_key_store_name: "CustomKeyStoreNameType",
+ # limit: 1,
+ # marker: "MarkerType",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.custom_key_stores #=> Array
+ # resp.custom_key_stores[0].custom_key_store_id #=> String
+ # resp.custom_key_stores[0].custom_key_store_name #=> String
+ # resp.custom_key_stores[0].cloud_hsm_cluster_id #=> String
+ # resp.custom_key_stores[0].trust_anchor_certificate #=> String
+ # resp.custom_key_stores[0].connection_state #=> String, one of "CONNECTED", "CONNECTING", "FAILED", "DISCONNECTED", "DISCONNECTING"
+ # resp.custom_key_stores[0].connection_error_code #=> String, one of "INVALID_CREDENTIALS", "CLUSTER_NOT_FOUND", "NETWORK_ERRORS", "INTERNAL_ERROR", "INSUFFICIENT_CLOUDHSM_HSMS", "USER_LOCKED_OUT", "USER_NOT_FOUND", "USER_LOGGED_IN", "SUBNET_NOT_FOUND", "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", "XKS_PROXY_ACCESS_DENIED", "XKS_PROXY_NOT_REACHABLE", "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND", "XKS_PROXY_INVALID_RESPONSE", "XKS_PROXY_INVALID_CONFIGURATION", "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION", "XKS_PROXY_TIMED_OUT", "XKS_PROXY_INVALID_TLS_CONFIGURATION"
+ # resp.custom_key_stores[0].creation_date #=> Time
+ # resp.custom_key_stores[0].custom_key_store_type #=> String, one of "AWS_CLOUDHSM", "EXTERNAL_KEY_STORE"
+ # resp.custom_key_stores[0].xks_proxy_configuration.connectivity #=> String, one of "PUBLIC_ENDPOINT", "VPC_ENDPOINT_SERVICE"
+ # resp.custom_key_stores[0].xks_proxy_configuration.access_key_id #=> String
+ # resp.custom_key_stores[0].xks_proxy_configuration.uri_endpoint #=> String
+ # resp.custom_key_stores[0].xks_proxy_configuration.uri_path #=> String
+ # resp.custom_key_stores[0].xks_proxy_configuration.vpc_endpoint_service_name #=> String
+ # resp.next_marker #=> String
+ # resp.truncated #=> Boolean
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStores AWS API Documentation
+ #
+ # @overload describe_custom_key_stores(params = {})
+ # @param [Hash] params ({})
+ def describe_custom_key_stores(params = {}, options = {})
+ req = build_request(:describe_custom_key_stores, params)
+ req.send_request(options)
+ end
+
+ # Provides detailed information about a KMS key. You can run
+ # `DescribeKey` on a [customer managed key][1] or an [Amazon Web
+ # Services managed key][2].
+ #
+ # This detailed information includes the key ARN, creation date (and
+ # deletion date, if applicable), the key state, and the origin and
+ # expiration date (if any) of the key material.
It includes fields, like
+ # `KeySpec`, that help you distinguish different types of KMS keys. It
+ # also displays the key usage (encryption, signing, or generating and
+ # verifying MACs) and the algorithms that the KMS key supports.
+ #
+ # For [multi-Region
+ # keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html),
+ # `DescribeKey` displays the primary key and all related replica keys.
+ # For KMS keys in [CloudHSM key
+ # stores](https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html), it includes
+ # information about the key store, such as the key store ID and the
+ # CloudHSM cluster ID. For KMS keys in [external key
+ # stores](https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html), it includes
+ # the custom key store ID and the ID of the external key.
+ #
+ # `DescribeKey` does not return the following information:
+ #
+ # * Aliases associated with the KMS key. To get this information, use
+ # ListAliases.
+ #
+ # * Whether automatic key rotation is enabled on the KMS key. To get
+ # this information, use GetKeyRotationStatus. Also, some key states
+ # prevent a KMS key from being automatically rotated. For details, see
+ # [How Automatic Key Rotation Works][3] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # * Tags on the KMS key. To get this information, use ListResourceTags.
+ #
+ # * Key policies and grants on the KMS key. To get this information, use
+ # GetKeyPolicy and ListGrants.
+ #
+ # In general, `DescribeKey` is a non-mutating operation. It returns data
+ # about KMS keys, but doesn't change them. However, Amazon Web Services
+ # services use `DescribeKey` to create [Amazon Web Services managed
+ # keys][2] from a *predefined Amazon Web Services alias* with no key ID.
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
+ #
+ # **Required permissions**\: [kms:DescribeKey][4] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * GetKeyPolicy
+ #
+ # * GetKeyRotationStatus
+ #
+ # * ListAliases
+ #
+ # * ListGrants
+ #
+ # * ListKeys
+ #
+ # * ListResourceTags
+ #
+ # * ListRetirableGrants
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Describes the specified KMS key.
+ #
+ # If you specify a predefined Amazon Web Services alias (an Amazon Web
+ # Services alias with no key ID), KMS associates the alias with an
+ # [Amazon Web Services managed key][1] and returns its `KeyId` and `Arn`
+ # in the response.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::DescribeKeyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeKeyResponse#key_metadata #key_metadata} => Types::KeyMetadata
+ #
+ #
+ # @example Example: To get details about a KMS key
+ #
+ # # The following example gets metadata for a symmetric encryption KMS key.
+ #
+ # resp = client.describe_key({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_metadata: {
+ # aws_account_id: "111122223333",
+ # arn: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # creation_date: Time.parse("2017-07-05T14:04:55-07:00"),
+ # customer_master_key_spec: "SYMMETRIC_DEFAULT",
+ # description: "",
+ # enabled: true,
+ # encryption_algorithms: [
+ # "SYMMETRIC_DEFAULT",
+ # ],
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_manager: "CUSTOMER",
+ # key_spec: "SYMMETRIC_DEFAULT",
+ # key_state: "Enabled",
+ # key_usage: "ENCRYPT_DECRYPT",
+ # multi_region: false,
+ # origin: "AWS_KMS",
+ # }, # An object that contains information about the specified KMS key.
+ # }
+ #
+ # @example Example: To get details about an RSA asymmetric KMS key
+ #
+ # # The following example gets metadata for an asymmetric RSA KMS key used for signing and verification.
+ #
+ # resp = client.describe_key({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key.
+ # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "111122223333", + # arn: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # creation_date: Time.parse(1571767572.317), + # customer_master_key_spec: "RSA_2048", + # description: "", + # enabled: false, + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", + # key_manager: "CUSTOMER", + # key_spec: "RSA_2048", + # key_state: "Disabled", + # key_usage: "SIGN_VERIFY", + # multi_region: false, + # origin: "AWS_KMS", + # signing_algorithms: [ + # "RSASSA_PKCS1_V1_5_SHA_256", + # "RSASSA_PKCS1_V1_5_SHA_384", + # "RSASSA_PKCS1_V1_5_SHA_512", + # "RSASSA_PSS_SHA_256", + # "RSASSA_PSS_SHA_384", + # "RSASSA_PSS_SHA_512", + # ], + # }, # An object that contains information about the specified KMS key. + # } + # + # @example Example: To get details about a multi-Region key + # + # # The following example gets metadata for a multi-Region replica key. This multi-Region key is a symmetric encryption key. + # # DescribeKey returns information about the primary key and all of its replicas. + # + # resp = client.describe_key({ + # key_id: "arn:aws:kms:ap-northeast-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "111122223333", + # arn: "arn:aws:kms:ap-northeast-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # creation_date: Time.parse(1586329200.918), + # customer_master_key_spec: "SYMMETRIC_DEFAULT", + # description: "", + # enabled: true, + # encryption_algorithms: [ + # "SYMMETRIC_DEFAULT", + # ], + # key_id: "mrk-1234abcd12ab34cd56ef1234567890ab", + # key_manager: "CUSTOMER", + # key_state: "Enabled", + # key_usage: "ENCRYPT_DECRYPT", + # multi_region: true, + # multi_region_configuration: { + # multi_region_key_type: "PRIMARY", + # primary_key: { + # arn: "arn:aws:kms:us-west-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "us-west-2", + # }, + # replica_keys: [ + # { + # arn: "arn:aws:kms:eu-west-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "eu-west-1", + # }, + # { + # arn: "arn:aws:kms:ap-northeast-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "ap-northeast-1", + # }, + # { + # arn: "arn:aws:kms:sa-east-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "sa-east-1", + # }, + # ], + # }, + # origin: "AWS_KMS", + # }, # An object that contains information about the specified KMS key. + # } + # + # @example Example: To get details about an HMAC KMS key + # + # # The following example gets the metadata of an HMAC KMS key. + # + # resp = client.describe_key({ + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key. 
+ # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "123456789012", + # arn: "arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # creation_date: Time.parse(1566160362.664), + # customer_master_key_spec: "HMAC_256", + # description: "Development test key", + # enabled: true, + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", + # key_manager: "CUSTOMER", + # key_state: "Enabled", + # key_usage: "GENERATE_VERIFY_MAC", + # mac_algorithms: [ + # "HMAC_SHA_256", + # ], + # multi_region: false, + # origin: "AWS_KMS", + # }, # An object that contains information about the specified KMS key. + # } + # + # @example Example: To get details about a KMS key in an AWS CloudHSM key store + # + # # The following example gets the metadata of a KMS key in an AWS CloudHSM key store. + # + # resp = client.describe_key({ + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "123456789012", + # arn: "arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # cloud_hsm_cluster_id: "cluster-1a23b4cdefg", + # creation_date: Time.parse(1646160362.664), + # custom_key_store_id: "cks-1234567890abcdef0", + # customer_master_key_spec: "SYMMETRIC_DEFAULT", + # description: "CloudHSM key store test key", + # enabled: true, + # encryption_algorithms: [ + # "SYMMETRIC_DEFAULT", + # ], + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", + # key_manager: "CUSTOMER", + # key_spec: "SYMMETRIC_DEFAULT", + # key_state: "Enabled", + # key_usage: "ENCRYPT_DECRYPT", + # multi_region: false, + # origin: "AWS_CLOUDHSM", + # }, # An object that contains information about the specified KMS key. + # } + # + # @example Example: To get details about a KMS key in an external key store + # + # # The following example gets the metadata of a KMS key in an external key store. + # + # resp = client.describe_key({ + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # An identifier for the KMS key. You can use the key ID, key ARN, alias name, alias ARN of the KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # key_metadata: { + # aws_account_id: "123456789012", + # arn: "arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # creation_date: Time.parse(1646160362.664), + # custom_key_store_id: "cks-1234567890abcdef0", + # customer_master_key_spec: "SYMMETRIC_DEFAULT", + # description: "External key store test key", + # enabled: true, + # encryption_algorithms: [ + # "SYMMETRIC_DEFAULT", + # ], + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", + # key_manager: "CUSTOMER", + # key_spec: "SYMMETRIC_DEFAULT", + # key_state: "Enabled", + # key_usage: "ENCRYPT_DECRYPT", + # multi_region: false, + # origin: "EXTERNAL_KEY_STORE", + # xks_key_configuration: { + # id: "bb8562717f809024", + # }, + # }, # An object that contains information about the specified KMS key. 
+ # } + # + # @example Request syntax with placeholder values + # + # resp = client.describe_key({ + # key_id: "KeyIdType", # required + # grant_tokens: ["GrantTokenType"], + # }) + # + # @example Response structure + # + # resp.key_metadata.aws_account_id #=> String + # resp.key_metadata.key_id #=> String + # resp.key_metadata.arn #=> String + # resp.key_metadata.creation_date #=> Time + # resp.key_metadata.enabled #=> Boolean + # resp.key_metadata.description #=> String + # resp.key_metadata.key_usage #=> String, one of "SIGN_VERIFY", "ENCRYPT_DECRYPT", "GENERATE_VERIFY_MAC" + # resp.key_metadata.key_state #=> String, one of "Creating", "Enabled", "Disabled", "PendingDeletion", "PendingImport", "PendingReplicaDeletion", "Unavailable", "Updating" + # resp.key_metadata.deletion_date #=> Time + # resp.key_metadata.valid_to #=> Time + # resp.key_metadata.origin #=> String, one of "AWS_KMS", "EXTERNAL", "AWS_CLOUDHSM", "EXTERNAL_KEY_STORE" + # resp.key_metadata.custom_key_store_id #=> String + # resp.key_metadata.cloud_hsm_cluster_id #=> String + # resp.key_metadata.expiration_model #=> String, one of "KEY_MATERIAL_EXPIRES", "KEY_MATERIAL_DOES_NOT_EXPIRE" + # resp.key_metadata.key_manager #=> String, one of "AWS", "CUSTOMER" + # resp.key_metadata.customer_master_key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.key_metadata.key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.key_metadata.encryption_algorithms #=> Array + # resp.key_metadata.encryption_algorithms[0] #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE" + # resp.key_metadata.signing_algorithms #=> Array + # resp.key_metadata.signing_algorithms[0] #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA" + # resp.key_metadata.multi_region #=> Boolean + # resp.key_metadata.multi_region_configuration.multi_region_key_type #=> String, one of "PRIMARY", "REPLICA" + # resp.key_metadata.multi_region_configuration.primary_key.arn #=> String + # resp.key_metadata.multi_region_configuration.primary_key.region #=> String + # resp.key_metadata.multi_region_configuration.replica_keys #=> Array + # resp.key_metadata.multi_region_configuration.replica_keys[0].arn #=> String + # resp.key_metadata.multi_region_configuration.replica_keys[0].region #=> String + # resp.key_metadata.pending_deletion_window_in_days #=> Integer + # resp.key_metadata.mac_algorithms #=> Array + # resp.key_metadata.mac_algorithms[0] #=> String, one of "HMAC_SHA_224", "HMAC_SHA_256", "HMAC_SHA_384", "HMAC_SHA_512" + # resp.key_metadata.xks_key_configuration.id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey AWS API Documentation + # + # @overload describe_key(params = {}) + # @param [Hash] params ({}) + def describe_key(params = {}, options = {}) + req = build_request(:describe_key, params) + req.send_request(options) + end + + # Sets the state of a KMS key to disabled. This change temporarily + # prevents use of the KMS key for [cryptographic operations][1]. 
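+ #
+ # For instance, a disabled key rejects cryptographic operations until it
+ # is re-enabled; the key ID below is a placeholder:
+ #
+ # client.disable_key(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab")
+ # begin
+ # client.encrypt(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", plaintext: "data")
+ # rescue Aws::KMS::Errors::DisabledException
+ # # the key must be re-enabled with EnableKey before it can be used again
+ # end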
+ #
+ # For more information about how key state affects the use of a KMS key,
+ # see [Key states of KMS keys][2] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][2] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:DisableKey][3] (key policy)
+ #
+ # **Related operations**\: EnableKey
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies the KMS key to disable.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To disable a KMS key
+ #
+ # # The following example disables the specified KMS key.
+ #
+ # resp = client.disable_key({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to disable. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.disable_key({
+ # key_id: "KeyIdType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey AWS API Documentation
+ #
+ # @overload disable_key(params = {})
+ # @param [Hash] params ({})
+ def disable_key(params = {}, options = {})
+ req = build_request(:disable_key, params)
+ req.send_request(options)
+ end
+
+ # Disables [automatic rotation of the key material][1] of the specified
+ # symmetric encryption KMS key.
+ #
+ # Automatic key rotation is supported only on symmetric encryption KMS
+ # keys. You cannot enable automatic rotation of [asymmetric KMS
+ # keys][2], [HMAC KMS keys][3], KMS keys with [imported key
+ # material][4], or KMS keys in a [custom key store][5]. To enable or
+ # disable automatic rotation of a set of related [multi-Region keys][6],
+ # set the property on the primary key.
+ #
+ # You can enable (EnableKeyRotation) and disable automatic rotation of
+ # the key material in [customer managed KMS keys][7]. Key material
+ # rotation of [Amazon Web Services managed KMS keys][8] is not
+ # configurable. KMS always rotates the key material every year.
+ # Rotation of [Amazon Web Services owned KMS keys][9] varies.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years to every year. For details, see
+ # EnableKeyRotation.
+ #
+ #
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][10] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:DisableKeyRotation][11] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * EnableKeyRotation
+ #
+ # * GetKeyRotationStatus
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate
+ # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk
+ # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies a symmetric encryption KMS key. You cannot enable or
+ # disable automatic rotation of [asymmetric KMS keys][1], [HMAC KMS
+ # keys][2], KMS keys with [imported key material][3], or KMS keys in a
+ # [custom key store][4].
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To disable automatic rotation of key material
+ #
+ # # The following example disables automatic annual rotation of the key material for the specified KMS key.
+ #
+ # resp = client.disable_key_rotation({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose key material will no longer be rotated. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.disable_key_rotation({
+ # key_id: "KeyIdType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation AWS API Documentation
+ #
+ # @overload disable_key_rotation(params = {})
+ # @param [Hash] params ({})
+ def disable_key_rotation(params = {}, options = {})
+ req = build_request(:disable_key_rotation, params)
+ req.send_request(options)
+ end
+
+ # Disconnects the [custom key store][1] from its backing key store. This
+ # operation disconnects a CloudHSM key store from its associated
+ # CloudHSM cluster or disconnects an external key store from the
+ # external key store proxy that communicates with your external key
+ # manager.
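+ #
+ # For instance, you might disconnect a key store and then poll its
+ # connection state, which eventually reports `DISCONNECTED`; the store
+ # ID below is a placeholder:
+ #
+ # client.disconnect_custom_key_store(custom_key_store_id: "cks-1234567890abcdef0")
+ # resp = client.describe_custom_key_stores(custom_key_store_id: "cks-1234567890abcdef0")
+ # resp.custom_key_stores[0].connection_state #=> "DISCONNECTED"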
+ # + # This operation is part of the [custom key stores][1] feature in KMS, + # which combines the convenience and extensive integration of KMS with + # the isolation and control of a key store that you own and manage. + # + # While a custom key store is disconnected, you can manage the custom + # key store and its KMS keys, but you cannot create or use its KMS keys. + # You can reconnect the custom key store at any time. + # + # While a custom key store is disconnected, all attempts to create KMS + # keys in the custom key store or to use existing KMS keys in + # [cryptographic operations][2] will fail. This action can prevent users + # from storing and accessing sensitive data. + # + # + # + # When you disconnect a custom key store, its `ConnectionState` changes + # to `Disconnected`. To find the connection state of a custom key store, + # use the DescribeCustomKeyStores operation. To reconnect a custom key + # store, use the ConnectCustomKeyStore operation. + # + # If the operation succeeds, it returns a JSON object with no + # properties. + # + # **Cross-account use**\: No. You cannot perform this operation on a + # custom key store in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:DisconnectCustomKeyStore][3] (IAM + # policy) + # + # **Related operations:** + # + # * ConnectCustomKeyStore + # + # * CreateCustomKeyStore + # + # * DeleteCustomKeyStore + # + # * DescribeCustomKeyStores + # + # * UpdateCustomKeyStore + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :custom_key_store_id + # Enter the ID of the custom key store you want to disconnect. To find + # the ID of a custom key store, use the DescribeCustomKeyStores + # operation. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To disconnect a custom key store from its CloudHSM cluster + # + # # This example disconnects an AWS KMS custom key store from its backing key store. For an AWS CloudHSM key store, it + # # disconnects the key store from its AWS CloudHSM cluster. For an external key store, it disconnects the key store from + # # the external key store proxy that communicates with your external key manager. This operation doesn't return any data. + # # To verify that the custom key store is disconnected, use the DescribeCustomKeyStores operation. + # + # resp = client.disconnect_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store. + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.disconnect_custom_key_store({ + # custom_key_store_id: "CustomKeyStoreIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStore AWS API Documentation + # + # @overload disconnect_custom_key_store(params = {}) + # @param [Hash] params ({}) + def disconnect_custom_key_store(params = {}, options = {}) + req = build_request(:disconnect_custom_key_store, params) + req.send_request(options) + end + + # Sets the key state of a KMS key to enabled. This allows you to use the + # KMS key for [cryptographic operations][1]. 
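+ #
+ # For instance, you can confirm the change with DescribeKey; the key ID
+ # below is a placeholder:
+ #
+ # client.enable_key(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab")
+ # resp = client.describe_key(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab")
+ # resp.key_metadata.key_state #=> "Enabled"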
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][2] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:EnableKey][3] (key policy)
+ #
+ # **Related operations**\: DisableKey
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies the KMS key to enable.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To enable a KMS key
+ #
+ # # The following example enables the specified KMS key.
+ #
+ # resp = client.enable_key({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to enable. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.enable_key({
+ # key_id: "KeyIdType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey AWS API Documentation
+ #
+ # @overload enable_key(params = {})
+ # @param [Hash] params ({})
+ def enable_key(params = {}, options = {})
+ req = build_request(:enable_key, params)
+ req.send_request(options)
+ end
+
+ # Enables [automatic rotation of the key material][1] of the specified
+ # symmetric encryption KMS key.
+ #
+ # When you enable automatic rotation of a [customer managed KMS key][2],
+ # KMS rotates the key material of the KMS key one year (approximately
+ # 365 days) from the enable date and every year thereafter. You can
+ # monitor rotation of the key material for your KMS keys in CloudTrail
+ # and Amazon CloudWatch. To disable rotation of the key material in a
+ # customer managed KMS key, use the DisableKeyRotation operation.
+ #
+ # Automatic key rotation is supported only on [symmetric encryption KMS
+ # keys][3]. You cannot enable automatic rotation of [asymmetric KMS
+ # keys][4], [HMAC KMS keys][5], KMS keys with [imported key
+ # material][6], or KMS keys in a [custom key store][7]. To enable or
+ # disable automatic rotation of a set of related [multi-Region keys][8],
+ # set the property on the primary key.
+ #
+ # You cannot enable or disable automatic rotation of [Amazon Web
+ # Services managed KMS keys][9]. KMS always rotates the key material of
+ # Amazon Web Services managed keys every year. Rotation of [Amazon Web
+ # Services owned KMS keys][10] varies.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years (approximately 1,095 days) to
+ # every year (approximately 365 days).
+ #
+ # New Amazon Web Services managed keys are automatically rotated one
+ # year after they are created, and approximately every year thereafter.
+ # + # Existing Amazon Web Services managed keys are automatically rotated + # one year after their most recent rotation, and every year thereafter. + # + # + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][11] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:EnableKeyRotation][12] (key policy) + # + # **Related operations:** + # + # * DisableKeyRotation + # + # * GetKeyRotationStatus + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [12]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Identifies a symmetric encryption KMS key. You cannot enable automatic + # rotation of [asymmetric KMS keys][1], [HMAC KMS keys][2], KMS keys + # with [imported key material][3], or KMS keys in a [custom key + # store][4]. To enable or disable automatic rotation of a set of related + # [multi-Region keys][5], set the property on the primary key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To enable automatic rotation of key material + # + # # The following example enables automatic annual rotation of the key material for the specified KMS key. + # + # resp = client.enable_key_rotation({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose key material will be rotated annually. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. 
+ # }) + # + # @example Request syntax with placeholder values + # + # resp = client.enable_key_rotation({ + # key_id: "KeyIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation AWS API Documentation + # + # @overload enable_key_rotation(params = {}) + # @param [Hash] params ({}) + def enable_key_rotation(params = {}, options = {}) + req = build_request(:enable_key_rotation, params) + req.send_request(options) + end + + # Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a + # symmetric or asymmetric KMS key with a `KeyUsage` of + # `ENCRYPT_DECRYPT`. + # + # You can use this operation to encrypt small amounts of arbitrary data, + # such as a personal identifier or database password, or other sensitive + # information. You don't need to use the `Encrypt` operation to encrypt + # a data key. The GenerateDataKey and GenerateDataKeyPair operations + # return a plaintext data key and an encrypted copy of that data key. + # + # If you use a symmetric encryption KMS key, you can use an encryption + # context to add additional security to your encryption operation. If + # you specify an `EncryptionContext` when encrypting data, you must + # specify the same encryption context (a case-sensitive exact match) + # when decrypting the data. Otherwise, the request to decrypt fails with + # an `InvalidCiphertextException`. For more information, see [Encryption + # Context][1] in the *Key Management Service Developer Guide*. + # + # If you specify an asymmetric KMS key, you must also specify the + # encryption algorithm. The algorithm must be compatible with the KMS + # key spec. + # + # When you use an asymmetric KMS key to encrypt or reencrypt data, be + # sure to record the KMS key and encryption algorithm that you choose. + # You will be required to provide the same KMS key and encryption + # algorithm when you decrypt the data. If the KMS key and algorithm do + # not match the values used to encrypt the data, the decrypt operation + # fails. + # + # You are not required to supply the key ID and encryption algorithm + # when you decrypt with symmetric encryption KMS keys because KMS stores + # this information in the ciphertext blob. KMS cannot store metadata in + # ciphertext generated with asymmetric keys. The standard format for + # asymmetric key ciphertext does not include configurable fields. + # + # The maximum size of the data that you can encrypt varies with the type + # of KMS key and the encryption algorithm that you choose. + # + # * Symmetric encryption KMS keys + # + # * `SYMMETRIC_DEFAULT`\: 4096 bytes + # + # ^ + # + # * `RSA_2048` + # + # * `RSAES_OAEP_SHA_1`\: 214 bytes + # + # * `RSAES_OAEP_SHA_256`\: 190 bytes + # + # * `RSA_3072` + # + # * `RSAES_OAEP_SHA_1`\: 342 bytes + # + # * `RSAES_OAEP_SHA_256`\: 318 bytes + # + # * `RSA_4096` + # + # * `RSAES_OAEP_SHA_1`\: 470 bytes + # + # * `RSAES_OAEP_SHA_256`\: 446 bytes + # + # * `SM2PKE`\: 1024 bytes (China Regions only) + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][2] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: Yes. To perform this operation with a KMS key + # in a different Amazon Web Services account, specify the key ARN or + # alias ARN in the value of the `KeyId` parameter. 
+ #
+ # **Required permissions**\: [kms:Encrypt][3] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * Decrypt
+ #
+ # * GenerateDataKey
+ #
+ # * GenerateDataKeyPair
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies the KMS key to use in the encryption operation. The KMS key
+ # must have a `KeyUsage` of `ENCRYPT_DECRYPT`. To find the `KeyUsage` of
+ # a KMS key, use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [required, String, StringIO, File] :plaintext
+ # Data to be encrypted.
+ #
+ # @option params [Hash<String,String>] :encryption_context
+ # Specifies the encryption context that will be used to encrypt the
+ # data. An encryption context is valid only for [cryptographic
+ # operations][1] with a symmetric encryption KMS key. The standard
+ # asymmetric encryption algorithms and HMAC algorithms that KMS uses do
+ # not support an encryption context.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][2] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @option params [String] :encryption_algorithm
+ # Specifies the encryption algorithm that KMS will use to encrypt the
+ # plaintext message. The algorithm must be compatible with the KMS key
+ # that you specify.
+ #
+ # This parameter is required only for asymmetric KMS keys.
The default + # value, `SYMMETRIC_DEFAULT`, is the algorithm used for symmetric + # encryption KMS keys. If you are using an asymmetric KMS key, we + # recommend RSAES\_OAEP\_SHA\_256. + # + # The SM2PKE algorithm is only available in China Regions. + # + # @return [Types::EncryptResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::EncryptResponse#ciphertext_blob #ciphertext_blob} => String + # * {Types::EncryptResponse#key_id #key_id} => String + # * {Types::EncryptResponse#encryption_algorithm #encryption_algorithm} => String + # + # + # @example Example: To encrypt data + # + # # The following example encrypts data with the specified KMS key. + # + # resp = client.encrypt({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to use for encryption. You can use the key ID or Amazon Resource Name (ARN) of the KMS key, or the name or ARN of an alias that refers to the KMS key. + # plaintext: "", # The data to encrypt. + # }) + # + # resp.to_h outputs the following: + # { + # ciphertext_blob: "", # The encrypted data (ciphertext). + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key that was used to encrypt the data. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.encrypt({ + # key_id: "KeyIdType", # required + # plaintext: "data", # required + # encryption_context: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # grant_tokens: ["GrantTokenType"], + # encryption_algorithm: "SYMMETRIC_DEFAULT", # accepts SYMMETRIC_DEFAULT, RSAES_OAEP_SHA_1, RSAES_OAEP_SHA_256, SM2PKE + # }) + # + # @example Response structure + # + # resp.ciphertext_blob #=> String + # resp.key_id #=> String + # resp.encryption_algorithm #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt AWS API Documentation + # + # @overload encrypt(params = {}) + # @param [Hash] params ({}) + def encrypt(params = {}, options = {}) + req = build_request(:encrypt, params) + req.send_request(options) + end + + # Returns a unique symmetric data key for use outside of KMS. This + # operation returns a plaintext copy of the data key and a copy that is + # encrypted under a symmetric encryption KMS key that you specify. The + # bytes in the plaintext key are random; they are not related to the + # caller or the KMS key. You can use the plaintext key to encrypt your + # data outside of KMS and store the encrypted data key with the + # encrypted data. + # + # To generate a data key, specify the symmetric encryption KMS key that + # will be used to encrypt the data key. You cannot use an asymmetric KMS + # key to encrypt data keys. To get the type of your KMS key, use the + # DescribeKey operation. + # + # You must also specify the length of the data key. Use either the + # `KeySpec` or `NumberOfBytes` parameters (but not both). For 128-bit + # and 256-bit data keys, use the `KeySpec` parameter. + # + # To generate a 128-bit SM4 data key (China Regions only), specify a + # `KeySpec` value of `AES_128` or a `NumberOfBytes` value of `16`. The + # symmetric encryption key used in China Regions to encrypt your data + # key is an SM4 encryption key. + # + # To get only an encrypted copy of the data key, use + # GenerateDataKeyWithoutPlaintext. 
To generate an asymmetric data key + # pair, use the GenerateDataKeyPair or + # GenerateDataKeyPairWithoutPlaintext operation. To get a + # cryptographically secure random byte string, use GenerateRandom. + # + # You can use an optional encryption context to add additional security + # to the encryption operation. If you specify an `EncryptionContext`, + # you must specify the same encryption context (a case-sensitive exact + # match) when decrypting the encrypted data key. Otherwise, the request + # to decrypt fails with an `InvalidCiphertextException`. For more + # information, see [Encryption Context][1] in the *Key Management + # Service Developer Guide*. + # + # Applications in Amazon Web Services Nitro Enclaves can call this + # operation by using the [Amazon Web Services Nitro Enclaves Development + # Kit][2]. For information about the supporting parameters, see [How + # Amazon Web Services Nitro Enclaves use KMS][3] in the *Key Management + # Service Developer Guide*. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][4] in the *Key + # Management Service Developer Guide*. + # + # **How to use your data key** + # + # We recommend that you use the following pattern to encrypt data + # locally in your application. You can write your own code or use a + # client-side encryption library, such as the [Amazon Web Services + # Encryption SDK][5], the [Amazon DynamoDB Encryption Client][6], or + # [Amazon S3 client-side encryption][7] to do these tasks for you. + # + # To encrypt data outside of KMS: + # + # 1. Use the `GenerateDataKey` operation to get a data key. + # + # 2. Use the plaintext data key (in the `Plaintext` field of the + # response) to encrypt your data outside of KMS. Then erase the + # plaintext data key from memory. + # + # 3. Store the encrypted data key (in the `CiphertextBlob` field of the + # response) with the encrypted data. + # + # To decrypt data outside of KMS: + # + # 1. Use the Decrypt operation to decrypt the encrypted data key. The + # operation returns a plaintext copy of the data key. + # + # 2. Use the plaintext data key to decrypt data outside of KMS, then + # erase the plaintext data key from memory. + # + # **Cross-account use**\: Yes. To perform this operation with a KMS key + # in a different Amazon Web Services account, specify the key ARN or + # alias ARN in the value of the `KeyId` parameter. + # + # **Required permissions**\: [kms:GenerateDataKey][8] (key policy) + # + # **Related operations:** + # + # * Decrypt + # + # * Encrypt + # + # * GenerateDataKeyPair + # + # * GenerateDataKeyPairWithoutPlaintext + # + # * GenerateDataKeyWithoutPlaintext + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # [2]: https://github.com/aws/aws-nitro-enclaves-sdk-c + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [5]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/ + # [6]: https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/ + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Specifies the symmetric encryption KMS key that encrypts the data key. 
+ #
+ # You cannot specify an asymmetric KMS key or a KMS key in a custom key
+ # store. To get the type and origin of your KMS key, use the DescribeKey
+ # operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [Hash<String,String>] :encryption_context
+ # Specifies the encryption context that will be used when encrypting the
+ # data key.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [Integer] :number_of_bytes
+ # Specifies the length of the data key in bytes. For example, use the
+ # value 64 to generate a 512-bit data key (64 bytes is 512 bits). For
+ # 128-bit (16-byte) and 256-bit (32-byte) data keys, use the `KeySpec`
+ # parameter.
+ #
+ # You must specify either the `KeySpec` or the `NumberOfBytes` parameter
+ # (but not both) in every `GenerateDataKey` request.
+ #
+ # @option params [String] :key_spec
+ # Specifies the length of the data key. Use `AES_128` to generate a
+ # 128-bit symmetric key, or `AES_256` to generate a 256-bit symmetric
+ # key.
+ #
+ # You must specify either the `KeySpec` or the `NumberOfBytes` parameter
+ # (but not both) in every `GenerateDataKey` request.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::GenerateDataKeyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GenerateDataKeyResponse#ciphertext_blob #ciphertext_blob} => String
+ # * {Types::GenerateDataKeyResponse#plaintext #plaintext} => String
+ # * {Types::GenerateDataKeyResponse#key_id #key_id} => String
+ #
+ #
+ # @example Example: To generate a data key
+ #
+ # # The following example generates a 256-bit symmetric data encryption key (data key) in two formats.
One is the
+ # # unencrypted (plaintext) data key, and the other is the data key encrypted with the specified KMS key.
+ #
+ # resp = client.generate_data_key({
+ # key_id: "alias/ExampleAlias", # The identifier of the KMS key to use to encrypt the data key. You can use the key ID or Amazon Resource Name (ARN) of the KMS key, or the name or ARN of an alias that refers to the KMS key.
+ # key_spec: "AES_256", # Specifies the type of data key to return.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # ciphertext_blob: "", # The encrypted data key.
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key that was used to encrypt the data key.
+ # plaintext: "", # The unencrypted (plaintext) data key.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.generate_data_key({
+ # key_id: "KeyIdType", # required
+ # encryption_context: {
+ # "EncryptionContextKey" => "EncryptionContextValue",
+ # },
+ # number_of_bytes: 1,
+ # key_spec: "AES_256", # accepts AES_256, AES_128
+ # grant_tokens: ["GrantTokenType"],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.ciphertext_blob #=> String
+ # resp.plaintext #=> String
+ # resp.key_id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey AWS API Documentation
+ #
+ # @overload generate_data_key(params = {})
+ # @param [Hash] params ({})
+ def generate_data_key(params = {}, options = {})
+ req = build_request(:generate_data_key, params)
+ req.send_request(options)
+ end
+
+ # Returns a unique asymmetric data key pair for use outside of KMS. This
+ # operation returns a plaintext public key, a plaintext private key, and
+ # a copy of the private key that is encrypted under the symmetric
+ # encryption KMS key you specify. You can use the data key pair to
+ # perform asymmetric cryptography and implement digital signatures
+ # outside of KMS. The bytes in the keys are random; they are not related
+ # to the caller or to the KMS key that is used to encrypt the private key.
+ #
+ # You can use the public key that `GenerateDataKeyPair` returns to
+ # encrypt data or verify a signature outside of KMS. Then, store the
+ # encrypted private key with the data. When you are ready to decrypt
+ # data or sign a message, you can use the Decrypt operation to decrypt
+ # the encrypted private key.
+ #
+ # To generate a data key pair, you must specify a symmetric encryption
+ # KMS key to encrypt the private key in a data key pair. You cannot use
+ # an asymmetric KMS key or a KMS key in a custom key store. To get the
+ # type and origin of your KMS key, use the DescribeKey operation.
+ #
+ # Use the `KeyPairSpec` parameter to choose an RSA or Elliptic Curve
+ # (ECC) data key pair. In China Regions, you can also choose an SM2 data
+ # key pair. KMS recommends that you use ECC key pairs for signing, and
+ # use RSA and SM2 key pairs for either encryption or signing, but not
+ # both. However, KMS cannot enforce any restrictions on the use of data
+ # key pairs outside of KMS.
+ #
+ # If you are using the data key pair to encrypt data, or for any
+ # operation where you don't immediately need a private key, consider
+ # using the GenerateDataKeyPairWithoutPlaintext operation.
+ # `GenerateDataKeyPairWithoutPlaintext` returns a plaintext public key
+ # and an encrypted private key, but omits the plaintext private key that
+ # you need only to decrypt ciphertext or sign a message.
Later, when you
+ # need to decrypt the data or sign a message, use the Decrypt operation
+ # to decrypt the encrypted private key in the data key pair.
+ #
+ # `GenerateDataKeyPair` returns a unique data key pair for each request.
+ # The bytes in the keys are random; they are not related to the caller
+ # or the KMS key that is used to encrypt the private key. The public key
+ # is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in [RFC
+ # 5280][1]. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as
+ # specified in [RFC 5958][2].
+ #
+ # You can use an optional encryption context to add additional security
+ # to the encryption operation. If you specify an `EncryptionContext`,
+ # you must specify the same encryption context (a case-sensitive exact
+ # match) when decrypting the encrypted data key. Otherwise, the request
+ # to decrypt fails with an `InvalidCiphertextException`. For more
+ # information, see [Encryption Context][3] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][4] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
+ #
+ # **Required permissions**\: [kms:GenerateDataKeyPair][5] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * Decrypt
+ #
+ # * Encrypt
+ #
+ # * GenerateDataKey
+ #
+ # * GenerateDataKeyPairWithoutPlaintext
+ #
+ # * GenerateDataKeyWithoutPlaintext
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc5280
+ # [2]: https://tools.ietf.org/html/rfc5958
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [Hash<String,String>] :encryption_context
+ # Specifies the encryption context that will be used when encrypting the
+ # private key in the data key pair.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [required, String] :key_id
+ # Specifies the symmetric encryption KMS key that encrypts the private
+ # key in the data key pair. You cannot specify an asymmetric KMS key or
+ # a KMS key in a custom key store. To get the type and origin of your
+ # KMS key, use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [required, String] :key_pair_spec
+ # Determines the type of data key pair that is generated.
+ #
+ # The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys
+ # to encrypt and decrypt or to sign and verify (but not both), and the
+ # rule that permits you to use ECC KMS keys only to sign and verify, are
+ # not effective on data key pairs, which are used outside of KMS. The
+ # SM2 key spec is only available in China Regions.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::GenerateDataKeyPairResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GenerateDataKeyPairResponse#private_key_ciphertext_blob #private_key_ciphertext_blob} => String
+ # * {Types::GenerateDataKeyPairResponse#private_key_plaintext #private_key_plaintext} => String
+ # * {Types::GenerateDataKeyPairResponse#public_key #public_key} => String
+ # * {Types::GenerateDataKeyPairResponse#key_id #key_id} => String
+ # * {Types::GenerateDataKeyPairResponse#key_pair_spec #key_pair_spec} => String
+ #
+ #
+ # @example Example: To generate an RSA key pair for encryption and decryption
+ #
+ # # This example generates an RSA data key pair for encryption and decryption. The operation returns a plaintext public key
+ # # and private key, and a copy of the private key that is encrypted under a symmetric encryption KMS key that you specify.
+ #
+ # resp = client.generate_data_key_pair({
+ # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ID of the symmetric encryption KMS key that encrypts the private RSA key in the data key pair.
+ # key_pair_spec: "RSA_3072", # The requested key spec of the RSA data key pair.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the symmetric encryption KMS key that was used to encrypt the private key.
+ # key_pair_spec: "RSA_3072", # The actual key spec of the RSA data key pair.
+ # private_key_ciphertext_blob: "", # The encrypted private key of the RSA data key pair.
+ # private_key_plaintext: "", # The plaintext private key of the RSA data key pair.
+ # public_key: "", # The public key (plaintext) of the RSA data key pair.
+ # } + # + # @example Request syntax with placeholder values + # + # resp = client.generate_data_key_pair({ + # encryption_context: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # key_id: "KeyIdType", # required + # key_pair_spec: "RSA_2048", # required, accepts RSA_2048, RSA_3072, RSA_4096, ECC_NIST_P256, ECC_NIST_P384, ECC_NIST_P521, ECC_SECG_P256K1, SM2 + # grant_tokens: ["GrantTokenType"], + # }) + # + # @example Response structure + # + # resp.private_key_ciphertext_blob #=> String + # resp.private_key_plaintext #=> String + # resp.public_key #=> String + # resp.key_id #=> String + # resp.key_pair_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SM2" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair AWS API Documentation + # + # @overload generate_data_key_pair(params = {}) + # @param [Hash] params ({}) + def generate_data_key_pair(params = {}, options = {}) + req = build_request(:generate_data_key_pair, params) + req.send_request(options) + end + + # Returns a unique asymmetric data key pair for use outside of KMS. This + # operation returns a plaintext public key and a copy of the private key + # that is encrypted under the symmetric encryption KMS key you specify. + # Unlike GenerateDataKeyPair, this operation does not return a plaintext + # private key. The bytes in the keys are random; they are not related to + # the caller or to the KMS key that is used to encrypt the private key. + # + # You can use the public key that `GenerateDataKeyPairWithoutPlaintext` + # returns to encrypt data or verify a signature outside of KMS. Then, + # store the encrypted private key with the data. When you are ready to + # decrypt data or sign a message, you can use the Decrypt operation to + # decrypt the encrypted private key. + # + # To generate a data key pair, you must specify a symmetric encryption + # KMS key to encrypt the private key in a data key pair. You cannot use + # an asymmetric KMS key or a KMS key in a custom key store. To get the + # type and origin of your KMS key, use the DescribeKey operation. + # + # Use the `KeyPairSpec` parameter to choose an RSA or Elliptic Curve + # (ECC) data key pair. In China Regions, you can also choose an SM2 data + # key pair. KMS recommends that you use ECC key pairs for signing, and + # use RSA and SM2 key pairs for either encryption or signing, but not + # both. However, KMS cannot enforce any restrictions on the use of data + # key pairs outside of KMS. + # + # `GenerateDataKeyPairWithoutPlaintext` returns a unique data key pair + # for each request. The bytes in the key are not related to the caller + # or KMS key that is used to encrypt the private key. The public key is + # a DER-encoded X.509 SubjectPublicKeyInfo, as specified in [RFC + # 5280][1]. + # + # You can use an optional encryption context to add additional security + # to the encryption operation. If you specify an `EncryptionContext`, + # you must specify the same encryption context (a case-sensitive exact + # match) when decrypting the encrypted data key. Otherwise, the request + # to decrypt fails with an `InvalidCiphertextException`. For more + # information, see [Encryption Context][2] in the *Key Management + # Service Developer Guide*. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][3] in the *Key + # Management Service Developer Guide*. 
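+ #
+ # As an illustrative sketch (not part of the generated reference), the
+ # full round trip might look like the following in Ruby. It assumes an
+ # existing symmetric encryption KMS key (the key ARN below is a
+ # placeholder) and the standard `openssl` library:
+ #
+ # require "openssl"
+ #
+ # pair = client.generate_data_key_pair_without_plaintext({
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # key_pair_spec: "ECC_NIST_P256",
+ # })
+ #
+ # # Store pair.private_key_ciphertext_blob alongside your data. Later,
+ # # decrypt it in KMS and load the PKCS8 DER private key with OpenSSL:
+ # der = client.decrypt({ ciphertext_blob: pair.private_key_ciphertext_blob }).plaintext
+ # signature = OpenSSL::PKey.read(der).sign(OpenSSL::Digest.new("SHA256"), "message to sign")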
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
+ #
+ # **Required permissions**\:
+ # [kms:GenerateDataKeyPairWithoutPlaintext][4] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * Decrypt
+ #
+ # * Encrypt
+ #
+ # * GenerateDataKey
+ #
+ # * GenerateDataKeyPair
+ #
+ # * GenerateDataKeyWithoutPlaintext
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc5280
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [Hash] :encryption_context
+ # Specifies the encryption context that will be used when encrypting the
+ # private key in the data key pair.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [required, String] :key_id
+ # Specifies the symmetric encryption KMS key that encrypts the private
+ # key in the data key pair. You cannot specify an asymmetric KMS key or
+ # a KMS key in a custom key store. To get the type and origin of your
+ # KMS key, use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [required, String] :key_pair_spec
+ # Determines the type of data key pair that is generated.
+ #
+ # The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys
+ # to encrypt and decrypt or to sign and verify (but not both), and the
+ # rule that permits you to use ECC KMS keys only to sign and verify, are
+ # not effective on data key pairs, which are used outside of KMS. The
+ # SM2 key spec is only available in China Regions.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # + # @return [Types::GenerateDataKeyPairWithoutPlaintextResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GenerateDataKeyPairWithoutPlaintextResponse#private_key_ciphertext_blob #private_key_ciphertext_blob} => String + # * {Types::GenerateDataKeyPairWithoutPlaintextResponse#public_key #public_key} => String + # * {Types::GenerateDataKeyPairWithoutPlaintextResponse#key_id #key_id} => String + # * {Types::GenerateDataKeyPairWithoutPlaintextResponse#key_pair_spec #key_pair_spec} => String + # + # + # @example Example: To generate an asymmetric data key pair without a plaintext key + # + # # This example returns an asymmetric elliptic curve (ECC) data key pair. The private key is encrypted under the symmetric + # # encryption KMS key that you specify. This operation doesn't return a plaintext (unencrypted) private key. + # + # resp = client.generate_data_key_pair_without_plaintext({ + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The symmetric encryption KMS key that encrypts the private key of the ECC data key pair. + # key_pair_spec: "ECC_NIST_P521", # The requested key spec of the ECC asymmetric data key pair. + # }) + # + # resp.to_h outputs the following: + # { + # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the symmetric encryption KMS key that encrypted the private key in the ECC asymmetric data key pair. + # key_pair_spec: "ECC_NIST_P521", # The actual key spec of the ECC asymmetric data key pair. + # private_key_ciphertext_blob: "", # The encrypted private key of the asymmetric ECC data key pair. + # public_key: "", # The public key (plaintext). + # } + # + # @example Request syntax with placeholder values + # + # resp = client.generate_data_key_pair_without_plaintext({ + # encryption_context: { + # "EncryptionContextKey" => "EncryptionContextValue", + # }, + # key_id: "KeyIdType", # required + # key_pair_spec: "RSA_2048", # required, accepts RSA_2048, RSA_3072, RSA_4096, ECC_NIST_P256, ECC_NIST_P384, ECC_NIST_P521, ECC_SECG_P256K1, SM2 + # grant_tokens: ["GrantTokenType"], + # }) + # + # @example Response structure + # + # resp.private_key_ciphertext_blob #=> String + # resp.public_key #=> String + # resp.key_id #=> String + # resp.key_pair_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SM2" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext AWS API Documentation + # + # @overload generate_data_key_pair_without_plaintext(params = {}) + # @param [Hash] params ({}) + def generate_data_key_pair_without_plaintext(params = {}, options = {}) + req = build_request(:generate_data_key_pair_without_plaintext, params) + req.send_request(options) + end + + # Returns a unique symmetric data key for use outside of KMS. This + # operation returns a data key that is encrypted under a symmetric + # encryption KMS key that you specify. The bytes in the key are random; + # they are not related to the caller or to the KMS key. + # + # `GenerateDataKeyWithoutPlaintext` is identical to the GenerateDataKey + # operation except that it does not return a plaintext copy of the data + # key. 
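+ #
+ # For instance, a minimal sketch (assuming an existing KMS key behind
+ # the `alias/ExampleAlias` placeholder) of deferring all plaintext key
+ # handling to the component that later needs the key, using Ruby's
+ # `openssl` library:
+ #
+ # require "openssl"
+ #
+ # wrapped = client.generate_data_key_without_plaintext({
+ # key_id: "alias/ExampleAlias",
+ # key_spec: "AES_256",
+ # }).ciphertext_blob
+ #
+ # # Later, in the component that is trusted with plaintext keys:
+ # cipher = OpenSSL::Cipher.new("aes-256-gcm").encrypt
+ # cipher.key = client.decrypt({ ciphertext_blob: wrapped }).plaintext
+ # iv = cipher.random_iv
+ # ciphertext = cipher.update("secret data") + cipher.final
+ # auth_tag = cipher.auth_tag # store iv, auth_tag, and ciphertext together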
+ # + # This operation is useful for systems that need to encrypt data at some + # point, but not immediately. When you need to encrypt the data, you + # call the Decrypt operation on the encrypted copy of the key. + # + # It's also useful in distributed systems with different levels of + # trust. For example, you might store encrypted data in containers. One + # component of your system creates new containers and stores an + # encrypted data key with each container. Then, a different component + # puts the data into the containers. That component first decrypts the + # data key, uses the plaintext data key to encrypt data, puts the + # encrypted data into the container, and then destroys the plaintext + # data key. In this system, the component that creates the containers + # never sees the plaintext data key. + # + # To request an asymmetric data key pair, use the GenerateDataKeyPair or + # GenerateDataKeyPairWithoutPlaintext operations. + # + # To generate a data key, you must specify the symmetric encryption KMS + # key that is used to encrypt the data key. You cannot use an asymmetric + # KMS key or a key in a custom key store to generate a data key. To get + # the type of your KMS key, use the DescribeKey operation. + # + # You must also specify the length of the data key. Use either the + # `KeySpec` or `NumberOfBytes` parameters (but not both). For 128-bit + # and 256-bit data keys, use the `KeySpec` parameter. + # + # To generate an SM4 data key (China Regions only), specify a `KeySpec` + # value of `AES_128` or `NumberOfBytes` value of `128`. The symmetric + # encryption key used in China Regions to encrypt your data key is an + # SM4 encryption key. + # + # If the operation succeeds, you will find the encrypted copy of the + # data key in the `CiphertextBlob` field. + # + # You can use an optional encryption context to add additional security + # to the encryption operation. If you specify an `EncryptionContext`, + # you must specify the same encryption context (a case-sensitive exact + # match) when decrypting the encrypted data key. Otherwise, the request + # to decrypt fails with an `InvalidCiphertextException`. For more + # information, see [Encryption Context][1] in the *Key Management + # Service Developer Guide*. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][2] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: Yes. To perform this operation with a KMS key + # in a different Amazon Web Services account, specify the key ARN or + # alias ARN in the value of the `KeyId` parameter. + # + # **Required permissions**\: [kms:GenerateDataKeyWithoutPlaintext][3] + # (key policy) + # + # **Related operations:** + # + # * Decrypt + # + # * Encrypt + # + # * GenerateDataKey + # + # * GenerateDataKeyPair + # + # * GenerateDataKeyPairWithoutPlaintext + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Specifies the symmetric encryption KMS key that encrypts the data key. + # You cannot specify an asymmetric KMS key or a KMS key in a custom key + # store. To get the type and origin of your KMS key, use the DescribeKey + # operation. 
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [Hash] :encryption_context
+ # Specifies the encryption context that will be used when encrypting the
+ # data key.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [String] :key_spec
+ # The length of the data key. Use `AES_128` to generate a 128-bit
+ # symmetric key, or `AES_256` to generate a 256-bit symmetric key.
+ #
+ # @option params [Integer] :number_of_bytes
+ # The length of the data key in bytes. For example, use the value 64 to
+ # generate a 512-bit data key (64 bytes is 512 bits). For common key
+ # lengths (128-bit and 256-bit symmetric keys), we recommend that you
+ # use the `KeySpec` field instead of this one.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::GenerateDataKeyWithoutPlaintextResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GenerateDataKeyWithoutPlaintextResponse#ciphertext_blob #ciphertext_blob} => String
+ # * {Types::GenerateDataKeyWithoutPlaintextResponse#key_id #key_id} => String
+ #
+ #
+ # @example Example: To generate an encrypted data key
+ #
+ # # The following example generates an encrypted copy of a 256-bit symmetric data encryption key (data key). The data key is
+ # # encrypted with the specified KMS key.
+ #
+ # resp = client.generate_data_key_without_plaintext({
+ # key_id: "alias/ExampleAlias", # The identifier of the KMS key to use to encrypt the data key. You can use the key ID or Amazon Resource Name (ARN) of the KMS key, or the name or ARN of an alias that refers to the KMS key.
+ # key_spec: "AES_256", # Specifies the type of data key to return.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # ciphertext_blob: "", # The encrypted data key.
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key that was used to encrypt the data key.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.generate_data_key_without_plaintext({
+ # key_id: "KeyIdType", # required
+ # encryption_context: {
+ # "EncryptionContextKey" => "EncryptionContextValue",
+ # },
+ # key_spec: "AES_256", # accepts AES_256, AES_128
+ # number_of_bytes: 1,
+ # grant_tokens: ["GrantTokenType"],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.ciphertext_blob #=> String
+ # resp.key_id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext AWS API Documentation
+ #
+ # @overload generate_data_key_without_plaintext(params = {})
+ # @param [Hash] params ({})
+ def generate_data_key_without_plaintext(params = {}, options = {})
+ req = build_request(:generate_data_key_without_plaintext, params)
+ req.send_request(options)
+ end
+
+ # Generates a hash-based message authentication code (HMAC) for a
+ # message using an HMAC KMS key and a MAC algorithm that the key
+ # supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform
+ # to industry standards defined in [RFC 2104][1].
+ #
+ # You can use the value that GenerateMac returns in the VerifyMac
+ # operation to demonstrate that the original message has not changed.
+ # Also, because a secret key is used to create the hash, you can verify
+ # that the party that generated the hash has the required secret key.
+ # You can also use the raw result to implement HMAC-based algorithms
+ # such as key derivation functions. This operation is part of KMS
+ # support for HMAC KMS keys. For details, see [HMAC keys in KMS][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ # Best practices recommend that you limit the time during which any
+ # signing mechanism, including an HMAC, is effective. This deters an
+ # attack where the actor uses a signed message to establish validity
+ # repeatedly or long after the message is superseded. HMAC tags do not
+ # include a timestamp, but you can include a timestamp in the token or
+ # message to help you detect when it's time to refresh the HMAC.
+ #
+ #
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][3] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
+ #
+ # **Required permissions**\: [kms:GenerateMac][4] (key policy)
+ #
+ # **Related operations**\: VerifyMac
+ #
+ #
+ #
+ # [1]: https://datatracker.ietf.org/doc/html/rfc2104
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String, StringIO, File] :message
+ # The message to be hashed. Specify a message of up to 4,096 bytes.
+ #
+ # `GenerateMac` and VerifyMac do not provide special handling for
+ # message digests. If you generate an HMAC for a hash digest of a
+ # message, you must verify the HMAC of the same hash digest.
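+ #
+ # For example, a sketch of HMACing a locally computed SHA-256 digest of
+ # a large file (the file name and key alias are placeholders):
+ #
+ # require "digest"
+ #
+ # digest = Digest::SHA256.digest(File.binread("large-file.bin")) # 32 bytes, well under the 4,096-byte limit
+ # tag = client.generate_mac({
+ # key_id: "alias/ExampleHmacKey",
+ # mac_algorithm: "HMAC_SHA_256",
+ # message: digest,
+ # }).mac
+ # # VerifyMac must later be called with the same digest, not the original file.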
+ #
+ # @option params [required, String] :key_id
+ # The HMAC KMS key to use in the operation. The MAC algorithm computes
+ # the HMAC for the message and the key as described in [RFC 2104][1].
+ #
+ # To identify an HMAC KMS key, use the DescribeKey operation and see the
+ # `KeySpec` field in the response.
+ #
+ #
+ #
+ # [1]: https://datatracker.ietf.org/doc/html/rfc2104
+ #
+ # @option params [required, String] :mac_algorithm
+ # The MAC algorithm used in the operation.
+ #
+ # The algorithm must be compatible with the HMAC KMS key that you
+ # specify. To find the MAC algorithms that your HMAC KMS key supports,
+ # use the DescribeKey operation and see the `MacAlgorithms` field in the
+ # `DescribeKey` response.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::GenerateMacResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GenerateMacResponse#mac #mac} => String
+ # * {Types::GenerateMacResponse#mac_algorithm #mac_algorithm} => String
+ # * {Types::GenerateMacResponse#key_id #key_id} => String
+ #
+ #
+ # @example Example: To generate an HMAC for a message
+ #
+ # # This example generates an HMAC for a message, an HMAC KMS key, and a MAC algorithm. The algorithm must be supported by
+ # # the specified HMAC KMS key.
+ #
+ # resp = client.generate_mac({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The HMAC KMS key input to the HMAC algorithm.
+ # mac_algorithm: "HMAC_SHA_384", # The HMAC algorithm requested for the operation.
+ # message: "Hello World", # The message input to the HMAC algorithm.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the HMAC KMS key used in the operation.
+ # mac: "", # The HMAC tag that results from this operation.
+ # mac_algorithm: "HMAC_SHA_384", # The HMAC algorithm used in the operation.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.generate_mac({
+ # message: "data", # required
+ # key_id: "KeyIdType", # required
+ # mac_algorithm: "HMAC_SHA_224", # required, accepts HMAC_SHA_224, HMAC_SHA_256, HMAC_SHA_384, HMAC_SHA_512
+ # grant_tokens: ["GrantTokenType"],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.mac #=> String
+ # resp.mac_algorithm #=> String, one of "HMAC_SHA_224", "HMAC_SHA_256", "HMAC_SHA_384", "HMAC_SHA_512"
+ # resp.key_id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMac AWS API Documentation
+ #
+ # @overload generate_mac(params = {})
+ # @param [Hash] params ({})
+ def generate_mac(params = {}, options = {})
+ req = build_request(:generate_mac, params)
+ req.send_request(options)
+ end
+
+ # Returns a random byte string that is cryptographically secure.
+ #
+ # You must use the `NumberOfBytes` parameter to specify the length of
+ # the random byte string. There is no default value for string length.
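+ #
+ # For example, a short sketch (the 32-byte length is arbitrary) that
+ # requests random bytes and hex-encodes them for use as a token:
+ #
+ # token = client.generate_random({ number_of_bytes: 32 }).plaintext
+ # hex = token.unpack1("H*") # 64 hexadecimal characters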
+ #
+ # By default, the random byte string is generated in KMS. To generate
+ # the byte string in the CloudHSM cluster associated with a CloudHSM
+ # key store, use the `CustomKeyStoreId` parameter.
+ #
+ # Applications in Amazon Web Services Nitro Enclaves can call this
+ # operation by using the [Amazon Web Services Nitro Enclaves Development
+ # Kit][1]. For information about the supporting parameters, see [How
+ # Amazon Web Services Nitro Enclaves use KMS][2] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # For more information about entropy and random number generation, see
+ # [Key Management Service Cryptographic Details][3].
+ #
+ # **Cross-account use**\: Not applicable. `GenerateRandom` does not use
+ # any account-specific resources, such as KMS keys.
+ #
+ # **Required permissions**\: [kms:GenerateRandom][4] (IAM policy)
+ #
+ #
+ #
+ # [1]: https://github.com/aws/aws-nitro-enclaves-sdk-c
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/cryptographic-details/
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [Integer] :number_of_bytes
+ # The length of the random byte string. This parameter is required.
+ #
+ # @option params [String] :custom_key_store_id
+ # Generates the random byte string in the CloudHSM cluster that is
+ # associated with the specified CloudHSM key store. To find the ID of a
+ # custom key store, use the DescribeCustomKeyStores operation.
+ #
+ # External key store IDs are not valid for this parameter. If you
+ # specify the ID of an external key store, `GenerateRandom` throws an
+ # `UnsupportedOperationException`.
+ #
+ # @return [Types::GenerateRandomResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GenerateRandomResponse#plaintext #plaintext} => String
+ #
+ #
+ # @example Example: To generate random data
+ #
+ # # The following example generates 32 bytes of random data.
+ #
+ # resp = client.generate_random({
+ # number_of_bytes: 32, # The length of the random data, specified in number of bytes.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # plaintext: "", # The random data.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.generate_random({
+ # number_of_bytes: 1,
+ # custom_key_store_id: "CustomKeyStoreIdType",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.plaintext #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom AWS API Documentation
+ #
+ # @overload generate_random(params = {})
+ # @param [Hash] params ({})
+ def generate_random(params = {}, options = {})
+ req = build_request(:generate_random, params)
+ req.send_request(options)
+ end
+
+ # Gets a key policy attached to the specified KMS key.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:GetKeyPolicy][1] (key policy)
+ #
+ # **Related operations**\: PutKeyPolicy
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Gets the key policy for the specified KMS key.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @option params [required, String] :policy_name
+ # Specifies the name of the key policy. The only valid name is
+ # `default`. To get the names of key policies, use ListKeyPolicies.
+ #
+ # @return [Types::GetKeyPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetKeyPolicyResponse#policy #policy} => String
+ #
+ #
+ # @example Example: To retrieve a key policy
+ #
+ # # The following example retrieves the key policy for the specified KMS key.
+ #
+ # resp = client.get_key_policy({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose key policy you want to retrieve. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # policy_name: "default", # The name of the key policy to retrieve.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # policy: "{\n \"Version\" : \"2012-10-17\",\n \"Id\" : \"key-default-1\",\n \"Statement\" : [ {\n \"Sid\" : \"Enable IAM User Permissions\",\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n \"AWS\" : \"arn:aws:iam::111122223333:root\"\n },\n \"Action\" : \"kms:*\",\n \"Resource\" : \"*\"\n } ]\n}", # The key policy document.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_key_policy({
+ # key_id: "KeyIdType", # required
+ # policy_name: "PolicyNameType", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.policy #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy AWS API Documentation
+ #
+ # @overload get_key_policy(params = {})
+ # @param [Hash] params ({})
+ def get_key_policy(params = {}, options = {})
+ req = build_request(:get_key_policy, params)
+ req.send_request(options)
+ end
+
+ # Gets a Boolean value that indicates whether [automatic rotation of the
+ # key material][1] is enabled for the specified KMS key.
+ #
+ # When you enable automatic rotation for [customer managed KMS keys][2],
+ # KMS rotates the key material of the KMS key one year (approximately
+ # 365 days) from the enable date and every year thereafter. You can
+ # monitor rotation of the key material for your KMS keys in CloudTrail
+ # and Amazon CloudWatch.
+ #
+ # Automatic key rotation is supported only on [symmetric encryption KMS
+ # keys][3]. You cannot enable automatic rotation of [asymmetric KMS
+ # keys][4], [HMAC KMS keys][5], KMS keys with [imported key
+ # material][6], or KMS keys in a [custom key store][7]. To enable or
+ # disable automatic rotation of a set of related [multi-Region keys][8],
+ # set the property on the primary key.
+ #
+ # You can enable (EnableKeyRotation) and disable automatic rotation
+ # (DisableKeyRotation) of the key material in customer managed KMS keys.
+ # Key material rotation of [Amazon Web Services managed KMS keys][9] is
+ # not configurable. KMS always rotates the key material in Amazon Web
+ # Services managed KMS keys every year. The key rotation status for
+ # Amazon Web Services managed KMS keys is always `true`.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years to every year. For details, see
+ # EnableKeyRotation.
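+ #
+ # A small sketch (the key ID is a placeholder) that checks the rotation
+ # status and turns rotation on when it is off:
+ #
+ # key_id = "1234abcd-12ab-34cd-56ef-1234567890ab"
+ # unless client.get_key_rotation_status({ key_id: key_id }).key_rotation_enabled
+ # client.enable_key_rotation({ key_id: key_id })
+ # end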
+ # + # + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][10] in the *Key + # Management Service Developer Guide*. + # + # * Disabled: The key rotation status does not change when you disable a + # KMS key. However, while the KMS key is disabled, KMS does not rotate + # the key material. When you re-enable the KMS key, rotation resumes. + # If the key material in the re-enabled KMS key hasn't been rotated + # in one year, KMS rotates it immediately, and every year thereafter. + # If it's been less than a year since the key material in the + # re-enabled KMS key was rotated, the KMS key resumes its prior + # rotation schedule. + # + # * Pending deletion: While a KMS key is pending deletion, its key + # rotation status is `false` and KMS does not rotate the key material. + # If you cancel the deletion, the original key rotation status returns + # to `true`. + # + # **Cross-account use**\: Yes. To perform this operation on a KMS key in + # a different Amazon Web Services account, specify the key ARN in the + # value of the `KeyId` parameter. + # + # **Required permissions**\: [kms:GetKeyRotationStatus][11] (key policy) + # + # **Related operations:** + # + # * DisableKeyRotation + # + # * EnableKeyRotation + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Gets the rotation status for the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key in + # a different Amazon Web Services account, you must use the key ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @return [Types::GetKeyRotationStatusResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetKeyRotationStatusResponse#key_rotation_enabled #key_rotation_enabled} => Boolean + # + # + # @example Example: To retrieve the rotation status for a KMS key + # + # # The following example retrieves the status of automatic annual rotation of the key material for the specified KMS key. + # + # resp = client.get_key_rotation_status({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose key material rotation status you want to retrieve. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. 
+ # }) + # + # resp.to_h outputs the following: + # { + # key_rotation_enabled: true, # A boolean that indicates the key material rotation status. Returns true when automatic annual rotation of the key material is enabled, or false when it is not. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_key_rotation_status({ + # key_id: "KeyIdType", # required + # }) + # + # @example Response structure + # + # resp.key_rotation_enabled #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus AWS API Documentation + # + # @overload get_key_rotation_status(params = {}) + # @param [Hash] params ({}) + def get_key_rotation_status(params = {}, options = {}) + req = build_request(:get_key_rotation_status, params) + req.send_request(options) + end + + # Returns the items you need to import key material into a symmetric + # encryption KMS key. For more information about importing key material + # into KMS, see [Importing key material][1] in the *Key Management + # Service Developer Guide*. + # + # This operation returns a public key and an import token. Use the + # public key to encrypt the symmetric key material. Store the import + # token to send with a subsequent ImportKeyMaterial request. + # + # You must specify the key ID of the symmetric encryption KMS key into + # which you will import key material. The KMS key `Origin` must be + # `EXTERNAL`. You must also specify the wrapping algorithm and type of + # wrapping key (public key) that you will use to encrypt the key + # material. You cannot perform this operation on an asymmetric KMS key, + # an HMAC KMS key, or on any KMS key in a different Amazon Web Services + # account. + # + # To import key material, you must use the public key and import token + # from the same response. These items are valid for 24 hours. The + # expiration date and time appear in the `GetParametersForImport` + # response. You cannot use an expired token in an ImportKeyMaterial + # request. If your key and token expire, send another + # `GetParametersForImport` request. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][2] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:GetParametersForImport][3] (key + # policy) + # + # **Related operations:** + # + # * ImportKeyMaterial + # + # * DeleteImportedKeyMaterial + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # The identifier of the symmetric encryption KMS key into which you will + # import key material. The `Origin` of the KMS key must be `EXTERNAL`. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. 
+ # + # @option params [required, String] :wrapping_algorithm + # The algorithm you will use to encrypt the key material before using + # the ImportKeyMaterial operation to import it. For more information, + # see [Encrypt the key material][1] in the *Key Management Service + # Developer Guide*. + # + # The `RSAES_PKCS1_V1_5` wrapping algorithm is deprecated. We recommend + # that you begin using a different wrapping algorithm immediately. KMS + # will end support for `RSAES_PKCS1_V1_5` by October 1, 2023 pursuant to + # [cryptographic key management guidance][2] from the National Institute + # of Standards and Technology (NIST). + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html + # [2]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-131Ar2.pdf + # + # @option params [required, String] :wrapping_key_spec + # The type of wrapping key (public key) to return in the response. Only + # 2048-bit RSA public keys are supported. + # + # @return [Types::GetParametersForImportResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetParametersForImportResponse#key_id #key_id} => String + # * {Types::GetParametersForImportResponse#import_token #import_token} => String + # * {Types::GetParametersForImportResponse#public_key #public_key} => String + # * {Types::GetParametersForImportResponse#parameters_valid_to #parameters_valid_to} => Time + # + # + # @example Example: To retrieve the public key and import token for a KMS key + # + # # The following example retrieves the public key and import token for the specified KMS key. + # + # resp = client.get_parameters_for_import({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key for which to retrieve the public key and import token. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # wrapping_algorithm: "RSAES_OAEP_SHA_1", # The algorithm that you will use to encrypt the key material before importing it. + # wrapping_key_spec: "RSA_2048", # The type of wrapping key (public key) to return in the response. + # }) + # + # resp.to_h outputs the following: + # { + # import_token: "", # The import token to send with a subsequent ImportKeyMaterial request. + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key for which you are retrieving the public key and import token. This is the same KMS key specified in the request. + # parameters_valid_to: Time.parse("2016-12-01T14:52:17-08:00"), # The time at which the import token and public key are no longer valid. + # public_key: "", # The public key to use to encrypt the key material before importing it. 
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_parameters_for_import({
+ # key_id: "KeyIdType", # required
+ # wrapping_algorithm: "RSAES_PKCS1_V1_5", # required, accepts RSAES_PKCS1_V1_5, RSAES_OAEP_SHA_1, RSAES_OAEP_SHA_256
+ # wrapping_key_spec: "RSA_2048", # required, accepts RSA_2048
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.key_id #=> String
+ # resp.import_token #=> String
+ # resp.public_key #=> String
+ # resp.parameters_valid_to #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport AWS API Documentation
+ #
+ # @overload get_parameters_for_import(params = {})
+ # @param [Hash] params ({})
+ def get_parameters_for_import(params = {}, options = {})
+ req = build_request(:get_parameters_for_import, params)
+ req.send_request(options)
+ end
+
+ # Returns the public key of an asymmetric KMS key. Unlike the private
+ # key of an asymmetric KMS key, which never leaves KMS unencrypted,
+ # callers with `kms:GetPublicKey` permission can download the public key
+ # of an asymmetric KMS key. You can share the public key to allow others
+ # to encrypt messages and verify signatures outside of KMS. For
+ # information about asymmetric KMS keys, see [Asymmetric KMS keys][1] in
+ # the *Key Management Service Developer Guide*.
+ #
+ # You do not need to download the public key. Instead, you can use the
+ # public key within KMS by calling the Encrypt, ReEncrypt, or Verify
+ # operations with the identifier of an asymmetric KMS key. When you use
+ # the public key within KMS, you benefit from the authentication,
+ # authorization, and logging that are part of every KMS operation. You
+ # also reduce the risk of encrypting data that cannot be decrypted.
+ # These features are not effective outside of KMS.
+ #
+ # To help you use the public key safely outside of KMS, `GetPublicKey`
+ # returns important information about the public key in the response,
+ # including:
+ #
+ # * [KeySpec][2]\: The type of key material in the public key, such as
+ # `RSA_4096` or `ECC_NIST_P521`.
+ #
+ # * [KeyUsage][3]\: Whether the key is used for encryption or signing.
+ #
+ # * [EncryptionAlgorithms][4] or [SigningAlgorithms][5]\: A list of the
+ # encryption algorithms or the signing algorithms for the key.
+ #
+ # Although KMS cannot enforce these restrictions on external operations,
+ # it is crucial that you use this information to prevent the public key
+ # from being used improperly. For example, you can prevent a public
+ # signing key from being used to encrypt data, or prevent a public key
+ # from being used with an encryption algorithm that is not supported by
+ # KMS. You can also avoid errors, such as using the wrong signing
+ # algorithm in a verification operation.
+ #
+ # To verify a signature outside of KMS with an SM2 public key (China
+ # Regions only), you must specify the distinguishing ID. By default, KMS
+ # uses `1234567812345678` as the distinguishing ID. For more
+ # information, see [Offline verification with SM2 key pairs][6].
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][7] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
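+ #
+ # As an illustration of local use, a sketch (assuming an RSA encryption
+ # key behind the placeholder `alias/ExampleRsaKey`) that loads the
+ # DER-encoded public key into Ruby's `openssl` library and encrypts with
+ # OAEP (SHA-1), matching the `RSAES_OAEP_SHA_1` algorithm that KMS can
+ # later use to decrypt:
+ #
+ # require "openssl"
+ #
+ # resp = client.get_public_key({ key_id: "alias/ExampleRsaKey" })
+ # rsa = OpenSSL::PKey.read(resp.public_key) # DER-encoded SubjectPublicKeyInfo
+ # ciphertext = rsa.public_encrypt("plaintext", OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING)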
+ #
+ # **Required permissions**\: [kms:GetPublicKey][8] (key policy)
+ #
+ # **Related operations**\: CreateKey
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec
+ # [3]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage
+ # [4]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms
+ # [5]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms
+ # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+ # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies the asymmetric KMS key that includes the public key.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::GetPublicKeyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetPublicKeyResponse#key_id #key_id} => String
+ # * {Types::GetPublicKeyResponse#public_key #public_key} => String
+ # * {Types::GetPublicKeyResponse#customer_master_key_spec #customer_master_key_spec} => String
+ # * {Types::GetPublicKeyResponse#key_spec #key_spec} => String
+ # * {Types::GetPublicKeyResponse#key_usage #key_usage} => String
+ # * {Types::GetPublicKeyResponse#encryption_algorithms #encryption_algorithms} => Array<String>
+ # * {Types::GetPublicKeyResponse#signing_algorithms #signing_algorithms} => Array<String>
+ #
+ #
+ # @example Example: To download the public key of an asymmetric KMS key
+ #
+ # # This example gets the public key of an asymmetric RSA KMS key used for encryption and decryption. The operation returns
+ # # the key spec, key usage, and encryption or signing algorithms to help you use the public key correctly outside of AWS
+ # # KMS.
+ # + # resp = client.get_public_key({ + # key_id: "arn:aws:kms:us-west-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The key ARN of the asymmetric KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # customer_master_key_spec: "RSA_4096", # The key spec of the asymmetric KMS key from which the public key was downloaded. + # encryption_algorithms: [ + # "RSAES_OAEP_SHA_1", + # "RSAES_OAEP_SHA_256", + # ], # The encryption algorithms supported by the asymmetric KMS key that was downloaded. + # key_id: "arn:aws:kms:us-west-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The key ARN of the asymmetric KMS key from which the public key was downloaded. + # key_usage: "ENCRYPT_DECRYPT", # The key usage of the asymmetric KMS key from which the public key was downloaded. + # public_key: "", # The public key (plaintext) of the asymmetric KMS key. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_public_key({ + # key_id: "KeyIdType", # required + # grant_tokens: ["GrantTokenType"], + # }) + # + # @example Response structure + # + # resp.key_id #=> String + # resp.public_key #=> String + # resp.customer_master_key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.key_usage #=> String, one of "SIGN_VERIFY", "ENCRYPT_DECRYPT", "GENERATE_VERIFY_MAC" + # resp.encryption_algorithms #=> Array + # resp.encryption_algorithms[0] #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE" + # resp.signing_algorithms #=> Array + # resp.signing_algorithms[0] #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey AWS API Documentation + # + # @overload get_public_key(params = {}) + # @param [Hash] params ({}) + def get_public_key(params = {}, options = {}) + req = build_request(:get_public_key, params) + req.send_request(options) + end + + # Imports key material into an existing symmetric encryption KMS key + # that was created without key material. After you successfully import + # key material into a KMS key, you can [reimport the same key + # material][1] into that KMS key, but you cannot import different key + # material. + # + # You cannot perform this operation on an asymmetric KMS key, an HMAC + # KMS key, or on any KMS key in a different Amazon Web Services account. + # For more information about creating KMS keys with no key material and + # then importing key material, see [Importing Key Material][2] in the + # *Key Management Service Developer Guide*. + # + # Before using this operation, call GetParametersForImport. Its response + # includes a public key and an import token. Use the public key to + # encrypt the key material. Then, submit the import token from the same + # `GetParametersForImport` response. + # + # When calling this operation, you must specify the following values: + # + # * The key ID or key ARN of a KMS key with no key material. 
Its
+ # `Origin` must be `EXTERNAL`.
+ #
+ # (To create a KMS key with no key material, call CreateKey and set the
+ # value of its `Origin` parameter to `EXTERNAL`. To get the `Origin`
+ # of a KMS key, call DescribeKey.)
+ #
+ # * The encrypted key material. To get the public key to encrypt the key
+ # material, call GetParametersForImport.
+ #
+ # * The import token that GetParametersForImport returned. You must use
+ # a public key and token from the same `GetParametersForImport`
+ # response.
+ #
+ # * Whether the key material expires (`ExpirationModel`) and, if so,
+ # when (`ValidTo`). If you set an expiration date, on the specified
+ # date, KMS deletes the key material from the KMS key, making the KMS
+ # key unusable. To use the KMS key in cryptographic operations again,
+ # you must reimport the same key material. The only way to change the
+ # expiration model or expiration date is by reimporting the same key
+ # material and specifying a new expiration date.
+ #
+ # When this operation is successful, the key state of the KMS key
+ # changes from `PendingImport` to `Enabled`, and you can use the KMS
+ # key.
+ #
+ # If this operation fails, use the exception to help determine the
+ # problem. If the error is related to the key material, the import
+ # token, or wrapping key, use GetParametersForImport to get a new public
+ # key and import token for the KMS key and repeat the import procedure.
+ # For help, see [How To Import Key Material][3] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][4] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:ImportKeyMaterial][5] (key policy)
+ #
+ # **Related operations:**
+ #
+ # * DeleteImportedKeyMaterial
+ #
+ # * GetParametersForImport
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # The identifier of the symmetric encryption KMS key that receives the
+ # imported key material. This must be the same KMS key specified in the
+ # `KeyID` parameter of the corresponding GetParametersForImport request.
+ # The `Origin` of the KMS key must be `EXTERNAL`. You cannot perform
+ # this operation on an asymmetric KMS key, an HMAC KMS key, a KMS key in
+ # a custom key store, or on a KMS key in a different Amazon Web Services
+ # account.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @option params [required, String, StringIO, File] :import_token
+ # The import token that you received in the response to a previous
+ # GetParametersForImport request.
It must be from the same response that
+ # contained the public key that you used to encrypt the key material.
+ #
+ # @option params [required, String, StringIO, File] :encrypted_key_material
+ # The encrypted key material to import. The key material must be
+ # encrypted with the public wrapping key that GetParametersForImport
+ # returned, using the wrapping algorithm that you specified in the same
+ # `GetParametersForImport` request.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :valid_to
+ # The date and time when the imported key material expires. This
+ # parameter is required when the value of the `ExpirationModel`
+ # parameter is `KEY_MATERIAL_EXPIRES`. Otherwise it is not valid.
+ #
+ # The value of this parameter must be a future date and time. The
+ # maximum value is 365 days from the request date.
+ #
+ # When the key material expires, KMS deletes the key material from the
+ # KMS key. Without its key material, the KMS key is unusable. To use the
+ # KMS key in cryptographic operations, you must reimport the same key
+ # material.
+ #
+ # You cannot change the `ExpirationModel` or `ValidTo` values for the
+ # current import after the request completes. To change either value,
+ # you must delete (DeleteImportedKeyMaterial) and reimport the key
+ # material.
+ #
+ # @option params [String] :expiration_model
+ # Specifies whether the key material expires. The default is
+ # `KEY_MATERIAL_EXPIRES`.
+ #
+ # When the value of `ExpirationModel` is `KEY_MATERIAL_EXPIRES`, you
+ # must specify a value for the `ValidTo` parameter. When the value is
+ # `KEY_MATERIAL_DOES_NOT_EXPIRE`, you must omit the `ValidTo` parameter.
+ #
+ # You cannot change the `ExpirationModel` or `ValidTo` values for the
+ # current import after the request completes. To change either value,
+ # you must delete (DeleteImportedKeyMaterial) and reimport the key
+ # material.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To import key material into a KMS key
+ #
+ # # The following example imports key material into the specified KMS key.
+ #
+ # resp = client.import_key_material({
+ # encrypted_key_material: "", # The encrypted key material to import.
+ # expiration_model: "KEY_MATERIAL_DOES_NOT_EXPIRE", # A value that specifies whether the key material expires.
+ # import_token: "", # The import token that you received in the response to a previous GetParametersForImport request.
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to import the key material into. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.import_key_material({
+ # key_id: "KeyIdType", # required
+ # import_token: "data", # required
+ # encrypted_key_material: "data", # required
+ # valid_to: Time.now,
+ # expiration_model: "KEY_MATERIAL_EXPIRES", # accepts KEY_MATERIAL_EXPIRES, KEY_MATERIAL_DOES_NOT_EXPIRE
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial AWS API Documentation
+ #
+ # @overload import_key_material(params = {})
+ # @param [Hash] params ({})
+ def import_key_material(params = {}, options = {})
+ req = build_request(:import_key_material, params)
+ req.send_request(options)
+ end
+
+ # Gets a list of aliases in the caller's Amazon Web Services account
+ # and Region. For more information about aliases, see CreateAlias.
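+ #
+ # As a quick illustrative sketch (not one of the generated examples
+ # below), printing every alias name in the account and Region might
+ # look like this:
+ #
+ #     resp = client.list_aliases
+ #     resp.aliases.each { |entry| puts entry.alias_name }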
+ #
+ # By default, the `ListAliases` operation returns all aliases in the
+ # account and Region. To get only the aliases associated with a
+ # particular KMS key, use the `KeyId` parameter.
+ #
+ # The `ListAliases` response can include aliases that you created and
+ # associated with your customer managed keys, and aliases that Amazon
+ # Web Services created and associated with Amazon Web Services managed
+ # keys in your account. You can recognize Amazon Web Services aliases
+ # because their names have the format `aws/<service-name>`, such as
+ # `aws/dynamodb`.
+ #
+ # The response might also include aliases that have no `TargetKeyId`
+ # field. These are predefined aliases that Amazon Web Services has
+ # created but has not yet associated with a KMS key. Aliases that Amazon
+ # Web Services creates in your account, including predefined aliases, do
+ # not count against your [KMS aliases quota][1].
+ #
+ # **Cross-account use**\: No. `ListAliases` does not return aliases in
+ # other Amazon Web Services accounts.
+ #
+ # **Required permissions**\: [kms:ListAliases][2] (IAM policy)
+ #
+ # For details, see [Controlling access to aliases][3] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Related operations:**
+ #
+ # * CreateAlias
+ #
+ # * DeleteAlias
+ #
+ # * UpdateAlias
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access
+ #
+ # @option params [String] :key_id
+ # Lists only aliases that are associated with the specified KMS key.
+ # Enter a KMS key in your Amazon Web Services account.
+ #
+ # This parameter is optional. If you omit it, `ListAliases` returns all
+ # aliases in the account and Region.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @option params [Integer] :limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 100, inclusive. If you do not include a value, it defaults to 50.
+ #
+ # @option params [String] :marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ #
+ # @return [Types::ListAliasesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListAliasesResponse#aliases #aliases} => Array<Types::AliasListEntry>
+ # * {Types::ListAliasesResponse#next_marker #next_marker} => String
+ # * {Types::ListAliasesResponse#truncated #truncated} => Boolean
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list aliases
+ #
+ # # The following example lists aliases.
+ #
+ # resp = client.list_aliases({
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # aliases: [
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/aws/acm",
+ # alias_name: "alias/aws/acm",
+ # target_key_id: "da03f6f7-d279-427a-9cae-de48d07e5b66",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/aws/ebs",
+ # alias_name: "alias/aws/ebs",
+ # target_key_id: "25a217e7-7170-4b8c-8bf6-045ea5f70e5b",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/aws/rds",
+ # alias_name: "alias/aws/rds",
+ # target_key_id: "7ec3104e-c3f2-4b5c-bf42-bfc4772c6685",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/aws/redshift",
+ # alias_name: "alias/aws/redshift",
+ # target_key_id: "08f7a25a-69e2-4fb5-8f10-393db27326fa",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/aws/s3",
+ # alias_name: "alias/aws/s3",
+ # target_key_id: "d2b0f1a3-580d-4f79-b836-bc983be8cfa5",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/example1",
+ # alias_name: "alias/example1",
+ # target_key_id: "4da1e216-62d0-46c5-a7c0-5f3a3d2f8046",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/example2",
+ # alias_name: "alias/example2",
+ # target_key_id: "f32fef59-2cc2-445b-8573-2d73328acbee",
+ # },
+ # {
+ # alias_arn: "arn:aws:kms:us-east-2:111122223333:alias/example3",
+ # alias_name: "alias/example3",
+ # target_key_id: "1374ef38-d34e-4d5f-b2c9-4e0daee38855",
+ # },
+ # ], # A list of aliases, including the key ID of the KMS key that each alias refers to.
+ # truncated: false, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_aliases({
+ # key_id: "KeyIdType",
+ # limit: 1,
+ # marker: "MarkerType",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.aliases #=> Array
+ # resp.aliases[0].alias_name #=> String
+ # resp.aliases[0].alias_arn #=> String
+ # resp.aliases[0].target_key_id #=> String
+ # resp.aliases[0].creation_date #=> Time
+ # resp.aliases[0].last_updated_date #=> Time
+ # resp.next_marker #=> String
+ # resp.truncated #=> Boolean
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases AWS API Documentation
+ #
+ # @overload list_aliases(params = {})
+ # @param [Hash] params ({})
+ def list_aliases(params = {}, options = {})
+ req = build_request(:list_aliases, params)
+ req.send_request(options)
+ end
+
+ # Gets a list of all grants for the specified KMS key.
+ #
+ # You must specify the KMS key in all requests. You can filter the grant
+ # list by grant ID or grantee principal.
+ #
+ # For detailed information about grants, including grant terminology,
+ # see [Grants in KMS][1] in the *Key Management Service Developer
+ # Guide*. For examples of working with grants in several
+ # programming languages, see [Programming grants][2].
+ #
+ # The `GranteePrincipal` field in the `ListGrants` response usually
+ # contains the user or role designated as the grantee principal in the
+ # grant. However, when the grantee principal in the grant is an Amazon
+ # Web Services service, the `GranteePrincipal` field contains the
+ # [service principal][3], which might represent several different
+ # grantee principals.
+ #
+ #
+ #
+ # **Cross-account use**\: Yes.
To perform this operation on a KMS key in + # a different Amazon Web Services account, specify the key ARN in the + # value of the `KeyId` parameter. + # + # **Required permissions**\: [kms:ListGrants][4] (key policy) + # + # **Related operations:** + # + # * CreateGrant + # + # * ListRetirableGrants + # + # * RetireGrant + # + # * RevokeGrant + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [Integer] :limit + # Use this parameter to specify the maximum number of items to return. + # When this value is present, KMS does not return more than the + # specified number of items, but it might return fewer. + # + # This value is optional. If you include a value, it must be between 1 + # and 100, inclusive. If you do not include a value, it defaults to 50. + # + # @option params [String] :marker + # Use this parameter in a subsequent request after you receive a + # response with truncated results. Set it to the value of `NextMarker` + # from the truncated response you just received. + # + # @option params [required, String] :key_id + # Returns only grants for the specified KMS key. This parameter is + # required. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key in + # a different Amazon Web Services account, you must use the key ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [String] :grant_id + # Returns only the grant with the specified grant ID. The grant ID + # uniquely identifies the grant. + # + # @option params [String] :grantee_principal + # Returns only grants where the specified principal is the grantee + # principal for the grant. + # + # @return [Types::ListGrantsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListGrantsResponse#grants #grants} => Array<Types::GrantListEntry> + # * {Types::ListGrantsResponse#next_marker #next_marker} => String + # * {Types::ListGrantsResponse#truncated #truncated} => Boolean + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # + # @example Example: To list grants for a KMS key + # + # # The following example lists grants for the specified KMS key. + # + # resp = client.list_grants({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose grants you want to list. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. 
+ # }) + # + # resp.to_h outputs the following: + # { + # grants: [ + # { + # creation_date: Time.parse("2016-10-25T14:37:41-07:00"), + # grant_id: "91ad875e49b04a9d1f3bdeb84d821f9db6ea95e1098813f6d47f0c65fbe2a172", + # grantee_principal: "acm.us-east-2.amazonaws.com", + # issuing_account: "arn:aws:iam::111122223333:root", + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # operations: [ + # "Encrypt", + # "ReEncryptFrom", + # "ReEncryptTo", + # ], + # retiring_principal: "acm.us-east-2.amazonaws.com", + # }, + # { + # creation_date: Time.parse("2016-10-25T14:37:41-07:00"), + # grant_id: "a5d67d3e207a8fc1f4928749ee3e52eb0440493a8b9cf05bbfad91655b056200", + # grantee_principal: "acm.us-east-2.amazonaws.com", + # issuing_account: "arn:aws:iam::111122223333:root", + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # operations: [ + # "ReEncryptFrom", + # "ReEncryptTo", + # ], + # retiring_principal: "acm.us-east-2.amazonaws.com", + # }, + # { + # creation_date: Time.parse("2016-10-25T14:37:41-07:00"), + # grant_id: "c541aaf05d90cb78846a73b346fc43e65be28b7163129488c738e0c9e0628f4f", + # grantee_principal: "acm.us-east-2.amazonaws.com", + # issuing_account: "arn:aws:iam::111122223333:root", + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # operations: [ + # "Encrypt", + # "ReEncryptFrom", + # "ReEncryptTo", + # ], + # retiring_principal: "acm.us-east-2.amazonaws.com", + # }, + # { + # creation_date: Time.parse("2016-10-25T14:37:41-07:00"), + # grant_id: "dd2052c67b4c76ee45caf1dc6a1e2d24e8dc744a51b36ae2f067dc540ce0105c", + # grantee_principal: "acm.us-east-2.amazonaws.com", + # issuing_account: "arn:aws:iam::111122223333:root", + # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + # operations: [ + # "Encrypt", + # "ReEncryptFrom", + # "ReEncryptTo", + # ], + # retiring_principal: "acm.us-east-2.amazonaws.com", + # }, + # ], # A list of grants. + # truncated: true, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not. 
+ # } + # + # @example Request syntax with placeholder values + # + # resp = client.list_grants({ + # limit: 1, + # marker: "MarkerType", + # key_id: "KeyIdType", # required + # grant_id: "GrantIdType", + # grantee_principal: "PrincipalIdType", + # }) + # + # @example Response structure + # + # resp.grants #=> Array + # resp.grants[0].key_id #=> String + # resp.grants[0].grant_id #=> String + # resp.grants[0].name #=> String + # resp.grants[0].creation_date #=> Time + # resp.grants[0].grantee_principal #=> String + # resp.grants[0].retiring_principal #=> String + # resp.grants[0].issuing_account #=> String + # resp.grants[0].operations #=> Array + # resp.grants[0].operations[0] #=> String, one of "Decrypt", "Encrypt", "GenerateDataKey", "GenerateDataKeyWithoutPlaintext", "ReEncryptFrom", "ReEncryptTo", "Sign", "Verify", "GetPublicKey", "CreateGrant", "RetireGrant", "DescribeKey", "GenerateDataKeyPair", "GenerateDataKeyPairWithoutPlaintext", "GenerateMac", "VerifyMac" + # resp.grants[0].constraints.encryption_context_subset #=> Hash + # resp.grants[0].constraints.encryption_context_subset["EncryptionContextKey"] #=> String + # resp.grants[0].constraints.encryption_context_equals #=> Hash + # resp.grants[0].constraints.encryption_context_equals["EncryptionContextKey"] #=> String + # resp.next_marker #=> String + # resp.truncated #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants AWS API Documentation + # + # @overload list_grants(params = {}) + # @param [Hash] params ({}) + def list_grants(params = {}, options = {}) + req = build_request(:list_grants, params) + req.send_request(options) + end + + # Gets the names of the key policies that are attached to a KMS key. + # This operation is designed to get policy names that you can use in a + # GetKeyPolicy operation. However, the only valid policy name is + # `default`. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:ListKeyPolicies][1] (key policy) + # + # **Related operations:** + # + # * GetKeyPolicy + # + # * PutKeyPolicy + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Gets the names of key policies for the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [Integer] :limit + # Use this parameter to specify the maximum number of items to return. + # When this value is present, KMS does not return more than the + # specified number of items, but it might return fewer. + # + # This value is optional. If you include a value, it must be between 1 + # and 1000, inclusive. If you do not include a value, it defaults to + # 100. + # + # Only one policy can be attached to a key. + # + # @option params [String] :marker + # Use this parameter in a subsequent request after you receive a + # response with truncated results. Set it to the value of `NextMarker` + # from the truncated response you just received. 
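+ #
+ # As an illustrative sketch, manual pagination with `marker` and
+ # `truncated` might look like this (`key_id` is assumed to hold a
+ # valid key identifier; the pageable response can also iterate the
+ # pages for you):
+ #
+ #     params = { key_id: key_id }
+ #     loop do
+ #       resp = client.list_key_policies(params)
+ #       resp.policy_names.each { |name| puts name }
+ #       break unless resp.truncated
+ #       params[:marker] = resp.next_marker
+ #     end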
+ # + # @return [Types::ListKeyPoliciesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListKeyPoliciesResponse#policy_names #policy_names} => Array<String> + # * {Types::ListKeyPoliciesResponse#next_marker #next_marker} => String + # * {Types::ListKeyPoliciesResponse#truncated #truncated} => Boolean + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # + # @example Example: To list key policies for a KMS key + # + # # The following example lists key policies for the specified KMS key. + # + # resp = client.list_key_policies({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose key policies you want to list. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # }) + # + # resp.to_h outputs the following: + # { + # policy_names: [ + # "default", + # ], # A list of key policy names. + # truncated: false, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.list_key_policies({ + # key_id: "KeyIdType", # required + # limit: 1, + # marker: "MarkerType", + # }) + # + # @example Response structure + # + # resp.policy_names #=> Array + # resp.policy_names[0] #=> String + # resp.next_marker #=> String + # resp.truncated #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies AWS API Documentation + # + # @overload list_key_policies(params = {}) + # @param [Hash] params ({}) + def list_key_policies(params = {}, options = {}) + req = build_request(:list_key_policies, params) + req.send_request(options) + end + + # Gets a list of all KMS keys in the caller's Amazon Web Services + # account and Region. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:ListKeys][1] (IAM policy) + # + # **Related operations:** + # + # * CreateKey + # + # * DescribeKey + # + # * ListAliases + # + # * ListResourceTags + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [Integer] :limit + # Use this parameter to specify the maximum number of items to return. + # When this value is present, KMS does not return more than the + # specified number of items, but it might return fewer. + # + # This value is optional. If you include a value, it must be between 1 + # and 1000, inclusive. If you do not include a value, it defaults to + # 100. + # + # @option params [String] :marker + # Use this parameter in a subsequent request after you receive a + # response with truncated results. Set it to the value of `NextMarker` + # from the truncated response you just received. + # + # @return [Types::ListKeysResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListKeysResponse#keys #keys} => Array<Types::KeyListEntry> + # * {Types::ListKeysResponse#next_marker #next_marker} => String + # * {Types::ListKeysResponse#truncated #truncated} => Boolean + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. 
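+ #
+ # @example Example: Iterating over all KMS keys (an illustrative sketch)
+ #
+ #   # Pageable responses are Enumerable; each iteration yields one page
+ #   # of results, following `NextMarker` automatically.
+ #   client.list_keys(limit: 100).each do |page|
+ #     page.keys.each { |key| puts key.key_arn }
+ #   end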
+ # + # + # @example Example: To list KMS keys + # + # # The following example lists KMS keys. + # + # resp = client.list_keys({ + # }) + # + # resp.to_h outputs the following: + # { + # keys: [ + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/0d990263-018e-4e65-a703-eff731de951e", + # key_id: "0d990263-018e-4e65-a703-eff731de951e", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/144be297-0ae1-44ac-9c8f-93cd8c82f841", + # key_id: "144be297-0ae1-44ac-9c8f-93cd8c82f841", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/21184251-b765-428e-b852-2c7353e72571", + # key_id: "21184251-b765-428e-b852-2c7353e72571", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/214fe92f-5b03-4ae1-b350-db2a45dbe10c", + # key_id: "214fe92f-5b03-4ae1-b350-db2a45dbe10c", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/339963f2-e523-49d3-af24-a0fe752aa458", + # key_id: "339963f2-e523-49d3-af24-a0fe752aa458", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/b776a44b-df37-4438-9be4-a27494e4271a", + # key_id: "b776a44b-df37-4438-9be4-a27494e4271a", + # }, + # { + # key_arn: "arn:aws:kms:us-east-2:111122223333:key/deaf6c9e-cf2c-46a6-bf6d-0b6d487cffbb", + # key_id: "deaf6c9e-cf2c-46a6-bf6d-0b6d487cffbb", + # }, + # ], # A list of KMS keys, including the key ID and Amazon Resource Name (ARN) of each one. + # truncated: false, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.list_keys({ + # limit: 1, + # marker: "MarkerType", + # }) + # + # @example Response structure + # + # resp.keys #=> Array + # resp.keys[0].key_id #=> String + # resp.keys[0].key_arn #=> String + # resp.next_marker #=> String + # resp.truncated #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys AWS API Documentation + # + # @overload list_keys(params = {}) + # @param [Hash] params ({}) + def list_keys(params = {}, options = {}) + req = build_request(:list_keys, params) + req.send_request(options) + end + + # Returns all tags on the specified KMS key. + # + # For general information about tags, including the format and syntax, + # see [Tagging Amazon Web Services resources][1] in the *Amazon Web + # Services General Reference*. For information about using tags in KMS, + # see [Tagging keys][2]. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:ListResourceTags][3] (key policy) + # + # **Related operations:** + # + # * CreateKey + # + # * ReplicateKey + # + # * TagResource + # + # * UntagResource + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Gets tags on the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. 
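+ #
+ # As an illustrative sketch (using the example key ID above), the
+ # returned tag list can be folded into a plain Ruby hash:
+ #
+ #     resp = client.list_resource_tags(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab")
+ #     tags = resp.tags.each_with_object({}) { |t, h| h[t.tag_key] = t.tag_value }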
+ #
+ # @option params [Integer] :limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 50, inclusive. If you do not include a value, it defaults to 50.
+ #
+ # @option params [String] :marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ #
+ # Do not attempt to construct this value. Use only the value of
+ # `NextMarker` from the truncated response you just received.
+ #
+ # @return [Types::ListResourceTagsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListResourceTagsResponse#tags #tags} => Array<Types::Tag>
+ # * {Types::ListResourceTagsResponse#next_marker #next_marker} => String
+ # * {Types::ListResourceTagsResponse#truncated #truncated} => Boolean
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list tags for a KMS key
+ #
+ # # The following example lists tags for a KMS key.
+ #
+ # resp = client.list_resource_tags({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose tags you are listing. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # tags: [
+ # {
+ # tag_key: "CostCenter",
+ # tag_value: "87654",
+ # },
+ # {
+ # tag_key: "CreatedBy",
+ # tag_value: "ExampleUser",
+ # },
+ # {
+ # tag_key: "Purpose",
+ # tag_value: "Test",
+ # },
+ # ], # A list of tags.
+ # truncated: false, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_resource_tags({
+ # key_id: "KeyIdType", # required
+ # limit: 1,
+ # marker: "MarkerType",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.tags #=> Array
+ # resp.tags[0].tag_key #=> String
+ # resp.tags[0].tag_value #=> String
+ # resp.next_marker #=> String
+ # resp.truncated #=> Boolean
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags AWS API Documentation
+ #
+ # @overload list_resource_tags(params = {})
+ # @param [Hash] params ({})
+ def list_resource_tags(params = {}, options = {})
+ req = build_request(:list_resource_tags, params)
+ req.send_request(options)
+ end
+
+ # Returns information about all grants in the Amazon Web Services
+ # account and Region that have the specified retiring principal.
+ #
+ # You can specify any principal in your Amazon Web Services account. The
+ # grants that are returned include grants for KMS keys in your Amazon
+ # Web Services account and other Amazon Web Services accounts. You might
+ # use this operation to determine which grants you may retire. To retire
+ # a grant, use the RetireGrant operation.
+ #
+ # For detailed information about grants, including grant terminology,
+ # see [Grants in KMS][1] in the *Key Management Service Developer
+ # Guide*. For examples of working with grants in several
+ # programming languages, see [Programming grants][2].
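+ #
+ # As an illustrative sketch (the role ARN is hypothetical), this
+ # operation pairs naturally with RetireGrant:
+ #
+ #     resp = client.list_retirable_grants({
+ #       retiring_principal: "arn:aws:iam::111122223333:role/ExampleRole",
+ #     })
+ #     resp.grants.each do |grant|
+ #       client.retire_grant(key_id: grant.key_id, grant_id: grant.grant_id)
+ #     end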
+ #
+ # **Cross-account use**\: You must specify a principal in your Amazon
+ # Web Services account. However, this operation can return grants in any
+ # Amazon Web Services account. You do not need `kms:ListRetirableGrants`
+ # permission (or any other additional permission) in any Amazon Web
+ # Services account other than your own.
+ #
+ # **Required permissions**\: [kms:ListRetirableGrants][3] (IAM policy)
+ # in your Amazon Web Services account.
+ #
+ # **Related operations:**
+ #
+ # * CreateGrant
+ #
+ # * ListGrants
+ #
+ # * RetireGrant
+ #
+ # * RevokeGrant
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [Integer] :limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 100, inclusive. If you do not include a value, it defaults to 50.
+ #
+ # @option params [String] :marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ #
+ # @option params [required, String] :retiring_principal
+ # The retiring principal for which to list grants. Enter a principal in
+ # your Amazon Web Services account.
+ #
+ # To specify the retiring principal, use the [Amazon Resource Name
+ # (ARN)][1] of an Amazon Web Services principal. Valid principals
+ # include Amazon Web Services accounts, IAM users, IAM roles, federated
+ # users, and assumed role users. For help with the ARN syntax for a
+ # principal, see [IAM ARNs][2] in the *Identity and Access
+ # Management User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
+ #
+ # @return [Types::ListGrantsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListGrantsResponse#grants #grants} => Array<Types::GrantListEntry>
+ # * {Types::ListGrantsResponse#next_marker #next_marker} => String
+ # * {Types::ListGrantsResponse#truncated #truncated} => Boolean
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list grants that the specified principal can retire
+ #
+ # # The following example lists the grants that the specified principal (identity) can retire.
+ #
+ # resp = client.list_retirable_grants({
+ # retiring_principal: "arn:aws:iam::111122223333:role/ExampleRole", # The retiring principal whose grants you want to list. Use the Amazon Resource Name (ARN) of a principal such as an AWS account (root), IAM user, federated user, or assumed role user.
+ #
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # grants: [
+ # {
+ # creation_date: Time.parse("2016-12-07T11:09:35-08:00"),
+ # grant_id: "0c237476b39f8bc44e45212e08498fbe3151305030726c0590dd8d3e9f3d6a60",
+ # grantee_principal: "arn:aws:iam::111122223333:role/ExampleRole",
+ # issuing_account: "arn:aws:iam::444455556666:root",
+ # key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab",
+ # operations: [
+ # "Decrypt",
+ # "Encrypt",
+ # ],
+ # retiring_principal: "arn:aws:iam::111122223333:role/ExampleRole",
+ # },
+ # ], # A list of grants that the specified principal can retire.
+ # truncated: false, # A boolean that indicates whether there are more items in the list. Returns true when there are more items, or false when there are not.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_retirable_grants({
+ # limit: 1,
+ # marker: "MarkerType",
+ # retiring_principal: "PrincipalIdType", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.grants #=> Array
+ # resp.grants[0].key_id #=> String
+ # resp.grants[0].grant_id #=> String
+ # resp.grants[0].name #=> String
+ # resp.grants[0].creation_date #=> Time
+ # resp.grants[0].grantee_principal #=> String
+ # resp.grants[0].retiring_principal #=> String
+ # resp.grants[0].issuing_account #=> String
+ # resp.grants[0].operations #=> Array
+ # resp.grants[0].operations[0] #=> String, one of "Decrypt", "Encrypt", "GenerateDataKey", "GenerateDataKeyWithoutPlaintext", "ReEncryptFrom", "ReEncryptTo", "Sign", "Verify", "GetPublicKey", "CreateGrant", "RetireGrant", "DescribeKey", "GenerateDataKeyPair", "GenerateDataKeyPairWithoutPlaintext", "GenerateMac", "VerifyMac"
+ # resp.grants[0].constraints.encryption_context_subset #=> Hash
+ # resp.grants[0].constraints.encryption_context_subset["EncryptionContextKey"] #=> String
+ # resp.grants[0].constraints.encryption_context_equals #=> Hash
+ # resp.grants[0].constraints.encryption_context_equals["EncryptionContextKey"] #=> String
+ # resp.next_marker #=> String
+ # resp.truncated #=> Boolean
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants AWS API Documentation
+ #
+ # @overload list_retirable_grants(params = {})
+ # @param [Hash] params ({})
+ def list_retirable_grants(params = {}, options = {})
+ req = build_request(:list_retirable_grants, params)
+ req.send_request(options)
+ end
+
+ # Attaches a key policy to the specified KMS key.
+ #
+ # For more information about key policies, see [Key Policies][1] in the
+ # *Key Management Service Developer Guide*. For help writing and
+ # formatting a JSON policy document, see the [IAM JSON Policy
+ # Reference][2] in the *Identity and Access Management User
+ # Guide*. For examples of adding a key policy in multiple
+ # programming languages, see [Setting a key policy][3] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
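+ #
+ # As an illustrative sketch, the policy document can be built with
+ # Ruby's standard JSON library rather than a hand-escaped string (the
+ # single statement below is illustrative, not a recommended policy):
+ #
+ #     require "json"
+ #
+ #     policy = JSON.generate({
+ #       "Version" => "2012-10-17",
+ #       "Statement" => [{
+ #         "Sid" => "EnableIAMUserPermissions",
+ #         "Effect" => "Allow",
+ #         "Principal" => { "AWS" => "arn:aws:iam::111122223333:root" },
+ #         "Action" => "kms:*",
+ #         "Resource" => "*",
+ #       }],
+ #     })
+ #     client.put_key_policy(key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #                           policy_name: "default", policy: policy)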
+ #
+ # **Required permissions**\: [kms:PutKeyPolicy][4] (key policy)
+ #
+ # **Related operations**\: GetKeyPolicy
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Sets the key policy on the specified KMS key.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @option params [required, String] :policy_name
+ # The name of the key policy. The only valid value is `default`.
+ #
+ # @option params [required, String] :policy
+ # The key policy to attach to the KMS key.
+ #
+ # The key policy must meet the following criteria:
+ #
+ # * The key policy must allow the calling principal to make a subsequent
+ # `PutKeyPolicy` request on the KMS key. This reduces the risk that
+ # the KMS key becomes unmanageable. For more information, see [Default
+ # key policy][1] in the *Key Management Service Developer Guide*. (To
+ # omit this condition, set `BypassPolicyLockoutSafetyCheck` to true.)
+ #
+ # * Each statement in the key policy must contain one or more
+ # principals. The principals in the key policy must exist and be
+ # visible to KMS. When you create a new Amazon Web Services principal,
+ # you might need to enforce a delay before including the new principal
+ # in a key policy because the new principal might not be immediately
+ # visible to KMS. For more information, see [Changes that I make are
+ # not always immediately visible][2] in the *Amazon Web Services
+ # Identity and Access Management User Guide*.
+ #
+ # A key policy document can include only the following characters:
+ #
+ # * Printable ASCII characters from the space character (`\u0020`)
+ # through the end of the ASCII character range.
+ #
+ # * Printable characters in the Basic Latin and Latin-1 Supplement
+ # character set (through `\u00FF`).
+ #
+ # * The tab (`\u0009`), line feed (`\u000A`), and carriage return
+ # (`\u000D`) special characters
+ #
+ # For information about key policies, see [Key policies in KMS][3] in
+ # the *Key Management Service Developer Guide*. For help writing and
+ # formatting a JSON policy document, see the [IAM JSON Policy
+ # Reference][4] in the *Identity and Access Management User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ #
+ # @option params [Boolean] :bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The default
+ # value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key becomes
+ # unmanageable. Do not set this value to true indiscriminately.
+ # + # For more information, see [Default key policy][1] in the *Key + # Management Service Developer Guide*. + # + # Use this parameter only when you intend to prevent the principal that + # is making the request from making a subsequent PutKeyPolicy request on + # the KMS key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To attach a key policy to a KMS key + # + # # The following example attaches a key policy to the specified KMS key. + # + # resp = client.put_key_policy({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to attach the key policy to. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # policy: "{\"Version\":\"2012-10-17\",\"Id\":\"custom-policy-2016-12-07\",\"Statement\":[{\"Sid\":\"EnableIAMUserPermissions\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},\"Action\":\"kms:*\",\"Resource\":\"*\"},{\"Sid\":\"AllowaccessforKeyAdministrators\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam::111122223333:user/ExampleAdminUser\",\"arn:aws:iam::111122223333:role/ExampleAdminRole\"]},\"Action\":[\"kms:Create*\",\"kms:Describe*\",\"kms:Enable*\",\"kms:List*\",\"kms:Put*\",\"kms:Update*\",\"kms:Revoke*\",\"kms:Disable*\",\"kms:Get*\",\"kms:Delete*\",\"kms:ScheduleKeyDeletion\",\"kms:CancelKeyDeletion\"],\"Resource\":\"*\"},{\"Sid\":\"Allowuseofthekey\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:role/ExamplePowerUserRole\"},\"Action\":[\"kms:Encrypt\",\"kms:Decrypt\",\"kms:ReEncrypt*\",\"kms:GenerateDataKey*\",\"kms:DescribeKey\"],\"Resource\":\"*\"},{\"Sid\":\"Allowattachmentofpersistentresources\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:role/ExamplePowerUserRole\"},\"Action\":[\"kms:CreateGrant\",\"kms:ListGrants\",\"kms:RevokeGrant\"],\"Resource\":\"*\",\"Condition\":{\"Bool\":{\"kms:GrantIsForAWSResource\":\"true\"}}}]}", # The key policy document. + # policy_name: "default", # The name of the key policy. + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_key_policy({ + # key_id: "KeyIdType", # required + # policy_name: "PolicyNameType", # required + # policy: "PolicyType", # required + # bypass_policy_lockout_safety_check: false, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy AWS API Documentation + # + # @overload put_key_policy(params = {}) + # @param [Hash] params ({}) + def put_key_policy(params = {}, options = {}) + req = build_request(:put_key_policy, params) + req.send_request(options) + end + + # Decrypts ciphertext and then reencrypts it entirely within KMS. You + # can use this operation to change the KMS key under which data is + # encrypted, such as when you [manually rotate][1] a KMS key or change + # the KMS key that protects a ciphertext. You can also use it to + # reencrypt ciphertext under the same KMS key, such as to change the + # [encryption context][2] of a ciphertext. + # + # The `ReEncrypt` operation can decrypt ciphertext that was encrypted by + # using a KMS key in an KMS operation, such as Encrypt or + # GenerateDataKey. It can also decrypt ciphertext that was encrypted by + # using the public key of an [asymmetric KMS key][3] outside of KMS. 
+ # However, it cannot decrypt ciphertext produced by other libraries, + # such as the [Amazon Web Services Encryption SDK][4] or [Amazon S3 + # client-side encryption][5]. These libraries return a ciphertext format + # that is incompatible with KMS. + # + # When you use the `ReEncrypt` operation, you need to provide + # information for the decrypt operation and the subsequent encrypt + # operation. + # + # * If your ciphertext was encrypted under an asymmetric KMS key, you + # must use the `SourceKeyId` parameter to identify the KMS key that + # encrypted the ciphertext. You must also supply the encryption + # algorithm that was used. This information is required to decrypt the + # data. + # + # * If your ciphertext was encrypted under a symmetric encryption KMS + # key, the `SourceKeyId` parameter is optional. KMS can get this + # information from metadata that it adds to the symmetric ciphertext + # blob. This feature adds durability to your implementation by + # ensuring that authorized users can decrypt ciphertext decades after + # it was encrypted, even if they've lost track of the key ID. + # However, specifying the source KMS key is always recommended as a + # best practice. When you use the `SourceKeyId` parameter to specify a + # KMS key, KMS uses only the KMS key you specify. If the ciphertext + # was encrypted under a different KMS key, the `ReEncrypt` operation + # fails. This practice ensures that you use the KMS key that you + # intend. + # + # * To reencrypt the data, you must use the `DestinationKeyId` parameter + # to specify the KMS key that re-encrypts the data after it is + # decrypted. If the destination KMS key is an asymmetric KMS key, you + # must also provide the encryption algorithm. The algorithm that you + # choose must be compatible with the KMS key. + # + # When you use an asymmetric KMS key to encrypt or reencrypt data, be + # sure to record the KMS key and encryption algorithm that you choose. + # You will be required to provide the same KMS key and encryption + # algorithm when you decrypt the data. If the KMS key and algorithm do + # not match the values used to encrypt the data, the decrypt operation + # fails. + # + # You are not required to supply the key ID and encryption algorithm + # when you decrypt with symmetric encryption KMS keys because KMS + # stores this information in the ciphertext blob. KMS cannot store + # metadata in ciphertext generated with asymmetric keys. The standard + # format for asymmetric key ciphertext does not include configurable + # fields. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][6] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: Yes. The source KMS key and destination KMS + # key can be in different Amazon Web Services accounts. Either or both + # KMS keys can be in a different account than the caller. To specify a + # KMS key in a different account, you must use its key ARN or alias ARN. + # + # **Required permissions**\: + # + # * [kms:ReEncryptFrom][7] permission on the source KMS key (key policy) + # + # * [kms:ReEncryptTo][7] permission on the destination KMS key (key + # policy) + # + # To permit reencryption from or to a KMS key, include the + # `"kms:ReEncrypt*"` permission in your [key policy][8]. This permission + # is automatically included in the key policy when you use the console + # to create a KMS key. 
But you must include it manually when you create + # a KMS key programmatically or when you use the PutKeyPolicy operation + # to set a key policy. + # + # **Related operations:** + # + # * Decrypt + # + # * Encrypt + # + # * GenerateDataKey + # + # * GenerateDataKeyPair + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks + # [4]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/ + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html + # + # @option params [required, String, StringIO, File] :ciphertext_blob + # Ciphertext of the data to reencrypt. + # + # @option params [Hash] :source_encryption_context + # Specifies the encryption context to use to decrypt the ciphertext. + # Enter the same encryption context that was used to encrypt the + # ciphertext. + # + # An *encryption context* is a collection of non-secret key-value pairs + # that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. An + # encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS keys, + # an encryption context is optional, but it is strongly recommended. + # + # For more information, see [Encryption context][1] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # + # @option params [String] :source_key_id + # Specifies the KMS key that KMS will use to decrypt the ciphertext + # before it is re-encrypted. + # + # Enter a key ID of the KMS key that was used to encrypt the ciphertext. + # If you identify a different KMS key, the `ReEncrypt` operation throws + # an `IncorrectKeyException`. + # + # This parameter is required only when the ciphertext was encrypted + # under an asymmetric KMS key. If you used a symmetric encryption KMS + # key, KMS can get the KMS key from metadata that it adds to the + # symmetric ciphertext blob. However, it is always recommended as a best + # practice. This practice ensures that you use the KMS key that you + # intend. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify a + # KMS key in a different Amazon Web Services account, you must use the + # key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. 
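+ #
+ # As an illustrative sketch (`ciphertext` is assumed to hold a blob
+ # previously returned by Encrypt; the key IDs are the example ones
+ # used in this file), pinning the source key looks like this:
+ #
+ #     resp = client.re_encrypt({
+ #       ciphertext_blob: ciphertext,
+ #       source_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #       destination_key_id: "0987dcba-09fe-87dc-65ba-ab0987654321",
+ #     })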
+ #
+ # @option params [required, String] :destination_key_id
+ # A unique identifier for the KMS key that is used to reencrypt the
+ # data. Specify a symmetric encryption KMS key or an asymmetric KMS key
+ # with a `KeyUsage` value of `ENCRYPT_DECRYPT`. To find the `KeyUsage`
+ # value of a KMS key, use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [Hash] :destination_encryption_context
+ # Specifies the encryption context to use when reencrypting the data.
+ #
+ # A destination encryption context is valid only when the destination
+ # KMS key is a symmetric encryption KMS key. The standard ciphertext
+ # format for asymmetric KMS keys does not include fields for metadata.
+ #
+ # An *encryption context* is a collection of non-secret key-value pairs
+ # that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data. An
+ # encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS keys,
+ # an encryption context is optional, but it is strongly recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ #
+ # @option params [String] :source_encryption_algorithm
+ # Specifies the encryption algorithm that KMS will use to decrypt the
+ # ciphertext before it is reencrypted. The default value,
+ # `SYMMETRIC_DEFAULT`, represents the algorithm used for symmetric
+ # encryption KMS keys.
+ #
+ # Specify the same algorithm that was used to encrypt the ciphertext. If
+ # you specify a different algorithm, the decrypt attempt fails.
+ #
+ # This parameter is required only when the ciphertext was encrypted
+ # under an asymmetric KMS key.
+ #
+ # @option params [String] :destination_encryption_algorithm
+ # Specifies the encryption algorithm that KMS will use to reencrypt the
+ # data after it has decrypted it. The default value,
+ # `SYMMETRIC_DEFAULT`, represents the encryption algorithm used for
+ # symmetric encryption KMS keys.
+ #
+ # This parameter is required only when the destination KMS key is an
+ # asymmetric KMS key.
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @return [Types::ReEncryptResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ReEncryptResponse#ciphertext_blob #ciphertext_blob} => String
+ # * {Types::ReEncryptResponse#source_key_id #source_key_id} => String
+ # * {Types::ReEncryptResponse#key_id #key_id} => String
+ # * {Types::ReEncryptResponse#source_encryption_algorithm #source_encryption_algorithm} => String
+ # * {Types::ReEncryptResponse#destination_encryption_algorithm #destination_encryption_algorithm} => String
+ #
+ #
+ # @example Example: To reencrypt data
+ #
+ # # The following example reencrypts data with the specified KMS key.
+ #
+ # resp = client.re_encrypt({
+ # ciphertext_blob: "", # The data to reencrypt.
+ # destination_key_id: "0987dcba-09fe-87dc-65ba-ab0987654321", # The identifier of the KMS key to use to reencrypt the data. You can use any valid key identifier.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # ciphertext_blob: "", # The reencrypted data.
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The ARN of the KMS key that was used to reencrypt the data.
+ # source_key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key that was originally used to encrypt the data.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.re_encrypt({
+ # ciphertext_blob: "data", # required
+ # source_encryption_context: {
+ # "EncryptionContextKey" => "EncryptionContextValue",
+ # },
+ # source_key_id: "KeyIdType",
+ # destination_key_id: "KeyIdType", # required
+ # destination_encryption_context: {
+ # "EncryptionContextKey" => "EncryptionContextValue",
+ # },
+ # source_encryption_algorithm: "SYMMETRIC_DEFAULT", # accepts SYMMETRIC_DEFAULT, RSAES_OAEP_SHA_1, RSAES_OAEP_SHA_256, SM2PKE
+ # destination_encryption_algorithm: "SYMMETRIC_DEFAULT", # accepts SYMMETRIC_DEFAULT, RSAES_OAEP_SHA_1, RSAES_OAEP_SHA_256, SM2PKE
+ # grant_tokens: ["GrantTokenType"],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.ciphertext_blob #=> String
+ # resp.source_key_id #=> String
+ # resp.key_id #=> String
+ # resp.source_encryption_algorithm #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE"
+ # resp.destination_encryption_algorithm #=> String, one of "SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt AWS API Documentation
+ #
+ # @overload re_encrypt(params = {})
+ # @param [Hash] params ({})
+ def re_encrypt(params = {}, options = {})
+ req = build_request(:re_encrypt, params)
+ req.send_request(options)
+ end
+
+ # Replicates a multi-Region key into the specified Region. This
+ # operation creates a multi-Region replica key based on a multi-Region
+ # primary key in a different Region of the same Amazon Web Services
+ # partition. You can create multiple replicas of a primary key, but each
+ # must be in a different Region. To create a multi-Region primary key,
+ # use the CreateKey operation.
+ #
+ # This operation supports *multi-Region keys*, a KMS feature that lets
+ # you create multiple interoperable KMS keys in different Amazon Web
+ # Services Regions.
Because these KMS keys have the same key ID, key
+ # material, and other metadata, you can use them interchangeably to
+ # encrypt data in one Amazon Web Services Region and decrypt it in a
+ # different Amazon Web Services Region without re-encrypting the data or
+ # making a cross-Region call. For more information about multi-Region
+ # keys, see [Multi-Region keys in KMS][1] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # A *replica key* is a fully-functional KMS key that can be used
+ # independently of its primary and peer replica keys. A primary key and
+ # its replica keys share properties that make them interoperable. They
+ # have the same [key ID][2] and key material. They also have the same
+ # [key spec][3], [key usage][4], [key material origin][5], and
+ # [automatic key rotation status][6]. KMS automatically synchronizes
+ # these shared properties among related multi-Region keys. All other
+ # properties of a replica key can differ, including its [key policy][7],
+ # [tags][8], [aliases][9], and [Key states of KMS keys][10]. KMS pricing
+ # and quotas for KMS keys apply to each primary key and replica key.
+ #
+ # When this operation completes, the new replica key has a transient key
+ # state of `Creating`. This key state changes to `Enabled` (or
+ # `PendingImport`) after a few seconds when the process of creating the
+ # new replica key is complete. While the key state is `Creating`, you
+ # can manage the key, but you cannot yet use it in cryptographic
+ # operations. If you are creating and using the replica key
+ # programmatically, retry on `KMSInvalidStateException` or call
+ # `DescribeKey` to check its `KeyState` value before using it. For
+ # details about the `Creating` key state, see [Key states of KMS
+ # keys][10] in the *Key Management Service Developer Guide*.
+ #
+ # You cannot create more than one replica of a primary key in any
+ # Region. If the Region already includes a replica of the key you're
+ # trying to replicate, `ReplicateKey` returns an
+ # `AlreadyExistsException` error. If the key state of the existing
+ # replica is `PendingDeletion`, you can cancel the scheduled key
+ # deletion (CancelKeyDeletion) or wait for the key to be deleted. The
+ # new replica key you create will have the same [shared properties][11]
+ # as the original replica key.
+ #
+ # The CloudTrail log of a `ReplicateKey` operation records a
+ # `ReplicateKey` operation in the primary key's Region and a CreateKey
+ # operation in the replica key's Region.
+ #
+ # If you replicate a multi-Region primary key with imported key
+ # material, the replica key is created with no key material. You must
+ # import the same key material that you imported into the primary key.
+ # For details, see [Importing key material into multi-Region
+ # keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html)
+ # in the *Key Management Service Developer Guide*.
+ #
+ # To convert a replica key to a primary key, use the UpdatePrimaryRegion
+ # operation.
+ #
+ # `ReplicateKey` uses different default values for the `KeyPolicy` and
+ # `Tags` parameters than those used in the KMS console. For details, see
+ # the parameter descriptions.
+ #
+ #
+ #
+ # **Cross-account use**\: No. You cannot use this operation to create a
+ # replica key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\:
+ #
+ # * `kms:ReplicateKey` on the primary key (in the primary key's
+ # Region). Include this permission in the primary key's key policy.
+ # + # * `kms:CreateKey` in an IAM policy in the replica Region. + # + # * To use the `Tags` parameter, `kms:TagResource` in an IAM policy in + # the replica Region. + # + # **Related operations** + # + # * CreateKey + # + # * UpdatePrimaryRegion + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties + # + # @option params [required, String] :key_id + # Identifies the multi-Region primary key that is being replicated. To + # determine whether a KMS key is a multi-Region primary key, use the + # DescribeKey operation to check the value of the `MultiRegionKeyType` + # property. + # + # Specify the key ID or key ARN of a multi-Region primary key. + # + # For example: + # + # * Key ID: `mrk-1234abcd12ab34cd56ef1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, String] :replica_region + # The Region ID of the Amazon Web Services Region for this replica key. + # + # Enter the Region ID, such as `us-east-1` or `ap-southeast-2`. For a + # list of Amazon Web Services Regions in which KMS is supported, see + # [KMS service endpoints][1] in the *Amazon Web Services General + # Reference*. + # + # HMAC KMS keys are not supported in all Amazon Web Services Regions. If + # you try to replicate an HMAC KMS key in an Amazon Web Services Region + # in which HMAC keys are not supported, the `ReplicateKey` operation + # returns an `UnsupportedOperationException`. For a list of Regions in + # which HMAC KMS keys are supported, see [HMAC keys in KMS][2] in the + # *Key Management Service Developer Guide*. + # + # + # + # The replica must be in a different Amazon Web Services Region than its + # primary key and other replicas of that primary key, but in the same + # Amazon Web Services partition. KMS must be available in the replica + # Region. If the Region is not enabled by default, the Amazon Web + # Services account must be enabled in the Region. For information about + # Amazon Web Services partitions, see [Amazon Resource Names (ARNs)][3] + # in the *Amazon Web Services General Reference*. For information about + # enabling and disabling Regions, see [Enabling a Region][4] and + # [Disabling a Region][5] in the *Amazon Web Services General + # Reference*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html
+ # [3]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ # [4]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable
+ #
+ # @option params [String] :policy
+ # The key policy to attach to the KMS key. This parameter is optional.
+ # If you do not provide a key policy, KMS attaches the [default key
+ # policy][1] to the KMS key.
+ #
+ # The key policy is not a shared property of multi-Region keys. You can
+ # specify the same key policy or a different key policy for each key in
+ # a set of related multi-Region keys. KMS does not synchronize this
+ # property.
+ #
+ # If you provide a key policy, it must meet the following criteria:
+ #
+ # * The key policy must allow the calling principal to make a subsequent
+ # `PutKeyPolicy` request on the KMS key. This reduces the risk that
+ # the KMS key becomes unmanageable. For more information, see [Default
+ # key policy][2] in the *Key Management Service Developer Guide*. (To
+ # omit this condition, set `BypassPolicyLockoutSafetyCheck` to true.)
+ #
+ # * Each statement in the key policy must contain one or more
+ # principals. The principals in the key policy must exist and be
+ # visible to KMS. When you create a new Amazon Web Services principal,
+ # you might need to enforce a delay before including the new principal
+ # in a key policy because the new principal might not be immediately
+ # visible to KMS. For more information, see [Changes that I make are
+ # not always immediately visible][3] in the *Amazon Web Services
+ # Identity and Access Management User Guide*.
+ #
+ # A key policy document can include only the following characters:
+ #
+ # * Printable ASCII characters from the space character (`\u0020`)
+ # through the end of the ASCII character range.
+ #
+ # * Printable characters in the Basic Latin and Latin-1 Supplement
+ # character set (through `\u00FF`).
+ #
+ # * The tab (`\u0009`), line feed (`\u000A`), and carriage return
+ # (`\u000D`) special characters
+ #
+ # For information about key policies, see [Key policies in KMS][4] in
+ # the *Key Management Service Developer Guide*. For help writing and
+ # formatting a JSON policy document, see the [IAM JSON Policy
+ # Reference][5] in the *Identity and Access Management User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html
+ # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ #
+ # @option params [Boolean] :bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The default
+ # value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key becomes
+ # unmanageable. Do not set this value to true indiscriminately.
+ #
+ # For more information, see [Default key policy][1] in the *Key
+ # Management Service Developer Guide*.
+ # + # Use this parameter only when you intend to prevent the principal that + # is making the request from making a subsequent PutKeyPolicy request on + # the KMS key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + # + # @option params [String] :description + # A description of the KMS key. The default value is an empty string (no + # description). + # + # The description is not a shared property of multi-Region keys. You can + # specify the same description or a different description for each key + # in a set of related multi-Region keys. KMS does not synchronize this + # property. + # + # @option params [Array] :tags + # Assigns one or more tags to the replica key. Use this parameter to tag + # the KMS key when it is created. To tag an existing KMS key, use the + # TagResource operation. + # + # Tagging or untagging a KMS key can allow or deny permission to the KMS + # key. For details, see [ABAC for KMS][1] in the *Key Management Service + # Developer Guide*. + # + # + # + # To use this parameter, you must have [kms:TagResource][2] permission + # in an IAM policy. + # + # Tags are not a shared property of multi-Region keys. You can specify + # the same tags or different tags for each key in a set of related + # multi-Region keys. KMS does not synchronize this property. + # + # Each tag consists of a tag key and a tag value. Both the tag key and + # the tag value are required, but the tag value can be an empty (null) + # string. You cannot have more than one tag on a KMS key with the same + # tag key. If you specify an existing tag key with a different tag + # value, KMS replaces the current tag value with the specified one. + # + # When you add tags to an Amazon Web Services resource, Amazon Web + # Services generates a cost allocation report with usage and costs + # aggregated by tags. Tags can also be used to control access to a KMS + # key. For details, see [Tagging Keys][3]. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + # + # @return [Types::ReplicateKeyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ReplicateKeyResponse#replica_key_metadata #replica_key_metadata} => Types::KeyMetadata + # * {Types::ReplicateKeyResponse#replica_policy #replica_policy} => String + # * {Types::ReplicateKeyResponse#replica_tags #replica_tags} => Array<Types::Tag> + # + # + # @example Example: To replicate a multi-Region key in a different AWS Region + # + # # This example creates a multi-Region replica key in us-west-2 of a multi-Region primary key in us-east-1. + # + # resp = client.replicate_key({ + # key_id: "arn:aws:kms:us-east-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", # The key ID or key ARN of the multi-Region primary key + # replica_region: "us-west-2", # The Region of the new replica. 
+ # }) + # + # resp.to_h outputs the following: + # { + # replica_key_metadata: { + # aws_account_id: "111122223333", + # arn: "arn:aws:kms:us-west-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # creation_date: Time.parse(1607472987.918), + # customer_master_key_spec: "SYMMETRIC_DEFAULT", + # description: "", + # enabled: true, + # encryption_algorithms: [ + # "SYMMETRIC_DEFAULT", + # ], + # key_id: "mrk-1234abcd12ab34cd56ef1234567890ab", + # key_manager: "CUSTOMER", + # key_state: "Enabled", + # key_usage: "ENCRYPT_DECRYPT", + # multi_region: true, + # multi_region_configuration: { + # multi_region_key_type: "REPLICA", + # primary_key: { + # arn: "arn:aws:kms:us-east-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "us-east-1", + # }, + # replica_keys: [ + # { + # arn: "arn:aws:kms:us-west-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", + # region: "us-west-2", + # }, + # ], + # }, + # origin: "AWS_KMS", + # }, # An object that displays detailed information about the replica key. + # replica_policy: "{\n \"Version\" : \"2012-10-17\",\n \"Id\" : \"key-default-1\",...}", # The key policy of the replica key. If you don't specify a key policy, the replica key gets the default key policy for a KMS key. + # replica_tags: [ + # ], # The tags on the replica key, if any. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.replicate_key({ + # key_id: "KeyIdType", # required + # replica_region: "RegionType", # required + # policy: "PolicyType", + # bypass_policy_lockout_safety_check: false, + # description: "DescriptionType", + # tags: [ + # { + # tag_key: "TagKeyType", # required + # tag_value: "TagValueType", # required + # }, + # ], + # }) + # + # @example Response structure + # + # resp.replica_key_metadata.aws_account_id #=> String + # resp.replica_key_metadata.key_id #=> String + # resp.replica_key_metadata.arn #=> String + # resp.replica_key_metadata.creation_date #=> Time + # resp.replica_key_metadata.enabled #=> Boolean + # resp.replica_key_metadata.description #=> String + # resp.replica_key_metadata.key_usage #=> String, one of "SIGN_VERIFY", "ENCRYPT_DECRYPT", "GENERATE_VERIFY_MAC" + # resp.replica_key_metadata.key_state #=> String, one of "Creating", "Enabled", "Disabled", "PendingDeletion", "PendingImport", "PendingReplicaDeletion", "Unavailable", "Updating" + # resp.replica_key_metadata.deletion_date #=> Time + # resp.replica_key_metadata.valid_to #=> Time + # resp.replica_key_metadata.origin #=> String, one of "AWS_KMS", "EXTERNAL", "AWS_CLOUDHSM", "EXTERNAL_KEY_STORE" + # resp.replica_key_metadata.custom_key_store_id #=> String + # resp.replica_key_metadata.cloud_hsm_cluster_id #=> String + # resp.replica_key_metadata.expiration_model #=> String, one of "KEY_MATERIAL_EXPIRES", "KEY_MATERIAL_DOES_NOT_EXPIRE" + # resp.replica_key_metadata.key_manager #=> String, one of "AWS", "CUSTOMER" + # resp.replica_key_metadata.customer_master_key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.replica_key_metadata.key_spec #=> String, one of "RSA_2048", "RSA_3072", "RSA_4096", "ECC_NIST_P256", "ECC_NIST_P384", "ECC_NIST_P521", "ECC_SECG_P256K1", "SYMMETRIC_DEFAULT", "HMAC_224", "HMAC_256", "HMAC_384", "HMAC_512", "SM2" + # resp.replica_key_metadata.encryption_algorithms #=> Array + # resp.replica_key_metadata.encryption_algorithms[0] #=> String, one of 
"SYMMETRIC_DEFAULT", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", "SM2PKE" + # resp.replica_key_metadata.signing_algorithms #=> Array + # resp.replica_key_metadata.signing_algorithms[0] #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA" + # resp.replica_key_metadata.multi_region #=> Boolean + # resp.replica_key_metadata.multi_region_configuration.multi_region_key_type #=> String, one of "PRIMARY", "REPLICA" + # resp.replica_key_metadata.multi_region_configuration.primary_key.arn #=> String + # resp.replica_key_metadata.multi_region_configuration.primary_key.region #=> String + # resp.replica_key_metadata.multi_region_configuration.replica_keys #=> Array + # resp.replica_key_metadata.multi_region_configuration.replica_keys[0].arn #=> String + # resp.replica_key_metadata.multi_region_configuration.replica_keys[0].region #=> String + # resp.replica_key_metadata.pending_deletion_window_in_days #=> Integer + # resp.replica_key_metadata.mac_algorithms #=> Array + # resp.replica_key_metadata.mac_algorithms[0] #=> String, one of "HMAC_SHA_224", "HMAC_SHA_256", "HMAC_SHA_384", "HMAC_SHA_512" + # resp.replica_key_metadata.xks_key_configuration.id #=> String + # resp.replica_policy #=> String + # resp.replica_tags #=> Array + # resp.replica_tags[0].tag_key #=> String + # resp.replica_tags[0].tag_value #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKey AWS API Documentation + # + # @overload replicate_key(params = {}) + # @param [Hash] params ({}) + def replicate_key(params = {}, options = {}) + req = build_request(:replicate_key, params) + req.send_request(options) + end + + # Deletes a grant. Typically, you retire a grant when you no longer need + # its permissions. To identify the grant to retire, use a [grant + # token][1], or both the grant ID and a key identifier (key ID or key + # ARN) of the KMS key. The CreateGrant operation returns both values. + # + # This operation can be called by the *retiring principal* for a grant, + # by the *grantee principal* if the grant allows the `RetireGrant` + # operation, and by the Amazon Web Services account in which the grant + # is created. It can also be called by principals to whom permission for + # retiring a grant is delegated. For details, see [Retiring and revoking + # grants][2] in the *Key Management Service Developer Guide*. + # + # For detailed information about grants, including grant terminology, + # see [Grants in KMS][3] in the Key Management Service Developer + # Guide . For examples of working with grants in several + # programming languages, see [Programming grants][4]. + # + # **Cross-account use**\: Yes. You can retire a grant on a KMS key in a + # different Amazon Web Services account. + # + # **Required permissions:**\:Permission to retire a grant is determined + # primarily by the grant. For details, see [Retiring and revoking + # grants][2] in the *Key Management Service Developer Guide*. 
+ #
+ # **Related operations:**
+ #
+ # * CreateGrant
+ #
+ # * ListGrants
+ #
+ # * ListRetirableGrants
+ #
+ # * RevokeGrant
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html
+ #
+ # @option params [String] :grant_token
+ # Identifies the grant to be retired. You can use a grant token to
+ # identify a new grant even before it has achieved eventual consistency.
+ #
+ # Only the CreateGrant operation returns a grant token. For details, see
+ # [Grant token][1] and [Eventual consistency][2] in the *Key Management
+ # Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency
+ #
+ # @option params [String] :key_id
+ # The key ARN of the KMS key associated with the grant. To find the key
+ # ARN, use the ListKeys operation.
+ #
+ # For example:
+ # `arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # @option params [String] :grant_id
+ # Identifies the grant to retire. To get the grant ID, use CreateGrant,
+ # ListGrants, or ListRetirableGrants.
+ #
+ # * Grant ID Example -
+ # 0123456789012345678901234567890123456789012345678901234567890123
+ #
+ # ^
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To retire a grant
+ #
+ # # The following example retires a grant.
+ #
+ # resp = client.retire_grant({
+ # grant_id: "0c237476b39f8bc44e45212e08498fbe3151305030726c0590dd8d3e9f3d6a60", # The identifier of the grant to retire.
+ # key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The Amazon Resource Name (ARN) of the KMS key associated with the grant.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.retire_grant({
+ # grant_token: "GrantTokenType",
+ # key_id: "KeyIdType",
+ # grant_id: "GrantIdType",
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant AWS API Documentation
+ #
+ # @overload retire_grant(params = {})
+ # @param [Hash] params ({})
+ def retire_grant(params = {}, options = {})
+ req = build_request(:retire_grant, params)
+ req.send_request(options)
+ end
+
+ # Deletes the specified grant. You revoke a grant to terminate the
+ # permissions that the grant allows. For more information, see [Retiring
+ # and revoking grants][1] in the *Key Management Service Developer
+ # Guide*.
+ #
+ # When you create, retire, or revoke a grant, there might be a brief
+ # delay, usually less than five minutes, until the grant is available
+ # throughout KMS. This state is known as *eventual consistency*. For
+ # details, see [Eventual consistency][2] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # For detailed information about grants, including grant terminology,
+ # see [Grants in KMS][3] in the *Key Management Service Developer
+ # Guide*. For examples of working with grants in several
+ # programming languages, see [Programming grants][4].
+ #
+ # **Cross-account use**\: Yes. To perform this operation on a KMS key in
+ # a different Amazon Web Services account, specify the key ARN in the
+ # value of the `KeyId` parameter.
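+ #
+ # For example, to revoke a grant on a KMS key in another Amazon Web
+ # Services account, pass the key ARN rather than a bare key ID. A
+ # minimal sketch (both values are placeholders):
+ #
+ # client.revoke_grant({
+ # key_id: "arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab", # placeholder key ARN
+ # grant_id: "0123456789012345678901234567890123456789012345678901234567890123", # placeholder grant ID
+ # })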
+ # + # **Required permissions**\: [kms:RevokeGrant][5] (key policy). + # + # **Related operations:** + # + # * CreateGrant + # + # * ListGrants + # + # * ListRetirableGrants + # + # * RetireGrant + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # A unique identifier for the KMS key associated with the grant. To get + # the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key in + # a different Amazon Web Services account, you must use the key ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, String] :grant_id + # Identifies the grant to revoke. To get the grant ID, use CreateGrant, + # ListGrants, or ListRetirableGrants. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To revoke a grant + # + # # The following example revokes a grant. + # + # resp = client.revoke_grant({ + # grant_id: "0c237476b39f8bc44e45212e08498fbe3151305030726c0590dd8d3e9f3d6a60", # The identifier of the grant to revoke. + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key associated with the grant. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.revoke_grant({ + # key_id: "KeyIdType", # required + # grant_id: "GrantIdType", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant AWS API Documentation + # + # @overload revoke_grant(params = {}) + # @param [Hash] params ({}) + def revoke_grant(params = {}, options = {}) + req = build_request(:revoke_grant, params) + req.send_request(options) + end + + # Schedules the deletion of a KMS key. By default, KMS applies a waiting + # period of 30 days, but you can specify a waiting period of 7-30 days. + # When this operation is successful, the key state of the KMS key + # changes to `PendingDeletion` and the key can't be used in any + # cryptographic operations. It remains in this state for the duration of + # the waiting period. Before the waiting period ends, you can use + # CancelKeyDeletion to cancel the deletion of the KMS key. After the + # waiting period ends, KMS deletes the KMS key, its key material, and + # all KMS data associated with it, including all aliases that refer to + # it. + # + # Deleting a KMS key is a destructive and potentially dangerous + # operation. When a KMS key is deleted, all data that was encrypted + # under the KMS key is unrecoverable. (The only exception is a + # multi-Region replica key.) To prevent the use of a KMS key without + # deleting it, use DisableKey. + # + # You can schedule the deletion of a multi-Region primary key and its + # replica keys at any time. 
However, KMS will not delete a multi-Region
+ # primary key with existing replica keys. If you schedule the deletion
+ # of a primary key with replicas, its key state changes to
+ # `PendingReplicaDeletion` and it cannot be replicated or used in
+ # cryptographic operations. This status can continue indefinitely. When
+ # the last of its replica keys is deleted (not just scheduled), the key
+ # state of the primary key changes to `PendingDeletion` and its waiting
+ # period (`PendingWindowInDays`) begins. For details, see [Deleting
+ # multi-Region keys][1] in the *Key Management Service Developer Guide*.
+ #
+ # When KMS [deletes a KMS key from a CloudHSM key store][2], it makes a
+ # best effort to delete the associated key material from the associated
+ # CloudHSM cluster. However, you might need to manually [delete the
+ # orphaned key material][3] from the cluster and its backups. [Deleting
+ # a KMS key from an external key store][4] has no effect on the
+ # associated external key. However, for both types of custom key stores,
+ # deleting a KMS key is destructive and irreversible. You cannot decrypt
+ # ciphertext encrypted under the KMS key by using only its associated
+ # external key or CloudHSM key. Also, you cannot recreate a KMS key in
+ # an external key store by creating a new KMS key with the same key
+ # material.
+ #
+ # For more information about scheduling a KMS key for deletion, see
+ # [Deleting KMS keys][5] in the *Key Management Service Developer
+ # Guide*.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][6] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a KMS
+ # key in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: kms:ScheduleKeyDeletion (key policy)
+ #
+ # **Related operations**
+ #
+ # * CancelKeyDeletion
+ #
+ # * DisableKey
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html
+ # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ #
+ # @option params [required, String] :key_id
+ # The unique identifier of the KMS key to delete.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # @option params [Integer] :pending_window_in_days
+ # The waiting period, specified in number of days. After the waiting
+ # period ends, KMS deletes the KMS key.
+ #
+ # If the KMS key is a multi-Region primary key with replica keys, the
+ # waiting period begins when the last of its replica keys is deleted.
+ # Otherwise, the waiting period begins immediately.
+ #
+ # This value is optional. If you include a value, it must be between 7
+ # and 30, inclusive. If you do not include a value, it defaults to 30.
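+ #
+ # For example, a caller that wants the shortest waiting period can
+ # request 7 days and, if needed, cancel the deletion before the window
+ # ends. A minimal sketch (the key ID is a placeholder):
+ #
+ # resp = client.schedule_key_deletion({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # placeholder key ID
+ # pending_window_in_days: 7, # minimum waiting period
+ # })
+ # # Before resp.deletion_date passes, the deletion can still be undone:
+ # client.cancel_key_deletion({ key_id: "1234abcd-12ab-34cd-56ef-1234567890ab" })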
+ #
+ # @return [Types::ScheduleKeyDeletionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ScheduleKeyDeletionResponse#key_id #key_id} => String
+ # * {Types::ScheduleKeyDeletionResponse#deletion_date #deletion_date} => Time
+ # * {Types::ScheduleKeyDeletionResponse#key_state #key_state} => String
+ # * {Types::ScheduleKeyDeletionResponse#pending_window_in_days #pending_window_in_days} => Integer
+ #
+ #
+ # @example Example: To schedule a KMS key for deletion
+ #
+ # # The following example schedules the specified KMS key for deletion.
+ #
+ # resp = client.schedule_key_deletion({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key to schedule for deletion. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # pending_window_in_days: 7, # The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # deletion_date: Time.parse("2016-12-17T16:00:00-08:00"), # The date and time after which KMS deletes the KMS key.
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The ARN of the KMS key that is scheduled for deletion.
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.schedule_key_deletion({
+ # key_id: "KeyIdType", # required
+ # pending_window_in_days: 1,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.key_id #=> String
+ # resp.deletion_date #=> Time
+ # resp.key_state #=> String, one of "Creating", "Enabled", "Disabled", "PendingDeletion", "PendingImport", "PendingReplicaDeletion", "Unavailable", "Updating"
+ # resp.pending_window_in_days #=> Integer
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion AWS API Documentation
+ #
+ # @overload schedule_key_deletion(params = {})
+ # @param [Hash] params ({})
+ def schedule_key_deletion(params = {}, options = {})
+ req = build_request(:schedule_key_deletion, params)
+ req.send_request(options)
+ end
+
+ # Creates a [digital signature][1] for a message or message digest by
+ # using the private key in an asymmetric signing KMS key. To verify the
+ # signature, use the Verify operation, or use the public key in the same
+ # asymmetric KMS key outside of KMS. For information about asymmetric
+ # KMS keys, see [Asymmetric KMS keys][2] in the *Key Management Service
+ # Developer Guide*.
+ #
+ # Digital signatures are generated and verified by using an asymmetric
+ # key pair, such as an RSA or ECC pair that is represented by an
+ # asymmetric KMS key. The key owner (or an authorized user) uses their
+ # private key to sign a message. Anyone with the public key can verify
+ # that the message was signed with that particular private key and that
+ # the message hasn't changed since it was signed.
+ #
+ # To use the `Sign` operation, provide the following information:
+ #
+ # * Use the `KeyId` parameter to identify an asymmetric KMS key with a
+ # `KeyUsage` value of `SIGN_VERIFY`. To get the `KeyUsage` value of a
+ # KMS key, use the DescribeKey operation. The caller must have
+ # `kms:Sign` permission on the KMS key.
+ #
+ # * Use the `Message` parameter to specify the message or message digest
+ # to sign. You can submit messages of up to 4096 bytes. To sign a
+ # larger message, generate a hash digest of the message, and then
+ # provide the hash digest in the `Message` parameter.
To indicate
+ # whether the message is a full message or a digest, use the
+ # `MessageType` parameter.
+ #
+ # * Choose a signing algorithm that is compatible with the KMS key.
+ #
+ # When signing a message, be sure to record the KMS key and the signing
+ # algorithm. This information is required to verify the signature.
+ #
+ # Best practices recommend that you limit the time during which any
+ # signature is effective. This deters an attack where the actor uses a
+ # signed message to establish validity repeatedly or long after the
+ # message is superseded. Signatures do not include a timestamp, but you
+ # can include a timestamp in the signed message to help you detect when
+ # it's time to refresh the signature.
+ #
+ #
+ #
+ # To verify the signature that this operation generates, use the Verify
+ # operation. Or use the GetPublicKey operation to download the public
+ # key and then use the public key to verify the signature outside of
+ # KMS.
+ #
+ # The KMS key that you use for this operation must be in a compatible
+ # key state. For details, see [Key states of KMS keys][3] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # **Cross-account use**\: Yes. To perform this operation with a KMS key
+ # in a different Amazon Web Services account, specify the key ARN or
+ # alias ARN in the value of the `KeyId` parameter.
+ #
+ # **Required permissions**\: [kms:Sign][4] (key policy)
+ #
+ # **Related operations**\: Verify
+ #
+ #
+ #
+ # [1]: https://en.wikipedia.org/wiki/Digital_signature
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :key_id
+ # Identifies an asymmetric KMS key. KMS uses the private key in the
+ # asymmetric KMS key to sign the message. The `KeyUsage` type of the KMS
+ # key must be `SIGN_VERIFY`. To find the `KeyUsage` of a KMS key, use
+ # the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify a
+ # KMS key in a different Amazon Web Services account, you must use the
+ # key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ # @option params [required, String, StringIO, File] :message
+ # Specifies the message or message digest to sign. Messages can be
+ # 0-4096 bytes. To sign a larger message, provide a message digest.
+ #
+ # If you provide a message digest, use the `DIGEST` value of
+ # `MessageType` to prevent the digest from being hashed again while
+ # signing.
+ #
+ # @option params [String] :message_type
+ # Tells KMS whether the value of the `Message` parameter should be
+ # hashed as part of the signing algorithm. Use `RAW` for unhashed
+ # messages; use `DIGEST` for message digests, which are already hashed.
+ #
+ # When the value of `MessageType` is `RAW`, KMS uses the standard
+ # signing algorithm, which begins with a hash function. When the value
+ # is `DIGEST`, KMS skips the hashing step in the signing algorithm.
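+ #
+ # For example, to sign a message larger than 4096 bytes, hash it locally
+ # and submit the digest. A minimal sketch (the alias and the
+ # `large_message` variable are placeholders):
+ #
+ # require "openssl"
+ #
+ # digest = OpenSSL::Digest::SHA256.digest(large_message)
+ # resp = client.sign({
+ # key_id: "alias/ExampleSigningKey", # placeholder alias
+ # message: digest,
+ # message_type: "DIGEST", # tells KMS not to hash the digest again
+ # signing_algorithm: "ECDSA_SHA_256", # must use the same hash as the digest
+ # })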
+ #
+ # Use the `DIGEST` value only when the value of the `Message` parameter
+ # is a message digest. If you use the `DIGEST` value with an unhashed
+ # message, the security of the signing operation can be compromised.
+ #
+ # When the value of `MessageType` is `DIGEST`, the length of the
+ # `Message` value must match the length of hashed messages for the
+ # specified signing algorithm.
+ #
+ # You can submit a message digest and omit the `MessageType` or specify
+ # `RAW` so the digest is hashed again while signing. However, this can
+ # cause verification failures when verifying with a system that assumes
+ # a single hash.
+ #
+ # The hashing algorithm that `Sign` uses is based on the
+ # `SigningAlgorithm` value.
+ #
+ # * Signing algorithms that end in SHA\_256 use the SHA\_256 hashing
+ # algorithm.
+ #
+ # * Signing algorithms that end in SHA\_384 use the SHA\_384 hashing
+ # algorithm.
+ #
+ # * Signing algorithms that end in SHA\_512 use the SHA\_512 hashing
+ # algorithm.
+ #
+ # * SM2DSA uses the SM3 hashing algorithm. For details, see [Offline
+ # verification with SM2 key pairs][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+ #
+ # @option params [Array<String>] :grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*. For
+ # more information, see [Grant token][1] and [Using a grant token][2] in
+ # the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ #
+ # @option params [required, String] :signing_algorithm
+ # Specifies the signing algorithm to use when signing the message.
+ #
+ # Choose an algorithm that is compatible with the type and size of the
+ # specified asymmetric KMS key. When signing with RSA key pairs,
+ # RSASSA-PSS algorithms are preferred. We include RSASSA-PKCS1-v1\_5
+ # algorithms for compatibility with existing applications.
+ #
+ # @return [Types::SignResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::SignResponse#key_id #key_id} => String
+ # * {Types::SignResponse#signature #signature} => String
+ # * {Types::SignResponse#signing_algorithm #signing_algorithm} => String
+ #
+ #
+ # @example Example: To digitally sign a message with an asymmetric KMS key.
+ #
+ # # This operation uses the private key in an asymmetric elliptic curve (ECC) KMS key to generate a digital signature for a
+ # # given message.
+ #
+ # resp = client.sign({
+ # key_id: "alias/ECC_signing_key", # The asymmetric KMS key to be used to generate the digital signature. This example uses an alias of the KMS key.
+ # message: "", # Message to be signed. Use Base-64 for the CLI.
+ # message_type: "RAW", # Indicates whether the message is RAW or a DIGEST.
+ # signing_algorithm: "ECDSA_SHA_384", # The requested signing algorithm. This must be an algorithm that the KMS key supports.
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the asymmetric KMS key that was used to sign the message.
+ # signature: "", # The digital signature of the message. + # signing_algorithm: "ECDSA_SHA_384", # The actual signing algorithm that was used to generate the signature. + # } + # + # @example Example: To digitally sign a message digest with an asymmetric KMS key. + # + # # This operation uses the private key in an asymmetric RSA signing KMS key to generate a digital signature for a message + # # digest. In this example, a large message was hashed and the resulting digest is provided in the Message parameter. To + # # tell KMS not to hash the message again, the MessageType field is set to DIGEST + # + # resp = client.sign({ + # key_id: "alias/RSA_signing_key", # The asymmetric KMS key to be used to generate the digital signature. This example uses an alias of the KMS key. + # message: "", # Message to be signed. Use Base-64 for the CLI. + # message_type: "DIGEST", # Indicates whether the message is RAW or a DIGEST. When it is RAW, KMS hashes the message before signing. When it is DIGEST, KMS skips the hashing step and signs the Message value. + # signing_algorithm: "RSASSA_PKCS1_V1_5_SHA_256", # The requested signing algorithm. This must be an algorithm that the KMS key supports. + # }) + # + # resp.to_h outputs the following: + # { + # key_id: "arn:aws:kms:us-east-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The key ARN of the asymmetric KMS key that was used to sign the message. + # signature: "", # The digital signature of the message. + # signing_algorithm: "RSASSA_PKCS1_V1_5_SHA_256", # The actual signing algorithm that was used to generate the signature. + # } + # + # @example Request syntax with placeholder values + # + # resp = client.sign({ + # key_id: "KeyIdType", # required + # message: "data", # required + # message_type: "RAW", # accepts RAW, DIGEST + # grant_tokens: ["GrantTokenType"], + # signing_algorithm: "RSASSA_PSS_SHA_256", # required, accepts RSASSA_PSS_SHA_256, RSASSA_PSS_SHA_384, RSASSA_PSS_SHA_512, RSASSA_PKCS1_V1_5_SHA_256, RSASSA_PKCS1_V1_5_SHA_384, RSASSA_PKCS1_V1_5_SHA_512, ECDSA_SHA_256, ECDSA_SHA_384, ECDSA_SHA_512, SM2DSA + # }) + # + # @example Response structure + # + # resp.key_id #=> String + # resp.signature #=> String + # resp.signing_algorithm #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign AWS API Documentation + # + # @overload sign(params = {}) + # @param [Hash] params ({}) + def sign(params = {}, options = {}) + req = build_request(:sign, params) + req.send_request(options) + end + + # Adds or edits tags on a [customer managed key][1]. + # + # Tagging or untagging a KMS key can allow or deny permission to the KMS + # key. For details, see [ABAC for KMS][2] in the *Key Management Service + # Developer Guide*. + # + # + # + # Each tag consists of a tag key and a tag value, both of which are + # case-sensitive strings. The tag value can be an empty (null) string. + # To add a tag, specify a new tag key and a tag value. To edit a tag, + # specify an existing tag key and a new tag value. + # + # You can use this operation to tag a [customer managed key][1], but you + # cannot tag an [Amazon Web Services managed key][3], an [Amazon Web + # Services owned key][4], a [custom key store][5], or an [alias][6]. 
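+ #
+ # For example, calling `tag_resource` again with an existing tag key
+ # replaces that tag's value. A minimal sketch (the key ID is a
+ # placeholder):
+ #
+ # client.tag_resource({
+ # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # placeholder key ID
+ # tags: [{ tag_key: "Purpose", tag_value: "Production" }], # overwrites any existing "Purpose" value
+ # })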
+ # + # You can also add tags to a KMS key while creating it (CreateKey) or + # replicating it (ReplicateKey). + # + # For information about using tags in KMS, see [Tagging keys][7]. For + # general information about tags, including the format and syntax, see + # [Tagging Amazon Web Services resources][8] in the *Amazon Web Services + # General Reference*. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][9] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:TagResource][10] (key policy) + # + # **Related operations** + # + # * CreateKey + # + # * ListResourceTags + # + # * ReplicateKey + # + # * UntagResource + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + # [8]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Identifies a customer managed key in the account and Region. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, Array] :tags + # One or more tags. + # + # Each tag consists of a tag key and a tag value. The tag value can be + # an empty (null) string. + # + # You cannot have more than one tag on a KMS key with the same tag key. + # If you specify an existing tag key with a different tag value, KMS + # replaces the current tag value with the specified one. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To tag a KMS key + # + # # The following example tags a KMS key. + # + # resp = client.tag_resource({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key you are tagging. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key. + # tags: [ + # { + # tag_key: "Purpose", + # tag_value: "Test", + # }, + # ], # A list of tags. 
+ # }) + # + # @example Request syntax with placeholder values + # + # resp = client.tag_resource({ + # key_id: "KeyIdType", # required + # tags: [ # required + # { + # tag_key: "TagKeyType", # required + # tag_value: "TagValueType", # required + # }, + # ], + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource AWS API Documentation + # + # @overload tag_resource(params = {}) + # @param [Hash] params ({}) + def tag_resource(params = {}, options = {}) + req = build_request(:tag_resource, params) + req.send_request(options) + end + + # Deletes tags from a [customer managed key][1]. To delete a tag, + # specify the tag key and the KMS key. + # + # Tagging or untagging a KMS key can allow or deny permission to the KMS + # key. For details, see [ABAC for KMS][2] in the *Key Management Service + # Developer Guide*. + # + # + # + # When it succeeds, the `UntagResource` operation doesn't return any + # output. Also, if the specified tag key isn't found on the KMS key, it + # doesn't throw an exception or return a response. To confirm that the + # operation worked, use the ListResourceTags operation. + # + # For information about using tags in KMS, see [Tagging keys][3]. For + # general information about tags, including the format and syntax, see + # [Tagging Amazon Web Services resources][4] in the *Amazon Web Services + # General Reference*. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][5] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:UntagResource][6] (key policy) + # + # **Related operations** + # + # * CreateKey + # + # * ListResourceTags + # + # * ReplicateKey + # + # * TagResource + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + # [4]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Identifies the KMS key from which you are removing tags. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, Array] :tag_keys + # One or more tag keys. Specify only the tag keys, not the tag values. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To remove tags from a KMS key + # + # # The following example removes tags from a KMS key. + # + # resp = client.untag_resource({ + # key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose tags you are removing. + # tag_keys: [ + # "Purpose", + # "CostCenter", + # ], # A list of tag keys. Provide only the tag keys, not the tag values. 
+ # }) + # + # @example Request syntax with placeholder values + # + # resp = client.untag_resource({ + # key_id: "KeyIdType", # required + # tag_keys: ["TagKeyType"], # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource AWS API Documentation + # + # @overload untag_resource(params = {}) + # @param [Hash] params ({}) + def untag_resource(params = {}, options = {}) + req = build_request(:untag_resource, params) + req.send_request(options) + end + + # Associates an existing KMS alias with a different KMS key. Each alias + # is associated with only one KMS key at a time, although a KMS key can + # have multiple aliases. The alias and the KMS key must be in the same + # Amazon Web Services account and Region. + # + # Adding, deleting, or updating an alias can allow or deny permission to + # the KMS key. For details, see [ABAC for KMS][1] in the *Key Management + # Service Developer Guide*. + # + # + # + # The current and new KMS key must be the same type (both symmetric or + # both asymmetric or both HMAC), and they must have the same key usage. + # This restriction prevents errors in code that uses aliases. If you + # must assign an alias to a different type of KMS key, use DeleteAlias + # to delete the old alias and CreateAlias to create a new alias. + # + # You cannot use `UpdateAlias` to change an alias name. To change an + # alias name, use DeleteAlias to delete the old alias and CreateAlias to + # create a new alias. + # + # Because an alias is not a property of a KMS key, you can create, + # update, and delete the aliases of a KMS key without affecting the KMS + # key. Also, aliases do not appear in the response from the DescribeKey + # operation. To get the aliases of all KMS keys in the account, use the + # ListAliases operation. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][2] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions** + # + # * [kms:UpdateAlias][3] on the alias (IAM policy). + # + # * [kms:UpdateAlias][3] on the current KMS key (key policy). + # + # * [kms:UpdateAlias][3] on the new KMS key (key policy). + # + # For details, see [Controlling access to aliases][4] in the *Key + # Management Service Developer Guide*. + # + # **Related operations:** + # + # * CreateAlias + # + # * DeleteAlias + # + # * ListAliases + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access + # + # @option params [required, String] :alias_name + # Identifies the alias that is changing its KMS key. This value must + # begin with `alias/` followed by the alias name, such as + # `alias/ExampleAlias`. You cannot use `UpdateAlias` to change the alias + # name. + # + # @option params [required, String] :target_key_id + # Identifies the [customer managed key][1] to associate with the alias. + # You don't have permission to associate an alias with an [Amazon Web + # Services managed key][2]. + # + # The KMS key must be in the same Amazon Web Services account and Region + # as the alias. 
Also, the new target KMS key must be the same type as
+ # the current target KMS key (both symmetric or both asymmetric or both
+ # HMAC) and they must have the same key usage.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # To verify that the alias is mapped to the correct KMS key, use
+ # ListAliases.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To update an alias
+ #
+ # # The following example updates the specified alias to refer to the specified KMS key.
+ #
+ # resp = client.update_alias({
+ # alias_name: "alias/ExampleAlias", # The alias to update.
+ # target_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key that the alias will refer to after this operation succeeds. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.update_alias({
+ # alias_name: "AliasNameType", # required
+ # target_key_id: "KeyIdType", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias AWS API Documentation
+ #
+ # @overload update_alias(params = {})
+ # @param [Hash] params ({})
+ def update_alias(params = {}, options = {})
+ req = build_request(:update_alias, params)
+ req.send_request(options)
+ end
+
+ # Changes the properties of a custom key store. You can use this
+ # operation to change the properties of a CloudHSM key store or an
+ # external key store.
+ #
+ # Use the required `CustomKeyStoreId` parameter to identify the custom
+ # key store. Use the remaining optional parameters to change its
+ # properties. This operation does not return any property values. To
+ # verify the updated property values, use the DescribeCustomKeyStores
+ # operation.
+ #
+ # This operation is part of the [custom key stores][1] feature in KMS,
+ # which combines the convenience and extensive integration of KMS with
+ # the isolation and control of a key store that you own and manage.
+ #
+ # When updating the properties of an external key store, verify that the
+ # updated settings connect your key store, via the external key store
+ # proxy, to the same external key manager as the previous settings, or
+ # to a backup or snapshot of the external key manager with the same
+ # cryptographic keys. If the updated connection settings fail, you can
+ # fix them and retry, although an extended delay might disrupt Amazon
+ # Web Services services. However, if KMS permanently loses its access to
+ # cryptographic keys, ciphertext encrypted under those keys is
+ # unrecoverable.
+ #
+ # For external key stores:
+ #
+ # Some external key managers provide a simpler method for updating an
+ # external key store. For details, see your external key manager
+ # documentation.
+ #
+ # When updating an external key store in the KMS console, you can upload
+ # a JSON-based proxy configuration file with the desired values. You
+ # cannot upload the proxy configuration file to the
+ # `UpdateCustomKeyStore` operation.
However, you can use the file to
+ # help you determine the correct values for the `UpdateCustomKeyStore`
+ # parameters.
+ #
+ #
+ #
+ # For a CloudHSM key store, you can use this operation to change the
+ # custom key store friendly name (`NewCustomKeyStoreName`), to tell KMS
+ # about a change to the `kmsuser` crypto user password
+ # (`KeyStorePassword`), or to associate the custom key store with a
+ # different, but related, CloudHSM cluster (`CloudHsmClusterId`). To
+ # update any property of a CloudHSM key store, the `ConnectionState` of
+ # the CloudHSM key store must be `DISCONNECTED`.
+ #
+ # For an external key store, you can use this operation to change the
+ # custom key store friendly name (`NewCustomKeyStoreName`), or to tell
+ # KMS about a change to the external key store proxy authentication
+ # credentials (`XksProxyAuthenticationCredential`), connection method
+ # (`XksProxyConnectivity`), external proxy endpoint
+ # (`XksProxyUriEndpoint`) and path (`XksProxyUriPath`). For external key
+ # stores with an `XksProxyConnectivity` of `VPC_ENDPOINT_SERVICE`, you
+ # can also update the Amazon VPC endpoint service name
+ # (`XksProxyVpcEndpointServiceName`). To update most properties of an
+ # external key store, the `ConnectionState` of the external key store
+ # must be `DISCONNECTED`. However, you can update the
+ # `CustomKeyStoreName`, `XksProxyAuthenticationCredential`, and
+ # `XksProxyUriPath` of an external key store when it is in the CONNECTED
+ # or DISCONNECTED state.
+ #
+ # If your update requires a `DISCONNECTED` state, before using
+ # `UpdateCustomKeyStore`, use the DisconnectCustomKeyStore operation to
+ # disconnect the custom key store. After the `UpdateCustomKeyStore`
+ # operation completes, use the ConnectCustomKeyStore operation to
+ # reconnect the custom key store. To find the `ConnectionState` of the
+ # custom key store, use the DescribeCustomKeyStores operation.
+ #
+ #
+ #
+ # Before updating the custom key store, verify that the new values allow
+ # KMS to connect the custom key store to its backing key store. For
+ # example, before you change the `XksProxyUriPath` value, verify that
+ # the external key store proxy is reachable at the new path.
+ #
+ # If the operation succeeds, it returns a JSON object with no
+ # properties.
+ #
+ # **Cross-account use**\: No. You cannot perform this operation on a
+ # custom key store in a different Amazon Web Services account.
+ #
+ # **Required permissions**\: [kms:UpdateCustomKeyStore][2] (IAM policy)
+ #
+ # **Related operations:**
+ #
+ # * ConnectCustomKeyStore
+ #
+ # * CreateCustomKeyStore
+ #
+ # * DeleteCustomKeyStore
+ #
+ # * DescribeCustomKeyStores
+ #
+ # * DisconnectCustomKeyStore
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ #
+ # @option params [required, String] :custom_key_store_id
+ # Identifies the custom key store that you want to update. Enter the ID
+ # of the custom key store. To find the ID of a custom key store, use the
+ # DescribeCustomKeyStores operation.
+ #
+ # @option params [String] :new_custom_key_store_name
+ # Changes the friendly name of the custom key store to the value that
+ # you specify. The custom key store name must be unique in the Amazon
+ # Web Services account.
+ #
+ # To change this value, a CloudHSM key store must be disconnected. An
+ # external key store can be connected or disconnected.
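+ #
+ # As described above, most CloudHSM key store updates require the key
+ # store to be disconnected first. A minimal sketch of the full sequence
+ # (the store ID and password are placeholders):
+ #
+ # client.disconnect_custom_key_store({ custom_key_store_id: "cks-1234567890abcdef0" })
+ # client.update_custom_key_store({
+ # custom_key_store_id: "cks-1234567890abcdef0", # placeholder store ID
+ # key_store_password: "ExamplePassword", # current kmsuser password, placeholder
+ # })
+ # client.connect_custom_key_store({ custom_key_store_id: "cks-1234567890abcdef0" })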
+ # + # @option params [String] :key_store_password + # Enter the current password of the `kmsuser` crypto user (CU) in the + # CloudHSM cluster that is associated with the custom key store. This + # parameter is valid only for custom key stores with a + # `CustomKeyStoreType` of `AWS_CLOUDHSM`. + # + # This parameter tells KMS the current password of the `kmsuser` crypto + # user (CU). It does not set or change the password of any users in the + # CloudHSM cluster. + # + # To change this value, the CloudHSM key store must be disconnected. + # + # @option params [String] :cloud_hsm_cluster_id + # Associates the custom key store with a related CloudHSM cluster. This + # parameter is valid only for custom key stores with a + # `CustomKeyStoreType` of `AWS_CLOUDHSM`. + # + # Enter the cluster ID of the cluster that you used to create the custom + # key store or a cluster that shares a backup history and has the same + # cluster certificate as the original cluster. You cannot use this + # parameter to associate a custom key store with an unrelated cluster. + # In addition, the replacement cluster must [fulfill the + # requirements][1] for a cluster associated with a custom key store. To + # view the cluster certificate of a cluster, use the + # [DescribeClusters][2] operation. + # + # To change this value, the CloudHSM key store must be disconnected. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore + # [2]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # + # @option params [String] :xks_proxy_uri_endpoint + # Changes the URI endpoint that KMS uses to connect to your external key + # store proxy (XKS proxy). This parameter is valid only for custom key + # stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # For external key stores with an `XksProxyConnectivity` value of + # `PUBLIC_ENDPOINT`, the protocol must be HTTPS. + # + # For external key stores with an `XksProxyConnectivity` value of + # `VPC_ENDPOINT_SERVICE`, specify `https://` followed by the private DNS + # name associated with the VPC endpoint service. Each external key store + # must use a different private DNS name. + # + # The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must + # be unique in the Amazon Web Services account and Region. + # + # To change this value, the external key store must be disconnected. + # + # @option params [String] :xks_proxy_uri_path + # Changes the base path to the proxy APIs for this external key store. + # To find this value, see the documentation for your external key + # manager and external key store proxy (XKS proxy). This parameter is + # valid only for custom key stores with a `CustomKeyStoreType` of + # `EXTERNAL_KEY_STORE`. + # + # The value must start with `/` and must end with `/kms/xks/v1`, where + # `v1` represents the version of the KMS external key store proxy API. + # You can include an optional prefix between the required elements such + # as `/example/kms/xks/v1`. + # + # The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must + # be unique in the Amazon Web Services account and Region. + # + # You can change this value when the external key store is connected or + # disconnected. + # + # @option params [String] :xks_proxy_vpc_endpoint_service_name + # Changes the name that KMS uses to identify the Amazon VPC endpoint + # service for your external key store proxy (XKS proxy). 
This parameter
+    #   is valid when the `CustomKeyStoreType` is `EXTERNAL_KEY_STORE` and
+    #   the `XksProxyConnectivity` is `VPC_ENDPOINT_SERVICE`.
+    #
+    #   To change this value, the external key store must be disconnected.
+    #
+    # @option params [Types::XksProxyAuthenticationCredentialType] :xks_proxy_authentication_credential
+    #   Changes the credentials that KMS uses to sign requests to the
+    #   external key store proxy (XKS proxy). This parameter is valid only
+    #   for custom key stores with a `CustomKeyStoreType` of
+    #   `EXTERNAL_KEY_STORE`.
+    #
+    #   You must specify both the `AccessKeyId` and `SecretAccessKey` values
+    #   in the authentication credential, even if you are only updating one
+    #   value.
+    #
+    #   This parameter doesn't establish or change your authentication
+    #   credentials on the proxy. It just tells KMS the credential that you
+    #   established with your external key store proxy. For example, if you
+    #   rotate the credential on your external key store proxy, you can use
+    #   this parameter to update the credential in KMS.
+    #
+    #   You can change this value when the external key store is connected
+    #   or disconnected.
+    #
+    # @option params [String] :xks_proxy_connectivity
+    #   Changes the connectivity setting for the external key store. To
+    #   indicate that the external key store proxy uses an Amazon VPC
+    #   endpoint service to communicate with KMS, specify
+    #   `VPC_ENDPOINT_SERVICE`. Otherwise, specify `PUBLIC_ENDPOINT`.
+    #
+    #   If you change the `XksProxyConnectivity` to `VPC_ENDPOINT_SERVICE`,
+    #   you must also change the `XksProxyUriEndpoint` and add an
+    #   `XksProxyVpcEndpointServiceName` value.
+    #
+    #   If you change the `XksProxyConnectivity` to `PUBLIC_ENDPOINT`, you
+    #   must also change the `XksProxyUriEndpoint` and specify a null or
+    #   empty string for the `XksProxyVpcEndpointServiceName` value.
+    #
+    #   To change this value, the external key store must be disconnected.
+    #
+    # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+    #
+    #
+    # @example Example: To edit the friendly name of a custom key store
+    #
+    #   # This example changes the friendly name of the AWS KMS custom key store to the name that you specify. This operation does
+    #   # not return any data. To verify that the operation worked, use the DescribeCustomKeyStores operation.
+    #
+    #   resp = client.update_custom_key_store({
+    #     custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store that you are updating.
+    #     new_custom_key_store_name: "DevelopmentKeys", # A new friendly name for the custom key store.
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #   }
+    #
+    # @example Example: To edit the password of an AWS CloudHSM key store
+    #
+    #   # This example tells AWS KMS the password for the kmsuser crypto user in the AWS CloudHSM cluster that is associated with
+    #   # the AWS KMS custom key store. (It does not change the password in the CloudHSM cluster.) This operation does not return
+    #   # any data.
+    #
+    #   resp = client.update_custom_key_store({
+    #     custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store that you are updating.
+    #     key_store_password: "ExamplePassword", # The password for the kmsuser crypto user in the CloudHSM cluster.
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #   }
+    #
+    # @example Example: To associate the custom key store with a different, but related, AWS CloudHSM cluster.
+ # + # # This example changes the AWS CloudHSM cluster that is associated with an AWS CloudHSM key store to a related cluster, + # # such as a different backup of the same cluster. This operation does not return any data. To verify that the operation + # # worked, use the DescribeCustomKeyStores operation. + # + # resp = client.update_custom_key_store({ + # cloud_hsm_cluster_id: "cluster-1a23b4cdefg", # The ID of the AWS CloudHSM cluster that you want to associate with the custom key store. This cluster must be related to the original CloudHSM cluster for this key store. + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store that you are updating. + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Example: To update the proxy authentication credential of an external key store + # + # # To update the proxy authentication credential for your external key store, specify both the + # # RawSecretAccessKey and the AccessKeyId, even if you are changing only one of the values. You + # # can use this feature to fix an invalid credential or to change the credential when the external key store proxy rotates + # # it. + # + # resp = client.update_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # Identifies the custom key store + # xks_proxy_authentication_credential: { + # access_key_id: "ABCDE12345670EXAMPLE", + # raw_secret_access_key: "DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo=", + # }, # Specifies the values in the proxy authentication credential + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Example: To edit the proxy URI path of an external key store. + # + # # This example updates the proxy URI path for an external key store + # + # resp = client.update_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # The ID of the custom key store that you are updating + # xks_proxy_uri_path: "/new-path/kms/xks/v1", # The URI path to the external key store proxy APIs + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Example: To update the proxy connectivity of an external key store to VPC_ENDPOINT_SERVICE + # + # # To change the external key store proxy connectivity option from public endpoint connectivity to VPC endpoint service + # # connectivity, in addition to changing the XksProxyConnectivity value, you must change the + # # XksProxyUriEndpoint value to reflect the private DNS name associated with the VPC endpoint service. You + # # must also add an XksProxyVpcEndpointServiceName value. 
+ # + # resp = client.update_custom_key_store({ + # custom_key_store_id: "cks-1234567890abcdef0", # Identifies the custom key store + # xks_proxy_connectivity: "VPC_ENDPOINT_SERVICE", # Specifies the connectivity option + # xks_proxy_uri_endpoint: "https://myproxy-private.xks.example.com", # Specifies the URI endpoint that AWS KMS uses when communicating with the external key store proxy + # xks_proxy_vpc_endpoint_service_name: "com.amazonaws.vpce.us-east-1.vpce-svc-example", # Specifies the name of the VPC endpoint service that the proxy uses for communication + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.update_custom_key_store({ + # custom_key_store_id: "CustomKeyStoreIdType", # required + # new_custom_key_store_name: "CustomKeyStoreNameType", + # key_store_password: "KeyStorePasswordType", + # cloud_hsm_cluster_id: "CloudHsmClusterIdType", + # xks_proxy_uri_endpoint: "XksProxyUriEndpointType", + # xks_proxy_uri_path: "XksProxyUriPathType", + # xks_proxy_vpc_endpoint_service_name: "XksProxyVpcEndpointServiceNameType", + # xks_proxy_authentication_credential: { + # access_key_id: "XksProxyAuthenticationAccessKeyIdType", # required + # raw_secret_access_key: "XksProxyAuthenticationRawSecretAccessKeyType", # required + # }, + # xks_proxy_connectivity: "PUBLIC_ENDPOINT", # accepts PUBLIC_ENDPOINT, VPC_ENDPOINT_SERVICE + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore AWS API Documentation + # + # @overload update_custom_key_store(params = {}) + # @param [Hash] params ({}) + def update_custom_key_store(params = {}, options = {}) + req = build_request(:update_custom_key_store, params) + req.send_request(options) + end + + # Updates the description of a KMS key. To see the description of a KMS + # key, use DescribeKey. + # + # The KMS key that you use for this operation must be in a compatible + # key state. For details, see [Key states of KMS keys][1] in the *Key + # Management Service Developer Guide*. + # + # **Cross-account use**\: No. You cannot perform this operation on a KMS + # key in a different Amazon Web Services account. + # + # **Required permissions**\: [kms:UpdateKeyDescription][2] (key policy) + # + # **Related operations** + # + # * CreateKey + # + # * DescribeKey + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # + # @option params [required, String] :key_id + # Updates the description of the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # @option params [required, String] :description + # New description for the KMS key. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To update the description of a KMS key + # + # # The following example updates the description of the specified KMS key. + # + # resp = client.update_key_description({ + # description: "Example description that indicates the intended use of this KMS key.", # The updated description. 
+    #     key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The identifier of the KMS key whose description you are updating. You can use the key ID or the Amazon Resource Name (ARN) of the KMS key.
+    #   })
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.update_key_description({
+    #     key_id: "KeyIdType", # required
+    #     description: "DescriptionType", # required
+    #   })
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription AWS API Documentation
+    #
+    # @overload update_key_description(params = {})
+    # @param [Hash] params ({})
+    def update_key_description(params = {}, options = {})
+      req = build_request(:update_key_description, params)
+      req.send_request(options)
+    end
+
+    # Changes the primary key of a multi-Region key.
+    #
+    # This operation changes the replica key in the specified Region to a
+    # primary key and changes the former primary key to a replica key. For
+    # example, suppose you have a primary key in `us-east-1` and a replica
+    # key in `eu-west-2`. If you run `UpdatePrimaryRegion` with a
+    # `PrimaryRegion` value of `eu-west-2`, the primary key is now the key
+    # in `eu-west-2`, and the key in `us-east-1` becomes a replica key. For
+    # details, see [Updating the primary Region][1] in the *Key Management
+    # Service Developer Guide*.
+    #
+    # This operation supports *multi-Region keys*, a KMS feature that lets
+    # you create multiple interoperable KMS keys in different Amazon Web
+    # Services Regions. Because these KMS keys have the same key ID, key
+    # material, and other metadata, you can use them interchangeably to
+    # encrypt data in one Amazon Web Services Region and decrypt it in a
+    # different Amazon Web Services Region without re-encrypting the data
+    # or making a cross-Region call. For more information about
+    # multi-Region keys, see [Multi-Region keys in KMS][2] in the *Key
+    # Management Service Developer Guide*.
+    #
+    # The *primary key* of a multi-Region key is the source for properties
+    # that are always shared by primary and replica keys, including the key
+    # material, [key ID][3], [key spec][4], [key usage][5], [key material
+    # origin][6], and [automatic key rotation][7]. It's the only key that
+    # can be replicated. You cannot [delete the primary key][8] until all
+    # replica keys are deleted.
+    #
+    # The key ID and primary Region that you specify uniquely identify the
+    # replica key that will become the primary key. The primary Region must
+    # already have a replica key. This operation does not create a KMS key
+    # in the specified Region. To find the replica keys, use the
+    # DescribeKey operation on the primary key or any replica key. To
+    # create a replica key, use the ReplicateKey operation.
+    #
+    # You can run this operation while using the affected multi-Region keys
+    # in cryptographic operations. This operation should not delay,
+    # interrupt, or cause failures in cryptographic operations.
+    #
+    # Even after this operation completes, the process of updating the
+    # primary Region might still be in progress for a few more seconds.
+    # Operations such as `DescribeKey` might display both the old and new
+    # primary keys as replicas. The old and new primary keys have a
+    # transient key state of `Updating`. The original key state is restored
+    # when the update is complete. While the key state is `Updating`, you
+    # can use the keys in cryptographic operations, but you cannot
+    # replicate the new primary key or perform certain management
+    # operations, such as enabling or disabling these keys. For details
+    # about the `Updating` key state, see [Key states of KMS keys][9] in
+    # the *Key Management Service Developer Guide*.
+    #
+    # This operation does not return any output. To verify that the
+    # primary key is changed, use the DescribeKey operation.
+    #
+    # **Cross-account use**\: No. You cannot use this operation in a
+    # different Amazon Web Services account.
+    #
+    # **Required permissions**\:
+    #
+    # * `kms:UpdatePrimaryRegion` on the current primary key (in the
+    #   primary key's Region). Include this permission in the primary
+    #   key's key policy.
+    #
+    # * `kms:UpdatePrimaryRegion` on the current replica key (in the
+    #   replica key's Region). Include this permission in the replica
+    #   key's key policy.
+    #
+    # **Related operations**
+    #
+    # * CreateKey
+    #
+    # * ReplicateKey
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update
+    # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html
+    # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id
+    # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec
+    # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage
+    # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin
+    # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+    # [8]: https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html
+    # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+    #
+    # @option params [required, String] :key_id
+    #   Identifies the current primary key. When the operation completes,
+    #   this KMS key will be a replica key.
+    #
+    #   Specify the key ID or key ARN of a multi-Region primary key.
+    #
+    #   For example:
+    #
+    #   * Key ID: `mrk-1234abcd12ab34cd56ef1234567890ab`
+    #
+    #   * Key ARN:
+    #     `arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab`
+    #
+    #   To get the key ID and key ARN for a KMS key, use ListKeys or
+    #   DescribeKey.
+    #
+    # @option params [required, String] :primary_region
+    #   The Amazon Web Services Region of the new primary key. Enter the
+    #   Region ID, such as `us-east-1` or `ap-southeast-2`. There must be
+    #   an existing replica key in this Region.
+    #
+    #   When the operation completes, the multi-Region key in this Region
+    #   will be the primary key.
+    #
+    # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+    #
+    #
+    # @example Example: To update the primary Region of a multi-Region KMS key
+    #
+    #   # The following UpdatePrimaryRegion example changes the multi-Region replica key in the eu-central-1 Region to the primary
+    #   # key. The current primary key in the us-west-1 Region becomes a replica key.
+    #   # The KeyId parameter identifies the current primary key in the us-west-1 Region. The PrimaryRegion parameter indicates
+    #   # the Region of the replica key that will become the new primary key.
+    #   # This operation does not return any output. To verify that the primary key is changed, use the DescribeKey operation.
+    #
+    #   resp = client.update_primary_region({
+    #     key_id: "arn:aws:kms:us-west-1:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab", # The current primary key.
+    #     primary_region: "eu-central-1", # The Region of the replica key that will become the primary key.
+    #   })
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.update_primary_region({
+    #     key_id: "KeyIdType", # required
+    #     primary_region: "RegionType", # required
+    #   })
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegion AWS API Documentation
+    #
+    # @overload update_primary_region(params = {})
+    # @param [Hash] params ({})
+    def update_primary_region(params = {}, options = {})
+      req = build_request(:update_primary_region, params)
+      req.send_request(options)
+    end
+
+    # Verifies a digital signature that was generated by the Sign operation.
+    #
+    #
+    #
+    # Verification confirms that an authorized user signed the message with
+    # the specified KMS key and signing algorithm, and the message hasn't
+    # changed since it was signed. If the signature is verified, the value
+    # of the `SignatureValid` field in the response is `True`. If the
+    # signature verification fails, the `Verify` operation fails with a
+    # `KMSInvalidSignatureException` exception.
+    #
+    # A digital signature is generated by using the private key in an
+    # asymmetric KMS key. The signature is verified by using the public key
+    # in the same asymmetric KMS key. For information about asymmetric KMS
+    # keys, see [Asymmetric KMS keys][1] in the *Key Management Service
+    # Developer Guide*.
+    #
+    # To use the `Verify` operation, specify the same asymmetric KMS key,
+    # message, and signing algorithm that were used to produce the
+    # signature. The message type does not need to be the same as the one
+    # used for signing, but it must indicate whether the value of the
+    # `Message` parameter should be hashed as part of the verification
+    # process.
+    #
+    # You can also verify the digital signature by using the public key of
+    # the KMS key outside of KMS. Use the GetPublicKey operation to download
+    # the public key in the asymmetric KMS key and then use the public key
+    # to verify the signature outside of KMS (a sketch follows the
+    # cross-account note below). The advantage of using the `Verify`
+    # operation is that it is performed within KMS. As a result, it's easy
+    # to call, the operation is performed within the FIPS boundary, it is
+    # logged in CloudTrail, and you can use key policy and IAM policy to
+    # determine who is authorized to use the KMS key to verify signatures.
+    #
+    # To verify a signature outside of KMS with an SM2 public key (China
+    # Regions only), you must specify the distinguishing ID. By default, KMS
+    # uses `1234567812345678` as the distinguishing ID. For more
+    # information, see [Offline verification with SM2 key pairs][2].
+    #
+    # The KMS key that you use for this operation must be in a compatible
+    # key state. For details, see [Key states of KMS keys][3] in the *Key
+    # Management Service Developer Guide*.
+    #
+    # **Cross-account use**\: Yes. To perform this operation with a KMS key
+    # in a different Amazon Web Services account, specify the key ARN or
+    # alias ARN in the value of the `KeyId` parameter.
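+    #
+    # As an illustrative sketch of the offline path described above (not
+    # one of the generated API examples; the alias, the ECDSA_SHA_256
+    # algorithm, and the local `message` and `signature` variables are
+    # assumptions for this example):
+    #
+    #     require "openssl"
+    #
+    #     # Download the DER-encoded public key of the signing KMS key.
+    #     der = client.get_public_key(key_id: "alias/ExampleSigningKey").public_key
+    #     public_key = OpenSSL::PKey.read(der)
+    #
+    #     # For ECDSA_SHA_256, hash with SHA-256 and verify locally; the
+    #     # signature is the DER-encoded value returned by the Sign operation.
+    #     public_key.verify(OpenSSL::Digest.new("SHA256"), signature, message) #=> true or false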
+    #
+    # **Required permissions**\: [kms:Verify][4] (key policy)
+    #
+    # **Related operations**\: Sign
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+    # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+    # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+    # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+    #
+    # @option params [required, String] :key_id
+    #   Identifies the asymmetric KMS key that will be used to verify the
+    #   signature. This must be the same KMS key that was used to generate
+    #   the signature. If you specify a different KMS key, the signature
+    #   verification fails.
+    #
+    #   To specify a KMS key, use its key ID, key ARN, alias name, or alias
+    #   ARN. When using an alias name, prefix it with `"alias/"`. To specify
+    #   a KMS key in a different Amazon Web Services account, you must use
+    #   the key ARN or alias ARN.
+    #
+    #   For example:
+    #
+    #   * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Key ARN:
+    #     `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Alias name: `alias/ExampleAlias`
+    #
+    #   * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+    #
+    #   To get the key ID and key ARN for a KMS key, use ListKeys or
+    #   DescribeKey. To get the alias name and alias ARN, use ListAliases.
+    #
+    # @option params [required, String, StringIO, File] :message
+    #   Specifies the message that was signed. You can submit a raw message
+    #   of up to 4096 bytes, or a hash digest of the message. If you submit
+    #   a digest, use the `MessageType` parameter with a value of `DIGEST`.
+    #
+    #   If the message specified here is different from the message that was
+    #   signed, the signature verification fails. A message and its hash
+    #   digest are considered to be the same message.
+    #
+    # @option params [String] :message_type
+    #   Tells KMS whether the value of the `Message` parameter should be
+    #   hashed as part of the signing algorithm. Use `RAW` for unhashed
+    #   messages; use `DIGEST` for message digests, which are already hashed.
+    #
+    #   When the value of `MessageType` is `RAW`, KMS uses the standard
+    #   signing algorithm, which begins with a hash function. When the value
+    #   is `DIGEST`, KMS skips the hashing step in the signing algorithm.
+    #
+    #   Use the `DIGEST` value only when the value of the `Message` parameter
+    #   is a message digest. If you use the `DIGEST` value with an unhashed
+    #   message, the security of the verification operation can be
+    #   compromised.
+    #
+    #   When the value of `MessageType` is `DIGEST`, the length of the
+    #   `Message` value must match the length of hashed messages for the
+    #   specified signing algorithm.
+    #
+    #   You can submit a message digest and omit the `MessageType` or specify
+    #   `RAW` so the digest is hashed again while signing. However, if the
+    #   signed message is hashed once while signing, but twice while
+    #   verifying, verification fails, even when the message hasn't changed.
+    #
+    #   The hashing algorithm that `Verify` uses is based on the
+    #   `SigningAlgorithm` value.
+    #
+    #   * Signing algorithms that end in SHA\_256 use the SHA\_256 hashing
+    #     algorithm.
+    #
+    #   * Signing algorithms that end in SHA\_384 use the SHA\_384 hashing
+    #     algorithm.
+    #
+    #   * Signing algorithms that end in SHA\_512 use the SHA\_512 hashing
+    #     algorithm.
+    #
+    #   * SM2DSA uses the SM3 hashing algorithm. For details, see [Offline
+    #     verification with SM2 key pairs][1].
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+    #
+    # @option params [required, String, StringIO, File] :signature
+    #   The signature that the `Sign` operation generated.
+    #
+    # @option params [required, String] :signing_algorithm
+    #   The signing algorithm that was used to sign the message. If you
+    #   submit a different algorithm, the signature verification fails.
+    #
+    # @option params [Array<String>] :grant_tokens
+    #   A list of grant tokens.
+    #
+    #   Use a grant token when your permission to call this operation comes
+    #   from a new grant that has not yet achieved *eventual consistency*.
+    #   For more information, see [Grant token][1] and [Using a grant
+    #   token][2] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+    #   [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+    #
+    # @return [Types::VerifyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::VerifyResponse#key_id #key_id} => String
+    #   * {Types::VerifyResponse#signature_valid #signature_valid} => Boolean
+    #   * {Types::VerifyResponse#signing_algorithm #signing_algorithm} => String
+    #
+    #
+    # @example Example: To use an asymmetric KMS key to verify a digital signature
+    #
+    #   # This operation uses the public key in an elliptic curve (ECC) asymmetric key to verify a digital signature within AWS
+    #   # KMS.
+    #
+    #   resp = client.verify({
+    #     key_id: "alias/ECC_signing_key", # The asymmetric KMS key to be used to verify the digital signature. This example uses an alias to identify the KMS key.
+    #     message: "", # The message that was signed.
+    #     message_type: "RAW", # Indicates whether the message is RAW or a DIGEST.
+    #     signature: "", # The signature to be verified.
+    #     signing_algorithm: "ECDSA_SHA_384", # The signing algorithm to be used to verify the signature.
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     key_id: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the asymmetric KMS key that was used to verify the digital signature.
+    #     signature_valid: true, # A value of 'true' indicates that the signature was verified. If verification fails, the call to Verify fails.
+    #     signing_algorithm: "ECDSA_SHA_384", # The signing algorithm that was used to verify the signature.
+    #   }
+    #
+    # @example Example: To use an asymmetric KMS key to verify a digital signature on a message digest
+    #
+    #   # This operation uses the public key in an RSA asymmetric signing key pair to verify the digital signature of a message
+    #   # digest. Hashing a message into a digest before sending it to KMS lets you verify messages that exceed the 4096-byte
+    #   # message size limit. To indicate that the value of Message is a digest, use the MessageType parameter.
+    #
+    #   resp = client.verify({
+    #     key_id: "arn:aws:kms:us-east-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The asymmetric KMS key to be used to verify the digital signature. This example uses the key ARN to identify the KMS key.
+    #     message: "", # The message that was signed.
+    #     message_type: "DIGEST", # Indicates whether the message is RAW or a DIGEST. When it is RAW, KMS hashes the message before signing. When it is DIGEST, KMS skips the hashing step and signs the Message value.
+    #     signature: "", # The signature to be verified.
+    #     signing_algorithm: "RSASSA_PSS_SHA_512", # The signing algorithm to be used to verify the signature.
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     key_id: "arn:aws:kms:us-east-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321", # The key ARN of the asymmetric KMS key that was used to verify the digital signature.
+    #     signature_valid: true, # A value of 'true' indicates that the signature was verified. If verification fails, the call to Verify fails.
+    #     signing_algorithm: "RSASSA_PSS_SHA_512", # The signing algorithm that was used to verify the signature.
+    #   }
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.verify({
+    #     key_id: "KeyIdType", # required
+    #     message: "data", # required
+    #     message_type: "RAW", # accepts RAW, DIGEST
+    #     signature: "data", # required
+    #     signing_algorithm: "RSASSA_PSS_SHA_256", # required, accepts RSASSA_PSS_SHA_256, RSASSA_PSS_SHA_384, RSASSA_PSS_SHA_512, RSASSA_PKCS1_V1_5_SHA_256, RSASSA_PKCS1_V1_5_SHA_384, RSASSA_PKCS1_V1_5_SHA_512, ECDSA_SHA_256, ECDSA_SHA_384, ECDSA_SHA_512, SM2DSA
+    #     grant_tokens: ["GrantTokenType"],
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.key_id #=> String
+    #   resp.signature_valid #=> Boolean
+    #   resp.signing_algorithm #=> String, one of "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", "RSASSA_PSS_SHA_512", "RSASSA_PKCS1_V1_5_SHA_256", "RSASSA_PKCS1_V1_5_SHA_384", "RSASSA_PKCS1_V1_5_SHA_512", "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", "SM2DSA"
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify AWS API Documentation
+    #
+    # @overload verify(params = {})
+    # @param [Hash] params ({})
+    def verify(params = {}, options = {})
+      req = build_request(:verify, params)
+      req.send_request(options)
+    end
+
+    # Verifies the hash-based message authentication code (HMAC) for a
+    # specified message, HMAC KMS key, and MAC algorithm. To verify the
+    # HMAC, `VerifyMac` computes an HMAC using the message, HMAC KMS key,
+    # and MAC algorithm that you specify, and compares the computed HMAC to
+    # the HMAC that you specify. If the HMACs are identical, the
+    # verification succeeds; otherwise, it fails. Verification indicates
+    # that the message hasn't changed since the HMAC was calculated, and
+    # the specified key was used to generate and verify the HMAC.
+    #
+    # HMAC KMS keys and the HMAC algorithms that KMS uses conform to
+    # industry standards defined in [RFC 2104][1].
+    #
+    # This operation is part of KMS support for HMAC KMS keys. For details,
+    # see [HMAC keys in KMS][2] in the *Key Management Service Developer
+    # Guide*.
+    #
+    # The KMS key that you use for this operation must be in a compatible
+    # key state. For details, see [Key states of KMS keys][3] in the *Key
+    # Management Service Developer Guide*.
+    #
+    # **Cross-account use**\: Yes. To perform this operation with a KMS key
+    # in a different Amazon Web Services account, specify the key ARN or
+    # alias ARN in the value of the `KeyId` parameter.
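+    #
+    # As a minimal round-trip sketch (not one of the generated API
+    # examples; the key ID below is a placeholder for an HMAC KMS key in
+    # your account):
+    #
+    #     # Ask KMS to compute the HMAC for a message...
+    #     mac = client.generate_mac(
+    #       key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+    #       message: "Hello World",
+    #       mac_algorithm: "HMAC_SHA_384"
+    #     ).mac
+    #
+    #     # ...then re-submit the same message, key, and algorithm to check the tag.
+    #     client.verify_mac(
+    #       key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+    #       message: "Hello World",
+    #       mac_algorithm: "HMAC_SHA_384",
+    #       mac: mac
+    #     ).mac_valid #=> true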
+    #
+    # **Required permissions**\: [kms:VerifyMac][4] (key policy)
+    #
+    # **Related operations**\: GenerateMac
+    #
+    #
+    #
+    # [1]: https://datatracker.ietf.org/doc/html/rfc2104
+    # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html
+    # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+    # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+    #
+    # @option params [required, String, StringIO, File] :message
+    #   The message that will be used in the verification. Enter the same
+    #   message that was used to generate the HMAC.
+    #
+    #   GenerateMac and `VerifyMac` do not provide special handling for
+    #   message digests. If you generated an HMAC for a hash digest of a
+    #   message, you must verify the HMAC for the same hash digest.
+    #
+    # @option params [required, String] :key_id
+    #   The KMS key that will be used in the verification.
+    #
+    #   Enter a key ID of the KMS key that was used to generate the HMAC.
+    #   If you identify a different KMS key, the `VerifyMac` operation
+    #   fails.
+    #
+    # @option params [required, String] :mac_algorithm
+    #   The MAC algorithm that will be used in the verification. Enter the
+    #   same MAC algorithm that was used to compute the HMAC. This
+    #   algorithm must be supported by the HMAC KMS key identified by the
+    #   `KeyId` parameter.
+    #
+    # @option params [required, String, StringIO, File] :mac
+    #   The HMAC to verify. Enter the HMAC that was generated by the
+    #   GenerateMac operation when you specified the same message, HMAC KMS
+    #   key, and MAC algorithm as the values specified in this request.
+    #
+    # @option params [Array<String>] :grant_tokens
+    #   A list of grant tokens.
+    #
+    #   Use a grant token when your permission to call this operation comes
+    #   from a new grant that has not yet achieved *eventual consistency*.
+    #   For more information, see [Grant token][1] and [Using a grant
+    #   token][2] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+    #   [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+    #
+    # @return [Types::VerifyMacResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::VerifyMacResponse#key_id #key_id} => String
+    #   * {Types::VerifyMacResponse#mac_valid #mac_valid} => Boolean
+    #   * {Types::VerifyMacResponse#mac_algorithm #mac_algorithm} => String
+    #
+    #
+    # @example Example: To verify an HMAC
+    #
+    #   # This example verifies an HMAC for a particular message, HMAC KMS key, and MAC algorithm. A value of 'true' in the
+    #   # MacValid field in the response indicates that the HMAC is valid.
+    #
+    #   resp = client.verify_mac({
+    #     key_id: "1234abcd-12ab-34cd-56ef-1234567890ab", # The HMAC KMS key input to the HMAC algorithm.
+    #     mac: "", # The HMAC to be verified.
+    #     mac_algorithm: "HMAC_SHA_384", # The HMAC algorithm requested for the operation.
+    #     message: "Hello World", # The message input to the HMAC algorithm.
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     key_id: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", # The key ARN of the HMAC key used in the operation.
+    #     mac_algorithm: "HMAC_SHA_384", # The HMAC algorithm used in the operation.
+    #     mac_valid: true, # A value of 'true' indicates that verification succeeded. If verification fails, the call to VerifyMac fails.
+ # } + # + # @example Request syntax with placeholder values + # + # resp = client.verify_mac({ + # message: "data", # required + # key_id: "KeyIdType", # required + # mac_algorithm: "HMAC_SHA_224", # required, accepts HMAC_SHA_224, HMAC_SHA_256, HMAC_SHA_384, HMAC_SHA_512 + # mac: "data", # required + # grant_tokens: ["GrantTokenType"], + # }) + # + # @example Response structure + # + # resp.key_id #=> String + # resp.mac_valid #=> Boolean + # resp.mac_algorithm #=> String, one of "HMAC_SHA_224", "HMAC_SHA_256", "HMAC_SHA_384", "HMAC_SHA_512" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMac AWS API Documentation + # + # @overload verify_mac(params = {}) + # @param [Hash] params ({}) + def verify_mac(params = {}, options = {}) + req = build_request(:verify_mac, params) + req.send_request(options) + end + + # @!endgroup + + # @param params ({}) + # @api private + def build_request(operation_name, params = {}) + handlers = @handlers.for(operation_name) + context = Seahorse::Client::RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + context[:gem_name] = 'aws-sdk-kms' + context[:gem_version] = '1.63.0' + Seahorse::Client::Request.new(handlers, context) + end + + # @api private + # @deprecated + def waiter_names + [] + end + + class << self + + # @api private + attr_reader :identifier + + # @api private + def errors_module + Errors + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/client_api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/client_api.rb new file mode 100644 index 0000000..128430c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/client_api.rb @@ -0,0 +1,1723 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + # @api private + module ClientApi + + include Seahorse::Model + + AWSAccountIdType = Shapes::StringShape.new(name: 'AWSAccountIdType') + AlgorithmSpec = Shapes::StringShape.new(name: 'AlgorithmSpec') + AliasList = Shapes::ListShape.new(name: 'AliasList') + AliasListEntry = Shapes::StructureShape.new(name: 'AliasListEntry') + AliasNameType = Shapes::StringShape.new(name: 'AliasNameType') + AlreadyExistsException = Shapes::StructureShape.new(name: 'AlreadyExistsException') + ArnType = Shapes::StringShape.new(name: 'ArnType') + BooleanType = Shapes::BooleanShape.new(name: 'BooleanType') + CancelKeyDeletionRequest = Shapes::StructureShape.new(name: 'CancelKeyDeletionRequest') + CancelKeyDeletionResponse = Shapes::StructureShape.new(name: 'CancelKeyDeletionResponse') + CiphertextType = Shapes::BlobShape.new(name: 'CiphertextType') + CloudHsmClusterIdType = Shapes::StringShape.new(name: 'CloudHsmClusterIdType') + CloudHsmClusterInUseException = Shapes::StructureShape.new(name: 'CloudHsmClusterInUseException') + CloudHsmClusterInvalidConfigurationException = Shapes::StructureShape.new(name: 'CloudHsmClusterInvalidConfigurationException') + CloudHsmClusterNotActiveException = Shapes::StructureShape.new(name: 'CloudHsmClusterNotActiveException') + CloudHsmClusterNotFoundException = Shapes::StructureShape.new(name: 'CloudHsmClusterNotFoundException') + CloudHsmClusterNotRelatedException = Shapes::StructureShape.new(name: 'CloudHsmClusterNotRelatedException') + ConnectCustomKeyStoreRequest = Shapes::StructureShape.new(name: 'ConnectCustomKeyStoreRequest') + ConnectCustomKeyStoreResponse = Shapes::StructureShape.new(name: 'ConnectCustomKeyStoreResponse') + ConnectionErrorCodeType = Shapes::StringShape.new(name: 'ConnectionErrorCodeType') + ConnectionStateType = Shapes::StringShape.new(name: 'ConnectionStateType') + CreateAliasRequest = Shapes::StructureShape.new(name: 'CreateAliasRequest') + CreateCustomKeyStoreRequest = Shapes::StructureShape.new(name: 'CreateCustomKeyStoreRequest') + CreateCustomKeyStoreResponse = Shapes::StructureShape.new(name: 'CreateCustomKeyStoreResponse') + CreateGrantRequest = Shapes::StructureShape.new(name: 'CreateGrantRequest') + CreateGrantResponse = Shapes::StructureShape.new(name: 'CreateGrantResponse') + CreateKeyRequest = Shapes::StructureShape.new(name: 'CreateKeyRequest') + CreateKeyResponse = Shapes::StructureShape.new(name: 'CreateKeyResponse') + CustomKeyStoreHasCMKsException = Shapes::StructureShape.new(name: 'CustomKeyStoreHasCMKsException') + CustomKeyStoreIdType = Shapes::StringShape.new(name: 'CustomKeyStoreIdType') + CustomKeyStoreInvalidStateException = Shapes::StructureShape.new(name: 'CustomKeyStoreInvalidStateException') + CustomKeyStoreNameInUseException = Shapes::StructureShape.new(name: 'CustomKeyStoreNameInUseException') + CustomKeyStoreNameType = Shapes::StringShape.new(name: 'CustomKeyStoreNameType') + CustomKeyStoreNotFoundException = Shapes::StructureShape.new(name: 'CustomKeyStoreNotFoundException') + CustomKeyStoreType = Shapes::StringShape.new(name: 'CustomKeyStoreType') + CustomKeyStoresList = Shapes::ListShape.new(name: 'CustomKeyStoresList') + CustomKeyStoresListEntry = Shapes::StructureShape.new(name: 'CustomKeyStoresListEntry') + CustomerMasterKeySpec = Shapes::StringShape.new(name: 'CustomerMasterKeySpec') + DataKeyPairSpec = Shapes::StringShape.new(name: 
'DataKeyPairSpec') + DataKeySpec = Shapes::StringShape.new(name: 'DataKeySpec') + DateType = Shapes::TimestampShape.new(name: 'DateType') + DecryptRequest = Shapes::StructureShape.new(name: 'DecryptRequest') + DecryptResponse = Shapes::StructureShape.new(name: 'DecryptResponse') + DeleteAliasRequest = Shapes::StructureShape.new(name: 'DeleteAliasRequest') + DeleteCustomKeyStoreRequest = Shapes::StructureShape.new(name: 'DeleteCustomKeyStoreRequest') + DeleteCustomKeyStoreResponse = Shapes::StructureShape.new(name: 'DeleteCustomKeyStoreResponse') + DeleteImportedKeyMaterialRequest = Shapes::StructureShape.new(name: 'DeleteImportedKeyMaterialRequest') + DependencyTimeoutException = Shapes::StructureShape.new(name: 'DependencyTimeoutException') + DescribeCustomKeyStoresRequest = Shapes::StructureShape.new(name: 'DescribeCustomKeyStoresRequest') + DescribeCustomKeyStoresResponse = Shapes::StructureShape.new(name: 'DescribeCustomKeyStoresResponse') + DescribeKeyRequest = Shapes::StructureShape.new(name: 'DescribeKeyRequest') + DescribeKeyResponse = Shapes::StructureShape.new(name: 'DescribeKeyResponse') + DescriptionType = Shapes::StringShape.new(name: 'DescriptionType') + DisableKeyRequest = Shapes::StructureShape.new(name: 'DisableKeyRequest') + DisableKeyRotationRequest = Shapes::StructureShape.new(name: 'DisableKeyRotationRequest') + DisabledException = Shapes::StructureShape.new(name: 'DisabledException') + DisconnectCustomKeyStoreRequest = Shapes::StructureShape.new(name: 'DisconnectCustomKeyStoreRequest') + DisconnectCustomKeyStoreResponse = Shapes::StructureShape.new(name: 'DisconnectCustomKeyStoreResponse') + EnableKeyRequest = Shapes::StructureShape.new(name: 'EnableKeyRequest') + EnableKeyRotationRequest = Shapes::StructureShape.new(name: 'EnableKeyRotationRequest') + EncryptRequest = Shapes::StructureShape.new(name: 'EncryptRequest') + EncryptResponse = Shapes::StructureShape.new(name: 'EncryptResponse') + EncryptionAlgorithmSpec = Shapes::StringShape.new(name: 'EncryptionAlgorithmSpec') + EncryptionAlgorithmSpecList = Shapes::ListShape.new(name: 'EncryptionAlgorithmSpecList') + EncryptionContextKey = Shapes::StringShape.new(name: 'EncryptionContextKey') + EncryptionContextType = Shapes::MapShape.new(name: 'EncryptionContextType') + EncryptionContextValue = Shapes::StringShape.new(name: 'EncryptionContextValue') + ErrorMessageType = Shapes::StringShape.new(name: 'ErrorMessageType') + ExpirationModelType = Shapes::StringShape.new(name: 'ExpirationModelType') + ExpiredImportTokenException = Shapes::StructureShape.new(name: 'ExpiredImportTokenException') + GenerateDataKeyPairRequest = Shapes::StructureShape.new(name: 'GenerateDataKeyPairRequest') + GenerateDataKeyPairResponse = Shapes::StructureShape.new(name: 'GenerateDataKeyPairResponse') + GenerateDataKeyPairWithoutPlaintextRequest = Shapes::StructureShape.new(name: 'GenerateDataKeyPairWithoutPlaintextRequest') + GenerateDataKeyPairWithoutPlaintextResponse = Shapes::StructureShape.new(name: 'GenerateDataKeyPairWithoutPlaintextResponse') + GenerateDataKeyRequest = Shapes::StructureShape.new(name: 'GenerateDataKeyRequest') + GenerateDataKeyResponse = Shapes::StructureShape.new(name: 'GenerateDataKeyResponse') + GenerateDataKeyWithoutPlaintextRequest = Shapes::StructureShape.new(name: 'GenerateDataKeyWithoutPlaintextRequest') + GenerateDataKeyWithoutPlaintextResponse = Shapes::StructureShape.new(name: 'GenerateDataKeyWithoutPlaintextResponse') + GenerateMacRequest = Shapes::StructureShape.new(name: 'GenerateMacRequest') + 
GenerateMacResponse = Shapes::StructureShape.new(name: 'GenerateMacResponse') + GenerateRandomRequest = Shapes::StructureShape.new(name: 'GenerateRandomRequest') + GenerateRandomResponse = Shapes::StructureShape.new(name: 'GenerateRandomResponse') + GetKeyPolicyRequest = Shapes::StructureShape.new(name: 'GetKeyPolicyRequest') + GetKeyPolicyResponse = Shapes::StructureShape.new(name: 'GetKeyPolicyResponse') + GetKeyRotationStatusRequest = Shapes::StructureShape.new(name: 'GetKeyRotationStatusRequest') + GetKeyRotationStatusResponse = Shapes::StructureShape.new(name: 'GetKeyRotationStatusResponse') + GetParametersForImportRequest = Shapes::StructureShape.new(name: 'GetParametersForImportRequest') + GetParametersForImportResponse = Shapes::StructureShape.new(name: 'GetParametersForImportResponse') + GetPublicKeyRequest = Shapes::StructureShape.new(name: 'GetPublicKeyRequest') + GetPublicKeyResponse = Shapes::StructureShape.new(name: 'GetPublicKeyResponse') + GrantConstraints = Shapes::StructureShape.new(name: 'GrantConstraints') + GrantIdType = Shapes::StringShape.new(name: 'GrantIdType') + GrantList = Shapes::ListShape.new(name: 'GrantList') + GrantListEntry = Shapes::StructureShape.new(name: 'GrantListEntry') + GrantNameType = Shapes::StringShape.new(name: 'GrantNameType') + GrantOperation = Shapes::StringShape.new(name: 'GrantOperation') + GrantOperationList = Shapes::ListShape.new(name: 'GrantOperationList') + GrantTokenList = Shapes::ListShape.new(name: 'GrantTokenList') + GrantTokenType = Shapes::StringShape.new(name: 'GrantTokenType') + ImportKeyMaterialRequest = Shapes::StructureShape.new(name: 'ImportKeyMaterialRequest') + ImportKeyMaterialResponse = Shapes::StructureShape.new(name: 'ImportKeyMaterialResponse') + IncorrectKeyException = Shapes::StructureShape.new(name: 'IncorrectKeyException') + IncorrectKeyMaterialException = Shapes::StructureShape.new(name: 'IncorrectKeyMaterialException') + IncorrectTrustAnchorException = Shapes::StructureShape.new(name: 'IncorrectTrustAnchorException') + InvalidAliasNameException = Shapes::StructureShape.new(name: 'InvalidAliasNameException') + InvalidArnException = Shapes::StructureShape.new(name: 'InvalidArnException') + InvalidCiphertextException = Shapes::StructureShape.new(name: 'InvalidCiphertextException') + InvalidGrantIdException = Shapes::StructureShape.new(name: 'InvalidGrantIdException') + InvalidGrantTokenException = Shapes::StructureShape.new(name: 'InvalidGrantTokenException') + InvalidImportTokenException = Shapes::StructureShape.new(name: 'InvalidImportTokenException') + InvalidKeyUsageException = Shapes::StructureShape.new(name: 'InvalidKeyUsageException') + InvalidMarkerException = Shapes::StructureShape.new(name: 'InvalidMarkerException') + KMSInternalException = Shapes::StructureShape.new(name: 'KMSInternalException') + KMSInvalidMacException = Shapes::StructureShape.new(name: 'KMSInvalidMacException') + KMSInvalidSignatureException = Shapes::StructureShape.new(name: 'KMSInvalidSignatureException') + KMSInvalidStateException = Shapes::StructureShape.new(name: 'KMSInvalidStateException') + KeyIdType = Shapes::StringShape.new(name: 'KeyIdType') + KeyList = Shapes::ListShape.new(name: 'KeyList') + KeyListEntry = Shapes::StructureShape.new(name: 'KeyListEntry') + KeyManagerType = Shapes::StringShape.new(name: 'KeyManagerType') + KeyMetadata = Shapes::StructureShape.new(name: 'KeyMetadata') + KeySpec = Shapes::StringShape.new(name: 'KeySpec') + KeyState = Shapes::StringShape.new(name: 'KeyState') + KeyStorePasswordType = 
Shapes::StringShape.new(name: 'KeyStorePasswordType') + KeyUnavailableException = Shapes::StructureShape.new(name: 'KeyUnavailableException') + KeyUsageType = Shapes::StringShape.new(name: 'KeyUsageType') + LimitExceededException = Shapes::StructureShape.new(name: 'LimitExceededException') + LimitType = Shapes::IntegerShape.new(name: 'LimitType') + ListAliasesRequest = Shapes::StructureShape.new(name: 'ListAliasesRequest') + ListAliasesResponse = Shapes::StructureShape.new(name: 'ListAliasesResponse') + ListGrantsRequest = Shapes::StructureShape.new(name: 'ListGrantsRequest') + ListGrantsResponse = Shapes::StructureShape.new(name: 'ListGrantsResponse') + ListKeyPoliciesRequest = Shapes::StructureShape.new(name: 'ListKeyPoliciesRequest') + ListKeyPoliciesResponse = Shapes::StructureShape.new(name: 'ListKeyPoliciesResponse') + ListKeysRequest = Shapes::StructureShape.new(name: 'ListKeysRequest') + ListKeysResponse = Shapes::StructureShape.new(name: 'ListKeysResponse') + ListResourceTagsRequest = Shapes::StructureShape.new(name: 'ListResourceTagsRequest') + ListResourceTagsResponse = Shapes::StructureShape.new(name: 'ListResourceTagsResponse') + ListRetirableGrantsRequest = Shapes::StructureShape.new(name: 'ListRetirableGrantsRequest') + MacAlgorithmSpec = Shapes::StringShape.new(name: 'MacAlgorithmSpec') + MacAlgorithmSpecList = Shapes::ListShape.new(name: 'MacAlgorithmSpecList') + MalformedPolicyDocumentException = Shapes::StructureShape.new(name: 'MalformedPolicyDocumentException') + MarkerType = Shapes::StringShape.new(name: 'MarkerType') + MessageType = Shapes::StringShape.new(name: 'MessageType') + MultiRegionConfiguration = Shapes::StructureShape.new(name: 'MultiRegionConfiguration') + MultiRegionKey = Shapes::StructureShape.new(name: 'MultiRegionKey') + MultiRegionKeyList = Shapes::ListShape.new(name: 'MultiRegionKeyList') + MultiRegionKeyType = Shapes::StringShape.new(name: 'MultiRegionKeyType') + NotFoundException = Shapes::StructureShape.new(name: 'NotFoundException') + NullableBooleanType = Shapes::BooleanShape.new(name: 'NullableBooleanType') + NumberOfBytesType = Shapes::IntegerShape.new(name: 'NumberOfBytesType') + OriginType = Shapes::StringShape.new(name: 'OriginType') + PendingWindowInDaysType = Shapes::IntegerShape.new(name: 'PendingWindowInDaysType') + PlaintextType = Shapes::BlobShape.new(name: 'PlaintextType') + PolicyNameList = Shapes::ListShape.new(name: 'PolicyNameList') + PolicyNameType = Shapes::StringShape.new(name: 'PolicyNameType') + PolicyType = Shapes::StringShape.new(name: 'PolicyType') + PrincipalIdType = Shapes::StringShape.new(name: 'PrincipalIdType') + PublicKeyType = Shapes::BlobShape.new(name: 'PublicKeyType') + PutKeyPolicyRequest = Shapes::StructureShape.new(name: 'PutKeyPolicyRequest') + ReEncryptRequest = Shapes::StructureShape.new(name: 'ReEncryptRequest') + ReEncryptResponse = Shapes::StructureShape.new(name: 'ReEncryptResponse') + RegionType = Shapes::StringShape.new(name: 'RegionType') + ReplicateKeyRequest = Shapes::StructureShape.new(name: 'ReplicateKeyRequest') + ReplicateKeyResponse = Shapes::StructureShape.new(name: 'ReplicateKeyResponse') + RetireGrantRequest = Shapes::StructureShape.new(name: 'RetireGrantRequest') + RevokeGrantRequest = Shapes::StructureShape.new(name: 'RevokeGrantRequest') + ScheduleKeyDeletionRequest = Shapes::StructureShape.new(name: 'ScheduleKeyDeletionRequest') + ScheduleKeyDeletionResponse = Shapes::StructureShape.new(name: 'ScheduleKeyDeletionResponse') + SignRequest = Shapes::StructureShape.new(name: 'SignRequest') 
+ SignResponse = Shapes::StructureShape.new(name: 'SignResponse') + SigningAlgorithmSpec = Shapes::StringShape.new(name: 'SigningAlgorithmSpec') + SigningAlgorithmSpecList = Shapes::ListShape.new(name: 'SigningAlgorithmSpecList') + Tag = Shapes::StructureShape.new(name: 'Tag') + TagException = Shapes::StructureShape.new(name: 'TagException') + TagKeyList = Shapes::ListShape.new(name: 'TagKeyList') + TagKeyType = Shapes::StringShape.new(name: 'TagKeyType') + TagList = Shapes::ListShape.new(name: 'TagList') + TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest') + TagValueType = Shapes::StringShape.new(name: 'TagValueType') + TrustAnchorCertificateType = Shapes::StringShape.new(name: 'TrustAnchorCertificateType') + UnsupportedOperationException = Shapes::StructureShape.new(name: 'UnsupportedOperationException') + UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest') + UpdateAliasRequest = Shapes::StructureShape.new(name: 'UpdateAliasRequest') + UpdateCustomKeyStoreRequest = Shapes::StructureShape.new(name: 'UpdateCustomKeyStoreRequest') + UpdateCustomKeyStoreResponse = Shapes::StructureShape.new(name: 'UpdateCustomKeyStoreResponse') + UpdateKeyDescriptionRequest = Shapes::StructureShape.new(name: 'UpdateKeyDescriptionRequest') + UpdatePrimaryRegionRequest = Shapes::StructureShape.new(name: 'UpdatePrimaryRegionRequest') + VerifyMacRequest = Shapes::StructureShape.new(name: 'VerifyMacRequest') + VerifyMacResponse = Shapes::StructureShape.new(name: 'VerifyMacResponse') + VerifyRequest = Shapes::StructureShape.new(name: 'VerifyRequest') + VerifyResponse = Shapes::StructureShape.new(name: 'VerifyResponse') + WrappingKeySpec = Shapes::StringShape.new(name: 'WrappingKeySpec') + XksKeyAlreadyInUseException = Shapes::StructureShape.new(name: 'XksKeyAlreadyInUseException') + XksKeyConfigurationType = Shapes::StructureShape.new(name: 'XksKeyConfigurationType') + XksKeyIdType = Shapes::StringShape.new(name: 'XksKeyIdType') + XksKeyInvalidConfigurationException = Shapes::StructureShape.new(name: 'XksKeyInvalidConfigurationException') + XksKeyNotFoundException = Shapes::StructureShape.new(name: 'XksKeyNotFoundException') + XksProxyAuthenticationAccessKeyIdType = Shapes::StringShape.new(name: 'XksProxyAuthenticationAccessKeyIdType') + XksProxyAuthenticationCredentialType = Shapes::StructureShape.new(name: 'XksProxyAuthenticationCredentialType') + XksProxyAuthenticationRawSecretAccessKeyType = Shapes::StringShape.new(name: 'XksProxyAuthenticationRawSecretAccessKeyType') + XksProxyConfigurationType = Shapes::StructureShape.new(name: 'XksProxyConfigurationType') + XksProxyConnectivityType = Shapes::StringShape.new(name: 'XksProxyConnectivityType') + XksProxyIncorrectAuthenticationCredentialException = Shapes::StructureShape.new(name: 'XksProxyIncorrectAuthenticationCredentialException') + XksProxyInvalidConfigurationException = Shapes::StructureShape.new(name: 'XksProxyInvalidConfigurationException') + XksProxyInvalidResponseException = Shapes::StructureShape.new(name: 'XksProxyInvalidResponseException') + XksProxyUriEndpointInUseException = Shapes::StructureShape.new(name: 'XksProxyUriEndpointInUseException') + XksProxyUriEndpointType = Shapes::StringShape.new(name: 'XksProxyUriEndpointType') + XksProxyUriInUseException = Shapes::StructureShape.new(name: 'XksProxyUriInUseException') + XksProxyUriPathType = Shapes::StringShape.new(name: 'XksProxyUriPathType') + XksProxyUriUnreachableException = Shapes::StructureShape.new(name: 'XksProxyUriUnreachableException') 
+ XksProxyVpcEndpointServiceInUseException = Shapes::StructureShape.new(name: 'XksProxyVpcEndpointServiceInUseException') + XksProxyVpcEndpointServiceInvalidConfigurationException = Shapes::StructureShape.new(name: 'XksProxyVpcEndpointServiceInvalidConfigurationException') + XksProxyVpcEndpointServiceNameType = Shapes::StringShape.new(name: 'XksProxyVpcEndpointServiceNameType') + XksProxyVpcEndpointServiceNotFoundException = Shapes::StructureShape.new(name: 'XksProxyVpcEndpointServiceNotFoundException') + + AliasList.member = Shapes::ShapeRef.new(shape: AliasListEntry) + + AliasListEntry.add_member(:alias_name, Shapes::ShapeRef.new(shape: AliasNameType, location_name: "AliasName")) + AliasListEntry.add_member(:alias_arn, Shapes::ShapeRef.new(shape: ArnType, location_name: "AliasArn")) + AliasListEntry.add_member(:target_key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "TargetKeyId")) + AliasListEntry.add_member(:creation_date, Shapes::ShapeRef.new(shape: DateType, location_name: "CreationDate")) + AliasListEntry.add_member(:last_updated_date, Shapes::ShapeRef.new(shape: DateType, location_name: "LastUpdatedDate")) + AliasListEntry.struct_class = Types::AliasListEntry + + AlreadyExistsException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + AlreadyExistsException.struct_class = Types::AlreadyExistsException + + CancelKeyDeletionRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + CancelKeyDeletionRequest.struct_class = Types::CancelKeyDeletionRequest + + CancelKeyDeletionResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + CancelKeyDeletionResponse.struct_class = Types::CancelKeyDeletionResponse + + CloudHsmClusterInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CloudHsmClusterInUseException.struct_class = Types::CloudHsmClusterInUseException + + CloudHsmClusterInvalidConfigurationException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CloudHsmClusterInvalidConfigurationException.struct_class = Types::CloudHsmClusterInvalidConfigurationException + + CloudHsmClusterNotActiveException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CloudHsmClusterNotActiveException.struct_class = Types::CloudHsmClusterNotActiveException + + CloudHsmClusterNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CloudHsmClusterNotFoundException.struct_class = Types::CloudHsmClusterNotFoundException + + CloudHsmClusterNotRelatedException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CloudHsmClusterNotRelatedException.struct_class = Types::CloudHsmClusterNotRelatedException + + ConnectCustomKeyStoreRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, required: true, location_name: "CustomKeyStoreId")) + ConnectCustomKeyStoreRequest.struct_class = Types::ConnectCustomKeyStoreRequest + + ConnectCustomKeyStoreResponse.struct_class = Types::ConnectCustomKeyStoreResponse + + CreateAliasRequest.add_member(:alias_name, Shapes::ShapeRef.new(shape: AliasNameType, required: true, location_name: "AliasName")) + CreateAliasRequest.add_member(:target_key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "TargetKeyId")) + 
CreateAliasRequest.struct_class = Types::CreateAliasRequest + + CreateCustomKeyStoreRequest.add_member(:custom_key_store_name, Shapes::ShapeRef.new(shape: CustomKeyStoreNameType, required: true, location_name: "CustomKeyStoreName")) + CreateCustomKeyStoreRequest.add_member(:cloud_hsm_cluster_id, Shapes::ShapeRef.new(shape: CloudHsmClusterIdType, location_name: "CloudHsmClusterId")) + CreateCustomKeyStoreRequest.add_member(:trust_anchor_certificate, Shapes::ShapeRef.new(shape: TrustAnchorCertificateType, location_name: "TrustAnchorCertificate")) + CreateCustomKeyStoreRequest.add_member(:key_store_password, Shapes::ShapeRef.new(shape: KeyStorePasswordType, location_name: "KeyStorePassword")) + CreateCustomKeyStoreRequest.add_member(:custom_key_store_type, Shapes::ShapeRef.new(shape: CustomKeyStoreType, location_name: "CustomKeyStoreType")) + CreateCustomKeyStoreRequest.add_member(:xks_proxy_uri_endpoint, Shapes::ShapeRef.new(shape: XksProxyUriEndpointType, location_name: "XksProxyUriEndpoint")) + CreateCustomKeyStoreRequest.add_member(:xks_proxy_uri_path, Shapes::ShapeRef.new(shape: XksProxyUriPathType, location_name: "XksProxyUriPath")) + CreateCustomKeyStoreRequest.add_member(:xks_proxy_vpc_endpoint_service_name, Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceNameType, location_name: "XksProxyVpcEndpointServiceName")) + CreateCustomKeyStoreRequest.add_member(:xks_proxy_authentication_credential, Shapes::ShapeRef.new(shape: XksProxyAuthenticationCredentialType, location_name: "XksProxyAuthenticationCredential")) + CreateCustomKeyStoreRequest.add_member(:xks_proxy_connectivity, Shapes::ShapeRef.new(shape: XksProxyConnectivityType, location_name: "XksProxyConnectivity")) + CreateCustomKeyStoreRequest.struct_class = Types::CreateCustomKeyStoreRequest + + CreateCustomKeyStoreResponse.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + CreateCustomKeyStoreResponse.struct_class = Types::CreateCustomKeyStoreResponse + + CreateGrantRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + CreateGrantRequest.add_member(:grantee_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, required: true, location_name: "GranteePrincipal")) + CreateGrantRequest.add_member(:retiring_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, location_name: "RetiringPrincipal")) + CreateGrantRequest.add_member(:operations, Shapes::ShapeRef.new(shape: GrantOperationList, required: true, location_name: "Operations")) + CreateGrantRequest.add_member(:constraints, Shapes::ShapeRef.new(shape: GrantConstraints, location_name: "Constraints")) + CreateGrantRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + CreateGrantRequest.add_member(:name, Shapes::ShapeRef.new(shape: GrantNameType, location_name: "Name")) + CreateGrantRequest.struct_class = Types::CreateGrantRequest + + CreateGrantResponse.add_member(:grant_token, Shapes::ShapeRef.new(shape: GrantTokenType, location_name: "GrantToken")) + CreateGrantResponse.add_member(:grant_id, Shapes::ShapeRef.new(shape: GrantIdType, location_name: "GrantId")) + CreateGrantResponse.struct_class = Types::CreateGrantResponse + + CreateKeyRequest.add_member(:policy, Shapes::ShapeRef.new(shape: PolicyType, location_name: "Policy")) + CreateKeyRequest.add_member(:description, Shapes::ShapeRef.new(shape: DescriptionType, location_name: "Description")) + CreateKeyRequest.add_member(:key_usage, 
Shapes::ShapeRef.new(shape: KeyUsageType, location_name: "KeyUsage")) + CreateKeyRequest.add_member(:customer_master_key_spec, Shapes::ShapeRef.new(shape: CustomerMasterKeySpec, deprecated: true, location_name: "CustomerMasterKeySpec", metadata: {"deprecatedMessage"=>"This parameter has been deprecated. Instead, use the KeySpec parameter."})) + CreateKeyRequest.add_member(:key_spec, Shapes::ShapeRef.new(shape: KeySpec, location_name: "KeySpec")) + CreateKeyRequest.add_member(:origin, Shapes::ShapeRef.new(shape: OriginType, location_name: "Origin")) + CreateKeyRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + CreateKeyRequest.add_member(:bypass_policy_lockout_safety_check, Shapes::ShapeRef.new(shape: BooleanType, location_name: "BypassPolicyLockoutSafetyCheck")) + CreateKeyRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags")) + CreateKeyRequest.add_member(:multi_region, Shapes::ShapeRef.new(shape: NullableBooleanType, location_name: "MultiRegion")) + CreateKeyRequest.add_member(:xks_key_id, Shapes::ShapeRef.new(shape: XksKeyIdType, location_name: "XksKeyId")) + CreateKeyRequest.struct_class = Types::CreateKeyRequest + + CreateKeyResponse.add_member(:key_metadata, Shapes::ShapeRef.new(shape: KeyMetadata, location_name: "KeyMetadata")) + CreateKeyResponse.struct_class = Types::CreateKeyResponse + + CustomKeyStoreHasCMKsException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CustomKeyStoreHasCMKsException.struct_class = Types::CustomKeyStoreHasCMKsException + + CustomKeyStoreInvalidStateException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CustomKeyStoreInvalidStateException.struct_class = Types::CustomKeyStoreInvalidStateException + + CustomKeyStoreNameInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CustomKeyStoreNameInUseException.struct_class = Types::CustomKeyStoreNameInUseException + + CustomKeyStoreNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + CustomKeyStoreNotFoundException.struct_class = Types::CustomKeyStoreNotFoundException + + CustomKeyStoresList.member = Shapes::ShapeRef.new(shape: CustomKeyStoresListEntry) + + CustomKeyStoresListEntry.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + CustomKeyStoresListEntry.add_member(:custom_key_store_name, Shapes::ShapeRef.new(shape: CustomKeyStoreNameType, location_name: "CustomKeyStoreName")) + CustomKeyStoresListEntry.add_member(:cloud_hsm_cluster_id, Shapes::ShapeRef.new(shape: CloudHsmClusterIdType, location_name: "CloudHsmClusterId")) + CustomKeyStoresListEntry.add_member(:trust_anchor_certificate, Shapes::ShapeRef.new(shape: TrustAnchorCertificateType, location_name: "TrustAnchorCertificate")) + CustomKeyStoresListEntry.add_member(:connection_state, Shapes::ShapeRef.new(shape: ConnectionStateType, location_name: "ConnectionState")) + CustomKeyStoresListEntry.add_member(:connection_error_code, Shapes::ShapeRef.new(shape: ConnectionErrorCodeType, location_name: "ConnectionErrorCode")) + CustomKeyStoresListEntry.add_member(:creation_date, Shapes::ShapeRef.new(shape: DateType, location_name: "CreationDate")) + CustomKeyStoresListEntry.add_member(:custom_key_store_type, Shapes::ShapeRef.new(shape: CustomKeyStoreType, location_name: 
"CustomKeyStoreType")) + CustomKeyStoresListEntry.add_member(:xks_proxy_configuration, Shapes::ShapeRef.new(shape: XksProxyConfigurationType, location_name: "XksProxyConfiguration")) + CustomKeyStoresListEntry.struct_class = Types::CustomKeyStoresListEntry + + DecryptRequest.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "CiphertextBlob")) + DecryptRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + DecryptRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + DecryptRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + DecryptRequest.add_member(:encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "EncryptionAlgorithm")) + DecryptRequest.struct_class = Types::DecryptRequest + + DecryptResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + DecryptResponse.add_member(:plaintext, Shapes::ShapeRef.new(shape: PlaintextType, location_name: "Plaintext")) + DecryptResponse.add_member(:encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "EncryptionAlgorithm")) + DecryptResponse.struct_class = Types::DecryptResponse + + DeleteAliasRequest.add_member(:alias_name, Shapes::ShapeRef.new(shape: AliasNameType, required: true, location_name: "AliasName")) + DeleteAliasRequest.struct_class = Types::DeleteAliasRequest + + DeleteCustomKeyStoreRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, required: true, location_name: "CustomKeyStoreId")) + DeleteCustomKeyStoreRequest.struct_class = Types::DeleteCustomKeyStoreRequest + + DeleteCustomKeyStoreResponse.struct_class = Types::DeleteCustomKeyStoreResponse + + DeleteImportedKeyMaterialRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + DeleteImportedKeyMaterialRequest.struct_class = Types::DeleteImportedKeyMaterialRequest + + DependencyTimeoutException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + DependencyTimeoutException.struct_class = Types::DependencyTimeoutException + + DescribeCustomKeyStoresRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + DescribeCustomKeyStoresRequest.add_member(:custom_key_store_name, Shapes::ShapeRef.new(shape: CustomKeyStoreNameType, location_name: "CustomKeyStoreName")) + DescribeCustomKeyStoresRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + DescribeCustomKeyStoresRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + DescribeCustomKeyStoresRequest.struct_class = Types::DescribeCustomKeyStoresRequest + + DescribeCustomKeyStoresResponse.add_member(:custom_key_stores, Shapes::ShapeRef.new(shape: CustomKeyStoresList, location_name: "CustomKeyStores")) + DescribeCustomKeyStoresResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + DescribeCustomKeyStoresResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + DescribeCustomKeyStoresResponse.struct_class = Types::DescribeCustomKeyStoresResponse + + DescribeKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, 
location_name: "KeyId")) + DescribeKeyRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + DescribeKeyRequest.struct_class = Types::DescribeKeyRequest + + DescribeKeyResponse.add_member(:key_metadata, Shapes::ShapeRef.new(shape: KeyMetadata, location_name: "KeyMetadata")) + DescribeKeyResponse.struct_class = Types::DescribeKeyResponse + + DisableKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + DisableKeyRequest.struct_class = Types::DisableKeyRequest + + DisableKeyRotationRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + DisableKeyRotationRequest.struct_class = Types::DisableKeyRotationRequest + + DisabledException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + DisabledException.struct_class = Types::DisabledException + + DisconnectCustomKeyStoreRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, required: true, location_name: "CustomKeyStoreId")) + DisconnectCustomKeyStoreRequest.struct_class = Types::DisconnectCustomKeyStoreRequest + + DisconnectCustomKeyStoreResponse.struct_class = Types::DisconnectCustomKeyStoreResponse + + EnableKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + EnableKeyRequest.struct_class = Types::EnableKeyRequest + + EnableKeyRotationRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + EnableKeyRotationRequest.struct_class = Types::EnableKeyRotationRequest + + EncryptRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + EncryptRequest.add_member(:plaintext, Shapes::ShapeRef.new(shape: PlaintextType, required: true, location_name: "Plaintext")) + EncryptRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + EncryptRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + EncryptRequest.add_member(:encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "EncryptionAlgorithm")) + EncryptRequest.struct_class = Types::EncryptRequest + + EncryptResponse.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "CiphertextBlob")) + EncryptResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + EncryptResponse.add_member(:encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "EncryptionAlgorithm")) + EncryptResponse.struct_class = Types::EncryptResponse + + EncryptionAlgorithmSpecList.member = Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec) + + EncryptionContextType.key = Shapes::ShapeRef.new(shape: EncryptionContextKey) + EncryptionContextType.value = Shapes::ShapeRef.new(shape: EncryptionContextValue) + + ExpiredImportTokenException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + ExpiredImportTokenException.struct_class = Types::ExpiredImportTokenException + + GenerateDataKeyPairRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + GenerateDataKeyPairRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, 
location_name: "KeyId")) + GenerateDataKeyPairRequest.add_member(:key_pair_spec, Shapes::ShapeRef.new(shape: DataKeyPairSpec, required: true, location_name: "KeyPairSpec")) + GenerateDataKeyPairRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GenerateDataKeyPairRequest.struct_class = Types::GenerateDataKeyPairRequest + + GenerateDataKeyPairResponse.add_member(:private_key_ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "PrivateKeyCiphertextBlob")) + GenerateDataKeyPairResponse.add_member(:private_key_plaintext, Shapes::ShapeRef.new(shape: PlaintextType, location_name: "PrivateKeyPlaintext")) + GenerateDataKeyPairResponse.add_member(:public_key, Shapes::ShapeRef.new(shape: PublicKeyType, location_name: "PublicKey")) + GenerateDataKeyPairResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GenerateDataKeyPairResponse.add_member(:key_pair_spec, Shapes::ShapeRef.new(shape: DataKeyPairSpec, location_name: "KeyPairSpec")) + GenerateDataKeyPairResponse.struct_class = Types::GenerateDataKeyPairResponse + + GenerateDataKeyPairWithoutPlaintextRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + GenerateDataKeyPairWithoutPlaintextRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GenerateDataKeyPairWithoutPlaintextRequest.add_member(:key_pair_spec, Shapes::ShapeRef.new(shape: DataKeyPairSpec, required: true, location_name: "KeyPairSpec")) + GenerateDataKeyPairWithoutPlaintextRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GenerateDataKeyPairWithoutPlaintextRequest.struct_class = Types::GenerateDataKeyPairWithoutPlaintextRequest + + GenerateDataKeyPairWithoutPlaintextResponse.add_member(:private_key_ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "PrivateKeyCiphertextBlob")) + GenerateDataKeyPairWithoutPlaintextResponse.add_member(:public_key, Shapes::ShapeRef.new(shape: PublicKeyType, location_name: "PublicKey")) + GenerateDataKeyPairWithoutPlaintextResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GenerateDataKeyPairWithoutPlaintextResponse.add_member(:key_pair_spec, Shapes::ShapeRef.new(shape: DataKeyPairSpec, location_name: "KeyPairSpec")) + GenerateDataKeyPairWithoutPlaintextResponse.struct_class = Types::GenerateDataKeyPairWithoutPlaintextResponse + + GenerateDataKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GenerateDataKeyRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + GenerateDataKeyRequest.add_member(:number_of_bytes, Shapes::ShapeRef.new(shape: NumberOfBytesType, location_name: "NumberOfBytes")) + GenerateDataKeyRequest.add_member(:key_spec, Shapes::ShapeRef.new(shape: DataKeySpec, location_name: "KeySpec")) + GenerateDataKeyRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GenerateDataKeyRequest.struct_class = Types::GenerateDataKeyRequest + + GenerateDataKeyResponse.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "CiphertextBlob")) + GenerateDataKeyResponse.add_member(:plaintext, Shapes::ShapeRef.new(shape: PlaintextType, location_name: 
"Plaintext")) + GenerateDataKeyResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GenerateDataKeyResponse.struct_class = Types::GenerateDataKeyResponse + + GenerateDataKeyWithoutPlaintextRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GenerateDataKeyWithoutPlaintextRequest.add_member(:encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContext")) + GenerateDataKeyWithoutPlaintextRequest.add_member(:key_spec, Shapes::ShapeRef.new(shape: DataKeySpec, location_name: "KeySpec")) + GenerateDataKeyWithoutPlaintextRequest.add_member(:number_of_bytes, Shapes::ShapeRef.new(shape: NumberOfBytesType, location_name: "NumberOfBytes")) + GenerateDataKeyWithoutPlaintextRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GenerateDataKeyWithoutPlaintextRequest.struct_class = Types::GenerateDataKeyWithoutPlaintextRequest + + GenerateDataKeyWithoutPlaintextResponse.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "CiphertextBlob")) + GenerateDataKeyWithoutPlaintextResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GenerateDataKeyWithoutPlaintextResponse.struct_class = Types::GenerateDataKeyWithoutPlaintextResponse + + GenerateMacRequest.add_member(:message, Shapes::ShapeRef.new(shape: PlaintextType, required: true, location_name: "Message")) + GenerateMacRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GenerateMacRequest.add_member(:mac_algorithm, Shapes::ShapeRef.new(shape: MacAlgorithmSpec, required: true, location_name: "MacAlgorithm")) + GenerateMacRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GenerateMacRequest.struct_class = Types::GenerateMacRequest + + GenerateMacResponse.add_member(:mac, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "Mac")) + GenerateMacResponse.add_member(:mac_algorithm, Shapes::ShapeRef.new(shape: MacAlgorithmSpec, location_name: "MacAlgorithm")) + GenerateMacResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GenerateMacResponse.struct_class = Types::GenerateMacResponse + + GenerateRandomRequest.add_member(:number_of_bytes, Shapes::ShapeRef.new(shape: NumberOfBytesType, location_name: "NumberOfBytes")) + GenerateRandomRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + GenerateRandomRequest.struct_class = Types::GenerateRandomRequest + + GenerateRandomResponse.add_member(:plaintext, Shapes::ShapeRef.new(shape: PlaintextType, location_name: "Plaintext")) + GenerateRandomResponse.struct_class = Types::GenerateRandomResponse + + GetKeyPolicyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GetKeyPolicyRequest.add_member(:policy_name, Shapes::ShapeRef.new(shape: PolicyNameType, required: true, location_name: "PolicyName")) + GetKeyPolicyRequest.struct_class = Types::GetKeyPolicyRequest + + GetKeyPolicyResponse.add_member(:policy, Shapes::ShapeRef.new(shape: PolicyType, location_name: "Policy")) + GetKeyPolicyResponse.struct_class = Types::GetKeyPolicyResponse + + GetKeyRotationStatusRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: 
"KeyId")) + GetKeyRotationStatusRequest.struct_class = Types::GetKeyRotationStatusRequest + + GetKeyRotationStatusResponse.add_member(:key_rotation_enabled, Shapes::ShapeRef.new(shape: BooleanType, location_name: "KeyRotationEnabled")) + GetKeyRotationStatusResponse.struct_class = Types::GetKeyRotationStatusResponse + + GetParametersForImportRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GetParametersForImportRequest.add_member(:wrapping_algorithm, Shapes::ShapeRef.new(shape: AlgorithmSpec, required: true, location_name: "WrappingAlgorithm")) + GetParametersForImportRequest.add_member(:wrapping_key_spec, Shapes::ShapeRef.new(shape: WrappingKeySpec, required: true, location_name: "WrappingKeySpec")) + GetParametersForImportRequest.struct_class = Types::GetParametersForImportRequest + + GetParametersForImportResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GetParametersForImportResponse.add_member(:import_token, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "ImportToken")) + GetParametersForImportResponse.add_member(:public_key, Shapes::ShapeRef.new(shape: PlaintextType, location_name: "PublicKey")) + GetParametersForImportResponse.add_member(:parameters_valid_to, Shapes::ShapeRef.new(shape: DateType, location_name: "ParametersValidTo")) + GetParametersForImportResponse.struct_class = Types::GetParametersForImportResponse + + GetPublicKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + GetPublicKeyRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + GetPublicKeyRequest.struct_class = Types::GetPublicKeyRequest + + GetPublicKeyResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GetPublicKeyResponse.add_member(:public_key, Shapes::ShapeRef.new(shape: PublicKeyType, location_name: "PublicKey")) + GetPublicKeyResponse.add_member(:customer_master_key_spec, Shapes::ShapeRef.new(shape: CustomerMasterKeySpec, deprecated: true, location_name: "CustomerMasterKeySpec", metadata: {"deprecatedMessage"=>"This field has been deprecated. 
Instead, use the KeySpec field."})) + GetPublicKeyResponse.add_member(:key_spec, Shapes::ShapeRef.new(shape: KeySpec, location_name: "KeySpec")) + GetPublicKeyResponse.add_member(:key_usage, Shapes::ShapeRef.new(shape: KeyUsageType, location_name: "KeyUsage")) + GetPublicKeyResponse.add_member(:encryption_algorithms, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpecList, location_name: "EncryptionAlgorithms")) + GetPublicKeyResponse.add_member(:signing_algorithms, Shapes::ShapeRef.new(shape: SigningAlgorithmSpecList, location_name: "SigningAlgorithms")) + GetPublicKeyResponse.struct_class = Types::GetPublicKeyResponse + + GrantConstraints.add_member(:encryption_context_subset, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContextSubset")) + GrantConstraints.add_member(:encryption_context_equals, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "EncryptionContextEquals")) + GrantConstraints.struct_class = Types::GrantConstraints + + GrantList.member = Shapes::ShapeRef.new(shape: GrantListEntry) + + GrantListEntry.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + GrantListEntry.add_member(:grant_id, Shapes::ShapeRef.new(shape: GrantIdType, location_name: "GrantId")) + GrantListEntry.add_member(:name, Shapes::ShapeRef.new(shape: GrantNameType, location_name: "Name")) + GrantListEntry.add_member(:creation_date, Shapes::ShapeRef.new(shape: DateType, location_name: "CreationDate")) + GrantListEntry.add_member(:grantee_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, location_name: "GranteePrincipal")) + GrantListEntry.add_member(:retiring_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, location_name: "RetiringPrincipal")) + GrantListEntry.add_member(:issuing_account, Shapes::ShapeRef.new(shape: PrincipalIdType, location_name: "IssuingAccount")) + GrantListEntry.add_member(:operations, Shapes::ShapeRef.new(shape: GrantOperationList, location_name: "Operations")) + GrantListEntry.add_member(:constraints, Shapes::ShapeRef.new(shape: GrantConstraints, location_name: "Constraints")) + GrantListEntry.struct_class = Types::GrantListEntry + + GrantOperationList.member = Shapes::ShapeRef.new(shape: GrantOperation) + + GrantTokenList.member = Shapes::ShapeRef.new(shape: GrantTokenType) + + ImportKeyMaterialRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ImportKeyMaterialRequest.add_member(:import_token, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "ImportToken")) + ImportKeyMaterialRequest.add_member(:encrypted_key_material, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "EncryptedKeyMaterial")) + ImportKeyMaterialRequest.add_member(:valid_to, Shapes::ShapeRef.new(shape: DateType, location_name: "ValidTo")) + ImportKeyMaterialRequest.add_member(:expiration_model, Shapes::ShapeRef.new(shape: ExpirationModelType, location_name: "ExpirationModel")) + ImportKeyMaterialRequest.struct_class = Types::ImportKeyMaterialRequest + + ImportKeyMaterialResponse.struct_class = Types::ImportKeyMaterialResponse + + IncorrectKeyException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + IncorrectKeyException.struct_class = Types::IncorrectKeyException + + IncorrectKeyMaterialException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + IncorrectKeyMaterialException.struct_class = Types::IncorrectKeyMaterialException + 
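+ # The shape refs above are what Seahorse uses to serialize requests and parse
+ # responses for the matching Aws::KMS::Client methods. A minimal usage sketch
+ # for GetPublicKey (the alias below is a hypothetical placeholder):
+ #
+ #   kms  = Aws::KMS::Client.new(region: "us-east-1")
+ #   resp = kms.get_public_key(key_id: "alias/example-signing-key")
+ #   resp.public_key          # DER-encoded public key, per PublicKeyType
+ #   resp.key_spec            # e.g. "RSA_2048"; CustomerMasterKeySpec is deprecated
+ #   resp.signing_algorithms  # populated from SigningAlgorithmSpecList
+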
+ IncorrectTrustAnchorException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + IncorrectTrustAnchorException.struct_class = Types::IncorrectTrustAnchorException + + InvalidAliasNameException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidAliasNameException.struct_class = Types::InvalidAliasNameException + + InvalidArnException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidArnException.struct_class = Types::InvalidArnException + + InvalidCiphertextException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidCiphertextException.struct_class = Types::InvalidCiphertextException + + InvalidGrantIdException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidGrantIdException.struct_class = Types::InvalidGrantIdException + + InvalidGrantTokenException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidGrantTokenException.struct_class = Types::InvalidGrantTokenException + + InvalidImportTokenException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidImportTokenException.struct_class = Types::InvalidImportTokenException + + InvalidKeyUsageException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidKeyUsageException.struct_class = Types::InvalidKeyUsageException + + InvalidMarkerException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + InvalidMarkerException.struct_class = Types::InvalidMarkerException + + KMSInternalException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + KMSInternalException.struct_class = Types::KMSInternalException + + KMSInvalidMacException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + KMSInvalidMacException.struct_class = Types::KMSInvalidMacException + + KMSInvalidSignatureException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + KMSInvalidSignatureException.struct_class = Types::KMSInvalidSignatureException + + KMSInvalidStateException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + KMSInvalidStateException.struct_class = Types::KMSInvalidStateException + + KeyList.member = Shapes::ShapeRef.new(shape: KeyListEntry) + + KeyListEntry.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + KeyListEntry.add_member(:key_arn, Shapes::ShapeRef.new(shape: ArnType, location_name: "KeyArn")) + KeyListEntry.struct_class = Types::KeyListEntry + + KeyMetadata.add_member(:aws_account_id, Shapes::ShapeRef.new(shape: AWSAccountIdType, location_name: "AWSAccountId")) + KeyMetadata.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + KeyMetadata.add_member(:arn, Shapes::ShapeRef.new(shape: ArnType, location_name: "Arn")) + KeyMetadata.add_member(:creation_date, Shapes::ShapeRef.new(shape: DateType, location_name: "CreationDate")) + KeyMetadata.add_member(:enabled, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Enabled")) + KeyMetadata.add_member(:description, Shapes::ShapeRef.new(shape: DescriptionType, location_name: "Description")) + 
KeyMetadata.add_member(:key_usage, Shapes::ShapeRef.new(shape: KeyUsageType, location_name: "KeyUsage")) + KeyMetadata.add_member(:key_state, Shapes::ShapeRef.new(shape: KeyState, location_name: "KeyState")) + KeyMetadata.add_member(:deletion_date, Shapes::ShapeRef.new(shape: DateType, location_name: "DeletionDate")) + KeyMetadata.add_member(:valid_to, Shapes::ShapeRef.new(shape: DateType, location_name: "ValidTo")) + KeyMetadata.add_member(:origin, Shapes::ShapeRef.new(shape: OriginType, location_name: "Origin")) + KeyMetadata.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, location_name: "CustomKeyStoreId")) + KeyMetadata.add_member(:cloud_hsm_cluster_id, Shapes::ShapeRef.new(shape: CloudHsmClusterIdType, location_name: "CloudHsmClusterId")) + KeyMetadata.add_member(:expiration_model, Shapes::ShapeRef.new(shape: ExpirationModelType, location_name: "ExpirationModel")) + KeyMetadata.add_member(:key_manager, Shapes::ShapeRef.new(shape: KeyManagerType, location_name: "KeyManager")) + KeyMetadata.add_member(:customer_master_key_spec, Shapes::ShapeRef.new(shape: CustomerMasterKeySpec, deprecated: true, location_name: "CustomerMasterKeySpec", metadata: {"deprecatedMessage"=>"This field has been deprecated. Instead, use the KeySpec field."})) + KeyMetadata.add_member(:key_spec, Shapes::ShapeRef.new(shape: KeySpec, location_name: "KeySpec")) + KeyMetadata.add_member(:encryption_algorithms, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpecList, location_name: "EncryptionAlgorithms")) + KeyMetadata.add_member(:signing_algorithms, Shapes::ShapeRef.new(shape: SigningAlgorithmSpecList, location_name: "SigningAlgorithms")) + KeyMetadata.add_member(:multi_region, Shapes::ShapeRef.new(shape: NullableBooleanType, location_name: "MultiRegion")) + KeyMetadata.add_member(:multi_region_configuration, Shapes::ShapeRef.new(shape: MultiRegionConfiguration, location_name: "MultiRegionConfiguration")) + KeyMetadata.add_member(:pending_deletion_window_in_days, Shapes::ShapeRef.new(shape: PendingWindowInDaysType, location_name: "PendingDeletionWindowInDays")) + KeyMetadata.add_member(:mac_algorithms, Shapes::ShapeRef.new(shape: MacAlgorithmSpecList, location_name: "MacAlgorithms")) + KeyMetadata.add_member(:xks_key_configuration, Shapes::ShapeRef.new(shape: XksKeyConfigurationType, location_name: "XksKeyConfiguration")) + KeyMetadata.struct_class = Types::KeyMetadata + + KeyUnavailableException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + KeyUnavailableException.struct_class = Types::KeyUnavailableException + + LimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + LimitExceededException.struct_class = Types::LimitExceededException + + ListAliasesRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + ListAliasesRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + ListAliasesRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListAliasesRequest.struct_class = Types::ListAliasesRequest + + ListAliasesResponse.add_member(:aliases, Shapes::ShapeRef.new(shape: AliasList, location_name: "Aliases")) + ListAliasesResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + ListAliasesResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + 
ListAliasesResponse.struct_class = Types::ListAliasesResponse + + ListGrantsRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + ListGrantsRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListGrantsRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ListGrantsRequest.add_member(:grant_id, Shapes::ShapeRef.new(shape: GrantIdType, location_name: "GrantId")) + ListGrantsRequest.add_member(:grantee_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, location_name: "GranteePrincipal")) + ListGrantsRequest.struct_class = Types::ListGrantsRequest + + ListGrantsResponse.add_member(:grants, Shapes::ShapeRef.new(shape: GrantList, location_name: "Grants")) + ListGrantsResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + ListGrantsResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + ListGrantsResponse.struct_class = Types::ListGrantsResponse + + ListKeyPoliciesRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ListKeyPoliciesRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + ListKeyPoliciesRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListKeyPoliciesRequest.struct_class = Types::ListKeyPoliciesRequest + + ListKeyPoliciesResponse.add_member(:policy_names, Shapes::ShapeRef.new(shape: PolicyNameList, location_name: "PolicyNames")) + ListKeyPoliciesResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + ListKeyPoliciesResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + ListKeyPoliciesResponse.struct_class = Types::ListKeyPoliciesResponse + + ListKeysRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + ListKeysRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListKeysRequest.struct_class = Types::ListKeysRequest + + ListKeysResponse.add_member(:keys, Shapes::ShapeRef.new(shape: KeyList, location_name: "Keys")) + ListKeysResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + ListKeysResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + ListKeysResponse.struct_class = Types::ListKeysResponse + + ListResourceTagsRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ListResourceTagsRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: "Limit")) + ListResourceTagsRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListResourceTagsRequest.struct_class = Types::ListResourceTagsRequest + + ListResourceTagsResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags")) + ListResourceTagsResponse.add_member(:next_marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "NextMarker")) + ListResourceTagsResponse.add_member(:truncated, Shapes::ShapeRef.new(shape: BooleanType, location_name: "Truncated")) + ListResourceTagsResponse.struct_class = Types::ListResourceTagsResponse + + ListRetirableGrantsRequest.add_member(:limit, Shapes::ShapeRef.new(shape: LimitType, location_name: 
"Limit")) + ListRetirableGrantsRequest.add_member(:marker, Shapes::ShapeRef.new(shape: MarkerType, location_name: "Marker")) + ListRetirableGrantsRequest.add_member(:retiring_principal, Shapes::ShapeRef.new(shape: PrincipalIdType, required: true, location_name: "RetiringPrincipal")) + ListRetirableGrantsRequest.struct_class = Types::ListRetirableGrantsRequest + + MacAlgorithmSpecList.member = Shapes::ShapeRef.new(shape: MacAlgorithmSpec) + + MalformedPolicyDocumentException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + MalformedPolicyDocumentException.struct_class = Types::MalformedPolicyDocumentException + + MultiRegionConfiguration.add_member(:multi_region_key_type, Shapes::ShapeRef.new(shape: MultiRegionKeyType, location_name: "MultiRegionKeyType")) + MultiRegionConfiguration.add_member(:primary_key, Shapes::ShapeRef.new(shape: MultiRegionKey, location_name: "PrimaryKey")) + MultiRegionConfiguration.add_member(:replica_keys, Shapes::ShapeRef.new(shape: MultiRegionKeyList, location_name: "ReplicaKeys")) + MultiRegionConfiguration.struct_class = Types::MultiRegionConfiguration + + MultiRegionKey.add_member(:arn, Shapes::ShapeRef.new(shape: ArnType, location_name: "Arn")) + MultiRegionKey.add_member(:region, Shapes::ShapeRef.new(shape: RegionType, location_name: "Region")) + MultiRegionKey.struct_class = Types::MultiRegionKey + + MultiRegionKeyList.member = Shapes::ShapeRef.new(shape: MultiRegionKey) + + NotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + NotFoundException.struct_class = Types::NotFoundException + + PolicyNameList.member = Shapes::ShapeRef.new(shape: PolicyNameType) + + PutKeyPolicyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + PutKeyPolicyRequest.add_member(:policy_name, Shapes::ShapeRef.new(shape: PolicyNameType, required: true, location_name: "PolicyName")) + PutKeyPolicyRequest.add_member(:policy, Shapes::ShapeRef.new(shape: PolicyType, required: true, location_name: "Policy")) + PutKeyPolicyRequest.add_member(:bypass_policy_lockout_safety_check, Shapes::ShapeRef.new(shape: BooleanType, location_name: "BypassPolicyLockoutSafetyCheck")) + PutKeyPolicyRequest.struct_class = Types::PutKeyPolicyRequest + + ReEncryptRequest.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "CiphertextBlob")) + ReEncryptRequest.add_member(:source_encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "SourceEncryptionContext")) + ReEncryptRequest.add_member(:source_key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "SourceKeyId")) + ReEncryptRequest.add_member(:destination_key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "DestinationKeyId")) + ReEncryptRequest.add_member(:destination_encryption_context, Shapes::ShapeRef.new(shape: EncryptionContextType, location_name: "DestinationEncryptionContext")) + ReEncryptRequest.add_member(:source_encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "SourceEncryptionAlgorithm")) + ReEncryptRequest.add_member(:destination_encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "DestinationEncryptionAlgorithm")) + ReEncryptRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + ReEncryptRequest.struct_class = Types::ReEncryptRequest + 
+ ReEncryptResponse.add_member(:ciphertext_blob, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "CiphertextBlob")) + ReEncryptResponse.add_member(:source_key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "SourceKeyId")) + ReEncryptResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + ReEncryptResponse.add_member(:source_encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "SourceEncryptionAlgorithm")) + ReEncryptResponse.add_member(:destination_encryption_algorithm, Shapes::ShapeRef.new(shape: EncryptionAlgorithmSpec, location_name: "DestinationEncryptionAlgorithm")) + ReEncryptResponse.struct_class = Types::ReEncryptResponse + + ReplicateKeyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ReplicateKeyRequest.add_member(:replica_region, Shapes::ShapeRef.new(shape: RegionType, required: true, location_name: "ReplicaRegion")) + ReplicateKeyRequest.add_member(:policy, Shapes::ShapeRef.new(shape: PolicyType, location_name: "Policy")) + ReplicateKeyRequest.add_member(:bypass_policy_lockout_safety_check, Shapes::ShapeRef.new(shape: BooleanType, location_name: "BypassPolicyLockoutSafetyCheck")) + ReplicateKeyRequest.add_member(:description, Shapes::ShapeRef.new(shape: DescriptionType, location_name: "Description")) + ReplicateKeyRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags")) + ReplicateKeyRequest.struct_class = Types::ReplicateKeyRequest + + ReplicateKeyResponse.add_member(:replica_key_metadata, Shapes::ShapeRef.new(shape: KeyMetadata, location_name: "ReplicaKeyMetadata")) + ReplicateKeyResponse.add_member(:replica_policy, Shapes::ShapeRef.new(shape: PolicyType, location_name: "ReplicaPolicy")) + ReplicateKeyResponse.add_member(:replica_tags, Shapes::ShapeRef.new(shape: TagList, location_name: "ReplicaTags")) + ReplicateKeyResponse.struct_class = Types::ReplicateKeyResponse + + RetireGrantRequest.add_member(:grant_token, Shapes::ShapeRef.new(shape: GrantTokenType, location_name: "GrantToken")) + RetireGrantRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + RetireGrantRequest.add_member(:grant_id, Shapes::ShapeRef.new(shape: GrantIdType, location_name: "GrantId")) + RetireGrantRequest.struct_class = Types::RetireGrantRequest + + RevokeGrantRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + RevokeGrantRequest.add_member(:grant_id, Shapes::ShapeRef.new(shape: GrantIdType, required: true, location_name: "GrantId")) + RevokeGrantRequest.struct_class = Types::RevokeGrantRequest + + ScheduleKeyDeletionRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + ScheduleKeyDeletionRequest.add_member(:pending_window_in_days, Shapes::ShapeRef.new(shape: PendingWindowInDaysType, location_name: "PendingWindowInDays")) + ScheduleKeyDeletionRequest.struct_class = Types::ScheduleKeyDeletionRequest + + ScheduleKeyDeletionResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + ScheduleKeyDeletionResponse.add_member(:deletion_date, Shapes::ShapeRef.new(shape: DateType, location_name: "DeletionDate")) + ScheduleKeyDeletionResponse.add_member(:key_state, Shapes::ShapeRef.new(shape: KeyState, location_name: "KeyState")) + ScheduleKeyDeletionResponse.add_member(:pending_window_in_days, Shapes::ShapeRef.new(shape: 
PendingWindowInDaysType, location_name: "PendingWindowInDays")) + ScheduleKeyDeletionResponse.struct_class = Types::ScheduleKeyDeletionResponse + + SignRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + SignRequest.add_member(:message, Shapes::ShapeRef.new(shape: PlaintextType, required: true, location_name: "Message")) + SignRequest.add_member(:message_type, Shapes::ShapeRef.new(shape: MessageType, location_name: "MessageType")) + SignRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + SignRequest.add_member(:signing_algorithm, Shapes::ShapeRef.new(shape: SigningAlgorithmSpec, required: true, location_name: "SigningAlgorithm")) + SignRequest.struct_class = Types::SignRequest + + SignResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + SignResponse.add_member(:signature, Shapes::ShapeRef.new(shape: CiphertextType, location_name: "Signature")) + SignResponse.add_member(:signing_algorithm, Shapes::ShapeRef.new(shape: SigningAlgorithmSpec, location_name: "SigningAlgorithm")) + SignResponse.struct_class = Types::SignResponse + + SigningAlgorithmSpecList.member = Shapes::ShapeRef.new(shape: SigningAlgorithmSpec) + + Tag.add_member(:tag_key, Shapes::ShapeRef.new(shape: TagKeyType, required: true, location_name: "TagKey")) + Tag.add_member(:tag_value, Shapes::ShapeRef.new(shape: TagValueType, required: true, location_name: "TagValue")) + Tag.struct_class = Types::Tag + + TagException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + TagException.struct_class = Types::TagException + + TagKeyList.member = Shapes::ShapeRef.new(shape: TagKeyType) + + TagList.member = Shapes::ShapeRef.new(shape: Tag) + + TagResourceRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + TagResourceRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, required: true, location_name: "Tags")) + TagResourceRequest.struct_class = Types::TagResourceRequest + + UnsupportedOperationException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + UnsupportedOperationException.struct_class = Types::UnsupportedOperationException + + UntagResourceRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location_name: "TagKeys")) + UntagResourceRequest.struct_class = Types::UntagResourceRequest + + UpdateAliasRequest.add_member(:alias_name, Shapes::ShapeRef.new(shape: AliasNameType, required: true, location_name: "AliasName")) + UpdateAliasRequest.add_member(:target_key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "TargetKeyId")) + UpdateAliasRequest.struct_class = Types::UpdateAliasRequest + + UpdateCustomKeyStoreRequest.add_member(:custom_key_store_id, Shapes::ShapeRef.new(shape: CustomKeyStoreIdType, required: true, location_name: "CustomKeyStoreId")) + UpdateCustomKeyStoreRequest.add_member(:new_custom_key_store_name, Shapes::ShapeRef.new(shape: CustomKeyStoreNameType, location_name: "NewCustomKeyStoreName")) + UpdateCustomKeyStoreRequest.add_member(:key_store_password, Shapes::ShapeRef.new(shape: KeyStorePasswordType, location_name: "KeyStorePassword")) + UpdateCustomKeyStoreRequest.add_member(:cloud_hsm_cluster_id, Shapes::ShapeRef.new(shape: 
CloudHsmClusterIdType, location_name: "CloudHsmClusterId")) + UpdateCustomKeyStoreRequest.add_member(:xks_proxy_uri_endpoint, Shapes::ShapeRef.new(shape: XksProxyUriEndpointType, location_name: "XksProxyUriEndpoint")) + UpdateCustomKeyStoreRequest.add_member(:xks_proxy_uri_path, Shapes::ShapeRef.new(shape: XksProxyUriPathType, location_name: "XksProxyUriPath")) + UpdateCustomKeyStoreRequest.add_member(:xks_proxy_vpc_endpoint_service_name, Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceNameType, location_name: "XksProxyVpcEndpointServiceName")) + UpdateCustomKeyStoreRequest.add_member(:xks_proxy_authentication_credential, Shapes::ShapeRef.new(shape: XksProxyAuthenticationCredentialType, location_name: "XksProxyAuthenticationCredential")) + UpdateCustomKeyStoreRequest.add_member(:xks_proxy_connectivity, Shapes::ShapeRef.new(shape: XksProxyConnectivityType, location_name: "XksProxyConnectivity")) + UpdateCustomKeyStoreRequest.struct_class = Types::UpdateCustomKeyStoreRequest + + UpdateCustomKeyStoreResponse.struct_class = Types::UpdateCustomKeyStoreResponse + + UpdateKeyDescriptionRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + UpdateKeyDescriptionRequest.add_member(:description, Shapes::ShapeRef.new(shape: DescriptionType, required: true, location_name: "Description")) + UpdateKeyDescriptionRequest.struct_class = Types::UpdateKeyDescriptionRequest + + UpdatePrimaryRegionRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + UpdatePrimaryRegionRequest.add_member(:primary_region, Shapes::ShapeRef.new(shape: RegionType, required: true, location_name: "PrimaryRegion")) + UpdatePrimaryRegionRequest.struct_class = Types::UpdatePrimaryRegionRequest + + VerifyMacRequest.add_member(:message, Shapes::ShapeRef.new(shape: PlaintextType, required: true, location_name: "Message")) + VerifyMacRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + VerifyMacRequest.add_member(:mac_algorithm, Shapes::ShapeRef.new(shape: MacAlgorithmSpec, required: true, location_name: "MacAlgorithm")) + VerifyMacRequest.add_member(:mac, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "Mac")) + VerifyMacRequest.add_member(:grant_tokens, Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + VerifyMacRequest.struct_class = Types::VerifyMacRequest + + VerifyMacResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + VerifyMacResponse.add_member(:mac_valid, Shapes::ShapeRef.new(shape: BooleanType, location_name: "MacValid")) + VerifyMacResponse.add_member(:mac_algorithm, Shapes::ShapeRef.new(shape: MacAlgorithmSpec, location_name: "MacAlgorithm")) + VerifyMacResponse.struct_class = Types::VerifyMacResponse + + VerifyRequest.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, required: true, location_name: "KeyId")) + VerifyRequest.add_member(:message, Shapes::ShapeRef.new(shape: PlaintextType, required: true, location_name: "Message")) + VerifyRequest.add_member(:message_type, Shapes::ShapeRef.new(shape: MessageType, location_name: "MessageType")) + VerifyRequest.add_member(:signature, Shapes::ShapeRef.new(shape: CiphertextType, required: true, location_name: "Signature")) + VerifyRequest.add_member(:signing_algorithm, Shapes::ShapeRef.new(shape: SigningAlgorithmSpec, required: true, location_name: "SigningAlgorithm")) + VerifyRequest.add_member(:grant_tokens, 
Shapes::ShapeRef.new(shape: GrantTokenList, location_name: "GrantTokens")) + VerifyRequest.struct_class = Types::VerifyRequest + + VerifyResponse.add_member(:key_id, Shapes::ShapeRef.new(shape: KeyIdType, location_name: "KeyId")) + VerifyResponse.add_member(:signature_valid, Shapes::ShapeRef.new(shape: BooleanType, location_name: "SignatureValid")) + VerifyResponse.add_member(:signing_algorithm, Shapes::ShapeRef.new(shape: SigningAlgorithmSpec, location_name: "SigningAlgorithm")) + VerifyResponse.struct_class = Types::VerifyResponse + + XksKeyAlreadyInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksKeyAlreadyInUseException.struct_class = Types::XksKeyAlreadyInUseException + + XksKeyConfigurationType.add_member(:id, Shapes::ShapeRef.new(shape: XksKeyIdType, location_name: "Id")) + XksKeyConfigurationType.struct_class = Types::XksKeyConfigurationType + + XksKeyInvalidConfigurationException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksKeyInvalidConfigurationException.struct_class = Types::XksKeyInvalidConfigurationException + + XksKeyNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksKeyNotFoundException.struct_class = Types::XksKeyNotFoundException + + XksProxyAuthenticationCredentialType.add_member(:access_key_id, Shapes::ShapeRef.new(shape: XksProxyAuthenticationAccessKeyIdType, required: true, location_name: "AccessKeyId")) + XksProxyAuthenticationCredentialType.add_member(:raw_secret_access_key, Shapes::ShapeRef.new(shape: XksProxyAuthenticationRawSecretAccessKeyType, required: true, location_name: "RawSecretAccessKey")) + XksProxyAuthenticationCredentialType.struct_class = Types::XksProxyAuthenticationCredentialType + + XksProxyConfigurationType.add_member(:connectivity, Shapes::ShapeRef.new(shape: XksProxyConnectivityType, location_name: "Connectivity")) + XksProxyConfigurationType.add_member(:access_key_id, Shapes::ShapeRef.new(shape: XksProxyAuthenticationAccessKeyIdType, location_name: "AccessKeyId")) + XksProxyConfigurationType.add_member(:uri_endpoint, Shapes::ShapeRef.new(shape: XksProxyUriEndpointType, location_name: "UriEndpoint")) + XksProxyConfigurationType.add_member(:uri_path, Shapes::ShapeRef.new(shape: XksProxyUriPathType, location_name: "UriPath")) + XksProxyConfigurationType.add_member(:vpc_endpoint_service_name, Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceNameType, location_name: "VpcEndpointServiceName")) + XksProxyConfigurationType.struct_class = Types::XksProxyConfigurationType + + XksProxyIncorrectAuthenticationCredentialException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyIncorrectAuthenticationCredentialException.struct_class = Types::XksProxyIncorrectAuthenticationCredentialException + + XksProxyInvalidConfigurationException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyInvalidConfigurationException.struct_class = Types::XksProxyInvalidConfigurationException + + XksProxyInvalidResponseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyInvalidResponseException.struct_class = Types::XksProxyInvalidResponseException + + XksProxyUriEndpointInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyUriEndpointInUseException.struct_class = 
Types::XksProxyUriEndpointInUseException + + XksProxyUriInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyUriInUseException.struct_class = Types::XksProxyUriInUseException + + XksProxyUriUnreachableException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyUriUnreachableException.struct_class = Types::XksProxyUriUnreachableException + + XksProxyVpcEndpointServiceInUseException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyVpcEndpointServiceInUseException.struct_class = Types::XksProxyVpcEndpointServiceInUseException + + XksProxyVpcEndpointServiceInvalidConfigurationException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyVpcEndpointServiceInvalidConfigurationException.struct_class = Types::XksProxyVpcEndpointServiceInvalidConfigurationException + + XksProxyVpcEndpointServiceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessageType, location_name: "message")) + XksProxyVpcEndpointServiceNotFoundException.struct_class = Types::XksProxyVpcEndpointServiceNotFoundException + + + # @api private + API = Seahorse::Model::Api.new.tap do |api| + + api.version = "2014-11-01" + + api.metadata = { + "apiVersion" => "2014-11-01", + "endpointPrefix" => "kms", + "jsonVersion" => "1.1", + "protocol" => "json", + "serviceAbbreviation" => "KMS", + "serviceFullName" => "AWS Key Management Service", + "serviceId" => "KMS", + "signatureVersion" => "v4", + "targetPrefix" => "TrentService", + "uid" => "kms-2014-11-01", + } + + api.add_operation(:cancel_key_deletion, Seahorse::Model::Operation.new.tap do |o| + o.name = "CancelKeyDeletion" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CancelKeyDeletionRequest) + o.output = Shapes::ShapeRef.new(shape: CancelKeyDeletionResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:connect_custom_key_store, Seahorse::Model::Operation.new.tap do |o| + o.name = "ConnectCustomKeyStore" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ConnectCustomKeyStoreRequest) + o.output = Shapes::ShapeRef.new(shape: ConnectCustomKeyStoreResponse) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotActiveException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterInvalidConfigurationException) + end) + + api.add_operation(:create_alias, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateAlias" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateAliasRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: AlreadyExistsException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + 
o.errors << Shapes::ShapeRef.new(shape: InvalidAliasNameException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:create_custom_key_store, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateCustomKeyStore" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateCustomKeyStoreRequest) + o.output = Shapes::ShapeRef.new(shape: CreateCustomKeyStoreResponse) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterInUseException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNameInUseException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotActiveException) + o.errors << Shapes::ShapeRef.new(shape: IncorrectTrustAnchorException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriEndpointInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriUnreachableException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyIncorrectAuthenticationCredentialException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyInvalidResponseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyInvalidConfigurationException) + end) + + api.add_operation(:create_grant, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateGrant" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateGrantRequest) + o.output = Shapes::ShapeRef.new(shape: CreateGrantResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:create_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateKeyRequest) + o.output = Shapes::ShapeRef.new(shape: CreateKeyResponse) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: TagException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + 
o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: XksKeyInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: XksKeyAlreadyInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksKeyNotFoundException) + end) + + api.add_operation(:decrypt, Seahorse::Model::Operation.new.tap do |o| + o.name = "Decrypt" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DecryptRequest) + o.output = Shapes::ShapeRef.new(shape: DecryptResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidCiphertextException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: IncorrectKeyException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:delete_alias, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteAlias" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteAliasRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:delete_custom_key_store, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteCustomKeyStore" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteCustomKeyStoreRequest) + o.output = Shapes::ShapeRef.new(shape: DeleteCustomKeyStoreResponse) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreHasCMKsException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + end) + + api.add_operation(:delete_imported_key_material, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteImportedKeyMaterial" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteImportedKeyMaterialRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:describe_custom_key_stores, Seahorse::Model::Operation.new.tap do |o| + o.name = "DescribeCustomKeyStores" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DescribeCustomKeyStoresRequest) + 
o.output = Shapes::ShapeRef.new(shape: DescribeCustomKeyStoresResponse) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:describe_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "DescribeKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DescribeKeyRequest) + o.output = Shapes::ShapeRef.new(shape: DescribeKeyResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + end) + + api.add_operation(:disable_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "DisableKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DisableKeyRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:disable_key_rotation, Seahorse::Model::Operation.new.tap do |o| + o.name = "DisableKeyRotation" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DisableKeyRotationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:disconnect_custom_key_store, Seahorse::Model::Operation.new.tap do |o| + o.name = "DisconnectCustomKeyStore" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DisconnectCustomKeyStoreRequest) + o.output = Shapes::ShapeRef.new(shape: DisconnectCustomKeyStoreResponse) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + end) + + api.add_operation(:enable_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "EnableKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: EnableKeyRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << 
Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:enable_key_rotation, Seahorse::Model::Operation.new.tap do |o| + o.name = "EnableKeyRotation" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: EnableKeyRotationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:encrypt, Seahorse::Model::Operation.new.tap do |o| + o.name = "Encrypt" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: EncryptRequest) + o.output = Shapes::ShapeRef.new(shape: EncryptResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:generate_data_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateDataKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateDataKeyRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateDataKeyResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:generate_data_key_pair, Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateDataKeyPair" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateDataKeyPairRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateDataKeyPairResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:generate_data_key_pair_without_plaintext, 
Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateDataKeyPairWithoutPlaintext" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateDataKeyPairWithoutPlaintextRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateDataKeyPairWithoutPlaintextResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:generate_data_key_without_plaintext, Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateDataKeyWithoutPlaintext" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateDataKeyWithoutPlaintextRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateDataKeyWithoutPlaintextResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:generate_mac, Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateMac" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateMacRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateMacResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:generate_random, Seahorse::Model::Operation.new.tap do |o| + o.name = "GenerateRandom" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GenerateRandomRequest) + o.output = Shapes::ShapeRef.new(shape: GenerateRandomResponse) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + end) + + api.add_operation(:get_key_policy, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetKeyPolicy" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetKeyPolicyRequest) + o.output = Shapes::ShapeRef.new(shape: GetKeyPolicyResponse) + o.errors << Shapes::ShapeRef.new(shape: 
NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:get_key_rotation_status, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetKeyRotationStatus" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetKeyRotationStatusRequest) + o.output = Shapes::ShapeRef.new(shape: GetKeyRotationStatusResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:get_parameters_for_import, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetParametersForImport" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetParametersForImportRequest) + o.output = Shapes::ShapeRef.new(shape: GetParametersForImportResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:get_public_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetPublicKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: GetPublicKeyRequest) + o.output = Shapes::ShapeRef.new(shape: GetPublicKeyResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:import_key_material, Seahorse::Model::Operation.new.tap do |o| + o.name = "ImportKeyMaterial" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ImportKeyMaterialRequest) + o.output = Shapes::ShapeRef.new(shape: ImportKeyMaterialResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: InvalidCiphertextException) + o.errors << Shapes::ShapeRef.new(shape: IncorrectKeyMaterialException) + o.errors 
<< Shapes::ShapeRef.new(shape: ExpiredImportTokenException) + o.errors << Shapes::ShapeRef.new(shape: InvalidImportTokenException) + end) + + api.add_operation(:list_aliases, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListAliases" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListAliasesRequest) + o.output = Shapes::ShapeRef.new(shape: ListAliasesResponse) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:list_grants, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListGrants" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListGrantsRequest) + o.output = Shapes::ShapeRef.new(shape: ListGrantsResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantIdException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:list_key_policies, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListKeyPolicies" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListKeyPoliciesRequest) + o.output = Shapes::ShapeRef.new(shape: ListKeyPoliciesResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:list_keys, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListKeys" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListKeysRequest) + o.output = Shapes::ShapeRef.new(shape: ListKeysResponse) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:list_resource_tags, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListResourceTags" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListResourceTagsRequest) + o.output = Shapes::ShapeRef.new(shape: ListResourceTagsResponse) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o[:pager] = 
Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:list_retirable_grants, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListRetirableGrants" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListRetirableGrantsRequest) + o.output = Shapes::ShapeRef.new(shape: ListGrantsResponse) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidMarkerException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o[:pager] = Aws::Pager.new( + limit_key: "limit", + tokens: { + "next_marker" => "marker" + } + ) + end) + + api.add_operation(:put_key_policy, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutKeyPolicy" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: PutKeyPolicyRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:re_encrypt, Seahorse::Model::Operation.new.tap do |o| + o.name = "ReEncrypt" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ReEncryptRequest) + o.output = Shapes::ShapeRef.new(shape: ReEncryptResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidCiphertextException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: IncorrectKeyException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:replicate_key, Seahorse::Model::Operation.new.tap do |o| + o.name = "ReplicateKey" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ReplicateKeyRequest) + o.output = Shapes::ShapeRef.new(shape: ReplicateKeyResponse) + o.errors << Shapes::ShapeRef.new(shape: AlreadyExistsException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: MalformedPolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: TagException) + o.errors << Shapes::ShapeRef.new(shape: 
UnsupportedOperationException) + end) + + api.add_operation(:retire_grant, Seahorse::Model::Operation.new.tap do |o| + o.name = "RetireGrant" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: RetireGrantRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantIdException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:revoke_grant, Seahorse::Model::Operation.new.tap do |o| + o.name = "RevokeGrant" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: RevokeGrantRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantIdException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:schedule_key_deletion, Seahorse::Model::Operation.new.tap do |o| + o.name = "ScheduleKeyDeletion" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ScheduleKeyDeletionRequest) + o.output = Shapes::ShapeRef.new(shape: ScheduleKeyDeletionResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:sign, Seahorse::Model::Operation.new.tap do |o| + o.name = "Sign" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: SignRequest) + o.output = Shapes::ShapeRef.new(shape: SignResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:tag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "TagResource" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: TagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + 
o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: TagException) + end) + + api.add_operation(:untag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "UntagResource" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: UntagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: TagException) + end) + + api.add_operation(:update_alias, Seahorse::Model::Operation.new.tap do |o| + o.name = "UpdateAlias" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: UpdateAliasRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:update_custom_key_store, Seahorse::Model::Operation.new.tap do |o| + o.name = "UpdateCustomKeyStore" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: UpdateCustomKeyStoreRequest) + o.output = Shapes::ShapeRef.new(shape: UpdateCustomKeyStoreResponse) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreNameInUseException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotRelatedException) + o.errors << Shapes::ShapeRef.new(shape: CustomKeyStoreInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterNotActiveException) + o.errors << Shapes::ShapeRef.new(shape: CloudHsmClusterInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriEndpointInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyUriUnreachableException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyIncorrectAuthenticationCredentialException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceInUseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyVpcEndpointServiceInvalidConfigurationException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyInvalidResponseException) + o.errors << Shapes::ShapeRef.new(shape: XksProxyInvalidConfigurationException) + end) + + api.add_operation(:update_key_description, Seahorse::Model::Operation.new.tap do |o| + o.name = "UpdateKeyDescription" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: UpdateKeyDescriptionRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << 
Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + + api.add_operation(:update_primary_region, Seahorse::Model::Operation.new.tap do |o| + o.name = "UpdatePrimaryRegion" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: UpdatePrimaryRegionRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: InvalidArnException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: UnsupportedOperationException) + end) + + api.add_operation(:verify, Seahorse::Model::Operation.new.tap do |o| + o.name = "Verify" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: VerifyRequest) + o.output = Shapes::ShapeRef.new(shape: VerifyResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: DependencyTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidSignatureException) + end) + + api.add_operation(:verify_mac, Seahorse::Model::Operation.new.tap do |o| + o.name = "VerifyMac" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: VerifyMacRequest) + o.output = Shapes::ShapeRef.new(shape: VerifyMacResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: DisabledException) + o.errors << Shapes::ShapeRef.new(shape: KeyUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: InvalidKeyUsageException) + o.errors << Shapes::ShapeRef.new(shape: InvalidGrantTokenException) + o.errors << Shapes::ShapeRef.new(shape: KMSInternalException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidMacException) + o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateException) + end) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/customizations.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/customizations.rb new file mode 100644 index 0000000..717e1f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/customizations.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_parameters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_parameters.rb new file mode 100644 index 0000000..670e9ae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_parameters.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + # Endpoint parameters used to influence endpoints per request. + # + # @!attribute region + # The AWS region used to dispatch the request. + # + # @return [String] + # + # @!attribute use_dual_stack + # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error. + # + # @return [Boolean] + # + # @!attribute use_fips + # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error. + # + # @return [Boolean] + # + # @!attribute endpoint + # Override the endpoint used to send this request + # + # @return [String] + # + EndpointParameters = Struct.new( + :region, + :use_dual_stack, + :use_fips, + :endpoint, + ) do + include Aws::Structure + + # @api private + class << self + PARAM_MAP = { + 'Region' => :region, + 'UseDualStack' => :use_dual_stack, + 'UseFIPS' => :use_fips, + 'Endpoint' => :endpoint, + }.freeze + end + + def initialize(options = {}) + self[:region] = options[:region] + self[:use_dual_stack] = options[:use_dual_stack] + self[:use_dual_stack] = false if self[:use_dual_stack].nil? + if self[:use_dual_stack].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack" + end + self[:use_fips] = options[:use_fips] + self[:use_fips] = false if self[:use_fips].nil? + if self[:use_fips].nil? + raise ArgumentError, "Missing required EndpointParameter: :use_fips" + end + self[:endpoint] = options[:endpoint] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_provider.rb new file mode 100644 index 0000000..9092621 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoint_provider.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + class EndpointProvider + def resolve_endpoint(parameters) + region = parameters.region + use_dual_stack = parameters.use_dual_stack + use_fips = parameters.use_fips + endpoint = parameters.endpoint + if Aws::Endpoints::Matchers.set?(endpoint) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported" + end + return Aws::Endpoints::Endpoint.new(url: endpoint, headers: {}, properties: {}) + end + if Aws::Endpoints::Matchers.set?(region) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://kms-fips.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + return Aws::Endpoints::Endpoint.new(url: "https://kms-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack")) + return Aws::Endpoints::Endpoint.new(url: "https://kms.#{region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {}) + end + raise ArgumentError, "DualStack is enabled but this partition does not support DualStack" + end + return Aws::Endpoints::Endpoint.new(url: "https://kms.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) + end + end + raise ArgumentError, "Invalid Configuration: Missing Region" + raise ArgumentError, 'No endpoint could be resolved' + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoints.rb new file mode 100644 index 0000000..90cf572 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/endpoints.rb @@ -0,0 +1,715 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::KMS + module Endpoints + + class CancelKeyDeletion + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ConnectCustomKeyStore + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class CreateAlias + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class CreateCustomKeyStore + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class CreateGrant + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class CreateKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class Decrypt + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DeleteAlias + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DeleteCustomKeyStore + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DeleteImportedKeyMaterial + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: 
context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DescribeCustomKeyStores + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DescribeKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DisableKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DisableKeyRotation + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class DisconnectCustomKeyStore + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class EnableKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class EnableKeyRotation + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class Encrypt + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateDataKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateDataKeyPair + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + 
use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateDataKeyPairWithoutPlaintext + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateDataKeyWithoutPlaintext + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateMac + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GenerateRandom + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GetKeyPolicy + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GetKeyRotationStatus + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GetParametersForImport + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class GetPublicKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ImportKeyMaterial + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListAliases + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + 
use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListGrants + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListKeyPolicies + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListKeys + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListResourceTags + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ListRetirableGrants + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class PutKeyPolicy + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ReEncrypt + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ReplicateKey + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class RetireGrant + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class RevokeGrant + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + 
use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class ScheduleKeyDeletion + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class Sign + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class TagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UntagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UpdateAlias + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UpdateCustomKeyStore + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UpdateKeyDescription + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UpdatePrimaryRegion + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class Verify + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class VerifyMac + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KMS::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + 
endpoint: endpoint, + ) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/errors.rb new file mode 100644 index 0000000..f9d5d47 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/errors.rb @@ -0,0 +1,774 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + + # When KMS returns an error response, the Ruby SDK constructs and raises an error. + # These errors all extend Aws::KMS::Errors::ServiceError < {Aws::Errors::ServiceError} + # + # You can rescue all KMS errors using ServiceError: + # + # begin + # # do stuff + # rescue Aws::KMS::Errors::ServiceError + # # rescues all KMS API errors + # end + # + # + # ## Request Context + # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns + # information about the request that generated the error. + # See {Seahorse::Client::RequestContext} for more information. + # + # ## Error Classes + # * {AlreadyExistsException} + # * {CloudHsmClusterInUseException} + # * {CloudHsmClusterInvalidConfigurationException} + # * {CloudHsmClusterNotActiveException} + # * {CloudHsmClusterNotFoundException} + # * {CloudHsmClusterNotRelatedException} + # * {CustomKeyStoreHasCMKsException} + # * {CustomKeyStoreInvalidStateException} + # * {CustomKeyStoreNameInUseException} + # * {CustomKeyStoreNotFoundException} + # * {DependencyTimeoutException} + # * {DisabledException} + # * {ExpiredImportTokenException} + # * {IncorrectKeyException} + # * {IncorrectKeyMaterialException} + # * {IncorrectTrustAnchorException} + # * {InvalidAliasNameException} + # * {InvalidArnException} + # * {InvalidCiphertextException} + # * {InvalidGrantIdException} + # * {InvalidGrantTokenException} + # * {InvalidImportTokenException} + # * {InvalidKeyUsageException} + # * {InvalidMarkerException} + # * {KMSInternalException} + # * {KMSInvalidMacException} + # * {KMSInvalidSignatureException} + # * {KMSInvalidStateException} + # * {KeyUnavailableException} + # * {LimitExceededException} + # * {MalformedPolicyDocumentException} + # * {NotFoundException} + # * {TagException} + # * {UnsupportedOperationException} + # * {XksKeyAlreadyInUseException} + # * {XksKeyInvalidConfigurationException} + # * {XksKeyNotFoundException} + # * {XksProxyIncorrectAuthenticationCredentialException} + # * {XksProxyInvalidConfigurationException} + # * {XksProxyInvalidResponseException} + # * {XksProxyUriEndpointInUseException} + # * {XksProxyUriInUseException} + # * {XksProxyUriUnreachableException} + # * {XksProxyVpcEndpointServiceInUseException} + # * {XksProxyVpcEndpointServiceInvalidConfigurationException} + # * {XksProxyVpcEndpointServiceNotFoundException} + # + # Additionally, error classes are dynamically generated for service errors based on the error code + # if they are not defined above. 
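+  # You can also rescue a single error class and inspect the failed request
+  # through {Aws::Errors::ServiceError#context}. A minimal illustrative
+  # sketch (the alias name below is hypothetical):
+  #
+  #     begin
+  #       kms = Aws::KMS::Client.new
+  #       kms.describe_key(key_id: 'alias/ExampleAlias')
+  #     rescue Aws::KMS::Errors::NotFoundException => e
+  #       puts e.message                # error message returned by KMS
+  #       puts e.context.operation_name # => :describe_key
+  #     end
+  #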
+ module Errors + + extend Aws::Errors::DynamicErrors + + class AlreadyExistsException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::AlreadyExistsException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CloudHsmClusterInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CloudHsmClusterInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CloudHsmClusterInvalidConfigurationException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CloudHsmClusterInvalidConfigurationException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CloudHsmClusterNotActiveException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CloudHsmClusterNotActiveException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CloudHsmClusterNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CloudHsmClusterNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CloudHsmClusterNotRelatedException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CloudHsmClusterNotRelatedException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CustomKeyStoreHasCMKsException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CustomKeyStoreHasCMKsException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CustomKeyStoreInvalidStateException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CustomKeyStoreInvalidStateException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class CustomKeyStoreNameInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CustomKeyStoreNameInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + 
def message + @message || @data[:message] + end + end + + class CustomKeyStoreNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::CustomKeyStoreNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class DependencyTimeoutException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::DependencyTimeoutException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class DisabledException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::DisabledException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class ExpiredImportTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::ExpiredImportTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class IncorrectKeyException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::IncorrectKeyException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class IncorrectKeyMaterialException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::IncorrectKeyMaterialException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class IncorrectTrustAnchorException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::IncorrectTrustAnchorException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidAliasNameException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidAliasNameException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidArnException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidArnException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidCiphertextException < ServiceError + + # @param 
[Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidCiphertextException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidGrantIdException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidGrantIdException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidGrantTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidGrantTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidImportTokenException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidImportTokenException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidKeyUsageException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidKeyUsageException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class InvalidMarkerException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::InvalidMarkerException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class KMSInternalException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::KMSInternalException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class KMSInvalidMacException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::KMSInvalidMacException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class KMSInvalidSignatureException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::KMSInvalidSignatureException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class KMSInvalidStateException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::KMSInvalidStateException] data + def 
initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class KeyUnavailableException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::KeyUnavailableException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class LimitExceededException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::LimitExceededException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class MalformedPolicyDocumentException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::MalformedPolicyDocumentException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class NotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::NotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class TagException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::TagException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class UnsupportedOperationException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::UnsupportedOperationException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksKeyAlreadyInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksKeyAlreadyInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksKeyInvalidConfigurationException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksKeyInvalidConfigurationException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksKeyNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksKeyNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + 
@message || @data[:message] + end + end + + class XksProxyIncorrectAuthenticationCredentialException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyIncorrectAuthenticationCredentialException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyInvalidConfigurationException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyInvalidConfigurationException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyInvalidResponseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyInvalidResponseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyUriEndpointInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyUriEndpointInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyUriInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyUriInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyUriUnreachableException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyUriUnreachableException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyVpcEndpointServiceInUseException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyVpcEndpointServiceInUseException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyVpcEndpointServiceInvalidConfigurationException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyVpcEndpointServiceInvalidConfigurationException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + class XksProxyVpcEndpointServiceNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::KMS::Types::XksProxyVpcEndpointServiceNotFoundException] data + def initialize(context, message, 
data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/plugins/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/plugins/endpoints.rb new file mode 100644 index 0000000..e85a407 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/plugins/endpoints.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::KMS + module Plugins + class Endpoints < Seahorse::Client::Plugin + option( + :endpoint_provider, + doc_type: 'Aws::KMS::EndpointProvider', + docstring: 'The endpoint provider used to resolve endpoints. Any '\ + 'object that responds to `#resolve_endpoint(parameters)` '\ + 'where `parameters` is a Struct similar to '\ + '`Aws::KMS::EndpointParameters`' + ) do |cfg| + Aws::KMS::EndpointProvider.new + end + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + # If endpoint was discovered, do not resolve or apply the endpoint. + unless context[:discovered_endpoint] + params = parameters_for_operation(context) + endpoint = context.config.endpoint_provider.resolve_endpoint(params) + + context.http_request.endpoint = endpoint.url + apply_endpoint_headers(context, endpoint.headers) + end + + context[:endpoint_params] = params + context[:auth_scheme] = + Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + @handler.call(context) + end + + private + + def apply_endpoint_headers(context, headers) + headers.each do |key, values| + value = values + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + + context.http_request.headers[key] = value + end + end + + def parameters_for_operation(context) + case context.operation_name + when :cancel_key_deletion + Aws::KMS::Endpoints::CancelKeyDeletion.build(context) + when :connect_custom_key_store + Aws::KMS::Endpoints::ConnectCustomKeyStore.build(context) + when :create_alias + Aws::KMS::Endpoints::CreateAlias.build(context) + when :create_custom_key_store + Aws::KMS::Endpoints::CreateCustomKeyStore.build(context) + when :create_grant + Aws::KMS::Endpoints::CreateGrant.build(context) + when :create_key + Aws::KMS::Endpoints::CreateKey.build(context) + when :decrypt + Aws::KMS::Endpoints::Decrypt.build(context) + when :delete_alias + Aws::KMS::Endpoints::DeleteAlias.build(context) + when :delete_custom_key_store + Aws::KMS::Endpoints::DeleteCustomKeyStore.build(context) + when :delete_imported_key_material + Aws::KMS::Endpoints::DeleteImportedKeyMaterial.build(context) + when :describe_custom_key_stores + Aws::KMS::Endpoints::DescribeCustomKeyStores.build(context) + when :describe_key + Aws::KMS::Endpoints::DescribeKey.build(context) + when :disable_key + Aws::KMS::Endpoints::DisableKey.build(context) + when :disable_key_rotation + Aws::KMS::Endpoints::DisableKeyRotation.build(context) + when :disconnect_custom_key_store + Aws::KMS::Endpoints::DisconnectCustomKeyStore.build(context) + when :enable_key + Aws::KMS::Endpoints::EnableKey.build(context) + when :enable_key_rotation + Aws::KMS::Endpoints::EnableKeyRotation.build(context) + when :encrypt + Aws::KMS::Endpoints::Encrypt.build(context) + when :generate_data_key + 
Aws::KMS::Endpoints::GenerateDataKey.build(context) + when :generate_data_key_pair + Aws::KMS::Endpoints::GenerateDataKeyPair.build(context) + when :generate_data_key_pair_without_plaintext + Aws::KMS::Endpoints::GenerateDataKeyPairWithoutPlaintext.build(context) + when :generate_data_key_without_plaintext + Aws::KMS::Endpoints::GenerateDataKeyWithoutPlaintext.build(context) + when :generate_mac + Aws::KMS::Endpoints::GenerateMac.build(context) + when :generate_random + Aws::KMS::Endpoints::GenerateRandom.build(context) + when :get_key_policy + Aws::KMS::Endpoints::GetKeyPolicy.build(context) + when :get_key_rotation_status + Aws::KMS::Endpoints::GetKeyRotationStatus.build(context) + when :get_parameters_for_import + Aws::KMS::Endpoints::GetParametersForImport.build(context) + when :get_public_key + Aws::KMS::Endpoints::GetPublicKey.build(context) + when :import_key_material + Aws::KMS::Endpoints::ImportKeyMaterial.build(context) + when :list_aliases + Aws::KMS::Endpoints::ListAliases.build(context) + when :list_grants + Aws::KMS::Endpoints::ListGrants.build(context) + when :list_key_policies + Aws::KMS::Endpoints::ListKeyPolicies.build(context) + when :list_keys + Aws::KMS::Endpoints::ListKeys.build(context) + when :list_resource_tags + Aws::KMS::Endpoints::ListResourceTags.build(context) + when :list_retirable_grants + Aws::KMS::Endpoints::ListRetirableGrants.build(context) + when :put_key_policy + Aws::KMS::Endpoints::PutKeyPolicy.build(context) + when :re_encrypt + Aws::KMS::Endpoints::ReEncrypt.build(context) + when :replicate_key + Aws::KMS::Endpoints::ReplicateKey.build(context) + when :retire_grant + Aws::KMS::Endpoints::RetireGrant.build(context) + when :revoke_grant + Aws::KMS::Endpoints::RevokeGrant.build(context) + when :schedule_key_deletion + Aws::KMS::Endpoints::ScheduleKeyDeletion.build(context) + when :sign + Aws::KMS::Endpoints::Sign.build(context) + when :tag_resource + Aws::KMS::Endpoints::TagResource.build(context) + when :untag_resource + Aws::KMS::Endpoints::UntagResource.build(context) + when :update_alias + Aws::KMS::Endpoints::UpdateAlias.build(context) + when :update_custom_key_store + Aws::KMS::Endpoints::UpdateCustomKeyStore.build(context) + when :update_key_description + Aws::KMS::Endpoints::UpdateKeyDescription.build(context) + when :update_primary_region + Aws::KMS::Endpoints::UpdatePrimaryRegion.build(context) + when :verify + Aws::KMS::Endpoints::Verify.build(context) + when :verify_mac + Aws::KMS::Endpoints::VerifyMac.build(context) + end + end + end + + def add_handlers(handlers, _config) + handlers.add(Handler, step: :build, priority: 75) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/resource.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/resource.rb new file mode 100644 index 0000000..e15bb39 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/resource.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + + class Resource + + # @param options ({}) + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new(options) + end + + # @return [Client] + def client + @client + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/types.rb new file mode 100644 index 0000000..8a247ae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-kms-1.63.0/lib/aws-sdk-kms/types.rb @@ -0,0 +1,6014 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::KMS + module Types + + # Contains information about an alias. + # + # @!attribute [rw] alias_name + # String that contains the alias. This value begins with `alias/`. + # @return [String] + # + # @!attribute [rw] alias_arn + # String that contains the key ARN. + # @return [String] + # + # @!attribute [rw] target_key_id + # String that contains the key identifier of the KMS key associated + # with the alias. + # @return [String] + # + # @!attribute [rw] creation_date + # Date and time that the alias was most recently created in the + # account and Region. Formatted as Unix time. + # @return [Time] + # + # @!attribute [rw] last_updated_date + # Date and time that the alias was most recently associated with a KMS + # key in the account and Region. Formatted as Unix time. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/AliasListEntry AWS API Documentation + # + class AliasListEntry < Struct.new( + :alias_name, + :alias_arn, + :target_key_id, + :creation_date, + :last_updated_date) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because it attempted to create a resource + # that already exists. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/AlreadyExistsException AWS API Documentation + # + class AlreadyExistsException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the KMS key whose deletion is being canceled. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletionRequest AWS API Documentation + # + class CancelKeyDeletionRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key whose + # deletion is canceled. 
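+  #
+  #   As an illustrative sketch (the key ID below is the documentation
+  #   example ID, not a real key):
+  #
+  #       resp = Aws::KMS::Client.new.cancel_key_deletion(
+  #         key_id: '1234abcd-12ab-34cd-56ef-1234567890ab'
+  #       )
+  #       resp.key_id # => key ARN of the KMS key whose deletion is canceled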
+ # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletionResponse AWS API Documentation + # + class CancelKeyDeletionResponse < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the specified CloudHSM cluster is + # already associated with a CloudHSM key store in the account, or it + # shares a backup history with a CloudHSM key store in the account. + # Each CloudHSM key store in the account must be associated with a + # different CloudHSM cluster. + # + # CloudHSM clusters that share a backup history have the same cluster + # certificate. To view the cluster certificate of a CloudHSM cluster, + # use the [DescribeClusters][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CloudHsmClusterInUseException AWS API Documentation + # + class CloudHsmClusterInUseException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the associated CloudHSM cluster did + # not meet the configuration requirements for a CloudHSM key store. + # + # * The CloudHSM cluster must be configured with private subnets in at + # least two different Availability Zones in the Region. + # + # * The [security group for the cluster][1] + # (cloudhsm-cluster-*<cluster-id>*-sg) must include inbound + # rules and outbound rules that allow TCP traffic on ports 2223-2225. + # The **Source** in the inbound rules and the **Destination** in the + # outbound rules must match the security group ID. These rules are set + # by default when you create the CloudHSM cluster. Do not delete or + # change them. To get information about a particular security group, + # use the [DescribeSecurityGroups][2] operation. + # + # * The CloudHSM cluster must contain at least as many HSMs as the + # operation requires. To add HSMs, use the CloudHSM [CreateHsm][3] + # operation. + # + # For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey + # operations, the CloudHSM cluster must have at least two active HSMs, + # each in a different Availability Zone. For the ConnectCustomKeyStore + # operation, the CloudHSM cluster must contain at least one active HSM. + # + # For information about the requirements for a CloudHSM cluster that is + # associated with a CloudHSM key store, see [Assemble the + # Prerequisites][4] in the *Key Management Service Developer Guide*. For + # information about creating a private subnet for a CloudHSM cluster, + # see [Create a Private Subnet][5] in the *CloudHSM User Guide*. For + # information about cluster security groups, see [Configure a Default + # Security Group][1] in the *CloudHSM User Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html + # [2]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html + # [3]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore + # [5]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CloudHsmClusterInvalidConfigurationException AWS API Documentation + # + class CloudHsmClusterInvalidConfigurationException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the CloudHSM cluster associated with + # the CloudHSM key store is not active. Initialize and activate the + # cluster and try the command again. For detailed instructions, see + # [Getting Started][1] in the *CloudHSM User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CloudHsmClusterNotActiveException AWS API Documentation + # + class CloudHsmClusterNotActiveException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because KMS cannot find the CloudHSM cluster + # with the specified cluster ID. Retry the request with a different + # cluster ID. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CloudHsmClusterNotFoundException AWS API Documentation + # + class CloudHsmClusterNotFoundException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the specified CloudHSM cluster has a + # different cluster certificate than the original cluster. You cannot + # use the operation to specify an unrelated cluster for a CloudHSM key + # store. + # + # Specify a CloudHSM cluster that shares a backup history with the + # original cluster. This includes clusters that were created from a + # backup of the current cluster, and clusters that were created from the + # same backup that produced the current cluster. + # + # CloudHSM clusters that share a backup history have the same cluster + # certificate. To view the cluster certificate of a CloudHSM cluster, + # use the [DescribeClusters][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CloudHsmClusterNotRelatedException AWS API Documentation + # + class CloudHsmClusterNotRelatedException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_id + # Enter the key store ID of the custom key store that you want to + # connect. To find the ID of a custom key store, use the + # DescribeCustomKeyStores operation.
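+  #
+  #   For example, you might look up a store ID and connect it, as a rough
+  #   sketch (assumes the account has at least one custom key store):
+  #
+  #       kms = Aws::KMS::Client.new
+  #       store = kms.describe_custom_key_stores.custom_key_stores.first
+  #       kms.connect_custom_key_store(custom_key_store_id: store.custom_key_store_id)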
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStoreRequest AWS API Documentation + # + class ConnectCustomKeyStoreRequest < Struct.new( + :custom_key_store_id) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStoreResponse AWS API Documentation + # + class ConnectCustomKeyStoreResponse < Aws::EmptyStructure; end + + # @!attribute [rw] alias_name + # Specifies the alias name. This value must begin with `alias/` + # followed by a name, such as `alias/ExampleAlias`. + # + # The `AliasName` value must be a string of 1-256 characters. It can + # contain only alphanumeric characters, forward slashes (/), + # underscores (\_), and dashes (-). The alias name cannot begin with + # `alias/aws/`. The `alias/aws/` prefix is reserved for [Amazon Web + # Services managed keys][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # @return [String] + # + # @!attribute [rw] target_key_id + # Associates the alias with the specified [customer managed key][1]. + # The KMS key must be in the same Amazon Web Services Region. + # + # A valid key ID is required. If you supply a null or empty string + # value, this operation returns an error. + # + # For help finding the key ID and ARN, see [Finding the Key ID and + # ARN][2] in the *Key Management Service Developer Guide*. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAliasRequest AWS API Documentation + # + class CreateAliasRequest < Struct.new( + :alias_name, + :target_key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_name + # Specifies a friendly name for the custom key store. The name must be + # unique in your Amazon Web Services account and Region. This + # parameter is required for all custom key stores. + # @return [String] + # + # @!attribute [rw] cloud_hsm_cluster_id + # Identifies the CloudHSM cluster for a CloudHSM key store. This + # parameter is required for custom key stores with + # `CustomKeyStoreType` of `AWS_CLOUDHSM`. + # + # Enter the cluster ID of any active CloudHSM cluster that is not + # already associated with a custom key store. To find the cluster ID, + # use the [DescribeClusters][1] operation. + # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + # @return [String] + # + # @!attribute [rw] trust_anchor_certificate + # Specifies the certificate for a CloudHSM key store. This parameter + # is required for custom key stores with a `CustomKeyStoreType` of + # `AWS_CLOUDHSM`. + # + # Enter the content of the trust anchor certificate for the CloudHSM + # cluster. This is the content of the `customerCA.crt` file that you + # created when you [initialized the cluster][1].
+ # + # + # + # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html + # @return [String] + # + # @!attribute [rw] key_store_password + # Specifies the `kmsuser` password for a CloudHSM key store. This + # parameter is required for custom key stores with a + # `CustomKeyStoreType` of `AWS_CLOUDHSM`. + # + # Enter the password of the [ `kmsuser` crypto user (CU) account][1] + # in the specified CloudHSM cluster. KMS logs into the cluster as this + # user to manage key material on your behalf. + # + # The password must be a string of 7 to 32 characters. Its value is + # case sensitive. + # + # This parameter tells KMS the `kmsuser` account password; it does not + # change the password in the CloudHSM cluster. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser + # @return [String] + # + # @!attribute [rw] custom_key_store_type + # Specifies the type of custom key store. The default value is + # `AWS_CLOUDHSM`. + # + # For a custom key store backed by a CloudHSM cluster, omit the + # parameter or enter `AWS_CLOUDHSM`. For a custom key store backed by + # an external key manager outside of Amazon Web Services, enter + # `EXTERNAL_KEY_STORE`. You cannot change this property after the key + # store is created. + # @return [String] + # + # @!attribute [rw] xks_proxy_uri_endpoint + # Specifies the endpoint that KMS uses to send requests to the + # external key store proxy (XKS proxy). This parameter is required for + # custom key stores with a `CustomKeyStoreType` of + # `EXTERNAL_KEY_STORE`. + # + # The protocol must be HTTPS. KMS communicates on port 443. Do not + # specify the port in the `XksProxyUriEndpoint` value. + # + # For external key stores with `XksProxyConnectivity` value of + # `VPC_ENDPOINT_SERVICE`, specify `https://` followed by the private + # DNS name of the VPC endpoint service. + # + # For external key stores with `PUBLIC_ENDPOINT` connectivity, this + # endpoint must be reachable before you create the custom key store. + # KMS connects to the external key store proxy while creating the + # custom key store. For external key stores with + # `VPC_ENDPOINT_SERVICE` connectivity, KMS connects when you call the + # ConnectCustomKeyStore operation. + # + # The value of this parameter must begin with `https://`. The + # remainder can contain upper and lower case letters (A-Z and a-z), + # numbers (0-9), dots (`.`), and hyphens (`-`). Additional slashes + # (`/` and `\`) are not permitted. + # + # Uniqueness requirements: + # + # * The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values + # must be unique in the Amazon Web Services account and Region. + # + # * An external key store with `PUBLIC_ENDPOINT` connectivity cannot + # use the same `XksProxyUriEndpoint` value as an external key store + # with `VPC_ENDPOINT_SERVICE` connectivity in the same Amazon Web + # Services Region. + # + # * Each external key store with `VPC_ENDPOINT_SERVICE` connectivity + # must have its own private DNS name. The `XksProxyUriEndpoint` + # value for external key stores with `VPC_ENDPOINT_SERVICE` + # connectivity (private DNS name) must be unique in the Amazon Web + # Services account and Region. + # @return [String] + # + # @!attribute [rw] xks_proxy_uri_path + # Specifies the base path to the proxy APIs for this external key + # store. To find this value, see the documentation for your external + # key store proxy.
This parameter is required for all custom key + # stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # The value must start with `/` and must end with `/kms/xks/v1` where + # `v1` represents the version of the KMS external key store proxy API. + # This path can include an optional prefix between the required + # elements such as `/prefix/kms/xks/v1`. + # + # Uniqueness requirements: + # + # * The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values + # must be unique in the Amazon Web Services account and Region. + # + # ^ + # @return [String] + # + # @!attribute [rw] xks_proxy_vpc_endpoint_service_name + # Specifies the name of the Amazon VPC endpoint service for interface + # endpoints that is used to communicate with your external key store + # proxy (XKS proxy). This parameter is required when the value of + # `CustomKeyStoreType` is `EXTERNAL_KEY_STORE` and the value of + # `XksProxyConnectivity` is `VPC_ENDPOINT_SERVICE`. + # + # The Amazon VPC endpoint service must [fulfill all requirements][1] + # for use with an external key store. + # + # **Uniqueness requirements:** + # + # * External key stores with `VPC_ENDPOINT_SERVICE` connectivity can + # share an Amazon VPC, but each external key store must have its own + # VPC endpoint service and private DNS name. + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements + # @return [String] + # + # @!attribute [rw] xks_proxy_authentication_credential + # Specifies an authentication credential for the external key store + # proxy (XKS proxy). This parameter is required for all custom key + # stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # The `XksProxyAuthenticationCredential` has two required elements: + # `RawSecretAccessKey`, a secret key, and `AccessKeyId`, a unique + # identifier for the `RawSecretAccessKey`. For character requirements, + # see + # [XksProxyAuthenticationCredentialType](kms/latest/APIReference/API_XksProxyAuthenticationCredentialType.html). + # + # KMS uses this authentication credential to sign requests to the + # external key store proxy on your behalf. This credential is + # unrelated to Identity and Access Management (IAM) and Amazon Web + # Services credentials. + # + # This parameter doesn't set or change the authentication credentials + # on the XKS proxy. It just tells KMS the credential that you + # established on your external key store proxy. If you rotate your + # proxy authentication credential, use the UpdateCustomKeyStore + # operation to provide the new credential to KMS. + # @return [Types::XksProxyAuthenticationCredentialType] + # + # @!attribute [rw] xks_proxy_connectivity + # Indicates how KMS communicates with the external key store proxy. + # This parameter is required for custom key stores with a + # `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`. + # + # If the external key store proxy uses a public endpoint, specify + # `PUBLIC_ENDPOINT`. If the external key store proxy uses an Amazon VPC + # endpoint service for communication with KMS, specify + # `VPC_ENDPOINT_SERVICE`. For help making this choice, see [Choosing a + # connectivity option][1] in the *Key Management Service Developer + # Guide*.
+ # + # An Amazon VPC endpoint service keeps your communication with KMS in + # a private address space entirely within Amazon Web Services, but it + # requires more configuration, including establishing an Amazon VPC + # with multiple subnets, a VPC endpoint service, a network load + # balancer, and a verified private DNS name. A public endpoint is + # simpler to set up, but it might be slower and might not fulfill your + # security requirements. You might consider testing with a public + # endpoint, and then establishing a VPC endpoint service for + # production tasks. Note that this choice does not determine the + # location of the external key store proxy. Even if you choose a VPC + # endpoint service, the proxy can be hosted within the VPC or outside + # of Amazon Web Services such as in your corporate data center. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStoreRequest AWS API Documentation + # + class CreateCustomKeyStoreRequest < Struct.new( + :custom_key_store_name, + :cloud_hsm_cluster_id, + :trust_anchor_certificate, + :key_store_password, + :custom_key_store_type, + :xks_proxy_uri_endpoint, + :xks_proxy_uri_path, + :xks_proxy_vpc_endpoint_service_name, + :xks_proxy_authentication_credential, + :xks_proxy_connectivity) + SENSITIVE = [:key_store_password] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_id + # A unique identifier for the new custom key store. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStoreResponse AWS API Documentation + # + class CreateCustomKeyStoreResponse < Struct.new( + :custom_key_store_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the KMS key for the grant. The grant gives principals + # permission to use this KMS key. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key + # in a different Amazon Web Services account, you must use the key + # ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] grantee_principal + # The identity that gets the permissions specified in the grant. + # + # To specify the grantee principal, use the Amazon Resource Name (ARN) + # of an Amazon Web Services principal. Valid principals include Amazon + # Web Services accounts, IAM users, IAM roles, federated users, and + # assumed role users. For help with the ARN syntax for a principal, + # see [IAM ARNs][1] in the *Identity and Access Management User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + # @return [String] + # + # @!attribute [rw] retiring_principal + # The principal that has permission to use the RetireGrant operation + # to retire the grant. + # + # To specify the principal, use the [Amazon Resource Name (ARN)][1] of + # an Amazon Web Services principal. Valid principals include Amazon + # Web Services accounts, IAM users, IAM roles, federated users, and + # assumed role users. For help with the ARN syntax for a principal, + # see [IAM ARNs][2] in the *Identity and Access Management User + # Guide*.
+ # + # The grant determines the retiring principal. Other principals might + # have permission to retire the grant or revoke the grant. For + # details, see RevokeGrant and [Retiring and revoking grants][3] in + # the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete + # @return [String] + # + # @!attribute [rw] operations + # A list of operations that the grant permits. + # + # This list must include only operations that are permitted in a + # grant. Also, the operation must be supported on the KMS key. For + # example, you cannot create a grant for a symmetric encryption KMS + # key that allows the Sign operation, or a grant for an asymmetric KMS + # key that allows the GenerateDataKey operation. If you try, KMS + # returns a `ValidationError` exception. For details, see [Grant + # operations][1] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + # @return [Array<String>] + # + # @!attribute [rw] constraints + # Specifies a grant constraint. + # + # KMS supports the `EncryptionContextEquals` and + # `EncryptionContextSubset` grant constraints. Each constraint value + # can include up to 8 encryption context pairs. The encryption context + # value in each constraint cannot exceed 384 characters. For + # information about grant constraints, see [Using grant + # constraints][1] in the *Key Management Service Developer Guide*. For + # more information about encryption context, see [Encryption + # context][2] in the *Key Management Service Developer Guide*. + # + # The encryption context grant constraints allow the permissions in + # the grant only when the encryption context in the request matches + # (`EncryptionContextEquals`) or includes (`EncryptionContextSubset`) + # the encryption context specified in this structure. + # + # The encryption context grant constraints are supported only on + # [grant operations][3] that include an `EncryptionContext` parameter, + # such as cryptographic operations on symmetric encryption KMS keys. + # Grants with grant constraints can include the DescribeKey and + # RetireGrant operations, but the constraint doesn't apply to these + # operations. If a grant with a grant constraint includes the + # `CreateGrant` operation, the constraint requires that any grants + # created with the `CreateGrant` permission have an equally strict or + # stricter encryption context constraint. + # + # You cannot use an encryption context grant constraint for + # cryptographic operations with asymmetric KMS keys or HMAC KMS keys. + # These keys don't support an encryption context. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + # @return [Types::GrantConstraints] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*.
+ # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @!attribute [rw] name + # A friendly name for the grant. Use this value to prevent the + # unintended creation of duplicate grants when retrying this request. + # + # When this value is absent, all `CreateGrant` requests result in a + # new grant with a unique `GrantId` even if all the supplied + # parameters are identical. This can result in unintended duplicates + # when you retry the `CreateGrant` request. + # + # When this value is present, you can retry a `CreateGrant` request + # with identical parameters; if the grant already exists, the original + # `GrantId` is returned without creating a new grant. Note that the + # returned grant token is unique with every `CreateGrant` request, + # even when a duplicate `GrantId` is returned. All grant tokens for + # the same grant ID can be used interchangeably. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrantRequest AWS API Documentation + # + class CreateGrantRequest < Struct.new( + :key_id, + :grantee_principal, + :retiring_principal, + :operations, + :constraints, + :grant_tokens, + :name) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] grant_token + # The grant token. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [String] + # + # @!attribute [rw] grant_id + # The unique identifier for the grant. + # + # You can use the `GrantId` in a ListGrants, RetireGrant, or + # RevokeGrant operation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrantResponse AWS API Documentation + # + class CreateGrantResponse < Struct.new( + :grant_token, + :grant_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] policy + # The key policy to attach to the KMS key. + # + # If you provide a key policy, it must meet the following criteria: + # + # * The key policy must allow the calling principal to make a + # subsequent `PutKeyPolicy` request on the KMS key. This reduces the + # risk that the KMS key becomes unmanageable. For more information, + # see [Default key policy][1] in the *Key Management Service + # Developer Guide*. (To omit this condition, set + # `BypassPolicyLockoutSafetyCheck` to true.) + # + # * Each statement in the key policy must contain one or more + # principals. The principals in the key policy must exist and be + # visible to KMS. When you create a new Amazon Web Services + # principal, you might need to enforce a delay before including the + # new principal in a key policy because the new principal might not + # be immediately visible to KMS. For more information, see [Changes + # that I make are not always immediately visible][2] in the *Amazon + # Web Services Identity and Access Management User Guide*. 
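+ #
+ # To make the first criterion above concrete, a minimal sketch of a
+ # key policy that preserves the caller's ability to call
+ # `PutKeyPolicy` later. The account ID and role name are placeholders;
+ # a production policy would normally grant broader administration and
+ # usage permissions:
+ #
+ #   require "aws-sdk-kms"
+ #   require "json"
+ #
+ #   policy = {
+ #     "Version" => "2012-10-17",
+ #     "Statement" => [{
+ #       "Sid" => "AllowKeyPolicyUpdates",
+ #       "Effect" => "Allow",
+ #       # Placeholder principal: the role that will call CreateKey.
+ #       "Principal" => { "AWS" => "arn:aws:iam::111122223333:role/KeyAdmin" },
+ #       "Action" => "kms:PutKeyPolicy",
+ #       "Resource" => "*"
+ #     }]
+ #   }
+ #
+ #   kms = Aws::KMS::Client.new
+ #   kms.create_key(policy: JSON.generate(policy))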
+ #
+ # If you do not provide a key policy, KMS attaches a default key
+ # policy to the KMS key. For more information, see [Default key
+ # policy][3] in the *Key Management Service Developer Guide*.
+ #
+ # The key policy size quota is 32 kilobytes (32768 bytes).
+ #
+ # For help writing and formatting a JSON policy document, see the [IAM
+ # JSON Policy Reference][4] in the *Identity and Access
+ # Management User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ # @return [String]
+ #
+ # @!attribute [rw] description
+ # A description of the KMS key.
+ #
+ # Use a description that helps you decide whether the KMS key is
+ # appropriate for a task. The default value is an empty string (no
+ # description).
+ #
+ # To set or change the description after the key is created, use
+ # UpdateKeyDescription.
+ # @return [String]
+ #
+ # @!attribute [rw] key_usage
+ # Determines the [cryptographic operations][1] for which you can use
+ # the KMS key. The default value is `ENCRYPT_DECRYPT`. This parameter
+ # is optional when you are creating a symmetric encryption KMS key;
+ # otherwise, it is required. You can't change the `KeyUsage` value
+ # after the KMS key is created.
+ #
+ # Select only one valid value.
+ #
+ # * For symmetric encryption KMS keys, omit the parameter or specify
+ # `ENCRYPT_DECRYPT`.
+ #
+ # * For HMAC KMS keys (symmetric), specify `GENERATE_VERIFY_MAC`.
+ #
+ # * For asymmetric KMS keys with RSA key material, specify
+ # `ENCRYPT_DECRYPT` or `SIGN_VERIFY`.
+ #
+ # * For asymmetric KMS keys with ECC key material, specify
+ # `SIGN_VERIFY`.
+ #
+ # * For asymmetric KMS keys with SM2 key material (China Regions
+ # only), specify `ENCRYPT_DECRYPT` or `SIGN_VERIFY`.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+ # @return [String]
+ #
+ # @!attribute [rw] customer_master_key_spec
+ # Instead, use the `KeySpec` parameter.
+ #
+ # The `KeySpec` and `CustomerMasterKeySpec` parameters work the same
+ # way. Only the names differ. We recommend that you use the `KeySpec`
+ # parameter in your code. However, to avoid breaking changes, KMS
+ # supports both parameters.
+ # @return [String]
+ #
+ # @!attribute [rw] key_spec
+ # Specifies the type of KMS key to create. The default value,
+ # `SYMMETRIC_DEFAULT`, creates a KMS key with a 256-bit AES-GCM key
+ # that is used for encryption and decryption, except in China Regions,
+ # where it creates a 128-bit symmetric key that uses SM4 encryption.
+ # For help choosing a key spec for your KMS key, see [Choosing a KMS
+ # key type][1] in the *Key Management Service Developer
+ # Guide*.
+ #
+ # The `KeySpec` determines whether the KMS key contains a symmetric
+ # key or an asymmetric key pair. It also determines the algorithms
+ # that the KMS key supports. You can't change the `KeySpec` after the
+ # KMS key is created. To further restrict the algorithms that can be
+ # used with the KMS key, use a condition key in its key policy or IAM
+ # policy.
+ # For more information, see [kms:EncryptionAlgorithm][2],
+ # [kms:MacAlgorithm][3], or [kms:SigningAlgorithm][4] in the
+ # *Key Management Service Developer Guide*.
+ #
+ # [Amazon Web Services services that are integrated with KMS][5] use
+ # symmetric encryption KMS keys to protect your data. These services
+ # do not support asymmetric KMS keys or HMAC KMS keys.
+ #
+ # KMS supports the following key specs for KMS keys:
+ #
+ # * Symmetric encryption key (default)
+ #
+ # * `SYMMETRIC_DEFAULT`
+ #
+ # ^
+ #
+ # * HMAC keys (symmetric)
+ #
+ # * `HMAC_224`
+ #
+ # * `HMAC_256`
+ #
+ # * `HMAC_384`
+ #
+ # * `HMAC_512`
+ #
+ # * Asymmetric RSA key pairs
+ #
+ # * `RSA_2048`
+ #
+ # * `RSA_3072`
+ #
+ # * `RSA_4096`
+ #
+ # * Asymmetric NIST-recommended elliptic curve key pairs
+ #
+ # * `ECC_NIST_P256` (secp256r1)
+ #
+ # * `ECC_NIST_P384` (secp384r1)
+ #
+ # * `ECC_NIST_P521` (secp521r1)
+ #
+ # * Other asymmetric elliptic curve key pairs
+ #
+ # * `ECC_SECG_P256K1` (secp256k1), commonly used for
+ # cryptocurrencies.
+ #
+ # ^
+ #
+ # * SM2 key pairs (China Regions only)
+ #
+ # * `SM2`
+ #
+ # ^
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm
+ # [5]: http://aws.amazon.com/kms/features/#AWS_Service_Integration
+ # @return [String]
+ #
+ # @!attribute [rw] origin
+ # The source of the key material for the KMS key. You cannot change
+ # the origin after you create the KMS key. The default is `AWS_KMS`,
+ # which means that KMS creates the key material.
+ #
+ # To [create a KMS key with no key material][1] (for imported key
+ # material), set this value to `EXTERNAL`. For more information about
+ # importing key material into KMS, see [Importing Key Material][2] in
+ # the *Key Management Service Developer Guide*. The `EXTERNAL` origin
+ # value is valid only for symmetric KMS keys.
+ #
+ # To [create a KMS key in a CloudHSM key store][3] and create its key
+ # material in the associated CloudHSM cluster, set this value to
+ # `AWS_CLOUDHSM`. You must also use the `CustomKeyStoreId` parameter
+ # to identify the CloudHSM key store. The `KeySpec` value must be
+ # `SYMMETRIC_DEFAULT`.
+ #
+ # To [create a KMS key in an external key store][4], set this value to
+ # `EXTERNAL_KEY_STORE`. You must also use the `CustomKeyStoreId`
+ # parameter to identify the external key store and the `XksKeyId`
+ # parameter to identify the associated external key. The `KeySpec`
+ # value must be `SYMMETRIC_DEFAULT`.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html
+ # @return [String]
+ #
+ # @!attribute [rw] custom_key_store_id
+ # Creates the KMS key in the specified [custom key store][1]. The
+ # `ConnectionState` of the custom key store must be `CONNECTED`. To
+ # find the `CustomKeyStoreId` and `ConnectionState`, use the
+ # DescribeCustomKeyStores operation.
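+ #
+ # Putting the `Origin` and `CustomKeyStoreId` parameters together, a
+ # minimal sketch of creating a key whose material lives in a CloudHSM
+ # key store (the key store ID is a placeholder; the key spec must stay
+ # `SYMMETRIC_DEFAULT`, the default):
+ #
+ #   require "aws-sdk-kms"
+ #
+ #   kms = Aws::KMS::Client.new
+ #   resp = kms.create_key(
+ #     origin: "AWS_CLOUDHSM",
+ #     custom_key_store_id: "cks-1234567890abcdef0", # placeholder ID
+ #     description: "Key backed by a CloudHSM cluster"
+ #   )
+ #   resp.key_metadata.key_id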
+ #
+ # This parameter is valid only for symmetric encryption KMS keys in a
+ # single Region. You cannot create any other type of KMS key in a
+ # custom key store.
+ #
+ # When you create a KMS key in a CloudHSM key store, KMS generates a
+ # non-exportable 256-bit symmetric key in its associated CloudHSM
+ # cluster and associates it with the KMS key. When you create a KMS
+ # key in an external key store, you must use the `XksKeyId` parameter
+ # to specify an external key that serves as key material for the KMS
+ # key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+ # @return [String]
+ #
+ # @!attribute [rw] bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The
+ # default value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key
+ # becomes unmanageable. Do not set this value to true
+ # indiscriminately.
+ #
+ # For more information, see [Default key policy][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Use this parameter only when you intend to prevent the principal
+ # that is making the request from making a subsequent PutKeyPolicy
+ # request on the KMS key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # @return [Boolean]
+ #
+ # @!attribute [rw] tags
+ # Assigns one or more tags to the KMS key. Use this parameter to tag
+ # the KMS key when it is created. To tag an existing KMS key, use the
+ # TagResource operation.
+ #
+ # Tagging or untagging a KMS key can allow or deny permission to the
+ # KMS key. For details, see [ABAC for KMS][1] in the *Key Management
+ # Service Developer Guide*.
+ #
+ #
+ #
+ # To use this parameter, you must have [kms:TagResource][2] permission
+ # in an IAM policy.
+ #
+ # Each tag consists of a tag key and a tag value. Both the tag key and
+ # the tag value are required, but the tag value can be an empty (null)
+ # string. You cannot have more than one tag on a KMS key with the same
+ # tag key. If you specify an existing tag key with a different tag
+ # value, KMS replaces the current tag value with the specified one.
+ #
+ # When you add tags to an Amazon Web Services resource, Amazon Web
+ # Services generates a cost allocation report with usage and costs
+ # aggregated by tags. Tags can also be used to control access to a KMS
+ # key. For details, see [Tagging Keys][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html
+ # @return [Array<Types::Tag>]
+ #
+ # @!attribute [rw] multi_region
+ # Creates a multi-Region primary key that you can replicate into other
+ # Amazon Web Services Regions. You cannot change this value after you
+ # create the KMS key.
+ #
+ # For a multi-Region key, set this parameter to `True`. For a
+ # single-Region KMS key, omit this parameter or set it to `False`. The
+ # default value is `False`.
+ #
+ # This operation supports *multi-Region keys*, a KMS feature that
+ # lets you create multiple interoperable KMS keys in different Amazon
+ # Web Services Regions.
+ # Because these KMS keys have the same key ID,
+ # key material, and other metadata, you can use them interchangeably
+ # to encrypt data in one Amazon Web Services Region and decrypt it in
+ # a different Amazon Web Services Region without re-encrypting the
+ # data or making a cross-Region call. For more information about
+ # multi-Region keys, see [Multi-Region keys in KMS][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # This value creates a *primary key*, not a replica. To create a
+ # *replica key*, use the ReplicateKey operation.
+ #
+ # You can create a symmetric or asymmetric multi-Region key, and you
+ # can create a multi-Region key with imported key material. However,
+ # you cannot create a multi-Region key in a custom key store.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html
+ # @return [Boolean]
+ #
+ # @!attribute [rw] xks_key_id
+ # Identifies the [external key][1] that serves as key material for the
+ # KMS key in an [external key store][2]. Specify the ID that the
+ # [external key store proxy][3] uses to refer to the external key. For
+ # help, see the documentation for your external key store proxy.
+ #
+ # This parameter is required for a KMS key with an `Origin` value of
+ # `EXTERNAL_KEY_STORE`. It is not valid for KMS keys with any other
+ # `Origin` value.
+ #
+ # The external key must be an existing 256-bit AES symmetric
+ # encryption key hosted outside of Amazon Web Services in an external
+ # key manager associated with the external key store specified by the
+ # `CustomKeyStoreId` parameter. This key must be enabled and
+ # configured to perform encryption and decryption. Each KMS key in an
+ # external key store must use a different external key. For details,
+ # see [Requirements for a KMS key in an external key store][4] in the
+ # *Key Management Service Developer Guide*.
+ #
+ # Each KMS key in an external key store is associated with two backing
+ # keys. One is key material that KMS generates. The other is the
+ # external key specified by this parameter. When you use the KMS key
+ # in an external key store to encrypt data, the encryption operation
+ # is performed first by KMS using the KMS key material, and then by
+ # the external key manager using the specified external key, a process
+ # known as *double encryption*. For details, see [Double
+ # encryption][5] in the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html#xks-key-requirements
+ # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKeyRequest AWS API Documentation
+ #
+ class CreateKeyRequest < Struct.new(
+ :policy,
+ :description,
+ :key_usage,
+ :customer_master_key_spec,
+ :key_spec,
+ :origin,
+ :custom_key_store_id,
+ :bypass_policy_lockout_safety_check,
+ :tags,
+ :multi_region,
+ :xks_key_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_metadata
+ # Metadata associated with the KMS key.
+ # @return [Types::KeyMetadata]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKeyResponse AWS API Documentation
+ #
+ class CreateKeyResponse < Struct.new(
+ :key_metadata)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because the custom key store contains KMS
+ # keys. After verifying that you do not need to use the KMS keys, use
+ # the ScheduleKeyDeletion operation to delete the KMS keys. After they
+ # are deleted, you can delete the custom key store.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CustomKeyStoreHasCMKsException AWS API Documentation
+ #
+ class CustomKeyStoreHasCMKsException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because of the `ConnectionState` of the
+ # custom key store. To get the `ConnectionState` of a custom key store,
+ # use the DescribeCustomKeyStores operation.
+ #
+ # This exception is thrown under the following conditions:
+ #
+ # * You requested the ConnectCustomKeyStore operation on a custom key
+ # store with a `ConnectionState` of `DISCONNECTING` or `FAILED`. This
+ # operation is valid for all other `ConnectionState` values. To
+ # reconnect a custom key store in a `FAILED` state, disconnect it
+ # (DisconnectCustomKeyStore), then connect it
+ # (ConnectCustomKeyStore).
+ #
+ # * You requested the CreateKey operation in a custom key store that is
+ # not connected. This operation is valid only when the custom key
+ # store `ConnectionState` is `CONNECTED`.
+ #
+ # * You requested the DisconnectCustomKeyStore operation on a custom key
+ # store with a `ConnectionState` of `DISCONNECTING` or `DISCONNECTED`.
+ # This operation is valid for all other `ConnectionState` values.
+ #
+ # * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore
+ # operation on a custom key store that is not disconnected. This
+ # operation is valid only when the custom key store `ConnectionState`
+ # is `DISCONNECTED`.
+ #
+ # * You requested the GenerateRandom operation in a CloudHSM key store
+ # that is not connected. This operation is valid only when the
+ # CloudHSM key store `ConnectionState` is `CONNECTED`.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CustomKeyStoreInvalidStateException AWS API Documentation
+ #
+ class CustomKeyStoreInvalidStateException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because the specified custom key store name
+ # is already assigned to another custom key store in the account. Try
+ # again with a custom key store name that is unique in the account.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CustomKeyStoreNameInUseException AWS API Documentation
+ #
+ class CustomKeyStoreNameInUseException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because KMS cannot find a custom key store
+ # with the specified key store name or ID.
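+ #
+ # In the Ruby SDK these exceptions surface as error classes under
+ # `Aws::KMS::Errors`. A minimal sketch of handling them around a
+ # CreateKey call in a custom key store (the key store ID is a
+ # placeholder):
+ #
+ #   require "aws-sdk-kms"
+ #
+ #   kms = Aws::KMS::Client.new
+ #   begin
+ #     kms.create_key(custom_key_store_id: "cks-1234567890abcdef0")
+ #   rescue Aws::KMS::Errors::CustomKeyStoreNotFoundException
+ #     # No key store with that ID exists in this account and Region.
+ #   rescue Aws::KMS::Errors::CustomKeyStoreInvalidStateException
+ #     # The key store exists but is not CONNECTED; connect and retry.
+ #     kms.connect_custom_key_store(custom_key_store_id: "cks-1234567890abcdef0")
+ #   end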
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CustomKeyStoreNotFoundException AWS API Documentation
+ #
+ class CustomKeyStoreNotFoundException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Contains information about each custom key store in the custom key
+ # store list.
+ #
+ # @!attribute [rw] custom_key_store_id
+ # A unique identifier for the custom key store.
+ # @return [String]
+ #
+ # @!attribute [rw] custom_key_store_name
+ # The user-specified friendly name for the custom key store.
+ # @return [String]
+ #
+ # @!attribute [rw] cloud_hsm_cluster_id
+ # A unique identifier for the CloudHSM cluster that is associated with
+ # a CloudHSM key store. This field appears only when the
+ # `CustomKeyStoreType` is `AWS_CLOUDHSM`.
+ # @return [String]
+ #
+ # @!attribute [rw] trust_anchor_certificate
+ # The trust anchor certificate of the CloudHSM cluster associated with
+ # a CloudHSM key store. When you [initialize the cluster][1], you
+ # create this certificate and save it in the `customerCA.crt` file.
+ #
+ # This field appears only when the `CustomKeyStoreType` is
+ # `AWS_CLOUDHSM`.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr
+ # @return [String]
+ #
+ # @!attribute [rw] connection_state
+ # Indicates whether the custom key store is connected to its backing
+ # key store. For a CloudHSM key store, the `ConnectionState`
+ # indicates whether it is connected to its CloudHSM cluster. For an
+ # external key store, the `ConnectionState` indicates whether it is
+ # connected to the external key store proxy that communicates with
+ # your external key manager.
+ #
+ # You can create and use KMS keys in your custom key store only when
+ # its `ConnectionState` is `CONNECTED`.
+ #
+ # The `ConnectionState` value is `DISCONNECTED` only if the key store
+ # has never been connected or you use the DisconnectCustomKeyStore
+ # operation to disconnect it. If the value is `CONNECTED` but you are
+ # having trouble using the custom key store, make sure that the
+ # backing key store is reachable and active. For a CloudHSM key
+ # store, verify that its associated CloudHSM cluster is active and
+ # contains at least one active HSM. For an external key store, verify
+ # that the external key store proxy and external key manager are
+ # connected and enabled.
+ #
+ # A value of `FAILED` indicates that an attempt to connect was
+ # unsuccessful. The `ConnectionErrorCode` field in the response
+ # indicates the cause of the failure. For help resolving a connection
+ # failure, see [Troubleshooting a custom key store][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html
+ # @return [String]
+ #
+ # @!attribute [rw] connection_error_code
+ # Describes the connection error. This field appears in the response
+ # only when the `ConnectionState` is `FAILED`.
+ #
+ # Many failures can be resolved by updating the properties of the
+ # custom key store. To update a custom key store, disconnect it
+ # (DisconnectCustomKeyStore), correct the errors
+ # (UpdateCustomKeyStore), and try to connect again
+ # (ConnectCustomKeyStore). For additional help resolving these errors,
+ # see [How to Fix a Connection Failure][1] in the *Key Management
+ # Service Developer Guide*.
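+ #
+ # That disconnect, update, reconnect sequence maps directly onto three
+ # client calls. A minimal sketch (the key store ID and password are
+ # placeholders):
+ #
+ #   require "aws-sdk-kms"
+ #
+ #   kms = Aws::KMS::Client.new
+ #   id = "cks-1234567890abcdef0" # placeholder key store ID
+ #
+ #   kms.disconnect_custom_key_store(custom_key_store_id: id)
+ #   # Correct the failing property; here, the kmsuser password.
+ #   kms.update_custom_key_store(custom_key_store_id: id,
+ #                               key_store_password: "kmsuser-password")
+ #   kms.connect_custom_key_store(custom_key_store_id: id)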
+ #
+ # **All custom key stores:**
+ #
+ # * `INTERNAL_ERROR` — KMS could not complete the request due to an
+ # internal error. Retry the request. For `ConnectCustomKeyStore`
+ # requests, disconnect the custom key store before trying to connect
+ # again.
+ #
+ # * `NETWORK_ERRORS` — Network errors are preventing KMS from
+ # connecting the custom key store to its backing key store.
+ #
+ # **CloudHSM key stores:**
+ #
+ # * `CLUSTER_NOT_FOUND` — KMS cannot find the CloudHSM cluster with
+ # the specified cluster ID.
+ #
+ # * `INSUFFICIENT_CLOUDHSM_HSMS` — The associated CloudHSM cluster
+ # does not contain any active HSMs. To connect a custom key store to
+ # its CloudHSM cluster, the cluster must contain at least one active
+ # HSM.
+ #
+ # * `INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET` — At least one private
+ # subnet associated with the CloudHSM cluster doesn't have any
+ # available IP addresses. A CloudHSM key store connection requires
+ # one free IP address in each of the associated private subnets,
+ # although two are preferable. For details, see [How to Fix a
+ # Connection Failure][1] in the *Key Management Service Developer
+ # Guide*.
+ #
+ # * `INVALID_CREDENTIALS` — The `KeyStorePassword` for the custom key
+ # store doesn't match the current password of the `kmsuser` crypto
+ # user in the CloudHSM cluster. Before you can connect your custom
+ # key store to its CloudHSM cluster, you must change the `kmsuser`
+ # account password and update the `KeyStorePassword` value for the
+ # custom key store.
+ #
+ # * `SUBNET_NOT_FOUND` — A subnet in the CloudHSM cluster
+ # configuration was deleted. If KMS cannot find all of the subnets
+ # in the cluster configuration, attempts to connect the custom key
+ # store to the CloudHSM cluster fail. To fix this error, create a
+ # cluster from a recent backup and associate it with your custom key
+ # store. (This process creates a new cluster configuration with a
+ # VPC and private subnets.) For details, see [How to Fix a
+ # Connection Failure][1] in the *Key Management Service Developer
+ # Guide*.
+ #
+ # * `USER_LOCKED_OUT` — The `kmsuser` CU account is locked out of the
+ # associated CloudHSM cluster due to too many failed password
+ # attempts. Before you can connect your custom key store to its
+ # CloudHSM cluster, you must change the `kmsuser` account password
+ # and update the key store password value for the custom key store.
+ #
+ # * `USER_LOGGED_IN` — The `kmsuser` CU account is logged into the
+ # associated CloudHSM cluster. This prevents KMS from rotating the
+ # `kmsuser` account password and logging into the cluster. Before
+ # you can connect your custom key store to its CloudHSM cluster, you
+ # must log the `kmsuser` CU out of the cluster. If you changed the
+ # `kmsuser` password to log into the cluster, you must also update
+ # the key store password value for the custom key store. For
+ # help, see [How to Log Out and Reconnect][2] in the *Key Management
+ # Service Developer Guide*.
+ #
+ # * `USER_NOT_FOUND` — KMS cannot find a `kmsuser` CU account in the
+ # associated CloudHSM cluster. Before you can connect your custom
+ # key store to its CloudHSM cluster, you must create a `kmsuser` CU
+ # account in the cluster, and then update the key store password
+ # value for the custom key store.
+ #
+ # **External key stores:**
+ #
+ # * `INVALID_CREDENTIALS` — One or both of the
+ # `XksProxyAuthenticationCredential` values is not valid on the
+ # specified external key store proxy.
+ #
+ # * `XKS_PROXY_ACCESS_DENIED` — KMS requests are denied access to the
+ # external key store proxy. If the external key store proxy has
+ # authorization rules, verify that they permit KMS to communicate
+ # with the proxy on your behalf.
+ #
+ # * `XKS_PROXY_INVALID_CONFIGURATION` — A configuration error is
+ # preventing the external key store from connecting to its proxy.
+ # Verify the value of the `XksProxyUriPath`.
+ #
+ # * `XKS_PROXY_INVALID_RESPONSE` — KMS cannot interpret the response
+ # from the external key store proxy. If you see this connection
+ # error code repeatedly, notify your external key store proxy
+ # vendor.
+ #
+ # * `XKS_PROXY_INVALID_TLS_CONFIGURATION` — KMS cannot connect to the
+ # external key store proxy because the TLS configuration is invalid.
+ # Verify that the XKS proxy supports TLS 1.2 or 1.3. Also, verify
+ # that the TLS certificate is not expired, that it matches the
+ # hostname in the `XksProxyUriEndpoint` value, and that it is signed
+ # by a certificate authority included in the [Trusted Certificate
+ # Authorities][3] list.
+ #
+ # * `XKS_PROXY_NOT_REACHABLE` — KMS can't communicate with your
+ # external key store proxy. Verify that the `XksProxyUriEndpoint`
+ # and `XksProxyUriPath` are correct. Use the tools for your external
+ # key store proxy to verify that the proxy is active and available
+ # on its network. Also, verify that your external key manager
+ # instances are operating properly. Connection attempts fail with
+ # this connection error code if the proxy reports that all external
+ # key manager instances are unavailable.
+ #
+ # * `XKS_PROXY_TIMED_OUT` — KMS can connect to the external key store
+ # proxy, but the proxy does not respond to KMS in the time allotted.
+ # If you see this connection error code repeatedly, notify your
+ # external key store proxy vendor.
+ #
+ # * `XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION` — The Amazon VPC
+ # endpoint service configuration doesn't conform to the
+ # requirements for a KMS external key store.
+ #
+ # * The VPC endpoint service must be an endpoint service for
+ # interface endpoints in the caller's Amazon Web Services
+ # account.
+ #
+ # * It must have a network load balancer (NLB) connected to at least
+ # two subnets, each in a different Availability Zone.
+ #
+ # * The `Allow principals` list must include the KMS service
+ # principal for the Region, `cks.kms.<Region>.amazonaws.com`, such
+ # as `cks.kms.us-east-1.amazonaws.com`.
+ #
+ # * It must *not* require [acceptance][4] of connection requests.
+ #
+ # * It must have a private DNS name. The private DNS name for an
+ # external key store with `VPC_ENDPOINT_SERVICE` connectivity must
+ # be unique in its Amazon Web Services Region.
+ #
+ # * The domain of the private DNS name must have a [verification
+ # status][5] of `verified`.
+ #
+ # * The [TLS certificate][6] specifies the private DNS hostname at
+ # which the endpoint is reachable.
+ #
+ # * `XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND` — KMS can't find the VPC
+ # endpoint service that it uses to communicate with the external key
+ # store proxy. Verify that the `XksProxyVpcEndpointServiceName` is
+ # correct and the KMS service principal has service consumer
+ # permissions on the Amazon VPC endpoint service.
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2 + # [3]: https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities + # [4]: https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html + # [5]: https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html + # [6]: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html + # @return [String] + # + # @!attribute [rw] creation_date + # The date and time when the custom key store was created. + # @return [Time] + # + # @!attribute [rw] custom_key_store_type + # Indicates the type of the custom key store. `AWS_CLOUDHSM` indicates + # a custom key store backed by an CloudHSM cluster. + # `EXTERNAL_KEY_STORE` indicates a custom key store backed by an + # external key store proxy and external key manager outside of Amazon + # Web Services. + # @return [String] + # + # @!attribute [rw] xks_proxy_configuration + # Configuration settings for the external key store proxy (XKS proxy). + # The external key store proxy translates KMS requests into a format + # that your external key manager can understand. The proxy + # configuration includes connection information that KMS requires. + # + # This field appears only when the `CustomKeyStoreType` is + # `EXTERNAL_KEY_STORE`. + # @return [Types::XksProxyConfigurationType] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CustomKeyStoresListEntry AWS API Documentation + # + class CustomKeyStoresListEntry < Struct.new( + :custom_key_store_id, + :custom_key_store_name, + :cloud_hsm_cluster_id, + :trust_anchor_certificate, + :connection_state, + :connection_error_code, + :creation_date, + :custom_key_store_type, + :xks_proxy_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] ciphertext_blob + # Ciphertext to be decrypted. The blob includes metadata. + # @return [String] + # + # @!attribute [rw] encryption_context + # Specifies the encryption context to use when decrypting the data. An + # encryption context is valid only for [cryptographic operations][1] + # with a symmetric encryption KMS key. The standard asymmetric + # encryption algorithms and HMAC algorithms that KMS uses do not + # support an encryption context. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. + # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][2] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. 
+ # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @!attribute [rw] key_id + # Specifies the KMS key that KMS uses to decrypt the ciphertext. + # + # Enter a key ID of the KMS key that was used to encrypt the + # ciphertext. If you identify a different KMS key, the `Decrypt` + # operation throws an `IncorrectKeyException`. + # + # This parameter is required only when the ciphertext was encrypted + # under an asymmetric KMS key. If you used a symmetric encryption KMS + # key, KMS can get the KMS key from metadata that it adds to the + # symmetric ciphertext blob. However, it is always recommended as a + # best practice. This practice ensures that you use the KMS key that + # you intend. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] encryption_algorithm + # Specifies the encryption algorithm that will be used to decrypt the + # ciphertext. Specify the same algorithm that was used to encrypt the + # data. If you specify a different algorithm, the `Decrypt` operation + # fails. + # + # This parameter is required only when the ciphertext was encrypted + # under an asymmetric KMS key. The default value, `SYMMETRIC_DEFAULT`, + # represents the only supported algorithm that is valid for symmetric + # encryption KMS keys. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DecryptRequest AWS API Documentation + # + class DecryptRequest < Struct.new( + :ciphertext_blob, + :encryption_context, + :grant_tokens, + :key_id, + :encryption_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that was used + # to decrypt the ciphertext. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] plaintext + # Decrypted plaintext data. When you use the HTTP API or the Amazon + # Web Services CLI, the value is Base64-encoded. Otherwise, it is not + # Base64-encoded. + # @return [String] + # + # @!attribute [rw] encryption_algorithm + # The encryption algorithm that was used to decrypt the ciphertext. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DecryptResponse AWS API Documentation + # + class DecryptResponse < Struct.new( + :key_id, + :plaintext, + :encryption_algorithm) + SENSITIVE = [:plaintext] + include Aws::Structure + end + + # @!attribute [rw] alias_name + # The alias to be deleted. 
The alias name must begin with `alias/` + # followed by the alias name, such as `alias/ExampleAlias`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAliasRequest AWS API Documentation + # + class DeleteAliasRequest < Struct.new( + :alias_name) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_id + # Enter the ID of the custom key store you want to delete. To find the + # ID of a custom key store, use the DescribeCustomKeyStores operation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStoreRequest AWS API Documentation + # + class DeleteCustomKeyStoreRequest < Struct.new( + :custom_key_store_id) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStoreResponse AWS API Documentation + # + class DeleteCustomKeyStoreResponse < Aws::EmptyStructure; end + + # @!attribute [rw] key_id + # Identifies the KMS key from which you are deleting imported key + # material. The `Origin` of the KMS key must be `EXTERNAL`. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterialRequest AWS API Documentation + # + class DeleteImportedKeyMaterialRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # The system timed out while trying to fulfill the request. You can + # retry the request. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DependencyTimeoutException AWS API Documentation + # + class DependencyTimeoutException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_id + # Gets only information about the specified custom key store. Enter + # the key store ID. + # + # By default, this operation gets information about all custom key + # stores in the account and Region. To limit the output to a + # particular custom key store, provide either the `CustomKeyStoreId` + # or `CustomKeyStoreName` parameter, but not both. + # @return [String] + # + # @!attribute [rw] custom_key_store_name + # Gets only information about the specified custom key store. Enter + # the friendly name of the custom key store. + # + # By default, this operation gets information about all custom key + # stores in the account and Region. To limit the output to a + # particular custom key store, provide either the `CustomKeyStoreId` + # or `CustomKeyStoreName` parameter, but not both. + # @return [String] + # + # @!attribute [rw] limit + # Use this parameter to specify the maximum number of items to return. + # When this value is present, KMS does not return more than the + # specified number of items, but it might return fewer. + # @return [Integer] + # + # @!attribute [rw] marker + # Use this parameter in a subsequent request after you receive a + # response with truncated results. Set it to the value of `NextMarker` + # from the truncated response you just received. 
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStoresRequest AWS API Documentation
+ #
+ class DescribeCustomKeyStoresRequest < Struct.new(
+ :custom_key_store_id,
+ :custom_key_store_name,
+ :limit,
+ :marker)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] custom_key_stores
+ # Contains metadata about each custom key store.
+ # @return [Array<Types::CustomKeyStoresListEntry>]
+ #
+ # @!attribute [rw] next_marker
+ # When `Truncated` is true, this element is present and contains the
+ # value to use for the `Marker` parameter in a subsequent request.
+ # @return [String]
+ #
+ # @!attribute [rw] truncated
+ # A flag that indicates whether there are more items in the list. When
+ # this value is true, the list in this response is truncated. To get
+ # more items, pass the value of the `NextMarker` element in
+ # this response to the `Marker` parameter in a subsequent request.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStoresResponse AWS API Documentation
+ #
+ class DescribeCustomKeyStoresResponse < Struct.new(
+ :custom_key_stores,
+ :next_marker,
+ :truncated)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Describes the specified KMS key.
+ #
+ # If you specify a predefined Amazon Web Services alias (an Amazon Web
+ # Services alias with no key ID), KMS associates the alias with an
+ # [Amazon Web Services managed key][1] and returns its `KeyId` and
+ # `Arn` in the response.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify
+ # a KMS key in a different Amazon Web Services account, you must use
+ # the key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # @return [String]
+ #
+ # @!attribute [rw] grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*.
+ # For more information, see [Grant token][1] and [Using a grant
+ # token][2] in the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ # @return [Array<String>]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKeyRequest AWS API Documentation
+ #
+ class DescribeKeyRequest < Struct.new(
+ :key_id,
+ :grant_tokens)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_metadata
+ # Metadata associated with the key.
+ # @return [Types::KeyMetadata]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKeyResponse AWS API Documentation
+ #
+ class DescribeKeyResponse < Struct.new(
+ :key_metadata)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Identifies the KMS key to disable.
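+ #
+ # A minimal sketch that ties DescribeKey to DisableKey: look up the
+ # key's metadata, then disable the key only if it is currently enabled
+ # (alias from the examples above):
+ #
+ #   require "aws-sdk-kms"
+ #
+ #   kms = Aws::KMS::Client.new
+ #   meta = kms.describe_key(key_id: "alias/ExampleAlias").key_metadata
+ #   kms.disable_key(key_id: meta.key_id) if meta.enabled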
+ # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRequest AWS API Documentation + # + class DisableKeyRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies a symmetric encryption KMS key. You cannot enable or + # disable automatic rotation of [asymmetric KMS keys][1], [HMAC KMS + # keys][2], KMS keys with [imported key material][3], or KMS keys in a + # [custom key store][4]. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotationRequest AWS API Documentation + # + class DisableKeyRotationRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the specified KMS key is not enabled. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisabledException AWS API Documentation + # + class DisabledException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_key_store_id + # Enter the ID of the custom key store you want to disconnect. To find + # the ID of a custom key store, use the DescribeCustomKeyStores + # operation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStoreRequest AWS API Documentation + # + class DisconnectCustomKeyStoreRequest < Struct.new( + :custom_key_store_id) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStoreResponse AWS API Documentation + # + class DisconnectCustomKeyStoreResponse < Aws::EmptyStructure; end + + # @!attribute [rw] key_id + # Identifies the KMS key to enable. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRequest AWS API Documentation + # + class EnableKeyRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies a symmetric encryption KMS key. 
You cannot enable + # automatic rotation of [asymmetric KMS keys][1], [HMAC KMS keys][2], + # KMS keys with [imported key material][3], or KMS keys in a [custom + # key store][4]. To enable or disable automatic rotation of a set of + # related [multi-Region keys][5], set the property on the primary key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotationRequest AWS API Documentation + # + class EnableKeyRotationRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the KMS key to use in the encryption operation. The KMS + # key must have a `KeyUsage` of `ENCRYPT_DECRYPT`. To find the + # `KeyUsage` of a KMS key, use the DescribeKey operation. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] plaintext + # Data to be encrypted. + # @return [String] + # + # @!attribute [rw] encryption_context + # Specifies the encryption context that will be used to encrypt the + # data. An encryption context is valid only for [cryptographic + # operations][1] with a symmetric encryption KMS key. The standard + # asymmetric encryption algorithms and HMAC algorithms that KMS uses + # do not support an encryption context. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. + # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][2] in the *Key + # Management Service Developer Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @!attribute [rw] encryption_algorithm + # Specifies the encryption algorithm that KMS will use to encrypt the + # plaintext message. The algorithm must be compatible with the KMS key + # that you specify. + # + # This parameter is required only for asymmetric KMS keys. The default + # value, `SYMMETRIC_DEFAULT`, is the algorithm used for symmetric + # encryption KMS keys. If you are using an asymmetric KMS key, we + # recommend RSAES\_OAEP\_SHA\_256. + # + # The SM2PKE algorithm is only available in China Regions. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EncryptRequest AWS API Documentation + # + class EncryptRequest < Struct.new( + :key_id, + :plaintext, + :encryption_context, + :grant_tokens, + :encryption_algorithm) + SENSITIVE = [:plaintext] + include Aws::Structure + end + + # @!attribute [rw] ciphertext_blob + # The encrypted plaintext. When you use the HTTP API or the Amazon Web + # Services CLI, the value is Base64-encoded. Otherwise, it is not + # Base64-encoded. + # @return [String] + # + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that was used + # to encrypt the plaintext. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] encryption_algorithm + # The encryption algorithm that was used to encrypt the plaintext. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EncryptResponse AWS API Documentation + # + class EncryptResponse < Struct.new( + :ciphertext_blob, + :key_id, + :encryption_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the specified import token is + # expired. Use GetParametersForImport to get a new import token and + # public key, use the new public key to encrypt the key material, and + # then try the request again. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ExpiredImportTokenException AWS API Documentation + # + class ExpiredImportTokenException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] encryption_context + # Specifies the encryption context that will be used when encrypting + # the private key in the data key pair. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. 
+ # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][1] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] key_id + # Specifies the symmetric encryption KMS key that encrypts the private + # key in the data key pair. You cannot specify an asymmetric KMS key + # or a KMS key in a custom key store. To get the type and origin of + # your KMS key, use the DescribeKey operation. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] key_pair_spec + # Determines the type of data key pair that is generated. + # + # The KMS rule that restricts the use of asymmetric RSA and SM2 KMS + # keys to encrypt and decrypt or to sign and verify (but not both), + # and the rule that permits you to use ECC KMS keys only to sign and + # verify, are not effective on data key pairs, which are used outside + # of KMS. The SM2 key spec is only available in China Regions. + # @return [String] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairRequest AWS API Documentation + # + class GenerateDataKeyPairRequest < Struct.new( + :encryption_context, + :key_id, + :key_pair_spec, + :grant_tokens) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] private_key_ciphertext_blob + # The encrypted copy of the private key. When you use the HTTP API or + # the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, + # it is not Base64-encoded. + # @return [String] + # + # @!attribute [rw] private_key_plaintext + # The plaintext copy of the private key. When you use the HTTP API or + # the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, + # it is not Base64-encoded. + # @return [String] + # + # @!attribute [rw] public_key + # The public key (in plaintext). When you use the HTTP API or the + # Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it + # is not Base64-encoded. 
+ # @return [String] + # + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that + # encrypted the private key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] key_pair_spec + # The type of data key pair that was generated. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairResponse AWS API Documentation + # + class GenerateDataKeyPairResponse < Struct.new( + :private_key_ciphertext_blob, + :private_key_plaintext, + :public_key, + :key_id, + :key_pair_spec) + SENSITIVE = [:private_key_plaintext] + include Aws::Structure + end + + # @!attribute [rw] encryption_context + # Specifies the encryption context that will be used when encrypting + # the private key in the data key pair. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. + # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][1] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] key_id + # Specifies the symmetric encryption KMS key that encrypts the private + # key in the data key pair. You cannot specify an asymmetric KMS key + # or a KMS key in a custom key store. To get the type and origin of + # your KMS key, use the DescribeKey operation. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] key_pair_spec + # Determines the type of data key pair that is generated. + # + # The KMS rule that restricts the use of asymmetric RSA and SM2 KMS + # keys to encrypt and decrypt or to sign and verify (but not both), + # and the rule that permits you to use ECC KMS keys only to sign and + # verify, are not effective on data key pairs, which are used outside + # of KMS. The SM2 key spec is only available in China Regions. + # @return [String] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintextRequest AWS API Documentation + # + class GenerateDataKeyPairWithoutPlaintextRequest < Struct.new( + :encryption_context, + :key_id, + :key_pair_spec, + :grant_tokens) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] private_key_ciphertext_blob + # The encrypted copy of the private key. When you use the HTTP API or + # the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, + # it is not Base64-encoded. + # @return [String] + # + # @!attribute [rw] public_key + # The public key (in plaintext). When you use the HTTP API or the + # Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it + # is not Base64-encoded. + # @return [String] + # + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that + # encrypted the private key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] key_pair_spec + # The type of data key pair that was generated. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintextResponse AWS API Documentation + # + class GenerateDataKeyPairWithoutPlaintextResponse < Struct.new( + :private_key_ciphertext_blob, + :public_key, + :key_id, + :key_pair_spec) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Specifies the symmetric encryption KMS key that encrypts the data + # key. You cannot specify an asymmetric KMS key or a KMS key in a + # custom key store. To get the type and origin of your KMS key, use + # the DescribeKey operation. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] encryption_context + # Specifies the encryption context that will be used when encrypting + # the data key. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. + # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][1] in the *Key + # Management Service Developer Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] number_of_bytes + # Specifies the length of the data key in bytes. For example, use the + # value 64 to generate a 512-bit data key (64 bytes is 512 bits). For + # 128-bit (16-byte) and 256-bit (32-byte) data keys, use the `KeySpec` + # parameter. + # + # You must specify either the `KeySpec` or the `NumberOfBytes` + # parameter (but not both) in every `GenerateDataKey` request. + # @return [Integer] + # + # @!attribute [rw] key_spec + # Specifies the length of the data key. Use `AES_128` to generate a + # 128-bit symmetric key, or `AES_256` to generate a 256-bit symmetric + # key. + # + # You must specify either the `KeySpec` or the `NumberOfBytes` + # parameter (but not both) in every `GenerateDataKey` request. + # @return [String] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyRequest AWS API Documentation + # + class GenerateDataKeyRequest < Struct.new( + :key_id, + :encryption_context, + :number_of_bytes, + :key_spec, + :grant_tokens) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] ciphertext_blob + # The encrypted copy of the data key. When you use the HTTP API or the + # Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it + # is not Base64-encoded. + # @return [String] + # + # @!attribute [rw] plaintext + # The plaintext data key. When you use the HTTP API or the Amazon Web + # Services CLI, the value is Base64-encoded. Otherwise, it is not + # Base64-encoded. Use this data key to encrypt your data outside of + # KMS. Then, remove it from memory as soon as possible. + # @return [String] + # + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that + # encrypted the data key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyResponse AWS API Documentation + # + class GenerateDataKeyResponse < Struct.new( + :ciphertext_blob, + :plaintext, + :key_id) + SENSITIVE = [:plaintext] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Specifies the symmetric encryption KMS key that encrypts the data + # key. You cannot specify an asymmetric KMS key or a KMS key in a + # custom key store. To get the type and origin of your KMS key, use + # the DescribeKey operation. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. 
+ # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] encryption_context + # Specifies the encryption context that will be used when encrypting + # the data key. + # + # An *encryption context* is a collection of non-secret key-value + # pairs that represent additional authenticated data. When you use an + # encryption context to encrypt data, you must specify the same (an + # exact case-sensitive match) encryption context to decrypt the data. + # An encryption context is supported only on operations with symmetric + # encryption KMS keys. On operations with symmetric encryption KMS + # keys, an encryption context is optional, but it is strongly + # recommended. + # + # For more information, see [Encryption context][1] in the *Key + # Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + # @return [Hash] + # + # @!attribute [rw] key_spec + # The length of the data key. Use `AES_128` to generate a 128-bit + # symmetric key, or `AES_256` to generate a 256-bit symmetric key. + # @return [String] + # + # @!attribute [rw] number_of_bytes + # The length of the data key in bytes. For example, use the value 64 + # to generate a 512-bit data key (64 bytes is 512 bits). For common + # key lengths (128-bit and 256-bit symmetric keys), we recommend that + # you use the `KeySpec` field instead of this one. + # @return [Integer] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintextRequest AWS API Documentation + # + class GenerateDataKeyWithoutPlaintextRequest < Struct.new( + :key_id, + :encryption_context, + :key_spec, + :number_of_bytes, + :grant_tokens) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] ciphertext_blob + # The encrypted data key. When you use the HTTP API or the Amazon Web + # Services CLI, the value is Base64-encoded. Otherwise, it is not + # Base64-encoded. + # @return [String] + # + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key that + # encrypted the data key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintextResponse AWS API Documentation + # + class GenerateDataKeyWithoutPlaintextResponse < Struct.new( + :ciphertext_blob, + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] message + # The message to be hashed. 
Specify a message of up to 4,096 bytes. + # + # `GenerateMac` and VerifyMac do not provide special handling for + # message digests. If you generate an HMAC for a hash digest of a + # message, you must verify the HMAC of the same hash digest. + # @return [String] + # + # @!attribute [rw] key_id + # The HMAC KMS key to use in the operation. The MAC algorithm computes + # the HMAC for the message and the key as described in [RFC 2104][1]. + # + # To identify an HMAC KMS key, use the DescribeKey operation and see + # the `KeySpec` field in the response. + # + # + # + # [1]: https://datatracker.ietf.org/doc/html/rfc2104 + # @return [String] + # + # @!attribute [rw] mac_algorithm + # The MAC algorithm used in the operation. + # + # The algorithm must be compatible with the HMAC KMS key that you + # specify. To find the MAC algorithms that your HMAC KMS key supports, + # use the DescribeKey operation and see the `MacAlgorithms` field in + # the `DescribeKey` response. + # @return [String] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMacRequest AWS API Documentation + # + class GenerateMacRequest < Struct.new( + :message, + :key_id, + :mac_algorithm, + :grant_tokens) + SENSITIVE = [:message] + include Aws::Structure + end + + # @!attribute [rw] mac + # The hash-based message authentication code (HMAC) that was generated + # for the specified message, HMAC KMS key, and MAC algorithm. + # + # This is the standard, raw HMAC defined in [RFC 2104][1]. + # + # + # + # [1]: https://datatracker.ietf.org/doc/html/rfc2104 + # @return [String] + # + # @!attribute [rw] mac_algorithm + # The MAC algorithm that was used to generate the HMAC. + # @return [String] + # + # @!attribute [rw] key_id + # The HMAC KMS key used in the operation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMacResponse AWS API Documentation + # + class GenerateMacResponse < Struct.new( + :mac, + :mac_algorithm, + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] number_of_bytes + # The length of the random byte string. This parameter is required. + # @return [Integer] + # + # @!attribute [rw] custom_key_store_id + # Generates the random byte string in the CloudHSM cluster that is + # associated with the specified CloudHSM key store. To find the ID of + # a custom key store, use the DescribeCustomKeyStores operation. + # + # External key store IDs are not valid for this parameter. If you + # specify the ID of an external key store, `GenerateRandom` throws an + # `UnsupportedOperationException`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandomRequest AWS API Documentation + # + class GenerateRandomRequest < Struct.new( + :number_of_bytes, + :custom_key_store_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] plaintext + # The random byte string. 
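+    #
+    #   (A minimal usage sketch, not generated documentation: in the Ruby
+    #   SDK the value arrives as raw bytes.)
+    #
+    #       bytes = Aws::KMS::Client.new.generate_random(number_of_bytes: 32).plaintext
+    #       bytes.bytesize # => 32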
When you use the HTTP API or the Amazon Web + # Services CLI, the value is Base64-encoded. Otherwise, it is not + # Base64-encoded. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandomResponse AWS API Documentation + # + class GenerateRandomResponse < Struct.new( + :plaintext) + SENSITIVE = [:plaintext] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Gets the key policy for the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] policy_name + # Specifies the name of the key policy. The only valid name is + # `default`. To get the names of key policies, use ListKeyPolicies. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicyRequest AWS API Documentation + # + class GetKeyPolicyRequest < Struct.new( + :key_id, + :policy_name) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] policy + # A key policy document in JSON format. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicyResponse AWS API Documentation + # + class GetKeyPolicyResponse < Struct.new( + :policy) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Gets the rotation status for the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. To specify a KMS key + # in a different Amazon Web Services account, you must use the key + # ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatusRequest AWS API Documentation + # + class GetKeyRotationStatusRequest < Struct.new( + :key_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_rotation_enabled + # A Boolean value that specifies whether key rotation is enabled. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatusResponse AWS API Documentation + # + class GetKeyRotationStatusResponse < Struct.new( + :key_rotation_enabled) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The identifier of the symmetric encryption KMS key into which you + # will import key material. The `Origin` of the KMS key must be + # `EXTERNAL`. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] wrapping_algorithm + # The algorithm you will use to encrypt the key material before using + # the ImportKeyMaterial operation to import it. For more information, + # see [Encrypt the key material][1] in the *Key Management Service + # Developer Guide*. + # + # The `RSAES_PKCS1_V1_5` wrapping algorithm is deprecated. 
We + # recommend that you begin using a different wrapping algorithm + # immediately. KMS will end support for `RSAES_PKCS1_V1_5` by October + # 1, 2023 pursuant to [cryptographic key management guidance][2] from + # the National Institute of Standards and Technology (NIST). + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html + # [2]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-131Ar2.pdf + # @return [String] + # + # @!attribute [rw] wrapping_key_spec + # The type of wrapping key (public key) to return in the response. + # Only 2048-bit RSA public keys are supported. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImportRequest AWS API Documentation + # + class GetParametersForImportRequest < Struct.new( + :key_id, + :wrapping_algorithm, + :wrapping_key_spec) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key to use in a + # subsequent ImportKeyMaterial request. This is the same KMS key + # specified in the `GetParametersForImport` request. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] import_token + # The import token to send in a subsequent ImportKeyMaterial request. + # @return [String] + # + # @!attribute [rw] public_key + # The public key to use to encrypt the key material before importing + # it with ImportKeyMaterial. + # @return [String] + # + # @!attribute [rw] parameters_valid_to + # The time at which the import token and public key are no longer + # valid. After this time, you cannot use them to make an + # ImportKeyMaterial request and you must send another + # `GetParametersForImport` request to get new ones. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImportResponse AWS API Documentation + # + class GetParametersForImportResponse < Struct.new( + :key_id, + :import_token, + :public_key, + :parameters_valid_to) + SENSITIVE = [:public_key] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the asymmetric KMS key that includes the public key. + # + # To specify a KMS key, use its key ID, key ARN, alias name, or alias + # ARN. When using an alias name, prefix it with `"alias/"`. To specify + # a KMS key in a different Amazon Web Services account, you must use + # the key ARN or alias ARN. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Alias name: `alias/ExampleAlias` + # + # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. To get the alias name and alias ARN, use ListAliases. + # @return [String] + # + # @!attribute [rw] grant_tokens + # A list of grant tokens. + # + # Use a grant token when your permission to call this operation comes + # from a new grant that has not yet achieved *eventual consistency*. + # For more information, see [Grant token][1] and [Using a grant + # token][2] in the *Key Management Service Developer Guide*. 
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+    # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+    # @return [Array]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKeyRequest AWS API Documentation
+    #
+    class GetPublicKeyRequest < Struct.new(
+      :key_id,
+      :grant_tokens)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] key_id
+    #   The Amazon Resource Name ([key ARN][1]) of the asymmetric KMS key
+    #   from which the public key was downloaded.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN
+    #   @return [String]
+    #
+    # @!attribute [rw] public_key
+    #   The exported public key.
+    #
+    #   The value is a DER-encoded X.509 public key, also known as
+    #   `SubjectPublicKeyInfo` (SPKI), as defined in [RFC 5280][1]. When you
+    #   use the HTTP API or the Amazon Web Services CLI, the value is
+    #   Base64-encoded. Otherwise, it is not Base64-encoded.
+    #
+    #
+    #
+    #   [1]: https://tools.ietf.org/html/rfc5280
+    #   @return [String]
+    #
+    # @!attribute [rw] customer_master_key_spec
+    #   This field is deprecated. Instead, use the `KeySpec` field in the
+    #   `GetPublicKey` response.
+    #
+    #   The `KeySpec` and `CustomerMasterKeySpec` fields have the same
+    #   value. We recommend that you use the `KeySpec` field in your code.
+    #   However, to avoid breaking changes, KMS supports both fields.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_spec
+    #   The type of the public key that was downloaded.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_usage
+    #   The permitted use of the public key. Valid values are
+    #   `ENCRYPT_DECRYPT` or `SIGN_VERIFY`.
+    #
+    #   This information is critical. If a public key with `SIGN_VERIFY` key
+    #   usage encrypts data outside of KMS, the ciphertext cannot be
+    #   decrypted.
+    #   @return [String]
+    #
+    # @!attribute [rw] encryption_algorithms
+    #   The encryption algorithms that KMS supports for this key.
+    #
+    #   This information is critical. If a public key encrypts data outside
+    #   of KMS by using an unsupported encryption algorithm, the ciphertext
+    #   cannot be decrypted.
+    #
+    #   This field appears in the response only when the `KeyUsage` of the
+    #   public key is `ENCRYPT_DECRYPT`.
+    #   @return [Array]
+    #
+    # @!attribute [rw] signing_algorithms
+    #   The signing algorithms that KMS supports for this key.
+    #
+    #   This field appears in the response only when the `KeyUsage` of the
+    #   public key is `SIGN_VERIFY`.
+    #   @return [Array]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKeyResponse AWS API Documentation
+    #
+    class GetPublicKeyResponse < Struct.new(
+      :key_id,
+      :public_key,
+      :customer_master_key_spec,
+      :key_spec,
+      :key_usage,
+      :encryption_algorithms,
+      :signing_algorithms)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Use this structure to allow [cryptographic operations][1] in the grant
+    # only when the operation request includes the specified [encryption
+    # context][2].
+    #
+    # KMS applies the grant constraints only to cryptographic operations
+    # that support an encryption context, that is, all cryptographic
+    # operations with a [symmetric KMS key][3]. Grant constraints are not
+    # applied to operations that do not support an encryption context, such
+    # as cryptographic operations with asymmetric KMS keys and management
+    # operations, such as DescribeKey or RetireGrant.
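+    #
+    # As an illustrative sketch (the principal and context values below are
+    # placeholders), a constraint of this shape is supplied to CreateGrant:
+    #
+    #     kms.create_grant(
+    #       key_id: '1234abcd-12ab-34cd-56ef-1234567890ab',
+    #       grantee_principal: 'arn:aws:iam::111122223333:role/ExampleRole',
+    #       operations: ['Decrypt'],
+    #       constraints: { encryption_context_subset: { 'Department' => 'IT' } }
+    #     )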
+    #
+    # In a cryptographic operation, the encryption context in the decryption
+    # operation must be an exact, case-sensitive match for the keys and
+    # values in the encryption context of the encryption operation. Only the
+    # order of the pairs can vary.
+    #
+    # However, in a grant constraint, the key in each key-value pair is not
+    # case sensitive, but the value is case sensitive.
+    #
+    # To avoid confusion, do not use multiple encryption context pairs that
+    # differ only by case. To require a fully case-sensitive encryption
+    # context, use the `kms:EncryptionContext:` and
+    # `kms:EncryptionContextKeys` conditions in an IAM or key policy. For
+    # details, see [kms:EncryptionContext:][4] in the *Key Management
+    # Service Developer Guide*.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+    # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+    # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks
+    # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context
+    #
+    # @!attribute [rw] encryption_context_subset
+    #   A list of key-value pairs that must be included in the encryption
+    #   context of the [cryptographic operation][1] request. The grant
+    #   allows the cryptographic operation only when the encryption context
+    #   in the request includes the key-value pairs specified in this
+    #   constraint, although it can include additional key-value pairs.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+    #   @return [Hash]
+    #
+    # @!attribute [rw] encryption_context_equals
+    #   A list of key-value pairs that must match the encryption context in
+    #   the [cryptographic operation][1] request. The grant allows the
+    #   operation only when the encryption context in the request is the
+    #   same as the encryption context specified in this constraint.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+    #   @return [Hash]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GrantConstraints AWS API Documentation
+    #
+    class GrantConstraints < Struct.new(
+      :encryption_context_subset,
+      :encryption_context_equals)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Contains information about a grant.
+    #
+    # @!attribute [rw] key_id
+    #   The unique identifier for the KMS key to which the grant applies.
+    #   @return [String]
+    #
+    # @!attribute [rw] grant_id
+    #   The unique identifier for the grant.
+    #   @return [String]
+    #
+    # @!attribute [rw] name
+    #   The friendly name that identifies the grant. If a name was provided
+    #   in the CreateGrant request, that name is returned. Otherwise this
+    #   value is null.
+    #   @return [String]
+    #
+    # @!attribute [rw] creation_date
+    #   The date and time when the grant was created.
+    #   @return [Time]
+    #
+    # @!attribute [rw] grantee_principal
+    #   The identity that gets the permissions in the grant.
+    #
+    #   The `GranteePrincipal` field in the `ListGrants` response usually
+    #   contains the user or role designated as the grantee principal in the
+    #   grant. However, when the grantee principal in the grant is an Amazon
+    #   Web Services service, the `GranteePrincipal` field contains the
+    #   [service principal][1], which might represent several different
+    #   grantee principals.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services
+    #   @return [String]
+    #
+    # @!attribute [rw] retiring_principal
+    #   The principal that can retire the grant.
+    #   @return [String]
+    #
+    # @!attribute [rw] issuing_account
+    #   The Amazon Web Services account under which the grant was issued.
+    #   @return [String]
+    #
+    # @!attribute [rw] operations
+    #   The list of operations permitted by the grant.
+    #   @return [Array]
+    #
+    # @!attribute [rw] constraints
+    #   A list of key-value pairs that must be present in the encryption
+    #   context of certain subsequent operations that the grant allows.
+    #   @return [Types::GrantConstraints]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GrantListEntry AWS API Documentation
+    #
+    class GrantListEntry < Struct.new(
+      :key_id,
+      :grant_id,
+      :name,
+      :creation_date,
+      :grantee_principal,
+      :retiring_principal,
+      :issuing_account,
+      :operations,
+      :constraints)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] key_id
+    #   The identifier of the symmetric encryption KMS key that receives the
+    #   imported key material. This must be the same KMS key specified in
+    #   the `KeyID` parameter of the corresponding GetParametersForImport
+    #   request. The `Origin` of the KMS key must be `EXTERNAL`. You cannot
+    #   perform this operation on an asymmetric KMS key, an HMAC KMS key, a
+    #   KMS key in a custom key store, or on a KMS key in a different Amazon
+    #   Web Services account.
+    #
+    #   Specify the key ID or key ARN of the KMS key.
+    #
+    #   For example:
+    #
+    #   * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Key ARN:
+    #     `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   To get the key ID and key ARN for a KMS key, use ListKeys or
+    #   DescribeKey.
+    #   @return [String]
+    #
+    # @!attribute [rw] import_token
+    #   The import token that you received in the response to a previous
+    #   GetParametersForImport request. It must be from the same response
+    #   that contained the public key that you used to encrypt the key
+    #   material.
+    #   @return [String]
+    #
+    # @!attribute [rw] encrypted_key_material
+    #   The encrypted key material to import. The key material must be
+    #   encrypted with the public wrapping key that GetParametersForImport
+    #   returned, using the wrapping algorithm that you specified in the
+    #   same `GetParametersForImport` request.
+    #   @return [String]
+    #
+    # @!attribute [rw] valid_to
+    #   The date and time when the imported key material expires. This
+    #   parameter is required when the value of the `ExpirationModel`
+    #   parameter is `KEY_MATERIAL_EXPIRES`. Otherwise it is not valid.
+    #
+    #   The value of this parameter must be a future date and time. The
+    #   maximum value is 365 days from the request date.
+    #
+    #   When the key material expires, KMS deletes the key material from the
+    #   KMS key. Without its key material, the KMS key is unusable. To use
+    #   the KMS key in cryptographic operations, you must reimport the same
+    #   key material.
+    #
+    #   You cannot change the `ExpirationModel` or `ValidTo` values for the
+    #   current import after the request completes. To change either value,
+    #   you must delete (DeleteImportedKeyMaterial) and reimport the key
+    #   material.
+    #   @return [Time]
+    #
+    # @!attribute [rw] expiration_model
+    #   Specifies whether the key material expires. The default is
+    #   `KEY_MATERIAL_EXPIRES`.
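+    #
+    #   A hedged sketch of the non-expiring case (`import_token` and
+    #   `wrapped_material` are placeholders obtained from a prior
+    #   GetParametersForImport call):
+    #
+    #       kms.import_key_material(
+    #         key_id: '1234abcd-12ab-34cd-56ef-1234567890ab',
+    #         import_token: import_token,
+    #         encrypted_key_material: wrapped_material,
+    #         expiration_model: 'KEY_MATERIAL_DOES_NOT_EXPIRE'
+    #       )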
+    #
+    #   When the value of `ExpirationModel` is `KEY_MATERIAL_EXPIRES`, you
+    #   must specify a value for the `ValidTo` parameter. When the value is
+    #   `KEY_MATERIAL_DOES_NOT_EXPIRE`, you must omit the `ValidTo`
+    #   parameter.
+    #
+    #   You cannot change the `ExpirationModel` or `ValidTo` values for the
+    #   current import after the request completes. To change either value,
+    #   you must delete (DeleteImportedKeyMaterial) and reimport the key
+    #   material.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterialRequest AWS API Documentation
+    #
+    class ImportKeyMaterialRequest < Struct.new(
+      :key_id,
+      :import_token,
+      :encrypted_key_material,
+      :valid_to,
+      :expiration_model)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterialResponse AWS API Documentation
+    #
+    class ImportKeyMaterialResponse < Aws::EmptyStructure; end
+
+    # The request was rejected because the specified KMS key cannot decrypt
+    # the data. The `KeyId` in a Decrypt request and the `SourceKeyId` in a
+    # ReEncrypt request must identify the same KMS key that was used to
+    # encrypt the ciphertext.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/IncorrectKeyException AWS API Documentation
+    #
+    class IncorrectKeyException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the key material in the request is
+    # expired, invalid, or is not the same key material that was previously
+    # imported into this KMS key.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/IncorrectKeyMaterialException AWS API Documentation
+    #
+    class IncorrectKeyMaterialException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the trust anchor certificate in the
+    # request to create a CloudHSM key store is not the trust anchor
+    # certificate for the specified CloudHSM cluster.
+    #
+    # When you [initialize the CloudHSM cluster][1], you create the trust
+    # anchor certificate and save it in the `customerCA.crt` file.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/IncorrectTrustAnchorException AWS API Documentation
+    #
+    class IncorrectTrustAnchorException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the specified alias name is not
+    # valid.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidAliasNameException AWS API Documentation
+    #
+    class InvalidAliasNameException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because a specified ARN, or an ARN in a key
+    # policy, is not valid.
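+    #
+    # (Illustrative only: like the other errors in this file, this surfaces
+    # in the Ruby SDK as a subclass of `Aws::KMS::Errors::ServiceError`; the
+    # client and `candidate_arn` below are placeholders.)
+    #
+    #     begin
+    #       kms.describe_key(key_id: candidate_arn)
+    #     rescue Aws::KMS::Errors::InvalidArnException => e
+    #       warn "invalid ARN: #{e.message}"
+    #     end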
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidArnException AWS API Documentation
+    #
+    class InvalidArnException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # From the Decrypt or ReEncrypt operation, the request was rejected
+    # because the specified ciphertext, or additional authenticated data
+    # incorporated into the ciphertext, such as the encryption context, is
+    # corrupted, missing, or otherwise invalid.
+    #
+    # From the ImportKeyMaterial operation, the request was rejected because
+    # KMS could not decrypt the encrypted (wrapped) key material.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidCiphertextException AWS API Documentation
+    #
+    class InvalidCiphertextException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the specified `GrantId` is not valid.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidGrantIdException AWS API Documentation
+    #
+    class InvalidGrantIdException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the specified grant token is not
+    # valid.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidGrantTokenException AWS API Documentation
+    #
+    class InvalidGrantTokenException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the provided import token is invalid
+    # or is associated with a different KMS key.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidImportTokenException AWS API Documentation
+    #
+    class InvalidImportTokenException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected for one of the following reasons:
+    #
+    # * The `KeyUsage` value of the KMS key is incompatible with the API
+    #   operation.
+    #
+    # * The encryption algorithm or signing algorithm specified for the
+    #   operation is incompatible with the type of key material in the KMS
+    #   key (`KeySpec`).
+    #
+    # For encrypting, decrypting, re-encrypting, and generating data keys,
+    # the `KeyUsage` must be `ENCRYPT_DECRYPT`. For signing and verifying
+    # messages, the `KeyUsage` must be `SIGN_VERIFY`. For generating and
+    # verifying message authentication codes (MACs), the `KeyUsage` must be
+    # `GENERATE_VERIFY_MAC`. To find the `KeyUsage` of a KMS key, use the
+    # DescribeKey operation.
+    #
+    # To find the encryption or signing algorithms supported for a
+    # particular KMS key, use the DescribeKey operation.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidKeyUsageException AWS API Documentation
+    #
+    class InvalidKeyUsageException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the marker that specifies where
+    # pagination should next begin is not valid.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/InvalidMarkerException AWS API Documentation
+    #
+    class InvalidMarkerException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because an internal exception occurred. The
+    # request can be retried.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KMSInternalException AWS API Documentation
+    #
+    class KMSInternalException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the HMAC verification failed. HMAC
+    # verification fails when the HMAC computed by using the specified
+    # message, HMAC KMS key, and MAC algorithm does not match the HMAC
+    # specified in the request.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KMSInvalidMacException AWS API Documentation
+    #
+    class KMSInvalidMacException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the signature verification failed.
+    # Signature verification fails when it cannot confirm that the signature
+    # was produced by signing the specified message with the specified KMS
+    # key and signing algorithm.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KMSInvalidSignatureException AWS API Documentation
+    #
+    class KMSInvalidSignatureException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the state of the specified resource
+    # is not valid for this request.
+    #
+    # This exception means one of the following:
+    #
+    # * The key state of the KMS key is not compatible with the operation.
+    #
+    #   To find the key state, use the DescribeKey operation. For more
+    #   information about which key states are compatible with each KMS
+    #   operation, see [Key states of KMS keys][1] in the *Key Management
+    #   Service Developer Guide*.
+    #
+    # * For cryptographic operations on KMS keys in custom key stores, this
+    #   exception represents a general failure with many possible causes. To
+    #   identify the cause, see the error message that accompanies the
+    #   exception.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KMSInvalidStateException AWS API Documentation
+    #
+    class KMSInvalidStateException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Contains information about each entry in the key list.
+    #
+    # @!attribute [rw] key_id
+    #   Unique identifier of the key.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_arn
+    #   ARN of the key.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KeyListEntry AWS API Documentation
+    #
+    class KeyListEntry < Struct.new(
+      :key_id,
+      :key_arn)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Contains metadata about a KMS key.
+    #
+    # This data type is used as a response element for the CreateKey,
+    # DescribeKey, and ReplicateKey operations.
+    #
+    # @!attribute [rw] aws_account_id
+    #   The twelve-digit account ID of the Amazon Web Services account that
+    #   owns the KMS key.
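+    #
+    #   (A minimal sketch, not generated documentation: the client is a
+    #   placeholder, and metadata is typically read from a DescribeKey
+    #   response.)
+    #
+    #       meta = kms.describe_key(key_id: 'alias/ExampleAlias').key_metadata
+    #       meta.aws_account_id # => "111122223333"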
+    #   @return [String]
+    #
+    # @!attribute [rw] key_id
+    #   The globally unique identifier for the KMS key.
+    #   @return [String]
+    #
+    # @!attribute [rw] arn
+    #   The Amazon Resource Name (ARN) of the KMS key. For examples, see
+    #   [Key Management Service (KMS)][1] in the Example ARNs section of the
+    #   *Amazon Web Services General Reference*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms
+    #   @return [String]
+    #
+    # @!attribute [rw] creation_date
+    #   The date and time when the KMS key was created.
+    #   @return [Time]
+    #
+    # @!attribute [rw] enabled
+    #   Specifies whether the KMS key is enabled. When `KeyState` is
+    #   `Enabled` this value is true, otherwise it is false.
+    #   @return [Boolean]
+    #
+    # @!attribute [rw] description
+    #   The description of the KMS key.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_usage
+    #   The [cryptographic operations][1] for which you can use the KMS key.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations
+    #   @return [String]
+    #
+    # @!attribute [rw] key_state
+    #   The current status of the KMS key.
+    #
+    #   For more information about how key state affects the use of a KMS
+    #   key, see [Key states of KMS keys][1] in the *Key Management Service
+    #   Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+    #   @return [String]
+    #
+    # @!attribute [rw] deletion_date
+    #   The date and time after which KMS deletes this KMS key. This value
+    #   is present only when the KMS key is scheduled for deletion, that is,
+    #   when its `KeyState` is `PendingDeletion`.
+    #
+    #   When the primary key in a multi-Region key is scheduled for deletion
+    #   but still has replica keys, its key state is
+    #   `PendingReplicaDeletion` and the length of its waiting period is
+    #   displayed in the `PendingDeletionWindowInDays` field.
+    #   @return [Time]
+    #
+    # @!attribute [rw] valid_to
+    #   The time at which the imported key material expires. When the key
+    #   material expires, KMS deletes the key material and the KMS key
+    #   becomes unusable. This value is present only for KMS keys whose
+    #   `Origin` is `EXTERNAL` and whose `ExpirationModel` is
+    #   `KEY_MATERIAL_EXPIRES`, otherwise this value is omitted.
+    #   @return [Time]
+    #
+    # @!attribute [rw] origin
+    #   The source of the key material for the KMS key. When this value is
+    #   `AWS_KMS`, KMS created the key material. When this value is
+    #   `EXTERNAL`, the key material was imported or the KMS key doesn't
+    #   have any key material. When this value is `AWS_CLOUDHSM`, the key
+    #   material was created in the CloudHSM cluster associated with a
+    #   custom key store.
+    #   @return [String]
+    #
+    # @!attribute [rw] custom_key_store_id
+    #   A unique identifier for the [custom key store][1] that contains the
+    #   KMS key. This field is present only when the KMS key is created in a
+    #   custom key store.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+    #   @return [String]
+    #
+    # @!attribute [rw] cloud_hsm_cluster_id
+    #   The cluster ID of the CloudHSM cluster that contains the key
+    #   material for the KMS key. When you create a KMS key in a CloudHSM
+    #   [custom key store][1], KMS creates the key material for the KMS key
+    #   in the associated CloudHSM cluster. This field is present only when
+    #   the KMS key is created in a CloudHSM key store.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
+    #   @return [String]
+    #
+    # @!attribute [rw] expiration_model
+    #   Specifies whether the KMS key's key material expires. This value is
+    #   present only when `Origin` is `EXTERNAL`, otherwise this value is
+    #   omitted.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_manager
+    #   The manager of the KMS key. KMS keys in your Amazon Web Services
+    #   account are either customer managed or Amazon Web Services managed.
+    #   For more information about the difference, see [KMS keys][1] in the
+    #   *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys
+    #   @return [String]
+    #
+    # @!attribute [rw] customer_master_key_spec
+    #   This field is deprecated. Instead, use the `KeySpec` field.
+    #
+    #   The `KeySpec` and `CustomerMasterKeySpec` fields have the same
+    #   value. We recommend that you use the `KeySpec` field in your code.
+    #   However, to avoid breaking changes, KMS supports both fields.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_spec
+    #   Describes the type of key material in the KMS key.
+    #   @return [String]
+    #
+    # @!attribute [rw] encryption_algorithms
+    #   The encryption algorithms that the KMS key supports. You cannot use
+    #   the KMS key with other encryption algorithms within KMS.
+    #
+    #   This value is present only when the `KeyUsage` of the KMS key is
+    #   `ENCRYPT_DECRYPT`.
+    #   @return [Array]
+    #
+    # @!attribute [rw] signing_algorithms
+    #   The signing algorithms that the KMS key supports. You cannot use the
+    #   KMS key with other signing algorithms within KMS.
+    #
+    #   This field appears only when the `KeyUsage` of the KMS key is
+    #   `SIGN_VERIFY`.
+    #   @return [Array]
+    #
+    # @!attribute [rw] multi_region
+    #   Indicates whether the KMS key is a multi-Region (`True`) or regional
+    #   (`False`) key. This value is `True` for multi-Region primary and
+    #   replica keys and `False` for regional KMS keys.
+    #
+    #   For more information about multi-Region keys, see [Multi-Region keys
+    #   in KMS][1] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html
+    #   @return [Boolean]
+    #
+    # @!attribute [rw] multi_region_configuration
+    #   Lists the primary and replica keys in the same multi-Region key.
+    #   This field is present only when the value of the `MultiRegion` field
+    #   is `True`.
+    #
+    #   For more information about any listed KMS key, use the DescribeKey
+    #   operation.
+    #
+    #   * `MultiRegionKeyType` indicates whether the KMS key is a `PRIMARY`
+    #     or `REPLICA` key.
+    #
+    #   * `PrimaryKey` displays the key ARN and Region of the primary key.
+    #     This field displays the current KMS key if it is the primary key.
+    #
+    #   * `ReplicaKeys` displays the key ARNs and Regions of all replica
+    #     keys. This field includes the current KMS key if it is a replica
+    #     key.
+    #   @return [Types::MultiRegionConfiguration]
+    #
+    # @!attribute [rw] pending_deletion_window_in_days
+    #   The waiting period before the primary key in a multi-Region key is
+    #   deleted. This waiting period begins when the last of its replica
+    #   keys is deleted. This value is present only when the `KeyState` of
+    #   the KMS key is `PendingReplicaDeletion`. That indicates that the KMS
+    #   key is the primary key in a multi-Region key, it is scheduled for
+    #   deletion, and it still has existing replica keys.
+ # + # When a single-Region KMS key or a multi-Region replica key is + # scheduled for deletion, its deletion date is displayed in the + # `DeletionDate` field. However, when the primary key in a + # multi-Region key is scheduled for deletion, its waiting period + # doesn't begin until all of its replica keys are deleted. This value + # displays that waiting period. When the last replica key in the + # multi-Region key is deleted, the `KeyState` of the scheduled primary + # key changes from `PendingReplicaDeletion` to `PendingDeletion` and + # the deletion date appears in the `DeletionDate` field. + # @return [Integer] + # + # @!attribute [rw] mac_algorithms + # The message authentication code (MAC) algorithm that the HMAC KMS + # key supports. + # + # This value is present only when the `KeyUsage` of the KMS key is + # `GENERATE_VERIFY_MAC`. + # @return [Array] + # + # @!attribute [rw] xks_key_configuration + # Information about the external key that is associated with a KMS key + # in an external key store. + # + # For more information, see [External key][1] in the *Key Management + # Service Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key + # @return [Types::XksKeyConfigurationType] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KeyMetadata AWS API Documentation + # + class KeyMetadata < Struct.new( + :aws_account_id, + :key_id, + :arn, + :creation_date, + :enabled, + :description, + :key_usage, + :key_state, + :deletion_date, + :valid_to, + :origin, + :custom_key_store_id, + :cloud_hsm_cluster_id, + :expiration_model, + :key_manager, + :customer_master_key_spec, + :key_spec, + :encryption_algorithms, + :signing_algorithms, + :multi_region, + :multi_region_configuration, + :pending_deletion_window_in_days, + :mac_algorithms, + :xks_key_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because the specified KMS key was not + # available. You can retry the request. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/KeyUnavailableException AWS API Documentation + # + class KeyUnavailableException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because a quota was exceeded. For more + # information, see [Quotas][1] in the *Key Management Service Developer + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/LimitExceededException AWS API Documentation + # + class LimitExceededException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Lists only aliases that are associated with the specified KMS key. + # Enter a KMS key in your Amazon Web Services account. + # + # This parameter is optional. If you omit it, `ListAliases` returns + # all aliases in the account and Region. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. 
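+    #
+    #   A minimal filtering sketch (the client and key ID are
+    #   placeholders):
+    #
+    #       resp = kms.list_aliases(key_id: '1234abcd-12ab-34cd-56ef-1234567890ab', limit: 10)
+    #       resp.aliases.map(&:alias_name)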
+    #   @return [String]
+    #
+    # @!attribute [rw] limit
+    #   Use this parameter to specify the maximum number of items to return.
+    #   When this value is present, KMS does not return more than the
+    #   specified number of items, but it might return fewer.
+    #
+    #   This value is optional. If you include a value, it must be between 1
+    #   and 100, inclusive. If you do not include a value, it defaults to
+    #   50.
+    #   @return [Integer]
+    #
+    # @!attribute [rw] marker
+    #   Use this parameter in a subsequent request after you receive a
+    #   response with truncated results. Set it to the value of `NextMarker`
+    #   from the truncated response you just received.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliasesRequest AWS API Documentation
+    #
+    class ListAliasesRequest < Struct.new(
+      :key_id,
+      :limit,
+      :marker)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] aliases
+    #   A list of aliases.
+    #   @return [Array]
+    #
+    # @!attribute [rw] next_marker
+    #   When `Truncated` is true, this element is present and contains the
+    #   value to use for the `Marker` parameter in a subsequent request.
+    #   @return [String]
+    #
+    # @!attribute [rw] truncated
+    #   A flag that indicates whether there are more items in the list. When
+    #   this value is true, the list in this response is truncated. To get
+    #   more items, pass the value of the `NextMarker` element in this
+    #   response to the `Marker` parameter in a subsequent request.
+    #   @return [Boolean]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliasesResponse AWS API Documentation
+    #
+    class ListAliasesResponse < Struct.new(
+      :aliases,
+      :next_marker,
+      :truncated)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] limit
+    #   Use this parameter to specify the maximum number of items to return.
+    #   When this value is present, KMS does not return more than the
+    #   specified number of items, but it might return fewer.
+    #
+    #   This value is optional. If you include a value, it must be between 1
+    #   and 100, inclusive. If you do not include a value, it defaults to
+    #   50.
+    #   @return [Integer]
+    #
+    # @!attribute [rw] marker
+    #   Use this parameter in a subsequent request after you receive a
+    #   response with truncated results. Set it to the value of `NextMarker`
+    #   from the truncated response you just received.
+    #   @return [String]
+    #
+    # @!attribute [rw] key_id
+    #   Returns only grants for the specified KMS key. This parameter is
+    #   required.
+    #
+    #   Specify the key ID or key ARN of the KMS key. To specify a KMS key
+    #   in a different Amazon Web Services account, you must use the key
+    #   ARN.
+    #
+    #   For example:
+    #
+    #   * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Key ARN:
+    #     `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   To get the key ID and key ARN for a KMS key, use ListKeys or
+    #   DescribeKey.
+    #   @return [String]
+    #
+    # @!attribute [rw] grant_id
+    #   Returns only the grant with the specified grant ID. The grant ID
+    #   uniquely identifies the grant.
+    #   @return [String]
+    #
+    # @!attribute [rw] grantee_principal
+    #   Returns only grants where the specified principal is the grantee
+    #   principal for the grant.
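+    #
+    #   A hedged pagination sketch using `Truncated` and `NextMarker` (the
+    #   client and `key_id` are placeholders):
+    #
+    #       params = { key_id: key_id }
+    #       loop do
+    #         page = kms.list_grants(**params)
+    #         page.grants.each { |grant| puts grant.grant_id }
+    #         break unless page.truncated
+    #         params[:marker] = page.next_marker
+    #       end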
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrantsRequest AWS API Documentation
+ #
+ class ListGrantsRequest < Struct.new(
+ :limit,
+ :marker,
+ :key_id,
+ :grant_id,
+ :grantee_principal)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] grants
+ # A list of grants.
+ # @return [Array]
+ #
+ # @!attribute [rw] next_marker
+ # When `Truncated` is true, this element is present and contains the
+ # value to use for the `Marker` parameter in a subsequent request.
+ # @return [String]
+ #
+ # @!attribute [rw] truncated
+ # A flag that indicates whether there are more items in the list. When
+ # this value is true, the list in this response is truncated. To get
+ # more items, pass the value of the `NextMarker` element in
+ # this response to the `Marker` parameter in a subsequent request.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrantsResponse AWS API Documentation
+ #
+ class ListGrantsResponse < Struct.new(
+ :grants,
+ :next_marker,
+ :truncated)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Gets the names of key policies for the specified KMS key.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ # @return [String]
+ #
+ # @!attribute [rw] limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 1000, inclusive. If you do not include a value, it defaults to
+ # 100.
+ #
+ # Only one policy can be attached to a key.
+ # @return [Integer]
+ #
+ # @!attribute [rw] marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPoliciesRequest AWS API Documentation
+ #
+ class ListKeyPoliciesRequest < Struct.new(
+ :key_id,
+ :limit,
+ :marker)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] policy_names
+ # A list of key policy names. The only valid value is `default`.
+ # @return [Array]
+ #
+ # @!attribute [rw] next_marker
+ # When `Truncated` is true, this element is present and contains the
+ # value to use for the `Marker` parameter in a subsequent request.
+ # @return [String]
+ #
+ # @!attribute [rw] truncated
+ # A flag that indicates whether there are more items in the list. When
+ # this value is true, the list in this response is truncated. To get
+ # more items, pass the value of the `NextMarker` element in
+ # this response to the `Marker` parameter in a subsequent request.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPoliciesResponse AWS API Documentation
+ #
+ class ListKeyPoliciesResponse < Struct.new(
+ :policy_names,
+ :next_marker,
+ :truncated)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 1000, inclusive. If you do not include a value, it defaults to
+ # 100.
+ # @return [Integer]
+ #
+ # @!attribute [rw] marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeysRequest AWS API Documentation
+ #
+ class ListKeysRequest < Struct.new(
+ :limit,
+ :marker)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] keys
+ # A list of KMS keys.
+ # @return [Array]
+ #
+ # @!attribute [rw] next_marker
+ # When `Truncated` is true, this element is present and contains the
+ # value to use for the `Marker` parameter in a subsequent request.
+ # @return [String]
+ #
+ # @!attribute [rw] truncated
+ # A flag that indicates whether there are more items in the list. When
+ # this value is true, the list in this response is truncated. To get
+ # more items, pass the value of the `NextMarker` element in
+ # this response to the `Marker` parameter in a subsequent request.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeysResponse AWS API Documentation
+ #
+ class ListKeysResponse < Struct.new(
+ :keys,
+ :next_marker,
+ :truncated)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Gets tags on the specified KMS key.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ # @return [String]
+ #
+ # @!attribute [rw] limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 50, inclusive. If you do not include a value, it defaults to 50.
+ # @return [Integer]
+ #
+ # @!attribute [rw] marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ #
+ # Do not attempt to construct this value. Use only the value of
+ # `NextMarker` from the truncated response you just received.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTagsRequest AWS API Documentation
+ #
+ class ListResourceTagsRequest < Struct.new(
+ :key_id,
+ :limit,
+ :marker)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] tags
+ # A list of tags. Each tag consists of a tag key and a tag value.
+ #
+ # Tagging or untagging a KMS key can allow or deny permission to the
+ # KMS key. For details, see [ABAC for KMS][1] in the *Key Management
+ # Service Developer Guide*.
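+ #
+ # For example, a minimal sketch of printing the tags on a key with
+ # the `aws-sdk-kms` client; the key ID is a placeholder assumption:
+ #
+ #     kms = Aws::KMS::Client.new
+ #     resp = kms.list_resource_tags(
+ #       key_id: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ #     )
+ #     resp.tags.each { |t| puts "#{t.tag_key}=#{t.tag_value}" }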
+ #
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html
+ # @return [Array]
+ #
+ # @!attribute [rw] next_marker
+ # When `Truncated` is true, this element is present and contains the
+ # value to use for the `Marker` parameter in a subsequent request.
+ #
+ # Do not assume or infer any information from this value.
+ # @return [String]
+ #
+ # @!attribute [rw] truncated
+ # A flag that indicates whether there are more items in the list. When
+ # this value is true, the list in this response is truncated. To get
+ # more items, pass the value of the `NextMarker` element in
+ # this response to the `Marker` parameter in a subsequent request.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTagsResponse AWS API Documentation
+ #
+ class ListResourceTagsResponse < Struct.new(
+ :tags,
+ :next_marker,
+ :truncated)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] limit
+ # Use this parameter to specify the maximum number of items to return.
+ # When this value is present, KMS does not return more than the
+ # specified number of items, but it might return fewer.
+ #
+ # This value is optional. If you include a value, it must be between 1
+ # and 100, inclusive. If you do not include a value, it defaults to
+ # 50.
+ # @return [Integer]
+ #
+ # @!attribute [rw] marker
+ # Use this parameter in a subsequent request after you receive a
+ # response with truncated results. Set it to the value of `NextMarker`
+ # from the truncated response you just received.
+ # @return [String]
+ #
+ # @!attribute [rw] retiring_principal
+ # The retiring principal for which to list grants. Enter a principal
+ # in your Amazon Web Services account.
+ #
+ # To specify the retiring principal, use the [Amazon Resource Name
+ # (ARN)][1] of an Amazon Web Services principal. Valid principals
+ # include Amazon Web Services accounts, IAM users, IAM roles,
+ # federated users, and assumed role users. For help with the ARN
+ # syntax for a principal, see [IAM ARNs][2] in the *Identity and
+ # Access Management User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrantsRequest AWS API Documentation
+ #
+ class ListRetirableGrantsRequest < Struct.new(
+ :limit,
+ :marker,
+ :retiring_principal)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because the specified policy is not
+ # syntactically or semantically correct.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/MalformedPolicyDocumentException AWS API Documentation
+ #
+ class MalformedPolicyDocumentException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes the configuration of this multi-Region key. This field
+ # appears only when the KMS key is a primary or replica of a
+ # multi-Region key.
+ #
+ # For more information about any listed KMS key, use the DescribeKey
+ # operation.
+ #
+ # @!attribute [rw] multi_region_key_type
+ # Indicates whether the KMS key is a `PRIMARY` or `REPLICA` key.
+ # @return [String]
+ #
+ # @!attribute [rw] primary_key
+ # Displays the key ARN and Region of the primary key.
This field
+ # includes the current KMS key if it is the primary key.
+ # @return [Types::MultiRegionKey]
+ #
+ # @!attribute [rw] replica_keys
+ # Displays the key ARNs and Regions of all replica keys. This field
+ # includes the current KMS key if it is a replica key.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/MultiRegionConfiguration AWS API Documentation
+ #
+ class MultiRegionConfiguration < Struct.new(
+ :multi_region_key_type,
+ :primary_key,
+ :replica_keys)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes the primary or replica key in a multi-Region key.
+ #
+ # @!attribute [rw] arn
+ # Displays the key ARN of a primary or replica key of a multi-Region
+ # key.
+ # @return [String]
+ #
+ # @!attribute [rw] region
+ # Displays the Amazon Web Services Region of a primary or replica key
+ # in a multi-Region key.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/MultiRegionKey AWS API Documentation
+ #
+ class MultiRegionKey < Struct.new(
+ :arn,
+ :region)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The request was rejected because the specified entity or resource
+ # could not be found.
+ #
+ # @!attribute [rw] message
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/NotFoundException AWS API Documentation
+ #
+ class NotFoundException < Struct.new(
+ :message)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Sets the key policy on the specified KMS key.
+ #
+ # Specify the key ID or key ARN of the KMS key.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ # @return [String]
+ #
+ # @!attribute [rw] policy_name
+ # The name of the key policy. The only valid value is `default`.
+ # @return [String]
+ #
+ # @!attribute [rw] policy
+ # The key policy to attach to the KMS key.
+ #
+ # The key policy must meet the following criteria:
+ #
+ # * The key policy must allow the calling principal to make a
+ # subsequent `PutKeyPolicy` request on the KMS key. This reduces the
+ # risk that the KMS key becomes unmanageable. For more information,
+ # see [Default key policy][1] in the *Key Management Service
+ # Developer Guide*. (To omit this condition, set
+ # `BypassPolicyLockoutSafetyCheck` to true.)
+ #
+ # * Each statement in the key policy must contain one or more
+ # principals. The principals in the key policy must exist and be
+ # visible to KMS. When you create a new Amazon Web Services
+ # principal, you might need to enforce a delay before including the
+ # new principal in a key policy because the new principal might not
+ # be immediately visible to KMS. For more information, see [Changes
+ # that I make are not always immediately visible][2] in the *Amazon
+ # Web Services Identity and Access Management User Guide*.
+ #
+ # A key policy document can include only the following characters:
+ #
+ # * Printable ASCII characters from the space character (`\u0020`)
+ # through the end of the ASCII character range.
+ #
+ # * Printable characters in the Basic Latin and Latin-1 Supplement
+ # character set (through `\u00FF`).
+ #
+ # * The tab (`\u0009`), line feed (`\u000A`), and carriage return
+ # (`\u000D`) special characters
+ #
+ # For information about key policies, see [Key policies in KMS][3] in
+ # the *Key Management Service Developer Guide*. For help writing and
+ # formatting a JSON policy document, see the [IAM JSON Policy
+ # Reference][4] in the *Identity and Access Management User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html
+ # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ # @return [String]
+ #
+ # @!attribute [rw] bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The
+ # default value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key
+ # becomes unmanageable. Do not set this value to true
+ # indiscriminately.
+ #
+ # For more information, see [Default key policy][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Use this parameter only when you intend to prevent the principal
+ # that is making the request from making a subsequent PutKeyPolicy
+ # request on the KMS key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicyRequest AWS API Documentation
+ #
+ class PutKeyPolicyRequest < Struct.new(
+ :key_id,
+ :policy_name,
+ :policy,
+ :bypass_policy_lockout_safety_check)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] ciphertext_blob
+ # Ciphertext of the data to reencrypt.
+ # @return [String]
+ #
+ # @!attribute [rw] source_encryption_context
+ # Specifies the encryption context to use to decrypt the ciphertext.
+ # Enter the same encryption context that was used to encrypt the
+ # ciphertext.
+ #
+ # An *encryption context* is a collection of non-secret key-value
+ # pairs that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data.
+ # An encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS
+ # keys, an encryption context is optional, but it is strongly
+ # recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ # @return [Hash]
+ #
+ # @!attribute [rw] source_key_id
+ # Specifies the KMS key that KMS will use to decrypt the ciphertext
+ # before it is re-encrypted.
+ #
+ # Enter a key ID of the KMS key that was used to encrypt the
+ # ciphertext. If you identify a different KMS key, the `ReEncrypt`
+ # operation throws an `IncorrectKeyException`.
+ #
+ # This parameter is required only when the ciphertext was encrypted
+ # under an asymmetric KMS key. If you used a symmetric encryption KMS
+ # key, KMS can get the KMS key from metadata that it adds to the
+ # symmetric ciphertext blob. However, it is always recommended as a
+ # best practice.
This practice ensures that you use the KMS key that
+ # you intend.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify
+ # a KMS key in a different Amazon Web Services account, you must use
+ # the key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ # @return [String]
+ #
+ # @!attribute [rw] destination_key_id
+ # A unique identifier for the KMS key that is used to reencrypt the
+ # data. Specify a symmetric encryption KMS key or an asymmetric KMS
+ # key with a `KeyUsage` value of `ENCRYPT_DECRYPT`. To find the
+ # `KeyUsage` value of a KMS key, use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify
+ # a KMS key in a different Amazon Web Services account, you must use
+ # the key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ # @return [String]
+ #
+ # @!attribute [rw] destination_encryption_context
+ # Specifies the encryption context to use when reencrypting the
+ # data.
+ #
+ # A destination encryption context is valid only when the destination
+ # KMS key is a symmetric encryption KMS key. The standard ciphertext
+ # format for asymmetric KMS keys does not include fields for metadata.
+ #
+ # An *encryption context* is a collection of non-secret key-value
+ # pairs that represent additional authenticated data. When you use an
+ # encryption context to encrypt data, you must specify the same (an
+ # exact case-sensitive match) encryption context to decrypt the data.
+ # An encryption context is supported only on operations with symmetric
+ # encryption KMS keys. On operations with symmetric encryption KMS
+ # keys, an encryption context is optional, but it is strongly
+ # recommended.
+ #
+ # For more information, see [Encryption context][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
+ # @return [Hash]
+ #
+ # @!attribute [rw] source_encryption_algorithm
+ # Specifies the encryption algorithm that KMS will use to decrypt the
+ # ciphertext before it is reencrypted. The default value,
+ # `SYMMETRIC_DEFAULT`, represents the algorithm used for symmetric
+ # encryption KMS keys.
+ #
+ # Specify the same algorithm that was used to encrypt the ciphertext.
+ # If you specify a different algorithm, the decrypt attempt fails.
+ #
+ # This parameter is required only when the ciphertext was encrypted
+ # under an asymmetric KMS key.
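+ #
+ # For example, a minimal sketch of moving symmetric ciphertext under
+ # a new KMS key with the `aws-sdk-kms` client; the `ciphertext`
+ # variable and destination alias are placeholder assumptions:
+ #
+ #     kms = Aws::KMS::Client.new
+ #     resp = kms.re_encrypt(
+ #       ciphertext_blob: ciphertext,
+ #       destination_key_id: "alias/ExampleAlias"
+ #     )
+ #     new_ciphertext = resp.ciphertext_blob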
+ # @return [String]
+ #
+ # @!attribute [rw] destination_encryption_algorithm
+ # Specifies the encryption algorithm that KMS will use to reencrypt
+ # the data after it has decrypted it. The default value,
+ # `SYMMETRIC_DEFAULT`, represents the encryption algorithm used for
+ # symmetric encryption KMS keys.
+ #
+ # This parameter is required only when the destination KMS key is an
+ # asymmetric KMS key.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*.
+ # For more information, see [Grant token][1] and [Using a grant
+ # token][2] in the *Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncryptRequest AWS API Documentation
+ #
+ class ReEncryptRequest < Struct.new(
+ :ciphertext_blob,
+ :source_encryption_context,
+ :source_key_id,
+ :destination_key_id,
+ :destination_encryption_context,
+ :source_encryption_algorithm,
+ :destination_encryption_algorithm,
+ :grant_tokens)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] ciphertext_blob
+ # The reencrypted data. When you use the HTTP API or the Amazon Web
+ # Services CLI, the value is Base64-encoded. Otherwise, it is not
+ # Base64-encoded.
+ # @return [String]
+ #
+ # @!attribute [rw] source_key_id
+ # Unique identifier of the KMS key used to originally encrypt the
+ # data.
+ # @return [String]
+ #
+ # @!attribute [rw] key_id
+ # The Amazon Resource Name ([key ARN][1]) of the KMS key that was used
+ # to reencrypt the data.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN
+ # @return [String]
+ #
+ # @!attribute [rw] source_encryption_algorithm
+ # The encryption algorithm that was used to decrypt the ciphertext
+ # before it was reencrypted.
+ # @return [String]
+ #
+ # @!attribute [rw] destination_encryption_algorithm
+ # The encryption algorithm that was used to reencrypt the data.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncryptResponse AWS API Documentation
+ #
+ class ReEncryptResponse < Struct.new(
+ :ciphertext_blob,
+ :source_key_id,
+ :key_id,
+ :source_encryption_algorithm,
+ :destination_encryption_algorithm)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # Identifies the multi-Region primary key that is being replicated. To
+ # determine whether a KMS key is a multi-Region primary key, use the
+ # DescribeKey operation to check the value of the `MultiRegionKeyType`
+ # property.
+ #
+ # Specify the key ID or key ARN of a multi-Region primary key.
+ #
+ # For example:
+ #
+ # * Key ID: `mrk-1234abcd12ab34cd56ef1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ # @return [String]
+ #
+ # @!attribute [rw] replica_region
+ # The Region ID of the Amazon Web Services Region for this replica
+ # key.
+ #
+ # Enter the Region ID, such as `us-east-1` or `ap-southeast-2`.
For a + # list of Amazon Web Services Regions in which KMS is supported, see + # [KMS service endpoints][1] in the *Amazon Web Services General + # Reference*. + # + # HMAC KMS keys are not supported in all Amazon Web Services Regions. + # If you try to replicate an HMAC KMS key in an Amazon Web Services + # Region in which HMAC keys are not supported, the `ReplicateKey` + # operation returns an `UnsupportedOperationException`. For a list of + # Regions in which HMAC KMS keys are supported, see [HMAC keys in + # KMS][2] in the *Key Management Service Developer Guide*. + # + # + # + # The replica must be in a different Amazon Web Services Region than + # its primary key and other replicas of that primary key, but in the + # same Amazon Web Services partition. KMS must be available in the + # replica Region. If the Region is not enabled by default, the Amazon + # Web Services account must be enabled in the Region. For information + # about Amazon Web Services partitions, see [Amazon Resource Names + # (ARNs)][3] in the *Amazon Web Services General Reference*. For + # information about enabling and disabling Regions, see [Enabling a + # Region][4] and [Disabling a Region][5] in the *Amazon Web Services + # General Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [3]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + # [4]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable + # [5]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable + # @return [String] + # + # @!attribute [rw] policy + # The key policy to attach to the KMS key. This parameter is optional. + # If you do not provide a key policy, KMS attaches the [default key + # policy][1] to the KMS key. + # + # The key policy is not a shared property of multi-Region keys. You + # can specify the same key policy or a different key policy for each + # key in a set of related multi-Region keys. KMS does not synchronize + # this property. + # + # If you provide a key policy, it must meet the following criteria: + # + # * The key policy must allow the calling principal to make a + # subsequent `PutKeyPolicy` request on the KMS key. This reduces the + # risk that the KMS key becomes unmanageable. For more information, + # see [Default key policy][2] in the *Key Management Service + # Developer Guide*. (To omit this condition, set + # `BypassPolicyLockoutSafetyCheck` to true.) + # + # * Each statement in the key policy must contain one or more + # principals. The principals in the key policy must exist and be + # visible to KMS. When you create a new Amazon Web Services + # principal, you might need to enforce a delay before including the + # new principal in a key policy because the new principal might not + # be immediately visible to KMS. For more information, see [Changes + # that I make are not always immediately visible][3] in the *Amazon + # Web Services Identity and Access Management User Guide*. + # + # A key policy document can include only the following characters: + # + # * Printable ASCII characters from the space character (`\u0020`) + # through the end of the ASCII character range. + # + # * Printable characters in the Basic Latin and Latin-1 Supplement + # character set (through `\u00FF`). 
+ #
+ # * The tab (`\u0009`), line feed (`\u000A`), and carriage return
+ # (`\u000D`) special characters
+ #
+ # For information about key policies, see [Key policies in KMS][4] in
+ # the *Key Management Service Developer Guide*. For help writing and
+ # formatting a JSON policy document, see the [IAM JSON Policy
+ # Reference][5] in the *Identity and Access Management User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
+ # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html
+ # [5]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html
+ # @return [String]
+ #
+ # @!attribute [rw] bypass_policy_lockout_safety_check
+ # Skips ("bypasses") the key policy lockout safety check. The
+ # default value is false.
+ #
+ # Setting this value to true increases the risk that the KMS key
+ # becomes unmanageable. Do not set this value to true
+ # indiscriminately.
+ #
+ # For more information, see [Default key policy][1] in the *Key
+ # Management Service Developer Guide*.
+ #
+ # Use this parameter only when you intend to prevent the principal
+ # that is making the request from making a subsequent PutKeyPolicy
+ # request on the KMS key.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key
+ # @return [Boolean]
+ #
+ # @!attribute [rw] description
+ # A description of the KMS key. The default value is an empty string
+ # (no description).
+ #
+ # The description is not a shared property of multi-Region keys. You
+ # can specify the same description or a different description for each
+ # key in a set of related multi-Region keys. KMS does not synchronize
+ # this property.
+ # @return [String]
+ #
+ # @!attribute [rw] tags
+ # Assigns one or more tags to the replica key. Use this parameter to
+ # tag the KMS key when it is created. To tag an existing KMS key, use
+ # the TagResource operation.
+ #
+ # Tagging or untagging a KMS key can allow or deny permission to the
+ # KMS key. For details, see [ABAC for KMS][1] in the *Key Management
+ # Service Developer Guide*.
+ #
+ #
+ #
+ # To use this parameter, you must have [kms:TagResource][2] permission
+ # in an IAM policy.
+ #
+ # Tags are not a shared property of multi-Region keys. You can specify
+ # the same tags or different tags for each key in a set of related
+ # multi-Region keys. KMS does not synchronize this property.
+ #
+ # Each tag consists of a tag key and a tag value. Both the tag key and
+ # the tag value are required, but the tag value can be an empty (null)
+ # string. You cannot have more than one tag on a KMS key with the same
+ # tag key. If you specify an existing tag key with a different tag
+ # value, KMS replaces the current tag value with the specified one.
+ #
+ # When you add tags to an Amazon Web Services resource, Amazon Web
+ # Services generates a cost allocation report with usage and costs
+ # aggregated by tags. Tags can also be used to control access to a KMS
+ # key. For details, see [Tagging Keys][3].
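+ #
+ # For example, a minimal sketch of replicating a multi-Region primary
+ # key into another Region, with a tag, using the `aws-sdk-kms`
+ # client; the key ARN, Regions, and tag values are placeholder
+ # assumptions:
+ #
+ #     kms = Aws::KMS::Client.new(region: "us-east-2")
+ #     resp = kms.replicate_key(
+ #       key_id: "arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab",
+ #       replica_region: "eu-west-1",
+ #       tags: [{ tag_key: "Project", tag_value: "Example" }]
+ #     )
+ #     puts resp.replica_key_metadata.arn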
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKeyRequest AWS API Documentation
+ #
+ class ReplicateKeyRequest < Struct.new(
+ :key_id,
+ :replica_region,
+ :policy,
+ :bypass_policy_lockout_safety_check,
+ :description,
+ :tags)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] replica_key_metadata
+ # Displays details about the new replica key, including its Amazon
+ # Resource Name ([key ARN][1]) and [Key states of KMS keys][2]. It
+ # also includes the ARN and Amazon Web Services Region of its primary
+ # key and other replica keys.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # @return [Types::KeyMetadata]
+ #
+ # @!attribute [rw] replica_policy
+ # The key policy of the new replica key. The value is a key policy
+ # document in JSON format.
+ # @return [String]
+ #
+ # @!attribute [rw] replica_tags
+ # The tags on the new replica key. The value is a list of tag key and
+ # tag value pairs.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKeyResponse AWS API Documentation
+ #
+ class ReplicateKeyResponse < Struct.new(
+ :replica_key_metadata,
+ :replica_policy,
+ :replica_tags)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] grant_token
+ # Identifies the grant to be retired. You can use a grant token to
+ # identify a new grant even before it has achieved eventual
+ # consistency.
+ #
+ # Only the CreateGrant operation returns a grant token. For details,
+ # see [Grant token][1] and [Eventual consistency][2] in the *Key
+ # Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency
+ # @return [String]
+ #
+ # @!attribute [rw] key_id
+ # The key ARN of the KMS key associated with the grant. To find the
+ # key ARN, use the ListKeys operation.
+ #
+ # For example:
+ # `arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ # @return [String]
+ #
+ # @!attribute [rw] grant_id
+ # Identifies the grant to retire. To get the grant ID, use
+ # CreateGrant, ListGrants, or ListRetirableGrants.
+ #
+ # * Grant ID Example -
+ # 0123456789012345678901234567890123456789012345678901234567890123
+ #
+ # ^
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrantRequest AWS API Documentation
+ #
+ class RetireGrantRequest < Struct.new(
+ :grant_token,
+ :key_id,
+ :grant_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] key_id
+ # A unique identifier for the KMS key associated with the grant. To
+ # get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # Specify the key ID or key ARN of the KMS key. To specify a KMS key
+ # in a different Amazon Web Services account, you must use the key
+ # ARN.
+ # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] grant_id + # Identifies the grant to revoke. To get the grant ID, use + # CreateGrant, ListGrants, or ListRetirableGrants. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrantRequest AWS API Documentation + # + class RevokeGrantRequest < Struct.new( + :key_id, + :grant_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The unique identifier of the KMS key to delete. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] pending_window_in_days + # The waiting period, specified in number of days. After the waiting + # period ends, KMS deletes the KMS key. + # + # If the KMS key is a multi-Region primary key with replica keys, the + # waiting period begins when the last of its replica keys is deleted. + # Otherwise, the waiting period begins immediately. + # + # This value is optional. If you include a value, it must be between 7 + # and 30, inclusive. If you do not include a value, it defaults to 30. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletionRequest AWS API Documentation + # + class ScheduleKeyDeletionRequest < Struct.new( + :key_id, + :pending_window_in_days) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the KMS key whose + # deletion is scheduled. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] deletion_date + # The date and time after which KMS deletes the KMS key. + # + # If the KMS key is a multi-Region primary key with replica keys, this + # field does not appear. The deletion date for the primary key isn't + # known until its last replica key is deleted. + # @return [Time] + # + # @!attribute [rw] key_state + # The current status of the KMS key. + # + # For more information about how key state affects the use of a KMS + # key, see [Key states of KMS keys][1] in the *Key Management Service + # Developer Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # @return [String] + # + # @!attribute [rw] pending_window_in_days + # The waiting period before the KMS key is deleted. + # + # If the KMS key is a multi-Region primary key with replicas, the + # waiting period begins when the last of its replica keys is deleted. + # Otherwise, the waiting period begins immediately. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletionResponse AWS API Documentation + # + class ScheduleKeyDeletionResponse < Struct.new( + :key_id, + :deletion_date, + :key_state, + :pending_window_in_days) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies an asymmetric KMS key. 
KMS uses the private key in the
+ # asymmetric KMS key to sign the message. The `KeyUsage` type of the
+ # KMS key must be `SIGN_VERIFY`. To find the `KeyUsage` of a KMS key,
+ # use the DescribeKey operation.
+ #
+ # To specify a KMS key, use its key ID, key ARN, alias name, or alias
+ # ARN. When using an alias name, prefix it with `"alias/"`. To specify
+ # a KMS key in a different Amazon Web Services account, you must use
+ # the key ARN or alias ARN.
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Alias name: `alias/ExampleAlias`
+ #
+ # * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey. To get the alias name and alias ARN, use ListAliases.
+ # @return [String]
+ #
+ # @!attribute [rw] message
+ # Specifies the message or message digest to sign. Messages can be
+ # 0-4096 bytes. To sign a larger message, provide a message digest.
+ #
+ # If you provide a message digest, use the `DIGEST` value of
+ # `MessageType` to prevent the digest from being hashed again while
+ # signing.
+ # @return [String]
+ #
+ # @!attribute [rw] message_type
+ # Tells KMS whether the value of the `Message` parameter should be
+ # hashed as part of the signing algorithm. Use `RAW` for unhashed
+ # messages; use `DIGEST` for message digests, which are already
+ # hashed.
+ #
+ # When the value of `MessageType` is `RAW`, KMS uses the standard
+ # signing algorithm, which begins with a hash function. When the value
+ # is `DIGEST`, KMS skips the hashing step in the signing algorithm.
+ #
+ # Use the `DIGEST` value only when the value of the `Message`
+ # parameter is a message digest. If you use the `DIGEST` value with an
+ # unhashed message, the security of the signing operation can be
+ # compromised.
+ #
+ # When the value of `MessageType` is `DIGEST`, the length of the
+ # `Message` value must match the length of hashed messages for the
+ # specified signing algorithm.
+ #
+ # You can submit a message digest and omit the `MessageType` or
+ # specify `RAW` so the digest is hashed again while signing. However,
+ # this can cause verification failures when verifying with a system
+ # that assumes a single hash.
+ #
+ # The hashing algorithm that `Sign` uses is based on the
+ # `SigningAlgorithm` value.
+ #
+ # * Signing algorithms that end in SHA\_256 use the SHA\_256 hashing
+ # algorithm.
+ #
+ # * Signing algorithms that end in SHA\_384 use the SHA\_384 hashing
+ # algorithm.
+ #
+ # * Signing algorithms that end in SHA\_512 use the SHA\_512 hashing
+ # algorithm.
+ #
+ # * SM2DSA uses the SM3 hashing algorithm. For details, see [Offline
+ # verification with SM2 key pairs][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+ # @return [String]
+ #
+ # @!attribute [rw] grant_tokens
+ # A list of grant tokens.
+ #
+ # Use a grant token when your permission to call this operation comes
+ # from a new grant that has not yet achieved *eventual consistency*.
+ # For more information, see [Grant token][1] and [Using a grant
+ # token][2] in the *Key Management Service Developer Guide*.
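+ #
+ # Putting these parameters together, a minimal sketch of a `Sign`
+ # request with the `aws-sdk-kms` client; the key ID, message, and
+ # algorithm are placeholder assumptions (the algorithm assumes an RSA
+ # signing key):
+ #
+ #     kms = Aws::KMS::Client.new
+ #     resp = kms.sign(
+ #       key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #       message: "hello",
+ #       message_type: "RAW",
+ #       signing_algorithm: "RSASSA_PSS_SHA_256"
+ #     )
+ #     signature = resp.signature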
+ # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + # @return [Array] + # + # @!attribute [rw] signing_algorithm + # Specifies the signing algorithm to use when signing the message. + # + # Choose an algorithm that is compatible with the type and size of the + # specified asymmetric KMS key. When signing with RSA key pairs, + # RSASSA-PSS algorithms are preferred. We include RSASSA-PKCS1-v1\_5 + # algorithms for compatibility with existing applications. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/SignRequest AWS API Documentation + # + class SignRequest < Struct.new( + :key_id, + :message, + :message_type, + :grant_tokens, + :signing_algorithm) + SENSITIVE = [:message] + include Aws::Structure + end + + # @!attribute [rw] key_id + # The Amazon Resource Name ([key ARN][1]) of the asymmetric KMS key + # that was used to sign the message. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + # @return [String] + # + # @!attribute [rw] signature + # The cryptographic signature that was generated for the message. + # + # * When used with the supported RSA signing algorithms, the encoding + # of this value is defined by [PKCS #1 in RFC 8017][1]. + # + # * When used with the `ECDSA_SHA_256`, `ECDSA_SHA_384`, or + # `ECDSA_SHA_512` signing algorithms, this value is a DER-encoded + # object as defined by ANS X9.62–2005 and [RFC 3279 Section + # 2.2.3][2]. This is the most commonly used signature format and is + # appropriate for most uses. + # + # When you use the HTTP API or the Amazon Web Services CLI, the value + # is Base64-encoded. Otherwise, it is not Base64-encoded. + # + # + # + # [1]: https://tools.ietf.org/html/rfc8017 + # [2]: https://tools.ietf.org/html/rfc3279#section-2.2.3 + # @return [String] + # + # @!attribute [rw] signing_algorithm + # The signing algorithm that was used to sign the message. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/SignResponse AWS API Documentation + # + class SignResponse < Struct.new( + :key_id, + :signature, + :signing_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # A key-value pair. A tag consists of a tag key and a tag value. Tag + # keys and tag values are both required, but tag values can be empty + # (null) strings. + # + # For information about the rules that apply to tag keys and tag values, + # see [User-Defined Tag Restrictions][1] in the *Amazon Web Services + # Billing and Cost Management User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html + # + # @!attribute [rw] tag_key + # The key of the tag. + # @return [String] + # + # @!attribute [rw] tag_value + # The value of the tag. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Tag AWS API Documentation + # + class Tag < Struct.new( + :tag_key, + :tag_value) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because one or more tags are not valid. 
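+ #
+ # For example, a minimal sketch of surfacing this error from a
+ # tagging call with the `aws-sdk-kms` client; the key ID, and the
+ # assumption that an `aws:`-prefixed tag key is rejected, are
+ # illustrative:
+ #
+ #     kms = Aws::KMS::Client.new
+ #     begin
+ #       kms.tag_resource(
+ #         key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #         tags: [{ tag_key: "aws:reserved", tag_value: "x" }]
+ #       )
+ #     rescue Aws::KMS::Errors::TagException => e
+ #       warn e.message
+ #     end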
+ # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagException AWS API Documentation + # + class TagException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies a customer managed key in the account and Region. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] tags + # One or more tags. + # + # Each tag consists of a tag key and a tag value. The tag value can be + # an empty (null) string. + # + # You cannot have more than one tag on a KMS key with the same tag + # key. If you specify an existing tag key with a different tag value, + # KMS replaces the current tag value with the specified one. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResourceRequest AWS API Documentation + # + class TagResourceRequest < Struct.new( + :key_id, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # The request was rejected because a specified parameter is not + # supported or a specified resource is not valid for this operation. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UnsupportedOperationException AWS API Documentation + # + class UnsupportedOperationException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the KMS key from which you are removing tags. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] tag_keys + # One or more tag keys. Specify only the tag keys, not the tag values. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResourceRequest AWS API Documentation + # + class UntagResourceRequest < Struct.new( + :key_id, + :tag_keys) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] alias_name + # Identifies the alias that is changing its KMS key. This value must + # begin with `alias/` followed by the alias name, such as + # `alias/ExampleAlias`. You cannot use `UpdateAlias` to change the + # alias name. + # @return [String] + # + # @!attribute [rw] target_key_id + # Identifies the [customer managed key][1] to associate with the + # alias. You don't have permission to associate an alias with an + # [Amazon Web Services managed key][2]. + # + # The KMS key must be in the same Amazon Web Services account and + # Region as the alias. Also, the new target KMS key must be the same + # type as the current target KMS key (both symmetric or both + # asymmetric or both HMAC) and they must have the same key usage. + # + # Specify the key ID or key ARN of the KMS key. 
+ #
+ # For example:
+ #
+ # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # * Key ARN:
+ # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+ #
+ # To get the key ID and key ARN for a KMS key, use ListKeys or
+ # DescribeKey.
+ #
+ # To verify that the alias is mapped to the correct KMS key, use
+ # ListAliases.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAliasRequest AWS API Documentation
+ #
+ class UpdateAliasRequest < Struct.new(
+ :alias_name,
+ :target_key_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] custom_key_store_id
+ # Identifies the custom key store that you want to update. Enter the
+ # ID of the custom key store. To find the ID of a custom key store,
+ # use the DescribeCustomKeyStores operation.
+ # @return [String]
+ #
+ # @!attribute [rw] new_custom_key_store_name
+ # Changes the friendly name of the custom key store to the value that
+ # you specify. The custom key store name must be unique in the Amazon
+ # Web Services account.
+ #
+ # To change this value, a CloudHSM key store must be disconnected. An
+ # external key store can be connected or disconnected.
+ # @return [String]
+ #
+ # @!attribute [rw] key_store_password
+ # Enter the current password of the `kmsuser` crypto user (CU) in the
+ # CloudHSM cluster that is associated with the custom key store. This
+ # parameter is valid only for custom key stores with a
+ # `CustomKeyStoreType` of `AWS_CLOUDHSM`.
+ #
+ # This parameter tells KMS the current password of the `kmsuser`
+ # crypto user (CU). It does not set or change the password of any
+ # users in the CloudHSM cluster.
+ #
+ # To change this value, the CloudHSM key store must be disconnected.
+ # @return [String]
+ #
+ # @!attribute [rw] cloud_hsm_cluster_id
+ # Associates the custom key store with a related CloudHSM cluster.
+ # This parameter is valid only for custom key stores with a
+ # `CustomKeyStoreType` of `AWS_CLOUDHSM`.
+ #
+ # Enter the cluster ID of the cluster that you used to create the
+ # custom key store or a cluster that shares a backup history and has
+ # the same cluster certificate as the original cluster. You cannot use
+ # this parameter to associate a custom key store with an unrelated
+ # cluster. In addition, the replacement cluster must [fulfill the
+ # requirements][1] for a cluster associated with a custom key store.
+ # To view the cluster certificate of a cluster, use the
+ # [DescribeClusters][2] operation.
+ #
+ # To change this value, the CloudHSM key store must be disconnected.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore
+ # [2]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html
+ # @return [String]
+ #
+ # @!attribute [rw] xks_proxy_uri_endpoint
+ # Changes the URI endpoint that KMS uses to connect to your external
+ # key store proxy (XKS proxy). This parameter is valid only for custom
+ # key stores with a `CustomKeyStoreType` of `EXTERNAL_KEY_STORE`.
+ #
+ # For external key stores with an `XksProxyConnectivity` value of
+ # `PUBLIC_ENDPOINT`, the protocol must be HTTPS.
+ #
+ # For external key stores with an `XksProxyConnectivity` value of
+ # `VPC_ENDPOINT_SERVICE`, specify `https://` followed by the private
+ # DNS name associated with the VPC endpoint service. Each external key
+ # store must use a different private DNS name.
+ #
+ # The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must
+ # be unique in the Amazon Web Services account and Region.
+ #
+ # To change this value, the external key store must be disconnected.
+ # @return [String]
+ #
+ # @!attribute [rw] xks_proxy_uri_path
+ # Changes the base path to the proxy APIs for this external key store.
+ # To find this value, see the documentation for your external key
+ # manager and external key store proxy (XKS proxy). This parameter is
+ # valid only for custom key stores with a `CustomKeyStoreType` of
+ # `EXTERNAL_KEY_STORE`.
+ #
+ # The value must start with `/` and must end with `/kms/xks/v1`, where
+ # `v1` represents the version of the KMS external key store proxy API.
+ # You can include an optional prefix between the required elements
+ # such as `/example/kms/xks/v1`.
+ #
+ # The combined `XksProxyUriEndpoint` and `XksProxyUriPath` values must
+ # be unique in the Amazon Web Services account and Region.
+ #
+ # You can change this value when the external key store is connected
+ # or disconnected.
+ # @return [String]
+ #
+ # @!attribute [rw] xks_proxy_vpc_endpoint_service_name
+ # Changes the name that KMS uses to identify the Amazon VPC endpoint
+ # service for your external key store proxy (XKS proxy). This
+ # parameter is valid when the `CustomKeyStoreType` is
+ # `EXTERNAL_KEY_STORE` and the `XksProxyConnectivity` is
+ # `VPC_ENDPOINT_SERVICE`.
+ #
+ # To change this value, the external key store must be disconnected.
+ # @return [String]
+ #
+ # @!attribute [rw] xks_proxy_authentication_credential
+ # Changes the credentials that KMS uses to sign requests to the
+ # external key store proxy (XKS proxy). This parameter is valid only
+ # for custom key stores with a `CustomKeyStoreType` of
+ # `EXTERNAL_KEY_STORE`.
+ #
+ # You must specify both the `AccessKeyId` and `SecretAccessKey` value
+ # in the authentication credential, even if you are only updating one
+ # value.
+ #
+ # This parameter doesn't establish or change your authentication
+ # credentials on the proxy. It just tells KMS the credential that you
+ # established with your external key store proxy. For example, if you
+ # rotate the credential on your external key store proxy, you can use
+ # this parameter to update the credential in KMS.
+ #
+ # You can change this value when the external key store is connected
+ # or disconnected.
+ # @return [Types::XksProxyAuthenticationCredentialType]
+ #
+ # @!attribute [rw] xks_proxy_connectivity
+ # Changes the connectivity setting for the external key store. To
+ # indicate that the external key store proxy uses an Amazon VPC
+ # endpoint service to communicate with KMS, specify
+ # `VPC_ENDPOINT_SERVICE`. Otherwise, specify `PUBLIC_ENDPOINT`.
+ #
+ # If you change the `XksProxyConnectivity` to `VPC_ENDPOINT_SERVICE`,
+ # you must also change the `XksProxyUriEndpoint` and add an
+ # `XksProxyVpcEndpointServiceName` value.
+ #
+ # If you change the `XksProxyConnectivity` to `PUBLIC_ENDPOINT`, you
+ # must also change the `XksProxyUriEndpoint` and specify a null or
+ # empty string for the `XksProxyVpcEndpointServiceName` value.
+ #
+ # To change this value, the external key store must be disconnected.
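+ #
+ # For example, a minimal sketch of switching an external key store to
+ # VPC endpoint connectivity with the `aws-sdk-kms` client; the store
+ # ID, endpoint, and service name are placeholder assumptions:
+ #
+ #     kms = Aws::KMS::Client.new
+ #     kms.update_custom_key_store(
+ #       custom_key_store_id: "cks-1234567890abcdef0",
+ #       xks_proxy_connectivity: "VPC_ENDPOINT_SERVICE",
+ #       xks_proxy_uri_endpoint: "https://myproxy-private.xks.example.com",
+ #       xks_proxy_vpc_endpoint_service_name: "com.amazonaws.vpce.us-east-1.vpce-svc-example"
+ #     )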
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStoreRequest AWS API Documentation + # + class UpdateCustomKeyStoreRequest < Struct.new( + :custom_key_store_id, + :new_custom_key_store_name, + :key_store_password, + :cloud_hsm_cluster_id, + :xks_proxy_uri_endpoint, + :xks_proxy_uri_path, + :xks_proxy_vpc_endpoint_service_name, + :xks_proxy_authentication_credential, + :xks_proxy_connectivity) + SENSITIVE = [:key_store_password] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStoreResponse AWS API Documentation + # + class UpdateCustomKeyStoreResponse < Aws::EmptyStructure; end + + # @!attribute [rw] key_id + # Updates the description of the specified KMS key. + # + # Specify the key ID or key ARN of the KMS key. + # + # For example: + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] description + # New description for the KMS key. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescriptionRequest AWS API Documentation + # + class UpdateKeyDescriptionRequest < Struct.new( + :key_id, + :description) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] key_id + # Identifies the current primary key. When the operation completes, + # this KMS key will be a replica key. + # + # Specify the key ID or key ARN of a multi-Region primary key. + # + # For example: + # + # * Key ID: `mrk-1234abcd12ab34cd56ef1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab` + # + # To get the key ID and key ARN for a KMS key, use ListKeys or + # DescribeKey. + # @return [String] + # + # @!attribute [rw] primary_region + # The Amazon Web Services Region of the new primary key. Enter the + # Region ID, such as `us-east-1` or `ap-southeast-2`. There must be an + # existing replica key in this Region. + # + # When the operation completes, the multi-Region key in this Region + # will be the primary key. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegionRequest AWS API Documentation + # + class UpdatePrimaryRegionRequest < Struct.new( + :key_id, + :primary_region) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] message + # The message that will be used in the verification. Enter the same + # message that was used to generate the HMAC. + # + # GenerateMac and `VerifyMac` do not provide special handling for + # message digests. If you generated an HMAC for a hash digest of a + # message, you must verify the HMAC for the same hash digest. + # @return [String] + # + # @!attribute [rw] key_id + # The KMS key that will be used in the verification. + # + # Enter a key ID of the KMS key that was used to generate the HMAC. If + # you identify a different KMS key, the `VerifyMac` operation fails. + # @return [String] + # + # @!attribute [rw] mac_algorithm + # The MAC algorithm that will be used in the verification. Enter the + # same MAC algorithm that was used to compute the HMAC. This algorithm + # must be supported by the HMAC KMS key identified by the `KeyId` + # parameter. + # @return [String] + # + # @!attribute [rw] mac + # The HMAC to verify. 
+    #   GenerateMac operation when you specified the same message, HMAC KMS
+    #   key, and MAC algorithm as the values specified in this request.
+    #   @return [String]
+    #
+    # @!attribute [rw] grant_tokens
+    #   A list of grant tokens.
+    #
+    #   Use a grant token when your permission to call this operation comes
+    #   from a new grant that has not yet achieved *eventual consistency*.
+    #   For more information, see [Grant token][1] and [Using a grant
+    #   token][2] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+    #   [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+    #   @return [Array<String>]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMacRequest AWS API Documentation
+    #
+    class VerifyMacRequest < Struct.new(
+      :message,
+      :key_id,
+      :mac_algorithm,
+      :mac,
+      :grant_tokens)
+      SENSITIVE = [:message]
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] key_id
+    #   The HMAC KMS key used in the verification.
+    #   @return [String]
+    #
+    # @!attribute [rw] mac_valid
+    #   A Boolean value that indicates whether the HMAC was verified. A
+    #   value of `True` indicates that the HMAC (`Mac`) was generated with
+    #   the specified `Message`, HMAC KMS key (`KeyID`), and `MacAlgorithm`.
+    #
+    #   If the HMAC is not verified, the `VerifyMac` operation fails with a
+    #   `KMSInvalidMacException` exception. This exception indicates that
+    #   one or more of the inputs changed since the HMAC was computed.
+    #   @return [Boolean]
+    #
+    # @!attribute [rw] mac_algorithm
+    #   The MAC algorithm used in the verification.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMacResponse AWS API Documentation
+    #
+    class VerifyMacResponse < Struct.new(
+      :key_id,
+      :mac_valid,
+      :mac_algorithm)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] key_id
+    #   Identifies the asymmetric KMS key that will be used to verify the
+    #   signature. This must be the same KMS key that was used to generate
+    #   the signature. If you specify a different KMS key, the signature
+    #   verification fails.
+    #
+    #   To specify a KMS key, use its key ID, key ARN, alias name, or alias
+    #   ARN. When using an alias name, prefix it with `"alias/"`. To specify
+    #   a KMS key in a different Amazon Web Services account, you must use
+    #   the key ARN or alias ARN.
+    #
+    #   For example:
+    #
+    #   * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Key ARN:
+    #     `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`
+    #
+    #   * Alias name: `alias/ExampleAlias`
+    #
+    #   * Alias ARN: `arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias`
+    #
+    #   To get the key ID and key ARN for a KMS key, use ListKeys or
+    #   DescribeKey. To get the alias name and alias ARN, use ListAliases.
+    #   @return [String]
+    #
+    # @!attribute [rw] message
+    #   Specifies the message that was signed. You can submit a raw message
+    #   of up to 4096 bytes, or a hash digest of the message. If you submit
+    #   a digest, use the `MessageType` parameter with a value of `DIGEST`.
+    #
+    #   If the message specified here is different from the message that was
+    #   signed, the signature verification fails. A message and its hash
+    #   digest are considered to be the same message.
+    #   @return [String]
+    #
+    # @!attribute [rw] message_type
+    #   Tells KMS whether the value of the `Message` parameter should be
+    #   hashed as part of the signing algorithm. Use `RAW` for unhashed
+    #   messages; use `DIGEST` for message digests, which are already
+    #   hashed.
+    #
+    #   When the value of `MessageType` is `RAW`, KMS uses the standard
+    #   signing algorithm, which begins with a hash function. When the value
+    #   is `DIGEST`, KMS skips the hashing step in the signing algorithm.
+    #
+    #   Use the `DIGEST` value only when the value of the `Message`
+    #   parameter is a message digest. If you use the `DIGEST` value with an
+    #   unhashed message, the security of the verification operation can be
+    #   compromised.
+    #
+    #   When the value of `MessageType` is `DIGEST`, the length of the
+    #   `Message` value must match the length of hashed messages for the
+    #   specified signing algorithm.
+    #
+    #   You can submit a message digest and omit the `MessageType` or
+    #   specify `RAW` so the digest is hashed again while signing. However,
+    #   if the signed message is hashed once while signing, but twice while
+    #   verifying, verification fails, even when the message hasn't
+    #   changed.
+    #
+    #   The hashing algorithm that `Verify` uses is based on the
+    #   `SigningAlgorithm` value.
+    #
+    #   * Signing algorithms that end in SHA\_256 use the SHA\_256 hashing
+    #     algorithm.
+    #
+    #   * Signing algorithms that end in SHA\_384 use the SHA\_384 hashing
+    #     algorithm.
+    #
+    #   * Signing algorithms that end in SHA\_512 use the SHA\_512 hashing
+    #     algorithm.
+    #
+    #   * SM2DSA uses the SM3 hashing algorithm. For details, see [Offline
+    #     verification with SM2 key pairs][1].
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification
+    #   @return [String]
+    #
+    # @!attribute [rw] signature
+    #   The signature that the `Sign` operation generated.
+    #   @return [String]
+    #
+    # @!attribute [rw] signing_algorithm
+    #   The signing algorithm that was used to sign the message. If you
+    #   submit a different algorithm, the signature verification fails.
+    #   @return [String]
+    #
+    # @!attribute [rw] grant_tokens
+    #   A list of grant tokens.
+    #
+    #   Use a grant token when your permission to call this operation comes
+    #   from a new grant that has not yet achieved *eventual consistency*.
+    #   For more information, see [Grant token][1] and [Using a grant
+    #   token][2] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token
+    #   [2]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token
+    #   @return [Array<String>]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyRequest AWS API Documentation
+    #
+    class VerifyRequest < Struct.new(
+      :key_id,
+      :message,
+      :message_type,
+      :signature,
+      :signing_algorithm,
+      :grant_tokens)
+      SENSITIVE = [:message]
+      include Aws::Structure
+    end
+
+    # @!attribute [rw] key_id
+    #   The Amazon Resource Name ([key ARN][1]) of the asymmetric KMS key
+    #   that was used to verify the signature.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN
+    #   @return [String]
+    #
+    # @!attribute [rw] signature_valid
+    #   A Boolean value that indicates whether the signature was verified. A
+    #   value of `True` indicates that the `Signature` was produced by
+    #   signing the `Message` with the specified `KeyID` and
+    #   `SigningAlgorithm`. If the signature is not verified, the `Verify`
+    #   operation fails with a `KMSInvalidSignatureException` exception.
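+    #
+    #   As an illustrative sketch (the key alias and algorithm below are
+    #   placeholders, and `signature_bytes` stands for the output of an
+    #   earlier `sign` call), callers read this flag from a successful
+    #   `verify` call and rescue the exception otherwise:
+    #
+    #       kms = Aws::KMS::Client.new
+    #       begin
+    #         resp = kms.verify(
+    #           key_id: "alias/ExampleAlias",
+    #           message: "message to verify",
+    #           message_type: "RAW",
+    #           signature: signature_bytes,
+    #           signing_algorithm: "RSASSA_PSS_SHA_256"
+    #         )
+    #         resp.signature_valid #=> true
+    #       rescue Aws::KMS::Errors::KMSInvalidSignatureException
+    #         # The signature did not match the message; handle the failure.
+    #       end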
+    #   @return [Boolean]
+    #
+    # @!attribute [rw] signing_algorithm
+    #   The signing algorithm that was used to verify the signature.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyResponse AWS API Documentation
+    #
+    class VerifyResponse < Struct.new(
+      :key_id,
+      :signature_valid,
+      :signing_algorithm)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the external key (`XksKeyId`) is
+    # already associated with a KMS key in this external key store. Each
+    # KMS key in an external key store must be associated with a different
+    # external key.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksKeyAlreadyInUseException AWS API Documentation
+    #
+    class XksKeyAlreadyInUseException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # Information about the [external key][1] that is associated with a KMS
+    # key in an external key store.
+    #
+    # This element appears in a CreateKey or DescribeKey response only for a
+    # KMS key in an external key store.
+    #
+    # The *external key* is a symmetric encryption key that is hosted by an
+    # external key manager outside of Amazon Web Services. When you use the
+    # KMS key in an external key store in a cryptographic operation, the
+    # cryptographic operation is performed in the external key manager using
+    # the specified external key. For more information, see [External
+    # key][1] in the *Key Management Service Developer Guide*.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key
+    #
+    # @!attribute [rw] id
+    #   The ID of the external key in its external key manager. This is the
+    #   ID that the external key store proxy uses to identify the external
+    #   key.
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksKeyConfigurationType AWS API Documentation
+    #
+    class XksKeyConfigurationType < Struct.new(
+      :id)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the external key specified by the
+    # `XksKeyId` parameter did not meet the configuration requirements for
+    # an external key store.
+    #
+    # The external key must be an AES-256 symmetric key that is enabled and
+    # performs encryption and decryption.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksKeyInvalidConfigurationException AWS API Documentation
+    #
+    class XksKeyInvalidConfigurationException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the external key store proxy could
+    # not find the external key. This exception is thrown when the value of
+    # the `XksKeyId` parameter doesn't identify a key in the external key
+    # manager associated with the external key proxy.
+    #
+    # Verify that the `XksKeyId` represents an existing key in the external
+    # key manager. Use the key identifier that the external key store proxy
+    # uses to identify the key. For details, see the documentation provided
+    # with your external key store proxy or key manager.
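+    #
+    # As a hedged sketch (both identifiers below are placeholders), callers
+    # can rescue this error when creating a KMS key in an external key
+    # store:
+    #
+    #     kms = Aws::KMS::Client.new
+    #     begin
+    #       kms.create_key(
+    #         origin: "EXTERNAL_KEY_STORE",
+    #         custom_key_store_id: "cks-1234567890abcdef0", # placeholder
+    #         xks_key_id: "bb8562717f809024"                # placeholder
+    #       )
+    #     rescue Aws::KMS::Errors::XksKeyNotFoundException
+    #       # Confirm the key ID in the external key manager, then retry.
+    #     end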
+ # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksKeyNotFoundException AWS API Documentation + # + class XksKeyNotFoundException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # KMS uses the authentication credential to sign requests that it sends + # to the external key store proxy (XKS proxy) on your behalf. You + # establish these credentials on your external key store proxy and + # report them to KMS. + # + # The `XksProxyAuthenticationCredential` includes two required elements. + # + # @!attribute [rw] access_key_id + # A unique identifier for the raw secret access key. + # @return [String] + # + # @!attribute [rw] raw_secret_access_key + # A secret string of 43-64 characters. Valid characters are a-z, A-Z, + # 0-9, /, +, and =. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyAuthenticationCredentialType AWS API Documentation + # + class XksProxyAuthenticationCredentialType < Struct.new( + :access_key_id, + :raw_secret_access_key) + SENSITIVE = [:access_key_id, :raw_secret_access_key] + include Aws::Structure + end + + # Detailed information about the external key store proxy (XKS proxy). + # Your external key store proxy translates KMS requests into a format + # that your external key manager can understand. These fields appear in + # a DescribeCustomKeyStores response only when the `CustomKeyStoreType` + # is `EXTERNAL_KEY_STORE`. + # + # @!attribute [rw] connectivity + # Indicates whether the external key store proxy uses a public + # endpoint or an Amazon VPC endpoint service to communicate with KMS. + # @return [String] + # + # @!attribute [rw] access_key_id + # The part of the external key store [proxy authentication + # credential][1] that uniquely identifies the secret access key. + # + # + # + # [1]: https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential + # @return [String] + # + # @!attribute [rw] uri_endpoint + # The URI endpoint for the external key store proxy. + # + # If the external key store proxy has a public endpoint, it is + # displayed here. + # + # If the external key store proxy uses an Amazon VPC endpoint service + # name, this field displays the private DNS name associated with the + # VPC endpoint service. + # @return [String] + # + # @!attribute [rw] uri_path + # The path to the external key store proxy APIs. + # @return [String] + # + # @!attribute [rw] vpc_endpoint_service_name + # The Amazon VPC endpoint service used to communicate with the + # external key store proxy. This field appears only when the external + # key store proxy uses an Amazon VPC endpoint service to communicate + # with KMS. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyConfigurationType AWS API Documentation + # + class XksProxyConfigurationType < Struct.new( + :connectivity, + :access_key_id, + :uri_endpoint, + :uri_path, + :vpc_endpoint_service_name) + SENSITIVE = [:access_key_id] + include Aws::Structure + end + + # The request was rejected because the proxy credentials failed to + # authenticate to the specified external key store proxy. The specified + # external key store proxy rejected a status request from KMS due to + # invalid credentials. This can indicate an error in the credentials or + # in the identification of the external key store proxy. 
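+    #
+    # If the credential was rotated on the proxy, one remedy is to report
+    # the new credential to KMS. A rough sketch (all values below are
+    # placeholders):
+    #
+    #     kms = Aws::KMS::Client.new
+    #     kms.update_custom_key_store(
+    #       custom_key_store_id: "cks-1234567890abcdef0",
+    #       xks_proxy_authentication_credential: {
+    #         access_key_id: "ABCDE12345670EXAMPLE",
+    #         raw_secret_access_key: "DXjSUawnel2fr6SKC7G25CNxTyWKE5PF9XX6H/u9pSo="
+    #       }
+    #     )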
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyIncorrectAuthenticationCredentialException AWS API Documentation
+    #
+    class XksProxyIncorrectAuthenticationCredentialException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the Amazon VPC endpoint service
+    # configuration does not fulfill the requirements for an external key
+    # store proxy. For details, see the exception message.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyInvalidConfigurationException AWS API Documentation
+    #
+    class XksProxyInvalidConfigurationException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # KMS cannot interpret the response it received from the external key
+    # store proxy. The problem might be a poorly constructed response, but
+    # it could also be a transient network issue. If you see this error
+    # repeatedly, report it to the proxy vendor.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyInvalidResponseException AWS API Documentation
+    #
+    class XksProxyInvalidResponseException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the `XksProxyUriEndpoint` is already
+    # associated with an external key store in the Amazon Web Services
+    # account and Region. Each external key store in an account and Region
+    # must use a unique external key store proxy address.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyUriEndpointInUseException AWS API Documentation
+    #
+    class XksProxyUriEndpointInUseException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the concatenation of the
+    # `XksProxyUriEndpoint` and `XksProxyUriPath` is already associated with
+    # an external key store in the Amazon Web Services account and Region.
+    # Each external key store in an account and Region must use a unique
+    # external key store proxy API address.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyUriInUseException AWS API Documentation
+    #
+    class XksProxyUriInUseException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # KMS was unable to reach the specified `XksProxyUriPath`. The path must
+    # be reachable before you create the external key store or update its
+    # settings.
+    #
+    # This exception is also thrown when the external key store proxy
+    # response to a `GetHealthStatus` request indicates that all external
+    # key manager instances are unavailable.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyUriUnreachableException AWS API Documentation
+    #
+    class XksProxyUriUnreachableException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the specified Amazon VPC endpoint
+    # service is already associated with an external key store in the Amazon
+    # Web Services account and Region. Each external key store in an Amazon
+    # Web Services account and Region must use a different Amazon VPC
+    # endpoint service.
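+    #
+    # To find which store already uses the service, one option (a sketch
+    # only; pagination of the response is omitted) is to scan the existing
+    # custom key stores:
+    #
+    #     kms = Aws::KMS::Client.new
+    #     kms.describe_custom_key_stores.custom_key_stores.each do |cks|
+    #       cfg = cks.xks_proxy_configuration
+    #       puts "#{cks.custom_key_store_name}: #{cfg && cfg.vpc_endpoint_service_name}"
+    #     end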
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyVpcEndpointServiceInUseException AWS API Documentation
+    #
+    class XksProxyVpcEndpointServiceInUseException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because the Amazon VPC endpoint service
+    # configuration does not fulfill the requirements for an external key
+    # store proxy. For details, see the exception message and [review the
+    # requirements][1] for Amazon VPC endpoint service connectivity for an
+    # external key store.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyVpcEndpointServiceInvalidConfigurationException AWS API Documentation
+    #
+    class XksProxyVpcEndpointServiceInvalidConfigurationException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+    # The request was rejected because KMS could not find the specified VPC
+    # endpoint service. Use DescribeCustomKeyStores to verify the VPC
+    # endpoint service name for the external key store. Also, confirm that
+    # the `Allow principals` list for the VPC endpoint service includes the
+    # KMS service principal for the Region, such as
+    # `cks.kms.us-east-1.amazonaws.com`.
+    #
+    # @!attribute [rw] message
+    #   @return [String]
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/XksProxyVpcEndpointServiceNotFoundException AWS API Documentation
+    #
+    class XksProxyVpcEndpointServiceNotFoundException < Struct.new(
+      :message)
+      SENSITIVE = []
+      include Aws::Structure
+    end
+
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/CHANGELOG.md
new file mode 100644
index 0000000..3b41e77
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/CHANGELOG.md
@@ -0,0 +1,967 @@
+Unreleased Changes
+------------------
+
+1.121.0 (2023-04-19)
+------------------
+
+* Feature - Provides support for "Snow" Storage class.
+
+1.120.1 (2023-04-05)
+------------------
+
+* Issue - Skip `#check_for_cached_region` if custom endpoint provided
+
+1.120.0 (2023-03-31)
+------------------
+
+* Feature - Documentation updates for Amazon S3
+
+1.119.2 (2023-03-22)
+------------------
+
+* Issue - Provide `endpoint` and `bucket` attributes on `Aws::S3::Errors::PermanentRedirect` error objects.
+
+1.119.1 (2023-02-13)
+------------------
+
+* Issue - Ensure object metadata is not lost on multipart copy (#2821).
+
+1.119.0 (2023-01-26)
+------------------
+
+* Feature - Allow FIPS to be used with path-style URLs.
+
+1.118.0 (2023-01-18)
+------------------
+
+* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+* Issue - Replace runtime endpoint resolution approach with generated ruby code.
+
+1.117.2 (2022-11-30)
+------------------
+
+* Issue - Return error messages from failures in threads in `MultipartStreamUploader` (#2793).
+
+1.117.1 (2022-10-26)
+------------------
+
+* Issue - Fix custom endpoint and port regression with `presigned_url` (#2776).
+
+1.117.0 (2022-10-25)
+------------------
+
+* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+* Issue - Apply checksums to MultipartStreamUploader (#2769).
+ +1.116.0 (2022-10-21) +------------------ + +* Feature - S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket. + +1.115.0 (2022-10-19) +------------------ + +* Feature - Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters. + +1.114.0 (2022-05-03) +------------------ + +* Feature - Documentation only update for doc bug fixes for the S3 API docs. + +1.113.2 (2022-04-26) +------------------ + +* Issue - Fix an issue where `ExpiredToken` errors were retried as if the request was from another region. + +1.113.1 (2022-04-25) +------------------ + +* Issue - Rewind the underlying file on a streaming retry that is not a truncated body (#2692). + +1.113.0 (2022-02-24) +------------------ + +* Feature - This release adds support for new integrity checking capabilities in Amazon S3. You can choose from four supported checksum algorithms for data integrity checking on your upload and download requests. In addition, AWS SDK can automatically calculate a checksum as it streams data into S3 + +1.112.0 (2022-02-03) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.111.3 (2022-01-24) +------------------ + +* Issue - Fix starts_with fields on `PresignedPost` (#2636). + +1.111.2 (2022-01-20) +------------------ + +* Issue - Minor cleanups. + +1.111.1 (2022-01-06) +------------------ + +* Issue - Don't fail small files in `upload_file` when `:thread_count` is set. (#2628) + +1.111.0 (2022-01-04) +------------------ + +* Feature - Minor doc-based updates based on feedback bugs received. + +1.110.0 (2021-12-21) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.109.0 (2021-11-30) +------------------ + +* Feature - Introduce Amazon S3 Glacier Instant Retrieval storage class and a new setting in S3 Object Ownership to disable ACLs for bucket and the objects in it. + +1.108.0 (2021-11-29) +------------------ + +* Feature - Amazon S3 Event Notifications adds Amazon EventBridge as a destination and supports additional event types. The PutBucketNotificationConfiguration API can now skip validation of Amazon SQS, Amazon SNS and AWS Lambda destinations. + +1.107.0 (2021-11-23) +------------------ + +* Feature - Introduce two new Filters to S3 Lifecycle configurations - ObjectSizeGreaterThan and ObjectSizeLessThan. Introduce a new way to trigger actions on noncurrent versions by providing the number of newer noncurrent versions along with noncurrent days. + +1.106.0 (2021-11-17) +------------------ + +* Feature - Add `presigned_request` method to `Aws::S3::Object`. + +1.105.1 (2021-11-05) +------------------ + +* Issue - Raise error when `use_fips_endpoint` is used with `use_accelerate_endpoint`. + +1.105.0 (2021-11-04) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.104.0 (2021-10-18) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. 
+ +1.103.0 (2021-09-16) +------------------ + +* Feature - Add support for access point arn filtering in S3 CW Request Metrics + +1.102.0 (2021-09-02) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.101.0 (2021-09-01) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.100.0 (2021-08-27) +------------------ + +* Feature - Documentation updates for Amazon S3. + +1.99.0 (2021-08-16) +------------------ + +* Feature - Documentation updates for Amazon S3 + +1.98.0 (2021-07-30) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.97.0 (2021-07-28) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.96.2 (2021-07-20) +------------------ + +* Issue - Fix file downloading edge case for 1 byte multipart ranges (#2561). + +1.96.1 (2021-06-10) +------------------ + +* Issue - fix GetBucketLocation location_constraint XML parsing (#2536) + +1.96.0 (2021-06-03) +------------------ + +* Feature - S3 Inventory now supports Bucket Key Status + +1.95.1 (2021-05-24) +------------------ + +* Issue - Raise an error when FIPS is in the ARN's region for Access Point and Object Lambda. + +1.95.0 (2021-05-21) +------------------ + +* Feature - Documentation updates for Amazon S3 + +1.94.1 (2021-05-05) +------------------ + +* Issue - Expose presigned request status to the request handler stack #2513 + +1.94.0 (2021-04-27) +------------------ + +* Feature - Allow S3 Presigner to sign non http verbs like (upload_part, multipart_upload_abort, etc.) #2511 + +1.93.1 (2021-04-12) +------------------ + +* Issue - Fix FIPS and global endpoint behavior for S3 ARNs. + +* Issue - Increases `multipart_threshold` default from 15 megabytes to 100 megabytes. + +1.93.0 (2021-03-24) +------------------ + +* Feature - Documentation updates for Amazon S3 + +1.92.0 (2021-03-18) +------------------ + +* Feature - S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request + +* Feature - Support S3 Object Lambda ARNs in the `bucket:` parameter. + +1.91.0 (2021-03-10) +------------------ + +* Feature - Adding ID element to the CORSRule schema + +1.90.0 (2021-03-08) +------------------ + +* Feature - Amazon S3 Documentation updates + +1.89.0 (2021-02-26) +------------------ + +* Feature - Add RequestPayer to GetObjectTagging and PutObjectTagging. + +1.88.2 (2021-02-25) +------------------ + +* Issue - Support https in `Object#public_url` for `virtual_host`. (#1389) + +* Issue - Fix an issue with the IAD regional endpoint plugin removing `us-east-1` from custom endpoints. + + +1.88.1 (2021-02-12) +------------------ + +* Issue - Fixed an issue with some plugins expecting `#size` to exist on a request body for streaming IO. + +1.88.0 (2021-02-02) +------------------ + +* Feature - Support PrivateLink using the client `:endpoint` option. This patch has a minor behavioral change: a client constructed using `:use_dualstack_endpoint` or `:use_accelerate_endpoint` and `:endpoint` will now raise an `ArgumentError`. + +* Issue - Fix a bug where bucket region detection did not work correctly with ARNs. 
+ +1.87.0 (2020-12-21) +------------------ + +* Feature - Format GetObject's Expires header to be an http-date instead of iso8601 + +1.86.2 (2020-12-14) +------------------ + +* Issue - Use `URI::DEFAULT_PARSER.escape` (an alias for `URI.escape`) in the legacy signer because Ruby 3 removes WEBrick from stdlib. + +1.86.1 (2020-12-11) +------------------ + +* Issue - Bump minimum KMS dependency. (#2449) + +1.86.0 (2020-12-01) +------------------ + +* Feature - S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS + +1.85.0 (2020-11-20) +------------------ + +* Feature - Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI. + +1.84.1 (2020-11-10) +------------------ + +* Issue - Fix presigned urls for Outpost ARNs. + +1.84.0 (2020-11-09) +------------------ + +* Feature - S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication + +1.83.2 (2020-11-06) +------------------ + +* Issue - Fix bug with clients not resolving the correct endpoint in `us-east-1` using access point ARNs. + +1.83.1 (2020-10-19) +------------------ + +* Issue - Fix `multipart_threshold` documentation. + +1.83.0 (2020-10-02) +------------------ + +* Feature - Amazon S3 Object Ownership is a new S3 feature that enables bucket owners to automatically assume ownership of objects that are uploaded to their buckets by other AWS Accounts. + +1.82.0 (2020-09-30) +------------------ + +* Feature - Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features. + +* Feature - Support Outpost Access Point ARNs. + +1.81.1 (2020-09-25) +------------------ + +* Issue - Ignore `amz-sdk-request` header (used for standard and adaptive retries) in the pre-signer. (#2411) + +1.81.0 (2020-09-15) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.80.0 (2020-09-10) +------------------ + +* Feature - Bucket owner verification feature added. This feature introduces the x-amz-expected-bucket-owner and x-amz-source-expected-bucket-owner headers. + +1.79.1 (2020-08-26) +------------------ + +* Issue - Fix `Aws::S3::PresignedPost` using the `use_accelerate_endpoint` option with Resource clients. (#2103) + +1.79.0 (2020-08-25) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.78.0 (2020-08-11) +------------------ + +* Feature - Add support for in-region CopyObject and UploadPartCopy through S3 Access Points + +1.77.0 (2020-08-10) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +* Issue - Fix issue with JRuby and bump minimum version of core. + +1.76.0 (2020-08-07) +------------------ + +* Feature - Updates Amazon S3 API reference documentation. + +* Feature - Updates to the Amazon S3 Encryption Client. This change includes fixes for issues that were reported by Sophie Schmieg from the Google ISE team, and for issues that were discovered by AWS Cryptography. + +1.75.0 (2020-07-21) +------------------ + +* Feature - Add progress_callback to `Object#upload` to support reporting of upload progress. 
(#648) + +1.74.0 (2020-07-08) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +* Feature - Allow the `use_accelerate_endpoint` option to be used with `Aws::S3::PresignedPost`. (#2103) + +1.73.0 (2020-07-02) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.72.0 (2020-06-26) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.71.1 (2020-06-25) +------------------ + +* Issue - Fix uninitialized constant `Aws::S3::Plugins::RetryableBlockIO::Forwardable` (#2348) + +1.71.0 (2020-06-25) +------------------ + +* Issue - This version has been yanked. (#2349). +* Feature - Retry incomplete, streaming responses to `get_object` using the range parameter to avoid re-downloading already processed data (#2326). +* Issue - Reduce memory usage of `IOEncryptor` and `IODecryptor`. + +1.70.0 (2020-06-23) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.69.1 (2020-06-22) +------------------ + +* Issue - Add support for user provided encryption context to `EncryptionV2::Client`. + +1.69.0 (2020-06-18) +------------------ + +* Feature - Add a new version of the S3 Client Side Encryption Client: `EncryptionV2::Client` which supports more modern encryption algorithms. + +1.68.1 (2020-06-11) +------------------ + +* Issue - Republish previous version with correct dependency on `aws-sdk-core`. + +1.68.0 (2020-06-10) +------------------ + +* Issue - This version has been yanked. (#2327). +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +* Feature - Change `:compute_checksums` option to compute checksums only for optional operations when set to true, and no operations when set to false. Operations that require checksums are now modeled with `httpChecksumRequired` and computed automatically in aws-sdk-core. + +1.67.1 (2020-06-01) +------------------ + +* Issue - Add support for Object.exists? and Waiters for the encryption client. + +1.67.0 (2020-05-28) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.66.0 (2020-05-21) +------------------ + +* Feature - Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation. + +1.65.0 (2020-05-18) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +* Feature - Allow S3 presigner to presign non-object operations such as `list_objects`. + +1.64.0 (2020-05-07) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.63.1 (2020-05-04) +------------------ + +* Issue - Handle copy_object, complete_multipart_upload, and upload_part_copy http responses with 200 OK and incomplete bodies as errors. + +1.63.0 (2020-04-22) +------------------ + +* Feature - Add `presigned_request` method to the `Presigner` class. This method returns a URL and headers necessary rather than hoisting them onto the query string. +* Feature - Force HTTPS when using `virtual_host: true` on the `Presigner` class. + +1.62.0 (2020-04-20) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. 
+ +1.61.2 (2020-04-03) +------------------ + +* Issue - Add `put_bucket_lifecycle_configuration` and `put_bucket_replication` as required operations used in the MD5 plugin. + +1.61.1 (2020-03-10) +------------------ + +* Issue - Fix raising in `Object#upload_stream` block not triggering the `Aws::S3::MultipartStreamUploader#abort_upload`. + +1.61.0 (2020-03-09) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. +* Issue - Don't update endpoint on region mismatch errors when using a custom endpoint. + +1.60.2 (2020-02-07) +------------------ + +* Issue - Allow `Aws::S3::Encrypted::Client` to be used with a Resource client. + +1.60.1 (2019-12-19) +------------------ + +* Issue - Allow downcased option for S3 us-east-1 regionalization. + +1.60.0 (2019-12-18) +------------------ + +* Feature - Updates Amazon S3 endpoints allowing you to configure your client to opt-in to using S3 with the us-east-1 regional endpoint, instead of global. + +1.59.1 (2019-12-17) +------------------ + +* Issue - Added validation in the s3 presigner to check for 0 or negative expire_in times. + +1.59.0 (2019-12-05) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +* Issue - Fixed an issue with Access Point ARNs not resigning correctly. + +* Issue - Fixed S3 gemspec to require a minimum core version to support S3 Access Point ARNs. (GitHub PR #2184) + +1.58.0 (2019-12-03) +------------------ + +* Feature - Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets. + +1.57.0 (2019-11-20) +------------------ + +* Feature - This release introduces support for Amazon S3 Replication Time Control, a new feature of S3 Replication that provides a predictable replication time backed by a Service Level Agreement. S3 Replication Time Control helps customers meet compliance or business requirements for data replication, and provides visibility into the replication process with new Amazon CloudWatch Metrics. + +1.56.0 (2019-11-18) +------------------ + +* Feature - Added support for S3 Replication for existing objects. This release allows customers who have requested and been granted access to replicate existing S3 objects across buckets. + +* Issue - Fix issue where `Aws::Errors::MissingRegionError` was not thrown for S3 or S3Control clients. + +1.55.0 (2019-11-15) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.54.0 (2019-11-13) +------------------ + +* Feature - Support `:s3_us_east_1_regional_endpoint` with `regional` to enable IAD regional endpoint for S3. + +1.53.0 (2019-10-31) +------------------ + +* Feature - S3 Inventory now supports a new field 'IntelligentTieringAccessTier' that reports the access tier (frequent or infrequent) of objects stored in Intelligent-Tiering storage class. + +1.52.0 (2019-10-28) +------------------ + +* Feature - Adding support in SelectObjectContent for scanning a portion of an object specified by a scan range. 
+ +1.51.0 (2019-10-23) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.50.0 (2019-10-17) +------------------ + +* Feature - Add support to yield the response in #upload_file if a block is given. + +1.49.0 (2019-10-10) +------------------ + +* Feature - Support `#delete_object` and `#head_object` for encryption client. + +1.48.0 (2019-08-30) +------------------ + +* Feature - Added a `:whitelist_headers` option to S3 presigner. + +1.47.0 (2019-08-28) +------------------ + +* Feature - Added a `:time` option to S3 presigner. + +1.46.0 (2019-07-25) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.45.0 (2019-07-03) +------------------ + +* Feature - Add S3 x-amz-server-side-encryption-context support. + +1.44.0 (2019-07-01) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.43.0 (2019-06-17) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.42.0 (2019-06-04) +------------------ + +* Feature - Documentation updates for s3 + +1.41.0 (2019-05-29) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + +1.40.0 (2019-05-21) +------------------ + +* Feature - API update. + +1.39.0 (2019-05-16) +------------------ + +* Feature - API update. + +1.38.0 (2019-05-15) +------------------ + +* Feature - API update. + +1.37.0 (2019-05-14) +------------------ + +* Feature - API update. + +1.36.1 (2019-04-19) +------------------ + +* Issue - Reduce memory usage of `Aws::S3::Object#upload_stream` when `StringIO` is used + +1.36.0 (2019-03-27) +------------------ + +* Feature - API update. + +1.35.0 (2019-03-22) +------------------ + +* Feature - API update. + +1.34.0 (2019-03-21) +------------------ + +* Feature - API update. + +1.33.0 (2019-03-18) +------------------ + +* Feature - API update. + +1.32.0 (2019-03-14) +------------------ + +* Feature - API update. + +1.31.0 (2019-03-08) +------------------ + +* Feature - API update. + +1.30.1 (2019-01-11) +------------------ + +* Issue - Plugin updates to support client-side monitoring. + +1.30.0 (2018-12-04) +------------------ + +* Feature - API update. + +1.29.0 (2018-11-30) +------------------ + +* Feature - API update. + +1.28.0 (2018-11-29) +------------------ + +* Feature - API update. + +* Issue - Update operations needs Content-MD5 header + +1.27.0 (2018-11-27) +------------------ + +* Feature - API update. + +1.26.0 (2018-11-26) +------------------ + +* Feature - API update. + +1.25.0 (2018-11-20) +------------------ + +* Feature - API update. + +1.24.1 (2018-11-16) +------------------ + +* Issue - Update version dependency on `aws-sdk-core` to support endpoint discovery. + +1.24.0 (2018-11-15) +------------------ + +* Feature - API update. + +1.23.1 (2018-10-30) +------------------ + +* Issue - Support multipart upload empty stream (GitHub Issue #1880) +* Issue - Aws::S3::Encryption::IOAuthDecrypter - Fixes issue where the body tag being split across packets could cause GCM decryption to fail intermittently. + +1.23.0 (2018-10-24) +------------------ + +* Feature - API update. + +1.22.0 (2018-10-23) +------------------ + +* Feature - API update. + +1.21.0 (2018-10-04) +------------------ + +* Feature - API update. 
+ +1.20.0 (2018-09-19) +------------------ + +* Feature - API update. + +1.19.0 (2018-09-06) +------------------ + +* Feature - Adds code paths and plugins for future SDK instrumentation and telemetry. + +1.18.0 (2018-09-05) +------------------ + +* Feature - API update. + +1.17.1 (2018-08-29) +------------------ + +* Issue - Update example for bucket#url (Github Issue#1868) + +* Issue - Support opt-out counting #presigned_url as #api_requests (Github Issue#1866) + +1.17.0 (2018-07-11) +------------------ + +* Feature - API update. + +1.16.1 (2018-07-10) +------------------ + +* Issue - Avoids region redirects for FIPS endpoints + +1.16.0 (2018-06-28) +------------------ + +* Feature - Supports `:version_id` for resource `#download_file` helper. + +* Issue - Reduce memory allocation in checksum and signature generation. + +* Issue - Ensure file handlers are closed when an exception is raised in `Aws::S3::FileUploader`. + +1.15.0 (2018-06-26) +------------------ + +* Feature - API update. + +1.14.0 (2018-06-13) +------------------ + +* Feature - Adds support for `Aws::S3::Object#upload_stream`, allowing streaming uploads outside of a File-based interface. + +1.13.0 (2018-05-22) +------------------ + +* Feature - API update. + +* Issue - Update EventEmitter to Aws::EventEmitter + +1.12.0 (2018-05-18) +------------------ + +* Feature - API update. + +1.11.0 (2018-05-17) +------------------ + +* Feature - Support S3 `SelectObjectContent` API + +1.10.0 (2018-05-07) +------------------ + +* Feature - API update. + +1.9.1 (2018-04-19) +------------------ + +* Issue - S3 accelerate endpoint doesn't work with 'expect' header + +1.9.0 (2018-04-04) +------------------ + +* Feature - API update. + +1.8.2 (2018-02-23) +------------------ + +* Issue - Add support for AES/CBC/PKCS7Padding to encryption client. + +1.8.1 (2018-02-16) +------------------ + +* Issue - Enhance S3 Multipart Downloader performance #1709 + +* Issue - Fix Ruby 2.5 warnings. + +1.8.0 (2017-11-29) +------------------ + +* Feature - API update. + +1.7.0 (2017-11-17) +------------------ + +* Feature - API update. + +* Issue - Fix S3 unit test with latest endpoint + +1.6.0 (2017-11-07) +------------------ + +* Feature - API update. + +* Issue - Update S3 unit test with latest endpoint + +1.5.0 (2017-10-06) +------------------ + +* Feature - API update. + +* Issue - Update OJ Json parser error code +* Issue - Fix typo + +1.4.0 (2017-09-14) +------------------ + +* Feature - API update. + +1.3.0 (2017-09-13) +------------------ + +* Feature - API update. + +1.2.0 (2017-09-07) +------------------ + +* Feature - API update. + +1.1.0 (2017-09-01) +------------------ + +* Feature - API update. + +* Issue - Add object streaming behavior smoke test + +* Issue - Update `aws-sdk-s3` gemspec metadata. + +1.0.0 (2017-08-29) +------------------ + +1.0.0.rc15 (2017-08-15) +------------------ + +* Feature - API update. + +* Issue - Aws::S3 - Fix Multipart Downloader bug issue #1566, now file batches exist in a newly created tmp directory under destination directory. + +1.0.0.rc14 (2017-08-01) +------------------ + +* Feature - API update. + +1.0.0.rc13 (2017-07-25) +------------------ + +* Feature - API update. + +1.0.0.rc12 (2017-07-13) +------------------ + +* Feature - API update. + +1.0.0.rc11 (2017-07-06) +------------------ + +* Feature - API update. + +1.0.0.rc10 (2017-06-29) +------------------ + +* Feature - API update. + +1.0.0.rc9 (2017-06-26) +------------------ + +* Feature - API update. 
+ +1.0.0.rc8 (2017-05-23) +------------------ + +* Feature - API update. + +1.0.0.rc7 (2017-05-09) +------------------ + +* Issue - Correct dependency on `aws-sdk-kms` gem. + +1.0.0.rc6 (2017-05-09) +------------------ + +* Feature - API update. + +1.0.0.rc5 (2017-05-05) +------------------ + +* Feature - Aws::S3 - Added Multipart Download Helper feature to support different `:mode` ("auto", "single_request", "get_range") in downloading large objects with `#download_file` in multipart when possible. + +1.0.0.rc4 (2017-04-21) +------------------ + +* Feature - API update. + +1.0.0.rc3 (2017-03-09) +------------------ + +* Issue - Correct dependency on `aws-sdk-kms` gem. + +1.0.0.rc2 (2016-12-09) +------------------ + +* Feature - API update. + +1.0.0.rc1 (2016-12-05) +------------------ + +* Feature - Initial preview release of the `aws-sdk-s3` gem. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/VERSION b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/VERSION new file mode 100644 index 0000000..83bd345 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/VERSION @@ -0,0 +1 @@ +1.121.0 diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3.rb new file mode 100644 index 0000000..626a54a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +require 'aws-sdk-kms' +require 'aws-sigv4' +require 'aws-sdk-core' + +require_relative 'aws-sdk-s3/types' +require_relative 'aws-sdk-s3/client_api' +require_relative 'aws-sdk-s3/plugins/endpoints.rb' +require_relative 'aws-sdk-s3/client' +require_relative 'aws-sdk-s3/errors' +require_relative 'aws-sdk-s3/waiters' +require_relative 'aws-sdk-s3/resource' +require_relative 'aws-sdk-s3/endpoint_parameters' +require_relative 'aws-sdk-s3/endpoint_provider' +require_relative 'aws-sdk-s3/endpoints' +require_relative 'aws-sdk-s3/bucket' +require_relative 'aws-sdk-s3/bucket_acl' +require_relative 'aws-sdk-s3/bucket_cors' +require_relative 'aws-sdk-s3/bucket_lifecycle' +require_relative 'aws-sdk-s3/bucket_lifecycle_configuration' +require_relative 'aws-sdk-s3/bucket_logging' +require_relative 'aws-sdk-s3/bucket_notification' +require_relative 'aws-sdk-s3/bucket_policy' +require_relative 'aws-sdk-s3/bucket_request_payment' +require_relative 'aws-sdk-s3/bucket_tagging' +require_relative 'aws-sdk-s3/bucket_versioning' +require_relative 'aws-sdk-s3/bucket_website' +require_relative 'aws-sdk-s3/multipart_upload' +require_relative 'aws-sdk-s3/multipart_upload_part' +require_relative 'aws-sdk-s3/object' +require_relative 'aws-sdk-s3/object_acl' +require_relative 'aws-sdk-s3/object_summary' +require_relative 'aws-sdk-s3/object_version' +require_relative 'aws-sdk-s3/customizations' +require_relative 'aws-sdk-s3/event_streams' + +# This module provides support for Amazon Simple Storage Service. This module is available in the +# `aws-sdk-s3` gem. +# +# # Client +# +# The {Client} class provides one method for each API operation. Operation +# methods each accept a hash of request parameters and return a response +# structure. +# +# s3 = Aws::S3::Client.new +# resp = s3.abort_multipart_upload(params) +# +# See {Client} for more information. +# +# # Errors +# +# Errors returned from Amazon Simple Storage Service are defined in the +# {Errors} module and all extend {Errors::ServiceError}. +# +# begin +# # do stuff +# rescue Aws::S3::Errors::ServiceError +# # rescues all Amazon Simple Storage Service API errors +# end +# +# See {Errors} for more information. +# +# @!group service +module Aws::S3 + + GEM_VERSION = '1.121.0' + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket.rb new file mode 100644 index 0000000..ff9eb4a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket.rb @@ -0,0 +1,1018 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class Bucket + + extend Aws::Deprecations + + # @overload def initialize(name, options = {}) + # @param [String] name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? 
args.pop.dup : {} + @name = extract_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def name + @name + end + + # Date the bucket was created. This date can change when making changes + # to your bucket, such as editing its bucket policy. + # @return [Time] + def creation_date + data[:creation_date] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # @raise [NotImplementedError] + # @api private + def load + msg = "#load is not implemented, data only available via enumeration" + raise NotImplementedError, msg + end + alias :reload :load + + # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. + # @return [Types::Bucket] + # Returns the data for this {Bucket}. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @param [Hash] options ({}) + # @return [Boolean] + # Returns `true` if the Bucket exists. + def exists?(options = {}) + begin + wait_until_exists(options.merge(max_attempts: 1)) + true + rescue Aws::Waiters::Errors::UnexpectedError => e + raise e.error + rescue Aws::Waiters::Errors::WaiterFailed + false + end + end + + # @param [Hash] options ({}) + # @option options [Integer] :max_attempts (20) + # @option options [Float] :delay (5) + # @option options [Proc] :before_attempt + # @option options [Proc] :before_wait + # @return [Bucket] + def wait_until_exists(options = {}, &block) + options, params = separate_params_and_options(options) + waiter = Waiters::BucketExists.new(options) + yield_waiter_and_warn(waiter, &block) if block_given? + waiter.wait(params.merge(bucket: @name)) + Bucket.new({ + name: @name, + client: @client + }) + end + + # @param [Hash] options ({}) + # @option options [Integer] :max_attempts (20) + # @option options [Float] :delay (5) + # @option options [Proc] :before_attempt + # @option options [Proc] :before_wait + # @return [Bucket] + def wait_until_not_exists(options = {}, &block) + options, params = separate_params_and_options(options) + waiter = Waiters::BucketNotExists.new(options) + yield_waiter_and_warn(waiter, &block) if block_given? + waiter.wait(params.merge(bucket: @name)) + Bucket.new({ + name: @name, + client: @client + }) + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. 
The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket.create({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read + # create_bucket_configuration: { + # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2 + # }, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write: "GrantWrite", + # grant_write_acp: "GrantWriteACP", + # object_lock_enabled_for_bucket: false, + # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the bucket. + # @option options [Types::CreateBucketConfiguration] :create_bucket_configuration + # The configuration information for the bucket. 
+ # @option options [String] :grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions on + # the bucket. + # @option options [String] :grant_read + # Allows grantee to list the objects in the bucket. + # @option options [String] :grant_read_acp + # Allows grantee to read the bucket ACL. + # @option options [String] :grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # @option options [Boolean] :object_lock_enabled_for_bucket + # Specifies whether you want S3 Object Lock to be enabled for the new + # bucket. + # @option options [String] :object_ownership + # The container element for object ownership for a bucket's ownership + # controls. + # + # BucketOwnerPreferred - Objects uploaded to the bucket change ownership + # to the bucket owner if the objects are uploaded with the + # `bucket-owner-full-control` canned ACL. + # + # ObjectWriter - The uploading account will own the object if the object + # is uploaded with the `bucket-owner-full-control` canned ACL. + # + # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no + # longer affect permissions. The bucket owner automatically owns and has + # full control over every object in the bucket. The bucket only accepts + # PUT requests that don't specify an ACL or bucket owner full control + # ACLs, such as the `bucket-owner-full-control` canned ACL or an + # equivalent form of this ACL expressed in the XML format. + # @return [Types::CreateBucketOutput] + def create(options = {}) + options = options.merge(bucket: @name) + resp = @client.create_bucket(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @name) + resp = @client.delete_bucket(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket.delete_objects({ + # delete: { # required + # objects: [ # required + # { + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # }, + # ], + # quiet: false, + # }, + # mfa: "MFA", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param [Hash] options ({}) + # @option options [required, Types::Delete] :delete + # Container for the request. + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. 
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Boolean] :bypass_governance_retention + # Specifies whether you want to delete this object even if it has a + # Governance-type Object Lock in place. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # This checksum algorithm must be the same for all parts and it match + # the checksum value supplied in the `CreateMultipartUpload` request. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [Types::DeleteObjectsOutput] + def delete_objects(options = {}) + options = options.merge(bucket: @name) + resp = @client.delete_objects(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object = bucket.put_object({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # body: source_file, + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_length: 1, + # content_md5: "ContentMD5", + # content_type: "ContentType", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # key: "ObjectKey", # required + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # 
object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the object. For more information, see + # [Canned ACL][1]. + # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + # @option options [String, StringIO, File] :body + # Object data. + # @option options [String] :cache_control + # Can be used to specify caching behavior along the request/reply chain. + # For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + # @option options [String] :content_disposition + # Specifies presentational information for the object. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # @option options [String] :content_language + # The language the content is in. + # @option options [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the message (without the + # headers) according to RFC 1864. This header can be used as a message + # integrity check to verify that the data is the same data that was + # originally sent. Although it is optional, we recommend using the + # Content-MD5 mechanism as an end-to-end integrity check. For more + # information about REST request authentication, see [REST + # Authentication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # @option options [String] :content_type + # A standard MIME type describing the format of the contents. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [required, String] :key + # Object key for which the PUT action was initiated. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. + # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. 
Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. For + # information about object metadata, see [Object Key and Metadata][1]. + # + # In the following example, the request header sets the redirect to an + # object (anotherPage.html) in the same bucket: + # + # `x-amz-website-redirect-location: /anotherPage.html` + # + # In the following example, the request header sets the object redirect + # to another website: + # + # `x-amz-website-redirect-location: http://www.example.com/` + # + # For more information about website hosting in Amazon S3, see [Hosting + # Websites on Amazon S3][2] and [How to Configure Website Page + # Redirects][3]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # If `x-amz-server-side-encryption` is present and has the value of + # `aws:kms`, this header specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetrical customer + # managed key that was used for the object. If you specify + # `x-amz-server-side-encryption:aws:kms`, but do not provide` + # x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the + # Amazon Web Services managed key to protect the data. If the KMS key + # does not exist in the same account issuing the command, you must use + # the full ARN and not just the ID. + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. 
+ # + # Specifying this header with a PUT action doesn’t affect bucket-level + # settings for S3 Bucket Key. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. (For example, "Key1=Value1") + # @option options [String] :object_lock_mode + # The Object Lock mode that you want to apply to this object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # The date and time when you want this object's Object Lock to expire. + # Must be formatted as a timestamp parameter. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether a legal hold will be applied to this object. For + # more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Object] + def put_object(options = {}) + options = options.merge(bucket: @name) + @client.put_object(options) + Object.new( + bucket_name: @name, + key: options[:key], + client: @client + ) + end + + # @!group Associations + + # @return [BucketAcl] + def acl + BucketAcl.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketCors] + def cors + BucketCors.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketLifecycle] + def lifecycle + BucketLifecycle.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketLifecycleConfiguration] + def lifecycle_configuration + BucketLifecycleConfiguration.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketLogging] + def logging + BucketLogging.new( + bucket_name: @name, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # multipart_uploads = bucket.multipart_uploads({ + # delimiter: "Delimiter", + # encoding_type: "url", # accepts url + # key_marker: "KeyMarker", + # prefix: "Prefix", + # upload_id_marker: "UploadIdMarker", + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :delimiter + # Character you use to group keys. + # + # All keys that contain the same string between the prefix, if + # specified, and the first occurrence of the delimiter after the prefix + # are grouped under a single result element, `CommonPrefixes`. If you + # don't specify the prefix parameter, then the substring starts at the + # beginning of the key. The keys that are grouped under `CommonPrefixes` + # result element are not returned elsewhere in the response. + # @option options [String] :encoding_type + # Requests Amazon S3 to encode the object keys in the response and + # specifies the encoding method to use. 
An object key may contain any + # Unicode character; however, XML 1.0 parser cannot parse some + # characters, such as characters with an ASCII value from 0 to 10. For + # characters that are not supported in XML 1.0, you can add this + # parameter to request that Amazon S3 encode the keys in the response. + # @option options [String] :key_marker + # Together with upload-id-marker, this parameter specifies the multipart + # upload after which listing should begin. + # + # If `upload-id-marker` is not specified, only the keys + # lexicographically greater than the specified `key-marker` will be + # included in the list. + # + # If `upload-id-marker` is specified, any multipart uploads for a key + # equal to the `key-marker` might also be included, provided those + # multipart uploads have upload IDs lexicographically greater than the + # specified `upload-id-marker`. + # @option options [String] :prefix + # Lists in-progress uploads only for those keys that begin with the + # specified prefix. You can use prefixes to separate a bucket into + # different grouping of keys. (You can think of using prefix to make + # groups in the same way you'd use a folder in a file system.) + # @option options [String] :upload_id_marker + # Together with key-marker, specifies the multipart upload after which + # listing should begin. If key-marker is not specified, the + # upload-id-marker parameter is ignored. Otherwise, any multipart + # uploads for a key equal to the key-marker might be included in the + # list only if they have an upload ID lexicographically greater than the + # specified `upload-id-marker`. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [MultipartUpload::Collection] + def multipart_uploads(options = {}) + batches = Enumerator.new do |y| + options = options.merge(bucket: @name) + resp = @client.list_multipart_uploads(options) + resp.each_page do |page| + batch = [] + page.data.uploads.each do |u| + batch << MultipartUpload.new( + bucket_name: @name, + object_key: u.key, + id: u.upload_id, + data: u, + client: @client + ) + end + y.yield(batch) + end + end + MultipartUpload::Collection.new(batches) + end + + # @return [BucketNotification] + def notification + BucketNotification.new( + bucket_name: @name, + client: @client + ) + end + + # @param [String] key + # @return [Object] + def object(key) + Object.new( + bucket_name: @name, + key: key, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # object_versions = bucket.object_versions({ + # delimiter: "Delimiter", + # encoding_type: "url", # accepts url + # key_marker: "KeyMarker", + # prefix: "Prefix", + # version_id_marker: "VersionIdMarker", + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :delimiter + # A delimiter is a character that you specify to group keys. All keys + # that contain the same string between the `prefix` and the first + # occurrence of the delimiter are grouped under a single result element + # in CommonPrefixes. These groups are counted as one result against the + # max-keys limitation. These keys are not returned elsewhere in the + # response. + # @option options [String] :encoding_type + # Requests Amazon S3 to encode the object keys in the response and + # specifies the encoding method to use. 
An object key may contain any + # Unicode character; however, XML 1.0 parser cannot parse some + # characters, such as characters with an ASCII value from 0 to 10. For + # characters that are not supported in XML 1.0, you can add this + # parameter to request that Amazon S3 encode the keys in the response. + # @option options [String] :key_marker + # Specifies the key to start with when listing objects in a bucket. + # @option options [String] :prefix + # Use this parameter to select only those keys that begin with the + # specified prefix. You can use prefixes to separate a bucket into + # different groupings of keys. (You can think of using prefix to make + # groups in the same way you'd use a folder in a file system.) You can + # use prefix with delimiter to roll up numerous objects into a single + # result under CommonPrefixes. + # @option options [String] :version_id_marker + # Specifies the object version you want to start listing from. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [ObjectVersion::Collection] + def object_versions(options = {}) + batches = Enumerator.new do |y| + options = options.merge(bucket: @name) + resp = @client.list_object_versions(options) + resp.each_page do |page| + batch = [] + page.data.versions_delete_markers.each do |v| + batch << ObjectVersion.new( + bucket_name: @name, + object_key: v.key, + id: v.version_id, + data: v, + client: @client + ) + end + y.yield(batch) + end + end + ObjectVersion::Collection.new(batches) + end + + # @example Request syntax with placeholder values + # + # objects = bucket.objects({ + # delimiter: "Delimiter", + # encoding_type: "url", # accepts url + # prefix: "Prefix", + # fetch_owner: false, + # start_after: "StartAfter", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :delimiter + # A delimiter is a character you use to group keys. + # @option options [String] :encoding_type + # Encoding type used by Amazon S3 to encode object keys in the response. + # @option options [String] :prefix + # Limits the response to keys that begin with the specified prefix. + # @option options [Boolean] :fetch_owner + # The owner field is not present in listV2 by default, if you want to + # return owner field with each key in the result then set the fetch + # owner field to true. + # @option options [String] :start_after + # StartAfter is where you want Amazon S3 to start listing from. Amazon + # S3 starts listing after this specified key. StartAfter can be any key + # in the bucket. + # @option options [String] :request_payer + # Confirms that the requester knows that she or he will be charged for + # the list objects request in V2 style. Bucket owners need not specify + # this parameter in their requests. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
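+ # The collection returned below pages through `list_objects_v2` lazily,
+ # yielding one `ObjectSummary` per key. A minimal usage sketch, in the
+ # style of the examples above (the bucket name and prefix are placeholder
+ # assumptions, not values from this gem):
+ #
+ # @example Enumerate keys under a prefix
+ #
+ #   bucket = Aws::S3::Bucket.new("example-bucket")
+ #   bucket.objects(prefix: "logs/").each do |summary|
+ #     puts "#{summary.key} (#{summary.size} bytes)"
+ #   end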
+ # @return [ObjectSummary::Collection] + def objects(options = {}) + batches = Enumerator.new do |y| + options = options.merge(bucket: @name) + resp = @client.list_objects_v2(options) + resp.each_page do |page| + batch = [] + page.data.contents.each do |c| + batch << ObjectSummary.new( + bucket_name: @name, + key: c.key, + data: c, + client: @client + ) + end + y.yield(batch) + end + end + ObjectSummary::Collection.new(batches) + end + + # @return [BucketPolicy] + def policy + BucketPolicy.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketRequestPayment] + def request_payment + BucketRequestPayment.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketTagging] + def tagging + BucketTagging.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketVersioning] + def versioning + BucketVersioning.new( + bucket_name: @name, + client: @client + ) + end + + # @return [BucketWebsite] + def website + BucketWebsite.new( + bucket_name: @name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { name: @name } + end + deprecated(:identifiers) + + private + + def extract_name(args, options) + value = args[0] || options.delete(:name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :name" + else + msg = "expected :name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def yield_waiter_and_warn(waiter, &block) + if !@waiter_block_warned + msg = "pass options to configure the waiter; "\ + "yielding the waiter is deprecated" + warn(msg) + @waiter_block_warned = true + end + yield(waiter.waiter) + end + + def separate_params_and_options(options) + opts = Set.new( + [:client, :max_attempts, :delay, :before_attempt, :before_wait] + ) + waiter_opts = {} + waiter_params = {} + options.each_pair do |key, value| + if opts.include?(key) + waiter_opts[key] = value + else + waiter_params[key] = value + end + end + waiter_opts[:client] ||= @client + [waiter_opts, waiter_params] + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_acl.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_acl.rb new file mode 100644 index 0000000..f8a79fe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_acl.rb @@ -0,0 +1,304 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketAcl + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Container for the bucket owner's display name and ID. + # @return [Types::Owner] + def owner + data[:owner] + end + + # A list of grants. 
+ # @return [Array] + def grants + data[:grants] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketAcl}. + # Returns `self` making it possible to chain methods. + # + # bucket_acl.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_acl(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketAclOutput] + # Returns the data for this {BucketAcl}. Calls + # {Client#get_bucket_acl} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. 
+ # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_acl.put({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read + # access_control_policy: { + # grants: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP + # }, + # ], + # owner: { + # display_name: "DisplayName", + # id: "ID", + # }, + # }, + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write: "GrantWrite", + # grant_write_acp: "GrantWriteACP", + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the bucket. + # @option options [Types::AccessControlPolicy] :access_control_policy + # Contains the elements that set the ACL permissions for an object per + # grantee. + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. This header must be + # used as a message integrity check to verify that the request body was + # not corrupted in transit. For more information, go to [RFC 1864.][1] + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions on + # the bucket. + # @option options [String] :grant_read + # Allows grantee to list the objects in the bucket. 
+ # @option options [String] :grant_read_acp + # Allows grantee to read the bucket ACL. + # @option options [String] :grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_acl(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_cors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_cors.rb new file mode 100644 index 0000000..763b7de --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_cors.rb @@ -0,0 +1,293 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketCors + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # A set of origins and methods (cross-origin access that you want to + # allow). You can add up to 100 rules to the configuration. + # @return [Array] + def cors_rules + data[:cors_rules] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketCors}. + # Returns `self` making it possible to chain methods. + # + # bucket_cors.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_cors(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketCorsOutput] + # Returns the data for this {BucketCors}. Calls + # {Client#get_bucket_cors} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. 
Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_cors.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. 
If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_cors(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_cors.put({ + # cors_configuration: { # required + # cors_rules: [ # required + # { + # id: "ID", + # allowed_headers: ["AllowedHeader"], + # allowed_methods: ["AllowedMethod"], # required + # allowed_origins: ["AllowedOrigin"], # required + # expose_headers: ["ExposeHeader"], + # max_age_seconds: 1, + # }, + # ], + # }, + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [required, Types::CORSConfiguration] :cors_configuration + # Describes the cross-origin access configuration for objects in an + # Amazon S3 bucket. For more information, see [Enabling Cross-Origin + # Resource Sharing][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. This header must be + # used as a message integrity check to verify that the request body was + # not corrupted in transit. For more information, go to [RFC 1864.][1] + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
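+ # A minimal sketch of the call described above (the origin and method are
+ # placeholder values; per the request syntax, `allowed_methods` and
+ # `allowed_origins` are the only required fields of a rule):
+ #
+ # @example Allow cross-origin GETs from a single origin
+ #
+ #   bucket_cors.put(
+ #     cors_configuration: {
+ #       cors_rules: [{
+ #         allowed_methods: ["GET"],
+ #         allowed_origins: ["https://example.com"],
+ #         max_age_seconds: 3600
+ #       }]
+ #     }
+ #   )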
+ # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_cors(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle.rb new file mode 100644 index 0000000..b316f18 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle.rb @@ -0,0 +1,296 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketLifecycle + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Container for a lifecycle rule. + # @return [Array] + def rules + data[:rules] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketLifecycle}. + # Returns `self` making it possible to chain methods. + # + # bucket_lifecycle.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_lifecycle(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketLifecycleOutput] + # Returns the data for this {BucketLifecycle}. Calls + # {Client#get_bucket_lifecycle} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. 
+ # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_lifecycle.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
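+ # @example Remove the lifecycle configuration (bucket name illustrative)
+ #
+ # Aws::S3::BucketLifecycle.new('my-bucket').delete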
+ # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_lifecycle(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_lifecycle.put({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # lifecycle_configuration: { + # rules: [ # required + # { + # expiration: { + # date: Time.now, + # days: 1, + # expired_object_delete_marker: false, + # }, + # id: "ID", + # prefix: "Prefix", # required + # status: "Enabled", # required, accepts Enabled, Disabled + # transition: { + # date: Time.now, + # days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # }, + # noncurrent_version_transition: { + # noncurrent_days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # newer_noncurrent_versions: 1, + # }, + # noncurrent_version_expiration: { + # noncurrent_days: 1, + # newer_noncurrent_versions: 1, + # }, + # abort_incomplete_multipart_upload: { + # days_after_initiation: 1, + # }, + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Types::LifecycleConfiguration] :lifecycle_configuration + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
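+ # @example Expire objects under a prefix after 30 days
+ #
+ # A minimal sketch of the request syntax above; the bucket name, rule
+ # id, and prefix are illustrative placeholders:
+ #
+ # lifecycle = Aws::S3::BucketLifecycle.new('my-bucket')
+ # lifecycle.put(
+ # lifecycle_configuration: {
+ # rules: [{
+ # id: 'expire-logs',
+ # prefix: 'logs/',
+ # status: 'Enabled',
+ # expiration: { days: 30 },
+ # }],
+ # }
+ # )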
+ # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_lifecycle(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb new file mode 100644 index 0000000..429a19c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb @@ -0,0 +1,316 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketLifecycleConfiguration + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Container for a lifecycle rule. + # @return [Array] + def rules + data[:rules] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketLifecycleConfiguration}. + # Returns `self` making it possible to chain methods. + # + # bucket_lifecycle_configuration.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_lifecycle_configuration(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketLifecycleConfigurationOutput] + # Returns the data for this {BucketLifecycleConfiguration}. Calls + # {Client#get_bucket_lifecycle_configuration} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. 
+ # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_lifecycle_configuration.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
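+ # @example Reaching this resource from a {Bucket} (name illustrative)
+ #
+ # Aws::S3::Bucket.new('my-bucket').lifecycle_configuration.delete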
+ # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_lifecycle(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_lifecycle_configuration.put({ + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # lifecycle_configuration: { + # rules: [ # required + # { + # expiration: { + # date: Time.now, + # days: 1, + # expired_object_delete_marker: false, + # }, + # id: "ID", + # prefix: "Prefix", + # filter: { + # prefix: "Prefix", + # tag: { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # object_size_greater_than: 1, + # object_size_less_than: 1, + # and: { + # prefix: "Prefix", + # tags: [ + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # object_size_greater_than: 1, + # object_size_less_than: 1, + # }, + # }, + # status: "Enabled", # required, accepts Enabled, Disabled + # transitions: [ + # { + # date: Time.now, + # days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # }, + # ], + # noncurrent_version_transitions: [ + # { + # noncurrent_days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # newer_noncurrent_versions: 1, + # }, + # ], + # noncurrent_version_expiration: { + # noncurrent_days: 1, + # newer_noncurrent_versions: 1, + # }, + # abort_incomplete_multipart_upload: { + # days_after_initiation: 1, + # }, + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Types::BucketLifecycleConfiguration] :lifecycle_configuration + # Container for lifecycle rules. You can add as many as 1,000 rules. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
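+ # @example Transition large objects to Glacier after 90 days
+ #
+ # An illustrative sketch of the filter-based syntax above; the bucket
+ # name, rule id, prefix, and size threshold are placeholders:
+ #
+ # config = Aws::S3::BucketLifecycleConfiguration.new('my-bucket')
+ # config.put(
+ # lifecycle_configuration: {
+ # rules: [{
+ # id: 'archive-large-objects',
+ # status: 'Enabled',
+ # filter: {
+ # and: {
+ # prefix: 'archive/',
+ # object_size_greater_than: 1024 * 1024,
+ # },
+ # },
+ # transitions: [{ days: 90, storage_class: 'GLACIER' }],
+ # }],
+ # }
+ # )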
+ # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_lifecycle_configuration(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_logging.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_logging.rb new file mode 100644 index 0000000..77140a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_logging.rb @@ -0,0 +1,275 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketLogging + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Describes where logs are stored and the prefix that Amazon S3 assigns + # to all log object keys for a bucket. For more information, see [PUT + # Bucket logging][1] in the *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + # @return [Types::LoggingEnabled] + def logging_enabled + data[:logging_enabled] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketLogging}. + # Returns `self` making it possible to chain methods. + # + # bucket_logging.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_logging(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketLoggingOutput] + # Returns the data for this {BucketLogging}. Calls + # {Client#get_bucket_logging} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. 
+ #
+ # ## Basic Usage
+ #
+ # The waiter polls until it is successful, until it fails by
+ # entering a terminal state, or until a maximum number of attempts
+ # have been made.
+ #
+ # # polls in a loop until condition is true
+ # resource.wait_until(options) {|resource| condition}
+ #
+ # ## Example
+ #
+ # instance.wait_until(max_attempts:10, delay:5) do |instance|
+ # instance.state.name == 'running'
+ # end
+ #
+ # ## Configuration
+ #
+ # You can configure the maximum number of polling attempts, and the
+ # delay (in seconds) between each polling attempt. The waiting condition is
+ # set by passing a block to {#wait_until}:
+ #
+ # # poll for ~25 seconds
+ # resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+ #
+ # ## Callbacks
+ #
+ # You can be notified before each polling attempt and before each
+ # delay. If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # # poll for 1 hour, instead of a number of attempts
+ # proc = Proc.new do |attempts, response|
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ #
+ # # disable max attempts
+ # instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is successful, it returns the Resource. When a waiter
+ # fails, it raises an error.
+ #
+ # begin
+ # resource.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # @yieldparam [Resource] resource to be used in the waiting condition.
+ #
+ # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+ # terminates because the waiter has entered a state that it will not
+ # transition out of, preventing success.
+ #
+ # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+ # configured maximum number of attempts have been made and the waiter
+ # is not yet successful.
+ #
+ # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+ # encountered while polling for a resource that is not expected.
+ # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_logging.put({ + # bucket_logging_status: { # required + # logging_enabled: { + # target_bucket: "TargetBucket", # required + # target_grants: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE + # }, + # ], + # target_prefix: "TargetPrefix", # required + # }, + # }, + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [required, Types::BucketLoggingStatus] :bucket_logging_status + # Container for logging status information. + # @option options [String] :content_md5 + # The MD5 hash of the `PutBucketLogging` request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
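+ # @example Enable access logging to a separate bucket
+ #
+ # A minimal sketch; the bucket names and prefix are illustrative, and
+ # the target bucket must already permit S3 log delivery:
+ #
+ # logging = Aws::S3::BucketLogging.new('my-bucket')
+ # logging.put(
+ # bucket_logging_status: {
+ # logging_enabled: {
+ # target_bucket: 'my-log-bucket',
+ # target_prefix: 'access-logs/my-bucket/',
+ # },
+ # }
+ # )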
+ # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_logging(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_notification.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_notification.rb new file mode 100644 index 0000000..7930158 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_notification.rb @@ -0,0 +1,310 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketNotification + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # The topic to which notifications are sent and the events for which + # notifications are generated. + # @return [Array] + def topic_configurations + data[:topic_configurations] + end + + # The Amazon Simple Queue Service queues to publish messages to and the + # events for which to publish messages. + # @return [Array] + def queue_configurations + data[:queue_configurations] + end + + # Describes the Lambda functions to invoke and the events for which to + # invoke them. + # @return [Array] + def lambda_function_configurations + data[:lambda_function_configurations] + end + + # Enables delivery of events to Amazon EventBridge. + # @return [Types::EventBridgeConfiguration] + def event_bridge_configuration + data[:event_bridge_configuration] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketNotification}. + # Returns `self` making it possible to chain methods. + # + # bucket_notification.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_notification_configuration(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::NotificationConfiguration] + # Returns the data for this {BucketNotification}. Calls + # {Client#get_bucket_notification_configuration} if {#data_loaded?} is `false`. 
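+ #
+ # For example (bucket name illustrative), reading any attribute on an
+ # unloaded resource triggers this call implicitly:
+ #
+ # notification = Aws::S3::Bucket.new('my-bucket').notification
+ # notification.data_loaded? # => false
+ # notification.topic_configurations # issues the GET and caches the result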
+ def data
+ load unless @data
+ @data
+ end
+
+ # @return [Boolean]
+ # Returns `true` if this resource is loaded. Accessing attributes or
+ # {#data} on an unloaded resource will trigger a call to {#load}.
+ def data_loaded?
+ !!@data
+ end
+
+ # @deprecated Use [Aws::S3::Client] #wait_until instead
+ #
+ # Waiter polls an API operation until a resource enters a desired
+ # state.
+ #
+ # @note The waiting operation is performed on a copy. The original resource
+ # remains unchanged.
+ #
+ # ## Basic Usage
+ #
+ # The waiter polls until it is successful, until it fails by
+ # entering a terminal state, or until a maximum number of attempts
+ # have been made.
+ #
+ # # polls in a loop until condition is true
+ # resource.wait_until(options) {|resource| condition}
+ #
+ # ## Example
+ #
+ # instance.wait_until(max_attempts:10, delay:5) do |instance|
+ # instance.state.name == 'running'
+ # end
+ #
+ # ## Configuration
+ #
+ # You can configure the maximum number of polling attempts, and the
+ # delay (in seconds) between each polling attempt. The waiting condition is
+ # set by passing a block to {#wait_until}:
+ #
+ # # poll for ~25 seconds
+ # resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+ #
+ # ## Callbacks
+ #
+ # You can be notified before each polling attempt and before each
+ # delay. If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # # poll for 1 hour, instead of a number of attempts
+ # proc = Proc.new do |attempts, response|
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ #
+ # # disable max attempts
+ # instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is successful, it returns the Resource. When a waiter
+ # fails, it raises an error.
+ #
+ # begin
+ # resource.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # @yieldparam [Resource] resource to be used in the waiting condition.
+ #
+ # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+ # terminates because the waiter has entered a state that it will not
+ # transition out of, preventing success.
+ #
+ # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+ # configured maximum number of attempts have been made and the waiter
+ # is not yet successful.
+ #
+ # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+ # encountered while polling for a resource that is not expected.
+ # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_notification.put({ + # notification_configuration: { # required + # topic_configurations: [ + # { + # id: "NotificationId", + # topic_arn: "TopicArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # queue_configurations: [ + # { + # id: "NotificationId", + # queue_arn: "QueueArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # lambda_function_configurations: [ + # { + # id: "NotificationId", + # lambda_function_arn: "LambdaFunctionArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, 
s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # event_bridge_configuration: { + # }, + # }, + # expected_bucket_owner: "AccountId", + # skip_destination_validation: false, + # }) + # @param [Hash] options ({}) + # @option options [required, Types::NotificationConfiguration] :notification_configuration + # A container for specifying the notification configuration of the + # bucket. If this element is empty, notifications are turned off for the + # bucket. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [Boolean] :skip_destination_validation + # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. + # True or false value. + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_notification_configuration(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_policy.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_policy.rb new file mode 100644 index 0000000..4d5b0ac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_policy.rb @@ -0,0 +1,273 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketPolicy + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? 
args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # The bucket policy as a JSON document. + # @return [IO] + def policy + data[:policy] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketPolicy}. + # Returns `self` making it possible to chain methods. + # + # bucket_policy.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_policy(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketPolicyOutput] + # Returns the data for this {BucketPolicy}. Calls + # {Client#get_bucket_policy} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. 
+ # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_policy.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_policy(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_policy.put({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # confirm_remove_self_bucket_access: false, + # policy: "Policy", # required + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The MD5 hash of the request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Boolean] :confirm_remove_self_bucket_access + # Set this parameter to true to confirm that you want to remove your + # permissions to change this bucket policy in the future. + # @option options [required, String] :policy + # The bucket policy as a JSON document. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
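+ # @example Build the policy document as a Ruby hash
+ #
+ # An illustrative sketch; the bucket name, statement id, and principal
+ # are placeholders, and `JSON.generate` handles the serialization:
+ #
+ # require 'json'
+ #
+ # policy = {
+ # 'Version' => '2012-10-17',
+ # 'Statement' => [{
+ # 'Sid' => 'AllowPublicRead',
+ # 'Effect' => 'Allow',
+ # 'Principal' => '*',
+ # 'Action' => 's3:GetObject',
+ # 'Resource' => 'arn:aws:s3:::my-bucket/*',
+ # }],
+ # }
+ # Aws::S3::BucketPolicy.new('my-bucket').put(policy: JSON.generate(policy))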
+ # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_policy(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_region_cache.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_region_cache.rb new file mode 100644 index 0000000..b0c1b25 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_region_cache.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +require 'thread' + +module Aws + module S3 + class BucketRegionCache + + def initialize + @regions = {} + @listeners = [] + @mutex = Mutex.new + end + + # Registers a block as a callback. This listener is called when a + # new bucket/region pair is added to the cache. + # + # S3::BUCKET_REGIONS.bucket_added do |bucket_name, region_name| + # # ... + # end + # + # This happens when a request is made against the classic endpoint, + # "s3.amazonaws.com" and an error is returned requiring the request + # to be resent with Signature Version 4. At this point, multiple + # requests are made to discover the bucket region so that a v4 + # signature can be generated. + # + # An application can register listeners here to avoid these extra + # requests in the future. By constructing an {S3::Client} with + # the proper region, a proper signature can be generated and redirects + # avoided. + # @return [void] + def bucket_added(&block) + if block + @mutex.synchronize { @listeners << block } + else + raise ArgumentError, 'missing required block' + end + end + + # @param [String] bucket_name + # @return [String,nil] Returns the cached region for the named bucket. + # Returns `nil` if the bucket is not in the cache. + # @api private + def [](bucket_name) + @mutex.synchronize { @regions[bucket_name] } + end + + # Caches a bucket's region. Calling this method will trigger each + # of the {#bucket_added} listener callbacks. + # @param [String] bucket_name + # @param [String] region_name + # @return [void] + # @api private + def []=(bucket_name, region_name) + @mutex.synchronize do + @regions[bucket_name] = region_name + @listeners.each { |block| block.call(bucket_name, region_name) } + end + end + + # @api private + def clear + @mutex.synchronize { @regions = {} } + end + + # @return [Hash] Returns a hash of cached bucket names and region names. 
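+ # For example (bucket and region values illustrative), an application
+ # can watch discoveries and snapshot the cache:
+ #
+ # Aws::S3::BUCKET_REGIONS.bucket_added do |bucket, region|
+ # warn "bucket #{bucket} resolved to region #{region}"
+ # end
+ # Aws::S3::BUCKET_REGIONS.to_h # => e.g. { "my-bucket" => "us-west-2" }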
+ def to_hash + @mutex.synchronize do + @regions.dup + end + end + alias to_h to_hash + + end + + # @api private + BUCKET_REGIONS = BucketRegionCache.new + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_request_payment.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_request_payment.rb new file mode 100644 index 0000000..105a04a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_request_payment.rb @@ -0,0 +1,260 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketRequestPayment + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Specifies who pays for the download and request fees. + # @return [String] + def payer + data[:payer] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketRequestPayment}. + # Returns `self` making it possible to chain methods. + # + # bucket_request_payment.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_request_payment(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketRequestPaymentOutput] + # Returns the data for this {BucketRequestPayment}. Calls + # {Client#get_bucket_request_payment} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. 
If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # # poll for 1 hour, instead of a number of attempts
+ # proc = Proc.new do |attempts, response|
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ #
+ # # disable max attempts
+ # instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is successful, it returns the Resource. When a waiter
+ # fails, it raises an error.
+ #
+ # begin
+ # resource.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # @yieldparam [Resource] resource to be used in the waiting condition.
+ #
+ # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+ # terminates because the waiter has entered a state that it will not
+ # transition out of, preventing success.
+ #
+ # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+ # configured maximum number of attempts have been made and the waiter
+ # is not yet successful.
+ #
+ # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+ # encountered while polling for a resource that is not expected.
+ #
+ # @raise [NotImplementedError] Raised when the resource does not
+ # support `#reload`, which polling requires.
+ #
+ # @option options [Integer] :max_attempts (10) Maximum number of
+ # attempts
+ # @option options [Integer] :delay (10) Delay between each
+ # attempt in seconds
+ # @option options [Proc] :before_attempt (nil) Callback
+ # invoked before each attempt
+ # @option options [Proc] :before_wait (nil) Callback
+ # invoked before each wait
+ # @return [Resource] if the waiter was successful
+ def wait_until(options = {}, &block)
+ self_copy = self.dup
+ attempts = 0
+ options[:max_attempts] = 10 unless options.key?(:max_attempts)
+ options[:delay] ||= 10
+ options[:poller] = Proc.new do
+ attempts += 1
+ if block.call(self_copy)
+ [:success, self_copy]
+ else
+ self_copy.reload unless attempts == options[:max_attempts]
+ :retry
+ end
+ end
+ Aws::Waiters::Waiter.new(options).wait({})
+ end
+
+ # @!group Actions
+
+ # @example Request syntax with placeholder values
+ #
+ # bucket_request_payment.put({
+ # content_md5: "ContentMD5",
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # request_payment_configuration: { # required
+ # payer: "Requester", # required, accepts Requester, BucketOwner
+ # },
+ # expected_bucket_owner: "AccountId",
+ # })
+ # @param [Hash] options ({})
+ # @option options [String] :content_md5
+ # The base64-encoded 128-bit MD5 digest of the data. You must use this
+ # header as a message integrity check to verify that the request body
+ # was not corrupted in transit. For more information, see [RFC 1864][1].
+ #
+ # For requests made using the Amazon Web Services Command Line Interface
+ # (CLI) or Amazon Web Services SDKs, this field is calculated
+ # automatically.
+ #
+ #
+ #
+ # [1]: http://www.ietf.org/rfc/rfc1864.txt
+ # @option options [String] :checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ # `400 Bad Request`. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any provided
+ # `ChecksumAlgorithm` parameter.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [required, Types::RequestPaymentConfiguration] :request_payment_configuration + # Container for Payer. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_request_payment(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_tagging.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_tagging.rb new file mode 100644 index 0000000..6871b58 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_tagging.rb @@ -0,0 +1,282 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketTagging + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Contains the tag set. + # @return [Array] + def tag_set + data[:tag_set] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketTagging}. + # Returns `self` making it possible to chain methods. + # + # bucket_tagging.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_tagging(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketTaggingOutput] + # Returns the data for this {BucketTagging}. Calls + # {Client#get_bucket_tagging} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? 
!!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # A waiter polls until it is successful, until it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the + # configured maximum number of attempts have been made and the waiter is not + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # support `#wait_until`. + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_tagging.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied).
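+ # + # As a minimal usage sketch (the bucket name below is a placeholder, and + # region/credentials are assumed to come from the environment), the entire + # tag set can be removed through this resource interface: + # + # tagging = Aws::S3::BucketTagging.new("example-bucket") + # tagging.delete # issues DeleteBucketTagging for the bucket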
+ # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_tagging(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_tagging.put({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # tagging: { # required + # tag_set: [ # required + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [required, Types::Tagging] :tagging + # Container for the `TagSet` and `Tag` elements. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_tagging(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_versioning.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_versioning.rb new file mode 100644 index 0000000..67d7ef4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_versioning.rb @@ -0,0 +1,387 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketVersioning + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # The versioning state of the bucket. + # @return [String] + def status + data[:status] + end + + # Specifies whether MFA delete is enabled in the bucket versioning + # configuration. This element is only returned if the bucket has been + # configured with MFA delete. If the bucket has never been so + # configured, this element is not returned. + # @return [String] + def mfa_delete + data[:mfa_delete] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketVersioning}. + # Returns `self` making it possible to chain methods. + # + # bucket_versioning.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_versioning(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketVersioningOutput] + # Returns the data for this {BucketVersioning}. Calls + # {Client#get_bucket_versioning} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # A waiter polls until it is successful, until it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter.
+ # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the + # configured maximum number of attempts have been made and the waiter is not + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # support `#wait_until`. + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_versioning.enable({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # mfa: "MFA", + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use + # this header as a message integrity check to verify that the request + # body was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def enable(options = {}) + options = Aws::Util.deep_merge(options, + bucket: @bucket_name, + versioning_configuration: { + status: "Enabled" + } + ) + resp = @client.put_bucket_versioning(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_versioning.put({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # mfa: "MFA", + # versioning_configuration: { # required + # mfa_delete: "Enabled", # accepts Enabled, Disabled + # status: "Enabled", # accepts Enabled, Suspended + # }, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use + # this header as a message integrity check to verify that the request + # body was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # @option options [required, Types::VersioningConfiguration] :versioning_configuration + # Container for setting the versioning state. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_versioning(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_versioning.suspend({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # mfa: "MFA", + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data.
You must use + # this header as a message integrity check to verify that the request + # body was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def suspend(options = {}) + options = Aws::Util.deep_merge(options, + bucket: @bucket_name, + versioning_configuration: { + status: "Suspended" + } + ) + resp = @client.put_bucket_versioning(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_website.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_website.rb new file mode 100644 index 0000000..758d6a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/bucket_website.rb @@ -0,0 +1,323 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class BucketWebsite + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, options = {}) + # @param [String] bucket_name + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? 
args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # Specifies the redirect behavior of all requests to a website endpoint + # of an Amazon S3 bucket. + # @return [Types::RedirectAllRequestsTo] + def redirect_all_requests_to + data[:redirect_all_requests_to] + end + + # The name of the index document for the website (for example + # `index.html`). + # @return [Types::IndexDocument] + def index_document + data[:index_document] + end + + # The object key name of the website error document to use for 4XX class + # errors. + # @return [Types::ErrorDocument] + def error_document + data[:error_document] + end + + # Rules that define when a redirect is applied and the redirect + # behavior. + # @return [Array<Types::RoutingRule>] + def routing_rules + data[:routing_rules] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {BucketWebsite}. + # Returns `self` making it possible to chain methods. + # + # bucket_website.reload.data + # + # @return [self] + def load + resp = @client.get_bucket_website(bucket: @bucket_name) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::GetBucketWebsiteOutput] + # Returns the data for this {BucketWebsite}. Calls + # {Client#get_bucket_website} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # A waiter polls until it is successful, until it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. + # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the + # configured maximum number of attempts have been made and the waiter is not + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # support `#wait_until`. + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket_website.delete({ + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def delete(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.delete_bucket_website(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # bucket_website.put({ + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # website_configuration: { # required + # error_document: { + # key: "ObjectKey", # required + # }, + # index_document: { + # suffix: "Suffix", # required + # }, + # redirect_all_requests_to: { + # host_name: "HostName", # required + # protocol: "http", # accepts http, https + # }, + # routing_rules: [ + # { + # condition: { + # http_error_code_returned_equals: "HttpErrorCodeReturnedEquals", + # key_prefix_equals: "KeyPrefixEquals", + # }, + # redirect: { # required + # host_name: "HostName", + # http_redirect_code: "HttpRedirectCode", + # protocol: "http", # accepts http, https + # replace_key_prefix_with: "ReplaceKeyPrefixWith", + # replace_key_with: "ReplaceKeyWith", + # }, + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically.
+ # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [required, Types::WebsiteConfiguration] :website_configuration + # Container for the request. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [EmptyStructure] + def put(options = {}) + options = options.merge(bucket: @bucket_name) + resp = @client.put_bucket_website(options) + resp.data + end + + # @!group Associations + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { bucket_name: @bucket_name } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client.rb new file mode 100644 index 0000000..3eb2ec6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client.rb @@ -0,0 +1,15498 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +require 'seahorse/client/plugins/content_length.rb' +require 'aws-sdk-core/plugins/credentials_configuration.rb' +require 'aws-sdk-core/plugins/logging.rb' +require 'aws-sdk-core/plugins/param_converter.rb' +require 'aws-sdk-core/plugins/param_validator.rb' +require 'aws-sdk-core/plugins/user_agent.rb' +require 'aws-sdk-core/plugins/helpful_socket_errors.rb' +require 'aws-sdk-core/plugins/retry_errors.rb' +require 'aws-sdk-core/plugins/global_configuration.rb' +require 'aws-sdk-core/plugins/regional_endpoint.rb' +require 'aws-sdk-core/plugins/endpoint_discovery.rb' +require 'aws-sdk-core/plugins/endpoint_pattern.rb' +require 'aws-sdk-core/plugins/response_paging.rb' +require 'aws-sdk-core/plugins/stub_responses.rb' +require 'aws-sdk-core/plugins/idempotency_token.rb' +require 'aws-sdk-core/plugins/jsonvalue_converter.rb' +require 'aws-sdk-core/plugins/client_metrics_plugin.rb' +require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb' +require 'aws-sdk-core/plugins/transfer_encoding.rb' +require 'aws-sdk-core/plugins/http_checksum.rb' +require 'aws-sdk-core/plugins/checksum_algorithm.rb' +require 'aws-sdk-core/plugins/defaults_mode.rb' +require 'aws-sdk-core/plugins/recursion_detection.rb' +require 'aws-sdk-core/plugins/sign.rb' +require 'aws-sdk-core/plugins/protocols/rest_xml.rb' +require 'aws-sdk-s3/plugins/accelerate.rb' +require 'aws-sdk-s3/plugins/arn.rb' +require 'aws-sdk-s3/plugins/bucket_dns.rb' +require 'aws-sdk-s3/plugins/bucket_name_restrictions.rb' +require 'aws-sdk-s3/plugins/dualstack.rb' +require 'aws-sdk-s3/plugins/expect_100_continue.rb' +require 'aws-sdk-s3/plugins/get_bucket_location_fix.rb' +require 'aws-sdk-s3/plugins/http_200_errors.rb' +require 'aws-sdk-s3/plugins/iad_regional_endpoint.rb' +require 'aws-sdk-s3/plugins/location_constraint.rb' +require 'aws-sdk-s3/plugins/md5s.rb' +require 'aws-sdk-s3/plugins/redirects.rb' +require 'aws-sdk-s3/plugins/s3_host_id.rb' +require 'aws-sdk-s3/plugins/s3_signer.rb' +require 'aws-sdk-s3/plugins/sse_cpk.rb' +require 'aws-sdk-s3/plugins/streaming_retry.rb' +require 'aws-sdk-s3/plugins/url_encoded_keys.rb' +require 'aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb' +require 'aws-sdk-core/plugins/event_stream_configuration.rb' + +Aws::Plugins::GlobalConfiguration.add_identifier(:s3) + +module Aws::S3 + # An API client for S3. To construct a client, you need to configure a `:region` and `:credentials`. + # + # client = Aws::S3::Client.new( + # region: region_name, + # credentials: credentials, + # # ... + # ) + # + # For details on configuring region and credentials see + # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html). + # + # See {#initialize} for a full list of supported configuration options. 
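+ # + # As an illustrative sketch (the bucket name below is a placeholder; with + # `stub_responses: true` the client uses placeholder credentials and makes + # no HTTP requests), stubbed data can be returned for any operation: + # + # client = Aws::S3::Client.new(stub_responses: true) + # client.stub_responses(:list_buckets, buckets: [{ name: "example" }]) + # client.list_buckets.buckets.map(&:name) #=> ["example"]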
+ class Client < Seahorse::Client::Base + + include Aws::ClientStubs + + @identifier = :s3 + + set_api(ClientApi::API) + + add_plugin(Seahorse::Client::Plugins::ContentLength) + add_plugin(Aws::Plugins::CredentialsConfiguration) + add_plugin(Aws::Plugins::Logging) + add_plugin(Aws::Plugins::ParamConverter) + add_plugin(Aws::Plugins::ParamValidator) + add_plugin(Aws::Plugins::UserAgent) + add_plugin(Aws::Plugins::HelpfulSocketErrors) + add_plugin(Aws::Plugins::RetryErrors) + add_plugin(Aws::Plugins::GlobalConfiguration) + add_plugin(Aws::Plugins::RegionalEndpoint) + add_plugin(Aws::Plugins::EndpointDiscovery) + add_plugin(Aws::Plugins::EndpointPattern) + add_plugin(Aws::Plugins::ResponsePaging) + add_plugin(Aws::Plugins::StubResponses) + add_plugin(Aws::Plugins::IdempotencyToken) + add_plugin(Aws::Plugins::JsonvalueConverter) + add_plugin(Aws::Plugins::ClientMetricsPlugin) + add_plugin(Aws::Plugins::ClientMetricsSendPlugin) + add_plugin(Aws::Plugins::TransferEncoding) + add_plugin(Aws::Plugins::HttpChecksum) + add_plugin(Aws::Plugins::ChecksumAlgorithm) + add_plugin(Aws::Plugins::DefaultsMode) + add_plugin(Aws::Plugins::RecursionDetection) + add_plugin(Aws::Plugins::Sign) + add_plugin(Aws::Plugins::Protocols::RestXml) + add_plugin(Aws::S3::Plugins::Accelerate) + add_plugin(Aws::S3::Plugins::ARN) + add_plugin(Aws::S3::Plugins::BucketDns) + add_plugin(Aws::S3::Plugins::BucketNameRestrictions) + add_plugin(Aws::S3::Plugins::Dualstack) + add_plugin(Aws::S3::Plugins::Expect100Continue) + add_plugin(Aws::S3::Plugins::GetBucketLocationFix) + add_plugin(Aws::S3::Plugins::Http200Errors) + add_plugin(Aws::S3::Plugins::IADRegionalEndpoint) + add_plugin(Aws::S3::Plugins::LocationConstraint) + add_plugin(Aws::S3::Plugins::Md5s) + add_plugin(Aws::S3::Plugins::Redirects) + add_plugin(Aws::S3::Plugins::S3HostId) + add_plugin(Aws::S3::Plugins::S3Signer) + add_plugin(Aws::S3::Plugins::SseCpk) + add_plugin(Aws::S3::Plugins::StreamingRetry) + add_plugin(Aws::S3::Plugins::UrlEncodedKeys) + add_plugin(Aws::S3::Plugins::SkipWholeMultipartGetChecksums) + add_plugin(Aws::Plugins::EventStreamConfiguration) + add_plugin(Aws::S3::Plugins::Endpoints) + + # @overload initialize(options) + # @param [Hash] options + # @option options [required, Aws::CredentialProvider] :credentials + # Your AWS credentials. This can be an instance of any one of the + # following classes: + # + # * `Aws::Credentials` - Used for configuring static, non-refreshing + # credentials. + # + # * `Aws::SharedCredentials` - Used for loading static credentials from a + # shared file, such as `~/.aws/config`. + # + # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role. + # + # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to + # assume a role after providing credentials via the web. + # + # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an + # access token generated from `aws login`. + # + # * `Aws::ProcessCredentials` - Used for loading credentials from a + # process that outputs to stdout. + # + # * `Aws::InstanceProfileCredentials` - Used for loading credentials + # from an EC2 IMDS on an EC2 instance. + # + # * `Aws::ECSCredentials` - Used for loading credentials from + # instances running in ECS. + # + # * `Aws::CognitoIdentityCredentials` - Used for loading credentials + # from the Cognito Identity service. 
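+ # + # For example, a minimal static-credentials sketch (the key values + # below are placeholders, not working credentials): + # + # credentials = Aws::Credentials.new("ACCESS_KEY_ID", "SECRET_ACCESS_KEY") + # client = Aws::S3::Client.new(region: "us-east-1", credentials: credentials)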
+ # + # When `:credentials` are not configured directly, the following + # locations will be searched for credentials: + # + # * `Aws.config[:credentials]` + # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options. + # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY'] + # * `~/.aws/credentials` + # * `~/.aws/config` + # * EC2/ECS IMDS instance profile - When used by default, the timeouts + # are very aggressive. Construct and pass an instance of + # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to + # enable retries and extended timeouts. Instance profile credential + # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED'] + # to true. + # + # @option options [required, String] :region + # The AWS region to connect to. The configured `:region` is + # used to determine the service `:endpoint`. When not passed, + # a default `:region` is searched for in the following locations: + # + # * `Aws.config[:region]` + # * `ENV['AWS_REGION']` + # * `ENV['AMAZON_REGION']` + # * `ENV['AWS_DEFAULT_REGION']` + # * `~/.aws/credentials` + # * `~/.aws/config` + # + # @option options [String] :access_key_id + # + # @option options [Boolean] :active_endpoint_cache (false) + # When set to `true`, a thread polling for endpoints will be running in + # the background every 60 secs (default). Defaults to `false`. + # + # @option options [Boolean] :adaptive_retry_wait_to_fill (true) + # Used only in `adaptive` retry mode. When true, the request will sleep + # until there is sufficient client side capacity to retry the request. + # When false, the request will raise a `RetryCapacityNotAvailableError` and will + # not retry, instead of sleeping. + # + # @option options [Boolean] :client_side_monitoring (false) + # When `true`, client-side metrics will be collected for all API requests from + # this client. + # + # @option options [String] :client_side_monitoring_client_id ("") + # Allows you to provide an identifier for this client which will be attached to + # all generated client side metrics. Defaults to an empty string. + # + # @option options [String] :client_side_monitoring_host ("127.0.0.1") + # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client + # side monitoring agent is running on, where client metrics will be published via UDP. + # + # @option options [Integer] :client_side_monitoring_port (31000) + # Required for publishing client metrics. The port that the client side monitoring + # agent is running on, where client metrics will be published via UDP. + # + # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher) + # Allows you to provide a custom client-side monitoring publisher class. By default, + # will use the Client Side Monitoring Agent Publisher. + # + # @option options [Boolean] :compute_checksums (true) + # When `true`, an MD5 checksum will be computed and sent in the Content-MD5 + # header for :put_object and :upload_part. When `false`, MD5 checksums + # will not be computed for these operations. Checksums are still computed + # for operations requiring them. Checksum errors returned by Amazon S3 are + # automatically retried up to `:retry_limit` times. + # + # @option options [Boolean] :convert_params (true) + # When `true`, an attempt is made to coerce request parameters into + # the required types. + # + # @option options [Boolean] :correct_clock_skew (true) + # Used only in `standard` and `adaptive` retry modes.
Specifies whether to apply + # a clock skew correction and retry requests with skewed client clocks. + # + # @option options [String] :defaults_mode ("legacy") + # See {Aws::DefaultsModeConfiguration} for a list of the + # accepted modes and the configuration defaults that are included. + # + # @option options [Boolean] :disable_host_prefix_injection (false) + # Set to `true` to disable the SDK automatically adding a host prefix + # to the default service endpoint when available. + # + # @option options [String] :endpoint + # The client endpoint is normally constructed from the `:region` + # option. You should only configure an `:endpoint` when connecting + # to test or custom endpoints. This should be a valid HTTP(S) URI. + # + # @option options [Integer] :endpoint_cache_max_entries (1000) + # Used for the maximum size limit of the LRU cache storing endpoints data + # for endpoint discovery enabled operations. Defaults to 1000. + # + # @option options [Integer] :endpoint_cache_max_threads (10) + # Used for the maximum number of threads in use for polling endpoints to be cached; defaults to 10. + # + # @option options [Integer] :endpoint_cache_poll_interval (60) + # When :endpoint_discovery and :active_endpoint_cache are enabled, + # use this option to configure the time interval in seconds for making + # requests to fetch endpoint information. Defaults to 60 sec. + # + # @option options [Boolean] :endpoint_discovery (false) + # When set to `true`, endpoint discovery will be enabled for operations when available. + # + # @option options [Proc] :event_stream_handler + # When an EventStream or Proc object is provided, it will be used as callback for each chunk of event stream response received along the way. + # + # @option options [Boolean] :follow_redirects (true) + # When `true`, this client will follow 307 redirects returned + # by Amazon S3. + # + # @option options [Boolean] :force_path_style (false) + # When set to `true`, the bucket name is always left in the + # request URI and never moved to the host as a sub-domain. + # + # @option options [Proc] :input_event_stream_handler + # When an EventStream or Proc object is provided, it can be used for sending events for the event stream. + # + # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default) + # The log formatter. + # + # @option options [Symbol] :log_level (:info) + # The log level to send messages to the `:logger` at. + # + # @option options [Logger] :logger + # The Logger instance to send log messages to. If this option + # is not set, logging will be disabled. + # + # @option options [Integer] :max_attempts (3) + # An integer representing the maximum number of attempts that will be made for + # a single request, including the initial attempt. For example, + # setting this value to 5 will result in a request being retried up to + # 4 times. Used in `standard` and `adaptive` retry modes. + # + # @option options [Proc] :output_event_stream_handler + # When an EventStream or Proc object is provided, it will be used as callback for each chunk of event stream response received along the way. + # + # @option options [String] :profile ("default") + # Used when loading credentials from the shared credentials file + # at HOME/.aws/credentials. When not specified, 'default' is used. + # + # @option options [Boolean] :require_https_for_sse_cpk (true) + # When `true`, the endpoint **must** be HTTPS for all operations + # where server-side-encryption is used with customer-provided keys. + # This should only be disabled for local testing.
+ # + # @option options [Proc] :retry_backoff + # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay. + # This option is only used in the `legacy` retry mode. + # + # @option options [Float] :retry_base_delay (0.3) + # The base delay in seconds used by the default backoff function. This option + # is only used in the `legacy` retry mode. + # + # @option options [Symbol] :retry_jitter (:none) + # A delay randomiser function used by the default backoff function. + # Some predefined functions can be referenced by name - :none, :equal, :full, + # otherwise a Proc that takes and returns a number. This option is only used + # in the `legacy` retry mode. + # + # @see https://www.awsarchitectureblog.com/2015/03/backoff.html + # + # @option options [Integer] :retry_limit (3) + # The maximum number of times to retry failed requests. Only + # ~ 500 level server errors and certain ~ 400 level client errors + # are retried. Generally, these are throttling errors, data + # checksum errors, networking errors, timeout errors, auth errors, + # endpoint discovery, and errors from expired credentials. + # This option is only used in the `legacy` retry mode. + # + # @option options [Integer] :retry_max_delay (0) + # The maximum number of seconds to delay between retries (0 for no limit) + # used by the default backoff function. This option is only used in the + # `legacy` retry mode. + # + # @option options [String] :retry_mode ("legacy") + # Specifies which retry algorithm to use. Values are: + # + # * `legacy` - The pre-existing retry behavior. This is the default value if + # no retry mode is provided. + # + # * `standard` - A standardized set of retry rules across the AWS SDKs. + # This includes support for retry quotas, which limit the number of + # unsuccessful retries a client can make. + # + # * `adaptive` - An experimental retry mode that includes all the + # functionality of `standard` mode along with automatic client side + # throttling. This is a provisional mode that may change behavior + # in the future. + # + # + # @option options [Boolean] :s3_disable_multiregion_access_points (false) + # When set to `false`, this option will raise errors when multi-region + # access point ARNs are used. Multi-region access points can potentially + # result in cross region requests. + # + # @option options [String] :s3_us_east_1_regional_endpoint ("legacy") + # Pass in `regional` to enable the `us-east-1` regional endpoint. + # Defaults to `legacy` mode which uses the global endpoint. + # + # @option options [Boolean] :s3_use_arn_region (true) + # For S3 ARNs passed into the `:bucket` parameter, this option will + # use the region in the ARN, allowing for cross-region requests to + # be made. Set to `false` to use the client's region instead. + # + # @option options [String] :secret_access_key + # + # @option options [String] :session_token + # + # @option options [Boolean] :stub_responses (false) + # Causes the client to return stubbed responses. By default + # fake responses are generated and returned. You can specify + # the response data to return or errors to raise by calling + # {ClientStubs#stub_responses}. See {ClientStubs} for more information. + # + # ** Please note ** When response stubbing is enabled, no HTTP + # requests are made, and retries are disabled. + # + # @option options [Aws::TokenProvider] :token_provider + # A Bearer Token Provider.
This can be an instance of any one of the + # following classes: + # + # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing + # tokens. + # + # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an + # access token generated from `aws login`. + # + # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain` + # will be used to search for tokens configured for your profile in shared configuration files. + # + # @option options [Boolean] :use_accelerate_endpoint (false) + # When set to `true`, accelerated bucket endpoints will be used + # for all object operations. You must first enable accelerate for + # each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). + # + # @option options [Boolean] :use_dualstack_endpoint + # When set to `true`, dualstack enabled endpoints (with `.aws` TLD) + # will be used if available. + # + # @option options [Boolean] :use_fips_endpoint + # When set to `true`, fips compatible endpoints will be used if available. + # When a `fips` region is used, the region is normalized and this config + # is set to `true`. + # + # @option options [Boolean] :validate_params (true) + # When `true`, request parameters are validated before + # sending the request. + # + # @option options [Aws::S3::EndpointProvider] :endpoint_provider + # The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::S3::EndpointParameters` + # + # @option options [URI::HTTP,String] :http_proxy A proxy to send + # requests through. Formatted like 'http://proxy.com:123'. + # + # @option options [Float] :http_open_timeout (15) The number of + # seconds to wait when opening an HTTP session before raising a + # `Timeout::Error`. + # + # @option options [Float] :http_read_timeout (60) The default + # number of seconds to wait for response data. This value can + # safely be set per-request on the session. + # + # @option options [Float] :http_idle_timeout (5) The number of + # seconds a connection is allowed to sit idle before it is + # considered stale. Stale connections are closed and removed + # from the pool before making a request. + # + # @option options [Float] :http_continue_timeout (1) The number of + # seconds to wait for a 100-continue response before sending the + # request body. This option has no effect unless the request has + # "Expect" header set to "100-continue". Defaults to `nil` which + # disables this behaviour. This value can safely be set per + # request on the session. + # + # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout + # in seconds. + # + # @option options [Boolean] :http_wire_trace (false) When `true`, + # HTTP debug output will be sent to the `:logger`. + # + # @option options [Boolean] :ssl_verify_peer (true) When `true`, + # SSL peer certificates are verified when establishing a + # connection. + # + # @option options [String] :ssl_ca_bundle Full path to the SSL + # certificate authority bundle file that should be used when + # verifying peer certificates. If you do not pass + # `:ssl_ca_bundle` or `:ssl_ca_directory`, the system default + # will be used if available. + # + # @option options [String] :ssl_ca_directory Full path of the + # directory that contains the unbundled SSL certificate + # authority files for verifying peer certificates.
If you do + # not pass `:ssl_ca_bundle` or `:ssl_ca_directory`, the + # system default will be used if available. + # + def initialize(*args) + super + end + + # @!group API Operations + + # This action aborts a multipart upload. After a multipart upload is + # aborted, no additional parts can be uploaded using that upload ID. The + # storage consumed by any previously uploaded parts will be freed. + # However, if any part uploads are currently in progress, those part + # uploads might or might not succeed. As a result, it might be necessary + # to abort a given multipart upload multiple times in order to + # completely free all storage consumed by all parts. + # + # To verify that all parts have been removed, so you don't get charged + # for the part storage, you should call the [ListParts][1] action and + # ensure that the parts list is empty. + # + # For information about permissions required to use the multipart + # upload, see [Multipart Upload and Permissions][2]. + # + # The following operations are related to `AbortMultipartUpload`: + # + # * [CreateMultipartUpload][3] + # + # * [UploadPart][4] + # + # * [CompleteMultipartUpload][5] + # + # * [ListParts][1] + # + # * [ListMultipartUploads][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + # + # @option params [required, String] :bucket + # The bucket name to which the upload was taking place. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Key of the object for which the multipart upload was initiated. + # + # @option params [required, String] :upload_id + # Upload ID that identifies the multipart upload. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests.
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::AbortMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::AbortMultipartUploadOutput#request_charged #request_charged} => String + # + # + # @example Example: To abort a multipart upload + # + # # The following example aborts a multipart upload. + # + # resp = client.abort_multipart_upload({ + # bucket: "examplebucket", + # key: "bigobject", + # upload_id: "xadcOB_7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--", + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.abort_multipart_upload({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # upload_id: "MultipartUploadId", # required + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload AWS API Documentation + # + # @overload abort_multipart_upload(params = {}) + # @param [Hash] params ({}) + def abort_multipart_upload(params = {}, options = {}) + req = build_request(:abort_multipart_upload, params) + req.send_request(options) + end + + # Completes a multipart upload by assembling previously uploaded parts. + # + # You first initiate the multipart upload and then upload all parts + # using the [UploadPart][1] operation. After successfully uploading all + # relevant parts of an upload, you call this action to complete the + # upload. Upon receiving this request, Amazon S3 concatenates all the + # parts in ascending order by part number to create a new object. In the + # Complete Multipart Upload request, you must provide the parts list. + # You must ensure that the parts list is complete. This action + # concatenates the parts that you provide in the list. For each part in + # the list, you must provide the part number and the `ETag` value, + # returned after that part was uploaded. + # + # Processing of a Complete Multipart Upload request could take several + # minutes to complete. After Amazon S3 begins processing the request, it + # sends an HTTP response header that specifies a 200 OK response. While + # processing is in progress, Amazon S3 periodically sends white space + # characters to keep the connection from timing out. Because a request + # could fail after the initial 200 OK response has been sent, it is + # important that you check the response body to determine whether the + # request succeeded. + # + # Note that if `CompleteMultipartUpload` fails, applications should be + # prepared to retry the failed requests. For more information, see + # [Amazon S3 Error Best Practices][2]. + # + # You cannot use `Content-Type: application/x-www-form-urlencoded` with + # Complete Multipart Upload requests. 
Also, if you do not provide a + # `Content-Type` header, `CompleteMultipartUpload` returns a 200 OK + # response. + # + # For more information about multipart uploads, see [Uploading Objects + # Using Multipart Upload][3]. + # + # For information about permissions required to use the multipart upload + # API, see [Multipart Upload and Permissions][4]. + # + # `CompleteMultipartUpload` has the following special errors: + # + # * Error code: `EntityTooSmall` + # + # * Description: Your proposed upload is smaller than the minimum + # allowed object size. Each part must be at least 5 MB in size, + # except the last part. + # + # * 400 Bad Request + # + # * Error code: `InvalidPart` + # + # * Description: One or more of the specified parts could not be + # found. The part might not have been uploaded, or the specified + # entity tag might not have matched the part's entity tag. + # + # * 400 Bad Request + # + # * Error code: `InvalidPartOrder` + # + # * Description: The list of parts was not in ascending order. The + # parts list must be specified in order by part number. + # + # * 400 Bad Request + # + # * Error code: `NoSuchUpload` + # + # * Description: The specified multipart upload does not exist. The + # upload ID might be invalid, or the multipart upload might have + # been aborted or completed. + # + # * 404 Not Found + # + # The following operations are related to `CompleteMultipartUpload`: + # + # * [CreateMultipartUpload][5] + # + # * [UploadPart][1] + # + # * [AbortMultipartUpload][6] + # + # * [ListParts][7] + # + # * [ListMultipartUploads][8] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + # + # @option params [required, String] :bucket + # Name of the bucket to which the multipart upload was initiated. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Object key for which the multipart upload was initiated. + # + # @option params [Types::CompletedMultipartUpload] :multipart_upload + # The container for the multipart upload request information. + # + # @option params [required, String] :upload_id + # ID for the initiated multipart upload. + # + # @option params [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :sse_customer_algorithm + # The server-side encryption (SSE) algorithm used to encrypt the object. + # This parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key
+ # The server-side encryption (SSE) customer managed key. This parameter
+ # is needed only when the object was created using a checksum algorithm.
+ # For more information, see [Protecting data using SSE-C keys][1] in the
+ # *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key_md5
+ # The MD5 digest of the server-side encryption (SSE) customer managed
+ # key. This parameter is needed only when the object was created using a
+ # checksum algorithm. For more information, see [Protecting data using
+ # SSE-C keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @return [Types::CompleteMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CompleteMultipartUploadOutput#location #location} => String
+ # * {Types::CompleteMultipartUploadOutput#bucket #bucket} => String
+ # * {Types::CompleteMultipartUploadOutput#key #key} => String
+ # * {Types::CompleteMultipartUploadOutput#expiration #expiration} => String
+ # * {Types::CompleteMultipartUploadOutput#etag #etag} => String
+ # * {Types::CompleteMultipartUploadOutput#checksum_crc32 #checksum_crc32} => String
+ # * {Types::CompleteMultipartUploadOutput#checksum_crc32c #checksum_crc32c} => String
+ # * {Types::CompleteMultipartUploadOutput#checksum_sha1 #checksum_sha1} => String
+ # * {Types::CompleteMultipartUploadOutput#checksum_sha256 #checksum_sha256} => String
+ # * {Types::CompleteMultipartUploadOutput#server_side_encryption #server_side_encryption} => String
+ # * {Types::CompleteMultipartUploadOutput#version_id #version_id} => String
+ # * {Types::CompleteMultipartUploadOutput#ssekms_key_id #ssekms_key_id} => String
+ # * {Types::CompleteMultipartUploadOutput#bucket_key_enabled #bucket_key_enabled} => Boolean
+ # * {Types::CompleteMultipartUploadOutput#request_charged #request_charged} => String
+ #
+ #
+ # @example Example: To complete multipart upload
+ #
+ #   # The following example completes a multipart upload.
+ #
+ #   resp = client.complete_multipart_upload({
+ #     bucket: "examplebucket",
+ #     key: "bigobject",
+ #     multipart_upload: {
+ #       parts: [
+ #         {
+ #           etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"",
+ #           part_number: 1,
+ #         },
+ #         {
+ #           etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"",
+ #           part_number: 2,
+ #         },
+ #       ],
+ #     },
+ #     upload_id: "7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     bucket: "acexamplebucket",
+ #     etag: "\"4d9031c7644d8081c2829f4ea23c55f7-2\"",
+ #     key: "bigobject",
+ #     location: "https://examplebucket.s3.<Region>.amazonaws.com/bigobject",
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.complete_multipart_upload({
+ #     bucket: "BucketName", # required
+ #     key: "ObjectKey", # required
+ #     multipart_upload: {
+ #       parts: [
+ #         {
+ #           etag: "ETag",
+ #           checksum_crc32: "ChecksumCRC32",
+ #           checksum_crc32c: "ChecksumCRC32C",
+ #           checksum_sha1: "ChecksumSHA1",
+ #           checksum_sha256: "ChecksumSHA256",
+ #           part_number: 1,
+ #         },
+ #       ],
+ #     },
+ #     upload_id: "MultipartUploadId", # required
+ #     checksum_crc32: "ChecksumCRC32",
+ #     checksum_crc32c: "ChecksumCRC32C",
+ #     checksum_sha1: "ChecksumSHA1",
+ #     checksum_sha256: "ChecksumSHA256",
+ #     request_payer: "requester", # accepts requester
+ #     expected_bucket_owner: "AccountId",
+ #     sse_customer_algorithm: "SSECustomerAlgorithm",
+ #     sse_customer_key: "SSECustomerKey",
+ #     sse_customer_key_md5: "SSECustomerKeyMD5",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.location #=> String
+ #   resp.bucket #=> String
+ #   resp.key #=> String
+ #   resp.expiration #=> String
+ #   resp.etag #=> String
+ #   resp.checksum_crc32 #=> String
+ #   resp.checksum_crc32c #=> String
+ #   resp.checksum_sha1 #=> String
+ #   resp.checksum_sha256 #=> String
+ #   resp.server_side_encryption #=> String, one of "AES256", "aws:kms"
+ #   resp.version_id #=> String
+ #   resp.ssekms_key_id #=> String
+ #   resp.bucket_key_enabled #=> Boolean
+ #   resp.request_charged #=> String, one of "requester"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload AWS API Documentation
+ #
+ # @overload complete_multipart_upload(params = {})
+ # @param [Hash] params ({})
+ def complete_multipart_upload(params = {}, options = {})
+   req = build_request(:complete_multipart_upload, params)
+   req.send_request(options)
+ end
+
+ # Creates a copy of an object that is already stored in Amazon S3.
+ #
+ # You can store individual objects of up to 5 TB in Amazon S3. You
+ # create a copy of your object up to 5 GB in size in a single atomic
+ # action using this API. However, to copy an object greater than 5 GB,
+ # you must use the multipart upload Upload Part - Copy (UploadPartCopy)
+ # API. For more information, see [Copy Object Using the REST Multipart
+ # Upload API][1].
+ #
+ #
+ #
+ # All copy requests must be authenticated. Additionally, you must have
+ # *read* access to the source object and *write* access to the
+ # destination bucket. For more information, see [REST
+ # Authentication][2]. Both the Region that you want to copy the object
+ # from and the Region that you want to copy the object to must be
+ # enabled for your account.
+ #
+ # A copy request might return an error when Amazon S3 receives the copy
+ # request or while Amazon S3 is copying the files. If the error occurs
+ # before the copy action starts, you receive a standard Amazon S3 error.
+ # If the error occurs during the copy operation, the error response is + # embedded in the `200 OK` response. This means that a `200 OK` response + # can contain either a success or an error. Design your application to + # parse the contents of the response and handle it appropriately. + # + # If the copy is successful, you receive a response with information + # about the copied object. + # + # If the request is an HTTP 1.1 request, the response is chunk encoded. + # If it were not, it would not contain the content-length, and you would + # need to read the entire body. + # + # + # + # The copy request charge is based on the storage class and Region that + # you specify for the destination object. For pricing information, see + # [Amazon S3 pricing][3]. + # + # Amazon S3 transfer acceleration does not support cross-Region copies. + # If you request a cross-Region copy using a transfer acceleration + # endpoint, you get a 400 `Bad Request` error. For more information, see + # [Transfer Acceleration][4]. + # + # **Metadata** + # + # When copying an object, you can preserve all metadata (default) or + # specify new metadata. However, the ACL is not preserved and is set to + # private for the user making the request. To override the default ACL + # setting, specify a new ACL when generating a copy request. For more + # information, see [Using ACLs][5]. + # + # To specify whether you want the object metadata copied from the source + # object or replaced with metadata provided in the request, you can + # optionally add the `x-amz-metadata-directive` header. When you grant + # permissions, you can use the `s3:x-amz-metadata-directive` condition + # key to enforce certain metadata behavior when objects are uploaded. + # For more information, see [Specifying Conditions in a Policy][6] in + # the *Amazon S3 User Guide*. For a complete list of Amazon S3-specific + # condition keys, see [Actions, Resources, and Condition Keys for Amazon + # S3][7]. + # + # **x-amz-copy-source-if Headers** + # + # To only copy an object under certain conditions, such as whether the + # `Etag` matches or whether the object was modified before or after a + # specified date, use the following request parameters: + # + # * `x-amz-copy-source-if-match` + # + # * `x-amz-copy-source-if-none-match` + # + # * `x-amz-copy-source-if-unmodified-since` + # + # * `x-amz-copy-source-if-modified-since` + # + # If both the `x-amz-copy-source-if-match` and + # `x-amz-copy-source-if-unmodified-since` headers are present in the + # request and evaluate as follows, Amazon S3 returns `200 OK` and copies + # the data: + # + # * `x-amz-copy-source-if-match` condition evaluates to true + # + # * `x-amz-copy-source-if-unmodified-since` condition evaluates to false + # + # If both the `x-amz-copy-source-if-none-match` and + # `x-amz-copy-source-if-modified-since` headers are present in the + # request and evaluate as follows, Amazon S3 returns the `412 + # Precondition Failed` response code: + # + # * `x-amz-copy-source-if-none-match` condition evaluates to false + # + # * `x-amz-copy-source-if-modified-since` condition evaluates to true + # + # All headers with the `x-amz-` prefix, including `x-amz-copy-source`, + # must be signed. 
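+ # As a brief, non-authoritative sketch of these conditional headers in
+ # this Ruby client (bucket names, key, and ETag below are hypothetical,
+ # and the rescued class is assumed to be the SDK's generated error for
+ # the `412 Precondition Failed` code):
+ #
+ #   client = Aws::S3::Client.new(region: "us-west-2")
+ #   begin
+ #     client.copy_object({
+ #       bucket: "destinationbucket",
+ #       key: "reports/january.pdf",
+ #       copy_source: "sourcebucket/reports/january.pdf",
+ #       # proceed only if the source still carries the ETag we last saw
+ #       copy_source_if_match: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+ #     })
+ #   rescue Aws::S3::Errors::PreconditionFailed
+ #     # the source changed since its ETag was read; re-fetch and retry
+ #   end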
+ # + # + # + # **Server-side encryption** + # + # When you perform a CopyObject operation, you can optionally use the + # appropriate encryption-related headers to encrypt the object using + # server-side encryption with Amazon Web Services managed encryption + # keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With + # server-side encryption, Amazon S3 encrypts your data as it writes it + # to disks in its data centers and decrypts the data when you access it. + # For more information about server-side encryption, see [Using + # Server-Side Encryption][8]. + # + # If a target object uses SSE-KMS, you can enable an S3 Bucket Key for + # the object. For more information, see [Amazon S3 Bucket Keys][9] in + # the *Amazon S3 User Guide*. + # + # **Access Control List (ACL)-Specific Request Headers** + # + # When copying an object, you can optionally use headers to grant + # ACL-based permissions. By default, all objects are private. Only the + # owner has full access control. When adding a new object, you can grant + # permissions to individual Amazon Web Services accounts or to + # predefined groups defined by Amazon S3. These permissions are then + # added to the ACL on the object. For more information, see [Access + # Control List (ACL) Overview][10] and [Managing ACLs Using the REST + # API][11]. + # + # If the bucket that you're copying objects to uses the bucket owner + # enforced setting for S3 Object Ownership, ACLs are disabled and no + # longer affect permissions. Buckets that use this setting only accept + # PUT requests that don't specify an ACL or PUT requests that specify + # bucket owner full control ACLs, such as the + # `bucket-owner-full-control` canned ACL or an equivalent form of this + # ACL expressed in the XML format. + # + # For more information, see [ Controlling ownership of objects and + # disabling ACLs][12] in the *Amazon S3 User Guide*. + # + # If your bucket uses the bucket owner enforced setting for Object + # Ownership, all objects written to the bucket by any account will be + # owned by the bucket owner. + # + # + # + # **Checksums** + # + # When copying an object, if it has a checksum, that checksum will be + # copied to the new object by default. When you copy the object over, + # you may optionally specify a different checksum algorithm to use with + # the `x-amz-checksum-algorithm` header. + # + # **Storage Class Options** + # + # You can use the `CopyObject` action to change the storage class of an + # object that is already stored in Amazon S3 using the `StorageClass` + # parameter. For more information, see [Storage Classes][13] in the + # *Amazon S3 User Guide*. + # + # **Versioning** + # + # By default, `x-amz-copy-source` identifies the current version of an + # object to copy. If the current version is a delete marker, Amazon S3 + # behaves as if the object was deleted. To copy a different version, use + # the `versionId` subresource. + # + # If you enable versioning on the target bucket, Amazon S3 generates a + # unique version ID for the object being copied. This version ID is + # different from the version ID of the source object. Amazon S3 returns + # the version ID of the copied object in the `x-amz-version-id` response + # header in the response. + # + # If you do not enable versioning or suspend it on the target bucket, + # the version ID that Amazon S3 generates is always null. 
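+ # For instance, as a small sketch (bucket names and the version ID are
+ # hypothetical), copying a specific source version and reading back the
+ # version that was actually copied might look like:
+ #
+ #   resp = client.copy_object({
+ #     bucket: "destinationbucket",
+ #     key: "january.pdf",
+ #     # the versionId subresource selects a non-current source version
+ #     copy_source: "sourcebucket/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893",
+ #   })
+ #   resp.copy_source_version_id #=> version ID of the source that was copied
+ #   resp.version_id             #=> new version ID, if the target bucket is versioned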
+ # + # If the source object's storage class is GLACIER, you must restore a + # copy of this object before you can use it as a source object for the + # copy operation. For more information, see [RestoreObject][14]. + # + # The following operations are related to `CopyObject`: + # + # * [PutObject][15] + # + # * [GetObject][16] + # + # For more information, see [Copying Objects][17]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # [3]: http://aws.amazon.com/s3/pricing/ + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + # [12]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + # [13]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # [17]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html + # + # @option params [String] :acl + # The canned ACL to apply to the object. + # + # This action is not supported by Amazon S3 on Outposts. + # + # @option params [required, String] :bucket + # The name of the destination bucket. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [String] :cache_control + # Specifies caching behavior along the request/reply chain. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :content_disposition
+ # Specifies presentational information for the object.
+ #
+ # @option params [String] :content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the media-type
+ # referenced by the Content-Type header field.
+ #
+ # @option params [String] :content_language
+ # The language the content is in.
+ #
+ # @option params [String] :content_type
+ # A standard MIME type describing the format of the object data.
+ #
+ # @option params [required, String] :copy_source
+ # Specifies the source object for the copy operation. You specify the
+ # value in one of two formats, depending on whether you want to access
+ # the source object through an [access point][1]:
+ #
+ # * For objects not accessed through an access point, specify the name
+ #   of the source bucket and the key of the source object, separated by
+ #   a slash (/). For example, to copy the object `reports/january.pdf`
+ #   from the bucket `awsexamplebucket`, use
+ #   `awsexamplebucket/reports/january.pdf`. The value must be
+ #   URL-encoded.
+ #
+ # * For objects accessed through access points, specify the Amazon
+ #   Resource Name (ARN) of the object as accessed through the access
+ #   point, in the format
+ #   `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
+ #   For example, to copy the object `reports/january.pdf` through access
+ #   point `my-access-point` owned by account `123456789012` in Region
+ #   `us-west-2`, use the URL encoding of
+ #   `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+ #   The value must be URL-encoded.
+ #
+ #   Amazon S3 supports copy operations using access points only when the
+ #   source and destination buckets are in the same Amazon Web Services
+ #   Region.
+ #
+ #
+ #
+ #   Alternatively, for objects accessed through Amazon S3 on Outposts,
+ #   specify the ARN of the object as accessed in the format
+ #   `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
+ #   For example, to copy the object `reports/january.pdf` through
+ #   outpost `my-outpost` owned by account `123456789012` in Region
+ #   `us-west-2`, use the URL encoding of
+ #   `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
+ #   The value must be URL-encoded.
+ #
+ # To copy a specific version of an object, append
+ # `?versionId=<version-id>` to the value (for example,
+ # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
+ # If you don't specify a version ID, Amazon S3 copies the latest
+ # version of the source object.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ #
+ # @option params [String] :copy_source_if_match
+ # Copies the object if its entity tag (ETag) matches the specified tag.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since
+ # Copies the object if it has been modified since the specified time.
+ #
+ # @option params [String] :copy_source_if_none_match
+ # Copies the object if its entity tag (ETag) is different than the
+ # specified ETag.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since
+ # Copies the object if it hasn't been modified since the specified
+ # time.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :expires
+ # The date and time at which the object is no longer cacheable.
+ #
+ # @option params [String] :grant_full_control
+ # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
+ # object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_read
+ # Allows grantee to read the object data and its metadata.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_read_acp
+ # Allows grantee to read the object ACL.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_write_acp
+ # Allows grantee to write the ACL for the applicable object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [required, String] :key
+ # The key of the destination object.
+ #
+ # @option params [Hash<String,String>] :metadata
+ # A map of metadata to store with the object in S3.
+ #
+ # @option params [String] :metadata_directive
+ # Specifies whether the metadata is copied from the source object or
+ # replaced with metadata provided in the request.
+ #
+ # @option params [String] :tagging_directive
+ # Specifies whether the object tag-set is copied from the source object
+ # or replaced with the tag-set provided in the request.
+ #
+ # @option params [String] :server_side_encryption
+ # The server-side encryption algorithm used when storing this object in
+ # Amazon S3 (for example, AES256, aws:kms).
+ #
+ # @option params [String] :storage_class
+ # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+ # created objects. The STANDARD storage class provides high durability
+ # and high availability. Depending on performance needs, you can specify
+ # a different Storage Class. Amazon S3 on Outposts only uses the
+ # OUTPOSTS Storage Class. For more information, see [Storage Classes][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ #
+ # @option params [String] :website_redirect_location
+ # If the bucket is configured as a website, redirects requests for this
+ # object to another object in the same bucket or to an external URL.
+ # Amazon S3 stores the value of this header in the object metadata.
+ #
+ # @option params [String] :sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ #
+ # @option params [String] :sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use in
+ # encrypting data. This value is used to store the object and then it is
+ # discarded; Amazon S3 does not store the encryption key. The key must
+ # be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
+ #
+ # @option params [String] :sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check to
+ # ensure that the encryption key was transmitted without error.
+ #
+ # @option params [String] :ssekms_key_id
+ # Specifies the Amazon Web Services KMS key ID to use for object
+ # encryption. All GET and PUT requests for an object protected by Amazon
+ # Web Services KMS will fail if not made via SSL or using SigV4. For
+ # information about configuring any of the officially supported Amazon
+ # Web Services SDKs and the Amazon Web Services CLI, see [Specifying
+ # the Signature Version in Request Authentication][1] in the *Amazon S3
+ # User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ #
+ # @option params [String] :ssekms_encryption_context
+ # Specifies the Amazon Web Services KMS Encryption Context to use for
+ # object encryption. The value of this header is a base64-encoded UTF-8
+ # string holding JSON with the encryption context key-value pairs.
+ #
+ # @option params [Boolean] :bucket_key_enabled
+ # Specifies whether Amazon S3 should use an S3 Bucket Key for object
+ # encryption with server-side encryption using AWS KMS (SSE-KMS).
+ # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key
+ # for object encryption with SSE-KMS.
+ #
+ # Specifying this header with a COPY action doesn’t affect bucket-level
+ # settings for S3 Bucket Key.
+ #
+ # @option params [String] :copy_source_sse_customer_algorithm
+ # Specifies the algorithm to use when decrypting the source object (for
+ # example, AES256).
+ #
+ # @option params [String] :copy_source_sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use to
+ # decrypt the source object. The encryption key provided in this header
+ # must be one that was used when the source object was created.
+ #
+ # @option params [String] :copy_source_sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check to
+ # ensure that the encryption key was transmitted without error.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :tagging
+ # The tag-set for the destination object. This value must be used in
+ # conjunction with the `TaggingDirective`. The tag-set must be encoded
+ # as URL Query parameters.
+ #
+ # @option params [String] :object_lock_mode
+ # The Object Lock mode that you want to apply to the copied object.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date
+ # The date and time when you want the copied object's Object Lock to
+ # expire.
+ #
+ # @option params [String] :object_lock_legal_hold_status
+ # Specifies whether you want to apply a legal hold to the copied object.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected destination bucket owner. If the
+ # destination bucket is owned by a different account, the request fails
+ # with the HTTP status code `403 Forbidden` (access denied).
+ #
+ # @option params [String] :expected_source_bucket_owner
+ # The account ID of the expected source bucket owner. If the source
+ # bucket is owned by a different account, the request fails with the
+ # HTTP status code `403 Forbidden` (access denied).
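+ # As a hedged, end-to-end illustration combining several of the options
+ # above (bucket names, the KMS key ID, and the account ID are
+ # hypothetical placeholders, not values from this SDK):
+ #
+ #   client.copy_object({
+ #     bucket: "destinationbucket",
+ #     key: "doc.txt",
+ #     copy_source: "sourcebucket/doc.txt",
+ #     metadata_directive: "REPLACE",       # replace, rather than copy, metadata
+ #     metadata: { "project" => "reports" },
+ #     server_side_encryption: "aws:kms",
+ #     ssekms_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #     bucket_key_enabled: true,            # use an S3 Bucket Key for SSE-KMS
+ #     expected_bucket_owner: "111122223333",
+ #   })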
+ # + # @return [Types::CopyObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CopyObjectOutput#copy_object_result #copy_object_result} => Types::CopyObjectResult + # * {Types::CopyObjectOutput#expiration #expiration} => String + # * {Types::CopyObjectOutput#copy_source_version_id #copy_source_version_id} => String + # * {Types::CopyObjectOutput#version_id #version_id} => String + # * {Types::CopyObjectOutput#server_side_encryption #server_side_encryption} => String + # * {Types::CopyObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::CopyObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::CopyObjectOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::CopyObjectOutput#ssekms_encryption_context #ssekms_encryption_context} => String + # * {Types::CopyObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::CopyObjectOutput#request_charged #request_charged} => String + # + # + # @example Example: To copy an object + # + # # The following example copies an object from one bucket to another. + # + # resp = client.copy_object({ + # bucket: "destinationbucket", + # copy_source: "/sourcebucket/HappyFacejpg", + # key: "HappyFaceCopyjpg", + # }) + # + # resp.to_h outputs the following: + # { + # copy_object_result: { + # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", + # last_modified: Time.parse("2016-12-15T17:38:53.000Z"), + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.copy_object({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # bucket: "BucketName", # required + # cache_control: "CacheControl", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_type: "ContentType", + # copy_source: "CopySource", # required + # copy_source_if_match: "CopySourceIfMatch", + # copy_source_if_modified_since: Time.now, + # copy_source_if_none_match: "CopySourceIfNoneMatch", + # copy_source_if_unmodified_since: Time.now, + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # key: "ObjectKey", # required + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # metadata_directive: "COPY", # accepts COPY, REPLACE + # tagging_directive: "COPY", # accepts COPY, REPLACE + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", + # copy_source_sse_customer_key: "CopySourceSSECustomerKey", + # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # 
accepts GOVERNANCE, COMPLIANCE
+ #     object_lock_retain_until_date: Time.now,
+ #     object_lock_legal_hold_status: "ON", # accepts ON, OFF
+ #     expected_bucket_owner: "AccountId",
+ #     expected_source_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.copy_object_result.etag #=> String
+ #   resp.copy_object_result.last_modified #=> Time
+ #   resp.copy_object_result.checksum_crc32 #=> String
+ #   resp.copy_object_result.checksum_crc32c #=> String
+ #   resp.copy_object_result.checksum_sha1 #=> String
+ #   resp.copy_object_result.checksum_sha256 #=> String
+ #   resp.expiration #=> String
+ #   resp.copy_source_version_id #=> String
+ #   resp.version_id #=> String
+ #   resp.server_side_encryption #=> String, one of "AES256", "aws:kms"
+ #   resp.sse_customer_algorithm #=> String
+ #   resp.sse_customer_key_md5 #=> String
+ #   resp.ssekms_key_id #=> String
+ #   resp.ssekms_encryption_context #=> String
+ #   resp.bucket_key_enabled #=> Boolean
+ #   resp.request_charged #=> String, one of "requester"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject AWS API Documentation
+ #
+ # @overload copy_object(params = {})
+ # @param [Hash] params ({})
+ def copy_object(params = {}, options = {})
+   req = build_request(:copy_object, params)
+   req.send_request(options)
+ end
+
+ # Creates a new S3 bucket. To create a bucket, you must register with
+ # Amazon S3 and have a valid Amazon Web Services Access Key ID to
+ # authenticate requests. Anonymous requests are never allowed to create
+ # buckets. By creating the bucket, you become the bucket owner.
+ #
+ # Not every string is an acceptable bucket name. For information about
+ # bucket naming restrictions, see [Bucket naming rules][1].
+ #
+ # If you want to create an Amazon S3 on Outposts bucket, see [Create
+ # Bucket][2].
+ #
+ # By default, the bucket is created in the US East (N. Virginia) Region.
+ # You can optionally specify a Region in the request body. You might
+ # choose a Region to optimize latency, minimize costs, or address
+ # regulatory requirements. For example, if you reside in Europe, you
+ # will probably find it advantageous to create buckets in the Europe
+ # (Ireland) Region. For more information, see [Accessing a bucket][3].
+ #
+ # If you send your create bucket request to the `s3.amazonaws.com`
+ # endpoint, the request goes to the us-east-1 Region. Accordingly, the
+ # signature calculations in Signature Version 4 must use us-east-1 as
+ # the Region, even if the location constraint in the request specifies
+ # another Region where the bucket is to be created. If you create a
+ # bucket in a Region other than US East (N. Virginia), your application
+ # must be able to handle 307 redirects. For more information, see
+ # [Virtual hosting of buckets][4].
+ #
+ #
+ #
+ # **Access control lists (ACLs)**
+ #
+ # When creating a bucket using this operation, you can optionally
+ # configure the bucket ACL to specify the accounts or groups that should
+ # be granted specific permissions on the bucket.
+ #
+ # If your CreateBucket request sets bucket owner enforced for S3 Object
+ # Ownership and specifies a bucket ACL that provides access to an
+ # external Amazon Web Services account, your request fails with a `400`
+ # error and returns the `InvalidBucketAclWithObjectOwnership` error
+ # code. For more information, see [Controlling object ownership][5] in
+ # the *Amazon S3 User Guide*.
+ #
+ # There are two ways to grant the appropriate permissions using the
+ # request headers.
+ #
+ # * Specify a canned ACL using the `x-amz-acl` request header. Amazon S3
+ #   supports a set of predefined ACLs, known as *canned ACLs*. Each
+ #   canned ACL has a predefined set of grantees and permissions. For
+ #   more information, see [Canned ACL][6].
+ #
+ # * Specify access permissions explicitly using the `x-amz-grant-read`,
+ #   `x-amz-grant-write`, `x-amz-grant-read-acp`,
+ #   `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers.
+ #   These headers map to the set of permissions Amazon S3 supports in an
+ #   ACL. For more information, see [Access control list (ACL)
+ #   overview][7].
+ #
+ #   You specify each grantee as a type=value pair, where the type is one
+ #   of the following:
+ #
+ #   * `id` – if the value specified is the canonical user ID of an
+ #     Amazon Web Services account
+ #
+ #   * `uri` – if you are granting permissions to a predefined group
+ #
+ #   * `emailAddress` – if the value specified is the email address of an
+ #     Amazon Web Services account
+ #
+ #     Using email addresses to specify a grantee is only supported in
+ #     the following Amazon Web Services Regions:
+ #
+ #     * US East (N. Virginia)
+ #
+ #     * US West (N. California)
+ #
+ #     * US West (Oregon)
+ #
+ #     * Asia Pacific (Singapore)
+ #
+ #     * Asia Pacific (Sydney)
+ #
+ #     * Asia Pacific (Tokyo)
+ #
+ #     * Europe (Ireland)
+ #
+ #     * South America (São Paulo)
+ #
+ #     For a list of all the Amazon S3 supported Regions and endpoints,
+ #     see [Regions and Endpoints][8] in the Amazon Web Services General
+ #     Reference.
+ #
+ #
+ #
+ #   For example, the following `x-amz-grant-read` header grants the
+ #   Amazon Web Services accounts identified by account IDs permissions
+ #   to read object data and its metadata:
+ #
+ #   `x-amz-grant-read: id="11112222333", id="444455556666" `
+ #
+ # You can use either a canned ACL or specify access permissions
+ # explicitly. You cannot do both.
+ #
+ #
+ #
+ # **Permissions**
+ #
+ # In addition to `s3:CreateBucket`, the following permissions are
+ # required when your CreateBucket includes specific headers (see the
+ # sketch after this list):
+ #
+ # * **ACLs** - If your `CreateBucket` request specifies ACL permissions
+ #   and the ACL is public-read, public-read-write, authenticated-read,
+ #   or if you specify access permissions explicitly through any other
+ #   ACL, both `s3:CreateBucket` and `s3:PutBucketAcl` permissions are
+ #   needed. If the ACL in the `CreateBucket` request is private or doesn't
+ #   specify any ACLs, only `s3:CreateBucket` permission is needed.
+ #
+ # * **Object Lock** - If `ObjectLockEnabledForBucket` is set to true in
+ #   your `CreateBucket` request, `s3:PutBucketObjectLockConfiguration`
+ #   and `s3:PutBucketVersioning` permissions are required.
+ #
+ # * **S3 Object Ownership** - If your CreateBucket request includes the
+ #   `x-amz-object-ownership` header, `s3:PutBucketOwnershipControls`
+ #   permission is required.
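+ # As an illustrative, non-authoritative sketch tying the grant headers
+ # and permission notes above together (the account IDs and bucket name
+ # are hypothetical):
+ #
+ #   client.create_bucket({
+ #     bucket: "examplebucket",
+ #     # explicit grants instead of a canned ACL; you cannot use both
+ #     grant_read: 'id="111122223333", id="444455556666"',
+ #     # enabling Object Lock also requires the
+ #     # s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning
+ #     # permissions noted in the list above
+ #     object_lock_enabled_for_bucket: true,
+ #   })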
+ #
+ # The following operations are related to `CreateBucket`:
+ #
+ # * [PutObject][9]
+ #
+ # * [DeleteBucket][10]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html
+ # [8]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+ #
+ # @option params [String] :acl
+ # The canned ACL to apply to the bucket.
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket to create.
+ #
+ # @option params [Types::CreateBucketConfiguration] :create_bucket_configuration
+ # The configuration information for the bucket.
+ #
+ # @option params [String] :grant_full_control
+ # Allows grantee the read, write, read ACP, and write ACP permissions on
+ # the bucket.
+ #
+ # @option params [String] :grant_read
+ # Allows grantee to list the objects in the bucket.
+ #
+ # @option params [String] :grant_read_acp
+ # Allows grantee to read the bucket ACL.
+ #
+ # @option params [String] :grant_write
+ # Allows grantee to create new objects in the bucket.
+ #
+ # For the bucket and object owners of existing objects, also allows
+ # deletions and overwrites of those objects.
+ #
+ # @option params [String] :grant_write_acp
+ # Allows grantee to write the ACL for the applicable bucket.
+ #
+ # @option params [Boolean] :object_lock_enabled_for_bucket
+ # Specifies whether you want S3 Object Lock to be enabled for the new
+ # bucket.
+ #
+ # @option params [String] :object_ownership
+ # The container element for object ownership for a bucket's ownership
+ # controls.
+ #
+ # BucketOwnerPreferred - Objects uploaded to the bucket change ownership
+ # to the bucket owner if the objects are uploaded with the
+ # `bucket-owner-full-control` canned ACL.
+ #
+ # ObjectWriter - The uploading account will own the object if the object
+ # is uploaded with the `bucket-owner-full-control` canned ACL.
+ #
+ # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no
+ # longer affect permissions. The bucket owner automatically owns and has
+ # full control over every object in the bucket. The bucket only accepts
+ # PUT requests that don't specify an ACL or bucket owner full control
+ # ACLs, such as the `bucket-owner-full-control` canned ACL or an
+ # equivalent form of this ACL expressed in the XML format.
+ #
+ # @return [Types::CreateBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateBucketOutput#location #location} => String
+ #
+ #
+ # @example Example: To create a bucket in a specific region
+ #
+ #   # The following example creates a bucket. The request specifies the AWS Region in which to create the bucket.
+ #
+ #   resp = client.create_bucket({
+ #     bucket: "examplebucket",
+ #     create_bucket_configuration: {
+ #       location_constraint: "eu-west-1",
+ #     },
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     location: "http://examplebucket.<Region>.s3.amazonaws.com/",
+ #   }
+ #
+ # @example Example: To create a bucket
+ #
+ #   # The following example creates a bucket.
+ #
+ #   resp = client.create_bucket({
+ #     bucket: "examplebucket",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     location: "/examplebucket",
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.create_bucket({
+ #     acl: "private", # accepts private, public-read, public-read-write, authenticated-read
+ #     bucket: "BucketName", # required
+ #     create_bucket_configuration: {
+ #       location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2
+ #     },
+ #     grant_full_control: "GrantFullControl",
+ #     grant_read: "GrantRead",
+ #     grant_read_acp: "GrantReadACP",
+ #     grant_write: "GrantWrite",
+ #     grant_write_acp: "GrantWriteACP",
+ #     object_lock_enabled_for_bucket: false,
+ #     object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.location #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket AWS API Documentation
+ #
+ # @overload create_bucket(params = {})
+ # @param [Hash] params ({})
+ def create_bucket(params = {}, options = {})
+   req = build_request(:create_bucket, params)
+   req.send_request(options)
+ end
+
+ # This action initiates a multipart upload and returns an upload ID.
+ # This upload ID is used to associate all of the parts in the specific
+ # multipart upload. You specify this upload ID in each of your
+ # subsequent upload part requests (see [UploadPart][1]). You also
+ # include this upload ID in the final request to either complete or
+ # abort the multipart upload request.
+ #
+ # For more information about multipart uploads, see [Multipart Upload
+ # Overview][2].
+ #
+ # If you have configured a lifecycle rule to abort incomplete multipart
+ # uploads, the upload must complete within the number of days specified
+ # in the bucket lifecycle configuration. Otherwise, the incomplete
+ # multipart upload becomes eligible for an abort action and Amazon S3
+ # aborts the multipart upload. For more information, see [Aborting
+ # Incomplete Multipart Uploads Using a Bucket Lifecycle Policy][3].
+ #
+ # For information about the permissions required to use the multipart
+ # upload API, see [Multipart Upload and Permissions][4].
+ #
+ # For request signing, multipart upload is just a series of regular
+ # requests. You initiate a multipart upload, send one or more requests
+ # to upload parts, and then complete the multipart upload process. You
+ # sign each request individually. There is nothing special about signing
+ # multipart upload requests. For more information about signing, see
+ # [Authenticating Requests (Amazon Web Services Signature Version
+ # 4)][5].
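+ # As a rough lifecycle sketch of initiate, upload parts, and complete
+ # or abort (the bucket, key, and 5 MB part size are placeholder choices;
+ # every part except the last must be at least 5 MB):
+ #
+ #   upload = client.create_multipart_upload(bucket: "examplebucket", key: "largeobject")
+ #   begin
+ #     parts = []
+ #     File.open("largeobject", "rb") do |io|
+ #       part_number = 1
+ #       while (chunk = io.read(5 * 1024 * 1024))
+ #         part = client.upload_part(
+ #           bucket: "examplebucket", key: "largeobject",
+ #           upload_id: upload.upload_id,
+ #           part_number: part_number, body: chunk)
+ #         parts << { etag: part.etag, part_number: part_number }
+ #         part_number += 1
+ #       end
+ #     end
+ #     client.complete_multipart_upload(
+ #       bucket: "examplebucket", key: "largeobject",
+ #       upload_id: upload.upload_id,
+ #       multipart_upload: { parts: parts })
+ #   rescue StandardError
+ #     # abort so the stored parts stop accruing charges, then re-raise
+ #     client.abort_multipart_upload(
+ #       bucket: "examplebucket", key: "largeobject", upload_id: upload.upload_id)
+ #     raise
+ #   end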
+ #
+ # After you initiate a multipart upload and upload one or more parts, to
+ # stop being charged for storing the uploaded parts, you must either
+ # complete or abort the multipart upload. Amazon S3 frees up the space
+ # used to store the parts and stops charging you for storing them only
+ # after you either complete or abort a multipart upload.
+ #
+ #
+ #
+ # You can optionally request server-side encryption. For server-side
+ # encryption, Amazon S3 encrypts your data as it writes it to disks in
+ # its data centers and decrypts it when you access it. You can provide
+ # your own encryption key, or use Amazon Web Services KMS keys or Amazon
+ # S3-managed encryption keys. If you choose to provide your own
+ # encryption key, the request headers you provide in [UploadPart][1] and
+ # [UploadPartCopy][6] requests must match the headers you used in the
+ # request to initiate the upload by using `CreateMultipartUpload`.
+ #
+ # To perform a multipart upload with encryption using an Amazon Web
+ # Services KMS key, the requester must have permission to the
+ # `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These
+ # permissions are required because Amazon S3 must decrypt and read data
+ # from the encrypted file parts before it completes the multipart
+ # upload. For more information, see [Multipart upload API and
+ # permissions][7] in the *Amazon S3 User Guide*.
+ #
+ # If your Identity and Access Management (IAM) user or role is in the
+ # same Amazon Web Services account as the KMS key, then you must have
+ # these permissions on the key policy. If your IAM user or role belongs
+ # to a different account than the key, then you must have the
+ # permissions on both the key policy and your IAM user or role.
+ #
+ # For more information, see [Protecting Data Using Server-Side
+ # Encryption][8].
+ #
+ # Access Permissions
+ #
+ # : When copying an object, you can optionally specify the accounts or
+ #   groups that should be granted specific permissions on the new
+ #   object. There are two ways to grant the permissions using the
+ #   request headers:
+ #
+ #   * Specify a canned ACL with the `x-amz-acl` request header. For more
+ #     information, see [Canned ACL][9].
+ #
+ #   * Specify access permissions explicitly with the `x-amz-grant-read`,
+ #     `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
+ #     `x-amz-grant-full-control` headers. These parameters map to the
+ #     set of permissions that Amazon S3 supports in an ACL. For more
+ #     information, see [Access Control List (ACL) Overview][10].
+ #
+ #   You can use either a canned ACL or specify access permissions
+ #   explicitly. You cannot do both.
+ #
+ # Server-Side Encryption-Specific Request Headers
+ #
+ # : You can optionally tell Amazon S3 to encrypt data at rest using
+ #   server-side encryption. Server-side encryption is for data
+ #   encryption at rest. Amazon S3 encrypts your data as it writes it to
+ #   disks in its data centers and decrypts it when you access it. The
+ #   option you use depends on whether you want to use Amazon Web
+ #   Services managed encryption keys or provide your own encryption key.
+ #
+ #   * Use encryption keys managed by Amazon S3 or a customer managed key
+ #     stored in Amazon Web Services Key Management Service (Amazon Web
+ #     Services KMS) – If you want Amazon Web Services to manage the keys
+ #     used to encrypt data, specify the following headers in the
+ #     request.
+ # + # * `x-amz-server-side-encryption` + # + # * `x-amz-server-side-encryption-aws-kms-key-id` + # + # * `x-amz-server-side-encryption-context` + # + # If you specify `x-amz-server-side-encryption:aws:kms`, but don't + # provide `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 + # uses the Amazon Web Services managed key in Amazon Web Services + # KMS to protect the data. + # + # + # + # All GET and PUT requests for an object protected by Amazon Web + # Services KMS fail if you don't make them with SSL or by using + # SigV4. + # + # For more information about server-side encryption with KMS key + # (SSE-KMS), see [Protecting Data Using Server-Side Encryption with + # KMS keys][11]. + # + # * Use customer-provided encryption keys – If you want to manage your + # own encryption keys, provide all the following headers in the + # request. + # + # * `x-amz-server-side-encryption-customer-algorithm` + # + # * `x-amz-server-side-encryption-customer-key` + # + # * `x-amz-server-side-encryption-customer-key-MD5` + # + # For more information about server-side encryption with KMS keys + # (SSE-KMS), see [Protecting Data Using Server-Side Encryption with + # KMS keys][11]. + # + # Access-Control-List (ACL)-Specific Request Headers + # + # : You also can use the following access control–related headers with + # this operation. By default, all objects are private. Only the owner + # has full access control. When adding a new object, you can grant + # permissions to individual Amazon Web Services accounts or to + # predefined groups defined by Amazon S3. These permissions are then + # added to the access control list (ACL) on the object. For more + # information, see [Using ACLs][12]. With this operation, you can + # grant access permissions using one of the following two methods: + # + # * Specify a canned ACL (`x-amz-acl`) — Amazon S3 supports a set of + # predefined ACLs, known as *canned ACLs*. Each canned ACL has a + # predefined set of grantees and permissions. For more information, + # see [Canned ACL][9]. + # + # * Specify access permissions explicitly — To explicitly grant access + # permissions to specific Amazon Web Services accounts or groups, + # use the following headers. Each header maps to specific + # permissions that Amazon S3 supports in an ACL. For more + # information, see [Access Control List (ACL) Overview][10]. In the + # header, you specify a list of grantees who get the specific + # permission. To grant permissions explicitly, use: + # + # * `x-amz-grant-read` + # + # * `x-amz-grant-write` + # + # * `x-amz-grant-read-acp` + # + # * `x-amz-grant-write-acp` + # + # * `x-amz-grant-full-control` + # + # You specify each grantee as a type=value pair, where the type is + # one of the following: + # + # * `id` – if the value specified is the canonical user ID of an + # Amazon Web Services account + # + # * `uri` – if you are granting permissions to a predefined group + # + # * `emailAddress` – if the value specified is the email address of + # an Amazon Web Services account + # + # Using email addresses to specify a grantee is only supported in + # the following Amazon Web Services Regions: + # + # * US East (N. Virginia) + # + # * US West (N. 
California)
+ #
+ #     * US West (Oregon)
+ #
+ #     * Asia Pacific (Singapore)
+ #
+ #     * Asia Pacific (Sydney)
+ #
+ #     * Asia Pacific (Tokyo)
+ #
+ #     * Europe (Ireland)
+ #
+ #     * South America (São Paulo)
+ #
+ #     For a list of all the Amazon S3 supported Regions and endpoints,
+ #     see [Regions and Endpoints][13] in the Amazon Web Services
+ #     General Reference.
+ #
+ #
+ #
+ #   For example, the following `x-amz-grant-read` header grants the
+ #   Amazon Web Services accounts identified by account IDs permissions
+ #   to read object data and its metadata:
+ #
+ #   `x-amz-grant-read: id="11112222333", id="444455556666" `
+ #
+ # The following operations are related to `CreateMultipartUpload`:
+ #
+ # * [UploadPart][1]
+ #
+ # * [CompleteMultipartUpload][14]
+ #
+ # * [AbortMultipartUpload][15]
+ #
+ # * [ListParts][16]
+ #
+ # * [ListMultipartUploads][17]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+ # [13]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+ # [17]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+ #
+ # @option params [String] :acl
+ # The canned ACL to apply to the object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket to which to initiate the upload.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :cache_control
+ # Specifies caching behavior along the request/reply chain.
+ #
+ # @option params [String] :content_disposition
+ # Specifies presentational information for the object.
+ #
+ # @option params [String] :content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the media-type
+ # referenced by the Content-Type header field.
+ #
+ # @option params [String] :content_language
+ # The language the content is in.
+ #
+ # @option params [String] :content_type
+ # A standard MIME type describing the format of the object data.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :expires
+ # The date and time at which the object is no longer cacheable.
+ #
+ # @option params [String] :grant_full_control
+ # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
+ # object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_read
+ # Allows grantee to read the object data and its metadata.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_read_acp
+ # Allows grantee to read the object ACL.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [String] :grant_write_acp
+ # Allows grantee to write the ACL for the applicable object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # @option params [required, String] :key
+ # Object key for which the multipart upload is to be initiated.
+ #
+ # @option params [Hash<String,String>] :metadata
+ # A map of metadata to store with the object in S3.
+ #
+ # @option params [String] :server_side_encryption
+ # The server-side encryption algorithm used when storing this object in
+ # Amazon S3 (for example, AES256, aws:kms).
+ #
+ # @option params [String] :storage_class
+ # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+ # created objects. The STANDARD storage class provides high durability
+ # and high availability. Depending on performance needs, you can specify
+ # a different Storage Class. Amazon S3 on Outposts only uses the
+ # OUTPOSTS Storage Class. For more information, see [Storage Classes][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ #
+ # @option params [String] :website_redirect_location
+ # If the bucket is configured as a website, redirects requests for this
+ # object to another object in the same bucket or to an external URL.
+ # Amazon S3 stores the value of this header in the object metadata.
+ #
+ # @option params [String] :sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ #
+ # @option params [String] :sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use in
+ # encrypting data. This value is used to store the object and then it is
+ # discarded; Amazon S3 does not store the encryption key. The key must
+ # be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
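+ #
+ # As a hedged sketch of SSE-C with this operation (the key below is
+ # generated locally; the same algorithm and key must accompany every
+ # subsequent UploadPart request, and, as we understand this Ruby SDK,
+ # the key MD5 header is computed for you when omitted):
+ #
+ #   require "openssl"
+ #   sse_key = OpenSSL::Random.random_bytes(32)  # 256-bit customer key
+ #   upload = client.create_multipart_upload(
+ #     bucket: "examplebucket", key: "largeobject",
+ #     sse_customer_algorithm: "AES256",
+ #     sse_customer_key: sse_key)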
+ # + # @option params [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # + # @option params [String] :ssekms_key_id + # Specifies the ID of the symmetric customer managed key to use for + # object encryption. All GET and PUT requests for an object protected by + # Amazon Web Services KMS will fail if not made via SSL or using SigV4. + # For information about configuring using any of the officially + # supported Amazon Web Services SDKs and Amazon Web Services CLI, see + # [Specifying the Signature Version in Request Authentication][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + # + # @option params [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # + # @option params [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with an object action doesn’t affect + # bucket-level settings for S3 Bucket Key. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. + # + # @option params [String] :object_lock_mode + # Specifies the Object Lock mode that you want to apply to the uploaded + # object. + # + # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # Specifies the date and time when you want the Object Lock to expire. + # + # @option params [String] :object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the uploaded + # object. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @return [Types::CreateMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateMultipartUploadOutput#abort_date #abort_date} => Time + # * {Types::CreateMultipartUploadOutput#abort_rule_id #abort_rule_id} => String + # * {Types::CreateMultipartUploadOutput#bucket #bucket} => String + # * {Types::CreateMultipartUploadOutput#key #key} => String + # * {Types::CreateMultipartUploadOutput#upload_id #upload_id} => String + # * {Types::CreateMultipartUploadOutput#server_side_encryption #server_side_encryption} => String + # * {Types::CreateMultipartUploadOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::CreateMultipartUploadOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::CreateMultipartUploadOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::CreateMultipartUploadOutput#ssekms_encryption_context #ssekms_encryption_context} => String + # * {Types::CreateMultipartUploadOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::CreateMultipartUploadOutput#request_charged #request_charged} => String + # * {Types::CreateMultipartUploadOutput#checksum_algorithm #checksum_algorithm} => String + # + # + # @example Example: To initiate a multipart upload + # + # # The following example initiates a multipart upload. + # + # resp = client.create_multipart_upload({ + # bucket: "examplebucket", + # key: "largeobject", + # }) + # + # resp.to_h outputs the following: + # { + # bucket: "examplebucket", + # key: "largeobject", + # upload_id: "ibZBv_75gd9r8lH_gqXatLdxMVpAlj6ZQjEs.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.create_multipart_upload({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # bucket: "BucketName", # required + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_type: "ContentType", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # key: "ObjectKey", # required + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # + # @example Response structure + # + # resp.abort_date 
#=> Time + # resp.abort_rule_id #=> String + # resp.bucket #=> String + # resp.key #=> String + # resp.upload_id #=> String + # resp.server_side_encryption #=> String, one of "AES256", "aws:kms" + # resp.sse_customer_algorithm #=> String + # resp.sse_customer_key_md5 #=> String + # resp.ssekms_key_id #=> String + # resp.ssekms_encryption_context #=> String + # resp.bucket_key_enabled #=> Boolean + # resp.request_charged #=> String, one of "requester" + # resp.checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload AWS API Documentation + # + # @overload create_multipart_upload(params = {}) + # @param [Hash] params ({}) + def create_multipart_upload(params = {}, options = {}) + req = build_request(:create_multipart_upload, params) + req.send_request(options) + end + + # Deletes the S3 bucket. All objects (including all object versions and + # delete markers) in the bucket must be deleted before the bucket itself + # can be deleted. + # + # **Related Resources** + # + # * [CreateBucket][1] + # + # * [DeleteObject][2] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + # + # @option params [required, String] :bucket + # Specifies the bucket being deleted. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete a bucket + # + # # The following example deletes the specified bucket. + # + # resp = client.delete_bucket({ + # bucket: "forrandall2", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket AWS API Documentation + # + # @overload delete_bucket(params = {}) + # @param [Hash] params ({}) + def delete_bucket(params = {}, options = {}) + req = build_request(:delete_bucket, params) + req.send_request(options) + end + + # Deletes an analytics configuration for the bucket (specified by the + # analytics configuration ID). + # + # To use this operation, you must have permissions to perform the + # `s3:PutAnalyticsConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. + # + # For information about the Amazon S3 analytics feature, see [Amazon S3 + # Analytics – Storage Class Analysis][3]. 
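+ # + # As a rough sketch (not an official example; the bucket name is a placeholder and pagination of the listing is ignored for brevity), every analytics configuration on a bucket could be removed by listing the configurations first: + # + # resp = client.list_bucket_analytics_configurations(bucket: "examplebucket") + # resp.analytics_configuration_list.each do |config| + # client.delete_bucket_analytics_configuration(bucket: "examplebucket", id: config.id) + # end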
+ # + # The following operations are related to + # `DeleteBucketAnalyticsConfiguration`: + # + # * [GetBucketAnalyticsConfiguration][4] + # + # * [ListBucketAnalyticsConfigurations][5] + # + # * [PutBucketAnalyticsConfiguration][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html + # + # @option params [required, String] :bucket + # The name of the bucket from which an analytics configuration is + # deleted. + # + # @option params [required, String] :id + # The ID that identifies the analytics configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_analytics_configuration({ + # bucket: "BucketName", # required + # id: "AnalyticsId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration AWS API Documentation + # + # @overload delete_bucket_analytics_configuration(params = {}) + # @param [Hash] params ({}) + def delete_bucket_analytics_configuration(params = {}, options = {}) + req = build_request(:delete_bucket_analytics_configuration, params) + req.send_request(options) + end + + # Deletes the `cors` configuration information set for the bucket. + # + # To use this operation, you must have permission to perform the + # `s3:PutBucketCORS` action. The bucket owner has this permission by + # default and can grant this permission to others. + # + # For information about `cors`, see [Enabling Cross-Origin Resource + # Sharing][1] in the *Amazon S3 User Guide*. + # + # **Related Resources:** + # + # * [PutBucketCors][2] + # + # * [RESTOPTIONSobject][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html + # + # @option params [required, String] :bucket + # Specifies the bucket whose `cors` configuration is being deleted. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete cors configuration on a bucket. + # + # # The following example deletes CORS configuration on a bucket. 
+ # + # resp = client.delete_bucket_cors({ + # bucket: "examplebucket", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_cors({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors AWS API Documentation + # + # @overload delete_bucket_cors(params = {}) + # @param [Hash] params ({}) + def delete_bucket_cors(params = {}, options = {}) + req = build_request(:delete_bucket_cors, params) + req.send_request(options) + end + + # This implementation of the DELETE action removes default encryption + # from the bucket. For information about the Amazon S3 default + # encryption feature, see [Amazon S3 Default Bucket Encryption][1] in + # the *Amazon S3 User Guide*. + # + # To use this operation, you must have permissions to perform the + # `s3:PutEncryptionConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][2] and [Managing Access + # Permissions to your Amazon S3 Resources][3] in the *Amazon S3 User + # Guide*. + # + # **Related Resources** + # + # * [PutBucketEncryption][4] + # + # * [GetBucketEncryption][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the server-side encryption + # configuration to delete. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_encryption({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption AWS API Documentation + # + # @overload delete_bucket_encryption(params = {}) + # @param [Hash] params ({}) + def delete_bucket_encryption(params = {}, options = {}) + req = build_request(:delete_bucket_encryption, params) + req.send_request(options) + end + + # Deletes the S3 Intelligent-Tiering configuration from the specified + # bucket. + # + # The S3 Intelligent-Tiering storage class is designed to optimize + # storage costs by automatically moving data to the most cost-effective + # storage access tier, without performance impact or operational + # overhead. S3 Intelligent-Tiering delivers automatic cost savings in + # three low latency and high throughput access tiers. To get the lowest + # storage cost on data that can be accessed in minutes to hours, you can + # choose to activate additional archiving capabilities. 
+ # + # The S3 Intelligent-Tiering storage class is the ideal storage class + # for data with unknown, changing, or unpredictable access patterns, + # independent of object size or retention period. If the size of an + # object is less than 128 KB, it is not monitored and not eligible for + # auto-tiering. Smaller objects can be stored, but they are always + # charged at the Frequent Access tier rates in the S3 + # Intelligent-Tiering storage class. + # + # For more information, see [Storage class for automatically optimizing + # frequently and infrequently accessed objects][1]. + # + # Operations related to `DeleteBucketIntelligentTieringConfiguration` + # include: + # + # * [GetBucketIntelligentTieringConfiguration][2] + # + # * [PutBucketIntelligentTieringConfiguration][3] + # + # * [ListBucketIntelligentTieringConfigurations][4] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # + # @option params [required, String] :id + # The ID used to identify the S3 Intelligent-Tiering configuration. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_intelligent_tiering_configuration({ + # bucket: "BucketName", # required + # id: "IntelligentTieringId", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration AWS API Documentation + # + # @overload delete_bucket_intelligent_tiering_configuration(params = {}) + # @param [Hash] params ({}) + def delete_bucket_intelligent_tiering_configuration(params = {}, options = {}) + req = build_request(:delete_bucket_intelligent_tiering_configuration, params) + req.send_request(options) + end + + # Deletes an inventory configuration (identified by the inventory ID) + # from the bucket. + # + # To use this operation, you must have permissions to perform the + # `s3:PutInventoryConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. + # + # For information about the Amazon S3 inventory feature, see [Amazon S3 + # Inventory][3]. 
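+ # + # For example (an illustrative sketch only; the inventory ID and account ID are placeholders), a single configuration can be deleted while guarding against the bucket being owned by another account: + # + # client.delete_bucket_inventory_configuration( + # bucket: "examplebucket", + # id: "report1", + # expected_bucket_owner: "123456789012", + # )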
+ # + # Operations related to `DeleteBucketInventoryConfiguration` include: + # + # * [GetBucketInventoryConfiguration][4] + # + # * [PutBucketInventoryConfiguration][5] + # + # * [ListBucketInventoryConfigurations][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the inventory configuration to + # delete. + # + # @option params [required, String] :id + # The ID used to identify the inventory configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_inventory_configuration({ + # bucket: "BucketName", # required + # id: "InventoryId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration AWS API Documentation + # + # @overload delete_bucket_inventory_configuration(params = {}) + # @param [Hash] params ({}) + def delete_bucket_inventory_configuration(params = {}, options = {}) + req = build_request(:delete_bucket_inventory_configuration, params) + req.send_request(options) + end + + # Deletes the lifecycle configuration from the specified bucket. Amazon + # S3 removes all the lifecycle configuration rules in the lifecycle + # subresource associated with the bucket. Your objects never expire, and + # Amazon S3 no longer automatically deletes any objects on the basis of + # rules contained in the deleted lifecycle configuration. + # + # To use this operation, you must have permission to perform the + # `s3:PutLifecycleConfiguration` action. By default, the bucket owner + # has this permission and the bucket owner can grant this permission to + # others. + # + # There is usually some time lag before lifecycle configuration deletion + # is fully propagated to all the Amazon S3 systems. + # + # For more information about object expiration, see [Elements to + # Describe Lifecycle Actions][1]. + # + # Related actions include: + # + # * [PutBucketLifecycleConfiguration][2] + # + # * [GetBucketLifecycleConfiguration][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html + # + # @option params [required, String] :bucket + # The name of the bucket whose lifecycle configuration you want to + # delete. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner.
If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete lifecycle configuration on a bucket. + # + # # The following example deletes lifecycle configuration on a bucket. + # + # resp = client.delete_bucket_lifecycle({ + # bucket: "examplebucket", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_lifecycle({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle AWS API Documentation + # + # @overload delete_bucket_lifecycle(params = {}) + # @param [Hash] params ({}) + def delete_bucket_lifecycle(params = {}, options = {}) + req = build_request(:delete_bucket_lifecycle, params) + req.send_request(options) + end + + # Deletes a metrics configuration for the Amazon CloudWatch request + # metrics (specified by the metrics configuration ID) from the bucket. + # Note that this doesn't include the daily storage metrics. + # + # To use this operation, you must have permissions to perform the + # `s3:PutMetricsConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. + # + # For information about CloudWatch request metrics for Amazon S3, see + # [Monitoring Metrics with Amazon CloudWatch][3]. + # + # The following operations are related to + # `DeleteBucketMetricsConfiguration`: + # + # * [GetBucketMetricsConfiguration][4] + # + # * [PutBucketMetricsConfiguration][5] + # + # * [ListBucketMetricsConfigurations][6] + # + # * [Monitoring Metrics with Amazon CloudWatch][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the metrics configuration to delete. + # + # @option params [required, String] :id + # The ID used to identify the metrics configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
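+ # + # @example Example: Deleting a metrics configuration (illustrative sketch) + # + # # Not an official curated example; "EntireBucket" is simply a commonly + # # used illustrative metrics configuration ID. + # client.delete_bucket_metrics_configuration({ + # bucket: "examplebucket", + # id: "EntireBucket", + # })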
+ # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_metrics_configuration({ + # bucket: "BucketName", # required + # id: "MetricsId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration AWS API Documentation + # + # @overload delete_bucket_metrics_configuration(params = {}) + # @param [Hash] params ({}) + def delete_bucket_metrics_configuration(params = {}, options = {}) + req = build_request(:delete_bucket_metrics_configuration, params) + req.send_request(options) + end + + # Removes `OwnershipControls` for an Amazon S3 bucket. To use this + # operation, you must have the `s3:PutBucketOwnershipControls` + # permission. For more information about Amazon S3 permissions, see + # [Specifying Permissions in a Policy][1]. + # + # For information about Amazon S3 Object Ownership, see [Using Object + # Ownership][2]. + # + # The following operations are related to + # `DeleteBucketOwnershipControls`: + # + # * GetBucketOwnershipControls + # + # * PutBucketOwnershipControls + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html + # + # @option params [required, String] :bucket + # The Amazon S3 bucket whose `OwnershipControls` you want to delete. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_ownership_controls({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls AWS API Documentation + # + # @overload delete_bucket_ownership_controls(params = {}) + # @param [Hash] params ({}) + def delete_bucket_ownership_controls(params = {}, options = {}) + req = build_request(:delete_bucket_ownership_controls, params) + req.send_request(options) + end + + # This implementation of the DELETE action uses the policy subresource + # to delete the policy of a specified bucket. If you are using an + # identity other than the root user of the Amazon Web Services account + # that owns the bucket, the calling identity must have the + # `DeleteBucketPolicy` permissions on the specified bucket and belong to + # the bucket owner's account to use this operation. + # + # If you don't have `DeleteBucketPolicy` permissions, Amazon S3 returns + # a `403 Access Denied` error. If you have the correct permissions, but + # you're not using an identity that belongs to the bucket owner's + # account, Amazon S3 returns a `405 Method Not Allowed` error. + # + # As a security precaution, the root user of the Amazon Web Services + # account that owns a bucket can always use this operation, even if the + # policy explicitly denies the root user the ability to perform this + # action. + # + # For more information about bucket policies, see [Using Bucket Policies + # and UserPolicies][1]. 
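+ # + # As a rough sketch (not from the official docs; it assumes an existing `client`), both the `403 Access Denied` and `405 Method Not Allowed` cases surface as service errors that can be rescued: + # + # begin + # client.delete_bucket_policy(bucket: "examplebucket") + # rescue Aws::S3::Errors::ServiceError => e + # warn "#{e.code}: #{e.message}" + # end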
+ # + # The following operations are related to `DeleteBucketPolicy`: + # + # * [CreateBucket][2] + # + # * [DeleteObject][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + # + # @option params [required, String] :bucket + # The bucket name. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete bucket policy + # + # # The following example deletes the bucket policy on the specified bucket. + # + # resp = client.delete_bucket_policy({ + # bucket: "examplebucket", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_policy({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy AWS API Documentation + # + # @overload delete_bucket_policy(params = {}) + # @param [Hash] params ({}) + def delete_bucket_policy(params = {}, options = {}) + req = build_request(:delete_bucket_policy, params) + req.send_request(options) + end + + # Deletes the replication configuration from the bucket. + # + # To use this operation, you must have permissions to perform the + # `s3:PutReplicationConfiguration` action. The bucket owner has these + # permissions by default and can grant them to others. For more + # information about permissions, see [Permissions Related to Bucket + # Subresource Operations][1] and [Managing Access Permissions to Your + # Amazon S3 Resources][2]. + # + # It can take a while for the deletion of a replication configuration to + # fully propagate. + # + # + # + # For information about replication configuration, see [Replication][3] + # in the *Amazon S3 User Guide*. + # + # The following operations are related to `DeleteBucketReplication`: + # + # * [PutBucketReplication][4] + # + # * [GetBucketReplication][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html + # + # @option params [required, String] :bucket + # The bucket name. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete bucket replication configuration + # + # # The following example deletes the replication configuration set on the bucket.
+ # + # resp = client.delete_bucket_replication({ + # bucket: "example", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_replication({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication AWS API Documentation + # + # @overload delete_bucket_replication(params = {}) + # @param [Hash] params ({}) + def delete_bucket_replication(params = {}, options = {}) + req = build_request(:delete_bucket_replication, params) + req.send_request(options) + end + + # Deletes the tags from the bucket. + # + # To use this operation, you must have permission to perform the + # `s3:PutBucketTagging` action. By default, the bucket owner has this + # permission and can grant this permission to others. + # + # The following operations are related to `DeleteBucketTagging`: + # + # * [GetBucketTagging][1] + # + # * [PutBucketTagging][2] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html + # + # @option params [required, String] :bucket + # The bucket that has the tag set to be removed. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete bucket tags + # + # # The following example deletes bucket tags. + # + # resp = client.delete_bucket_tagging({ + # bucket: "examplebucket", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_tagging({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging AWS API Documentation + # + # @overload delete_bucket_tagging(params = {}) + # @param [Hash] params ({}) + def delete_bucket_tagging(params = {}, options = {}) + req = build_request(:delete_bucket_tagging, params) + req.send_request(options) + end + + # This action removes the website configuration for a bucket. Amazon S3 + # returns a `200 OK` response upon successfully deleting a website + # configuration on the specified bucket. You will get a `200 OK` + # response if the website configuration you are trying to delete does + # not exist on the bucket. Amazon S3 returns a `404` response if the + # bucket specified in the request does not exist. + # + # This DELETE action requires the `S3:DeleteBucketWebsite` permission. + # By default, only the bucket owner can delete the website configuration + # attached to a bucket. However, bucket owners can grant other users + # permission to delete the website configuration by writing a bucket + # policy granting them the `S3:DeleteBucketWebsite` permission. + # + # For more information about hosting websites, see [Hosting Websites on + # Amazon S3][1]. 
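+ # + # Because deleting a missing website configuration still returns `200 OK`, the call is effectively idempotent; as a minimal sketch (placeholder bucket name): + # + # client.delete_bucket_website(bucket: "examplebucket") + # client.delete_bucket_website(bucket: "examplebucket") # also returns 200 OK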
+ # + # The following operations are related to `DeleteBucketWebsite`: + # + # * [GetBucketWebsite][2] + # + # * [PutBucketWebsite][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html + # + # @option params [required, String] :bucket + # The bucket name for which you want to remove the website + # configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: To delete bucket website configuration + # + # # The following example deletes the bucket website configuration. + # + # resp = client.delete_bucket_website({ + # bucket: "examplebucket", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_bucket_website({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite AWS API Documentation + # + # @overload delete_bucket_website(params = {}) + # @param [Hash] params ({}) + def delete_bucket_website(params = {}, options = {}) + req = build_request(:delete_bucket_website, params) + req.send_request(options) + end + + # Removes the null version (if there is one) of an object and inserts a + # delete marker, which becomes the latest version of the object. If + # there isn't a null version, Amazon S3 does not remove any objects but + # will still respond that the command was successful. + # + # To remove a specific version, you must be the bucket owner and you + # must use the version Id subresource. Using this subresource + # permanently deletes the version. If the object deleted is a delete + # marker, Amazon S3 sets the response header, `x-amz-delete-marker`, to + # true. + # + # If the object you want to delete is in a bucket where the bucket + # versioning configuration is MFA Delete enabled, you must include the + # `x-amz-mfa` request header in the DELETE `versionId` request. Requests + # that include `x-amz-mfa` must use HTTPS. + # + # For more information about MFA Delete, see [Using MFA Delete][1]. To + # see sample requests that use versioning, see [Sample Request][2]. + # + # You can delete objects by explicitly calling DELETE Object or + # configure their lifecycle ([PutBucketLifecycle][3]) to enable Amazon + # S3 to remove them for you. If you want to block users or accounts from + # removing or deleting objects from your bucket, you must deny them the + # `s3:DeleteObject`, `s3:DeleteObjectVersion`, and + # `s3:PutLifecycleConfiguration` actions. + # + # The following action is related to `DeleteObject`: + # + # * [PutObject][4] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the object.
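+ # + # As sketched here (the ARN and key are illustrative), an access point ARN may be passed in place of the bucket name; the following paragraphs describe the hostname and ARN rules in detail: + # + # resp = client.delete_object({ + # bucket: "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point", + # key: "HappyFace.jpg", + # })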
+ # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Key name of the object to delete. + # + # @option params [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # + # @option params [String] :version_id + # VersionId used to reference a specific version of the object. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [Boolean] :bypass_governance_retention + # Indicates whether S3 Object Lock should bypass Governance-mode + # restrictions to process this operation. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::DeleteObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DeleteObjectOutput#delete_marker #delete_marker} => Boolean + # * {Types::DeleteObjectOutput#version_id #version_id} => String + # * {Types::DeleteObjectOutput#request_charged #request_charged} => String + # + # + # @example Example: To delete an object + # + # # The following example deletes an object from an S3 bucket. + # + # resp = client.delete_object({ + # bucket: "examplebucket", + # key: "objectkey.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Example: To delete an object (from a non-versioned bucket) + # + # # The following example deletes an object from a non-versioned bucket. 
+ # + # resp = client.delete_object({ + # bucket: "ExampleBucket", + # key: "HappyFace.jpg", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.delete_object({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # mfa: "MFA", + # version_id: "ObjectVersionId", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.delete_marker #=> Boolean + # resp.version_id #=> String + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject AWS API Documentation + # + # @overload delete_object(params = {}) + # @param [Hash] params ({}) + def delete_object(params = {}, options = {}) + req = build_request(:delete_object, params) + req.send_request(options) + end + + # Removes the entire tag set from the specified object. For more + # information about managing object tags, see [ Object Tagging][1]. + # + # To use this operation, you must have permission to perform the + # `s3:DeleteObjectTagging` action. + # + # To delete tags of a specific object version, add the `versionId` query + # parameter in the request. You will need permission for the + # `s3:DeleteObjectVersionTagging` action. + # + # The following operations are related to + # `DeleteObjectTagging`: + # + # * [PutObjectTagging][2] + # + # * [GetObjectTagging][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the objects from which to remove the + # tags. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # The key that identifies the object in the bucket from which to remove + # all tags. + # + # @option params [String] :version_id + # The versionId of the object that the tag-set will be removed from. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner.
If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::DeleteObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DeleteObjectTaggingOutput#version_id #version_id} => String + # + # + # @example Example: To remove tag set from an object + # + # # The following example removes the tag set associated with the specified object. If the bucket is versioning enabled, the + # # operation removes the tag set from the latest object version. + # + # resp = client.delete_object_tagging({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # version_id: "null", + # } + # + # @example Example: To remove tag set from an object version + # + # # The following example removes the tag set associated with the specified object version. The request specifies both the + # # object key and object version. + # + # resp = client.delete_object_tagging({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", + # }) + # + # resp.to_h outputs the following: + # { + # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.delete_object_tagging({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.version_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging AWS API Documentation + # + # @overload delete_object_tagging(params = {}) + # @param [Hash] params ({}) + def delete_object_tagging(params = {}, options = {}) + req = build_request(:delete_object_tagging, params) + req.send_request(options) + end + + # This action enables you to delete multiple objects from a bucket using + # a single HTTP request. If you know the object keys that you want to + # delete, then this action provides a suitable alternative to sending + # individual delete requests, reducing per-request overhead. + # + # The request contains a list of up to 1000 keys that you want to + # delete. In the XML, you provide the object key names, and optionally, + # version IDs if you want to delete a specific version of the object + # from a versioning-enabled bucket. For each key, Amazon S3 performs a + # delete action and returns the result of that delete, success or + # failure, in the response. Note that if the object specified in the + # request is not found, Amazon S3 returns the result as deleted. + # + # The action supports two modes for the response: verbose and quiet. By + # default, the action uses verbose mode in which the response includes + # the result of deletion of each key in your request. In quiet mode the + # response includes only keys where the delete action encountered an + # error. For a successful deletion, the action does not return any + # information about the delete in the response body. + # + # When performing this action on an MFA Delete enabled bucket and + # attempting to delete any versioned objects, you must include an MFA + # token. If you do not provide one, the entire request will fail, even + # if there are non-versioned objects you are trying to delete.
If you + # provide an invalid token, whether there are versioned keys in the + # request or not, the entire Multi-Object Delete request will fail. For + # information about MFA Delete, see [ MFA Delete][1]. + # + # Finally, the Content-MD5 header is required for all Multi-Object + # Delete requests. Amazon S3 uses the header value to ensure that your + # request body has not been altered in transit. + # + # The following operations are related to `DeleteObjects`: + # + # * [CreateMultipartUpload][2] + # + # * [UploadPart][3] + # + # * [CompleteMultipartUpload][4] + # + # * [ListParts][5] + # + # * [AbortMultipartUpload][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + # + # @option params [required, String] :bucket + # The bucket name containing the objects to delete. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, Types::Delete] :delete + # Container for the request. + # + # @option params [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [Boolean] :bypass_governance_retention + # Specifies whether you want to delete this object even if it has a + # Governance-type Object Lock in place. 
To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # This checksum algorithm must be the same for all parts and it must + # match the checksum value supplied in the `CreateMultipartUpload` + # request. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @return [Types::DeleteObjectsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DeleteObjectsOutput#deleted #deleted} => Array<Types::DeletedObject> + # * {Types::DeleteObjectsOutput#request_charged #request_charged} => String + # * {Types::DeleteObjectsOutput#errors #errors} => Array<Types::Error> + # + # + # @example Example: To delete multiple objects from a versioned bucket + # + # # The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the + # # object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker. + # + # resp = client.delete_objects({ + # bucket: "examplebucket", + # delete: { + # objects: [ + # { + # key: "objectkey1", + # }, + # { + # key: "objectkey2", + # }, + # ], + # quiet: false, + # }, + # }) + # + # resp.to_h outputs the following: + # { + # deleted: [ + # { + # delete_marker: true, + # delete_marker_version_id: "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + # key: "objectkey1", + # }, + # { + # delete_marker: true, + # delete_marker_version_id: "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + # key: "objectkey2", + # }, + # ], + # } + # + # @example Example: To delete multiple object versions from a versioned bucket + # + # # The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object + # # versions and returns the key and versions of deleted objects in the response.
+ # + # resp = client.delete_objects({ + # bucket: "examplebucket", + # delete: { + # objects: [ + # { + # key: "HappyFace.jpg", + # version_id: "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b", + # }, + # { + # key: "HappyFace.jpg", + # version_id: "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd", + # }, + # ], + # quiet: false, + # }, + # }) + # + # resp.to_h outputs the following: + # { + # deleted: [ + # { + # key: "HappyFace.jpg", + # version_id: "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd", + # }, + # { + # key: "HappyFace.jpg", + # version_id: "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b", + # }, + # ], + # } + # + # @example Request syntax with placeholder values + # + # resp = client.delete_objects({ + # bucket: "BucketName", # required + # delete: { # required + # objects: [ # required + # { + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # }, + # ], + # quiet: false, + # }, + # mfa: "MFA", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # + # @example Response structure + # + # resp.deleted #=> Array + # resp.deleted[0].key #=> String + # resp.deleted[0].version_id #=> String + # resp.deleted[0].delete_marker #=> Boolean + # resp.deleted[0].delete_marker_version_id #=> String + # resp.request_charged #=> String, one of "requester" + # resp.errors #=> Array + # resp.errors[0].key #=> String + # resp.errors[0].version_id #=> String + # resp.errors[0].code #=> String + # resp.errors[0].message #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects AWS API Documentation + # + # @overload delete_objects(params = {}) + # @param [Hash] params ({}) + def delete_objects(params = {}, options = {}) + req = build_request(:delete_objects, params) + req.send_request(options) + end + + # Removes the `PublicAccessBlock` configuration for an Amazon S3 bucket. + # To use this operation, you must have the + # `s3:PutBucketPublicAccessBlock` permission. For more information about + # permissions, see [Permissions Related to Bucket Subresource + # Operations][1] and [Managing Access Permissions to Your Amazon S3 + # Resources][2]. + # + # The following operations are related to `DeletePublicAccessBlock`: + # + # * [Using Amazon S3 Block Public Access][3] + # + # * [GetPublicAccessBlock][4] + # + # * [PutPublicAccessBlock][5] + # + # * [GetBucketPolicyStatus][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html + # + # @option params [required, String] :bucket + # The Amazon S3 bucket whose `PublicAccessBlock` configuration you want + # to delete. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
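+ # + # @example Example: Removing the block and checking policy status (illustrative sketch) + # + # # Not an official curated example; the bucket name is a placeholder. + # client.delete_public_access_block(bucket: "examplebucket") + # resp = client.get_bucket_policy_status(bucket: "examplebucket") + # resp.policy_status.is_public #=> Boolean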
+ # + # @example Request syntax with placeholder values + # + # resp = client.delete_public_access_block({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock AWS API Documentation + # + # @overload delete_public_access_block(params = {}) + # @param [Hash] params ({}) + def delete_public_access_block(params = {}, options = {}) + req = build_request(:delete_public_access_block, params) + req.send_request(options) + end + + # This implementation of the GET action uses the `accelerate` + # subresource to return the Transfer Acceleration state of a bucket, + # which is either `Enabled` or `Suspended`. Amazon S3 Transfer + # Acceleration is a bucket-level feature that enables you to perform + # faster data transfers to and from Amazon S3. + # + # To use this operation, you must have permission to perform the + # `s3:GetAccelerateConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to your Amazon S3 Resources][2] in the *Amazon S3 User + # Guide*. + # + # You set the Transfer Acceleration state of an existing bucket to + # `Enabled` or `Suspended` by using the + # [PutBucketAccelerateConfiguration][3] operation. + # + # A GET `accelerate` request does not return a state value for a bucket + # that has no transfer acceleration state. A bucket has no Transfer + # Acceleration state if a state has never been set on the bucket. + # + # For more information about transfer acceleration, see [Transfer + # Acceleration][4] in the Amazon S3 User Guide. + # + # **Related Resources** + # + # * [PutBucketAccelerateConfiguration][3] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + # + # @option params [required, String] :bucket + # The name of the bucket for which the accelerate configuration is + # retrieved. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
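+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # A bucket that has never had acceleration configured returns no
+    #   # state, so guard against a nil status; bucket name is a placeholder.
+    #   resp = client.get_bucket_accelerate_configuration(bucket: "example-bucket")
+    #   accelerated = (resp.status == "Enabled") # resp.status may be nil if never set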
+ # + # @return [Types::GetBucketAccelerateConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketAccelerateConfigurationOutput#status #status} => String + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_accelerate_configuration({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.status #=> String, one of "Enabled", "Suspended" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration AWS API Documentation + # + # @overload get_bucket_accelerate_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_accelerate_configuration(params = {}, options = {}) + req = build_request(:get_bucket_accelerate_configuration, params) + req.send_request(options) + end + + # This implementation of the `GET` action uses the `acl` subresource to + # return the access control list (ACL) of a bucket. To use `GET` to + # return the ACL of the bucket, you must have `READ_ACP` access to the + # bucket. If `READ_ACP` permission is granted to the anonymous user, you + # can return the ACL of the bucket without using an authorization + # header. + # + # If your bucket uses the bucket owner enforced setting for S3 Object + # Ownership, requests to read ACLs are still supported and return the + # `bucket-owner-full-control` ACL with the owner being the account that + # created the bucket. For more information, see [ Controlling object + # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*. + # + # + # + # **Related Resources** + # + # * [ListObjects][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html + # + # @option params [required, String] :bucket + # Specifies the S3 bucket whose ACL is being requested. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
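+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # Print each grantee and its permission from the bucket ACL;
+    #   # the bucket name is a placeholder.
+    #   resp = client.get_bucket_acl(bucket: "example-bucket")
+    #   resp.grants.each do |grant|
+    #     puts "#{grant.grantee.type}: #{grant.permission}"
+    #   end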
+    #
+    # @return [Types::GetBucketAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::GetBucketAclOutput#owner #owner} => Types::Owner
+    #   * {Types::GetBucketAclOutput#grants #grants} => Array<Types::Grant>
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.get_bucket_acl({
+    #     bucket: "BucketName", # required
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.owner.display_name #=> String
+    #   resp.owner.id #=> String
+    #   resp.grants #=> Array
+    #   resp.grants[0].grantee.display_name #=> String
+    #   resp.grants[0].grantee.email_address #=> String
+    #   resp.grants[0].grantee.id #=> String
+    #   resp.grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group"
+    #   resp.grants[0].grantee.uri #=> String
+    #   resp.grants[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP"
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl AWS API Documentation
+    #
+    # @overload get_bucket_acl(params = {})
+    # @param [Hash] params ({})
+    def get_bucket_acl(params = {}, options = {})
+      req = build_request(:get_bucket_acl, params)
+      req.send_request(options)
+    end
+
+    # This implementation of the GET action returns an analytics
+    # configuration (identified by the analytics configuration ID) from the
+    # bucket.
+    #
+    # To use this operation, you must have permissions to perform the
+    # `s3:GetAnalyticsConfiguration` action. The bucket owner has this
+    # permission by default. The bucket owner can grant this permission to
+    # others. For more information about permissions, see [Permissions
+    # Related to Bucket Subresource Operations][1] and [Managing Access
+    # Permissions to Your Amazon S3 Resources][2] in the *Amazon S3 User
+    # Guide*.
+    #
+    # For information about the Amazon S3 analytics feature, see [Amazon S3
+    # Analytics – Storage Class Analysis][3] in the *Amazon S3 User Guide*.
+    #
+    # **Related Resources**
+    #
+    # * [DeleteBucketAnalyticsConfiguration][4]
+    #
+    # * [ListBucketAnalyticsConfigurations][5]
+    #
+    # * [PutBucketAnalyticsConfiguration][6]
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+    # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+    # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+    # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+    #
+    # @option params [required, String] :bucket
+    #   The name of the bucket from which an analytics configuration is
+    #   retrieved.
+    #
+    # @option params [required, String] :id
+    #   The ID that identifies the analytics configuration.
+    #
+    # @option params [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
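+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # Read back where a storage class analysis export lands, if an
+    #   # export is configured; bucket and ID below are placeholders.
+    #   resp = client.get_bucket_analytics_configuration(bucket: "example-bucket", id: "report-1")
+    #   export = resp.analytics_configuration.storage_class_analysis.data_export
+    #   puts export.destination.s3_bucket_destination.bucket if export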
+ # + # @return [Types::GetBucketAnalyticsConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketAnalyticsConfigurationOutput#analytics_configuration #analytics_configuration} => Types::AnalyticsConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_analytics_configuration({ + # bucket: "BucketName", # required + # id: "AnalyticsId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.analytics_configuration.id #=> String + # resp.analytics_configuration.filter.prefix #=> String + # resp.analytics_configuration.filter.tag.key #=> String + # resp.analytics_configuration.filter.tag.value #=> String + # resp.analytics_configuration.filter.and.prefix #=> String + # resp.analytics_configuration.filter.and.tags #=> Array + # resp.analytics_configuration.filter.and.tags[0].key #=> String + # resp.analytics_configuration.filter.and.tags[0].value #=> String + # resp.analytics_configuration.storage_class_analysis.data_export.output_schema_version #=> String, one of "V_1" + # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.format #=> String, one of "CSV" + # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.bucket_account_id #=> String + # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.bucket #=> String + # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.prefix #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration AWS API Documentation + # + # @overload get_bucket_analytics_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_analytics_configuration(params = {}, options = {}) + req = build_request(:get_bucket_analytics_configuration, params) + req.send_request(options) + end + + # Returns the Cross-Origin Resource Sharing (CORS) configuration + # information set for the bucket. + # + # To use this operation, you must have permission to perform the + # `s3:GetBucketCORS` action. By default, the bucket owner has this + # permission and can grant it to others. + # + # For more information about CORS, see [ Enabling Cross-Origin Resource + # Sharing][1]. + # + # The following operations are related to `GetBucketCors`: + # + # * [PutBucketCors][2] + # + # * [DeleteBucketCors][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html + # + # @option params [required, String] :bucket + # The bucket name for which to get the cors configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
+ # + # @return [Types::GetBucketCorsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketCorsOutput#cors_rules #cors_rules} => Array<Types::CORSRule> + # + # + # @example Example: To get cors configuration set on a bucket + # + # # The following example returns cross-origin resource sharing (CORS) configuration set on a bucket. + # + # resp = client.get_bucket_cors({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # cors_rules: [ + # { + # allowed_headers: [ + # "Authorization", + # ], + # allowed_methods: [ + # "GET", + # ], + # allowed_origins: [ + # "*", + # ], + # max_age_seconds: 3000, + # }, + # ], + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_cors({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.cors_rules #=> Array + # resp.cors_rules[0].id #=> String + # resp.cors_rules[0].allowed_headers #=> Array + # resp.cors_rules[0].allowed_headers[0] #=> String + # resp.cors_rules[0].allowed_methods #=> Array + # resp.cors_rules[0].allowed_methods[0] #=> String + # resp.cors_rules[0].allowed_origins #=> Array + # resp.cors_rules[0].allowed_origins[0] #=> String + # resp.cors_rules[0].expose_headers #=> Array + # resp.cors_rules[0].expose_headers[0] #=> String + # resp.cors_rules[0].max_age_seconds #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors AWS API Documentation + # + # @overload get_bucket_cors(params = {}) + # @param [Hash] params ({}) + def get_bucket_cors(params = {}, options = {}) + req = build_request(:get_bucket_cors, params) + req.send_request(options) + end + + # Returns the default encryption configuration for an Amazon S3 bucket. + # If the bucket does not have a default encryption configuration, + # GetBucketEncryption returns + # `ServerSideEncryptionConfigurationNotFoundError`. + # + # For information about the Amazon S3 default encryption feature, see + # [Amazon S3 Default Bucket Encryption][1]. + # + # To use this operation, you must have permission to perform the + # `s3:GetEncryptionConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][2] and [Managing Access + # Permissions to Your Amazon S3 Resources][3]. + # + # The following operations are related to `GetBucketEncryption`: + # + # * [PutBucketEncryption][4] + # + # * [DeleteBucketEncryption][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html + # + # @option params [required, String] :bucket + # The name of the bucket from which the server-side encryption + # configuration is retrieved. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. 
If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketEncryptionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketEncryptionOutput#server_side_encryption_configuration #server_side_encryption_configuration} => Types::ServerSideEncryptionConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_encryption({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.server_side_encryption_configuration.rules #=> Array + # resp.server_side_encryption_configuration.rules[0].apply_server_side_encryption_by_default.sse_algorithm #=> String, one of "AES256", "aws:kms" + # resp.server_side_encryption_configuration.rules[0].apply_server_side_encryption_by_default.kms_master_key_id #=> String + # resp.server_side_encryption_configuration.rules[0].bucket_key_enabled #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption AWS API Documentation + # + # @overload get_bucket_encryption(params = {}) + # @param [Hash] params ({}) + def get_bucket_encryption(params = {}, options = {}) + req = build_request(:get_bucket_encryption, params) + req.send_request(options) + end + + # Gets the S3 Intelligent-Tiering configuration from the specified + # bucket. + # + # The S3 Intelligent-Tiering storage class is designed to optimize + # storage costs by automatically moving data to the most cost-effective + # storage access tier, without performance impact or operational + # overhead. S3 Intelligent-Tiering delivers automatic cost savings in + # three low latency and high throughput access tiers. To get the lowest + # storage cost on data that can be accessed in minutes to hours, you can + # choose to activate additional archiving capabilities. + # + # The S3 Intelligent-Tiering storage class is the ideal storage class + # for data with unknown, changing, or unpredictable access patterns, + # independent of object size or retention period. If the size of an + # object is less than 128 KB, it is not monitored and not eligible for + # auto-tiering. Smaller objects can be stored, but they are always + # charged at the Frequent Access tier rates in the S3 + # Intelligent-Tiering storage class. + # + # For more information, see [Storage class for automatically optimizing + # frequently and infrequently accessed objects][1]. + # + # Operations related to `GetBucketIntelligentTieringConfiguration` + # include: + # + # * [DeleteBucketIntelligentTieringConfiguration][2] + # + # * [PutBucketIntelligentTieringConfiguration][3] + # + # * [ListBucketIntelligentTieringConfigurations][4] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # + # @option params [required, String] :id + # The ID used to identify the S3 Intelligent-Tiering configuration. 
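+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # Inspect which archive tiers a configuration opts into and after
+    #   # how many days objects move there; bucket and ID are placeholders.
+    #   resp = client.get_bucket_intelligent_tiering_configuration(bucket: "example-bucket", id: "config-1")
+    #   resp.intelligent_tiering_configuration.tierings.each do |tiering|
+    #     puts "#{tiering.access_tier} after #{tiering.days} days"
+    #   end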
+ # + # @return [Types::GetBucketIntelligentTieringConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketIntelligentTieringConfigurationOutput#intelligent_tiering_configuration #intelligent_tiering_configuration} => Types::IntelligentTieringConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_intelligent_tiering_configuration({ + # bucket: "BucketName", # required + # id: "IntelligentTieringId", # required + # }) + # + # @example Response structure + # + # resp.intelligent_tiering_configuration.id #=> String + # resp.intelligent_tiering_configuration.filter.prefix #=> String + # resp.intelligent_tiering_configuration.filter.tag.key #=> String + # resp.intelligent_tiering_configuration.filter.tag.value #=> String + # resp.intelligent_tiering_configuration.filter.and.prefix #=> String + # resp.intelligent_tiering_configuration.filter.and.tags #=> Array + # resp.intelligent_tiering_configuration.filter.and.tags[0].key #=> String + # resp.intelligent_tiering_configuration.filter.and.tags[0].value #=> String + # resp.intelligent_tiering_configuration.status #=> String, one of "Enabled", "Disabled" + # resp.intelligent_tiering_configuration.tierings #=> Array + # resp.intelligent_tiering_configuration.tierings[0].days #=> Integer + # resp.intelligent_tiering_configuration.tierings[0].access_tier #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration AWS API Documentation + # + # @overload get_bucket_intelligent_tiering_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_intelligent_tiering_configuration(params = {}, options = {}) + req = build_request(:get_bucket_intelligent_tiering_configuration, params) + req.send_request(options) + end + + # Returns an inventory configuration (identified by the inventory + # configuration ID) from the bucket. + # + # To use this operation, you must have permissions to perform the + # `s3:GetInventoryConfiguration` action. The bucket owner has this + # permission by default and can grant this permission to others. For + # more information about permissions, see [Permissions Related to Bucket + # Subresource Operations][1] and [Managing Access Permissions to Your + # Amazon S3 Resources][2]. + # + # For information about the Amazon S3 inventory feature, see [Amazon S3 + # Inventory][3]. + # + # The following operations are related to + # `GetBucketInventoryConfiguration`: + # + # * [DeleteBucketInventoryConfiguration][4] + # + # * [ListBucketInventoryConfigurations][5] + # + # * [PutBucketInventoryConfiguration][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the inventory configuration to + # retrieve. 
+ # + # @option params [required, String] :id + # The ID used to identify the inventory configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketInventoryConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketInventoryConfigurationOutput#inventory_configuration #inventory_configuration} => Types::InventoryConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_inventory_configuration({ + # bucket: "BucketName", # required + # id: "InventoryId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.inventory_configuration.destination.s3_bucket_destination.account_id #=> String + # resp.inventory_configuration.destination.s3_bucket_destination.bucket #=> String + # resp.inventory_configuration.destination.s3_bucket_destination.format #=> String, one of "CSV", "ORC", "Parquet" + # resp.inventory_configuration.destination.s3_bucket_destination.prefix #=> String + # resp.inventory_configuration.destination.s3_bucket_destination.encryption.ssekms.key_id #=> String + # resp.inventory_configuration.is_enabled #=> Boolean + # resp.inventory_configuration.filter.prefix #=> String + # resp.inventory_configuration.id #=> String + # resp.inventory_configuration.included_object_versions #=> String, one of "All", "Current" + # resp.inventory_configuration.optional_fields #=> Array + # resp.inventory_configuration.optional_fields[0] #=> String, one of "Size", "LastModifiedDate", "StorageClass", "ETag", "IsMultipartUploaded", "ReplicationStatus", "EncryptionStatus", "ObjectLockRetainUntilDate", "ObjectLockMode", "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", "BucketKeyStatus", "ChecksumAlgorithm" + # resp.inventory_configuration.schedule.frequency #=> String, one of "Daily", "Weekly" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration AWS API Documentation + # + # @overload get_bucket_inventory_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_inventory_configuration(params = {}, options = {}) + req = build_request(:get_bucket_inventory_configuration, params) + req.send_request(options) + end + + # For an updated version of this API, see + # [GetBucketLifecycleConfiguration][1]. If you configured a bucket + # lifecycle using the `filter` element, you should see the updated + # version of this topic. This topic is provided for backward + # compatibility. + # + # Returns the lifecycle configuration information set on the bucket. For + # information about lifecycle configuration, see [Object Lifecycle + # Management][2]. + # + # To use this operation, you must have permission to perform the + # `s3:GetLifecycleConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][3] and [Managing Access + # Permissions to Your Amazon S3 Resources][4]. + # + # `GetBucketLifecycle` has the following special error: + # + # * Error code: `NoSuchLifecycleConfiguration` + # + # * Description: The lifecycle configuration does not exist. 
+    #
+    # * HTTP Status Code: 404 Not Found
+    #
+    # * SOAP Fault Code Prefix: Client
+    #
+    # The following operations are related to `GetBucketLifecycle`:
+    #
+    # * [GetBucketLifecycleConfiguration][1]
+    #
+    # * [PutBucketLifecycle][5]
+    #
+    # * [DeleteBucketLifecycle][6]
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+    # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+    # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+    # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+    #
+    # @option params [required, String] :bucket
+    #   The name of the bucket for which to get the lifecycle information.
+    #
+    # @option params [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    #
+    # @return [Types::GetBucketLifecycleOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::GetBucketLifecycleOutput#rules #rules} => Array<Types::Rule>
+    #
+    #
+    # @example Example: To get lifecycle configuration on a bucket
+    #
+    #   # The following example returns the lifecycle configuration set on the specified bucket.
+    #
+    #   resp = client.get_bucket_lifecycle({
+    #     bucket: "acl1",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     rules: [
+    #       {
+    #         expiration: {
+    #           days: 1,
+    #         },
+    #         id: "delete logs",
+    #         prefix: "123/",
+    #         status: "Enabled",
+    #       },
+    #     ],
+    #   }
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.get_bucket_lifecycle({
+    #     bucket: "BucketName", # required
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.rules #=> Array
+    #   resp.rules[0].expiration.date #=> Time
+    #   resp.rules[0].expiration.days #=> Integer
+    #   resp.rules[0].expiration.expired_object_delete_marker #=> Boolean
+    #   resp.rules[0].id #=> String
+    #   resp.rules[0].prefix #=> String
+    #   resp.rules[0].status #=> String, one of "Enabled", "Disabled"
+    #   resp.rules[0].transition.date #=> Time
+    #   resp.rules[0].transition.days #=> Integer
+    #   resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
+    #   resp.rules[0].noncurrent_version_transition.noncurrent_days #=> Integer
+    #   resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
+    #   resp.rules[0].noncurrent_version_transition.newer_noncurrent_versions #=> Integer
+    #   resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
+    #   resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
+    #   resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle AWS API Documentation
+    #
+    # @overload get_bucket_lifecycle(params = {})
+    # @param [Hash] params ({})
+    def get_bucket_lifecycle(params = {}, options = {})
+      req = build_request(:get_bucket_lifecycle, params)
+      req.send_request(options)
+    end
+
+    # Bucket lifecycle configuration now supports specifying a lifecycle
+    # rule using an object key name prefix, one or more object tags, or a
+    # combination of both. Accordingly, this section describes the latest
+    # API. The response describes the new filter element that you can use to
+    # specify a filter to select a subset of objects to which the rule
+    # applies. If you are using a previous version of the lifecycle
+    # configuration, it still works. For the earlier action, see
+    # [GetBucketLifecycle][1].
+    #
+    #
+    #
+    # Returns the lifecycle configuration information set on the bucket. For
+    # information about lifecycle configuration, see [Object Lifecycle
+    # Management][2].
+    #
+    # To use this operation, you must have permission to perform the
+    # `s3:GetLifecycleConfiguration` action. The bucket owner has this
+    # permission by default. The bucket owner can grant this permission to
+    # others. For more information about permissions, see [Permissions
+    # Related to Bucket Subresource Operations][3] and [Managing Access
+    # Permissions to Your Amazon S3 Resources][4].
+    #
+    # `GetBucketLifecycleConfiguration` has the following special error:
+    #
+    # * Error code: `NoSuchLifecycleConfiguration`
+    #
+    # * Description: The lifecycle configuration does not exist.
+    #
+    # * HTTP Status Code: 404 Not Found
+    #
+    # * SOAP Fault Code Prefix: Client
+    #
+    # The following operations are related to
+    # `GetBucketLifecycleConfiguration`:
+    #
+    # * [GetBucketLifecycle][1]
+    #
+    # * [PutBucketLifecycle][5]
+    #
+    # * [DeleteBucketLifecycle][6]
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html
+    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+    # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+    # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+    # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+    #
+    # @option params [required, String] :bucket
+    #   The name of the bucket for which to get the lifecycle information.
+    #
+    # @option params [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    #
+    # @return [Types::GetBucketLifecycleConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::GetBucketLifecycleConfigurationOutput#rules #rules} => Array<Types::LifecycleRule>
+    #
+    #
+    # @example Example: To get lifecycle configuration on a bucket
+    #
+    #   # The following example retrieves the lifecycle configuration set on a bucket.
+ # + # resp = client.get_bucket_lifecycle_configuration({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # rules: [ + # { + # id: "Rule for TaxDocs/", + # prefix: "TaxDocs", + # status: "Enabled", + # transitions: [ + # { + # days: 365, + # storage_class: "STANDARD_IA", + # }, + # ], + # }, + # ], + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_lifecycle_configuration({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.rules #=> Array + # resp.rules[0].expiration.date #=> Time + # resp.rules[0].expiration.days #=> Integer + # resp.rules[0].expiration.expired_object_delete_marker #=> Boolean + # resp.rules[0].id #=> String + # resp.rules[0].prefix #=> String + # resp.rules[0].filter.prefix #=> String + # resp.rules[0].filter.tag.key #=> String + # resp.rules[0].filter.tag.value #=> String + # resp.rules[0].filter.object_size_greater_than #=> Integer + # resp.rules[0].filter.object_size_less_than #=> Integer + # resp.rules[0].filter.and.prefix #=> String + # resp.rules[0].filter.and.tags #=> Array + # resp.rules[0].filter.and.tags[0].key #=> String + # resp.rules[0].filter.and.tags[0].value #=> String + # resp.rules[0].filter.and.object_size_greater_than #=> Integer + # resp.rules[0].filter.and.object_size_less_than #=> Integer + # resp.rules[0].status #=> String, one of "Enabled", "Disabled" + # resp.rules[0].transitions #=> Array + # resp.rules[0].transitions[0].date #=> Time + # resp.rules[0].transitions[0].days #=> Integer + # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" + # resp.rules[0].noncurrent_version_transitions #=> Array + # resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer + # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" + # resp.rules[0].noncurrent_version_transitions[0].newer_noncurrent_versions #=> Integer + # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer + # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer + # resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration AWS API Documentation + # + # @overload get_bucket_lifecycle_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_lifecycle_configuration(params = {}, options = {}) + req = build_request(:get_bucket_lifecycle_configuration, params) + req.send_request(options) + end + + # Returns the Region the bucket resides in. You set the bucket's Region + # using the `LocationConstraint` request parameter in a `CreateBucket` + # request. For more information, see [CreateBucket][1]. + # + # To use this implementation of the operation, you must be the bucket + # owner. + # + # To use this API against an access point, provide the alias of the + # access point in place of the bucket name. 
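+    #
+    # As an illustrative aside (not part of the generated API reference):
+    # buckets in the US East (N. Virginia) Region return an empty
+    # `LocationConstraint`, so callers often map an empty value back to
+    # `us-east-1`. A minimal sketch, with a placeholder bucket name:
+    #
+    #   resp = client.get_bucket_location(bucket: "example-bucket")
+    #   region = resp.location_constraint.to_s.empty? ? "us-east-1" : resp.location_constraint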
+ # + # The following operations are related to `GetBucketLocation`: + # + # * [GetObject][2] + # + # * [CreateBucket][1] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the location. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketLocationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketLocationOutput#location_constraint #location_constraint} => String + # + # + # @example Example: To get bucket location + # + # # The following example returns bucket location. + # + # resp = client.get_bucket_location({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # location_constraint: "us-west-2", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_location({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.location_constraint #=> String, one of "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ca-central-1", "cn-north-1", "cn-northwest-1", "EU", "eu-central-1", "eu-north-1", "eu-south-1", "eu-west-1", "eu-west-2", "eu-west-3", "me-south-1", "sa-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation AWS API Documentation + # + # @overload get_bucket_location(params = {}) + # @param [Hash] params ({}) + def get_bucket_location(params = {}, options = {}) + req = build_request(:get_bucket_location, params) + req.send_request(options) + end + + # Returns the logging status of a bucket and the permissions users have + # to view and modify that status. To use GET, you must be the bucket + # owner. + # + # The following operations are related to `GetBucketLogging`: + # + # * [CreateBucket][1] + # + # * [PutBucketLogging][2] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html + # + # @option params [required, String] :bucket + # The bucket name for which to get the logging information. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
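+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # `logging_enabled` is nil when the bucket has no logging
+    #   # configured; the bucket name is a placeholder.
+    #   resp = client.get_bucket_logging(bucket: "example-bucket")
+    #   if (logging = resp.logging_enabled)
+    #     puts "logs go to #{logging.target_bucket}/#{logging.target_prefix}"
+    #   end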
+ # + # @return [Types::GetBucketLoggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketLoggingOutput#logging_enabled #logging_enabled} => Types::LoggingEnabled + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_logging({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.logging_enabled.target_bucket #=> String + # resp.logging_enabled.target_grants #=> Array + # resp.logging_enabled.target_grants[0].grantee.display_name #=> String + # resp.logging_enabled.target_grants[0].grantee.email_address #=> String + # resp.logging_enabled.target_grants[0].grantee.id #=> String + # resp.logging_enabled.target_grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group" + # resp.logging_enabled.target_grants[0].grantee.uri #=> String + # resp.logging_enabled.target_grants[0].permission #=> String, one of "FULL_CONTROL", "READ", "WRITE" + # resp.logging_enabled.target_prefix #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging AWS API Documentation + # + # @overload get_bucket_logging(params = {}) + # @param [Hash] params ({}) + def get_bucket_logging(params = {}, options = {}) + req = build_request(:get_bucket_logging, params) + req.send_request(options) + end + + # Gets a metrics configuration (specified by the metrics configuration + # ID) from the bucket. Note that this doesn't include the daily storage + # metrics. + # + # To use this operation, you must have permissions to perform the + # `s3:GetMetricsConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. + # + # For information about CloudWatch request metrics for Amazon S3, see + # [Monitoring Metrics with Amazon CloudWatch][3]. + # + # The following operations are related to + # `GetBucketMetricsConfiguration`: + # + # * [PutBucketMetricsConfiguration][4] + # + # * [DeleteBucketMetricsConfiguration][5] + # + # * [ListBucketMetricsConfigurations][6] + # + # * [Monitoring Metrics with Amazon CloudWatch][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the metrics configuration to + # retrieve. + # + # @option params [required, String] :id + # The ID used to identify the metrics configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
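+    #
+    # @example Usage sketch (illustrative note, not part of the generated API reference)
+    #
+    #   # A metrics configuration with no filter applies to every object in
+    #   # the bucket; bucket and ID below are placeholders, and a filter may
+    #   # carry a prefix, a tag, or an access point ARN.
+    #   resp = client.get_bucket_metrics_configuration(bucket: "example-bucket", id: "EntireBucket")
+    #   filter = resp.metrics_configuration.filter
+    #   puts(filter ? "scoped to prefix #{filter.prefix}" : "applies to the whole bucket")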
+ # + # @return [Types::GetBucketMetricsConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketMetricsConfigurationOutput#metrics_configuration #metrics_configuration} => Types::MetricsConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_metrics_configuration({ + # bucket: "BucketName", # required + # id: "MetricsId", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.metrics_configuration.id #=> String + # resp.metrics_configuration.filter.prefix #=> String + # resp.metrics_configuration.filter.tag.key #=> String + # resp.metrics_configuration.filter.tag.value #=> String + # resp.metrics_configuration.filter.access_point_arn #=> String + # resp.metrics_configuration.filter.and.prefix #=> String + # resp.metrics_configuration.filter.and.tags #=> Array + # resp.metrics_configuration.filter.and.tags[0].key #=> String + # resp.metrics_configuration.filter.and.tags[0].value #=> String + # resp.metrics_configuration.filter.and.access_point_arn #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration AWS API Documentation + # + # @overload get_bucket_metrics_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_metrics_configuration(params = {}, options = {}) + req = build_request(:get_bucket_metrics_configuration, params) + req.send_request(options) + end + + # No longer used, see [GetBucketNotificationConfiguration][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the notification + # configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::NotificationConfigurationDeprecated] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::NotificationConfigurationDeprecated#topic_configuration #topic_configuration} => Types::TopicConfigurationDeprecated + # * {Types::NotificationConfigurationDeprecated#queue_configuration #queue_configuration} => Types::QueueConfigurationDeprecated + # * {Types::NotificationConfigurationDeprecated#cloud_function_configuration #cloud_function_configuration} => Types::CloudFunctionConfiguration + # + # + # @example Example: To get notification configuration set on a bucket + # + # # The following example returns notification configuration set on a bucket. 
+    #
+    #   resp = client.get_bucket_notification({
+    #     bucket: "examplebucket",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     queue_configuration: {
+    #       event: "s3:ObjectCreated:Put",
+    #       events: [
+    #         "s3:ObjectCreated:Put",
+    #       ],
+    #       id: "MDQ2OGQ4NDEtOTBmNi00YTM4LTk0NzYtZDIwN2I3NWQ1NjIx",
+    #       queue: "arn:aws:sqs:us-east-1:acct-id:S3ObjectCreatedEventQueue",
+    #     },
+    #     topic_configuration: {
+    #       event: "s3:ObjectCreated:Copy",
+    #       events: [
+    #         "s3:ObjectCreated:Copy",
+    #       ],
+    #       id: "YTVkMWEzZGUtNTY1NS00ZmE2LWJjYjktMmRlY2QwODFkNTJi",
+    #       topic: "arn:aws:sns:us-east-1:acct-id:S3ObjectCreatedEventTopic",
+    #     },
+    #   }
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.get_bucket_notification({
+    #     bucket: "BucketName", # required
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.topic_configuration.id #=> String
+    #   resp.topic_configuration.events #=> Array
+    #   resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
+    #   resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
+    #   resp.topic_configuration.topic #=> String
+    #
resp.queue_configuration.id #=> String + # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.queue_configuration.events #=> Array + # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.queue_configuration.queue #=> String + # resp.cloud_function_configuration.id #=> String + # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.cloud_function_configuration.events #=> Array + # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", 
"s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.cloud_function_configuration.cloud_function #=> String + # resp.cloud_function_configuration.invocation_role #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification AWS API Documentation + # + # @overload get_bucket_notification(params = {}) + # @param [Hash] params ({}) + def get_bucket_notification(params = {}, options = {}) + req = build_request(:get_bucket_notification, params) + req.send_request(options) + end + + # Returns the notification configuration of a bucket. + # + # If notifications are not enabled on the bucket, the action returns an + # empty `NotificationConfiguration` element. + # + # By default, you must be the bucket owner to read the notification + # configuration of a bucket. However, the bucket owner can use a bucket + # policy to grant permission to other users to read this configuration + # with the `s3:GetBucketNotification` permission. + # + # For more information about setting and reading the notification + # configuration on a bucket, see [Setting Up Notification of Bucket + # Events][1]. For more information about bucket policies, see [Using + # Bucket Policies][2]. + # + # The following action is related to `GetBucketNotification`: + # + # * [PutBucketNotification][3] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the notification + # configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
+ # + # @return [Types::NotificationConfiguration] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::NotificationConfiguration#topic_configurations #topic_configurations} => Array<Types::TopicConfiguration> + # * {Types::NotificationConfiguration#queue_configurations #queue_configurations} => Array<Types::QueueConfiguration> + # * {Types::NotificationConfiguration#lambda_function_configurations #lambda_function_configurations} => Array<Types::LambdaFunctionConfiguration> + # * {Types::NotificationConfiguration#event_bridge_configuration #event_bridge_configuration} => Types::EventBridgeConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_notification_configuration({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.topic_configurations #=> Array + # resp.topic_configurations[0].id #=> String + # resp.topic_configurations[0].topic_arn #=> String + # resp.topic_configurations[0].events #=> Array + # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.topic_configurations[0].filter.key.filter_rules #=> Array + # resp.topic_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" + # resp.topic_configurations[0].filter.key.filter_rules[0].value #=> String + # resp.queue_configurations #=> Array + # resp.queue_configurations[0].id #=> String + # resp.queue_configurations[0].queue_arn #=> String + # resp.queue_configurations[0].events #=> Array + # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.queue_configurations[0].filter.key.filter_rules #=> Array + # resp.queue_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" + # resp.queue_configurations[0].filter.key.filter_rules[0].value #=> String + # 
resp.lambda_function_configurations #=> Array + # resp.lambda_function_configurations[0].id #=> String + # resp.lambda_function_configurations[0].lambda_function_arn #=> String + # resp.lambda_function_configurations[0].events #=> Array + # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" + # resp.lambda_function_configurations[0].filter.key.filter_rules #=> Array + # resp.lambda_function_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" + # resp.lambda_function_configurations[0].filter.key.filter_rules[0].value #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration AWS API Documentation + # + # @overload get_bucket_notification_configuration(params = {}) + # @param [Hash] params ({}) + def get_bucket_notification_configuration(params = {}, options = {}) + req = build_request(:get_bucket_notification_configuration, params) + req.send_request(options) + end + + # Retrieves `OwnershipControls` for an Amazon S3 bucket. To use this + # operation, you must have the `s3:GetBucketOwnershipControls` + # permission. For more information about Amazon S3 permissions, see + # [Specifying permissions in a policy][1]. + # + # For information about Amazon S3 Object Ownership, see [Using Object + # Ownership][2]. + # + # The following operations are related to `GetBucketOwnershipControls`: + # + # * PutBucketOwnershipControls + # + # * DeleteBucketOwnershipControls + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose `OwnershipControls` you want to + # retrieve. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
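+ #
+ # @example Usage sketch: reading the object ownership setting
+ #
+ #   # Illustrative sketch, not generated documentation. Assumes `client`
+ #   # is a configured Aws::S3::Client and "examplebucket" is a bucket you
+ #   # own with ownership controls already configured.
+ #   resp = client.get_bucket_ownership_controls(bucket: "examplebucket")
+ #   puts resp.ownership_controls.rules.first.object_ownership
+ #   #=> e.g. "BucketOwnerEnforced"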
+ # + # @return [Types::GetBucketOwnershipControlsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketOwnershipControlsOutput#ownership_controls #ownership_controls} => Types::OwnershipControls + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_ownership_controls({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.ownership_controls.rules #=> Array + # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls AWS API Documentation + # + # @overload get_bucket_ownership_controls(params = {}) + # @param [Hash] params ({}) + def get_bucket_ownership_controls(params = {}, options = {}) + req = build_request(:get_bucket_ownership_controls, params) + req.send_request(options) + end + + # Returns the policy of a specified bucket. If you are using an identity + # other than the root user of the Amazon Web Services account that owns + # the bucket, the calling identity must have the `GetBucketPolicy` + # permissions on the specified bucket and belong to the bucket owner's + # account in order to use this operation. + # + # If you don't have `GetBucketPolicy` permissions, Amazon S3 returns a + # `403 Access Denied` error. If you have the correct permissions, but + # you're not using an identity that belongs to the bucket owner's + # account, Amazon S3 returns a `405 Method Not Allowed` error. + # + # As a security precaution, the root user of the Amazon Web Services + # account that owns a bucket can always use this operation, even if the + # policy explicitly denies the root user the ability to perform this + # action. + # + # For more information about bucket policies, see [Using Bucket Policies + # and User Policies][1]. + # + # The following action is related to `GetBucketPolicy`: + # + # * [GetObject][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # + # @option params [required, String] :bucket + # The bucket name for which to get the bucket policy. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketPolicyOutput#policy #policy} => IO + # + # + # @example Example: To get bucket policy + # + # # The following example returns bucket policy associated with a bucket. 
+ # + # resp = client.get_bucket_policy({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # policy: "{\"Version\":\"2008-10-17\",\"Id\":\"LogPolicy\",\"Statement\":[{\"Sid\":\"Enables the log delivery group to publish logs to your bucket \",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"111122223333\"},\"Action\":[\"s3:GetBucketAcl\",\"s3:GetObjectAcl\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::policytest1/*\",\"arn:aws:s3:::policytest1\"]}]}", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_policy({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.policy #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy AWS API Documentation + # + # @overload get_bucket_policy(params = {}) + # @param [Hash] params ({}) + def get_bucket_policy(params = {}, options = {}, &block) + req = build_request(:get_bucket_policy, params) + req.send_request(options, &block) + end + + # Retrieves the policy status for an Amazon S3 bucket, indicating + # whether the bucket is public. In order to use this operation, you must + # have the `s3:GetBucketPolicyStatus` permission. For more information + # about Amazon S3 permissions, see [Specifying Permissions in a + # Policy][1]. + # + # For more information about when Amazon S3 considers a bucket public, + # see [The Meaning of "Public"][2]. + # + # The following operations are related to `GetBucketPolicyStatus`: + # + # * [Using Amazon S3 Block Public Access][3] + # + # * [GetPublicAccessBlock][4] + # + # * [PutPublicAccessBlock][5] + # + # * [DeletePublicAccessBlock][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose policy status you want to + # retrieve. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
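+ #
+ # @example Usage sketch: checking whether a bucket is public
+ #
+ #   # Illustrative sketch, not generated documentation. Assumes `client`
+ #   # is a configured Aws::S3::Client and "examplebucket" is a bucket you
+ #   # own.
+ #   resp = client.get_bucket_policy_status(bucket: "examplebucket")
+ #   warn "examplebucket is public" if resp.policy_status.is_public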
+ # + # @return [Types::GetBucketPolicyStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketPolicyStatusOutput#policy_status #policy_status} => Types::PolicyStatus + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_policy_status({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.policy_status.is_public #=> Boolean + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus AWS API Documentation + # + # @overload get_bucket_policy_status(params = {}) + # @param [Hash] params ({}) + def get_bucket_policy_status(params = {}, options = {}) + req = build_request(:get_bucket_policy_status, params) + req.send_request(options) + end + + # Returns the replication configuration of a bucket. + # + # It can take a while for a put or delete of a replication + # configuration to propagate to all Amazon S3 systems. Therefore, a get + # request soon after a put or delete can return an outdated result. + # + # + # + # For information about replication configuration, see [Replication][1] + # in the *Amazon S3 User Guide*. + # + # This action requires permissions for the + # `s3:GetReplicationConfiguration` action. For more information about + # permissions, see [Using Bucket Policies and User Policies][2]. + # + # If you include the `Filter` element in a replication configuration, + # you must also include the `DeleteMarkerReplication` and `Priority` + # elements. The response also returns those elements. + # + # For information about `GetBucketReplication` errors, see [List of + # replication-related error codes][3]. + # + # The following operations are related to `GetBucketReplication`: + # + # * [PutBucketReplication][4] + # + # * [DeleteBucketReplication][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html + # + # @option params [required, String] :bucket + # The bucket name for which to get the replication information. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketReplicationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketReplicationOutput#replication_configuration #replication_configuration} => Types::ReplicationConfiguration + # + # + # @example Example: To get replication configuration set on a bucket + # + # # The following example returns the replication configuration set on a bucket.
+ # + # resp = client.get_bucket_replication({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # replication_configuration: { + # role: "arn:aws:iam::acct-id:role/example-role", + # rules: [ + # { + # destination: { + # bucket: "arn:aws:s3:::destination-bucket", + # }, + # id: "MWIwNTkwZmItMTE3MS00ZTc3LWJkZDEtNzRmODQwYzc1OTQy", + # prefix: "Tax", + # status: "Enabled", + # }, + # ], + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_replication({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.replication_configuration.role #=> String + # resp.replication_configuration.rules #=> Array + # resp.replication_configuration.rules[0].id #=> String + # resp.replication_configuration.rules[0].priority #=> Integer + # resp.replication_configuration.rules[0].prefix #=> String + # resp.replication_configuration.rules[0].filter.prefix #=> String + # resp.replication_configuration.rules[0].filter.tag.key #=> String + # resp.replication_configuration.rules[0].filter.tag.value #=> String + # resp.replication_configuration.rules[0].filter.and.prefix #=> String + # resp.replication_configuration.rules[0].filter.and.tags #=> Array + # resp.replication_configuration.rules[0].filter.and.tags[0].key #=> String + # resp.replication_configuration.rules[0].filter.and.tags[0].value #=> String + # resp.replication_configuration.rules[0].status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].source_selection_criteria.sse_kms_encrypted_objects.status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].source_selection_criteria.replica_modifications.status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].existing_object_replication.status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].destination.bucket #=> String + # resp.replication_configuration.rules[0].destination.account #=> String + # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.replication_configuration.rules[0].destination.access_control_translation.owner #=> String, one of "Destination" + # resp.replication_configuration.rules[0].destination.encryption_configuration.replica_kms_key_id #=> String + # resp.replication_configuration.rules[0].destination.replication_time.status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].destination.replication_time.time.minutes #=> Integer + # resp.replication_configuration.rules[0].destination.metrics.status #=> String, one of "Enabled", "Disabled" + # resp.replication_configuration.rules[0].destination.metrics.event_threshold.minutes #=> Integer + # resp.replication_configuration.rules[0].delete_marker_replication.status #=> String, one of "Enabled", "Disabled" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication AWS API Documentation + # + # @overload get_bucket_replication(params = {}) + # @param [Hash] params ({}) + def get_bucket_replication(params = {}, options = {}) + req = build_request(:get_bucket_replication, params) + req.send_request(options) + end + + # Returns the request payment configuration of a bucket. 
To use this + # version of the operation, you must be the bucket owner. For more + # information, see [Requester Pays Buckets][1]. + # + # The following operations are related to `GetBucketRequestPayment`: + # + # * [ListObjects][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the request payment + # configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketRequestPaymentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketRequestPaymentOutput#payer #payer} => String + # + # + # @example Example: To get bucket request payment configuration + # + # # The following example retrieves the request payment configuration of a bucket. + # + # resp = client.get_bucket_request_payment({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # payer: "BucketOwner", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_request_payment({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.payer #=> String, one of "Requester", "BucketOwner" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment AWS API Documentation + # + # @overload get_bucket_request_payment(params = {}) + # @param [Hash] params ({}) + def get_bucket_request_payment(params = {}, options = {}) + req = build_request(:get_bucket_request_payment, params) + req.send_request(options) + end + + # Returns the tag set associated with the bucket. + # + # To use this operation, you must have permission to perform the + # `s3:GetBucketTagging` action. By default, the bucket owner has this + # permission and can grant this permission to others. + # + # `GetBucketTagging` has the following special error: + # + # * Error code: `NoSuchTagSet` + # + # * Description: There is no tag set associated with the bucket. + # + # ^ + # + # The following operations are related to `GetBucketTagging`: + # + # * [PutBucketTagging][1] + # + # * [DeleteBucketTagging][2] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the tagging information. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied).
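+ #
+ # @example Usage sketch: reading tags as a Hash, handling NoSuchTagSet
+ #
+ #   # Illustrative sketch, not generated documentation. Assumes `client`
+ #   # is a configured Aws::S3::Client and "examplebucket" is a bucket you
+ #   # own; a bucket with no tag set raises the modeled NoSuchTagSet error.
+ #   begin
+ #     resp = client.get_bucket_tagging(bucket: "examplebucket")
+ #     tags = resp.tag_set.map { |t| [t.key, t.value] }.to_h
+ #   rescue Aws::S3::Errors::NoSuchTagSet
+ #     tags = {}
+ #   end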
+ # + # @return [Types::GetBucketTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketTaggingOutput#tag_set #tag_set} => Array&lt;Types::Tag&gt; + # + # + # @example Example: To get tag set associated with a bucket + # + # # The following example returns the tag set associated with a bucket. + # + # resp = client.get_bucket_tagging({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # tag_set: [ + # { + # key: "key1", + # value: "value1", + # }, + # { + # key: "key2", + # value: "value2", + # }, + # ], + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_tagging({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.tag_set #=> Array + # resp.tag_set[0].key #=> String + # resp.tag_set[0].value #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging AWS API Documentation + # + # @overload get_bucket_tagging(params = {}) + # @param [Hash] params ({}) + def get_bucket_tagging(params = {}, options = {}) + req = build_request(:get_bucket_tagging, params) + req.send_request(options) + end + + # Returns the versioning state of a bucket. + # + # To retrieve the versioning state of a bucket, you must be the bucket + # owner. + # + # This implementation also returns the MFA Delete status of the + # versioning state. If the MFA Delete status is `enabled`, the bucket + # owner must use an authentication device to change the versioning state + # of the bucket. + # + # The following operations are related to `GetBucketVersioning`: + # + # * [GetObject][1] + # + # * [PutObject][2] + # + # * [DeleteObject][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to get the versioning information. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketVersioningOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketVersioningOutput#status #status} => String + # * {Types::GetBucketVersioningOutput#mfa_delete #mfa_delete} => String + # + # + # @example Example: To get bucket versioning configuration + # + # # The following example retrieves the bucket versioning configuration.
+ # + # resp = client.get_bucket_versioning({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # mfa_delete: "Disabled", + # status: "Enabled", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_versioning({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.status #=> String, one of "Enabled", "Suspended" + # resp.mfa_delete #=> String, one of "Enabled", "Disabled" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning AWS API Documentation + # + # @overload get_bucket_versioning(params = {}) + # @param [Hash] params ({}) + def get_bucket_versioning(params = {}, options = {}) + req = build_request(:get_bucket_versioning, params) + req.send_request(options) + end + + # Returns the website configuration for a bucket. To host a website on + # Amazon S3, you can configure a bucket as a website by adding a website + # configuration. For more information about hosting websites, see + # [Hosting Websites on Amazon S3][1]. + # + # This GET action requires the `S3:GetBucketWebsite` permission. By + # default, only the bucket owner can read the bucket website + # configuration. However, bucket owners can allow other users to read + # the website configuration by writing a bucket policy granting them the + # `S3:GetBucketWebsite` permission. + # + # The following operations are related to `GetBucketWebsite`: + # + # * [DeleteBucketWebsite][2] + # + # * [PutBucketWebsite][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html + # + # @option params [required, String] :bucket + # The bucket name for which to get the website configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetBucketWebsiteOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetBucketWebsiteOutput#redirect_all_requests_to #redirect_all_requests_to} => Types::RedirectAllRequestsTo + # * {Types::GetBucketWebsiteOutput#index_document #index_document} => Types::IndexDocument + # * {Types::GetBucketWebsiteOutput#error_document #error_document} => Types::ErrorDocument + # * {Types::GetBucketWebsiteOutput#routing_rules #routing_rules} => Array&lt;Types::RoutingRule&gt; + # + # + # @example Example: To get bucket website configuration + # + # # The following example retrieves the website configuration of a bucket.
+ # + # resp = client.get_bucket_website({ + # bucket: "examplebucket", + # }) + # + # resp.to_h outputs the following: + # { + # error_document: { + # key: "error.html", + # }, + # index_document: { + # suffix: "index.html", + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_bucket_website({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.redirect_all_requests_to.host_name #=> String + # resp.redirect_all_requests_to.protocol #=> String, one of "http", "https" + # resp.index_document.suffix #=> String + # resp.error_document.key #=> String + # resp.routing_rules #=> Array + # resp.routing_rules[0].condition.http_error_code_returned_equals #=> String + # resp.routing_rules[0].condition.key_prefix_equals #=> String + # resp.routing_rules[0].redirect.host_name #=> String + # resp.routing_rules[0].redirect.http_redirect_code #=> String + # resp.routing_rules[0].redirect.protocol #=> String, one of "http", "https" + # resp.routing_rules[0].redirect.replace_key_prefix_with #=> String + # resp.routing_rules[0].redirect.replace_key_with #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite AWS API Documentation + # + # @overload get_bucket_website(params = {}) + # @param [Hash] params ({}) + def get_bucket_website(params = {}, options = {}) + req = build_request(:get_bucket_website, params) + req.send_request(options) + end + + # Retrieves objects from Amazon S3. To use `GET`, you must have `READ` + # access to the object. If you grant `READ` access to the anonymous + # user, you can return the object without using an authorization header. + # + # An Amazon S3 bucket has no directory hierarchy such as you would find + # in a typical computer file system. You can, however, create a logical + # hierarchy by using object key names that imply a folder structure. For + # example, instead of naming an object `sample.jpg`, you can name it + # `photos/2006/February/sample.jpg`. + # + # To get an object from such a logical hierarchy, specify the full key + # name for the object in the `GET` operation. For a virtual hosted-style + # request example, if you have the object + # `photos/2006/February/sample.jpg`, specify the resource as + # `/photos/2006/February/sample.jpg`. For a path-style request example, + # if you have the object `photos/2006/February/sample.jpg` in the bucket + # named `examplebucket`, specify the resource as + # `/examplebucket/photos/2006/February/sample.jpg`. For more information + # about request types, see [HTTP Host Header Bucket Specification][1]. + # + # For more information about returning the ACL of an object, see + # [GetObjectAcl][2]. + # + # If the object you are retrieving is stored in the S3 Glacier or S3 + # Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive + # or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve + # the object you must first restore a copy using [RestoreObject][3]. + # Otherwise, this action returns an `InvalidObjectStateError` error. For + # information about restoring archived objects, see [Restoring Archived + # Objects][4]. + # + # Encryption request headers, like `x-amz-server-side-encryption`, + # should not be sent for GET requests if your object uses server-side + # encryption with KMS keys (SSE-KMS) or server-side encryption with + # Amazon S3–managed encryption keys (SSE-S3). 
If your object does use + # these types of keys, you’ll get an HTTP `400 Bad Request` error. + # + # If you encrypt an object by using server-side encryption with + # customer-provided encryption keys (SSE-C) when you store the object in + # Amazon S3, then when you GET the object, you must use the following + # headers: + # + # * x-amz-server-side-encryption-customer-algorithm + # + # * x-amz-server-side-encryption-customer-key + # + # * x-amz-server-side-encryption-customer-key-MD5 + # + # For more information about SSE-C, see [Server-Side Encryption (Using + # Customer-Provided Encryption Keys)][5]. + # + # Assuming you have the relevant permission to read object tags, the + # response also returns the `x-amz-tagging-count` header that provides + # the number of tags associated with the object. You can use + # [GetObjectTagging][6] to retrieve the tag set associated with an + # object. + # + # **Permissions** + # + # You need the relevant read object (or version) permission for this + # operation. For more information, see [Specifying Permissions in a + # Policy][7]. If the object you request does not exist, the error Amazon + # S3 returns depends on whether you also have the `s3:ListBucket` + # permission. + # + # * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 + # will return an HTTP status code 404 ("no such key") error. + # + # * If you don’t have the `s3:ListBucket` permission, Amazon S3 will + # return an HTTP status code 403 ("access denied") error. + # + # **Versioning** + # + # By default, the GET action returns the current version of an object. + # To return a different version, use the `versionId` subresource. + # + # * If you supply a `versionId`, you need the `s3:GetObjectVersion` + # permission to access a specific version of an object. If you request + # a specific version, you do not need to have the `s3:GetObject` + # permission. + # + # * If the current version of the object is a delete marker, Amazon S3 + # behaves as if the object was deleted and includes + # `x-amz-delete-marker: true` in the response. + # + # + # + # For more information about versioning, see [PutBucketVersioning][8]. + # + # **Overriding Response Header Values** + # + # There are times when you want to override certain response header + # values in a GET response. For example, you might override the + # `Content-Disposition` response header value in your GET request. + # + # You can override values for a set of response headers using the + # following query parameters. These response header values are sent only + # on a successful request, that is, when status code 200 OK is returned. + # The set of headers you can override using these parameters is a subset + # of the headers that Amazon S3 accepts when you create an object. The + # response headers that you can override for the GET response are + # `Content-Type`, `Content-Language`, `Expires`, `Cache-Control`, + # `Content-Disposition`, and `Content-Encoding`. To override these + # header values in the GET response, you use the following request + # parameters. + # + # You must sign the request, either using an Authorization header or a + # presigned URL, when using these parameters. They cannot be used with + # an unsigned (anonymous) request.
+ # + # + # + # * `response-content-type` + # + # * `response-content-language` + # + # * `response-expires` + # + # * `response-cache-control` + # + # * `response-content-disposition` + # + # * `response-content-encoding` + # + # **Additional Considerations about Request Headers** + # + # If both the `If-Match` and `If-Unmodified-Since` headers are + # present in the request, the `If-Match` condition evaluates to + # `true`, and the `If-Unmodified-Since` condition evaluates to `false`, + # then S3 returns `200 OK` and the data requested. + # + # If both the `If-None-Match` and `If-Modified-Since` headers are + # present in the request, the `If-None-Match` condition evaluates + # to `false`, and the `If-Modified-Since` condition evaluates to `true`, + # then S3 returns a `304 Not Modified` response code. + # + # For more information about conditional requests, see [RFC 7232][9]. + # + # The following operations are related to `GetObject`: + # + # * [ListBuckets][10] + # + # * [GetObjectAcl][2] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html + # [9]: https://tools.ietf.org/html/rfc7232 + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + # + # @option params [String, IO] :response_target + # Where to write response data: a file path or an IO object. + # + # @option params [required, String] :bucket + # The bucket name containing the object. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using an Object Lambda access point, the hostname takes the form + # *AccessPointName*-*AccountId*.s3-object-lambda.*Region*.amazonaws.com. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [String] :if_match + # Return the object only if its entity tag (ETag) is the same as the one + # specified; otherwise, return a 412 (precondition failed) error. + # + # @option params [Time,DateTime,Date,Integer,String] :if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. + # + # @option params [String] :if_none_match + # Return the object only if its entity tag (ETag) is different from the + # one specified; otherwise, return a 304 (not modified) error. + # + # @option params [Time,DateTime,Date,Integer,String] :if_unmodified_since + # Return the object only if it has not been modified since the specified + # time; otherwise, return a 412 (precondition failed) error. + # + # @option params [required, String] :key + # Key of the object to get. + # + # @option params [String] :range + # Downloads the specified byte range of an object. For more information + # about the HTTP Range header, see + # [https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1]. + # + # Amazon S3 doesn't support retrieving multiple ranges of data per + # `GET` request. + # + # + # + # + # + # [1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + # + # @option params [String] :response_cache_control + # Sets the `Cache-Control` header of the response. + # + # @option params [String] :response_content_disposition + # Sets the `Content-Disposition` header of the response. + # + # @option params [String] :response_content_encoding + # Sets the `Content-Encoding` header of the response. + # + # @option params [String] :response_content_language + # Sets the `Content-Language` header of the response. + # + # @option params [String] :response_content_type + # Sets the `Content-Type` header of the response. + # + # @option params [Time,DateTime,Date,Integer,String] :response_expires + # Sets the `Expires` header of the response. + # + # @option params [String] :version_id + # VersionId used to reference a specific version of the object. + # + # @option params [String] :sse_customer_algorithm + # Specifies the algorithm to use when decrypting the object (for + # example, AES256). + # + # @option params [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 used to + # encrypt the data. This value is used to decrypt the object when + # recovering it and must match the one used when storing the data. The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # + # @option params [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [Integer] :part_number + # Part number of the object being read. This is a positive integer + # between 1 and 10,000. Effectively performs a 'ranged' GET request + # for the part specified. Useful for downloading just a part of an + # object. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :checksum_mode + # To retrieve the checksum, this mode must be enabled. + # + # @return [Types::GetObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectOutput#body #body} => IO + # * {Types::GetObjectOutput#delete_marker #delete_marker} => Boolean + # * {Types::GetObjectOutput#accept_ranges #accept_ranges} => String + # * {Types::GetObjectOutput#expiration #expiration} => String + # * {Types::GetObjectOutput#restore #restore} => String + # * {Types::GetObjectOutput#last_modified #last_modified} => Time + # * {Types::GetObjectOutput#content_length #content_length} => Integer + # * {Types::GetObjectOutput#etag #etag} => String + # * {Types::GetObjectOutput#checksum_crc32 #checksum_crc32} => String + # * {Types::GetObjectOutput#checksum_crc32c #checksum_crc32c} => String + # * {Types::GetObjectOutput#checksum_sha1 #checksum_sha1} => String + # * {Types::GetObjectOutput#checksum_sha256 #checksum_sha256} => String + # * {Types::GetObjectOutput#missing_meta #missing_meta} => Integer + # * {Types::GetObjectOutput#version_id #version_id} => String + # * {Types::GetObjectOutput#cache_control #cache_control} => String + # * {Types::GetObjectOutput#content_disposition #content_disposition} => String + # * {Types::GetObjectOutput#content_encoding #content_encoding} => String + # * {Types::GetObjectOutput#content_language #content_language} => String + # * {Types::GetObjectOutput#content_range #content_range} => String + # * {Types::GetObjectOutput#content_type #content_type} => String + # * {Types::GetObjectOutput#expires #expires} => Time + # * {Types::GetObjectOutput#expires_string #expires_string} => String + # * {Types::GetObjectOutput#website_redirect_location #website_redirect_location} => String + # * {Types::GetObjectOutput#server_side_encryption #server_side_encryption} => String + # * {Types::GetObjectOutput#metadata #metadata} => Hash<String,String> + # * {Types::GetObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::GetObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::GetObjectOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::GetObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::GetObjectOutput#storage_class #storage_class} => String + # * {Types::GetObjectOutput#request_charged #request_charged} => String + # * {Types::GetObjectOutput#replication_status #replication_status} => String + # * {Types::GetObjectOutput#parts_count #parts_count} => Integer + # * {Types::GetObjectOutput#tag_count #tag_count} => Integer + # * {Types::GetObjectOutput#object_lock_mode #object_lock_mode} => String + # * {Types::GetObjectOutput#object_lock_retain_until_date #object_lock_retain_until_date} => Time + # * {Types::GetObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => 
String + # + # + # @example Example: To retrieve an object + # + # # The following example retrieves an object for an S3 bucket. + # + # resp = client.get_object({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # accept_ranges: "bytes", + # content_length: 3191, + # content_type: "image/jpeg", + # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", + # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"), + # metadata: { + # }, + # tag_count: 2, + # version_id: "null", + # } + # + # @example Example: To retrieve a byte range of an object + # + # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a + # # specific byte range. + # + # resp = client.get_object({ + # bucket: "examplebucket", + # key: "SampleFile.txt", + # range: "bytes=0-9", + # }) + # + # resp.to_h outputs the following: + # { + # accept_ranges: "bytes", + # content_length: 10, + # content_range: "bytes 0-9/43", + # content_type: "text/plain", + # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"", + # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"), + # metadata: { + # }, + # version_id: "null", + # } + # + # @example Download an object to disk + # # stream object directly to disk + # resp = s3.get_object( + # response_target: '/path/to/file', + # bucket: 'bucket-name', + # key: 'object-key') + # + # # you can still access other response data + # resp.metadata #=> { ... } + # resp.etag #=> "..." + # + # @example Download object into memory + # # omit :response_target to download to a StringIO in memory + # resp = s3.get_object(bucket: 'bucket-name', key: 'object-key') + # + # # call #read or #string on the response body + # resp.body.read + # #=> '...' + # + # @example Streaming data to a block + # # WARNING: yielding data to a block disables retries of networking errors + # # However truncation of the body will be retried automatically using a range request + # File.open('/path/to/file', 'wb') do |file| + # s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk, headers| + # # headers['content-length'] + # file.write(chunk) + # end + # end + # + # @example Request syntax with placeholder values + # + # resp = client.get_object({ + # bucket: "BucketName", # required + # if_match: "IfMatch", + # if_modified_since: Time.now, + # if_none_match: "IfNoneMatch", + # if_unmodified_since: Time.now, + # key: "ObjectKey", # required + # range: "Range", + # response_cache_control: "ResponseCacheControl", + # response_content_disposition: "ResponseContentDisposition", + # response_content_encoding: "ResponseContentEncoding", + # response_content_language: "ResponseContentLanguage", + # response_content_type: "ResponseContentType", + # response_expires: Time.now, + # version_id: "ObjectVersionId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # part_number: 1, + # expected_bucket_owner: "AccountId", + # checksum_mode: "ENABLED", # accepts ENABLED + # }) + # + # @example Response structure + # + # resp.body #=> IO + # resp.delete_marker #=> Boolean + # resp.accept_ranges #=> String + # resp.expiration #=> String + # resp.restore #=> String + # resp.last_modified #=> Time + # resp.content_length #=> Integer + # resp.etag #=> String + # resp.checksum_crc32 #=> String + # resp.checksum_crc32c #=> String + # resp.checksum_sha1 #=> String + # 
resp.checksum_sha256 #=> String + # resp.missing_meta #=> Integer + # resp.version_id #=> String + # resp.cache_control #=> String + # resp.content_disposition #=> String + # resp.content_encoding #=> String + # resp.content_language #=> String + # resp.content_range #=> String + # resp.content_type #=> String + # resp.expires #=> Time + # resp.expires_string #=> String + # resp.website_redirect_location #=> String + # resp.server_side_encryption #=> String, one of "AES256", "aws:kms" + # resp.metadata #=> Hash + # resp.metadata["MetadataKey"] #=> String + # resp.sse_customer_algorithm #=> String + # resp.sse_customer_key_md5 #=> String + # resp.ssekms_key_id #=> String + # resp.bucket_key_enabled #=> Boolean + # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.request_charged #=> String, one of "requester" + # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA" + # resp.parts_count #=> Integer + # resp.tag_count #=> Integer + # resp.object_lock_mode #=> String, one of "GOVERNANCE", "COMPLIANCE" + # resp.object_lock_retain_until_date #=> Time + # resp.object_lock_legal_hold_status #=> String, one of "ON", "OFF" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject AWS API Documentation + # + # @overload get_object(params = {}) + # @param [Hash] params ({}) + def get_object(params = {}, options = {}, &block) + req = build_request(:get_object, params) + req.send_request(options, &block) + end + + # Returns the access control list (ACL) of an object. To use this + # operation, you must have `s3:GetObjectAcl` permissions or `READ_ACP` + # access to the object. For more information, see [Mapping of ACL + # permissions and access policy permissions][1] in the *Amazon S3 User + # Guide* + # + # This action is not supported by Amazon S3 on Outposts. + # + # **Versioning** + # + # By default, GET returns ACL information about the current version of + # an object. To return ACL information about a different version, use + # the versionId subresource. + # + # If your bucket uses the bucket owner enforced setting for S3 Object + # Ownership, requests to read ACLs are still supported and return the + # `bucket-owner-full-control` ACL with the owner being the account that + # created the bucket. For more information, see [ Controlling object + # ownership and disabling ACLs][2] in the *Amazon S3 User Guide*. + # + # + # + # The following operations are related to `GetObjectAcl`: + # + # * [GetObject][3] + # + # * [GetObjectAttributes][4] + # + # * [DeleteObject][5] + # + # * [PutObject][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # + # @option params [required, String] :bucket + # The bucket name that contains the object for which to get the ACL + # information. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. 
The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # + # @option params [required, String] :key + # The key of the object for which to get the ACL information. + # + # @option params [String] :version_id + # VersionId used to reference a specific version of the object. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetObjectAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectAclOutput#owner #owner} => Types::Owner + # * {Types::GetObjectAclOutput#grants #grants} => Array<Types::Grant> + # * {Types::GetObjectAclOutput#request_charged #request_charged} => String + # + # + # @example Example: To retrieve object ACL + # + # # The following example retrieves access control list (ACL) of an object. 
+ # + # resp = client.get_object_acl({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # grants: [ + # { + # grantee: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # type: "CanonicalUser", + # }, + # permission: "WRITE", + # }, + # { + # grantee: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # type: "CanonicalUser", + # }, + # permission: "WRITE_ACP", + # }, + # { + # grantee: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # type: "CanonicalUser", + # }, + # permission: "READ", + # }, + # { + # grantee: { + # display_name: "owner-display-name", + # id: "852b113eexamplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # type: "CanonicalUser", + # }, + # permission: "READ_ACP", + # }, + # ], + # owner: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_object_acl({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.owner.display_name #=> String + # resp.owner.id #=> String + # resp.grants #=> Array + # resp.grants[0].grantee.display_name #=> String + # resp.grants[0].grantee.email_address #=> String + # resp.grants[0].grantee.id #=> String + # resp.grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group" + # resp.grants[0].grantee.uri #=> String + # resp.grants[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP" + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl AWS API Documentation + # + # @overload get_object_acl(params = {}) + # @param [Hash] params ({}) + def get_object_acl(params = {}, options = {}) + req = build_request(:get_object_acl, params) + req.send_request(options) + end + + # Retrieves all the metadata from an object without returning the object + # itself. This action is useful if you're interested only in an + # object's metadata. To use `GetObjectAttributes`, you must have READ + # access to the object. + # + # `GetObjectAttributes` combines the functionality of `GetObjectAcl`, + # `GetObjectLegalHold`, `GetObjectLockConfiguration`, + # `GetObjectRetention`, `GetObjectTagging`, `HeadObject`, and + # `ListParts`. All of the data returned with each of those individual + # calls can be returned with a single call to `GetObjectAttributes`. + # + # If you encrypt an object by using server-side encryption with + # customer-provided encryption keys (SSE-C) when you store the object in + # Amazon S3, then when you retrieve the metadata from the object, you + # must use the following headers: + # + # * `x-amz-server-side-encryption-customer-algorithm` + # + # * `x-amz-server-side-encryption-customer-key` + # + # * `x-amz-server-side-encryption-customer-key-MD5` + # + # For more information about SSE-C, see [Server-Side Encryption (Using + # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. 
+ # + # * Encryption request headers, such as `x-amz-server-side-encryption`, + # should not be sent for GET requests if your object uses server-side + # encryption with Amazon Web Services KMS keys stored in Amazon Web + # Services Key Management Service (SSE-KMS) or server-side encryption + # with Amazon S3 managed encryption keys (SSE-S3). If your object does + # use these types of keys, you'll get an HTTP `400 Bad Request` + # error. + # + # * The last modified property in this case is the creation date of the + # object. + # + # + # + # Consider the following when using request headers: + # + # * If both of the `If-Match` and `If-Unmodified-Since` headers are + # present in the request as follows, then Amazon S3 returns the HTTP + # status code `200 OK` and the data requested: + # + # * `If-Match` condition evaluates to `true`. + # + # * `If-Unmodified-Since` condition evaluates to `false`. + # + # * If both of the `If-None-Match` and `If-Modified-Since` headers are + # present in the request as follows, then Amazon S3 returns the HTTP + # status code `304 Not Modified`: + # + # * `If-None-Match` condition evaluates to `false`. + # + # * `If-Modified-Since` condition evaluates to `true`. + # + # For more information about conditional requests, see [RFC 7232][2]. + # + # **Permissions** + # + # The permissions that you need to use this operation depend on whether + # the bucket is versioned. If the bucket is versioned, you need both the + # `s3:GetObjectVersion` and `s3:GetObjectVersionAttributes` permissions + # for this operation. If the bucket is not versioned, you need the + # `s3:GetObject` and `s3:GetObjectAttributes` permissions. For more + # information, see [Specifying Permissions in a Policy][3] in the + # *Amazon S3 User Guide*. If the object that you request does not exist, + # the error Amazon S3 returns depends on whether you also have the + # `s3:ListBucket` permission. + # + # * If you have the `s3:ListBucket` permission on the bucket, Amazon S3 + # returns an HTTP status code `404 Not Found` ("no such key") error. + # + # * If you don't have the `s3:ListBucket` permission, Amazon S3 returns + # an HTTP status code `403 Forbidden` ("access denied") error. + # + # The following actions are related to `GetObjectAttributes`: + # + # * [GetObject][4] + # + # * [GetObjectAcl][5] + # + # * [GetObjectLegalHold][6] + # + # * [GetObjectLockConfiguration][7] + # + # * [GetObjectRetention][8] + # + # * [GetObjectTagging][9] + # + # * [HeadObject][10] + # + # * [ListParts][11] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # [2]: https://tools.ietf.org/html/rfc7232 + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + # + # @option params [required, String] :bucket + # The name of the bucket that contains the object. 
+ # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # The object key. + # + # @option params [String] :version_id + # The version ID used to reference a specific version of the object. + # + # @option params [Integer] :max_parts + # Sets the maximum number of parts to return. + # + # @option params [Integer] :part_number_marker + # Specifies the part after which listing should begin. Only parts with + # higher part numbers will be listed. + # + # @option params [String] :sse_customer_algorithm + # Specifies the algorithm to use when encrypting the object (for + # example, AES256). + # + # @option params [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # + # @option params [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [required, Array] :object_attributes + # An XML header that specifies the fields at the root level that you + # want returned in the response. Fields that you do not specify are not + # returned. 
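+ #
+ # For illustration, a minimal sketch (placeholder names) that requests
+ # only the size and storage class; root-level fields that are not named
+ # in `object_attributes` are left unset in the response:
+ #
+ # resp = client.get_object_attributes({
+ # bucket: "examplebucket",
+ # key: "HappyFace.jpg",
+ # object_attributes: ["ObjectSize", "StorageClass"],
+ # })
+ # resp.object_size #=> Integer
+ # resp.storage_class #=> String, e.g. "STANDARD"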
+ # + # @return [Types::GetObjectAttributesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectAttributesOutput#delete_marker #delete_marker} => Boolean + # * {Types::GetObjectAttributesOutput#last_modified #last_modified} => Time + # * {Types::GetObjectAttributesOutput#version_id #version_id} => String + # * {Types::GetObjectAttributesOutput#request_charged #request_charged} => String + # * {Types::GetObjectAttributesOutput#etag #etag} => String + # * {Types::GetObjectAttributesOutput#checksum #checksum} => Types::Checksum + # * {Types::GetObjectAttributesOutput#object_parts #object_parts} => Types::GetObjectAttributesParts + # * {Types::GetObjectAttributesOutput#storage_class #storage_class} => String + # * {Types::GetObjectAttributesOutput#object_size #object_size} => Integer + # + # @example Request syntax with placeholder values + # + # resp = client.get_object_attributes({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # max_parts: 1, + # part_number_marker: 1, + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # object_attributes: ["ETag"], # required, accepts ETag, Checksum, ObjectParts, StorageClass, ObjectSize + # }) + # + # @example Response structure + # + # resp.delete_marker #=> Boolean + # resp.last_modified #=> Time + # resp.version_id #=> String + # resp.request_charged #=> String, one of "requester" + # resp.etag #=> String + # resp.checksum.checksum_crc32 #=> String + # resp.checksum.checksum_crc32c #=> String + # resp.checksum.checksum_sha1 #=> String + # resp.checksum.checksum_sha256 #=> String + # resp.object_parts.total_parts_count #=> Integer + # resp.object_parts.part_number_marker #=> Integer + # resp.object_parts.next_part_number_marker #=> Integer + # resp.object_parts.max_parts #=> Integer + # resp.object_parts.is_truncated #=> Boolean + # resp.object_parts.parts #=> Array + # resp.object_parts.parts[0].part_number #=> Integer + # resp.object_parts.parts[0].size #=> Integer + # resp.object_parts.parts[0].checksum_crc32 #=> String + # resp.object_parts.parts[0].checksum_crc32c #=> String + # resp.object_parts.parts[0].checksum_sha1 #=> String + # resp.object_parts.parts[0].checksum_sha256 #=> String + # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.object_size #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes AWS API Documentation + # + # @overload get_object_attributes(params = {}) + # @param [Hash] params ({}) + def get_object_attributes(params = {}, options = {}) + req = build_request(:get_object_attributes, params) + req.send_request(options) + end + + # Gets an object's current legal hold status. For more information, see + # [Locking Objects][1]. + # + # This action is not supported by Amazon S3 on Outposts. 
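+ #
+ # A minimal usage sketch (placeholder bucket and key; assumes Object
+ # Lock is enabled on the bucket and `client` is a configured
+ # `Aws::S3::Client`):
+ #
+ # resp = client.get_object_legal_hold({
+ # bucket: "examplebucket",
+ # key: "HappyFace.jpg",
+ # })
+ # resp.legal_hold.status #=> String, one of "ON", "OFF"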
+ # + # The following action is related to `GetObjectLegalHold`: + # + # * [GetObjectAttributes][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + # + # @option params [required, String] :bucket + # The bucket name containing the object whose legal hold status you want + # to retrieve. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # + # @option params [required, String] :key + # The key name for the object whose legal hold status you want to + # retrieve. + # + # @option params [String] :version_id + # The version ID of the object whose legal hold status you want to + # retrieve. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetObjectLegalHoldOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectLegalHoldOutput#legal_hold #legal_hold} => Types::ObjectLockLegalHold + # + # @example Request syntax with placeholder values + # + # resp = client.get_object_legal_hold({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.legal_hold.status #=> String, one of "ON", "OFF" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold AWS API Documentation + # + # @overload get_object_legal_hold(params = {}) + # @param [Hash] params ({}) + def get_object_legal_hold(params = {}, options = {}) + req = build_request(:get_object_legal_hold, params) + req.send_request(options) + end + + # Gets the Object Lock configuration for a bucket. The rule specified in + # the Object Lock configuration will be applied by default to every new + # object placed in the specified bucket. For more information, see + # [Locking Objects][1]. 
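+ #
+ # A minimal usage sketch (placeholder bucket name; this assumes a
+ # default retention rule has been configured, otherwise `rule` is
+ # `nil`):
+ #
+ # resp = client.get_object_lock_configuration({
+ # bucket: "examplebucket",
+ # })
+ # rule = resp.object_lock_configuration.rule
+ # rule.default_retention.mode #=> String, one of "GOVERNANCE", "COMPLIANCE"
+ # rule.default_retention.days #=> Integer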
+ # + # The following action is related to `GetObjectLockConfiguration`: + # + # * [GetObjectAttributes][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + # + # @option params [required, String] :bucket + # The bucket whose Object Lock configuration you want to retrieve. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::GetObjectLockConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectLockConfigurationOutput#object_lock_configuration #object_lock_configuration} => Types::ObjectLockConfiguration + # + # @example Request syntax with placeholder values + # + # resp = client.get_object_lock_configuration({ + # bucket: "BucketName", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.object_lock_configuration.object_lock_enabled #=> String, one of "Enabled" + # resp.object_lock_configuration.rule.default_retention.mode #=> String, one of "GOVERNANCE", "COMPLIANCE" + # resp.object_lock_configuration.rule.default_retention.days #=> Integer + # resp.object_lock_configuration.rule.default_retention.years #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration AWS API Documentation + # + # @overload get_object_lock_configuration(params = {}) + # @param [Hash] params ({}) + def get_object_lock_configuration(params = {}, options = {}) + req = build_request(:get_object_lock_configuration, params) + req.send_request(options) + end + + # Retrieves an object's retention settings. For more information, see + # [Locking Objects][1]. + # + # This action is not supported by Amazon S3 on Outposts. + # + # The following action is related to `GetObjectRetention`: + # + # * [GetObjectAttributes][2] + # + # ^ + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + # + # @option params [required, String] :bucket + # The bucket name containing the object whose retention settings you + # want to retrieve. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ #
+ # @option params [required, String] :key
+ # The key name for the object whose retention settings you want to
+ # retrieve.
+ #
+ # @option params [String] :version_id
+ # The version ID for the object whose retention settings you want to
+ # retrieve.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::GetObjectRetentionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetObjectRetentionOutput#retention #retention} => Types::ObjectLockRetention
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_object_retention({
+ # bucket: "BucketName", # required
+ # key: "ObjectKey", # required
+ # version_id: "ObjectVersionId",
+ # request_payer: "requester", # accepts requester
+ # expected_bucket_owner: "AccountId",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.retention.mode #=> String, one of "GOVERNANCE", "COMPLIANCE"
+ # resp.retention.retain_until_date #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention AWS API Documentation
+ #
+ # @overload get_object_retention(params = {})
+ # @param [Hash] params ({})
+ def get_object_retention(params = {}, options = {})
+ req = build_request(:get_object_retention, params)
+ req.send_request(options)
+ end
+
+ # Returns the tag-set of an object. You send the GET request against the
+ # tagging subresource associated with the object.
+ #
+ # To use this operation, you must have permission to perform the
+ # `s3:GetObjectTagging` action. By default, the GET action returns
+ # information about the current version of an object. For a versioned
+ # bucket, you can have multiple versions of an object in your bucket. To
+ # retrieve tags of any other version, use the `versionId` query
+ # parameter. You also need permission for the
+ # `s3:GetObjectVersionTagging` action.
+ #
+ # By default, the bucket owner has this permission and can grant this
+ # permission to others.
+ #
+ # For information about the Amazon S3 object tagging feature, see
+ # [Object Tagging][1].
+ #
+ # The following actions are related to `GetObjectTagging`:
+ #
+ # * [DeleteObjectTagging][2]
+ #
+ # * [GetObjectAttributes][3]
+ #
+ # * [PutObjectTagging][4]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+ #
+ # @option params [required, String] :bucket
+ # The bucket name containing the object for which to get the tagging
+ # information.
+ # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Object key for which to get the tagging information. + # + # @option params [String] :version_id + # The versionId of the object for which to get the tagging information. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @return [Types::GetObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetObjectTaggingOutput#version_id #version_id} => String + # * {Types::GetObjectTaggingOutput#tag_set #tag_set} => Array<Types::Tag> + # + # + # @example Example: To retrieve tag set of a specific object version + # + # # The following example retrieves tag set of an object. The request specifies object version. + # + # resp = client.get_object_tagging({ + # bucket: "examplebucket", + # key: "exampleobject", + # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", + # }) + # + # resp.to_h outputs the following: + # { + # tag_set: [ + # { + # key: "Key1", + # value: "Value1", + # }, + # ], + # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", + # } + # + # @example Example: To retrieve tag set of an object + # + # # The following example retrieves tag set of an object. 
+ #
+ # resp = client.get_object_tagging({
+ # bucket: "examplebucket",
+ # key: "HappyFace.jpg",
+ # })
+ #
+ # resp.to_h outputs the following:
+ # {
+ # tag_set: [
+ # {
+ # key: "Key4",
+ # value: "Value4",
+ # },
+ # {
+ # key: "Key3",
+ # value: "Value3",
+ # },
+ # ],
+ # version_id: "null",
+ # }
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_object_tagging({
+ # bucket: "BucketName", # required
+ # key: "ObjectKey", # required
+ # version_id: "ObjectVersionId",
+ # expected_bucket_owner: "AccountId",
+ # request_payer: "requester", # accepts requester
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.version_id #=> String
+ # resp.tag_set #=> Array
+ # resp.tag_set[0].key #=> String
+ # resp.tag_set[0].value #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging AWS API Documentation
+ #
+ # @overload get_object_tagging(params = {})
+ # @param [Hash] params ({})
+ def get_object_tagging(params = {}, options = {})
+ req = build_request(:get_object_tagging, params)
+ req.send_request(options)
+ end
+
+ # Returns torrent files from a bucket. BitTorrent can save you bandwidth
+ # when you're distributing large files. For more information about
+ # BitTorrent, see [Using BitTorrent with Amazon S3][1].
+ #
+ # You can get a torrent only for objects that are less than 5 GB in
+ # size, and that are not encrypted using server-side encryption with a
+ # customer-provided encryption key.
+ #
+ #
+ #
+ # To use GET, you must have READ access to the object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # The following action is related to `GetObjectTorrent`:
+ #
+ # * [GetObject][2]
+ #
+ # ^
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ #
+ # @option params [String, IO] :response_target
+ # Where to write response data: a file path or an IO object.
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket containing the object for which to get the
+ # torrent files.
+ #
+ # @option params [required, String] :key
+ # The object key for which to get the information.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::GetObjectTorrentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetObjectTorrentOutput#body #body} => IO
+ # * {Types::GetObjectTorrentOutput#request_charged #request_charged} => String
+ #
+ #
+ # @example Example: To retrieve torrent files for an object
+ #
+ # # The following example retrieves torrent files of an object.
+ # + # resp = client.get_object_torrent({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.get_object_torrent({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.body #=> IO + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent AWS API Documentation + # + # @overload get_object_torrent(params = {}) + # @param [Hash] params ({}) + def get_object_torrent(params = {}, options = {}, &block) + req = build_request(:get_object_torrent, params) + req.send_request(options, &block) + end + + # Retrieves the `PublicAccessBlock` configuration for an Amazon S3 + # bucket. To use this operation, you must have the + # `s3:GetBucketPublicAccessBlock` permission. For more information about + # Amazon S3 permissions, see [Specifying Permissions in a Policy][1]. + # + # When Amazon S3 evaluates the `PublicAccessBlock` configuration for a + # bucket or an object, it checks the `PublicAccessBlock` configuration + # for both the bucket (or the bucket that contains the object) and the + # bucket owner's account. If the `PublicAccessBlock` settings are + # different between the bucket and the account, Amazon S3 uses the most + # restrictive combination of the bucket-level and account-level + # settings. + # + # For more information about when Amazon S3 considers a bucket or an + # object public, see [The Meaning of "Public"][2]. + # + # The following operations are related to `GetPublicAccessBlock`: + # + # * [Using Amazon S3 Block Public Access][3] + # + # * [PutPublicAccessBlock][4] + # + # * [GetPublicAccessBlock][5] + # + # * [DeletePublicAccessBlock][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose `PublicAccessBlock` + # configuration you want to retrieve. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
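+ #
+ # A small sketch of reading the four bucket-level settings together
+ # (placeholder bucket name; note that, as described above, the
+ # effective behavior also depends on the account-level configuration):
+ #
+ # config = client.get_public_access_block({
+ # bucket: "examplebucket",
+ # }).public_access_block_configuration
+ # fully_blocked = config.block_public_acls &&
+ # config.ignore_public_acls &&
+ # config.block_public_policy &&
+ # config.restrict_public_buckets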
+ #
+ # @return [Types::GetPublicAccessBlockOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::GetPublicAccessBlockOutput#public_access_block_configuration #public_access_block_configuration} => Types::PublicAccessBlockConfiguration
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.get_public_access_block({
+ # bucket: "BucketName", # required
+ # expected_bucket_owner: "AccountId",
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.public_access_block_configuration.block_public_acls #=> Boolean
+ # resp.public_access_block_configuration.ignore_public_acls #=> Boolean
+ # resp.public_access_block_configuration.block_public_policy #=> Boolean
+ # resp.public_access_block_configuration.restrict_public_buckets #=> Boolean
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock AWS API Documentation
+ #
+ # @overload get_public_access_block(params = {})
+ # @param [Hash] params ({})
+ def get_public_access_block(params = {}, options = {})
+ req = build_request(:get_public_access_block, params)
+ req.send_request(options)
+ end
+
+ # This action is useful for determining whether a bucket exists and
+ # whether you have permission to access it. The action returns a
+ # `200 OK` if the bucket exists and you have permission to access it.
+ #
+ # If the bucket does not exist or you do not have permission to access
+ # it, the `HEAD` request returns a generic `404 Not Found` or `403
+ # Forbidden` code. A message body is not included, so you cannot
+ # determine the exception beyond these error codes.
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:ListBucket` action. The bucket owner has this permission by
+ # default and can grant this permission to others. For more information
+ # about permissions, see [Permissions Related to Bucket Subresource
+ # Operations][1] and [Managing Access Permissions to Your Amazon S3
+ # Resources][2].
+ #
+ # To use this API against an access point, you must provide the alias of
+ # the access point in place of the bucket name or specify the access
+ # point ARN. When using the access point ARN, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When
+ # using the Amazon Web Services SDKs, you provide the ARN in place of
+ # the bucket name. For more information, see [Using access points][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ #
+ # @option params [required, String] :bucket
+ # The bucket name.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname.
The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To determine if bucket exists
+ #
+ # # This operation checks to see if a bucket exists.
+ #
+ # resp = client.head_bucket({
+ # bucket: "acl1",
+ # })
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.head_bucket({
+ # bucket: "BucketName", # required
+ # expected_bucket_owner: "AccountId",
+ # })
+ #
+ #
+ # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
+ #
+ # * bucket_exists
+ # * bucket_not_exists
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket AWS API Documentation
+ #
+ # @overload head_bucket(params = {})
+ # @param [Hash] params ({})
+ def head_bucket(params = {}, options = {})
+ req = build_request(:head_bucket, params)
+ req.send_request(options)
+ end
+
+ # The HEAD action retrieves metadata from an object without returning
+ # the object itself. This action is useful if you're only interested in
+ # an object's metadata. To use HEAD, you must have READ access to the
+ # object.
+ #
+ # A `HEAD` request has the same options as a `GET` action on an object.
+ # The response is identical to the `GET` response except that there is
+ # no response body. Because of this, if the `HEAD` request generates an
+ # error, it returns a generic `404 Not Found` or `403 Forbidden` code.
+ # It is not possible to retrieve the exact exception beyond these error
+ # codes.
+ #
+ # If you encrypt an object by using server-side encryption with
+ # customer-provided encryption keys (SSE-C) when you store the object in
+ # Amazon S3, then when you retrieve the metadata from the object, you
+ # must use the following headers:
+ #
+ # * `x-amz-server-side-encryption-customer-algorithm`
+ #
+ # * `x-amz-server-side-encryption-customer-key`
+ #
+ # * `x-amz-server-side-encryption-customer-key-MD5`
+ #
+ # For more information about SSE-C, see [Server-Side Encryption (Using
+ # Customer-Provided Encryption Keys)][1].
+ #
+ # * Encryption request headers, like `x-amz-server-side-encryption`,
+ # should not be sent for GET requests if your object uses server-side
+ # encryption with KMS keys (SSE-KMS) or server-side encryption with
+ # Amazon S3–managed encryption keys (SSE-S3). If your object does use
+ # these types of keys, you’ll get an HTTP `400 Bad Request` error.
+ #
+ # * The last modified property in this case is the creation date of the
+ # object.
+ #
+ #
+ #
+ # Request headers are limited to 8 KB in size. For more information, see
+ # [Common Request Headers][2].
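+ #
+ # Because the error body is empty, a failed `HEAD` surfaces in this
+ # SDK as an exception keyed off the status code alone. A minimal
+ # error-handling sketch (placeholder names; the `NotFound` and
+ # `Forbidden` error classes are the assumed mappings for the bare
+ # `404`/`403` responses):
+ #
+ # begin
+ # resp = client.head_object({ bucket: "examplebucket", key: "HappyFace.jpg" })
+ # resp.content_length #=> object size in bytes
+ # rescue Aws::S3::Errors::NotFound
+ # # no such key (seeing 404 rather than 403 requires s3:ListBucket)
+ # rescue Aws::S3::Errors::Forbidden
+ # # access denied
+ # end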
+ #
+ # Consider the following when using request headers:
+ #
+ # * Consideration 1 – If both of the `If-Match` and
+ # `If-Unmodified-Since` headers are present in the request as follows:
+ #
+ # * `If-Match` condition evaluates to `true`, and
+ #
+ # * `If-Unmodified-Since` condition evaluates to `false`.
+ #
+ # Then Amazon S3 returns `200 OK` and the data requested.
+ #
+ # * Consideration 2 – If both of the `If-None-Match` and
+ # `If-Modified-Since` headers are present in the request as follows:
+ #
+ # * `If-None-Match` condition evaluates to `false`, and
+ #
+ # * `If-Modified-Since` condition evaluates to `true`.
+ #
+ # Then Amazon S3 returns the `304 Not Modified` response code.
+ #
+ # For more information about conditional requests, see [RFC 7232][3].
+ #
+ # **Permissions**
+ #
+ # You need the relevant read object (or version) permission for this
+ # operation. For more information, see [Specifying Permissions in a
+ # Policy][4]. If the object you request does not exist, the error Amazon
+ # S3 returns depends on whether you also have the `s3:ListBucket`
+ # permission.
+ #
+ # * If you have the `s3:ListBucket` permission on the bucket, Amazon S3
+ # returns an HTTP status code 404 ("no such key") error.
+ #
+ # * If you don’t have the `s3:ListBucket` permission, Amazon S3 returns
+ # an HTTP status code 403 ("access denied") error.
+ #
+ # The following actions are related to `HeadObject`:
+ #
+ # * [GetObject][5]
+ #
+ # * [GetObjectAttributes][6]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html
+ # [3]: https://tools.ietf.org/html/rfc7232
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket containing the object.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :if_match
+ # Return the object only if its entity tag (ETag) is the same as the one
+ # specified; otherwise, return a 412 (precondition failed) error.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :if_modified_since
+ # Return the object only if it has been modified since the specified
+ # time; otherwise, return a 304 (not modified) error.
+ #
+ # @option params [String] :if_none_match
+ # Return the object only if its entity tag (ETag) is different from the
+ # one specified; otherwise, return a 304 (not modified) error.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :if_unmodified_since
+ # Return the object only if it has not been modified since the specified
+ # time; otherwise, return a 412 (precondition failed) error.
+ #
+ # @option params [required, String] :key
+ # The object key.
+ #
+ # @option params [String] :range
+ # Because `HeadObject` returns only the metadata for an object, this
+ # parameter has no effect.
+ #
+ # @option params [String] :version_id
+ # VersionId used to reference a specific version of the object.
+ #
+ # @option params [String] :sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ #
+ # @option params [String] :sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use in
+ # encrypting data. This value is used to store the object and then it is
+ # discarded; Amazon S3 does not store the encryption key. The key must
+ # be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
+ #
+ # @option params [String] :sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check to
+ # ensure that the encryption key was transmitted without error.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [Integer] :part_number
+ # Part number of the object being read. This is a positive integer
+ # between 1 and 10,000. Effectively performs a 'ranged' HEAD request
+ # for the part specified. Useful for querying the size of the part and
+ # the number of parts in this object.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @option params [String] :checksum_mode
+ # To retrieve the checksum, this parameter must be set to `ENABLED`.
+ #
+ # In addition, if you enable `ChecksumMode` and the object is encrypted
+ # with Amazon Web Services Key Management Service (Amazon Web Services
+ # KMS), you must have permission to use the `kms:Decrypt` action for the
+ # request to succeed.
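+ #
+ # A minimal sketch of retrieving a checksum (placeholder names;
+ # assumes the object was uploaded with a SHA-256 checksum, so the
+ # other `checksum_*` fields remain `nil`):
+ #
+ # resp = client.head_object({
+ # bucket: "examplebucket",
+ # key: "HappyFace.jpg",
+ # checksum_mode: "ENABLED",
+ # })
+ # resp.checksum_sha256 #=> String, base64-encoded digest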
+ # + # @return [Types::HeadObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::HeadObjectOutput#delete_marker #delete_marker} => Boolean + # * {Types::HeadObjectOutput#accept_ranges #accept_ranges} => String + # * {Types::HeadObjectOutput#expiration #expiration} => String + # * {Types::HeadObjectOutput#restore #restore} => String + # * {Types::HeadObjectOutput#archive_status #archive_status} => String + # * {Types::HeadObjectOutput#last_modified #last_modified} => Time + # * {Types::HeadObjectOutput#content_length #content_length} => Integer + # * {Types::HeadObjectOutput#checksum_crc32 #checksum_crc32} => String + # * {Types::HeadObjectOutput#checksum_crc32c #checksum_crc32c} => String + # * {Types::HeadObjectOutput#checksum_sha1 #checksum_sha1} => String + # * {Types::HeadObjectOutput#checksum_sha256 #checksum_sha256} => String + # * {Types::HeadObjectOutput#etag #etag} => String + # * {Types::HeadObjectOutput#missing_meta #missing_meta} => Integer + # * {Types::HeadObjectOutput#version_id #version_id} => String + # * {Types::HeadObjectOutput#cache_control #cache_control} => String + # * {Types::HeadObjectOutput#content_disposition #content_disposition} => String + # * {Types::HeadObjectOutput#content_encoding #content_encoding} => String + # * {Types::HeadObjectOutput#content_language #content_language} => String + # * {Types::HeadObjectOutput#content_type #content_type} => String + # * {Types::HeadObjectOutput#expires #expires} => Time + # * {Types::HeadObjectOutput#expires_string #expires_string} => String + # * {Types::HeadObjectOutput#website_redirect_location #website_redirect_location} => String + # * {Types::HeadObjectOutput#server_side_encryption #server_side_encryption} => String + # * {Types::HeadObjectOutput#metadata #metadata} => Hash<String,String> + # * {Types::HeadObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::HeadObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::HeadObjectOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::HeadObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::HeadObjectOutput#storage_class #storage_class} => String + # * {Types::HeadObjectOutput#request_charged #request_charged} => String + # * {Types::HeadObjectOutput#replication_status #replication_status} => String + # * {Types::HeadObjectOutput#parts_count #parts_count} => Integer + # * {Types::HeadObjectOutput#object_lock_mode #object_lock_mode} => String + # * {Types::HeadObjectOutput#object_lock_retain_until_date #object_lock_retain_until_date} => Time + # * {Types::HeadObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String + # + # + # @example Example: To retrieve metadata of an object without returning the object itself + # + # # The following example retrieves an object metadata. 
+ # + # resp = client.head_object({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # accept_ranges: "bytes", + # content_length: 3191, + # content_type: "image/jpeg", + # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", + # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"), + # metadata: { + # }, + # version_id: "null", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.head_object({ + # bucket: "BucketName", # required + # if_match: "IfMatch", + # if_modified_since: Time.now, + # if_none_match: "IfNoneMatch", + # if_unmodified_since: Time.now, + # key: "ObjectKey", # required + # range: "Range", + # version_id: "ObjectVersionId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # part_number: 1, + # expected_bucket_owner: "AccountId", + # checksum_mode: "ENABLED", # accepts ENABLED + # }) + # + # @example Response structure + # + # resp.delete_marker #=> Boolean + # resp.accept_ranges #=> String + # resp.expiration #=> String + # resp.restore #=> String + # resp.archive_status #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" + # resp.last_modified #=> Time + # resp.content_length #=> Integer + # resp.checksum_crc32 #=> String + # resp.checksum_crc32c #=> String + # resp.checksum_sha1 #=> String + # resp.checksum_sha256 #=> String + # resp.etag #=> String + # resp.missing_meta #=> Integer + # resp.version_id #=> String + # resp.cache_control #=> String + # resp.content_disposition #=> String + # resp.content_encoding #=> String + # resp.content_language #=> String + # resp.content_type #=> String + # resp.expires #=> Time + # resp.expires_string #=> String + # resp.website_redirect_location #=> String + # resp.server_side_encryption #=> String, one of "AES256", "aws:kms" + # resp.metadata #=> Hash + # resp.metadata["MetadataKey"] #=> String + # resp.sse_customer_algorithm #=> String + # resp.sse_customer_key_md5 #=> String + # resp.ssekms_key_id #=> String + # resp.bucket_key_enabled #=> Boolean + # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.request_charged #=> String, one of "requester" + # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA" + # resp.parts_count #=> Integer + # resp.object_lock_mode #=> String, one of "GOVERNANCE", "COMPLIANCE" + # resp.object_lock_retain_until_date #=> Time + # resp.object_lock_legal_hold_status #=> String, one of "ON", "OFF" + # + # + # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): + # + # * object_exists + # * object_not_exists + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject AWS API Documentation + # + # @overload head_object(params = {}) + # @param [Hash] params ({}) + def head_object(params = {}, options = {}) + req = build_request(:head_object, params) + req.send_request(options) + end + + # Lists the analytics configurations for the bucket. You can have up to + # 1,000 analytics configurations per bucket. + # + # This action supports list pagination and does not return more than 100 + # configurations at a time. You should always check the `IsTruncated` + # element in the response. 
If there are no more configurations to list,
+ # `IsTruncated` is set to false. If there are more configurations to
+ # list, `IsTruncated` is set to true, and there will be a value in
+ # `NextContinuationToken`. You use the `NextContinuationToken` value to
+ # continue the pagination of the list by passing the value in
+ # `continuation-token` in the request to `GET` the next page.
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:GetAnalyticsConfiguration` action. The bucket owner has this
+ # permission by default. The bucket owner can grant this permission to
+ # others. For more information about permissions, see [Permissions
+ # Related to Bucket Subresource Operations][1] and [Managing Access
+ # Permissions to Your Amazon S3 Resources][2].
+ #
+ # For information about the Amazon S3 analytics feature, see [Amazon S3
+ # Analytics – Storage Class Analysis][3].
+ #
+ # The following operations are related to
+ # `ListBucketAnalyticsConfigurations`:
+ #
+ # * [GetBucketAnalyticsConfiguration][4]
+ #
+ # * [DeleteBucketAnalyticsConfiguration][5]
+ #
+ # * [PutBucketAnalyticsConfiguration][6]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket from which analytics configurations are
+ # retrieved.
+ #
+ # @option params [String] :continuation_token
+ # The `ContinuationToken` that represents a placeholder from where this
+ # request should begin.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
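+ #
+ # A sketch of the continuation-token loop described above (placeholder
+ # bucket name):
+ #
+ # configs = []
+ # params = { bucket: "examplebucket" }
+ # loop do
+ # resp = client.list_bucket_analytics_configurations(params)
+ # configs.concat(resp.analytics_configuration_list)
+ # break unless resp.is_truncated
+ # params[:continuation_token] = resp.next_continuation_token
+ # end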
+ # + # @return [Types::ListBucketAnalyticsConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListBucketAnalyticsConfigurationsOutput#is_truncated #is_truncated} => Boolean + # * {Types::ListBucketAnalyticsConfigurationsOutput#continuation_token #continuation_token} => String + # * {Types::ListBucketAnalyticsConfigurationsOutput#next_continuation_token #next_continuation_token} => String + # * {Types::ListBucketAnalyticsConfigurationsOutput#analytics_configuration_list #analytics_configuration_list} => Array<Types::AnalyticsConfiguration> + # + # @example Request syntax with placeholder values + # + # resp = client.list_bucket_analytics_configurations({ + # bucket: "BucketName", # required + # continuation_token: "Token", + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.is_truncated #=> Boolean + # resp.continuation_token #=> String + # resp.next_continuation_token #=> String + # resp.analytics_configuration_list #=> Array + # resp.analytics_configuration_list[0].id #=> String + # resp.analytics_configuration_list[0].filter.prefix #=> String + # resp.analytics_configuration_list[0].filter.tag.key #=> String + # resp.analytics_configuration_list[0].filter.tag.value #=> String + # resp.analytics_configuration_list[0].filter.and.prefix #=> String + # resp.analytics_configuration_list[0].filter.and.tags #=> Array + # resp.analytics_configuration_list[0].filter.and.tags[0].key #=> String + # resp.analytics_configuration_list[0].filter.and.tags[0].value #=> String + # resp.analytics_configuration_list[0].storage_class_analysis.data_export.output_schema_version #=> String, one of "V_1" + # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.format #=> String, one of "CSV" + # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.bucket_account_id #=> String + # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.bucket #=> String + # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.prefix #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations AWS API Documentation + # + # @overload list_bucket_analytics_configurations(params = {}) + # @param [Hash] params ({}) + def list_bucket_analytics_configurations(params = {}, options = {}) + req = build_request(:list_bucket_analytics_configurations, params) + req.send_request(options) + end + + # Lists the S3 Intelligent-Tiering configuration from the specified + # bucket. + # + # The S3 Intelligent-Tiering storage class is designed to optimize + # storage costs by automatically moving data to the most cost-effective + # storage access tier, without performance impact or operational + # overhead. S3 Intelligent-Tiering delivers automatic cost savings in + # three low latency and high throughput access tiers. To get the lowest + # storage cost on data that can be accessed in minutes to hours, you can + # choose to activate additional archiving capabilities. + # + # The S3 Intelligent-Tiering storage class is the ideal storage class + # for data with unknown, changing, or unpredictable access patterns, + # independent of object size or retention period. If the size of an + # object is less than 128 KB, it is not monitored and not eligible for + # auto-tiering. 
Smaller objects can be stored, but they are always + # charged at the Frequent Access tier rates in the S3 + # Intelligent-Tiering storage class. + # + # For more information, see [Storage class for automatically optimizing + # frequently and infrequently accessed objects][1]. + # + # Operations related to `ListBucketIntelligentTieringConfigurations` + # include: + # + # * [DeleteBucketIntelligentTieringConfiguration][2] + # + # * [PutBucketIntelligentTieringConfiguration][3] + # + # * [GetBucketIntelligentTieringConfiguration][4] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # + # @option params [String] :continuation_token + # The `ContinuationToken` that represents a placeholder from where this + # request should begin. + # + # @return [Types::ListBucketIntelligentTieringConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListBucketIntelligentTieringConfigurationsOutput#is_truncated #is_truncated} => Boolean + # * {Types::ListBucketIntelligentTieringConfigurationsOutput#continuation_token #continuation_token} => String + # * {Types::ListBucketIntelligentTieringConfigurationsOutput#next_continuation_token #next_continuation_token} => String + # * {Types::ListBucketIntelligentTieringConfigurationsOutput#intelligent_tiering_configuration_list #intelligent_tiering_configuration_list} => Array<Types::IntelligentTieringConfiguration> + # + # @example Request syntax with placeholder values + # + # resp = client.list_bucket_intelligent_tiering_configurations({ + # bucket: "BucketName", # required + # continuation_token: "Token", + # }) + # + # @example Response structure + # + # resp.is_truncated #=> Boolean + # resp.continuation_token #=> String + # resp.next_continuation_token #=> String + # resp.intelligent_tiering_configuration_list #=> Array + # resp.intelligent_tiering_configuration_list[0].id #=> String + # resp.intelligent_tiering_configuration_list[0].filter.prefix #=> String + # resp.intelligent_tiering_configuration_list[0].filter.tag.key #=> String + # resp.intelligent_tiering_configuration_list[0].filter.tag.value #=> String + # resp.intelligent_tiering_configuration_list[0].filter.and.prefix #=> String + # resp.intelligent_tiering_configuration_list[0].filter.and.tags #=> Array + # resp.intelligent_tiering_configuration_list[0].filter.and.tags[0].key #=> String + # resp.intelligent_tiering_configuration_list[0].filter.and.tags[0].value #=> String + # resp.intelligent_tiering_configuration_list[0].status #=> String, one of "Enabled", "Disabled" + # resp.intelligent_tiering_configuration_list[0].tierings #=> Array + # resp.intelligent_tiering_configuration_list[0].tierings[0].days #=> Integer + # resp.intelligent_tiering_configuration_list[0].tierings[0].access_tier #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations AWS API Documentation + # + # @overload 
list_bucket_intelligent_tiering_configurations(params = {})
+ # @param [Hash] params ({})
+ def list_bucket_intelligent_tiering_configurations(params = {}, options = {})
+ req = build_request(:list_bucket_intelligent_tiering_configurations, params)
+ req.send_request(options)
+ end
+
+ # Returns a list of inventory configurations for the bucket. You can
+ # have up to 1,000 inventory configurations per bucket.
+ #
+ # This action supports list pagination and does not return more than 100
+ # configurations at a time. Always check the `IsTruncated` element in
+ # the response. If there are no more configurations to list,
+ # `IsTruncated` is set to false. If there are more configurations to
+ # list, `IsTruncated` is set to true, and there is a value in
+ # `NextContinuationToken`. You use the `NextContinuationToken` value to
+ # continue the pagination of the list by passing the value in
+ # `continuation-token` in the request to `GET` the next page.
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:GetInventoryConfiguration` action. The bucket owner has this
+ # permission by default. The bucket owner can grant this permission to
+ # others. For more information about permissions, see [Permissions
+ # Related to Bucket Subresource Operations][1] and [Managing Access
+ # Permissions to Your Amazon S3 Resources][2].
+ #
+ # For information about the Amazon S3 inventory feature, see [Amazon S3
+ # Inventory][3].
+ #
+ # The following operations are related to
+ # `ListBucketInventoryConfigurations`:
+ #
+ # * [GetBucketInventoryConfiguration][4]
+ #
+ # * [DeleteBucketInventoryConfiguration][5]
+ #
+ # * [PutBucketInventoryConfiguration][6]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket containing the inventory configurations to
+ # retrieve.
+ #
+ # @option params [String] :continuation_token
+ # The marker used to continue an inventory configuration listing that
+ # has been truncated. Use the `NextContinuationToken` from a previously
+ # truncated list response to continue the listing. The continuation
+ # token is an opaque value that Amazon S3 understands.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
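+ #
+ # For illustration (placeholder bucket name), picking the enabled
+ # configurations out of a first page; when `is_truncated` is true, the
+ # same continuation-token loop sketched for the analytics
+ # configurations applies here as well:
+ #
+ # resp = client.list_bucket_inventory_configurations({
+ # bucket: "examplebucket",
+ # })
+ # enabled_ids = resp.inventory_configuration_list.select(&:is_enabled).map(&:id)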
+ # + # @return [Types::ListBucketInventoryConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListBucketInventoryConfigurationsOutput#continuation_token #continuation_token} => String + # * {Types::ListBucketInventoryConfigurationsOutput#inventory_configuration_list #inventory_configuration_list} => Array<Types::InventoryConfiguration> + # * {Types::ListBucketInventoryConfigurationsOutput#is_truncated #is_truncated} => Boolean + # * {Types::ListBucketInventoryConfigurationsOutput#next_continuation_token #next_continuation_token} => String + # + # @example Request syntax with placeholder values + # + # resp = client.list_bucket_inventory_configurations({ + # bucket: "BucketName", # required + # continuation_token: "Token", + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.continuation_token #=> String + # resp.inventory_configuration_list #=> Array + # resp.inventory_configuration_list[0].destination.s3_bucket_destination.account_id #=> String + # resp.inventory_configuration_list[0].destination.s3_bucket_destination.bucket #=> String + # resp.inventory_configuration_list[0].destination.s3_bucket_destination.format #=> String, one of "CSV", "ORC", "Parquet" + # resp.inventory_configuration_list[0].destination.s3_bucket_destination.prefix #=> String + # resp.inventory_configuration_list[0].destination.s3_bucket_destination.encryption.ssekms.key_id #=> String + # resp.inventory_configuration_list[0].is_enabled #=> Boolean + # resp.inventory_configuration_list[0].filter.prefix #=> String + # resp.inventory_configuration_list[0].id #=> String + # resp.inventory_configuration_list[0].included_object_versions #=> String, one of "All", "Current" + # resp.inventory_configuration_list[0].optional_fields #=> Array + # resp.inventory_configuration_list[0].optional_fields[0] #=> String, one of "Size", "LastModifiedDate", "StorageClass", "ETag", "IsMultipartUploaded", "ReplicationStatus", "EncryptionStatus", "ObjectLockRetainUntilDate", "ObjectLockMode", "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", "BucketKeyStatus", "ChecksumAlgorithm" + # resp.inventory_configuration_list[0].schedule.frequency #=> String, one of "Daily", "Weekly" + # resp.is_truncated #=> Boolean + # resp.next_continuation_token #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations AWS API Documentation + # + # @overload list_bucket_inventory_configurations(params = {}) + # @param [Hash] params ({}) + def list_bucket_inventory_configurations(params = {}, options = {}) + req = build_request(:list_bucket_inventory_configurations, params) + req.send_request(options) + end + + # Lists the metrics configurations for the bucket. The metrics + # configurations are only for the request metrics of the bucket and do + # not provide information on daily storage metrics. You can have up to + # 1,000 configurations per bucket. + # + # This action supports list pagination and does not return more than 100 + # configurations at a time. Always check the `IsTruncated` element in + # the response. If there are no more configurations to list, + # `IsTruncated` is set to false. If there are more configurations to + # list, `IsTruncated` is set to true, and there is a value in + # `NextContinuationToken`. 
You use the `NextContinuationToken` value to + # continue the pagination of the list by passing the value in + # `continuation-token` in the request to `GET` the next page. + # + # To use this operation, you must have permissions to perform the + # `s3:GetMetricsConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. + # + # For more information about metrics configurations and CloudWatch + # request metrics, see [Monitoring Metrics with Amazon CloudWatch][3]. + # + # The following operations are related to + # `ListBucketMetricsConfigurations`: + # + # * [PutBucketMetricsConfiguration][4] + # + # * [GetBucketMetricsConfiguration][5] + # + # * [DeleteBucketMetricsConfiguration][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the metrics configurations to + # retrieve. + # + # @option params [String] :continuation_token + # The marker that is used to continue a metrics configuration listing + # that has been truncated. Use the NextContinuationToken from a + # previously truncated list response to continue the listing. The + # continuation token is an opaque value that Amazon S3 understands. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
+ #
+ # @return [Types::ListBucketMetricsConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListBucketMetricsConfigurationsOutput#is_truncated #is_truncated} => Boolean
+ # * {Types::ListBucketMetricsConfigurationsOutput#continuation_token #continuation_token} => String
+ # * {Types::ListBucketMetricsConfigurationsOutput#next_continuation_token #next_continuation_token} => String
+ # * {Types::ListBucketMetricsConfigurationsOutput#metrics_configuration_list #metrics_configuration_list} => Array<Types::MetricsConfiguration>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.list_bucket_metrics_configurations({
+ #     bucket: "BucketName", # required
+ #     continuation_token: "Token",
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.is_truncated #=> Boolean
+ #   resp.continuation_token #=> String
+ #   resp.next_continuation_token #=> String
+ #   resp.metrics_configuration_list #=> Array
+ #   resp.metrics_configuration_list[0].id #=> String
+ #   resp.metrics_configuration_list[0].filter.prefix #=> String
+ #   resp.metrics_configuration_list[0].filter.tag.key #=> String
+ #   resp.metrics_configuration_list[0].filter.tag.value #=> String
+ #   resp.metrics_configuration_list[0].filter.access_point_arn #=> String
+ #   resp.metrics_configuration_list[0].filter.and.prefix #=> String
+ #   resp.metrics_configuration_list[0].filter.and.tags #=> Array
+ #   resp.metrics_configuration_list[0].filter.and.tags[0].key #=> String
+ #   resp.metrics_configuration_list[0].filter.and.tags[0].value #=> String
+ #   resp.metrics_configuration_list[0].filter.and.access_point_arn #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations AWS API Documentation
+ #
+ # @overload list_bucket_metrics_configurations(params = {})
+ # @param [Hash] params ({})
+ def list_bucket_metrics_configurations(params = {}, options = {})
+   req = build_request(:list_bucket_metrics_configurations, params)
+   req.send_request(options)
+ end
+
+ # Returns a list of all buckets owned by the authenticated sender of the
+ # request. To use this operation, you must have the
+ # `s3:ListAllMyBuckets` permission.
+ #
+ # @return [Types::ListBucketsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListBucketsOutput#buckets #buckets} => Array<Types::Bucket>
+ # * {Types::ListBucketsOutput#owner #owner} => Types::Owner
+ #
+ #
+ # @example Example: To list all buckets
+ #
+ #   # The following example returns all the buckets owned by the authenticated sender of the request.
+ #
+ #   resp = client.list_buckets({
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     buckets: [
+ #       {
+ #         creation_date: Time.parse("2012-02-15T21:03:02.000Z"),
+ #         name: "examplebucket",
+ #       },
+ #       {
+ #         creation_date: Time.parse("2011-07-24T19:33:50.000Z"),
+ #         name: "examplebucket2",
+ #       },
+ #       {
+ #         creation_date: Time.parse("2010-12-17T00:56:49.000Z"),
+ #         name: "examplebucket3",
+ #       },
+ #     ],
+ #     owner: {
+ #       display_name: "own-display-name",
+ #       id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31",
+ #     },
+ #   }
+ #
+ # @example Response structure
+ #
+ #   resp.buckets #=> Array
+ #   resp.buckets[0].name #=> String
+ #   resp.buckets[0].creation_date #=> Time
+ #   resp.owner.display_name #=> String
+ #   resp.owner.id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets AWS API Documentation
+ #
+ # @overload list_buckets(params = {})
+ # @param [Hash] params ({})
+ def list_buckets(params = {}, options = {})
+   req = build_request(:list_buckets, params)
+   req.send_request(options)
+ end
+
+ # This action lists in-progress multipart uploads. An in-progress
+ # multipart upload is a multipart upload that has been initiated using
+ # the Initiate Multipart Upload request, but has not yet been completed
+ # or aborted.
+ #
+ # This action returns at most 1,000 multipart uploads in the response.
+ # 1,000 multipart uploads is the maximum number of uploads a response
+ # can include, which is also the default value. You can further limit
+ # the number of uploads in a response by specifying the `max-uploads`
+ # parameter in the request. If additional multipart uploads satisfy the
+ # list criteria, the response will contain an `IsTruncated` element with
+ # the value true. To list the additional multipart uploads, use the
+ # `key-marker` and `upload-id-marker` request parameters.
+ #
+ # In the response, the uploads are sorted by key. If your application
+ # has initiated more than one multipart upload using the same object
+ # key, then uploads in the response are first sorted by key.
+ # Additionally, uploads are sorted in ascending order within each key by
+ # the upload initiation time.
+ #
+ # For more information on multipart uploads, see [Uploading Objects
+ # Using Multipart Upload][1].
+ #
+ # For information on permissions required to use the multipart upload
+ # API, see [Multipart Upload and Permissions][2].
+ #
+ # The following operations are related to `ListMultipartUploads`:
+ #
+ # * [CreateMultipartUpload][3]
+ #
+ # * [UploadPart][4]
+ #
+ # * [CompleteMultipartUpload][5]
+ #
+ # * [ListParts][6]
+ #
+ # * [AbortMultipartUpload][7]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ #
+ # @option params [required, String] :bucket
+ # The name of the bucket to which the multipart upload was initiated.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname.
+ # The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :delimiter
+ # Character you use to group keys.
+ #
+ # All keys that contain the same string between the prefix, if
+ # specified, and the first occurrence of the delimiter after the prefix
+ # are grouped under a single result element, `CommonPrefixes`. If you
+ # don't specify the prefix parameter, then the substring starts at the
+ # beginning of the key. The keys that are grouped under the
+ # `CommonPrefixes` result element are not returned elsewhere in the
+ # response.
+ #
+ # @option params [String] :encoding_type
+ # Requests Amazon S3 to encode the object keys in the response and
+ # specifies the encoding method to use. An object key may contain any
+ # Unicode character; however, an XML 1.0 parser cannot parse some
+ # characters, such as characters with an ASCII value from 0 to 10. For
+ # characters that are not supported in XML 1.0, you can add this
+ # parameter to request that Amazon S3 encode the keys in the response.
+ #
+ # @option params [String] :key_marker
+ # Together with upload-id-marker, this parameter specifies the multipart
+ # upload after which listing should begin.
+ #
+ # If `upload-id-marker` is not specified, only the keys
+ # lexicographically greater than the specified `key-marker` will be
+ # included in the list.
+ #
+ # If `upload-id-marker` is specified, any multipart uploads for a key
+ # equal to the `key-marker` might also be included, provided those
+ # multipart uploads have upload IDs lexicographically greater than the
+ # specified `upload-id-marker`.
+ #
+ # @option params [Integer] :max_uploads
+ # Sets the maximum number of multipart uploads, from 1 to 1,000, to
+ # return in the response body. 1,000 is the maximum number of uploads
+ # that can be returned in a response.
+ #
+ # @option params [String] :prefix
+ # Lists in-progress uploads only for those keys that begin with the
+ # specified prefix. You can use prefixes to separate a bucket into
+ # different groupings of keys. (You can think of using prefix to make
+ # groups in the same way you'd use a folder in a file system.)
+ #
+ # @option params [String] :upload_id_marker
+ # Together with key-marker, specifies the multipart upload after which
+ # listing should begin. If key-marker is not specified, the
+ # upload-id-marker parameter is ignored.
+ # Otherwise, any multipart
+ # uploads for a key equal to the key-marker might be included in the
+ # list only if they have an upload ID lexicographically greater than the
+ # specified `upload-id-marker`.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::ListMultipartUploadsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListMultipartUploadsOutput#bucket #bucket} => String
+ # * {Types::ListMultipartUploadsOutput#key_marker #key_marker} => String
+ # * {Types::ListMultipartUploadsOutput#upload_id_marker #upload_id_marker} => String
+ # * {Types::ListMultipartUploadsOutput#next_key_marker #next_key_marker} => String
+ # * {Types::ListMultipartUploadsOutput#prefix #prefix} => String
+ # * {Types::ListMultipartUploadsOutput#delimiter #delimiter} => String
+ # * {Types::ListMultipartUploadsOutput#next_upload_id_marker #next_upload_id_marker} => String
+ # * {Types::ListMultipartUploadsOutput#max_uploads #max_uploads} => Integer
+ # * {Types::ListMultipartUploadsOutput#is_truncated #is_truncated} => Boolean
+ # * {Types::ListMultipartUploadsOutput#uploads #uploads} => Array<Types::MultipartUpload>
+ # * {Types::ListMultipartUploadsOutput#common_prefixes #common_prefixes} => Array<Types::CommonPrefix>
+ # * {Types::ListMultipartUploadsOutput#encoding_type #encoding_type} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list in-progress multipart uploads on a bucket
+ #
+ #   # The following example lists in-progress multipart uploads on a specific bucket.
+ #
+ #   resp = client.list_multipart_uploads({
+ #     bucket: "examplebucket",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     uploads: [
+ #       {
+ #         initiated: Time.parse("2014-05-01T05:40:58.000Z"),
+ #         initiator: {
+ #           display_name: "display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         key: "JavaFile",
+ #         owner: {
+ #           display_name: "display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         storage_class: "STANDARD",
+ #         upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
+ #       },
+ #       {
+ #         initiated: Time.parse("2014-05-01T05:41:27.000Z"),
+ #         initiator: {
+ #           display_name: "display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         key: "JavaFile",
+ #         owner: {
+ #           display_name: "display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         storage_class: "STANDARD",
+ #         upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
+ #       },
+ #     ],
+ #   }
+ #
+ # @example Example: List next set of multipart uploads when previous result is truncated
+ #
+ #   # The following example specifies the upload-id-marker and key-marker from a previous truncated response to retrieve the
+ #   # next set of multipart uploads.
+ #
+ #   resp = client.list_multipart_uploads({
+ #     bucket: "examplebucket",
+ #     key_marker: "nextkeyfrompreviousresponse",
+ #     max_uploads: 2,
+ #     upload_id_marker: "valuefrompreviousresponse",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     bucket: "acl1",
+ #     is_truncated: true,
+ #     key_marker: "",
+ #     max_uploads: 2,
+ #     next_key_marker: "someobjectkey",
+ #     next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
+ #     upload_id_marker: "",
+ #     uploads: [
+ #       {
+ #         initiated: Time.parse("2014-05-01T05:40:58.000Z"),
+ #         initiator: {
+ #           display_name: "owner-display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         key: "JavaFile",
+ #         owner: {
+ #           display_name: "mohanataws",
+ #           id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         storage_class: "STANDARD",
+ #         upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
+ #       },
+ #       {
+ #         initiated: Time.parse("2014-05-01T05:41:27.000Z"),
+ #         initiator: {
+ #           display_name: "owner-display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         key: "JavaFile",
+ #         owner: {
+ #           display_name: "owner-display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         storage_class: "STANDARD",
+ #         upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
+ #       },
+ #     ],
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.list_multipart_uploads({
+ #     bucket: "BucketName", # required
+ #     delimiter: "Delimiter",
+ #     encoding_type: "url", # accepts url
+ #     key_marker: "KeyMarker",
+ #     max_uploads: 1,
+ #     prefix: "Prefix",
+ #     upload_id_marker: "UploadIdMarker",
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.bucket #=> String
+ #   resp.key_marker #=> String
+ #   resp.upload_id_marker #=> String
+ #   resp.next_key_marker #=> String
+ #   resp.prefix #=> String
+ #   resp.delimiter #=> String
+ #   resp.next_upload_id_marker #=> String
+ #   resp.max_uploads #=> Integer
+ #   resp.is_truncated #=> Boolean
+ #   resp.uploads #=> Array
+ #   resp.uploads[0].upload_id #=> String
+ #   resp.uploads[0].key #=> String
+ #   resp.uploads[0].initiated #=> Time
+ #   resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW"
+ #   resp.uploads[0].owner.display_name #=> String
+ #   resp.uploads[0].owner.id #=> String
+ #   resp.uploads[0].initiator.id #=> String
+ #   resp.uploads[0].initiator.display_name #=> String
+ #   resp.uploads[0].checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256"
+ #   resp.common_prefixes #=> Array
+ #   resp.common_prefixes[0].prefix #=> String
+ #   resp.encoding_type #=> String, one of "url"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads AWS API Documentation
+ #
+ # @overload list_multipart_uploads(params = {})
+ # @param [Hash] params ({})
+ def list_multipart_uploads(params = {}, options = {})
+   req = build_request(:list_multipart_uploads, params)
+   req.send_request(options)
+ end
+
+ # Returns metadata about all versions of the objects in a bucket. You
+ # can also use request parameters as selection criteria to return
+ # metadata about a subset of all the object versions.
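+ #
+ # Note that versions and delete markers come back in separate lists in
+ # the response. As a minimal illustrative sketch (the bucket name and
+ # prefix here are assumptions), a caller can inspect both:
+ #
+ #     resp = client.list_object_versions(bucket: "my-bucket", prefix: "logs/")
+ #     resp.versions.each do |v|
+ #       puts "#{v.key} #{v.version_id} latest=#{v.is_latest}"
+ #     end
+ #     resp.delete_markers.each { |m| puts "delete marker: #{m.key}" }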
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:ListBucketVersions` action. Be aware of the name difference.
+ #
+ # A 200 OK response can contain valid or invalid XML. Make sure to
+ # design your application to parse the contents of the response and
+ # handle it appropriately.
+ #
+ #
+ #
+ # To use this operation, you must have READ access to the bucket.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ #
+ # The following operations are related to `ListObjectVersions`:
+ #
+ # * [ListObjectsV2][1]
+ #
+ # * [GetObject][2]
+ #
+ # * [PutObject][3]
+ #
+ # * [DeleteObject][4]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ #
+ # @option params [required, String] :bucket
+ # The bucket name that contains the objects.
+ #
+ # @option params [String] :delimiter
+ # A delimiter is a character that you specify to group keys. All keys
+ # that contain the same string between the `prefix` and the first
+ # occurrence of the delimiter are grouped under a single result element
+ # in CommonPrefixes. These groups are counted as one result against the
+ # max-keys limitation. These keys are not returned elsewhere in the
+ # response.
+ #
+ # @option params [String] :encoding_type
+ # Requests Amazon S3 to encode the object keys in the response and
+ # specifies the encoding method to use. An object key may contain any
+ # Unicode character; however, an XML 1.0 parser cannot parse some
+ # characters, such as characters with an ASCII value from 0 to 10. For
+ # characters that are not supported in XML 1.0, you can add this
+ # parameter to request that Amazon S3 encode the keys in the response.
+ #
+ # @option params [String] :key_marker
+ # Specifies the key to start with when listing objects in a bucket.
+ #
+ # @option params [Integer] :max_keys
+ # Sets the maximum number of keys returned in the response. By default
+ # the action returns up to 1,000 key names. The response might contain
+ # fewer keys but will never contain more. If additional keys satisfy the
+ # search criteria, but were not returned because max-keys was exceeded,
+ # the response contains <isTruncated>true</isTruncated>. To
+ # return the additional keys, see key-marker and version-id-marker.
+ #
+ # @option params [String] :prefix
+ # Use this parameter to select only those keys that begin with the
+ # specified prefix. You can use prefixes to separate a bucket into
+ # different groupings of keys. (You can think of using prefix to make
+ # groups in the same way you'd use a folder in a file system.) You can
+ # use prefix with delimiter to roll up numerous objects into a single
+ # result under CommonPrefixes.
+ #
+ # @option params [String] :version_id_marker
+ # Specifies the object version you want to start listing from.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::ListObjectVersionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListObjectVersionsOutput#is_truncated #is_truncated} => Boolean
+ # * {Types::ListObjectVersionsOutput#key_marker #key_marker} => String
+ # * {Types::ListObjectVersionsOutput#version_id_marker #version_id_marker} => String
+ # * {Types::ListObjectVersionsOutput#next_key_marker #next_key_marker} => String
+ # * {Types::ListObjectVersionsOutput#next_version_id_marker #next_version_id_marker} => String
+ # * {Types::ListObjectVersionsOutput#versions #versions} => Array<Types::ObjectVersion>
+ # * {Types::ListObjectVersionsOutput#delete_markers #delete_markers} => Array<Types::DeleteMarkerEntry>
+ # * {Types::ListObjectVersionsOutput#name #name} => String
+ # * {Types::ListObjectVersionsOutput#prefix #prefix} => String
+ # * {Types::ListObjectVersionsOutput#delimiter #delimiter} => String
+ # * {Types::ListObjectVersionsOutput#max_keys #max_keys} => Integer
+ # * {Types::ListObjectVersionsOutput#common_prefixes #common_prefixes} => Array<Types::CommonPrefix>
+ # * {Types::ListObjectVersionsOutput#encoding_type #encoding_type} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list object versions
+ #
+ #   # The following example returns versions of an object with a specific key name prefix. The request limits the number of
+ #   # items returned to two. If there are more than two object versions, S3 returns NextToken in the response. You can
+ #   # specify this token value in your next request to fetch the next set of object versions.
+ #
+ #   resp = client.list_object_versions({
+ #     bucket: "examplebucket",
+ #     prefix: "HappyFace.jpg",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     versions: [
+ #       {
+ #         etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+ #         is_latest: true,
+ #         key: "HappyFace.jpg",
+ #         last_modified: Time.parse("2016-12-15T01:19:41.000Z"),
+ #         owner: {
+ #           display_name: "owner-display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         size: 3191,
+ #         storage_class: "STANDARD",
+ #         version_id: "null",
+ #       },
+ #       {
+ #         etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+ #         is_latest: false,
+ #         key: "HappyFace.jpg",
+ #         last_modified: Time.parse("2016-12-13T00:58:26.000Z"),
+ #         owner: {
+ #           display_name: "owner-display-name",
+ #           id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         size: 3191,
+ #         storage_class: "STANDARD",
+ #         version_id: "PHtexPGjH2y.zBgT8LmB7wwLI2mpbz.k",
+ #       },
+ #     ],
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.list_object_versions({
+ #     bucket: "BucketName", # required
+ #     delimiter: "Delimiter",
+ #     encoding_type: "url", # accepts url
+ #     key_marker: "KeyMarker",
+ #     max_keys: 1,
+ #     prefix: "Prefix",
+ #     version_id_marker: "VersionIdMarker",
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.is_truncated #=> Boolean
+ #   resp.key_marker #=> String
+ #   resp.version_id_marker #=> String
+ #   resp.next_key_marker #=> String
+ #   resp.next_version_id_marker #=> String
+ #   resp.versions #=> Array
+ #   resp.versions[0].etag #=> String
+ #   resp.versions[0].checksum_algorithm #=> Array
+ #   resp.versions[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256"
+ #
resp.versions[0].size #=> Integer + # resp.versions[0].storage_class #=> String, one of "STANDARD" + # resp.versions[0].key #=> String + # resp.versions[0].version_id #=> String + # resp.versions[0].is_latest #=> Boolean + # resp.versions[0].last_modified #=> Time + # resp.versions[0].owner.display_name #=> String + # resp.versions[0].owner.id #=> String + # resp.delete_markers #=> Array + # resp.delete_markers[0].owner.display_name #=> String + # resp.delete_markers[0].owner.id #=> String + # resp.delete_markers[0].key #=> String + # resp.delete_markers[0].version_id #=> String + # resp.delete_markers[0].is_latest #=> Boolean + # resp.delete_markers[0].last_modified #=> Time + # resp.name #=> String + # resp.prefix #=> String + # resp.delimiter #=> String + # resp.max_keys #=> Integer + # resp.common_prefixes #=> Array + # resp.common_prefixes[0].prefix #=> String + # resp.encoding_type #=> String, one of "url" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions AWS API Documentation + # + # @overload list_object_versions(params = {}) + # @param [Hash] params ({}) + def list_object_versions(params = {}, options = {}) + req = build_request(:list_object_versions, params) + req.send_request(options) + end + + # Returns some or all (up to 1,000) of the objects in a bucket. You can + # use the request parameters as selection criteria to return a subset of + # the objects in a bucket. A 200 OK response can contain valid or + # invalid XML. Be sure to design your application to parse the contents + # of the response and handle it appropriately. + # + # This action has been revised. We recommend that you use the newer + # version, [ListObjectsV2][1], when developing applications. For + # backward compatibility, Amazon S3 continues to support `ListObjects`. + # + # The following operations are related to `ListObjects`: + # + # * [ListObjectsV2][1] + # + # * [GetObject][2] + # + # * [PutObject][3] + # + # * [CreateBucket][4] + # + # * [ListBuckets][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + # + # @option params [required, String] :bucket + # The name of the bucket containing the objects. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :delimiter
+ # A delimiter is a character you use to group keys.
+ #
+ # @option params [String] :encoding_type
+ # Requests Amazon S3 to encode the object keys in the response and
+ # specifies the encoding method to use. An object key may contain any
+ # Unicode character; however, an XML 1.0 parser cannot parse some
+ # characters, such as characters with an ASCII value from 0 to 10. For
+ # characters that are not supported in XML 1.0, you can add this
+ # parameter to request that Amazon S3 encode the keys in the response.
+ #
+ # @option params [String] :marker
+ # Marker is where you want Amazon S3 to start listing from. Amazon S3
+ # starts listing after this specified key. Marker can be any key in the
+ # bucket.
+ #
+ # @option params [Integer] :max_keys
+ # Sets the maximum number of keys returned in the response. By default
+ # the action returns up to 1,000 key names. The response might contain
+ # fewer keys but will never contain more.
+ #
+ # @option params [String] :prefix
+ # Limits the response to keys that begin with the specified prefix.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # list objects request. Bucket owners need not specify this parameter in
+ # their requests.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::ListObjectsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListObjectsOutput#is_truncated #is_truncated} => Boolean
+ # * {Types::ListObjectsOutput#marker #marker} => String
+ # * {Types::ListObjectsOutput#next_marker #next_marker} => String
+ # * {Types::ListObjectsOutput#contents #contents} => Array<Types::Object>
+ # * {Types::ListObjectsOutput#name #name} => String
+ # * {Types::ListObjectsOutput#prefix #prefix} => String
+ # * {Types::ListObjectsOutput#delimiter #delimiter} => String
+ # * {Types::ListObjectsOutput#max_keys #max_keys} => Integer
+ # * {Types::ListObjectsOutput#common_prefixes #common_prefixes} => Array<Types::CommonPrefix>
+ # * {Types::ListObjectsOutput#encoding_type #encoding_type} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list objects in a bucket
+ #
+ #   # The following example lists two objects in a bucket.
+ #
+ #   resp = client.list_objects({
+ #     bucket: "examplebucket",
+ #     max_keys: 2,
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     contents: [
+ #       {
+ #         etag: "\"70ee1738b6b21e2c8a43f3a5ab0eee71\"",
+ #         key: "example1.jpg",
+ #         last_modified: Time.parse("2014-11-21T19:40:05.000Z"),
+ #         owner: {
+ #           display_name: "myname",
+ #           id: "12345example25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         size: 11,
+ #         storage_class: "STANDARD",
+ #       },
+ #       {
+ #         etag: "\"9c8af9a76df052144598c115ef33e511\"",
+ #         key: "example2.jpg",
+ #         last_modified: Time.parse("2013-11-15T01:10:49.000Z"),
+ #         owner: {
+ #           display_name: "myname",
+ #           id: "12345example25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ #         },
+ #         size: 713193,
+ #         storage_class: "STANDARD",
+ #       },
+ #     ],
+ #     next_marker: "eyJNYXJrZXIiOiBudWxsLCAiYm90b190cnVuY2F0ZV9hbW91bnQiOiAyfQ==",
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.list_objects({
+ #     bucket: "BucketName", # required
+ #     delimiter: "Delimiter",
+ #     encoding_type: "url", # accepts url
+ #     marker: "Marker",
+ #     max_keys: 1,
+ #     prefix: "Prefix",
+ #     request_payer: "requester", # accepts requester
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.is_truncated #=> Boolean
+ #   resp.marker #=> String
+ #   resp.next_marker #=> String
+ #   resp.contents #=> Array
+ #   resp.contents[0].key #=> String
+ #   resp.contents[0].last_modified #=> Time
+ #   resp.contents[0].etag #=> String
+ #   resp.contents[0].checksum_algorithm #=> Array
+ #   resp.contents[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256"
+ #   resp.contents[0].size #=> Integer
+ #   resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW"
+ #   resp.contents[0].owner.display_name #=> String
+ #   resp.contents[0].owner.id #=> String
+ #   resp.name #=> String
+ #   resp.prefix #=> String
+ #   resp.delimiter #=> String
+ #   resp.max_keys #=> Integer
+ #   resp.common_prefixes #=> Array
+ #   resp.common_prefixes[0].prefix #=> String
+ #   resp.encoding_type #=> String, one of "url"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects AWS API Documentation
+ #
+ # @overload list_objects(params = {})
+ # @param [Hash] params ({})
+ def list_objects(params = {}, options = {})
+   req = build_request(:list_objects, params)
+   req.send_request(options)
+ end
+
+ # Returns some or all (up to 1,000) of the objects in a bucket with each
+ # request. You can use the request parameters as selection criteria to
+ # return a subset of the objects in a bucket. A `200 OK` response can
+ # contain valid or invalid XML. Make sure to design your application to
+ # parse the contents of the response and handle it appropriately.
+ # Objects are returned sorted in ascending order of the respective key
+ # names in the list. For more information about listing objects, see
+ # [Listing object keys programmatically][1].
+ #
+ # To use this operation, you must have READ access to the bucket.
+ #
+ # To use this action in an Identity and Access Management (IAM) policy,
+ # you must have permissions to perform the `s3:ListBucket` action. The
+ # bucket owner has this permission by default and can grant this
+ # permission to others.
+ # For more information about permissions, see
+ # [Permissions Related to Bucket Subresource Operations][2] and
+ # [Managing Access Permissions to Your Amazon S3 Resources][3].
+ #
+ # This section describes the latest revision of this action. We
+ # recommend that you use this revised API for application development.
+ # For backward compatibility, Amazon S3 continues to support the prior
+ # version of this API, [ListObjects][4].
+ #
+ # To get a list of your buckets, see [ListBuckets][5].
+ #
+ # The following operations are related to `ListObjectsV2`:
+ #
+ # * [GetObject][6]
+ #
+ # * [PutObject][7]
+ #
+ # * [CreateBucket][8]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ #
+ # @option params [required, String] :bucket
+ # Bucket name to list.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [String] :delimiter
+ # A delimiter is a character you use to group keys.
+ #
+ # @option params [String] :encoding_type
+ # Encoding type used by Amazon S3 to encode object keys in the response.
+ #
+ # @option params [Integer] :max_keys
+ # Sets the maximum number of keys returned in the response. By default
+ # the action returns up to 1,000 key names. The response might contain
+ # fewer keys but will never contain more.
+ #
+ # @option params [String] :prefix
+ # Limits the response to keys that begin with the specified prefix.
+ #
+ # @option params [String] :continuation_token
+ # ContinuationToken indicates to Amazon S3 that the list is being
+ # continued on this bucket with a token. ContinuationToken is obfuscated
+ # and is not a real key.
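+ #
+ # In most applications you do not need to manage this token yourself;
+ # the pageable response described below can enumerate follow-on pages
+ # for you. A minimal illustrative sketch (the bucket name is assumed):
+ #
+ #     client.list_objects_v2(bucket: "my-bucket").each do |page|
+ #       page.contents.each { |object| puts object.key }
+ #     end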
+ #
+ # @option params [Boolean] :fetch_owner
+ # The owner field is not present in ListObjectsV2 responses by default.
+ # If you want the owner field returned with each key in the result, set
+ # the fetch owner field to true.
+ #
+ # @option params [String] :start_after
+ # StartAfter is where you want Amazon S3 to start listing from. Amazon
+ # S3 starts listing after this specified key. StartAfter can be any key
+ # in the bucket.
+ #
+ # @option params [String] :request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # list objects request in V2 style. Bucket owners need not specify this
+ # parameter in their requests.
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @return [Types::ListObjectsV2Output] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListObjectsV2Output#is_truncated #is_truncated} => Boolean
+ # * {Types::ListObjectsV2Output#contents #contents} => Array<Types::Object>
+ # * {Types::ListObjectsV2Output#name #name} => String
+ # * {Types::ListObjectsV2Output#prefix #prefix} => String
+ # * {Types::ListObjectsV2Output#delimiter #delimiter} => String
+ # * {Types::ListObjectsV2Output#max_keys #max_keys} => Integer
+ # * {Types::ListObjectsV2Output#common_prefixes #common_prefixes} => Array<Types::CommonPrefix>
+ # * {Types::ListObjectsV2Output#encoding_type #encoding_type} => String
+ # * {Types::ListObjectsV2Output#key_count #key_count} => Integer
+ # * {Types::ListObjectsV2Output#continuation_token #continuation_token} => String
+ # * {Types::ListObjectsV2Output#next_continuation_token #next_continuation_token} => String
+ # * {Types::ListObjectsV2Output#start_after #start_after} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To get object list
+ #
+ #   # The following example retrieves an object list. The request specifies max keys to limit the response to include only
+ #   # two object keys.
+ # + # resp = client.list_objects_v2({ + # bucket: "examplebucket", + # max_keys: 2, + # }) + # + # resp.to_h outputs the following: + # { + # contents: [ + # { + # etag: "\"70ee1738b6b21e2c8a43f3a5ab0eee71\"", + # key: "happyface.jpg", + # last_modified: Time.parse("2014-11-21T19:40:05.000Z"), + # size: 11, + # storage_class: "STANDARD", + # }, + # { + # etag: "\"becf17f89c30367a9a44495d62ed521a-1\"", + # key: "test.jpg", + # last_modified: Time.parse("2014-05-02T04:51:50.000Z"), + # size: 4192256, + # storage_class: "STANDARD", + # }, + # ], + # is_truncated: true, + # key_count: 2, + # max_keys: 2, + # name: "examplebucket", + # next_continuation_token: "1w41l63U0xa8q7smH50vCxyTQqdxo69O3EmK28Bi5PcROI4wI/EyIJg==", + # prefix: "", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.list_objects_v2({ + # bucket: "BucketName", # required + # delimiter: "Delimiter", + # encoding_type: "url", # accepts url + # max_keys: 1, + # prefix: "Prefix", + # continuation_token: "Token", + # fetch_owner: false, + # start_after: "StartAfter", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.is_truncated #=> Boolean + # resp.contents #=> Array + # resp.contents[0].key #=> String + # resp.contents[0].last_modified #=> Time + # resp.contents[0].etag #=> String + # resp.contents[0].checksum_algorithm #=> Array + # resp.contents[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" + # resp.contents[0].size #=> Integer + # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.contents[0].owner.display_name #=> String + # resp.contents[0].owner.id #=> String + # resp.name #=> String + # resp.prefix #=> String + # resp.delimiter #=> String + # resp.max_keys #=> Integer + # resp.common_prefixes #=> Array + # resp.common_prefixes[0].prefix #=> String + # resp.encoding_type #=> String, one of "url" + # resp.key_count #=> Integer + # resp.continuation_token #=> String + # resp.next_continuation_token #=> String + # resp.start_after #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 AWS API Documentation + # + # @overload list_objects_v2(params = {}) + # @param [Hash] params ({}) + def list_objects_v2(params = {}, options = {}) + req = build_request(:list_objects_v2, params) + req.send_request(options) + end + + # Lists the parts that have been uploaded for a specific multipart + # upload. This operation must include the upload ID, which you obtain by + # sending the initiate multipart upload request (see + # [CreateMultipartUpload][1]). This request returns a maximum of 1,000 + # uploaded parts. The default number of parts returned is 1,000 parts. + # You can restrict the number of parts returned by specifying the + # `max-parts` request parameter. If your multipart upload consists of + # more than 1,000 parts, the response returns an `IsTruncated` field + # with the value of true, and a `NextPartNumberMarker` element. In + # subsequent `ListParts` requests you can include the part-number-marker + # query string parameter and set its value to the `NextPartNumberMarker` + # field value from the previous response. + # + # If the upload was created using a checksum algorithm, you will need to + # have permission to the `kms:Decrypt` action for the request to + # succeed. 
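+ #
+ # One common use of this operation is collecting the part numbers and
+ # ETags needed to complete an upload. A minimal illustrative sketch
+ # (the bucket, key, and `upload_id` here are assumed placeholders, and
+ # an upload with more than 1,000 parts would need to page first):
+ #
+ #     listed = client.list_parts(
+ #       bucket: "my-bucket",
+ #       key: "big-object",
+ #       upload_id: upload_id
+ #     )
+ #     parts = listed.parts.map { |p| { part_number: p.part_number, etag: p.etag } }
+ #     client.complete_multipart_upload(
+ #       bucket: "my-bucket",
+ #       key: "big-object",
+ #       upload_id: upload_id,
+ #       multipart_upload: { parts: parts }
+ #     )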
+ # + # For more information on multipart uploads, see [Uploading Objects + # Using Multipart Upload][2]. + # + # For information on permissions required to use the multipart upload + # API, see [Multipart Upload and Permissions][3]. + # + # The following operations are related to `ListParts`: + # + # * [CreateMultipartUpload][1] + # + # * [UploadPart][4] + # + # * [CompleteMultipartUpload][5] + # + # * [AbortMultipartUpload][6] + # + # * [GetObjectAttributes][7] + # + # * [ListMultipartUploads][8] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + # + # @option params [required, String] :bucket + # The name of the bucket to which the parts are being uploaded. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Object key for which the multipart upload was initiated. + # + # @option params [Integer] :max_parts + # Sets the maximum number of parts to return. + # + # @option params [Integer] :part_number_marker + # Specifies the part after which listing should begin. Only parts with + # higher part numbers will be listed. + # + # @option params [required, String] :upload_id + # Upload ID identifying the multipart upload whose parts are being + # listed. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned by
+ # a different account, the request fails with the HTTP status code `403
+ # Forbidden` (access denied).
+ #
+ # @option params [String] :sse_customer_algorithm
+ # The server-side encryption (SSE) algorithm used to encrypt the object.
+ # This parameter is needed only when the object was created using a
+ # checksum algorithm. For more information, see [Protecting data using
+ # SSE-C keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key
+ # The server-side encryption (SSE) customer managed key. This parameter
+ # is needed only when the object was created using a checksum algorithm.
+ # For more information, see [Protecting data using SSE-C keys][1] in the
+ # *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key_md5
+ # The MD5 digest of the server-side encryption (SSE) customer managed
+ # key. This parameter is needed only when the object was created using a
+ # checksum algorithm. For more information, see [Protecting data using
+ # SSE-C keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @return [Types::ListPartsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListPartsOutput#abort_date #abort_date} => Time
+ # * {Types::ListPartsOutput#abort_rule_id #abort_rule_id} => String
+ # * {Types::ListPartsOutput#bucket #bucket} => String
+ # * {Types::ListPartsOutput#key #key} => String
+ # * {Types::ListPartsOutput#upload_id #upload_id} => String
+ # * {Types::ListPartsOutput#part_number_marker #part_number_marker} => Integer
+ # * {Types::ListPartsOutput#next_part_number_marker #next_part_number_marker} => Integer
+ # * {Types::ListPartsOutput#max_parts #max_parts} => Integer
+ # * {Types::ListPartsOutput#is_truncated #is_truncated} => Boolean
+ # * {Types::ListPartsOutput#parts #parts} => Array<Types::Part>
+ # * {Types::ListPartsOutput#initiator #initiator} => Types::Initiator
+ # * {Types::ListPartsOutput#owner #owner} => Types::Owner
+ # * {Types::ListPartsOutput#storage_class #storage_class} => String
+ # * {Types::ListPartsOutput#request_charged #request_charged} => String
+ # * {Types::ListPartsOutput#checksum_algorithm #checksum_algorithm} => String
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ #
+ # @example Example: To list parts of a multipart upload.
+ #
+ #   # The following example lists parts uploaded for a specific multipart upload.
+ # + # resp = client.list_parts({ + # bucket: "examplebucket", + # key: "bigobject", + # upload_id: "example7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--", + # }) + # + # resp.to_h outputs the following: + # { + # initiator: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # }, + # owner: { + # display_name: "owner-display-name", + # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", + # }, + # parts: [ + # { + # etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"", + # last_modified: Time.parse("2016-12-16T00:11:42.000Z"), + # part_number: 1, + # size: 26246026, + # }, + # { + # etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"", + # last_modified: Time.parse("2016-12-16T00:15:01.000Z"), + # part_number: 2, + # size: 26246026, + # }, + # ], + # storage_class: "STANDARD", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.list_parts({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # max_parts: 1, + # part_number_marker: 1, + # upload_id: "MultipartUploadId", # required + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # }) + # + # @example Response structure + # + # resp.abort_date #=> Time + # resp.abort_rule_id #=> String + # resp.bucket #=> String + # resp.key #=> String + # resp.upload_id #=> String + # resp.part_number_marker #=> Integer + # resp.next_part_number_marker #=> Integer + # resp.max_parts #=> Integer + # resp.is_truncated #=> Boolean + # resp.parts #=> Array + # resp.parts[0].part_number #=> Integer + # resp.parts[0].last_modified #=> Time + # resp.parts[0].etag #=> String + # resp.parts[0].size #=> Integer + # resp.parts[0].checksum_crc32 #=> String + # resp.parts[0].checksum_crc32c #=> String + # resp.parts[0].checksum_sha1 #=> String + # resp.parts[0].checksum_sha256 #=> String + # resp.initiator.id #=> String + # resp.initiator.display_name #=> String + # resp.owner.display_name #=> String + # resp.owner.id #=> String + # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW" + # resp.request_charged #=> String, one of "requester" + # resp.checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts AWS API Documentation + # + # @overload list_parts(params = {}) + # @param [Hash] params ({}) + def list_parts(params = {}, options = {}) + req = build_request(:list_parts, params) + req.send_request(options) + end + + # Sets the accelerate configuration of an existing bucket. Amazon S3 + # Transfer Acceleration is a bucket-level feature that enables you to + # perform faster data transfers to Amazon S3. + # + # To use this operation, you must have permission to perform the + # `s3:PutAccelerateConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][1] and [Managing Access + # Permissions to Your Amazon S3 Resources][2]. 
+ # + # The Transfer Acceleration state of a bucket can be set to one of the + # following two values: + # + # * Enabled – Enables accelerated data transfers to the bucket. + # + # * Suspended – Disables accelerated data transfers to the bucket. + # + # The [GetBucketAccelerateConfiguration][3] action returns the transfer + # acceleration state of a bucket. + # + # After setting the Transfer Acceleration state of a bucket to Enabled, + # it might take up to thirty minutes before the data transfer rates to + # the bucket increase. + # + # The name of the bucket used for Transfer Acceleration must be + # DNS-compliant and must not contain periods ("."). + # + # For more information about transfer acceleration, see [Transfer + # Acceleration][4]. + # + # The following operations are related to + # `PutBucketAccelerateConfiguration`: + # + # * [GetBucketAccelerateConfiguration][3] + # + # * [CreateBucket][5] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # + # @option params [required, String] :bucket + # The name of the bucket for which the accelerate configuration is set. + # + # @option params [required, Types::AccelerateConfiguration] :accelerate_configuration + # Container for setting the transfer acceleration state. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
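+ #
+ # As a usage sketch (the bucket name below is hypothetical), you can
+ # enable acceleration and then read the state back:
+ #
+ # @example Enable Transfer Acceleration and verify the state (sketch)
+ #
+ #   client.put_bucket_accelerate_configuration({
+ #     bucket: "amzn-s3-demo-bucket", # hypothetical; must be DNS-compliant, with no periods
+ #     accelerate_configuration: { status: "Enabled" },
+ #   })
+ #   # The new state is visible immediately, though transfer rates can
+ #   # take up to thirty minutes to increase.
+ #   client.get_bucket_accelerate_configuration({ bucket: "amzn-s3-demo-bucket" }).status
+ #   #=> "Enabled"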
+ # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_accelerate_configuration({ + # bucket: "BucketName", # required + # accelerate_configuration: { # required + # status: "Enabled", # accepts Enabled, Suspended + # }, + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration AWS API Documentation + # + # @overload put_bucket_accelerate_configuration(params = {}) + # @param [Hash] params ({}) + def put_bucket_accelerate_configuration(params = {}, options = {}) + req = build_request(:put_bucket_accelerate_configuration, params) + req.send_request(options) + end + + # Sets the permissions on an existing bucket using access control lists + # (ACL). For more information, see [Using ACLs][1]. To set the ACL of a + # bucket, you must have `WRITE_ACP` permission. + # + # You can use one of the following two ways to set a bucket's + # permissions: + # + # * Specify the ACL in the request body + # + # * Specify permissions using request headers + # + # You cannot specify access permission using both the body and the + # request headers. + # + # + # + # Depending on your application needs, you may choose to set the ACL on + # a bucket using either the request body or the headers. For example, if + # you have an existing application that updates a bucket ACL using the + # request body, then you can continue to use that approach. + # + # If your bucket uses the bucket owner enforced setting for S3 Object + # Ownership, ACLs are disabled and no longer affect permissions. You + # must use policies to grant access to your bucket and the objects in + # it. Requests to set ACLs or update ACLs fail and return the + # `AccessControlListNotSupported` error code. Requests to read ACLs are + # still supported. For more information, see [Controlling object + # ownership][2] in the *Amazon S3 User Guide*. + # + # **Access Permissions** + # + # You can set access permissions using one of the following methods: + # + # * Specify a canned ACL with the `x-amz-acl` request header. Amazon S3 + # supports a set of predefined ACLs, known as *canned ACLs*. Each + # canned ACL has a predefined set of grantees and permissions. Specify + # the canned ACL name as the value of `x-amz-acl`. If you use this + # header, you cannot use other access control-specific headers in your + # request. For more information, see [Canned ACL][3]. + # + # * Specify access permissions explicitly with the `x-amz-grant-read`, + # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and + # `x-amz-grant-full-control` headers. When using these headers, you + # specify explicit access permissions and grantees (Amazon Web + # Services accounts or Amazon S3 groups) who will receive the + # permission. If you use these ACL-specific headers, you cannot use + # the `x-amz-acl` header to set a canned ACL. These parameters map to + # the set of permissions that Amazon S3 supports in an ACL. For more + # information, see [Access Control List (ACL) Overview][4]. 
+ #
+ #     You specify each grantee as a type=value pair, where the type is one
+ #     of the following:
+ #
+ #     * `id` – if the value specified is the canonical user ID of an
+ #       Amazon Web Services account
+ #
+ #     * `uri` – if you are granting permissions to a predefined group
+ #
+ #     * `emailAddress` – if the value specified is the email address of an
+ #       Amazon Web Services account
+ #
+ #       Using email addresses to specify a grantee is only supported in
+ #       the following Amazon Web Services Regions:
+ #
+ #       * US East (N. Virginia)
+ #
+ #       * US West (N. California)
+ #
+ #       * US West (Oregon)
+ #
+ #       * Asia Pacific (Singapore)
+ #
+ #       * Asia Pacific (Sydney)
+ #
+ #       * Asia Pacific (Tokyo)
+ #
+ #       * Europe (Ireland)
+ #
+ #       * South America (São Paulo)
+ #
+ #       For a list of all the Amazon S3 supported Regions and endpoints,
+ #       see [Regions and Endpoints][5] in the Amazon Web Services General
+ #       Reference.
+ #
+ #
+ #
+ #     For example, the following `x-amz-grant-write` header grants create,
+ #     overwrite, and delete objects permission to the LogDelivery group
+ #     predefined by Amazon S3 and to two Amazon Web Services accounts
+ #     identified by their account IDs:
+ #
+ #     `x-amz-grant-write:
+ #     uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+ #     id="111122223333", id="555566667777" `
+ #
+ # You can use either a canned ACL or specify access permissions
+ # explicitly. You cannot do both.
+ #
+ # **Grantee Values**
+ #
+ # You can specify the person (grantee) to whom you're assigning access
+ # rights (using request elements) in the following ways:
+ #
+ # * By the person's ID:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>`
+ #
+ #   DisplayName is optional and ignored in the request
+ #
+ # * By URI:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
+ #
+ # * By Email address:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>`
+ #
+ #   The grantee is resolved to the CanonicalUser and, in a response to a
+ #   GET Object acl request, appears as the CanonicalUser.
+ #
+ #   Using email addresses to specify a grantee is only supported in the
+ #   following Amazon Web Services Regions:
+ #
+ #   * US East (N. Virginia)
+ #
+ #   * US West (N. California)
+ #
+ #   * US West (Oregon)
+ #
+ #   * Asia Pacific (Singapore)
+ #
+ #   * Asia Pacific (Sydney)
+ #
+ #   * Asia Pacific (Tokyo)
+ #
+ #   * Europe (Ireland)
+ #
+ #   * South America (São Paulo)
+ #
+ #   For a list of all the Amazon S3 supported Regions and endpoints, see
+ #   [Regions and Endpoints][5] in the Amazon Web Services General
+ #   Reference.
+ #
+ #
+ #
+ # **Related Resources**
+ #
+ # * [CreateBucket][6]
+ #
+ # * [DeleteBucket][7]
+ #
+ # * [GetObjectAcl][8]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+ #
+ # @option params [String] :acl
+ #   The canned ACL to apply to the bucket.
+ #
+ # @option params [Types::AccessControlPolicy] :access_control_policy
+ #   Contains the elements that set the ACL permissions for an object per
+ #   grantee.
+ #
+ # @option params [required, String] :bucket
+ #   The bucket to which to apply the ACL.
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. This header must be
+ #   used as a message integrity check to verify that the request body was
+ #   not corrupted in transit. For more information, go to [RFC 1864.][1]
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ #
+ #
+ #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :grant_full_control
+ #   Allows grantee the read, write, read ACP, and write ACP permissions on
+ #   the bucket.
+ #
+ # @option params [String] :grant_read
+ #   Allows grantee to list the objects in the bucket.
+ #
+ # @option params [String] :grant_read_acp
+ #   Allows grantee to read the bucket ACL.
+ #
+ # @option params [String] :grant_write
+ #   Allows grantee to create new objects in the bucket.
+ #
+ #   For the bucket and object owners of existing objects, also allows
+ #   deletions and overwrites of those objects.
+ #
+ # @option params [String] :grant_write_acp
+ #   Allows grantee to write the ACL for the applicable bucket.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Put bucket acl
+ #
+ #   # The following example replaces the existing ACL on a bucket. The ACL grants the bucket owner (specified using the
+ #   # owner ID) full control and grants the LogDelivery group write permission. Because this is a replace operation, you
+ #   # must specify all the grants in your request. To incrementally add or remove ACL grants, you might use the console.
+ # + # resp = client.put_bucket_acl({ + # bucket: "examplebucket", + # grant_full_control: "id=examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484", + # grant_write: "uri=http://acs.amazonaws.com/groups/s3/LogDelivery", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_acl({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read + # access_control_policy: { + # grants: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP + # }, + # ], + # owner: { + # display_name: "DisplayName", + # id: "ID", + # }, + # }, + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write: "GrantWrite", + # grant_write_acp: "GrantWriteACP", + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl AWS API Documentation + # + # @overload put_bucket_acl(params = {}) + # @param [Hash] params ({}) + def put_bucket_acl(params = {}, options = {}) + req = build_request(:put_bucket_acl, params) + req.send_request(options) + end + + # Sets an analytics configuration for the bucket (specified by the + # analytics configuration ID). You can have up to 1,000 analytics + # configurations per bucket. + # + # You can choose to have storage class analysis export analysis reports + # sent to a comma-separated values (CSV) flat file. See the `DataExport` + # request element. Reports are updated daily and are based on the object + # filters that you configure. When selecting data export, you specify a + # destination bucket and an optional destination prefix where the file + # is written. You can export the data to a destination bucket in a + # different account. However, the destination bucket must be in the same + # Region as the bucket that you are making the PUT analytics + # configuration to. For more information, see [Amazon S3 Analytics – + # Storage Class Analysis][1]. + # + # You must create a bucket policy on the destination bucket where the + # exported file is written to grant permissions to Amazon S3 to write + # objects to the bucket. For an example policy, see [Granting + # Permissions for Amazon S3 Inventory and Storage Class Analysis][2]. + # + # To use this operation, you must have permissions to perform the + # `s3:PutAnalyticsConfiguration` action. The bucket owner has this + # permission by default. The bucket owner can grant this permission to + # others. For more information about permissions, see [Permissions + # Related to Bucket Subresource Operations][3] and [Managing Access + # Permissions to Your Amazon S3 Resources][4]. 
+ # + # **Special Errors** + # + # * * *HTTP Error: HTTP 400 Bad Request* + # + # * *Code: InvalidArgument* + # + # * *Cause: Invalid argument.* + # + # * * *HTTP Error: HTTP 400 Bad Request* + # + # * *Code: TooManyConfigurations* + # + # * *Cause: You are attempting to create a new configuration but have + # already reached the 1,000-configuration limit.* + # + # * * *HTTP Error: HTTP 403 Forbidden* + # + # * *Code: AccessDenied* + # + # * *Cause: You are not the owner of the specified bucket, or you do + # not have the s3:PutAnalyticsConfiguration bucket permission to set + # the configuration on the bucket.* + # + # **Related Resources** + # + # * [GetBucketAnalyticsConfiguration][5] + # + # * [DeleteBucketAnalyticsConfiguration][6] + # + # * [ListBucketAnalyticsConfigurations][7] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html + # + # @option params [required, String] :bucket + # The name of the bucket to which an analytics configuration is stored. + # + # @option params [required, String] :id + # The ID that identifies the analytics configuration. + # + # @option params [required, Types::AnalyticsConfiguration] :analytics_configuration + # The configuration and any analyses for the analytics filter. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
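+ #
+ # As a filled-in sketch (the bucket names and configuration ID are
+ # hypothetical), the following requests a daily storage class analysis
+ # export, as CSV, to a destination bucket (named by ARN) in the same
+ # Region:
+ #
+ # @example Storage class analysis with CSV export (sketch)
+ #
+ #   client.put_bucket_analytics_configuration({
+ #     bucket: "amzn-s3-demo-bucket",
+ #     id: "storage-class-report",
+ #     analytics_configuration: {
+ #       id: "storage-class-report",
+ #       storage_class_analysis: {
+ #         data_export: {
+ #           output_schema_version: "V_1",
+ #           destination: {
+ #             s3_bucket_destination: {
+ #               format: "CSV",
+ #               bucket: "arn:aws:s3:::amzn-s3-demo-destination-bucket",
+ #               prefix: "analytics/",
+ #             },
+ #           },
+ #         },
+ #       },
+ #     },
+ #   })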
+ # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_analytics_configuration({ + # bucket: "BucketName", # required + # id: "AnalyticsId", # required + # analytics_configuration: { # required + # id: "AnalyticsId", # required + # filter: { + # prefix: "Prefix", + # tag: { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # and: { + # prefix: "Prefix", + # tags: [ + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # }, + # storage_class_analysis: { # required + # data_export: { + # output_schema_version: "V_1", # required, accepts V_1 + # destination: { # required + # s3_bucket_destination: { # required + # format: "CSV", # required, accepts CSV + # bucket_account_id: "AccountId", + # bucket: "BucketName", # required + # prefix: "Prefix", + # }, + # }, + # }, + # }, + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration AWS API Documentation + # + # @overload put_bucket_analytics_configuration(params = {}) + # @param [Hash] params ({}) + def put_bucket_analytics_configuration(params = {}, options = {}) + req = build_request(:put_bucket_analytics_configuration, params) + req.send_request(options) + end + + # Sets the `cors` configuration for your bucket. If the configuration + # exists, Amazon S3 replaces it. + # + # To use this operation, you must be allowed to perform the + # `s3:PutBucketCORS` action. By default, the bucket owner has this + # permission and can grant it to others. + # + # You set this configuration on a bucket so that the bucket can service + # cross-origin requests. For example, you might want to enable a request + # whose origin is `http://www.example.com` to access your Amazon S3 + # bucket at `my.example.bucket.com` by using the browser's + # `XMLHttpRequest` capability. + # + # To enable cross-origin resource sharing (CORS) on a bucket, you add + # the `cors` subresource to the bucket. The `cors` subresource is an XML + # document in which you configure rules that identify origins and the + # HTTP methods that can be executed on your bucket. The document is + # limited to 64 KB in size. + # + # When Amazon S3 receives a cross-origin request (or a pre-flight + # OPTIONS request) against a bucket, it evaluates the `cors` + # configuration on the bucket and uses the first `CORSRule` rule that + # matches the incoming browser request to enable a cross-origin request. + # For a rule to match, the following conditions must be met: + # + # * The request's `Origin` header must match `AllowedOrigin` elements. + # + # * The request method (for example, GET, PUT, HEAD, and so on) or the + # `Access-Control-Request-Method` header in case of a pre-flight + # `OPTIONS` request must be one of the `AllowedMethod` elements. + # + # * Every header specified in the `Access-Control-Request-Headers` + # request header of a pre-flight request must match an `AllowedHeader` + # element. + # + # For more information about CORS, go to [Enabling Cross-Origin Resource + # Sharing][1] in the *Amazon S3 User Guide*. 
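+ #
+ # As an illustration of those three conditions (the origin and header
+ # values here are hypothetical), a pre-flight `OPTIONS` request carrying
+ # `Origin: http://www.example.com`,
+ # `Access-Control-Request-Method: PUT`, and
+ # `Access-Control-Request-Headers: Content-Type` would be matched by a
+ # rule such as:
+ #
+ #   {
+ #     allowed_origins: ["http://www.example.com"], # matches the Origin header
+ #     allowed_methods: ["PUT", "POST"],            # includes the requested method
+ #     allowed_headers: ["*"],                      # covers every requested header
+ #   }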
+ #
+ # **Related Resources**
+ #
+ # * [GetBucketCors][2]
+ #
+ # * [DeleteBucketCors][3]
+ #
+ # * [RESTOPTIONSobject][4]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
+ #
+ # @option params [required, String] :bucket
+ #   Specifies the bucket impacted by the `cors` configuration.
+ #
+ # @option params [required, Types::CORSConfiguration] :cors_configuration
+ #   Describes the cross-origin access configuration for objects in an
+ #   Amazon S3 bucket. For more information, see [Enabling Cross-Origin
+ #   Resource Sharing][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. This header must be
+ #   used as a message integrity check to verify that the request body was
+ #   not corrupted in transit. For more information, go to [RFC 1864.][1]
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ #
+ #
+ #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: To set cors configuration on a bucket.
+ #
+ #   # The following example enables PUT, POST, and DELETE requests from www.example.com, and enables GET requests from any
+ #   # domain.
+ #
+ #   resp = client.put_bucket_cors({
+ #     bucket: "examplebucket",
+ #     cors_configuration: {
+ #       cors_rules: [
+ #         {
+ #           allowed_headers: [
+ #             "*",
+ #           ],
+ #           allowed_methods: [
+ #             "PUT",
+ #             "POST",
+ #             "DELETE",
+ #           ],
+ #           allowed_origins: [
+ #             "http://www.example.com",
+ #           ],
+ #           expose_headers: [
+ #             "x-amz-server-side-encryption",
+ #           ],
+ #           max_age_seconds: 3000,
+ #         },
+ #         {
+ #           allowed_headers: [
+ #             "Authorization",
+ #           ],
+ #           allowed_methods: [
+ #             "GET",
+ #           ],
+ #           allowed_origins: [
+ #             "*",
+ #           ],
+ #           max_age_seconds: 3000,
+ #         },
+ #       ],
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_cors({
+ #     bucket: "BucketName", # required
+ #     cors_configuration: { # required
+ #       cors_rules: [ # required
+ #         {
+ #           id: "ID",
+ #           allowed_headers: ["AllowedHeader"],
+ #           allowed_methods: ["AllowedMethod"], # required
+ #           allowed_origins: ["AllowedOrigin"], # required
+ #           expose_headers: ["ExposeHeader"],
+ #           max_age_seconds: 1,
+ #         },
+ #       ],
+ #     },
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors AWS API Documentation
+ #
+ # @overload put_bucket_cors(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_cors(params = {}, options = {})
+   req = build_request(:put_bucket_cors, params)
+   req.send_request(options)
+ end
+
+ # This action uses the `encryption` subresource to configure default
+ # encryption and Amazon S3 Bucket Key for an existing bucket.
+ #
+ # Default encryption for a bucket can use server-side encryption with
+ # Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If
+ # you specify default encryption using SSE-KMS, you can also configure
+ # Amazon S3 Bucket Key. When the default encryption is SSE-KMS, if you
+ # upload an object to the bucket and do not specify the KMS key to use
+ # for encryption, Amazon S3 uses the default Amazon Web Services managed
+ # KMS key for your account. For information about default encryption,
+ # see [Amazon S3 default bucket encryption][1] in the *Amazon S3 User
+ # Guide*. For more information about S3 Bucket Keys, see [Amazon S3
+ # Bucket Keys][2] in the *Amazon S3 User Guide*.
+ #
+ # This action requires Amazon Web Services Signature Version 4. For more
+ # information, see [ Authenticating Requests (Amazon Web Services
+ # Signature Version 4)][3].
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:PutEncryptionConfiguration` action. The bucket owner has this
+ # permission by default. The bucket owner can grant this permission to
+ # others. For more information about permissions, see [Permissions
+ # Related to Bucket Subresource Operations][4] and [Managing Access
+ # Permissions to Your Amazon S3 Resources][5] in the Amazon S3 User
+ # Guide.
+ # + # **Related Resources** + # + # * [GetBucketEncryption][6] + # + # * [DeleteBucketEncryption][7] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html + # + # @option params [required, String] :bucket + # Specifies default encryption for a bucket using server-side encryption + # with Amazon S3-managed keys (SSE-S3) or customer managed keys + # (SSE-KMS). For information about the Amazon S3 default encryption + # feature, see [Amazon S3 Default Bucket Encryption][1] in the *Amazon + # S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + # + # @option params [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the server-side encryption + # configuration. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::ServerSideEncryptionConfiguration] :server_side_encryption_configuration + # Specifies the default server-side-encryption configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
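+ #
+ # As a sketch of the SSE-KMS case described above (the bucket name and
+ # KMS key ID are hypothetical), default encryption with an S3 Bucket Key
+ # can be configured like this:
+ #
+ # @example SSE-KMS as the bucket default with an S3 Bucket Key (sketch)
+ #
+ #   client.put_bucket_encryption({
+ #     bucket: "amzn-s3-demo-bucket",
+ #     server_side_encryption_configuration: {
+ #       rules: [
+ #         {
+ #           apply_server_side_encryption_by_default: {
+ #             sse_algorithm: "aws:kms",
+ #             kms_master_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
+ #           },
+ #           bucket_key_enabled: true, # reduces requests to KMS for this bucket
+ #         },
+ #       ],
+ #     },
+ #   })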
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_encryption({
+ #     bucket: "BucketName", # required
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     server_side_encryption_configuration: { # required
+ #       rules: [ # required
+ #         {
+ #           apply_server_side_encryption_by_default: {
+ #             sse_algorithm: "AES256", # required, accepts AES256, aws:kms
+ #             kms_master_key_id: "SSEKMSKeyId",
+ #           },
+ #           bucket_key_enabled: false,
+ #         },
+ #       ],
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption AWS API Documentation
+ #
+ # @overload put_bucket_encryption(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_encryption(params = {}, options = {})
+   req = build_request(:put_bucket_encryption, params)
+   req.send_request(options)
+ end
+
+ # Puts an S3 Intelligent-Tiering configuration to the specified bucket.
+ # You can have up to 1,000 S3 Intelligent-Tiering configurations per
+ # bucket.
+ #
+ # The S3 Intelligent-Tiering storage class is designed to optimize
+ # storage costs by automatically moving data to the most cost-effective
+ # storage access tier, without performance impact or operational
+ # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
+ # three low latency and high throughput access tiers. To get the lowest
+ # storage cost on data that can be accessed in minutes to hours, you can
+ # choose to activate additional archiving capabilities.
+ #
+ # The S3 Intelligent-Tiering storage class is the ideal storage class
+ # for data with unknown, changing, or unpredictable access patterns,
+ # independent of object size or retention period. If the size of an
+ # object is less than 128 KB, it is not monitored and not eligible for
+ # auto-tiering. Smaller objects can be stored, but they are always
+ # charged at the Frequent Access tier rates in the S3
+ # Intelligent-Tiering storage class.
+ #
+ # For more information, see [Storage class for automatically optimizing
+ # frequently and infrequently accessed objects][1].
+ #
+ # Operations related to `PutBucketIntelligentTieringConfiguration`
+ # include:
+ #
+ # * [DeleteBucketIntelligentTieringConfiguration][2]
+ #
+ # * [GetBucketIntelligentTieringConfiguration][3]
+ #
+ # * [ListBucketIntelligentTieringConfigurations][4]
+ #
+ # You only need S3 Intelligent-Tiering enabled on a bucket if you want
+ # to automatically move objects stored in the S3 Intelligent-Tiering
+ # storage class to the Archive Access or Deep Archive Access tier.
+ #
+ #
+ #
+ # **Special Errors**
+ #
+ # * **HTTP 400 Bad Request Error**
+ #
+ #   * *Code:* InvalidArgument
+ #
+ #   * *Cause:* Invalid Argument
+ #
+ # * **HTTP 400 Bad Request Error**
+ #
+ #   * *Code:* TooManyConfigurations
+ #
+ #   * *Cause:* You are attempting to create a new configuration but have
+ #     already reached the 1,000-configuration limit.
+ #
+ # * **HTTP 403 Forbidden Error**
+ #
+ #   * *Code:* AccessDenied
+ #
+ #   * *Cause:* You are not the owner of the specified bucket, or you do
+ #     not have the `s3:PutIntelligentTieringConfiguration` bucket
+ #     permission to set the configuration on the bucket.
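+ #
+ # As a minimal sketch (the bucket name and configuration ID are
+ # hypothetical), a configuration that moves objects to the Archive
+ # Access tier after 90 consecutive days without access looks like:
+ #
+ #   client.put_bucket_intelligent_tiering_configuration({
+ #     bucket: "amzn-s3-demo-bucket",
+ #     id: "archive-after-90-days",
+ #     intelligent_tiering_configuration: {
+ #       id: "archive-after-90-days",
+ #       status: "Enabled",
+ #       tierings: [
+ #         { days: 90, access_tier: "ARCHIVE_ACCESS" },
+ #       ],
+ #     },
+ #   })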
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # + # @option params [required, String] :id + # The ID used to identify the S3 Intelligent-Tiering configuration. + # + # @option params [required, Types::IntelligentTieringConfiguration] :intelligent_tiering_configuration + # Container for S3 Intelligent-Tiering configuration. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_intelligent_tiering_configuration({ + # bucket: "BucketName", # required + # id: "IntelligentTieringId", # required + # intelligent_tiering_configuration: { # required + # id: "IntelligentTieringId", # required + # filter: { + # prefix: "Prefix", + # tag: { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # and: { + # prefix: "Prefix", + # tags: [ + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # }, + # status: "Enabled", # required, accepts Enabled, Disabled + # tierings: [ # required + # { + # days: 1, # required + # access_tier: "ARCHIVE_ACCESS", # required, accepts ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS + # }, + # ], + # }, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration AWS API Documentation + # + # @overload put_bucket_intelligent_tiering_configuration(params = {}) + # @param [Hash] params ({}) + def put_bucket_intelligent_tiering_configuration(params = {}, options = {}) + req = build_request(:put_bucket_intelligent_tiering_configuration, params) + req.send_request(options) + end + + # This implementation of the `PUT` action adds an inventory + # configuration (identified by the inventory ID) to the bucket. You can + # have up to 1,000 inventory configurations per bucket. + # + # Amazon S3 inventory generates inventories of the objects in the bucket + # on a daily or weekly basis, and the results are published to a flat + # file. The bucket that is inventoried is called the *source* bucket, + # and the bucket where the inventory flat file is stored is called the + # *destination* bucket. The *destination* bucket must be in the same + # Amazon Web Services Region as the *source* bucket. + # + # When you configure an inventory for a *source* bucket, you specify the + # *destination* bucket where you want the inventory to be stored, and + # whether to generate the inventory daily or weekly. You can also + # configure what object metadata to include and whether to inventory all + # object versions or only current versions. For more information, see + # [Amazon S3 Inventory][1] in the Amazon S3 User Guide. + # + # You must create a bucket policy on the *destination* bucket to grant + # permissions to Amazon S3 to write objects to the bucket in the defined + # location. For an example policy, see [ Granting Permissions for Amazon + # S3 Inventory and Storage Class Analysis][2]. 
+ # + # To use this operation, you must have permissions to perform the + # `s3:PutInventoryConfiguration` action. The bucket owner has this + # permission by default and can grant this permission to others. For + # more information about permissions, see [Permissions Related to Bucket + # Subresource Operations][3] and [Managing Access Permissions to Your + # Amazon S3 Resources][4] in the Amazon S3 User Guide. + # + # **Special Errors** + # + # * **HTTP 400 Bad Request Error** + # + # * *Code:* InvalidArgument + # + # * *Cause:* Invalid Argument + # + # * **HTTP 400 Bad Request Error** + # + # * *Code:* TooManyConfigurations + # + # * *Cause:* You are attempting to create a new configuration but have + # already reached the 1,000-configuration limit. + # + # * **HTTP 403 Forbidden Error** + # + # * *Code:* AccessDenied + # + # * *Cause:* You are not the owner of the specified bucket, or you do + # not have the `s3:PutInventoryConfiguration` bucket permission to + # set the configuration on the bucket. + # + # **Related Resources** + # + # * [GetBucketInventoryConfiguration][5] + # + # * [DeleteBucketInventoryConfiguration][6] + # + # * [ListBucketInventoryConfigurations][7] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html + # + # @option params [required, String] :bucket + # The name of the bucket where the inventory configuration will be + # stored. + # + # @option params [required, String] :id + # The ID used to identify the inventory configuration. + # + # @option params [required, Types::InventoryConfiguration] :inventory_configuration + # Specifies the inventory configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
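+ #
+ # As a filled-in sketch (the bucket names and configuration ID are
+ # hypothetical), a weekly CSV inventory of current object versions can
+ # be configured like this; the destination bucket is named by ARN and
+ # must carry a bucket policy that allows Amazon S3 to write to it:
+ #
+ # @example Weekly CSV inventory of current versions (sketch)
+ #
+ #   client.put_bucket_inventory_configuration({
+ #     bucket: "amzn-s3-demo-source-bucket",
+ #     id: "weekly-inventory",
+ #     inventory_configuration: {
+ #       id: "weekly-inventory",
+ #       is_enabled: true,
+ #       included_object_versions: "Current",
+ #       destination: {
+ #         s3_bucket_destination: {
+ #           bucket: "arn:aws:s3:::amzn-s3-demo-destination-bucket",
+ #           format: "CSV",
+ #           prefix: "inventory/",
+ #         },
+ #       },
+ #       schedule: { frequency: "Weekly" },
+ #     },
+ #   })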
+ # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_inventory_configuration({ + # bucket: "BucketName", # required + # id: "InventoryId", # required + # inventory_configuration: { # required + # destination: { # required + # s3_bucket_destination: { # required + # account_id: "AccountId", + # bucket: "BucketName", # required + # format: "CSV", # required, accepts CSV, ORC, Parquet + # prefix: "Prefix", + # encryption: { + # sses3: { + # }, + # ssekms: { + # key_id: "SSEKMSKeyId", # required + # }, + # }, + # }, + # }, + # is_enabled: false, # required + # filter: { + # prefix: "Prefix", # required + # }, + # id: "InventoryId", # required + # included_object_versions: "All", # required, accepts All, Current + # optional_fields: ["Size"], # accepts Size, LastModifiedDate, StorageClass, ETag, IsMultipartUploaded, ReplicationStatus, EncryptionStatus, ObjectLockRetainUntilDate, ObjectLockMode, ObjectLockLegalHoldStatus, IntelligentTieringAccessTier, BucketKeyStatus, ChecksumAlgorithm + # schedule: { # required + # frequency: "Daily", # required, accepts Daily, Weekly + # }, + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration AWS API Documentation + # + # @overload put_bucket_inventory_configuration(params = {}) + # @param [Hash] params ({}) + def put_bucket_inventory_configuration(params = {}, options = {}) + req = build_request(:put_bucket_inventory_configuration, params) + req.send_request(options) + end + + # For an updated version of this API, see + # [PutBucketLifecycleConfiguration][1]. This version has been + # deprecated. Existing lifecycle configurations will work. For new + # lifecycle configurations, use the updated API. + # + # Creates a new lifecycle configuration for the bucket or replaces an + # existing lifecycle configuration. For information about lifecycle + # configuration, see [Object Lifecycle Management][2] in the *Amazon S3 + # User Guide*. + # + # By default, all Amazon S3 resources, including buckets, objects, and + # related subresources (for example, lifecycle configuration and website + # configuration) are private. Only the resource owner, the Amazon Web + # Services account that created the resource, can access it. The + # resource owner can optionally grant access permissions to others by + # writing an access policy. For this operation, users must get the + # `s3:PutLifecycleConfiguration` permission. + # + # You can also explicitly deny permissions. Explicit denial also + # supersedes any other permissions. If you want to prevent users or + # accounts from removing or deleting objects from your bucket, you must + # deny them permissions for the following actions: + # + # * `s3:DeleteObject` + # + # * `s3:DeleteObjectVersion` + # + # * `s3:PutLifecycleConfiguration` + # + # For more information about permissions, see [Managing Access + # Permissions to your Amazon S3 Resources][3] in the *Amazon S3 User + # Guide*. + # + # For more examples of transitioning objects to storage classes such as + # STANDARD\_IA or ONEZONE\_IA, see [Examples of Lifecycle + # Configuration][4]. + # + # **Related Resources** + # + # * [GetBucketLifecycle][5](Deprecated) + # + # * [GetBucketLifecycleConfiguration][6] + # + # * [RestoreObject][7] + # + # * By default, a resource owner—in this case, a bucket owner, which is + # the Amazon Web Services account that created the bucket—can perform + # any of the operations. 
A resource owner can also grant others + # permission to perform the operation. For more information, see the + # following topics in the Amazon S3 User Guide: + # + # * [Specifying Permissions in a Policy][8] + # + # * [Managing Access Permissions to your Amazon S3 Resources][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # + # @option params [required, String] :bucket + # + # @option params [String] :content_md5 + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [Types::LifecycleConfiguration] :lifecycle_configuration + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_lifecycle({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # lifecycle_configuration: { + # rules: [ # required + # { + # expiration: { + # date: Time.now, + # days: 1, + # expired_object_delete_marker: false, + # }, + # id: "ID", + # prefix: "Prefix", # required + # status: "Enabled", # required, accepts Enabled, Disabled + # transition: { + # date: Time.now, + # days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # }, + # noncurrent_version_transition: { + # noncurrent_days: 1, + # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR + # newer_noncurrent_versions: 1, + # }, + # noncurrent_version_expiration: { + # noncurrent_days: 1, + # newer_noncurrent_versions: 1, + # }, + # abort_incomplete_multipart_upload: { + # days_after_initiation: 1, + # }, + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle AWS API Documentation + # + # @overload put_bucket_lifecycle(params = {}) + # @param [Hash] params ({}) + def put_bucket_lifecycle(params = {}, options = {}) + req = build_request(:put_bucket_lifecycle, params) + req.send_request(options) + end + + # Creates a new lifecycle configuration for the bucket or replaces an + # existing lifecycle configuration. Keep in mind that this will + # overwrite an existing lifecycle configuration, so if you want to + # retain any configuration details, they must be included in the new + # lifecycle configuration. For information about lifecycle + # configuration, see [Managing your storage lifecycle][1]. + # + # Bucket lifecycle configuration now supports specifying a lifecycle + # rule using an object key name prefix, one or more object tags, or a + # combination of both. Accordingly, this section describes the latest + # API. The previous version of the API supported filtering based only on + # an object key name prefix, which is supported for backward + # compatibility. For the related API description, see + # [PutBucketLifecycle][2]. + # + # + # + # **Rules** + # + # You specify the lifecycle configuration in your request body. The + # lifecycle configuration is specified as XML consisting of one or more + # rules. An Amazon S3 Lifecycle configuration can have up to 1,000 + # rules. This limit is not adjustable. Each rule consists of the + # following: + # + # * Filter identifying a subset of objects to which the rule applies. + # The filter can be based on a key name prefix, object tags, or a + # combination of both. + # + # * Status whether the rule is in effect. + # + # * One or more lifecycle transition and expiration actions that you + # want Amazon S3 to perform on the objects identified by the filter. + # If the state of your bucket is versioning-enabled or + # versioning-suspended, you can have many versions of the same object + # (one current version and zero or more noncurrent versions). Amazon + # S3 provides predefined actions that you can specify for current and + # noncurrent object versions. + # + # For more information, see [Object Lifecycle Management][3] and + # [Lifecycle Configuration Elements][4]. 
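+ #
+ # For instance (names and values hypothetical), a rule that combines a
+ # key name prefix with an object tag in its filter, expiring matching
+ # objects after 30 days, can be passed to this operation as:
+ #
+ #   {
+ #     id: "expire-tagged-logs",
+ #     status: "Enabled",
+ #     filter: {
+ #       and: {
+ #         prefix: "logs/",
+ #         tags: [{ key: "retention", value: "temporary" }],
+ #       },
+ #     },
+ #     expiration: { days: 30 },
+ #   }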
+ # + # **Permissions** + # + # By default, all Amazon S3 resources are private, including buckets, + # objects, and related subresources (for example, lifecycle + # configuration and website configuration). Only the resource owner + # (that is, the Amazon Web Services account that created it) can access + # the resource. The resource owner can optionally grant access + # permissions to others by writing an access policy. For this operation, + # a user must get the `s3:PutLifecycleConfiguration` permission. + # + # You can also explicitly deny permissions. Explicit deny also + # supersedes any other permissions. If you want to block users or + # accounts from removing or deleting objects from your bucket, you must + # deny them permissions for the following actions: + # + # * `s3:DeleteObject` + # + # * `s3:DeleteObjectVersion` + # + # * `s3:PutLifecycleConfiguration` + # + # For more information about permissions, see [Managing Access + # Permissions to Your Amazon S3 Resources][5]. + # + # The following are related to `PutBucketLifecycleConfiguration`: + # + # * [Examples of Lifecycle Configuration][6] + # + # * [GetBucketLifecycleConfiguration][7] + # + # * [DeleteBucketLifecycle][8] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html + # + # @option params [required, String] :bucket + # The name of the bucket for which to set the configuration. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [Types::BucketLifecycleConfiguration] :lifecycle_configuration + # Container for lifecycle rules. You can add as many as 1,000 rules. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: Put bucket lifecycle + # + # # The following example replaces existing lifecycle configuration, if any, on the specified bucket. 
+ #
+ #   resp = client.put_bucket_lifecycle_configuration({
+ #     bucket: "examplebucket",
+ #     lifecycle_configuration: {
+ #       rules: [
+ #         {
+ #           expiration: {
+ #             days: 3650,
+ #           },
+ #           filter: {
+ #             prefix: "documents/",
+ #           },
+ #           id: "TestOnly",
+ #           status: "Enabled",
+ #           transitions: [
+ #             {
+ #               days: 365,
+ #               storage_class: "GLACIER",
+ #             },
+ #           ],
+ #         },
+ #       ],
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_lifecycle_configuration({
+ #     bucket: "BucketName", # required
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     lifecycle_configuration: {
+ #       rules: [ # required
+ #         {
+ #           expiration: {
+ #             date: Time.now,
+ #             days: 1,
+ #             expired_object_delete_marker: false,
+ #           },
+ #           id: "ID",
+ #           prefix: "Prefix",
+ #           filter: {
+ #             prefix: "Prefix",
+ #             tag: {
+ #               key: "ObjectKey", # required
+ #               value: "Value", # required
+ #             },
+ #             object_size_greater_than: 1,
+ #             object_size_less_than: 1,
+ #             and: {
+ #               prefix: "Prefix",
+ #               tags: [
+ #                 {
+ #                   key: "ObjectKey", # required
+ #                   value: "Value", # required
+ #                 },
+ #               ],
+ #               object_size_greater_than: 1,
+ #               object_size_less_than: 1,
+ #             },
+ #           },
+ #           status: "Enabled", # required, accepts Enabled, Disabled
+ #           transitions: [
+ #             {
+ #               date: Time.now,
+ #               days: 1,
+ #               storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
+ #             },
+ #           ],
+ #           noncurrent_version_transitions: [
+ #             {
+ #               noncurrent_days: 1,
+ #               storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
+ #               newer_noncurrent_versions: 1,
+ #             },
+ #           ],
+ #           noncurrent_version_expiration: {
+ #             noncurrent_days: 1,
+ #             newer_noncurrent_versions: 1,
+ #           },
+ #           abort_incomplete_multipart_upload: {
+ #             days_after_initiation: 1,
+ #           },
+ #         },
+ #       ],
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration AWS API Documentation
+ #
+ # @overload put_bucket_lifecycle_configuration(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_lifecycle_configuration(params = {}, options = {})
+   req = build_request(:put_bucket_lifecycle_configuration, params)
+   req.send_request(options)
+ end
+
+ # Set the logging parameters for a bucket and specify permissions for
+ # who can view and modify the logging parameters. All logs are saved to
+ # buckets in the same Amazon Web Services Region as the source bucket.
+ # To set the logging status of a bucket, you must be the bucket owner.
+ #
+ # The bucket owner is automatically granted FULL\_CONTROL to all logs.
+ # You use the `Grantee` request element to grant access to other people.
+ # The `Permissions` request element specifies the kind of access the
+ # grantee has to the logs.
+ #
+ # If the target bucket for log delivery uses the bucket owner enforced
+ # setting for S3 Object Ownership, you can't use the `Grantee` request
+ # element to grant access to others. Permissions can only be granted
+ # using policies. For more information, see [Permissions for server
+ # access log delivery][1] in the *Amazon S3 User Guide*.
+ #
+ # **Grantee Values**
+ #
+ # You can specify the person (grantee) to whom you're assigning access
+ # rights (using request elements) in the following ways:
+ #
+ # * By the person's ID:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>`
+ #
+ #   DisplayName is optional and ignored in the request.
+ #
+ # * By Email address:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>`
+ #
+ #   The grantee is resolved to the CanonicalUser and, in a response to a
+ #   GET Object acl request, appears as the CanonicalUser.
+ #
+ # * By URI:
+ #
+ #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ #   xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
+ #
+ # To enable logging, you use the `LoggingEnabled` element and its child
+ # request elements. To disable logging, you use an empty
+ # `BucketLoggingStatus` request element:
+ #
+ #   `<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />`
+ #
+ # For more information about server access logging, see [Server Access
+ # Logging][2] in the *Amazon S3 User Guide*.
+ #
+ # For more information about creating a bucket, see [CreateBucket][3].
+ # For more information about returning the logging status of a bucket,
+ # see [GetBucketLogging][4].
+ #
+ # The following operations are related to `PutBucketLogging`:
+ #
+ # * [PutObject][5]
+ #
+ # * [DeleteBucket][6]
+ #
+ # * [CreateBucket][3]
+ #
+ # * [GetBucketLogging][4]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+ #
+ # @option params [required, String] :bucket
+ #   The name of the bucket for which to set the logging parameters.
+ #
+ # @option params [required, Types::BucketLoggingStatus] :bucket_logging_status
+ #   Container for logging status information.
+ #
+ # @option params [String] :content_md5
+ #   The MD5 hash of the `PutBucketLogging` request body.
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Set logging configuration for a bucket
+ #
+ #   # The following example sets logging policy on a bucket. For the Log Delivery group to deliver logs to the destination
+ #   # bucket, it needs permission for the READ_ACP action which the policy grants.
+ #
+ #   resp = client.put_bucket_logging({
+ #     bucket: "sourcebucket",
+ #     bucket_logging_status: {
+ #       logging_enabled: {
+ #         target_bucket: "targetbucket",
+ #         target_grants: [
+ #           {
+ #             grantee: {
+ #               type: "Group",
+ #               uri: "http://acs.amazonaws.com/groups/global/AllUsers",
+ #             },
+ #             permission: "READ",
+ #           },
+ #         ],
+ #         target_prefix: "MyBucketLogs/",
+ #       },
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_logging({
+ #     bucket: "BucketName", # required
+ #     bucket_logging_status: { # required
+ #       logging_enabled: {
+ #         target_bucket: "TargetBucket", # required
+ #         target_grants: [
+ #           {
+ #             grantee: {
+ #               display_name: "DisplayName",
+ #               email_address: "EmailAddress",
+ #               id: "ID",
+ #               type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group
+ #               uri: "URI",
+ #             },
+ #             permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE
+ #           },
+ #         ],
+ #         target_prefix: "TargetPrefix", # required
+ #       },
+ #     },
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging AWS API Documentation
+ #
+ # @overload put_bucket_logging(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_logging(params = {}, options = {})
+   req = build_request(:put_bucket_logging, params)
+   req.send_request(options)
+ end
+
+ # Sets a metrics configuration (specified by the metrics configuration
+ # ID) for the bucket. You can have up to 1,000 metrics configurations
+ # per bucket. If you're updating an existing metrics configuration,
+ # note that this is a full replacement of the existing metrics
+ # configuration. If you don't include the elements you want to keep,
+ # they are erased.
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:PutMetricsConfiguration` action. The bucket owner has this
+ # permission by default. The bucket owner can grant this permission to
+ # others. For more information about permissions, see [Permissions
+ # Related to Bucket Subresource Operations][1] and [Managing Access
+ # Permissions to Your Amazon S3 Resources][2].
+ #
+ # For information about CloudWatch request metrics for Amazon S3, see
+ # [Monitoring Metrics with Amazon CloudWatch][3].
+ #
+ # The following operations are related to
+ # `PutBucketMetricsConfiguration`:
+ #
+ # * [DeleteBucketMetricsConfiguration][4]
+ #
+ # * [GetBucketMetricsConfiguration][5]
+ #
+ # * [ListBucketMetricsConfigurations][6]
+ #
+ # `PutBucketMetricsConfiguration` has the following special error:
+ #
+ # * Error code: `TooManyConfigurations`
+ #
+ #   * Description: You are attempting to create a new configuration but
+ #     have already reached the 1,000-configuration limit.
+ #
+ #   * HTTP Status Code: HTTP 400 Bad Request
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+ #
+ # @option params [required, String] :bucket
+ #   The name of the bucket for which the metrics configuration is set.
+ #
+ # @option params [required, String] :id
+ #   The ID used to identify the metrics configuration.
+ #
+ # @option params [required, Types::MetricsConfiguration] :metrics_configuration
+ #   Specifies the metrics configuration.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_metrics_configuration({
+ #     bucket: "BucketName", # required
+ #     id: "MetricsId", # required
+ #     metrics_configuration: { # required
+ #       id: "MetricsId", # required
+ #       filter: {
+ #         prefix: "Prefix",
+ #         tag: {
+ #           key: "ObjectKey", # required
+ #           value: "Value", # required
+ #         },
+ #         access_point_arn: "AccessPointArn",
+ #         and: {
+ #           prefix: "Prefix",
+ #           tags: [
+ #             {
+ #               key: "ObjectKey", # required
+ #               value: "Value", # required
+ #             },
+ #           ],
+ #           access_point_arn: "AccessPointArn",
+ #         },
+ #       },
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration AWS API Documentation
+ #
+ # @overload put_bucket_metrics_configuration(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_metrics_configuration(params = {}, options = {})
+   req = build_request(:put_bucket_metrics_configuration, params)
+   req.send_request(options)
+ end
+
+ # No longer used, see the [PutBucketNotificationConfiguration][1]
+ # operation.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html
+ #
+ # @option params [required, String] :bucket
+ #   The name of the bucket.
+ #
+ # @option params [String] :content_md5
+ #   The MD5 hash of the `PutBucketNotification` request body.
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::NotificationConfigurationDeprecated] :notification_configuration + # The container for the configuration. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_notification({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # notification_configuration: { # required + # topic_configuration: { + # id: "NotificationId", + # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # topic: "TopicArn", + # }, + # queue_configuration: { + # id: "NotificationId", + # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, 
s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # queue: "QueueArn", + # }, + # cloud_function_configuration: { + # id: "NotificationId", + # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # cloud_function: "CloudFunction", + # invocation_role: "CloudFunctionInvocationRole", + # }, + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification AWS API Documentation + # + # @overload put_bucket_notification(params = {}) + # @param [Hash] params ({}) + def put_bucket_notification(params = {}, options = {}) + req = build_request(:put_bucket_notification, params) + req.send_request(options) + end + + # Enables notifications of specified events for a bucket. For more + # information about event notifications, see [Configuring Event + # Notifications][1]. + # + # Using this API, you can replace an existing notification + # configuration. The configuration is an XML file that defines the event + # types that you want Amazon S3 to publish and the destination where you + # want Amazon S3 to publish an event notification when it detects an + # event of the specified type. + # + # By default, your bucket has no event notifications configured. 
That
+ # is, the notification configuration will be an empty
+ # `NotificationConfiguration`.
+ #
+ # `<NotificationConfiguration>`
+ #
+ # `</NotificationConfiguration>`
+ #
+ # This action replaces the existing notification configuration with the
+ # configuration you include in the request body.
+ #
+ # After Amazon S3 receives this request, it first verifies that any
+ # Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue
+ # Service (Amazon SQS) destination exists, and that the bucket owner has
+ # permission to publish to it by sending a test notification. In the
+ # case of Lambda destinations, Amazon S3 verifies that the Lambda
+ # function permissions grant Amazon S3 permission to invoke the function
+ # from the Amazon S3 bucket. For more information, see [Configuring
+ # Notifications for Amazon S3 Events][1].
+ #
+ # You can disable notifications by adding the empty
+ # NotificationConfiguration element.
+ #
+ # For more information about the number of event notification
+ # configurations that you can create per bucket, see [Amazon S3 service
+ # quotas][2] in *Amazon Web Services General Reference*.
+ #
+ # By default, only the bucket owner can configure notifications on a
+ # bucket. However, bucket owners can use a bucket policy to grant
+ # permission to other users to set this configuration with
+ # `s3:PutBucketNotification` permission.
+ #
+ # The PUT notification is an atomic operation. For example, suppose your
+ # notification configuration includes SNS topic, SQS queue, and Lambda
+ # function configurations. When you send a PUT request with this
+ # configuration, Amazon S3 sends test messages to your SNS topic. If the
+ # message fails, the entire PUT action will fail, and Amazon S3 will not
+ # add the configuration to your bucket.
+ #
+ #
+ #
+ # **Responses**
+ #
+ # If the configuration in the request body includes only one
+ # `TopicConfiguration` specifying only the
+ # `s3:ReducedRedundancyLostObject` event type, the response will also
+ # include the `x-amz-sns-test-message-id` header containing the message
+ # ID of the test notification sent to the topic.
+ #
+ # The following action is related to
+ # `PutBucketNotificationConfiguration`:
+ #
+ # * [GetBucketNotificationConfiguration][3]
+ #
+ # ^
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ # [2]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
+ #
+ # @option params [required, String] :bucket
+ #   The name of the bucket.
+ #
+ # @option params [required, Types::NotificationConfiguration] :notification_configuration
+ #   A container for specifying the notification configuration of the
+ #   bucket. If this element is empty, notifications are turned off for the
+ #   bucket.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @option params [Boolean] :skip_destination_validation
+ #   Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
+ #   True or false value.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Set notification configuration for a bucket
+ #
+ #   # The following example sets notification configuration on a bucket to publish the object created events to an SNS topic.
+ # + # resp = client.put_bucket_notification_configuration({ + # bucket: "examplebucket", + # notification_configuration: { + # topic_configurations: [ + # { + # events: [ + # "s3:ObjectCreated:*", + # ], + # topic_arn: "arn:aws:sns:us-west-2:123456789012:s3-notification-topic", + # }, + # ], + # }, + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_notification_configuration({ + # bucket: "BucketName", # required + # notification_configuration: { # required + # topic_configurations: [ + # { + # id: "NotificationId", + # topic_arn: "TopicArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # queue_configurations: [ + # { + # id: "NotificationId", + # queue_arn: "QueueArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # lambda_function_configurations: [ + # { + # id: "NotificationId", + # lambda_function_arn: "LambdaFunctionArn", # required + # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, 
s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete + # filter: { + # key: { + # filter_rules: [ + # { + # name: "prefix", # accepts prefix, suffix + # value: "FilterRuleValue", + # }, + # ], + # }, + # }, + # }, + # ], + # event_bridge_configuration: { + # }, + # }, + # expected_bucket_owner: "AccountId", + # skip_destination_validation: false, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration AWS API Documentation + # + # @overload put_bucket_notification_configuration(params = {}) + # @param [Hash] params ({}) + def put_bucket_notification_configuration(params = {}, options = {}) + req = build_request(:put_bucket_notification_configuration, params) + req.send_request(options) + end + + # Creates or modifies `OwnershipControls` for an Amazon S3 bucket. To + # use this operation, you must have the `s3:PutBucketOwnershipControls` + # permission. For more information about Amazon S3 permissions, see + # [Specifying permissions in a policy][1]. + # + # For information about Amazon S3 Object Ownership, see [Using object + # ownership][2]. + # + # The following operations are related to `PutBucketOwnershipControls`: + # + # * GetBucketOwnershipControls + # + # * DeleteBucketOwnershipControls + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose `OwnershipControls` you want to + # set. + # + # @option params [String] :content_md5 + # The MD5 hash of the `OwnershipControls` request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [required, Types::OwnershipControls] :ownership_controls + # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, or + # ObjectWriter) that you want to apply to this Amazon S3 bucket. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_ownership_controls({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # expected_bucket_owner: "AccountId", + # ownership_controls: { # required + # rules: [ # required + # { + # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced + # }, + # ], + # }, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls AWS API Documentation + # + # @overload put_bucket_ownership_controls(params = {}) + # @param [Hash] params ({}) + def put_bucket_ownership_controls(params = {}, options = {}) + req = build_request(:put_bucket_ownership_controls, params) + req.send_request(options) + end + + # Applies an Amazon S3 bucket policy to an Amazon S3 bucket. 
If you are + # using an identity other than the root user of the Amazon Web Services + # account that owns the bucket, the calling identity must have the + # `PutBucketPolicy` permissions on the specified bucket and belong to + # the bucket owner's account in order to use this operation. + # + # If you don't have `PutBucketPolicy` permissions, Amazon S3 returns a + # `403 Access Denied` error. If you have the correct permissions, but + # you're not using an identity that belongs to the bucket owner's + # account, Amazon S3 returns a `405 Method Not Allowed` error. + # + # As a security precaution, the root user of the Amazon Web Services + # account that owns a bucket can always use this operation, even if the + # policy explicitly denies the root user the ability to perform this + # action. + # + # For more information, see [Bucket policy examples][1]. + # + # The following operations are related to `PutBucketPolicy`: + # + # * [CreateBucket][2] + # + # * [DeleteBucket][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + # + # @option params [required, String] :bucket + # The name of the bucket. + # + # @option params [String] :content_md5 + # The MD5 hash of the request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [Boolean] :confirm_remove_self_bucket_access + # Set this parameter to true to confirm that you want to remove your + # permissions to change this bucket policy in the future. + # + # @option params [required, String] :policy + # The bucket policy as a JSON document. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: Set bucket policy + # + # # The following example sets a permission policy on a bucket. 
+ # + # resp = client.put_bucket_policy({ + # bucket: "examplebucket", + # policy: "{\"Version\": \"2012-10-17\", \"Statement\": [{ \"Sid\": \"id-1\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::123456789012:root\"}, \"Action\": [ \"s3:PutObject\",\"s3:PutObjectAcl\"], \"Resource\": [\"arn:aws:s3:::acl3/*\" ] } ]}", + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_policy({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # confirm_remove_self_bucket_access: false, + # policy: "Policy", # required + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy AWS API Documentation + # + # @overload put_bucket_policy(params = {}) + # @param [Hash] params ({}) + def put_bucket_policy(params = {}, options = {}) + req = build_request(:put_bucket_policy, params) + req.send_request(options) + end + + # Creates a replication configuration or replaces an existing one. For + # more information, see [Replication][1] in the *Amazon S3 User Guide*. + # + # Specify the replication configuration in the request body. In the + # replication configuration, you provide the name of the destination + # bucket or buckets where you want Amazon S3 to replicate objects, the + # IAM role that Amazon S3 can assume to replicate objects on your + # behalf, and other relevant information. + # + # A replication configuration must include at least one rule, and can + # contain a maximum of 1,000. Each rule identifies a subset of objects + # to replicate by filtering the objects in the source bucket. To choose + # additional subsets of objects to replicate, add a rule for each + # subset. + # + # To specify a subset of the objects in the source bucket to apply a + # replication rule to, add the Filter element as a child of the Rule + # element. You can filter objects based on an object key prefix, one or + # more object tags, or both. When you add the Filter element in the + # configuration, you must also add the following elements: + # `DeleteMarkerReplication`, `Status`, and `Priority`. + # + # If you are using an earlier version of the replication configuration, + # Amazon S3 handles replication of delete markers differently. For more + # information, see [Backward Compatibility][2]. + # + # + # + # For information about enabling versioning on a bucket, see [Using + # Versioning][3]. + # + # **Handling Replication of Encrypted Objects** + # + # By default, Amazon S3 doesn't replicate objects that are stored at + # rest using server-side encryption with KMS keys. To replicate Amazon + # Web Services KMS-encrypted objects, add the following: + # `SourceSelectionCriteria`, `SseKmsEncryptedObjects`, `Status`, + # `EncryptionConfiguration`, and `ReplicaKmsKeyID`. For information + # about replication configuration, see [Replicating Objects Created with + # SSE Using KMS keys][4]. + # + # For information on `PutBucketReplication` errors, see [List of + # replication-related error codes][5] + # + # **Permissions** + # + # To create a `PutBucketReplication` request, you must have + # `s3:PutReplicationConfiguration` permissions for the bucket. + # + # By default, a resource owner, in this case the Amazon Web Services + # account that created the bucket, can perform this operation. The + # resource owner can also grant others permissions to perform the + # operation. 
For more information about permissions, see [Specifying + # Permissions in a Policy][6] and [Managing Access Permissions to Your + # Amazon S3 Resources][7]. + # + # To perform this operation, the user or role performing the action must + # have the [iam:PassRole][8] permission. + # + # + # + # The following operations are related to `PutBucketReplication`: + # + # * [GetBucketReplication][9] + # + # * [DeleteBucketReplication][10] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [8]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html + # + # @option params [required, String] :bucket + # The name of the bucket + # + # @option params [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::ReplicationConfiguration] :replication_configuration + # A container for replication rules. You can add up to 1,000 rules. The + # maximum size of a replication configuration is 2 MB. + # + # @option params [String] :token + # A token to allow Object Lock to be enabled for an existing bucket. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: Set replication configuration on a bucket + # + # # The following example sets replication configuration on a bucket. 
+ # + # resp = client.put_bucket_replication({ + # bucket: "examplebucket", + # replication_configuration: { + # role: "arn:aws:iam::123456789012:role/examplerole", + # rules: [ + # { + # destination: { + # bucket: "arn:aws:s3:::destinationbucket", + # storage_class: "STANDARD", + # }, + # prefix: "", + # status: "Enabled", + # }, + # ], + # }, + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_replication({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # replication_configuration: { # required + # role: "Role", # required + # rules: [ # required + # { + # id: "ID", + # priority: 1, + # prefix: "Prefix", + # filter: { + # prefix: "Prefix", + # tag: { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # and: { + # prefix: "Prefix", + # tags: [ + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # }, + # status: "Enabled", # required, accepts Enabled, Disabled + # source_selection_criteria: { + # sse_kms_encrypted_objects: { + # status: "Enabled", # required, accepts Enabled, Disabled + # }, + # replica_modifications: { + # status: "Enabled", # required, accepts Enabled, Disabled + # }, + # }, + # existing_object_replication: { + # status: "Enabled", # required, accepts Enabled, Disabled + # }, + # destination: { # required + # bucket: "BucketName", # required + # account: "AccountId", + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # access_control_translation: { + # owner: "Destination", # required, accepts Destination + # }, + # encryption_configuration: { + # replica_kms_key_id: "ReplicaKmsKeyID", + # }, + # replication_time: { + # status: "Enabled", # required, accepts Enabled, Disabled + # time: { # required + # minutes: 1, + # }, + # }, + # metrics: { + # status: "Enabled", # required, accepts Enabled, Disabled + # event_threshold: { + # minutes: 1, + # }, + # }, + # }, + # delete_marker_replication: { + # status: "Enabled", # accepts Enabled, Disabled + # }, + # }, + # ], + # }, + # token: "ObjectLockToken", + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication AWS API Documentation + # + # @overload put_bucket_replication(params = {}) + # @param [Hash] params ({}) + def put_bucket_replication(params = {}, options = {}) + req = build_request(:put_bucket_replication, params) + req.send_request(options) + end + + # Sets the request payment configuration for a bucket. By default, the + # bucket owner pays for downloads from the bucket. This configuration + # parameter enables the bucket owner (only) to specify that the person + # requesting the download will be charged for the download. For more + # information, see [Requester Pays Buckets][1]. + # + # The following operations are related to `PutBucketRequestPayment`: + # + # * [CreateBucket][2] + # + # * [GetBucketRequestPayment][3] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html + # + # @option params [required, String] :bucket + # The bucket name. 
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. You must use this
+ #   header as a message integrity check to verify that the request body
+ #   was not corrupted in transit. For more information, see [RFC 1864][1].
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ #
+ #
+ #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [required, Types::RequestPaymentConfiguration] :request_payment_configuration
+ #   Container for Payer.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Set request payment configuration on a bucket.
+ #
+ #   # The following example sets request payment configuration on a bucket so that the person requesting the download is charged.
+ #
+ #   resp = client.put_bucket_request_payment({
+ #     bucket: "examplebucket",
+ #     request_payment_configuration: {
+ #       payer: "Requester",
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_request_payment({
+ #     bucket: "BucketName", # required
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     request_payment_configuration: { # required
+ #       payer: "Requester", # required, accepts Requester, BucketOwner
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment AWS API Documentation
+ #
+ # @overload put_bucket_request_payment(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_request_payment(params = {}, options = {})
+   req = build_request(:put_bucket_request_payment, params)
+   req.send_request(options)
+ end
+
+ # Sets the tags for a bucket.
+ #
+ # Use tags to organize your Amazon Web Services bill to reflect your own
+ # cost structure. To do this, sign up to get your Amazon Web Services
+ # account bill with tag key values included. Then, to see the cost of
+ # combined resources, organize your billing information according to
+ # resources with the same tag key values. For example, you can tag
+ # several resources with a specific application name, and then organize
+ # your billing information to see the total cost of that application
+ # across several services. For more information, see [Cost Allocation
+ # and Tagging][1] and [Using Cost Allocation in Amazon S3 Bucket
+ # Tags][2].
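+ #
+ # As a rough sketch only (this is not one of the generated examples;
+ # the bucket name and tag keys/values here are hypothetical), tagging a
+ # bucket for cost allocation along these lines might look like:
+ #
+ #     # Tag the bucket so its costs roll up under an application name.
+ #     client.put_bucket_tagging({
+ #       bucket: "examplebucket",
+ #       tagging: {
+ #         tag_set: [
+ #           { key: "Application", value: "my-app" },
+ #           { key: "CostCenter", value: "mobile" },
+ #         ],
+ #       },
+ #     })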
+ #
+ # When this operation sets the tags for a bucket, it will overwrite any
+ # current tags the bucket already has. You cannot use this operation to
+ # add tags to an existing list of tags.
+ #
+ #
+ #
+ # To use this operation, you must have permissions to perform the
+ # `s3:PutBucketTagging` action. The bucket owner has this permission by
+ # default and can grant this permission to others. For more information
+ # about permissions, see [Permissions Related to Bucket Subresource
+ # Operations][3] and [Managing Access Permissions to Your Amazon S3
+ # Resources][4].
+ #
+ # `PutBucketTagging` has the following special errors:
+ #
+ # * Error code: `InvalidTagError`
+ #
+ #   * Description: The tag provided was not a valid tag. This error can
+ #     occur if the tag did not pass input validation. For information
+ #     about tag restrictions, see [User-Defined Tag Restrictions][5] and
+ #     [Amazon Web Services-Generated Cost Allocation Tag
+ #     Restrictions][6].
+ #
+ #   ^
+ #
+ # * Error code: `MalformedXMLError`
+ #
+ #   * Description: The XML provided does not match the schema.
+ #
+ #   ^
+ #
+ # * Error code: `OperationAbortedError`
+ #
+ #   * Description: A conflicting conditional action is currently in
+ #     progress against this resource. Please try again.
+ #
+ #   ^
+ #
+ # * Error code: `InternalError`
+ #
+ #   * Description: The service was unable to apply the provided tag to
+ #     the bucket.
+ #
+ #   ^
+ #
+ # The following operations are related to `PutBucketTagging`:
+ #
+ # * [GetBucketTagging][7]
+ #
+ # * [DeleteBucketTagging][8]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+ # [5]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
+ # [6]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
+ #
+ # @option params [required, String] :bucket
+ #   The bucket name.
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. You must use this
+ #   header as a message integrity check to verify that the request body
+ #   was not corrupted in transit. For more information, see [RFC 1864][1].
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ #
+ #
+ #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [required, Types::Tagging] :tagging
+ #   Container for the `TagSet` and `Tag` elements.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Set tags on a bucket
+ #
+ #   # The following example sets tags on a bucket. Any existing tags are replaced.
+ #
+ #   resp = client.put_bucket_tagging({
+ #     bucket: "examplebucket",
+ #     tagging: {
+ #       tag_set: [
+ #         {
+ #           key: "Key1",
+ #           value: "Value1",
+ #         },
+ #         {
+ #           key: "Key2",
+ #           value: "Value2",
+ #         },
+ #       ],
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_tagging({
+ #     bucket: "BucketName", # required
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     tagging: { # required
+ #       tag_set: [ # required
+ #         {
+ #           key: "ObjectKey", # required
+ #           value: "Value", # required
+ #         },
+ #       ],
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging AWS API Documentation
+ #
+ # @overload put_bucket_tagging(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_tagging(params = {}, options = {})
+   req = build_request(:put_bucket_tagging, params)
+   req.send_request(options)
+ end
+
+ # Sets the versioning state of an existing bucket.
+ #
+ # You can set the versioning state with one of the following values:
+ #
+ # **Enabled**—Enables versioning for the objects in the bucket. All
+ # objects added to the bucket receive a unique version ID.
+ #
+ # **Suspended**—Disables versioning for the objects in the bucket. All
+ # objects added to the bucket receive the version ID null.
+ #
+ # If the versioning state has never been set on a bucket, it has no
+ # versioning state; a [GetBucketVersioning][1] request does not return a
+ # versioning state value.
+ #
+ # In order to enable MFA Delete, you must be the bucket owner. If you
+ # are the bucket owner and want to enable MFA Delete in the bucket
+ # versioning configuration, you must include the `x-amz-mfa` request
+ # header and the `Status` and the `MfaDelete` request elements in a
+ # request to set the versioning state of the bucket.
+ #
+ # If you have an object expiration lifecycle policy in your
+ # non-versioned bucket and you want to maintain the same permanent
+ # delete behavior when you enable versioning, you must add a noncurrent
+ # expiration policy. The noncurrent expiration lifecycle policy will
+ # manage the deletes of the noncurrent object versions in the
+ # version-enabled bucket. (A version-enabled bucket maintains one
+ # current and zero or more noncurrent object versions.) For more
+ # information, see [Lifecycle and Versioning][2].
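+ #
+ # As an illustrative sketch only (not one of the generated examples;
+ # the bucket name, rule ID, and 30-day window are made up), the pairing
+ # described above might look like:
+ #
+ #     # Turn on versioning, then expire noncurrent versions after 30 days.
+ #     client.put_bucket_versioning({
+ #       bucket: "examplebucket",
+ #       versioning_configuration: { status: "Enabled" },
+ #     })
+ #
+ #     client.put_bucket_lifecycle_configuration({
+ #       bucket: "examplebucket",
+ #       lifecycle_configuration: {
+ #         rules: [
+ #           {
+ #             id: "ExpireNoncurrentVersions",
+ #             status: "Enabled",
+ #             filter: { prefix: "" }, # apply to the whole bucket
+ #             noncurrent_version_expiration: { noncurrent_days: 30 },
+ #           },
+ #         ],
+ #       },
+ #     })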
+ #
+ # **Related Resources**
+ #
+ # * [CreateBucket][3]
+ #
+ # * [DeleteBucket][4]
+ #
+ # * [GetBucketVersioning][1]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+ #
+ # @option params [required, String] :bucket
+ #   The bucket name.
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. You must use
+ #   this header as a message integrity check to verify that the request
+ #   body was not corrupted in transit. For more information, see [RFC
+ #   1864][1].
+ #
+ #   For requests made using the Amazon Web Services Command Line Interface
+ #   (CLI) or Amazon Web Services SDKs, this field is calculated
+ #   automatically.
+ #
+ #
+ #
+ #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :mfa
+ #   The concatenation of the authentication device's serial number, a
+ #   space, and the value that is displayed on your authentication device.
+ #
+ # @option params [required, Types::VersioningConfiguration] :versioning_configuration
+ #   Container for setting the versioning state.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ #
+ # @example Example: Set versioning configuration on a bucket
+ #
+ #   # The following example sets versioning configuration on a bucket. The configuration enables versioning on the bucket.
+ #
+ #   resp = client.put_bucket_versioning({
+ #     bucket: "examplebucket",
+ #     versioning_configuration: {
+ #       mfa_delete: "Disabled",
+ #       status: "Enabled",
+ #     },
+ #   })
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.put_bucket_versioning({
+ #     bucket: "BucketName", # required
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     mfa: "MFA",
+ #     versioning_configuration: { # required
+ #       mfa_delete: "Enabled", # accepts Enabled, Disabled
+ #       status: "Enabled", # accepts Enabled, Suspended
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning AWS API Documentation
+ #
+ # @overload put_bucket_versioning(params = {})
+ # @param [Hash] params ({})
+ def put_bucket_versioning(params = {}, options = {})
+   req = build_request(:put_bucket_versioning, params)
+   req.send_request(options)
+ end
+
+ # Sets the configuration of the website that is specified in the
+ # `website` subresource. To configure a bucket as a website, you can add
+ # this subresource on the bucket with website configuration information
+ # such as the file name of the index document and any redirect rules.
+ # For more information, see [Hosting Websites on Amazon S3][1].
+ #
+ # This PUT action requires the `S3:PutBucketWebsite` permission. By
+ # default, only the bucket owner can configure the website attached to a
+ # bucket; however, bucket owners can allow other users to set the
+ # website configuration by writing a bucket policy that grants them the
+ # `S3:PutBucketWebsite` permission.
+ #
+ # To redirect all website requests sent to the bucket's website
+ # endpoint, you add a website configuration with the following elements.
+ # Because all requests are sent to another website, you don't need to
+ # provide an index document name for the bucket.
+ #
+ # * `WebsiteConfiguration`
+ #
+ # * `RedirectAllRequestsTo`
+ #
+ #   * `HostName`
+ #
+ #   * `Protocol`
+ #
+ # If you want granular control over redirects, you can use the following
+ # elements to add routing rules that describe conditions for redirecting
+ # requests and information about the redirect destination. In this case,
+ # the website configuration must provide an index document for the
+ # bucket, because some requests might not be redirected.
+ #
+ # * `WebsiteConfiguration`
+ #
+ # * `IndexDocument`
+ #
+ #   * `Suffix`
+ #
+ # * `ErrorDocument`
+ #
+ #   * `Key`
+ #
+ # * `RoutingRules`
+ #
+ # * `RoutingRule`
+ #
+ # * `Condition`
+ #
+ #   * `HttpErrorCodeReturnedEquals`
+ #
+ #   * `KeyPrefixEquals`
+ #
+ # * `Redirect`
+ #
+ #   * `Protocol`
+ #
+ #   * `HostName`
+ #
+ #   * `ReplaceKeyPrefixWith`
+ #
+ #   * `ReplaceKeyWith`
+ #
+ #   * `HttpRedirectCode`
+ #
+ # Amazon S3 has a limitation of 50 routing rules per website
+ # configuration. If you require more than 50 routing rules, you can use
+ # object redirect. For more information, see [Configuring an Object
+ # Redirect][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+ #
+ # @option params [required, String] :bucket
+ #   The bucket name.
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the data. You must use this
+ #   header as a message integrity check to verify that the request body
+ #   was not corrupted in transit.
For more information, see [RFC 1864][1]. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::WebsiteConfiguration] :website_configuration + # Container for the request. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # + # @example Example: Set website configuration on a bucket + # + # # The following example adds website configuration to a bucket. + # + # resp = client.put_bucket_website({ + # bucket: "examplebucket", + # content_md5: "", + # website_configuration: { + # error_document: { + # key: "error.html", + # }, + # index_document: { + # suffix: "index.html", + # }, + # }, + # }) + # + # @example Request syntax with placeholder values + # + # resp = client.put_bucket_website({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # website_configuration: { # required + # error_document: { + # key: "ObjectKey", # required + # }, + # index_document: { + # suffix: "Suffix", # required + # }, + # redirect_all_requests_to: { + # host_name: "HostName", # required + # protocol: "http", # accepts http, https + # }, + # routing_rules: [ + # { + # condition: { + # http_error_code_returned_equals: "HttpErrorCodeReturnedEquals", + # key_prefix_equals: "KeyPrefixEquals", + # }, + # redirect: { # required + # host_name: "HostName", + # http_redirect_code: "HttpRedirectCode", + # protocol: "http", # accepts http, https + # replace_key_prefix_with: "ReplaceKeyPrefixWith", + # replace_key_with: "ReplaceKeyWith", + # }, + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite AWS API Documentation + # + # @overload put_bucket_website(params = {}) + # @param [Hash] params ({}) + def put_bucket_website(params = {}, options = {}) + req = build_request(:put_bucket_website, params) + req.send_request(options) + end + + # Adds an object to a bucket. You must have WRITE permissions on a + # bucket to add an object to it. + # + # Amazon S3 never adds partial objects; if you receive a success + # response, Amazon S3 added the entire object to the bucket. + # + # Amazon S3 is a distributed system. If it receives multiple write + # requests for the same object simultaneously, it overwrites all but the + # last object written. 
+    # Amazon S3 does not provide object locking; if you need this, make
+    # sure to build it into your application layer or use versioning
+    # instead.
+    #
+    # To ensure that data is not corrupted traversing the network, use the
+    # `Content-MD5` header. When you use this header, Amazon S3 checks the
+    # object against the provided MD5 value and, if they do not match,
+    # returns an error. Additionally, you can calculate the MD5 while
+    # putting an object to Amazon S3 and compare the returned ETag to the
+    # calculated MD5 value.
+    #
+    # * To successfully complete the `PutObject` request, you must have the
+    #   `s3:PutObject` permission in your IAM policy.
+    #
+    # * To successfully change the object's ACL in your `PutObject`
+    #   request, you must have the `s3:PutObjectAcl` permission in your IAM
+    #   policy.
+    #
+    # * The `Content-MD5` header is required for any request to upload an
+    #   object with a retention period configured using Amazon S3 Object
+    #   Lock. For more information about Amazon S3 Object Lock, see [Amazon
+    #   S3 Object Lock Overview][1] in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    # **Server-side Encryption**
+    #
+    # You can optionally request server-side encryption. With server-side
+    # encryption, Amazon S3 encrypts your data as it writes it to disks in
+    # its data centers and decrypts the data when you access it. You have
+    # the option to provide your own encryption key or use Amazon Web
+    # Services managed encryption keys (SSE-S3 or SSE-KMS). For more
+    # information, see [Using Server-Side Encryption][2].
+    #
+    # If you request server-side encryption using Amazon Web Services Key
+    # Management Service (SSE-KMS), you can enable an S3 Bucket Key at the
+    # object-level. For more information, see [Amazon S3 Bucket Keys][3] in
+    # the *Amazon S3 User Guide*.
+    #
+    # **Access Control List (ACL)-Specific Request Headers**
+    #
+    # You can use headers to grant ACL-based permissions. By default, all
+    # objects are private. Only the owner has full access control. When
+    # adding a new object, you can grant permissions to individual Amazon
+    # Web Services accounts or to predefined groups defined by Amazon S3.
+    # These permissions are then added to the ACL on the object. For more
+    # information, see [Access Control List (ACL) Overview][4] and [Managing
+    # ACLs Using the REST API][5].
+    #
+    # If the bucket that you're uploading objects to uses the bucket owner
+    # enforced setting for S3 Object Ownership, ACLs are disabled and no
+    # longer affect permissions. Buckets that use this setting only accept
+    # PUT requests that don't specify an ACL or PUT requests that specify
+    # bucket owner full control ACLs, such as the
+    # `bucket-owner-full-control` canned ACL or an equivalent form of this
+    # ACL expressed in the XML format. PUT requests that contain other ACLs
+    # (for example, custom grants to certain Amazon Web Services accounts)
+    # fail and return a `400` error with the error code
+    # `AccessControlListNotSupported`.
+    #
+    # For more information, see [Controlling ownership of objects and
+    # disabling ACLs][6] in the *Amazon S3 User Guide*.
+    #
+    # If your bucket uses the bucket owner enforced setting for Object
+    # Ownership, all objects written to the bucket by any account will be
+    # owned by the bucket owner.
+    #
+    #
+    #
+    # **Storage Class Options**
+    #
+    # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+    # created objects. The STANDARD storage class provides high durability
+    # and high availability.
Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][7] + # in the *Amazon S3 User Guide*. + # + # **Versioning** + # + # If you enable versioning for a bucket, Amazon S3 automatically + # generates a unique version ID for the object being stored. Amazon S3 + # returns this ID in the response. When you enable versioning for a + # bucket, if Amazon S3 receives multiple write requests for the same + # object simultaneously, it stores all of the objects. + # + # For more information about versioning, see [Adding Objects to + # Versioning Enabled Buckets][8]. For information about returning the + # versioning state of a bucket, see [GetBucketVersioning][9]. + # + # **Related Resources** + # + # * [CopyObject][10] + # + # * [DeleteObject][11] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + # + # @option params [String] :acl + # The canned ACL to apply to the object. For more information, see + # [Canned ACL][1]. + # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + # + # @option params [String, StringIO, File] :body + # Object data. + # + # @option params [required, String] :bucket + # The bucket name to which the PUT action was initiated. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [String] :cache_control + # Can be used to specify caching behavior along the request/reply chain. + # For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + # + # @option params [String] :content_disposition + # Specifies presentational information for the object. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # + # @option params [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # + # @option params [String] :content_language + # The language the content is in. + # + # @option params [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + # + # @option params [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the message (without the + # headers) according to RFC 1864. This header can be used as a message + # integrity check to verify that the data is the same data that was + # originally sent. Although it is optional, we recommend using the + # Content-MD5 mechanism as an end-to-end integrity check. For more + # information about REST request authentication, see [REST + # Authentication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # + # @option params [String] :content_type + # A standard MIME type describing the format of the contents. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. 
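+    #
+    #   As an illustrative sketch (not from the original reference; the SDK
+    #   computes checksums for you when `checksum_algorithm` is set), the
+    #   value can be derived with Ruby's standard library:
+    #
+    #       require "zlib"
+    #       require "base64"
+    #
+    #       body = File.binread("HappyFace.jpg") # hypothetical local file
+    #       checksum_crc32 = Base64.strict_encode64([Zlib.crc32(body)].pack("N"))
+    #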
For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # + # @option params [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # + # @option params [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # + # @option params [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # + # @option params [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # + # @option params [required, String] :key + # Object key for which the PUT action was initiated. + # + # @option params [Hash] :metadata + # A map of metadata to store with the object in S3. + # + # @option params [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # + # @option params [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. 
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+    #
+    # @option params [String] :website_redirect_location
+    #   If the bucket is configured as a website, redirects requests for this
+    #   object to another object in the same bucket or to an external URL.
+    #   Amazon S3 stores the value of this header in the object metadata. For
+    #   information about object metadata, see [Object Key and Metadata][1].
+    #
+    #   In the following example, the request header sets the redirect to an
+    #   object (anotherPage.html) in the same bucket:
+    #
+    #   `x-amz-website-redirect-location: /anotherPage.html`
+    #
+    #   In the following example, the request header sets the object redirect
+    #   to another website:
+    #
+    #   `x-amz-website-redirect-location: http://www.example.com/`
+    #
+    #   For more information about website hosting in Amazon S3, see [Hosting
+    #   Websites on Amazon S3][2] and [How to Configure Website Page
+    #   Redirects][3].
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+    #   [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+    #   [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+    #
+    # @option params [String] :sse_customer_algorithm
+    #   Specifies the algorithm to use when encrypting the object (for
+    #   example, AES256).
+    #
+    # @option params [String] :sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 to use in
+    #   encrypting data. This value is used to store the object and then it is
+    #   discarded; Amazon S3 does not store the encryption key. The key must
+    #   be appropriate for use with the algorithm specified in the
+    #   `x-amz-server-side-encryption-customer-algorithm` header.
+    #
+    # @option params [String] :sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    #
+    # @option params [String] :ssekms_key_id
+    #   If `x-amz-server-side-encryption` is present and has the value of
+    #   `aws:kms`, this header specifies the ID of the Amazon Web Services Key
+    #   Management Service (Amazon Web Services KMS) symmetric customer
+    #   managed key that was used for the object. If you specify
+    #   `x-amz-server-side-encryption:aws:kms`, but do not provide
+    #   `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the
+    #   Amazon Web Services managed key to protect the data. If the KMS key
+    #   does not exist in the same account issuing the command, you must use
+    #   the full ARN and not just the ID.
+    #
+    # @option params [String] :ssekms_encryption_context
+    #   Specifies the Amazon Web Services KMS Encryption Context to use for
+    #   object encryption. The value of this header is a base64-encoded UTF-8
+    #   string holding JSON with the encryption context key-value pairs.
+    #
+    # @option params [Boolean] :bucket_key_enabled
+    #   Specifies whether Amazon S3 should use an S3 Bucket Key for object
+    #   encryption with server-side encryption using AWS KMS (SSE-KMS).
+    #   Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key
+    #   for object encryption with SSE-KMS.
+    #
+    #   Specifying this header with a PUT action doesn’t affect bucket-level
+    #   settings for S3 Bucket Key.
+    #
+    # @option params [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests.
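+    #
+    #   For example (a hedged sketch; the bucket and key are placeholders),
+    #   a requester acknowledges the charge like this:
+    #
+    #       client.put_object(
+    #         bucket: "examplebucket",
+    #         key: "exampleobject",
+    #         body: "data",
+    #         request_payer: "requester",
+    #       )
+    #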
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. (For example, "Key1=Value1") + # + # @option params [String] :object_lock_mode + # The Object Lock mode that you want to apply to this object. + # + # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # The date and time when you want this object's Object Lock to expire. + # Must be formatted as a timestamp parameter. + # + # @option params [String] :object_lock_legal_hold_status + # Specifies whether a legal hold will be applied to this object. For + # more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::PutObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::PutObjectOutput#expiration #expiration} => String + # * {Types::PutObjectOutput#etag #etag} => String + # * {Types::PutObjectOutput#checksum_crc32 #checksum_crc32} => String + # * {Types::PutObjectOutput#checksum_crc32c #checksum_crc32c} => String + # * {Types::PutObjectOutput#checksum_sha1 #checksum_sha1} => String + # * {Types::PutObjectOutput#checksum_sha256 #checksum_sha256} => String + # * {Types::PutObjectOutput#server_side_encryption #server_side_encryption} => String + # * {Types::PutObjectOutput#version_id #version_id} => String + # * {Types::PutObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::PutObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::PutObjectOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::PutObjectOutput#ssekms_encryption_context #ssekms_encryption_context} => String + # * {Types::PutObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::PutObjectOutput#request_charged #request_charged} => String + # + # + # @example Example: To upload an object and specify optional tags + # + # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore + # # S3 returns version ID of the newly created object. + # + # resp = client.put_object({ + # body: "c:\\HappyFace.jpg", + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # tagging: "key1=value1&key2=value2", + # }) + # + # resp.to_h outputs the following: + # { + # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", + # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a", + # } + # + # @example Example: To upload an object + # + # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file + # # syntax. S3 returns VersionId of the newly created object. 
+    #
+    #   resp = client.put_object({
+    #     body: "HappyFace.jpg",
+    #     bucket: "examplebucket",
+    #     key: "HappyFace.jpg",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+    #     version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
+    #   }
+    #
+    # @example Example: To upload an object and specify server-side encryption and object tags
+    #
+    #   # The following example uploads an object. The request specifies the optional server-side encryption option. The
+    #   # request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.
+    #
+    #   resp = client.put_object({
+    #     body: "filetoupload",
+    #     bucket: "examplebucket",
+    #     key: "exampleobject",
+    #     server_side_encryption: "AES256",
+    #     tagging: "key1=value1&key2=value2",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+    #     server_side_encryption: "AES256",
+    #     version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
+    #   }
+    #
+    # @example Example: To upload an object and specify user-defined metadata
+    #
+    #   # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning
+    #   # enabled, S3 returns version ID in response.
+    #
+    #   resp = client.put_object({
+    #     body: "filetoupload",
+    #     bucket: "examplebucket",
+    #     key: "exampleobject",
+    #     metadata: {
+    #       "metadata1" => "value1",
+    #       "metadata2" => "value2",
+    #     },
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+    #     version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
+    #   }
+    #
+    # @example Example: To upload an object (specify optional headers)
+    #
+    #   # The following example uploads an object. The request specifies optional request headers to direct S3 to use a
+    #   # specific storage class and use server-side encryption.
+    #
+    #   resp = client.put_object({
+    #     body: "HappyFace.jpg",
+    #     bucket: "examplebucket",
+    #     key: "HappyFace.jpg",
+    #     server_side_encryption: "AES256",
+    #     storage_class: "STANDARD_IA",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+    #     server_side_encryption: "AES256",
+    #     version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
+    #   }
+    #
+    # @example Example: To upload an object and specify canned ACL.
+    #
+    #   # The following example uploads an object. The request specifies an optional canned ACL (access control list) to
+    #   # grant READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.
+    #
+    #   resp = client.put_object({
+    #     acl: "authenticated-read",
+    #     body: "filetoupload",
+    #     bucket: "examplebucket",
+    #     key: "exampleobject",
+    #   })
+    #
+    #   resp.to_h outputs the following:
+    #   {
+    #     etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+    #     version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
+    #   }
+    #
+    # @example Example: To create an object.
+    #
+    #   # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
+ # + # resp = client.put_object({ + # body: "filetoupload", + # bucket: "examplebucket", + # key: "objectkey", + # }) + # + # resp.to_h outputs the following: + # { + # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", + # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ", + # } + # + # @example Streaming a file from disk + # # upload file from disk in a single request, may not exceed 5GB + # File.open('/source/file/path', 'rb') do |file| + # s3.put_object(bucket: 'bucket-name', key: 'object-key', body: file) + # end + # + # @example Request syntax with placeholder values + # + # resp = client.put_object({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # body: source_file, + # bucket: "BucketName", # required + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_length: 1, + # content_md5: "ContentMD5", + # content_type: "ContentType", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # key: "ObjectKey", # required + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.expiration #=> String + # resp.etag #=> String + # resp.checksum_crc32 #=> String + # resp.checksum_crc32c #=> String + # resp.checksum_sha1 #=> String + # resp.checksum_sha256 #=> String + # resp.server_side_encryption #=> String, one of "AES256", "aws:kms" + # resp.version_id #=> String + # resp.sse_customer_algorithm #=> String + # resp.sse_customer_key_md5 #=> String + # resp.ssekms_key_id #=> String + # resp.ssekms_encryption_context #=> String + # resp.bucket_key_enabled #=> Boolean + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject AWS API Documentation + # + # @overload put_object(params = {}) + # @param [Hash] params ({}) + def put_object(params = {}, options = {}) + req = build_request(:put_object, params) + req.send_request(options) + end + + # Uses the `acl` subresource to set the access control list (ACL) + # permissions for a new or existing object in an S3 bucket. You must + # have `WRITE_ACP` permission to set the ACL of an object. 
+    # For more information, see [What permissions can I grant?][1] in the
+    # *Amazon S3 User Guide*.
+    #
+    # This action is not supported by Amazon S3 on Outposts.
+    #
+    # Depending on your application needs, you can choose to set the ACL on
+    # an object using either the request body or the headers. For example,
+    # if you have an existing application that updates a bucket ACL using
+    # the request body, you can continue to use that approach. For more
+    # information, see [Access Control List (ACL) Overview][2] in the
+    # *Amazon S3 User Guide*.
+    #
+    # If your bucket uses the bucket owner enforced setting for S3 Object
+    # Ownership, ACLs are disabled and no longer affect permissions. You
+    # must use policies to grant access to your bucket and the objects in
+    # it. Requests to set ACLs or update ACLs fail and return the
+    # `AccessControlListNotSupported` error code. Requests to read ACLs are
+    # still supported. For more information, see [Controlling object
+    # ownership][3] in the *Amazon S3 User Guide*.
+    #
+    # **Access Permissions**
+    #
+    # You can set access permissions using one of the following methods:
+    #
+    # * Specify a canned ACL with the `x-amz-acl` request header. Amazon S3
+    #   supports a set of predefined ACLs, known as canned ACLs. Each canned
+    #   ACL has a predefined set of grantees and permissions. Specify the
+    #   canned ACL name as the value of `x-amz-acl`. If you use this header,
+    #   you cannot use other access control-specific headers in your
+    #   request. For more information, see [Canned ACL][4].
+    #
+    # * Specify access permissions explicitly with the `x-amz-grant-read`,
+    #   `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
+    #   `x-amz-grant-full-control` headers. When using these headers, you
+    #   specify explicit access permissions and grantees (Amazon Web
+    #   Services accounts or Amazon S3 groups) who will receive the
+    #   permission. If you use these ACL-specific headers, you cannot use
+    #   `x-amz-acl` header to set a canned ACL. These parameters map to the
+    #   set of permissions that Amazon S3 supports in an ACL. For more
+    #   information, see [Access Control List (ACL) Overview][2].
+    #
+    #   You specify each grantee as a type=value pair, where the type is one
+    #   of the following:
+    #
+    #   * `id` – if the value specified is the canonical user ID of an
+    #     Amazon Web Services account
+    #
+    #   * `uri` – if you are granting permissions to a predefined group
+    #
+    #   * `emailAddress` – if the value specified is the email address of an
+    #     Amazon Web Services account
+    #
+    #     Using email addresses to specify a grantee is only supported in
+    #     the following Amazon Web Services Regions:
+    #
+    #     * US East (N. Virginia)
+    #
+    #     * US West (N. California)
+    #
+    #     * US West (Oregon)
+    #
+    #     * Asia Pacific (Singapore)
+    #
+    #     * Asia Pacific (Sydney)
+    #
+    #     * Asia Pacific (Tokyo)
+    #
+    #     * Europe (Ireland)
+    #
+    #     * South America (São Paulo)
+    #
+    #     For a list of all the Amazon S3 supported Regions and endpoints,
+    #     see [Regions and Endpoints][5] in the Amazon Web Services General
+    #     Reference.
+    #
+    #
+    #
+    #   For example, the following `x-amz-grant-read` header grants list
+    #   objects permission to the two Amazon Web Services accounts
+    #   identified by their email addresses.
+    #
+    #   `x-amz-grant-read: emailAddress="xyz@amazon.com",
+    #   emailAddress="abc@amazon.com"`
+    #
+    # You can use either a canned ACL or specify access permissions
+    # explicitly. You cannot do both.
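+    #
+    # As a sketch of the two mutually exclusive styles (placeholder bucket
+    # and key; this example is not part of the original reference):
+    #
+    #     # a canned ACL...
+    #     client.put_object_acl(bucket: "examplebucket", key: "HappyFace.jpg", acl: "public-read")
+    #
+    #     # ...or explicit grants, never both in one request
+    #     client.put_object_acl(
+    #       bucket: "examplebucket",
+    #       key: "HappyFace.jpg",
+    #       grant_read: "uri=http://acs.amazonaws.com/groups/global/AllUsers",
+    #     )
+    #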
+    #
+    # **Grantee Values**
+    #
+    # You can specify the person (grantee) to whom you're assigning access
+    # rights (using request elements) in the following ways:
+    #
+    # * By the person's ID:
+    #
+    #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    #   xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>`
+    #
+    #   DisplayName is optional and ignored in the request.
+    #
+    # * By URI:
+    #
+    #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    #   xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
+    #
+    # * By Email address:
+    #
+    #   `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    #   xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>`
+    #
+    #   The grantee is resolved to the CanonicalUser and, in a response to a
+    #   GET Object acl request, appears as the CanonicalUser.
+    #
+    #   Using email addresses to specify a grantee is only supported in the
+    #   following Amazon Web Services Regions:
+    #
+    #   * US East (N. Virginia)
+    #
+    #   * US West (N. California)
+    #
+    #   * US West (Oregon)
+    #
+    #   * Asia Pacific (Singapore)
+    #
+    #   * Asia Pacific (Sydney)
+    #
+    #   * Asia Pacific (Tokyo)
+    #
+    #   * Europe (Ireland)
+    #
+    #   * South America (São Paulo)
+    #
+    #   For a list of all the Amazon S3 supported Regions and endpoints, see
+    #   [Regions and Endpoints][5] in the Amazon Web Services General
+    #   Reference.
+    #
+    #
+    #
+    # **Versioning**
+    #
+    # The ACL of an object is set at the object version level. By default,
+    # PUT sets the ACL of the current version of an object. To set the ACL
+    # of a different version, use the `versionId` subresource.
+    #
+    # **Related Resources**
+    #
+    # * [CopyObject][6]
+    #
+    # * [GetObject][7]
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
+    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+    # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+    # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+    # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+    # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+    #
+    # @option params [String] :acl
+    #   The canned ACL to apply to the object. For more information, see
+    #   [Canned ACL][1].
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+    #
+    # @option params [Types::AccessControlPolicy] :access_control_policy
+    #   Contains the elements that set the ACL permissions for an object per
+    #   grantee.
+    #
+    # @option params [required, String] :bucket
+    #   The bucket name that contains the object to which you want to attach
+    #   the ACL.
+    #
+    #   When using this action with an access point, you must direct requests
+    #   to the access point hostname. The access point hostname takes the form
+    #   *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+    #   When using this action with an access point through the Amazon Web
+    #   Services SDKs, you provide the access point ARN in place of the bucket
+    #   name. For more information about access point ARNs, see [Using access
+    #   points][1] in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+    #
+    # @option params [String] :content_md5
+    #   The base64-encoded 128-bit MD5 digest of the data. This header must be
+    #   used as a message integrity check to verify that the request body was
+    #   not corrupted in transit.
+    #   For more information, see [RFC 1864][1].
+    #
+    #   For requests made using the Amazon Web Services Command Line Interface
+    #   (CLI) or Amazon Web Services SDKs, this field is calculated
+    #   automatically.
+    #
+    #
+    #
+    #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+    #
+    # @option params [String] :checksum_algorithm
+    #   Indicates the algorithm used to create the checksum for the object
+    #   when using the SDK. This header will not provide any additional
+    #   functionality if not using the SDK. When sending this header, there
+    #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+    #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+    #   `400 Bad Request`. For more information, see [Checking object
+    #   integrity][1] in the *Amazon S3 User Guide*.
+    #
+    #   If you provide an individual checksum, Amazon S3 ignores any provided
+    #   `ChecksumAlgorithm` parameter.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    #
+    # @option params [String] :grant_full_control
+    #   Allows grantee the read, write, read ACP, and write ACP permissions on
+    #   the bucket.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    #
+    # @option params [String] :grant_read
+    #   Allows grantee to list the objects in the bucket.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    #
+    # @option params [String] :grant_read_acp
+    #   Allows grantee to read the bucket ACL.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    #
+    # @option params [String] :grant_write
+    #   Allows grantee to create new objects in the bucket.
+    #
+    #   For the bucket and object owners of existing objects, also allows
+    #   deletions and overwrites of those objects.
+    #
+    # @option params [String] :grant_write_acp
+    #   Allows grantee to write the ACL for the applicable bucket.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    #
+    # @option params [required, String] :key
+    #   Key for which the PUT action was initiated.
+    #
+    #   When using this action with an access point, you must direct requests
+    #   to the access point hostname. The access point hostname takes the form
+    #   *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+    #   When using this action with an access point through the Amazon Web
+    #   Services SDKs, you provide the access point ARN in place of the bucket
+    #   name. For more information about access point ARNs, see [Using access
+    #   points][1] in the *Amazon S3 User Guide*.
+    #
+    #   When using this action with Amazon S3 on Outposts, you must direct
+    #   requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+    #   takes the form
+    #   `AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+    #   When using this action with S3 on Outposts through the Amazon Web
+    #   Services SDKs, you provide the Outposts bucket ARN in place of the
+    #   bucket name. For more information about S3 on Outposts ARNs, see
+    #   [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+    #   [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+    #
+    # @option params [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests.
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :version_id + # VersionId used to reference a specific version of the object. + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::PutObjectAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::PutObjectAclOutput#request_charged #request_charged} => String + # + # + # @example Example: To grant permissions using object ACL + # + # # The following example adds grants to an object ACL. The first permission grants user1 and user2 FULL_CONTROL and the + # # AllUsers group READ permission. + # + # resp = client.put_object_acl({ + # access_control_policy: { + # }, + # bucket: "examplebucket", + # grant_full_control: "emailaddress=user1@example.com,emailaddress=user2@example.com", + # grant_read: "uri=http://acs.amazonaws.com/groups/global/AllUsers", + # key: "HappyFace.jpg", + # }) + # + # resp.to_h outputs the following: + # { + # } + # + # @example Request syntax with placeholder values + # + # resp = client.put_object_acl({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # access_control_policy: { + # grants: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP + # }, + # ], + # owner: { + # display_name: "DisplayName", + # id: "ID", + # }, + # }, + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write: "GrantWrite", + # grant_write_acp: "GrantWriteACP", + # key: "ObjectKey", # required + # request_payer: "requester", # accepts requester + # version_id: "ObjectVersionId", + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl AWS API Documentation + # + # @overload put_object_acl(params = {}) + # @param [Hash] params ({}) + def put_object_acl(params = {}, options = {}) + req = build_request(:put_object_acl, params) + req.send_request(options) + end + + # Applies a legal hold configuration to the specified object. For more + # information, see [Locking Objects][1]. + # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # + # @option params [required, String] :bucket + # The bucket name containing the object that you want to place a legal + # hold on. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. 
The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # + # @option params [required, String] :key + # The key name for the object that you want to place a legal hold on. + # + # @option params [Types::ObjectLockLegalHold] :legal_hold + # Container element for the legal hold configuration you want to apply + # to the specified object. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :version_id + # The version ID of the object that you want to place a legal hold on. + # + # @option params [String] :content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
+ # + # @return [Types::PutObjectLegalHoldOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::PutObjectLegalHoldOutput#request_charged #request_charged} => String + # + # @example Request syntax with placeholder values + # + # resp = client.put_object_legal_hold({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # legal_hold: { + # status: "ON", # accepts ON, OFF + # }, + # request_payer: "requester", # accepts requester + # version_id: "ObjectVersionId", + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold AWS API Documentation + # + # @overload put_object_legal_hold(params = {}) + # @param [Hash] params ({}) + def put_object_legal_hold(params = {}, options = {}) + req = build_request(:put_object_legal_hold, params) + req.send_request(options) + end + + # Places an Object Lock configuration on the specified bucket. The rule + # specified in the Object Lock configuration will be applied by default + # to every new object placed in the specified bucket. For more + # information, see [Locking Objects][1]. + # + # * The `DefaultRetention` settings require both a mode and a period. + # + # * The `DefaultRetention` period can be either `Days` or `Years` but + # you must select one. You cannot specify `Days` and `Years` at the + # same time. + # + # * You can only enable Object Lock for new buckets. If you want to turn + # on Object Lock for an existing bucket, contact Amazon Web Services + # Support. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # + # @option params [required, String] :bucket + # The bucket whose Object Lock configuration you want to create or + # replace. + # + # @option params [Types::ObjectLockConfiguration] :object_lock_configuration + # The Object Lock configuration that you want to apply to the specified + # bucket. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :token + # A token to allow Object Lock to be enabled for an existing bucket. + # + # @option params [String] :content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. 
+ # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Types::PutObjectLockConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::PutObjectLockConfigurationOutput#request_charged #request_charged} => String + # + # @example Request syntax with placeholder values + # + # resp = client.put_object_lock_configuration({ + # bucket: "BucketName", # required + # object_lock_configuration: { + # object_lock_enabled: "Enabled", # accepts Enabled + # rule: { + # default_retention: { + # mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # days: 1, + # years: 1, + # }, + # }, + # }, + # request_payer: "requester", # accepts requester + # token: "ObjectLockToken", + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration AWS API Documentation + # + # @overload put_object_lock_configuration(params = {}) + # @param [Hash] params ({}) + def put_object_lock_configuration(params = {}, options = {}) + req = build_request(:put_object_lock_configuration, params) + req.send_request(options) + end + + # Places an Object Retention configuration on an object. For more + # information, see [Locking Objects][1]. Users or accounts require the + # `s3:PutObjectRetention` permission in order to place an Object + # Retention configuration on objects. Bypassing a Governance Retention + # configuration requires the `s3:BypassGovernanceRetention` permission. + # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # + # @option params [required, String] :bucket + # The bucket name that contains the object you want to apply this Object + # Retention configuration to. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # + # @option params [required, String] :key + # The key name for the object that you want to apply this Object + # Retention configuration to. + # + # @option params [Types::ObjectLockRetention] :retention + # The container element for the Object Retention configuration. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. 
+    #   For information about downloading objects from Requester Pays
+    #   buckets, see [Downloading Objects in Requester Pays Buckets][1] in
+    #   the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    #
+    # @option params [String] :version_id
+    #   The version ID for the object that you want to apply this Object
+    #   Retention configuration to.
+    #
+    # @option params [Boolean] :bypass_governance_retention
+    #   Indicates whether this action should bypass Governance-mode
+    #   restrictions.
+    #
+    # @option params [String] :content_md5
+    #   The MD5 hash for the request body.
+    #
+    #   For requests made using the Amazon Web Services Command Line Interface
+    #   (CLI) or Amazon Web Services SDKs, this field is calculated
+    #   automatically.
+    #
+    # @option params [String] :checksum_algorithm
+    #   Indicates the algorithm used to create the checksum for the object
+    #   when using the SDK. This header will not provide any additional
+    #   functionality if not using the SDK. When sending this header, there
+    #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+    #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+    #   `400 Bad Request`. For more information, see [Checking object
+    #   integrity][1] in the *Amazon S3 User Guide*.
+    #
+    #   If you provide an individual checksum, Amazon S3 ignores any provided
+    #   `ChecksumAlgorithm` parameter.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    #
+    # @option params [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    #
+    # @return [Types::PutObjectRetentionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+    #
+    #   * {Types::PutObjectRetentionOutput#request_charged #request_charged} => String
+    #
+    # @example Request syntax with placeholder values
+    #
+    #   resp = client.put_object_retention({
+    #     bucket: "BucketName", # required
+    #     key: "ObjectKey", # required
+    #     retention: {
+    #       mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE
+    #       retain_until_date: Time.now,
+    #     },
+    #     request_payer: "requester", # accepts requester
+    #     version_id: "ObjectVersionId",
+    #     bypass_governance_retention: false,
+    #     content_md5: "ContentMD5",
+    #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    #
+    # @example Response structure
+    #
+    #   resp.request_charged #=> String, one of "requester"
+    #
+    # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention AWS API Documentation
+    #
+    # @overload put_object_retention(params = {})
+    # @param [Hash] params ({})
+    def put_object_retention(params = {}, options = {})
+      req = build_request(:put_object_retention, params)
+      req.send_request(options)
+    end
+
+    # Sets the supplied tag-set to an object that already exists in a
+    # bucket.
+    #
+    # A tag is a key-value pair. You can associate tags with an object by
+    # sending a PUT request against the tagging subresource that is
+    # associated with the object. You can retrieve tags by sending a GET
+    # request. For more information, see [GetObjectTagging][1].
+    #
+    # For restrictions on tag characters and encodings, see [Tag
+    # Restrictions][2]. Note that Amazon S3 limits the maximum number of
+    # tags to 10 tags per object.
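+    #
+    # Because this operation replaces the entire tag-set, adding a single
+    # tag is a read-modify-write (a hedged sketch; bucket, key, and tag
+    # names are placeholders):
+    #
+    #     existing = client.get_object_tagging(bucket: "examplebucket", key: "HappyFace.jpg").tag_set
+    #     client.put_object_tagging(
+    #       bucket: "examplebucket",
+    #       key: "HappyFace.jpg",
+    #       tagging: { tag_set: existing.map(&:to_h) + [{ key: "Key5", value: "Value5" }] },
+    #     )
+    #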
+ # + # To use this operation, you must have permission to perform the + # `s3:PutObjectTagging` action. By default, the bucket owner has this + # permission and can grant this permission to others. + # + # To put tags of any other version, use the `versionId` query parameter. + # You also need permission for the `s3:PutObjectVersionTagging` action. + # + # For information about the Amazon S3 object tagging feature, see + # [Object Tagging][3]. + # + # **Special Errors** + # + # * * Code: InvalidTagError + # + # * *Cause: The tag provided was not a valid tag. This error can occur + # if the tag did not pass input validation. For more information, + # see [Object Tagging][3].* + # + # * * Code: MalformedXMLError + # + # * *Cause: The XML provided does not match the schema.* + # + # * * Code: OperationAbortedError + # + # * *Cause: A conflicting conditional action is currently in progress + # against this resource. Please try again.* + # + # * * *Code: InternalError* + # + # * *Cause: The service was unable to apply the provided tag to the + # object.* + # + # **Related Resources** + # + # * [GetObjectTagging][1] + # + # * [DeleteObjectTagging][4] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html + # [2]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html + # + # @option params [required, String] :bucket + # The bucket name containing the object. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Name of the object key. + # + # @option params [String] :version_id + # The versionId of the object that the tag-set will be added to. + # + # @option params [String] :content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. 
When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::Tagging] :tagging + # Container for the `TagSet` and `Tag` elements + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @return [Types::PutObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::PutObjectTaggingOutput#version_id #version_id} => String + # + # + # @example Example: To add tags to an existing object + # + # # The following example adds tags to an existing object. + # + # resp = client.put_object_tagging({ + # bucket: "examplebucket", + # key: "HappyFace.jpg", + # tagging: { + # tag_set: [ + # { + # key: "Key3", + # value: "Value3", + # }, + # { + # key: "Key4", + # value: "Value4", + # }, + # ], + # }, + # }) + # + # resp.to_h outputs the following: + # { + # version_id: "null", + # } + # + # @example Request syntax with placeholder values + # + # resp = client.put_object_tagging({ + # bucket: "BucketName", # required + # key: "ObjectKey", # required + # version_id: "ObjectVersionId", + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # tagging: { # required + # tag_set: [ # required + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # expected_bucket_owner: "AccountId", + # request_payer: "requester", # accepts requester + # }) + # + # @example Response structure + # + # resp.version_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging AWS API Documentation + # + # @overload put_object_tagging(params = {}) + # @param [Hash] params ({}) + def put_object_tagging(params = {}, options = {}) + req = build_request(:put_object_tagging, params) + req.send_request(options) + end + + # Creates or modifies the `PublicAccessBlock` configuration for an + # Amazon S3 bucket. To use this operation, you must have the + # `s3:PutBucketPublicAccessBlock` permission. For more information about + # Amazon S3 permissions, see [Specifying Permissions in a Policy][1]. + # + # When Amazon S3 evaluates the `PublicAccessBlock` configuration for a + # bucket or an object, it checks the `PublicAccessBlock` configuration + # for both the bucket (or the bucket that contains the object) and the + # bucket owner's account. 
If the `PublicAccessBlock` configurations are + # different between the bucket and the account, Amazon S3 uses the most + # restrictive combination of the bucket-level and account-level + # settings. + # + # For more information about when Amazon S3 considers a bucket or an + # object public, see [The Meaning of "Public"][2]. + # + # **Related Resources** + # + # * [GetPublicAccessBlock][3] + # + # * [DeletePublicAccessBlock][4] + # + # * [GetBucketPolicyStatus][5] + # + # * [Using Amazon S3 Block Public Access][6] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + # + # @option params [required, String] :bucket + # The name of the Amazon S3 bucket whose `PublicAccessBlock` + # configuration you want to set. + # + # @option params [String] :content_md5 + # The MD5 hash of the `PutPublicAccessBlock` request body. + # + # For requests made using the Amazon Web Services Command Line Interface + # (CLI) or Amazon Web Services SDKs, this field is calculated + # automatically. + # + # @option params [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [required, Types::PublicAccessBlockConfiguration] :public_access_block_configuration + # The `PublicAccessBlock` configuration that you want to apply to this + # Amazon S3 bucket. You can enable the configuration options in any + # combination. For more information about when Amazon S3 considers a + # bucket or object public, see [The Meaning of "Public"][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
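+ #
+ # As a small illustrative sketch (the bucket name is a hypothetical
+ # placeholder), enabling all four settings together blocks public
+ # access entirely:
+ #
+ #     client.put_public_access_block({
+ #       bucket: "amzn-s3-demo-bucket",
+ #       public_access_block_configuration: {
+ #         block_public_acls: true,
+ #         ignore_public_acls: true,
+ #         block_public_policy: true,
+ #         restrict_public_buckets: true,
+ #       },
+ #     })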
+ # + # @example Request syntax with placeholder values + # + # resp = client.put_public_access_block({ + # bucket: "BucketName", # required + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # public_access_block_configuration: { # required + # block_public_acls: false, + # ignore_public_acls: false, + # block_public_policy: false, + # restrict_public_buckets: false, + # }, + # expected_bucket_owner: "AccountId", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock AWS API Documentation + # + # @overload put_public_access_block(params = {}) + # @param [Hash] params ({}) + def put_public_access_block(params = {}, options = {}) + req = build_request(:put_public_access_block, params) + req.send_request(options) + end + + # Restores an archived copy of an object back into Amazon S3 + # + # This action is not supported by Amazon S3 on Outposts. + # + # This action performs the following types of requests: + # + # * `select` - Perform a select query on an archived object + # + # * `restore an archive` - Restore an archived object + # + # To use this operation, you must have permissions to perform the + # `s3:RestoreObject` action. The bucket owner has this permission by + # default and can grant this permission to others. For more information + # about permissions, see [Permissions Related to Bucket Subresource + # Operations][1] and [Managing Access Permissions to Your Amazon S3 + # Resources][2] in the *Amazon S3 User Guide*. + # + # **Querying Archives with Select Requests** + # + # You use a select type of request to perform SQL queries on archived + # objects. The archived objects that are being queried by the select + # request must be formatted as uncompressed comma-separated values (CSV) + # files. You can run queries and custom analytics on your archived data + # without having to restore your data to a hotter Amazon S3 tier. For an + # overview about select requests, see [Querying Archived Objects][3] in + # the *Amazon S3 User Guide*. + # + # When making a select request, do the following: + # + # * Define an output location for the select query's output. This must + # be an Amazon S3 bucket in the same Amazon Web Services Region as the + # bucket that contains the archive object that is being queried. The + # Amazon Web Services account that initiates the job must have + # permissions to write to the S3 bucket. You can specify the storage + # class and encryption for the output objects stored in the bucket. + # For more information about output, see [Querying Archived + # Objects][3] in the *Amazon S3 User Guide*. + # + # For more information about the `S3` structure in the request body, + # see the following: + # + # * [PutObject][4] + # + # * [Managing Access with ACLs][5] in the *Amazon S3 User Guide* + # + # * [Protecting Data Using Server-Side Encryption][6] in the *Amazon + # S3 User Guide* + # + # * Define the SQL expression for the `SELECT` type of restoration for + # your query in the request body's `SelectParameters` structure. You + # can use expressions like the following examples. + # + # * The following expression returns all records from the specified + # object. + # + # `SELECT * FROM Object` + # + # * Assuming that you are not using any headers for data stored in the + # object, you can specify columns with positional headers. 
+ #
+ #   `SELECT s._1, s._2 FROM Object s WHERE s._3 > 100`
+ #
+ # * If you have headers and you set the `fileHeaderInfo` in the `CSV`
+ #   structure in the request body to `USE`, you can specify headers in
+ #   the query. (If you set the `fileHeaderInfo` field to `IGNORE`, the
+ #   first row is skipped for the query.) You cannot mix ordinal
+ #   positions with header column names.
+ #
+ #   `SELECT s.Id, s.FirstName, s.SSN FROM S3Object s`
+ #
+ # For more information about using SQL with S3 Glacier Select restore,
+ # see [SQL Reference for Amazon S3 Select and S3 Glacier Select][7] in
+ # the *Amazon S3 User Guide*.
+ #
+ # When making a select request, you can also do the following:
+ #
+ # * To expedite your queries, specify the `Expedited` tier. For more
+ #   information about tiers, see "Restoring Archives," later in this
+ #   topic.
+ #
+ # * Specify details about the data serialization format of both the
+ #   input object that is being queried and the serialization of the
+ #   CSV-encoded query results.
+ #
+ # The following are additional important facts about the select feature:
+ #
+ # * The output results are new Amazon S3 objects. Unlike archive
+ #   retrievals, they are stored until explicitly deleted, either
+ #   manually or through a lifecycle policy.
+ #
+ # * You can issue more than one select request on the same Amazon S3
+ #   object. Amazon S3 doesn't deduplicate requests, so avoid issuing
+ #   duplicate requests.
+ #
+ # * Amazon S3 accepts a select request even if the object has already
+ #   been restored. A select request doesn't return error response `409`.
+ #
+ # **Restoring objects**
+ #
+ # Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive
+ # storage class, and S3 Intelligent-Tiering Archive or S3
+ # Intelligent-Tiering Deep Archive tiers, are not accessible in real
+ # time. For objects in the Archive Access or Deep Archive Access tiers,
+ # you must first initiate a restore request, and then wait until the
+ # object is moved into the Frequent Access tier. For objects in the S3
+ # Glacier or S3 Glacier Deep Archive storage classes, you must first
+ # initiate a restore request, and then wait until a temporary copy of
+ # the object is available. To access an archived object, you must
+ # restore the object for the duration (number of days) that you specify.
+ #
+ # To restore a specific object version, you can provide a version ID. If
+ # you don't provide a version ID, Amazon S3 restores the current
+ # version.
+ #
+ # When restoring an archived object (or using a select request), you can
+ # specify one of the following data access tier options in the `Tier`
+ # element of the request body:
+ #
+ # * `Expedited` - Expedited retrievals allow you to quickly access your
+ #   data stored in the S3 Glacier storage class or S3
+ #   Intelligent-Tiering Archive tier when occasional urgent requests for
+ #   a subset of archives are required. For all but the largest archived
+ #   objects (250 MB+), data accessed using Expedited retrievals is
+ #   typically made available within 1–5 minutes. Provisioned capacity
+ #   ensures that retrieval capacity for Expedited retrievals is
+ #   available when you need it. Expedited retrievals and provisioned
+ #   capacity are not available for objects stored in the S3 Glacier Deep
+ #   Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+ #
+ # * `Standard` - Standard retrievals allow you to access any of your
+ #   archived objects within several hours. This is the default option
+ #   for retrieval requests that do not specify the retrieval option.
+ #   Standard retrievals typically finish within 3–5 hours for objects
+ #   stored in the S3 Glacier storage class or S3 Intelligent-Tiering
+ #   Archive tier. They typically finish within 12 hours for objects
+ #   stored in the S3 Glacier Deep Archive storage class or S3
+ #   Intelligent-Tiering Deep Archive tier. Standard retrievals are free
+ #   for objects stored in S3 Intelligent-Tiering.
+ #
+ # * `Bulk` - Bulk retrievals are the lowest-cost retrieval option in S3
+ #   Glacier, enabling you to retrieve large amounts, even petabytes, of
+ #   data inexpensively. Bulk retrievals typically finish within 5–12
+ #   hours for objects stored in the S3 Glacier storage class or S3
+ #   Intelligent-Tiering Archive tier. They typically finish within 48
+ #   hours for objects stored in the S3 Glacier Deep Archive storage
+ #   class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals
+ #   are free for objects stored in S3 Intelligent-Tiering.
+ #
+ # For more information about archive retrieval options and provisioned
+ # capacity for `Expedited` data access, see [Restoring Archived
+ # Objects][8] in the *Amazon S3 User Guide*.
+ #
+ # You can use Amazon S3 restore speed upgrade to change the restore
+ # speed to a faster speed while a restore is in progress. For more
+ # information, see [Upgrading the speed of an in-progress restore][9] in
+ # the *Amazon S3 User Guide*.
+ #
+ # To get the status of object restoration, you can send a `HEAD`
+ # request. Operations return the `x-amz-restore` header, which provides
+ # information about the restoration status, in the response. You can use
+ # Amazon S3 event notifications to notify you when a restore is
+ # initiated or completed. For more information, see [Configuring Amazon
+ # S3 Event Notifications][10] in the *Amazon S3 User Guide*.
+ #
+ # After restoring an archived object, you can update the restoration
+ # period by reissuing the request with a new period. Amazon S3 updates
+ # the restoration period relative to the current time and charges only
+ # for the request; there are no data transfer charges. You cannot update
+ # the restoration period when Amazon S3 is actively processing your
+ # current restore request for the object.
+ #
+ # If your bucket has a lifecycle configuration with a rule that includes
+ # an expiration action, the object expiration overrides the life span
+ # that you specify in a restore request. For example, if you restore an
+ # object copy for 10 days, but the object is scheduled to expire in 3
+ # days, Amazon S3 deletes the object in 3 days. For more information
+ # about lifecycle configuration, see
+ # [PutBucketLifecycleConfiguration][11] and [Object Lifecycle
+ # Management][12] in the *Amazon S3 User Guide*.
+ #
+ # **Responses**
+ #
+ # A successful action returns either the `200 OK` or `202 Accepted`
+ # status code.
+ #
+ # * If the object is not previously restored, then Amazon S3 returns
+ #   `202 Accepted` in the response.
+ #
+ # * If the object is previously restored, Amazon S3 returns `200 OK` in
+ #   the response.
+ #
+ # **Special Errors**
+ #
+ # * * *Code: RestoreAlreadyInProgress*
+ #
+ #   * *Cause: Object restore is already in progress. (This error does
+ #     not apply to SELECT type requests.)*
+ #
+ #   * *HTTP Status Code: 409 Conflict*
+ #
+ #   * *SOAP Fault Code Prefix: Client*
+ #
+ # * * *Code: GlacierExpeditedRetrievalNotAvailable*
+ #
+ #   * *Cause: Expedited retrievals are currently not available.
Try + # again later. (Returned if there is insufficient capacity to + # process the Expedited request. This error applies only to + # Expedited retrievals and not to S3 Standard or Bulk retrievals.)* + # + # * *HTTP Status Code: 503* + # + # * *SOAP Fault Code Prefix: N/A* + # + # **Related Resources** + # + # * [PutBucketLifecycleConfiguration][11] + # + # * [GetBucketNotificationConfiguration][13] + # + # * [SQL Reference for Amazon S3 Select and S3 Glacier Select ][7] in + # the *Amazon S3 User Guide* + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + # [12]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html + # + # @option params [required, String] :bucket + # The bucket name containing the object to restore. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [required, String] :key + # Object key for which the action was initiated. + # + # @option params [String] :version_id + # VersionId used to reference a specific version of the object. + # + # @option params [Types::RestoreRequest] :restore_request + # Container for restore job parameters. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. 
For information about downloading objects from Requester
+ #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ #   in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Types::RestoreObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::RestoreObjectOutput#request_charged #request_charged} => String
+ # * {Types::RestoreObjectOutput#restore_output_path #restore_output_path} => String
+ #
+ #
+ # @example Example: To restore an archived object
+ #
+ #   # The following example restores for one day an archived copy of an object back into an Amazon S3 bucket.
+ #
+ #   resp = client.restore_object({
+ #     bucket: "examplebucket",
+ #     key: "archivedobjectkey",
+ #     restore_request: {
+ #       days: 1,
+ #       glacier_job_parameters: {
+ #         tier: "Expedited",
+ #       },
+ #     },
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.restore_object({
+ #     bucket: "BucketName", # required
+ #     key: "ObjectKey", # required
+ #     version_id: "ObjectVersionId",
+ #     restore_request: {
+ #       days: 1,
+ #       glacier_job_parameters: {
+ #         tier: "Standard", # required, accepts Standard, Bulk, Expedited
+ #       },
+ #       type: "SELECT", # accepts SELECT
+ #       tier: "Standard", # accepts Standard, Bulk, Expedited
+ #       description: "Description",
+ #       select_parameters: {
+ #         input_serialization: { # required
+ #           csv: {
+ #             file_header_info: "USE", # accepts USE, IGNORE, NONE
+ #             comments: "Comments",
+ #             quote_escape_character: "QuoteEscapeCharacter",
+ #             record_delimiter: "RecordDelimiter",
+ #             field_delimiter: "FieldDelimiter",
+ #             quote_character: "QuoteCharacter",
+ #             allow_quoted_record_delimiter: false,
+ #           },
+ #           compression_type: "NONE", # accepts NONE, GZIP, BZIP2
+ #           json: {
+ #             type: "DOCUMENT", # accepts DOCUMENT, LINES
+ #           },
+ #           parquet: {
+ #           },
+ #         },
+ #         expression_type: "SQL", # required, accepts SQL
+ #         expression: "Expression", # required
+ #         output_serialization: { # required
+ #           csv: {
+ #             quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED
+ #             quote_escape_character: "QuoteEscapeCharacter",
+ #             record_delimiter: "RecordDelimiter",
+ #             field_delimiter: "FieldDelimiter",
+ #             quote_character: "QuoteCharacter",
+ #           },
+ #           json: {
+ #             record_delimiter: "RecordDelimiter",
+ #           },
+ #         },
+ #       },
+ #       output_location: {
+ #         s3: {
+ #           bucket_name: "BucketName", # required
+ #           prefix: "LocationPrefix", # required
+ # encryption: { + # encryption_type: "AES256", # required, accepts AES256, aws:kms + # kms_key_id: "SSEKMSKeyId", + # kms_context: "KMSContext", + # }, + # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # access_control_list: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP + # }, + # ], + # tagging: { + # tag_set: [ # required + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # user_metadata: [ + # { + # name: "MetadataKey", + # value: "MetadataValue", + # }, + # ], + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # }, + # }, + # }, + # request_payer: "requester", # accepts requester + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.request_charged #=> String, one of "requester" + # resp.restore_output_path #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject AWS API Documentation + # + # @overload restore_object(params = {}) + # @param [Hash] params ({}) + def restore_object(params = {}, options = {}) + req = build_request(:restore_object, params) + req.send_request(options) + end + + # This action filters the contents of an Amazon S3 object based on a + # simple structured query language (SQL) statement. In the request, + # along with the SQL expression, you must also specify a data + # serialization format (JSON, CSV, or Apache Parquet) of the object. + # Amazon S3 uses this format to parse object data into records, and + # returns only records that match the specified SQL expression. You must + # also specify the data serialization format for the response. + # + # This action is not supported by Amazon S3 on Outposts. + # + # For more information about Amazon S3 Select, see [Selecting Content + # from Objects][1] and [SELECT Command][2] in the *Amazon S3 User + # Guide*. + # + # For more information about using SQL with Amazon S3 Select, see [ SQL + # Reference for Amazon S3 Select and S3 Glacier Select][3] in the + # *Amazon S3 User Guide*. + # + # + # + # **Permissions** + # + # You must have `s3:GetObject` permission for this operation. Amazon S3 + # Select does not support anonymous access. For more information about + # permissions, see [Specifying Permissions in a Policy][4] in the + # *Amazon S3 User Guide*. + # + # + # + # *Object Data Formats* + # + # You can use Amazon S3 Select to query objects that have the following + # format properties: + # + # * *CSV, JSON, and Parquet* - Objects must be in CSV, JSON, or Parquet + # format. + # + # * *UTF-8* - UTF-8 is the only encoding type Amazon S3 Select supports. + # + # * *GZIP or BZIP2* - CSV and JSON files can be compressed using GZIP or + # BZIP2. GZIP and BZIP2 are the only compression formats that Amazon + # S3 Select supports for CSV and JSON files. Amazon S3 Select supports + # columnar compression for Parquet using GZIP or Snappy. Amazon S3 + # Select does not support whole-object compression for Parquet + # objects. 
+ #
+ # * *Server-side encryption* - Amazon S3 Select supports querying
+ #   objects that are protected with server-side encryption.
+ #
+ #   For objects that are encrypted with customer-provided encryption
+ #   keys (SSE-C), you must use HTTPS, and you must use the headers that
+ #   are documented in [GetObject][5]. For more information about
+ #   SSE-C, see [Server-Side Encryption (Using Customer-Provided
+ #   Encryption Keys)][6] in the *Amazon S3 User Guide*.
+ #
+ #   For objects that are encrypted with Amazon S3 managed encryption
+ #   keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS),
+ #   server-side encryption is handled transparently, so you don't need
+ #   to specify anything. For more information about server-side
+ #   encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using
+ #   Server-Side Encryption][7] in the *Amazon S3 User Guide*.
+ #
+ # **Working with the Response Body**
+ #
+ # Given that the response size is unknown, Amazon S3 Select streams the
+ # response as a series of messages and includes a `Transfer-Encoding`
+ # header with `chunked` as its value in the response. For more
+ # information, see [Appendix: SelectObjectContent Response][8].
+ #
+ #
+ #
+ # **GetObject Support**
+ #
+ # The `SelectObjectContent` action does not support the following
+ # `GetObject` functionality. For more information, see [GetObject][5].
+ #
+ # * `Range`: Although you can specify a scan range for an Amazon S3
+ #   Select request (see [SelectObjectContentRequest - ScanRange][9] in
+ #   the request parameters), you cannot specify the range of bytes of an
+ #   object to return.
+ #
+ # * GLACIER, DEEP\_ARCHIVE, and REDUCED\_REDUNDANCY storage classes: You
+ #   cannot specify the GLACIER, DEEP\_ARCHIVE, or `REDUCED_REDUNDANCY`
+ #   storage classes. For more information about storage classes, see
+ #   [Storage Classes][10] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # **Special Errors**
+ #
+ # For a list of special errors for this operation, see [List of SELECT
+ # Object Content Error Codes][11].
+ #
+ # **Related Resources**
+ #
+ # * [GetObject][5]
+ #
+ # * [GetBucketLifecycleConfiguration][12]
+ #
+ # * [PutBucketLifecycleConfiguration][13]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ #
+ # @option params [required, String] :bucket
+ #   The S3 bucket.
+ #
+ # @option params [required, String] :key
+ #   The object key.
+ #
+ # @option params [String] :sse_customer_algorithm
+ #   The server-side encryption (SSE) algorithm used to encrypt the object.
+ #   This parameter is needed only when the object was created using a
+ #   checksum algorithm. For more information, see [Protecting data using
+ #   SSE-C keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key
+ #   The server-side encryption (SSE) customer managed key. This parameter
+ #   is needed only when the object was created using a checksum algorithm.
+ #   For more information, see [Protecting data using SSE-C keys][1] in the
+ #   *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :sse_customer_key_md5
+ #   The MD5 server-side encryption (SSE) customer managed key. This
+ #   parameter is needed only when the object was created using a checksum
+ #   algorithm. For more information, see [Protecting data using SSE-C
+ #   keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [required, String] :expression
+ #   The expression that is used to query the object.
+ #
+ # @option params [required, String] :expression_type
+ #   The type of the provided expression (for example, SQL).
+ #
+ # @option params [Types::RequestProgress] :request_progress
+ #   Specifies if periodic request progress information should be enabled.
+ #
+ # @option params [required, Types::InputSerialization] :input_serialization
+ #   Describes the format of the data in the object that is being queried.
+ #
+ # @option params [required, Types::OutputSerialization] :output_serialization
+ #   Describes the format of the data that you want Amazon S3 to return in
+ #   the response.
+ #
+ # @option params [Types::ScanRange] :scan_range
+ #   Specifies the byte range of the object to get the records from. A
+ #   record is processed when its first byte is contained by the range.
+ #   This parameter is optional, but when specified, it must not be empty.
+ #   See RFC 2616, Section 14.35.1 about how to specify the start and end
+ #   of the range.
+ #
+ #   `ScanRange` may be used in the following ways:
+ #
+ #   * `<scanrange><start>50</start><end>100</end></scanrange>` - process
+ #     only the records starting between the bytes 50 and 100 (inclusive,
+ #     counting from zero)
+ #
+ #   * `<scanrange><start>50</start></scanrange>` - process only the
+ #     records starting after the byte 50
+ #
+ #   * `<scanrange><end>50</end></scanrange>` - process only the records
+ #     within the last 50 bytes of the file.
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Types::SelectObjectContentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::SelectObjectContentOutput#payload #payload} => Types::SelectObjectContentEventStream
+ #
+ # @example EventStream Operation Example
+ #
+ #   You can process events immediately as they arrive, or wait until the
+ #   full response is complete and iterate through the event stream
+ #   enumerator.
+ #
+ #   To interact with events immediately, register #select_object_content
+ #   with callbacks. Callbacks can be registered for specific events or
+ #   for all events; a callback for errors in the event stream can also
+ #   be registered.
+ #
+ #   Callbacks can be passed in through the `:event_stream_handler` option
+ #   or within a block statement attached directly to the
+ #   #select_object_content call. A hybrid pattern of both is also
+ #   supported.
+ #
+ #   The `:event_stream_handler` option takes either a Proc object or an
+ #   Aws::S3::EventStreams::SelectObjectContentEventStream object.
+ #
+ #   Usage pattern a): callbacks with a block attached to #select_object_content
+ #   Example of registering callbacks for all event types and the error event
+ #
+ #     client.select_object_content( # params input# ) do |stream|
+ #       stream.on_error_event do |event|
+ #         # catch unmodeled error event in the stream
+ #         raise event
+ #         # => Aws::Errors::EventError
+ #         # event.event_type => :error
+ #         # event.error_code => String
+ #         # event.error_message => String
+ #       end
+ #
+ #       stream.on_event do |event|
+ #         # process all events as they arrive
+ #         puts event.event_type
+ #         ...
+ #       end
+ #
+ #     end
+ #
+ #   Usage pattern b): pass in `:event_stream_handler` for #select_object_content
+ #
+ #   1) create an Aws::S3::EventStreams::SelectObjectContentEventStream object
+ #   Example of registering callbacks for specific events
+ #
+ #     handler = Aws::S3::EventStreams::SelectObjectContentEventStream.new
+ #     handler.on_records_event do |event|
+ #       event # => Aws::S3::Types::Records
+ #     end
+ #     handler.on_stats_event do |event|
+ #       event # => Aws::S3::Types::Stats
+ #     end
+ #     handler.on_progress_event do |event|
+ #       event # => Aws::S3::Types::Progress
+ #     end
+ #     handler.on_cont_event do |event|
+ #       event # => Aws::S3::Types::Cont
+ #     end
+ #     handler.on_end_event do |event|
+ #       event # => Aws::S3::Types::End
+ #     end
+ #
+ #     client.select_object_content( # params input #, event_stream_handler: handler)
+ #
+ #   2) use a Ruby Proc object
+ #   Example of registering callbacks for specific events
+ #
+ #     handler = Proc.new do |stream|
+ #       stream.on_records_event do |event|
+ #         event # => Aws::S3::Types::Records
+ #       end
+ #       stream.on_stats_event do |event|
+ #         event # => Aws::S3::Types::Stats
+ #       end
+ #       stream.on_progress_event do |event|
+ #         event # => Aws::S3::Types::Progress
+ #       end
+ #       stream.on_cont_event do |event|
+ #         event # => Aws::S3::Types::Cont
+ #       end
+ #       stream.on_end_event do |event|
+ #         event # => Aws::S3::Types::End
+ #       end
+ #     end
+ #
+ #     client.select_object_content( # params input #, event_stream_handler: handler)
+ #
+ #   Usage pattern c): hybrid pattern of a) and b)
+ #
+ #     handler = Aws::S3::EventStreams::SelectObjectContentEventStream.new
+ #     handler.on_records_event do |event|
+ #       event # => Aws::S3::Types::Records
+ #     end
+ #     handler.on_stats_event do |event|
+ #       event # => Aws::S3::Types::Stats
+ #     end
+ #     handler.on_progress_event do |event|
+ #       event # => Aws::S3::Types::Progress
+ #     end
+ #     handler.on_cont_event do |event|
+ #       event # => Aws::S3::Types::Cont
+ #     end
+ #     handler.on_end_event do |event|
+ #       event # => Aws::S3::Types::End
+ #     end
+ #
+ #     client.select_object_content( # params input #, event_stream_handler: handler) do |stream|
+ #       stream.on_error_event do |event|
+ #         # catch unmodeled error event in the stream
+ #         raise event
+ #         # => Aws::Errors::EventError
+ #         # event.event_type => :error
+ #         # event.error_code => String
+ #         # event.error_message => String
+ #       end
+ #     end
+ #
+ #   Besides the above usage patterns, which process events as they
+ #   arrive immediately, you can also iterate through the events after
+ #   the response is complete.
+ #
+ #   Events are available at resp.payload # => Enumerator
+ #   For a parameter input example, refer to the following request syntax.
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.select_object_content({
+ #     bucket: "BucketName", # required
+ #     key: "ObjectKey", # required
+ #     sse_customer_algorithm: "SSECustomerAlgorithm",
+ #     sse_customer_key: "SSECustomerKey",
+ #     sse_customer_key_md5: "SSECustomerKeyMD5",
+ #     expression: "Expression", # required
+ #     expression_type: "SQL", # required, accepts SQL
+ #     request_progress: {
+ #       enabled: false,
+ #     },
+ #     input_serialization: { # required
+ #       csv: {
+ #         file_header_info: "USE", # accepts USE, IGNORE, NONE
+ #         comments: "Comments",
+ #         quote_escape_character: "QuoteEscapeCharacter",
+ #         record_delimiter: "RecordDelimiter",
+ #         field_delimiter: "FieldDelimiter",
+ #         quote_character: "QuoteCharacter",
+ #         allow_quoted_record_delimiter: false,
+ #       },
+ #       compression_type: "NONE", # accepts NONE, GZIP, BZIP2
+ #       json: {
+ #         type: "DOCUMENT", # accepts DOCUMENT, LINES
+ #       },
+ #       parquet: {
+ #       },
+ #     },
+ #     output_serialization: { # required
+ #       csv: {
+ #         quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED
+ #         quote_escape_character: "QuoteEscapeCharacter",
+ #         record_delimiter: "RecordDelimiter",
+ #         field_delimiter: "FieldDelimiter",
+ #         quote_character: "QuoteCharacter",
+ #       },
+ #       json: {
+ #         record_delimiter: "RecordDelimiter",
+ #       },
+ #     },
+ #     scan_range: {
+ #       start: 1,
+ #       end: 1,
+ #     },
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   All events are available at resp.payload:
+ #   resp.payload #=> Enumerator
+ #   resp.payload.event_types #=> [:records, :stats, :progress, :cont, :end]
+ #
+ #   For the :records event, available at the #on_records_event callback and the response event stream enumerator:
+ #     event.payload #=> IO
+ #
+ #   For the :stats event, available at the #on_stats_event callback and the response event stream enumerator:
+ #     event.details.bytes_scanned #=> Integer
+ #     event.details.bytes_processed #=> Integer
+ #     event.details.bytes_returned #=> Integer
+ #
+ #   For the :progress event, available at the #on_progress_event callback and the response event stream enumerator:
+ #     event.details.bytes_scanned #=> Integer
+ #     event.details.bytes_processed #=> Integer
+ #     event.details.bytes_returned #=> Integer
+ #
+ #   For the :cont event, available at the #on_cont_event callback and the response event stream enumerator:
+ #     #=> EmptyStruct
+ #   For the :end event, available at the #on_end_event callback and the response event stream enumerator:
+ #     #=> EmptyStruct
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent AWS API Documentation
+ #
+ # @overload select_object_content(params = {})
+ # @param [Hash] params ({})
+ def select_object_content(params = {}, options = {}, &block)
+   params = params.dup
+   event_stream_handler = case handler = params.delete(:event_stream_handler)
+     when EventStreams::SelectObjectContentEventStream then handler
+     when Proc then EventStreams::SelectObjectContentEventStream.new.tap(&handler)
+     when nil then EventStreams::SelectObjectContentEventStream.new
+     else
+       msg = "expected :event_stream_handler to be a block or "\
+             "instance of Aws::S3::EventStreams::SelectObjectContentEventStream"\
+             ", got `#{handler.inspect}` instead"
+       raise ArgumentError, msg
+     end
+
+   yield(event_stream_handler) if block_given?
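+
+   # The resolved handler is attached to the request context below so
+   # that the event stream decode handler can dispatch each decoded
+   # event to the registered callbacks as it arrives.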
+
+   req = build_request(:select_object_content, params)
+
+   req.context[:event_stream_handler] = event_stream_handler
+   req.handlers.add(Aws::Binary::DecodeHandler, priority: 95)
+
+   req.send_request(options, &block)
+ end
+
+ # Uploads a part in a multipart upload.
+ #
+ # In this operation, you provide part data in your request. However, you
+ # have an option to specify your existing Amazon S3 object as a data
+ # source for the part you are uploading. To upload a part from an
+ # existing object, you use the [UploadPartCopy][1] operation.
+ #
+ #
+ #
+ # You must initiate a multipart upload (see [CreateMultipartUpload][2])
+ # before you can upload any part. In response to your initiate request,
+ # Amazon S3 returns an upload ID, a unique identifier that you must
+ # include in your upload part request.
+ #
+ # Part numbers can be any number from 1 to 10,000, inclusive. A part
+ # number uniquely identifies a part and also defines its position within
+ # the object being created. If you upload a new part using the same part
+ # number that was used with a previous part, the previously uploaded
+ # part is overwritten.
+ #
+ # For information about maximum and minimum part sizes and other
+ # multipart upload specifications, see [Multipart upload limits][3] in
+ # the *Amazon S3 User Guide*.
+ #
+ # To ensure that data is not corrupted when traversing the network,
+ # specify the `Content-MD5` header in the upload part request. Amazon S3
+ # checks the part data against the provided MD5 value. If they do not
+ # match, Amazon S3 returns an error.
+ #
+ # If the upload request is signed with Signature Version 4, then Amazon
+ # Web Services S3 uses the `x-amz-content-sha256` header as a checksum
+ # instead of `Content-MD5`. For more information, see [Authenticating
+ # Requests: Using the Authorization Header (Amazon Web Services
+ # Signature Version 4)][4].
+ #
+ # **Note:** After you initiate a multipart upload and upload one or more
+ # parts, you must either complete or abort the multipart upload in order
+ # to stop getting charged for storage of the uploaded parts. Only after
+ # you either complete or abort the multipart upload does Amazon S3 free
+ # up the parts storage and stop charging you for it.
+ #
+ # For more information on multipart uploads, go to [Multipart Upload
+ # Overview][5] in the *Amazon S3 User Guide*.
+ #
+ # For information on the permissions required to use the multipart
+ # upload API, go to [Multipart Upload and Permissions][6] in the *Amazon
+ # S3 User Guide*.
+ #
+ # You can optionally request server-side encryption where Amazon S3
+ # encrypts your data as it writes it to disks in its data centers and
+ # decrypts it for you when you access it. You have the option of
+ # providing your own encryption key, or you can use the Amazon Web
+ # Services managed encryption keys. If you choose to provide your own
+ # encryption key, the request headers you provide in the request must
+ # match the headers you used in the request to initiate the upload by
+ # using [CreateMultipartUpload][2]. For more information, go to [Using
+ # Server-Side Encryption][7] in the *Amazon S3 User Guide*.
+ #
+ # Server-side encryption is supported by the S3 Multipart Upload
+ # actions. Unless you are using a customer-provided encryption key, you
+ # don't need to specify the encryption parameters in each UploadPart
+ # request. Instead, you only need to specify the server-side encryption
+ # parameters in the initial Initiate Multipart request.
For more + # information, see [CreateMultipartUpload][2]. + # + # If you requested server-side encryption using a customer-provided + # encryption key in your initiate multipart upload request, you must + # provide identical encryption information in each part upload using the + # following headers. + # + # * x-amz-server-side-encryption-customer-algorithm + # + # * x-amz-server-side-encryption-customer-key + # + # * x-amz-server-side-encryption-customer-key-MD5 + # + # **Special Errors** + # + # * * *Code: NoSuchUpload* + # + # * *Cause: The specified multipart upload does not exist. The upload + # ID might be invalid, or the multipart upload might have been + # aborted or completed.* + # + # * HTTP Status Code: 404 Not Found + # + # * *SOAP Fault Code Prefix: Client* + # + # **Related Resources** + # + # * [CreateMultipartUpload][2] + # + # * [CompleteMultipartUpload][8] + # + # * [AbortMultipartUpload][9] + # + # * [ListParts][10] + # + # * [ListMultipartUploads][11] + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html + # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html + # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + # + # @option params [String, StringIO, File] :body + # Object data. + # + # @option params [required, String] :bucket + # The name of the bucket to which the multipart upload was initiated. + # + # When using this action with an access point, you must direct requests + # to the access point hostname. The access point hostname takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the bucket + # name. For more information about access point ARNs, see [Using access + # points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # + # @option params [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. 
+ #
+ # @option params [String] :content_md5
+ #   The base64-encoded 128-bit MD5 digest of the part data. This parameter
+ #   is auto-populated when using the command from the CLI. This parameter
+ #   is required if object lock parameters are specified.
+ #
+ # @option params [String] :checksum_algorithm
+ #   Indicates the algorithm used to create the checksum for the object
+ #   when using the SDK. This header will not provide any additional
+ #   functionality if not using the SDK. When sending this header, there
+ #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+ #   `400 Bad Request`. For more information, see [Checking object
+ #   integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #   If you provide an individual checksum, Amazon S3 ignores any provided
+ #   `ChecksumAlgorithm` parameter.
+ #
+ #   This checksum algorithm must be the same for all parts, and it must
+ #   match the checksum value supplied in the `CreateMultipartUpload`
+ #   request.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :checksum_crc32
+ #   This header can be used as a data integrity check to verify that the
+ #   data received is the same data that was originally sent. This header
+ #   specifies the base64-encoded, 32-bit CRC32 checksum of the object. For
+ #   more information, see [Checking object integrity][1] in the *Amazon S3
+ #   User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :checksum_crc32c
+ #   This header can be used as a data integrity check to verify that the
+ #   data received is the same data that was originally sent. This header
+ #   specifies the base64-encoded, 32-bit CRC32C checksum of the object.
+ #   For more information, see [Checking object integrity][1] in the
+ #   *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :checksum_sha1
+ #   This header can be used as a data integrity check to verify that the
+ #   data received is the same data that was originally sent. This header
+ #   specifies the base64-encoded, 160-bit SHA-1 digest of the object. For
+ #   more information, see [Checking object integrity][1] in the *Amazon S3
+ #   User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :checksum_sha256
+ #   This header can be used as a data integrity check to verify that the
+ #   data received is the same data that was originally sent. This header
+ #   specifies the base64-encoded, 256-bit SHA-256 digest of the object.
+ #   For more information, see [Checking object integrity][1] in the
+ #   *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [required, String] :key
+ #   Object key for which the multipart upload was initiated.
+ #
+ # @option params [required, Integer] :part_number
+ #   Part number of the part being uploaded. This is a positive integer
+ #   between 1 and 10,000.
+ #
+ # @option params [required, String] :upload_id
+ #   Upload ID identifying the multipart upload whose part is being
+ #   uploaded.
+ #
+ # @option params [String] :sse_customer_algorithm
+ #   Specifies the algorithm to use when encrypting the object (for
+ #   example, AES256).
+ #
+ # @option params [String] :sse_customer_key
+ #   Specifies the customer-provided encryption key for Amazon S3 to use in
+ #   encrypting data. This value is used to store the object and then it is
+ #   discarded; Amazon S3 does not store the encryption key. The key must
+ #   be appropriate for use with the algorithm specified in the
+ #   `x-amz-server-side-encryption-customer-algorithm` header. This must be
+ #   the same encryption key specified in the initiate multipart upload
+ #   request.
+ #
+ # @option params [String] :sse_customer_key_md5
+ #   Specifies the 128-bit MD5 digest of the encryption key according to
+ #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+ #   ensure that the encryption key was transmitted without error.
+ #
+ # @option params [String] :request_payer
+ #   Confirms that the requester knows that they will be charged for the
+ #   request. Bucket owners need not specify this parameter in their
+ #   requests. For information about downloading objects from Requester
+ #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ #   in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ #
+ # @option params [String] :expected_bucket_owner
+ #   The account ID of the expected bucket owner. If the bucket is owned by
+ #   a different account, the request fails with the HTTP status code `403
+ #   Forbidden` (access denied).
+ #
+ # @return [Types::UploadPartOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::UploadPartOutput#server_side_encryption #server_side_encryption} => String
+ # * {Types::UploadPartOutput#etag #etag} => String
+ # * {Types::UploadPartOutput#checksum_crc32 #checksum_crc32} => String
+ # * {Types::UploadPartOutput#checksum_crc32c #checksum_crc32c} => String
+ # * {Types::UploadPartOutput#checksum_sha1 #checksum_sha1} => String
+ # * {Types::UploadPartOutput#checksum_sha256 #checksum_sha256} => String
+ # * {Types::UploadPartOutput#sse_customer_algorithm #sse_customer_algorithm} => String
+ # * {Types::UploadPartOutput#sse_customer_key_md5 #sse_customer_key_md5} => String
+ # * {Types::UploadPartOutput#ssekms_key_id #ssekms_key_id} => String
+ # * {Types::UploadPartOutput#bucket_key_enabled #bucket_key_enabled} => Boolean
+ # * {Types::UploadPartOutput#request_charged #request_charged} => String
+ #
+ #
+ # @example Example: To upload a part
+ #
+ #   # The following example uploads part 1 of a multipart upload. The example specifies a file name for the part data. The
+ #   # Upload ID is the same as the one returned by the initiate multipart upload.
+ #
+ #   resp = client.upload_part({
+ #     body: "fileToUpload",
+ #     bucket: "examplebucket",
+ #     key: "examplelargeobject",
+ #     part_number: 1,
+ #     upload_id: "xadcOB_7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--",
+ #   })
+ #
+ #   resp.to_h outputs the following:
+ #   {
+ #     etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"",
+ #   }
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.upload_part({
+ #     body: source_file,
+ #     bucket: "BucketName", # required
+ #     content_length: 1,
+ #     content_md5: "ContentMD5",
+ #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ #     checksum_crc32: "ChecksumCRC32",
+ #     checksum_crc32c: "ChecksumCRC32C",
+ #     checksum_sha1: "ChecksumSHA1",
+ #     checksum_sha256: "ChecksumSHA256",
+ #     key: "ObjectKey", # required
+ #     part_number: 1, # required
+ #     upload_id: "MultipartUploadId", # required
+ #     sse_customer_algorithm: "SSECustomerAlgorithm",
+ #     sse_customer_key: "SSECustomerKey",
+ #     sse_customer_key_md5: "SSECustomerKeyMD5",
+ #     request_payer: "requester", # accepts requester
+ #     expected_bucket_owner: "AccountId",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.server_side_encryption #=> String, one of "AES256", "aws:kms"
+ #   resp.etag #=> String
+ #   resp.checksum_crc32 #=> String
+ #   resp.checksum_crc32c #=> String
+ #   resp.checksum_sha1 #=> String
+ #   resp.checksum_sha256 #=> String
+ #   resp.sse_customer_algorithm #=> String
+ #   resp.sse_customer_key_md5 #=> String
+ #   resp.ssekms_key_id #=> String
+ #   resp.bucket_key_enabled #=> Boolean
+ #   resp.request_charged #=> String, one of "requester"
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart AWS API Documentation
+ #
+ # @overload upload_part(params = {})
+ # @param [Hash] params ({})
+ def upload_part(params = {}, options = {})
+   req = build_request(:upload_part, params)
+   req.send_request(options)
+ end
+
+ # Uploads a part by copying data from an existing object as data source.
+ # You specify the data source by adding the request header
+ # `x-amz-copy-source` in your request and a byte range by adding the
+ # request header `x-amz-copy-source-range` in your request.
+ #
+ # For information about maximum and minimum part sizes and other
+ # multipart upload specifications, see [Multipart upload limits][1] in
+ # the *Amazon S3 User Guide*.
+ #
+ # Instead of using an existing object as part data, you might use the
+ # [UploadPart][2] action and provide data in your request.
+ #
+ #
+ #
+ # You must initiate a multipart upload before you can upload any part.
+ # In response to your initiate request, Amazon S3 returns a unique
+ # identifier, the upload ID, that you must include in your upload part
+ # request.
+ #
+ # For more information about using the `UploadPartCopy` operation, see
+ # the following:
+ #
+ # * For conceptual information about multipart uploads, see [Uploading
+ #   Objects Using Multipart Upload][3] in the *Amazon S3 User Guide*.
+ #
+ # * For information about permissions required to use the multipart
+ #   upload API, see [Multipart Upload and Permissions][4] in the *Amazon
+ #   S3 User Guide*.
+ #
+ # * For information about copying objects using a single atomic action
+ #   vs. a multipart upload, see [Operations on Objects][5] in the
+ #   *Amazon S3 User Guide*.
+ #
+ # * For information about using server-side encryption with
+ #   customer-provided encryption keys with the `UploadPartCopy`
+ #   operation, see [CopyObject][6] and [UploadPart][2].
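+ #
+ # As a brief sketch (the bucket, key, and upload ID values are
+ # hypothetical placeholders), a part can be copied from an existing
+ # object by byte range:
+ #
+ #     client.upload_part_copy({
+ #       bucket: "amzn-s3-demo-bucket",
+ #       key: "target-large-object",
+ #       copy_source: "amzn-s3-demo-bucket/source-object",
+ #       copy_source_range: "bytes=0-5242879",
+ #       part_number: 1,
+ #       upload_id: "ExampleUploadId",
+ #     })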
+ #
+ # Note the following additional considerations about the request headers
+ # `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`,
+ # `x-amz-copy-source-if-unmodified-since`, and
+ # `x-amz-copy-source-if-modified-since`:
+ #
+ #
+ #
+ # * **Consideration 1** - If both of the `x-amz-copy-source-if-match`
+ # and `x-amz-copy-source-if-unmodified-since` headers are present in
+ # the request as follows:
+ #
+ # the `x-amz-copy-source-if-match` condition evaluates to `true`, and
+ #
+ # the `x-amz-copy-source-if-unmodified-since` condition evaluates to
+ # `false`,
+ #
+ # then Amazon S3 returns `200 OK` and copies the data.
+ #
+ # * **Consideration 2** - If both of the
+ # `x-amz-copy-source-if-none-match` and
+ # `x-amz-copy-source-if-modified-since` headers are present in the
+ # request as follows:
+ #
+ # the `x-amz-copy-source-if-none-match` condition evaluates to `false`,
+ # and
+ #
+ # the `x-amz-copy-source-if-modified-since` condition evaluates to
+ # `true`,
+ #
+ # then Amazon S3 returns a `412 Precondition Failed` response code.
+ #
+ # **Versioning**
+ #
+ # If your bucket has versioning enabled, you could have multiple
+ # versions of the same object. By default, `x-amz-copy-source`
+ # identifies the current version of the object to copy. If the current
+ # version is a delete marker and you don't specify a versionId in the
+ # `x-amz-copy-source`, Amazon S3 returns a 404 error, because the object
+ # does not exist. If you specify versionId in the `x-amz-copy-source`
+ # and the versionId is a delete marker, Amazon S3 returns an HTTP 400
+ # error, because you are not allowed to specify a delete marker as a
+ # version for the `x-amz-copy-source`.
+ #
+ # You can optionally specify a specific version of the source object to
+ # copy by adding the `versionId` subresource as shown in the following
+ # example:
+ #
+ # `x-amz-copy-source: /bucket/object?versionId=version id`
+ #
+ # **Special Errors**
+ #
+ # * * *Code: NoSuchUpload*
+ #
+ # * *Cause: The specified multipart upload does not exist. The upload
+ # ID might be invalid, or the multipart upload might have been
+ # aborted or completed.*
+ #
+ # * *HTTP Status Code: 404 Not Found*
+ #
+ # * * *Code: InvalidRequest*
+ #
+ # * *Cause: The specified copy source is not supported as a byte-range
+ # copy source.*
+ #
+ # * *HTTP Status Code: 400 Bad Request*
+ #
+ # **Related Resources**
+ #
+ # * [CreateMultipartUpload][7]
+ #
+ # * [UploadPart][2]
+ #
+ # * [CompleteMultipartUpload][8]
+ #
+ # * [AbortMultipartUpload][9]
+ #
+ # * [ListParts][10]
+ #
+ # * [ListMultipartUploads][11]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+ #
+ # @option params [required, String] :bucket
+ # The bucket name.
+ #
+ # When using this action with an access point, you must direct requests
+ # to the access point hostname. The access point hostname takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the bucket
+ # name. For more information about access point ARNs, see [Using access
+ # points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form
+ # `AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ #
+ # @option params [required, String] :copy_source
+ # Specifies the source object for the copy operation. You specify the
+ # value in one of two formats, depending on whether you want to access
+ # the source object through an [access point][1]:
+ #
+ # * For objects not accessed through an access point, specify the name
+ # of the source bucket and the key of the source object, separated by a
+ # slash (/). For example, to copy the object `reports/january.pdf`
+ # from the bucket `awsexamplebucket`, use
+ # `awsexamplebucket/reports/january.pdf`. The value must be
+ # URL-encoded.
+ #
+ # * For objects accessed through access points, specify the Amazon
+ # Resource Name (ARN) of the object as accessed through the access
+ # point, in the format
+ # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
+ #
+ # For example, to copy the object `reports/january.pdf` through access
+ # point `my-access-point` owned by account `123456789012` in Region
+ # `us-west-2`, use the URL encoding of
+ # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # Amazon S3 supports copy operations using access points only when the
+ # source and destination buckets are in the same Amazon Web Services
+ # Region.
+ #
+ #
+ #
+ # Alternatively, for objects accessed through Amazon S3 on Outposts,
+ # specify the ARN of the object as accessed in the format
+ # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
+ # For example, to copy the object `reports/january.pdf` through
+ # outpost `my-outpost` owned by account `123456789012` in Region
+ # `us-west-2`, use the URL encoding of
+ # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # To copy a specific version of an object, append
+ # `?versionId=<version-id>` to the value (for example,
+ # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
+ # If you don't specify a version ID, Amazon S3 copies the latest
+ # version of the source object.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ #
+ # @option params [String] :copy_source_if_match
+ # Copies the object if its entity tag (ETag) matches the specified tag.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since
+ # Copies the object if it has been modified since the specified time.
+ #
+ # @option params [String] :copy_source_if_none_match
+ # Copies the object if its entity tag (ETag) is different than the
+ # specified ETag.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since
+ # Copies the object if it hasn't been modified since the specified
+ # time.
+ #
+ # @option params [String] :copy_source_range
+ # The range of bytes to copy from the source object. The range value
+ # must use the form bytes=first-last, where the first and last are the
+ # zero-based byte offsets to copy. For example, bytes=0-9 indicates that
+ # you want to copy the first 10 bytes of the source. You can copy a
+ # range only if the source object is greater than 5 MB.
+ #
+ # @option params [required, String] :key
+ # Object key for which the multipart upload was initiated.
+ #
+ # @option params [required, Integer] :part_number
+ # Part number of the part being copied. This is a positive integer between 1
+ # and 10,000.
+ #
+ # @option params [required, String] :upload_id
+ # Upload ID identifying the multipart upload whose part is being copied.
+ #
+ # @option params [String] :sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ #
+ # @option params [String] :sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use in
+ # encrypting data. This value is used to store the object and then it is
+ # discarded; Amazon S3 does not store the encryption key. The key must
+ # be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header. This must be
+ # the same encryption key specified in the initiate multipart upload
+ # request.
+ #
+ # @option params [String] :sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321.
Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # + # @option params [String] :copy_source_sse_customer_algorithm + # Specifies the algorithm to use when decrypting the source object (for + # example, AES256). + # + # @option params [String] :copy_source_sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use to + # decrypt the source object. The encryption key provided in this header + # must be one that was used when the source object was created. + # + # @option params [String] :copy_source_sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # + # @option params [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # + # @option params [String] :expected_bucket_owner + # The account ID of the expected destination bucket owner. If the + # destination bucket is owned by a different account, the request fails + # with the HTTP status code `403 Forbidden` (access denied). + # + # @option params [String] :expected_source_bucket_owner + # The account ID of the expected source bucket owner. If the source + # bucket is owned by a different account, the request fails with the + # HTTP status code `403 Forbidden` (access denied). + # + # @return [Types::UploadPartCopyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::UploadPartCopyOutput#copy_source_version_id #copy_source_version_id} => String + # * {Types::UploadPartCopyOutput#copy_part_result #copy_part_result} => Types::CopyPartResult + # * {Types::UploadPartCopyOutput#server_side_encryption #server_side_encryption} => String + # * {Types::UploadPartCopyOutput#sse_customer_algorithm #sse_customer_algorithm} => String + # * {Types::UploadPartCopyOutput#sse_customer_key_md5 #sse_customer_key_md5} => String + # * {Types::UploadPartCopyOutput#ssekms_key_id #ssekms_key_id} => String + # * {Types::UploadPartCopyOutput#bucket_key_enabled #bucket_key_enabled} => Boolean + # * {Types::UploadPartCopyOutput#request_charged #request_charged} => String + # + # + # @example Example: To upload a part by copying byte range from an existing object as data source + # + # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as + # # data source. 
+ # + # resp = client.upload_part_copy({ + # bucket: "examplebucket", + # copy_source: "/bucketname/sourceobjectkey", + # copy_source_range: "bytes=1-100000", + # key: "examplelargeobject", + # part_number: 2, + # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--", + # }) + # + # resp.to_h outputs the following: + # { + # copy_part_result: { + # etag: "\"65d16d19e65a7508a51f043180edcc36\"", + # last_modified: Time.parse("2016-12-29T21:44:28.000Z"), + # }, + # } + # + # @example Example: To upload a part by copying data from an existing object as data source + # + # # The following example uploads a part of a multipart upload by copying data from an existing object as data source. + # + # resp = client.upload_part_copy({ + # bucket: "examplebucket", + # copy_source: "/bucketname/sourceobjectkey", + # key: "examplelargeobject", + # part_number: 1, + # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--", + # }) + # + # resp.to_h outputs the following: + # { + # copy_part_result: { + # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"", + # last_modified: Time.parse("2016-12-29T21:24:43.000Z"), + # }, + # } + # + # @example Request syntax with placeholder values + # + # resp = client.upload_part_copy({ + # bucket: "BucketName", # required + # copy_source: "CopySource", # required + # copy_source_if_match: "CopySourceIfMatch", + # copy_source_if_modified_since: Time.now, + # copy_source_if_none_match: "CopySourceIfNoneMatch", + # copy_source_if_unmodified_since: Time.now, + # copy_source_range: "CopySourceRange", + # key: "ObjectKey", # required + # part_number: 1, # required + # upload_id: "MultipartUploadId", # required + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", + # copy_source_sse_customer_key: "CopySourceSSECustomerKey", + # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # expected_source_bucket_owner: "AccountId", + # }) + # + # @example Response structure + # + # resp.copy_source_version_id #=> String + # resp.copy_part_result.etag #=> String + # resp.copy_part_result.last_modified #=> Time + # resp.copy_part_result.checksum_crc32 #=> String + # resp.copy_part_result.checksum_crc32c #=> String + # resp.copy_part_result.checksum_sha1 #=> String + # resp.copy_part_result.checksum_sha256 #=> String + # resp.server_side_encryption #=> String, one of "AES256", "aws:kms" + # resp.sse_customer_algorithm #=> String + # resp.sse_customer_key_md5 #=> String + # resp.ssekms_key_id #=> String + # resp.bucket_key_enabled #=> Boolean + # resp.request_charged #=> String, one of "requester" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy AWS API Documentation + # + # @overload upload_part_copy(params = {}) + # @param [Hash] params ({}) + def upload_part_copy(params = {}, options = {}) + req = build_request(:upload_part_copy, params) + req.send_request(options) + end + + # Passes transformed objects to a `GetObject` operation when using + # Object Lambda access points. For information about Object Lambda + # access points, see [Transforming objects with Object Lambda access + # points][1] in the *Amazon S3 User Guide*. 
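+ #
+ # As a hedged illustration (not part of the generated reference), an
+ # Object Lambda handler typically fetches the original object from the
+ # presigned `inputS3Url` in the invocation event, transforms it, and
+ # relays the result through this operation. The event field names below
+ # follow the S3 Object Lambda event shape; the upcasing transform and
+ # the return value are purely illustrative:
+ #
+ # require "aws-sdk-s3"
+ # require "net/http"
+ #
+ # def handler(event:, context:)
+ # client = Aws::S3::Client.new
+ # ctx = event["getObjectContext"]
+ #
+ # # fetch the untransformed object via the presigned URL
+ # original = Net::HTTP.get(URI(ctx["inputS3Url"]))
+ #
+ # # hand the transformed body back to the waiting GetObject caller
+ # client.write_get_object_response(
+ # request_route: ctx["outputRoute"],
+ # request_token: ctx["outputToken"],
+ # body: original.upcase, # illustrative transformation
+ # )
+ #
+ # { status_code: 200 } # informational; the body above is the real response
+ # end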
+ #
+ # This operation supports metadata that can be returned by
+ # [GetObject][2], in addition to `RequestRoute`, `RequestToken`,
+ # `StatusCode`, `ErrorCode`, and `ErrorMessage`. The `GetObject`
+ # response metadata is supported so that the `WriteGetObjectResponse`
+ # caller, typically a Lambda function, can provide the same metadata
+ # when it internally invokes `GetObject`. When `WriteGetObjectResponse`
+ # is called by a customer-owned Lambda function, the metadata returned
+ # to the end user `GetObject` call might differ from what Amazon S3
+ # would normally return.
+ #
+ # You can include any number of metadata headers. When including a
+ # metadata header, it should be prefaced with `x-amz-meta`. For example,
+ # `x-amz-meta-my-custom-header: MyCustomValue`. The primary use case for
+ # this is to forward `GetObject` metadata.
+ #
+ # Amazon Web Services provides some prebuilt Lambda functions that you
+ # can use with S3 Object Lambda to detect and redact personally
+ # identifiable information (PII) and decompress S3 objects. These Lambda
+ # functions are available in the Amazon Web Services Serverless
+ # Application Repository, and can be selected through the Amazon Web
+ # Services Management Console when you create your Object Lambda access
+ # point.
+ #
+ # Example 1: PII Access Control - This Lambda function uses Amazon
+ # Comprehend, a natural language processing (NLP) service using machine
+ # learning to find insights and relationships in text. It automatically
+ # detects personally identifiable information (PII) such as names,
+ # addresses, dates, credit card numbers, and social security numbers
+ # from documents in your Amazon S3 bucket.
+ #
+ # Example 2: PII Redaction - This Lambda function uses Amazon
+ # Comprehend, a natural language processing (NLP) service using machine
+ # learning to find insights and relationships in text. It automatically
+ # redacts personally identifiable information (PII) such as names,
+ # addresses, dates, credit card numbers, and social security numbers
+ # from documents in your Amazon S3 bucket.
+ #
+ # Example 3: Decompression - The Lambda function
+ # `S3ObjectLambdaDecompression` is equipped to decompress objects stored
+ # in S3 in one of six compressed file formats including bzip2, gzip,
+ # snappy, zlib, zstandard, and ZIP.
+ #
+ # For information on how to view and use these functions, see [Using
+ # Amazon Web Services built Lambda functions][3] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html
+ #
+ # @option params [required, String] :request_route
+ # Route prefix to the HTTP URL generated.
+ #
+ # @option params [required, String] :request_token
+ # A single-use encrypted token that maps `WriteGetObjectResponse` to the
+ # end user `GetObject` request.
+ #
+ # @option params [String, IO] :body
+ # The object data.
+ #
+ # @option params [Integer] :status_code
+ # The integer status code for an HTTP response of a corresponding
+ # `GetObject` request.
+ #
+ # **Status Codes**
+ #
+ # * `200 - OK`
+ #
+ # * `206 - Partial Content`
+ #
+ # * `304 - Not Modified`
+ #
+ # * `400 - Bad Request`
+ #
+ # * `401 - Unauthorized`
+ #
+ # * `403 - Forbidden`
+ #
+ # * `404 - Not Found`
+ #
+ # * `405 - Method Not Allowed`
+ #
+ # * `409 - Conflict`
+ #
+ # * `411 - Length Required`
+ #
+ # * `412 - Precondition Failed`
+ #
+ # * `416 - Range Not Satisfiable`
+ #
+ # * `500 - Internal Server Error`
+ #
+ # * `503 - Service Unavailable`
+ #
+ # @option params [String] :error_code
+ # A string that uniquely identifies an error condition. Returned in the
+ # `<Code>` tag of the error XML response for a corresponding
+ # `GetObject` call. Cannot be used with a successful `StatusCode` header
+ # or when the transformed object is provided in the body. All error
+ # codes from S3 are sentence-cased. The regular expression (regex) value
+ # is `"^[A-Z][a-zA-Z]+$"`.
+ #
+ # @option params [String] :error_message
+ # Contains a generic description of the error condition. Returned in the
+ # `<Message>` tag of the error XML response for a corresponding
+ # `GetObject` call. Cannot be used with a successful `StatusCode` header
+ # or when the transformed object is provided in the body.
+ #
+ # @option params [String] :accept_ranges
+ # Indicates that a range of bytes was specified.
+ #
+ # @option params [String] :cache_control
+ # Specifies caching behavior along the request/reply chain.
+ #
+ # @option params [String] :content_disposition
+ # Specifies presentational information for the object.
+ #
+ # @option params [String] :content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the media-type
+ # referenced by the Content-Type header field.
+ #
+ # @option params [String] :content_language
+ # The language the content is in.
+ #
+ # @option params [Integer] :content_length
+ # The size of the content body in bytes.
+ #
+ # @option params [String] :content_range
+ # The portion of the object returned in the response.
+ #
+ # @option params [String] :content_type
+ # A standard MIME type describing the format of the object data.
+ #
+ # @option params [String] :checksum_crc32
+ # This header can be used as a data integrity check to verify that the
+ # data received is the same data that was originally sent. This
+ # specifies the base64-encoded, 32-bit CRC32 checksum of the object
+ # returned by the Object Lambda function. This may not match the
+ # checksum for the object stored in Amazon S3. Amazon S3 will perform
+ # validation of the checksum values only when the original `GetObject`
+ # request required checksum validation. For more information about
+ # checksums, see [Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ # Only one checksum header can be specified at a time. If you supply
+ # multiple checksum headers, this request will fail.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ #
+ # @option params [String] :checksum_crc32c
+ # This header can be used as a data integrity check to verify that the
+ # data received is the same data that was originally sent. This
+ # specifies the base64-encoded, 32-bit CRC32C checksum of the object
+ # returned by the Object Lambda function. This may not match the
+ # checksum for the object stored in Amazon S3.
Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 160-bit SHA-1 digest of the object + # returned by the Object Lambda function. This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 256-bit SHA-256 digest of the object + # returned by the Object Lambda function. This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # + # @option params [Boolean] :delete_marker + # Specifies whether an object stored in Amazon S3 is (`true`) or is not + # (`false`) a delete marker. + # + # @option params [String] :etag + # An opaque identifier assigned by a web server to a specific version of + # a resource found at a URL. + # + # @option params [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. + # + # @option params [String] :expiration + # If the object expiration is configured (see PUT Bucket lifecycle), the + # response includes this header. It includes the `expiry-date` and + # `rule-id` key-value pairs that provide the object expiration + # information. The value of the `rule-id` is URL-encoded. + # + # @option params [Time,DateTime,Date,Integer,String] :last_modified + # The date and time that the object was last modified. + # + # @option params [Integer] :missing_meta + # Set to the number of metadata entries not returned in `x-amz-meta` + # headers. This can happen if you create metadata using an API like SOAP + # that supports more flexible metadata than the REST API. For example, + # using SOAP, you can create metadata whose values are not legal HTTP + # headers. + # + # @option params [Hash] :metadata + # A map of metadata to store with the object in S3. 
+ #
+ # @option params [String] :object_lock_mode
+ # Indicates whether an object stored in Amazon S3 has Object Lock
+ # enabled. For more information about S3 Object Lock, see [Object
+ # Lock][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+ #
+ # @option params [String] :object_lock_legal_hold_status
+ # Indicates whether an object stored in Amazon S3 has an active legal
+ # hold.
+ #
+ # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date
+ # The date and time when Object Lock is configured to expire.
+ #
+ # @option params [Integer] :parts_count
+ # The count of parts this object has.
+ #
+ # @option params [String] :replication_status
+ # Indicates whether the request involves a bucket that is either a
+ # source or a destination in a replication rule. For more information
+ # about S3 Replication, see [Replication][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
+ #
+ # @option params [String] :request_charged
+ # If present, indicates that the requester was successfully charged for
+ # the request.
+ #
+ # @option params [String] :restore
+ # Provides information about the object restoration operation and the
+ # expiration time of the restored object copy.
+ #
+ # @option params [String] :server_side_encryption
+ # The server-side encryption algorithm used when storing the requested
+ # object in Amazon S3 (for example, AES256, aws:kms).
+ #
+ # @option params [String] :sse_customer_algorithm
+ # Encryption algorithm used if server-side encryption with a
+ # customer-provided encryption key was specified for the object stored
+ # in Amazon S3.
+ #
+ # @option params [String] :ssekms_key_id
+ # If present, specifies the ID of the Amazon Web Services Key Management
+ # Service (Amazon Web Services KMS) symmetric customer managed key that
+ # was used for the object stored in Amazon S3.
+ #
+ # @option params [String] :sse_customer_key_md5
+ # 128-bit MD5 digest of the customer-provided encryption key used in
+ # Amazon S3 to encrypt data stored in S3. For more information, see
+ # [Protecting data using server-side encryption with customer-provided
+ # encryption keys (SSE-C)][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+ #
+ # @option params [String] :storage_class
+ # Provides storage class information of the object. Amazon S3 returns
+ # this header for all objects except for S3 Standard storage class
+ # objects.
+ #
+ # For more information, see [Storage Classes][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ #
+ # @option params [Integer] :tag_count
+ # The number of tags, if any, on the object.
+ #
+ # @option params [String] :version_id
+ # An ID used to reference a specific version of the object.
+ #
+ # @option params [Boolean] :bucket_key_enabled
+ # Indicates whether the object stored in Amazon S3 uses an S3 bucket key
+ # for server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
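+ #
+ # A short hedged sketch (values illustrative) of the error path
+ # described earlier under `:error_code` and `:error_message`, issued
+ # from the same kind of handler shown above: signalling a failed
+ # transformation instead of returning an object body.
+ #
+ # client.write_get_object_response(
+ # request_route: event["getObjectContext"]["outputRoute"],
+ # request_token: event["getObjectContext"]["outputToken"],
+ # status_code: 403,
+ # error_code: "AccessDenied", # sentence-cased, matches ^[A-Z][a-zA-Z]+$
+ # error_message: "The caller is not allowed to read this object.",
+ # )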
+ # + # @example Request syntax with placeholder values + # + # resp = client.write_get_object_response({ + # request_route: "RequestRoute", # required + # request_token: "RequestToken", # required + # body: source_file, + # status_code: 1, + # error_code: "ErrorCode", + # error_message: "ErrorMessage", + # accept_ranges: "AcceptRanges", + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_length: 1, + # content_range: "ContentRange", + # content_type: "ContentType", + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # delete_marker: false, + # etag: "ETag", + # expires: Time.now, + # expiration: "Expiration", + # last_modified: Time.now, + # missing_meta: 1, + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # object_lock_retain_until_date: Time.now, + # parts_count: 1, + # replication_status: "COMPLETE", # accepts COMPLETE, PENDING, FAILED, REPLICA + # request_charged: "requester", # accepts requester + # restore: "Restore", + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # sse_customer_algorithm: "SSECustomerAlgorithm", + # ssekms_key_id: "SSEKMSKeyId", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # tag_count: 1, + # version_id: "ObjectVersionId", + # bucket_key_enabled: false, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse AWS API Documentation + # + # @overload write_get_object_response(params = {}) + # @param [Hash] params ({}) + def write_get_object_response(params = {}, options = {}) + req = build_request(:write_get_object_response, params) + req.send_request(options) + end + + # @!endgroup + + # @param params ({}) + # @api private + def build_request(operation_name, params = {}) + handlers = @handlers.for(operation_name) + context = Seahorse::Client::RequestContext.new( + operation_name: operation_name, + operation: config.api.operation(operation_name), + client: self, + params: params, + config: config) + context[:gem_name] = 'aws-sdk-s3' + context[:gem_version] = '1.121.0' + Seahorse::Client::Request.new(handlers, context) + end + + # Polls an API operation until a resource enters a desired state. + # + # ## Basic Usage + # + # A waiter will call an API operation until: + # + # * It is successful + # * It enters a terminal state + # * It makes the maximum number of attempts + # + # In between attempts, the waiter will sleep. + # + # # polls in a loop, sleeping between attempts + # client.wait_until(waiter_name, params) + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. You can pass + # configuration as the final arguments hash. + # + # # poll for ~25 seconds + # client.wait_until(waiter_name, params, { + # max_attempts: 5, + # delay: 5, + # }) + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. 
+ #
+ # started_at = Time.now
+ # client.wait_until(waiter_name, params, {
+ #
+ # # disable max attempts
+ # max_attempts: nil,
+ #
+ # # poll for 1 hour, instead of a number of attempts
+ # before_wait: -> (attempts, response) do
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ # })
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is unsuccessful, it will raise an error.
+ # All of the failure errors extend from
+ # {Aws::Waiters::Errors::WaiterFailed}.
+ #
+ # begin
+ # client.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # ## Valid Waiters
+ #
+ # The following table lists the valid waiter names, the operations they call,
+ # and the default `:delay` and `:max_attempts` values.
+ #
+ # | waiter_name | params | :delay | :max_attempts |
+ # | ----------------- | -------------------- | -------- | ------------- |
+ # | bucket_exists | {Client#head_bucket} | 5 | 20 |
+ # | bucket_not_exists | {Client#head_bucket} | 5 | 20 |
+ # | object_exists | {Client#head_object} | 5 | 20 |
+ # | object_not_exists | {Client#head_object} | 5 | 20 |
+ #
+ # @raise [Errors::FailureStateError] Raised when the waiter terminates
+ # because the waiter has entered a state that it will not transition
+ # out of, preventing success.
+ #
+ # @raise [Errors::TooManyAttemptsError] Raised when the configured
+ # maximum number of attempts have been made, and the waiter is not
+ # yet successful.
+ #
+ # @raise [Errors::UnexpectedError] Raised when an error is encountered
+ # while polling for a resource that is not expected.
+ #
+ # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
+ # for an unknown state.
+ #
+ # @return [Boolean] Returns `true` if the waiter was successful.
+ # @param [Symbol] waiter_name
+ # @param [Hash] params ({})
+ # @param [Hash] options ({})
+ # @option options [Integer] :max_attempts
+ # @option options [Integer] :delay
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def wait_until(waiter_name, params = {}, options = {})
+ w = waiter(waiter_name, options)
+ yield(w.waiter) if block_given? # deprecated
+ w.wait(params)
+ end
+
+ # @api private
+ # @deprecated
+ def waiter_names
+ waiters.keys
+ end
+
+ private
+
+ # @param [Symbol] waiter_name
+ # @param [Hash] options ({})
+ def waiter(waiter_name, options = {})
+ waiter_class = waiters[waiter_name]
+ if waiter_class
+ waiter_class.new(options.merge(client: self))
+ else
+ raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
+ end
+ end
+
+ def waiters
+ {
+ bucket_exists: Waiters::BucketExists,
+ bucket_not_exists: Waiters::BucketNotExists,
+ object_exists: Waiters::ObjectExists,
+ object_not_exists: Waiters::ObjectNotExists
+ }
+ end
+
+ class << self
+
+ # @api private
+ attr_reader :identifier
+
+ # @api private
+ def errors_module
+ Errors
+ end
+
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client_api.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client_api.rb
new file mode 100644
index 0000000..e9466a5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/client_api.rb
@@ -0,0 +1,3721 @@
+# frozen_string_literal: true
+
+# WARNING ABOUT GENERATED CODE
+#
+# This file is generated.
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + # @api private + module ClientApi + + include Seahorse::Model + + AbortDate = Shapes::TimestampShape.new(name: 'AbortDate') + AbortIncompleteMultipartUpload = Shapes::StructureShape.new(name: 'AbortIncompleteMultipartUpload') + AbortMultipartUploadOutput = Shapes::StructureShape.new(name: 'AbortMultipartUploadOutput') + AbortMultipartUploadRequest = Shapes::StructureShape.new(name: 'AbortMultipartUploadRequest') + AbortRuleId = Shapes::StringShape.new(name: 'AbortRuleId') + AccelerateConfiguration = Shapes::StructureShape.new(name: 'AccelerateConfiguration') + AcceptRanges = Shapes::StringShape.new(name: 'AcceptRanges') + AccessControlPolicy = Shapes::StructureShape.new(name: 'AccessControlPolicy') + AccessControlTranslation = Shapes::StructureShape.new(name: 'AccessControlTranslation') + AccessPointArn = Shapes::StringShape.new(name: 'AccessPointArn') + AccountId = Shapes::StringShape.new(name: 'AccountId') + AllowQuotedRecordDelimiter = Shapes::BooleanShape.new(name: 'AllowQuotedRecordDelimiter') + AllowedHeader = Shapes::StringShape.new(name: 'AllowedHeader') + AllowedHeaders = Shapes::ListShape.new(name: 'AllowedHeaders', flattened: true) + AllowedMethod = Shapes::StringShape.new(name: 'AllowedMethod') + AllowedMethods = Shapes::ListShape.new(name: 'AllowedMethods', flattened: true) + AllowedOrigin = Shapes::StringShape.new(name: 'AllowedOrigin') + AllowedOrigins = Shapes::ListShape.new(name: 'AllowedOrigins', flattened: true) + AnalyticsAndOperator = Shapes::StructureShape.new(name: 'AnalyticsAndOperator') + AnalyticsConfiguration = Shapes::StructureShape.new(name: 'AnalyticsConfiguration') + AnalyticsConfigurationList = Shapes::ListShape.new(name: 'AnalyticsConfigurationList', flattened: true) + AnalyticsExportDestination = Shapes::StructureShape.new(name: 'AnalyticsExportDestination') + AnalyticsFilter = Shapes::StructureShape.new(name: 'AnalyticsFilter') + AnalyticsId = Shapes::StringShape.new(name: 'AnalyticsId') + AnalyticsS3BucketDestination = Shapes::StructureShape.new(name: 'AnalyticsS3BucketDestination') + AnalyticsS3ExportFileFormat = Shapes::StringShape.new(name: 'AnalyticsS3ExportFileFormat') + ArchiveStatus = Shapes::StringShape.new(name: 'ArchiveStatus') + Body = Shapes::BlobShape.new(name: 'Body') + Bucket = Shapes::StructureShape.new(name: 'Bucket') + BucketAccelerateStatus = Shapes::StringShape.new(name: 'BucketAccelerateStatus') + BucketAlreadyExists = Shapes::StructureShape.new(name: 'BucketAlreadyExists') + BucketAlreadyOwnedByYou = Shapes::StructureShape.new(name: 'BucketAlreadyOwnedByYou') + BucketCannedACL = Shapes::StringShape.new(name: 'BucketCannedACL') + BucketKeyEnabled = Shapes::BooleanShape.new(name: 'BucketKeyEnabled') + BucketLifecycleConfiguration = Shapes::StructureShape.new(name: 'BucketLifecycleConfiguration') + BucketLocationConstraint = Shapes::StringShape.new(name: 'BucketLocationConstraint') + BucketLoggingStatus = Shapes::StructureShape.new(name: 'BucketLoggingStatus') + BucketLogsPermission = Shapes::StringShape.new(name: 'BucketLogsPermission') + BucketName = Shapes::StringShape.new(name: 'BucketName') + BucketVersioningStatus = Shapes::StringShape.new(name: 'BucketVersioningStatus') + Buckets = Shapes::ListShape.new(name: 'Buckets') + BypassGovernanceRetention = Shapes::BooleanShape.new(name: 'BypassGovernanceRetention') + BytesProcessed = Shapes::IntegerShape.new(name: 
'BytesProcessed') + BytesReturned = Shapes::IntegerShape.new(name: 'BytesReturned') + BytesScanned = Shapes::IntegerShape.new(name: 'BytesScanned') + CORSConfiguration = Shapes::StructureShape.new(name: 'CORSConfiguration') + CORSRule = Shapes::StructureShape.new(name: 'CORSRule') + CORSRules = Shapes::ListShape.new(name: 'CORSRules', flattened: true) + CSVInput = Shapes::StructureShape.new(name: 'CSVInput') + CSVOutput = Shapes::StructureShape.new(name: 'CSVOutput') + CacheControl = Shapes::StringShape.new(name: 'CacheControl') + Checksum = Shapes::StructureShape.new(name: 'Checksum') + ChecksumAlgorithm = Shapes::StringShape.new(name: 'ChecksumAlgorithm') + ChecksumAlgorithmList = Shapes::ListShape.new(name: 'ChecksumAlgorithmList', flattened: true) + ChecksumCRC32 = Shapes::StringShape.new(name: 'ChecksumCRC32') + ChecksumCRC32C = Shapes::StringShape.new(name: 'ChecksumCRC32C') + ChecksumMode = Shapes::StringShape.new(name: 'ChecksumMode') + ChecksumSHA1 = Shapes::StringShape.new(name: 'ChecksumSHA1') + ChecksumSHA256 = Shapes::StringShape.new(name: 'ChecksumSHA256') + CloudFunction = Shapes::StringShape.new(name: 'CloudFunction') + CloudFunctionConfiguration = Shapes::StructureShape.new(name: 'CloudFunctionConfiguration') + CloudFunctionInvocationRole = Shapes::StringShape.new(name: 'CloudFunctionInvocationRole') + Code = Shapes::StringShape.new(name: 'Code') + Comments = Shapes::StringShape.new(name: 'Comments') + CommonPrefix = Shapes::StructureShape.new(name: 'CommonPrefix') + CommonPrefixList = Shapes::ListShape.new(name: 'CommonPrefixList', flattened: true) + CompleteMultipartUploadOutput = Shapes::StructureShape.new(name: 'CompleteMultipartUploadOutput') + CompleteMultipartUploadRequest = Shapes::StructureShape.new(name: 'CompleteMultipartUploadRequest') + CompletedMultipartUpload = Shapes::StructureShape.new(name: 'CompletedMultipartUpload') + CompletedPart = Shapes::StructureShape.new(name: 'CompletedPart') + CompletedPartList = Shapes::ListShape.new(name: 'CompletedPartList', flattened: true) + CompressionType = Shapes::StringShape.new(name: 'CompressionType') + Condition = Shapes::StructureShape.new(name: 'Condition') + ConfirmRemoveSelfBucketAccess = Shapes::BooleanShape.new(name: 'ConfirmRemoveSelfBucketAccess') + ContentDisposition = Shapes::StringShape.new(name: 'ContentDisposition') + ContentEncoding = Shapes::StringShape.new(name: 'ContentEncoding') + ContentLanguage = Shapes::StringShape.new(name: 'ContentLanguage') + ContentLength = Shapes::IntegerShape.new(name: 'ContentLength') + ContentMD5 = Shapes::StringShape.new(name: 'ContentMD5') + ContentRange = Shapes::StringShape.new(name: 'ContentRange') + ContentType = Shapes::StringShape.new(name: 'ContentType') + ContinuationEvent = Shapes::StructureShape.new(name: 'ContinuationEvent') + CopyObjectOutput = Shapes::StructureShape.new(name: 'CopyObjectOutput') + CopyObjectRequest = Shapes::StructureShape.new(name: 'CopyObjectRequest') + CopyObjectResult = Shapes::StructureShape.new(name: 'CopyObjectResult') + CopyPartResult = Shapes::StructureShape.new(name: 'CopyPartResult') + CopySource = Shapes::StringShape.new(name: 'CopySource') + CopySourceIfMatch = Shapes::StringShape.new(name: 'CopySourceIfMatch') + CopySourceIfModifiedSince = Shapes::TimestampShape.new(name: 'CopySourceIfModifiedSince') + CopySourceIfNoneMatch = Shapes::StringShape.new(name: 'CopySourceIfNoneMatch') + CopySourceIfUnmodifiedSince = Shapes::TimestampShape.new(name: 'CopySourceIfUnmodifiedSince') + CopySourceRange = Shapes::StringShape.new(name: 
'CopySourceRange') + CopySourceSSECustomerAlgorithm = Shapes::StringShape.new(name: 'CopySourceSSECustomerAlgorithm') + CopySourceSSECustomerKey = Shapes::StringShape.new(name: 'CopySourceSSECustomerKey') + CopySourceSSECustomerKeyMD5 = Shapes::StringShape.new(name: 'CopySourceSSECustomerKeyMD5') + CopySourceVersionId = Shapes::StringShape.new(name: 'CopySourceVersionId') + CreateBucketConfiguration = Shapes::StructureShape.new(name: 'CreateBucketConfiguration') + CreateBucketOutput = Shapes::StructureShape.new(name: 'CreateBucketOutput') + CreateBucketRequest = Shapes::StructureShape.new(name: 'CreateBucketRequest') + CreateMultipartUploadOutput = Shapes::StructureShape.new(name: 'CreateMultipartUploadOutput') + CreateMultipartUploadRequest = Shapes::StructureShape.new(name: 'CreateMultipartUploadRequest') + CreationDate = Shapes::TimestampShape.new(name: 'CreationDate') + Date = Shapes::TimestampShape.new(name: 'Date', timestampFormat: "iso8601") + Days = Shapes::IntegerShape.new(name: 'Days') + DaysAfterInitiation = Shapes::IntegerShape.new(name: 'DaysAfterInitiation') + DefaultRetention = Shapes::StructureShape.new(name: 'DefaultRetention') + Delete = Shapes::StructureShape.new(name: 'Delete') + DeleteBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketAnalyticsConfigurationRequest') + DeleteBucketCorsRequest = Shapes::StructureShape.new(name: 'DeleteBucketCorsRequest') + DeleteBucketEncryptionRequest = Shapes::StructureShape.new(name: 'DeleteBucketEncryptionRequest') + DeleteBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketIntelligentTieringConfigurationRequest') + DeleteBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketInventoryConfigurationRequest') + DeleteBucketLifecycleRequest = Shapes::StructureShape.new(name: 'DeleteBucketLifecycleRequest') + DeleteBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketMetricsConfigurationRequest') + DeleteBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'DeleteBucketOwnershipControlsRequest') + DeleteBucketPolicyRequest = Shapes::StructureShape.new(name: 'DeleteBucketPolicyRequest') + DeleteBucketReplicationRequest = Shapes::StructureShape.new(name: 'DeleteBucketReplicationRequest') + DeleteBucketRequest = Shapes::StructureShape.new(name: 'DeleteBucketRequest') + DeleteBucketTaggingRequest = Shapes::StructureShape.new(name: 'DeleteBucketTaggingRequest') + DeleteBucketWebsiteRequest = Shapes::StructureShape.new(name: 'DeleteBucketWebsiteRequest') + DeleteMarker = Shapes::BooleanShape.new(name: 'DeleteMarker') + DeleteMarkerEntry = Shapes::StructureShape.new(name: 'DeleteMarkerEntry') + DeleteMarkerReplication = Shapes::StructureShape.new(name: 'DeleteMarkerReplication') + DeleteMarkerReplicationStatus = Shapes::StringShape.new(name: 'DeleteMarkerReplicationStatus') + DeleteMarkerVersionId = Shapes::StringShape.new(name: 'DeleteMarkerVersionId') + DeleteMarkers = Shapes::ListShape.new(name: 'DeleteMarkers', flattened: true) + DeleteObjectOutput = Shapes::StructureShape.new(name: 'DeleteObjectOutput') + DeleteObjectRequest = Shapes::StructureShape.new(name: 'DeleteObjectRequest') + DeleteObjectTaggingOutput = Shapes::StructureShape.new(name: 'DeleteObjectTaggingOutput') + DeleteObjectTaggingRequest = Shapes::StructureShape.new(name: 'DeleteObjectTaggingRequest') + DeleteObjectsOutput = Shapes::StructureShape.new(name: 'DeleteObjectsOutput') + DeleteObjectsRequest = 
Shapes::StructureShape.new(name: 'DeleteObjectsRequest') + DeletePublicAccessBlockRequest = Shapes::StructureShape.new(name: 'DeletePublicAccessBlockRequest') + DeletedObject = Shapes::StructureShape.new(name: 'DeletedObject') + DeletedObjects = Shapes::ListShape.new(name: 'DeletedObjects', flattened: true) + Delimiter = Shapes::StringShape.new(name: 'Delimiter') + Description = Shapes::StringShape.new(name: 'Description') + Destination = Shapes::StructureShape.new(name: 'Destination') + DisplayName = Shapes::StringShape.new(name: 'DisplayName') + ETag = Shapes::StringShape.new(name: 'ETag') + EmailAddress = Shapes::StringShape.new(name: 'EmailAddress') + EnableRequestProgress = Shapes::BooleanShape.new(name: 'EnableRequestProgress') + EncodingType = Shapes::StringShape.new(name: 'EncodingType') + Encryption = Shapes::StructureShape.new(name: 'Encryption') + EncryptionConfiguration = Shapes::StructureShape.new(name: 'EncryptionConfiguration') + End = Shapes::IntegerShape.new(name: 'End') + EndEvent = Shapes::StructureShape.new(name: 'EndEvent') + Error = Shapes::StructureShape.new(name: 'Error') + ErrorCode = Shapes::StringShape.new(name: 'ErrorCode') + ErrorDocument = Shapes::StructureShape.new(name: 'ErrorDocument') + ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage') + Errors = Shapes::ListShape.new(name: 'Errors', flattened: true) + Event = Shapes::StringShape.new(name: 'Event') + EventBridgeConfiguration = Shapes::StructureShape.new(name: 'EventBridgeConfiguration') + EventList = Shapes::ListShape.new(name: 'EventList', flattened: true) + ExistingObjectReplication = Shapes::StructureShape.new(name: 'ExistingObjectReplication') + ExistingObjectReplicationStatus = Shapes::StringShape.new(name: 'ExistingObjectReplicationStatus') + Expiration = Shapes::StringShape.new(name: 'Expiration') + ExpirationStatus = Shapes::StringShape.new(name: 'ExpirationStatus') + ExpiredObjectDeleteMarker = Shapes::BooleanShape.new(name: 'ExpiredObjectDeleteMarker') + Expires = Shapes::TimestampShape.new(name: 'Expires') + ExpiresString = Shapes::StringShape.new(name: 'ExpiresString') + ExposeHeader = Shapes::StringShape.new(name: 'ExposeHeader') + ExposeHeaders = Shapes::ListShape.new(name: 'ExposeHeaders', flattened: true) + Expression = Shapes::StringShape.new(name: 'Expression') + ExpressionType = Shapes::StringShape.new(name: 'ExpressionType') + FetchOwner = Shapes::BooleanShape.new(name: 'FetchOwner') + FieldDelimiter = Shapes::StringShape.new(name: 'FieldDelimiter') + FileHeaderInfo = Shapes::StringShape.new(name: 'FileHeaderInfo') + FilterRule = Shapes::StructureShape.new(name: 'FilterRule') + FilterRuleList = Shapes::ListShape.new(name: 'FilterRuleList', flattened: true) + FilterRuleName = Shapes::StringShape.new(name: 'FilterRuleName') + FilterRuleValue = Shapes::StringShape.new(name: 'FilterRuleValue') + GetBucketAccelerateConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketAccelerateConfigurationOutput') + GetBucketAccelerateConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketAccelerateConfigurationRequest') + GetBucketAclOutput = Shapes::StructureShape.new(name: 'GetBucketAclOutput') + GetBucketAclRequest = Shapes::StructureShape.new(name: 'GetBucketAclRequest') + GetBucketAnalyticsConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketAnalyticsConfigurationOutput') + GetBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketAnalyticsConfigurationRequest') + GetBucketCorsOutput = Shapes::StructureShape.new(name: 
'GetBucketCorsOutput') + GetBucketCorsRequest = Shapes::StructureShape.new(name: 'GetBucketCorsRequest') + GetBucketEncryptionOutput = Shapes::StructureShape.new(name: 'GetBucketEncryptionOutput') + GetBucketEncryptionRequest = Shapes::StructureShape.new(name: 'GetBucketEncryptionRequest') + GetBucketIntelligentTieringConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketIntelligentTieringConfigurationOutput') + GetBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketIntelligentTieringConfigurationRequest') + GetBucketInventoryConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketInventoryConfigurationOutput') + GetBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketInventoryConfigurationRequest') + GetBucketLifecycleConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketLifecycleConfigurationOutput') + GetBucketLifecycleConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketLifecycleConfigurationRequest') + GetBucketLifecycleOutput = Shapes::StructureShape.new(name: 'GetBucketLifecycleOutput') + GetBucketLifecycleRequest = Shapes::StructureShape.new(name: 'GetBucketLifecycleRequest') + GetBucketLocationOutput = Shapes::StructureShape.new(name: 'GetBucketLocationOutput') + GetBucketLocationRequest = Shapes::StructureShape.new(name: 'GetBucketLocationRequest') + GetBucketLoggingOutput = Shapes::StructureShape.new(name: 'GetBucketLoggingOutput') + GetBucketLoggingRequest = Shapes::StructureShape.new(name: 'GetBucketLoggingRequest') + GetBucketMetricsConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketMetricsConfigurationOutput') + GetBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketMetricsConfigurationRequest') + GetBucketNotificationConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketNotificationConfigurationRequest') + GetBucketOwnershipControlsOutput = Shapes::StructureShape.new(name: 'GetBucketOwnershipControlsOutput') + GetBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'GetBucketOwnershipControlsRequest') + GetBucketPolicyOutput = Shapes::StructureShape.new(name: 'GetBucketPolicyOutput') + GetBucketPolicyRequest = Shapes::StructureShape.new(name: 'GetBucketPolicyRequest') + GetBucketPolicyStatusOutput = Shapes::StructureShape.new(name: 'GetBucketPolicyStatusOutput') + GetBucketPolicyStatusRequest = Shapes::StructureShape.new(name: 'GetBucketPolicyStatusRequest') + GetBucketReplicationOutput = Shapes::StructureShape.new(name: 'GetBucketReplicationOutput') + GetBucketReplicationRequest = Shapes::StructureShape.new(name: 'GetBucketReplicationRequest') + GetBucketRequestPaymentOutput = Shapes::StructureShape.new(name: 'GetBucketRequestPaymentOutput') + GetBucketRequestPaymentRequest = Shapes::StructureShape.new(name: 'GetBucketRequestPaymentRequest') + GetBucketTaggingOutput = Shapes::StructureShape.new(name: 'GetBucketTaggingOutput') + GetBucketTaggingRequest = Shapes::StructureShape.new(name: 'GetBucketTaggingRequest') + GetBucketVersioningOutput = Shapes::StructureShape.new(name: 'GetBucketVersioningOutput') + GetBucketVersioningRequest = Shapes::StructureShape.new(name: 'GetBucketVersioningRequest') + GetBucketWebsiteOutput = Shapes::StructureShape.new(name: 'GetBucketWebsiteOutput') + GetBucketWebsiteRequest = Shapes::StructureShape.new(name: 'GetBucketWebsiteRequest') + GetObjectAclOutput = Shapes::StructureShape.new(name: 'GetObjectAclOutput') + GetObjectAclRequest = Shapes::StructureShape.new(name: 
'GetObjectAclRequest') + GetObjectAttributesOutput = Shapes::StructureShape.new(name: 'GetObjectAttributesOutput') + GetObjectAttributesParts = Shapes::StructureShape.new(name: 'GetObjectAttributesParts') + GetObjectAttributesRequest = Shapes::StructureShape.new(name: 'GetObjectAttributesRequest') + GetObjectLegalHoldOutput = Shapes::StructureShape.new(name: 'GetObjectLegalHoldOutput') + GetObjectLegalHoldRequest = Shapes::StructureShape.new(name: 'GetObjectLegalHoldRequest') + GetObjectLockConfigurationOutput = Shapes::StructureShape.new(name: 'GetObjectLockConfigurationOutput') + GetObjectLockConfigurationRequest = Shapes::StructureShape.new(name: 'GetObjectLockConfigurationRequest') + GetObjectOutput = Shapes::StructureShape.new(name: 'GetObjectOutput') + GetObjectRequest = Shapes::StructureShape.new(name: 'GetObjectRequest') + GetObjectResponseStatusCode = Shapes::IntegerShape.new(name: 'GetObjectResponseStatusCode') + GetObjectRetentionOutput = Shapes::StructureShape.new(name: 'GetObjectRetentionOutput') + GetObjectRetentionRequest = Shapes::StructureShape.new(name: 'GetObjectRetentionRequest') + GetObjectTaggingOutput = Shapes::StructureShape.new(name: 'GetObjectTaggingOutput') + GetObjectTaggingRequest = Shapes::StructureShape.new(name: 'GetObjectTaggingRequest') + GetObjectTorrentOutput = Shapes::StructureShape.new(name: 'GetObjectTorrentOutput') + GetObjectTorrentRequest = Shapes::StructureShape.new(name: 'GetObjectTorrentRequest') + GetPublicAccessBlockOutput = Shapes::StructureShape.new(name: 'GetPublicAccessBlockOutput') + GetPublicAccessBlockRequest = Shapes::StructureShape.new(name: 'GetPublicAccessBlockRequest') + GlacierJobParameters = Shapes::StructureShape.new(name: 'GlacierJobParameters') + Grant = Shapes::StructureShape.new(name: 'Grant') + GrantFullControl = Shapes::StringShape.new(name: 'GrantFullControl') + GrantRead = Shapes::StringShape.new(name: 'GrantRead') + GrantReadACP = Shapes::StringShape.new(name: 'GrantReadACP') + GrantWrite = Shapes::StringShape.new(name: 'GrantWrite') + GrantWriteACP = Shapes::StringShape.new(name: 'GrantWriteACP') + Grantee = Shapes::StructureShape.new(name: 'Grantee', xmlNamespace: {"prefix"=>"xsi", "uri"=>"http://www.w3.org/2001/XMLSchema-instance"}) + Grants = Shapes::ListShape.new(name: 'Grants') + HeadBucketRequest = Shapes::StructureShape.new(name: 'HeadBucketRequest') + HeadObjectOutput = Shapes::StructureShape.new(name: 'HeadObjectOutput') + HeadObjectRequest = Shapes::StructureShape.new(name: 'HeadObjectRequest') + HostName = Shapes::StringShape.new(name: 'HostName') + HttpErrorCodeReturnedEquals = Shapes::StringShape.new(name: 'HttpErrorCodeReturnedEquals') + HttpRedirectCode = Shapes::StringShape.new(name: 'HttpRedirectCode') + ID = Shapes::StringShape.new(name: 'ID') + IfMatch = Shapes::StringShape.new(name: 'IfMatch') + IfModifiedSince = Shapes::TimestampShape.new(name: 'IfModifiedSince') + IfNoneMatch = Shapes::StringShape.new(name: 'IfNoneMatch') + IfUnmodifiedSince = Shapes::TimestampShape.new(name: 'IfUnmodifiedSince') + IndexDocument = Shapes::StructureShape.new(name: 'IndexDocument') + Initiated = Shapes::TimestampShape.new(name: 'Initiated') + Initiator = Shapes::StructureShape.new(name: 'Initiator') + InputSerialization = Shapes::StructureShape.new(name: 'InputSerialization') + IntelligentTieringAccessTier = Shapes::StringShape.new(name: 'IntelligentTieringAccessTier') + IntelligentTieringAndOperator = Shapes::StructureShape.new(name: 'IntelligentTieringAndOperator') + IntelligentTieringConfiguration = 
Shapes::StructureShape.new(name: 'IntelligentTieringConfiguration') + IntelligentTieringConfigurationList = Shapes::ListShape.new(name: 'IntelligentTieringConfigurationList', flattened: true) + IntelligentTieringDays = Shapes::IntegerShape.new(name: 'IntelligentTieringDays') + IntelligentTieringFilter = Shapes::StructureShape.new(name: 'IntelligentTieringFilter') + IntelligentTieringId = Shapes::StringShape.new(name: 'IntelligentTieringId') + IntelligentTieringStatus = Shapes::StringShape.new(name: 'IntelligentTieringStatus') + InvalidObjectState = Shapes::StructureShape.new(name: 'InvalidObjectState') + InventoryConfiguration = Shapes::StructureShape.new(name: 'InventoryConfiguration') + InventoryConfigurationList = Shapes::ListShape.new(name: 'InventoryConfigurationList', flattened: true) + InventoryDestination = Shapes::StructureShape.new(name: 'InventoryDestination') + InventoryEncryption = Shapes::StructureShape.new(name: 'InventoryEncryption') + InventoryFilter = Shapes::StructureShape.new(name: 'InventoryFilter') + InventoryFormat = Shapes::StringShape.new(name: 'InventoryFormat') + InventoryFrequency = Shapes::StringShape.new(name: 'InventoryFrequency') + InventoryId = Shapes::StringShape.new(name: 'InventoryId') + InventoryIncludedObjectVersions = Shapes::StringShape.new(name: 'InventoryIncludedObjectVersions') + InventoryOptionalField = Shapes::StringShape.new(name: 'InventoryOptionalField') + InventoryOptionalFields = Shapes::ListShape.new(name: 'InventoryOptionalFields') + InventoryS3BucketDestination = Shapes::StructureShape.new(name: 'InventoryS3BucketDestination') + InventorySchedule = Shapes::StructureShape.new(name: 'InventorySchedule') + IsEnabled = Shapes::BooleanShape.new(name: 'IsEnabled') + IsLatest = Shapes::BooleanShape.new(name: 'IsLatest') + IsPublic = Shapes::BooleanShape.new(name: 'IsPublic') + IsTruncated = Shapes::BooleanShape.new(name: 'IsTruncated') + JSONInput = Shapes::StructureShape.new(name: 'JSONInput') + JSONOutput = Shapes::StructureShape.new(name: 'JSONOutput') + JSONType = Shapes::StringShape.new(name: 'JSONType') + KMSContext = Shapes::StringShape.new(name: 'KMSContext') + KeyCount = Shapes::IntegerShape.new(name: 'KeyCount') + KeyMarker = Shapes::StringShape.new(name: 'KeyMarker') + KeyPrefixEquals = Shapes::StringShape.new(name: 'KeyPrefixEquals') + LambdaFunctionArn = Shapes::StringShape.new(name: 'LambdaFunctionArn') + LambdaFunctionConfiguration = Shapes::StructureShape.new(name: 'LambdaFunctionConfiguration') + LambdaFunctionConfigurationList = Shapes::ListShape.new(name: 'LambdaFunctionConfigurationList', flattened: true) + LastModified = Shapes::TimestampShape.new(name: 'LastModified') + LifecycleConfiguration = Shapes::StructureShape.new(name: 'LifecycleConfiguration') + LifecycleExpiration = Shapes::StructureShape.new(name: 'LifecycleExpiration') + LifecycleRule = Shapes::StructureShape.new(name: 'LifecycleRule') + LifecycleRuleAndOperator = Shapes::StructureShape.new(name: 'LifecycleRuleAndOperator') + LifecycleRuleFilter = Shapes::StructureShape.new(name: 'LifecycleRuleFilter') + LifecycleRules = Shapes::ListShape.new(name: 'LifecycleRules', flattened: true) + ListBucketAnalyticsConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketAnalyticsConfigurationsOutput') + ListBucketAnalyticsConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketAnalyticsConfigurationsRequest') + ListBucketIntelligentTieringConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketIntelligentTieringConfigurationsOutput') + 
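# (Editorial sketch, not generated output: the declarations in this hunk build
+ # the client-side model of the S3 REST-XML API. Each Shapes::*Shape.new call
+ # registers one named node; `flattened: true` on a ListShape makes the XML
+ # (de)serializer emit and read repeated elements directly, one per item, e.g.
+ #   <IntelligentTieringConfiguration>...</IntelligentTieringConfiguration>
+ #   <IntelligentTieringConfiguration>...</IntelligentTieringConfiguration>
+ # whereas a non-flattened list nests its items under a single wrapper tag.) +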
ListBucketIntelligentTieringConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketIntelligentTieringConfigurationsRequest') + ListBucketInventoryConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketInventoryConfigurationsOutput') + ListBucketInventoryConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketInventoryConfigurationsRequest') + ListBucketMetricsConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketMetricsConfigurationsOutput') + ListBucketMetricsConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketMetricsConfigurationsRequest') + ListBucketsOutput = Shapes::StructureShape.new(name: 'ListBucketsOutput') + ListMultipartUploadsOutput = Shapes::StructureShape.new(name: 'ListMultipartUploadsOutput') + ListMultipartUploadsRequest = Shapes::StructureShape.new(name: 'ListMultipartUploadsRequest') + ListObjectVersionsOutput = Shapes::StructureShape.new(name: 'ListObjectVersionsOutput') + ListObjectVersionsRequest = Shapes::StructureShape.new(name: 'ListObjectVersionsRequest') + ListObjectsOutput = Shapes::StructureShape.new(name: 'ListObjectsOutput') + ListObjectsRequest = Shapes::StructureShape.new(name: 'ListObjectsRequest') + ListObjectsV2Output = Shapes::StructureShape.new(name: 'ListObjectsV2Output') + ListObjectsV2Request = Shapes::StructureShape.new(name: 'ListObjectsV2Request') + ListPartsOutput = Shapes::StructureShape.new(name: 'ListPartsOutput') + ListPartsRequest = Shapes::StructureShape.new(name: 'ListPartsRequest') + Location = Shapes::StringShape.new(name: 'Location') + LocationPrefix = Shapes::StringShape.new(name: 'LocationPrefix') + LoggingEnabled = Shapes::StructureShape.new(name: 'LoggingEnabled') + MFA = Shapes::StringShape.new(name: 'MFA') + MFADelete = Shapes::StringShape.new(name: 'MFADelete') + MFADeleteStatus = Shapes::StringShape.new(name: 'MFADeleteStatus') + Marker = Shapes::StringShape.new(name: 'Marker') + MaxAgeSeconds = Shapes::IntegerShape.new(name: 'MaxAgeSeconds') + MaxKeys = Shapes::IntegerShape.new(name: 'MaxKeys') + MaxParts = Shapes::IntegerShape.new(name: 'MaxParts') + MaxUploads = Shapes::IntegerShape.new(name: 'MaxUploads') + Message = Shapes::StringShape.new(name: 'Message') + Metadata = Shapes::MapShape.new(name: 'Metadata') + MetadataDirective = Shapes::StringShape.new(name: 'MetadataDirective') + MetadataEntry = Shapes::StructureShape.new(name: 'MetadataEntry') + MetadataKey = Shapes::StringShape.new(name: 'MetadataKey') + MetadataValue = Shapes::StringShape.new(name: 'MetadataValue') + Metrics = Shapes::StructureShape.new(name: 'Metrics') + MetricsAndOperator = Shapes::StructureShape.new(name: 'MetricsAndOperator') + MetricsConfiguration = Shapes::StructureShape.new(name: 'MetricsConfiguration') + MetricsConfigurationList = Shapes::ListShape.new(name: 'MetricsConfigurationList', flattened: true) + MetricsFilter = Shapes::StructureShape.new(name: 'MetricsFilter') + MetricsId = Shapes::StringShape.new(name: 'MetricsId') + MetricsStatus = Shapes::StringShape.new(name: 'MetricsStatus') + Minutes = Shapes::IntegerShape.new(name: 'Minutes') + MissingMeta = Shapes::IntegerShape.new(name: 'MissingMeta') + MultipartUpload = Shapes::StructureShape.new(name: 'MultipartUpload') + MultipartUploadId = Shapes::StringShape.new(name: 'MultipartUploadId') + MultipartUploadList = Shapes::ListShape.new(name: 'MultipartUploadList', flattened: true) + NextKeyMarker = Shapes::StringShape.new(name: 'NextKeyMarker') + NextMarker = Shapes::StringShape.new(name: 'NextMarker') + NextPartNumberMarker 
= Shapes::IntegerShape.new(name: 'NextPartNumberMarker') + NextToken = Shapes::StringShape.new(name: 'NextToken') + NextUploadIdMarker = Shapes::StringShape.new(name: 'NextUploadIdMarker') + NextVersionIdMarker = Shapes::StringShape.new(name: 'NextVersionIdMarker') + NoSuchBucket = Shapes::StructureShape.new(name: 'NoSuchBucket') + NoSuchKey = Shapes::StructureShape.new(name: 'NoSuchKey') + NoSuchUpload = Shapes::StructureShape.new(name: 'NoSuchUpload') + NoncurrentVersionExpiration = Shapes::StructureShape.new(name: 'NoncurrentVersionExpiration') + NoncurrentVersionTransition = Shapes::StructureShape.new(name: 'NoncurrentVersionTransition') + NoncurrentVersionTransitionList = Shapes::ListShape.new(name: 'NoncurrentVersionTransitionList', flattened: true) + NotificationConfiguration = Shapes::StructureShape.new(name: 'NotificationConfiguration') + NotificationConfigurationDeprecated = Shapes::StructureShape.new(name: 'NotificationConfigurationDeprecated') + NotificationConfigurationFilter = Shapes::StructureShape.new(name: 'NotificationConfigurationFilter') + NotificationId = Shapes::StringShape.new(name: 'NotificationId') + Object = Shapes::StructureShape.new(name: 'Object') + ObjectAlreadyInActiveTierError = Shapes::StructureShape.new(name: 'ObjectAlreadyInActiveTierError') + ObjectAttributes = Shapes::StringShape.new(name: 'ObjectAttributes') + ObjectAttributesList = Shapes::ListShape.new(name: 'ObjectAttributesList') + ObjectCannedACL = Shapes::StringShape.new(name: 'ObjectCannedACL') + ObjectIdentifier = Shapes::StructureShape.new(name: 'ObjectIdentifier') + ObjectIdentifierList = Shapes::ListShape.new(name: 'ObjectIdentifierList', flattened: true) + ObjectKey = Shapes::StringShape.new(name: 'ObjectKey') + ObjectList = Shapes::ListShape.new(name: 'ObjectList', flattened: true) + ObjectLockConfiguration = Shapes::StructureShape.new(name: 'ObjectLockConfiguration') + ObjectLockEnabled = Shapes::StringShape.new(name: 'ObjectLockEnabled') + ObjectLockEnabledForBucket = Shapes::BooleanShape.new(name: 'ObjectLockEnabledForBucket') + ObjectLockLegalHold = Shapes::StructureShape.new(name: 'ObjectLockLegalHold') + ObjectLockLegalHoldStatus = Shapes::StringShape.new(name: 'ObjectLockLegalHoldStatus') + ObjectLockMode = Shapes::StringShape.new(name: 'ObjectLockMode') + ObjectLockRetainUntilDate = Shapes::TimestampShape.new(name: 'ObjectLockRetainUntilDate', timestampFormat: "iso8601") + ObjectLockRetention = Shapes::StructureShape.new(name: 'ObjectLockRetention') + ObjectLockRetentionMode = Shapes::StringShape.new(name: 'ObjectLockRetentionMode') + ObjectLockRule = Shapes::StructureShape.new(name: 'ObjectLockRule') + ObjectLockToken = Shapes::StringShape.new(name: 'ObjectLockToken') + ObjectNotInActiveTierError = Shapes::StructureShape.new(name: 'ObjectNotInActiveTierError') + ObjectOwnership = Shapes::StringShape.new(name: 'ObjectOwnership') + ObjectPart = Shapes::StructureShape.new(name: 'ObjectPart') + ObjectSize = Shapes::IntegerShape.new(name: 'ObjectSize') + ObjectSizeGreaterThanBytes = Shapes::IntegerShape.new(name: 'ObjectSizeGreaterThanBytes') + ObjectSizeLessThanBytes = Shapes::IntegerShape.new(name: 'ObjectSizeLessThanBytes') + ObjectStorageClass = Shapes::StringShape.new(name: 'ObjectStorageClass') + ObjectVersion = Shapes::StructureShape.new(name: 'ObjectVersion') + ObjectVersionId = Shapes::StringShape.new(name: 'ObjectVersionId') + ObjectVersionList = Shapes::ListShape.new(name: 'ObjectVersionList', flattened: true) + ObjectVersionStorageClass = Shapes::StringShape.new(name: 
'ObjectVersionStorageClass') + OutputLocation = Shapes::StructureShape.new(name: 'OutputLocation') + OutputSerialization = Shapes::StructureShape.new(name: 'OutputSerialization') + Owner = Shapes::StructureShape.new(name: 'Owner') + OwnerOverride = Shapes::StringShape.new(name: 'OwnerOverride') + OwnershipControls = Shapes::StructureShape.new(name: 'OwnershipControls') + OwnershipControlsRule = Shapes::StructureShape.new(name: 'OwnershipControlsRule') + OwnershipControlsRules = Shapes::ListShape.new(name: 'OwnershipControlsRules', flattened: true) + ParquetInput = Shapes::StructureShape.new(name: 'ParquetInput') + Part = Shapes::StructureShape.new(name: 'Part') + PartNumber = Shapes::IntegerShape.new(name: 'PartNumber') + PartNumberMarker = Shapes::IntegerShape.new(name: 'PartNumberMarker') + Parts = Shapes::ListShape.new(name: 'Parts', flattened: true) + PartsCount = Shapes::IntegerShape.new(name: 'PartsCount') + PartsList = Shapes::ListShape.new(name: 'PartsList', flattened: true) + Payer = Shapes::StringShape.new(name: 'Payer') + Permission = Shapes::StringShape.new(name: 'Permission') + Policy = Shapes::StringShape.new(name: 'Policy') + PolicyStatus = Shapes::StructureShape.new(name: 'PolicyStatus') + Prefix = Shapes::StringShape.new(name: 'Prefix') + Priority = Shapes::IntegerShape.new(name: 'Priority') + Progress = Shapes::StructureShape.new(name: 'Progress') + ProgressEvent = Shapes::StructureShape.new(name: 'ProgressEvent') + Protocol = Shapes::StringShape.new(name: 'Protocol') + PublicAccessBlockConfiguration = Shapes::StructureShape.new(name: 'PublicAccessBlockConfiguration') + PutBucketAccelerateConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketAccelerateConfigurationRequest') + PutBucketAclRequest = Shapes::StructureShape.new(name: 'PutBucketAclRequest') + PutBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketAnalyticsConfigurationRequest') + PutBucketCorsRequest = Shapes::StructureShape.new(name: 'PutBucketCorsRequest') + PutBucketEncryptionRequest = Shapes::StructureShape.new(name: 'PutBucketEncryptionRequest') + PutBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketIntelligentTieringConfigurationRequest') + PutBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketInventoryConfigurationRequest') + PutBucketLifecycleConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketLifecycleConfigurationRequest') + PutBucketLifecycleRequest = Shapes::StructureShape.new(name: 'PutBucketLifecycleRequest') + PutBucketLoggingRequest = Shapes::StructureShape.new(name: 'PutBucketLoggingRequest') + PutBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketMetricsConfigurationRequest') + PutBucketNotificationConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketNotificationConfigurationRequest') + PutBucketNotificationRequest = Shapes::StructureShape.new(name: 'PutBucketNotificationRequest') + PutBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'PutBucketOwnershipControlsRequest') + PutBucketPolicyRequest = Shapes::StructureShape.new(name: 'PutBucketPolicyRequest') + PutBucketReplicationRequest = Shapes::StructureShape.new(name: 'PutBucketReplicationRequest') + PutBucketRequestPaymentRequest = Shapes::StructureShape.new(name: 'PutBucketRequestPaymentRequest') + PutBucketTaggingRequest = Shapes::StructureShape.new(name: 'PutBucketTaggingRequest') + PutBucketVersioningRequest = Shapes::StructureShape.new(name: 
'PutBucketVersioningRequest') + PutBucketWebsiteRequest = Shapes::StructureShape.new(name: 'PutBucketWebsiteRequest') + PutObjectAclOutput = Shapes::StructureShape.new(name: 'PutObjectAclOutput') + PutObjectAclRequest = Shapes::StructureShape.new(name: 'PutObjectAclRequest') + PutObjectLegalHoldOutput = Shapes::StructureShape.new(name: 'PutObjectLegalHoldOutput') + PutObjectLegalHoldRequest = Shapes::StructureShape.new(name: 'PutObjectLegalHoldRequest') + PutObjectLockConfigurationOutput = Shapes::StructureShape.new(name: 'PutObjectLockConfigurationOutput') + PutObjectLockConfigurationRequest = Shapes::StructureShape.new(name: 'PutObjectLockConfigurationRequest') + PutObjectOutput = Shapes::StructureShape.new(name: 'PutObjectOutput') + PutObjectRequest = Shapes::StructureShape.new(name: 'PutObjectRequest') + PutObjectRetentionOutput = Shapes::StructureShape.new(name: 'PutObjectRetentionOutput') + PutObjectRetentionRequest = Shapes::StructureShape.new(name: 'PutObjectRetentionRequest') + PutObjectTaggingOutput = Shapes::StructureShape.new(name: 'PutObjectTaggingOutput') + PutObjectTaggingRequest = Shapes::StructureShape.new(name: 'PutObjectTaggingRequest') + PutPublicAccessBlockRequest = Shapes::StructureShape.new(name: 'PutPublicAccessBlockRequest') + QueueArn = Shapes::StringShape.new(name: 'QueueArn') + QueueConfiguration = Shapes::StructureShape.new(name: 'QueueConfiguration') + QueueConfigurationDeprecated = Shapes::StructureShape.new(name: 'QueueConfigurationDeprecated') + QueueConfigurationList = Shapes::ListShape.new(name: 'QueueConfigurationList', flattened: true) + Quiet = Shapes::BooleanShape.new(name: 'Quiet') + QuoteCharacter = Shapes::StringShape.new(name: 'QuoteCharacter') + QuoteEscapeCharacter = Shapes::StringShape.new(name: 'QuoteEscapeCharacter') + QuoteFields = Shapes::StringShape.new(name: 'QuoteFields') + Range = Shapes::StringShape.new(name: 'Range') + RecordDelimiter = Shapes::StringShape.new(name: 'RecordDelimiter') + RecordsEvent = Shapes::StructureShape.new(name: 'RecordsEvent') + Redirect = Shapes::StructureShape.new(name: 'Redirect') + RedirectAllRequestsTo = Shapes::StructureShape.new(name: 'RedirectAllRequestsTo') + ReplaceKeyPrefixWith = Shapes::StringShape.new(name: 'ReplaceKeyPrefixWith') + ReplaceKeyWith = Shapes::StringShape.new(name: 'ReplaceKeyWith') + ReplicaKmsKeyID = Shapes::StringShape.new(name: 'ReplicaKmsKeyID') + ReplicaModifications = Shapes::StructureShape.new(name: 'ReplicaModifications') + ReplicaModificationsStatus = Shapes::StringShape.new(name: 'ReplicaModificationsStatus') + ReplicationConfiguration = Shapes::StructureShape.new(name: 'ReplicationConfiguration') + ReplicationRule = Shapes::StructureShape.new(name: 'ReplicationRule') + ReplicationRuleAndOperator = Shapes::StructureShape.new(name: 'ReplicationRuleAndOperator') + ReplicationRuleFilter = Shapes::StructureShape.new(name: 'ReplicationRuleFilter') + ReplicationRuleStatus = Shapes::StringShape.new(name: 'ReplicationRuleStatus') + ReplicationRules = Shapes::ListShape.new(name: 'ReplicationRules', flattened: true) + ReplicationStatus = Shapes::StringShape.new(name: 'ReplicationStatus') + ReplicationTime = Shapes::StructureShape.new(name: 'ReplicationTime') + ReplicationTimeStatus = Shapes::StringShape.new(name: 'ReplicationTimeStatus') + ReplicationTimeValue = Shapes::StructureShape.new(name: 'ReplicationTimeValue') + RequestCharged = Shapes::StringShape.new(name: 'RequestCharged') + RequestPayer = Shapes::StringShape.new(name: 'RequestPayer') + RequestPaymentConfiguration = 
Shapes::StructureShape.new(name: 'RequestPaymentConfiguration') + RequestProgress = Shapes::StructureShape.new(name: 'RequestProgress') + RequestRoute = Shapes::StringShape.new(name: 'RequestRoute') + RequestToken = Shapes::StringShape.new(name: 'RequestToken') + ResponseCacheControl = Shapes::StringShape.new(name: 'ResponseCacheControl') + ResponseContentDisposition = Shapes::StringShape.new(name: 'ResponseContentDisposition') + ResponseContentEncoding = Shapes::StringShape.new(name: 'ResponseContentEncoding') + ResponseContentLanguage = Shapes::StringShape.new(name: 'ResponseContentLanguage') + ResponseContentType = Shapes::StringShape.new(name: 'ResponseContentType') + ResponseExpires = Shapes::TimestampShape.new(name: 'ResponseExpires', timestampFormat: "rfc822") + Restore = Shapes::StringShape.new(name: 'Restore') + RestoreObjectOutput = Shapes::StructureShape.new(name: 'RestoreObjectOutput') + RestoreObjectRequest = Shapes::StructureShape.new(name: 'RestoreObjectRequest') + RestoreOutputPath = Shapes::StringShape.new(name: 'RestoreOutputPath') + RestoreRequest = Shapes::StructureShape.new(name: 'RestoreRequest') + RestoreRequestType = Shapes::StringShape.new(name: 'RestoreRequestType') + Role = Shapes::StringShape.new(name: 'Role') + RoutingRule = Shapes::StructureShape.new(name: 'RoutingRule') + RoutingRules = Shapes::ListShape.new(name: 'RoutingRules') + Rule = Shapes::StructureShape.new(name: 'Rule') + Rules = Shapes::ListShape.new(name: 'Rules', flattened: true) + S3KeyFilter = Shapes::StructureShape.new(name: 'S3KeyFilter') + S3Location = Shapes::StructureShape.new(name: 'S3Location') + SSECustomerAlgorithm = Shapes::StringShape.new(name: 'SSECustomerAlgorithm') + SSECustomerKey = Shapes::StringShape.new(name: 'SSECustomerKey') + SSECustomerKeyMD5 = Shapes::StringShape.new(name: 'SSECustomerKeyMD5') + SSEKMS = Shapes::StructureShape.new(name: 'SSEKMS') + SSEKMSEncryptionContext = Shapes::StringShape.new(name: 'SSEKMSEncryptionContext') + SSEKMSKeyId = Shapes::StringShape.new(name: 'SSEKMSKeyId') + SSES3 = Shapes::StructureShape.new(name: 'SSES3') + ScanRange = Shapes::StructureShape.new(name: 'ScanRange') + SelectObjectContentEventStream = Shapes::StructureShape.new(name: 'SelectObjectContentEventStream') + SelectObjectContentOutput = Shapes::StructureShape.new(name: 'SelectObjectContentOutput') + SelectObjectContentRequest = Shapes::StructureShape.new(name: 'SelectObjectContentRequest') + SelectParameters = Shapes::StructureShape.new(name: 'SelectParameters') + ServerSideEncryption = Shapes::StringShape.new(name: 'ServerSideEncryption') + ServerSideEncryptionByDefault = Shapes::StructureShape.new(name: 'ServerSideEncryptionByDefault') + ServerSideEncryptionConfiguration = Shapes::StructureShape.new(name: 'ServerSideEncryptionConfiguration') + ServerSideEncryptionRule = Shapes::StructureShape.new(name: 'ServerSideEncryptionRule') + ServerSideEncryptionRules = Shapes::ListShape.new(name: 'ServerSideEncryptionRules', flattened: true) + Setting = Shapes::BooleanShape.new(name: 'Setting') + Size = Shapes::IntegerShape.new(name: 'Size') + SkipValidation = Shapes::BooleanShape.new(name: 'SkipValidation') + SourceSelectionCriteria = Shapes::StructureShape.new(name: 'SourceSelectionCriteria') + SseKmsEncryptedObjects = Shapes::StructureShape.new(name: 'SseKmsEncryptedObjects') + SseKmsEncryptedObjectsStatus = Shapes::StringShape.new(name: 'SseKmsEncryptedObjectsStatus') + Start = Shapes::IntegerShape.new(name: 'Start') + StartAfter = Shapes::StringShape.new(name: 'StartAfter') + Stats = 
Shapes::StructureShape.new(name: 'Stats') + StatsEvent = Shapes::StructureShape.new(name: 'StatsEvent') + StorageClass = Shapes::StringShape.new(name: 'StorageClass') + StorageClassAnalysis = Shapes::StructureShape.new(name: 'StorageClassAnalysis') + StorageClassAnalysisDataExport = Shapes::StructureShape.new(name: 'StorageClassAnalysisDataExport') + StorageClassAnalysisSchemaVersion = Shapes::StringShape.new(name: 'StorageClassAnalysisSchemaVersion') + Suffix = Shapes::StringShape.new(name: 'Suffix') + Tag = Shapes::StructureShape.new(name: 'Tag') + TagCount = Shapes::IntegerShape.new(name: 'TagCount') + TagSet = Shapes::ListShape.new(name: 'TagSet') + Tagging = Shapes::StructureShape.new(name: 'Tagging') + TaggingDirective = Shapes::StringShape.new(name: 'TaggingDirective') + TaggingHeader = Shapes::StringShape.new(name: 'TaggingHeader') + TargetBucket = Shapes::StringShape.new(name: 'TargetBucket') + TargetGrant = Shapes::StructureShape.new(name: 'TargetGrant') + TargetGrants = Shapes::ListShape.new(name: 'TargetGrants') + TargetPrefix = Shapes::StringShape.new(name: 'TargetPrefix') + Tier = Shapes::StringShape.new(name: 'Tier') + Tiering = Shapes::StructureShape.new(name: 'Tiering') + TieringList = Shapes::ListShape.new(name: 'TieringList', flattened: true) + Token = Shapes::StringShape.new(name: 'Token') + TopicArn = Shapes::StringShape.new(name: 'TopicArn') + TopicConfiguration = Shapes::StructureShape.new(name: 'TopicConfiguration') + TopicConfigurationDeprecated = Shapes::StructureShape.new(name: 'TopicConfigurationDeprecated') + TopicConfigurationList = Shapes::ListShape.new(name: 'TopicConfigurationList', flattened: true) + Transition = Shapes::StructureShape.new(name: 'Transition') + TransitionList = Shapes::ListShape.new(name: 'TransitionList', flattened: true) + TransitionStorageClass = Shapes::StringShape.new(name: 'TransitionStorageClass') + Type = Shapes::StringShape.new(name: 'Type') + URI = Shapes::StringShape.new(name: 'URI') + UploadIdMarker = Shapes::StringShape.new(name: 'UploadIdMarker') + UploadPartCopyOutput = Shapes::StructureShape.new(name: 'UploadPartCopyOutput') + UploadPartCopyRequest = Shapes::StructureShape.new(name: 'UploadPartCopyRequest') + UploadPartOutput = Shapes::StructureShape.new(name: 'UploadPartOutput') + UploadPartRequest = Shapes::StructureShape.new(name: 'UploadPartRequest') + UserMetadata = Shapes::ListShape.new(name: 'UserMetadata') + Value = Shapes::StringShape.new(name: 'Value') + VersionCount = Shapes::IntegerShape.new(name: 'VersionCount') + VersionIdMarker = Shapes::StringShape.new(name: 'VersionIdMarker') + VersioningConfiguration = Shapes::StructureShape.new(name: 'VersioningConfiguration') + WebsiteConfiguration = Shapes::StructureShape.new(name: 'WebsiteConfiguration') + WebsiteRedirectLocation = Shapes::StringShape.new(name: 'WebsiteRedirectLocation') + WriteGetObjectResponseRequest = Shapes::StructureShape.new(name: 'WriteGetObjectResponseRequest') + Years = Shapes::IntegerShape.new(name: 'Years') + + AbortIncompleteMultipartUpload.add_member(:days_after_initiation, Shapes::ShapeRef.new(shape: DaysAfterInitiation, location_name: "DaysAfterInitiation")) + AbortIncompleteMultipartUpload.struct_class = Types::AbortIncompleteMultipartUpload + + AbortMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + AbortMultipartUploadOutput.struct_class = Types::AbortMultipartUploadOutput + + AbortMultipartUploadRequest.add_member(:bucket, 
Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + AbortMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + AbortMultipartUploadRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) + AbortMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + AbortMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + AbortMultipartUploadRequest.struct_class = Types::AbortMultipartUploadRequest + + AccelerateConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: BucketAccelerateStatus, location_name: "Status")) + AccelerateConfiguration.struct_class = Types::AccelerateConfiguration + + AccessControlPolicy.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList")) + AccessControlPolicy.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + AccessControlPolicy.struct_class = Types::AccessControlPolicy + + AccessControlTranslation.add_member(:owner, Shapes::ShapeRef.new(shape: OwnerOverride, required: true, location_name: "Owner")) + AccessControlTranslation.struct_class = Types::AccessControlTranslation + + AllowedHeaders.member = Shapes::ShapeRef.new(shape: AllowedHeader) + + AllowedMethods.member = Shapes::ShapeRef.new(shape: AllowedMethod) + + AllowedOrigins.member = Shapes::ShapeRef.new(shape: AllowedOrigin) + + AnalyticsAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + AnalyticsAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true})) + AnalyticsAndOperator.struct_class = Types::AnalyticsAndOperator + + AnalyticsConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location_name: "Id")) + AnalyticsConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: AnalyticsFilter, location_name: "Filter")) + AnalyticsConfiguration.add_member(:storage_class_analysis, Shapes::ShapeRef.new(shape: StorageClassAnalysis, required: true, location_name: "StorageClassAnalysis")) + AnalyticsConfiguration.struct_class = Types::AnalyticsConfiguration + + AnalyticsConfigurationList.member = Shapes::ShapeRef.new(shape: AnalyticsConfiguration) + + AnalyticsExportDestination.add_member(:s3_bucket_destination, Shapes::ShapeRef.new(shape: AnalyticsS3BucketDestination, required: true, location_name: "S3BucketDestination")) + AnalyticsExportDestination.struct_class = Types::AnalyticsExportDestination + + AnalyticsFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + AnalyticsFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag")) + AnalyticsFilter.add_member(:and, Shapes::ShapeRef.new(shape: AnalyticsAndOperator, location_name: "And")) + AnalyticsFilter.struct_class = Types::AnalyticsFilter + + AnalyticsS3BucketDestination.add_member(:format, Shapes::ShapeRef.new(shape: AnalyticsS3ExportFileFormat, required: true, location_name: "Format")) + AnalyticsS3BucketDestination.add_member(:bucket_account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "BucketAccountId")) + 
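# (Editorial sketch: these add_member calls bind each Ruby parameter to a wire
+ # location. `location: "uri"` fills a placeholder in the request path,
+ # "querystring" adds a query parameter, "header" sets an HTTP header, and
+ # members with no location are serialized into the XML body; `required: true`
+ # is validated client-side before the request is built. For instance, with
+ # the AbortMultipartUploadRequest refs above, a call like
+ #   s3.abort_multipart_upload(bucket: "b", key: "k", upload_id: "id")
+ # (where s3 is an Aws::S3::Client and the values are placeholders) becomes
+ # roughly `DELETE /k?uploadId=id` against bucket "b"'s endpoint, with
+ # x-amz-request-payer / x-amz-expected-bucket-owner sent as headers if set.) +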
AnalyticsS3BucketDestination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket")) + AnalyticsS3BucketDestination.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + AnalyticsS3BucketDestination.struct_class = Types::AnalyticsS3BucketDestination + + Bucket.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name")) + Bucket.add_member(:creation_date, Shapes::ShapeRef.new(shape: CreationDate, location_name: "CreationDate")) + Bucket.struct_class = Types::Bucket + + BucketAlreadyExists.struct_class = Types::BucketAlreadyExists + + BucketAlreadyOwnedByYou.struct_class = Types::BucketAlreadyOwnedByYou + + BucketLifecycleConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: LifecycleRules, required: true, location_name: "Rule")) + BucketLifecycleConfiguration.struct_class = Types::BucketLifecycleConfiguration + + BucketLoggingStatus.add_member(:logging_enabled, Shapes::ShapeRef.new(shape: LoggingEnabled, location_name: "LoggingEnabled")) + BucketLoggingStatus.struct_class = Types::BucketLoggingStatus + + Buckets.member = Shapes::ShapeRef.new(shape: Bucket, location_name: "Bucket") + + CORSConfiguration.add_member(:cors_rules, Shapes::ShapeRef.new(shape: CORSRules, required: true, location_name: "CORSRule")) + CORSConfiguration.struct_class = Types::CORSConfiguration + + CORSRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + CORSRule.add_member(:allowed_headers, Shapes::ShapeRef.new(shape: AllowedHeaders, location_name: "AllowedHeader")) + CORSRule.add_member(:allowed_methods, Shapes::ShapeRef.new(shape: AllowedMethods, required: true, location_name: "AllowedMethod")) + CORSRule.add_member(:allowed_origins, Shapes::ShapeRef.new(shape: AllowedOrigins, required: true, location_name: "AllowedOrigin")) + CORSRule.add_member(:expose_headers, Shapes::ShapeRef.new(shape: ExposeHeaders, location_name: "ExposeHeader")) + CORSRule.add_member(:max_age_seconds, Shapes::ShapeRef.new(shape: MaxAgeSeconds, location_name: "MaxAgeSeconds")) + CORSRule.struct_class = Types::CORSRule + + CORSRules.member = Shapes::ShapeRef.new(shape: CORSRule) + + CSVInput.add_member(:file_header_info, Shapes::ShapeRef.new(shape: FileHeaderInfo, location_name: "FileHeaderInfo")) + CSVInput.add_member(:comments, Shapes::ShapeRef.new(shape: Comments, location_name: "Comments")) + CSVInput.add_member(:quote_escape_character, Shapes::ShapeRef.new(shape: QuoteEscapeCharacter, location_name: "QuoteEscapeCharacter")) + CSVInput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter")) + CSVInput.add_member(:field_delimiter, Shapes::ShapeRef.new(shape: FieldDelimiter, location_name: "FieldDelimiter")) + CSVInput.add_member(:quote_character, Shapes::ShapeRef.new(shape: QuoteCharacter, location_name: "QuoteCharacter")) + CSVInput.add_member(:allow_quoted_record_delimiter, Shapes::ShapeRef.new(shape: AllowQuotedRecordDelimiter, location_name: "AllowQuotedRecordDelimiter")) + CSVInput.struct_class = Types::CSVInput + + CSVOutput.add_member(:quote_fields, Shapes::ShapeRef.new(shape: QuoteFields, location_name: "QuoteFields")) + CSVOutput.add_member(:quote_escape_character, Shapes::ShapeRef.new(shape: QuoteEscapeCharacter, location_name: "QuoteEscapeCharacter")) + CSVOutput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter")) + CSVOutput.add_member(:field_delimiter, Shapes::ShapeRef.new(shape: 
FieldDelimiter, location_name: "FieldDelimiter")) + CSVOutput.add_member(:quote_character, Shapes::ShapeRef.new(shape: QuoteCharacter, location_name: "QuoteCharacter")) + CSVOutput.struct_class = Types::CSVOutput + + Checksum.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + Checksum.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + Checksum.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + Checksum.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + Checksum.struct_class = Types::Checksum + + ChecksumAlgorithmList.member = Shapes::ShapeRef.new(shape: ChecksumAlgorithm) + + CloudFunctionConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + CloudFunctionConfiguration.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event")) + CloudFunctionConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event")) + CloudFunctionConfiguration.add_member(:cloud_function, Shapes::ShapeRef.new(shape: CloudFunction, location_name: "CloudFunction")) + CloudFunctionConfiguration.add_member(:invocation_role, Shapes::ShapeRef.new(shape: CloudFunctionInvocationRole, location_name: "InvocationRole")) + CloudFunctionConfiguration.struct_class = Types::CloudFunctionConfiguration + + CommonPrefix.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + CommonPrefix.struct_class = Types::CommonPrefix + + CommonPrefixList.member = Shapes::ShapeRef.new(shape: CommonPrefix) + + CompleteMultipartUploadOutput.add_member(:location, Shapes::ShapeRef.new(shape: Location, location_name: "Location")) + CompleteMultipartUploadOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket")) + CompleteMultipartUploadOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + CompleteMultipartUploadOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) + CompleteMultipartUploadOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + CompleteMultipartUploadOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + CompleteMultipartUploadOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + CompleteMultipartUploadOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + CompleteMultipartUploadOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + CompleteMultipartUploadOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + CompleteMultipartUploadOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + CompleteMultipartUploadOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + CompleteMultipartUploadOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", 
location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + CompleteMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + CompleteMultipartUploadOutput.struct_class = Types::CompleteMultipartUploadOutput + + CompleteMultipartUploadRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + CompleteMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + CompleteMultipartUploadRequest.add_member(:multipart_upload, Shapes::ShapeRef.new(shape: CompletedMultipartUpload, location_name: "CompleteMultipartUpload", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + CompleteMultipartUploadRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) + CompleteMultipartUploadRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + CompleteMultipartUploadRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + CompleteMultipartUploadRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + CompleteMultipartUploadRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + CompleteMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + CompleteMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + CompleteMultipartUploadRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + CompleteMultipartUploadRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + CompleteMultipartUploadRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + CompleteMultipartUploadRequest.struct_class = Types::CompleteMultipartUploadRequest + CompleteMultipartUploadRequest[:payload] = :multipart_upload + CompleteMultipartUploadRequest[:payload_member] = CompleteMultipartUploadRequest.member(:multipart_upload) + + CompletedMultipartUpload.add_member(:parts, Shapes::ShapeRef.new(shape: CompletedPartList, location_name: "Part")) + CompletedMultipartUpload.struct_class = Types::CompletedMultipartUpload + + CompletedPart.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + CompletedPart.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + CompletedPart.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + CompletedPart.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: 
"ChecksumSHA1")) + CompletedPart.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + CompletedPart.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber")) + CompletedPart.struct_class = Types::CompletedPart + + CompletedPartList.member = Shapes::ShapeRef.new(shape: CompletedPart) + + Condition.add_member(:http_error_code_returned_equals, Shapes::ShapeRef.new(shape: HttpErrorCodeReturnedEquals, location_name: "HttpErrorCodeReturnedEquals")) + Condition.add_member(:key_prefix_equals, Shapes::ShapeRef.new(shape: KeyPrefixEquals, location_name: "KeyPrefixEquals")) + Condition.struct_class = Types::Condition + + ContinuationEvent.struct_class = Types::ContinuationEvent + + CopyObjectOutput.add_member(:copy_object_result, Shapes::ShapeRef.new(shape: CopyObjectResult, location_name: "CopyObjectResult")) + CopyObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) + CopyObjectOutput.add_member(:copy_source_version_id, Shapes::ShapeRef.new(shape: CopySourceVersionId, location: "header", location_name: "x-amz-copy-source-version-id")) + CopyObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + CopyObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + CopyObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + CopyObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + CopyObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + CopyObjectOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + CopyObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + CopyObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + CopyObjectOutput.struct_class = Types::CopyObjectOutput + CopyObjectOutput[:payload] = :copy_object_result + CopyObjectOutput[:payload_member] = CopyObjectOutput.member(:copy_object_result) + + CopyObjectRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl")) + CopyObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + CopyObjectRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) + CopyObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm")) + CopyObjectRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", 
location_name: "Content-Disposition")) + CopyObjectRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) + CopyObjectRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) + CopyObjectRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) + CopyObjectRequest.add_member(:copy_source, Shapes::ShapeRef.new(shape: CopySource, required: true, location: "header", location_name: "x-amz-copy-source")) + CopyObjectRequest.add_member(:copy_source_if_match, Shapes::ShapeRef.new(shape: CopySourceIfMatch, location: "header", location_name: "x-amz-copy-source-if-match")) + CopyObjectRequest.add_member(:copy_source_if_modified_since, Shapes::ShapeRef.new(shape: CopySourceIfModifiedSince, location: "header", location_name: "x-amz-copy-source-if-modified-since")) + CopyObjectRequest.add_member(:copy_source_if_none_match, Shapes::ShapeRef.new(shape: CopySourceIfNoneMatch, location: "header", location_name: "x-amz-copy-source-if-none-match")) + CopyObjectRequest.add_member(:copy_source_if_unmodified_since, Shapes::ShapeRef.new(shape: CopySourceIfUnmodifiedSince, location: "header", location_name: "x-amz-copy-source-if-unmodified-since")) + CopyObjectRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) + CopyObjectRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + CopyObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + CopyObjectRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + CopyObjectRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + CopyObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + CopyObjectRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + CopyObjectRequest.add_member(:metadata_directive, Shapes::ShapeRef.new(shape: MetadataDirective, location: "header", location_name: "x-amz-metadata-directive")) + CopyObjectRequest.add_member(:tagging_directive, Shapes::ShapeRef.new(shape: TaggingDirective, location: "header", location_name: "x-amz-tagging-directive")) + CopyObjectRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + CopyObjectRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) + CopyObjectRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) + CopyObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + CopyObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: 
"x-amz-server-side-encryption-customer-key")) + CopyObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + CopyObjectRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + CopyObjectRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + CopyObjectRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + CopyObjectRequest.add_member(:copy_source_sse_customer_algorithm, Shapes::ShapeRef.new(shape: CopySourceSSECustomerAlgorithm, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-algorithm")) + CopyObjectRequest.add_member(:copy_source_sse_customer_key, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKey, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key")) + CopyObjectRequest.add_member(:copy_source_sse_customer_key_md5, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKeyMD5, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key-MD5")) + CopyObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + CopyObjectRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging")) + CopyObjectRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) + CopyObjectRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) + CopyObjectRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) + CopyObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + CopyObjectRequest.add_member(:expected_source_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-source-expected-bucket-owner")) + CopyObjectRequest.struct_class = Types::CopyObjectRequest + + CopyObjectResult.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + CopyObjectResult.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) + CopyObjectResult.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + CopyObjectResult.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + CopyObjectResult.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + CopyObjectResult.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + CopyObjectResult.struct_class = Types::CopyObjectResult + + CopyPartResult.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + 
CopyPartResult.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) + CopyPartResult.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + CopyPartResult.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + CopyPartResult.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + CopyPartResult.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + CopyPartResult.struct_class = Types::CopyPartResult + + CreateBucketConfiguration.add_member(:location_constraint, Shapes::ShapeRef.new(shape: BucketLocationConstraint, location_name: "LocationConstraint")) + CreateBucketConfiguration.struct_class = Types::CreateBucketConfiguration + + CreateBucketOutput.add_member(:location, Shapes::ShapeRef.new(shape: Location, location: "header", location_name: "Location")) + CreateBucketOutput.struct_class = Types::CreateBucketOutput + + CreateBucketRequest.add_member(:acl, Shapes::ShapeRef.new(shape: BucketCannedACL, location: "header", location_name: "x-amz-acl")) + CreateBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + CreateBucketRequest.add_member(:create_bucket_configuration, Shapes::ShapeRef.new(shape: CreateBucketConfiguration, location_name: "CreateBucketConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + CreateBucketRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + CreateBucketRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + CreateBucketRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + CreateBucketRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write")) + CreateBucketRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + CreateBucketRequest.add_member(:object_lock_enabled_for_bucket, Shapes::ShapeRef.new(shape: ObjectLockEnabledForBucket, location: "header", location_name: "x-amz-bucket-object-lock-enabled")) + CreateBucketRequest.add_member(:object_ownership, Shapes::ShapeRef.new(shape: ObjectOwnership, location: "header", location_name: "x-amz-object-ownership")) + CreateBucketRequest.struct_class = Types::CreateBucketRequest + CreateBucketRequest[:payload] = :create_bucket_configuration + CreateBucketRequest[:payload_member] = CreateBucketRequest.member(:create_bucket_configuration) + + CreateMultipartUploadOutput.add_member(:abort_date, Shapes::ShapeRef.new(shape: AbortDate, location: "header", location_name: "x-amz-abort-date")) + CreateMultipartUploadOutput.add_member(:abort_rule_id, Shapes::ShapeRef.new(shape: AbortRuleId, location: "header", location_name: "x-amz-abort-rule-id")) + CreateMultipartUploadOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket")) + CreateMultipartUploadOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + 
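# (Editorial sketch: on responses these refs run in reverse -- Bucket, Key and
+ # UploadId are read out of the InitiateMultipartUploadResult XML body, while
+ # header-located members such as x-amz-abort-date are lifted from the HTTP
+ # response, so `resp = s3.create_multipart_upload(bucket: "b", key: "k")`
+ # exposes `resp.upload_id` and friends as plain struct attributes.) +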
CreateMultipartUploadOutput.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId")) + CreateMultipartUploadOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + CreateMultipartUploadOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + CreateMultipartUploadOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + CreateMultipartUploadOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + CreateMultipartUploadOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + CreateMultipartUploadOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + CreateMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + CreateMultipartUploadOutput.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm")) + CreateMultipartUploadOutput.struct_class = Types::CreateMultipartUploadOutput + + CreateMultipartUploadRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl")) + CreateMultipartUploadRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + CreateMultipartUploadRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) + CreateMultipartUploadRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition")) + CreateMultipartUploadRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) + CreateMultipartUploadRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) + CreateMultipartUploadRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) + CreateMultipartUploadRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) + CreateMultipartUploadRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + CreateMultipartUploadRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + CreateMultipartUploadRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + CreateMultipartUploadRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: 
GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + CreateMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + CreateMultipartUploadRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + CreateMultipartUploadRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + CreateMultipartUploadRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) + CreateMultipartUploadRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) + CreateMultipartUploadRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + CreateMultipartUploadRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + CreateMultipartUploadRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + CreateMultipartUploadRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + CreateMultipartUploadRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + CreateMultipartUploadRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + CreateMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + CreateMultipartUploadRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging")) + CreateMultipartUploadRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) + CreateMultipartUploadRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) + CreateMultipartUploadRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) + CreateMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + CreateMultipartUploadRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm")) + CreateMultipartUploadRequest.struct_class = Types::CreateMultipartUploadRequest + + DefaultRetention.add_member(:mode, Shapes::ShapeRef.new(shape: ObjectLockRetentionMode, location_name: "Mode")) + DefaultRetention.add_member(:days, Shapes::ShapeRef.new(shape: Days, 
location_name: "Days")) + DefaultRetention.add_member(:years, Shapes::ShapeRef.new(shape: Years, location_name: "Years")) + DefaultRetention.struct_class = Types::DefaultRetention + + Delete.add_member(:objects, Shapes::ShapeRef.new(shape: ObjectIdentifierList, required: true, location_name: "Object")) + Delete.add_member(:quiet, Shapes::ShapeRef.new(shape: Quiet, location_name: "Quiet")) + Delete.struct_class = Types::Delete + + DeleteBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id")) + DeleteBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketAnalyticsConfigurationRequest.struct_class = Types::DeleteBucketAnalyticsConfigurationRequest + + DeleteBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketCorsRequest.struct_class = Types::DeleteBucketCorsRequest + + DeleteBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketEncryptionRequest.struct_class = Types::DeleteBucketEncryptionRequest + + DeleteBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id")) + DeleteBucketIntelligentTieringConfigurationRequest.struct_class = Types::DeleteBucketIntelligentTieringConfigurationRequest + + DeleteBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id")) + DeleteBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketInventoryConfigurationRequest.struct_class = Types::DeleteBucketInventoryConfigurationRequest + + DeleteBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: 
"x-amz-expected-bucket-owner")) + DeleteBucketLifecycleRequest.struct_class = Types::DeleteBucketLifecycleRequest + + DeleteBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id")) + DeleteBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketMetricsConfigurationRequest.struct_class = Types::DeleteBucketMetricsConfigurationRequest + + DeleteBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketOwnershipControlsRequest.struct_class = Types::DeleteBucketOwnershipControlsRequest + + DeleteBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketPolicyRequest.struct_class = Types::DeleteBucketPolicyRequest + + DeleteBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketReplicationRequest.struct_class = Types::DeleteBucketReplicationRequest + + DeleteBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketRequest.struct_class = Types::DeleteBucketRequest + + DeleteBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketTaggingRequest.struct_class = Types::DeleteBucketTaggingRequest + + DeleteBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteBucketWebsiteRequest.struct_class = Types::DeleteBucketWebsiteRequest + + DeleteMarkerEntry.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, 
location_name: "Owner")) + DeleteMarkerEntry.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + DeleteMarkerEntry.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId")) + DeleteMarkerEntry.add_member(:is_latest, Shapes::ShapeRef.new(shape: IsLatest, location_name: "IsLatest")) + DeleteMarkerEntry.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) + DeleteMarkerEntry.struct_class = Types::DeleteMarkerEntry + + DeleteMarkerReplication.add_member(:status, Shapes::ShapeRef.new(shape: DeleteMarkerReplicationStatus, location_name: "Status")) + DeleteMarkerReplication.struct_class = Types::DeleteMarkerReplication + + DeleteMarkers.member = Shapes::ShapeRef.new(shape: DeleteMarkerEntry) + + DeleteObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker")) + DeleteObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + DeleteObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + DeleteObjectOutput.struct_class = Types::DeleteObjectOutput + + DeleteObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + DeleteObjectRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa")) + DeleteObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + DeleteObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + DeleteObjectRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention")) + DeleteObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteObjectRequest.struct_class = Types::DeleteObjectRequest + + DeleteObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + DeleteObjectTaggingOutput.struct_class = Types::DeleteObjectTaggingOutput + + DeleteObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + DeleteObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + DeleteObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteObjectTaggingRequest.struct_class = Types::DeleteObjectTaggingRequest + + DeleteObjectsOutput.add_member(:deleted, Shapes::ShapeRef.new(shape: DeletedObjects, 
location_name: "Deleted")) + DeleteObjectsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + DeleteObjectsOutput.add_member(:errors, Shapes::ShapeRef.new(shape: Errors, location_name: "Error")) + DeleteObjectsOutput.struct_class = Types::DeleteObjectsOutput + + DeleteObjectsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeleteObjectsRequest.add_member(:delete, Shapes::ShapeRef.new(shape: Delete, required: true, location_name: "Delete", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + DeleteObjectsRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa")) + DeleteObjectsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + DeleteObjectsRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention")) + DeleteObjectsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeleteObjectsRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + DeleteObjectsRequest.struct_class = Types::DeleteObjectsRequest + DeleteObjectsRequest[:payload] = :delete + DeleteObjectsRequest[:payload_member] = DeleteObjectsRequest.member(:delete) + + DeletePublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + DeletePublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + DeletePublicAccessBlockRequest.struct_class = Types::DeletePublicAccessBlockRequest + + DeletedObject.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + DeletedObject.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId")) + DeletedObject.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location_name: "DeleteMarker")) + DeletedObject.add_member(:delete_marker_version_id, Shapes::ShapeRef.new(shape: DeleteMarkerVersionId, location_name: "DeleteMarkerVersionId")) + DeletedObject.struct_class = Types::DeletedObject + + DeletedObjects.member = Shapes::ShapeRef.new(shape: DeletedObject) + + Destination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket")) + Destination.add_member(:account, Shapes::ShapeRef.new(shape: AccountId, location_name: "Account")) + Destination.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + Destination.add_member(:access_control_translation, Shapes::ShapeRef.new(shape: AccessControlTranslation, location_name: "AccessControlTranslation")) + Destination.add_member(:encryption_configuration, Shapes::ShapeRef.new(shape: EncryptionConfiguration, location_name: "EncryptionConfiguration")) + Destination.add_member(:replication_time, Shapes::ShapeRef.new(shape: ReplicationTime, location_name: "ReplicationTime")) + 
Destination.add_member(:metrics, Shapes::ShapeRef.new(shape: Metrics, location_name: "Metrics")) + Destination.struct_class = Types::Destination + + Encryption.add_member(:encryption_type, Shapes::ShapeRef.new(shape: ServerSideEncryption, required: true, location_name: "EncryptionType")) + Encryption.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location_name: "KMSKeyId")) + Encryption.add_member(:kms_context, Shapes::ShapeRef.new(shape: KMSContext, location_name: "KMSContext")) + Encryption.struct_class = Types::Encryption + + EncryptionConfiguration.add_member(:replica_kms_key_id, Shapes::ShapeRef.new(shape: ReplicaKmsKeyID, location_name: "ReplicaKmsKeyID")) + EncryptionConfiguration.struct_class = Types::EncryptionConfiguration + + EndEvent.struct_class = Types::EndEvent + + Error.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + Error.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId")) + Error.add_member(:code, Shapes::ShapeRef.new(shape: Code, location_name: "Code")) + Error.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message")) + Error.struct_class = Types::Error + + ErrorDocument.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key")) + ErrorDocument.struct_class = Types::ErrorDocument + + Errors.member = Shapes::ShapeRef.new(shape: Error) + + EventBridgeConfiguration.struct_class = Types::EventBridgeConfiguration + + EventList.member = Shapes::ShapeRef.new(shape: Event) + + ExistingObjectReplication.add_member(:status, Shapes::ShapeRef.new(shape: ExistingObjectReplicationStatus, required: true, location_name: "Status")) + ExistingObjectReplication.struct_class = Types::ExistingObjectReplication + + ExposeHeaders.member = Shapes::ShapeRef.new(shape: ExposeHeader) + + FilterRule.add_member(:name, Shapes::ShapeRef.new(shape: FilterRuleName, location_name: "Name")) + FilterRule.add_member(:value, Shapes::ShapeRef.new(shape: FilterRuleValue, location_name: "Value")) + FilterRule.struct_class = Types::FilterRule + + FilterRuleList.member = Shapes::ShapeRef.new(shape: FilterRule) + + GetBucketAccelerateConfigurationOutput.add_member(:status, Shapes::ShapeRef.new(shape: BucketAccelerateStatus, location_name: "Status")) + GetBucketAccelerateConfigurationOutput.struct_class = Types::GetBucketAccelerateConfigurationOutput + + GetBucketAccelerateConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketAccelerateConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketAccelerateConfigurationRequest.struct_class = Types::GetBucketAccelerateConfigurationRequest + + GetBucketAclOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + GetBucketAclOutput.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList")) + GetBucketAclOutput.struct_class = Types::GetBucketAclOutput + + GetBucketAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) 
+ GetBucketAclRequest.struct_class = Types::GetBucketAclRequest + + GetBucketAnalyticsConfigurationOutput.add_member(:analytics_configuration, Shapes::ShapeRef.new(shape: AnalyticsConfiguration, location_name: "AnalyticsConfiguration")) + GetBucketAnalyticsConfigurationOutput.struct_class = Types::GetBucketAnalyticsConfigurationOutput + GetBucketAnalyticsConfigurationOutput[:payload] = :analytics_configuration + GetBucketAnalyticsConfigurationOutput[:payload_member] = GetBucketAnalyticsConfigurationOutput.member(:analytics_configuration) + + GetBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id")) + GetBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketAnalyticsConfigurationRequest.struct_class = Types::GetBucketAnalyticsConfigurationRequest + + GetBucketCorsOutput.add_member(:cors_rules, Shapes::ShapeRef.new(shape: CORSRules, location_name: "CORSRule")) + GetBucketCorsOutput.struct_class = Types::GetBucketCorsOutput + + GetBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketCorsRequest.struct_class = Types::GetBucketCorsRequest + + GetBucketEncryptionOutput.add_member(:server_side_encryption_configuration, Shapes::ShapeRef.new(shape: ServerSideEncryptionConfiguration, location_name: "ServerSideEncryptionConfiguration")) + GetBucketEncryptionOutput.struct_class = Types::GetBucketEncryptionOutput + GetBucketEncryptionOutput[:payload] = :server_side_encryption_configuration + GetBucketEncryptionOutput[:payload_member] = GetBucketEncryptionOutput.member(:server_side_encryption_configuration) + + GetBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketEncryptionRequest.struct_class = Types::GetBucketEncryptionRequest + + GetBucketIntelligentTieringConfigurationOutput.add_member(:intelligent_tiering_configuration, Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration, location_name: "IntelligentTieringConfiguration")) + GetBucketIntelligentTieringConfigurationOutput.struct_class = Types::GetBucketIntelligentTieringConfigurationOutput + GetBucketIntelligentTieringConfigurationOutput[:payload] = :intelligent_tiering_configuration + GetBucketIntelligentTieringConfigurationOutput[:payload_member] = GetBucketIntelligentTieringConfigurationOutput.member(:intelligent_tiering_configuration) + + GetBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + 
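+ # --- Editor's note (sketch): the `[:payload]` / `[:payload_member]` pair
+ # marks a single member as the entire HTTP body, which is why these Output
+ # shapes unwrap straight to one nested structure. Hypothetical example:
+ #
+ #   enc = Aws::S3::Client.new.get_bucket_encryption(bucket: "example-bucket")
+ #   enc.server_side_encryption_configuration.rules.each do |rule|
+ #     puts rule.apply_server_side_encryption_by_default.sse_algorithm
+ #   end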
GetBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id")) + GetBucketIntelligentTieringConfigurationRequest.struct_class = Types::GetBucketIntelligentTieringConfigurationRequest + + GetBucketInventoryConfigurationOutput.add_member(:inventory_configuration, Shapes::ShapeRef.new(shape: InventoryConfiguration, location_name: "InventoryConfiguration")) + GetBucketInventoryConfigurationOutput.struct_class = Types::GetBucketInventoryConfigurationOutput + GetBucketInventoryConfigurationOutput[:payload] = :inventory_configuration + GetBucketInventoryConfigurationOutput[:payload_member] = GetBucketInventoryConfigurationOutput.member(:inventory_configuration) + + GetBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id")) + GetBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketInventoryConfigurationRequest.struct_class = Types::GetBucketInventoryConfigurationRequest + + GetBucketLifecycleConfigurationOutput.add_member(:rules, Shapes::ShapeRef.new(shape: LifecycleRules, location_name: "Rule")) + GetBucketLifecycleConfigurationOutput.struct_class = Types::GetBucketLifecycleConfigurationOutput + + GetBucketLifecycleConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketLifecycleConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketLifecycleConfigurationRequest.struct_class = Types::GetBucketLifecycleConfigurationRequest + + GetBucketLifecycleOutput.add_member(:rules, Shapes::ShapeRef.new(shape: Rules, location_name: "Rule")) + GetBucketLifecycleOutput.struct_class = Types::GetBucketLifecycleOutput + + GetBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketLifecycleRequest.struct_class = Types::GetBucketLifecycleRequest + + GetBucketLocationOutput.add_member(:location_constraint, Shapes::ShapeRef.new(shape: BucketLocationConstraint, location_name: "LocationConstraint")) + GetBucketLocationOutput.struct_class = Types::GetBucketLocationOutput + + GetBucketLocationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketLocationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketLocationRequest.struct_class = Types::GetBucketLocationRequest + + GetBucketLoggingOutput.add_member(:logging_enabled, Shapes::ShapeRef.new(shape: LoggingEnabled, 
location_name: "LoggingEnabled")) + GetBucketLoggingOutput.struct_class = Types::GetBucketLoggingOutput + + GetBucketLoggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketLoggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketLoggingRequest.struct_class = Types::GetBucketLoggingRequest + + GetBucketMetricsConfigurationOutput.add_member(:metrics_configuration, Shapes::ShapeRef.new(shape: MetricsConfiguration, location_name: "MetricsConfiguration")) + GetBucketMetricsConfigurationOutput.struct_class = Types::GetBucketMetricsConfigurationOutput + GetBucketMetricsConfigurationOutput[:payload] = :metrics_configuration + GetBucketMetricsConfigurationOutput[:payload_member] = GetBucketMetricsConfigurationOutput.member(:metrics_configuration) + + GetBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id")) + GetBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketMetricsConfigurationRequest.struct_class = Types::GetBucketMetricsConfigurationRequest + + GetBucketNotificationConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketNotificationConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketNotificationConfigurationRequest.struct_class = Types::GetBucketNotificationConfigurationRequest + + GetBucketOwnershipControlsOutput.add_member(:ownership_controls, Shapes::ShapeRef.new(shape: OwnershipControls, location_name: "OwnershipControls")) + GetBucketOwnershipControlsOutput.struct_class = Types::GetBucketOwnershipControlsOutput + GetBucketOwnershipControlsOutput[:payload] = :ownership_controls + GetBucketOwnershipControlsOutput[:payload_member] = GetBucketOwnershipControlsOutput.member(:ownership_controls) + + GetBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketOwnershipControlsRequest.struct_class = Types::GetBucketOwnershipControlsRequest + + GetBucketPolicyOutput.add_member(:policy, Shapes::ShapeRef.new(shape: Policy, location_name: "Policy")) + GetBucketPolicyOutput.struct_class = Types::GetBucketPolicyOutput + GetBucketPolicyOutput[:payload] = :policy + GetBucketPolicyOutput[:payload_member] = GetBucketPolicyOutput.member(:policy) + + GetBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + 
GetBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketPolicyRequest.struct_class = Types::GetBucketPolicyRequest + + GetBucketPolicyStatusOutput.add_member(:policy_status, Shapes::ShapeRef.new(shape: PolicyStatus, location_name: "PolicyStatus")) + GetBucketPolicyStatusOutput.struct_class = Types::GetBucketPolicyStatusOutput + GetBucketPolicyStatusOutput[:payload] = :policy_status + GetBucketPolicyStatusOutput[:payload_member] = GetBucketPolicyStatusOutput.member(:policy_status) + + GetBucketPolicyStatusRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketPolicyStatusRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketPolicyStatusRequest.struct_class = Types::GetBucketPolicyStatusRequest + + GetBucketReplicationOutput.add_member(:replication_configuration, Shapes::ShapeRef.new(shape: ReplicationConfiguration, location_name: "ReplicationConfiguration")) + GetBucketReplicationOutput.struct_class = Types::GetBucketReplicationOutput + GetBucketReplicationOutput[:payload] = :replication_configuration + GetBucketReplicationOutput[:payload_member] = GetBucketReplicationOutput.member(:replication_configuration) + + GetBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketReplicationRequest.struct_class = Types::GetBucketReplicationRequest + + GetBucketRequestPaymentOutput.add_member(:payer, Shapes::ShapeRef.new(shape: Payer, location_name: "Payer")) + GetBucketRequestPaymentOutput.struct_class = Types::GetBucketRequestPaymentOutput + + GetBucketRequestPaymentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketRequestPaymentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketRequestPaymentRequest.struct_class = Types::GetBucketRequestPaymentRequest + + GetBucketTaggingOutput.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet")) + GetBucketTaggingOutput.struct_class = Types::GetBucketTaggingOutput + + GetBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketTaggingRequest.struct_class = Types::GetBucketTaggingRequest + + GetBucketVersioningOutput.add_member(:status, Shapes::ShapeRef.new(shape: BucketVersioningStatus, location_name: "Status")) + GetBucketVersioningOutput.add_member(:mfa_delete, Shapes::ShapeRef.new(shape: MFADeleteStatus, location_name: "MfaDelete")) + GetBucketVersioningOutput.struct_class = Types::GetBucketVersioningOutput + + 
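# --- Illustrative usage (editor's sketch; the bucket name is hypothetical):
+ #
+ #   s3 = Aws::S3::Client.new
+ #   resp = s3.get_bucket_versioning(bucket: "example-bucket")
+ #   resp.status     #=> "Enabled", "Suspended", or nil if never configured
+ #   resp.mfa_delete #=> "Enabled"/"Disabled" when MFA delete has been set
+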
GetBucketVersioningRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketVersioningRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketVersioningRequest.struct_class = Types::GetBucketVersioningRequest + + GetBucketWebsiteOutput.add_member(:redirect_all_requests_to, Shapes::ShapeRef.new(shape: RedirectAllRequestsTo, location_name: "RedirectAllRequestsTo")) + GetBucketWebsiteOutput.add_member(:index_document, Shapes::ShapeRef.new(shape: IndexDocument, location_name: "IndexDocument")) + GetBucketWebsiteOutput.add_member(:error_document, Shapes::ShapeRef.new(shape: ErrorDocument, location_name: "ErrorDocument")) + GetBucketWebsiteOutput.add_member(:routing_rules, Shapes::ShapeRef.new(shape: RoutingRules, location_name: "RoutingRules")) + GetBucketWebsiteOutput.struct_class = Types::GetBucketWebsiteOutput + + GetBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetBucketWebsiteRequest.struct_class = Types::GetBucketWebsiteRequest + + GetObjectAclOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + GetObjectAclOutput.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList")) + GetObjectAclOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + GetObjectAclOutput.struct_class = Types::GetObjectAclOutput + + GetObjectAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectAclRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectAclRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectAclRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectAclRequest.struct_class = Types::GetObjectAclRequest + + GetObjectAttributesOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker")) + GetObjectAttributesOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified")) + GetObjectAttributesOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + GetObjectAttributesOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + GetObjectAttributesOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + GetObjectAttributesOutput.add_member(:checksum, 
Shapes::ShapeRef.new(shape: Checksum, location_name: "Checksum")) + GetObjectAttributesOutput.add_member(:object_parts, Shapes::ShapeRef.new(shape: GetObjectAttributesParts, location_name: "ObjectParts")) + GetObjectAttributesOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + GetObjectAttributesOutput.add_member(:object_size, Shapes::ShapeRef.new(shape: ObjectSize, location_name: "ObjectSize")) + GetObjectAttributesOutput.struct_class = Types::GetObjectAttributesOutput + + GetObjectAttributesParts.add_member(:total_parts_count, Shapes::ShapeRef.new(shape: PartsCount, location_name: "PartsCount")) + GetObjectAttributesParts.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location_name: "PartNumberMarker")) + GetObjectAttributesParts.add_member(:next_part_number_marker, Shapes::ShapeRef.new(shape: NextPartNumberMarker, location_name: "NextPartNumberMarker")) + GetObjectAttributesParts.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location_name: "MaxParts")) + GetObjectAttributesParts.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + GetObjectAttributesParts.add_member(:parts, Shapes::ShapeRef.new(shape: PartsList, location_name: "Part")) + GetObjectAttributesParts.struct_class = Types::GetObjectAttributesParts + + GetObjectAttributesRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectAttributesRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectAttributesRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectAttributesRequest.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location: "header", location_name: "x-amz-max-parts")) + GetObjectAttributesRequest.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location: "header", location_name: "x-amz-part-number-marker")) + GetObjectAttributesRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + GetObjectAttributesRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + GetObjectAttributesRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + GetObjectAttributesRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectAttributesRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectAttributesRequest.add_member(:object_attributes, Shapes::ShapeRef.new(shape: ObjectAttributesList, required: true, location: "header", location_name: "x-amz-object-attributes")) + GetObjectAttributesRequest.struct_class = Types::GetObjectAttributesRequest + + GetObjectLegalHoldOutput.add_member(:legal_hold, Shapes::ShapeRef.new(shape: ObjectLockLegalHold, location_name: "LegalHold")) + GetObjectLegalHoldOutput.struct_class = 
Types::GetObjectLegalHoldOutput + GetObjectLegalHoldOutput[:payload] = :legal_hold + GetObjectLegalHoldOutput[:payload_member] = GetObjectLegalHoldOutput.member(:legal_hold) + + GetObjectLegalHoldRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectLegalHoldRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectLegalHoldRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectLegalHoldRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectLegalHoldRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectLegalHoldRequest.struct_class = Types::GetObjectLegalHoldRequest + + GetObjectLockConfigurationOutput.add_member(:object_lock_configuration, Shapes::ShapeRef.new(shape: ObjectLockConfiguration, location_name: "ObjectLockConfiguration")) + GetObjectLockConfigurationOutput.struct_class = Types::GetObjectLockConfigurationOutput + GetObjectLockConfigurationOutput[:payload] = :object_lock_configuration + GetObjectLockConfigurationOutput[:payload_member] = GetObjectLockConfigurationOutput.member(:object_lock_configuration) + + GetObjectLockConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectLockConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectLockConfigurationRequest.struct_class = Types::GetObjectLockConfigurationRequest + + GetObjectOutput.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) + GetObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker")) + GetObjectOutput.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "accept-ranges")) + GetObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) + GetObjectOutput.add_member(:restore, Shapes::ShapeRef.new(shape: Restore, location: "header", location_name: "x-amz-restore")) + GetObjectOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified")) + GetObjectOutput.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) + GetObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag")) + GetObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + GetObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + GetObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + 
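# --- Editor's note (sketch): :body above carries `"streaming"=>true`, so a
+ # GetObject response need not be buffered in memory; the client can write
+ # straight to a file or yield chunks. Paths and names are hypothetical.
+ #
+ #   s3 = Aws::S3::Client.new
+ #   s3.get_object(response_target: "/tmp/example", bucket: "example-bucket", key: "big.bin")
+ #   s3.get_object(bucket: "example-bucket", key: "big.bin") { |chunk| $stdout.write(chunk) }
+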
GetObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + GetObjectOutput.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-missing-meta")) + GetObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + GetObjectOutput.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) + GetObjectOutput.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition")) + GetObjectOutput.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) + GetObjectOutput.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) + GetObjectOutput.add_member(:content_range, Shapes::ShapeRef.new(shape: ContentRange, location: "header", location_name: "Content-Range")) + GetObjectOutput.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) + GetObjectOutput.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) + GetObjectOutput.add_member(:expires_string, Shapes::ShapeRef.new(shape: ExpiresString, location: "header", location_name: "Expires")) + GetObjectOutput.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) + GetObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + GetObjectOutput.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + GetObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + GetObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + GetObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + GetObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + GetObjectOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) + GetObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + GetObjectOutput.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-replication-status")) + GetObjectOutput.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-mp-parts-count")) + GetObjectOutput.add_member(:tag_count, Shapes::ShapeRef.new(shape: TagCount, location: "header", location_name: "x-amz-tagging-count")) + 
GetObjectOutput.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) + GetObjectOutput.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) + GetObjectOutput.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) + GetObjectOutput.struct_class = Types::GetObjectOutput + GetObjectOutput[:payload] = :body + GetObjectOutput[:payload_member] = GetObjectOutput.member(:body) + + GetObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match")) + GetObjectRequest.add_member(:if_modified_since, Shapes::ShapeRef.new(shape: IfModifiedSince, location: "header", location_name: "If-Modified-Since")) + GetObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match")) + GetObjectRequest.add_member(:if_unmodified_since, Shapes::ShapeRef.new(shape: IfUnmodifiedSince, location: "header", location_name: "If-Unmodified-Since")) + GetObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectRequest.add_member(:range, Shapes::ShapeRef.new(shape: Range, location: "header", location_name: "Range")) + GetObjectRequest.add_member(:response_cache_control, Shapes::ShapeRef.new(shape: ResponseCacheControl, location: "querystring", location_name: "response-cache-control")) + GetObjectRequest.add_member(:response_content_disposition, Shapes::ShapeRef.new(shape: ResponseContentDisposition, location: "querystring", location_name: "response-content-disposition")) + GetObjectRequest.add_member(:response_content_encoding, Shapes::ShapeRef.new(shape: ResponseContentEncoding, location: "querystring", location_name: "response-content-encoding")) + GetObjectRequest.add_member(:response_content_language, Shapes::ShapeRef.new(shape: ResponseContentLanguage, location: "querystring", location_name: "response-content-language")) + GetObjectRequest.add_member(:response_content_type, Shapes::ShapeRef.new(shape: ResponseContentType, location: "querystring", location_name: "response-content-type")) + GetObjectRequest.add_member(:response_expires, Shapes::ShapeRef.new(shape: ResponseExpires, location: "querystring", location_name: "response-expires")) + GetObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + GetObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + GetObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + GetObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", 
location_name: "x-amz-request-payer")) + GetObjectRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location: "querystring", location_name: "partNumber")) + GetObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectRequest.add_member(:checksum_mode, Shapes::ShapeRef.new(shape: ChecksumMode, location: "header", location_name: "x-amz-checksum-mode")) + GetObjectRequest.struct_class = Types::GetObjectRequest + + GetObjectRetentionOutput.add_member(:retention, Shapes::ShapeRef.new(shape: ObjectLockRetention, location_name: "Retention")) + GetObjectRetentionOutput.struct_class = Types::GetObjectRetentionOutput + GetObjectRetentionOutput[:payload] = :retention + GetObjectRetentionOutput[:payload_member] = GetObjectRetentionOutput.member(:retention) + + GetObjectRetentionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectRetentionRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectRetentionRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectRetentionRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectRetentionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectRetentionRequest.struct_class = Types::GetObjectRetentionRequest + + GetObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + GetObjectTaggingOutput.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet")) + GetObjectTaggingOutput.struct_class = Types::GetObjectTaggingOutput + + GetObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + GetObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectTaggingRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectTaggingRequest.struct_class = Types::GetObjectTaggingRequest + + GetObjectTorrentOutput.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) + GetObjectTorrentOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + GetObjectTorrentOutput.struct_class = Types::GetObjectTorrentOutput + GetObjectTorrentOutput[:payload] = :body + GetObjectTorrentOutput[:payload_member] = GetObjectTorrentOutput.member(:body) + + GetObjectTorrentRequest.add_member(:bucket, 
Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetObjectTorrentRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + GetObjectTorrentRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + GetObjectTorrentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetObjectTorrentRequest.struct_class = Types::GetObjectTorrentRequest + + GetPublicAccessBlockOutput.add_member(:public_access_block_configuration, Shapes::ShapeRef.new(shape: PublicAccessBlockConfiguration, location_name: "PublicAccessBlockConfiguration")) + GetPublicAccessBlockOutput.struct_class = Types::GetPublicAccessBlockOutput + GetPublicAccessBlockOutput[:payload] = :public_access_block_configuration + GetPublicAccessBlockOutput[:payload_member] = GetPublicAccessBlockOutput.member(:public_access_block_configuration) + + GetPublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + GetPublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + GetPublicAccessBlockRequest.struct_class = Types::GetPublicAccessBlockRequest + + GlacierJobParameters.add_member(:tier, Shapes::ShapeRef.new(shape: Tier, required: true, location_name: "Tier")) + GlacierJobParameters.struct_class = Types::GlacierJobParameters + + Grant.add_member(:grantee, Shapes::ShapeRef.new(shape: Grantee, location_name: "Grantee")) + Grant.add_member(:permission, Shapes::ShapeRef.new(shape: Permission, location_name: "Permission")) + Grant.struct_class = Types::Grant + + Grantee.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName")) + Grantee.add_member(:email_address, Shapes::ShapeRef.new(shape: EmailAddress, location_name: "EmailAddress")) + Grantee.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + Grantee.add_member(:type, Shapes::ShapeRef.new(shape: Type, required: true, location_name: "xsi:type", metadata: {"xmlAttribute"=>true})) + Grantee.add_member(:uri, Shapes::ShapeRef.new(shape: URI, location_name: "URI")) + Grantee.struct_class = Types::Grantee + + Grants.member = Shapes::ShapeRef.new(shape: Grant, location_name: "Grant") + + HeadBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + HeadBucketRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + HeadBucketRequest.struct_class = Types::HeadBucketRequest + + HeadObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker")) + HeadObjectOutput.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "accept-ranges")) + HeadObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) + HeadObjectOutput.add_member(:restore, Shapes::ShapeRef.new(shape: 
Restore, location: "header", location_name: "x-amz-restore")) + HeadObjectOutput.add_member(:archive_status, Shapes::ShapeRef.new(shape: ArchiveStatus, location: "header", location_name: "x-amz-archive-status")) + HeadObjectOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified")) + HeadObjectOutput.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) + HeadObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + HeadObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + HeadObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + HeadObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + HeadObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag")) + HeadObjectOutput.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-missing-meta")) + HeadObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + HeadObjectOutput.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) + HeadObjectOutput.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition")) + HeadObjectOutput.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) + HeadObjectOutput.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) + HeadObjectOutput.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) + HeadObjectOutput.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) + HeadObjectOutput.add_member(:expires_string, Shapes::ShapeRef.new(shape: ExpiresString, location: "header", location_name: "Expires")) + HeadObjectOutput.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) + HeadObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + HeadObjectOutput.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + HeadObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + HeadObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + HeadObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + 
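# --- Illustrative usage (editor's sketch): HeadObjectOutput is header-only,
+ # so head_object is the cheap way to read metadata or test existence; a
+ # missing key surfaces as Aws::S3::Errors::NotFound because a HEAD response
+ # has no error body. Bucket/key below are hypothetical.
+ #
+ #   s3 = Aws::S3::Client.new
+ #   begin
+ #     h = s3.head_object(bucket: "example-bucket", key: "report.pdf")
+ #     puts h.content_length, h.etag, h.last_modified
+ #   rescue Aws::S3::Errors::NotFound
+ #     puts "no such object"
+ #   end
+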
HeadObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + HeadObjectOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) + HeadObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + HeadObjectOutput.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-replication-status")) + HeadObjectOutput.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-mp-parts-count")) + HeadObjectOutput.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) + HeadObjectOutput.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) + HeadObjectOutput.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) + HeadObjectOutput.struct_class = Types::HeadObjectOutput + + HeadObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + HeadObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match")) + HeadObjectRequest.add_member(:if_modified_since, Shapes::ShapeRef.new(shape: IfModifiedSince, location: "header", location_name: "If-Modified-Since")) + HeadObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match")) + HeadObjectRequest.add_member(:if_unmodified_since, Shapes::ShapeRef.new(shape: IfUnmodifiedSince, location: "header", location_name: "If-Unmodified-Since")) + HeadObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + HeadObjectRequest.add_member(:range, Shapes::ShapeRef.new(shape: Range, location: "header", location_name: "Range")) + HeadObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + HeadObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + HeadObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + HeadObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + HeadObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + HeadObjectRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location: "querystring", location_name: "partNumber")) + HeadObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + 
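# [editorial sketch; not part of the generated aws-sdk-s3 source] The HeadObject
# request/output shapes assembled here back Aws::S3::Client#head_object, which binds
# each member to the HTTP header or query parameter named in its location_name.
# A minimal call, with placeholder bucket/key names:
#   s3   = Aws::S3::Client.new(region: "us-east-1")
#   resp = s3.head_object(bucket: "my-bucket", key: "path/to/object")
#   resp.content_length  # Integer, parsed from the Content-Length header
#   resp.etag            # String, from the ETag header
#   resp.metadata        # Hash of user metadata, from x-amz-meta-* headers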
HeadObjectRequest.add_member(:checksum_mode, Shapes::ShapeRef.new(shape: ChecksumMode, location: "header", location_name: "x-amz-checksum-mode")) + HeadObjectRequest.struct_class = Types::HeadObjectRequest + + IndexDocument.add_member(:suffix, Shapes::ShapeRef.new(shape: Suffix, required: true, location_name: "Suffix")) + IndexDocument.struct_class = Types::IndexDocument + + Initiator.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + Initiator.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName")) + Initiator.struct_class = Types::Initiator + + InputSerialization.add_member(:csv, Shapes::ShapeRef.new(shape: CSVInput, location_name: "CSV")) + InputSerialization.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType")) + InputSerialization.add_member(:json, Shapes::ShapeRef.new(shape: JSONInput, location_name: "JSON")) + InputSerialization.add_member(:parquet, Shapes::ShapeRef.new(shape: ParquetInput, location_name: "Parquet")) + InputSerialization.struct_class = Types::InputSerialization + + IntelligentTieringAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + IntelligentTieringAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true})) + IntelligentTieringAndOperator.struct_class = Types::IntelligentTieringAndOperator + + IntelligentTieringConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location_name: "Id")) + IntelligentTieringConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: IntelligentTieringFilter, location_name: "Filter")) + IntelligentTieringConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: IntelligentTieringStatus, required: true, location_name: "Status")) + IntelligentTieringConfiguration.add_member(:tierings, Shapes::ShapeRef.new(shape: TieringList, required: true, location_name: "Tiering")) + IntelligentTieringConfiguration.struct_class = Types::IntelligentTieringConfiguration + + IntelligentTieringConfigurationList.member = Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration) + + IntelligentTieringFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + IntelligentTieringFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag")) + IntelligentTieringFilter.add_member(:and, Shapes::ShapeRef.new(shape: IntelligentTieringAndOperator, location_name: "And")) + IntelligentTieringFilter.struct_class = Types::IntelligentTieringFilter + + InvalidObjectState.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + InvalidObjectState.add_member(:access_tier, Shapes::ShapeRef.new(shape: IntelligentTieringAccessTier, location_name: "AccessTier")) + InvalidObjectState.struct_class = Types::InvalidObjectState + + InventoryConfiguration.add_member(:destination, Shapes::ShapeRef.new(shape: InventoryDestination, required: true, location_name: "Destination")) + InventoryConfiguration.add_member(:is_enabled, Shapes::ShapeRef.new(shape: IsEnabled, required: true, location_name: "IsEnabled")) + InventoryConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: InventoryFilter, location_name: "Filter")) + InventoryConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location_name: "Id")) + InventoryConfiguration.add_member(:included_object_versions, 
Shapes::ShapeRef.new(shape: InventoryIncludedObjectVersions, required: true, location_name: "IncludedObjectVersions")) + InventoryConfiguration.add_member(:optional_fields, Shapes::ShapeRef.new(shape: InventoryOptionalFields, location_name: "OptionalFields")) + InventoryConfiguration.add_member(:schedule, Shapes::ShapeRef.new(shape: InventorySchedule, required: true, location_name: "Schedule")) + InventoryConfiguration.struct_class = Types::InventoryConfiguration + + InventoryConfigurationList.member = Shapes::ShapeRef.new(shape: InventoryConfiguration) + + InventoryDestination.add_member(:s3_bucket_destination, Shapes::ShapeRef.new(shape: InventoryS3BucketDestination, required: true, location_name: "S3BucketDestination")) + InventoryDestination.struct_class = Types::InventoryDestination + + InventoryEncryption.add_member(:sses3, Shapes::ShapeRef.new(shape: SSES3, location_name: "SSE-S3")) + InventoryEncryption.add_member(:ssekms, Shapes::ShapeRef.new(shape: SSEKMS, location_name: "SSE-KMS")) + InventoryEncryption.struct_class = Types::InventoryEncryption + + InventoryFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, required: true, location_name: "Prefix")) + InventoryFilter.struct_class = Types::InventoryFilter + + InventoryOptionalFields.member = Shapes::ShapeRef.new(shape: InventoryOptionalField, location_name: "Field") + + InventoryS3BucketDestination.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "AccountId")) + InventoryS3BucketDestination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket")) + InventoryS3BucketDestination.add_member(:format, Shapes::ShapeRef.new(shape: InventoryFormat, required: true, location_name: "Format")) + InventoryS3BucketDestination.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + InventoryS3BucketDestination.add_member(:encryption, Shapes::ShapeRef.new(shape: InventoryEncryption, location_name: "Encryption")) + InventoryS3BucketDestination.struct_class = Types::InventoryS3BucketDestination + + InventorySchedule.add_member(:frequency, Shapes::ShapeRef.new(shape: InventoryFrequency, required: true, location_name: "Frequency")) + InventorySchedule.struct_class = Types::InventorySchedule + + JSONInput.add_member(:type, Shapes::ShapeRef.new(shape: JSONType, location_name: "Type")) + JSONInput.struct_class = Types::JSONInput + + JSONOutput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter")) + JSONOutput.struct_class = Types::JSONOutput + + LambdaFunctionConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + LambdaFunctionConfiguration.add_member(:lambda_function_arn, Shapes::ShapeRef.new(shape: LambdaFunctionArn, required: true, location_name: "CloudFunction")) + LambdaFunctionConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event")) + LambdaFunctionConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter")) + LambdaFunctionConfiguration.struct_class = Types::LambdaFunctionConfiguration + + LambdaFunctionConfigurationList.member = Shapes::ShapeRef.new(shape: LambdaFunctionConfiguration) + + LifecycleConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: Rules, required: true, location_name: "Rule")) + LifecycleConfiguration.struct_class = Types::LifecycleConfiguration + + LifecycleExpiration.add_member(:date, 
Shapes::ShapeRef.new(shape: Date, location_name: "Date")) + LifecycleExpiration.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days")) + LifecycleExpiration.add_member(:expired_object_delete_marker, Shapes::ShapeRef.new(shape: ExpiredObjectDeleteMarker, location_name: "ExpiredObjectDeleteMarker")) + LifecycleExpiration.struct_class = Types::LifecycleExpiration + + LifecycleRule.add_member(:expiration, Shapes::ShapeRef.new(shape: LifecycleExpiration, location_name: "Expiration")) + LifecycleRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + LifecycleRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, deprecated: true, location_name: "Prefix")) + LifecycleRule.add_member(:filter, Shapes::ShapeRef.new(shape: LifecycleRuleFilter, location_name: "Filter")) + LifecycleRule.add_member(:status, Shapes::ShapeRef.new(shape: ExpirationStatus, required: true, location_name: "Status")) + LifecycleRule.add_member(:transitions, Shapes::ShapeRef.new(shape: TransitionList, location_name: "Transition")) + LifecycleRule.add_member(:noncurrent_version_transitions, Shapes::ShapeRef.new(shape: NoncurrentVersionTransitionList, location_name: "NoncurrentVersionTransition")) + LifecycleRule.add_member(:noncurrent_version_expiration, Shapes::ShapeRef.new(shape: NoncurrentVersionExpiration, location_name: "NoncurrentVersionExpiration")) + LifecycleRule.add_member(:abort_incomplete_multipart_upload, Shapes::ShapeRef.new(shape: AbortIncompleteMultipartUpload, location_name: "AbortIncompleteMultipartUpload")) + LifecycleRule.struct_class = Types::LifecycleRule + + LifecycleRuleAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + LifecycleRuleAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true})) + LifecycleRuleAndOperator.add_member(:object_size_greater_than, Shapes::ShapeRef.new(shape: ObjectSizeGreaterThanBytes, location_name: "ObjectSizeGreaterThan")) + LifecycleRuleAndOperator.add_member(:object_size_less_than, Shapes::ShapeRef.new(shape: ObjectSizeLessThanBytes, location_name: "ObjectSizeLessThan")) + LifecycleRuleAndOperator.struct_class = Types::LifecycleRuleAndOperator + + LifecycleRuleFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + LifecycleRuleFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag")) + LifecycleRuleFilter.add_member(:object_size_greater_than, Shapes::ShapeRef.new(shape: ObjectSizeGreaterThanBytes, location_name: "ObjectSizeGreaterThan")) + LifecycleRuleFilter.add_member(:object_size_less_than, Shapes::ShapeRef.new(shape: ObjectSizeLessThanBytes, location_name: "ObjectSizeLessThan")) + LifecycleRuleFilter.add_member(:and, Shapes::ShapeRef.new(shape: LifecycleRuleAndOperator, location_name: "And")) + LifecycleRuleFilter.struct_class = Types::LifecycleRuleFilter + + LifecycleRules.member = Shapes::ShapeRef.new(shape: LifecycleRule) + + ListBucketAnalyticsConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListBucketAnalyticsConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken")) + ListBucketAnalyticsConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken")) + ListBucketAnalyticsConfigurationsOutput.add_member(:analytics_configuration_list, 
Shapes::ShapeRef.new(shape: AnalyticsConfigurationList, location_name: "AnalyticsConfiguration")) + ListBucketAnalyticsConfigurationsOutput.struct_class = Types::ListBucketAnalyticsConfigurationsOutput + + ListBucketAnalyticsConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListBucketAnalyticsConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token")) + ListBucketAnalyticsConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListBucketAnalyticsConfigurationsRequest.struct_class = Types::ListBucketAnalyticsConfigurationsRequest + + ListBucketIntelligentTieringConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListBucketIntelligentTieringConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken")) + ListBucketIntelligentTieringConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken")) + ListBucketIntelligentTieringConfigurationsOutput.add_member(:intelligent_tiering_configuration_list, Shapes::ShapeRef.new(shape: IntelligentTieringConfigurationList, location_name: "IntelligentTieringConfiguration")) + ListBucketIntelligentTieringConfigurationsOutput.struct_class = Types::ListBucketIntelligentTieringConfigurationsOutput + + ListBucketIntelligentTieringConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListBucketIntelligentTieringConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token")) + ListBucketIntelligentTieringConfigurationsRequest.struct_class = Types::ListBucketIntelligentTieringConfigurationsRequest + + ListBucketInventoryConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken")) + ListBucketInventoryConfigurationsOutput.add_member(:inventory_configuration_list, Shapes::ShapeRef.new(shape: InventoryConfigurationList, location_name: "InventoryConfiguration")) + ListBucketInventoryConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListBucketInventoryConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken")) + ListBucketInventoryConfigurationsOutput.struct_class = Types::ListBucketInventoryConfigurationsOutput + + ListBucketInventoryConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListBucketInventoryConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token")) + ListBucketInventoryConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + 
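# [editorial sketch; not part of the generated source] The ListBucket*Configurations
# request/output pairs in this region all share one cursor pattern: send
# continuation_token, then read is_truncated and next_continuation_token from the
# output. A pagination loop for the inventory variant, placeholder bucket name:
#   token = nil
#   loop do
#     resp = s3.list_bucket_inventory_configurations(bucket: "my-bucket", continuation_token: token)
#     resp.inventory_configuration_list.each { |cfg| puts cfg.id }
#     break unless resp.is_truncated
#     token = resp.next_continuation_token
#   end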
ListBucketInventoryConfigurationsRequest.struct_class = Types::ListBucketInventoryConfigurationsRequest + + ListBucketMetricsConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListBucketMetricsConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken")) + ListBucketMetricsConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken")) + ListBucketMetricsConfigurationsOutput.add_member(:metrics_configuration_list, Shapes::ShapeRef.new(shape: MetricsConfigurationList, location_name: "MetricsConfiguration")) + ListBucketMetricsConfigurationsOutput.struct_class = Types::ListBucketMetricsConfigurationsOutput + + ListBucketMetricsConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListBucketMetricsConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token")) + ListBucketMetricsConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListBucketMetricsConfigurationsRequest.struct_class = Types::ListBucketMetricsConfigurationsRequest + + ListBucketsOutput.add_member(:buckets, Shapes::ShapeRef.new(shape: Buckets, location_name: "Buckets")) + ListBucketsOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + ListBucketsOutput.struct_class = Types::ListBucketsOutput + + ListMultipartUploadsOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket")) + ListMultipartUploadsOutput.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location_name: "KeyMarker")) + ListMultipartUploadsOutput.add_member(:upload_id_marker, Shapes::ShapeRef.new(shape: UploadIdMarker, location_name: "UploadIdMarker")) + ListMultipartUploadsOutput.add_member(:next_key_marker, Shapes::ShapeRef.new(shape: NextKeyMarker, location_name: "NextKeyMarker")) + ListMultipartUploadsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + ListMultipartUploadsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter")) + ListMultipartUploadsOutput.add_member(:next_upload_id_marker, Shapes::ShapeRef.new(shape: NextUploadIdMarker, location_name: "NextUploadIdMarker")) + ListMultipartUploadsOutput.add_member(:max_uploads, Shapes::ShapeRef.new(shape: MaxUploads, location_name: "MaxUploads")) + ListMultipartUploadsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListMultipartUploadsOutput.add_member(:uploads, Shapes::ShapeRef.new(shape: MultipartUploadList, location_name: "Upload")) + ListMultipartUploadsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes")) + ListMultipartUploadsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType")) + ListMultipartUploadsOutput.struct_class = Types::ListMultipartUploadsOutput + + ListMultipartUploadsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: 
{"contextParam"=>{"name"=>"Bucket"}})) + ListMultipartUploadsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter")) + ListMultipartUploadsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type")) + ListMultipartUploadsRequest.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location: "querystring", location_name: "key-marker")) + ListMultipartUploadsRequest.add_member(:max_uploads, Shapes::ShapeRef.new(shape: MaxUploads, location: "querystring", location_name: "max-uploads")) + ListMultipartUploadsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix")) + ListMultipartUploadsRequest.add_member(:upload_id_marker, Shapes::ShapeRef.new(shape: UploadIdMarker, location: "querystring", location_name: "upload-id-marker")) + ListMultipartUploadsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListMultipartUploadsRequest.struct_class = Types::ListMultipartUploadsRequest + + ListObjectVersionsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListObjectVersionsOutput.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location_name: "KeyMarker")) + ListObjectVersionsOutput.add_member(:version_id_marker, Shapes::ShapeRef.new(shape: VersionIdMarker, location_name: "VersionIdMarker")) + ListObjectVersionsOutput.add_member(:next_key_marker, Shapes::ShapeRef.new(shape: NextKeyMarker, location_name: "NextKeyMarker")) + ListObjectVersionsOutput.add_member(:next_version_id_marker, Shapes::ShapeRef.new(shape: NextVersionIdMarker, location_name: "NextVersionIdMarker")) + ListObjectVersionsOutput.add_member(:versions, Shapes::ShapeRef.new(shape: ObjectVersionList, location_name: "Version")) + ListObjectVersionsOutput.add_member(:delete_markers, Shapes::ShapeRef.new(shape: DeleteMarkers, location_name: "DeleteMarker")) + ListObjectVersionsOutput.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name")) + ListObjectVersionsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + ListObjectVersionsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter")) + ListObjectVersionsOutput.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys")) + ListObjectVersionsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes")) + ListObjectVersionsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType")) + ListObjectVersionsOutput.struct_class = Types::ListObjectVersionsOutput + + ListObjectVersionsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListObjectVersionsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter")) + ListObjectVersionsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type")) + ListObjectVersionsRequest.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location: "querystring", location_name: "key-marker")) + 
ListObjectVersionsRequest.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys")) + ListObjectVersionsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix")) + ListObjectVersionsRequest.add_member(:version_id_marker, Shapes::ShapeRef.new(shape: VersionIdMarker, location: "querystring", location_name: "version-id-marker")) + ListObjectVersionsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListObjectVersionsRequest.struct_class = Types::ListObjectVersionsRequest + + ListObjectsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListObjectsOutput.add_member(:marker, Shapes::ShapeRef.new(shape: Marker, location_name: "Marker")) + ListObjectsOutput.add_member(:next_marker, Shapes::ShapeRef.new(shape: NextMarker, location_name: "NextMarker")) + ListObjectsOutput.add_member(:contents, Shapes::ShapeRef.new(shape: ObjectList, location_name: "Contents")) + ListObjectsOutput.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name")) + ListObjectsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + ListObjectsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter")) + ListObjectsOutput.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys")) + ListObjectsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes")) + ListObjectsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType")) + ListObjectsOutput.struct_class = Types::ListObjectsOutput + + ListObjectsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListObjectsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter")) + ListObjectsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type")) + ListObjectsRequest.add_member(:marker, Shapes::ShapeRef.new(shape: Marker, location: "querystring", location_name: "marker")) + ListObjectsRequest.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys")) + ListObjectsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix")) + ListObjectsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + ListObjectsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListObjectsRequest.struct_class = Types::ListObjectsRequest + + ListObjectsV2Output.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListObjectsV2Output.add_member(:contents, Shapes::ShapeRef.new(shape: ObjectList, location_name: "Contents")) + ListObjectsV2Output.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name")) + ListObjectsV2Output.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + 
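# [editorial sketch; not part of the generated source] Note the cursor difference
# between the two listing generations defined here: ListObjects (v1) pages with
# marker/next_marker, while ListObjectsV2 pages with a continuation-token and adds
# key_count and start-after. A v2 loop with a placeholder bucket:
#   token = nil
#   loop do
#     resp = s3.list_objects_v2(bucket: "my-bucket", prefix: "logs/", continuation_token: token)
#     resp.contents.each { |o| puts "#{o.key} (#{o.size} bytes)" }
#     break unless resp.is_truncated
#     token = resp.next_continuation_token
#   end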
ListObjectsV2Output.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter")) + ListObjectsV2Output.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys")) + ListObjectsV2Output.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes")) + ListObjectsV2Output.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType")) + ListObjectsV2Output.add_member(:key_count, Shapes::ShapeRef.new(shape: KeyCount, location_name: "KeyCount")) + ListObjectsV2Output.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken")) + ListObjectsV2Output.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken")) + ListObjectsV2Output.add_member(:start_after, Shapes::ShapeRef.new(shape: StartAfter, location_name: "StartAfter")) + ListObjectsV2Output.struct_class = Types::ListObjectsV2Output + + ListObjectsV2Request.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListObjectsV2Request.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter")) + ListObjectsV2Request.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type")) + ListObjectsV2Request.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys")) + ListObjectsV2Request.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix")) + ListObjectsV2Request.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token")) + ListObjectsV2Request.add_member(:fetch_owner, Shapes::ShapeRef.new(shape: FetchOwner, location: "querystring", location_name: "fetch-owner")) + ListObjectsV2Request.add_member(:start_after, Shapes::ShapeRef.new(shape: StartAfter, location: "querystring", location_name: "start-after")) + ListObjectsV2Request.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + ListObjectsV2Request.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListObjectsV2Request.struct_class = Types::ListObjectsV2Request + + ListPartsOutput.add_member(:abort_date, Shapes::ShapeRef.new(shape: AbortDate, location: "header", location_name: "x-amz-abort-date")) + ListPartsOutput.add_member(:abort_rule_id, Shapes::ShapeRef.new(shape: AbortRuleId, location: "header", location_name: "x-amz-abort-rule-id")) + ListPartsOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket")) + ListPartsOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + ListPartsOutput.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId")) + ListPartsOutput.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location_name: "PartNumberMarker")) + ListPartsOutput.add_member(:next_part_number_marker, Shapes::ShapeRef.new(shape: NextPartNumberMarker, location_name: "NextPartNumberMarker")) + ListPartsOutput.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, 
location_name: "MaxParts")) + ListPartsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated")) + ListPartsOutput.add_member(:parts, Shapes::ShapeRef.new(shape: Parts, location_name: "Part")) + ListPartsOutput.add_member(:initiator, Shapes::ShapeRef.new(shape: Initiator, location_name: "Initiator")) + ListPartsOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + ListPartsOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + ListPartsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + ListPartsOutput.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location_name: "ChecksumAlgorithm")) + ListPartsOutput.struct_class = Types::ListPartsOutput + + ListPartsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + ListPartsRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + ListPartsRequest.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location: "querystring", location_name: "max-parts")) + ListPartsRequest.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location: "querystring", location_name: "part-number-marker")) + ListPartsRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) + ListPartsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + ListPartsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + ListPartsRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + ListPartsRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + ListPartsRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + ListPartsRequest.struct_class = Types::ListPartsRequest + + LoggingEnabled.add_member(:target_bucket, Shapes::ShapeRef.new(shape: TargetBucket, required: true, location_name: "TargetBucket")) + LoggingEnabled.add_member(:target_grants, Shapes::ShapeRef.new(shape: TargetGrants, location_name: "TargetGrants")) + LoggingEnabled.add_member(:target_prefix, Shapes::ShapeRef.new(shape: TargetPrefix, required: true, location_name: "TargetPrefix")) + LoggingEnabled.struct_class = Types::LoggingEnabled + + Metadata.key = Shapes::ShapeRef.new(shape: MetadataKey) + Metadata.value = Shapes::ShapeRef.new(shape: MetadataValue) + + MetadataEntry.add_member(:name, Shapes::ShapeRef.new(shape: MetadataKey, location_name: "Name")) + MetadataEntry.add_member(:value, Shapes::ShapeRef.new(shape: MetadataValue, location_name: "Value")) + MetadataEntry.struct_class = Types::MetadataEntry + + Metrics.add_member(:status, Shapes::ShapeRef.new(shape: MetricsStatus, required: true, location_name: "Status")) + 
Metrics.add_member(:event_threshold, Shapes::ShapeRef.new(shape: ReplicationTimeValue, location_name: "EventThreshold")) + Metrics.struct_class = Types::Metrics + + MetricsAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + MetricsAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true})) + MetricsAndOperator.add_member(:access_point_arn, Shapes::ShapeRef.new(shape: AccessPointArn, location_name: "AccessPointArn")) + MetricsAndOperator.struct_class = Types::MetricsAndOperator + + MetricsConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location_name: "Id")) + MetricsConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: MetricsFilter, location_name: "Filter")) + MetricsConfiguration.struct_class = Types::MetricsConfiguration + + MetricsConfigurationList.member = Shapes::ShapeRef.new(shape: MetricsConfiguration) + + MetricsFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + MetricsFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag")) + MetricsFilter.add_member(:access_point_arn, Shapes::ShapeRef.new(shape: AccessPointArn, location_name: "AccessPointArn")) + MetricsFilter.add_member(:and, Shapes::ShapeRef.new(shape: MetricsAndOperator, location_name: "And")) + MetricsFilter.struct_class = Types::MetricsFilter + + MultipartUpload.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId")) + MultipartUpload.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + MultipartUpload.add_member(:initiated, Shapes::ShapeRef.new(shape: Initiated, location_name: "Initiated")) + MultipartUpload.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + MultipartUpload.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + MultipartUpload.add_member(:initiator, Shapes::ShapeRef.new(shape: Initiator, location_name: "Initiator")) + MultipartUpload.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location_name: "ChecksumAlgorithm")) + MultipartUpload.struct_class = Types::MultipartUpload + + MultipartUploadList.member = Shapes::ShapeRef.new(shape: MultipartUpload) + + NoSuchBucket.struct_class = Types::NoSuchBucket + + NoSuchKey.struct_class = Types::NoSuchKey + + NoSuchUpload.struct_class = Types::NoSuchUpload + + NoncurrentVersionExpiration.add_member(:noncurrent_days, Shapes::ShapeRef.new(shape: Days, location_name: "NoncurrentDays")) + NoncurrentVersionExpiration.add_member(:newer_noncurrent_versions, Shapes::ShapeRef.new(shape: VersionCount, location_name: "NewerNoncurrentVersions")) + NoncurrentVersionExpiration.struct_class = Types::NoncurrentVersionExpiration + + NoncurrentVersionTransition.add_member(:noncurrent_days, Shapes::ShapeRef.new(shape: Days, location_name: "NoncurrentDays")) + NoncurrentVersionTransition.add_member(:storage_class, Shapes::ShapeRef.new(shape: TransitionStorageClass, location_name: "StorageClass")) + NoncurrentVersionTransition.add_member(:newer_noncurrent_versions, Shapes::ShapeRef.new(shape: VersionCount, location_name: "NewerNoncurrentVersions")) + NoncurrentVersionTransition.struct_class = Types::NoncurrentVersionTransition + + NoncurrentVersionTransitionList.member = Shapes::ShapeRef.new(shape: NoncurrentVersionTransition) + + NotificationConfiguration.add_member(:topic_configurations, 
Shapes::ShapeRef.new(shape: TopicConfigurationList, location_name: "TopicConfiguration")) + NotificationConfiguration.add_member(:queue_configurations, Shapes::ShapeRef.new(shape: QueueConfigurationList, location_name: "QueueConfiguration")) + NotificationConfiguration.add_member(:lambda_function_configurations, Shapes::ShapeRef.new(shape: LambdaFunctionConfigurationList, location_name: "CloudFunctionConfiguration")) + NotificationConfiguration.add_member(:event_bridge_configuration, Shapes::ShapeRef.new(shape: EventBridgeConfiguration, location_name: "EventBridgeConfiguration")) + NotificationConfiguration.struct_class = Types::NotificationConfiguration + + NotificationConfigurationDeprecated.add_member(:topic_configuration, Shapes::ShapeRef.new(shape: TopicConfigurationDeprecated, location_name: "TopicConfiguration")) + NotificationConfigurationDeprecated.add_member(:queue_configuration, Shapes::ShapeRef.new(shape: QueueConfigurationDeprecated, location_name: "QueueConfiguration")) + NotificationConfigurationDeprecated.add_member(:cloud_function_configuration, Shapes::ShapeRef.new(shape: CloudFunctionConfiguration, location_name: "CloudFunctionConfiguration")) + NotificationConfigurationDeprecated.struct_class = Types::NotificationConfigurationDeprecated + + NotificationConfigurationFilter.add_member(:key, Shapes::ShapeRef.new(shape: S3KeyFilter, location_name: "S3Key")) + NotificationConfigurationFilter.struct_class = Types::NotificationConfigurationFilter + + Object.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + Object.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) + Object.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + Object.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithmList, location_name: "ChecksumAlgorithm")) + Object.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size")) + Object.add_member(:storage_class, Shapes::ShapeRef.new(shape: ObjectStorageClass, location_name: "StorageClass")) + Object.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + Object.struct_class = Types::Object + + ObjectAlreadyInActiveTierError.struct_class = Types::ObjectAlreadyInActiveTierError + + ObjectAttributesList.member = Shapes::ShapeRef.new(shape: ObjectAttributes) + + ObjectIdentifier.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key")) + ObjectIdentifier.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId")) + ObjectIdentifier.struct_class = Types::ObjectIdentifier + + ObjectIdentifierList.member = Shapes::ShapeRef.new(shape: ObjectIdentifier) + + ObjectList.member = Shapes::ShapeRef.new(shape: Object) + + ObjectLockConfiguration.add_member(:object_lock_enabled, Shapes::ShapeRef.new(shape: ObjectLockEnabled, location_name: "ObjectLockEnabled")) + ObjectLockConfiguration.add_member(:rule, Shapes::ShapeRef.new(shape: ObjectLockRule, location_name: "Rule")) + ObjectLockConfiguration.struct_class = Types::ObjectLockConfiguration + + ObjectLockLegalHold.add_member(:status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location_name: "Status")) + ObjectLockLegalHold.struct_class = Types::ObjectLockLegalHold + + ObjectLockRetention.add_member(:mode, Shapes::ShapeRef.new(shape: ObjectLockRetentionMode, location_name: "Mode")) + ObjectLockRetention.add_member(:retain_until_date, Shapes::ShapeRef.new(shape: Date, 
location_name: "RetainUntilDate")) + ObjectLockRetention.struct_class = Types::ObjectLockRetention + + ObjectLockRule.add_member(:default_retention, Shapes::ShapeRef.new(shape: DefaultRetention, location_name: "DefaultRetention")) + ObjectLockRule.struct_class = Types::ObjectLockRule + + ObjectNotInActiveTierError.struct_class = Types::ObjectNotInActiveTierError + + ObjectPart.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber")) + ObjectPart.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size")) + ObjectPart.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + ObjectPart.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + ObjectPart.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + ObjectPart.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + ObjectPart.struct_class = Types::ObjectPart + + ObjectVersion.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + ObjectVersion.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithmList, location_name: "ChecksumAlgorithm")) + ObjectVersion.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size")) + ObjectVersion.add_member(:storage_class, Shapes::ShapeRef.new(shape: ObjectVersionStorageClass, location_name: "StorageClass")) + ObjectVersion.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) + ObjectVersion.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId")) + ObjectVersion.add_member(:is_latest, Shapes::ShapeRef.new(shape: IsLatest, location_name: "IsLatest")) + ObjectVersion.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) + ObjectVersion.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner")) + ObjectVersion.struct_class = Types::ObjectVersion + + ObjectVersionList.member = Shapes::ShapeRef.new(shape: ObjectVersion) + + OutputLocation.add_member(:s3, Shapes::ShapeRef.new(shape: S3Location, location_name: "S3")) + OutputLocation.struct_class = Types::OutputLocation + + OutputSerialization.add_member(:csv, Shapes::ShapeRef.new(shape: CSVOutput, location_name: "CSV")) + OutputSerialization.add_member(:json, Shapes::ShapeRef.new(shape: JSONOutput, location_name: "JSON")) + OutputSerialization.struct_class = Types::OutputSerialization + + Owner.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName")) + Owner.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + Owner.struct_class = Types::Owner + + OwnershipControls.add_member(:rules, Shapes::ShapeRef.new(shape: OwnershipControlsRules, required: true, location_name: "Rule")) + OwnershipControls.struct_class = Types::OwnershipControls + + OwnershipControlsRule.add_member(:object_ownership, Shapes::ShapeRef.new(shape: ObjectOwnership, required: true, location_name: "ObjectOwnership")) + OwnershipControlsRule.struct_class = Types::OwnershipControlsRule + + OwnershipControlsRules.member = Shapes::ShapeRef.new(shape: OwnershipControlsRule) + + ParquetInput.struct_class = Types::ParquetInput + + Part.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber")) + Part.add_member(:last_modified, Shapes::ShapeRef.new(shape: 
LastModified, location_name: "LastModified")) + Part.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) + Part.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size")) + Part.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) + Part.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) + Part.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) + Part.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) + Part.struct_class = Types::Part + + Parts.member = Shapes::ShapeRef.new(shape: Part) + + PartsList.member = Shapes::ShapeRef.new(shape: ObjectPart) + + PolicyStatus.add_member(:is_public, Shapes::ShapeRef.new(shape: IsPublic, location_name: "IsPublic")) + PolicyStatus.struct_class = Types::PolicyStatus + + Progress.add_member(:bytes_scanned, Shapes::ShapeRef.new(shape: BytesScanned, location_name: "BytesScanned")) + Progress.add_member(:bytes_processed, Shapes::ShapeRef.new(shape: BytesProcessed, location_name: "BytesProcessed")) + Progress.add_member(:bytes_returned, Shapes::ShapeRef.new(shape: BytesReturned, location_name: "BytesReturned")) + Progress.struct_class = Types::Progress + + ProgressEvent.add_member(:details, Shapes::ShapeRef.new(shape: Progress, eventpayload: true, eventpayload_type: 'structure', location_name: "Details", metadata: {"eventpayload"=>true})) + ProgressEvent.struct_class = Types::ProgressEvent + + PublicAccessBlockConfiguration.add_member(:block_public_acls, Shapes::ShapeRef.new(shape: Setting, location_name: "BlockPublicAcls")) + PublicAccessBlockConfiguration.add_member(:ignore_public_acls, Shapes::ShapeRef.new(shape: Setting, location_name: "IgnorePublicAcls")) + PublicAccessBlockConfiguration.add_member(:block_public_policy, Shapes::ShapeRef.new(shape: Setting, location_name: "BlockPublicPolicy")) + PublicAccessBlockConfiguration.add_member(:restrict_public_buckets, Shapes::ShapeRef.new(shape: Setting, location_name: "RestrictPublicBuckets")) + PublicAccessBlockConfiguration.struct_class = Types::PublicAccessBlockConfiguration + + PutBucketAccelerateConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketAccelerateConfigurationRequest.add_member(:accelerate_configuration, Shapes::ShapeRef.new(shape: AccelerateConfiguration, required: true, location_name: "AccelerateConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketAccelerateConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketAccelerateConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketAccelerateConfigurationRequest.struct_class = Types::PutBucketAccelerateConfigurationRequest + PutBucketAccelerateConfigurationRequest[:payload] = :accelerate_configuration + PutBucketAccelerateConfigurationRequest[:payload_member] = PutBucketAccelerateConfigurationRequest.member(:accelerate_configuration) + + PutBucketAclRequest.add_member(:acl, Shapes::ShapeRef.new(shape: BucketCannedACL, location: "header", location_name: "x-amz-acl")) 
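# [editorial sketch; not part of the generated source] The [:payload]/[:payload_member]
# assignments just above mark which single member is serialized as the XML request
# body; every other member is bound to the URI, query string, or a header. For the
# accelerate case that body is AccelerateConfiguration (placeholder bucket name):
#   s3.put_bucket_accelerate_configuration(
#     bucket: "my-bucket",
#     accelerate_configuration: { status: "Enabled" }  # becomes the XML payload
#   )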
+ PutBucketAclRequest.add_member(:access_control_policy, Shapes::ShapeRef.new(shape: AccessControlPolicy, location_name: "AccessControlPolicy", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketAclRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketAclRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketAclRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + PutBucketAclRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + PutBucketAclRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + PutBucketAclRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write")) + PutBucketAclRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + PutBucketAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketAclRequest.struct_class = Types::PutBucketAclRequest + PutBucketAclRequest[:payload] = :access_control_policy + PutBucketAclRequest[:payload_member] = PutBucketAclRequest.member(:access_control_policy) + + PutBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id")) + PutBucketAnalyticsConfigurationRequest.add_member(:analytics_configuration, Shapes::ShapeRef.new(shape: AnalyticsConfiguration, required: true, location_name: "AnalyticsConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketAnalyticsConfigurationRequest.struct_class = Types::PutBucketAnalyticsConfigurationRequest + PutBucketAnalyticsConfigurationRequest[:payload] = :analytics_configuration + PutBucketAnalyticsConfigurationRequest[:payload_member] = PutBucketAnalyticsConfigurationRequest.member(:analytics_configuration) + + PutBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketCorsRequest.add_member(:cors_configuration, Shapes::ShapeRef.new(shape: CORSConfiguration, required: true, location_name: "CORSConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketCorsRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + 
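# [editorial sketch; not part of the generated source] PutBucketAclRequest above
# exposes two routes to the same result: a canned ACL via the x-amz-acl header, or an
# explicit AccessControlPolicy document as the payload (owner_id is a placeholder):
#   s3.put_bucket_acl(bucket: "my-bucket", acl: "private")   # canned ACL via header
#   # ...or an explicit grant document as the XML body:
#   s3.put_bucket_acl(bucket: "my-bucket", access_control_policy: {
#     owner:  { id: owner_id },
#     grants: [{ grantee: { id: owner_id, type: "CanonicalUser" }, permission: "FULL_CONTROL" }]
#   })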
PutBucketCorsRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketCorsRequest.struct_class = Types::PutBucketCorsRequest + PutBucketCorsRequest[:payload] = :cors_configuration + PutBucketCorsRequest[:payload_member] = PutBucketCorsRequest.member(:cors_configuration) + + PutBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketEncryptionRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketEncryptionRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketEncryptionRequest.add_member(:server_side_encryption_configuration, Shapes::ShapeRef.new(shape: ServerSideEncryptionConfiguration, required: true, location_name: "ServerSideEncryptionConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketEncryptionRequest.struct_class = Types::PutBucketEncryptionRequest + PutBucketEncryptionRequest[:payload] = :server_side_encryption_configuration + PutBucketEncryptionRequest[:payload_member] = PutBucketEncryptionRequest.member(:server_side_encryption_configuration) + + PutBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id")) + PutBucketIntelligentTieringConfigurationRequest.add_member(:intelligent_tiering_configuration, Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration, required: true, location_name: "IntelligentTieringConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketIntelligentTieringConfigurationRequest.struct_class = Types::PutBucketIntelligentTieringConfigurationRequest + PutBucketIntelligentTieringConfigurationRequest[:payload] = :intelligent_tiering_configuration + PutBucketIntelligentTieringConfigurationRequest[:payload_member] = PutBucketIntelligentTieringConfigurationRequest.member(:intelligent_tiering_configuration) + + PutBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id")) + PutBucketInventoryConfigurationRequest.add_member(:inventory_configuration, Shapes::ShapeRef.new(shape: InventoryConfiguration, required: true, location_name: "InventoryConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + 
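# [editorial sketch; not part of the generated source] The PutBucketEncryption shapes
# above carry a required ServerSideEncryptionConfiguration payload. A minimal SSE-S3
# default encryption rule, with a placeholder bucket name:
#   s3.put_bucket_encryption(
#     bucket: "my-bucket",
#     server_side_encryption_configuration: {
#       rules: [{ apply_server_side_encryption_by_default: { sse_algorithm: "AES256" } }]
#     }
#   )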
PutBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketInventoryConfigurationRequest.struct_class = Types::PutBucketInventoryConfigurationRequest + PutBucketInventoryConfigurationRequest[:payload] = :inventory_configuration + PutBucketInventoryConfigurationRequest[:payload_member] = PutBucketInventoryConfigurationRequest.member(:inventory_configuration) + + PutBucketLifecycleConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketLifecycleConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketLifecycleConfigurationRequest.add_member(:lifecycle_configuration, Shapes::ShapeRef.new(shape: BucketLifecycleConfiguration, location_name: "LifecycleConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketLifecycleConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketLifecycleConfigurationRequest.struct_class = Types::PutBucketLifecycleConfigurationRequest + PutBucketLifecycleConfigurationRequest[:payload] = :lifecycle_configuration + PutBucketLifecycleConfigurationRequest[:payload_member] = PutBucketLifecycleConfigurationRequest.member(:lifecycle_configuration) + + PutBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketLifecycleRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketLifecycleRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketLifecycleRequest.add_member(:lifecycle_configuration, Shapes::ShapeRef.new(shape: LifecycleConfiguration, location_name: "LifecycleConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketLifecycleRequest.struct_class = Types::PutBucketLifecycleRequest + PutBucketLifecycleRequest[:payload] = :lifecycle_configuration + PutBucketLifecycleRequest[:payload_member] = PutBucketLifecycleRequest.member(:lifecycle_configuration) + + PutBucketLoggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketLoggingRequest.add_member(:bucket_logging_status, Shapes::ShapeRef.new(shape: BucketLoggingStatus, required: true, location_name: "BucketLoggingStatus", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketLoggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketLoggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: 
"x-amz-sdk-checksum-algorithm")) + PutBucketLoggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketLoggingRequest.struct_class = Types::PutBucketLoggingRequest + PutBucketLoggingRequest[:payload] = :bucket_logging_status + PutBucketLoggingRequest[:payload_member] = PutBucketLoggingRequest.member(:bucket_logging_status) + + PutBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id")) + PutBucketMetricsConfigurationRequest.add_member(:metrics_configuration, Shapes::ShapeRef.new(shape: MetricsConfiguration, required: true, location_name: "MetricsConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketMetricsConfigurationRequest.struct_class = Types::PutBucketMetricsConfigurationRequest + PutBucketMetricsConfigurationRequest[:payload] = :metrics_configuration + PutBucketMetricsConfigurationRequest[:payload_member] = PutBucketMetricsConfigurationRequest.member(:metrics_configuration) + + PutBucketNotificationConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketNotificationConfigurationRequest.add_member(:notification_configuration, Shapes::ShapeRef.new(shape: NotificationConfiguration, required: true, location_name: "NotificationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketNotificationConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketNotificationConfigurationRequest.add_member(:skip_destination_validation, Shapes::ShapeRef.new(shape: SkipValidation, location: "header", location_name: "x-amz-skip-destination-validation")) + PutBucketNotificationConfigurationRequest.struct_class = Types::PutBucketNotificationConfigurationRequest + PutBucketNotificationConfigurationRequest[:payload] = :notification_configuration + PutBucketNotificationConfigurationRequest[:payload_member] = PutBucketNotificationConfigurationRequest.member(:notification_configuration) + + PutBucketNotificationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketNotificationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketNotificationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketNotificationRequest.add_member(:notification_configuration, Shapes::ShapeRef.new(shape: NotificationConfigurationDeprecated, required: true, location_name: "NotificationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + 
PutBucketNotificationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketNotificationRequest.struct_class = Types::PutBucketNotificationRequest + PutBucketNotificationRequest[:payload] = :notification_configuration + PutBucketNotificationRequest[:payload_member] = PutBucketNotificationRequest.member(:notification_configuration) + + PutBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketOwnershipControlsRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketOwnershipControlsRequest.add_member(:ownership_controls, Shapes::ShapeRef.new(shape: OwnershipControls, required: true, location_name: "OwnershipControls", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketOwnershipControlsRequest.struct_class = Types::PutBucketOwnershipControlsRequest + PutBucketOwnershipControlsRequest[:payload] = :ownership_controls + PutBucketOwnershipControlsRequest[:payload_member] = PutBucketOwnershipControlsRequest.member(:ownership_controls) + + PutBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketPolicyRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketPolicyRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketPolicyRequest.add_member(:confirm_remove_self_bucket_access, Shapes::ShapeRef.new(shape: ConfirmRemoveSelfBucketAccess, location: "header", location_name: "x-amz-confirm-remove-self-bucket-access")) + PutBucketPolicyRequest.add_member(:policy, Shapes::ShapeRef.new(shape: Policy, required: true, location_name: "Policy")) + PutBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketPolicyRequest.struct_class = Types::PutBucketPolicyRequest + PutBucketPolicyRequest[:payload] = :policy + PutBucketPolicyRequest[:payload_member] = PutBucketPolicyRequest.member(:policy) + + PutBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketReplicationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketReplicationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketReplicationRequest.add_member(:replication_configuration, Shapes::ShapeRef.new(shape: ReplicationConfiguration, required: true, location_name: "ReplicationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketReplicationRequest.add_member(:token, 
Shapes::ShapeRef.new(shape: ObjectLockToken, location: "header", location_name: "x-amz-bucket-object-lock-token")) + PutBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketReplicationRequest.struct_class = Types::PutBucketReplicationRequest + PutBucketReplicationRequest[:payload] = :replication_configuration + PutBucketReplicationRequest[:payload_member] = PutBucketReplicationRequest.member(:replication_configuration) + + PutBucketRequestPaymentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketRequestPaymentRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketRequestPaymentRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketRequestPaymentRequest.add_member(:request_payment_configuration, Shapes::ShapeRef.new(shape: RequestPaymentConfiguration, required: true, location_name: "RequestPaymentConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketRequestPaymentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketRequestPaymentRequest.struct_class = Types::PutBucketRequestPaymentRequest + PutBucketRequestPaymentRequest[:payload] = :request_payment_configuration + PutBucketRequestPaymentRequest[:payload_member] = PutBucketRequestPaymentRequest.member(:request_payment_configuration) + + PutBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketTaggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketTaggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketTaggingRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, required: true, location_name: "Tagging", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketTaggingRequest.struct_class = Types::PutBucketTaggingRequest + PutBucketTaggingRequest[:payload] = :tagging + PutBucketTaggingRequest[:payload_member] = PutBucketTaggingRequest.member(:tagging) + + PutBucketVersioningRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketVersioningRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketVersioningRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketVersioningRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa")) + 
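PutBucketVersioningRequest maps :mfa to the x-amz-mfa header while the VersioningConfiguration itself rides in the body (its two members, Status and MfaDelete, are registered further down in this file). Enabling versioning from Ruby is then a sketch like the following, with a placeholder bucket; the mfa string is only needed when toggling MFA delete:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    client.put_bucket_versioning(
      bucket: "example-bucket",      # hypothetical
      versioning_configuration: {
        status: "Enabled",           # or "Suspended"
        mfa_delete: "Disabled"
      }
    )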
PutBucketVersioningRequest.add_member(:versioning_configuration, Shapes::ShapeRef.new(shape: VersioningConfiguration, required: true, location_name: "VersioningConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketVersioningRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketVersioningRequest.struct_class = Types::PutBucketVersioningRequest + PutBucketVersioningRequest[:payload] = :versioning_configuration + PutBucketVersioningRequest[:payload_member] = PutBucketVersioningRequest.member(:versioning_configuration) + + PutBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutBucketWebsiteRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutBucketWebsiteRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutBucketWebsiteRequest.add_member(:website_configuration, Shapes::ShapeRef.new(shape: WebsiteConfiguration, required: true, location_name: "WebsiteConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutBucketWebsiteRequest.struct_class = Types::PutBucketWebsiteRequest + PutBucketWebsiteRequest[:payload] = :website_configuration + PutBucketWebsiteRequest[:payload_member] = PutBucketWebsiteRequest.member(:website_configuration) + + PutObjectAclOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + PutObjectAclOutput.struct_class = Types::PutObjectAclOutput + + PutObjectAclRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl")) + PutObjectAclRequest.add_member(:access_control_policy, Shapes::ShapeRef.new(shape: AccessControlPolicy, location_name: "AccessControlPolicy", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutObjectAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectAclRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectAclRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectAclRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + PutObjectAclRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + PutObjectAclRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + PutObjectAclRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write")) + 
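PutBucketWebsiteRequest above takes the WebsiteConfiguration shape (defined near the end of this file) as its payload. A minimal static-site sketch with placeholder document names:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    client.put_bucket_website(
      bucket: "example-bucket",                    # hypothetical
      website_configuration: {
        index_document: { suffix: "index.html" },
        error_document: { key: "error.html" }      # illustrative keys
      }
    )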
PutObjectAclRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + PutObjectAclRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + PutObjectAclRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectAclRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + PutObjectAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectAclRequest.struct_class = Types::PutObjectAclRequest + PutObjectAclRequest[:payload] = :access_control_policy + PutObjectAclRequest[:payload_member] = PutObjectAclRequest.member(:access_control_policy) + + PutObjectLegalHoldOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + PutObjectLegalHoldOutput.struct_class = Types::PutObjectLegalHoldOutput + + PutObjectLegalHoldRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectLegalHoldRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + PutObjectLegalHoldRequest.add_member(:legal_hold, Shapes::ShapeRef.new(shape: ObjectLockLegalHold, location_name: "LegalHold", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutObjectLegalHoldRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectLegalHoldRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + PutObjectLegalHoldRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectLegalHoldRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectLegalHoldRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectLegalHoldRequest.struct_class = Types::PutObjectLegalHoldRequest + PutObjectLegalHoldRequest[:payload] = :legal_hold + PutObjectLegalHoldRequest[:payload_member] = PutObjectLegalHoldRequest.member(:legal_hold) + + PutObjectLockConfigurationOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + PutObjectLockConfigurationOutput.struct_class = Types::PutObjectLockConfigurationOutput + + PutObjectLockConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectLockConfigurationRequest.add_member(:object_lock_configuration, Shapes::ShapeRef.new(shape: ObjectLockConfiguration, location_name: "ObjectLockConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutObjectLockConfigurationRequest.add_member(:request_payer, 
Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectLockConfigurationRequest.add_member(:token, Shapes::ShapeRef.new(shape: ObjectLockToken, location: "header", location_name: "x-amz-bucket-object-lock-token")) + PutObjectLockConfigurationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectLockConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectLockConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectLockConfigurationRequest.struct_class = Types::PutObjectLockConfigurationRequest + PutObjectLockConfigurationRequest[:payload] = :object_lock_configuration + PutObjectLockConfigurationRequest[:payload_member] = PutObjectLockConfigurationRequest.member(:object_lock_configuration) + + PutObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) + PutObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag")) + PutObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + PutObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + PutObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + PutObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + PutObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + PutObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + PutObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + PutObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + PutObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + PutObjectOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + PutObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + PutObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + PutObjectOutput.struct_class = Types::PutObjectOutput + + PutObjectRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl")) + PutObjectRequest.add_member(:body, 
Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) + PutObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) + PutObjectRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition")) + PutObjectRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) + PutObjectRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) + PutObjectRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) + PutObjectRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) + PutObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + PutObjectRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + PutObjectRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + PutObjectRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + PutObjectRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) + PutObjectRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) + PutObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) + PutObjectRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) + PutObjectRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) + PutObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + PutObjectRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + PutObjectRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + PutObjectRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) + PutObjectRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) + 
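For PutObjectRequest the payload member is :body, flagged streaming, so the client accepts a String or any IO-like object and streams it; nearly everything else in the shape becomes a header. A sketch, where the bucket, key, and local file path are placeholders:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    # :body streams; :checksum_algorithm asks the SDK to compute and send
    # the matching x-amz-checksum-* value for the upload.
    resp = File.open("report.pdf", "rb") do |file|   # hypothetical local file
      client.put_object(
        bucket: "example-bucket",                    # hypothetical
        key: "reports/report.pdf",
        body: file,
        content_type: "application/pdf",
        checksum_algorithm: "SHA256",
        metadata: { "uploaded-by" => "ci" }          # sent as x-amz-meta-uploaded-by
      )
    end

    resp.etag            # from the ETag response header
    resp.checksum_sha256 # echoed back per PutObjectOutput above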
PutObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + PutObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + PutObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + PutObjectRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + PutObjectRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) + PutObjectRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + PutObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging")) + PutObjectRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) + PutObjectRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) + PutObjectRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) + PutObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectRequest.struct_class = Types::PutObjectRequest + PutObjectRequest[:payload] = :body + PutObjectRequest[:payload_member] = PutObjectRequest.member(:body) + + PutObjectRetentionOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + PutObjectRetentionOutput.struct_class = Types::PutObjectRetentionOutput + + PutObjectRetentionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectRetentionRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + PutObjectRetentionRequest.add_member(:retention, Shapes::ShapeRef.new(shape: ObjectLockRetention, location_name: "Retention", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutObjectRetentionRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectRetentionRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + PutObjectRetentionRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention")) + 
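PutObjectRetentionRequest above pairs a Retention payload with the x-amz-bypass-governance-retention header. A sketch that places a one-day GOVERNANCE hold; the bucket and key are placeholders, and the bucket must have Object Lock enabled:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    client.put_object_retention(
      bucket: "example-locked-bucket",   # hypothetical, Object Lock enabled
      key: "contracts/v1.pdf",           # hypothetical
      retention: {
        mode: "GOVERNANCE",              # or "COMPLIANCE"
        retain_until_date: Time.now + 24 * 60 * 60
      }
    )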
PutObjectRetentionRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectRetentionRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectRetentionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectRetentionRequest.struct_class = Types::PutObjectRetentionRequest + PutObjectRetentionRequest[:payload] = :retention + PutObjectRetentionRequest[:payload_member] = PutObjectRetentionRequest.member(:retention) + + PutObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) + PutObjectTaggingOutput.struct_class = Types::PutObjectTaggingOutput + + PutObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + PutObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + PutObjectTaggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutObjectTaggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutObjectTaggingRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, required: true, location_name: "Tagging", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutObjectTaggingRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + PutObjectTaggingRequest.struct_class = Types::PutObjectTaggingRequest + PutObjectTaggingRequest[:payload] = :tagging + PutObjectTaggingRequest[:payload_member] = PutObjectTaggingRequest.member(:tagging) + + PutPublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + PutPublicAccessBlockRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + PutPublicAccessBlockRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + PutPublicAccessBlockRequest.add_member(:public_access_block_configuration, Shapes::ShapeRef.new(shape: PublicAccessBlockConfiguration, required: true, location_name: "PublicAccessBlockConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + PutPublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + PutPublicAccessBlockRequest.struct_class = Types::PutPublicAccessBlockRequest + PutPublicAccessBlockRequest[:payload] = 
:public_access_block_configuration + PutPublicAccessBlockRequest[:payload_member] = PutPublicAccessBlockRequest.member(:public_access_block_configuration) + + QueueConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + QueueConfiguration.add_member(:queue_arn, Shapes::ShapeRef.new(shape: QueueArn, required: true, location_name: "Queue")) + QueueConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event")) + QueueConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter")) + QueueConfiguration.struct_class = Types::QueueConfiguration + + QueueConfigurationDeprecated.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + QueueConfigurationDeprecated.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event")) + QueueConfigurationDeprecated.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event")) + QueueConfigurationDeprecated.add_member(:queue, Shapes::ShapeRef.new(shape: QueueArn, location_name: "Queue")) + QueueConfigurationDeprecated.struct_class = Types::QueueConfigurationDeprecated + + QueueConfigurationList.member = Shapes::ShapeRef.new(shape: QueueConfiguration) + + RecordsEvent.add_member(:payload, Shapes::ShapeRef.new(shape: Body, eventpayload: true, eventpayload_type: 'blob', location_name: "Payload", metadata: {"eventpayload"=>true})) + RecordsEvent.struct_class = Types::RecordsEvent + + Redirect.add_member(:host_name, Shapes::ShapeRef.new(shape: HostName, location_name: "HostName")) + Redirect.add_member(:http_redirect_code, Shapes::ShapeRef.new(shape: HttpRedirectCode, location_name: "HttpRedirectCode")) + Redirect.add_member(:protocol, Shapes::ShapeRef.new(shape: Protocol, location_name: "Protocol")) + Redirect.add_member(:replace_key_prefix_with, Shapes::ShapeRef.new(shape: ReplaceKeyPrefixWith, location_name: "ReplaceKeyPrefixWith")) + Redirect.add_member(:replace_key_with, Shapes::ShapeRef.new(shape: ReplaceKeyWith, location_name: "ReplaceKeyWith")) + Redirect.struct_class = Types::Redirect + + RedirectAllRequestsTo.add_member(:host_name, Shapes::ShapeRef.new(shape: HostName, required: true, location_name: "HostName")) + RedirectAllRequestsTo.add_member(:protocol, Shapes::ShapeRef.new(shape: Protocol, location_name: "Protocol")) + RedirectAllRequestsTo.struct_class = Types::RedirectAllRequestsTo + + ReplicaModifications.add_member(:status, Shapes::ShapeRef.new(shape: ReplicaModificationsStatus, required: true, location_name: "Status")) + ReplicaModifications.struct_class = Types::ReplicaModifications + + ReplicationConfiguration.add_member(:role, Shapes::ShapeRef.new(shape: Role, required: true, location_name: "Role")) + ReplicationConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: ReplicationRules, required: true, location_name: "Rule")) + ReplicationConfiguration.struct_class = Types::ReplicationConfiguration + + ReplicationRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + ReplicationRule.add_member(:priority, Shapes::ShapeRef.new(shape: Priority, location_name: "Priority")) + ReplicationRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, deprecated: true, location_name: "Prefix")) + ReplicationRule.add_member(:filter, Shapes::ShapeRef.new(shape: ReplicationRuleFilter, location_name: "Filter")) + ReplicationRule.add_member(:status, Shapes::ShapeRef.new(shape: ReplicationRuleStatus, 
required: true, location_name: "Status")) + ReplicationRule.add_member(:source_selection_criteria, Shapes::ShapeRef.new(shape: SourceSelectionCriteria, location_name: "SourceSelectionCriteria")) + ReplicationRule.add_member(:existing_object_replication, Shapes::ShapeRef.new(shape: ExistingObjectReplication, location_name: "ExistingObjectReplication")) + ReplicationRule.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination")) + ReplicationRule.add_member(:delete_marker_replication, Shapes::ShapeRef.new(shape: DeleteMarkerReplication, location_name: "DeleteMarkerReplication")) + ReplicationRule.struct_class = Types::ReplicationRule + + ReplicationRuleAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + ReplicationRuleAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true})) + ReplicationRuleAndOperator.struct_class = Types::ReplicationRuleAndOperator + + ReplicationRuleFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix")) + ReplicationRuleFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag")) + ReplicationRuleFilter.add_member(:and, Shapes::ShapeRef.new(shape: ReplicationRuleAndOperator, location_name: "And")) + ReplicationRuleFilter.struct_class = Types::ReplicationRuleFilter + + ReplicationRules.member = Shapes::ShapeRef.new(shape: ReplicationRule) + + ReplicationTime.add_member(:status, Shapes::ShapeRef.new(shape: ReplicationTimeStatus, required: true, location_name: "Status")) + ReplicationTime.add_member(:time, Shapes::ShapeRef.new(shape: ReplicationTimeValue, required: true, location_name: "Time")) + ReplicationTime.struct_class = Types::ReplicationTime + + ReplicationTimeValue.add_member(:minutes, Shapes::ShapeRef.new(shape: Minutes, location_name: "Minutes")) + ReplicationTimeValue.struct_class = Types::ReplicationTimeValue + + RequestPaymentConfiguration.add_member(:payer, Shapes::ShapeRef.new(shape: Payer, required: true, location_name: "Payer")) + RequestPaymentConfiguration.struct_class = Types::RequestPaymentConfiguration + + RequestProgress.add_member(:enabled, Shapes::ShapeRef.new(shape: EnableRequestProgress, location_name: "Enabled")) + RequestProgress.struct_class = Types::RequestProgress + + RestoreObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + RestoreObjectOutput.add_member(:restore_output_path, Shapes::ShapeRef.new(shape: RestoreOutputPath, location: "header", location_name: "x-amz-restore-output-path")) + RestoreObjectOutput.struct_class = Types::RestoreObjectOutput + + RestoreObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + RestoreObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + RestoreObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId")) + RestoreObjectRequest.add_member(:restore_request, Shapes::ShapeRef.new(shape: RestoreRequest, location_name: "RestoreRequest", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) + RestoreObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", 
location_name: "x-amz-request-payer")) + RestoreObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + RestoreObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + RestoreObjectRequest.struct_class = Types::RestoreObjectRequest + RestoreObjectRequest[:payload] = :restore_request + RestoreObjectRequest[:payload_member] = RestoreObjectRequest.member(:restore_request) + + RestoreRequest.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days")) + RestoreRequest.add_member(:glacier_job_parameters, Shapes::ShapeRef.new(shape: GlacierJobParameters, location_name: "GlacierJobParameters")) + RestoreRequest.add_member(:type, Shapes::ShapeRef.new(shape: RestoreRequestType, location_name: "Type")) + RestoreRequest.add_member(:tier, Shapes::ShapeRef.new(shape: Tier, location_name: "Tier")) + RestoreRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "Description")) + RestoreRequest.add_member(:select_parameters, Shapes::ShapeRef.new(shape: SelectParameters, location_name: "SelectParameters")) + RestoreRequest.add_member(:output_location, Shapes::ShapeRef.new(shape: OutputLocation, location_name: "OutputLocation")) + RestoreRequest.struct_class = Types::RestoreRequest + + RoutingRule.add_member(:condition, Shapes::ShapeRef.new(shape: Condition, location_name: "Condition")) + RoutingRule.add_member(:redirect, Shapes::ShapeRef.new(shape: Redirect, required: true, location_name: "Redirect")) + RoutingRule.struct_class = Types::RoutingRule + + RoutingRules.member = Shapes::ShapeRef.new(shape: RoutingRule, location_name: "RoutingRule") + + Rule.add_member(:expiration, Shapes::ShapeRef.new(shape: LifecycleExpiration, location_name: "Expiration")) + Rule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID")) + Rule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, required: true, location_name: "Prefix")) + Rule.add_member(:status, Shapes::ShapeRef.new(shape: ExpirationStatus, required: true, location_name: "Status")) + Rule.add_member(:transition, Shapes::ShapeRef.new(shape: Transition, location_name: "Transition")) + Rule.add_member(:noncurrent_version_transition, Shapes::ShapeRef.new(shape: NoncurrentVersionTransition, location_name: "NoncurrentVersionTransition")) + Rule.add_member(:noncurrent_version_expiration, Shapes::ShapeRef.new(shape: NoncurrentVersionExpiration, location_name: "NoncurrentVersionExpiration")) + Rule.add_member(:abort_incomplete_multipart_upload, Shapes::ShapeRef.new(shape: AbortIncompleteMultipartUpload, location_name: "AbortIncompleteMultipartUpload")) + Rule.struct_class = Types::Rule + + Rules.member = Shapes::ShapeRef.new(shape: Rule) + + S3KeyFilter.add_member(:filter_rules, Shapes::ShapeRef.new(shape: FilterRuleList, location_name: "FilterRule")) + S3KeyFilter.struct_class = Types::S3KeyFilter + + S3Location.add_member(:bucket_name, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "BucketName")) + S3Location.add_member(:prefix, Shapes::ShapeRef.new(shape: LocationPrefix, required: true, location_name: "Prefix")) + S3Location.add_member(:encryption, Shapes::ShapeRef.new(shape: Encryption, location_name: "Encryption")) + S3Location.add_member(:canned_acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location_name: "CannedACL")) + S3Location.add_member(:access_control_list, 
Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList")) + S3Location.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, location_name: "Tagging")) + S3Location.add_member(:user_metadata, Shapes::ShapeRef.new(shape: UserMetadata, location_name: "UserMetadata")) + S3Location.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass")) + S3Location.struct_class = Types::S3Location + + SSEKMS.add_member(:key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, required: true, location_name: "KeyId")) + SSEKMS.struct_class = Types::SSEKMS + + SSES3.struct_class = Types::SSES3 + + ScanRange.add_member(:start, Shapes::ShapeRef.new(shape: Start, location_name: "Start")) + ScanRange.add_member(:end, Shapes::ShapeRef.new(shape: End, location_name: "End")) + ScanRange.struct_class = Types::ScanRange + + SelectObjectContentEventStream.add_member(:records, Shapes::ShapeRef.new(shape: RecordsEvent, event: true, location_name: "Records")) + SelectObjectContentEventStream.add_member(:stats, Shapes::ShapeRef.new(shape: StatsEvent, event: true, location_name: "Stats")) + SelectObjectContentEventStream.add_member(:progress, Shapes::ShapeRef.new(shape: ProgressEvent, event: true, location_name: "Progress")) + SelectObjectContentEventStream.add_member(:cont, Shapes::ShapeRef.new(shape: ContinuationEvent, event: true, location_name: "Cont")) + SelectObjectContentEventStream.add_member(:end, Shapes::ShapeRef.new(shape: EndEvent, event: true, location_name: "End")) + SelectObjectContentEventStream.struct_class = Types::SelectObjectContentEventStream + + SelectObjectContentOutput.add_member(:payload, Shapes::ShapeRef.new(shape: SelectObjectContentEventStream, eventstream: true, location_name: "Payload")) + SelectObjectContentOutput.struct_class = Types::SelectObjectContentOutput + SelectObjectContentOutput[:payload] = :payload + SelectObjectContentOutput[:payload_member] = SelectObjectContentOutput.member(:payload) + + SelectObjectContentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + SelectObjectContentRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + SelectObjectContentRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + SelectObjectContentRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + SelectObjectContentRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + SelectObjectContentRequest.add_member(:expression, Shapes::ShapeRef.new(shape: Expression, required: true, location_name: "Expression")) + SelectObjectContentRequest.add_member(:expression_type, Shapes::ShapeRef.new(shape: ExpressionType, required: true, location_name: "ExpressionType")) + SelectObjectContentRequest.add_member(:request_progress, Shapes::ShapeRef.new(shape: RequestProgress, location_name: "RequestProgress")) + SelectObjectContentRequest.add_member(:input_serialization, Shapes::ShapeRef.new(shape: InputSerialization, required: true, location_name: "InputSerialization")) + 
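SelectObjectContentEventStream above is an event stream (note the event: true members), so the client call yields a stream object on which per-event handlers are registered while the response is consumed. A sketch over a hypothetical CSV object; the handler names follow the SDK's generated on_<member>_event convention:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    client.select_object_content(
      bucket: "example-bucket",                       # hypothetical
      key: "data/people.csv",                         # hypothetical
      expression: "SELECT s.name FROM S3Object s",
      expression_type: "SQL",
      input_serialization: { csv: { file_header_info: "USE" } },
      output_serialization: { csv: {} }
    ) do |stream|
      stream.on_records_event do |event|
        print event.payload.read                      # RecordsEvent payload is a blob
      end
      stream.on_stats_event do |event|
        warn "scanned #{event.details.bytes_scanned} bytes"  # Stats shape above
      end
    end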
SelectObjectContentRequest.add_member(:output_serialization, Shapes::ShapeRef.new(shape: OutputSerialization, required: true, location_name: "OutputSerialization")) + SelectObjectContentRequest.add_member(:scan_range, Shapes::ShapeRef.new(shape: ScanRange, location_name: "ScanRange")) + SelectObjectContentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + SelectObjectContentRequest.struct_class = Types::SelectObjectContentRequest + + SelectParameters.add_member(:input_serialization, Shapes::ShapeRef.new(shape: InputSerialization, required: true, location_name: "InputSerialization")) + SelectParameters.add_member(:expression_type, Shapes::ShapeRef.new(shape: ExpressionType, required: true, location_name: "ExpressionType")) + SelectParameters.add_member(:expression, Shapes::ShapeRef.new(shape: Expression, required: true, location_name: "Expression")) + SelectParameters.add_member(:output_serialization, Shapes::ShapeRef.new(shape: OutputSerialization, required: true, location_name: "OutputSerialization")) + SelectParameters.struct_class = Types::SelectParameters + + ServerSideEncryptionByDefault.add_member(:sse_algorithm, Shapes::ShapeRef.new(shape: ServerSideEncryption, required: true, location_name: "SSEAlgorithm")) + ServerSideEncryptionByDefault.add_member(:kms_master_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location_name: "KMSMasterKeyID")) + ServerSideEncryptionByDefault.struct_class = Types::ServerSideEncryptionByDefault + + ServerSideEncryptionConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: ServerSideEncryptionRules, required: true, location_name: "Rule")) + ServerSideEncryptionConfiguration.struct_class = Types::ServerSideEncryptionConfiguration + + ServerSideEncryptionRule.add_member(:apply_server_side_encryption_by_default, Shapes::ShapeRef.new(shape: ServerSideEncryptionByDefault, location_name: "ApplyServerSideEncryptionByDefault")) + ServerSideEncryptionRule.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location_name: "BucketKeyEnabled")) + ServerSideEncryptionRule.struct_class = Types::ServerSideEncryptionRule + + ServerSideEncryptionRules.member = Shapes::ShapeRef.new(shape: ServerSideEncryptionRule) + + SourceSelectionCriteria.add_member(:sse_kms_encrypted_objects, Shapes::ShapeRef.new(shape: SseKmsEncryptedObjects, location_name: "SseKmsEncryptedObjects")) + SourceSelectionCriteria.add_member(:replica_modifications, Shapes::ShapeRef.new(shape: ReplicaModifications, location_name: "ReplicaModifications")) + SourceSelectionCriteria.struct_class = Types::SourceSelectionCriteria + + SseKmsEncryptedObjects.add_member(:status, Shapes::ShapeRef.new(shape: SseKmsEncryptedObjectsStatus, required: true, location_name: "Status")) + SseKmsEncryptedObjects.struct_class = Types::SseKmsEncryptedObjects + + Stats.add_member(:bytes_scanned, Shapes::ShapeRef.new(shape: BytesScanned, location_name: "BytesScanned")) + Stats.add_member(:bytes_processed, Shapes::ShapeRef.new(shape: BytesProcessed, location_name: "BytesProcessed")) + Stats.add_member(:bytes_returned, Shapes::ShapeRef.new(shape: BytesReturned, location_name: "BytesReturned")) + Stats.struct_class = Types::Stats + + StatsEvent.add_member(:details, Shapes::ShapeRef.new(shape: Stats, eventpayload: true, eventpayload_type: 'structure', location_name: "Details", metadata: {"eventpayload"=>true})) + StatsEvent.struct_class = Types::StatsEvent + + 
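With SourceSelectionCriteria and the ServerSideEncryption* shapes in place, a full ReplicationConfiguration (whose ReplicationRule members appear earlier in this file) can be assembled and sent via put_bucket_replication. A sketch; every name and ARN below is a placeholder:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new

    client.put_bucket_replication(
      bucket: "example-source-bucket",                              # hypothetical
      replication_configuration: {
        role: "arn:aws:iam::123456789012:role/example-replication", # hypothetical role
        rules: [
          {
            id: "replicate-docs",
            status: "Enabled",
            priority: 1,
            filter: { prefix: "docs/" },
            delete_marker_replication: { status: "Disabled" },      # required when a filter is used
            destination: { bucket: "arn:aws:s3:::example-dest-bucket" }
          }
        ]
      }
    )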
StorageClassAnalysis.add_member(:data_export, Shapes::ShapeRef.new(shape: StorageClassAnalysisDataExport, location_name: "DataExport")) + StorageClassAnalysis.struct_class = Types::StorageClassAnalysis + + StorageClassAnalysisDataExport.add_member(:output_schema_version, Shapes::ShapeRef.new(shape: StorageClassAnalysisSchemaVersion, required: true, location_name: "OutputSchemaVersion")) + StorageClassAnalysisDataExport.add_member(:destination, Shapes::ShapeRef.new(shape: AnalyticsExportDestination, required: true, location_name: "Destination")) + StorageClassAnalysisDataExport.struct_class = Types::StorageClassAnalysisDataExport + + Tag.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key")) + Tag.add_member(:value, Shapes::ShapeRef.new(shape: Value, required: true, location_name: "Value")) + Tag.struct_class = Types::Tag + + TagSet.member = Shapes::ShapeRef.new(shape: Tag, location_name: "Tag") + + Tagging.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet")) + Tagging.struct_class = Types::Tagging + + TargetGrant.add_member(:grantee, Shapes::ShapeRef.new(shape: Grantee, location_name: "Grantee")) + TargetGrant.add_member(:permission, Shapes::ShapeRef.new(shape: BucketLogsPermission, location_name: "Permission")) + TargetGrant.struct_class = Types::TargetGrant + + TargetGrants.member = Shapes::ShapeRef.new(shape: TargetGrant, location_name: "Grant") + + Tiering.add_member(:days, Shapes::ShapeRef.new(shape: IntelligentTieringDays, required: true, location_name: "Days")) + Tiering.add_member(:access_tier, Shapes::ShapeRef.new(shape: IntelligentTieringAccessTier, required: true, location_name: "AccessTier")) + Tiering.struct_class = Types::Tiering + + TieringList.member = Shapes::ShapeRef.new(shape: Tiering) + + TopicConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + TopicConfiguration.add_member(:topic_arn, Shapes::ShapeRef.new(shape: TopicArn, required: true, location_name: "Topic")) + TopicConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event")) + TopicConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter")) + TopicConfiguration.struct_class = Types::TopicConfiguration + + TopicConfigurationDeprecated.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) + TopicConfigurationDeprecated.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event")) + TopicConfigurationDeprecated.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event")) + TopicConfigurationDeprecated.add_member(:topic, Shapes::ShapeRef.new(shape: TopicArn, location_name: "Topic")) + TopicConfigurationDeprecated.struct_class = Types::TopicConfigurationDeprecated + + TopicConfigurationList.member = Shapes::ShapeRef.new(shape: TopicConfiguration) + + Transition.add_member(:date, Shapes::ShapeRef.new(shape: Date, location_name: "Date")) + Transition.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days")) + Transition.add_member(:storage_class, Shapes::ShapeRef.new(shape: TransitionStorageClass, location_name: "StorageClass")) + Transition.struct_class = Types::Transition + + TransitionList.member = Shapes::ShapeRef.new(shape: Transition) + + UploadPartCopyOutput.add_member(:copy_source_version_id, Shapes::ShapeRef.new(shape: CopySourceVersionId, location: "header", 
location_name: "x-amz-copy-source-version-id")) + UploadPartCopyOutput.add_member(:copy_part_result, Shapes::ShapeRef.new(shape: CopyPartResult, location_name: "CopyPartResult")) + UploadPartCopyOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + UploadPartCopyOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + UploadPartCopyOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + UploadPartCopyOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + UploadPartCopyOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + UploadPartCopyOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + UploadPartCopyOutput.struct_class = Types::UploadPartCopyOutput + UploadPartCopyOutput[:payload] = :copy_part_result + UploadPartCopyOutput[:payload_member] = UploadPartCopyOutput.member(:copy_part_result) + + UploadPartCopyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + UploadPartCopyRequest.add_member(:copy_source, Shapes::ShapeRef.new(shape: CopySource, required: true, location: "header", location_name: "x-amz-copy-source")) + UploadPartCopyRequest.add_member(:copy_source_if_match, Shapes::ShapeRef.new(shape: CopySourceIfMatch, location: "header", location_name: "x-amz-copy-source-if-match")) + UploadPartCopyRequest.add_member(:copy_source_if_modified_since, Shapes::ShapeRef.new(shape: CopySourceIfModifiedSince, location: "header", location_name: "x-amz-copy-source-if-modified-since")) + UploadPartCopyRequest.add_member(:copy_source_if_none_match, Shapes::ShapeRef.new(shape: CopySourceIfNoneMatch, location: "header", location_name: "x-amz-copy-source-if-none-match")) + UploadPartCopyRequest.add_member(:copy_source_if_unmodified_since, Shapes::ShapeRef.new(shape: CopySourceIfUnmodifiedSince, location: "header", location_name: "x-amz-copy-source-if-unmodified-since")) + UploadPartCopyRequest.add_member(:copy_source_range, Shapes::ShapeRef.new(shape: CopySourceRange, location: "header", location_name: "x-amz-copy-source-range")) + UploadPartCopyRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + UploadPartCopyRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, required: true, location: "querystring", location_name: "partNumber")) + UploadPartCopyRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) + UploadPartCopyRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + UploadPartCopyRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: 
"x-amz-server-side-encryption-customer-key")) + UploadPartCopyRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + UploadPartCopyRequest.add_member(:copy_source_sse_customer_algorithm, Shapes::ShapeRef.new(shape: CopySourceSSECustomerAlgorithm, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-algorithm")) + UploadPartCopyRequest.add_member(:copy_source_sse_customer_key, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKey, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key")) + UploadPartCopyRequest.add_member(:copy_source_sse_customer_key_md5, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKeyMD5, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key-MD5")) + UploadPartCopyRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + UploadPartCopyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + UploadPartCopyRequest.add_member(:expected_source_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-source-expected-bucket-owner")) + UploadPartCopyRequest.struct_class = Types::UploadPartCopyRequest + + UploadPartOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) + UploadPartOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag")) + UploadPartOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + UploadPartOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + UploadPartOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + UploadPartOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + UploadPartOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + UploadPartOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + UploadPartOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) + UploadPartOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) + UploadPartOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) + UploadPartOutput.struct_class = Types::UploadPartOutput + + UploadPartRequest.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) + UploadPartRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", 
location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) + UploadPartRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) + UploadPartRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) + UploadPartRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) + UploadPartRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) + UploadPartRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) + UploadPartRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) + UploadPartRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) + UploadPartRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) + UploadPartRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, required: true, location: "querystring", location_name: "partNumber")) + UploadPartRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) + UploadPartRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) + UploadPartRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) + UploadPartRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) + UploadPartRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) + UploadPartRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) + UploadPartRequest.struct_class = Types::UploadPartRequest + UploadPartRequest[:payload] = :body + UploadPartRequest[:payload_member] = UploadPartRequest.member(:body) + + UserMetadata.member = Shapes::ShapeRef.new(shape: MetadataEntry, location_name: "MetadataEntry") + + VersioningConfiguration.add_member(:mfa_delete, Shapes::ShapeRef.new(shape: MFADelete, location_name: "MfaDelete")) + VersioningConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: BucketVersioningStatus, location_name: "Status")) + VersioningConfiguration.struct_class = Types::VersioningConfiguration + + WebsiteConfiguration.add_member(:error_document, Shapes::ShapeRef.new(shape: ErrorDocument, location_name: "ErrorDocument")) + WebsiteConfiguration.add_member(:index_document, Shapes::ShapeRef.new(shape: IndexDocument, location_name: "IndexDocument")) + WebsiteConfiguration.add_member(:redirect_all_requests_to, Shapes::ShapeRef.new(shape: RedirectAllRequestsTo, location_name: "RedirectAllRequestsTo")) + WebsiteConfiguration.add_member(:routing_rules, Shapes::ShapeRef.new(shape: RoutingRules, location_name: 
"RoutingRules")) + WebsiteConfiguration.struct_class = Types::WebsiteConfiguration + + WriteGetObjectResponseRequest.add_member(:request_route, Shapes::ShapeRef.new(shape: RequestRoute, required: true, location: "header", location_name: "x-amz-request-route", metadata: {"hostLabel"=>true, "hostLabelName"=>"RequestRoute"})) + WriteGetObjectResponseRequest.add_member(:request_token, Shapes::ShapeRef.new(shape: RequestToken, required: true, location: "header", location_name: "x-amz-request-token")) + WriteGetObjectResponseRequest.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) + WriteGetObjectResponseRequest.add_member(:status_code, Shapes::ShapeRef.new(shape: GetObjectResponseStatusCode, location: "header", location_name: "x-amz-fwd-status")) + WriteGetObjectResponseRequest.add_member(:error_code, Shapes::ShapeRef.new(shape: ErrorCode, location: "header", location_name: "x-amz-fwd-error-code")) + WriteGetObjectResponseRequest.add_member(:error_message, Shapes::ShapeRef.new(shape: ErrorMessage, location: "header", location_name: "x-amz-fwd-error-message")) + WriteGetObjectResponseRequest.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "x-amz-fwd-header-accept-ranges")) + WriteGetObjectResponseRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "x-amz-fwd-header-Cache-Control")) + WriteGetObjectResponseRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "x-amz-fwd-header-Content-Disposition")) + WriteGetObjectResponseRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "x-amz-fwd-header-Content-Encoding")) + WriteGetObjectResponseRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "x-amz-fwd-header-Content-Language")) + WriteGetObjectResponseRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) + WriteGetObjectResponseRequest.add_member(:content_range, Shapes::ShapeRef.new(shape: ContentRange, location: "header", location_name: "x-amz-fwd-header-Content-Range")) + WriteGetObjectResponseRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "x-amz-fwd-header-Content-Type")) + WriteGetObjectResponseRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-crc32")) + WriteGetObjectResponseRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-crc32c")) + WriteGetObjectResponseRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-sha1")) + WriteGetObjectResponseRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-sha256")) + WriteGetObjectResponseRequest.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-fwd-header-x-amz-delete-marker")) + WriteGetObjectResponseRequest.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: 
"x-amz-fwd-header-ETag")) + WriteGetObjectResponseRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "x-amz-fwd-header-Expires")) + WriteGetObjectResponseRequest.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-fwd-header-x-amz-expiration")) + WriteGetObjectResponseRequest.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "x-amz-fwd-header-Last-Modified")) + WriteGetObjectResponseRequest.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-fwd-header-x-amz-missing-meta")) + WriteGetObjectResponseRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) + WriteGetObjectResponseRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-fwd-header-x-amz-object-lock-mode")) + WriteGetObjectResponseRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-fwd-header-x-amz-object-lock-legal-hold")) + WriteGetObjectResponseRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-fwd-header-x-amz-object-lock-retain-until-date")) + WriteGetObjectResponseRequest.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-fwd-header-x-amz-mp-parts-count")) + WriteGetObjectResponseRequest.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-fwd-header-x-amz-replication-status")) + WriteGetObjectResponseRequest.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-fwd-header-x-amz-request-charged")) + WriteGetObjectResponseRequest.add_member(:restore, Shapes::ShapeRef.new(shape: Restore, location: "header", location_name: "x-amz-fwd-header-x-amz-restore")) + WriteGetObjectResponseRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption")) + WriteGetObjectResponseRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm")) + WriteGetObjectResponseRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id")) + WriteGetObjectResponseRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5")) + WriteGetObjectResponseRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-fwd-header-x-amz-storage-class")) + WriteGetObjectResponseRequest.add_member(:tag_count, Shapes::ShapeRef.new(shape: TagCount, location: "header", location_name: "x-amz-fwd-header-x-amz-tagging-count")) + WriteGetObjectResponseRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-fwd-header-x-amz-version-id")) + 
WriteGetObjectResponseRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled")) + WriteGetObjectResponseRequest.struct_class = Types::WriteGetObjectResponseRequest + WriteGetObjectResponseRequest[:payload] = :body + WriteGetObjectResponseRequest[:payload_member] = WriteGetObjectResponseRequest.member(:body) + + + # @api private + API = Seahorse::Model::Api.new.tap do |api| + + api.version = "2006-03-01" + + api.metadata = { + "apiVersion" => "2006-03-01", + "checksumFormat" => "md5", + "endpointPrefix" => "s3", + "globalEndpoint" => "s3.amazonaws.com", + "protocol" => "rest-xml", + "serviceAbbreviation" => "Amazon S3", + "serviceFullName" => "Amazon Simple Storage Service", + "serviceId" => "S3", + "uid" => "s3-2006-03-01", + } + + api.add_operation(:abort_multipart_upload, Seahorse::Model::Operation.new.tap do |o| + o.name = "AbortMultipartUpload" + o.http_method = "DELETE" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: AbortMultipartUploadRequest) + o.output = Shapes::ShapeRef.new(shape: AbortMultipartUploadOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchUpload) + end) + + api.add_operation(:complete_multipart_upload, Seahorse::Model::Operation.new.tap do |o| + o.name = "CompleteMultipartUpload" + o.http_method = "POST" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: CompleteMultipartUploadRequest) + o.output = Shapes::ShapeRef.new(shape: CompleteMultipartUploadOutput) + end) + + api.add_operation(:copy_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "CopyObject" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: CopyObjectRequest) + o.output = Shapes::ShapeRef.new(shape: CopyObjectOutput) + o.errors << Shapes::ShapeRef.new(shape: ObjectNotInActiveTierError) + end) + + api.add_operation(:create_bucket, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateBucket" + o.http_method = "PUT" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateBucketRequest) + o.output = Shapes::ShapeRef.new(shape: CreateBucketOutput) + o.errors << Shapes::ShapeRef.new(shape: BucketAlreadyExists) + o.errors << Shapes::ShapeRef.new(shape: BucketAlreadyOwnedByYou) + end) + + api.add_operation(:create_multipart_upload, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateMultipartUpload" + o.http_method = "POST" + o.http_request_uri = "/{Key+}?uploads" + o.input = Shapes::ShapeRef.new(shape: CreateMultipartUploadRequest) + o.output = Shapes::ShapeRef.new(shape: CreateMultipartUploadOutput) + end) + + api.add_operation(:delete_bucket, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucket" + o.http_method = "DELETE" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketAnalyticsConfiguration" + o.http_method = "DELETE" + o.http_request_uri = "/?analytics" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketAnalyticsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_cors, Seahorse::Model::Operation.new.tap do |o| + o.name = 
"DeleteBucketCors" + o.http_method = "DELETE" + o.http_request_uri = "/?cors" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketCorsRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketEncryption" + o.http_method = "DELETE" + o.http_request_uri = "/?encryption" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketEncryptionRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketIntelligentTieringConfiguration" + o.http_method = "DELETE" + o.http_request_uri = "/?intelligent-tiering" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketIntelligentTieringConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketInventoryConfiguration" + o.http_method = "DELETE" + o.http_request_uri = "/?inventory" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketInventoryConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketLifecycle" + o.http_method = "DELETE" + o.http_request_uri = "/?lifecycle" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketLifecycleRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketMetricsConfiguration" + o.http_method = "DELETE" + o.http_request_uri = "/?metrics" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketMetricsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_ownership_controls, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketOwnershipControls" + o.http_method = "DELETE" + o.http_request_uri = "/?ownershipControls" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketOwnershipControlsRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_policy, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketPolicy" + o.http_method = "DELETE" + o.http_request_uri = "/?policy" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketPolicyRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_replication, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketReplication" + o.http_method = "DELETE" + o.http_request_uri = "/?replication" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketReplicationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketTagging" + o.http_method = "DELETE" + 
o.http_request_uri = "/?tagging" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_bucket_website, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteBucketWebsite" + o.http_method = "DELETE" + o.http_request_uri = "/?website" + o.input = Shapes::ShapeRef.new(shape: DeleteBucketWebsiteRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:delete_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteObject" + o.http_method = "DELETE" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: DeleteObjectRequest) + o.output = Shapes::ShapeRef.new(shape: DeleteObjectOutput) + end) + + api.add_operation(:delete_object_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteObjectTagging" + o.http_method = "DELETE" + o.http_request_uri = "/{Key+}?tagging" + o.input = Shapes::ShapeRef.new(shape: DeleteObjectTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: DeleteObjectTaggingOutput) + end) + + api.add_operation(:delete_objects, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteObjects" + o.http_method = "POST" + o.http_request_uri = "/?delete" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: DeleteObjectsRequest) + o.output = Shapes::ShapeRef.new(shape: DeleteObjectsOutput) + end) + + api.add_operation(:delete_public_access_block, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeletePublicAccessBlock" + o.http_method = "DELETE" + o.http_request_uri = "/?publicAccessBlock" + o.input = Shapes::ShapeRef.new(shape: DeletePublicAccessBlockRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:get_bucket_accelerate_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketAccelerateConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?accelerate" + o.input = Shapes::ShapeRef.new(shape: GetBucketAccelerateConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketAccelerateConfigurationOutput) + end) + + api.add_operation(:get_bucket_acl, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketAcl" + o.http_method = "GET" + o.http_request_uri = "/?acl" + o.input = Shapes::ShapeRef.new(shape: GetBucketAclRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketAclOutput) + end) + + api.add_operation(:get_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketAnalyticsConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?analytics" + o.input = Shapes::ShapeRef.new(shape: GetBucketAnalyticsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketAnalyticsConfigurationOutput) + end) + + api.add_operation(:get_bucket_cors, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketCors" + o.http_method = "GET" + o.http_request_uri = "/?cors" + o.input = Shapes::ShapeRef.new(shape: GetBucketCorsRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketCorsOutput) + end) + + api.add_operation(:get_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| + o.name = 
"GetBucketEncryption" + o.http_method = "GET" + o.http_request_uri = "/?encryption" + o.input = Shapes::ShapeRef.new(shape: GetBucketEncryptionRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketEncryptionOutput) + end) + + api.add_operation(:get_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketIntelligentTieringConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?intelligent-tiering" + o.input = Shapes::ShapeRef.new(shape: GetBucketIntelligentTieringConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketIntelligentTieringConfigurationOutput) + end) + + api.add_operation(:get_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketInventoryConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?inventory" + o.input = Shapes::ShapeRef.new(shape: GetBucketInventoryConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketInventoryConfigurationOutput) + end) + + api.add_operation(:get_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketLifecycle" + o.http_method = "GET" + o.http_request_uri = "/?lifecycle" + o.deprecated = true + o.input = Shapes::ShapeRef.new(shape: GetBucketLifecycleRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketLifecycleOutput) + end) + + api.add_operation(:get_bucket_lifecycle_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketLifecycleConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?lifecycle" + o.input = Shapes::ShapeRef.new(shape: GetBucketLifecycleConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketLifecycleConfigurationOutput) + end) + + api.add_operation(:get_bucket_location, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketLocation" + o.http_method = "GET" + o.http_request_uri = "/?location" + o.input = Shapes::ShapeRef.new(shape: GetBucketLocationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketLocationOutput) + end) + + api.add_operation(:get_bucket_logging, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketLogging" + o.http_method = "GET" + o.http_request_uri = "/?logging" + o.input = Shapes::ShapeRef.new(shape: GetBucketLoggingRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketLoggingOutput) + end) + + api.add_operation(:get_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketMetricsConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?metrics" + o.input = Shapes::ShapeRef.new(shape: GetBucketMetricsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketMetricsConfigurationOutput) + end) + + api.add_operation(:get_bucket_notification, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketNotification" + o.http_method = "GET" + o.http_request_uri = "/?notification" + o.deprecated = true + o.input = Shapes::ShapeRef.new(shape: GetBucketNotificationConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: NotificationConfigurationDeprecated) + end) + + api.add_operation(:get_bucket_notification_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketNotificationConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?notification" + o.input = Shapes::ShapeRef.new(shape: GetBucketNotificationConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: NotificationConfiguration) + end) + + api.add_operation(:get_bucket_ownership_controls, 
Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketOwnershipControls" + o.http_method = "GET" + o.http_request_uri = "/?ownershipControls" + o.input = Shapes::ShapeRef.new(shape: GetBucketOwnershipControlsRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketOwnershipControlsOutput) + end) + + api.add_operation(:get_bucket_policy, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketPolicy" + o.http_method = "GET" + o.http_request_uri = "/?policy" + o.input = Shapes::ShapeRef.new(shape: GetBucketPolicyRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketPolicyOutput) + end) + + api.add_operation(:get_bucket_policy_status, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketPolicyStatus" + o.http_method = "GET" + o.http_request_uri = "/?policyStatus" + o.input = Shapes::ShapeRef.new(shape: GetBucketPolicyStatusRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketPolicyStatusOutput) + end) + + api.add_operation(:get_bucket_replication, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketReplication" + o.http_method = "GET" + o.http_request_uri = "/?replication" + o.input = Shapes::ShapeRef.new(shape: GetBucketReplicationRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketReplicationOutput) + end) + + api.add_operation(:get_bucket_request_payment, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketRequestPayment" + o.http_method = "GET" + o.http_request_uri = "/?requestPayment" + o.input = Shapes::ShapeRef.new(shape: GetBucketRequestPaymentRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketRequestPaymentOutput) + end) + + api.add_operation(:get_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketTagging" + o.http_method = "GET" + o.http_request_uri = "/?tagging" + o.input = Shapes::ShapeRef.new(shape: GetBucketTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketTaggingOutput) + end) + + api.add_operation(:get_bucket_versioning, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketVersioning" + o.http_method = "GET" + o.http_request_uri = "/?versioning" + o.input = Shapes::ShapeRef.new(shape: GetBucketVersioningRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketVersioningOutput) + end) + + api.add_operation(:get_bucket_website, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetBucketWebsite" + o.http_method = "GET" + o.http_request_uri = "/?website" + o.input = Shapes::ShapeRef.new(shape: GetBucketWebsiteRequest) + o.output = Shapes::ShapeRef.new(shape: GetBucketWebsiteOutput) + end) + + api.add_operation(:get_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObject" + o.http_method = "GET" + o.http_request_uri = "/{Key+}" + o.http_checksum = { + "requestValidationModeMember" => "checksum_mode", + "responseAlgorithms" => ["CRC32", "CRC32C", "SHA256", "SHA1"], + } + o.http_checksum = { + "requestValidationModeMember" => "checksum_mode", + "responseAlgorithms" => ["CRC32", "CRC32C", "SHA256", "SHA1"], + } + o.input = Shapes::ShapeRef.new(shape: GetObjectRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) + o.errors << Shapes::ShapeRef.new(shape: InvalidObjectState) + end) + + api.add_operation(:get_object_acl, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectAcl" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?acl" + o.input = Shapes::ShapeRef.new(shape: GetObjectAclRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectAclOutput) + 
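A minimal sketch, not part of the vendored gem source: the http_checksum metadata on GetObject above ("responseAlgorithms" => CRC32/CRC32C/SHA256/SHA1) is what lets the SDK verify a returned checksum client-side when the caller opts in through the "requestValidationModeMember", i.e. checksum_mode. Bucket and key names are placeholders.

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    resp = s3.get_object(
      bucket: 'my-bucket',
      key: 'my-key',
      checksum_mode: 'ENABLED' # ask the SDK to validate any checksum S3 returns
    )
    resp.checksum_sha256 # populated when the object was uploaded with a SHA256 checksum
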
o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) + end) + + api.add_operation(:get_object_attributes, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectAttributes" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?attributes" + o.input = Shapes::ShapeRef.new(shape: GetObjectAttributesRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectAttributesOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) + end) + + api.add_operation(:get_object_legal_hold, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectLegalHold" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?legal-hold" + o.input = Shapes::ShapeRef.new(shape: GetObjectLegalHoldRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectLegalHoldOutput) + end) + + api.add_operation(:get_object_lock_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectLockConfiguration" + o.http_method = "GET" + o.http_request_uri = "/?object-lock" + o.input = Shapes::ShapeRef.new(shape: GetObjectLockConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectLockConfigurationOutput) + end) + + api.add_operation(:get_object_retention, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectRetention" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?retention" + o.input = Shapes::ShapeRef.new(shape: GetObjectRetentionRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectRetentionOutput) + end) + + api.add_operation(:get_object_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectTagging" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?tagging" + o.input = Shapes::ShapeRef.new(shape: GetObjectTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectTaggingOutput) + end) + + api.add_operation(:get_object_torrent, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetObjectTorrent" + o.http_method = "GET" + o.http_request_uri = "/{Key+}?torrent" + o.input = Shapes::ShapeRef.new(shape: GetObjectTorrentRequest) + o.output = Shapes::ShapeRef.new(shape: GetObjectTorrentOutput) + end) + + api.add_operation(:get_public_access_block, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetPublicAccessBlock" + o.http_method = "GET" + o.http_request_uri = "/?publicAccessBlock" + o.input = Shapes::ShapeRef.new(shape: GetPublicAccessBlockRequest) + o.output = Shapes::ShapeRef.new(shape: GetPublicAccessBlockOutput) + end) + + api.add_operation(:head_bucket, Seahorse::Model::Operation.new.tap do |o| + o.name = "HeadBucket" + o.http_method = "HEAD" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: HeadBucketRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) + end) + + api.add_operation(:head_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "HeadObject" + o.http_method = "HEAD" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: HeadObjectRequest) + o.output = Shapes::ShapeRef.new(shape: HeadObjectOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) + end) + + api.add_operation(:list_bucket_analytics_configurations, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListBucketAnalyticsConfigurations" + o.http_method = "GET" + o.http_request_uri = "/?analytics" + o.input = Shapes::ShapeRef.new(shape: ListBucketAnalyticsConfigurationsRequest) + o.output = Shapes::ShapeRef.new(shape: ListBucketAnalyticsConfigurationsOutput) + end) + + 
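A minimal sketch, not part of the vendored gem source: the Aws::Pager entries attached to the list_* operations below are what drive auto-pagination, feeding each response token (for example next_continuation_token) back into the follow-up request (continuation_token) until the service stops returning a token or is_truncated is false. The bucket name is a placeholder.

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    s3.list_objects_v2(bucket: 'my-bucket').each do |page| # each yields one page per request
      page.contents.each { |object| puts object.key }
    end
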
api.add_operation(:list_bucket_intelligent_tiering_configurations, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListBucketIntelligentTieringConfigurations" + o.http_method = "GET" + o.http_request_uri = "/?intelligent-tiering" + o.input = Shapes::ShapeRef.new(shape: ListBucketIntelligentTieringConfigurationsRequest) + o.output = Shapes::ShapeRef.new(shape: ListBucketIntelligentTieringConfigurationsOutput) + end) + + api.add_operation(:list_bucket_inventory_configurations, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListBucketInventoryConfigurations" + o.http_method = "GET" + o.http_request_uri = "/?inventory" + o.input = Shapes::ShapeRef.new(shape: ListBucketInventoryConfigurationsRequest) + o.output = Shapes::ShapeRef.new(shape: ListBucketInventoryConfigurationsOutput) + end) + + api.add_operation(:list_bucket_metrics_configurations, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListBucketMetricsConfigurations" + o.http_method = "GET" + o.http_request_uri = "/?metrics" + o.input = Shapes::ShapeRef.new(shape: ListBucketMetricsConfigurationsRequest) + o.output = Shapes::ShapeRef.new(shape: ListBucketMetricsConfigurationsOutput) + end) + + api.add_operation(:list_buckets, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListBuckets" + o.http_method = "GET" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.output = Shapes::ShapeRef.new(shape: ListBucketsOutput) + end) + + api.add_operation(:list_multipart_uploads, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListMultipartUploads" + o.http_method = "GET" + o.http_request_uri = "/?uploads" + o.input = Shapes::ShapeRef.new(shape: ListMultipartUploadsRequest) + o.output = Shapes::ShapeRef.new(shape: ListMultipartUploadsOutput) + o[:pager] = Aws::Pager.new( + more_results: "is_truncated", + limit_key: "max_uploads", + tokens: { + "next_key_marker" => "key_marker", + "next_upload_id_marker" => "upload_id_marker" + } + ) + end) + + api.add_operation(:list_object_versions, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListObjectVersions" + o.http_method = "GET" + o.http_request_uri = "/?versions" + o.input = Shapes::ShapeRef.new(shape: ListObjectVersionsRequest) + o.output = Shapes::ShapeRef.new(shape: ListObjectVersionsOutput) + o[:pager] = Aws::Pager.new( + more_results: "is_truncated", + limit_key: "max_keys", + tokens: { + "next_key_marker" => "key_marker", + "next_version_id_marker" => "version_id_marker" + } + ) + end) + + api.add_operation(:list_objects, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListObjects" + o.http_method = "GET" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListObjectsRequest) + o.output = Shapes::ShapeRef.new(shape: ListObjectsOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) + o[:pager] = Aws::Pager.new( + more_results: "is_truncated", + limit_key: "max_keys", + tokens: { + "next_marker || contents[-1].key" => "marker" + } + ) + end) + + api.add_operation(:list_objects_v2, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListObjectsV2" + o.http_method = "GET" + o.http_request_uri = "/?list-type=2" + o.input = Shapes::ShapeRef.new(shape: ListObjectsV2Request) + o.output = Shapes::ShapeRef.new(shape: ListObjectsV2Output) + o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) + o[:pager] = Aws::Pager.new( + limit_key: "max_keys", + tokens: { + "next_continuation_token" => "continuation_token" + } + ) + end) + + api.add_operation(:list_parts, 
Seahorse::Model::Operation.new.tap do |o| + o.name = "ListParts" + o.http_method = "GET" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: ListPartsRequest) + o.output = Shapes::ShapeRef.new(shape: ListPartsOutput) + o[:pager] = Aws::Pager.new( + more_results: "is_truncated", + limit_key: "max_parts", + tokens: { + "next_part_number_marker" => "part_number_marker" + } + ) + end) + + api.add_operation(:put_bucket_accelerate_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketAccelerateConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?accelerate" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketAccelerateConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_acl, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketAcl" + o.http_method = "PUT" + o.http_request_uri = "/?acl" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketAclRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketAnalyticsConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?analytics" + o.input = Shapes::ShapeRef.new(shape: PutBucketAnalyticsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_cors, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketCors" + o.http_method = "PUT" + o.http_request_uri = "/?cors" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketCorsRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketEncryption" + o.http_method = "PUT" + o.http_request_uri = "/?encryption" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketEncryptionRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketIntelligentTieringConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?intelligent-tiering" + o.input = Shapes::ShapeRef.new(shape: PutBucketIntelligentTieringConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: 
Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketInventoryConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?inventory" + o.input = Shapes::ShapeRef.new(shape: PutBucketInventoryConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketLifecycle" + o.http_method = "PUT" + o.http_request_uri = "/?lifecycle" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.deprecated = true + o.input = Shapes::ShapeRef.new(shape: PutBucketLifecycleRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_lifecycle_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketLifecycleConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?lifecycle" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketLifecycleConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_logging, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketLogging" + o.http_method = "PUT" + o.http_request_uri = "/?logging" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketLoggingRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketMetricsConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?metrics" + o.input = Shapes::ShapeRef.new(shape: PutBucketMetricsConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_notification, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketNotification" + o.http_method = "PUT" + o.http_request_uri = "/?notification" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.deprecated = true + o.input = Shapes::ShapeRef.new(shape: PutBucketNotificationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_notification_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketNotificationConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?notification" + o.input = Shapes::ShapeRef.new(shape: 
PutBucketNotificationConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_ownership_controls, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketOwnershipControls" + o.http_method = "PUT" + o.http_request_uri = "/?ownershipControls" + o.http_checksum = { + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketOwnershipControlsRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_policy, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketPolicy" + o.http_method = "PUT" + o.http_request_uri = "/?policy" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketPolicyRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_replication, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketReplication" + o.http_method = "PUT" + o.http_request_uri = "/?replication" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketReplicationRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_request_payment, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketRequestPayment" + o.http_method = "PUT" + o.http_request_uri = "/?requestPayment" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketRequestPaymentRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketTagging" + o.http_method = "PUT" + o.http_request_uri = "/?tagging" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_bucket_versioning, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketVersioning" + o.http_method = "PUT" + o.http_request_uri = "/?versioning" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketVersioningRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) 
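A minimal sketch, not part of the vendored gem source: "requestChecksumRequired" => true on the bucket-configuration PUTs above means the SDK always sends an integrity checksum with the request body, typically Content-MD5 when no algorithm is chosen, or the algorithm named through the "requestAlgorithmMember" (checksum_algorithm). The bucket name is a placeholder.

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    s3.put_bucket_versioning(
      bucket: 'my-bucket',
      checksum_algorithm: 'SHA256', # optional; a default checksum is computed when omitted
      versioning_configuration: { status: 'Enabled' }
    )
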
+ end) + + api.add_operation(:put_bucket_website, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutBucketWebsite" + o.http_method = "PUT" + o.http_request_uri = "/?website" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutBucketWebsiteRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:put_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObject" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectOutput) + end) + + api.add_operation(:put_object_acl, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObjectAcl" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}?acl" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectAclRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectAclOutput) + o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) + end) + + api.add_operation(:put_object_legal_hold, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObjectLegalHold" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}?legal-hold" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectLegalHoldRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectLegalHoldOutput) + end) + + api.add_operation(:put_object_lock_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObjectLockConfiguration" + o.http_method = "PUT" + o.http_request_uri = "/?object-lock" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectLockConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectLockConfigurationOutput) + end) + + api.add_operation(:put_object_retention, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObjectRetention" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}?retention" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectRetentionRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectRetentionOutput) + end) + + api.add_operation(:put_object_tagging, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutObjectTagging" + o.http_method = "PUT" + o.http_request_uri = 
"/{Key+}?tagging" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutObjectTaggingRequest) + o.output = Shapes::ShapeRef.new(shape: PutObjectTaggingOutput) + end) + + api.add_operation(:put_public_access_block, Seahorse::Model::Operation.new.tap do |o| + o.name = "PutPublicAccessBlock" + o.http_method = "PUT" + o.http_request_uri = "/?publicAccessBlock" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => true, + } + o.input = Shapes::ShapeRef.new(shape: PutPublicAccessBlockRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + + api.add_operation(:restore_object, Seahorse::Model::Operation.new.tap do |o| + o.name = "RestoreObject" + o.http_method = "POST" + o.http_request_uri = "/{Key+}?restore" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.input = Shapes::ShapeRef.new(shape: RestoreObjectRequest) + o.output = Shapes::ShapeRef.new(shape: RestoreObjectOutput) + o.errors << Shapes::ShapeRef.new(shape: ObjectAlreadyInActiveTierError) + end) + + api.add_operation(:select_object_content, Seahorse::Model::Operation.new.tap do |o| + o.name = "SelectObjectContent" + o.http_method = "POST" + o.http_request_uri = "/{Key+}?select&select-type=2" + o.input = Shapes::ShapeRef.new(shape: SelectObjectContentRequest, + location_name: "SelectObjectContentRequest", + metadata: { + "xmlNamespace" => {"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"} + } + ) + o.output = Shapes::ShapeRef.new(shape: SelectObjectContentOutput) + end) + + api.add_operation(:upload_part, Seahorse::Model::Operation.new.tap do |o| + o.name = "UploadPart" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}" + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.http_checksum = { + "requestAlgorithmMember" => "checksum_algorithm", + "requestChecksumRequired" => false, + } + o.input = Shapes::ShapeRef.new(shape: UploadPartRequest) + o.output = Shapes::ShapeRef.new(shape: UploadPartOutput) + end) + + api.add_operation(:upload_part_copy, Seahorse::Model::Operation.new.tap do |o| + o.name = "UploadPartCopy" + o.http_method = "PUT" + o.http_request_uri = "/{Key+}" + o.input = Shapes::ShapeRef.new(shape: UploadPartCopyRequest) + o.output = Shapes::ShapeRef.new(shape: UploadPartCopyOutput) + end) + + api.add_operation(:write_get_object_response, Seahorse::Model::Operation.new.tap do |o| + o.name = "WriteGetObjectResponse" + o.http_method = "POST" + o.http_request_uri = "/WriteGetObjectResponse" + o['authtype'] = "v4-unsigned-body" + o.endpoint_pattern = { + "hostPrefix" => "{RequestRoute}.", + } + o.input = Shapes::ShapeRef.new(shape: WriteGetObjectResponseRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + end) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations.rb 
b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations.rb new file mode 100644 index 0000000..5044608 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +# utility classes +require 'aws-sdk-s3/bucket_region_cache' +require 'aws-sdk-s3/encryption' +require 'aws-sdk-s3/encryption_v2' +require 'aws-sdk-s3/file_part' +require 'aws-sdk-s3/file_uploader' +require 'aws-sdk-s3/file_downloader' +require 'aws-sdk-s3/legacy_signer' +require 'aws-sdk-s3/multipart_file_uploader' +require 'aws-sdk-s3/multipart_stream_uploader' +require 'aws-sdk-s3/multipart_upload_error' +require 'aws-sdk-s3/object_copier' +require 'aws-sdk-s3/object_multipart_copier' +require 'aws-sdk-s3/presigned_post' +require 'aws-sdk-s3/presigner' + +# customizations to generated classes +require 'aws-sdk-s3/customizations/bucket' +require 'aws-sdk-s3/customizations/errors' +require 'aws-sdk-s3/customizations/object' +require 'aws-sdk-s3/customizations/object_summary' +require 'aws-sdk-s3/customizations/multipart_upload' +require 'aws-sdk-s3/customizations/types/list_object_versions_output' +require 'aws-sdk-s3/customizations/types/permanent_redirect' + +[ + Aws::S3::Object::Collection, + Aws::S3::ObjectSummary::Collection, + Aws::S3::ObjectVersion::Collection, +].each do |klass| + klass.send(:alias_method, :delete, :batch_delete!) + klass.extend Aws::Deprecations + klass.send(:deprecated, :delete, use: :batch_delete!) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/bucket.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/bucket.rb new file mode 100644 index 0000000..b04de0b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/bucket.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: true + +require 'uri' + +module Aws + module S3 + class Bucket + # Deletes all objects and versioned objects from this bucket + # + # @example + # + # bucket.clear! + # + # @return [void] + def clear! + object_versions.batch_delete! + end + + # Deletes all objects and versioned objects from this bucket and + # then deletes the bucket. + # + # @example + # + # bucket.delete! + # + # @option options [Integer] :max_attempts (3) Maximum number of times to + # attempt to delete the empty bucket before raising + # `Aws::S3::Errors::BucketNotEmpty`. + # + # @option options [Float] :initial_wait (1.3) Seconds to wait before + # retrying the call to delete the bucket, exponentially increased for + # each attempt. + # + # @return [void] + def delete!(options = {}) + options = { + initial_wait: 1.3, + max_attempts: 3 + }.merge(options) + + attempts = 0 + begin + clear! + delete + rescue Errors::BucketNotEmpty + attempts += 1 + raise if attempts >= options[:max_attempts] + + Kernel.sleep(options[:initial_wait]**attempts) + retry + end + end + + # Returns a public URL for this bucket. + # + # @example + # + # bucket = s3.bucket('bucket-name') + # bucket.url + # #=> "https://bucket-name.s3.amazonaws.com" + # + # It will also work when provided an Access Point ARN. + # + # @example + # + # bucket = s3.bucket( + # 'arn:aws:s3:us-east-1:123456789012:accesspoint:myendpoint' + # ) + # bucket.url + # #=> "https://myendpoint-123456789012.s3-accesspoint.us-west-2.amazonaws.com" + # + # You can pass `virtual_host: true` to use the bucket name as the + # host name. 
+ # + # bucket = s3.bucket('my-bucket.com') + # bucket.url(virtual_host: true) + # #=> "http://my-bucket.com" + # + # @option options [Boolean] :virtual_host (false) When `true`, + # the bucket name will be used as the host name. This is useful + # when you have a CNAME configured for this bucket. + # + # @option options [Boolean] :secure (true) When `false`, http + # will be used with virtual_host. This is required when + # the bucket name has a dot (.) in it. + # + # @return [String] the URL for this bucket. + def url(options = {}) + if options[:virtual_host] + scheme = options.fetch(:secure, true) ? 'https' : 'http' + "#{scheme}://#{name}" + else + # Taken from Aws::S3::Endpoints module + unless client.config.regional_endpoint + endpoint = client.config.endpoint.to_s + end + params = Aws::S3::EndpointParameters.new( + bucket: name, + region: client.config.region, + use_fips: client.config.use_fips_endpoint, + use_dual_stack: client.config.use_dualstack_endpoint, + endpoint: endpoint, + force_path_style: client.config.force_path_style, + accelerate: client.config.use_accelerate_endpoint, + use_global_endpoint: client.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: client.config.s3_disable_multiregion_access_points, + use_arn_region: client.config.s3_use_arn_region, + ) + endpoint = Aws::S3::EndpointProvider.new.resolve_endpoint(params) + endpoint.url + end + end + + # Creates a {PresignedPost} that makes it easy to upload a file from + # a web browser direct to Amazon S3 using an HTML post form with + # a file field. + # + # See the {PresignedPost} documentation for more information. + # @note You must specify `:key` or `:key_starts_with`. All other options + # are optional. + # @option (see PresignedPost#initialize) + # @return [PresignedPost] + # @see PresignedPost + def presigned_post(options = {}) + PresignedPost.new( + client.config.credentials, + client.config.region, + name, + { url: url }.merge(options) + ) + end + + # @api private + def load + @data = client.list_buckets.buckets.find { |b| b.name == name } + raise "unable to load bucket #{name}" if @data.nil? + + self + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/errors.rb new file mode 100644 index 0000000..173cc57 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/errors.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Errors + # Hijack PermanentRedirect dynamic error to also include endpoint + # and bucket. 
+      class PermanentRedirect < ServiceError
+        # @param [Seahorse::Client::RequestContext] context
+        # @param [String] message
+        # @param [Aws::S3::Types::PermanentRedirect] data
+        def initialize(context, message, _data = Aws::EmptyStructure.new)
+          data = Aws::S3::Types::PermanentRedirect.new(message: message)
+          body = context.http_response.body_contents
+          if (endpoint = body.match(/<Endpoint>(.+?)<\/Endpoint>/))
+            data.endpoint = endpoint[1]
+          end
+          if (bucket = body.match(/<Bucket>(.+?)<\/Bucket>/))
+            data.bucket = bucket[1]
+          end
+          data.region = context.http_response.headers['x-amz-bucket-region']
+          super(context, message, data)
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/multipart_upload.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/multipart_upload.rb
new file mode 100644
index 0000000..e9a6501
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/multipart_upload.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    class MultipartUpload
+
+      alias_method :basic_complete, :complete
+
+      # Completes the upload, requires a list of completed parts. You can
+      # provide the list of parts with `:part_number` and `:etag` values.
+      #
+      #     upload.complete(multipart_upload: { parts: [
+      #       { part_number: 1, etag:'etag1' },
+      #       { part_number: 2, etag:'etag2' },
+      #       ...
+      #     ]})
+      #
+      # Alternatively, you can pass **`compute_parts: true`** and the part
+      # list will be computed by calling {Client#list_parts}.
+      #
+      #     upload.complete(compute_parts: true)
+      #
+      # @option options [Boolean] :compute_parts (false) When `true`,
+      #   the {Client#list_parts} method will be called to determine
+      #   the list of required part numbers and their ETags.
+      #
+      def complete(options = {})
+        if options.delete(:compute_parts)
+          options[:multipart_upload] = { parts: compute_parts }
+        end
+        basic_complete(options)
+      end
+
+      private
+
+      def compute_parts
+        parts.sort_by(&:part_number).each.with_object([]) do |part, part_list|
+          part_list << { part_number: part.part_number, etag: part.etag }
+        end
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object.rb
new file mode 100644
index 0000000..e8704b5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object.rb
@@ -0,0 +1,491 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    class Object
+      alias size content_length
+
+      # Make the method redefinable
+      alias_method :copy_from, :copy_from
+
+      # Copies another object to this object. Use `multipart_copy: true`
+      # for large objects. This is required for objects that exceed 5GB.
+      #
+      # @param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash]
+      #   source Where to copy object data from. `source` must be one of the
+      #   following:
+      #
+      #   * {Aws::S3::Object}
+      #   * {Aws::S3::ObjectSummary}
+      #   * {Aws::S3::ObjectVersion}
+      #   * Hash - with `:bucket` and `:key` and optional `:version_id`
+      #   * String - formatted like `"source-bucket-name/uri-escaped-key"`
+      #     or `"source-bucket-name/uri-escaped-key?versionId=version-id"`
+      #
+      # @option options [Boolean] :multipart_copy (false) When `true`,
+      #   the object will be copied using the multipart APIs.
+      #   This is necessary for objects larger than 5GB and can provide
+      #   performance improvements on large objects. Amazon S3 does
+      #   not accept multipart copies for objects smaller than 5MB.
+      #   Object metadata such as Content-Type will be copied; however,
+      #   checksums are not copied.
+      #
+      # @option options [Integer] :content_length Only used when
+      #   `:multipart_copy` is `true`. Passing this option avoids a HEAD
+      #   request to query the source object size but prevents object metadata
+      #   from being copied. Raises an `ArgumentError` if
+      #   this option is provided when `:multipart_copy` is `false` or not set.
+      #
+      # @option options [S3::Client] :copy_source_client Only used when
+      #   `:multipart_copy` is `true` and the source object is in a
+      #   different region. You do not need to specify this option
+      #   if you have provided `:content_length`.
+      #
+      # @option options [String] :copy_source_region Only used when
+      #   `:multipart_copy` is `true` and the source object is in a
+      #   different region. You do not need to specify this option
+      #   if you have provided a `:source_client` or a `:content_length`.
+      #
+      # @example Basic object copy
+      #
+      #   bucket = Aws::S3::Bucket.new('target-bucket')
+      #   object = bucket.object('target-key')
+      #
+      #   # source as String
+      #   object.copy_from('source-bucket/source-key')
+      #
+      #   # source as Hash
+      #   object.copy_from(bucket:'source-bucket', key:'source-key')
+      #
+      #   # source as Aws::S3::Object
+      #   object.copy_from(bucket.object('source-key'))
+      #
+      # @example Managed copy of large objects
+      #
+      #   # uses multipart upload APIs to copy object
+      #   object.copy_from('src-bucket/src-key', multipart_copy: true)
+      #
+      # @see #copy_to
+      #
+      def copy_from(source, options = {})
+        if Hash === source && source[:copy_source]
+          # for backwards compatibility
+          @client.copy_object(source.merge(bucket: bucket_name, key: key))
+        else
+          ObjectCopier.new(self, options).copy_from(source, options)
+        end
+      end
+
+      # Copies this object to another object. Use `multipart_copy: true`
+      # for large objects. This is required for objects that exceed 5GB.
+      #
+      # @note If you need to copy to a bucket in a different region, use
+      #   {#copy_from}.
+      #
+      # @param [S3::Object, String, Hash] target Where to copy the object
+      #   data to. `target` must be one of the following:
+      #
+      #   * {Aws::S3::Object}
+      #   * Hash - with `:bucket` and `:key`
+      #   * String - formatted like `"target-bucket-name/target-key"`
+      #
+      # @example Basic object copy
+      #
+      #   bucket = Aws::S3::Bucket.new('source-bucket')
+      #   object = bucket.object('source-key')
+      #
+      #   # target as String
+      #   object.copy_to('target-bucket/target-key')
+      #
+      #   # target as Hash
+      #   object.copy_to(bucket: 'target-bucket', key: 'target-key')
+      #
+      #   # target as Aws::S3::Object
+      #   object.copy_to(bucket.object('target-key'))
+      #
+      # @example Managed copy of large objects
+      #
+      #   # uses multipart upload APIs to copy object
+      #   object.copy_to('src-bucket/src-key', multipart_copy: true)
+      #
+      def copy_to(target, options = {})
+        ObjectCopier.new(self, options).copy_to(target, options)
+      end
+
+      # Copies and deletes the current object. The object will only be deleted
+      # if the copy operation succeeds.
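+      # @example A minimal sketch of a move (bucket and keys are placeholders)
+      #
+      #   obj = Aws::S3::Object.new('my-bucket', 'old-key')
+      #   obj.move_to('my-bucket/new-key') # deletes 'old-key' only after the copy succeeds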
+      #
+      # @param (see Object#copy_to)
+      # @option (see Object#copy_to)
+      # @return [void]
+      # @see Object#copy_to
+      # @see Object#delete
+      def move_to(target, options = {})
+        copy_to(target, options)
+        delete
+      end
+
+      # Creates a {PresignedPost} that makes it easy to upload a file from
+      # a web browser direct to Amazon S3 using an HTML post form with
+      # a file field.
+      #
+      # See the {PresignedPost} documentation for more information.
+      #
+      # @option (see PresignedPost#initialize)
+      # @return [PresignedPost]
+      # @see PresignedPost
+      def presigned_post(options = {})
+        PresignedPost.new(
+          client.config.credentials,
+          client.config.region,
+          bucket_name,
+          { key: key, url: bucket.url }.merge(options)
+        )
+      end
+
+      # Generates a pre-signed URL for this object.
+      #
+      # @example Pre-signed GET URL, valid for one hour
+      #
+      #   obj.presigned_url(:get, expires_in: 3600)
+      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
+      #
+      # @example Pre-signed PUT with a canned ACL
+      #
+      #   # the object uploaded using this URL will be publicly accessible
+      #   obj.presigned_url(:put, acl: 'public-read')
+      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
+      #
+      # @example Pre-signed UploadPart PUT
+      #
+      #   # uploads a part to an existing multipart upload
+      #   obj.presigned_url(:upload_part, part_number: 1, upload_id: 'uploadIdToken')
+      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
+      #
+      # @param [Symbol] method
+      #   The S3 operation to generate a presigned URL for. Valid values
+      #   are `:get`, `:put`, `:head`, `:delete`, `:create_multipart_upload`,
+      #   `:list_multipart_uploads`, `:complete_multipart_upload`,
+      #   `:abort_multipart_upload`, `:list_parts`, and `:upload_part`.
+      #
+      # @param [Hash] params
+      #   Additional request parameters to use when generating the pre-signed
+      #   URL. See the related documentation in {Client} for accepted
+      #   params.
+      #
+      #   | Method                       | Client Method                      |
+      #   |------------------------------|------------------------------------|
+      #   | `:get`                       | {Client#get_object}                |
+      #   | `:put`                       | {Client#put_object}                |
+      #   | `:head`                      | {Client#head_object}               |
+      #   | `:delete`                    | {Client#delete_object}             |
+      #   | `:create_multipart_upload`   | {Client#create_multipart_upload}   |
+      #   | `:list_multipart_uploads`    | {Client#list_multipart_uploads}    |
+      #   | `:complete_multipart_upload` | {Client#complete_multipart_upload} |
+      #   | `:abort_multipart_upload`    | {Client#abort_multipart_upload}    |
+      #   | `:list_parts`                | {Client#list_parts}                |
+      #   | `:upload_part`               | {Client#upload_part}               |
+      #
+      # @option params [Boolean] :virtual_host (false) When `true` the
+      #   presigned URL will use the bucket name as a virtual host.
+      #
+      #     bucket = Aws::S3::Bucket.new('my.bucket.com')
+      #     bucket.object('key').presigned_url(virtual_host: true)
+      #     #=> "http://my.bucket.com/key?..."
+      #
+      # @option params [Integer] :expires_in (900) Number of seconds before
+      #   the pre-signed URL expires. This may not exceed one week (604800
+      #   seconds). Note that the pre-signed URL is also only valid as long as
+      #   credentials used to sign it are. For example, when using IAM roles,
+      #   temporary tokens generated for signing also have a default expiration
+      #   which will affect the effective expiration of the pre-signed URL.
+      #
+      # @raise [ArgumentError] Raised if `:expires_in` exceeds one week
+      #   (604800 seconds).
+      #
+      # @return [String]
+      #
+      def presigned_url(method, params = {})
+        presigner = Presigner.new(client: client)
+
+        if %w(delete head get put).include?(method.to_s)
+          method = "#{method}_object".to_sym
+        end
+
+        presigner.presigned_url(
+          method.downcase,
+          params.merge(bucket: bucket_name, key: key)
+        )
+      end
+
+      # Allows you to create presigned URL requests for S3 operations. This
+      # method returns a tuple containing the URL and the signed X-amz-* headers
+      # to be used with the presigned url.
+      #
+      # @example Pre-signed GET URL, valid for one hour
+      #
+      #   obj.presigned_request(:get, expires_in: 3600)
+      #   #=> ["https://bucket-name.s3.amazonaws.com/object-key?...", {}]
+      #
+      # @example Pre-signed PUT with a canned ACL
+      #
+      #   # the object uploaded using this URL will be publicly accessible
+      #   obj.presigned_request(:put, acl: 'public-read')
+      #   #=> ["https://bucket-name.s3.amazonaws.com/object-key?...",
+      #        {"x-amz-acl"=>"public-read"}]
+      #
+      # @param [Symbol] method
+      #   The S3 operation to generate a presigned request for. Valid values
+      #   are `:get`, `:put`, `:head`, `:delete`, `:create_multipart_upload`,
+      #   `:list_multipart_uploads`, `:complete_multipart_upload`,
+      #   `:abort_multipart_upload`, `:list_parts`, and `:upload_part`.
+      #
+      # @param [Hash] params
+      #   Additional request parameters to use when generating the pre-signed
+      #   request. See the related documentation in {Client} for accepted
+      #   params.
+      #
+      #   | Method                       | Client Method                      |
+      #   |------------------------------|------------------------------------|
+      #   | `:get`                       | {Client#get_object}                |
+      #   | `:put`                       | {Client#put_object}                |
+      #   | `:head`                      | {Client#head_object}               |
+      #   | `:delete`                    | {Client#delete_object}             |
+      #   | `:create_multipart_upload`   | {Client#create_multipart_upload}   |
+      #   | `:list_multipart_uploads`    | {Client#list_multipart_uploads}    |
+      #   | `:complete_multipart_upload` | {Client#complete_multipart_upload} |
+      #   | `:abort_multipart_upload`    | {Client#abort_multipart_upload}    |
+      #   | `:list_parts`                | {Client#list_parts}                |
+      #   | `:upload_part`               | {Client#upload_part}               |
+      #
+      # @option params [Boolean] :virtual_host (false) When `true` the
+      #   presigned URL will use the bucket name as a virtual host.
+      #
+      #     bucket = Aws::S3::Bucket.new('my.bucket.com')
+      #     bucket.object('key').presigned_request(virtual_host: true)
+      #     #=> ["http://my.bucket.com/key?...", {}]
+      #
+      # @option params [Integer] :expires_in (900) Number of seconds before
+      #   the pre-signed URL expires. This may not exceed one week (604800
+      #   seconds). Note that the pre-signed URL is also only valid as long as
+      #   credentials used to sign it are. For example, when using IAM roles,
+      #   temporary tokens generated for signing also have a default expiration
+      #   which will affect the effective expiration of the pre-signed URL.
+      #
+      # @raise [ArgumentError] Raised if `:expires_in` exceeds one week
+      #   (604800 seconds).
+      #
+      # @return [String, Hash] A tuple with a presigned URL and headers that
+      #   should be included with the request.
+      #
+      def presigned_request(method, params = {})
+        presigner = Presigner.new(client: client)
+
+        if %w(delete head get put).include?(method.to_s)
+          method = "#{method}_object".to_sym
+        end
+
+        presigner.presigned_request(
+          method.downcase,
+          params.merge(bucket: bucket_name, key: key)
+        )
+      end
+
+      # Returns the public (un-signed) URL for this object.
+      #
+      #     s3.bucket('bucket-name').object('obj-key').public_url
+      #     #=> "https://bucket-name.s3.amazonaws.com/obj-key"
+      #
+      # To use a virtual hosted bucket URL, pass `virtual_host: true`.
+      # Https is used unless `secure: false` is set. If the bucket
+      # name contains dots (.) then you will need to set `secure: false`.
+      #
+      #   s3.bucket('my-bucket.com').object('key')
+      #     .public_url(virtual_host: true)
+      #   #=> "https://my-bucket.com/key"
+      #
+      # @option options [Boolean] :virtual_host (false) When `true`, the bucket
+      #   name will be used as the host name. This is useful when you have
+      #   a CNAME configured for the bucket.
+      #
+      # @option options [Boolean] :secure (true) When `false`, http
+      #   will be used with virtual_host. This is required when
+      #   the bucket name has a dot (.) in it.
+      #
+      # @return [String]
+      def public_url(options = {})
+        url = URI.parse(bucket.url(options))
+        url.path += '/' unless url.path[-1] == '/'
+        url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
+        url.to_s
+      end
+
+      # Uploads a stream in a streaming fashion to the current object in S3.
+      #
+      # Passed chunks are automatically split into multipart upload parts
+      # and the parts are uploaded in parallel. This allows for streaming
+      # uploads that never touch the disk.
+      #
+      # Note that this is known to have issues in JRuby until jruby-9.1.15.0,
+      # so avoid using this with older versions of JRuby.
+      #
+      # @example Streaming chunks of data
+      #   obj.upload_stream do |write_stream|
+      #     10.times { write_stream << 'foo' }
+      #   end
+      # @example Streaming chunks of data
+      #   obj.upload_stream do |write_stream|
+      #     IO.copy_stream(IO.popen('ls'), write_stream)
+      #   end
+      # @example Streaming chunks of data
+      #   obj.upload_stream do |write_stream|
+      #     IO.copy_stream(STDIN, write_stream)
+      #   end
+      #
+      # @option options [Integer] :thread_count (10) The number of parallel
+      #   multipart uploads
+      #
+      # @option options [Boolean] :tempfile (false) Normally read data is stored
+      #   in memory when building the parts in order to complete the underlying
+      #   multipart upload. By passing `:tempfile => true`, the data read will be
+      #   temporarily stored on disk, greatly reducing the memory footprint.
+      #
+      # @option options [Integer] :part_size (5242880)
+      #   Define how big each part, except the last, should be.
+      #   Default `:part_size` is `5 * 1024 * 1024`.
+      #
+      # @raise [MultipartUploadError] If an object is being uploaded in
+      #   parts, and the upload can not be completed, then the upload is
+      #   aborted and this error is raised. The raised error has a `#errors`
+      #   method that returns the failures that caused the upload to be
+      #   aborted.
+      #
+      # @return [Boolean] Returns `true` when the object is uploaded
+      #   without any errors.
+      #
+      def upload_stream(options = {}, &block)
+        uploading_options = options.dup
+        uploader = MultipartStreamUploader.new(
+          client: client,
+          thread_count: uploading_options.delete(:thread_count),
+          tempfile: uploading_options.delete(:tempfile),
+          part_size: uploading_options.delete(:part_size)
+        )
+        uploader.upload(
+          uploading_options.merge(bucket: bucket_name, key: key),
+          &block
+        )
+        true
+      end
+
+      # Uploads a file from disk to the current object in S3.
+      #
+      #     # small files are uploaded in a single API call
+      #     obj.upload_file('/path/to/file')
+      #
+      # Files larger than or equal to `:multipart_threshold` are uploaded
+      # using the Amazon S3 multipart upload APIs.
+      #
+      #     # large files are automatically split into parts
+      #     # and the parts are uploaded in parallel
+      #     obj.upload_file('/path/to/very_large_file')
+      #
+      # The response of the S3 upload API is yielded if a block is given.
+      #
+      #     # API response will have etag value of the file
+      #     obj.upload_file('/path/to/file') do |response|
+      #       etag = response.etag
+      #     end
+      #
+      # You can provide a callback to monitor progress of the upload:
+      #
+      #     # bytes and totals are each an array with 1 entry per part
+      #     progress = Proc.new do |bytes, totals|
+      #       puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}" }.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum}%"
+      #     end
+      #     obj.upload_file('/path/to/file', progress_callback: progress)
+      #
+      # @param [String, Pathname, File, Tempfile] source A file on the local
+      #   file system that will be uploaded as this object. This can either be
+      #   a String or Pathname to the file, an open File object, or an open
+      #   Tempfile object. If you pass an open File or Tempfile object, then
+      #   you are responsible for closing it after the upload completes. When
+      #   using an open Tempfile, rewind it before uploading or else the object
+      #   will be empty.
+      #
+      # @option options [Integer] :multipart_threshold (104857600) Files larger
+      #   than or equal to `:multipart_threshold` are uploaded using the S3
+      #   multipart APIs.
+      #   Default threshold is 100MB.
+      #
+      # @option options [Integer] :thread_count (10) The number of parallel
+      #   multipart uploads. This option is not used if the file is smaller than
+      #   `:multipart_threshold`.
+      #
+      # @option options [Proc] :progress_callback
+      #   A Proc that will be called when each chunk of the upload is sent.
+      #   It will be invoked with [bytes_read], [total_sizes]
+      #
+      # @raise [MultipartUploadError] If an object is being uploaded in
+      #   parts, and the upload can not be completed, then the upload is
+      #   aborted and this error is raised. The raised error has a `#errors`
+      #   method that returns the failures that caused the upload to be
+      #   aborted.
+      #
+      # @return [Boolean] Returns `true` when the object is uploaded
+      #   without any errors.
+      def upload_file(source, options = {})
+        uploading_options = options.dup
+        uploader = FileUploader.new(
+          multipart_threshold: uploading_options.delete(:multipart_threshold),
+          client: client
+        )
+        response = uploader.upload(
+          source,
+          uploading_options.merge(bucket: bucket_name, key: key)
+        )
+        yield response if block_given?
+        true
+      end
+
+      # Downloads a file in S3 to a path on disk.
+      #
+      #     # small files (< 5MB) are downloaded in a single API call
+      #     obj.download_file('/path/to/file')
+      #
+      # Files larger than 5MB are downloaded using the multipart method:
+      #
+      #     # large files are split into parts
+      #     # and the parts are downloaded in parallel
+      #     obj.download_file('/path/to/very_large_file')
+      #
+      # @param [String] destination Where to download the file to.
+      #
+      # @option options [String] mode (`auto`) One of `auto`, `single_request`
+      #   or `get_range`. `single_request` mode forces a single GET request for
+      #   the whole download; `get_range` mode downloads in ranges whose size
+      #   is set with the `chunk_size` option. The default `auto` mode performs
+      #   a multipart download.
+      #
+      # @option options [Integer] chunk_size The size of each range request;
+      #   required in `get_range` mode.
+      #
+      # @option options [Integer] thread_count (10) Customize threads used in
+      #   the multipart download.
+      #
+      # @option options [String] version_id The object version id used to
+      #   retrieve the object. For more about object versioning, see:
+      #   https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
+      #
+      # @return [Boolean] Returns `true` when the file is downloaded without
+      #   any errors.
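+      # @example A hypothetical ranged download (path and sizes are placeholders)
+      #
+      #   obj.download_file('/tmp/large.bin',
+      #                     mode: 'get_range',
+      #                     chunk_size: 5 * 1024 * 1024, # size of each ranged GET
+      #                     thread_count: 4)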
+      def download_file(destination, options = {})
+        downloader = FileDownloader.new(client: client)
+        downloader.download(
+          destination,
+          options.merge(bucket: bucket_name, key: key)
+        )
+        true
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object_summary.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object_summary.rb
new file mode 100644
index 0000000..94565f0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/object_summary.rb
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    class ObjectSummary
+
+      alias content_length size
+
+      # Make the method redefinable
+      alias_method :copy_from, :copy_from
+
+      # @param (see Object#copy_from)
+      # @options (see Object#copy_from)
+      # @return (see Object#copy_from)
+      # @see Object#copy_from
+      def copy_from(source, options = {})
+        object.copy_from(source, options)
+      end
+
+      # @param (see Object#copy_to)
+      # @options (see Object#copy_to)
+      # @return (see Object#copy_to)
+      # @see Object#copy_to
+      def copy_to(target, options = {})
+        object.copy_to(target, options)
+      end
+
+      # @param (see Object#move_to)
+      # @options (see Object#move_to)
+      # @return (see Object#move_to)
+      # @see Object#move_to
+      def move_to(target, options = {})
+        object.move_to(target, options)
+      end
+
+      # @param (see Object#presigned_post)
+      # @options (see Object#presigned_post)
+      # @return (see Object#presigned_post)
+      # @see Object#presigned_post
+      def presigned_post(options = {})
+        object.presigned_post(options)
+      end
+
+      # @param (see Object#presigned_url)
+      # @options (see Object#presigned_url)
+      # @return (see Object#presigned_url)
+      # @see Object#presigned_url
+      def presigned_url(http_method, params = {})
+        object.presigned_url(http_method, params)
+      end
+
+      # @param (see Object#public_url)
+      # @options (see Object#public_url)
+      # @return (see Object#public_url)
+      # @see Object#public_url
+      def public_url(options = {})
+        object.public_url(options)
+      end
+
+      # @param (see Object#upload_file)
+      # @options (see Object#upload_file)
+      # @return (see Object#upload_file)
+      # @see Object#upload_file
+      def upload_file(source, options = {})
+        object.upload_file(source, options)
+      end
+
+      # @options (see Object#upload_stream)
+      # @return (see Object#upload_stream)
+      # @see Object#upload_stream
+      def upload_stream(options = {}, &block)
+        object.upload_stream(options, &block)
+      end
+
+      # @param (see Object#download_file)
+      # @options (see Object#download_file)
+      # @return (see Object#download_file)
+      # @see Object#download_file
+      def download_file(destination, options = {})
+        object.download_file(destination, options)
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb
new file mode 100644
index 0000000..2ee22aa
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+class Aws::S3::Types::ListObjectVersionsOutput
+
+  # TODO : Remove this customization once the resource code
+  # generator correctly handles the JMESPath || expression.
+  # Only used by the Bucket#object_versions collection.
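+  # A sketch of the merged listing this returns (bucket name is a placeholder):
+  #
+  #   output = client.list_object_versions(bucket: 'versioned-bucket')
+  #   output.versions_delete_markers # => all versions, followed by all delete markers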
+  # @api private
+  def versions_delete_markers
+    versions + delete_markers
+  end
+
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb
new file mode 100644
index 0000000..9b291c8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Types
+      # This error is not modeled.
+      #
+      # The bucket you are attempting to access must be addressed using the
+      # specified endpoint. Please send all future requests to this endpoint.
+      #
+      # @!attribute [rw] endpoint
+      #   @return [String]
+      #
+      # @!attribute [rw] bucket
+      #   @return [String]
+      #
+      # @!attribute [rw] region
+      #   @return [String]
+      #
+      # @!attribute [rw] message
+      #   @return [String]
+      #
+      class PermanentRedirect < Struct.new(:endpoint, :bucket, :region, :message)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption.rb
new file mode 100644
index 0000000..ba23cbb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+require 'aws-sdk-s3/encryption/client'
+require 'aws-sdk-s3/encryption/decrypt_handler'
+require 'aws-sdk-s3/encryption/default_cipher_provider'
+require 'aws-sdk-s3/encryption/encrypt_handler'
+require 'aws-sdk-s3/encryption/errors'
+require 'aws-sdk-s3/encryption/io_encrypter'
+require 'aws-sdk-s3/encryption/io_decrypter'
+require 'aws-sdk-s3/encryption/io_auth_decrypter'
+require 'aws-sdk-s3/encryption/key_provider'
+require 'aws-sdk-s3/encryption/kms_cipher_provider'
+require 'aws-sdk-s3/encryption/materials'
+require 'aws-sdk-s3/encryption/utils'
+require 'aws-sdk-s3/encryption/default_key_provider'
+
+module Aws
+  module S3
+    module Encryption; end
+    AES_GCM_TAG_LEN_BYTES = 16
+    EC_USER_AGENT = 'S3CryptoV1n'
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/client.rb
new file mode 100644
index 0000000..b25ee88
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/client.rb
@@ -0,0 +1,386 @@
+# frozen_string_literal: true
+
+require 'forwardable'
+
+module Aws
+  module S3
+
+    # [MAINTENANCE MODE] There is a new version of the Encryption Client.
+    # AWS strongly recommends upgrading to the {Aws::S3::EncryptionV2::Client},
+    # which provides updated data security best practices.
+    # See documentation for {Aws::S3::EncryptionV2::Client}.
+    # Provides an encryption client that encrypts and decrypts data client-side,
+    # storing the encrypted data in Amazon S3.
+    #
+    # This client uses a process called "envelope encryption". Your private
+    # encryption keys and your data's plain-text are **never** sent to
+    # Amazon S3. **If you lose your encryption keys, you will not be able to
+    # decrypt your data.**
+    #
+    # ## Envelope Encryption Overview
+    #
+    # The goal of envelope encryption is to combine the performance of
+    # fast symmetric encryption while maintaining the secure key management
+    # that asymmetric keys provide.
+    #
+    # A one-time-use symmetric key (envelope key) is generated client-side.
+    # This is used to encrypt the data client-side. This key is then
+    # encrypted by your master key and stored alongside your data in Amazon
+    # S3.
+    #
+    # When accessing your encrypted data with the encryption client,
+    # the encrypted envelope key is retrieved and decrypted client-side
+    # with your master key. The envelope key is then used to decrypt the
+    # data client-side.
+    #
+    # One of the benefits of envelope encryption is that if your master key
+    # is compromised, you have the option of just re-encrypting the stored
+    # envelope symmetric keys, instead of re-encrypting all of the
+    # data in your account.
+    #
+    # ## Basic Usage
+    #
+    # The encryption client requires an {Aws::S3::Client}. If you do not
+    # provide a `:client`, then a client will be constructed for you.
+    #
+    #     require 'openssl'
+    #     key = OpenSSL::PKey::RSA.new(1024)
+    #
+    #     # encryption client
+    #     s3 = Aws::S3::Encryption::Client.new(encryption_key: key)
+    #
+    #     # round-trip an object, encrypted/decrypted locally
+    #     s3.put_object(bucket:'aws-sdk', key:'secret', body:'handshake')
+    #     s3.get_object(bucket:'aws-sdk', key:'secret').body.read
+    #     #=> 'handshake'
+    #
+    #     # reading encrypted object without the encryption client
+    #     # results in getting the cipher text
+    #     Aws::S3::Client.new.get_object(bucket:'aws-sdk', key:'secret').body.read
+    #     #=> "... cipher text ..."
+    #
+    # ## Keys
+    #
+    # For client-side encryption to work, you must provide one of the following:
+    #
+    # * An encryption key
+    # * A {KeyProvider}
+    # * A KMS encryption key id
+    #
+    # ### An Encryption Key
+    #
+    # You can pass a single encryption key. This is used as a master key
+    # for encrypting and decrypting all object keys.
+    #
+    #     key = OpenSSL::Cipher.new("AES-256-ECB").random_key # symmetric key
+    #     key = OpenSSL::PKey::RSA.new(1024) # asymmetric key pair
+    #
+    #     s3 = Aws::S3::Encryption::Client.new(encryption_key: key)
+    #
+    # ### Key Provider
+    #
+    # Alternatively, you can use a {KeyProvider}. A key provider makes
+    # it easy to work with multiple keys and simplifies key rotation.
+    #
+    # ### KMS Encryption Key Id
+    #
+    # If you pass the id of an AWS Key Management Service (KMS) key,
+    # then KMS will be used to generate, encrypt and decrypt object keys.
+    #
+    #     # keep track of the kms key id
+    #     kms = Aws::KMS::Client.new
+    #     key_id = kms.create_key.key_metadata.key_id
+    #
+    #     Aws::S3::Encryption::Client.new(
+    #       kms_key_id: key_id,
+    #       kms_client: kms,
+    #     )
+    #
+    # ## Custom Key Providers
+    #
+    # A {KeyProvider} is any object that responds to:
+    #
+    # * `#encryption_materials`
+    # * `#key_for(materials_description)`
+    #
+    # Here is a trivial implementation of an in-memory key provider.
+    # This is provided as a demonstration of the key provider interface,
+    # and should not be used in production:
+    #
+    #     class KeyProvider
+    #
+    #       def initialize(default_key_name, keys)
+    #         @keys = keys
+    #         @encryption_materials = Aws::S3::Encryption::Materials.new(
+    #           key: @keys[default_key_name],
+    #           description: JSON.dump(key: default_key_name),
+    #         )
+    #       end
+    #
+    #       attr_reader :encryption_materials
+    #
+    #       def key_for(matdesc)
+    #         key_name = JSON.parse(matdesc)['key']
+    #         if key = @keys[key_name]
+    #           key
+    #         else
+    #           raise "encryption key not found for: #{matdesc.inspect}"
+    #         end
+    #       end
+    #     end
+    #
+    # Given the above key provider, you can create an encryption client that
+    # chooses the key to use based on the materials description stored with
+    # the encrypted object.
+    # This makes it possible to use multiple keys and simplifies key rotation.
+    #
+    #     # uses "new-key" for encrypting objects, uses either for decrypting
+    #     keys = KeyProvider.new('new-key', {
+    #       "old-key" => Base64.decode64("kM5UVbhE/4rtMZJfsadYEdm2vaKFsmV2f5+URSeUCV4="),
+    #       "new-key" => Base64.decode64("w1WLio3agRWRTSJK/Ouh8NHoqRQ6fn5WbSXDTHjXMSo="),
+    #     })
+    #
+    #     # chooses the key based on the materials description stored
+    #     # with the encrypted object
+    #     s3 = Aws::S3::Encryption::Client.new(key_provider: keys)
+    #
+    # ## Materials Description
+    #
+    # A materials description is a JSON document string that is stored
+    # in the metadata (or instruction file) of an encrypted object.
+    # The {DefaultKeyProvider} uses the empty JSON document `"{}"`.
+    #
+    # When building a key provider, you are free to store whatever
+    # information you need to identify the master key that was used
+    # to encrypt the object.
+    #
+    # ## Envelope Location
+    #
+    # By default, the encryption client stores the encryption envelope
+    # with the object, as metadata. You can choose to have the envelope
+    # stored in a separate "instruction file". An instruction file
+    # is an object, with the key of the encrypted object, suffixed with
+    # `".instruction"`.
+    #
+    # Specify the `:envelope_location` option as `:instruction_file` to
+    # use an instruction file for storing the envelope.
+    #
+    #     # default behavior
+    #     s3 = Aws::S3::Encryption::Client.new(
+    #       key_provider: ...,
+    #       envelope_location: :metadata,
+    #     )
+    #
+    #     # store envelope in a separate object
+    #     s3 = Aws::S3::Encryption::Client.new(
+    #       key_provider: ...,
+    #       envelope_location: :instruction_file,
+    #       instruction_file_suffix: '.instruction' # default
+    #     )
+    #
+    # When using an instruction file, multiple requests are made when
+    # putting and getting the object. **This may cause issues if you are
+    # issuing concurrent PUT and GET requests to an encrypted object.**
+    #
+    module Encryption
+      class Client
+
+        extend Deprecations
+        extend Forwardable
+        def_delegators :@client, :config, :delete_object, :head_object, :build_request
+
+        # Creates a new encryption client. You must provide one of the following
+        # options:
+        #
+        # * `:encryption_key`
+        # * `:kms_key_id`
+        # * `:key_provider`
+        #
+        # You may also pass any other options accepted by `Client#initialize`.
+        #
+        # @option options [S3::Client] :client A basic S3 client that is used
+        #   to make api calls. If a `:client` is not provided, a new {S3::Client}
+        #   will be constructed.
+        #
+        # @option options [OpenSSL::PKey::RSA, String] :encryption_key The master
+        #   key to use for encrypting/decrypting all objects.
+        #
+        # @option options [String] :kms_key_id When you provide a `:kms_key_id`,
+        #   then AWS Key Management Service (KMS) will be used to manage the
+        #   object encryption keys. By default a {KMS::Client} will be
+        #   constructed for KMS API calls. Alternatively, you can provide
+        #   your own via `:kms_client`.
+        #
+        # @option options [#key_for] :key_provider Any object that responds
+        #   to `#key_for`. This method should accept a materials description
+        #   JSON document string and return an encryption key.
+        #
+        # @option options [Symbol] :envelope_location (:metadata) Where to
+        #   store the envelope encryption keys. By default, the envelope is
+        #   stored with the encrypted object. If you pass `:instruction_file`,
+        #   then the envelope is stored in a separate object in Amazon S3.
+        #
+        # @option options [String] :instruction_file_suffix ('.instruction')
+        #   When `:envelope_location` is `:instruction_file` then the
+        #   instruction file uses the object key with this suffix appended.
+        #
+        # @option options [KMS::Client] :kms_client A default {KMS::Client}
+        #   is constructed when using KMS to manage encryption keys.
+        #
+        def initialize(options = {})
+          @client = extract_client(options)
+          @cipher_provider = cipher_provider(options)
+          @envelope_location = extract_location(options)
+          @instruction_file_suffix = extract_suffix(options)
+        end
+        deprecated :initialize,
+                   message:
+                     '[MAINTENANCE MODE] This version of the S3 Encryption client is currently in maintenance mode. ' \
+                     'AWS strongly recommends upgrading to the Aws::S3::EncryptionV2::Client, ' \
+                     'which provides updated data security best practices. ' \
+                     'See documentation for Aws::S3::EncryptionV2::Client.'
+
+        # @return [S3::Client]
+        attr_reader :client
+
+        # @return [KeyProvider, nil] Returns `nil` if you are using
+        #   AWS Key Management Service (KMS).
+        attr_reader :key_provider
+
+        # @return [Symbol<:metadata, :instruction_file>]
+        attr_reader :envelope_location
+
+        # @return [String] When {#envelope_location} is `:instruction_file`,
+        #   the envelope is stored in the object with the object key suffixed
+        #   by this string.
+        attr_reader :instruction_file_suffix
+
+        # Uploads an object to Amazon S3, encrypting data client-side.
+        # See {S3::Client#put_object} for documentation on accepted
+        # request parameters.
+        # @option (see S3::Client#put_object)
+        # @return (see S3::Client#put_object)
+        # @see S3::Client#put_object
+        def put_object(params = {})
+          req = @client.build_request(:put_object, params)
+          req.handlers.add(EncryptHandler, priority: 95)
+          req.context[:encryption] = {
+            cipher_provider: @cipher_provider,
+            envelope_location: @envelope_location,
+            instruction_file_suffix: @instruction_file_suffix,
+          }
+          req.send_request
+        end
+
+        # Gets an object from Amazon S3, decrypting data locally.
+        # See {S3::Client#get_object} for documentation on accepted
+        # request parameters.
+        # @option params [String] :instruction_file_suffix The suffix
+        #   used to find the instruction file containing the encryption
+        #   envelope. You should not set this option when the envelope
+        #   is stored in the object metadata. Defaults to
+        #   {#instruction_file_suffix}.
+        # @option (see S3::Client#get_object)
+        # @return (see S3::Client#get_object)
+        # @see S3::Client#get_object
+        # @note The `:range` request parameter is not yet supported.
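+        # @example A minimal sketch of reading an envelope from an instruction file
+        #   # assumes the object was written with envelope_location: :instruction_file
+        #   s3.get_object(
+        #     bucket: 'aws-sdk',
+        #     key: 'secret',
+        #     instruction_file_suffix: '.instruction'
+        #   ).body.read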
+        def get_object(params = {}, &block)
+          if params[:range]
+            raise NotImplementedError, '#get_object with :range not supported yet'
+          end
+          envelope_location, instruction_file_suffix = envelope_options(params)
+          req = @client.build_request(:get_object, params)
+          req.handlers.add(DecryptHandler)
+          req.context[:encryption] = {
+            cipher_provider: @cipher_provider,
+            envelope_location: envelope_location,
+            instruction_file_suffix: instruction_file_suffix,
+          }
+          req.send_request(target: block)
+        end
+
+        private
+
+        def extract_client(options)
+          options[:client] || begin
+            options = options.dup
+            options.delete(:kms_key_id)
+            options.delete(:kms_client)
+            options.delete(:key_provider)
+            options.delete(:encryption_key)
+            options.delete(:envelope_location)
+            options.delete(:instruction_file_suffix)
+            S3::Client.new(options)
+          end
+        end
+
+        def kms_client(options)
+          options[:kms_client] || begin
+            KMS::Client.new(
+              region: @client.config.region,
+              credentials: @client.config.credentials,
+            )
+          end
+        end
+
+        def cipher_provider(options)
+          if options[:kms_key_id]
+            KmsCipherProvider.new(
+              kms_key_id: options[:kms_key_id],
+              kms_client: kms_client(options),
+            )
+          else
+            # kept here for backwards compatibility, {#key_provider} is deprecated
+            @key_provider = extract_key_provider(options)
+            DefaultCipherProvider.new(key_provider: @key_provider)
+          end
+        end
+
+        def extract_key_provider(options)
+          if options[:key_provider]
+            options[:key_provider]
+          elsif options[:encryption_key]
+            DefaultKeyProvider.new(options)
+          else
+            msg = 'you must pass a :kms_key_id, :key_provider, or :encryption_key'
+            raise ArgumentError, msg
+          end
+        end
+
+        def envelope_options(params)
+          location = params.delete(:envelope_location) || @envelope_location
+          suffix = params.delete(:instruction_file_suffix)
+          if suffix
+            [:instruction_file, suffix]
+          else
+            [location, @instruction_file_suffix]
+          end
+        end
+
+        def extract_location(options)
+          location = options[:envelope_location] || :metadata
+          if [:metadata, :instruction_file].include?(location)
+            location
+          else
+            msg = ':envelope_location must be :metadata or :instruction_file '\
+                  "got #{location.inspect}"
+            raise ArgumentError, msg
+          end
+        end
+
+        def extract_suffix(options)
+          suffix = options[:instruction_file_suffix] || '.instruction'
+          if String === suffix
+            suffix
+          else
+            msg = ':instruction_file_suffix must be a String'
+            raise ArgumentError, msg
+          end
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/decrypt_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/decrypt_handler.rb
new file mode 100644
index 0000000..d44f23c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/decrypt_handler.rb
@@ -0,0 +1,221 @@
+# frozen_string_literal: true
+
+require 'base64'
+
+module Aws
+  module S3
+    module Encryption
+      # @api private
+      class DecryptHandler < Seahorse::Client::Handler
+        @@warned_response_target_proc = false
+
+        V1_ENVELOPE_KEYS = %w(
+          x-amz-key
+          x-amz-iv
+          x-amz-matdesc
+        )
+
+        V2_ENVELOPE_KEYS = %w(
+          x-amz-key-v2
+          x-amz-iv
+          x-amz-cek-alg
+          x-amz-wrap-alg
+          x-amz-matdesc
+        )
+
+        V2_OPTIONAL_KEYS = %w(x-amz-tag-len)
+
+        POSSIBLE_ENVELOPE_KEYS = (V1_ENVELOPE_KEYS +
+          V2_ENVELOPE_KEYS + V2_OPTIONAL_KEYS).uniq
+
+        POSSIBLE_WRAPPING_FORMATS = %w(
+          AES/GCM
+          kms
+          kms+context
+          RSA-OAEP-SHA1
+        )
+
+        POSSIBLE_ENCRYPTION_FORMATS = %w(
+          AES/GCM/NoPadding
+          AES/CBC/PKCS5Padding
+          AES/CBC/PKCS7Padding
+        )
+
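+        # For illustration, a v2 metadata envelope has this rough shape
+        # (values abridged, not real ciphertext):
+        #
+        #   {
+        #     'x-amz-key-v2'   => '<base64 encrypted cek>',
+        #     'x-amz-iv'       => '<base64 iv>',
+        #     'x-amz-cek-alg'  => 'AES/GCM/NoPadding',
+        #     'x-amz-wrap-alg' => 'kms',
+        #     'x-amz-matdesc'  => '{}'
+        #   }
+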
AUTH_REQUIRED_CEK_ALGS = %w(AES/GCM/NoPadding) + + def call(context) + attach_http_event_listeners(context) + apply_cse_user_agent(context) + + if context[:response_target].is_a?(Proc) && !@@warned_response_target_proc + @@warned_response_target_proc = true + warn(':response_target is a Proc, or a block was provided. ' \ + 'Read the entire object to the ' \ + 'end before you start using the decrypted data. This is to ' \ + 'verify that the object has not been modified since it ' \ + 'was encrypted.') + end + + @handler.call(context) + end + + private + + def attach_http_event_listeners(context) + + context.http_response.on_headers(200) do + cipher, envelope = decryption_cipher(context) + decrypter = body_contains_auth_tag?(envelope) ? + authenticated_decrypter(context, cipher, envelope) : + IODecrypter.new(cipher, context.http_response.body) + context.http_response.body = decrypter + end + + context.http_response.on_success(200) do + decrypter = context.http_response.body + decrypter.finalize + decrypter.io.rewind if decrypter.io.respond_to?(:rewind) + context.http_response.body = decrypter.io + end + + context.http_response.on_error do + if context.http_response.body.respond_to?(:io) + context.http_response.body = context.http_response.body.io + end + end + end + + def decryption_cipher(context) + if (envelope = get_encryption_envelope(context)) + cipher = context[:encryption][:cipher_provider] + .decryption_cipher( + envelope, + context[:encryption] + ) + [cipher, envelope] + else + raise Errors::DecryptionError, "unable to locate encryption envelope" + end + end + + def get_encryption_envelope(context) + if context[:encryption][:envelope_location] == :metadata + envelope_from_metadata(context) || envelope_from_instr_file(context) + else + envelope_from_instr_file(context) || envelope_from_metadata(context) + end + end + + def envelope_from_metadata(context) + possible_envelope = {} + POSSIBLE_ENVELOPE_KEYS.each do |suffix| + if value = context.http_response.headers["x-amz-meta-#{suffix}"] + possible_envelope[suffix] = value + end + end + extract_envelope(possible_envelope) + end + + def envelope_from_instr_file(context) + suffix = context[:encryption][:instruction_file_suffix] + possible_envelope = Json.load(context.client.get_object( + bucket: context.params[:bucket], + key: context.params[:key] + suffix + ).body.read) + extract_envelope(possible_envelope) + rescue S3::Errors::ServiceError, Json::ParseError + nil + end + + def extract_envelope(hash) + return nil unless hash + return v1_envelope(hash) if hash.key?('x-amz-key') + return v2_envelope(hash) if hash.key?('x-amz-key-v2') + if hash.keys.any? { |key| key.match(/^x-amz-key-(.+)$/) } + msg = "unsupported envelope encryption version #{$1}" + raise Errors::DecryptionError, msg + end + end + + def v1_envelope(envelope) + envelope + end + + def v2_envelope(envelope) + unless POSSIBLE_ENCRYPTION_FORMATS.include? envelope['x-amz-cek-alg'] + alg = envelope['x-amz-cek-alg'].inspect + msg = "unsupported content encrypting key (cek) format: #{alg}" + raise Errors::DecryptionError, msg + end + unless POSSIBLE_WRAPPING_FORMATS.include? envelope['x-amz-wrap-alg'] + alg = envelope['x-amz-wrap-alg'].inspect + msg = "unsupported key wrapping algorithm: #{alg}" + raise Errors::DecryptionError, msg + end + unless (missing_keys = V2_ENVELOPE_KEYS - envelope.keys).empty? 
+ msg = "incomplete v2 encryption envelope:\n" + msg += " missing: #{missing_keys.join(',')}\n" + raise Errors::DecryptionError, msg + end + envelope + end + + # This method fetches the tag from the end of the object by + # making a GET Object w/range request. This auth tag is used + # to initialize the cipher, and the decrypter truncates the + # auth tag from the body when writing the final bytes. + def authenticated_decrypter(context, cipher, envelope) + http_resp = context.http_response + content_length = http_resp.headers['content-length'].to_i + auth_tag_length = auth_tag_length(envelope) + + auth_tag = context.client.get_object( + bucket: context.params[:bucket], + key: context.params[:key], + range: "bytes=-#{auth_tag_length}" + ).body.read + + cipher.auth_tag = auth_tag + cipher.auth_data = '' + + # The encrypted object contains both the cipher text + # plus a trailing auth tag. + IOAuthDecrypter.new( + io: http_resp.body, + encrypted_content_length: content_length - auth_tag_length, + cipher: cipher) + end + + def body_contains_auth_tag?(envelope) + AUTH_REQUIRED_CEK_ALGS.include?(envelope['x-amz-cek-alg']) + end + + # Determine the auth tag length from the algorithm + # Validate it against the value provided in the x-amz-tag-len + # Return the tag length in bytes + def auth_tag_length(envelope) + tag_length = + case envelope['x-amz-cek-alg'] + when 'AES/GCM/NoPadding' then AES_GCM_TAG_LEN_BYTES + else + raise ArgumentError, 'Unsupported cek-alg: ' \ + "#{envelope['x-amz-cek-alg']}" + end + if (tag_length * 8) != envelope['x-amz-tag-len'].to_i + raise Errors::DecryptionError, 'x-amz-tag-len does not match expected' + end + tag_length + end + + def apply_cse_user_agent(context) + if context.config.user_agent_suffix.nil? + context.config.user_agent_suffix = EC_USER_AGENT + elsif !context.config.user_agent_suffix.include? EC_USER_AGENT + context.config.user_agent_suffix += " #{EC_USER_AGENT}" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_cipher_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_cipher_provider.rb new file mode 100644 index 0000000..1748119 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_cipher_provider.rb @@ -0,0 +1,101 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module Encryption + # @api private + class DefaultCipherProvider + + def initialize(options = {}) + @key_provider = options[:key_provider] + end + + # @return [Array] Creates an returns a new encryption + # envelope and encryption cipher. + def encryption_cipher + cipher = Utils.aes_encryption_cipher(:CBC) + envelope = { + 'x-amz-key' => encode64(encrypt(envelope_key(cipher))), + 'x-amz-iv' => encode64(envelope_iv(cipher)), + 'x-amz-matdesc' => materials_description, + } + [envelope, cipher] + end + + # @return [Cipher] Given an encryption envelope, returns a + # decryption cipher. + def decryption_cipher(envelope, options = {}) + master_key = @key_provider.key_for(envelope['x-amz-matdesc']) + if envelope.key? 
'x-amz-key' + # Support for decryption of legacy objects + key = Utils.decrypt(master_key, decode64(envelope['x-amz-key'])) + iv = decode64(envelope['x-amz-iv']) + Utils.aes_decryption_cipher(:CBC, key, iv) + else + if envelope['x-amz-cek-alg'] != 'AES/GCM/NoPadding' + raise ArgumentError, 'Unsupported cek-alg: ' \ + "#{envelope['x-amz-cek-alg']}" + end + key = + case envelope['x-amz-wrap-alg'] + when 'AES/GCM' + if master_key.is_a? OpenSSL::PKey::RSA + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with an RSA key and the x-amz-wrap-alg is AES/GCM.' + end + Utils.decrypt_aes_gcm(master_key, + decode64(envelope['x-amz-key-v2']), + envelope['x-amz-cek-alg']) + when 'RSA-OAEP-SHA1' + unless master_key.is_a? OpenSSL::PKey::RSA + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with an AES key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' + end + key, cek_alg = Utils.decrypt_rsa(master_key, decode64(envelope['x-amz-key-v2'])) + raise Errors::DecryptionError unless cek_alg == envelope['x-amz-cek-alg'] + key + when 'kms+context' + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with a user provided key and the x-amz-wrap-alg is' \ + ' kms+context. Please configure the client with the' \ + ' required kms_key_id' + else + raise ArgumentError, 'Unsupported wrap-alg: ' \ + "#{envelope['x-amz-wrap-alg']}" + end + iv = decode64(envelope['x-amz-iv']) + Utils.aes_decryption_cipher(:GCM, key, iv) + end + end + + private + + def envelope_key(cipher) + cipher.key = cipher.random_key + end + + def envelope_iv(cipher) + cipher.iv = cipher.random_iv + end + + def encrypt(data) + Utils.encrypt(@key_provider.encryption_materials.key, data) + end + + def materials_description + @key_provider.encryption_materials.description + end + + def encode64(str) + Base64.encode64(str).split("\n") * "" + end + + def decode64(str) + Base64.decode64(str) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_key_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_key_provider.rb new file mode 100644 index 0000000..7f37934 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/default_key_provider.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Encryption + + # The default key provider is constructed with a single key + # that is used for both encryption and decryption, ignoring + # the possible per-object envelope encryption materials description. + # @api private + class DefaultKeyProvider + + include KeyProvider + + # @option options [required, OpenSSL::PKey::RSA, String] :encryption_key + # The master key to use for encrypting objects. + # @option options [String] :materials_description ('{}') + # A description of the encryption key. + def initialize(options = {}) + @encryption_materials = Materials.new( + key: options[:encryption_key], + description: options[:materials_description] || '{}' + ) + end + + # @return [Materials] + def encryption_materials + @encryption_materials + end + + # @param [String] materials_description + # @return Returns the key given in the constructor. 
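+        # @example The materials description is ignored; the configured key
+        #   is always returned (the key below is a placeholder)
+        #
+        #   provider = DefaultKeyProvider.new(encryption_key: 'k' * 32)
+        #   provider.key_for('{}') #=> "kkkk..."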
+ def key_for(materials_description) + @encryption_materials.key + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/encrypt_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/encrypt_handler.rb new file mode 100644 index 0000000..d264d2a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/encrypt_handler.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module Encryption + # @api private + class EncryptHandler < Seahorse::Client::Handler + + def call(context) + envelope, cipher = context[:encryption][:cipher_provider].encryption_cipher + apply_encryption_envelope(context, envelope, cipher) + apply_encryption_cipher(context, cipher) + apply_cse_user_agent(context) + @handler.call(context) + end + + private + + def apply_encryption_envelope(context, envelope, cipher) + context[:encryption][:cipher] = cipher + if context[:encryption][:envelope_location] == :metadata + context.params[:metadata] ||= {} + context.params[:metadata].update(envelope) + else # :instruction_file + suffix = context[:encryption][:instruction_file_suffix] + context.client.put_object( + bucket: context.params[:bucket], + key: context.params[:key] + suffix, + body: Json.dump(envelope) + ) + end + end + + def apply_encryption_cipher(context, cipher) + io = context.params[:body] || '' + io = StringIO.new(io) if String === io + context.params[:body] = IOEncrypter.new(cipher, io) + context.params[:metadata] ||= {} + context.params[:metadata]['x-amz-unencrypted-content-length'] = io.size + if context.params.delete(:content_md5) + warn('Setting content_md5 on client side encrypted objects is deprecated') + end + context.http_response.on_headers do + context.params[:body].close + end + end + + def apply_cse_user_agent(context) + if context.config.user_agent_suffix.nil? + context.config.user_agent_suffix = EC_USER_AGENT + elsif !context.config.user_agent_suffix.include? EC_USER_AGENT + context.config.user_agent_suffix += " #{EC_USER_AGENT}" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/errors.rb new file mode 100644 index 0000000..ae9f740 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/errors.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Encryption + module Errors + + class DecryptionError < RuntimeError; end + + class EncryptionError < RuntimeError; end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb new file mode 100644 index 0000000..a491a61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Encryption + # @api private + class IOAuthDecrypter + + # @option options [required, IO#write] :io + # An IO-like object that responds to {#write}. + # @option options [required, Integer] :encrypted_content_length + # The number of bytes to decrypt from the `:io` object. + # This should be the total size of `:io` minus the length of + # the cipher auth tag. 
+        # @option options [required, OpenSSL::Cipher] :cipher An initialized
+        #   cipher that can be used to decrypt the bytes as they are
+        #   written to the `:io` object. The cipher should already have
+        #   its `#auth_tag` set.
+        def initialize(options = {})
+          @decrypter = IODecrypter.new(options[:cipher], options[:io])
+          @max_bytes = options[:encrypted_content_length]
+          @bytes_written = 0
+        end
+
+        def write(chunk)
+          chunk = truncate_chunk(chunk)
+          if chunk.bytesize > 0
+            @bytes_written += chunk.bytesize
+            @decrypter.write(chunk)
+          end
+        end
+
+        def finalize
+          @decrypter.finalize
+        end
+
+        def io
+          @decrypter.io
+        end
+
+        private
+
+        def truncate_chunk(chunk)
+          if chunk.bytesize + @bytes_written <= @max_bytes
+            chunk
+          elsif @bytes_written < @max_bytes
+            chunk[0..(@max_bytes - @bytes_written - 1)]
+          else
+            # If the tag was sent over after the full body has been read,
+            # we don't want to accidentally append it.
+            ""
+          end
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_decrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_decrypter.rb
new file mode 100644
index 0000000..2e43917
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_decrypter.rb
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Encryption
+      # @api private
+      class IODecrypter
+
+        # @param [OpenSSL::Cipher] cipher
+        # @param [IO#write] io An IO-like object that responds to `#write`.
+        def initialize(cipher, io)
+          @cipher = cipher
+          # Ensure that IO is reset between retries
+          @io = io.tap { |io| io.truncate(0) if io.respond_to?(:truncate) }
+          @cipher_buffer = String.new
+        end
+
+        # @return [#write]
+        attr_reader :io
+
+        def write(chunk)
+          # decrypt and write
+          if @cipher.method(:update).arity == 1
+            @io.write(@cipher.update(chunk))
+          else
+            @io.write(@cipher.update(chunk, @cipher_buffer))
+          end
+        end
+
+        def finalize
+          @io.write(@cipher.final)
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_encrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_encrypter.rb
new file mode 100644
index 0000000..14fbf4b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/io_encrypter.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require 'stringio'
+require 'tempfile'
+
+module Aws
+  module S3
+    module Encryption
+
+      # Provides an IO wrapper encrypting a stream of data.
+      # It is possible to use this same object for decrypting. You must
+      # initialize it with a decryption cipher in that case and the
+      # IO object must contain cipher text instead of plain text.
+      # @api private
+      class IOEncrypter
+
+        # @api private
+        ONE_MEGABYTE = 1024 * 1024
+
+        def initialize(cipher, io)
+          @encrypted = io.size <= ONE_MEGABYTE ?
+                       encrypt_to_stringio(cipher, io.read) :
+                       encrypt_to_tempfile(cipher, io)
+          @size = @encrypted.size
+        end
+
+        # @return [Integer]
+        attr_reader :size
+
+        def read(bytes = nil, output_buffer = nil)
+          if Tempfile === @encrypted && @encrypted.closed?
+            @encrypted.open
+            @encrypted.binmode
+          end
+          @encrypted.read(bytes, output_buffer)
+        end
+
+        def rewind
+          @encrypted.rewind
+        end
+
+        # @api private
+        def close
+          @encrypted.close if Tempfile === @encrypted
+        end
+
+        private
+
+        def encrypt_to_stringio(cipher, plain_text)
+          if plain_text.empty?
+ StringIO.new(cipher.final) + else + StringIO.new(cipher.update(plain_text) + cipher.final) + end + end + + def encrypt_to_tempfile(cipher, io) + encrypted = Tempfile.new(self.object_id.to_s) + encrypted.binmode + while chunk = io.read(ONE_MEGABYTE) + encrypted.write(cipher.update(chunk)) + end + encrypted.write(cipher.final) + encrypted.rewind + encrypted + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/key_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/key_provider.rb new file mode 100644 index 0000000..58cb30e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/key_provider.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Encryption + + # This module defines the interface required for a {Client#key_provider}. + # A key provider is any object that: + # + # * Responds to {#encryption_materials} with a {Materials} object. + # + # * Responds to {#key_for}, receiving a JSON document String, + # returning an encryption key. The returned encryption key + # must be one of: + # + # * `OpenSSL::PKey::RSA` - for asymmetric encryption + # * `String` - 32, 24, or 16 bytes long, for symmetric encryption + # + module KeyProvider + + # @return [Materials] + def encryption_materials; end + + # @param [String] materials_description + # @return [OpenSSL::PKey::RSA, String] encryption_key + def key_for(materials_description); end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb new file mode 100644 index 0000000..0aeeeb8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module Encryption + # @api private + class KmsCipherProvider + + def initialize(options = {}) + @kms_key_id = options[:kms_key_id] + @kms_client = options[:kms_client] + end + + # @return [Array] Creates and returns a new encryption + # envelope and encryption cipher. + def encryption_cipher + encryption_context = { "kms_cmk_id" => @kms_key_id } + key_data = @kms_client.generate_data_key( + key_id: @kms_key_id, + encryption_context: encryption_context, + key_spec: 'AES_256', + ) + cipher = Utils.aes_encryption_cipher(:CBC) + cipher.key = key_data.plaintext + envelope = { + 'x-amz-key-v2' => encode64(key_data.ciphertext_blob), + 'x-amz-iv' => encode64(cipher.iv = cipher.random_iv), + 'x-amz-cek-alg' => 'AES/CBC/PKCS5Padding', + 'x-amz-wrap-alg' => 'kms', + 'x-amz-matdesc' => Json.dump(encryption_context) + } + [envelope, cipher] + end + + # @return [Cipher] Given an encryption envelope, returns a + # decryption cipher. + def decryption_cipher(envelope, options = {}) + encryption_context = Json.load(envelope['x-amz-matdesc']) + cek_alg = envelope['x-amz-cek-alg'] + + case envelope['x-amz-wrap-alg'] + when 'kms'; # NO OP + when 'kms+context' + if cek_alg != encryption_context['aws:x-amz-cek-alg'] + raise Errors::DecryptionError, 'Value of cek-alg from envelope'\ + ' does not match the value in the encryption context' + end + when 'AES/GCM' + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with a KMS key and the x-amz-wrap-alg is AES/GCM.'
+ when 'RSA-OAEP-SHA1' + raise ArgumentError, 'Key mismatch - Client is configured' \ ' with a KMS key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' + else + raise ArgumentError, 'Unsupported wrap-alg: ' \ "#{envelope['x-amz-wrap-alg']}" + end + + key = @kms_client.decrypt( + ciphertext_blob: decode64(envelope['x-amz-key-v2']), + encryption_context: encryption_context + ).plaintext + + iv = decode64(envelope['x-amz-iv']) + block_mode = + case cek_alg + when 'AES/CBC/PKCS5Padding' + :CBC + when 'AES/CBC/PKCS7Padding' + :CBC + when 'AES/GCM/NoPadding' + :GCM + else + type = envelope['x-amz-cek-alg'].inspect + msg = "unsupported content encrypting key (cek) format: #{type}" + raise Errors::DecryptionError, msg + end + Utils.aes_decryption_cipher(block_mode, key, iv) + end + + private + + def build_encryption_context(cek_alg, options = {}) + kms_context = (options[:kms_encryption_context] || {}) + .each_with_object({}) { |(k, v), h| h[k.to_s] = v } + { + 'aws:x-amz-cek-alg' => cek_alg + }.merge(kms_context) + end + + def encode64(str) + Base64.encode64(str).split("\n") * "" + end + + def decode64(str) + Base64.decode64(str) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/materials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/materials.rb new file mode 100644 index 0000000..92b68e9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/materials.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module Encryption + class Materials + + # @option options [required, OpenSSL::PKey::RSA, String] :key + # The master key to use for encrypting/decrypting all objects. + # + # @option options [String] :description ('{}') + # The encryption materials description. This must be + # a JSON document string. + # + def initialize(options = {}) + @key = validate_key(options[:key]) + @description = validate_desc(options[:description]) + end + + # @return [OpenSSL::PKey::RSA, String] + attr_reader :key + + # @return [String] + attr_reader :description + + private + + def validate_key(key) + case key + when OpenSSL::PKey::RSA then key + when String + if [32, 24, 16].include?(key.bytesize) + key + else + msg = 'invalid key, symmetric key required to be 16, 24, or '\ '32 bytes in length, saw length ' + key.bytesize.to_s + raise ArgumentError, msg + end + else + msg = 'invalid encryption key, expected an OpenSSL::PKey::RSA key '\ '(for asymmetric encryption) or a String (for symmetric '\ 'encryption).'
+ raise ArgumentError, msg + end + end + + def validate_desc(description) + Json.load(description) + description + rescue Json::ParseError, EncodingError + msg = 'expected description to be a valid JSON document string' + raise ArgumentError, msg + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/utils.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/utils.rb new file mode 100644 index 0000000..597fe12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption/utils.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +require 'openssl' + +module Aws + module S3 + module Encryption + # @api private + module Utils + + UNSAFE_MSG = "unsafe encryption, data is longer than key length" + + class << self + + def encrypt(key, data) + case key + when OpenSSL::PKey::RSA # asymmetric encryption + warn(UNSAFE_MSG) if key.public_key.n.num_bits < cipher_size(data) + key.public_encrypt(data) + when String # symmetric encryption + warn(UNSAFE_MSG) if cipher_size(key) < cipher_size(data) + cipher = aes_encryption_cipher(:ECB, key) + cipher.update(data) + cipher.final + end + end + + def decrypt(key, data) + begin + case key + when OpenSSL::PKey::RSA # asymmetric decryption + key.private_decrypt(data) + when String # symmetric Decryption + cipher = aes_cipher(:decrypt, :ECB, key, nil) + cipher.update(data) + cipher.final + end + rescue OpenSSL::Cipher::CipherError + msg = 'decryption failed, possible incorrect key' + raise Errors::DecryptionError, msg + end + end + + + def decrypt_aes_gcm(key, data, auth_data) + # data is iv (12B) + key + tag (16B) + buf = data.unpack('C*') + iv = buf[0,12].pack('C*') # iv will always be 12 bytes + tag = buf[-16, 16].pack('C*') # tag is 16 bytes + enc_key = buf[12, buf.size - (12+16)].pack('C*') + cipher = aes_cipher(:decrypt, :GCM, key, iv) + cipher.auth_tag = tag + cipher.auth_data = auth_data + cipher.update(enc_key) + cipher.final + end + + # returns the decrypted data + auth_data + def decrypt_rsa(key, enc_data) + # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData + buf = key.private_decrypt(enc_data, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING).unpack('C*') + key_length = buf[0] + data = buf[1, key_length].pack('C*') + auth_data = buf[key_length+1, buf.length - key_length].pack('C*') + [data, auth_data] + end + + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param [String, nil] iv The initialization vector + def aes_encryption_cipher(block_mode, key = nil, iv = nil) + aes_cipher(:encrypt, block_mode, key, iv) + end + + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param [String, nil] iv The initialization vector + def aes_decryption_cipher(block_mode, key = nil, iv = nil) + aes_cipher(:decrypt, block_mode, key, iv) + end + + # @param [String] mode "encrypt" or "decrypt" + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param [String, nil] iv The initialization vector + def aes_cipher(mode, block_mode, key, iv) + cipher = key ? 
+ OpenSSL::Cipher.new("aes-#{cipher_size(key)}-#{block_mode.downcase}") : + OpenSSL::Cipher.new("aes-256-#{block_mode.downcase}") + cipher.send(mode) # encrypt or decrypt + cipher.key = key if key + cipher.iv = iv if iv + cipher + end + + # @param [String] key + # @return [Integer] + # @raise ArgumentError + def cipher_size(key) + key.bytesize * 8 + end + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/client.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/client.rb new file mode 100644 index 0000000..50fdc29 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/client.rb @@ -0,0 +1,566 @@ +# frozen_string_literal: true + +require 'forwardable' + +module Aws + module S3 + + REQUIRED_PARAMS = [:key_wrap_schema, :content_encryption_schema, :security_profile] + SUPPORTED_SECURITY_PROFILES = [:v2, :v2_and_legacy] + + # Provides an encryption client that encrypts and decrypts data client-side, + # storing the encrypted data in Amazon S3. The `EncryptionV2::Client` (V2 Client) + # provides improved security over the `Encryption::Client` (V1 Client) + # by using more modern and secure algorithms. You can use the V2 Client + # to continue decrypting objects encrypted using deprecated algorithms + # by setting security_profile: :v2_and_legacy. The latest V1 Client also + # supports reading and decrypting objects encrypted by the V2 Client. + # + # This client uses a process called "envelope encryption". Your private + # encryption keys and your data's plain-text are **never** sent to + # Amazon S3. **If you lose you encryption keys, you will not be able to + # decrypt your data.** + # + # ## Envelope Encryption Overview + # + # The goal of envelope encryption is to combine the performance of + # fast symmetric encryption while maintaining the secure key management + # that asymmetric keys provide. + # + # A one-time-use symmetric key (envelope key) is generated client-side. + # This is used to encrypt the data client-side. This key is then + # encrypted by your master key and stored alongside your data in Amazon + # S3. + # + # When accessing your encrypted data with the encryption client, + # the encrypted envelope key is retrieved and decrypted client-side + # with your master key. The envelope key is then used to decrypt the + # data client-side. + # + # One of the benefits of envelope encryption is that if your master key + # is compromised, you have the option of just re-encrypting the stored + # envelope symmetric keys, instead of re-encrypting all of the + # data in your account. + # + # ## Basic Usage + # + # The encryption client requires an {Aws::S3::Client}. If you do not + # provide a `:client`, then a client will be constructed for you. 
+ # + # require 'openssl' + # key = OpenSSL::PKey::RSA.new(1024) + # + # # encryption client + # s3 = Aws::S3::EncryptionV2::Client.new( + # encryption_key: key, + # key_wrap_schema: :rsa_oaep_sha1, # the key_wrap_schema must be rsa_oaep_sha1 for asymmetric keys + # content_encryption_schema: :aes_gcm_no_padding, + # security_profile: :v2 # use :v2_and_legacy to allow reading/decrypting objects encrypted by the V1 encryption client + # ) + # + # # round-trip an object, encrypted/decrypted locally + # s3.put_object(bucket:'aws-sdk', key:'secret', body:'handshake') + # s3.get_object(bucket:'aws-sdk', key:'secret').body.read + # #=> 'handshake' + # + # # reading an encrypted object without the encryption client + # # results in getting the cipher text + # Aws::S3::Client.new.get_object(bucket:'aws-sdk', key:'secret').body.read + # #=> "... cipher text ..." + # + # ## Required Configuration + # + # You must configure all of the following: + # + # * a key or key provider - See the Keys section below. The key provided determines + # the key wrapping schema(s) supported for both encryption and decryption. + # * `key_wrap_schema` - The key wrapping schema. It must match the type of key configured. + # * `content_encryption_schema` - The only supported value currently is `:aes_gcm_no_padding`. + # More options will be added in future releases. + # * `security_profile` - Determines the support for reading objects written + # using older key wrap or content encryption schemas. If you need to read + # legacy objects encrypted by an existing V1 Client, then set this to `:v2_and_legacy`. + # Otherwise, set it to `:v2` + # + # ## Keys + # + # For client-side encryption to work, you must provide one of the following: + # + # * An encryption key + # * A {KeyProvider} + # * A KMS encryption key id + # + # Additionally, the key wrapping schema must agree with the type of the key: + # * :aes_gcm: An AES encryption key or a key provider. + # * :rsa_oaep_sha1: An RSA encryption key or key provider. + # * :kms_context: A KMS encryption key id + # + # ### An Encryption Key + # + # You can pass a single encryption key. This is used as a master key + # encrypting and decrypting all object keys. + # + # key = OpenSSL::Cipher.new("AES-256-ECB").random_key # symmetric key - used with `key_wrap_schema: :aes_gcm` + # key = OpenSSL::PKey::RSA.new(1024) # asymmetric key pair - used with `key_wrap_schema: :rsa_oaep_sha1` + # + # s3 = Aws::S3::EncryptionV2::Client.new( + # encryption_key: key, + # key_wrap_schema: :aes_gcm, # or :rsa_oaep_sha1 if using RSA + # content_encryption_schema: :aes_gcm_no_padding, + # security_profile: :v2 + # ) + # + # ### Key Provider + # + # Alternatively, you can use a {KeyProvider}. A key provider makes + # it easy to work with multiple keys and simplifies key rotation. + # + # ### KMS Encryption Key Id + # + # If you pass the id of an AWS Key Management Service (KMS) key and + # use :kms_context for the key_wrap_schema, then KMS will be used to + # generate, encrypt and decrypt object keys.
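+    # Before the client example just below, it is worth seeing roughly
+    # what happens under the hood: a fresh data key is requested from
+    # KMS for every object written. This is a simplified, illustrative
+    # sketch of KmsCipherProvider#encryption_cipher (defined later in
+    # this diff); `kms` and `key_id` are as created in the example below:
+    #
+    #     key_data = kms.generate_data_key(
+    #       key_id: key_id,
+    #       encryption_context: { 'aws:x-amz-cek-alg' => 'AES/GCM/NoPadding' },
+    #       key_spec: 'AES_256'
+    #     )
+    #     key_data.plaintext       # one-time AES-256-GCM content key
+    #     key_data.ciphertext_blob # stored base64-encoded as x-amz-key-v2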
+ # + # # keep track of the kms key id + # kms = Aws::KMS::Client.new + # key_id = kms.create_key.key_metadata.key_id + # + # Aws::S3::EncryptionV2::Client.new( + # kms_key_id: key_id, + # kms_client: kms, + # key_wrap_schema: :kms_context, + # content_encryption_schema: :aes_gcm_no_padding, + # security_profile: :v2 + # ) + # + # ## Custom Key Providers + # + # A {KeyProvider} is any object that responds to: + # + # * `#encryption_materials` + # * `#key_for(materials_description)` + # + # Here is a trivial implementation of an in-memory key provider. + # This is provided as a demonstration of the key provider interface, + # and should not be used in production: + # + # class KeyProvider + # + # def initialize(default_key_name, keys) + # @keys = keys + # @encryption_materials = Aws::S3::EncryptionV2::Materials.new( + # key: @keys[default_key_name], + # description: JSON.dump(key: default_key_name), + # ) + # end + # + # attr_reader :encryption_materials + # + # def key_for(matdesc) + # key_name = JSON.parse(matdesc)['key'] + # if key = @keys[key_name] + # key + # else + # raise "encryption key not found for: #{matdesc.inspect}" + # end + # end + # end + # + # Given the above key provider, you can create an encryption client that + # chooses the key to use based on the materials description stored with + # the encrypted object. This makes it possible to use multiple keys + # and simplifies key rotation. + # + # # uses "new-key" for encrypting objects, uses either for decrypting + # keys = KeyProvider.new('new-key', { + # "old-key" => Base64.decode64("kM5UVbhE/4rtMZJfsadYEdm2vaKFsmV2f5+URSeUCV4="), + # "new-key" => Base64.decode64("w1WLio3agRWRTSJK/Ouh8NHoqRQ6fn5WbSXDTHjXMSo="), + # }) + # + # # chooses the key based on the materials description stored + # # with the encrypted object + # s3 = Aws::S3::EncryptionV2::Client.new( + # key_provider: keys, + # key_wrap_schema: ..., + # content_encryption_schema: :aes_gcm_no_padding, + # security_profile: :v2 + # ) + # + # ## Materials Description + # + # A materials description is a JSON document string that is stored + # in the metadata (or instruction file) of an encrypted object. + # The {DefaultKeyProvider} uses the empty JSON document `"{}"`. + # + # When building a key provider, you are free to store whatever + # information you need to identify the master key that was used + # to encrypt the object. + # + # ## Envelope Location + # + # By default, the encryption client stores the encryption envelope + # with the object, as metadata. You can choose to have the envelope + # stored in a separate "instruction file". An instruction file + # is an object, with the key of the encrypted object, suffixed with + # `".instruction"`. + # + # Specify the `:envelope_location` option as `:instruction_file` to + # use an instruction file for storing the envelope. + # + # # default behavior + # s3 = Aws::S3::EncryptionV2::Client.new( + # key_provider: ..., + # envelope_location: :metadata, + # ) + # + # # store envelope in a separate object + # s3 = Aws::S3::EncryptionV2::Client.new( + # key_provider: ..., + # envelope_location: :instruction_file, + # instruction_file_suffix: '.instruction', # default + # key_wrap_schema: ..., + # content_encryption_schema: :aes_gcm_no_padding, + # security_profile: :v2 + # ) + # + # When using an instruction file, multiple requests are made when + # putting and getting the object.
**This may cause issues if you are + # issuing concurrent PUT and GET requests to an encrypted object.** + # + module EncryptionV2 + class Client + + extend Deprecations + extend Forwardable + def_delegators :@client, :config, :delete_object, :head_object, :build_request + + # Creates a new encryption client. You must configure all of the following: + # + # * a key or key provider - The key provided also determines the key wrapping + # schema(s) supported for both encryption and decryption. + # * `key_wrap_schema` - The key wrapping schema. It must match the type of key configured. + # * `content_encryption_schema` - The only supported value currently is `:aes_gcm_no_padding` + # More options will be added in future releases. + # * `security_profile` - Determines the support for reading objects written + # using older key wrap or content encryption schemas. If you need to read + # legacy objects encrypted by an existing V1 Client, then set this to `:v2_and_legacy`. + # Otherwise, set it to `:v2` + # + # To configure the key you must provide one of the following options: + # + # * `:encryption_key` + # * `:kms_key_id` + # * `:key_provider` + # + # You may also pass any other options accepted by `Client#initialize`. + # + # @option options [S3::Client] :client A basic S3 client that is used + # to make api calls. If a `:client` is not provided, a new {S3::Client} + # will be constructed. + # + # @option options [OpenSSL::PKey::RSA, String] :encryption_key The master + # key to use for encrypting/decrypting all objects. + # + # @option options [String] :kms_key_id When you provide a `:kms_key_id`, + # then AWS Key Management Service (KMS) will be used to manage the + # object encryption keys. By default a {KMS::Client} will be + # constructed for KMS API calls. Alternatively, you can provide + # your own via `:kms_client`. To only support decryption/reads, you may + # provide `:allow_decrypt_with_any_cmk` which will use + # the implicit CMK associated with the data during reads but will + # not allow you to encrypt/write objects with this client. + # + # @option options [#key_for] :key_provider Any object that responds + # to `#key_for`. This method should accept a materials description + # JSON document string and return an encryption key. + # + # @option options [required, Symbol] :key_wrap_schema The Key wrapping + # schema to be used. It must match the type of key configured. + # Must be one of the following: + # + # * :kms_context (Must provide kms_key_id) + # * :aes_gcm (Must provide an AES (string) key) + # * :rsa_oaep_sha1 (Must provide an RSA key) + # + # @option options [required, Symbol] :content_encryption_schema + # Must be one of the following: + # + # * :aes_gcm_no_padding + # + # @option options [required, Symbol] :security_profile + # Determines the support for reading objects written using older + # key wrap or content encryption schemas. + # Must be one of the following: + # + # * :v2 - Reads of legacy (v1) objects are NOT allowed + # * :v2_and_legacy - Enables reading of legacy (V1) schemas. + # + # @option options [Symbol] :envelope_location (:metadata) Where to + # store the envelope encryption keys. By default, the envelope is + # stored with the encrypted object. If you pass `:instruction_file`, + # then the envelope is stored in a separate object in Amazon S3.
+ # + # @option options [String] :instruction_file_suffix ('.instruction') + # When `:envelope_location` is `:instruction_file` then the + # instruction file uses the object key with this suffix appended. + # + # @option options [KMS::Client] :kms_client A default {KMS::Client} + # is constructed when using KMS to manage encryption keys. + # + def initialize(options = {}) + validate_params(options) + @client = extract_client(options) + @cipher_provider = cipher_provider(options) + @envelope_location = extract_location(options) + @instruction_file_suffix = extract_suffix(options) + @kms_allow_decrypt_with_any_cmk = + options[:kms_key_id] == :kms_allow_decrypt_with_any_cmk + @security_profile = extract_security_profile(options) + end + + # @return [S3::Client] + attr_reader :client + + # @return [KeyProvider, nil] Returns `nil` if you are using + # AWS Key Management Service (KMS). + attr_reader :key_provider + + # @return [Symbol] Determines the support for reading objects written + # using older key wrap or content encryption schemas. + attr_reader :security_profile + + # @return [Boolean] If true the provided KMS key_id will not be used + # during decrypt, allowing decryption with the key_id from the object. + attr_reader :kms_allow_decrypt_with_any_cmk + + # @return [Symbol<:metadata, :instruction_file>] + attr_reader :envelope_location + + # @return [String] When {#envelope_location} is `:instruction_file`, + # the envelope is stored in the object with the object key suffixed + # by this string. + attr_reader :instruction_file_suffix + + # Uploads an object to Amazon S3, encrypting data client-side. + # See {S3::Client#put_object} for documentation on accepted + # request parameters. + # @option params [Hash] :kms_encryption_context Additional encryption + # context to use with KMS. Applies only when KMS is used. In order + # to decrypt the object you will need to provide the identical + # :kms_encryption_context to `get_object`. + # @option (see S3::Client#put_object) + # @return (see S3::Client#put_object) + # @see S3::Client#put_object + def put_object(params = {}) + kms_encryption_context = params.delete(:kms_encryption_context) + req = @client.build_request(:put_object, params) + req.handlers.add(EncryptHandler, priority: 95) + req.context[:encryption] = { + cipher_provider: @cipher_provider, + envelope_location: @envelope_location, + instruction_file_suffix: @instruction_file_suffix, + kms_encryption_context: kms_encryption_context + } + req.send_request + end + + # Gets an object from Amazon S3, decrypting data locally. + # See {S3::Client#get_object} for documentation on accepted + # request parameters. + # Warning: If you provide a block to get_object or set the request + # parameter :response_target to a Proc, then read the entire object to the + # end before you start using the decrypted data. This is to verify that + # the object has not been modified since it was encrypted. + # + # @option options [Symbol] :security_profile + # Determines the support for reading objects written using older + # key wrap or content encryption schemas. Overrides the value set + # on client construction if provided. + # Must be one of the following: + # + # * :v2 - Reads of legacy (v1) objects are NOT allowed + # * :v2_and_legacy - Enables reading of legacy (V1) schemas. + # @option params [String] :instruction_file_suffix The suffix + # used to find the instruction file containing the encryption + # envelope. 
You should not set this option when the envelope + # is stored in the object metadata. Defaults to + # {#instruction_file_suffix}. + # @option params [Hash] :kms_encryption_context Additional encryption + # context to use with KMS. Applies only when KMS is used. + # @option options [Boolean] :kms_allow_decrypt_with_any_cmk (false) + # By default the KMS CMK ID (kms_key_id) will be used during decrypt + # and will fail if there is a mismatch. Setting this to true + # will use the implicit CMK associated with the data. + # @option (see S3::Client#get_object) + # @return (see S3::Client#get_object) + # @see S3::Client#get_object + # @note The `:range` request parameter is not supported. + def get_object(params = {}, &block) + if params[:range] + raise NotImplementedError, '#get_object with :range not supported' + end + envelope_location, instruction_file_suffix = envelope_options(params) + kms_encryption_context = params.delete(:kms_encryption_context) + kms_any_cmk_mode = kms_any_cmk_mode(params) + security_profile = security_profile_from_params(params) + + req = @client.build_request(:get_object, params) + req.handlers.add(DecryptHandler) + req.context[:encryption] = { + cipher_provider: @cipher_provider, + envelope_location: envelope_location, + instruction_file_suffix: instruction_file_suffix, + kms_encryption_context: kms_encryption_context, + kms_allow_decrypt_with_any_cmk: kms_any_cmk_mode, + security_profile: security_profile + } + req.send_request(target: block) + end + + private + + # Validate required parameters exist and don't conflict. + # The cek_alg and wrap_alg are passed on to the CipherProviders + # and further validated there + def validate_params(options) + unless (missing_params = REQUIRED_PARAMS - options.keys).empty? + raise ArgumentError, "Missing required parameter(s): "\ + "#{missing_params.map{ |s| ":#{s}" }.join(', ')}" + end + + wrap_alg = options[:key_wrap_schema] + + # validate that the wrap alg matches the type of key given + case wrap_alg + when :kms_context + unless options[:kms_key_id] + raise ArgumentError, 'You must provide :kms_key_id to use :kms_context' + end + end + end + + def extract_client(options) + options[:client] || begin + options = options.dup + options.delete(:kms_key_id) + options.delete(:kms_client) + options.delete(:key_provider) + options.delete(:encryption_key) + options.delete(:envelope_location) + options.delete(:instruction_file_suffix) + REQUIRED_PARAMS.each { |p| options.delete(p) } + S3::Client.new(options) + end + end + + def kms_client(options) + options[:kms_client] || begin + KMS::Client.new( + region: @client.config.region, + credentials: @client.config.credentials, + ) + end + end + + def cipher_provider(options) + if options[:kms_key_id] + KmsCipherProvider.new( + kms_key_id: options[:kms_key_id], + kms_client: kms_client(options), + key_wrap_schema: options[:key_wrap_schema], + content_encryption_schema: options[:content_encryption_schema] + ) + else + @key_provider = extract_key_provider(options) + DefaultCipherProvider.new( + key_provider: @key_provider, + key_wrap_schema: options[:key_wrap_schema], + content_encryption_schema: options[:content_encryption_schema] + ) + end + end + + def extract_key_provider(options) + if options[:key_provider] + options[:key_provider] + elsif options[:encryption_key] + DefaultKeyProvider.new(options) + else + msg = 'you must pass a :kms_key_id, :key_provider, or :encryption_key' + raise ArgumentError, msg + end + end + + def envelope_options(params) + location = 
params.delete(:envelope_location) || @envelope_location + suffix = params.delete(:instruction_file_suffix) + if suffix + [:instruction_file, suffix] + else + [location, @instruction_file_suffix] + end + end + + def extract_location(options) + location = options[:envelope_location] || :metadata + if [:metadata, :instruction_file].include?(location) + location + else + msg = ':envelope_location must be :metadata or :instruction_file '\ + "got #{location.inspect}" + raise ArgumentError, msg + end + end + + def extract_suffix(options) + suffix = options[:instruction_file_suffix] || '.instruction' + if suffix.is_a? String + suffix + else + msg = ':instruction_file_suffix must be a String' + raise ArgumentError, msg + end + end + + def kms_any_cmk_mode(params) + if !params[:kms_allow_decrypt_with_any_cmk].nil? + params.delete(:kms_allow_decrypt_with_any_cmk) + else + @kms_allow_decrypt_with_any_cmk + end + end + + def extract_security_profile(options) + validate_security_profile(options[:security_profile]) + end + + def security_profile_from_params(params) + security_profile = + if !params[:security_profile].nil? + params.delete(:security_profile) + else + @security_profile + end + validate_security_profile(security_profile) + end + + def validate_security_profile(security_profile) + unless SUPPORTED_SECURITY_PROFILES.include? security_profile + raise ArgumentError, "Unsupported security profile: :#{security_profile}. " \ + "Please provide one of: #{SUPPORTED_SECURITY_PROFILES.map { |s| ":#{s}" }.join(', ')}" + end + if security_profile == :v2_and_legacy && !@warned_about_legacy + @warned_about_legacy = true + warn( + 'The S3 Encryption Client is configured to read encrypted objects ' \ + "with legacy encryption modes. If you don't have objects " \ + 'encrypted with these legacy modes, you should disable support ' \ + 'for them to enhance security.' + ) + end + security_profile + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb new file mode 100644 index 0000000..be6d603 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb @@ -0,0 +1,222 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module EncryptionV2 + # @api private + class DecryptHandler < Seahorse::Client::Handler + @@warned_response_target_proc = false + + V1_ENVELOPE_KEYS = %w( + x-amz-key + x-amz-iv + x-amz-matdesc + ) + + V2_ENVELOPE_KEYS = %w( + x-amz-key-v2 + x-amz-iv + x-amz-cek-alg + x-amz-wrap-alg + x-amz-matdesc + ) + + V2_OPTIONAL_KEYS = %w(x-amz-tag-len) + + POSSIBLE_ENVELOPE_KEYS = (V1_ENVELOPE_KEYS + + V2_ENVELOPE_KEYS + V2_OPTIONAL_KEYS).uniq + + POSSIBLE_WRAPPING_FORMATS = %w( + AES/GCM + kms + kms+context + RSA-OAEP-SHA1 + ) + + POSSIBLE_ENCRYPTION_FORMATS = %w( + AES/GCM/NoPadding + AES/CBC/PKCS5Padding + AES/CBC/PKCS7Padding + ) + + AUTH_REQUIRED_CEK_ALGS = %w(AES/GCM/NoPadding) + + def call(context) + attach_http_event_listeners(context) + apply_cse_user_agent(context) + + if context[:response_target].is_a?(Proc) && !@@warned_response_target_proc + @@warned_response_target_proc = true + warn(':response_target is a Proc, or a block was provided. ' \ + 'Read the entire object to the ' \ + 'end before you start using the decrypted data. 
This is to ' \ + 'verify that the object has not been modified since it ' \ + 'was encrypted.') + + end + + @handler.call(context) + end + + private + + def attach_http_event_listeners(context) + + context.http_response.on_headers(200) do + cipher, envelope = decryption_cipher(context) + decrypter = body_contains_auth_tag?(envelope) ? + authenticated_decrypter(context, cipher, envelope) : + IODecrypter.new(cipher, context.http_response.body) + context.http_response.body = decrypter + end + + context.http_response.on_success(200) do + decrypter = context.http_response.body + decrypter.finalize + decrypter.io.rewind if decrypter.io.respond_to?(:rewind) + context.http_response.body = decrypter.io + end + + context.http_response.on_error do + if context.http_response.body.respond_to?(:io) + context.http_response.body = context.http_response.body.io + end + end + end + + def decryption_cipher(context) + if (envelope = get_encryption_envelope(context)) + cipher = context[:encryption][:cipher_provider] + .decryption_cipher( + envelope, + context[:encryption] + ) + [cipher, envelope] + else + raise Errors::DecryptionError, "unable to locate encryption envelope" + end + end + + def get_encryption_envelope(context) + if context[:encryption][:envelope_location] == :metadata + envelope_from_metadata(context) || envelope_from_instr_file(context) + else + envelope_from_instr_file(context) || envelope_from_metadata(context) + end + end + + def envelope_from_metadata(context) + possible_envelope = {} + POSSIBLE_ENVELOPE_KEYS.each do |suffix| + if value = context.http_response.headers["x-amz-meta-#{suffix}"] + possible_envelope[suffix] = value + end + end + extract_envelope(possible_envelope) + end + + def envelope_from_instr_file(context) + suffix = context[:encryption][:instruction_file_suffix] + possible_envelope = Json.load(context.client.get_object( + bucket: context.params[:bucket], + key: context.params[:key] + suffix + ).body.read) + extract_envelope(possible_envelope) + rescue S3::Errors::ServiceError, Json::ParseError + nil + end + + def extract_envelope(hash) + return nil unless hash + return v1_envelope(hash) if hash.key?('x-amz-key') + return v2_envelope(hash) if hash.key?('x-amz-key-v2') + if hash.keys.any? { |key| key.match(/^x-amz-key-(.+)$/) } + msg = "unsupported envelope encryption version #{$1}" + raise Errors::DecryptionError, msg + end + end + + def v1_envelope(envelope) + envelope + end + + def v2_envelope(envelope) + unless POSSIBLE_ENCRYPTION_FORMATS.include? envelope['x-amz-cek-alg'] + alg = envelope['x-amz-cek-alg'].inspect + msg = "unsupported content encrypting key (cek) format: #{alg}" + raise Errors::DecryptionError, msg + end + unless POSSIBLE_WRAPPING_FORMATS.include? envelope['x-amz-wrap-alg'] + alg = envelope['x-amz-wrap-alg'].inspect + msg = "unsupported key wrapping algorithm: #{alg}" + raise Errors::DecryptionError, msg + end + unless (missing_keys = V2_ENVELOPE_KEYS - envelope.keys).empty? + msg = "incomplete v2 encryption envelope:\n" + msg += " missing: #{missing_keys.join(',')}\n" + raise Errors::DecryptionError, msg + end + envelope + end + + # This method fetches the tag from the end of the object by + # making a GET Object w/range request. This auth tag is used + # to initialize the cipher, and the decrypter truncates the + # auth tag from the body when writing the final bytes. 
+ def authenticated_decrypter(context, cipher, envelope) + http_resp = context.http_response + content_length = http_resp.headers['content-length'].to_i + auth_tag_length = auth_tag_length(envelope) + + auth_tag = context.client.get_object( + bucket: context.params[:bucket], + key: context.params[:key], + range: "bytes=-#{auth_tag_length}" + ).body.read + + cipher.auth_tag = auth_tag + cipher.auth_data = '' + + # The encrypted object contains both the cipher text + # plus a trailing auth tag. + IOAuthDecrypter.new( + io: http_resp.body, + encrypted_content_length: content_length - auth_tag_length, + cipher: cipher) + end + + def body_contains_auth_tag?(envelope) + AUTH_REQUIRED_CEK_ALGS.include?(envelope['x-amz-cek-alg']) + end + + # Determine the auth tag length from the algorithm + # Validate it against the value provided in the x-amz-tag-len + # Return the tag length in bytes + def auth_tag_length(envelope) + tag_length = + case envelope['x-amz-cek-alg'] + when 'AES/GCM/NoPadding' then AES_GCM_TAG_LEN_BYTES + else + raise ArgumentError, 'Unsupported cek-alg: ' \ "#{envelope['x-amz-cek-alg']}" + end + if (tag_length * 8) != envelope['x-amz-tag-len'].to_i + raise Errors::DecryptionError, 'x-amz-tag-len does not match expected' + end + tag_length + end + + def apply_cse_user_agent(context) + if context.config.user_agent_suffix.nil? + context.config.user_agent_suffix = EC_USER_AGENT + elsif !context.config.user_agent_suffix.include? EC_USER_AGENT + context.config.user_agent_suffix += " #{EC_USER_AGENT}" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb new file mode 100644 index 0000000..589939a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb @@ -0,0 +1,170 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module EncryptionV2 + # @api private + class DefaultCipherProvider + + def initialize(options = {}) + @key_provider = options[:key_provider] + @key_wrap_schema = validate_key_wrap( + options[:key_wrap_schema], + @key_provider.encryption_materials.key + ) + @content_encryption_schema = validate_cek( + options[:content_encryption_schema] + ) + end + + # @return [Array] Creates and returns a new encryption + # envelope and encryption cipher. + def encryption_cipher(options = {}) + validate_options(options) + cipher = Utils.aes_encryption_cipher(:GCM) + if @key_provider.encryption_materials.key.is_a? OpenSSL::PKey::RSA + enc_key = encode64( + encrypt_rsa(envelope_key(cipher), @content_encryption_schema) + ) + else + enc_key = encode64( + encrypt_aes_gcm(envelope_key(cipher), @content_encryption_schema) + ) + end + envelope = { + 'x-amz-key-v2' => enc_key, + 'x-amz-cek-alg' => @content_encryption_schema, + 'x-amz-tag-len' => (AES_GCM_TAG_LEN_BYTES * 8).to_s, + 'x-amz-wrap-alg' => @key_wrap_schema, + 'x-amz-iv' => encode64(envelope_iv(cipher)), + 'x-amz-matdesc' => materials_description + } + cipher.auth_data = '' # auth_data must be set after key and iv + [envelope, cipher] + end + + # @return [Cipher] Given an encryption envelope, returns a + # decryption cipher. + def decryption_cipher(envelope, options = {}) + validate_options(options) + master_key = @key_provider.key_for(envelope['x-amz-matdesc']) + if envelope.key?
'x-amz-key' + unless options[:security_profile] == :v2_and_legacy + raise Errors::LegacyDecryptionError + end + # Support for decryption of legacy objects + key = Utils.decrypt(master_key, decode64(envelope['x-amz-key'])) + iv = decode64(envelope['x-amz-iv']) + Utils.aes_decryption_cipher(:CBC, key, iv) + else + if envelope['x-amz-cek-alg'] != 'AES/GCM/NoPadding' + raise ArgumentError, 'Unsupported cek-alg: ' \ + "#{envelope['x-amz-cek-alg']}" + end + key = + case envelope['x-amz-wrap-alg'] + when 'AES/GCM' + if master_key.is_a? OpenSSL::PKey::RSA + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with an RSA key and the x-amz-wrap-alg is AES/GCM.' + end + Utils.decrypt_aes_gcm(master_key, + decode64(envelope['x-amz-key-v2']), + envelope['x-amz-cek-alg']) + when 'RSA-OAEP-SHA1' + unless master_key.is_a? OpenSSL::PKey::RSA + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with an AES key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' + end + key, cek_alg = Utils.decrypt_rsa(master_key, decode64(envelope['x-amz-key-v2'])) + raise Errors::CEKAlgMismatchError unless cek_alg == envelope['x-amz-cek-alg'] + key + when 'kms+context' + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with a user provided key and the x-amz-wrap-alg is' \ + ' kms+context. Please configure the client with the' \ + ' required kms_key_id' + else + raise ArgumentError, 'Unsupported wrap-alg: ' \ + "#{envelope['x-amz-wrap-alg']}" + end + iv = decode64(envelope['x-amz-iv']) + Utils.aes_decryption_cipher(:GCM, key, iv) + end + end + + private + + # Validate that the key_wrap_schema + # is valid, supported and matches the provided key. + # Returns the string version for the x-amz-key-wrap-alg + def validate_key_wrap(key_wrap_schema, key) + if key.is_a? OpenSSL::PKey::RSA + unless key_wrap_schema == :rsa_oaep_sha1 + raise ArgumentError, ':key_wrap_schema must be set to :rsa_oaep_sha1 for RSA keys.' + end + else + unless key_wrap_schema == :aes_gcm + raise ArgumentError, ':key_wrap_schema must be set to :aes_gcm for AES keys.' + end + end + + case key_wrap_schema + when :rsa_oaep_sha1 then 'RSA-OAEP-SHA1' + when :aes_gcm then 'AES/GCM' + when :kms_context + raise ArgumentError, 'A kms_key_id is required when using :kms_context.' + else + raise ArgumentError, "Unsupported key_wrap_schema: #{key_wrap_schema}" + end + end + + def validate_cek(content_encryption_schema) + case content_encryption_schema + when :aes_gcm_no_padding + "AES/GCM/NoPadding" + else + raise ArgumentError, "Unsupported content_encryption_schema: #{content_encryption_schema}" + end + end + + def envelope_key(cipher) + cipher.key = cipher.random_key + end + + def envelope_iv(cipher) + cipher.iv = cipher.random_iv + end + + def encrypt_aes_gcm(data, auth_data) + Utils.encrypt_aes_gcm(@key_provider.encryption_materials.key, data, auth_data) + end + + def encrypt_rsa(data, auth_data) + Utils.encrypt_rsa(@key_provider.encryption_materials.key, data, auth_data) + end + + def materials_description + @key_provider.encryption_materials.description + end + + def encode64(str) + Base64.encode64(str).split("\n") * '' + end + + def decode64(str) + Base64.decode64(str) + end + + def validate_options(options) + if !options[:kms_encryption_context].nil? + raise ArgumentError, 'Cannot provide :kms_encryption_context ' \ + 'with non KMS client.' 
+ end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_key_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_key_provider.rb new file mode 100644 index 0000000..d99ee41 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/default_key_provider.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +module Aws + module S3 + module EncryptionV2 + + # The default key provider is constructed with a single key + # that is used for both encryption and decryption, ignoring + # the possible per-object envelope encryption materials description. + # @api private + class DefaultKeyProvider + + include KeyProvider + + # @option options [required, OpenSSL::PKey::RSA, String] :encryption_key + # The master key to use for encrypting objects. + # @option options [String] :materials_description ('{}') + # A description of the encryption key. + def initialize(options = {}) + @encryption_materials = Materials.new( + key: options[:encryption_key], + description: options[:materials_description] || '{}' + ) + end + + # @return [Materials] + def encryption_materials + @encryption_materials + end + + # @param [String] materials_description + # @return Returns the key given in the constructor. + def key_for(materials_description) + @encryption_materials.key + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb new file mode 100644 index 0000000..d90e854 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module EncryptionV2 + # @api private + class EncryptHandler < Seahorse::Client::Handler + + def call(context) + envelope, cipher = context[:encryption][:cipher_provider] + .encryption_cipher( + kms_encryption_context: context[:encryption][:kms_encryption_context] + ) + context[:encryption][:cipher] = cipher + apply_encryption_envelope(context, envelope) + apply_encryption_cipher(context, cipher) + apply_cse_user_agent(context) + @handler.call(context) + end + + private + + def apply_encryption_envelope(context, envelope) + if context[:encryption][:envelope_location] == :instruction_file + suffix = context[:encryption][:instruction_file_suffix] + context.client.put_object( + bucket: context.params[:bucket], + key: context.params[:key] + suffix, + body: Json.dump(envelope) + ) + else # :metadata + context.params[:metadata] ||= {} + context.params[:metadata].update(envelope) + end + end + + def apply_encryption_cipher(context, cipher) + io = context.params[:body] || '' + io = StringIO.new(io) if io.is_a? String + context.params[:body] = IOEncrypter.new(cipher, io) + context.params[:metadata] ||= {} + context.params[:metadata]['x-amz-unencrypted-content-length'] = io.size + if context.params.delete(:content_md5) + raise ArgumentError, 'Setting content_md5 on client side '\ + 'encrypted objects is deprecated.' + end + context.http_response.on_headers do + context.params[:body].close + end + end + + def apply_cse_user_agent(context) + if context.config.user_agent_suffix.nil? + context.config.user_agent_suffix = EC_USER_AGENT + elsif !context.config.user_agent_suffix.include? 
EC_USER_AGENT + context.config.user_agent_suffix += " #{EC_USER_AGENT}" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/errors.rb new file mode 100644 index 0000000..031169b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/errors.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module Aws + module S3 + module EncryptionV2 + module Errors + + # Generic DecryptionError + class DecryptionError < RuntimeError; end + + class EncryptionError < RuntimeError; end + + # Raised when attempting to decrypt a legacy (V1) encrypted object + # when using a security_profile that does not support it. + class LegacyDecryptionError < DecryptionError + def initialize(*args) + msg = 'The requested object is ' \ + 'encrypted with V1 encryption schemas that have been disabled ' \ + 'by client configuration security_profile = :v2. Retry with ' \ + ':v2_and_legacy or re-encrypt the object.' + super(msg) + end + end + + class CEKAlgMismatchError < DecryptionError + def initialize(*args) + msg = 'The content encryption algorithm used at encryption time ' \ + 'does not match the algorithm stored for decryption time. ' \ + 'The object may be altered or corrupted.' + super(msg) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb new file mode 100644 index 0000000..f336702 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Aws + module S3 + module EncryptionV2 + # @api private + class IOAuthDecrypter + + # @option options [required, IO#write] :io + # An IO-like object that responds to {#write}. + # @option options [required, Integer] :encrypted_content_length + # The number of bytes to decrypt from the `:io` object. + # This should be the total size of `:io` minus the length of + # the cipher auth tag. + # @option options [required, OpenSSL::Cipher] :cipher An initialized + # cipher that can be used to decrypt the bytes as they are + # written to the `:io` object. The cipher should already have + # its `#auth_tag` set. + def initialize(options = {}) + @decrypter = IODecrypter.new(options[:cipher], options[:io]) + @max_bytes = options[:encrypted_content_length] + @bytes_written = 0 + end + + def write(chunk) + chunk = truncate_chunk(chunk) + if chunk.bytesize > 0 + @bytes_written += chunk.bytesize + @decrypter.write(chunk) + end + end + + def finalize + @decrypter.finalize + end + + def io + @decrypter.io + end + + private + + def truncate_chunk(chunk) + if chunk.bytesize + @bytes_written <= @max_bytes + chunk + elsif @bytes_written < @max_bytes + chunk[0..(@max_bytes - @bytes_written - 1)] + else + # If the tag was sent over after the full body has been read, + # we don't want to accidentally append it. 
+ "" + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_decrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_decrypter.rb new file mode 100644 index 0000000..a978940 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_decrypter.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module Aws + module S3 + module EncryptionV2 + # @api private + class IODecrypter + + # @param [OpenSSL::Cipher] cipher + # @param [IO#write] io An IO-like object that responds to `#write`. + def initialize(cipher, io) + @cipher = cipher + # Ensure that IO is reset between retries + @io = io.tap { |io| io.truncate(0) if io.respond_to?(:truncate) } + @cipher_buffer = String.new + end + + # @return [#write] + attr_reader :io + + def write(chunk) + # decrypt and write + if @cipher.method(:update).arity == 1 + @io.write(@cipher.update(chunk)) + else + @io.write(@cipher.update(chunk, @cipher_buffer)) + end + end + + def finalize + @io.write(@cipher.final) + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb new file mode 100644 index 0000000..60e2537 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +require 'stringio' +require 'tempfile' + +module Aws + module S3 + module EncryptionV2 + + # Provides an IO wrapper encrypting a stream of data. + # @api private + class IOEncrypter + + # @api private + ONE_MEGABYTE = 1024 * 1024 + + def initialize(cipher, io) + @encrypted = io.size <= ONE_MEGABYTE ? + encrypt_to_stringio(cipher, io.read) : + encrypt_to_tempfile(cipher, io) + @size = @encrypted.size + end + + # @return [Integer] + attr_reader :size + + def read(bytes = nil, output_buffer = nil) + if @encrypted.is_a?(Tempfile) && @encrypted.closed? + @encrypted.open + @encrypted.binmode + end + @encrypted.read(bytes, output_buffer) + end + + def rewind + @encrypted.rewind + end + + # @api private + def close + @encrypted.close if @encrypted.is_a?(Tempfile) + end + + private + + def encrypt_to_stringio(cipher, plain_text) + if plain_text.empty? + StringIO.new(cipher.final + cipher.auth_tag) + else + StringIO.new(cipher.update(plain_text) + cipher.final + cipher.auth_tag) + end + end + + def encrypt_to_tempfile(cipher, io) + encrypted = Tempfile.new(self.object_id.to_s) + encrypted.binmode + while chunk = io.read(ONE_MEGABYTE, read_buffer ||= String.new) + if cipher.method(:update).arity == 1 + encrypted.write(cipher.update(chunk)) + else + encrypted.write(cipher.update(chunk, cipher_buffer ||= String.new)) + end + end + encrypted.write(cipher.final) + encrypted.write(cipher.auth_tag) + encrypted.rewind + encrypted + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/key_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/key_provider.rb new file mode 100644 index 0000000..2b31204 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/key_provider.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + module S3 + module EncryptionV2 + + # This module defines the interface required for a {Client#key_provider}. 
+ # A key provider is any object that: + # + # * Responds to {#encryption_materials} with an {Materials} object. + # + # * Responds to {#key_for}, receiving a JSON document String, + # returning an encryption key. The returned encryption key + # must be one of: + # + # * `OpenSSL::PKey::RSA` - for asymmetric encryption + # * `String` - 32, 24, or 16 bytes long, for symmetric encryption + # + module KeyProvider + + # @return [Materials] + def encryption_materials; end + + # @param [String] materials_description + # @return [OpenSSL::PKey::RSA, String] encryption_key + def key_for(materials_description); end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb new file mode 100644 index 0000000..fa9d642 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb @@ -0,0 +1,169 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module EncryptionV2 + # @api private + class KmsCipherProvider + + def initialize(options = {}) + @kms_key_id = validate_kms_key(options[:kms_key_id]) + @kms_client = options[:kms_client] + @key_wrap_schema = validate_key_wrap( + options[:key_wrap_schema] + ) + @content_encryption_schema = validate_cek( + options[:content_encryption_schema] + ) + end + + # @return [Array] Creates and returns a new encryption + # envelope and encryption cipher. + def encryption_cipher(options = {}) + validate_key_for_encryption + encryption_context = build_encryption_context(@content_encryption_schema, options) + key_data = @kms_client.generate_data_key( + key_id: @kms_key_id, + encryption_context: encryption_context, + key_spec: 'AES_256' + ) + cipher = Utils.aes_encryption_cipher(:GCM) + cipher.key = key_data.plaintext + envelope = { + 'x-amz-key-v2' => encode64(key_data.ciphertext_blob), + 'x-amz-iv' => encode64(cipher.iv = cipher.random_iv), + 'x-amz-cek-alg' => @content_encryption_schema, + 'x-amz-tag-len' => (AES_GCM_TAG_LEN_BYTES * 8).to_s, + 'x-amz-wrap-alg' => @key_wrap_schema, + 'x-amz-matdesc' => Json.dump(encryption_context) + } + cipher.auth_data = '' # auth_data must be set after key and iv + [envelope, cipher] + end + + # @return [Cipher] Given an encryption envelope, returns a + # decryption cipher. + def decryption_cipher(envelope, options = {}) + encryption_context = Json.load(envelope['x-amz-matdesc']) + cek_alg = envelope['x-amz-cek-alg'] + + case envelope['x-amz-wrap-alg'] + when 'kms' + unless options[:security_profile] == :v2_and_legacy + raise Errors::LegacyDecryptionError + end + when 'kms+context' + if cek_alg != encryption_context['aws:x-amz-cek-alg'] + raise Errors::CEKAlgMismatchError + end + + if encryption_context != build_encryption_context(cek_alg, options) + raise Errors::DecryptionError, 'Value of encryption context from'\ + ' envelope does not match the provided encryption context' + end + when 'AES/GCM' + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with a KMS key and the x-amz-wrap-alg is AES/GCM.' + when 'RSA-OAEP-SHA1' + raise ArgumentError, 'Key mismatch - Client is configured' \ + ' with a KMS key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' 
+ else + raise ArgumentError, 'Unsupported wrap-alg: ' \ "#{envelope['x-amz-wrap-alg']}" + end + + any_cmk_mode = false || options[:kms_allow_decrypt_with_any_cmk] + decrypt_options = { + ciphertext_blob: decode64(envelope['x-amz-key-v2']), + encryption_context: encryption_context + } + unless any_cmk_mode + decrypt_options[:key_id] = @kms_key_id + end + + key = @kms_client.decrypt(decrypt_options).plaintext + iv = decode64(envelope['x-amz-iv']) + block_mode = + case cek_alg + when 'AES/CBC/PKCS5Padding' + :CBC + when 'AES/CBC/PKCS7Padding' + :CBC + when 'AES/GCM/NoPadding' + :GCM + else + type = envelope['x-amz-cek-alg'].inspect + msg = "unsupported content encrypting key (cek) format: #{type}" + raise Errors::DecryptionError, msg + end + Utils.aes_decryption_cipher(block_mode, key, iv) + end + + private + + def validate_key_wrap(key_wrap_schema) + case key_wrap_schema + when :kms_context then 'kms+context' + else + raise ArgumentError, "Unsupported key_wrap_schema: #{key_wrap_schema}" + end + end + + def validate_cek(content_encryption_schema) + case content_encryption_schema + when :aes_gcm_no_padding + "AES/GCM/NoPadding" + else + raise ArgumentError, "Unsupported content_encryption_schema: #{content_encryption_schema}" + end + end + + def validate_kms_key(kms_key_id) + if kms_key_id.nil? || kms_key_id.length.zero? + raise ArgumentError, 'KMS CMK ID was not specified. ' \ 'Please specify a CMK ID, ' \ 'or set kms_key_id: :kms_allow_decrypt_with_any_cmk to use ' \ 'any valid CMK from the object.' + end + + if kms_key_id.is_a?(Symbol) && kms_key_id != :kms_allow_decrypt_with_any_cmk + raise ArgumentError, 'kms_key_id must be a valid KMS CMK or be ' \ 'set to :kms_allow_decrypt_with_any_cmk' + end + kms_key_id + end + + def build_encryption_context(cek_alg, options = {}) + kms_context = (options[:kms_encryption_context] || {}) + .each_with_object({}) { |(k, v), h| h[k.to_s] = v } + if kms_context.include? 'aws:x-amz-cek-alg' + raise ArgumentError, 'Conflict in reserved KMS Encryption Context ' \ 'key aws:x-amz-cek-alg. This value is reserved for the S3 ' \ 'Encryption Client and cannot be set by the user.' + end + { + 'aws:x-amz-cek-alg' => cek_alg + }.merge(kms_context) + end + + def encode64(str) + Base64.encode64(str).split("\n") * "" + end + + def decode64(str) + Base64.decode64(str) + end + + def validate_key_for_encryption + if @kms_key_id == :kms_allow_decrypt_with_any_cmk + raise ArgumentError, 'Unable to encrypt/write objects with '\ 'kms_key_id = :kms_allow_decrypt_with_any_cmk. Provide ' \ 'a valid kms_key_id on client construction.' + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/materials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/materials.rb new file mode 100644 index 0000000..27759d6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/materials.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +require 'base64' + +module Aws + module S3 + module EncryptionV2 + class Materials + + # @option options [required, OpenSSL::PKey::RSA, String] :key + # The master key to use for encrypting/decrypting all objects. + # + # @option options [String] :description ('{}') + # The encryption materials description. This must be + # a JSON document string.
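+        # An illustrative construction (values made up; per #validate_key
+        # below, the key must be an RSA key or a 16-, 24- or 32-byte
+        # String):
+        #
+        #   Aws::S3::EncryptionV2::Materials.new(
+        #     key: OpenSSL::Cipher.new('aes-256-gcm').random_key, # 32 bytes
+        #     description: '{"master-key":"demo"}'
+        #   )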
+ # + def initialize(options = {}) + @key = validate_key(options[:key]) + @description = validate_desc(options[:description]) + end + + # @return [OpenSSL::PKey::RSA, String] + attr_reader :key + + # @return [String] + attr_reader :description + + private + + def validate_key(key) + case key + when OpenSSL::PKey::RSA then key + when String + if [32, 24, 16].include?(key.bytesize) + key + else + msg = 'invalid key, symmetric key required to be 16, 24, or '\ + '32 bytes in length, saw length ' + key.bytesize.to_s + raise ArgumentError, msg + end + else + msg = 'invalid encryption key, expected an OpenSSL::PKey::RSA key '\ + '(for asymmetric encryption) or a String (for symmetric '\ + 'encryption).' + raise ArgumentError, msg + end + end + + def validate_desc(description) + Json.load(description) + description + rescue Json::ParseError, EncodingError + msg = 'expected description to be a valid JSON document string' + raise ArgumentError, msg + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/utils.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/utils.rb new file mode 100644 index 0000000..3ca250a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryptionV2/utils.rb @@ -0,0 +1,103 @@ +# frozen_string_literal: true + +require 'openssl' + +module Aws + module S3 + module EncryptionV2 + # @api private + module Utils + + class << self + + def encrypt_aes_gcm(key, data, auth_data) + cipher = aes_encryption_cipher(:GCM, key) + cipher.iv = (iv = cipher.random_iv) + cipher.auth_data = auth_data + + iv + cipher.update(data) + cipher.final + cipher.auth_tag + end + + def encrypt_rsa(key, data, auth_data) + # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData + buf = [data.bytesize] + data.unpack('C*') + auth_data.unpack('C*') + key.public_encrypt(buf.pack('C*'), OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING) + end + + def decrypt(key, data) + begin + case key + when OpenSSL::PKey::RSA # asymmetric decryption + key.private_decrypt(data) + when String # symmetric Decryption + cipher = aes_cipher(:decrypt, :ECB, key, nil) + cipher.update(data) + cipher.final + end + rescue OpenSSL::Cipher::CipherError + msg = 'decryption failed, possible incorrect key' + raise Errors::DecryptionError, msg + end + end + + def decrypt_aes_gcm(key, data, auth_data) + # data is iv (12B) + key + tag (16B) + buf = data.unpack('C*') + iv = buf[0,12].pack('C*') # iv will always be 12 bytes + tag = buf[-16, 16].pack('C*') # tag is 16 bytes + enc_key = buf[12, buf.size - (12+16)].pack('C*') + cipher = aes_cipher(:decrypt, :GCM, key, iv) + cipher.auth_tag = tag + cipher.auth_data = auth_data + cipher.update(enc_key) + cipher.final + end + + # returns the decrypted data + auth_data + def decrypt_rsa(key, enc_data) + # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData + buf = key.private_decrypt(enc_data, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING).unpack('C*') + key_length = buf[0] + data = buf[1, key_length].pack('C*') + auth_data = buf[key_length+1, buf.length - key_length].pack('C*') + [data, auth_data] + end + + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param [String, nil] iv The initialization vector + def aes_encryption_cipher(block_mode, key = nil, iv = nil) + aes_cipher(:encrypt, block_mode, key, iv) + end + + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param 
[String, nil] iv The initialization vector + def aes_decryption_cipher(block_mode, key = nil, iv = nil) + aes_cipher(:decrypt, block_mode, key, iv) + end + + # @param [String] mode "encrypt" or "decrypt" + # @param [String] block_mode "CBC" or "ECB" + # @param [OpenSSL::PKey::RSA, String, nil] key + # @param [String, nil] iv The initialization vector + def aes_cipher(mode, block_mode, key, iv) + cipher = key ? + OpenSSL::Cipher.new("aes-#{cipher_size(key)}-#{block_mode.downcase}") : + OpenSSL::Cipher.new("aes-256-#{block_mode.downcase}") + cipher.send(mode) # encrypt or decrypt + cipher.key = key if key + cipher.iv = iv if iv + cipher + end + + # @param [String] key + # @return [Integer] + # @raise ArgumentError + def cipher_size(key) + key.bytesize * 8 + end + + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption_v2.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption_v2.rb new file mode 100644 index 0000000..5d17fa9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/encryption_v2.rb @@ -0,0 +1,23 @@ +require 'aws-sdk-s3/encryptionV2/client' +require 'aws-sdk-s3/encryptionV2/decrypt_handler' +require 'aws-sdk-s3/encryptionV2/default_cipher_provider' +require 'aws-sdk-s3/encryptionV2/encrypt_handler' +require 'aws-sdk-s3/encryptionV2/errors' +require 'aws-sdk-s3/encryptionV2/io_encrypter' +require 'aws-sdk-s3/encryptionV2/io_decrypter' +require 'aws-sdk-s3/encryptionV2/io_auth_decrypter' +require 'aws-sdk-s3/encryptionV2/key_provider' +require 'aws-sdk-s3/encryptionV2/kms_cipher_provider' +require 'aws-sdk-s3/encryptionV2/materials' +require 'aws-sdk-s3/encryptionV2/utils' +require 'aws-sdk-s3/encryptionV2/default_key_provider' + +module Aws + module S3 + module EncryptionV2 + AES_GCM_TAG_LEN_BYTES = 16 + EC_USER_AGENT = 'S3CryptoV2' + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_parameters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_parameters.rb new file mode 100644 index 0000000..eeec041 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_parameters.rb @@ -0,0 +1,142 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + # Endpoint parameters used to influence endpoints per request. + # + # @!attribute bucket + # The S3 bucket used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 bucket. + # + # @return [String] + # + # @!attribute region + # The AWS region used to dispatch the request. + # + # @return [String] + # + # @!attribute use_fips + # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error. + # + # @return [Boolean] + # + # @!attribute use_dual_stack + # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error. 
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute endpoint
+  #   Override the endpoint used to send this request
+  #
+  #   @return [String]
+  #
+  # @!attribute force_path_style
+  #   When true, force a path-style endpoint to be used where the bucket name is part of the path.
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute accelerate
+  #   When true, use S3 Accelerate. NOTE: Not all regions support S3 accelerate.
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute use_global_endpoint
+  #   Whether the global endpoint should be used, rather than the regional endpoint for us-east-1.
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute use_object_lambda_endpoint
+  #   Internal parameter to use object lambda endpoint for an operation (e.g. WriteGetObjectResponse)
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute disable_access_points
+  #   Internal parameter to disable Access Point Buckets
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute disable_multi_region_access_points
+  #   Whether multi-region access points (MRAP) should be disabled.
+  #
+  #   @return [Boolean]
+  #
+  # @!attribute use_arn_region
+  #   When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN's region when constructing the endpoint instead of the client's configured region.
+  #
+  #   @return [Boolean]
+  #
+  EndpointParameters = Struct.new(
+    :bucket,
+    :region,
+    :use_fips,
+    :use_dual_stack,
+    :endpoint,
+    :force_path_style,
+    :accelerate,
+    :use_global_endpoint,
+    :use_object_lambda_endpoint,
+    :disable_access_points,
+    :disable_multi_region_access_points,
+    :use_arn_region,
+  ) do
+    include Aws::Structure
+
+    # @api private
+    class << self
+      PARAM_MAP = {
+        'Bucket' => :bucket,
+        'Region' => :region,
+        'UseFIPS' => :use_fips,
+        'UseDualStack' => :use_dual_stack,
+        'Endpoint' => :endpoint,
+        'ForcePathStyle' => :force_path_style,
+        'Accelerate' => :accelerate,
+        'UseGlobalEndpoint' => :use_global_endpoint,
+        'UseObjectLambdaEndpoint' => :use_object_lambda_endpoint,
+        'DisableAccessPoints' => :disable_access_points,
+        'DisableMultiRegionAccessPoints' => :disable_multi_region_access_points,
+        'UseArnRegion' => :use_arn_region,
+      }.freeze
+    end
+
+    def initialize(options = {})
+      self[:bucket] = options[:bucket]
+      self[:region] = options[:region]
+      self[:use_fips] = options[:use_fips]
+      self[:use_fips] = false if self[:use_fips].nil?
+      if self[:use_fips].nil?
+        raise ArgumentError, "Missing required EndpointParameter: :use_fips"
+      end
+      self[:use_dual_stack] = options[:use_dual_stack]
+      self[:use_dual_stack] = false if self[:use_dual_stack].nil?
+      if self[:use_dual_stack].nil?
+        raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack"
+      end
+      self[:endpoint] = options[:endpoint]
+      self[:force_path_style] = options[:force_path_style]
+      self[:accelerate] = options[:accelerate]
+      self[:accelerate] = false if self[:accelerate].nil?
+      if self[:accelerate].nil?
+        raise ArgumentError, "Missing required EndpointParameter: :accelerate"
+      end
+      self[:use_global_endpoint] = options[:use_global_endpoint]
+      self[:use_global_endpoint] = false if self[:use_global_endpoint].nil?
+      if self[:use_global_endpoint].nil?
+        raise ArgumentError, "Missing required EndpointParameter: :use_global_endpoint"
+      end
+      self[:use_object_lambda_endpoint] = options[:use_object_lambda_endpoint]
+      self[:disable_access_points] = options[:disable_access_points]
+      self[:disable_multi_region_access_points] = options[:disable_multi_region_access_points]
+      self[:disable_multi_region_access_points] = false if self[:disable_multi_region_access_points].nil?
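+      # NOTE: throughout this generated constructor, required boolean
+      # parameters are defaulted to `false` immediately after assignment, so
+      # `nil` guards like the one below act as a safety net for the generated
+      # defaults and will not normally raise.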
+ if self[:disable_multi_region_access_points].nil? + raise ArgumentError, "Missing required EndpointParameter: :disable_multi_region_access_points" + end + self[:use_arn_region] = options[:use_arn_region] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_provider.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_provider.rb new file mode 100644 index 0000000..7db3d20 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoint_provider.rb @@ -0,0 +1,733 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + class EndpointProvider + def resolve_endpoint(parameters) + bucket = parameters.bucket + region = parameters.region + use_fips = parameters.use_fips + use_dual_stack = parameters.use_dual_stack + endpoint = parameters.endpoint + force_path_style = parameters.force_path_style + accelerate = parameters.accelerate + use_global_endpoint = parameters.use_global_endpoint + use_object_lambda_endpoint = parameters.use_object_lambda_endpoint + disable_access_points = parameters.disable_access_points + disable_multi_region_access_points = parameters.disable_multi_region_access_points + use_arn_region = parameters.use_arn_region + if Aws::Endpoints::Matchers.set?(region) + if Aws::Endpoints::Matchers.set?(bucket) && (hardware_type = Aws::Endpoints::Matchers.substring(bucket, 49, 50, true)) && (region_prefix = Aws::Endpoints::Matchers.substring(bucket, 8, 12, true)) && (abba_suffix = Aws::Endpoints::Matchers.substring(bucket, 0, 7, true)) && (outpost_id = Aws::Endpoints::Matchers.substring(bucket, 32, 49, true)) && (region_partition = Aws::Endpoints::Matchers.aws_partition(region)) && Aws::Endpoints::Matchers.string_equals?(abba_suffix, "--op-s3") + if Aws::Endpoints::Matchers.valid_host_label?(outpost_id, false) + if Aws::Endpoints::Matchers.string_equals?(hardware_type, "e") + if Aws::Endpoints::Matchers.string_equals?(region_prefix, "beta") + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) + raise ArgumentError, "Expected a endpoint to be specified but no endpoint was found" + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.ec2.#{url['authority']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]}) + end + end + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.ec2.s3-outposts.#{region}.#{region_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.string_equals?(hardware_type, "o") + if Aws::Endpoints::Matchers.string_equals?(region_prefix, "beta") + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) + raise ArgumentError, "Expected a endpoint to be specified but no endpoint was found" + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.op-#{outpost_id}.#{url['authority']}", headers: {}, properties: 
{"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]}) + end + end + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.op-#{outpost_id}.s3-outposts.#{region}.#{region_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]}) + end + raise ArgumentError, "Unrecognized hardware type: \"Expected hardware type o or e but got #{hardware_type}\"" + end + raise ArgumentError, "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`." + end + if Aws::Endpoints::Matchers.set?(bucket) + if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.parse_url(endpoint))) + raise ArgumentError, "Custom endpoint `#{endpoint}` was not a valid URI" + end + if Aws::Endpoints::Matchers.set?(force_path_style) && Aws::Endpoints::Matchers.boolean_equals?(force_path_style, true) + if Aws::Endpoints::Matchers.aws_parse_arn(bucket) + raise ArgumentError, "Path-style addressing cannot be used with ARN buckets" + end + if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket)) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) + raise ArgumentError, "Cannot set dual-stack in combination with a custom endpoint." + end + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && 
Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + 
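+              # Worked example (illustrative, assuming the standard aws
+              # partition where dnsSuffix is amazonaws.com): with
+              # force_path_style: true, use_fips: true, use_dual_stack: false,
+              # no custom endpoint, region "aws-global", and bucket
+              # "my-bucket", the branch just above resolves to
+              #   https://s3-fips.us-east-1.amazonaws.com/my-bucket
+              # signed with signingRegion us-east-1.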
end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", 
"signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if 
Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + end + raise ArgumentError, "Path-style addressing cannot be used with S3 Accelerate" + end + raise ArgumentError, "A valid partition could not be determined" + end + end + if Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, false) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.valid_host_label?(region, false) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn") + raise ArgumentError, "Partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Accelerate cannot be used with FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && 
Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn") + raise ArgumentError, "S3 Accelerate cannot be used in this region" + end + if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "Host override cannot be combined with Dualstack, FIPS, or S3 Accelerate" + end + if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "Host override cannot be combined with Dualstack, FIPS, or S3 Accelerate" + end + if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "Host override cannot be combined with Dualstack, FIPS, or S3 Accelerate" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && 
Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: 
"https://#{bucket}.s3-accelerate.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if 
Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", 
"signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && 
Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && 
Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + end + raise ArgumentError, "Invalid region: region was not a valid DNS name." 
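+            # (Illustrative) valid_host_label? requires a DNS-compatible
+            # region label such as "us-west-2"; a value like "us_west_2"
+            # fails the check and raises just above.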
+ end + raise ArgumentError, "A valid partition could not be determined" + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(url, "scheme"), "http") && Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.valid_host_label?(region, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + raise ArgumentError, "Invalid region: region was not a valid DNS name." + end + raise ArgumentError, "A valid partition could not be determined" + end + if (bucket_arn = Aws::Endpoints::Matchers.aws_parse_arn(bucket)) + if (arn_type = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[0]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(arn_type, "")) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3-object-lambda") + if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") + if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(access_point_name, "")) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "S3 Object Lambda does not support Dual-stack" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "S3 Object Lambda does not support S3 Accelerate" + end + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) + if Aws::Endpoints::Matchers.set?(disable_access_points) && Aws::Endpoints::Matchers.boolean_equals?(disable_access_points, true) + raise ArgumentError, "Access points are not supported for this operation" + end + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]"))) + if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) + raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" + end + if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), Aws::Endpoints::Matchers.attr(partition_result, "name")) + if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), "") + raise ArgumentError, "Invalid ARN: Missing account id" + end + if 
Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) + if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, false) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), "aws-cn") + raise ArgumentError, "Partition does not support FIPS" + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{access_point_name}-#{bucket_arn['accountId']}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-object-lambda-fips.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-object-lambda.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + raise ArgumentError, "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `#{access_point_name}`" + end + raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" + end + raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" + end + raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" + end + raise ArgumentError, "A valid partition could not be determined" + end + raise ArgumentError, "Could not load partition for ARN region `#{bucket_arn['region']}`" + end + raise ArgumentError, "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." 
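+ # End of the s3-object-lambda ARN branch. Plain access point ARNs are resolved next; an access point ARN whose region component is empty is treated further below as a Multi-Region Access Point.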
+ end + raise ArgumentError, "Invalid ARN: bucket ARN is missing a region" + end + raise ArgumentError, "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" + end + raise ArgumentError, "Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `#{arn_type}`" + end + if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") + if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(access_point_name, "")) + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) + if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) + if Aws::Endpoints::Matchers.set?(disable_access_points) && Aws::Endpoints::Matchers.boolean_equals?(disable_access_points, true) + raise ArgumentError, "Access points are not supported for this operation" + end + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]"))) + if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) + raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" + end + if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), "#{partition_result['name']}") + if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3") + if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) + if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, false) + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "Access Points do not support S3 Accelerate" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), "aws-cn") + raise ArgumentError, "Partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) + raise ArgumentError, "DualStack cannot be combined with a Host override (PrivateLink)" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint-fips.dualstack.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && 
Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint-fips.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint.dualstack.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{access_point_name}-#{bucket_arn['accountId']}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + end + raise ArgumentError, "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `#{access_point_name}`" + end + raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" + end + raise ArgumentError, "Invalid ARN: The ARN was not for the S3 service, found: #{bucket_arn['service']}" + end + raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" + end + raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" + end + raise ArgumentError, "A valid partition could not be determined" + end + raise ArgumentError, "Could not load partition for ARN region `#{bucket_arn['region']}`" + end + raise ArgumentError, "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." + end + raise ArgumentError, "Invalid ARN: bucket ARN is missing a region" + end + end + if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, true) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "S3 MRAP does not support dual-stack" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "S3 MRAP does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "S3 MRAP does not support S3 Accelerate" + end + if Aws::Endpoints::Matchers.boolean_equals?(disable_multi_region_access_points, true) + raise ArgumentError, "Invalid configuration: Multi-Region Access Point ARNs are disabled." 
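+ # Multi-Region Access Points resolve to the partition's s3-global endpoint and are signed with SigV4A ("signingRegionSet" => ["*"]); s3-outposts ARNs are handled after this branch.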
+ end + if (mrap_partition = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(mrap_partition, "name"), Aws::Endpoints::Matchers.attr(bucket_arn, "partition")) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}.accesspoint.s3-global.#{mrap_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4a", "signingName"=>"s3", "signingRegionSet"=>["*"]}]}) + end + raise ArgumentError, "Client was configured for partition `#{mrap_partition['name']}` but bucket referred to partition `#{bucket_arn['partition']}`" + end + raise ArgumentError, "#{region} was not a valid region" + end + raise ArgumentError, "Invalid Access Point Name" + end + raise ArgumentError, "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" + end + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3-outposts") + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "S3 Outposts does not support Dual-stack" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + raise ArgumentError, "S3 Outposts does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "S3 Outposts does not support S3 Accelerate" + end + if Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[4]")) + raise ArgumentError, "Invalid Arn: Outpost Access Point ARN contains sub resources" + end + if (outpost_id = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) + if Aws::Endpoints::Matchers.valid_host_label?(outpost_id, false) + if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) + raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" + end + if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), Aws::Endpoints::Matchers.attr(partition_result, "name")) + if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) + if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) + if (outpost_type = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]")) + if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[3]")) + if Aws::Endpoints::Matchers.string_equals?(outpost_type, "accesspoint") + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.#{outpost_id}.#{url['authority']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: 
"https://#{access_point_name}-#{bucket_arn['accountId']}.#{outpost_id}.s3-outposts.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{bucket_arn['region']}"}]}) + end + raise ArgumentError, "Expected an outpost type `accesspoint`, found #{outpost_type}" + end + raise ArgumentError, "Invalid ARN: expected an access point name" + end + raise ArgumentError, "Invalid ARN: Expected a 4-component resource" + end + raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" + end + raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" + end + raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" + end + raise ArgumentError, "A valid partition could not be determined" + end + raise ArgumentError, "Could not load partition for ARN region #{bucket_arn['region']}" + end + raise ArgumentError, "Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{outpost_id}`" + end + raise ArgumentError, "Invalid ARN: The Outpost Id was not set" + end + raise ArgumentError, "Invalid ARN: Unrecognized format: #{bucket} (type: #{arn_type})" + end + raise ArgumentError, "Invalid ARN: No ARN type specified" + end + if (arn_prefix = Aws::Endpoints::Matchers.substring(bucket, 0, 4, false)) && Aws::Endpoints::Matchers.string_equals?(arn_prefix, "arn:") && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.aws_parse_arn(bucket))) + raise ArgumentError, "Invalid ARN: `#{bucket}` was not a valid ARN" + end + if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket)) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) + raise ArgumentError, "Cannot set dual-stack in combination with a custom endpoint." 
+ end + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: 
"#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && 
Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = 
Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && 
Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + end + raise ArgumentError, "Path-style addressing cannot be used with S3 Accelerate" + end + raise ArgumentError, "A valid partition could not be determined" + end + end + if Aws::Endpoints::Matchers.set?(use_object_lambda_endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_object_lambda_endpoint, true) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.valid_host_label?(region, true) + if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) + raise ArgumentError, "S3 Object Lambda does not support Dual-stack" + end + if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) + raise ArgumentError, "S3 Object Lambda does not support S3 Accelerate" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn") + raise ArgumentError, "Partition does not support FIPS" + end + if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-object-lambda-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://s3-object-lambda.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) + end + raise ArgumentError, "Invalid region: region was not a valid DNS name." 
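+ # When no bucket parameter is set (bucket-independent operations such as ListBuckets), the resolver falls back to the generic S3 endpoint for the region, honoring the same FIPS / dual-stack / custom-endpoint / global-endpoint combinations.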
+ end + raise ArgumentError, "A valid partition could not be determined" + end + if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(bucket)) + if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) + if Aws::Endpoints::Matchers.valid_host_label?(region, true) + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn") + raise ArgumentError, "Partition does not support FIPS" + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) 
&& Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && 
Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = 
Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && 
Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, 
"name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) + if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) + return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) + end + end + raise ArgumentError, "Invalid region: region was not a valid DNS name." + end + raise ArgumentError, "A valid partition could not be determined" + end + end + raise ArgumentError, "A region must be set when sending requests to S3." + raise ArgumentError, 'No endpoint could be resolved' + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoints.rb new file mode 100644 index 0000000..cb34e52 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/endpoints.rb @@ -0,0 +1,2149 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information:
+# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
+#
+# WARNING ABOUT GENERATED CODE
+
+
+module Aws::S3
+  module Endpoints
+
+    # Builds the endpoint parameters shared by every operation in this
+    # module. A custom endpoint is read from the client configuration only
+    # when one was set explicitly (i.e. the configured endpoint is not the
+    # SDK-constructed regional one); +overrides+ lets individual operations
+    # replace single parameters.
+    def self.build_parameters(context, overrides = {})
+      unless context.config.regional_endpoint
+        endpoint = context.config.endpoint.to_s
+      end
+      defaults = {
+        bucket: context.params[:bucket],
+        region: context.config.region,
+        use_fips: context.config.use_fips_endpoint,
+        use_dual_stack: context[:use_dualstack_endpoint],
+        endpoint: endpoint,
+        force_path_style: context.config.force_path_style,
+        accelerate: context[:use_accelerate_endpoint],
+        use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy',
+        use_object_lambda_endpoint: nil,
+        disable_access_points: nil,
+        disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points,
+        use_arn_region: context.config.s3_use_arn_region,
+      }
+      Aws::S3::EndpointParameters.new(defaults.merge(overrides))
+    end
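A minimal sketch of how these parameters feed endpoint resolution, assuming
aws-sdk-s3 >= 1.117 (the first release shipping the rules-based
Aws::S3::EndpointProvider); the bucket name is hypothetical:

    require 'aws-sdk-s3'

    # The parameters a builder above would produce for a plain GetObject
    # call against a client with default configuration.
    params = Aws::S3::EndpointParameters.new(
      bucket: 'example-bucket',   # hypothetical bucket name
      region: 'us-east-1',
      use_fips: false,
      use_dual_stack: false,
      endpoint: nil,              # no custom endpoint configured
      force_path_style: false,
      accelerate: false,
      use_global_endpoint: false,
      use_object_lambda_endpoint: nil,
      disable_access_points: nil,
      disable_multi_region_access_points: false,
      use_arn_region: true
    )

    endpoint = Aws::S3::EndpointProvider.new.resolve_endpoint(params)
    endpoint.url # => "https://example-bucket.s3.us-east-1.amazonaws.com"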
+
+    # Every operation listed here uses the default parameters unchanged;
+    # each name becomes a class exposing a .build(context) class method.
+    DEFAULT_ENDPOINT_OPERATIONS = %w[
+      AbortMultipartUpload CompleteMultipartUpload CopyObject
+      CreateMultipartUpload DeleteBucket DeleteBucketAnalyticsConfiguration
+      DeleteBucketCors DeleteBucketEncryption
+      DeleteBucketIntelligentTieringConfiguration
+      DeleteBucketInventoryConfiguration DeleteBucketLifecycle
+      DeleteBucketMetricsConfiguration DeleteBucketOwnershipControls
+      DeleteBucketPolicy DeleteBucketReplication DeleteBucketTagging
+      DeleteBucketWebsite DeleteObject DeleteObjectTagging DeleteObjects
+      DeletePublicAccessBlock GetBucketAccelerateConfiguration GetBucketAcl
+      GetBucketAnalyticsConfiguration GetBucketCors GetBucketEncryption
+      GetBucketIntelligentTieringConfiguration
+      GetBucketInventoryConfiguration GetBucketLifecycle
+      GetBucketLifecycleConfiguration GetBucketLocation GetBucketLogging
+      GetBucketMetricsConfiguration GetBucketNotification
+      GetBucketNotificationConfiguration GetBucketOwnershipControls
+      GetBucketPolicy GetBucketPolicyStatus GetBucketReplication
+      GetBucketRequestPayment GetBucketTagging GetBucketVersioning
+      GetBucketWebsite GetObject GetObjectAcl GetObjectAttributes
+      GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention
+      GetObjectTagging GetObjectTorrent GetPublicAccessBlock HeadBucket
+      HeadObject ListBucketAnalyticsConfigurations
+      ListBucketIntelligentTieringConfigurations
+      ListBucketInventoryConfigurations ListBucketMetricsConfigurations
+      ListMultipartUploads ListObjectVersions ListObjects ListObjectsV2
+      ListParts PutBucketAccelerateConfiguration PutBucketAcl
+      PutBucketAnalyticsConfiguration PutBucketCors PutBucketEncryption
+      PutBucketIntelligentTieringConfiguration
+      PutBucketInventoryConfiguration PutBucketLifecycle
+      PutBucketLifecycleConfiguration PutBucketLogging
+      PutBucketMetricsConfiguration PutBucketNotification
+      PutBucketNotificationConfiguration PutBucketOwnershipControls
+      PutBucketPolicy PutBucketReplication PutBucketRequestPayment
+      PutBucketTagging PutBucketVersioning PutBucketWebsite PutObject
+      PutObjectAcl PutObjectLegalHold PutObjectLockConfiguration
+      PutObjectRetention
+    ].freeze
+
+    DEFAULT_ENDPOINT_OPERATIONS.each do |operation|
+      const_set(operation, Class.new do
+        def self.build(context)
+          Aws::S3::Endpoints.build_parameters(context)
+        end
+      end)
+    end
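Because the builders only touch context.params, context.config, and the
context's metadata hash, they can be exercised without a live client. A
sketch with hypothetical FakeConfig/FakeContext stand-ins for
Seahorse::Client::RequestContext:

    require 'aws-sdk-s3'

    # Only the configuration attributes the builders actually read.
    FakeConfig = Struct.new(
      :regional_endpoint, :endpoint, :region, :use_fips_endpoint,
      :force_path_style, :s3_us_east_1_regional_endpoint,
      :s3_disable_multiregion_access_points, :s3_use_arn_region,
      keyword_init: true
    )

    # Mimics RequestContext: #params, #config, and #[] for metadata.
    class FakeContext
      attr_reader :params, :config

      def initialize(params:, config:, metadata: {})
        @params = params
        @config = config
        @metadata = metadata
      end

      def [](key)
        @metadata[key]
      end
    end

    context = FakeContext.new(
      params: { bucket: 'example-bucket' },   # hypothetical
      config: FakeConfig.new(
        regional_endpoint: true,              # no custom endpoint set
        region: 'us-east-1',
        use_fips_endpoint: false,
        force_path_style: false,
        s3_us_east_1_regional_endpoint: 'regional',
        s3_disable_multiregion_access_points: false,
        s3_use_arn_region: true
      )
    )

    Aws::S3::Endpoints::GetObject.build(context).bucket # => "example-bucket"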
+
+    # CreateBucket is the one operation here that overrides a default:
+    # access points are always disabled when creating a bucket.
+    class CreateBucket
+      def self.build(context)
+        Aws::S3::Endpoints.build_parameters(context, disable_access_points: true)
+      end
+    end
+
+    # ListBuckets is account-scoped, so there is no bucket to route on.
+    class ListBuckets
+      def self.build(context)
+        Aws::S3::Endpoints.build_parameters(context, bucket: nil)
+      end
+    end
+
+    class PutObjectTagging
+      def self.build(context)
+        unless context.config.regional_endpoint
+          endpoint = context.config.endpoint.to_s
+        end
+        Aws::S3::EndpointParameters.new(
+          bucket: context.params[:bucket],
+          region: context.config.region,
+          use_fips: context.config.use_fips_endpoint,
+          use_dual_stack: context[:use_dualstack_endpoint],
+          endpoint: endpoint,
+          force_path_style: context.config.force_path_style,
+          accelerate: context[:use_accelerate_endpoint],
+          use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy',
+
use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class PutPublicAccessBlock + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: context.params[:bucket], + region: context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class RestoreObject + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: context.params[:bucket], + region: context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class SelectObjectContent + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: context.params[:bucket], + region: context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class UploadPart + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: context.params[:bucket], + region: context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class UploadPartCopy + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: context.params[:bucket], + region: 
context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: nil, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + class WriteGetObjectResponse + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::S3::EndpointParameters.new( + bucket: nil, + region: context.config.region, + use_fips: context.config.use_fips_endpoint, + use_dual_stack: context[:use_dualstack_endpoint], + endpoint: endpoint, + force_path_style: context.config.force_path_style, + accelerate: context[:use_accelerate_endpoint], + use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', + use_object_lambda_endpoint: true, + disable_access_points: nil, + disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, + use_arn_region: context.config.s3_use_arn_region, + ) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/errors.rb new file mode 100644 index 0000000..092945d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/errors.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + # When S3 returns an error response, the Ruby SDK constructs and raises an error. + # These errors all extend Aws::S3::Errors::ServiceError < {Aws::Errors::ServiceError} + # + # You can rescue all S3 errors using ServiceError: + # + # begin + # # do stuff + # rescue Aws::S3::Errors::ServiceError + # # rescues all S3 API errors + # end + # + # + # ## Request Context + # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns + # information about the request that generated the error. + # See {Seahorse::Client::RequestContext} for more information. + # + # ## Error Classes + # * {BucketAlreadyExists} + # * {BucketAlreadyOwnedByYou} + # * {InvalidObjectState} + # * {NoSuchBucket} + # * {NoSuchKey} + # * {NoSuchUpload} + # * {ObjectAlreadyInActiveTierError} + # * {ObjectNotInActiveTierError} + # + # Additionally, error classes are dynamically generated for service errors based on the error code + # if they are not defined above. 
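+  #
+  # For example, a minimal sketch (the client, bucket, and key here are
+  # illustrative) that rescues one specific error class and inspects the
+  # request context described above:
+  #
+  #     begin
+  #       client.get_object(bucket: 'my-bucket', key: 'my-key')
+  #     rescue Aws::S3::Errors::NoSuchKey => e
+  #       puts e.context.operation_name # => :get_object
+  #     end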
+ module Errors + + extend Aws::Errors::DynamicErrors + + class BucketAlreadyExists < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::BucketAlreadyExists] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class BucketAlreadyOwnedByYou < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::BucketAlreadyOwnedByYou] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class InvalidObjectState < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::InvalidObjectState] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def storage_class + @data[:storage_class] + end + + # @return [String] + def access_tier + @data[:access_tier] + end + end + + class NoSuchBucket < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::NoSuchBucket] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class NoSuchKey < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::NoSuchKey] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class NoSuchUpload < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::NoSuchUpload] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class ObjectAlreadyInActiveTierError < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::ObjectAlreadyInActiveTierError] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + class ObjectNotInActiveTierError < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::S3::Types::ObjectNotInActiveTierError] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/event_streams.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/event_streams.rb new file mode 100644 index 0000000..5d83eeb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/event_streams.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + module EventStreams + class SelectObjectContentEventStream + + def initialize + @event_emitter = Aws::EventEmitter.new + end + + def on_records_event(&block) + @event_emitter.on(:records, block) if block_given? + end + + def on_stats_event(&block) + @event_emitter.on(:stats, block) if block_given? 
+ end + + def on_progress_event(&block) + @event_emitter.on(:progress, block) if block_given? + end + + def on_cont_event(&block) + @event_emitter.on(:cont, block) if block_given? + end + + def on_end_event(&block) + @event_emitter.on(:end, block) if block_given? + end + + def on_error_event(&block) + @event_emitter.on(:error, block) if block_given? + end + + def on_initial_response_event(&block) + @event_emitter.on(:initial_response, block) if block_given? + end + + def on_unknown_event(&block) + @event_emitter.on(:unknown_event, block) if block_given? + end + + def on_event(&block) + on_records_event(&block) + on_stats_event(&block) + on_progress_event(&block) + on_cont_event(&block) + on_end_event(&block) + on_error_event(&block) + on_initial_response_event(&block) + on_unknown_event(&block) + end + + # @api private + # @return Aws::EventEmitter + attr_reader :event_emitter + + end + + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_downloader.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_downloader.rb new file mode 100644 index 0000000..d72a1f3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_downloader.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +require 'pathname' +require 'thread' +require 'set' +require 'tmpdir' + +module Aws + module S3 + # @api private + class FileDownloader + + MIN_CHUNK_SIZE = 5 * 1024 * 1024 + MAX_PARTS = 10_000 + THREAD_COUNT = 10 + + def initialize(options = {}) + @client = options[:client] || Client.new + end + + # @return [Client] + attr_reader :client + + def download(destination, options = {}) + @path = destination + @mode = options[:mode] || 'auto' + @thread_count = options[:thread_count] || THREAD_COUNT + @chunk_size = options[:chunk_size] + @params = { + bucket: options[:bucket], + key: options[:key], + } + @params[:version_id] = options[:version_id] if options[:version_id] + + case @mode + when 'auto' then multipart_download + when 'single_request' then single_request + when 'get_range' + if @chunk_size + resp = @client.head_object(@params) + multithreaded_get_by_ranges(construct_chunks(resp.content_length)) + else + msg = 'In :get_range mode, :chunk_size must be provided' + raise ArgumentError, msg + end + else + msg = "Invalid mode #{@mode} provided, "\ + 'mode should be :single_request, :get_range or :auto' + raise ArgumentError, msg + end + end + + private + + def multipart_download + resp = @client.head_object(@params.merge(part_number: 1)) + count = resp.parts_count + if count.nil? || count <= 1 + resp.content_length < MIN_CHUNK_SIZE ? + single_request : + multithreaded_get_by_ranges(construct_chunks(resp.content_length)) + else + # partNumber is an option + resp = @client.head_object(@params) + resp.content_length < MIN_CHUNK_SIZE ? + single_request : + compute_mode(resp.content_length, count) + end + end + + def compute_mode(file_size, count) + chunk_size = compute_chunk(file_size) + part_size = (file_size.to_f / count.to_f).ceil + if chunk_size < part_size + multithreaded_get_by_ranges(construct_chunks(file_size)) + else + multithreaded_get_by_parts(count) + end + end + + def construct_chunks(file_size) + offset = 0 + default_chunk_size = compute_chunk(file_size) + chunks = [] + while offset <= file_size + progress = offset + default_chunk_size + chunks << "bytes=#{offset}-#{progress < file_size ? 
progress : file_size}" + offset = progress + 1 + end + chunks + end + + def compute_chunk(file_size) + if @chunk_size && @chunk_size > file_size + raise ArgumentError, ":chunk_size shouldn't exceed total file size." + else + chunk_size = @chunk_size || [ + (file_size.to_f / MAX_PARTS).ceil, + MIN_CHUNK_SIZE + ].max.to_i + chunk_size -= 1 if file_size % chunk_size == 1 + chunk_size + end + end + + def batches(chunks, mode) + chunks = (1..chunks) if mode.eql? 'part_number' + chunks.each_slice(@thread_count).to_a + end + + def multithreaded_get_by_ranges(chunks) + thread_batches(chunks, 'range') + end + + def multithreaded_get_by_parts(parts) + thread_batches(parts, 'part_number') + end + + def thread_batches(chunks, param) + batches(chunks, param).each do |batch| + threads = [] + batch.each do |chunk| + threads << Thread.new do + resp = @client.get_object( + @params.merge(param.to_sym => chunk) + ) + write(resp) + end + end + threads.each(&:join) + end + end + + def write(resp) + range, _ = resp.content_range.split(' ').last.split('/') + head, _ = range.split('-').map {|s| s.to_i} + File.write(@path, resp.body.read, head) + end + + def single_request + @client.get_object( + @params.merge(response_target: @path) + ) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_part.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_part.rb new file mode 100644 index 0000000..f20314f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_part.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +module Aws + module S3 + + # A utility class that provides an IO-like interface to a portion of a file + # on disk. + # @api private + class FilePart + + # @option options [required, String, Pathname, File, Tempfile] :source + # The file to upload. + # + # @option options [required, Integer] :offset The file part will read + # starting at this byte offset. + # + # @option options [required, Integer] :size The maximum number of bytes to + # read from the `:offset`. + def initialize(options = {}) + @source = options[:source] + @first_byte = options[:offset] + @last_byte = @first_byte + options[:size] + @size = options[:size] + @file = nil + end + + # @return [String, Pathname, File, Tempfile] + attr_reader :source + + # @return [Integer] + attr_reader :first_byte + + # @return [Integer] + attr_reader :last_byte + + # @return [Integer] + attr_reader :size + + def read(bytes = nil, output_buffer = nil) + open_file unless @file + read_from_file(bytes, output_buffer) + end + + def rewind + if @file + @file.seek(@first_byte) + @position = @first_byte + end + 0 + end + + def close + @file.close if @file + end + + private + + def open_file + @file = File.open(@source, 'rb') + rewind + end + + def read_from_file(bytes, output_buffer) + length = [remaining_bytes, *bytes].min + data = @file.read(length, output_buffer) + + @position += data ? data.bytesize : 0 + + data.to_s unless bytes && (data.nil? || data.empty?) 
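+        # (returning nil rather than "" when a length was requested mirrors
+        # IO#read semantics at EOF)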
+ end + + def remaining_bytes + @last_byte - @position + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_uploader.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_uploader.rb new file mode 100644 index 0000000..917d2c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/file_uploader.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +require 'pathname' + +module Aws + module S3 + # @api private + class FileUploader + + ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024 + + # @param [Hash] options + # @option options [Client] :client + # @option options [Integer] :multipart_threshold (104857600) + def initialize(options = {}) + @options = options + @client = options[:client] || Client.new + @multipart_threshold = options[:multipart_threshold] || + ONE_HUNDRED_MEGABYTES + end + + # @return [Client] + attr_reader :client + + # @return [Integer] Files larger than or equal to this in bytes are uploaded + # using a {MultipartFileUploader}. + attr_reader :multipart_threshold + + # @param [String, Pathname, File, Tempfile] source The file to upload. + # @option options [required, String] :bucket The bucket to upload to. + # @option options [required, String] :key The key for the object. + # @option options [Proc] :progress_callback + # A Proc that will be called when each chunk of the upload is sent. + # It will be invoked with [bytes_read], [total_sizes] + # @option options [Integer] :thread_count + # The thread count to use for multipart uploads. Ignored for + # objects smaller than the multipart threshold. + # @return [void] + def upload(source, options = {}) + if File.size(source) >= multipart_threshold + MultipartFileUploader.new(@options).upload(source, options) + else + # remove multipart parameters not supported by put_object + options.delete(:thread_count) + put_object(source, options) + end + end + + private + + def open_file(source) + if String === source || Pathname === source + File.open(source, 'rb') { |file| yield(file) } + else + yield(source) + end + end + + def put_object(source, options) + if (callback = options.delete(:progress_callback)) + options[:on_chunk_sent] = single_part_progress(callback) + end + open_file(source) do |file| + @client.put_object(options.merge(body: file)) + end + end + + def single_part_progress(progress_callback) + proc do |_chunk, bytes_read, total_size| + progress_callback.call([bytes_read], [total_size]) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/legacy_signer.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/legacy_signer.rb new file mode 100644 index 0000000..c3ecd1e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/legacy_signer.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +require 'set' +require 'time' +require 'openssl' +require 'cgi' +require 'aws-sdk-core/query' + +module Aws + module S3 + # @api private + class LegacySigner + + SIGNED_QUERYSTRING_PARAMS = Set.new(%w( + + acl delete cors lifecycle location logging notification partNumber + policy requestPayment restore tagging torrent uploadId uploads + versionId versioning versions website replication requestPayment + accelerate + + response-content-type response-content-language + response-expires response-cache-control + response-content-disposition response-content-encoding + + )) + + def self.sign(context) + new( + context.config.credentials, + context.params, 
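+          # with path-style addressing the bucket stays in the request path
+          # and is not prepended separately to the canonicalized resource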
+          context.config.force_path_style
+        ).sign(context.http_request)
+      end
+
+      # @param [CredentialProvider] credentials
+      def initialize(credentials, params, force_path_style)
+        @credentials = credentials.credentials
+        @params = Query::ParamList.new
+        params.each_pair do |param_name, param_value|
+          @params.set(param_name, param_value)
+        end
+        @force_path_style = force_path_style
+      end
+
+      attr_reader :credentials, :params
+
+      def sign(request)
+        if token = credentials.session_token
+          request.headers["X-Amz-Security-Token"] = token
+        end
+        request.headers['Authorization'] = authorization(request)
+      end
+
+      def authorization(request)
+        "AWS #{credentials.access_key_id}:#{signature(request)}"
+      end
+
+      def signature(request)
+        string_to_sign = string_to_sign(request)
+        signature = digest(credentials.secret_access_key, string_to_sign)
+        uri_escape(signature)
+      end
+
+      def digest(secret, string_to_sign)
+        Base64.encode64(hmac(secret, string_to_sign)).strip
+      end
+
+      def hmac(key, value)
+        OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha1'), key, value)
+      end
+
+      # From the S3 developer guide:
+      #
+      #   StringToSign =
+      #     HTTP-Verb + "\n" +
+      #     content-md5 + "\n" +
+      #     content-type + "\n" +
+      #     date + "\n" +
+      #     CanonicalizedAmzHeaders + CanonicalizedResource;
+      #
+      def string_to_sign(request)
+        [
+          request.http_method,
+          request.headers.values_at('Content-Md5', 'Content-Type').join("\n"),
+          signing_string_date(request),
+          canonicalized_headers(request),
+          canonicalized_resource(request.endpoint),
+        ].flatten.compact.join("\n")
+      end
+
+      def signing_string_date(request)
+        # if a date is provided via x-amz-date then we should omit the
+        # Date header from the signing string (should appear as a blank line)
+        if request.headers.detect{|k,v| k.to_s =~ /^x-amz-date$/i }
+          ''
+        else
+          request.headers['Date'] = Time.now.httpdate
+        end
+      end
+
+      # CanonicalizedAmzHeaders
+      #
+      # See the developer guide for more information on how this element
+      # is generated.
+      #
+      def canonicalized_headers(request)
+        x_amz = request.headers.select{|k, v| k =~ /^x-amz-/i }
+        x_amz = x_amz.collect{|k, v| [k.downcase, v] }
+        x_amz = x_amz.sort_by{|k, v| k }
+        x_amz = x_amz.collect{|k, v| "#{k}:#{v.to_s.strip}" }.join("\n")
+        x_amz == '' ? nil : x_amz
+      end
+
+      # From the S3 developer guide:
+      #
+      #   CanonicalizedResource =
+      #     [ "/" + Bucket ] +
+      #     <HTTP-Request-URI, from the protocol name up to the query string> +
+      #     [ sub-resource, if present. e.g. "?acl", "?location",
+      #       "?logging", or "?torrent"];
+      #
+      # @api private
+      def canonicalized_resource(endpoint)
+
+        parts = []
+
+        # virtual hosted-style requests require the hostname to appear
+        # in the canonicalized resource prefixed by a forward slash.
+        if bucket = params[:bucket]
+          bucket = bucket.value
+          ssl = endpoint.scheme == 'https'
+          if Plugins::BucketDns.dns_compatible?(bucket, ssl) && !@force_path_style
+            parts << "/#{bucket}"
+          end
+        end
+
+        # append the path name (no querystring)
+        parts << endpoint.path
+
+        # lastly, any sub-resource querystring params need to be appended
+        # in lexicographical order, joined by '&' and prefixed by '?'
+        params = signed_querystring_params(endpoint)
+
+        unless params.empty?
+          parts << '?'
+ parts << params.sort.collect{|p| p.to_s }.join('&') + end + + parts.join + end + + def signed_querystring_params(endpoint) + endpoint.query.to_s.split('&').select do |p| + SIGNED_QUERYSTRING_PARAMS.include?(p.split('=')[0]) + end.map { |p| CGI.unescape(p) } + end + + def uri_escape(s) + #URI.escape(s) + + # (0..255).each {|c| + # s = [c].pack("C") + # e = [ + # CGI.escape(s), + # ERB::Util.url_encode(s), + # URI.encode_www_form_component(s), + # WEBrick::HTTPUtils.escape_form(s), + # WEBrick::HTTPUtils.escape(s), + # URI.escape(s), + # URI::DEFAULT_PARSER.escape(s) + # ] + # next if e.uniq.length == 1 + # puts("%5s %5s %5s %5s %5s %5s %5s %5s" % ([s.inspect] + e)) + # } + URI::DEFAULT_PARSER.escape(s) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_file_uploader.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_file_uploader.rb new file mode 100644 index 0000000..c3851ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_file_uploader.rb @@ -0,0 +1,246 @@ +# frozen_string_literal: true + +require 'pathname' +require 'set' + +module Aws + module S3 + # @api private + class MultipartFileUploader + + MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB + + FILE_TOO_SMALL = "unable to multipart upload files smaller than 5MB" + + MAX_PARTS = 10_000 + + THREAD_COUNT = 10 + + # @api private + CREATE_OPTIONS = Set.new( + Client.api.operation(:create_multipart_upload).input.shape.member_names + ) + + COMPLETE_OPTIONS = Set.new( + Client.api.operation(:complete_multipart_upload).input.shape.member_names + ) + + # @api private + UPLOAD_PART_OPTIONS = Set.new( + Client.api.operation(:upload_part).input.shape.member_names + ) + + # @option options [Client] :client + # @option options [Integer] :thread_count (THREAD_COUNT) + def initialize(options = {}) + @client = options[:client] || Client.new + @thread_count = options[:thread_count] || THREAD_COUNT + end + + # @return [Client] + attr_reader :client + + # @param [String, Pathname, File, Tempfile] source The file to upload. + # @option options [required, String] :bucket The bucket to upload to. + # @option options [required, String] :key The key for the object. + # @option options [Proc] :progress_callback + # A Proc that will be called when each chunk of the upload is sent. + # It will be invoked with [bytes_read], [total_sizes] + # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse + def upload(source, options = {}) + if File.size(source) < MIN_PART_SIZE + raise ArgumentError, FILE_TOO_SMALL + else + upload_id = initiate_upload(options) + parts = upload_parts(upload_id, source, options) + complete_upload(upload_id, parts, options) + end + end + + private + + def initiate_upload(options) + @client.create_multipart_upload(create_opts(options)).upload_id + end + + def complete_upload(upload_id, parts, options) + @client.complete_multipart_upload( + **complete_opts(options).merge( + upload_id: upload_id, + multipart_upload: { parts: parts } + ) + ) + end + + def upload_parts(upload_id, source, options) + pending = PartList.new(compute_parts(upload_id, source, options)) + completed = PartList.new + errors = upload_in_threads(pending, completed, options) + if errors.empty? 
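+          # CompleteMultipartUpload requires the parts list ordered by part_number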
+ completed.to_a.sort_by { |part| part[:part_number] } + else + abort_upload(upload_id, options, errors) + end + end + + def abort_upload(upload_id, options, errors) + @client.abort_multipart_upload( + bucket: options[:bucket], + key: options[:key], + upload_id: upload_id + ) + msg = "multipart upload failed: #{errors.map(&:message).join("; ")}" + raise MultipartUploadError.new(msg, errors) + rescue MultipartUploadError => error + raise error + rescue => error + msg = "failed to abort multipart upload: #{error.message}" + raise MultipartUploadError.new(msg, errors + [error]) + end + + def compute_parts(upload_id, source, options) + size = File.size(source) + default_part_size = compute_default_part_size(size) + offset = 0 + part_number = 1 + parts = [] + while offset < size + parts << upload_part_opts(options).merge( + upload_id: upload_id, + part_number: part_number, + body: FilePart.new( + source: source, + offset: offset, + size: part_size(size, default_part_size, offset) + ) + ) + part_number += 1 + offset += default_part_size + end + parts + end + + def create_opts(options) + CREATE_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def complete_opts(options) + COMPLETE_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def upload_part_opts(options) + UPLOAD_PART_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def upload_in_threads(pending, completed, options) + threads = [] + if (callback = options[:progress_callback]) + progress = MultipartProgress.new(pending, callback) + end + @thread_count.times do + thread = Thread.new do + begin + while part = pending.shift + if progress + part[:on_chunk_sent] = + proc do |_chunk, bytes, _total| + progress.call(part[:part_number], bytes) + end + end + resp = @client.upload_part(part) + part[:body].close + completed_part = {etag: resp.etag, part_number: part[:part_number]} + + # get the requested checksum from the response + if part[:checksum_algorithm] + k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym + completed_part[k] = resp[k] + end + + completed.push(completed_part) + end + nil + rescue => error + # keep other threads from uploading other parts + pending.clear! + error + end + end + thread.abort_on_exception = true + threads << thread + end + threads.map(&:value).compact + end + + def compute_default_part_size(source_size) + [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i + end + + def part_size(total_size, part_size, offset) + if offset + part_size > total_size + total_size - offset + else + part_size + end + end + + # @api private + class PartList + + def initialize(parts = []) + @parts = parts + @mutex = Mutex.new + end + + def push(part) + @mutex.synchronize { @parts.push(part) } + end + + def shift + @mutex.synchronize { @parts.shift } + end + + def clear! 
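+          # emptied when a part upload fails so other threads stop taking work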
+ @mutex.synchronize { @parts.clear } + end + + def size + @mutex.synchronize { @parts.size } + end + + def part_sizes + @mutex.synchronize { @parts.map { |p| p[:body].size } } + end + + def to_a + @mutex.synchronize { @parts.dup } + end + + end + + # @api private + class MultipartProgress + def initialize(parts, progress_callback) + @bytes_sent = Array.new(parts.size, 0) + @total_sizes = parts.part_sizes + @progress_callback = progress_callback + end + + def call(part_number, bytes_read) + # part numbers start at 1 + @bytes_sent[part_number - 1] = bytes_read + @progress_callback.call(@bytes_sent, @total_sizes) + end + end + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_stream_uploader.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_stream_uploader.rb new file mode 100644 index 0000000..b91ba06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_stream_uploader.rb @@ -0,0 +1,199 @@ +# frozen_string_literal: true + +require 'thread' +require 'set' +require 'tempfile' +require 'stringio' + +module Aws + module S3 + # @api private + class MultipartStreamUploader + # api private + PART_SIZE = 5 * 1024 * 1024 # 5MB + + # api private + THREAD_COUNT = 10 + + # api private + TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze + + # @api private + CREATE_OPTIONS = + Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names) + + # @api private + UPLOAD_PART_OPTIONS = + Set.new(Client.api.operation(:upload_part).input.shape.member_names) + + # @api private + COMPLETE_UPLOAD_OPTIONS = + Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names) + + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new + @tempfile = options[:tempfile] + @part_size = options[:part_size] || PART_SIZE + @thread_count = options[:thread_count] || THREAD_COUNT + end + + # @return [Client] + attr_reader :client + + # @option options [required,String] :bucket + # @option options [required,String] :key + # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse + def upload(options = {}, &block) + upload_id = initiate_upload(options) + parts = upload_parts(upload_id, options, &block) + complete_upload(upload_id, parts, options) + end + + private + + def initiate_upload(options) + @client.create_multipart_upload(create_opts(options)).upload_id + end + + def complete_upload(upload_id, parts, options) + @client.complete_multipart_upload( + **complete_opts(options).merge( + upload_id: upload_id, + multipart_upload: { parts: parts } + ) + ) + end + + def upload_parts(upload_id, options, &block) + completed = Queue.new + thread_errors = [] + errors = begin + IO.pipe do |read_pipe, write_pipe| + threads = upload_in_threads( + read_pipe, completed, + upload_part_opts(options).merge(upload_id: upload_id), + thread_errors) + begin + block.call(write_pipe) + ensure + # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111 + write_pipe.close + end + threads.map(&:value).compact + end + rescue => e + thread_errors + [e] + end + + if errors.empty? 
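+            # drain the queue of completed parts and order them for CompleteMultipartUpload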
+ Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] } + else + abort_upload(upload_id, options, errors) + end + end + + def abort_upload(upload_id, options, errors) + @client.abort_multipart_upload( + bucket: options[:bucket], + key: options[:key], + upload_id: upload_id + ) + msg = "multipart upload failed: #{errors.map(&:message).join("; ")}" + raise MultipartUploadError.new(msg, errors) + rescue MultipartUploadError => error + raise error + rescue => error + msg = "failed to abort multipart upload: #{error.message}" + raise MultipartUploadError.new(msg, errors + [error]) + end + + def create_opts(options) + CREATE_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def upload_part_opts(options) + UPLOAD_PART_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def complete_opts(options) + COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key| + hash[key] = options[key] if options.key?(key) + hash + end + end + + def read_to_part_body(read_pipe) + return if read_pipe.closed? + temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new) + temp_io.binmode + bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size) + temp_io.rewind + if bytes_copied == 0 + if Tempfile === temp_io + temp_io.close + temp_io.unlink + end + nil + else + temp_io + end + end + + def upload_in_threads(read_pipe, completed, options, thread_errors) + mutex = Mutex.new + part_number = 0 + @thread_count.times.map do + thread = Thread.new do + begin + loop do + body, thread_part_number = mutex.synchronize do + [read_to_part_body(read_pipe), part_number += 1] + end + break unless (body || thread_part_number == 1) + begin + part = options.merge( + body: body, + part_number: thread_part_number, + ) + resp = @client.upload_part(part) + completed_part = {etag: resp.etag, part_number: part[:part_number]} + + # get the requested checksum from the response + if part[:checksum_algorithm] + k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym + completed_part[k] = resp[k] + end + completed.push(completed_part) + ensure + if Tempfile === body + body.close + body.unlink + elsif StringIO === body + body.string.clear + end + end + end + nil + rescue => error + # keep other threads from uploading other parts + mutex.synchronize do + thread_errors.push(error) + read_pipe.close_read unless read_pipe.closed? + end + error + end + end + thread.abort_on_exception = true + thread + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload.rb new file mode 100644 index 0000000..82f2f04 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload.rb @@ -0,0 +1,530 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class MultipartUpload + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, object_key, id, options = {}) + # @param [String] bucket_name + # @param [String] object_key + # @param [String] id + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [required, String] :object_key + # @option options [required, String] :id + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @object_key = extract_object_key(args, options) + @id = extract_id(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # @return [String] + def object_key + @object_key + end + + # @return [String] + def id + @id + end + + # Upload ID that identifies the multipart upload. + # @return [String] + def upload_id + data[:upload_id] + end + + # Key of the object for which the multipart upload was initiated. + # @return [String] + def key + data[:key] + end + + # Date and time at which the multipart upload was initiated. + # @return [Time] + def initiated + data[:initiated] + end + + # The class of storage used to store the object. + # @return [String] + def storage_class + data[:storage_class] + end + + # Specifies the owner of the object that is part of the multipart + # upload. + # @return [Types::Owner] + def owner + data[:owner] + end + + # Identifies who initiated the multipart upload. + # @return [Types::Initiator] + def initiator + data[:initiator] + end + + # The algorithm that was used to create a checksum of the object. + # @return [String] + def checksum_algorithm + data[:checksum_algorithm] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # @raise [NotImplementedError] + # @api private + def load + msg = "#load is not implemented, data only available via enumeration" + raise NotImplementedError, msg + end + alias :reload :load + + # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. + # @return [Types::MultipartUpload] + # Returns the data for this {MultipartUpload}. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @deprecated Use [Aws::S3::Client] #wait_until instead + # + # Waiter polls an API operation until a resource enters a desired + # state. + # + # @note The waiting operation is performed on a copy. The original resource + # remains unchanged. + # + # ## Basic Usage + # + # Waiter will polls until it is successful, it fails by + # entering a terminal state, or until a maximum number of attempts + # are made. 
+ # + # # polls in a loop until condition is true + # resource.wait_until(options) {|resource| condition} + # + # ## Example + # + # instance.wait_until(max_attempts:10, delay:5) do |instance| + # instance.state.name == 'running' + # end + # + # ## Configuration + # + # You can configure the maximum number of polling attempts, and the + # delay (in seconds) between each polling attempt. The waiting condition is + # set by passing a block to {#wait_until}: + # + # # poll for ~25 seconds + # resource.wait_until(max_attempts:5,delay:5) {|resource|...} + # + # ## Callbacks + # + # You can be notified before each polling attempt and before each + # delay. If you throw `:success` or `:failure` from these callbacks, + # it will terminate the waiter. + # + # started_at = Time.now + # # poll for 1 hour, instead of a number of attempts + # proc = Proc.new do |attempts, response| + # throw :failure if Time.now - started_at > 3600 + # end + # + # # disable max attempts + # instance.wait_until(before_wait:proc, max_attempts:nil) {...} + # + # ## Handling Errors + # + # When a waiter is successful, it returns the Resource. When a waiter + # fails, it raises an error. + # + # begin + # resource.wait_until(...) + # rescue Aws::Waiters::Errors::WaiterFailed + # # resource did not enter the desired state in time + # end + # + # @yieldparam [Resource] resource to be used in the waiting condition. + # + # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter + # terminates because the waiter has entered a state that it will not + # transition out of, preventing success. + # + # yet successful. + # + # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is + # encountered while polling for a resource that is not expected. + # + # @raise [NotImplementedError] Raised when the resource does not + # + # @option options [Integer] :max_attempts (10) Maximum number of + # attempts + # @option options [Integer] :delay (10) Delay between each + # attempt in seconds + # @option options [Proc] :before_attempt (nil) Callback + # invoked before each attempt + # @option options [Proc] :before_wait (nil) Callback + # invoked before each wait + # @return [Resource] if the waiter was successful + def wait_until(options = {}, &block) + self_copy = self.dup + attempts = 0 + options[:max_attempts] = 10 unless options.key?(:max_attempts) + options[:delay] ||= 10 + options[:poller] = Proc.new do + attempts += 1 + if block.call(self_copy) + [:success, self_copy] + else + self_copy.reload unless attempts == options[:max_attempts] + :retry + end + end + Aws::Waiters::Waiter.new(options).wait({}) + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # multipart_upload.abort({ + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. 
If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::AbortMultipartUploadOutput] + def abort(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @object_key, + upload_id: @id + ) + resp = @client.abort_multipart_upload(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object = multipart_upload.complete({ + # multipart_upload: { + # parts: [ + # { + # etag: "ETag", + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # part_number: 1, + # }, + # ], + # }, + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # }) + # @param [Hash] options ({}) + # @option options [Types::CompletedMultipartUpload] :multipart_upload + # The container for the multipart upload request information. + # @option options [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :sse_customer_algorithm + # The server-side encryption (SSE) algorithm used to encrypt the object. + # This parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @option options [String] :sse_customer_key + # The server-side encryption (SSE) customer managed key. This parameter + # is needed only when the object was created using a checksum algorithm. + # For more information, see [Protecting data using SSE-C keys][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @option options [String] :sse_customer_key_md5 + # The MD5 server-side encryption (SSE) customer managed key. This + # parameter is needed only when the object was created using a checksum + # algorithm. For more information, see [Protecting data using SSE-C + # keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [Object] + def complete(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @object_key, + upload_id: @id + ) + @client.complete_multipart_upload(options) + Object.new( + bucket_name: @bucket_name, + key: @object_key, + client: @client + ) + end + + # @!group Associations + + # @return [Object] + def object + Object.new( + bucket_name: @bucket_name, + key: @object_key, + client: @client + ) + end + + # @param [String] part_number + # @return [MultipartUploadPart] + def part(part_number) + MultipartUploadPart.new( + bucket_name: @bucket_name, + object_key: @object_key, + multipart_upload_id: @id, + part_number: part_number, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # parts = multipart_upload.parts({ + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # }) + # @param [Hash] options ({}) + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :sse_customer_algorithm + # The server-side encryption (SSE) algorithm used to encrypt the object. 
+ # This parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @option options [String] :sse_customer_key + # The server-side encryption (SSE) customer managed key. This parameter + # is needed only when the object was created using a checksum algorithm. + # For more information, see [Protecting data using SSE-C keys][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @option options [String] :sse_customer_key_md5 + # The MD5 server-side encryption (SSE) customer managed key. This + # parameter is needed only when the object was created using a checksum + # algorithm. For more information, see [Protecting data using SSE-C + # keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [MultipartUploadPart::Collection] + def parts(options = {}) + batches = Enumerator.new do |y| + options = options.merge( + bucket: @bucket_name, + key: @object_key, + upload_id: @id + ) + resp = @client.list_parts(options) + resp.each_page do |page| + batch = [] + page.data.parts.each do |p| + batch << MultipartUploadPart.new( + bucket_name: options[:bucket], + object_key: options[:key], + multipart_upload_id: options[:upload_id], + part_number: p.part_number, + data: p, + client: @client + ) + end + y.yield(batch) + end + end + MultipartUploadPart::Collection.new(batches) + end + + # @deprecated + # @api private + def identifiers + { + bucket_name: @bucket_name, + object_key: @object_key, + id: @id + } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_object_key(args, options) + value = args[1] || options.delete(:object_key) + case value + when String then value + when nil then raise ArgumentError, "missing required option :object_key" + else + msg = "expected :object_key to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_id(args, options) + value = args[2] || options.delete(:id) + case value + when String then value + when nil then raise ArgumentError, "missing required option :id" + else + msg = "expected :id to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_error.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_error.rb new file mode 100644 index 0000000..1ddc3d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_error.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Aws + module S3 + class MultipartUploadError < StandardError + + def initialize(message, errors) + @errors = errors + super(message) + end + + # @return [Array] The list of errors encountered + # when uploading or aborting the upload. 
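+      #
+      # A minimal usage sketch (the object and file path are illustrative):
+      #
+      #     begin
+      #       obj.upload_file('/path/to/large-file')
+      #     rescue Aws::S3::MultipartUploadError => e
+      #       e.errors.each { |part_error| puts part_error.message }
+      #     end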
+ attr_reader :errors + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_part.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_part.rb new file mode 100644 index 0000000..9c9cef9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/multipart_upload_part.rb @@ -0,0 +1,600 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class MultipartUploadPart + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, object_key, multipart_upload_id, part_number, options = {}) + # @param [String] bucket_name + # @param [String] object_key + # @param [String] multipart_upload_id + # @param [Integer] part_number + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [required, String] :object_key + # @option options [required, String] :multipart_upload_id + # @option options [required, Integer] :part_number + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @object_key = extract_object_key(args, options) + @multipart_upload_id = extract_multipart_upload_id(args, options) + @part_number = extract_part_number(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # @return [String] + def object_key + @object_key + end + + # @return [String] + def multipart_upload_id + @multipart_upload_id + end + + # @return [Integer] + def part_number + @part_number + end + + # Date and time at which the part was uploaded. + # @return [Time] + def last_modified + data[:last_modified] + end + + # Entity tag returned when the part was uploaded. + # @return [String] + def etag + data[:etag] + end + + # Size in bytes of the uploaded part data. + # @return [Integer] + def size + data[:size] + end + + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + def checksum_crc32 + data[:checksum_crc32] + end + + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart uploads, + # see [ Checking object integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + def checksum_crc32c + data[:checksum_crc32c] + end + + # The base64-encoded, 160-bit SHA-1 digest of the object. This will only + # be present if it was uploaded with the object. 
With multipart uploads,
+    # this may not be a checksum value of the object. For more information
+    # about how checksums are calculated with multipart uploads, see [
+    # Checking object integrity][1] in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+    # @return [String]
+    def checksum_sha1
+      data[:checksum_sha1]
+    end
+
+    # This header can be used as a data integrity check to verify that the
+    # data received is the same data that was originally sent. This header
+    # specifies the base64-encoded, 256-bit SHA-256 digest of the object.
+    # For more information, see [Checking object integrity][1] in the
+    # *Amazon S3 User Guide*.
+    #
+    #
+    #
+    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+    # @return [String]
+    def checksum_sha256
+      data[:checksum_sha256]
+    end
+
+    # @!endgroup
+
+    # @return [Client]
+    def client
+      @client
+    end
+
+    # @raise [NotImplementedError]
+    # @api private
+    def load
+      msg = "#load is not implemented, data only available via enumeration"
+      raise NotImplementedError, msg
+    end
+    alias :reload :load
+
+    # @raise [NotImplementedError] Raised when {#data_loaded?} is `false`.
+    # @return [Types::Part]
+    #   Returns the data for this {MultipartUploadPart}.
+    def data
+      load unless @data
+      @data
+    end
+
+    # @return [Boolean]
+    #   Returns `true` if this resource is loaded. Accessing attributes or
+    #   {#data} on an unloaded resource will trigger a call to {#load}.
+    def data_loaded?
+      !!@data
+    end
+
+    # @deprecated Use [Aws::S3::Client] #wait_until instead
+    #
+    # A waiter polls an API operation until a resource enters a desired
+    # state.
+    #
+    # @note The waiting operation is performed on a copy. The original resource
+    #   remains unchanged.
+    #
+    # ## Basic Usage
+    #
+    # The waiter polls until it succeeds, until it fails by
+    # entering a terminal state, or until a maximum number of attempts
+    # have been made.
+    #
+    #     # polls in a loop until condition is true
+    #     resource.wait_until(options) {|resource| condition}
+    #
+    # ## Example
+    #
+    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
+    #       instance.state.name == 'running'
+    #     end
+    #
+    # ## Configuration
+    #
+    # You can configure the maximum number of polling attempts, and the
+    # delay (in seconds) between each polling attempt. The waiting condition is
+    # set by passing a block to {#wait_until}:
+    #
+    #     # poll for ~25 seconds
+    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+    #
+    # ## Callbacks
+    #
+    # You can be notified before each polling attempt and before each
+    # delay. If you throw `:success` or `:failure` from these callbacks,
+    # it will terminate the waiter.
+    #
+    #     started_at = Time.now
+    #     # poll for 1 hour, instead of a number of attempts
+    #     proc = Proc.new do |attempts, response|
+    #       throw :failure if Time.now - started_at > 3600
+    #     end
+    #
+    #     # disable max attempts
+    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+    #
+    # ## Handling Errors
+    #
+    # When a waiter is successful, it returns the Resource. When a waiter
+    # fails, it raises an error.
+    #
+    #     begin
+    #       resource.wait_until(...)
+    #     rescue Aws::Waiters::Errors::WaiterFailed
+    #       # resource did not enter the desired state in time
+    #     end
+    #
+    # @yieldparam [Resource] resource to be used in the waiting condition.
+    #
+    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+    #   terminates because the waiter has entered a state that it will not
+    #   transition out of, preventing success.
+    #
+    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+    #   encountered while polling for a resource that is not expected.
+    #
+    # @raise [NotImplementedError] Raised when the resource does not
+    #   support reloading and therefore cannot be polled for a new state.
+    #
+    # @option options [Integer] :max_attempts (10) Maximum number of
+    #   attempts
+    # @option options [Integer] :delay (10) Delay between each
+    #   attempt in seconds
+    # @option options [Proc] :before_attempt (nil) Callback
+    #   invoked before each attempt
+    # @option options [Proc] :before_wait (nil) Callback
+    #   invoked before each wait
+    # @return [Resource] if the waiter was successful
+    def wait_until(options = {}, &block)
+      self_copy = self.dup
+      attempts = 0
+      options[:max_attempts] = 10 unless options.key?(:max_attempts)
+      options[:delay] ||= 10
+      options[:poller] = Proc.new do
+        attempts += 1
+        if block.call(self_copy)
+          [:success, self_copy]
+        else
+          self_copy.reload unless attempts == options[:max_attempts]
+          :retry
+        end
+      end
+      Aws::Waiters::Waiter.new(options).wait({})
+    end
+
+    # @!group Actions
+
+    # @example Request syntax with placeholder values
+    #
+    #   multipart_upload_part.copy_from({
+    #     copy_source: "CopySource", # required
+    #     copy_source_if_match: "CopySourceIfMatch",
+    #     copy_source_if_modified_since: Time.now,
+    #     copy_source_if_none_match: "CopySourceIfNoneMatch",
+    #     copy_source_if_unmodified_since: Time.now,
+    #     copy_source_range: "CopySourceRange",
+    #     sse_customer_algorithm: "SSECustomerAlgorithm",
+    #     sse_customer_key: "SSECustomerKey",
+    #     sse_customer_key_md5: "SSECustomerKeyMD5",
+    #     copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm",
+    #     copy_source_sse_customer_key: "CopySourceSSECustomerKey",
+    #     copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5",
+    #     request_payer: "requester", # accepts requester
+    #     expected_bucket_owner: "AccountId",
+    #     expected_source_bucket_owner: "AccountId",
+    #   })
+    # @param [Hash] options ({})
+    # @option options [required, String] :copy_source
+    #   Specifies the source object for the copy operation. You specify the
+    #   value in one of two formats, depending on whether you want to access
+    #   the source object through an [access point][1]:
+    #
+    #   * For objects not accessed through an access point, specify the name
+    #     of the source bucket and key of the source object, separated by a
+    #     slash (/). For example, to copy the object `reports/january.pdf`
+    #     from the bucket `awsexamplebucket`, use
+    #     `awsexamplebucket/reports/january.pdf`. The value must be
+    #     URL-encoded.
+    #
+    #   * For objects accessed through access points, specify the Amazon
+    #     Resource Name (ARN) of the object as accessed through the access
+    #     point, in the format
+    #     `arn:aws:s3:::accesspoint//object/`.
+    #     For example, to copy the object `reports/january.pdf` through access
+    #     point `my-access-point` owned by account `123456789012` in Region
+    #     `us-west-2`, use the URL encoding of
+    #     `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+    #     The value must be URL encoded.
+    #
+    #     Amazon S3 supports copy operations using access points only when the
+    #     source and destination buckets are in the same Amazon Web Services
+    #     Region.
+ # + # + # + # Alternatively, for objects accessed through Amazon S3 on Outposts, + # specify the ARN of the object as accessed in the format + # `arn:aws:s3-outposts:::outpost//object/`. + # For example, to copy the object `reports/january.pdf` through + # outpost `my-outpost` owned by account `123456789012` in Region + # `us-west-2`, use the URL encoding of + # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. + # The value must be URL-encoded. + # + # To copy a specific version of an object, append + # `?versionId=` to the value (for example, + # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). + # If you don't specify a version ID, Amazon S3 copies the latest + # version of the source object. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html + # @option options [String] :copy_source_if_match + # Copies the object if its entity tag (ETag) matches the specified tag. + # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since + # Copies the object if it has been modified since the specified time. + # @option options [String] :copy_source_if_none_match + # Copies the object if its entity tag (ETag) is different than the + # specified ETag. + # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since + # Copies the object if it hasn't been modified since the specified + # time. + # @option options [String] :copy_source_range + # The range of bytes to copy from the source object. The range value + # must use the form bytes=first-last, where the first and last are the + # zero-based byte offsets to copy. For example, bytes=0-9 indicates that + # you want to copy the first 10 bytes of the source. You can copy a + # range only if the source object is greater than 5 MB. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. This must be + # the same encryption key specified in the initiate multipart upload + # request. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :copy_source_sse_customer_algorithm + # Specifies the algorithm to use when decrypting the source object (for + # example, AES256). + # @option options [String] :copy_source_sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use to + # decrypt the source object. The encryption key provided in this header + # must be one that was used when the source object was created. + # @option options [String] :copy_source_sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. 
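+    #
+    # A usage sketch (bucket names, key, and `upload_id` below are
+    # illustrative): copy the first 5 MiB of an existing object as part 1
+    # of a multipart upload.
+    #
+    #     part = Aws::S3::MultipartUploadPart.new(
+    #       'dest-bucket', 'dest-key', upload_id, 1
+    #     )
+    #     part.copy_from(
+    #       copy_source: 'src-bucket/src-key',
+    #       copy_source_range: 'bytes=0-5242879'
+    #     )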
+ # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected destination bucket owner. If the + # destination bucket is owned by a different account, the request fails + # with the HTTP status code `403 Forbidden` (access denied). + # @option options [String] :expected_source_bucket_owner + # The account ID of the expected source bucket owner. If the source + # bucket is owned by a different account, the request fails with the + # HTTP status code `403 Forbidden` (access denied). + # @return [Types::UploadPartCopyOutput] + def copy_from(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @object_key, + upload_id: @multipart_upload_id, + part_number: @part_number + ) + resp = @client.upload_part_copy(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # multipart_upload_part.upload({ + # body: source_file, + # content_length: 1, + # content_md5: "ContentMD5", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String, StringIO, File] :body + # Object data. + # @option options [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the part data. This parameter + # is auto-populated when using the command from the CLI. This parameter + # is required if object lock parameters are specified. + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # This checksum algorithm must be the same for all parts and it match + # the checksum value supplied in the `CreateMultipartUpload` request. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. 
For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm header`. This must be + # the same encryption key specified in the initiate multipart upload + # request. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
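+    #
+    # A sketch of a typical part-upload loop (the file path and 5 MiB part
+    # size are illustrative; `upload` is the parent {MultipartUpload}).
+    # Every part except the last must be at least 5 MiB, and the returned
+    # ETags are needed to complete the upload:
+    #
+    #     etags = []
+    #     File.open('/tmp/big-file.bin', 'rb') do |file|
+    #       part_number = 1
+    #       while (chunk = file.read(5 * 1024 * 1024))
+    #         resp = upload.part(part_number).upload(body: chunk)
+    #         etags << { part_number: part_number, etag: resp.etag }
+    #         part_number += 1
+    #       end
+    #     end
+    #     upload.complete(multipart_upload: { parts: etags })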
+ # @return [Types::UploadPartOutput] + def upload(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @object_key, + upload_id: @multipart_upload_id, + part_number: @part_number + ) + resp = @client.upload_part(options) + resp.data + end + + # @!group Associations + + # @return [MultipartUpload] + def multipart_upload + MultipartUpload.new( + bucket_name: @bucket_name, + object_key: @object_key, + id: @multipart_upload_id, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { + bucket_name: @bucket_name, + object_key: @object_key, + multipart_upload_id: @multipart_upload_id, + part_number: @part_number + } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_object_key(args, options) + value = args[1] || options.delete(:object_key) + case value + when String then value + when nil then raise ArgumentError, "missing required option :object_key" + else + msg = "expected :object_key to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_multipart_upload_id(args, options) + value = args[2] || options.delete(:multipart_upload_id) + case value + when String then value + when nil then raise ArgumentError, "missing required option :multipart_upload_id" + else + msg = "expected :multipart_upload_id to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_part_number(args, options) + value = args[3] || options.delete(:part_number) + case value + when Integer then value + when nil then raise ArgumentError, "missing required option :part_number" + else + msg = "expected :part_number to be a Integer, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object.rb new file mode 100644 index 0000000..4c46b59 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object.rb @@ -0,0 +1,1871 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class Object + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, key, options = {}) + # @param [String] bucket_name + # @param [String] key + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [required, String] :key + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? 
args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @key = extract_key(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # @return [String] + def key + @key + end + + # Specifies whether the object retrieved was (true) or was not (false) a + # Delete Marker. If false, this response header does not appear in the + # response. + # @return [Boolean] + def delete_marker + data[:delete_marker] + end + + # Indicates that a range of bytes was specified. + # @return [String] + def accept_ranges + data[:accept_ranges] + end + + # If the object expiration is configured (see PUT Bucket lifecycle), the + # response includes this header. It includes the `expiry-date` and + # `rule-id` key-value pairs providing object expiration information. The + # value of the `rule-id` is URL-encoded. + # @return [String] + def expiration + data[:expiration] + end + + # If the object is an archived object (an object whose storage class is + # GLACIER), the response includes this header if either the archive + # restoration is in progress (see [RestoreObject][1] or an archive copy + # is already restored. + # + # If an archive copy is already restored, the header value indicates + # when Amazon S3 is scheduled to delete the object copy. For example: + # + # `x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 + # 00:00:00 GMT"` + # + # If the object restoration is in progress, the header returns the value + # `ongoing-request="true"`. + # + # For more information about archiving objects, see [Transitioning + # Objects: General Considerations][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + # @return [String] + def restore + data[:restore] + end + + # The archive state of the head object. + # @return [String] + def archive_status + data[:archive_status] + end + + # Creation date of the object. + # @return [Time] + def last_modified + data[:last_modified] + end + + # Size of the body in bytes. + # @return [Integer] + def content_length + data[:content_length] + end + + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart uploads, + # see [ Checking object integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + def checksum_crc32 + data[:checksum_crc32] + end + + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart uploads, + # see [ Checking object integrity][1] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + def checksum_crc32c + data[:checksum_crc32c] + end + + # The base64-encoded, 160-bit SHA-1 digest of the object. This will only + # be present if it was uploaded with the object. With multipart uploads, + # this may not be a checksum value of the object. For more information + # about how checksums are calculated with multipart uploads, see [ + # Checking object integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + def checksum_sha1 + data[:checksum_sha1] + end + + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart uploads, + # see [ Checking object integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + def checksum_sha256 + data[:checksum_sha256] + end + + # An entity tag (ETag) is an opaque identifier assigned by a web server + # to a specific version of a resource found at a URL. + # @return [String] + def etag + data[:etag] + end + + # This is set to the number of metadata entries not returned in + # `x-amz-meta` headers. This can happen if you create metadata using an + # API like SOAP that supports more flexible metadata than the REST API. + # For example, using SOAP, you can create metadata whose values are not + # legal HTTP headers. + # @return [Integer] + def missing_meta + data[:missing_meta] + end + + # Version of the object. + # @return [String] + def version_id + data[:version_id] + end + + # Specifies caching behavior along the request/reply chain. + # @return [String] + def cache_control + data[:cache_control] + end + + # Specifies presentational information for the object. + # @return [String] + def content_disposition + data[:content_disposition] + end + + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. + # @return [String] + def content_encoding + data[:content_encoding] + end + + # The language the content is in. + # @return [String] + def content_language + data[:content_language] + end + + # A standard MIME type describing the format of the object data. + # @return [String] + def content_type + data[:content_type] + end + + # The date and time at which the object is no longer cacheable. + # @return [Time] + def expires + data[:expires] + end + + # @return [String] + def expires_string + data[:expires_string] + end + + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. 
+ # @return [String] + def website_redirect_location + data[:website_redirect_location] + end + + # If the object is stored using server-side encryption either with an + # Amazon Web Services KMS key or an Amazon S3-managed encryption key, + # the response includes this header with the value of the server-side + # encryption algorithm used when storing this object in Amazon S3 (for + # example, AES256, aws:kms). + # @return [String] + def server_side_encryption + data[:server_side_encryption] + end + + # A map of metadata to store with the object in S3. + # @return [Hash] + def metadata + data[:metadata] + end + + # If server-side encryption with a customer-provided encryption key was + # requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + def sse_customer_algorithm + data[:sse_customer_algorithm] + end + + # If server-side encryption with a customer-provided encryption key was + # requested, the response will include this header to provide round-trip + # message integrity verification of the customer-provided encryption + # key. + # @return [String] + def sse_customer_key_md5 + data[:sse_customer_key_md5] + end + + # If present, specifies the ID of the Amazon Web Services Key Management + # Service (Amazon Web Services KMS) symmetric customer managed key that + # was used for the object. + # @return [String] + def ssekms_key_id + data[:ssekms_key_id] + end + + # Indicates whether the object uses an S3 Bucket Key for server-side + # encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + def bucket_key_enabled + data[:bucket_key_enabled] + end + + # Provides storage class information of the object. Amazon S3 returns + # this header for all objects except for S3 Standard storage class + # objects. + # + # For more information, see [Storage Classes][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @return [String] + def storage_class + data[:storage_class] + end + + # If present, indicates that the requester was successfully charged for + # the request. + # @return [String] + def request_charged + data[:request_charged] + end + + # Amazon S3 can return this header if your request involves a bucket + # that is either a source or a destination in a replication rule. + # + # In replication, you have a source bucket on which you configure + # replication and destination bucket or buckets where Amazon S3 stores + # object replicas. When you request an object (`GetObject`) or object + # metadata (`HeadObject`) from these buckets, Amazon S3 will return the + # `x-amz-replication-status` header in the response as follows: + # + # * **If requesting an object from the source bucket**, Amazon S3 will + # return the `x-amz-replication-status` header if the object in your + # request is eligible for replication. + # + # For example, suppose that in your replication configuration, you + # specify object prefix `TaxDocs` requesting Amazon S3 to replicate + # objects with key prefix `TaxDocs`. Any objects you upload with this + # key name prefix, for example `TaxDocs/document1.pdf`, are eligible + # for replication. For any object request with this key name prefix, + # Amazon S3 will return the `x-amz-replication-status` header with + # value PENDING, COMPLETED or FAILED indicating object replication + # status. 
+ # + # * **If requesting an object from a destination bucket**, Amazon S3 + # will return the `x-amz-replication-status` header with value REPLICA + # if the object in your request is a replica that Amazon S3 created + # and there is no replica modification replication in progress. + # + # * **When replicating objects to multiple destination buckets**, the + # `x-amz-replication-status` header acts differently. The header of + # the source object will only return a value of COMPLETED when + # replication is successful to all destinations. The header will + # remain at value PENDING until replication has completed for all + # destinations. If one or more destinations fails replication the + # header will return FAILED. + # + # For more information, see [Replication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # @return [String] + def replication_status + data[:replication_status] + end + + # The count of parts this object has. This value is only returned if you + # specify `partNumber` in your request and the object was uploaded as a + # multipart upload. + # @return [Integer] + def parts_count + data[:parts_count] + end + + # The Object Lock mode, if any, that's in effect for this object. This + # header is only returned if the requester has the + # `s3:GetObjectRetention` permission. For more information about S3 + # Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @return [String] + def object_lock_mode + data[:object_lock_mode] + end + + # The date and time when the Object Lock retention period expires. This + # header is only returned if the requester has the + # `s3:GetObjectRetention` permission. + # @return [Time] + def object_lock_retain_until_date + data[:object_lock_retain_until_date] + end + + # Specifies whether a legal hold is in effect for this object. This + # header is only returned if the requester has the + # `s3:GetObjectLegalHold` permission. This header is not returned if the + # specified version of this object has never had a legal hold applied. + # For more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @return [String] + def object_lock_legal_hold_status + data[:object_lock_legal_hold_status] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # Loads, or reloads {#data} for the current {Object}. + # Returns `self` making it possible to chain methods. + # + # object.reload.data + # + # @return [self] + def load + resp = @client.head_object( + bucket: @bucket_name, + key: @key + ) + @data = resp.data + self + end + alias :reload :load + + # @return [Types::HeadObjectOutput] + # Returns the data for this {Object}. Calls + # {Client#head_object} if {#data_loaded?} is `false`. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @param [Hash] options ({}) + # @return [Boolean] + # Returns `true` if the Object exists. 
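+    #
+    # For illustration (names are hypothetical): attribute readers lazily
+    # call `head_object` the first time {#data} is needed, and `#exists?`
+    # wraps the `ObjectExists` waiter with a single attempt.
+    #
+    #     obj = Aws::S3::Object.new('my-bucket', 'report.pdf')
+    #     obj.data_loaded?   # => false
+    #     obj.content_length # triggers head_object, then returns the size
+    #     obj.exists?        # => true or false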
+    def exists?(options = {})
+      begin
+        wait_until_exists(options.merge(max_attempts: 1))
+        true
+      rescue Aws::Waiters::Errors::UnexpectedError => e
+        raise e.error
+      rescue Aws::Waiters::Errors::WaiterFailed
+        false
+      end
+    end
+
+    # @param [Hash] options ({})
+    # @option options [Integer] :max_attempts (20)
+    # @option options [Float] :delay (5)
+    # @option options [Proc] :before_attempt
+    # @option options [Proc] :before_wait
+    # @return [Object]
+    def wait_until_exists(options = {}, &block)
+      options, params = separate_params_and_options(options)
+      waiter = Waiters::ObjectExists.new(options)
+      yield_waiter_and_warn(waiter, &block) if block_given?
+      waiter.wait(params.merge(bucket: @bucket_name,
+        key: @key))
+      Object.new({
+        bucket_name: @bucket_name,
+        key: @key,
+        client: @client
+      })
+    end
+
+    # @param [Hash] options ({})
+    # @option options [Integer] :max_attempts (20)
+    # @option options [Float] :delay (5)
+    # @option options [Proc] :before_attempt
+    # @option options [Proc] :before_wait
+    # @return [Object]
+    def wait_until_not_exists(options = {}, &block)
+      options, params = separate_params_and_options(options)
+      waiter = Waiters::ObjectNotExists.new(options)
+      yield_waiter_and_warn(waiter, &block) if block_given?
+      waiter.wait(params.merge(bucket: @bucket_name,
+        key: @key))
+      Object.new({
+        bucket_name: @bucket_name,
+        key: @key,
+        client: @client
+      })
+    end
+
+    # @deprecated Use [Aws::S3::Client] #wait_until instead
+    #
+    # A waiter polls an API operation until a resource enters a desired
+    # state.
+    #
+    # @note The waiting operation is performed on a copy. The original resource
+    #   remains unchanged.
+    #
+    # ## Basic Usage
+    #
+    # The waiter polls until it succeeds, until it fails by
+    # entering a terminal state, or until a maximum number of attempts
+    # have been made.
+    #
+    #     # polls in a loop until condition is true
+    #     resource.wait_until(options) {|resource| condition}
+    #
+    # ## Example
+    #
+    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
+    #       instance.state.name == 'running'
+    #     end
+    #
+    # ## Configuration
+    #
+    # You can configure the maximum number of polling attempts, and the
+    # delay (in seconds) between each polling attempt. The waiting condition is
+    # set by passing a block to {#wait_until}:
+    #
+    #     # poll for ~25 seconds
+    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+    #
+    # ## Callbacks
+    #
+    # You can be notified before each polling attempt and before each
+    # delay. If you throw `:success` or `:failure` from these callbacks,
+    # it will terminate the waiter.
+    #
+    #     started_at = Time.now
+    #     # poll for 1 hour, instead of a number of attempts
+    #     proc = Proc.new do |attempts, response|
+    #       throw :failure if Time.now - started_at > 3600
+    #     end
+    #
+    #     # disable max attempts
+    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+    #
+    # ## Handling Errors
+    #
+    # When a waiter is successful, it returns the Resource. When a waiter
+    # fails, it raises an error.
+    #
+    #     begin
+    #       resource.wait_until(...)
+    #     rescue Aws::Waiters::Errors::WaiterFailed
+    #       # resource did not enter the desired state in time
+    #     end
+    #
+    # @yieldparam [Resource] resource to be used in the waiting condition.
+    #
+    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+    #   terminates because the waiter has entered a state that it will not
+    #   transition out of, preventing success.
+    #
+    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+    #   encountered while polling for a resource that is not expected.
+    #
+    # @raise [NotImplementedError] Raised when the resource does not
+    #   support reloading and therefore cannot be polled for a new state.
+    #
+    # @option options [Integer] :max_attempts (10) Maximum number of
+    #   attempts
+    # @option options [Integer] :delay (10) Delay between each
+    #   attempt in seconds
+    # @option options [Proc] :before_attempt (nil) Callback
+    #   invoked before each attempt
+    # @option options [Proc] :before_wait (nil) Callback
+    #   invoked before each wait
+    # @return [Resource] if the waiter was successful
+    def wait_until(options = {}, &block)
+      self_copy = self.dup
+      attempts = 0
+      options[:max_attempts] = 10 unless options.key?(:max_attempts)
+      options[:delay] ||= 10
+      options[:poller] = Proc.new do
+        attempts += 1
+        if block.call(self_copy)
+          [:success, self_copy]
+        else
+          self_copy.reload unless attempts == options[:max_attempts]
+          :retry
+        end
+      end
+      Aws::Waiters::Waiter.new(options).wait({})
+    end
+
+    # @!group Actions
+
+    # @example Request syntax with placeholder values
+    #
+    #   object.copy_from({
+    #     acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
+    #     cache_control: "CacheControl",
+    #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+    #     content_disposition: "ContentDisposition",
+    #     content_encoding: "ContentEncoding",
+    #     content_language: "ContentLanguage",
+    #     content_type: "ContentType",
+    #     copy_source: "CopySource", # required
+    #     copy_source_if_match: "CopySourceIfMatch",
+    #     copy_source_if_modified_since: Time.now,
+    #     copy_source_if_none_match: "CopySourceIfNoneMatch",
+    #     copy_source_if_unmodified_since: Time.now,
+    #     expires: Time.now,
+    #     grant_full_control: "GrantFullControl",
+    #     grant_read: "GrantRead",
+    #     grant_read_acp: "GrantReadACP",
+    #     grant_write_acp: "GrantWriteACP",
+    #     metadata: {
+    #       "MetadataKey" => "MetadataValue",
+    #     },
+    #     metadata_directive: "COPY", # accepts COPY, REPLACE
+    #     tagging_directive: "COPY", # accepts COPY, REPLACE
+    #     server_side_encryption: "AES256", # accepts AES256, aws:kms
+    #     storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW
+    #     website_redirect_location: "WebsiteRedirectLocation",
+    #     sse_customer_algorithm: "SSECustomerAlgorithm",
+    #     sse_customer_key: "SSECustomerKey",
+    #     sse_customer_key_md5: "SSECustomerKeyMD5",
+    #     ssekms_key_id: "SSEKMSKeyId",
+    #     ssekms_encryption_context: "SSEKMSEncryptionContext",
+    #     bucket_key_enabled: false,
+    #     copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm",
+    #     copy_source_sse_customer_key: "CopySourceSSECustomerKey",
+    #     copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5",
+    #     request_payer: "requester", # accepts requester
+    #     tagging: "TaggingHeader",
+    #     object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE
+    #     object_lock_retain_until_date: Time.now,
+    #     object_lock_legal_hold_status: "ON", # accepts ON, OFF
+    #     expected_bucket_owner: "AccountId",
+    #     expected_source_bucket_owner: "AccountId",
+    #   })
+    # @param [Hash] options ({})
+    # @option options [String] :acl
+    #   The canned ACL to apply to the object.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    # @option options [String] :cache_control
+    #   Specifies caching behavior along the request/reply chain.
+ # @option options [String] :checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :content_disposition + # Specifies presentational information for the object. + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. + # @option options [String] :content_language + # The language the content is in. + # @option options [String] :content_type + # A standard MIME type describing the format of the object data. + # @option options [required, String] :copy_source + # Specifies the source object for the copy operation. You specify the + # value in one of two formats, depending on whether you want to access + # the source object through an [access point][1]: + # + # * For objects not accessed through an access point, specify the name + # of the source bucket and the key of the source object, separated by + # a slash (/). For example, to copy the object `reports/january.pdf` + # from the bucket `awsexamplebucket`, use + # `awsexamplebucket/reports/january.pdf`. The value must be + # URL-encoded. + # + # * For objects accessed through access points, specify the Amazon + # Resource Name (ARN) of the object as accessed through the access + # point, in the format + # `arn:aws:s3:::accesspoint//object/`. + # For example, to copy the object `reports/january.pdf` through access + # point `my-access-point` owned by account `123456789012` in Region + # `us-west-2`, use the URL encoding of + # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. + # The value must be URL encoded. + # + # Amazon S3 supports copy operations using access points only when the + # source and destination buckets are in the same Amazon Web Services + # Region. + # + # + # + # Alternatively, for objects accessed through Amazon S3 on Outposts, + # specify the ARN of the object as accessed in the format + # `arn:aws:s3-outposts:::outpost//object/`. + # For example, to copy the object `reports/january.pdf` through + # outpost `my-outpost` owned by account `123456789012` in Region + # `us-west-2`, use the URL encoding of + # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. + # The value must be URL-encoded. + # + # To copy a specific version of an object, append + # `?versionId=` to the value (for example, + # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). + # If you don't specify a version ID, Amazon S3 copies the latest + # version of the source object. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html + # @option options [String] :copy_source_if_match + # Copies the object if its entity tag (ETag) matches the specified tag. + # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since + # Copies the object if it has been modified since the specified time. + # @option options [String] :copy_source_if_none_match + # Copies the object if its entity tag (ETag) is different than the + # specified ETag. 
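+    #
+    # For illustration (names and ETag are hypothetical): copy only if
+    # the source object still matches a previously recorded ETag.
+    #
+    #     obj = Aws::S3::Object.new('dest-bucket', 'dest-key')
+    #     obj.copy_from(
+    #       copy_source: 'src-bucket/src-key',
+    #       copy_source_if_match: '"686897696a7c876b7e"'
+    #     )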
+    # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since
+    #   Copies the object if it hasn't been modified since the specified
+    #   time.
+    # @option options [Time,DateTime,Date,Integer,String] :expires
+    #   The date and time at which the object is no longer cacheable.
+    # @option options [String] :grant_full_control
+    #   Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
+    #   object.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    # @option options [String] :grant_read
+    #   Allows grantee to read the object data and its metadata.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    # @option options [String] :grant_read_acp
+    #   Allows grantee to read the object ACL.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    # @option options [String] :grant_write_acp
+    #   Allows grantee to write the ACL for the applicable object.
+    #
+    #   This action is not supported by Amazon S3 on Outposts.
+    # @option options [Hash] :metadata
+    #   A map of metadata to store with the object in S3.
+    # @option options [String] :metadata_directive
+    #   Specifies whether the metadata is copied from the source object or
+    #   replaced with metadata provided in the request.
+    # @option options [String] :tagging_directive
+    #   Specifies whether the object tag-set is copied from the source object
+    #   or replaced with the tag-set provided in the request.
+    # @option options [String] :server_side_encryption
+    #   The server-side encryption algorithm used when storing this object in
+    #   Amazon S3 (for example, AES256, aws:kms).
+    # @option options [String] :storage_class
+    #   By default, Amazon S3 uses the STANDARD Storage Class to store newly
+    #   created objects. The STANDARD storage class provides high durability
+    #   and high availability. Depending on performance needs, you can specify
+    #   a different Storage Class. Amazon S3 on Outposts only uses the
+    #   OUTPOSTS Storage Class. For more information, see [Storage Classes][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+    # @option options [String] :website_redirect_location
+    #   If the bucket is configured as a website, redirects requests for this
+    #   object to another object in the same bucket or to an external URL.
+    #   Amazon S3 stores the value of this header in the object metadata.
+    # @option options [String] :sse_customer_algorithm
+    #   Specifies the algorithm to use when encrypting the object (for
+    #   example, AES256).
+    # @option options [String] :sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 to use in
+    #   encrypting data. This value is used to store the object and then it is
+    #   discarded; Amazon S3 does not store the encryption key. The key must
+    #   be appropriate for use with the algorithm specified in the
+    #   `x-amz-server-side-encryption-customer-algorithm` header.
+    # @option options [String] :sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    # @option options [String] :ssekms_key_id
+    #   Specifies the Amazon Web Services KMS key ID to use for object
+    #   encryption. All GET and PUT requests for an object protected by Amazon
+    #   Web Services KMS will fail if not made via SSL or using SigV4. For
+    #   information about configuring using any of the officially supported
+    #   Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying
+    #   the Signature Version in Request Authentication][1] in the *Amazon S3
+    #   User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+    # @option options [String] :ssekms_encryption_context
+    #   Specifies the Amazon Web Services KMS Encryption Context to use for
+    #   object encryption. The value of this header is a base64-encoded UTF-8
+    #   string holding JSON with the encryption context key-value pairs.
+    # @option options [Boolean] :bucket_key_enabled
+    #   Specifies whether Amazon S3 should use an S3 Bucket Key for object
+    #   encryption with server-side encryption using AWS KMS (SSE-KMS).
+    #   Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key
+    #   for object encryption with SSE-KMS.
+    #
+    #   Specifying this header with a COPY action doesn’t affect bucket-level
+    #   settings for S3 Bucket Key.
+    # @option options [String] :copy_source_sse_customer_algorithm
+    #   Specifies the algorithm to use when decrypting the source object (for
+    #   example, AES256).
+    # @option options [String] :copy_source_sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 to use to
+    #   decrypt the source object. The encryption key provided in this header
+    #   must be one that was used when the source object was created.
+    # @option options [String] :copy_source_sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    # @option options [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests. For information about downloading objects from Requester
+    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    # @option options [String] :tagging
+    #   The tag-set for the destination object; this value must be used in
+    #   conjunction with the `TaggingDirective`. The tag-set must be
+    #   encoded as URL Query parameters.
+    # @option options [String] :object_lock_mode
+    #   The Object Lock mode that you want to apply to the copied object.
+    # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date
+    #   The date and time when you want the copied object's Object Lock to
+    #   expire.
+    # @option options [String] :object_lock_legal_hold_status
+    #   Specifies whether you want to apply a legal hold to the copied object.
+    # @option options [String] :expected_bucket_owner
+    #   The account ID of the expected destination bucket owner. If the
+    #   destination bucket is owned by a different account, the request fails
+    #   with the HTTP status code `403 Forbidden` (access denied).
+    # @option options [String] :expected_source_bucket_owner
+    #   The account ID of the expected source bucket owner. If the source
+    #   bucket is owned by a different account, the request fails with the
+    #   HTTP status code `403 Forbidden` (access denied).
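+    #
+    # A common pattern (names are illustrative): rewrite an object's
+    # metadata in place by copying the object onto itself with
+    # `metadata_directive: "REPLACE"`.
+    #
+    #     obj = Aws::S3::Object.new('my-bucket', 'report.pdf')
+    #     obj.copy_from(
+    #       copy_source: 'my-bucket/report.pdf',
+    #       metadata: { 'reviewed' => 'true' },
+    #       metadata_directive: 'REPLACE'
+    #     )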
+ # @return [Types::CopyObjectOutput] + def copy_from(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.copy_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object.delete({ + # mfa: "MFA", + # version_id: "ObjectVersionId", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Boolean] :bypass_governance_retention + # Indicates whether S3 Object Lock should bypass Governance-mode + # restrictions to process this operation. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::DeleteObjectOutput] + def delete(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.delete_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object.get({ + # if_match: "IfMatch", + # if_modified_since: Time.now, + # if_none_match: "IfNoneMatch", + # if_unmodified_since: Time.now, + # range: "Range", + # response_cache_control: "ResponseCacheControl", + # response_content_disposition: "ResponseContentDisposition", + # response_content_encoding: "ResponseContentEncoding", + # response_content_language: "ResponseContentLanguage", + # response_content_type: "ResponseContentType", + # response_expires: Time.now, + # version_id: "ObjectVersionId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # part_number: 1, + # expected_bucket_owner: "AccountId", + # checksum_mode: "ENABLED", # accepts ENABLED + # }) + # @param [Hash] options ({}) + # @option options [String] :if_match + # Return the object only if its entity tag (ETag) is the same as the one + # specified; otherwise, return a 412 (precondition failed) error. + # @option options [Time,DateTime,Date,Integer,String] :if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. + # @option options [String] :if_none_match + # Return the object only if its entity tag (ETag) is different from the + # one specified; otherwise, return a 304 (not modified) error. 
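+    #
+    # For illustration (the cached ETag is hypothetical): `if_none_match`
+    # enables cache-style reads; when the object is unchanged the request
+    # fails, and the SDK is expected to raise
+    # `Aws::S3::Errors::NotModified` for the 304 response.
+    #
+    #     begin
+    #       resp = obj.get(if_none_match: cached_etag)
+    #     rescue Aws::S3::Errors::NotModified
+    #       # serve the cached copy instead
+    #     end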
+ # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since + # Return the object only if it has not been modified since the specified + # time; otherwise, return a 412 (precondition failed) error. + # @option options [String] :range + # Downloads the specified range bytes of an object. For more information + # about the HTTP Range header, see + # [https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1]. + # + # Amazon S3 doesn't support retrieving multiple ranges of data per + # `GET` request. + # + # + # + # + # + # [1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + # @option options [String] :response_cache_control + # Sets the `Cache-Control` header of the response. + # @option options [String] :response_content_disposition + # Sets the `Content-Disposition` header of the response + # @option options [String] :response_content_encoding + # Sets the `Content-Encoding` header of the response. + # @option options [String] :response_content_language + # Sets the `Content-Language` header of the response. + # @option options [String] :response_content_type + # Sets the `Content-Type` header of the response. + # @option options [Time,DateTime,Date,Integer,String] :response_expires + # Sets the `Expires` header of the response. + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when decrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 used to + # encrypt the data. This value is used to decrypt the object when + # recovering it and must match the one used when storing the data. The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Integer] :part_number + # Part number of the object being read. This is a positive integer + # between 1 and 10,000. Effectively performs a 'ranged' GET request + # for the part specified. Useful for downloading just a part of an + # object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_mode + # To retrieve the checksum, this mode must be enabled. 
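+    #
+    # A usage sketch (names are illustrative): fetch only the first
+    # kilobyte with `:range`, then stream the full body to disk by
+    # passing `:response_target` through to `Client#get_object`.
+    #
+    #     obj = Aws::S3::Object.new('my-bucket', 'logs.txt')
+    #     head = obj.get(range: 'bytes=0-1023').body.read
+    #     obj.get(response_target: '/tmp/logs.txt')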
+ # @return [Types::GetObjectOutput] + def get(options = {}, &block) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.get_object(options, &block) + resp.data + end + + # @example Request syntax with placeholder values + # + # multipartupload = object.initiate_multipart_upload({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_type: "ContentType", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :cache_control + # Specifies caching behavior along the request/reply chain. + # @option options [String] :content_disposition + # Specifies presentational information for the object. + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. + # @option options [String] :content_language + # The language the content is in. + # @option options [String] :content_type + # A standard MIME type describing the format of the object data. + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. 
+ # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # Specifies the ID of the symmetric customer managed key to use for + # object encryption. All GET and PUT requests for an object protected by + # Amazon Web Services KMS will fail if not made via SSL or using SigV4. + # For information about configuring using any of the officially + # supported Amazon Web Services SDKs and Amazon Web Services CLI, see + # [Specifying the Signature Version in Request Authentication][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with an object action doesn’t affect + # bucket-level settings for S3 Bucket Key. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. + # @option options [String] :object_lock_mode + # Specifies the Object Lock mode that you want to apply to the uploaded + # object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # Specifies the date and time when you want the Object Lock to expire. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the uploaded + # object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [MultipartUpload] + def initiate_multipart_upload(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.create_multipart_upload(options) + MultipartUpload.new( + bucket_name: @bucket_name, + object_key: @key, + id: resp.data.upload_id, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # object.put({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # body: source_file, + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_length: 1, + # content_md5: "ContentMD5", + # content_type: "ContentType", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the object. For more information, see + # [Canned ACL][1]. 
+ # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + # @option options [String, StringIO, File] :body + # Object data. + # @option options [String] :cache_control + # Can be used to specify caching behavior along the request/reply chain. + # For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + # @option options [String] :content_disposition + # Specifies presentational information for the object. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # @option options [String] :content_language + # The language the content is in. + # @option options [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the message (without the + # headers) according to RFC 1864. This header can be used as a message + # integrity check to verify that the data is the same data that was + # originally sent. Although it is optional, we recommend using the + # Content-MD5 mechanism as an end-to-end integrity check. For more + # information about REST request authentication, see [REST + # Authentication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # @option options [String] :content_type + # A standard MIME type describing the format of the contents. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. 
This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. + # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. For + # information about object metadata, see [Object Key and Metadata][1]. + # + # In the following example, the request header sets the redirect to an + # object (anotherPage.html) in the same bucket: + # + # `x-amz-website-redirect-location: /anotherPage.html` + # + # In the following example, the request header sets the object redirect + # to another website: + # + # `x-amz-website-redirect-location: http://www.example.com/` + # + # For more information about website hosting in Amazon S3, see [Hosting + # Websites on Amazon S3][2] and [How to Configure Website Page + # Redirects][3]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # If `x-amz-server-side-encryption` is present and has the value of + # `aws:kms`, this header specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetrical customer + # managed key that was used for the object. If you specify + # `x-amz-server-side-encryption:aws:kms`, but do not provide` + # x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the + # Amazon Web Services managed key to protect the data. If the KMS key + # does not exist in the same account issuing the command, you must use + # the full ARN and not just the ID. + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with a PUT action doesn’t affect bucket-level + # settings for S3 Bucket Key. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. 
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. (For example, "Key1=Value1") + # @option options [String] :object_lock_mode + # The Object Lock mode that you want to apply to this object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # The date and time when you want this object's Object Lock to expire. + # Must be formatted as a timestamp parameter. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether a legal hold will be applied to this object. For + # more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::PutObjectOutput] + def put(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.put_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object.restore_object({ + # version_id: "ObjectVersionId", + # restore_request: { + # days: 1, + # glacier_job_parameters: { + # tier: "Standard", # required, accepts Standard, Bulk, Expedited + # }, + # type: "SELECT", # accepts SELECT + # tier: "Standard", # accepts Standard, Bulk, Expedited + # description: "Description", + # select_parameters: { + # input_serialization: { # required + # csv: { + # file_header_info: "USE", # accepts USE, IGNORE, NONE + # comments: "Comments", + # quote_escape_character: "QuoteEscapeCharacter", + # record_delimiter: "RecordDelimiter", + # field_delimiter: "FieldDelimiter", + # quote_character: "QuoteCharacter", + # allow_quoted_record_delimiter: false, + # }, + # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 + # json: { + # type: "DOCUMENT", # accepts DOCUMENT, LINES + # }, + # parquet: { + # }, + # }, + # expression_type: "SQL", # required, accepts SQL + # expression: "Expression", # required + # output_serialization: { # required + # csv: { + # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED + # quote_escape_character: "QuoteEscapeCharacter", + # record_delimiter: "RecordDelimiter", + # field_delimiter: "FieldDelimiter", + # quote_character: "QuoteCharacter", + # }, + # json: { + # record_delimiter: "RecordDelimiter", + # }, + # }, + # }, + # output_location: { + # s3: { + # bucket_name: "BucketName", # required + # prefix: "LocationPrefix", # required + # encryption: { + # encryption_type: "AES256", # required, accepts AES256, aws:kms + # kms_key_id: "SSEKMSKeyId", + # kms_context: "KMSContext", + # }, + # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # access_control_list: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, 
WRITE_ACP, READ, READ_ACP + # }, + # ], + # tagging: { + # tag_set: [ # required + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # user_metadata: [ + # { + # name: "MetadataKey", + # value: "MetadataValue", + # }, + # ], + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # }, + # }, + # }, + # request_payer: "requester", # accepts requester + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [Types::RestoreRequest] :restore_request + # Container for restore job parameters. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::RestoreObjectOutput] + def restore_object(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.restore_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object.head({ + # if_match: "IfMatch", + # if_modified_since: Time.now, + # if_none_match: "IfNoneMatch", + # if_unmodified_since: Time.now, + # range: "Range", + # version_id: "ObjectVersionId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # part_number: 1, + # expected_bucket_owner: "AccountId", + # checksum_mode: "ENABLED", # accepts ENABLED + # }) + # @param [Hash] options ({}) + # @option options [String] :if_match + # Return the object only if its entity tag (ETag) is the same as the one + # specified; otherwise, return a 412 (precondition failed) error. + # @option options [Time,DateTime,Date,Integer,String] :if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. 
+    # @option options [String] :if_none_match
+    #   Return the object only if its entity tag (ETag) is different from the
+    #   one specified; otherwise, return a 304 (not modified) error.
+    # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since
+    #   Return the object only if it has not been modified since the specified
+    #   time; otherwise, return a 412 (precondition failed) error.
+    # @option options [String] :range
+    #   Because `HeadObject` returns only the metadata for an object, this
+    #   parameter has no effect.
+    # @option options [String] :version_id
+    #   VersionId used to reference a specific version of the object.
+    # @option options [String] :sse_customer_algorithm
+    #   Specifies the algorithm to use when encrypting the object (for
+    #   example, AES256).
+    # @option options [String] :sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 to use in
+    #   encrypting data. This value is used to store the object and then it is
+    #   discarded; Amazon S3 does not store the encryption key. The key must
+    #   be appropriate for use with the algorithm specified in the
+    #   `x-amz-server-side-encryption-customer-algorithm` header.
+    # @option options [String] :sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    # @option options [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests. For information about downloading objects from Requester
+    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    # @option options [Integer] :part_number
+    #   Part number of the object being read. This is a positive integer
+    #   between 1 and 10,000. Effectively performs a 'ranged' HEAD request
+    #   for the part specified. Useful for querying the size of the part and
+    #   the number of parts in this object.
+    # @option options [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    # @option options [String] :checksum_mode
+    #   To retrieve the checksum, this parameter must be enabled.
+    #
+    #   In addition, if you enable `ChecksumMode` and the object is encrypted
+    #   with Amazon Web Services Key Management Service (Amazon Web Services
+    #   KMS), you must have permission to use the `kms:Decrypt` action for the
+    #   request to succeed.
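+    # @example Editor's illustrative sketch (not part of the generated API
+    #   reference): inspecting object metadata without downloading the body.
+    #   The bucket and key names are hypothetical.
+    #
+    #     obj = Aws::S3::Object.new(bucket_name: 'example-bucket', key: 'report.csv')
+    #     meta = obj.head
+    #     puts "#{meta.content_length} bytes, ETag #{meta.etag}"
+    #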
+ # @return [Types::HeadObjectOutput] + def head(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.head_object(options) + resp.data + end + + # @!group Associations + + # @return [ObjectAcl] + def acl + ObjectAcl.new( + bucket_name: @bucket_name, + object_key: @key, + client: @client + ) + end + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @param [String] id + # @return [MultipartUpload] + def multipart_upload(id) + MultipartUpload.new( + bucket_name: @bucket_name, + object_key: @key, + id: id, + client: @client + ) + end + + # @param [String] id + # @return [ObjectVersion] + def version(id) + ObjectVersion.new( + bucket_name: @bucket_name, + object_key: @key, + id: id, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { + bucket_name: @bucket_name, + key: @key + } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_key(args, options) + value = args[1] || options.delete(:key) + case value + when String then value + when nil then raise ArgumentError, "missing required option :key" + else + msg = "expected :key to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def yield_waiter_and_warn(waiter, &block) + if !@waiter_block_warned + msg = "pass options to configure the waiter; "\ + "yielding the waiter is deprecated" + warn(msg) + @waiter_block_warned = true + end + yield(waiter.waiter) + end + + def separate_params_and_options(options) + opts = Set.new( + [:client, :max_attempts, :delay, :before_attempt, :before_wait] + ) + waiter_opts = {} + waiter_params = {} + options.each_pair do |key, value| + if opts.include?(key) + waiter_opts[key] = value + else + waiter_params[key] = value + end + end + waiter_opts[:client] ||= @client + [waiter_opts, waiter_params] + end + + class Collection < Aws::Resources::Collection + + # @!group Batch Actions + + # @example Request syntax with placeholder values + # + # object.batch_delete!({ + # mfa: "MFA", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param options ({}) + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Boolean] :bypass_governance_retention + # Specifies whether you want to delete this object even if it has a + # Governance-type Object Lock in place. 
To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # This checksum algorithm must be the same for all parts and it match + # the checksum value supplied in the `CreateMultipartUpload` request. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [void] + def batch_delete!(options = {}) + batch_enum.each do |batch| + params = Aws::Util.copy_hash(options) + params[:bucket] = batch[0].bucket_name + params[:delete] ||= {} + params[:delete][:objects] ||= [] + batch.each do |item| + params[:delete][:objects] << { + key: item.key + } + end + batch[0].client.delete_objects(params) + end + nil + end + + # @!endgroup + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_acl.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_acl.rb new file mode 100644 index 0000000..438bada --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_acl.rb @@ -0,0 +1,368 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class ObjectAcl + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, object_key, options = {}) + # @param [String] bucket_name + # @param [String] object_key + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [required, String] :object_key + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @object_key = extract_object_key(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # @return [String] + def object_key + @object_key + end + + # Container for the bucket owner's display name and ID. + # @return [Types::Owner] + def owner + data[:owner] + end + + # A list of grants. + # @return [Array] + def grants + data[:grants] + end + + # If present, indicates that the requester was successfully charged for + # the request. 
+    # @return [String]
+    def request_charged
+      data[:request_charged]
+    end
+
+    # @!endgroup
+
+    # @return [Client]
+    def client
+      @client
+    end
+
+    # Loads, or reloads {#data} for the current {ObjectAcl}.
+    # Returns `self` making it possible to chain methods.
+    #
+    #     object_acl.reload.data
+    #
+    # @return [self]
+    def load
+      resp = @client.get_object_acl(
+        bucket: @bucket_name,
+        key: @object_key
+      )
+      @data = resp.data
+      self
+    end
+    alias :reload :load
+
+    # @return [Types::GetObjectAclOutput]
+    #   Returns the data for this {ObjectAcl}. Calls
+    #   {Client#get_object_acl} if {#data_loaded?} is `false`.
+    def data
+      load unless @data
+      @data
+    end
+
+    # @return [Boolean]
+    #   Returns `true` if this resource is loaded. Accessing attributes or
+    #   {#data} on an unloaded resource will trigger a call to {#load}.
+    def data_loaded?
+      !!@data
+    end
+
+    # @deprecated Use [Aws::S3::Client] #wait_until instead
+    #
+    # Waiter polls an API operation until a resource enters a desired
+    # state.
+    #
+    # @note The waiting operation is performed on a copy. The original resource
+    #   remains unchanged.
+    #
+    # ## Basic Usage
+    #
+    # A waiter polls until it is successful, until it fails by entering a
+    # terminal state, or until a maximum number of attempts are made.
+    #
+    #     # polls in a loop until condition is true
+    #     resource.wait_until(options) {|resource| condition}
+    #
+    # ## Example
+    #
+    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
+    #       instance.state.name == 'running'
+    #     end
+    #
+    # ## Configuration
+    #
+    # You can configure the maximum number of polling attempts, and the
+    # delay (in seconds) between each polling attempt. The waiting condition is
+    # set by passing a block to {#wait_until}:
+    #
+    #     # poll for ~25 seconds
+    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+    #
+    # ## Callbacks
+    #
+    # You can be notified before each polling attempt and before each
+    # delay. If you throw `:success` or `:failure` from these callbacks,
+    # it will terminate the waiter.
+    #
+    #     started_at = Time.now
+    #     # poll for 1 hour, instead of a number of attempts
+    #     proc = Proc.new do |attempts, response|
+    #       throw :failure if Time.now - started_at > 3600
+    #     end
+    #
+    #     # disable max attempts
+    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+    #
+    # ## Handling Errors
+    #
+    # When a waiter is successful, it returns the Resource. When a waiter
+    # fails, it raises an error.
+    #
+    #     begin
+    #       resource.wait_until(...)
+    #     rescue Aws::Waiters::Errors::WaiterFailed
+    #       # resource did not enter the desired state in time
+    #     end
+    #
+    # @yieldparam [Resource] resource to be used in the waiting condition.
+    #
+    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+    #   terminates because the waiter has entered a state that it will not
+    #   transition out of, preventing success.
+    #
+    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+    #   configured maximum number of attempts have been made and the waiter
+    #   is not yet successful.
+    #
+    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+    #   encountered while polling for a resource that is not expected.
+    #
+    # @raise [NotImplementedError] Raised when the resource does not
+    #   support `#reload`, which polling requires.
+    #
+    # @option options [Integer] :max_attempts (10) Maximum number of
+    #   attempts
+    # @option options [Integer] :delay (10) Delay between each
+    #   attempt in seconds
+    # @option options [Proc] :before_attempt (nil) Callback
+    #   invoked before each attempt
+    # @option options [Proc] :before_wait (nil) Callback
+    #   invoked before each wait
+    # @return [Resource] if the waiter was successful
+    def wait_until(options = {}, &block)
+      self_copy = self.dup
+      attempts = 0
+      options[:max_attempts] = 10 unless options.key?(:max_attempts)
+      options[:delay] ||= 10
+      options[:poller] = Proc.new do
+        attempts += 1
+        if block.call(self_copy)
+          [:success, self_copy]
+        else
+          self_copy.reload unless attempts == options[:max_attempts]
+          :retry
+        end
+      end
+      Aws::Waiters::Waiter.new(options).wait({})
+    end
+
+    # @!group Actions
+
+    # @example Request syntax with placeholder values
+    #
+    #   object_acl.put({
+    #     acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
+    #     access_control_policy: {
+    #       grants: [
+    #         {
+    #           grantee: {
+    #             display_name: "DisplayName",
+    #             email_address: "EmailAddress",
+    #             id: "ID",
+    #             type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group
+    #             uri: "URI",
+    #           },
+    #           permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP
+    #         },
+    #       ],
+    #       owner: {
+    #         display_name: "DisplayName",
+    #         id: "ID",
+    #       },
+    #     },
+    #     content_md5: "ContentMD5",
+    #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+    #     grant_full_control: "GrantFullControl",
+    #     grant_read: "GrantRead",
+    #     grant_read_acp: "GrantReadACP",
+    #     grant_write: "GrantWrite",
+    #     grant_write_acp: "GrantWriteACP",
+    #     request_payer: "requester", # accepts requester
+    #     version_id: "ObjectVersionId",
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    # @param [Hash] options ({})
+    # @option options [String] :acl
+    #   The canned ACL to apply to the object. For more information, see
+    #   [Canned ACL][1].
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+    # @option options [Types::AccessControlPolicy] :access_control_policy
+    #   Contains the elements that set the ACL permissions for an object per
+    #   grantee.
+    # @option options [String] :content_md5
+    #   The base64-encoded 128-bit MD5 digest of the data. This header must be
+    #   used as a message integrity check to verify that the request body was
+    #   not corrupted in transit. For more information, go to [RFC 1864][1].
+    #
+    #   For requests made using the Amazon Web Services Command Line Interface
+    #   (CLI) or Amazon Web Services SDKs, this field is calculated
+    #   automatically.
+    #
+    #
+    #
+    #   [1]: http://www.ietf.org/rfc/rfc1864.txt
+    # @option options [String] :checksum_algorithm
+    #   Indicates the algorithm used to create the checksum for the object
+    #   when using the SDK. This header will not provide any additional
+    #   functionality if not using the SDK. When sending this header, there
+    #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+    #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+    #   `400 Bad Request`. For more information, see [Checking object
+    #   integrity][1] in the *Amazon S3 User Guide*.
+    #
+    #   If you provide an individual checksum, Amazon S3 ignores any provided
+    #   `ChecksumAlgorithm` parameter.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions on + # the bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to list the objects in the bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the bucket ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
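+    # @example Editor's illustrative sketch (not part of the generated API
+    #   reference): applying a canned ACL to an existing object. The bucket
+    #   and key names are hypothetical.
+    #
+    #     acl = Aws::S3::ObjectAcl.new(
+    #       bucket_name: 'example-bucket',
+    #       object_key: 'index.html'
+    #     )
+    #     acl.put(acl: 'public-read')
+    #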
+ # @return [Types::PutObjectAclOutput] + def put(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @object_key + ) + resp = @client.put_object_acl(options) + resp.data + end + + # @!group Associations + + # @return [Object] + def object + Object.new( + bucket_name: @bucket_name, + key: @object_key, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { + bucket_name: @bucket_name, + object_key: @object_key + } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_object_key(args, options) + value = args[1] || options.delete(:object_key) + case value + when String then value + when nil then raise ArgumentError, "missing required option :object_key" + else + msg = "expected :object_key to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + class Collection < Aws::Resources::Collection; end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_copier.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_copier.rb new file mode 100644 index 0000000..ba3ddb3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_copier.rb @@ -0,0 +1,101 @@ +# frozen_string_literal: true + +require 'thread' + +module Aws + module S3 + # @api private + class ObjectCopier + + # @param [S3::Object] object + def initialize(object, options = {}) + @object = object + @options = options.merge(client: @object.client) + end + + def copy_from(source, options = {}) + copy_object(source, @object, merge_options(source, options)) + end + + def copy_to(target, options = {}) + copy_object(@object, target, merge_options(target, options)) + end + + private + + def copy_object(source, target, options) + target_bucket, target_key = copy_target(target) + options[:bucket] = target_bucket + options[:key] = target_key + options[:copy_source] = copy_source(source) + if options.delete(:multipart_copy) + apply_source_client(source, options) + ObjectMultipartCopier.new(@options).copy(options) + else + @object.client.copy_object(options) + end + end + + def copy_source(source) + case source + when String then source + when Hash + src = "#{source[:bucket]}/#{escape(source[:key])}" + src += "?versionId=#{source[:version_id]}" if source.key?(:version_id) + src + when S3::Object, S3::ObjectSummary + "#{source.bucket_name}/#{escape(source.key)}" + when S3::ObjectVersion + "#{source.bucket_name}/#{escape(source.object_key)}?versionId=#{source.id}" + else + msg = "expected source to be an Aws::S3::Object, Hash, or String" + raise ArgumentError, msg + end + end + + def copy_target(target) + case target + when String then target.match(/([^\/]+?)\/(.+)/)[1,2] + when Hash then target.values_at(:bucket, :key) + when S3::Object then [target.bucket_name, target.key] + else + msg = "expected target to be an Aws::S3::Object, Hash, or String" + raise ArgumentError, msg + end + end + + def merge_options(source_or_target, options) + if Hash === source_or_target + source_or_target.inject(options.dup) do |opts, (key, value)| + opts[key] = value unless [:bucket, :key, :version_id].include?(key) + opts + end + else + options.dup + end + end + + def 
apply_source_client(source, options)
+
+        if source.respond_to?(:client)
+          options[:copy_source_client] ||= source.client
+        end
+
+        if options[:copy_source_region]
+          config = @object.client.config
+          config = config.each_pair.inject({}) { |h, (k,v)| h[k] = v; h }
+          config[:region] = options.delete(:copy_source_region)
+          options[:copy_source_client] ||= S3::Client.new(config)
+        end
+
+        options[:copy_source_client] ||= @object.client
+      end
+
+      def escape(str)
+        Seahorse::Util.uri_path_escape(str)
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_multipart_copier.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_multipart_copier.rb
new file mode 100644
index 0000000..864748d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_multipart_copier.rb
@@ -0,0 +1,188 @@
+# frozen_string_literal: true

+require 'thread'
+require 'cgi'

+module Aws
+  module S3
+    # @api private
+    class ObjectMultipartCopier

+      FIVE_MB = 5 * 1024 * 1024 # 5MB

+      FILE_TOO_SMALL = "unable to multipart copy files smaller than 5MB"

+      MAX_PARTS = 10_000

+      # @option options [Client] :client The client to use for API calls;
+      #   when omitted, a default `Client` will be constructed.
+      # @option options [Integer] :min_part_size (52428800) Size of copied
+      #   parts. Defaults to 50MB.
+      # @option options [Integer] :thread_count (10) Number of concurrent
+      #   threads to use for copying parts.
+      def initialize(options = {})
+        @thread_count = options.delete(:thread_count) || 10
+        @min_part_size = options.delete(:min_part_size) || (FIVE_MB * 10)
+        @client = options[:client] || Client.new
+        if options[:checksum_algorithm]
+          raise ArgumentError, 'Multipart Copy does not support setting :checksum_algorithm'
+        end
+      end

+      # @return [Client]
+      attr_reader :client

+      # @option (see S3::Client#copy_object)
+      def copy(options = {})
+        metadata = source_metadata(options)
+        size = metadata[:content_length]
+        options[:upload_id] = initiate_upload(metadata.merge(options))
+        begin
+          parts = copy_parts(size, default_part_size(size), options)
+          complete_upload(parts, options)
+        rescue => error
+          abort_upload(options)
+          raise error
+        end
+      end

+      private

+      def initiate_upload(options)
+        options = options_for(:create_multipart_upload, options)
+        @client.create_multipart_upload(options).upload_id
+      end

+      def copy_parts(size, default_part_size, options)
+        queue = PartQueue.new(compute_parts(size, default_part_size, options))
+        threads = []
+        @thread_count.times do
+          threads << copy_part_thread(queue)
+        end
+        threads.map(&:value).flatten.sort_by{ |part| part[:part_number] }
+      end

+      def copy_part_thread(queue)
+        Thread.new do
+          begin
+            completed = []
+            while part = queue.shift
+              completed << copy_part(part)
+            end
+            completed
+          rescue => error
+            queue.clear!
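+            # The shared queue is now drained, so sibling copy threads finish
+            # their in-flight part and exit; re-raising fails the whole copy.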
+ raise error + end + end + end + + def copy_part(part) + { + etag: @client.upload_part_copy(part).copy_part_result.etag, + part_number: part[:part_number], + } + end + + def complete_upload(parts, options) + options = options_for(:complete_multipart_upload, options) + options[:multipart_upload] = { parts: parts } + @client.complete_multipart_upload(options) + end + + def abort_upload(options) + @client.abort_multipart_upload({ + bucket: options[:bucket], + key: options[:key], + upload_id: options[:upload_id], + }) + end + + def compute_parts(size, default_part_size, options) + part_number = 1 + offset = 0 + parts = [] + options = options_for(:upload_part_copy, options) + while offset < size + parts << options.merge({ + part_number: part_number, + copy_source_range: byte_range(offset, default_part_size, size), + }) + part_number += 1 + offset += default_part_size + end + parts + end + + def byte_range(offset, default_part_size, size) + if offset + default_part_size < size + "bytes=#{offset}-#{offset + default_part_size - 1}" + else + "bytes=#{offset}-#{size - 1}" + end + end + + def source_metadata(options) + if options[:content_length] + return { content_length: options.delete(:content_length) } + end + + client = options[:copy_source_client] || @client + + if vid_match = options[:copy_source].match(/([^\/]+?)\/(.+)\?versionId=(.+)/) + bucket, key, version_id = vid_match[1,3] + else + bucket, key = options[:copy_source].match(/([^\/]+?)\/(.+)/)[1,2] + end + + key = CGI.unescape(key) + opts = { bucket: bucket, key: key } + opts[:version_id] = version_id if version_id + client.head_object(opts).to_h + end + + def default_part_size(source_size) + if source_size < FIVE_MB + raise ArgumentError, FILE_TOO_SMALL + else + [(source_size.to_f / MAX_PARTS).ceil, @min_part_size].max.to_i + end + end + + def options_for(operation_name, options) + API_OPTIONS[operation_name].inject({}) do |hash, opt_name| + hash[opt_name] = options[opt_name] if options.key?(opt_name) + hash + end + end + + # @api private + def self.options_for(shape_name) + Client.api.metadata['shapes'][shape_name].member_names + end + + API_OPTIONS = { + create_multipart_upload: Types::CreateMultipartUploadRequest.members, + upload_part_copy: Types::UploadPartCopyRequest.members, + complete_multipart_upload: Types::CompleteMultipartUploadRequest.members, + } + + class PartQueue + + def initialize(parts = []) + @parts = parts + @mutex = Mutex.new + end + + def shift + @mutex.synchronize { @parts.shift } + end + + def clear! + @mutex.synchronize { @parts.clear } + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_summary.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_summary.rb new file mode 100644 index 0000000..7aeb193 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_summary.rb @@ -0,0 +1,1490 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. 
See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + class ObjectSummary + + extend Aws::Deprecations + + # @overload def initialize(bucket_name, key, options = {}) + # @param [String] bucket_name + # @param [String] key + # @option options [Client] :client + # @overload def initialize(options = {}) + # @option options [required, String] :bucket_name + # @option options [required, String] :key + # @option options [Client] :client + def initialize(*args) + options = Hash === args.last ? args.pop.dup : {} + @bucket_name = extract_bucket_name(args, options) + @key = extract_key(args, options) + @data = options.delete(:data) + @client = options.delete(:client) || Client.new(options) + @waiter_block_warned = false + end + + # @!group Read-Only Attributes + + # @return [String] + def bucket_name + @bucket_name + end + + # @return [String] + def key + @key + end + + # Creation date of the object. + # @return [Time] + def last_modified + data[:last_modified] + end + + # The entity tag is a hash of the object. The ETag reflects changes only + # to the contents of an object, not its metadata. The ETag may or may + # not be an MD5 digest of the object data. Whether or not it is depends + # on how the object was created and how it is encrypted as described + # below: + # + # * Objects created by the PUT Object, POST Object, or Copy operation, + # or through the Amazon Web Services Management Console, and are + # encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest + # of their object data. + # + # * Objects created by the PUT Object, POST Object, or Copy operation, + # or through the Amazon Web Services Management Console, and are + # encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest + # of their object data. + # + # * If an object is created by either the Multipart Upload or Part Copy + # operation, the ETag is not an MD5 digest, regardless of the method + # of encryption. If an object is larger than 16 MB, the Amazon Web + # Services Management Console will upload or copy that object as a + # Multipart Upload, and therefore the ETag will not be an MD5 digest. + # @return [String] + def etag + data[:etag] + end + + # The algorithm that was used to create a checksum of the object. + # @return [Array] + def checksum_algorithm + data[:checksum_algorithm] + end + + # Size in bytes of the object + # @return [Integer] + def size + data[:size] + end + + # The class of storage used to store the object. + # @return [String] + def storage_class + data[:storage_class] + end + + # The owner of the object + # @return [Types::Owner] + def owner + data[:owner] + end + + # @!endgroup + + # @return [Client] + def client + @client + end + + # @raise [NotImplementedError] + # @api private + def load + msg = "#load is not implemented, data only available via enumeration" + raise NotImplementedError, msg + end + alias :reload :load + + # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. + # @return [Types::Object] + # Returns the data for this {ObjectSummary}. + def data + load unless @data + @data + end + + # @return [Boolean] + # Returns `true` if this resource is loaded. Accessing attributes or + # {#data} on an unloaded resource will trigger a call to {#load}. + def data_loaded? + !!@data + end + + # @param [Hash] options ({}) + # @return [Boolean] + # Returns `true` if the ObjectSummary exists. 
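+    # @example Editor's illustrative sketch (not part of the generated API
+    #   reference); the bucket and key names are hypothetical:
+    #
+    #     summary = Aws::S3::ObjectSummary.new(bucket_name: 'example-bucket', key: 'data.bin')
+    #     summary.exists? # => true/false, via the ObjectExists waiter
+    #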
+ def exists?(options = {})
+ begin
+ wait_until_exists(options.merge(max_attempts: 1))
+ true
+ rescue Aws::Waiters::Errors::UnexpectedError => e
+ raise e.error
+ rescue Aws::Waiters::Errors::WaiterFailed
+ false
+ end
+ end
+
+ # @param [Hash] options ({})
+ # @option options [Integer] :max_attempts (20)
+ # @option options [Float] :delay (5)
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ # @return [ObjectSummary]
+ def wait_until_exists(options = {}, &block)
+ options, params = separate_params_and_options(options)
+ waiter = Waiters::ObjectExists.new(options)
+ yield_waiter_and_warn(waiter, &block) if block_given?
+ waiter.wait(params.merge(bucket: @bucket_name,
+ key: @key))
+ ObjectSummary.new({
+ bucket_name: @bucket_name,
+ key: @key,
+ client: @client
+ })
+ end
+
+ # @param [Hash] options ({})
+ # @option options [Integer] :max_attempts (20)
+ # @option options [Float] :delay (5)
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ # @return [ObjectSummary]
+ def wait_until_not_exists(options = {}, &block)
+ options, params = separate_params_and_options(options)
+ waiter = Waiters::ObjectNotExists.new(options)
+ yield_waiter_and_warn(waiter, &block) if block_given?
+ waiter.wait(params.merge(bucket: @bucket_name,
+ key: @key))
+ ObjectSummary.new({
+ bucket_name: @bucket_name,
+ key: @key,
+ client: @client
+ })
+ end
+
+ # @deprecated Use [Aws::S3::Client] #wait_until instead
+ #
+ # Waiter polls an API operation until a resource enters a desired
+ # state.
+ #
+ # @note The waiting operation is performed on a copy. The original resource
+ # remains unchanged.
+ #
+ # ## Basic Usage
+ #
+ # The waiter polls until it is successful, until it fails by
+ # entering a terminal state, or until a maximum number of attempts
+ # are made.
+ #
+ # # polls in a loop until condition is true
+ # resource.wait_until(options) {|resource| condition}
+ #
+ # ## Example
+ #
+ # instance.wait_until(max_attempts:10, delay:5) do |instance|
+ # instance.state.name == 'running'
+ # end
+ #
+ # ## Configuration
+ #
+ # You can configure the maximum number of polling attempts, and the
+ # delay (in seconds) between each polling attempt. The waiting condition is
+ # set by passing a block to {#wait_until}:
+ #
+ # # poll for ~25 seconds
+ # resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+ #
+ # ## Callbacks
+ #
+ # You can be notified before each polling attempt and before each
+ # delay. If you throw `:success` or `:failure` from these callbacks,
+ # it will terminate the waiter.
+ #
+ # started_at = Time.now
+ # # poll for 1 hour, instead of a number of attempts
+ # proc = Proc.new do |attempts, response|
+ # throw :failure if Time.now - started_at > 3600
+ # end
+ #
+ # # disable max attempts
+ # instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+ #
+ # ## Handling Errors
+ #
+ # When a waiter is successful, it returns the Resource. When a waiter
+ # fails, it raises an error.
+ #
+ # begin
+ # resource.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed
+ # # resource did not enter the desired state in time
+ # end
+ #
+ # @yieldparam [Resource] resource to be used in the waiting condition.
+ #
+ # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+ # terminates because the waiter has entered a state that it will not
+ # transition out of, preventing success.
+ #
+ # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+ # configured maximum number of attempts have been made and the waiter
+ # is not yet successful.
+ #
+ # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an
+ # unexpected error is encountered while polling for a resource.
+ #
+ # @raise [NotImplementedError] Raised when the resource does not
+ # support `#reload`.
+ #
+ # @option options [Integer] :max_attempts (10) Maximum number of
+ # attempts
+ # @option options [Integer] :delay (10) Delay between each
+ # attempt in seconds
+ # @option options [Proc] :before_attempt (nil) Callback
+ # invoked before each attempt
+ # @option options [Proc] :before_wait (nil) Callback
+ # invoked before each wait
+ # @return [Resource] if the waiter was successful
+ def wait_until(options = {}, &block)
+ self_copy = self.dup
+ attempts = 0
+ options[:max_attempts] = 10 unless options.key?(:max_attempts)
+ options[:delay] ||= 10
+ options[:poller] = Proc.new do
+ attempts += 1
+ if block.call(self_copy)
+ [:success, self_copy]
+ else
+ self_copy.reload unless attempts == options[:max_attempts]
+ :retry
+ end
+ end
+ Aws::Waiters::Waiter.new(options).wait({})
+ end
+
+ # @!group Actions
+
+ # @example Request syntax with placeholder values
+ #
+ # object_summary.copy_from({
+ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
+ # cache_control: "CacheControl",
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # content_disposition: "ContentDisposition",
+ # content_encoding: "ContentEncoding",
+ # content_language: "ContentLanguage",
+ # content_type: "ContentType",
+ # copy_source: "CopySource", # required
+ # copy_source_if_match: "CopySourceIfMatch",
+ # copy_source_if_modified_since: Time.now,
+ # copy_source_if_none_match: "CopySourceIfNoneMatch",
+ # copy_source_if_unmodified_since: Time.now,
+ # expires: Time.now,
+ # grant_full_control: "GrantFullControl",
+ # grant_read: "GrantRead",
+ # grant_read_acp: "GrantReadACP",
+ # grant_write_acp: "GrantWriteACP",
+ # metadata: {
+ # "MetadataKey" => "MetadataValue",
+ # },
+ # metadata_directive: "COPY", # accepts COPY, REPLACE
+ # tagging_directive: "COPY", # accepts COPY, REPLACE
+ # server_side_encryption: "AES256", # accepts AES256, aws:kms
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW
+ # website_redirect_location: "WebsiteRedirectLocation",
+ # sse_customer_algorithm: "SSECustomerAlgorithm",
+ # sse_customer_key: "SSECustomerKey",
+ # sse_customer_key_md5: "SSECustomerKeyMD5",
+ # ssekms_key_id: "SSEKMSKeyId",
+ # ssekms_encryption_context: "SSEKMSEncryptionContext",
+ # bucket_key_enabled: false,
+ # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm",
+ # copy_source_sse_customer_key: "CopySourceSSECustomerKey",
+ # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5",
+ # request_payer: "requester", # accepts requester
+ # tagging: "TaggingHeader",
+ # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE
+ # object_lock_retain_until_date: Time.now,
+ # object_lock_legal_hold_status: "ON", # accepts ON, OFF
+ # expected_bucket_owner: "AccountId",
+ # expected_source_bucket_owner: "AccountId",
+ # })
+ # @param [Hash] options ({})
+ # @option options [String] :acl
+ # The canned ACL to apply to the object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @option options [String] :cache_control
+ # Specifies caching behavior along the request/reply chain.
+ # @option options [String] :checksum_algorithm
+ # Indicates the algorithm you want Amazon S3 to use to create the
+ # checksum for the object. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @option options [String] :content_disposition
+ # Specifies presentational information for the object.
+ # @option options [String] :content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the media-type
+ # referenced by the Content-Type header field.
+ # @option options [String] :content_language
+ # The language the content is in.
+ # @option options [String] :content_type
+ # A standard MIME type describing the format of the object data.
+ # @option options [required, String] :copy_source
+ # Specifies the source object for the copy operation. You specify the
+ # value in one of two formats, depending on whether you want to access
+ # the source object through an [access point][1]:
+ #
+ # * For objects not accessed through an access point, specify the name
+ # of the source bucket and the key of the source object, separated by
+ # a slash (/). For example, to copy the object `reports/january.pdf`
+ # from the bucket `awsexamplebucket`, use
+ # `awsexamplebucket/reports/january.pdf`. The value must be
+ # URL-encoded.
+ #
+ # * For objects accessed through access points, specify the Amazon
+ # Resource Name (ARN) of the object as accessed through the access
+ # point, in the format
+ # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
+ # For example, to copy the object `reports/january.pdf` through access
+ # point `my-access-point` owned by account `123456789012` in Region
+ # `us-west-2`, use the URL encoding of
+ # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # Amazon S3 supports copy operations using access points only when the
+ # source and destination buckets are in the same Amazon Web Services
+ # Region.
+ #
+ #
+ #
+ # Alternatively, for objects accessed through Amazon S3 on Outposts,
+ # specify the ARN of the object as accessed in the format
+ # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
+ # For example, to copy the object `reports/january.pdf` through
+ # outpost `my-outpost` owned by account `123456789012` in Region
+ # `us-west-2`, use the URL encoding of
+ # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # To copy a specific version of an object, append
+ # `?versionId=<version-id>` to the value (for example,
+ # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
+ # If you don't specify a version ID, Amazon S3 copies the latest
+ # version of the source object.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ # @option options [String] :copy_source_if_match
+ # Copies the object if its entity tag (ETag) matches the specified tag.
+ # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since
+ # Copies the object if it has been modified since the specified time.
+ # @option options [String] :copy_source_if_none_match
+ # Copies the object if its entity tag (ETag) is different from the
+ # specified ETag.
+ # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since + # Copies the object if it hasn't been modified since the specified + # time. + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. + # @option options [String] :metadata_directive + # Specifies whether the metadata is copied from the source object or + # replaced with metadata provided in the request. + # @option options [String] :tagging_directive + # Specifies whether the object tag-set are copied from the source object + # or replaced with tag-set provided in the request. + # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # Specifies the Amazon Web Services KMS key ID to use for object + # encryption. All GET and PUT requests for an object protected by Amazon + # Web Services KMS will fail if not made via SSL or using SigV4. 
For + # information about configuring using any of the officially supported + # Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying + # the Signature Version in Request Authentication][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with a COPY action doesn’t affect bucket-level + # settings for S3 Bucket Key. + # @option options [String] :copy_source_sse_customer_algorithm + # Specifies the algorithm to use when decrypting the source object (for + # example, AES256). + # @option options [String] :copy_source_sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use to + # decrypt the source object. The encryption key provided in this header + # must be one that was used when the source object was created. + # @option options [String] :copy_source_sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object destination object this value must be used + # in conjunction with the `TaggingDirective`. The tag-set must be + # encoded as URL Query parameters. + # @option options [String] :object_lock_mode + # The Object Lock mode that you want to apply to the copied object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # The date and time when you want the copied object's Object Lock to + # expire. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the copied object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected destination bucket owner. If the + # destination bucket is owned by a different account, the request fails + # with the HTTP status code `403 Forbidden` (access denied). + # @option options [String] :expected_source_bucket_owner + # The account ID of the expected source bucket owner. If the source + # bucket is owned by a different account, the request fails with the + # HTTP status code `403 Forbidden` (access denied). 
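+ # @example Copying from another bucket (an illustrative sketch added in
+ #   review; the source bucket, key, and version ID are hypothetical)
+ #
+ #   # copy_source must be URL-encoded, per the :copy_source docs above
+ #   object_summary.copy_from(copy_source: "source-bucket/reports/january.pdf")
+ #
+ #   # copying a specific version of the source object
+ #   object_summary.copy_from(
+ #     copy_source: "source-bucket/reports/january.pdf?versionId=abc123"
+ #   )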
+ # @return [Types::CopyObjectOutput] + def copy_from(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.copy_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object_summary.delete({ + # mfa: "MFA", + # version_id: "ObjectVersionId", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Boolean] :bypass_governance_retention + # Indicates whether S3 Object Lock should bypass Governance-mode + # restrictions to process this operation. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::DeleteObjectOutput] + def delete(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.delete_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object_summary.get({ + # if_match: "IfMatch", + # if_modified_since: Time.now, + # if_none_match: "IfNoneMatch", + # if_unmodified_since: Time.now, + # range: "Range", + # response_cache_control: "ResponseCacheControl", + # response_content_disposition: "ResponseContentDisposition", + # response_content_encoding: "ResponseContentEncoding", + # response_content_language: "ResponseContentLanguage", + # response_content_type: "ResponseContentType", + # response_expires: Time.now, + # version_id: "ObjectVersionId", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # request_payer: "requester", # accepts requester + # part_number: 1, + # expected_bucket_owner: "AccountId", + # checksum_mode: "ENABLED", # accepts ENABLED + # }) + # @param [Hash] options ({}) + # @option options [String] :if_match + # Return the object only if its entity tag (ETag) is the same as the one + # specified; otherwise, return a 412 (precondition failed) error. + # @option options [Time,DateTime,Date,Integer,String] :if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. + # @option options [String] :if_none_match + # Return the object only if its entity tag (ETag) is different from the + # one specified; otherwise, return a 304 (not modified) error. 
+ # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since + # Return the object only if it has not been modified since the specified + # time; otherwise, return a 412 (precondition failed) error. + # @option options [String] :range + # Downloads the specified range bytes of an object. For more information + # about the HTTP Range header, see + # [https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1]. + # + # Amazon S3 doesn't support retrieving multiple ranges of data per + # `GET` request. + # + # + # + # + # + # [1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + # @option options [String] :response_cache_control + # Sets the `Cache-Control` header of the response. + # @option options [String] :response_content_disposition + # Sets the `Content-Disposition` header of the response + # @option options [String] :response_content_encoding + # Sets the `Content-Encoding` header of the response. + # @option options [String] :response_content_language + # Sets the `Content-Language` header of the response. + # @option options [String] :response_content_type + # Sets the `Content-Type` header of the response. + # @option options [Time,DateTime,Date,Integer,String] :response_expires + # Sets the `Expires` header of the response. + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when decrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 used to + # encrypt the data. This value is used to decrypt the object when + # recovering it and must match the one used when storing the data. The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Integer] :part_number + # Part number of the object being read. This is a positive integer + # between 1 and 10,000. Effectively performs a 'ranged' GET request + # for the part specified. Useful for downloading just a part of an + # object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_mode + # To retrieve the checksum, this mode must be enabled. 
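+ # @example Reading part of the object (an illustrative sketch added in
+ #   review; the byte range is arbitrary)
+ #
+ #   # a ranged GET for the first kilobyte; resp.body is an IO-like
+ #   # object unless a download target is supplied
+ #   resp = object_summary.get(range: "bytes=0-1023")
+ #   first_kb = resp.body.read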
+ # @return [Types::GetObjectOutput] + def get(options = {}, &block) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.get_object(options, &block) + resp.data + end + + # @example Request syntax with placeholder values + # + # multipartupload = object_summary.initiate_multipart_upload({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_type: "ContentType", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :cache_control + # Specifies caching behavior along the request/reply chain. + # @option options [String] :content_disposition + # Specifies presentational information for the object. + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. + # @option options [String] :content_language + # The language the content is in. + # @option options [String] :content_type + # A standard MIME type describing the format of the object data. + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. 
+ # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # Specifies the ID of the symmetric customer managed key to use for + # object encryption. All GET and PUT requests for an object protected by + # Amazon Web Services KMS will fail if not made via SSL or using SigV4. + # For information about configuring using any of the officially + # supported Amazon Web Services SDKs and Amazon Web Services CLI, see + # [Specifying the Signature Version in Request Authentication][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with an object action doesn’t affect + # bucket-level settings for S3 Bucket Key. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. + # @option options [String] :object_lock_mode + # Specifies the Object Lock mode that you want to apply to the uploaded + # object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # Specifies the date and time when you want the Object Lock to expire. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the uploaded + # object. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [MultipartUpload] + def initiate_multipart_upload(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.create_multipart_upload(options) + MultipartUpload.new( + bucket_name: @bucket_name, + object_key: @key, + id: resp.data.upload_id, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # object_summary.put({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # body: source_file, + # cache_control: "CacheControl", + # content_disposition: "ContentDisposition", + # content_encoding: "ContentEncoding", + # content_language: "ContentLanguage", + # content_length: 1, + # content_md5: "ContentMD5", + # content_type: "ContentType", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # checksum_crc32: "ChecksumCRC32", + # checksum_crc32c: "ChecksumCRC32C", + # checksum_sha1: "ChecksumSHA1", + # checksum_sha256: "ChecksumSHA256", + # expires: Time.now, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write_acp: "GrantWriteACP", + # metadata: { + # "MetadataKey" => "MetadataValue", + # }, + # server_side_encryption: "AES256", # accepts AES256, aws:kms + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # website_redirect_location: "WebsiteRedirectLocation", + # sse_customer_algorithm: "SSECustomerAlgorithm", + # sse_customer_key: "SSECustomerKey", + # sse_customer_key_md5: "SSECustomerKeyMD5", + # ssekms_key_id: "SSEKMSKeyId", + # ssekms_encryption_context: "SSEKMSEncryptionContext", + # bucket_key_enabled: false, + # request_payer: "requester", # accepts requester + # tagging: "TaggingHeader", + # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE + # object_lock_retain_until_date: Time.now, + # object_lock_legal_hold_status: "ON", # accepts ON, OFF + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the object. For more information, see + # [Canned ACL][1]. 
+ # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + # @option options [String, StringIO, File] :body + # Object data. + # @option options [String] :cache_control + # Can be used to specify caching behavior along the request/reply chain. + # For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + # @option options [String] :content_disposition + # Specifies presentational information for the object. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # @option options [String] :content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the media-type + # referenced by the Content-Type header field. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # @option options [String] :content_language + # The language the content is in. + # @option options [Integer] :content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + # @option options [String] :content_md5 + # The base64-encoded 128-bit MD5 digest of the message (without the + # headers) according to RFC 1864. This header can be used as a message + # integrity check to verify that the data is the same data that was + # originally sent. Although it is optional, we recommend using the + # Content-MD5 mechanism as an end-to-end integrity check. For more + # information about REST request authentication, see [REST + # Authentication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # @option options [String] :content_type + # A standard MIME type describing the format of the contents. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. 
This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For + # more information, see [Checking object integrity][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [Time,DateTime,Date,Integer,String] :expires + # The date and time at which the object is no longer cacheable. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @option options [String] :grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @option options [Hash] :metadata + # A map of metadata to store with the object in S3. + # @option options [String] :server_side_encryption + # The server-side encryption algorithm used when storing this object in + # Amazon S3 (for example, AES256, aws:kms). + # @option options [String] :storage_class + # By default, Amazon S3 uses the STANDARD Storage Class to store newly + # created objects. The STANDARD storage class provides high durability + # and high availability. Depending on performance needs, you can specify + # a different Storage Class. Amazon S3 on Outposts only uses the + # OUTPOSTS Storage Class. For more information, see [Storage Classes][1] + # in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @option options [String] :website_redirect_location + # If the bucket is configured as a website, redirects requests for this + # object to another object in the same bucket or to an external URL. + # Amazon S3 stores the value of this header in the object metadata. For + # information about object metadata, see [Object Key and Metadata][1]. + # + # In the following example, the request header sets the redirect to an + # object (anotherPage.html) in the same bucket: + # + # `x-amz-website-redirect-location: /anotherPage.html` + # + # In the following example, the request header sets the object redirect + # to another website: + # + # `x-amz-website-redirect-location: http://www.example.com/` + # + # For more information about website hosting in Amazon S3, see [Hosting + # Websites on Amazon S3][2] and [How to Configure Website Page + # Redirects][3]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + # @option options [String] :sse_customer_algorithm + # Specifies the algorithm to use to when encrypting the object (for + # example, AES256). + # @option options [String] :sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use in + # encrypting data. This value is used to store the object and then it is + # discarded; Amazon S3 does not store the encryption key. The key must + # be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @option options [String] :sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check to + # ensure that the encryption key was transmitted without error. + # @option options [String] :ssekms_key_id + # If `x-amz-server-side-encryption` is present and has the value of + # `aws:kms`, this header specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetrical customer + # managed key that was used for the object. If you specify + # `x-amz-server-side-encryption:aws:kms`, but do not provide` + # x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the + # Amazon Web Services managed key to protect the data. If the KMS key + # does not exist in the same account issuing the command, you must use + # the full ARN and not just the ID. + # @option options [String] :ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded UTF-8 + # string holding JSON with the encryption context key-value pairs. + # @option options [Boolean] :bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key + # for object encryption with SSE-KMS. + # + # Specifying this header with a PUT action doesn’t affect bucket-level + # settings for S3 Bucket Key. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. 
For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. (For example, "Key1=Value1") + # @option options [String] :object_lock_mode + # The Object Lock mode that you want to apply to this object. + # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date + # The date and time when you want this object's Object Lock to expire. + # Must be formatted as a timestamp parameter. + # @option options [String] :object_lock_legal_hold_status + # Specifies whether a legal hold will be applied to this object. For + # more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @return [Types::PutObjectOutput] + def put(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.put_object(options) + resp.data + end + + # @example Request syntax with placeholder values + # + # object_summary.restore_object({ + # version_id: "ObjectVersionId", + # restore_request: { + # days: 1, + # glacier_job_parameters: { + # tier: "Standard", # required, accepts Standard, Bulk, Expedited + # }, + # type: "SELECT", # accepts SELECT + # tier: "Standard", # accepts Standard, Bulk, Expedited + # description: "Description", + # select_parameters: { + # input_serialization: { # required + # csv: { + # file_header_info: "USE", # accepts USE, IGNORE, NONE + # comments: "Comments", + # quote_escape_character: "QuoteEscapeCharacter", + # record_delimiter: "RecordDelimiter", + # field_delimiter: "FieldDelimiter", + # quote_character: "QuoteCharacter", + # allow_quoted_record_delimiter: false, + # }, + # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 + # json: { + # type: "DOCUMENT", # accepts DOCUMENT, LINES + # }, + # parquet: { + # }, + # }, + # expression_type: "SQL", # required, accepts SQL + # expression: "Expression", # required + # output_serialization: { # required + # csv: { + # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED + # quote_escape_character: "QuoteEscapeCharacter", + # record_delimiter: "RecordDelimiter", + # field_delimiter: "FieldDelimiter", + # quote_character: "QuoteCharacter", + # }, + # json: { + # record_delimiter: "RecordDelimiter", + # }, + # }, + # }, + # output_location: { + # s3: { + # bucket_name: "BucketName", # required + # prefix: "LocationPrefix", # required + # encryption: { + # encryption_type: "AES256", # required, accepts AES256, aws:kms + # kms_key_id: "SSEKMSKeyId", + # kms_context: "KMSContext", + # }, + # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control + # access_control_list: [ + # { + # grantee: { + # display_name: "DisplayName", + # email_address: "EmailAddress", + # id: "ID", + # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group + # uri: "URI", + # }, + # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, 
WRITE_ACP, READ, READ_ACP + # }, + # ], + # tagging: { + # tag_set: [ # required + # { + # key: "ObjectKey", # required + # value: "Value", # required + # }, + # ], + # }, + # user_metadata: [ + # { + # name: "MetadataKey", + # value: "MetadataValue", + # }, + # ], + # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW + # }, + # }, + # }, + # request_payer: "requester", # accepts requester + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # expected_bucket_owner: "AccountId", + # }) + # @param [Hash] options ({}) + # @option options [String] :version_id + # VersionId used to reference a specific version of the object. + # @option options [Types::RestoreRequest] :restore_request + # Container for restore job parameters. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). 
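+ # @example Restoring an archived copy (an illustrative sketch added in
+ #   review; the day count and tier are arbitrary)
+ #
+ #   # asks S3 to stage a temporary readable copy of a GLACIER-class object
+ #   object_summary.restore_object(
+ #     restore_request: {
+ #       days: 2,
+ #       glacier_job_parameters: { tier: "Standard" }
+ #     }
+ #   )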
+ # @return [Types::RestoreObjectOutput] + def restore_object(options = {}) + options = options.merge( + bucket: @bucket_name, + key: @key + ) + resp = @client.restore_object(options) + resp.data + end + + # @!group Associations + + # @return [ObjectAcl] + def acl + ObjectAcl.new( + bucket_name: @bucket_name, + object_key: @key, + client: @client + ) + end + + # @return [Bucket] + def bucket + Bucket.new( + name: @bucket_name, + client: @client + ) + end + + # @param [String] id + # @return [MultipartUpload] + def multipart_upload(id) + MultipartUpload.new( + bucket_name: @bucket_name, + object_key: @key, + id: id, + client: @client + ) + end + + # @return [Object] + def object + Object.new( + bucket_name: @bucket_name, + key: @key, + client: @client + ) + end + + # @param [String] id + # @return [ObjectVersion] + def version(id) + ObjectVersion.new( + bucket_name: @bucket_name, + object_key: @key, + id: id, + client: @client + ) + end + + # @deprecated + # @api private + def identifiers + { + bucket_name: @bucket_name, + key: @key + } + end + deprecated(:identifiers) + + private + + def extract_bucket_name(args, options) + value = args[0] || options.delete(:bucket_name) + case value + when String then value + when nil then raise ArgumentError, "missing required option :bucket_name" + else + msg = "expected :bucket_name to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def extract_key(args, options) + value = args[1] || options.delete(:key) + case value + when String then value + when nil then raise ArgumentError, "missing required option :key" + else + msg = "expected :key to be a String, got #{value.class}" + raise ArgumentError, msg + end + end + + def yield_waiter_and_warn(waiter, &block) + if !@waiter_block_warned + msg = "pass options to configure the waiter; "\ + "yielding the waiter is deprecated" + warn(msg) + @waiter_block_warned = true + end + yield(waiter.waiter) + end + + def separate_params_and_options(options) + opts = Set.new( + [:client, :max_attempts, :delay, :before_attempt, :before_wait] + ) + waiter_opts = {} + waiter_params = {} + options.each_pair do |key, value| + if opts.include?(key) + waiter_opts[key] = value + else + waiter_params[key] = value + end + end + waiter_opts[:client] ||= @client + [waiter_opts, waiter_params] + end + + class Collection < Aws::Resources::Collection + + # @!group Batch Actions + + # @example Request syntax with placeholder values + # + # object_summary.batch_delete!({ + # mfa: "MFA", + # request_payer: "requester", # accepts requester + # bypass_governance_retention: false, + # expected_bucket_owner: "AccountId", + # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param options ({}) + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
+      #
+      #
+      #
+      #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+      # @option options [Boolean] :bypass_governance_retention
+      #   Specifies whether you want to delete this object even if it has a
+      #   Governance-type Object Lock in place. To use this header, you must
+      #   have the `s3:BypassGovernanceRetention` permission.
+      # @option options [String] :expected_bucket_owner
+      #   The account ID of the expected bucket owner. If the bucket is owned by
+      #   a different account, the request fails with the HTTP status code `403
+      #   Forbidden` (access denied).
+      # @option options [String] :checksum_algorithm
+      #   Indicates the algorithm used to create the checksum for the object
+      #   when using the SDK. This header will not provide any additional
+      #   functionality if not using the SDK. When sending this header, there
+      #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+      #   sent. Otherwise, Amazon S3 fails the request with the HTTP status code
+      #   `400 Bad Request`. For more information, see [Checking object
+      #   integrity][1] in the *Amazon S3 User Guide*.
+      #
+      #   If you provide an individual checksum, Amazon S3 ignores any provided
+      #   `ChecksumAlgorithm` parameter.
+      #
+      #   This checksum algorithm must be the same for all parts and it must
+      #   match the checksum value supplied in the `CreateMultipartUpload`
+      #   request.
+      #
+      #
+      #
+      #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+      # @return [void]
+      def batch_delete!(options = {})
+        batch_enum.each do |batch|
+          params = Aws::Util.copy_hash(options)
+          params[:bucket] = batch[0].bucket_name
+          params[:delete] ||= {}
+          params[:delete][:objects] ||= []
+          batch.each do |item|
+            params[:delete][:objects] << {
+              key: item.key
+            }
+          end
+          batch[0].client.delete_objects(params)
+        end
+        nil
+      end
+
+      # @!endgroup
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_version.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_version.rb
new file mode 100644
index 0000000..07cf09a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/object_version.rb
@@ -0,0 +1,601 @@
+# frozen_string_literal: true
+
+# WARNING ABOUT GENERATED CODE
+#
+# This file is generated. See the contributing guide for more information:
+# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
+#
+# WARNING ABOUT GENERATED CODE
+
+module Aws::S3
+
+  class ObjectVersion
+
+    extend Aws::Deprecations
+
+    # @overload def initialize(bucket_name, object_key, id, options = {})
+    #   @param [String] bucket_name
+    #   @param [String] object_key
+    #   @param [String] id
+    #   @option options [Client] :client
+    # @overload def initialize(options = {})
+    #   @option options [required, String] :bucket_name
+    #   @option options [required, String] :object_key
+    #   @option options [required, String] :id
+    #   @option options [Client] :client
+    def initialize(*args)
+      options = Hash === args.last ? args.pop.dup : {}
+      @bucket_name = extract_bucket_name(args, options)
+      @object_key = extract_object_key(args, options)
+      @id = extract_id(args, options)
+      @data = options.delete(:data)
+      @client = options.delete(:client) || Client.new(options)
+      @waiter_block_warned = false
+    end
+
+    # @!group Read-Only Attributes
+
+    # @return [String]
+    def bucket_name
+      @bucket_name
+    end
+
+    # @return [String]
+    def object_key
+      @object_key
+    end
+
+    # @return [String]
+    def id
+      @id
+    end
+
+    # The entity tag is an MD5 hash of that version of the object.
+    # @return [String]
+    def etag
+      data[:etag]
+    end
+
+    # The algorithm that was used to create a checksum of the object.
+    # @return [Array<String>]
+    def checksum_algorithm
+      data[:checksum_algorithm]
+    end
+
+    # Size in bytes of the object.
+    # @return [Integer]
+    def size
+      data[:size]
+    end
+
+    # The class of storage used to store the object.
+    # @return [String]
+    def storage_class
+      data[:storage_class]
+    end
+
+    # The object key.
+    # @return [String]
+    def key
+      data[:key]
+    end
+
+    # Version ID of an object.
+    # @return [String]
+    def version_id
+      data[:version_id]
+    end
+
+    # Specifies whether the object is (true) or is not (false) the latest
+    # version of an object.
+    # @return [Boolean]
+    def is_latest
+      data[:is_latest]
+    end
+
+    # Date and time the object was last modified.
+    # @return [Time]
+    def last_modified
+      data[:last_modified]
+    end
+
+    # Specifies the owner of the object.
+    # @return [Types::Owner]
+    def owner
+      data[:owner]
+    end
+
+    # @!endgroup
+
+    # @return [Client]
+    def client
+      @client
+    end
+
+    # @raise [NotImplementedError]
+    # @api private
+    def load
+      msg = "#load is not implemented, data only available via enumeration"
+      raise NotImplementedError, msg
+    end
+    alias :reload :load
+
+    # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`.
+    # @return [Types::ObjectVersion]
+    #   Returns the data for this {ObjectVersion}.
+    def data
+      load unless @data
+      @data
+    end
+
+    # @return [Boolean]
+    #   Returns `true` if this resource is loaded. Accessing attributes or
+    #   {#data} on an unloaded resource will trigger a call to {#load}.
+    def data_loaded?
+      !!@data
+    end
+
+    # @deprecated Use [Aws::S3::Client] #wait_until instead
+    #
+    # Waiter polls an API operation until a resource enters a desired
+    # state.
+    #
+    # @note The waiting operation is performed on a copy. The original resource
+    #   remains unchanged.
+    #
+    # ## Basic Usage
+    #
+    # The waiter will poll until it is successful, until it fails by
+    # entering a terminal state, or until a maximum number of attempts
+    # have been made.
+    #
+    #     # polls in a loop until condition is true
+    #     resource.wait_until(options) {|resource| condition}
+    #
+    # ## Example
+    #
+    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
+    #       instance.state.name == 'running'
+    #     end
+    #
+    # ## Configuration
+    #
+    # You can configure the maximum number of polling attempts, and the
+    # delay (in seconds) between each polling attempt. The waiting condition is
+    # set by passing a block to {#wait_until}:
+    #
+    #     # poll for ~25 seconds
+    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
+    #
+    # ## Callbacks
+    #
+    # You can be notified before each polling attempt and before each
+    # delay. If you throw `:success` or `:failure` from these callbacks,
+    # it will terminate the waiter.
+    #
+    #     started_at = Time.now
+    #     # poll for 1 hour, instead of a number of attempts
+    #     proc = Proc.new do |attempts, response|
+    #       throw :failure if Time.now - started_at > 3600
+    #     end
+    #
+    #     # disable max attempts
+    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
+    #
+    # ## Handling Errors
+    #
+    # When a waiter is successful, it returns the Resource. When a waiter
+    # fails, it raises an error.
+    #
+    #     begin
+    #       resource.wait_until(...)
+    #     rescue Aws::Waiters::Errors::WaiterFailed
+    #       # resource did not enter the desired state in time
+    #     end
+    #
+    # @yieldparam [Resource] resource to be used in the waiting condition.
+    #
+    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
+    #   terminates because the waiter has entered a state that it will not
+    #   transition out of, preventing success.
+    #
+    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
+    #   configured maximum number of attempts have been made, and the
+    #   waiter is not yet successful.
+    #
+    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
+    #   encountered while polling for a resource that is not expected.
+    #
+    # @raise [NotImplementedError] Raised when the resource does not
+    #   have a #reload method.
+    #
+    # @option options [Integer] :max_attempts (10) Maximum number of
+    #   attempts
+    # @option options [Integer] :delay (10) Delay between each
+    #   attempt in seconds
+    # @option options [Proc] :before_attempt (nil) Callback
+    #   invoked before each attempt
+    # @option options [Proc] :before_wait (nil) Callback
+    #   invoked before each wait
+    # @return [Resource] if the waiter was successful
+    def wait_until(options = {}, &block)
+      self_copy = self.dup
+      attempts = 0
+      options[:max_attempts] = 10 unless options.key?(:max_attempts)
+      options[:delay] ||= 10
+      options[:poller] = Proc.new do
+        attempts += 1
+        if block.call(self_copy)
+          [:success, self_copy]
+        else
+          self_copy.reload unless attempts == options[:max_attempts]
+          :retry
+        end
+      end
+      Aws::Waiters::Waiter.new(options).wait({})
+    end
+
+    # @!group Actions
+
+    # @example Request syntax with placeholder values
+    #
+    #   object_version.delete({
+    #     mfa: "MFA",
+    #     request_payer: "requester", # accepts requester
+    #     bypass_governance_retention: false,
+    #     expected_bucket_owner: "AccountId",
+    #   })
+    # @param [Hash] options ({})
+    # @option options [String] :mfa
+    #   The concatenation of the authentication device's serial number, a
+    #   space, and the value that is displayed on your authentication device.
+    #   Required to permanently delete a versioned object if versioning is
+    #   configured with MFA delete enabled.
+    # @option options [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests. For information about downloading objects from Requester
+    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    # @option options [Boolean] :bypass_governance_retention
+    #   Indicates whether S3 Object Lock should bypass Governance-mode
+    #   restrictions to process this operation. To use this header, you must
+    #   have the `s3:BypassGovernanceRetention` permission.
+    # @option options [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    # @return [Types::DeleteObjectOutput]
+    def delete(options = {})
+      options = options.merge(
+        bucket: @bucket_name,
+        key: @object_key,
+        version_id: @id
+      )
+      resp = @client.delete_object(options)
+      resp.data
+    end
+
+    # @example Request syntax with placeholder values
+    #
+    #   object_version.get({
+    #     if_match: "IfMatch",
+    #     if_modified_since: Time.now,
+    #     if_none_match: "IfNoneMatch",
+    #     if_unmodified_since: Time.now,
+    #     range: "Range",
+    #     response_cache_control: "ResponseCacheControl",
+    #     response_content_disposition: "ResponseContentDisposition",
+    #     response_content_encoding: "ResponseContentEncoding",
+    #     response_content_language: "ResponseContentLanguage",
+    #     response_content_type: "ResponseContentType",
+    #     response_expires: Time.now,
+    #     sse_customer_algorithm: "SSECustomerAlgorithm",
+    #     sse_customer_key: "SSECustomerKey",
+    #     sse_customer_key_md5: "SSECustomerKeyMD5",
+    #     request_payer: "requester", # accepts requester
+    #     part_number: 1,
+    #     expected_bucket_owner: "AccountId",
+    #     checksum_mode: "ENABLED", # accepts ENABLED
+    #   })
+    # @param [Hash] options ({})
+    # @option options [String] :if_match
+    #   Return the object only if its entity tag (ETag) is the same as the one
+    #   specified; otherwise, return a 412 (precondition failed) error.
+    # @option options [Time,DateTime,Date,Integer,String] :if_modified_since
+    #   Return the object only if it has been modified since the specified
+    #   time; otherwise, return a 304 (not modified) error.
+    # @option options [String] :if_none_match
+    #   Return the object only if its entity tag (ETag) is different from the
+    #   one specified; otherwise, return a 304 (not modified) error.
+    # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since
+    #   Return the object only if it has not been modified since the specified
+    #   time; otherwise, return a 412 (precondition failed) error.
+    # @option options [String] :range
+    #   Downloads the specified range bytes of an object. For more information
+    #   about the HTTP Range header, see
+    #   [https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1].
+    #
+    #   Amazon S3 doesn't support retrieving multiple ranges of data per
+    #   `GET` request.
+    #
+    #
+    #
+    #
+    #
+    #   [1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+    # @option options [String] :response_cache_control
+    #   Sets the `Cache-Control` header of the response.
+    # @option options [String] :response_content_disposition
+    #   Sets the `Content-Disposition` header of the response.
+    # @option options [String] :response_content_encoding
+    #   Sets the `Content-Encoding` header of the response.
+    # @option options [String] :response_content_language
+    #   Sets the `Content-Language` header of the response.
+    # @option options [String] :response_content_type
+    #   Sets the `Content-Type` header of the response.
+    # @option options [Time,DateTime,Date,Integer,String] :response_expires
+    #   Sets the `Expires` header of the response.
+    # @option options [String] :sse_customer_algorithm
+    #   Specifies the algorithm to use when decrypting the object (for
+    #   example, AES256).
+    # @option options [String] :sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 used to
+    #   encrypt the data. This value is used to decrypt the object when
+    #   recovering it and must match the one used when storing the data. The
+    #   key must be appropriate for use with the algorithm specified in the
+    #   `x-amz-server-side-encryption-customer-algorithm` header.
+    # @option options [String] :sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    # @option options [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests. For information about downloading objects from Requester
+    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    # @option options [Integer] :part_number
+    #   Part number of the object being read. This is a positive integer
+    #   between 1 and 10,000. Effectively performs a 'ranged' GET request
+    #   for the part specified. Useful for downloading just a part of an
+    #   object.
+    # @option options [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    # @option options [String] :checksum_mode
+    #   To retrieve the checksum, this mode must be enabled.
+    # @return [Types::GetObjectOutput]
+    def get(options = {}, &block)
+      options = options.merge(
+        bucket: @bucket_name,
+        key: @object_key,
+        version_id: @id
+      )
+      resp = @client.get_object(options, &block)
+      resp.data
+    end
+
+    # @example Request syntax with placeholder values
+    #
+    #   object_version.head({
+    #     if_match: "IfMatch",
+    #     if_modified_since: Time.now,
+    #     if_none_match: "IfNoneMatch",
+    #     if_unmodified_since: Time.now,
+    #     range: "Range",
+    #     sse_customer_algorithm: "SSECustomerAlgorithm",
+    #     sse_customer_key: "SSECustomerKey",
+    #     sse_customer_key_md5: "SSECustomerKeyMD5",
+    #     request_payer: "requester", # accepts requester
+    #     part_number: 1,
+    #     expected_bucket_owner: "AccountId",
+    #     checksum_mode: "ENABLED", # accepts ENABLED
+    #   })
+    # @param [Hash] options ({})
+    # @option options [String] :if_match
+    #   Return the object only if its entity tag (ETag) is the same as the one
+    #   specified; otherwise, return a 412 (precondition failed) error.
+    # @option options [Time,DateTime,Date,Integer,String] :if_modified_since
+    #   Return the object only if it has been modified since the specified
+    #   time; otherwise, return a 304 (not modified) error.
+    # @option options [String] :if_none_match
+    #   Return the object only if its entity tag (ETag) is different from the
+    #   one specified; otherwise, return a 304 (not modified) error.
+    # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since
+    #   Return the object only if it has not been modified since the specified
+    #   time; otherwise, return a 412 (precondition failed) error.
+    # @option options [String] :range
+    #   Because `HeadObject` returns only the metadata for an object, this
+    #   parameter has no effect.
+    # @option options [String] :sse_customer_algorithm
+    #   Specifies the algorithm to use when encrypting the object (for
+    #   example, AES256).
+    # @option options [String] :sse_customer_key
+    #   Specifies the customer-provided encryption key for Amazon S3 to use in
+    #   encrypting data. This value is used to store the object and then it is
+    #   discarded; Amazon S3 does not store the encryption key. The key must
+    #   be appropriate for use with the algorithm specified in the
+    #   `x-amz-server-side-encryption-customer-algorithm` header.
+    # @option options [String] :sse_customer_key_md5
+    #   Specifies the 128-bit MD5 digest of the encryption key according to
+    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
+    #   ensure that the encryption key was transmitted without error.
+    # @option options [String] :request_payer
+    #   Confirms that the requester knows that they will be charged for the
+    #   request. Bucket owners need not specify this parameter in their
+    #   requests. For information about downloading objects from Requester
+    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+    #   in the *Amazon S3 User Guide*.
+    #
+    #
+    #
+    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+    # @option options [Integer] :part_number
+    #   Part number of the object being read. This is a positive integer
+    #   between 1 and 10,000. Effectively performs a 'ranged' HEAD request
+    #   for the part specified. Useful for querying the size of the part and
+    #   the number of parts in this object.
+    # @option options [String] :expected_bucket_owner
+    #   The account ID of the expected bucket owner. If the bucket is owned by
+    #   a different account, the request fails with the HTTP status code `403
+    #   Forbidden` (access denied).
+    # @option options [String] :checksum_mode
+    #   To retrieve the checksum, this parameter must be enabled.
+    #
+    #   In addition, if you enable `ChecksumMode` and the object is encrypted
+    #   with Amazon Web Services Key Management Service (Amazon Web Services
+    #   KMS), you must have permission to use the `kms:Decrypt` action for the
+    #   request to succeed.
+    # @return [Types::HeadObjectOutput]
+    def head(options = {})
+      options = options.merge(
+        bucket: @bucket_name,
+        key: @object_key,
+        version_id: @id
+      )
+      resp = @client.head_object(options)
+      resp.data
+    end
+
+    # @!group Associations
+
+    # @return [Object]
+    def object
+      Object.new(
+        bucket_name: @bucket_name,
+        key: @object_key,
+        client: @client
+      )
+    end
+
+    # @deprecated
+    # @api private
+    def identifiers
+      {
+        bucket_name: @bucket_name,
+        object_key: @object_key,
+        id: @id
+      }
+    end
+    deprecated(:identifiers)
+
+    private
+
+    def extract_bucket_name(args, options)
+      value = args[0] || options.delete(:bucket_name)
+      case value
+      when String then value
+      when nil then raise ArgumentError, "missing required option :bucket_name"
+      else
+        msg = "expected :bucket_name to be a String, got #{value.class}"
+        raise ArgumentError, msg
+      end
+    end
+
+    def extract_object_key(args, options)
+      value = args[1] || options.delete(:object_key)
+      case value
+      when String then value
+      when nil then raise ArgumentError, "missing required option :object_key"
+      else
+        msg = "expected :object_key to be a String, got #{value.class}"
+        raise ArgumentError, msg
+      end
+    end
+
+    def extract_id(args, options)
+      value = args[2] || options.delete(:id)
+      case value
+      when String then value
+      when nil then raise ArgumentError, "missing required option :id"
+      else
+        msg = "expected :id to be a String, got #{value.class}"
+        raise ArgumentError, msg
+      end
+    end
+
+    class Collection < Aws::Resources::Collection
+
+      # @!group Batch Actions
+
+      # @example Request syntax with placeholder values
+      #
+      #   object_version.batch_delete!({
+      #     mfa: "MFA",
+      #     request_payer: "requester", # accepts requester
+      #     bypass_governance_retention: false,
+      #     expected_bucket_owner: "AccountId",
checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 + # }) + # @param options ({}) + # @option options [String] :mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication device. + # Required to permanently delete a versioned object if versioning is + # configured with MFA delete enabled. + # @option options [String] :request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @option options [Boolean] :bypass_governance_retention + # Specifies whether you want to delete this object even if it has a + # Governance-type Object Lock in place. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @option options [String] :expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned by + # a different account, the request fails with the HTTP status code `403 + # Forbidden` (access denied). + # @option options [String] :checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status code + # `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any provided + # `ChecksumAlgorithm` parameter. + # + # This checksum algorithm must be the same for all parts and it match + # the checksum value supplied in the `CreateMultipartUpload` request. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [void] + def batch_delete!(options = {}) + batch_enum.each do |batch| + params = Aws::Util.copy_hash(options) + params[:bucket] = batch[0].bucket_name + params[:delete] ||= {} + params[:delete][:objects] ||= [] + batch.each do |item| + params[:delete][:objects] << { + key: item.object_key, + version_id: item.id + } + end + batch[0].client.delete_objects(params) + end + nil + end + + # @!endgroup + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/accelerate.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/accelerate.rb new file mode 100644 index 0000000..ff3d83d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/accelerate.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + # Provides support for using `Aws::S3::Client` with Amazon S3 Transfer + # Acceleration. 
+      #
+      # Go here for more information about transfer acceleration:
+      # [http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+      class Accelerate < Seahorse::Client::Plugin
+        option(
+          :use_accelerate_endpoint,
+          default: false,
+          doc_type: 'Boolean',
+          docstring: <<-DOCS)
+When set to `true`, accelerated bucket endpoints will be used
+for all object operations. You must first enable accelerate for
+each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+          DOCS
+
+        def add_handlers(handlers, config)
+          operations = config.api.operation_names - [
+            :create_bucket, :list_buckets, :delete_bucket
+          ]
+          handlers.add(
+            OptionHandler, step: :initialize, operations: operations
+          )
+        end
+
+        # @api private
+        class OptionHandler < Seahorse::Client::Handler
+          def call(context)
+            # Support client configuration and per-operation configuration
+            # TODO: move this to an options hash and warn here.
+            if context.params.is_a?(Hash)
+              accelerate = context.params.delete(:use_accelerate_endpoint)
+            end
+            if accelerate.nil?
+              accelerate = context.config.use_accelerate_endpoint
+            end
+            context[:use_accelerate_endpoint] = accelerate
+            @handler.call(context)
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/arn.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/arn.rb
new file mode 100644
index 0000000..d74c006
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/arn.rb
@@ -0,0 +1,70 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+      # When an accesspoint ARN is provided for :bucket in S3 operations, this
+      # plugin resolves the request endpoint from the ARN when possible.
+      # @api private
+      class ARN < Seahorse::Client::Plugin
+        option(
+          :s3_use_arn_region,
+          default: true,
+          doc_type: 'Boolean',
+          docstring: <<-DOCS) do |cfg|
+For S3 ARNs passed into the `:bucket` parameter, this option will
+use the region in the ARN, allowing for cross-region requests to
+be made. Set to `false` to use the client's region instead.
+          DOCS
+          resolve_s3_use_arn_region(cfg)
+        end
+
+        option(
+          :s3_disable_multiregion_access_points,
+          default: false,
+          doc_type: 'Boolean',
+          docstring: <<-DOCS) do |cfg|
+When set to `true`, this option will raise errors when multi-region
+access point ARNs are used. Multi-region access points can potentially
+result in cross region requests.
+          DOCS
+          resolve_s3_disable_multiregion_access_points(cfg)
+        end
+
+        class << self
+          private
+
+          def resolve_s3_use_arn_region(cfg)
+            value = ENV['AWS_S3_USE_ARN_REGION'] ||
+                    Aws.shared_config.s3_use_arn_region(profile: cfg.profile) ||
+                    'true'
+            value = Aws::Util.str_2_bool(value)
+            # Raise if provided value is not true or false
+            if value.nil?
+              raise ArgumentError,
+                    'Must provide either `true` or `false` for the '\
+                    '`s3_disable_multiregion_access_points` profile option '\
+                    "or for ENV['AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS']."
+            end
+            value
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_dns.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_dns.rb
new file mode 100644
index 0000000..23de22e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_dns.rb
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+
+      # Amazon S3 requires DNS style addressing for buckets outside of
+      # the classic region when possible.
+      class BucketDns < Seahorse::Client::Plugin
+
+        # When set to `false` DNS compatible bucket names are moved from
+        # the request URI path to the host as a subdomain, unless the request
+        # is using SSL and the bucket name contains a dot.
+        #
+        # When set to `true`, the bucket name is always forced to be part
+        # of the request URI path. This will not work with buckets outside
+        # the classic region.
+        option(:force_path_style,
+          default: false,
+          doc_type: 'Boolean',
+          docstring: <<-DOCS)
+When set to `true`, the bucket name is always left in the
+request URI and never moved to the host as a sub-domain.
+          DOCS
+
+        # These class methods were originally used in a handler in this plugin.
+        # SigV2 legacy signer needs this logic so we keep it here as utility.
+        # New endpoint resolution will check this as a matcher.
+        class << self
+          # @param [String] bucket_name
+          # @param [Boolean] ssl
+          # @return [Boolean]
+          def dns_compatible?(bucket_name, ssl)
+            if valid_subdomain?(bucket_name)
+              bucket_name.match(/\./) && ssl ? false : true
+            else
+              false
+            end
+          end
+
+          # @param [String] bucket_name
+          # @return [Boolean]
+          def valid_subdomain?(bucket_name)
+            bucket_name.size < 64 &&
+              bucket_name =~ /^[a-z0-9][a-z0-9.-]+[a-z0-9]$/ &&
+              bucket_name !~ /(\d+\.){3}\d+/ &&
+              bucket_name !~ /[.-]{2}/
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb
new file mode 100644
index 0000000..9c05221
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+      # @api private
+      class BucketNameRestrictions < Seahorse::Client::Plugin
+        class Handler < Seahorse::Client::Handler
+
+          # Useful because Aws::S3::Errors::SignatureDoesNotMatch is thrown
+          # when passed a bucket with a forward slash. Instead provide a more
+          # helpful error. Ideally should not be a plugin?
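+          #
+          # For illustration (the bucket and key below are hypothetical),
+          # a call such as:
+          #
+          #     client.get_object(bucket: 'my-bucket/prefix', key: 'obj')
+          #
+          # raises `ArgumentError: bucket name must not contain a
+          # forward-slash (/)` instead of a confusing signature error.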
+ def call(context) + bucket_member = _bucket_member(context.operation.input.shape) + if bucket_member && (bucket = context.params[bucket_member]) + if !Aws::ARNParser.arn?(bucket) && bucket.include?('/') + raise ArgumentError, + 'bucket name must not contain a forward-slash (/)' + end + end + @handler.call(context) + end + + private + + def _bucket_member(input) + input.members.each do |member, ref| + return member if ref.shape.name == 'BucketName' + end + nil + end + + end + + handler(Handler) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/dualstack.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/dualstack.rb new file mode 100644 index 0000000..5bcb653 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/dualstack.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + # @api private + class Dualstack < Seahorse::Client::Plugin + def add_handlers(handlers, _config) + handlers.add(OptionHandler, step: :initialize) + end + + # @api private + class OptionHandler < Seahorse::Client::Handler + def call(context) + # Support client configuration and per-operation configuration + if context.params.is_a?(Hash) + dualstack = context.params.delete(:use_dualstack_endpoint) + end + dualstack = context.config.use_dualstack_endpoint if dualstack.nil? + context[:use_dualstack_endpoint] = dualstack + @handler.call(context) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/endpoints.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/endpoints.rb new file mode 100644 index 0000000..289b2ad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/endpoints.rb @@ -0,0 +1,262 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + + +module Aws::S3 + module Plugins + class Endpoints < Seahorse::Client::Plugin + option( + :endpoint_provider, + doc_type: 'Aws::S3::EndpointProvider', + docstring: 'The endpoint provider used to resolve endpoints. Any '\ + 'object that responds to `#resolve_endpoint(parameters)` '\ + 'where `parameters` is a Struct similar to '\ + '`Aws::S3::EndpointParameters`' + ) do |cfg| + Aws::S3::EndpointProvider.new + end + + # @api private + class Handler < Seahorse::Client::Handler + def call(context) + # If endpoint was discovered, do not resolve or apply the endpoint. 
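+          # Otherwise, build endpoint parameters for this operation (see
+          # #parameters_for_operation below), resolve the endpoint, and
+          # rewrite the request URL and headers before the request is signed.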
+ unless context[:discovered_endpoint] + params = parameters_for_operation(context) + endpoint = context.config.endpoint_provider.resolve_endpoint(params) + + context.http_request.endpoint = endpoint.url + apply_endpoint_headers(context, endpoint.headers) + end + + context[:endpoint_params] = params + context[:auth_scheme] = + Aws::Endpoints.resolve_auth_scheme(context, endpoint) + + @handler.call(context) + end + + private + + def apply_endpoint_headers(context, headers) + headers.each do |key, values| + value = values + .compact + .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } + .join(',') + + context.http_request.headers[key] = value + end + end + + def parameters_for_operation(context) + case context.operation_name + when :abort_multipart_upload + Aws::S3::Endpoints::AbortMultipartUpload.build(context) + when :complete_multipart_upload + Aws::S3::Endpoints::CompleteMultipartUpload.build(context) + when :copy_object + Aws::S3::Endpoints::CopyObject.build(context) + when :create_bucket + Aws::S3::Endpoints::CreateBucket.build(context) + when :create_multipart_upload + Aws::S3::Endpoints::CreateMultipartUpload.build(context) + when :delete_bucket + Aws::S3::Endpoints::DeleteBucket.build(context) + when :delete_bucket_analytics_configuration + Aws::S3::Endpoints::DeleteBucketAnalyticsConfiguration.build(context) + when :delete_bucket_cors + Aws::S3::Endpoints::DeleteBucketCors.build(context) + when :delete_bucket_encryption + Aws::S3::Endpoints::DeleteBucketEncryption.build(context) + when :delete_bucket_intelligent_tiering_configuration + Aws::S3::Endpoints::DeleteBucketIntelligentTieringConfiguration.build(context) + when :delete_bucket_inventory_configuration + Aws::S3::Endpoints::DeleteBucketInventoryConfiguration.build(context) + when :delete_bucket_lifecycle + Aws::S3::Endpoints::DeleteBucketLifecycle.build(context) + when :delete_bucket_metrics_configuration + Aws::S3::Endpoints::DeleteBucketMetricsConfiguration.build(context) + when :delete_bucket_ownership_controls + Aws::S3::Endpoints::DeleteBucketOwnershipControls.build(context) + when :delete_bucket_policy + Aws::S3::Endpoints::DeleteBucketPolicy.build(context) + when :delete_bucket_replication + Aws::S3::Endpoints::DeleteBucketReplication.build(context) + when :delete_bucket_tagging + Aws::S3::Endpoints::DeleteBucketTagging.build(context) + when :delete_bucket_website + Aws::S3::Endpoints::DeleteBucketWebsite.build(context) + when :delete_object + Aws::S3::Endpoints::DeleteObject.build(context) + when :delete_object_tagging + Aws::S3::Endpoints::DeleteObjectTagging.build(context) + when :delete_objects + Aws::S3::Endpoints::DeleteObjects.build(context) + when :delete_public_access_block + Aws::S3::Endpoints::DeletePublicAccessBlock.build(context) + when :get_bucket_accelerate_configuration + Aws::S3::Endpoints::GetBucketAccelerateConfiguration.build(context) + when :get_bucket_acl + Aws::S3::Endpoints::GetBucketAcl.build(context) + when :get_bucket_analytics_configuration + Aws::S3::Endpoints::GetBucketAnalyticsConfiguration.build(context) + when :get_bucket_cors + Aws::S3::Endpoints::GetBucketCors.build(context) + when :get_bucket_encryption + Aws::S3::Endpoints::GetBucketEncryption.build(context) + when :get_bucket_intelligent_tiering_configuration + Aws::S3::Endpoints::GetBucketIntelligentTieringConfiguration.build(context) + when :get_bucket_inventory_configuration + Aws::S3::Endpoints::GetBucketInventoryConfiguration.build(context) + when :get_bucket_lifecycle + 
Aws::S3::Endpoints::GetBucketLifecycle.build(context) + when :get_bucket_lifecycle_configuration + Aws::S3::Endpoints::GetBucketLifecycleConfiguration.build(context) + when :get_bucket_location + Aws::S3::Endpoints::GetBucketLocation.build(context) + when :get_bucket_logging + Aws::S3::Endpoints::GetBucketLogging.build(context) + when :get_bucket_metrics_configuration + Aws::S3::Endpoints::GetBucketMetricsConfiguration.build(context) + when :get_bucket_notification + Aws::S3::Endpoints::GetBucketNotification.build(context) + when :get_bucket_notification_configuration + Aws::S3::Endpoints::GetBucketNotificationConfiguration.build(context) + when :get_bucket_ownership_controls + Aws::S3::Endpoints::GetBucketOwnershipControls.build(context) + when :get_bucket_policy + Aws::S3::Endpoints::GetBucketPolicy.build(context) + when :get_bucket_policy_status + Aws::S3::Endpoints::GetBucketPolicyStatus.build(context) + when :get_bucket_replication + Aws::S3::Endpoints::GetBucketReplication.build(context) + when :get_bucket_request_payment + Aws::S3::Endpoints::GetBucketRequestPayment.build(context) + when :get_bucket_tagging + Aws::S3::Endpoints::GetBucketTagging.build(context) + when :get_bucket_versioning + Aws::S3::Endpoints::GetBucketVersioning.build(context) + when :get_bucket_website + Aws::S3::Endpoints::GetBucketWebsite.build(context) + when :get_object + Aws::S3::Endpoints::GetObject.build(context) + when :get_object_acl + Aws::S3::Endpoints::GetObjectAcl.build(context) + when :get_object_attributes + Aws::S3::Endpoints::GetObjectAttributes.build(context) + when :get_object_legal_hold + Aws::S3::Endpoints::GetObjectLegalHold.build(context) + when :get_object_lock_configuration + Aws::S3::Endpoints::GetObjectLockConfiguration.build(context) + when :get_object_retention + Aws::S3::Endpoints::GetObjectRetention.build(context) + when :get_object_tagging + Aws::S3::Endpoints::GetObjectTagging.build(context) + when :get_object_torrent + Aws::S3::Endpoints::GetObjectTorrent.build(context) + when :get_public_access_block + Aws::S3::Endpoints::GetPublicAccessBlock.build(context) + when :head_bucket + Aws::S3::Endpoints::HeadBucket.build(context) + when :head_object + Aws::S3::Endpoints::HeadObject.build(context) + when :list_bucket_analytics_configurations + Aws::S3::Endpoints::ListBucketAnalyticsConfigurations.build(context) + when :list_bucket_intelligent_tiering_configurations + Aws::S3::Endpoints::ListBucketIntelligentTieringConfigurations.build(context) + when :list_bucket_inventory_configurations + Aws::S3::Endpoints::ListBucketInventoryConfigurations.build(context) + when :list_bucket_metrics_configurations + Aws::S3::Endpoints::ListBucketMetricsConfigurations.build(context) + when :list_buckets + Aws::S3::Endpoints::ListBuckets.build(context) + when :list_multipart_uploads + Aws::S3::Endpoints::ListMultipartUploads.build(context) + when :list_object_versions + Aws::S3::Endpoints::ListObjectVersions.build(context) + when :list_objects + Aws::S3::Endpoints::ListObjects.build(context) + when :list_objects_v2 + Aws::S3::Endpoints::ListObjectsV2.build(context) + when :list_parts + Aws::S3::Endpoints::ListParts.build(context) + when :put_bucket_accelerate_configuration + Aws::S3::Endpoints::PutBucketAccelerateConfiguration.build(context) + when :put_bucket_acl + Aws::S3::Endpoints::PutBucketAcl.build(context) + when :put_bucket_analytics_configuration + Aws::S3::Endpoints::PutBucketAnalyticsConfiguration.build(context) + when :put_bucket_cors + Aws::S3::Endpoints::PutBucketCors.build(context) + 
when :put_bucket_encryption + Aws::S3::Endpoints::PutBucketEncryption.build(context) + when :put_bucket_intelligent_tiering_configuration + Aws::S3::Endpoints::PutBucketIntelligentTieringConfiguration.build(context) + when :put_bucket_inventory_configuration + Aws::S3::Endpoints::PutBucketInventoryConfiguration.build(context) + when :put_bucket_lifecycle + Aws::S3::Endpoints::PutBucketLifecycle.build(context) + when :put_bucket_lifecycle_configuration + Aws::S3::Endpoints::PutBucketLifecycleConfiguration.build(context) + when :put_bucket_logging + Aws::S3::Endpoints::PutBucketLogging.build(context) + when :put_bucket_metrics_configuration + Aws::S3::Endpoints::PutBucketMetricsConfiguration.build(context) + when :put_bucket_notification + Aws::S3::Endpoints::PutBucketNotification.build(context) + when :put_bucket_notification_configuration + Aws::S3::Endpoints::PutBucketNotificationConfiguration.build(context) + when :put_bucket_ownership_controls + Aws::S3::Endpoints::PutBucketOwnershipControls.build(context) + when :put_bucket_policy + Aws::S3::Endpoints::PutBucketPolicy.build(context) + when :put_bucket_replication + Aws::S3::Endpoints::PutBucketReplication.build(context) + when :put_bucket_request_payment + Aws::S3::Endpoints::PutBucketRequestPayment.build(context) + when :put_bucket_tagging + Aws::S3::Endpoints::PutBucketTagging.build(context) + when :put_bucket_versioning + Aws::S3::Endpoints::PutBucketVersioning.build(context) + when :put_bucket_website + Aws::S3::Endpoints::PutBucketWebsite.build(context) + when :put_object + Aws::S3::Endpoints::PutObject.build(context) + when :put_object_acl + Aws::S3::Endpoints::PutObjectAcl.build(context) + when :put_object_legal_hold + Aws::S3::Endpoints::PutObjectLegalHold.build(context) + when :put_object_lock_configuration + Aws::S3::Endpoints::PutObjectLockConfiguration.build(context) + when :put_object_retention + Aws::S3::Endpoints::PutObjectRetention.build(context) + when :put_object_tagging + Aws::S3::Endpoints::PutObjectTagging.build(context) + when :put_public_access_block + Aws::S3::Endpoints::PutPublicAccessBlock.build(context) + when :restore_object + Aws::S3::Endpoints::RestoreObject.build(context) + when :select_object_content + Aws::S3::Endpoints::SelectObjectContent.build(context) + when :upload_part + Aws::S3::Endpoints::UploadPart.build(context) + when :upload_part_copy + Aws::S3::Endpoints::UploadPartCopy.build(context) + when :write_get_object_response + Aws::S3::Endpoints::WriteGetObjectResponse.build(context) + end + end + end + + def add_handlers(handlers, _config) + handlers.add(Handler, step: :build, priority: 75) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/expect_100_continue.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/expect_100_continue.rb new file mode 100644 index 0000000..6c622be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/expect_100_continue.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + class Expect100Continue < Seahorse::Client::Plugin + + def add_handlers(handlers, config) + if config.http_continue_timeout && config.http_continue_timeout > 0 + handlers.add(Handler) + end + end + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + body = context.http_request.body + if body.respond_to?(:size) && body.size > 0 && + !context[:use_accelerate_endpoint] + context.http_request.headers['expect'] 
= '100-continue'
+          end
+          @handler.call(context)
+        end
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb
new file mode 100644
index 0000000..13fa88f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+      class GetBucketLocationFix < Seahorse::Client::Plugin
+
+        class Handler < Seahorse::Client::Handler
+
+          def call(context)
+            @handler.call(context).on(200) do |response|
+              response.data = S3::Types::GetBucketLocationOutput.new
+              xml = context.http_response.body_contents
+              matches = xml.match(/<LocationConstraint.*?>(.+?)<\/LocationConstraint>/)
+              response.data[:location_constraint] = matches ? matches[1] : ''
+            end
+          end
+        end
+
+        handler(Handler, priority: 60, operations: [:get_bucket_location])
+
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/http_200_errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/http_200_errors.rb
new file mode 100644
index 0000000..f228de7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/http_200_errors.rb
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+
+      # A handful of Amazon S3 operations will respond with a 200 status
+      # code but will send an error in the response body. This plugin
+      # injects a handler that will parse 200 response bodies for potential
+      # errors, allowing them to be retried.
+      # @api private
+      class Http200Errors < Seahorse::Client::Plugin
+
+        class Handler < Seahorse::Client::Handler
+
+          def call(context)
+            @handler.call(context).on(200) do |response|
+              if error = check_for_error(context)
+                context.http_response.status_code = 500
+                response.data = nil
+                response.error = error
+              end
+            end
+          end
+
+          def check_for_error(context)
+            xml = context.http_response.body_contents
+            if xml.match(/<Error>/)
+              error_code = xml.match(/<Code>(.+?)<\/Code>/)[1]
+              error_message = xml.match(/<Message>(.+?)<\/Message>/)[1]
+              S3::Errors.error_class(error_code).new(context, error_message)
+            elsif !xml.match(/<\w/) # Must have the start of an XML Tag
+              # Other incomplete xml bodies will result in XML ParsingError
+              Seahorse::Client::NetworkingError.new(
+                S3::Errors
+                  .error_class('InternalError')
+                  .new(context, 'Empty or incomplete response body')
+              )
+            end
+          end
+        end
+
+        handler(
+          Handler,
+          step: :sign,
+          operations: [
+            :complete_multipart_upload,
+            :copy_object,
+            :upload_part_copy,
+          ]
+        )
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb
new file mode 100644
index 0000000..4254fe8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    module Plugins
+
+      class IADRegionalEndpoint < Seahorse::Client::Plugin
+
+        option(:s3_us_east_1_regional_endpoint,
+          default: 'legacy',
+          doc_type: String,
+          docstring: <<-DOCS) do |cfg|
+Pass in `regional` to enable the `us-east-1` regional endpoint.
+Defaults to `legacy` mode which uses the global endpoint.
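+For example, `Aws::S3::Client.new(s3_us_east_1_regional_endpoint: 'regional')`
+opts a single client in to the regional endpoint.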
+ DOCS + resolve_iad_regional_endpoint(cfg) + end + + private + + def self.resolve_iad_regional_endpoint(cfg) + default_mode_value = + if cfg.respond_to?(:defaults_mode_config_resolver) + cfg.defaults_mode_config_resolver.resolve(:s3_us_east_1_regional_endpoint) + end + + mode = ENV['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] || + Aws.shared_config.s3_us_east_1_regional_endpoint(profile: cfg.profile) || + default_mode_value || + 'legacy' + mode = mode.downcase + unless %w(legacy regional).include?(mode) + raise ArgumentError, "expected :s3_us_east_1_regional_endpoint or"\ + " ENV['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] to be `legacy` or"\ + " `regional`." + end + mode + end + + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/location_constraint.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/location_constraint.rb new file mode 100644 index 0000000..0a1e394 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/location_constraint.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + + # When making calls to {S3::Client#create_bucket} outside the + # "classic" region, the bucket location constraint must be specified. + # This plugin auto populates the constraint to the configured region. + class LocationConstraint < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + unless context.config.region == 'us-east-1' + populate_location_constraint(context.params, context.config.region) + end + @handler.call(context) + end + + private + + def populate_location_constraint(params, region) + params[:create_bucket_configuration] ||= {} + params[:create_bucket_configuration][:location_constraint] ||= region + end + + end + + handler(Handler, step: :initialize, operations: [:create_bucket]) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/md5s.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/md5s.rb new file mode 100644 index 0000000..236aca0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/md5s.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +require 'openssl' + +module Aws + module S3 + module Plugins + # @api private + # This plugin is effectively deprecated in favor of modeled + # httpChecksumRequired traits. + class Md5s < Seahorse::Client::Plugin + # These operations allow Content MD5 but are not required by + # httpChecksumRequired. This list should not grow. + OPTIONAL_OPERATIONS = [ + :put_object, + :upload_part + ] + + # @api private + class Handler < Seahorse::Client::Handler + + CHUNK_SIZE = 1 * 1024 * 1024 # one MB + + def call(context) + if !context[:checksum_algorithms] # skip in favor of flexible checksum + body = context.http_request.body + if body.respond_to?(:size) && body.size > 0 + context.http_request.headers['Content-Md5'] ||= md5(body) + end + end + @handler.call(context) + end + + private + + # @param [File, Tempfile, IO#read, String] value + # @return [String] + def md5(value) + if (File === value || Tempfile === value) && !value.path.nil? 
&& File.exist?(value.path) + OpenSSL::Digest::MD5.file(value).base64digest + elsif value.respond_to?(:read) + md5 = OpenSSL::Digest::MD5.new + update_in_chunks(md5, value) + md5.base64digest + else + OpenSSL::Digest::MD5.digest(value).base64digest + end + end + + def update_in_chunks(digest, io) + loop do + chunk = io.read(CHUNK_SIZE) + break unless chunk + digest.update(chunk) + end + io.rewind + end + + end + + option(:compute_checksums, + default: true, + doc_type: 'Boolean', + docstring: <<-DOCS) +When `true` a MD5 checksum will be computed and sent in the Content Md5 +header for :put_object and :upload_part. When `false`, MD5 checksums +will not be computed for these operations. Checksums are still computed +for operations requiring them. Checksum errors returned by Amazon S3 are +automatically retried up to `:retry_limit` times. + DOCS + + def add_handlers(handlers, config) + if config.compute_checksums + # priority set low to ensure md5 is computed AFTER the request is + # built but before it is signed + handlers.add( + Handler, + priority: 10, step: :build, operations: OPTIONAL_OPERATIONS + ) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/redirects.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/redirects.rb new file mode 100644 index 0000000..819009d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/redirects.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + class Redirects < Seahorse::Client::Plugin + + option(:follow_redirects, + default: true, + doc_type: 'Boolean', + docstring: <<-DOCS) +When `true`, this client will follow 307 redirects returned +by Amazon S3. 
+ DOCS + + # @api private + class Handler < Seahorse::Client::Handler + + def call(context) + response = @handler.call(context) + if context.http_response.status_code == 307 + endpoint = context.http_response.headers['location'] + unless context.http_request.endpoint.host.include?('fips') + context.http_request.endpoint = endpoint + end + context.http_response.body.truncate(0) + @handler.call(context) + else + response + end + end + + end + + def add_handlers(handlers, config) + if config.follow_redirects + # we want to re-trigger request signing + handlers.add(Handler, step: :sign, priority: 90) + end + end + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_host_id.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_host_id.rb new file mode 100644 index 0000000..fa1d35d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_host_id.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + + # Support S3 host id, more information, see: + # http://docs.aws.amazon.com/AmazonS3/latest/dev/troubleshooting.html#sdk-request-ids + # + # This plugin adds :host_id for s3 responses when available + # @api private + class S3HostId < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + response = @handler.call(context) + h = context.http_response.headers + context[:s3_host_id] = h['x-amz-id-2'] + response + end + + end + + handler(Handler, step: :sign) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_signer.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_signer.rb new file mode 100644 index 0000000..8d18bd2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/s3_signer.rb @@ -0,0 +1,177 @@ +# frozen_string_literal: true + +require 'aws-sigv4' + +module Aws + module S3 + module Plugins + # This plugin used to have a V4 signer but it was removed in favor of + # generic Sign plugin that uses endpoint auth scheme. + # + # @api private + class S3Signer < Seahorse::Client::Plugin + option(:signature_version, 'v4') + + def add_handlers(handlers, cfg) + case cfg.signature_version + when 'v4' then add_v4_handlers(handlers) + when 's3' then add_legacy_handler(handlers) + else + msg = "unsupported signature version `#{cfg.signature_version}'" + raise ArgumentError, msg + end + end + + def add_v4_handlers(handlers) + handlers.add(CachedBucketRegionHandler, step: :sign, priority: 60) + handlers.add(BucketRegionErrorHandler, step: :sign, priority: 40) + end + + def add_legacy_handler(handlers) + # generic Sign plugin will be skipped if it sees sigv2 + handlers.add(LegacyHandler, step: :sign) + end + + class LegacyHandler < Seahorse::Client::Handler + def call(context) + LegacySigner.sign(context) + @handler.call(context) + end + end + + # This handler will update the http endpoint when the bucket region + # is known/cached. 
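+        # Sketch of the effect (bucket and regions are illustrative): once an
+        # earlier request has cached `S3::BUCKET_REGIONS['my-bucket'] =
+        # 'eu-west-1'`, a client configured for `us-east-1` will re-sign and
+        # send requests for that bucket against the eu-west-1 endpoint.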
+        class CachedBucketRegionHandler < Seahorse::Client::Handler
+          def call(context)
+            bucket = context.params[:bucket]
+            check_for_cached_region(context, bucket) if bucket
+            @handler.call(context)
+          end
+
+          private
+
+          def check_for_cached_region(context, bucket)
+            cached_region = S3::BUCKET_REGIONS[bucket]
+            if cached_region &&
+               cached_region != context.config.region &&
+               !S3Signer.custom_endpoint?(context)
+              context.http_request.endpoint.host = S3Signer.new_hostname(
+                context, cached_region
+              )
+              context[:sigv4_region] = cached_region # Sign plugin will use this
+            end
+          end
+        end
+
+        # This handler detects when a request fails because of a mismatched bucket
+        # region. It follows up by making a request to determine the correct
+        # region, then finally a version 4 signed request against the correct
+        # regional endpoint. This is intended for s3's global endpoint which
+        # will return 400 if the bucket is not in region.
+        class BucketRegionErrorHandler < Seahorse::Client::Handler
+          def call(context)
+            response = @handler.call(context)
+            handle_region_errors(response)
+          end
+
+          private
+
+          def handle_region_errors(response)
+            if wrong_sigv4_region?(response) &&
+               !fips_region?(response) &&
+               !S3Signer.custom_endpoint?(response.context) &&
+               !expired_credentials?(response)
+              get_region_and_retry(response.context)
+            else
+              response
+            end
+          end
+
+          def get_region_and_retry(context)
+            actual_region = context.http_response.headers['x-amz-bucket-region']
+            actual_region ||= region_from_body(context.http_response.body_contents)
+            update_bucket_cache(context, actual_region)
+            log_warning(context, actual_region)
+            resign_with_new_region(context, actual_region)
+            @handler.call(context)
+          end
+
+          def update_bucket_cache(context, actual_region)
+            S3::BUCKET_REGIONS[context.params[:bucket]] = actual_region
+          end
+
+          def fips_region?(resp)
+            resp.context.http_request.endpoint.host.include?('s3-fips.')
+          end
+
+          def expired_credentials?(resp)
+            resp.context.http_response.body_contents.match(/<Code>ExpiredToken<\/Code>/)
+          end
+
+          def wrong_sigv4_region?(resp)
+            resp.context.http_response.status_code == 400 &&
+              (resp.context.http_response.headers['x-amz-bucket-region'] ||
+               resp.context.http_response.body_contents.match(/<Region>.+?<\/Region>/))
+          end
+
+          def resign_with_new_region(context, actual_region)
+            context.http_response.body.truncate(0)
+            context.http_request.endpoint.host = S3Signer.new_hostname(
+              context, actual_region
+            )
+            context.metadata[:redirect_region] = actual_region
+
+            signer = Aws::Plugins::Sign.signer_for(
+              context[:auth_scheme],
+              context.config,
+              actual_region
+            )
+
+            signer.sign(context)
+          end
+
+          def region_from_body(body)
+            region = body.match(/<Region>(.+?)<\/Region>/)[1]
+            if region.nil?
|| region == '' + raise "couldn't get region from body: #{body}" + else + region + end + end + + def log_warning(context, actual_region) + msg = "S3 client configured for #{context.config.region.inspect} " \ + "but the bucket #{context.params[:bucket].inspect} is in " \ + "#{actual_region.inspect}; Please configure the proper region " \ + "to avoid multiple unnecessary redirects and signing attempts\n" + if (logger = context.config.logger) + logger.warn(msg) + else + warn(msg) + end + end + end + + class << self + def new_hostname(context, region) + endpoint_params = context[:endpoint_params].dup + endpoint_params.region = region + endpoint_params.endpoint = nil + endpoint = + context.config.endpoint_provider.resolve_endpoint(endpoint_params) + URI(endpoint.url).host + end + + def custom_endpoint?(context) + region = context.config.region + partition = Aws::Endpoints::Matchers.aws_partition(region) + endpoint = context.http_request.endpoint + + !endpoint.hostname.include?(partition['dnsSuffix']) && + !endpoint.hostname.include?(partition['dualStackDnsSuffix']) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb new file mode 100644 index 0000000..4d274bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Aws + module S3 + module Plugins + + # S3 GetObject results for whole Multipart Objects contain a checksum + # that cannot be validated. These should be skipped by the + # ChecksumAlgorithm plugin. + class SkipWholeMultipartGetChecksums < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + context[:http_checksum] ||= {} + context[:http_checksum][:skip_on_suffix] = true + + @handler.call(context) + end + + end + + handler( + Handler, + step: :initialize, + operations: [:get_object] + ) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/sse_cpk.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/sse_cpk.rb new file mode 100644 index 0000000..c5e7a33 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/sse_cpk.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +require 'uri' +require 'openssl' + +module Aws + module S3 + module Plugins + class SseCpk < Seahorse::Client::Plugin + + option(:require_https_for_sse_cpk, + default: true, + doc_type: 'Boolean', + docstring: <<-DOCS) +When `true`, the endpoint **must** be HTTPS for all operations +where server-side-encryption is used with customer-provided keys. +This should only be disabled for local testing. 
+ DOCS + + class Handler < Seahorse::Client::Handler + + def call(context) + compute_key_md5(context) if context.params.is_a?(Hash) + @handler.call(context) + end + + private + + def compute_key_md5(context) + params = context.params + if key = params[:sse_customer_key] + require_https(context) + params[:sse_customer_key] = base64(key) + params[:sse_customer_key_md5] = base64(md5(key)) + end + if key = params[:copy_source_sse_customer_key] + require_https(context) + params[:copy_source_sse_customer_key] = base64(key) + params[:copy_source_sse_customer_key_md5] = base64(md5(key)) + end + end + + def require_https(context) + unless URI::HTTPS === context.config.endpoint + msg = <<-MSG.strip.gsub("\n", ' ') + Attempting to send customer-provided-keys for S3 + server-side-encryption over HTTP; Please configure a HTTPS + endpoint. If you are attempting to use a test endpoint, + you can disable this check via `:require_https_for_sse_cpk` + MSG + raise ArgumentError, msg + end + end + + def md5(str) + OpenSSL::Digest::MD5.digest(str) + end + + def base64(str) + Base64.encode64(str).strip + end + + end + + handler(Handler, step: :initialize) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/streaming_retry.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/streaming_retry.rb new file mode 100644 index 0000000..84d86d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/streaming_retry.rb @@ -0,0 +1,139 @@ +# frozen_string_literal: true + +require 'forwardable' + +module Aws + module S3 + module Plugins + + # A wrapper around BlockIO that adds no-ops for truncate and rewind + # @api private + class RetryableBlockIO + extend Forwardable + def_delegators :@block_io, :write, :read, :size + + def initialize(block_io) + @block_io = block_io + end + + def truncate(_integer); end + + def rewind; end + end + + # A wrapper around ManagedFile that adds no-ops for truncate and rewind + # @api private + class RetryableManagedFile + extend Forwardable + def_delegators :@file, :write, :read, :size, :open?, :close + + def initialize(managed_file) + @file = managed_file + end + + def truncate(_integer); end + + def rewind; end + end + + class NonRetryableStreamingError < StandardError + + def initialize(error) + super('Unable to retry request - retry could result in processing duplicated chunks.') + set_backtrace(error.backtrace) + @original_error = error + end + + attr_reader :original_error + end + + # This handler works with the ResponseTarget plugin to provide smart + # retries of S3 streaming operations that support the range parameter + # (currently only: get_object). When a 200 OK with a TruncatedBodyError + # is received this handler will add a range header that excludes the + # data that has already been processed (written to file or sent to + # the target Proc). + # It is important to not write data to the custom target in the case of + # a non-success response. We do not want to write an XML error + # message to someone's file or pass it to a user's Proc. 
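Seen from the calling side, the retry behavior sketched in the comments above is transparent. A minimal sketch, assuming a hypothetical bucket, key, and target path (credentials come from the default provider chain):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')

# Download straight to disk. Because no :range is set on the initial
# request, the plugin wraps the file target; if the connection drops
# mid-body, the retried request carries "Range: bytes=<bytes-written>-"
# and the download resumes instead of starting over.
client.get_object(
  bucket: 'my-bucket',      # hypothetical
  key: 'backups/large.tar', # hypothetical
  response_target: '/tmp/large.tar'
)
```

Supplying your own `:range` disables the wrapping, since stacking a resume range on top of a user-specified range could process duplicated chunks.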
+ # @api private + class StreamingRetry < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + target = context.params[:response_target] || context[:response_target] + + # retry is only supported when range is NOT set on the initial request + if supported_target?(target) && !context.params[:range] + add_event_listeners(context, target) + end + @handler.call(context) + end + + private + + def add_event_listeners(context, target) + context.http_response.on_headers(200..299) do + case context.http_response.body + when Seahorse::Client::BlockIO then + context.http_response.body = RetryableBlockIO.new(context.http_response.body) + when Seahorse::Client::ManagedFile then + context.http_response.body = RetryableManagedFile.new(context.http_response.body) + end + end + + context.http_response.on_headers(400..599) do + context.http_response.body = StringIO.new # something to write the error to + end + + context.http_response.on_success(200..299) do + body = context.http_response.body + if body.is_a?(RetryableManagedFile) && body.open? + body.close + end + end + + context.http_response.on_error do |error| + if retryable_body?(context) + if truncated_body?(error) + context.http_request.headers[:range] = "bytes=#{context.http_response.body.size}-" + else + case context.http_response.body + when RetryableManagedFile + # call rewind on the underlying file + context.http_response.body.instance_variable_get(:@file).rewind + else + raise NonRetryableStreamingError, error + end + end + end + end + end + + def truncated_body?(error) + error.is_a?(Seahorse::Client::NetworkingError) && + error.original_error.is_a?( + Seahorse::Client::NetHttp::Handler::TruncatedBodyError + ) + end + + def retryable_body?(context) + context.http_response.body.is_a?(RetryableBlockIO) || + context.http_response.body.is_a?(RetryableManagedFile) + end + + def supported_target?(target) + case target + when Proc, String, Pathname then true + else false + end + end + end + + handler(Handler, step: :sign, operations: [:get_object], priority: 10) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/url_encoded_keys.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/url_encoded_keys.rb new file mode 100644 index 0000000..177bf03 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/plugins/url_encoded_keys.rb @@ -0,0 +1,97 @@ +# frozen_string_literal: true + +require 'uri' +require 'cgi' + +module Aws + module S3 + module Plugins + + # This plugin auto-populates the `:encoding_type` request parameter + # to all calls made to Amazon S3 that accept it. + # + # This enables Amazon S3 to return object keys that might contain + # invalid XML characters as URL encoded strings. This plugin also + # automatically decodes these keys so that the key management is + # transparent to the user. + # + # If you specify the `:encoding_type` parameter, then this plugin + # will be disabled, and you will need to decode the keys yourself. 
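A short sketch of the two modes this gives the caller, with a hypothetical bucket; the managed operations are listed just below:

```ruby
require 'aws-sdk-s3'
require 'cgi'

client = Aws::S3::Client.new(region: 'us-east-1')

# Default: the plugin injects encoding_type: 'url' and transparently
# decodes keys, prefixes, markers, and delimiters in the response.
client.list_objects(bucket: 'my-bucket').contents.map(&:key)

# Setting :encoding_type yourself disables the plugin, so keys come
# back URL-encoded and must be decoded by the caller.
resp = client.list_objects(bucket: 'my-bucket', encoding_type: 'url')
resp.contents.map { |o| CGI.unescape(o.key) }
```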
+ # + # The following operations are managed: + # + # * {S3::Client#list_objects} + # * {S3::Client#list_multipart_uploads} + # * {S3::Client#list_object_versions} + # + class UrlEncodedKeys < Seahorse::Client::Plugin + + class Handler < Seahorse::Client::Handler + + def call(context) + if context.params.key?(:encoding_type) + @handler.call(context) # user managed + else + manage_keys(context) + end + end + + private + + def manage_keys(context) + context.params[:encoding_type] = 'url' + @handler.call(context).on_success do |resp| + send("decode_#{resp.context.operation_name}_keys", resp.data) + end + end + + def decode_list_objects_keys(data) + decode(:marker, data) + decode(:next_marker, data) + decode(:prefix, data) + decode(:delimiter, data) + data.contents.each { |o| decode(:key, o) } if data.contents + data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes + end + + def decode_list_object_versions_keys(data) + decode(:key_marker, data) + decode(:next_key_marker, data) + decode(:prefix, data) + decode(:delimiter, data) + data.versions.each { |o| decode(:key, o) } if data.versions + data.delete_markers.each { |o| decode(:key, o) } if data.delete_markers + data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes + end + + def decode_list_multipart_uploads_keys(data) + decode(:key_marker, data) + decode(:next_key_marker, data) + decode(:prefix, data) + decode(:delimiter, data) + data.uploads.each { |o| decode(:key, o) } if data.uploads + data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes + end + + def decode(member, struct) + if struct[member] + struct[member] = CGI.unescape(struct[member]) + end + end + + end + + handler(Handler, + step: :validate, + priority: 0, + operations: [ + :list_objects, + :list_object_versions, + :list_multipart_uploads, + ] + ) + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigned_post.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigned_post.rb new file mode 100644 index 0000000..94b6c4a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigned_post.rb @@ -0,0 +1,699 @@ +# frozen_string_literal: true + +require 'openssl' +require 'base64' + +module Aws + module S3 + + # @note Normally you do not need to construct a {PresignedPost} yourself. + # See {Bucket#presigned_post} and {Object#presigned_post}. + # + # ## Basic Usage + # + # To generate a presigned post, you need AWS credentials, the region + # your bucket is in, and the name of your bucket. You can apply constraints + # to the post object as options to {#initialize} or by calling + # methods such as {#key} and {#content_length_range}. + # + # The following two examples are equivalent. + # + # ```ruby + # post = Aws::S3::PresignedPost.new(creds, region, bucket, { + # key: '/uploaded/object/key', + # content_length_range: 0..1024, + # acl: 'public-read', + # metadata: { + # 'original-filename' => '${filename}' + # } + # }) + # post.fields + # #=> { ... } + # + # post = Aws::S3::PresignedPost.new(creds, region, bucket). + # key('/uploaded/object/key'). + # content_length_range(0..1024). + # acl('public-read'). + # metadata('original-filename' => '${filename}'). + # fields + # #=> { ... } + # ``` + # + # ## HTML Forms + # + # You can use a {PresignedPost} object to build an HTML form. It is + # recommended to use some helper to build the form tag and input + # tags that properly escapes values. 
+ # + # ### Form Tag + # + # To upload a file to Amazon S3 using a browser, you need to create + # a post form. The {#url} method returns the value you should use + # as the form action. + # + # ```erb + #
+    #   <form action="<%= @post.url %>" method="post" enctype="multipart/form-data">
+    #     ...
+    #   </form>
+    # ```
+    #
+    # The following attributes must be set on the form:
+    #
+    # * `action` - This must be the {#url}.
+    # * `method` - This must be `post`.
+    # * `enctype` - This must be `multipart/form-data`.
+    #
+    # ### Form Fields
+    #
+    # The {#fields} method returns a hash of form fields to render inside
+    # the form. Typically these are rendered as hidden input fields.
+    #
+    # ```erb
+    # <% @post.fields.each do |name, value| %>
+    #   <input type="hidden" name="<%= name %>" value="<%= value %>"/>
+    # <% end %>
+    # ```
+    #
+    # Lastly, the form must have a file field with the name `file`.
+    #
+    # ```erb
+    # <input type="file" name="file"/>
+    # ```
+    #
+    # ## Post Policy
+    #
+    # When you construct a {PresignedPost}, you must specify every form
+    # field name that will be posted by the browser. If you omit a form
+    # field sent by the browser, Amazon S3 will reject the request.
+    # You can specify accepted form field values three ways:
+    #
+    # * Specify exactly what the value must be.
+    # * Specify what value the field starts with.
+    # * Specify the field may have any value.
+    #
+    # ### Field Equals
+    #
+    # You can specify that a form field must be a certain value.
+    # Simply pass an option like `:content_type` to the constructor,
+    # or call the associated method.
+    #
+    # ```ruby
+    # post = Aws::S3::PresignedPost.new(creds, region, bucket)
+    # post.content_type('text/plain')
+    # ```
+    #
+    # If any of the given values are changed by the user in the form, then
+    # Amazon S3 will reject the POST request.
+    #
+    # ### Field Starts With
+    #
+    # You can specify prefix values for many of the POST form fields.
+    # To specify a required prefix, use the `:<field>_starts_with`
+    # option or call the associated `#<field>_starts_with` method.
+    #
+    # ```ruby
+    # post = Aws::S3::PresignedPost.new(creds, region, bucket, {
+    #   key_starts_with: '/images/',
+    #   content_type_starts_with: 'image/',
+    #   # ...
+    # })
+    # ```
+    #
+    # When using starts with, the form must contain a field where the
+    # user can specify the value. The {PresignedPost} will not add
+    # a value for these fields.
+    #
+    # ### Any Field Value
+    #
+    # To white-list a form field to send any value, you can name that
+    # field with `:allow_any` or {#allow_any}.
+    #
+    # ```ruby
+    # post = Aws::S3::PresignedPost.new(creds, region, bucket, {
+    #   key: 'object-key',
+    #   allow_any: ['Filename'],
+    #   # ...
+    # })
+    # ```
+    #
+    # ### Metadata
+    #
+    # You can add rules for metadata fields using `:metadata`, {#metadata},
+    # `:metadata_starts_with` and {#metadata_starts_with}. Unlike other
+    # form fields, you pass a hash value to these options/methods:
+    #
+    # ```ruby
+    # post = Aws::S3::PresignedPost.new(creds, region, bucket).
+    #   key('/fixed/key').
+    #   metadata(foo: 'bar')
+    #
+    # post.fields['x-amz-meta-foo']
+    # #=> 'bar'
+    # ```
+    #
+    # ### The `${filename}` Variable
+    #
+    # The string `${filename}` is automatically replaced with the name of the
+    # file provided by the user and is recognized by all form fields. It is
+    # not supported with `starts_with` conditions.
+    #
+    # If the browser or client provides a full or partial path to the file,
+    # only the text following the last slash (/) or backslash (\) will be used
+    # (e.g., "C:\Program Files\directory1\file.txt" will be interpreted
+    # as "file.txt"). If no file or file name is provided, the variable is
+    # replaced with an empty string.
+    #
+    # In the following example, we use `${filename}` to store the original
+    # filename in the `x-amz-meta-` hash with the uploaded object.
+ # + # ```ruby + # post = Aws::S3::PresignedPost.new(creds, region, bucket, { + # key: '/fixed/key', + # metadata: { + # 'original-filename': '${filename}' + # } + # }) + # ``` + # + class PresignedPost + @@allowed_fields = [] + + # @param [Credentials] credentials Security credentials for signing + # the post policy. + # @param [String] bucket_region Region of the target bucket. + # @param [String] bucket_name Name of the target bucket. + # @option options [Boolean] :use_accelerate_endpoint (false) When `true`, + # PresignedPost will attempt to use accelerated endpoint. + # @option options [String] :url See {PresignedPost#url}. + # @option options [Sting, Array] :allow_any + # See {PresignedPost#allow_any}. + # @option options [Time] :signature_expiration Specify when the signature on + # the post will expire. Defaults to one hour from creation of the + # presigned post. May not exceed one week from creation time. + # @option options [String] :key See {PresignedPost#key}. + # @option options [String] :key_starts_with + # See {PresignedPost#key_starts_with}. + # @option options [String] :acl See {PresignedPost#acl}. + # @option options [String] :acl_starts_with + # See {PresignedPost#acl_starts_with}. + # @option options [String] :cache_control + # See {PresignedPost#cache_control}. + # @option options [String] :cache_control_starts_with + # See {PresignedPost#cache_control_starts_with}. + # @option options [String] :content_type See {PresignedPost#content_type}. + # @option options [String] :content_type_starts_with + # See {PresignedPost#content_type_starts_with}. + # @option options [String] :content_disposition + # See {PresignedPost#content_disposition}. + # @option options [String] :content_disposition_starts_with + # See {PresignedPost#content_disposition_starts_with}. + # @option options [String] :content_encoding + # See {PresignedPost#content_encoding}. + # @option options [String] :content_encoding_starts_with + # See {PresignedPost#content_encoding_starts_with}. + # @option options [Time] :expires See {PresignedPost#expires}. + # @option options [String] :expires_starts_with + # See {PresignedPost#expires_starts_with}. + # @option options [Range] :content_length_range + # See {PresignedPost#content_length_range}. + # @option options [String] :success_action_redirect + # See {PresignedPost#success_action_redirect}. + # @option options [String] :success_action_redirect_starts_with + # See {PresignedPost#success_action_redirect_starts_with}. + # @option options [String] :success_action_status + # See {PresignedPost#success_action_status}. + # @option options [String] :storage_class + # See {PresignedPost#storage_class}. + # @option options [String] :website_redirect_location + # See {PresignedPost#website_redirect_location}. + # @option options [Hash] :metadata + # See {PresignedPost#metadata}. + # @option options [Hash] :metadata_starts_with + # See {PresignedPost#metadata_starts_with}. + # @option options [String] :server_side_encryption + # See {PresignedPost#server_side_encryption}. + # @option options [String] :server_side_encryption_aws_kms_key_id + # See {PresignedPost#server_side_encryption_aws_kms_key_id}. + # @option options [String] :server_side_encryption_customer_algorithm + # See {PresignedPost#server_side_encryption_customer_algorithm}. + # @option options [String] :server_side_encryption_customer_key + # See {PresignedPost#server_side_encryption_customer_key}. 
+      # @option options [String] :server_side_encryption_customer_key_starts_with
+      #   See {PresignedPost#server_side_encryption_customer_key_starts_with}.
+      def initialize(credentials, bucket_region, bucket_name, options = {})
+        @credentials = credentials.credentials
+        @bucket_region = bucket_region
+        @bucket_name = bucket_name
+        @accelerate = !!options.delete(:use_accelerate_endpoint)
+        options.delete(:url) if @accelerate # resource methods pass url
+        @url = options.delete(:url) || bucket_url
+        @fields = {}
+        @key_set = false
+        @signature_expiration = Time.now + 3600
+        @conditions = [{ 'bucket' => @bucket_name }]
+        options.each do |option_name, option_value|
+          case option_name
+          when :allow_any then allow_any(option_value)
+          when :signature_expiration then @signature_expiration = option_value
+          else
+            if @@allowed_fields.include?(option_name)
+              send("#{option_name}", option_value)
+            else
+              raise ArgumentError, "Unsupported option: #{option_name}"
+            end
+          end
+        end
+      end
+
+      # @return [String] The URL to post a file upload to. This should be
+      #   the form action.
+      attr_reader :url
+
+      # @return [Hash] A hash of fields to render in an HTML form
+      #   as hidden input fields.
+      def fields
+        check_required_values!
+        datetime = Time.now.utc.strftime('%Y%m%dT%H%M%SZ')
+        fields = @fields.dup
+        fields.update('policy' => policy(datetime))
+        fields.update(signature_fields(datetime))
+        fields.update('x-amz-signature' => signature(datetime, fields['policy']))
+      end
+
+      # A list of form fields to white-list with any value.
+      # @param [String, Array<String>] field_names
+      # @return [self]
+      def allow_any(*field_names)
+        field_names.flatten.each do |field_name|
+          @key_set = true if field_name.to_s == 'key'
+          starts_with(field_name, '')
+        end
+        self
+      end
+
+      # @api private
+      def self.define_field(field, *args, &block)
+        @@allowed_fields << field
+        options = args.last.is_a?(Hash) ? args.pop : {}
+        field_name = args.last || field.to_s
+
+        if block_given?
+          define_method("#{field}", block)
+        else
+          define_method("#{field}") do |value|
+            with(field_name, value)
+          end
+
+          if options[:starts_with]
+            @@allowed_fields << "#{field}_starts_with".to_sym
+            define_method("#{field}_starts_with") do |value|
+              starts_with(field_name, value)
+            end
+          end
+        end
+      end
+
+      # @!group Fields
+
+      # The key to use for the uploaded object. You can use `${filename}`
+      # as a variable in the key. This will be replaced with the name
+      # of the file as provided by the user.
+      #
+      # For example, if the key is given as `/user/betty/${filename}` and
+      # the file uploaded is named `lolcatz.jpg`, the resultant key will
+      # be `/user/betty/lolcatz.jpg`.
+      #
+      # @param [String] key
+      # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+      # @return [self]
+      define_field(:key) do |key|
+        @key_set = true
+        with('key', key)
+      end
+
+      # Specify a prefix for the uploaded object key.
+      # @param [String] prefix
+      # @see #key
+      # @return [self]
+      define_field(:key_starts_with) do |prefix|
+        @key_set = true
+        starts_with('key', prefix)
+      end
+
+      # @!method acl(canned_acl)
+      #   Specify the canned ACL (access control list) for the object.
+ # May be one of the following values: + # + # * `private` + # * `public-read` + # * `public-read-write` + # * `authenticated-read` + # * `bucket-owner-read` + # * `bucket-owner-full-control` + # + # @param [String] canned_acl + # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + # @return [self] + # + # @!method acl_starts_with(prefix) + # @param [String] prefix + # @see #acl + # @return [self] + define_field(:acl, starts_with: true) + + # @!method cache_control(value) + # Specify caching behavior along the request/reply chain. + # @param [String] value + # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. + # @return [self] + # + # @!method cache_control_starts_with(prefix) + # @param [String] prefix + # @see #cache_control + # @return [self] + define_field(:cache_control, 'Cache-Control', starts_with: true) + + # @return [String] + # @!method content_type(value) + # A standard MIME type describing the format of the contents. + # @param [String] value + # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @return [self] + # + # @!method content_type_starts_with(prefix) + # @param [String] prefix + # @see #content_type + # @return [self] + define_field(:content_type, 'Content-Type', starts_with: true) + + # @!method content_disposition(value) + # Specifies presentational information for the object. + # @param [String] value + # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # @return [self] + # + # @!method content_disposition_starts_with(prefix) + # @param [String] prefix + # @see #content_disposition + # @return [self] + define_field(:content_disposition, 'Content-Disposition', starts_with: true) + + # @!method content_encoding(value) + # Specifies what content encodings have been applied to the object + # and thus what decoding mechanisms must be applied to obtain the + # media-type referenced by the Content-Type header field. + # @param [String] value + # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # @return [self] + # + # @!method content_encoding_starts_with(prefix) + # @param [String] prefix + # @see #content_encoding + # @return [self] + define_field(:content_encoding, 'Content-Encoding', starts_with: true) + + # The date and time at which the object is no longer cacheable. + # @note This does not affect the expiration of the presigned post + # signature. + # @param [Time] time + # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @return [self] + define_field(:expires) do |time| + with('Expires', time.httpdate) + end + + # @param [String] prefix + # @see #expires + # @return [self] + define_field(:expires_starts_with) do |prefix| + starts_with('Expires', prefix) + end + + # The minimum and maximum allowable size for the uploaded content. + # @param [Range] byte_range + # @return [self] + define_field(:content_length_range) do |byte_range| + min = byte_range.begin + max = byte_range.end + max -= 1 if byte_range.exclude_end? + @conditions << ['content-length-range', min, max] + self + end + + # @!method success_action_redirect(value) + # The URL to which the client is redirected + # upon successful upload. If {#success_action_redirect} is not + # specified, Amazon S3 returns the empty document type specified + # by {#success_action_status}. + # + # If Amazon S3 cannot interpret the URL, it acts as if the field + # is not present. If the upload fails, Amazon S3 displays an error + # and does not redirect the user to a URL. 
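A sketch that chains several of these field helpers; the credentials and bucket are hypothetical. Each helper records a form field and/or a policy condition and returns `self`:

```ruby
require 'aws-sdk-s3'

creds = Aws::Credentials.new('AKID', 'SECRET') # hypothetical
post = Aws::S3::PresignedPost.new(creds, 'us-east-1', 'my-bucket').
  key_starts_with('uploads/').                # condition only; browser supplies the key
  content_type('image/png').                  # fixed form field + condition
  content_length_range(1..(5 * 1024 * 1024)). # condition only
  success_action_status('201')

post.url    #=> the form action URL for the bucket
post.fields #=> includes 'Content-Type', 'success_action_status', the
            #   encoded 'policy', and the x-amz-* signature fields
```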
+ # + # @param [String] value + # @return [self] + # + # @!method success_action_redirect_starts_with(prefix) + # @param [String] prefix + # @see #success_action_redirect + # @return [self] + define_field(:success_action_redirect, starts_with: true) + + # @!method success_action_status(value) + # The status code returned to the client upon + # successful upload if {#success_action_redirect} is not + # specified. + # + # Accepts the values `200`, `201`, or `204` (default). + # + # If the value is set to 200 or 204, Amazon S3 returns an empty + # document with a 200 or 204 status code. If the value is set to 201, + # Amazon S3 returns an XML document with a 201 status code. + # + # If the value is not set or if it is set to an invalid value, Amazon + # S3 returns an empty document with a 204 status code. + # + # @param [String] value The status code returned to the client upon + # @return [self] + define_field(:success_action_status) + + # @!method storage_class(value) + # Storage class to use for storing the object. Defaults to + # `STANDARD`. Must be one of: + # + # * `STANDARD` + # * `REDUCED_REDUNDANCY` + # + # You cannot specify `GLACIER` as the storage class. To transition + # objects to the GLACIER storage class you can use lifecycle + # configuration. + # @param [String] value Storage class to use for storing the + # @return [self] + define_field(:storage_class, 'x-amz-storage-class') + + # @!method website_redirect_location(value) + # If the bucket is configured as a website, + # redirects requests for this object to another object in the + # same bucket or to an external URL. Amazon S3 stores this value + # in the object metadata. + # + # The value must be prefixed by, "/", "http://" or "https://". + # The length of the value is limited to 2K. + # + # @param [String] value + # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html + # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + # @return [self] + define_field(:website_redirect_location, 'x-amz-website-redirect-location') + + # Metadata hash to store with the uploaded object. Hash keys will be + # prefixed with "x-amz-meta-". + # @param [Hash] hash + # @return [self] + define_field(:metadata) do |hash| + hash.each do |key, value| + with("x-amz-meta-#{key}", value) + end + self + end + + # Specify allowable prefix for each key in the metadata hash. + # @param [Hash] hash + # @see #metadata + # @return [self] + define_field(:metadata_starts_with) do |hash| + hash.each do |key, value| + starts_with("x-amz-meta-#{key}", value) + end + self + end + + # @!endgroup + + # @!group Server-Side Encryption Fields + + # @!method server_side_encryption(value) + # Specifies a server-side encryption algorithm to use when Amazon + # S3 creates an object. Valid values include: + # + # * `aws:kms` + # * `AES256` + # + # @param [String] value + # @return [self] + define_field(:server_side_encryption, 'x-amz-server-side-encryption') + + # @!method server_side_encryption_aws_kms_key_id(value) + # If {#server_side_encryption} is called with the value of `aws:kms`, + # this method specifies the ID of the AWS Key Management Service + # (KMS) master encryption key to use for the object. 
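Tying the two SSE-KMS fields together, a sketch with hypothetical credentials, bucket, and key ID:

```ruby
require 'aws-sdk-s3'

creds = Aws::Credentials.new('AKID', 'SECRET') # hypothetical
post = Aws::S3::PresignedPost.new(creds, 'us-east-1', 'my-bucket').
  key('uploads/report.pdf').
  server_side_encryption('aws:kms').
  server_side_encryption_aws_kms_key_id('hypothetical-kms-key-id')

post.fields['x-amz-server-side-encryption'] #=> "aws:kms"
```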
+ # @param [String] value + # @return [self] + define_field( + :server_side_encryption_aws_kms_key_id, + 'x-amz-server-side-encryption-aws-kms-key-id' + ) + + # @!endgroup + + # @!group Server-Side Encryption with Customer-Provided Key Fields + + # @!method server_side_encryption_customer_algorithm(value) + # Specifies the algorithm to use to when encrypting the object. + # Must be set to `AES256` when using customer-provided encryption + # keys. Must also call {#server_side_encryption_customer_key}. + # @param [String] value + # @see #server_side_encryption_customer_key + # @return [self] + define_field( + :server_side_encryption_customer_algorithm, + 'x-amz-server-side-encryption-customer-algorithm' + ) + + # Specifies the customer-provided encryption key for Amazon S3 to use + # in encrypting data. This value is used to store the object and then + # it is discarded; Amazon does not store the encryption key. + # + # You must also call {#server_side_encryption_customer_algorithm}. + # + # @param [String] value + # @see #server_side_encryption_customer_algorithm + # @return [self] + define_field(:server_side_encryption_customer_key) do |value| + field_name = 'x-amz-server-side-encryption-customer-key' + with(field_name, base64(value)) + with(field_name + '-MD5', base64(OpenSSL::Digest::MD5.digest(value))) + end + + # @param [String] prefix + # @see #server_side_encryption_customer_key + # @return [self] + define_field(:server_side_encryption_customer_key_starts_with) do |prefix| + field_name = 'x-amz-server-side-encryption-customer-key' + starts_with(field_name, prefix) + end + + # @!endgroup + + private + + def with(field_name, value) + fvar = '${filename}' + if index = value.rindex(fvar) + if index + fvar.size == value.size + @fields[field_name] = value + starts_with(field_name, value[0,index]) + else + msg = "${filename} only supported at the end of #{field_name}" + raise ArgumentError, msg + end + else + @fields[field_name] = value.to_s + @conditions << { field_name => value.to_s } + end + self + end + + def starts_with(field_name, value, &block) + @conditions << ['starts-with', "$#{field_name}", value.to_s] + self + end + + def check_required_values! + unless @key_set + msg = 'key required; you must provide a key via :key, '\ + ":key_starts_with, or :allow_any => ['key']" + raise msg + end + end + + def bucket_url + # Taken from Aws::S3::Endpoints module + params = Aws::S3::EndpointParameters.new( + bucket: @bucket_name, + region: @bucket_region, + accelerate: @accelerate, + use_global_endpoint: true + ) + endpoint = Aws::S3::EndpointProvider.new.resolve_endpoint(params) + endpoint.url + end + + # @return [Hash] + def policy(datetime) + check_required_values! 
+ policy = {} + policy['expiration'] = @signature_expiration.utc.iso8601 + policy['conditions'] = @conditions.dup + signature_fields(datetime).each do |name, value| + policy['conditions'] << { name => value } + end + base64(Json.dump(policy)) + end + + def signature_fields(datetime) + fields = {} + fields['x-amz-credential'] = credential_scope(datetime) + fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256' + fields['x-amz-date'] = datetime + if session_token = @credentials.session_token + fields['x-amz-security-token'] = session_token + end + fields + end + + def signature(datetime, string_to_sign) + k_secret = @credentials.secret_access_key + k_date = hmac('AWS4' + k_secret, datetime[0,8]) + k_region = hmac(k_date, @bucket_region) + k_service = hmac(k_region, 's3') + k_credentials = hmac(k_service, 'aws4_request') + hexhmac(k_credentials, string_to_sign) + end + + def hmac(key, value) + OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha256'), key, value) + end + + def hexhmac(key, value) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), key, value) + end + + def credential_scope(datetime) + parts = [] + parts << @credentials.access_key_id + parts << datetime[0,8] + parts << @bucket_region + parts << 's3' + parts << 'aws4_request' + parts.join('/') + end + + def base64(str) + Base64.strict_encode64(str) + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigner.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigner.rb new file mode 100644 index 0000000..d970c1e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/presigner.rb @@ -0,0 +1,257 @@ +# frozen_string_literal: true + +module Aws + module S3 + class Presigner + # @api private + ONE_WEEK = 60 * 60 * 24 * 7 + + # @api private + FIFTEEN_MINUTES = 60 * 15 + + # @api private + BLACKLISTED_HEADERS = [ + 'accept', + 'amz-sdk-request', + 'cache-control', + 'content-length', # due to a ELB bug + 'expect', + 'from', + 'if-match', + 'if-none-match', + 'if-modified-since', + 'if-unmodified-since', + 'if-range', + 'max-forwards', + 'pragma', + 'proxy-authorization', + 'referer', + 'te', + 'user-agent' + ].freeze + + # @option options [Client] :client Optionally provide an existing + # S3 client + def initialize(options = {}) + @client = options[:client] || Aws::S3::Client.new + end + + # Create presigned URLs for S3 operations. + # + # @example + # signer = Aws::S3::Presigner.new + # url = signer.presigned_url(:get_object, bucket: "bucket", key: "key") + # + # @param [Symbol] method Symbolized method name of the operation you want + # to presign. + # + # @option params [Integer] :expires_in (900) The number of seconds + # before the presigned URL expires. Defaults to 15 minutes. As signature + # version 4 has a maximum expiry time of one week for presigned URLs, + # attempts to set this value to greater than one week (604800) will + # raise an exception. + # + # @option params [Time] :time (Time.now) The starting time for when the + # presigned url becomes active. + # + # @option params [Boolean] :secure (true) When `false`, a HTTP URL + # is returned instead of the default HTTPS URL. + # + # @option params [Boolean] :virtual_host (false) When `true`, the + # bucket name will be used as the hostname. + # + # @option params [Boolean] :use_accelerate_endpoint (false) When `true`, + # Presigner will attempt to use accelerated endpoint. 
+ # + # @option params [Array] :whitelist_headers ([]) Additional + # headers to be included for the signed request. Certain headers beyond + # the authorization header could, in theory, be changed for various + # reasons (including but not limited to proxies) while in transit and + # after signing. This would lead to signature errors being returned, + # despite no actual problems with signing. (see BLACKLISTED_HEADERS) + # + # @raise [ArgumentError] Raises an ArgumentError if `:expires_in` + # exceeds one week. + # + # @return [String] a presigned url + def presigned_url(method, params = {}) + url, _headers = _presigned_request(method, params) + url + end + + # Allows you to create presigned URL requests for S3 operations. This + # method returns a tuple containing the URL and the signed X-amz-* headers + # to be used with the presigned url. + # + # @example + # signer = Aws::S3::Presigner.new + # url, headers = signer.presigned_request( + # :get_object, bucket: "bucket", key: "key" + # ) + # + # @param [Symbol] method Symbolized method name of the operation you want + # to presign. + # + # @option params [Integer] :expires_in (900) The number of seconds + # before the presigned URL expires. Defaults to 15 minutes. As signature + # version 4 has a maximum expiry time of one week for presigned URLs, + # attempts to set this value to greater than one week (604800) will + # raise an exception. + # + # @option params [Time] :time (Time.now) The starting time for when the + # presigned url becomes active. + # + # @option params [Boolean] :secure (true) When `false`, a HTTP URL + # is returned instead of the default HTTPS URL. + # + # @option params [Boolean] :virtual_host (false) When `true`, the + # bucket name will be used as the hostname. This will cause + # the returned URL to be 'http' and not 'https'. + # + # @option params [Boolean] :use_accelerate_endpoint (false) When `true`, + # Presigner will attempt to use accelerated endpoint. + # + # @option params [Array] :whitelist_headers ([]) Additional + # headers to be included for the signed request. Certain headers beyond + # the authorization header could, in theory, be changed for various + # reasons (including but not limited to proxies) while in transit and + # after signing. This would lead to signature errors being returned, + # despite no actual problems with signing. (see BLACKLISTED_HEADERS) + # + # @raise [ArgumentError] Raises an ArgumentError if `:expires_in` + # exceeds one week. + # + # @return [String, Hash] A tuple with a presigned URL and headers that + # should be included with the request. 
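A sketch of the tuple form in use (bucket and key hypothetical). The returned headers were part of what was signed, so the eventual request must send them unchanged:

```ruby
require 'aws-sdk-s3'
require 'net/http'

signer = Aws::S3::Presigner.new
url, headers = signer.presigned_request(
  :put_object,
  bucket: 'my-bucket',             # hypothetical
  key: 'data.bin',                 # hypothetical
  server_side_encryption: 'AES256' # becomes a signed x-amz-* header
)

uri = URI(url)
request = Net::HTTP::Put.new(uri)
headers.each { |name, value| request[name] = value } # replay signed headers
request.body = 'hello'
Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end
```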
+ def presigned_request(method, params = {}) + _presigned_request(method, params, false) + end + + private + + def _presigned_request(method, params, hoist = true) + virtual_host = params.delete(:virtual_host) + time = params.delete(:time) + unsigned_headers = unsigned_headers(params) + secure = params.delete(:secure) != false + expires_in = expires_in(params) + + req = @client.build_request(method, params) + use_bucket_as_hostname(req) if virtual_host + handle_presigned_url_context(req) + + x_amz_headers = sign_but_dont_send( + req, expires_in, secure, time, unsigned_headers, hoist + ) + [req.send_request.data, x_amz_headers] + end + + def unsigned_headers(params) + whitelist_headers = params.delete(:whitelist_headers) || [] + BLACKLISTED_HEADERS - whitelist_headers + end + + def expires_in(params) + if (expires_in = params.delete(:expires_in)) + if expires_in > ONE_WEEK + raise ArgumentError, + "expires_in value of #{expires_in} exceeds one-week maximum." + elsif expires_in <= 0 + raise ArgumentError, + "expires_in value of #{expires_in} cannot be 0 or less." + end + expires_in + else + FIFTEEN_MINUTES + end + end + + def use_bucket_as_hostname(req) + req.handle(priority: 35) do |context| + uri = context.http_request.endpoint + uri.host = context.params[:bucket] + uri.path.sub!("/#{context.params[:bucket]}", '') + @handler.call(context) + end + end + + # Used for excluding presigned_urls from API request count. + # + # Store context information as early as possible, to allow + # handlers to perform decisions based on this flag if need. + def handle_presigned_url_context(req) + req.handle(step: :initialize, priority: 98) do |context| + context[:presigned_url] = true + @handler.call(context) + end + end + + # @param [Seahorse::Client::Request] req + def sign_but_dont_send( + req, expires_in, secure, time, unsigned_headers, hoist = true + ) + x_amz_headers = {} + + http_req = req.context.http_request + + req.handlers.remove(Aws::S3::Plugins::S3Signer::LegacyHandler) + req.handlers.remove(Aws::Plugins::Sign::Handler) + req.handlers.remove(Seahorse::Client::Plugins::ContentLength::Handler) + + req.handle(step: :send) do |context| + # if an endpoint was not provided, force secure or insecure + if context.config.regional_endpoint + http_req.endpoint.scheme = secure ? 'https' : 'http' + http_req.endpoint.port = secure ? 443 : 80 + end + + query = http_req.endpoint.query ? http_req.endpoint.query.split('&') : [] + http_req.headers.each do |key, value| + next unless key =~ /^x-amz/i + + if hoist + value = Aws::Sigv4::Signer.uri_escape(value) + key = Aws::Sigv4::Signer.uri_escape(key) + # hoist x-amz-* headers to the querystring + http_req.headers.delete(key) + query << "#{key}=#{value}" + else + x_amz_headers[key] = value + end + end + http_req.endpoint.query = query.join('&') unless query.empty? 
+ + auth_scheme = context[:auth_scheme] + scheme_name = auth_scheme['name'] + region = if scheme_name == 'sigv4a' + auth_scheme['signingRegionSet'].first + else + auth_scheme['signingRegion'] + end + signer = Aws::Sigv4::Signer.new( + service: auth_scheme['signingName'] || 's3', + region: region || context.config.region, + credentials_provider: context.config.credentials, + signing_algorithm: scheme_name.to_sym, + uri_escape_path: !!!auth_scheme['disableDoubleEncoding'], + unsigned_headers: unsigned_headers, + apply_checksum_header: false + ) + + url = signer.presign_url( + http_method: http_req.http_method, + url: http_req.endpoint, + headers: http_req.headers, + body_digest: 'UNSIGNED-PAYLOAD', + expires_in: expires_in, + time: time + ).to_s + + Seahorse::Client::Response.new(context: context, data: url) + end + # Return the headers + x_amz_headers + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/resource.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/resource.rb new file mode 100644 index 0000000..0005a52 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/resource.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + + # This class provides a resource oriented interface for S3. + # To create a resource object: + # + # resource = Aws::S3::Resource.new(region: 'us-west-2') + # + # You can supply a client object with custom configuration that will be used for all resource operations. + # If you do not pass `:client`, a default client will be constructed. + # + # client = Aws::S3::Client.new(region: 'us-west-2') + # resource = Aws::S3::Resource.new(client: client) + # + class Resource + + # @param options ({}) + # @option options [Client] :client + def initialize(options = {}) + @client = options[:client] || Client.new(options) + end + + # @return [Client] + def client + @client + end + + # @!group Actions + + # @example Request syntax with placeholder values + # + # bucket = s3.create_bucket({ + # acl: "private", # accepts private, public-read, public-read-write, authenticated-read + # bucket: "BucketName", # required + # create_bucket_configuration: { + # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2 + # }, + # grant_full_control: "GrantFullControl", + # grant_read: "GrantRead", + # grant_read_acp: "GrantReadACP", + # grant_write: "GrantWrite", + # grant_write_acp: "GrantWriteACP", + # object_lock_enabled_for_bucket: false, + # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced + # }) + # @param [Hash] options ({}) + # @option options [String] :acl + # The canned ACL to apply to the bucket. + # @option options [required, String] :bucket + # The name of the bucket to create. + # @option options [Types::CreateBucketConfiguration] :create_bucket_configuration + # The configuration information for the bucket. 
+ # @option options [String] :grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions on + # the bucket. + # @option options [String] :grant_read + # Allows grantee to list the objects in the bucket. + # @option options [String] :grant_read_acp + # Allows grantee to read the bucket ACL. + # @option options [String] :grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @option options [String] :grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # @option options [Boolean] :object_lock_enabled_for_bucket + # Specifies whether you want S3 Object Lock to be enabled for the new + # bucket. + # @option options [String] :object_ownership + # The container element for object ownership for a bucket's ownership + # controls. + # + # BucketOwnerPreferred - Objects uploaded to the bucket change ownership + # to the bucket owner if the objects are uploaded with the + # `bucket-owner-full-control` canned ACL. + # + # ObjectWriter - The uploading account will own the object if the object + # is uploaded with the `bucket-owner-full-control` canned ACL. + # + # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no + # longer affect permissions. The bucket owner automatically owns and has + # full control over every object in the bucket. The bucket only accepts + # PUT requests that don't specify an ACL or bucket owner full control + # ACLs, such as the `bucket-owner-full-control` canned ACL or an + # equivalent form of this ACL expressed in the XML format. + # @return [Bucket] + def create_bucket(options = {}) + @client.create_bucket(options) + Bucket.new( + name: options[:bucket], + client: @client + ) + end + + # @!group Associations + + # @param [String] name + # @return [Bucket] + def bucket(name) + Bucket.new( + name: name, + client: @client + ) + end + + # @example Request syntax with placeholder values + # + # s3.buckets() + # @param [Hash] options ({}) + # @return [Bucket::Collection] + def buckets(options = {}) + batches = Enumerator.new do |y| + batch = [] + resp = @client.list_buckets(options) + resp.data.buckets.each do |b| + batch << Bucket.new( + name: b.name, + data: b, + client: @client + ) + end + y.yield(batch) + end + Bucket::Collection.new(batches) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/types.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/types.rb new file mode 100644 index 0000000..8c17114 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/types.rb @@ -0,0 +1,14019 @@ +# frozen_string_literal: true + +# WARNING ABOUT GENERATED CODE +# +# This file is generated. See the contributing guide for more information: +# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md +# +# WARNING ABOUT GENERATED CODE + +module Aws::S3 + module Types + + # Specifies the days since the initiation of an incomplete multipart + # upload that Amazon S3 will wait before permanently removing all parts + # of the upload. For more information, see [ Aborting Incomplete + # Multipart Uploads Using a Bucket Lifecycle Policy][1] in the *Amazon + # S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + # + # @!attribute [rw] days_after_initiation + # Specifies the number of days after which Amazon S3 aborts an + # incomplete multipart upload. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload AWS API Documentation + # + class AbortIncompleteMultipartUpload < Struct.new( + :days_after_initiation) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput AWS API Documentation + # + class AbortMultipartUploadOutput < Struct.new( + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name to which the upload was taking place. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Key of the object for which the multipart upload was initiated. + # @return [String] + # + # @!attribute [rw] upload_id + # Upload ID that identifies the multipart upload. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
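As a sketch, this structure is normally supplied through a bucket lifecycle configuration using snake_cased hash keys (bucket name hypothetical):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
# Abort any multipart upload still incomplete seven days after it began.
client.put_bucket_lifecycle_configuration(
  bucket: 'my-bucket', # hypothetical
  lifecycle_configuration: {
    rules: [{
      id: 'abort-stale-uploads',
      status: 'Enabled',
      filter: { prefix: '' }, # apply to the whole bucket
      abort_incomplete_multipart_upload: { days_after_initiation: 7 }
    }]
  }
)
```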
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest AWS API Documentation + # + class AbortMultipartUploadRequest < Struct.new( + :bucket, + :key, + :upload_id, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # Configures the transfer acceleration state for an Amazon S3 bucket. + # For more information, see [Amazon S3 Transfer Acceleration][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + # + # @!attribute [rw] status + # Specifies the transfer acceleration status of the bucket. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration AWS API Documentation + # + class AccelerateConfiguration < Struct.new( + :status) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the elements that set the ACL permissions for an object per + # grantee. + # + # @!attribute [rw] grants + # A list of grants. + # @return [Array] + # + # @!attribute [rw] owner + # Container for the bucket owner's display name and ID. + # @return [Types::Owner] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy AWS API Documentation + # + class AccessControlPolicy < Struct.new( + :grants, + :owner) + SENSITIVE = [] + include Aws::Structure + end + + # A container for information about access control for replicas. + # + # @!attribute [rw] owner + # Specifies the replica ownership. For default and valid values, see + # [PUT bucket replication][1] in the *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation AWS API Documentation + # + class AccessControlTranslation < Struct.new( + :owner) + SENSITIVE = [] + include Aws::Structure + end + + # A conjunction (logical AND) of predicates, which is used in evaluating + # a metrics filter. The operator must have at least two predicates in + # any combination, and an object must match all of the predicates for + # the filter to apply. + # + # @!attribute [rw] prefix + # The prefix to use when evaluating an AND predicate: The prefix that + # an object must have to be included in the metrics results. + # @return [String] + # + # @!attribute [rw] tags + # The list of tags to use when evaluating an AND predicate. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator AWS API Documentation + # + class AnalyticsAndOperator < Struct.new( + :prefix, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the configuration and any analyses for the analytics filter + # of an Amazon S3 bucket. + # + # @!attribute [rw] id + # The ID that identifies the analytics configuration. + # @return [String] + # + # @!attribute [rw] filter + # The filter used to describe a set of objects for analyses. A filter + # must have exactly one prefix, one tag, or one conjunction + # (AnalyticsAndOperator). If no filter is provided, all objects will + # be considered in any analysis. + # @return [Types::AnalyticsFilter] + # + # @!attribute [rw] storage_class_analysis + # Contains data related to access patterns to be collected and made + # available to analyze the tradeoffs between different storage + # classes. 
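A sketch of how these analytics structures nest in a `put_bucket_analytics_configuration` call (both bucket names hypothetical):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
client.put_bucket_analytics_configuration(
  bucket: 'my-bucket', # hypothetical
  id: 'docs-analysis',
  analytics_configuration: {
    id: 'docs-analysis',
    filter: { prefix: 'documents/' }, # exactly one of prefix, tag, or :and
    storage_class_analysis: {
      data_export: {
        output_schema_version: 'V_1',
        destination: {
          s3_bucket_destination: {
            format: 'CSV',
            bucket: 'arn:aws:s3:::my-analytics-bucket', # hypothetical
            prefix: 'results/'
          }
        }
      }
    }
  }
)
```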
+ # @return [Types::StorageClassAnalysis] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration AWS API Documentation + # + class AnalyticsConfiguration < Struct.new( + :id, + :filter, + :storage_class_analysis) + SENSITIVE = [] + include Aws::Structure + end + + # Where to publish the analytics results. + # + # @!attribute [rw] s3_bucket_destination + # A destination signifying output to an S3 bucket. + # @return [Types::AnalyticsS3BucketDestination] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination AWS API Documentation + # + class AnalyticsExportDestination < Struct.new( + :s3_bucket_destination) + SENSITIVE = [] + include Aws::Structure + end + + # The filter used to describe a set of objects for analyses. A filter + # must have exactly one prefix, one tag, or one conjunction + # (AnalyticsAndOperator). If no filter is provided, all objects will be + # considered in any analysis. + # + # @!attribute [rw] prefix + # The prefix to use when evaluating an analytics filter. + # @return [String] + # + # @!attribute [rw] tag + # The tag to use when evaluating an analytics filter. + # @return [Types::Tag] + # + # @!attribute [rw] and + # A conjunction (logical AND) of predicates, which is used in + # evaluating an analytics filter. The operator must have at least two + # predicates. + # @return [Types::AnalyticsAndOperator] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter AWS API Documentation + # + class AnalyticsFilter < Struct.new( + :prefix, + :tag, + :and) + SENSITIVE = [] + include Aws::Structure + end + + # Contains information about where to publish the analytics results. + # + # @!attribute [rw] format + # Specifies the file format used when exporting data to Amazon S3. + # @return [String] + # + # @!attribute [rw] bucket_account_id + # The account ID that owns the destination S3 bucket. If no account ID + # is provided, the owner is not validated before exporting data. + # + # Although this value is optional, we strongly recommend that you set + # it to help prevent problems if the destination bucket ownership + # changes. + # + # + # @return [String] + # + # @!attribute [rw] bucket + # The Amazon Resource Name (ARN) of the bucket to which data is + # exported. + # @return [String] + # + # @!attribute [rw] prefix + # The prefix to use when exporting data. The prefix is prepended to + # all results. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination AWS API Documentation + # + class AnalyticsS3BucketDestination < Struct.new( + :format, + :bucket_account_id, + :bucket, + :prefix) + SENSITIVE = [] + include Aws::Structure + end + + # In terms of implementation, a Bucket is a resource. An Amazon S3 + # bucket name is globally unique, and the namespace is shared by all + # Amazon Web Services accounts. + # + # @!attribute [rw] name + # The name of the bucket. + # @return [String] + # + # @!attribute [rw] creation_date + # Date the bucket was created. This date can change when making + # changes to your bucket, such as editing its bucket policy. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket AWS API Documentation + # + class Bucket < Struct.new( + :name, + :creation_date) + SENSITIVE = [] + include Aws::Structure + end + + # The requested bucket name is not available. The bucket namespace is + # shared by all users of the system. Select a different name and try + # again. 
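In the Ruby SDK these two cases surface as error classes; a minimal sketch (bucket name hypothetical):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
begin
  client.create_bucket(bucket: 'my-bucket') # hypothetical
rescue Aws::S3::Errors::BucketAlreadyOwnedByYou
  # Raised when re-creating a bucket you already own, except in
  # us-east-1 (North Virginia), where S3 returns 200 OK and resets
  # the bucket ACLs instead.
rescue Aws::S3::Errors::BucketAlreadyExists
  # The name is taken by another account; choose a different one.
end
```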
+ # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketAlreadyExists AWS API Documentation + # + class BucketAlreadyExists < Aws::EmptyStructure; end + + # The bucket you tried to create already exists, and you own it. Amazon + # S3 returns this error in all Amazon Web Services Regions except in the + # North Virginia Region. For legacy compatibility, if you re-create an + # existing bucket that you already own in the North Virginia Region, + # Amazon S3 returns 200 OK and resets the bucket access control lists + # (ACLs). + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketAlreadyOwnedByYou AWS API Documentation + # + class BucketAlreadyOwnedByYou < Aws::EmptyStructure; end + + # Specifies the lifecycle configuration for objects in an Amazon S3 + # bucket. For more information, see [Object Lifecycle Management][1] in + # the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html + # + # @!attribute [rw] rules + # A lifecycle rule for individual objects in an Amazon S3 bucket. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration AWS API Documentation + # + class BucketLifecycleConfiguration < Struct.new( + :rules) + SENSITIVE = [] + include Aws::Structure + end + + # Container for logging status information. + # + # @!attribute [rw] logging_enabled + # Describes where logs are stored and the prefix that Amazon S3 + # assigns to all log object keys for a bucket. For more information, + # see [PUT Bucket logging][1] in the *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + # @return [Types::LoggingEnabled] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus AWS API Documentation + # + class BucketLoggingStatus < Struct.new( + :logging_enabled) + SENSITIVE = [] + include Aws::Structure + end + + # Describes the cross-origin access configuration for objects in an + # Amazon S3 bucket. For more information, see [Enabling Cross-Origin + # Resource Sharing][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + # + # @!attribute [rw] cors_rules + # A set of origins and methods (cross-origin access that you want to + # allow). You can add up to 100 rules to the configuration. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration AWS API Documentation + # + class CORSConfiguration < Struct.new( + :cors_rules) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a cross-origin access rule for an Amazon S3 bucket. + # + # @!attribute [rw] id + # Unique identifier for the rule. The value cannot be longer than 255 + # characters. + # @return [String] + # + # @!attribute [rw] allowed_headers + # Headers that are specified in the `Access-Control-Request-Headers` + # header. These headers are allowed in a preflight OPTIONS request. In + # response to any preflight OPTIONS request, Amazon S3 returns any + # requested headers that are allowed. + # @return [Array] + # + # @!attribute [rw] allowed_methods + # An HTTP method that you allow the origin to execute. Valid values + # are `GET`, `PUT`, `HEAD`, `POST`, and `DELETE`. + # @return [Array] + # + # @!attribute [rw] allowed_origins + # One or more origins you want customers to be able to access the + # bucket from. 
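A sketch of one such rule expressed through `put_bucket_cors` (bucket and origin hypothetical):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
client.put_bucket_cors(
  bucket: 'my-bucket', # hypothetical
  cors_configuration: {
    cors_rules: [{
      allowed_origins: ['https://example.com'],
      allowed_methods: ['GET', 'PUT'],
      allowed_headers: ['*'],
      expose_headers: ['ETag'],
      max_age_seconds: 3000
    }]
  }
)
```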
+ # @return [Array]
+ #
+ # @!attribute [rw] expose_headers
+ # One or more headers in the response that you want customers to be
+ # able to access from their applications (for example, from a
+ # JavaScript `XMLHttpRequest` object).
+ # @return [Array]
+ #
+ # @!attribute [rw] max_age_seconds
+ # The time in seconds that your browser is to cache the preflight
+ # response for the specified resource.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule AWS API Documentation
+ #
+ class CORSRule < Struct.new(
+ :id,
+ :allowed_headers,
+ :allowed_methods,
+ :allowed_origins,
+ :expose_headers,
+ :max_age_seconds)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes how an uncompressed comma-separated values (CSV)-formatted
+ # input object is formatted.
+ #
+ # @!attribute [rw] file_header_info
+ # Describes the first line of input. Valid values are:
+ #
+ # * `NONE`: First line is not a header.
+ #
+ # * `IGNORE`: First line is a header, but you can't use the header
+ # values to indicate the column in an expression. You can use column
+ # position (such as \_1, \_2, …) to indicate the column (`SELECT
+ # s._1 FROM OBJECT s`).
+ #
+ # * `USE`: First line is a header, and you can use the header value to
+ # identify a column in an expression (`SELECT "name" FROM OBJECT`).
+ # @return [String]
+ #
+ # @!attribute [rw] comments
+ # A single character used to indicate that a row should be ignored
+ # when the character is present at the start of that row. You can
+ # specify any character to indicate a comment line.
+ # @return [String]
+ #
+ # @!attribute [rw] quote_escape_character
+ # A single character used for escaping the quotation mark character
+ # inside an already escaped value. For example, the value `""" a , b
+ # """` is parsed as `" a , b "`.
+ # @return [String]
+ #
+ # @!attribute [rw] record_delimiter
+ # A single character used to separate individual records in the input.
+ # Instead of the default value, you can specify an arbitrary
+ # delimiter.
+ # @return [String]
+ #
+ # @!attribute [rw] field_delimiter
+ # A single character used to separate individual fields in a record.
+ # You can specify an arbitrary delimiter.
+ # @return [String]
+ #
+ # @!attribute [rw] quote_character
+ # A single character used for escaping when the field delimiter is
+ # part of the value. For example, if the value is `a, b`, Amazon S3
+ # wraps this field value in quotation marks, as follows: `" a , b "`.
+ #
+ # Type: String
+ #
+ # Default: `"`
+ #
+ # Ancestors: `CSV`
+ # @return [String]
+ #
+ # @!attribute [rw] allow_quoted_record_delimiter
+ # Specifies that CSV field values may contain quoted record delimiters
+ # and such records should be allowed. Default value is FALSE. Setting
+ # this value to TRUE may lower performance.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput AWS API Documentation
+ #
+ class CSVInput < Struct.new(
+ :file_header_info,
+ :comments,
+ :quote_escape_character,
+ :record_delimiter,
+ :field_delimiter,
+ :quote_character,
+ :allow_quoted_record_delimiter)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes how uncompressed comma-separated values (CSV)-formatted
+ # results are formatted.
+ #
+ # @!attribute [rw] quote_fields
+ # Indicates whether to use quotation marks around output fields.
+ #
+ # * `ALWAYS`: Always use quotation marks for output fields.
+ #
+ # * `ASNEEDED`: Use quotation marks for output fields when needed.
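+ #
+ # A sketch of how this field is set in a `select_object_content`
+ # request (bucket, key, and query are hypothetical; event handling
+ # is elided):
+ #
+ #     resp = client.select_object_content(
+ #       bucket: "my-bucket",
+ #       key: "data.csv",
+ #       expression: "SELECT * FROM S3Object s",
+ #       expression_type: "SQL",
+ #       input_serialization: { csv: { file_header_info: "USE" } },
+ #       output_serialization: { csv: { quote_fields: "ASNEEDED" } }
+ #     )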
+ # @return [String] + # + # @!attribute [rw] quote_escape_character + # The single character used for escaping the quote character inside an + # already escaped value. + # @return [String] + # + # @!attribute [rw] record_delimiter + # A single character used to separate individual records in the + # output. Instead of the default value, you can specify an arbitrary + # delimiter. + # @return [String] + # + # @!attribute [rw] field_delimiter + # The value used to separate individual fields in a record. You can + # specify an arbitrary delimiter. + # @return [String] + # + # @!attribute [rw] quote_character + # A single character used for escaping when the field delimiter is + # part of the value. For example, if the value is `a, b`, Amazon S3 + # wraps this field value in quotation marks, as follows: `" a , b "`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput AWS API Documentation + # + class CSVOutput < Struct.new( + :quote_fields, + :quote_escape_character, + :record_delimiter, + :field_delimiter, + :quote_character) + SENSITIVE = [] + include Aws::Structure + end + + # Contains all the possible checksum or digest values for an object. + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Checksum AWS API Documentation
+ #
+ class Checksum < Struct.new(
+ :checksum_crc32,
+ :checksum_crc32c,
+ :checksum_sha1,
+ :checksum_sha256)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for specifying the Lambda notification configuration.
+ #
+ # @!attribute [rw] id
+ # An optional unique identifier for configurations in a notification
+ # configuration. If you don't provide one, Amazon S3 will assign an
+ # ID.
+ # @return [String]
+ #
+ # @!attribute [rw] event
+ # The bucket event for which to send notifications.
+ # @return [String]
+ #
+ # @!attribute [rw] events
+ # Bucket events for which to send notifications.
+ # @return [Array]
+ #
+ # @!attribute [rw] cloud_function
+ # Lambda cloud function ARN that Amazon S3 can invoke when it detects
+ # events of the specified type.
+ # @return [String]
+ #
+ # @!attribute [rw] invocation_role
+ # The role supporting the invocation of the Lambda function.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration AWS API Documentation
+ #
+ class CloudFunctionConfiguration < Struct.new(
+ :id,
+ :event,
+ :events,
+ :cloud_function,
+ :invocation_role)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for all (if there are any) keys between Prefix and the next
+ # occurrence of the string specified by a delimiter. CommonPrefixes
+ # lists keys that act like subdirectories in the directory specified by
+ # Prefix. For example, if the prefix is notes/ and the delimiter is a
+ # slash (/) as in notes/summer/july, the common prefix is notes/summer/.
+ #
+ # @!attribute [rw] prefix
+ # Container for the specified common prefix.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix AWS API Documentation
+ #
+ class CommonPrefix < Struct.new(
+ :prefix)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] location
+ # The URI that identifies the newly created object.
+ # @return [String]
+ #
+ # @!attribute [rw] bucket
+ # The name of the bucket that contains the newly created object. Does
+ # not return the access point ARN or access point alias if used.
+ #
+ # When using this action with an access point, you must direct
+ # requests to the access point hostname. The access point hostname
+ # takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the
+ # bucket name. For more information about access point ARNs, see
+ # [Using access points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # The object key of the newly created object. + # @return [String] + # + # @!attribute [rw] expiration + # If the object expiration is configured, this will contain the + # expiration date (`expiry-date`) and rule ID (`rule-id`). The value + # of `rule-id` is URL-encoded. + # @return [String] + # + # @!attribute [rw] etag + # Entity tag that identifies the newly created object's data. Objects + # with different object data will have different entity tags. The + # entity tag is an opaque string. The entity tag may or may not be an + # MD5 digest of the object data. If the entity tag is not an MD5 + # digest of the object data, it will contain one or more + # nonhexadecimal characters and/or will consist of less than 32 or + # more than 32 hexadecimal digits. For more information about how the + # entity tag is calculated, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] server_side_encryption + # If you specified server-side encryption either with an Amazon + # S3-managed encryption key or an Amazon Web Services KMS key in your + # initiate multipart upload request, the response includes this + # header. It confirms the encryption algorithm that Amazon S3 used to + # encrypt the object. + # @return [String] + # + # @!attribute [rw] version_id + # Version ID of the newly created object, in case the bucket has + # versioning turned on. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If present, specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the multipart upload uses an S3 Bucket Key for + # server-side encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput AWS API Documentation + # + class CompleteMultipartUploadOutput < Struct.new( + :location, + :bucket, + :key, + :expiration, + :etag, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :server_side_encryption, + :version_id, + :ssekms_key_id, + :bucket_key_enabled, + :request_charged) + SENSITIVE = [:ssekms_key_id] + include Aws::Structure + end + + # @!attribute [rw] bucket + # Name of the bucket to which the multipart upload was initiated. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Object key for which the multipart upload was initiated. + # @return [String] + # + # @!attribute [rw] multipart_upload + # The container for the multipart upload request information. + # @return [Types::CompletedMultipartUpload] + # + # @!attribute [rw] upload_id + # ID for the initiated multipart upload. 
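+ #
+ # A minimal sketch of a complete request (bucket, key, upload ID,
+ # and ETags are hypothetical):
+ #
+ #     client.complete_multipart_upload(
+ #       bucket: "my-bucket",
+ #       key: "large-object",
+ #       upload_id: upload_id,
+ #       multipart_upload: {
+ #         parts: [
+ #           { etag: etag1, part_number: 1 },
+ #           { etag: etag2, part_number: 2 }
+ #         ]
+ #       }
+ #     )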
+ # @return [String] + # + # @!attribute [rw] checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # The server-side encryption (SSE) algorithm used to encrypt the + # object. This parameter is needed only when the object was created + # using a checksum algorithm. For more information, see [Protecting + # data using SSE-C keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [String] + # + # @!attribute [rw] sse_customer_key + # The server-side encryption (SSE) customer managed key. This + # parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key_md5
+ # The MD5 digest of the server-side encryption (SSE) customer-provided
+ # key. This parameter is needed only when the object was created using
+ # a checksum algorithm. For more information, see [Protecting data
+ # using SSE-C keys][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest AWS API Documentation
+ #
+ class CompleteMultipartUploadRequest < Struct.new(
+ :bucket,
+ :key,
+ :multipart_upload,
+ :upload_id,
+ :checksum_crc32,
+ :checksum_crc32c,
+ :checksum_sha1,
+ :checksum_sha256,
+ :request_payer,
+ :expected_bucket_owner,
+ :sse_customer_algorithm,
+ :sse_customer_key,
+ :sse_customer_key_md5)
+ SENSITIVE = [:sse_customer_key]
+ include Aws::Structure
+ end
+
+ # The container for the completed multipart upload details.
+ #
+ # @!attribute [rw] parts
+ # Array of CompletedPart data types.
+ #
+ # If you do not supply a valid `Part` with your request, the service
+ # sends back an HTTP 400 response.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload AWS API Documentation
+ #
+ class CompletedMultipartUpload < Struct.new(
+ :parts)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Details of the parts that were uploaded.
+ #
+ # @!attribute [rw] etag
+ # Entity tag returned when the part was uploaded.
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_crc32
+ # The base64-encoded, 32-bit CRC32 checksum of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_crc32c
+ # The base64-encoded, 32-bit CRC32C checksum of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_sha1
+ # The base64-encoded, 160-bit SHA-1 digest of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_sha256
+ # The base64-encoded, 256-bit SHA-256 digest of the object.
+ # With multipart uploads, this may not be a checksum value of the
+ # object. For more information about how checksums are calculated
+ # with multipart uploads, see [ Checking object integrity][1] in the
+ # *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] part_number
+ # Part number that identifies the part. This is a positive integer
+ # between 1 and 10,000.
+ # @return [Integer]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart AWS API Documentation
+ #
+ class CompletedPart < Struct.new(
+ :etag,
+ :checksum_crc32,
+ :checksum_crc32c,
+ :checksum_sha1,
+ :checksum_sha256,
+ :part_number)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # A container for describing a condition that must be met for the
+ # specified redirect to apply. For example, 1. If the request is for
+ # pages in the `/docs` folder, redirect to the `/documents` folder. 2.
+ # If the request results in an HTTP 4xx error, redirect the request to
+ # another host where you might process the error.
+ #
+ # @!attribute [rw] http_error_code_returned_equals
+ # The HTTP error code when the redirect is applied. In the event of an
+ # error, if the error code equals this value, then the specified
+ # redirect is applied. Required when parent element `Condition` is
+ # specified and sibling `KeyPrefixEquals` is not specified. If both
+ # are specified, then both must be true for the redirect to be
+ # applied.
+ # @return [String]
+ #
+ # @!attribute [rw] key_prefix_equals
+ # The object key name prefix when the redirect is applied. For
+ # example, to redirect requests for `ExamplePage.html`, the key prefix
+ # will be `ExamplePage.html`. To redirect requests for all pages with
+ # the prefix `docs/`, the key prefix will be `docs/`, which identifies
+ # all objects in the `docs/` folder. Required when the parent element
+ # `Condition` is specified and sibling `HttpErrorCodeReturnedEquals`
+ # is not specified. If both conditions are specified, both must be
+ # true for the redirect to be applied.
+ #
+ # Replacement must be made for object keys containing special
+ # characters (such as carriage returns) when using XML requests. For
+ # more information, see [ XML related object key constraints][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition AWS API Documentation
+ #
+ class Condition < Struct.new(
+ :http_error_code_returned_equals,
+ :key_prefix_equals)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ContinuationEvent AWS API Documentation
+ #
+ class ContinuationEvent < Struct.new(
+ :event_type)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] copy_object_result
+ # Container for all response elements.
+ # @return [Types::CopyObjectResult]
+ #
+ # @!attribute [rw] expiration
+ # If the object expiration is configured, the response includes this
+ # header.
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_version_id
+ # Version of the copied object in the destination bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # Version ID of the newly created copy.
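+ #
+ # For example, these response elements can be read after a copy
+ # (bucket and key names are hypothetical):
+ #
+ #     resp = client.copy_object(
+ #       bucket: "destination-bucket",
+ #       key: "destination-key",
+ #       copy_source: "source-bucket/source-key"
+ #     )
+ #     resp.copy_object_result.etag
+ #     resp.version_id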
+ # @return [String] + # + # @!attribute [rw] server_side_encryption + # The server-side encryption algorithm used when storing this object + # in Amazon S3 (for example, AES256, aws:kms). + # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header to provide + # round-trip message integrity verification of the customer-provided + # encryption key. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If present, specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] ssekms_encryption_context + # If present, specifies the Amazon Web Services KMS Encryption Context + # to use for object encryption. The value of this header is a + # base64-encoded UTF-8 string holding JSON with the encryption context + # key-value pairs. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the copied object uses an S3 Bucket Key for + # server-side encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput AWS API Documentation + # + class CopyObjectOutput < Struct.new( + :copy_object_result, + :expiration, + :copy_source_version_id, + :version_id, + :server_side_encryption, + :sse_customer_algorithm, + :sse_customer_key_md5, + :ssekms_key_id, + :ssekms_encryption_context, + :bucket_key_enabled, + :request_charged) + SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context] + include Aws::Structure + end + + # @!attribute [rw] acl + # The canned ACL to apply to the object. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] bucket + # The name of the destination bucket. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ # @return [String]
+ #
+ # @!attribute [rw] cache_control
+ # Specifies caching behavior along the request/reply chain.
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm you want Amazon S3 to use to create the
+ # checksum for the object. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ #
+ # @!attribute [rw] content_disposition
+ # Specifies presentational information for the object.
+ # @return [String]
+ #
+ # @!attribute [rw] content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the
+ # media-type referenced by the Content-Type header field.
+ # @return [String]
+ #
+ # @!attribute [rw] content_language
+ # The language the content is in.
+ # @return [String]
+ #
+ # @!attribute [rw] content_type
+ # A standard MIME type describing the format of the object data.
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source
+ # Specifies the source object for the copy operation. You specify the
+ # value in one of two formats, depending on whether you want to access
+ # the source object through an [access point][1]:
+ #
+ # * For objects not accessed through an access point, specify the name
+ # of the source bucket and the key of the source object, separated
+ # by a slash (/). For example, to copy the object
+ # `reports/january.pdf` from the bucket `awsexamplebucket`, use
+ # `awsexamplebucket/reports/january.pdf`. The value must be
+ # URL-encoded.
+ #
+ # * For objects accessed through access points, specify the Amazon
+ # Resource Name (ARN) of the object as accessed through the access
+ # point, in the format
+ # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
+ # For example, to copy the object `reports/january.pdf` through
+ # access point `my-access-point` owned by account `123456789012` in
+ # Region `us-west-2`, use the URL encoding of
+ # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # Amazon S3 supports copy operations using access points only when
+ # the source and destination buckets are in the same Amazon Web
+ # Services Region.
+ #
+ #
+ #
+ # Alternatively, for objects accessed through Amazon S3 on Outposts,
+ # specify the ARN of the object as accessed in the format
+ # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
+ # For example, to copy the object `reports/january.pdf` through
+ # outpost `my-outpost` owned by account `123456789012` in Region
+ # `us-west-2`, use the URL encoding of
+ # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
+ # The value must be URL-encoded.
+ #
+ # To copy a specific version of an object, append
+ # `?versionId=<version-id>` to the value (for example,
+ # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
+ # If you don't specify a version ID, Amazon S3 copies the latest
+ # version of the source object.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_if_match
+ # Copies the object if its entity tag (ETag) matches the specified
+ # tag.
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_if_modified_since
+ # Copies the object if it has been modified since the specified time.
+ # @return [Time]
+ #
+ # @!attribute [rw] copy_source_if_none_match
+ # Copies the object if its entity tag (ETag) is different than the
+ # specified ETag.
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_if_unmodified_since
+ # Copies the object if it hasn't been modified since the specified
+ # time.
+ # @return [Time]
+ #
+ # @!attribute [rw] expires
+ # The date and time at which the object is no longer cacheable.
+ # @return [Time]
+ #
+ # @!attribute [rw] grant_full_control
+ # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
+ # object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read
+ # Allows grantee to read the object data and its metadata.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read_acp
+ # Allows grantee to read the object ACL.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_write_acp
+ # Allows grantee to write the ACL for the applicable object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] key
+ # The key of the destination object.
+ # @return [String]
+ #
+ # @!attribute [rw] metadata
+ # A map of metadata to store with the object in S3.
+ # @return [Hash]
+ #
+ # @!attribute [rw] metadata_directive
+ # Specifies whether the metadata is copied from the source object or
+ # replaced with metadata provided in the request.
+ # @return [String]
+ #
+ # @!attribute [rw] tagging_directive
+ # Specifies whether the object tag-set is copied from the source
+ # object or replaced with the tag-set provided in the request.
+ # @return [String]
+ #
+ # @!attribute [rw] server_side_encryption
+ # The server-side encryption algorithm used when storing this object
+ # in Amazon S3 (for example, AES256, aws:kms).
+ # @return [String]
+ #
+ # @!attribute [rw] storage_class
+ # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+ # created objects. The STANDARD storage class provides high durability
+ # and high availability. Depending on performance needs, you can
+ # specify a different Storage Class. Amazon S3 on Outposts only uses
+ # the OUTPOSTS Storage Class. For more information, see [Storage
+ # Classes][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ # @return [String]
+ #
+ # @!attribute [rw] website_redirect_location
+ # If the bucket is configured as a website, redirects requests for
+ # this object to another object in the same bucket or to an external
+ # URL. Amazon S3 stores the value of this header in the object
+ # metadata.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use
+ # in encrypting data.
+ # This value is used to store the object and then it is discarded;
+ # Amazon S3 does not store the encryption key. The key must be
+ # appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check
+ # to ensure that the encryption key was transmitted without error.
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_key_id
+ # Specifies the Amazon Web Services KMS key ID to use for object
+ # encryption. All GET and PUT requests for an object protected by
+ # Amazon Web Services KMS will fail if not made via SSL or using
+ # SigV4. For information about configuring using any of the officially
+ # supported Amazon Web Services SDKs and Amazon Web Services CLI, see
+ # [Specifying the Signature Version in Request Authentication][1] in
+ # the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_encryption_context
+ # Specifies the Amazon Web Services KMS Encryption Context to use for
+ # object encryption. The value of this header is a base64-encoded
+ # UTF-8 string holding JSON with the encryption context key-value
+ # pairs.
+ # @return [String]
+ #
+ # @!attribute [rw] bucket_key_enabled
+ # Specifies whether Amazon S3 should use an S3 Bucket Key for object
+ # encryption with server-side encryption using AWS KMS (SSE-KMS).
+ # Setting this header to `true` causes Amazon S3 to use an S3 Bucket
+ # Key for object encryption with SSE-KMS.
+ #
+ # Specifying this header with a COPY action doesn’t affect
+ # bucket-level settings for S3 Bucket Key.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] copy_source_sse_customer_algorithm
+ # Specifies the algorithm to use when decrypting the source object
+ # (for example, AES256).
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use
+ # to decrypt the source object. The encryption key provided in this
+ # header must be one that was used when the source object was created.
+ # @return [String]
+ #
+ # @!attribute [rw] copy_source_sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check
+ # to ensure that the encryption key was transmitted without error.
+ # @return [String]
+ #
+ # @!attribute [rw] request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ # @return [String]
+ #
+ # @!attribute [rw] tagging
+ # The tag-set for the destination object. This value must be used in
+ # conjunction with the `TaggingDirective`. The tag-set must be encoded
+ # as URL Query parameters.
+ # @return [String]
+ #
+ # @!attribute [rw] object_lock_mode
+ # The Object Lock mode that you want to apply to the copied object.
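+ #
+ # For example, a fragment of a `copy_object` request applying a
+ # one-day retention (a sketch; `GOVERNANCE` is one of the valid
+ # modes):
+ #
+ #     object_lock_mode: "GOVERNANCE",
+ #     object_lock_retain_until_date: Time.now + 86_400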
+ # @return [String] + # + # @!attribute [rw] object_lock_retain_until_date + # The date and time when you want the copied object's Object Lock to + # expire. + # @return [Time] + # + # @!attribute [rw] object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the copied + # object. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected destination bucket owner. If the + # destination bucket is owned by a different account, the request + # fails with the HTTP status code `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] expected_source_bucket_owner + # The account ID of the expected source bucket owner. If the source + # bucket is owned by a different account, the request fails with the + # HTTP status code `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest AWS API Documentation + # + class CopyObjectRequest < Struct.new( + :acl, + :bucket, + :cache_control, + :checksum_algorithm, + :content_disposition, + :content_encoding, + :content_language, + :content_type, + :copy_source, + :copy_source_if_match, + :copy_source_if_modified_since, + :copy_source_if_none_match, + :copy_source_if_unmodified_since, + :expires, + :grant_full_control, + :grant_read, + :grant_read_acp, + :grant_write_acp, + :key, + :metadata, + :metadata_directive, + :tagging_directive, + :server_side_encryption, + :storage_class, + :website_redirect_location, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :ssekms_key_id, + :ssekms_encryption_context, + :bucket_key_enabled, + :copy_source_sse_customer_algorithm, + :copy_source_sse_customer_key, + :copy_source_sse_customer_key_md5, + :request_payer, + :tagging, + :object_lock_mode, + :object_lock_retain_until_date, + :object_lock_legal_hold_status, + :expected_bucket_owner, + :expected_source_bucket_owner) + SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context, :copy_source_sse_customer_key] + include Aws::Structure + end + + # Container for all response elements. + # + # @!attribute [rw] etag + # Returns the ETag of the new object. The ETag reflects only changes + # to the contents of an object, not its metadata. + # @return [String] + # + # @!attribute [rw] last_modified + # Creation date of the object. + # @return [Time] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult AWS API Documentation + # + class CopyObjectResult < Struct.new( + :etag, + :last_modified, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256) + SENSITIVE = [] + include Aws::Structure + end + + # Container for all response elements. + # + # @!attribute [rw] etag + # Entity tag of the object. + # @return [String] + # + # @!attribute [rw] last_modified + # Date and time at which the object was uploaded. + # @return [Time] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. 
This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult AWS API Documentation + # + class CopyPartResult < Struct.new( + :etag, + :last_modified, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256) + SENSITIVE = [] + include Aws::Structure + end + + # The configuration information for the bucket. + # + # @!attribute [rw] location_constraint + # Specifies the Region where the bucket will be created. If you don't + # specify a Region, the bucket is created in the US East (N. Virginia) + # Region (us-east-1). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration AWS API Documentation + # + class CreateBucketConfiguration < Struct.new( + :location_constraint) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] location + # A forward slash followed by the name of the bucket. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput AWS API Documentation + # + class CreateBucketOutput < Struct.new( + :location) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] acl + # The canned ACL to apply to the bucket. + # @return [String] + # + # @!attribute [rw] bucket + # The name of the bucket to create. + # @return [String] + # + # @!attribute [rw] create_bucket_configuration + # The configuration information for the bucket. + # @return [Types::CreateBucketConfiguration] + # + # @!attribute [rw] grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions + # on the bucket. + # @return [String] + # + # @!attribute [rw] grant_read + # Allows grantee to list the objects in the bucket. + # @return [String] + # + # @!attribute [rw] grant_read_acp + # Allows grantee to read the bucket ACL. + # @return [String] + # + # @!attribute [rw] grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @return [String] + # + # @!attribute [rw] grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # @return [String] + # + # @!attribute [rw] object_lock_enabled_for_bucket + # Specifies whether you want S3 Object Lock to be enabled for the new + # bucket. + # @return [Boolean] + # + # @!attribute [rw] object_ownership + # The container element for object ownership for a bucket's ownership + # controls. + # + # BucketOwnerPreferred - Objects uploaded to the bucket change + # ownership to the bucket owner if the objects are uploaded with the + # `bucket-owner-full-control` canned ACL. + # + # ObjectWriter - The uploading account will own the object if the + # object is uploaded with the `bucket-owner-full-control` canned ACL. + # + # BucketOwnerEnforced - Access control lists (ACLs) are disabled and + # no longer affect permissions. The bucket owner automatically owns + # and has full control over every object in the bucket. 
The bucket + # only accepts PUT requests that don't specify an ACL or bucket owner + # full control ACLs, such as the `bucket-owner-full-control` canned + # ACL or an equivalent form of this ACL expressed in the XML format. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest AWS API Documentation + # + class CreateBucketRequest < Struct.new( + :acl, + :bucket, + :create_bucket_configuration, + :grant_full_control, + :grant_read, + :grant_read_acp, + :grant_write, + :grant_write_acp, + :object_lock_enabled_for_bucket, + :object_ownership) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] abort_date + # If the bucket has a lifecycle rule configured with an action to + # abort incomplete multipart uploads and the prefix in the lifecycle + # rule matches the object name in the request, the response includes + # this header. The header indicates when the initiated multipart + # upload becomes eligible for an abort operation. For more + # information, see [ Aborting Incomplete Multipart Uploads Using a + # Bucket Lifecycle Policy][1]. + # + # The response also includes the `x-amz-abort-rule-id` header that + # provides the ID of the lifecycle configuration rule that defines + # this action. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + # @return [Time] + # + # @!attribute [rw] abort_rule_id + # This header is returned along with the `x-amz-abort-date` header. It + # identifies the applicable lifecycle configuration rule that defines + # the action to abort incomplete multipart uploads. + # @return [String] + # + # @!attribute [rw] bucket + # The name of the bucket to which the multipart upload was initiated. + # Does not return the access point ARN or access point alias if used. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Object key for which the multipart upload was initiated. + # @return [String] + # + # @!attribute [rw] upload_id + # ID for the initiated multipart upload. + # @return [String] + # + # @!attribute [rw] server_side_encryption + # The server-side encryption algorithm used when storing this object + # in Amazon S3 (for example, AES256, aws:kms). 
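+ #
+ # A minimal sketch of initiating an upload and reading these
+ # response fields (bucket and key are hypothetical):
+ #
+ #     resp = client.create_multipart_upload(
+ #       bucket: "my-bucket",
+ #       key: "large-object",
+ #       server_side_encryption: "aws:kms"
+ #     )
+ #     upload_id = resp.upload_id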
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_algorithm
+ # If server-side encryption with a customer-provided encryption key
+ # was requested, the response will include this header confirming the
+ # encryption algorithm used.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key_md5
+ # If server-side encryption with a customer-provided encryption key
+ # was requested, the response will include this header to provide
+ # round-trip message integrity verification of the customer-provided
+ # encryption key.
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_key_id
+ # If present, specifies the ID of the Amazon Web Services Key
+ # Management Service (Amazon Web Services KMS) symmetric customer
+ # managed key that was used for the object.
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_encryption_context
+ # If present, specifies the Amazon Web Services KMS Encryption Context
+ # to use for object encryption. The value of this header is a
+ # base64-encoded UTF-8 string holding JSON with the encryption context
+ # key-value pairs.
+ # @return [String]
+ #
+ # @!attribute [rw] bucket_key_enabled
+ # Indicates whether the multipart upload uses an S3 Bucket Key for
+ # server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ # @return [Boolean]
+ #
+ # @!attribute [rw] request_charged
+ # If present, indicates that the requester was successfully charged
+ # for the request.
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # The algorithm that was used to create a checksum of the object.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput AWS API Documentation
+ #
+ class CreateMultipartUploadOutput < Struct.new(
+ :abort_date,
+ :abort_rule_id,
+ :bucket,
+ :key,
+ :upload_id,
+ :server_side_encryption,
+ :sse_customer_algorithm,
+ :sse_customer_key_md5,
+ :ssekms_key_id,
+ :ssekms_encryption_context,
+ :bucket_key_enabled,
+ :request_charged,
+ :checksum_algorithm)
+ SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context]
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] acl
+ # The canned ACL to apply to the object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] bucket
+ # The name of the bucket in which to initiate the upload.
+ #
+ # When using this action with an access point, you must direct
+ # requests to the access point hostname. The access point hostname
+ # takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the
+ # bucket name. For more information about access point ARNs, see
+ # [Using access points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ # @return [String]
+ #
+ # @!attribute [rw] cache_control
+ # Specifies caching behavior along the request/reply chain.
+ # @return [String]
+ #
+ # @!attribute [rw] content_disposition
+ # Specifies presentational information for the object.
+ # @return [String]
+ #
+ # @!attribute [rw] content_encoding
+ # Specifies what content encodings have been applied to the object and
+ # thus what decoding mechanisms must be applied to obtain the
+ # media-type referenced by the Content-Type header field.
+ # @return [String]
+ #
+ # @!attribute [rw] content_language
+ # The language the content is in.
+ # @return [String]
+ #
+ # @!attribute [rw] content_type
+ # A standard MIME type describing the format of the object data.
+ # @return [String]
+ #
+ # @!attribute [rw] expires
+ # The date and time at which the object is no longer cacheable.
+ # @return [Time]
+ #
+ # @!attribute [rw] grant_full_control
+ # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
+ # object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read
+ # Allows grantee to read the object data and its metadata.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read_acp
+ # Allows grantee to read the object ACL.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_write_acp
+ # Allows grantee to write the ACL for the applicable object.
+ #
+ # This action is not supported by Amazon S3 on Outposts.
+ # @return [String]
+ #
+ # @!attribute [rw] key
+ # Object key for which the multipart upload is to be initiated.
+ # @return [String]
+ #
+ # @!attribute [rw] metadata
+ # A map of metadata to store with the object in S3.
+ # @return [Hash]
+ #
+ # @!attribute [rw] server_side_encryption
+ # The server-side encryption algorithm used when storing this object
+ # in Amazon S3 (for example, AES256, aws:kms).
+ # @return [String]
+ #
+ # @!attribute [rw] storage_class
+ # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+ # created objects. The STANDARD storage class provides high durability
+ # and high availability. Depending on performance needs, you can
+ # specify a different Storage Class. Amazon S3 on Outposts only uses
+ # the OUTPOSTS Storage Class. For more information, see [Storage
+ # Classes][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ # @return [String]
+ #
+ # @!attribute [rw] website_redirect_location
+ # If the bucket is configured as a website, redirects requests for
+ # this object to another object in the same bucket or to an external
+ # URL. Amazon S3 stores the value of this header in the object
+ # metadata.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use
+ # in encrypting data. This value is used to store the object and then
+ # it is discarded; Amazon S3 does not store the encryption key.
The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check + # to ensure that the encryption key was transmitted without error. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # Specifies the ID of the symmetric customer managed key to use for + # object encryption. All GET and PUT requests for an object protected + # by Amazon Web Services KMS will fail if not made via SSL or using + # SigV4. For information about configuring using any of the officially + # supported Amazon Web Services SDKs and Amazon Web Services CLI, see + # [Specifying the Signature Version in Request Authentication][1] in + # the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + # @return [String] + # + # @!attribute [rw] ssekms_encryption_context + # Specifies the Amazon Web Services KMS Encryption Context to use for + # object encryption. The value of this header is a base64-encoded + # UTF-8 string holding JSON with the encryption context key-value + # pairs. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket + # Key for object encryption with SSE-KMS. + # + # Specifying this header with an object action doesn’t affect + # bucket-level settings for S3 Bucket Key. + # @return [Boolean] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. + # @return [String] + # + # @!attribute [rw] object_lock_mode + # Specifies the Object Lock mode that you want to apply to the + # uploaded object. + # @return [String] + # + # @!attribute [rw] object_lock_retain_until_date + # Specifies the date and time when you want the Object Lock to expire. + # @return [Time] + # + # @!attribute [rw] object_lock_legal_hold_status + # Specifies whether you want to apply a legal hold to the uploaded + # object. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm you want Amazon S3 to use to create the + # checksum for the object. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest AWS API Documentation + # + class CreateMultipartUploadRequest < Struct.new( + :acl, + :bucket, + :cache_control, + :content_disposition, + :content_encoding, + :content_language, + :content_type, + :expires, + :grant_full_control, + :grant_read, + :grant_read_acp, + :grant_write_acp, + :key, + :metadata, + :server_side_encryption, + :storage_class, + :website_redirect_location, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :ssekms_key_id, + :ssekms_encryption_context, + :bucket_key_enabled, + :request_payer, + :tagging, + :object_lock_mode, + :object_lock_retain_until_date, + :object_lock_legal_hold_status, + :expected_bucket_owner, + :checksum_algorithm) + SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] + include Aws::Structure + end + + # The container element for specifying the default Object Lock retention + # settings for new objects placed in the specified bucket. + # + # * The `DefaultRetention` settings require both a mode and a period. + # + # * The `DefaultRetention` period can be either `Days` or `Years` but + # you must select one. You cannot specify `Days` and `Years` at the + # same time. + # + # + # + # @!attribute [rw] mode + # The default Object Lock retention mode you want to apply to new + # objects placed in the specified bucket. Must be used with either + # `Days` or `Years`. + # @return [String] + # + # @!attribute [rw] days + # The number of days that you want to specify for the default + # retention period. Must be used with `Mode`. + # @return [Integer] + # + # @!attribute [rw] years + # The number of years that you want to specify for the default + # retention period. Must be used with `Mode`. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DefaultRetention AWS API Documentation + # + class DefaultRetention < Struct.new( + :mode, + :days, + :years) + SENSITIVE = [] + include Aws::Structure + end + + # Container for the objects to delete. + # + # @!attribute [rw] objects + # The objects to delete. + # @return [Array] + # + # @!attribute [rw] quiet + # Element to enable quiet mode for the request. When you add this + # element, you must set its value to true. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete AWS API Documentation + # + class Delete < Struct.new( + :objects, + :quiet) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket from which an analytics configuration is + # deleted. + # @return [String] + # + # @!attribute [rw] id + # The ID that identifies the analytics configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest AWS API Documentation + # + class DeleteBucketAnalyticsConfigurationRequest < Struct.new( + :bucket, + :id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # Specifies the bucket whose `cors` configuration is being deleted. 
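+ #
+ # A minimal usage sketch (editorial illustration, not from the
+ # upstream API docs; the bucket name is hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.delete_bucket_cors(bucket: "example-bucket")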
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest AWS API Documentation
+ #
+ class DeleteBucketCorsRequest < Struct.new(
+ :bucket,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket containing the server-side encryption
+ # configuration to delete.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionRequest AWS API Documentation
+ #
+ class DeleteBucketEncryptionRequest < Struct.new(
+ :bucket,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the Amazon S3 bucket whose configuration you want to
+ # modify or retrieve.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the S3 Intelligent-Tiering configuration.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfigurationRequest AWS API Documentation
+ #
+ class DeleteBucketIntelligentTieringConfigurationRequest < Struct.new(
+ :bucket,
+ :id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket containing the inventory configuration to
+ # delete.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the inventory configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest AWS API Documentation
+ #
+ class DeleteBucketInventoryConfigurationRequest < Struct.new(
+ :bucket,
+ :id,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket from which the lifecycle configuration is
+ # deleted.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest AWS API Documentation
+ #
+ class DeleteBucketLifecycleRequest < Struct.new(
+ :bucket,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket containing the metrics configuration to
+ # delete.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the metrics configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner.
If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest AWS API Documentation + # + class DeleteBucketMetricsConfigurationRequest < Struct.new( + :bucket, + :id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The Amazon S3 bucket whose `OwnershipControls` you want to delete. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControlsRequest AWS API Documentation + # + class DeleteBucketOwnershipControlsRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest AWS API Documentation + # + class DeleteBucketPolicyRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest AWS API Documentation + # + class DeleteBucketReplicationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # Specifies the bucket being deleted. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest AWS API Documentation + # + class DeleteBucketRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket that has the tag set to be removed. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest AWS API Documentation + # + class DeleteBucketTaggingRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which you want to remove the website + # configuration. 
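+ #
+ # A minimal usage sketch (editorial illustration, not from the
+ # upstream API docs; bucket name and owner account ID are
+ # hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.delete_bucket_website(bucket: "example-bucket",
+ #                              expected_bucket_owner: "111122223333")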
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest AWS API Documentation
+ #
+ class DeleteBucketWebsiteRequest < Struct.new(
+ :bucket,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Information about the delete marker.
+ #
+ # @!attribute [rw] owner
+ # The account that created the delete marker.
+ # @return [Types::Owner]
+ #
+ # @!attribute [rw] key
+ # The object key.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # Version ID of an object.
+ # @return [String]
+ #
+ # @!attribute [rw] is_latest
+ # Specifies whether the object is (true) or is not (false) the latest
+ # version of an object.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] last_modified
+ # Date and time the object was last modified.
+ # @return [Time]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry AWS API Documentation
+ #
+ class DeleteMarkerEntry < Struct.new(
+ :owner,
+ :key,
+ :version_id,
+ :is_latest,
+ :last_modified)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Specifies whether Amazon S3 replicates delete markers. If you specify
+ # a `Filter` in your replication configuration, you must also include a
+ # `DeleteMarkerReplication` element. If your `Filter` includes a `Tag`
+ # element, the `DeleteMarkerReplication` `Status` must be set to
+ # Disabled, because Amazon S3 does not support replicating delete
+ # markers for tag-based rules. For an example configuration, see [Basic
+ # Rule Configuration][1].
+ #
+ # For more information about delete marker replication, see [Delete
+ # Marker Replication][2].
+ #
+ # If you are using an earlier version of the replication configuration,
+ # Amazon S3 handles replication of delete markers differently. For more
+ # information, see [Backward Compatibility][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+ #
+ # @!attribute [rw] status
+ # Indicates whether to replicate delete markers.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerReplication AWS API Documentation
+ #
+ class DeleteMarkerReplication < Struct.new(
+ :status)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] delete_marker
+ # Specifies whether the versioned object that was permanently deleted
+ # was (true) or was not (false) a delete marker.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] version_id
+ # Returns the version ID of the delete marker created as a result of
+ # the DELETE operation.
+ # @return [String]
+ #
+ # @!attribute [rw] request_charged
+ # If present, indicates that the requester was successfully charged
+ # for the request.
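+ #
+ # A sketch of how these response fields surface through the SDK
+ # (editorial illustration, not from the upstream API docs; bucket
+ # and key are hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     resp = s3.delete_object(bucket: "example-bucket", key: "old.txt")
+ #     resp.delete_marker #=> true when a delete marker was created
+ #     resp.version_id    #=> that delete marker's version ID, if versioning is enabled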
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput AWS API Documentation + # + class DeleteObjectOutput < Struct.new( + :delete_marker, + :version_id, + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name of the bucket containing the object. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Key name of the object to delete. + # @return [String] + # + # @!attribute [rw] mfa + # The concatenation of the authentication device's serial number, a + # space, and the value that is displayed on your authentication + # device. Required to permanently delete a versioned object if + # versioning is configured with MFA delete enabled. + # @return [String] + # + # @!attribute [rw] version_id + # VersionId used to reference a specific version of the object. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] bypass_governance_retention + # Indicates whether S3 Object Lock should bypass Governance-mode + # restrictions to process this operation. To use this header, you must + # have the `s3:BypassGovernanceRetention` permission. + # @return [Boolean] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest AWS API Documentation + # + class DeleteObjectRequest < Struct.new( + :bucket, + :key, + :mfa, + :version_id, + :request_payer, + :bypass_governance_retention, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] version_id + # The versionId of the object the tag-set was removed from. 
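+ #
+ # A minimal usage sketch (editorial illustration, not from the
+ # upstream API docs; bucket and key are hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     resp = s3.delete_object_tagging(bucket: "example-bucket", key: "photo.jpg")
+ #     resp.version_id #=> version of the object the tag-set was removed from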
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput AWS API Documentation + # + class DeleteObjectTaggingOutput < Struct.new( + :version_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the objects from which to remove the + # tags. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # The key that identifies the object in the bucket from which to + # remove all tags. + # @return [String] + # + # @!attribute [rw] version_id + # The versionId of the object that the tag-set will be removed from. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest AWS API Documentation + # + class DeleteObjectTaggingRequest < Struct.new( + :bucket, + :key, + :version_id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] deleted + # Container element for a successful delete. It identifies the object + # that was successfully deleted. + # @return [Array] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @!attribute [rw] errors + # Container for a failed delete action that describes the object that + # Amazon S3 attempted to delete and the error it encountered. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput AWS API Documentation + # + class DeleteObjectsOutput < Struct.new( + :deleted, + :request_charged, + :errors) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the objects to delete. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. 
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the
+ # bucket name. For more information about access point ARNs, see
+ # [Using access points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ # @return [String]
+ #
+ # @!attribute [rw] delete
+ # Container for the request.
+ # @return [Types::Delete]
+ #
+ # @!attribute [rw] mfa
+ # The concatenation of the authentication device's serial number, a
+ # space, and the value that is displayed on your authentication
+ # device. Required to permanently delete a versioned object if
+ # versioning is configured with MFA delete enabled.
+ # @return [String]
+ #
+ # @!attribute [rw] request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ # @return [String]
+ #
+ # @!attribute [rw] bypass_governance_retention
+ # Specifies whether you want to delete this object even if it has a
+ # Governance-type Object Lock in place. To use this header, you must
+ # have the `s3:BypassGovernanceRetention` permission.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent. Otherwise, Amazon S3 fails the request with the HTTP status
+ # code `400 Bad Request`. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any
+ # provided `ChecksumAlgorithm` parameter.
+ #
+ # This checksum algorithm must be the same for all parts and it must
+ # match the checksum value supplied in the `CreateMultipartUpload`
+ # request.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest AWS API Documentation + # + class DeleteObjectsRequest < Struct.new( + :bucket, + :delete, + :mfa, + :request_payer, + :bypass_governance_retention, + :expected_bucket_owner, + :checksum_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The Amazon S3 bucket whose `PublicAccessBlock` configuration you + # want to delete. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlockRequest AWS API Documentation + # + class DeletePublicAccessBlockRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # Information about the deleted object. + # + # @!attribute [rw] key + # The name of the deleted object. + # @return [String] + # + # @!attribute [rw] version_id + # The version ID of the deleted object. + # @return [String] + # + # @!attribute [rw] delete_marker + # Specifies whether the versioned object that was permanently deleted + # was (true) or was not (false) a delete marker. In a simple DELETE, + # this header indicates whether (true) or not (false) a delete marker + # was created. + # @return [Boolean] + # + # @!attribute [rw] delete_marker_version_id + # The version ID of the delete marker created as a result of the + # DELETE operation. If you delete a specific object version, the value + # returned by this header is the version ID of the object version + # deleted. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject AWS API Documentation + # + class DeletedObject < Struct.new( + :key, + :version_id, + :delete_marker, + :delete_marker_version_id) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies information about where to publish analysis or configuration + # results for an Amazon S3 bucket and S3 Replication Time Control (S3 + # RTC). + # + # @!attribute [rw] bucket + # The Amazon Resource Name (ARN) of the bucket where you want Amazon + # S3 to store the results. + # @return [String] + # + # @!attribute [rw] account + # Destination bucket owner account ID. In a cross-account scenario, if + # you direct Amazon S3 to change replica ownership to the Amazon Web + # Services account that owns the destination bucket by specifying the + # `AccessControlTranslation` property, this is the account ID of the + # destination bucket owner. For more information, see [Replication + # Additional Configuration: Changing the Replica Owner][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html + # @return [String] + # + # @!attribute [rw] storage_class + # The storage class to use when replicating objects, such as S3 + # Standard or reduced redundancy. By default, Amazon S3 uses the + # storage class of the source object to create the object replica. + # + # For valid values, see the `StorageClass` element of the [PUT Bucket + # replication][1] action in the *Amazon S3 API Reference*. 
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html
+ # @return [String]
+ #
+ # @!attribute [rw] access_control_translation
+ # Specify this only in a cross-account scenario (where source and
+ # destination bucket owners are not the same), and you want to change
+ # replica ownership to the Amazon Web Services account that owns the
+ # destination bucket. If this is not specified in the replication
+ # configuration, the replicas are owned by the same Amazon Web
+ # Services account that owns the source object.
+ # @return [Types::AccessControlTranslation]
+ #
+ # @!attribute [rw] encryption_configuration
+ # A container that provides information about encryption. If
+ # `SourceSelectionCriteria` is specified, you must specify this
+ # element.
+ # @return [Types::EncryptionConfiguration]
+ #
+ # @!attribute [rw] replication_time
+ # A container specifying S3 Replication Time Control (S3 RTC),
+ # including whether S3 RTC is enabled and the time when all objects
+ # and operations on objects must be replicated. Must be specified
+ # together with a `Metrics` block.
+ # @return [Types::ReplicationTime]
+ #
+ # @!attribute [rw] metrics
+ # A container specifying replication metrics-related settings enabling
+ # replication metrics and events.
+ # @return [Types::Metrics]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination AWS API Documentation
+ #
+ class Destination < Struct.new(
+ :bucket,
+ :account,
+ :storage_class,
+ :access_control_translation,
+ :encryption_configuration,
+ :replication_time,
+ :metrics)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Contains the type of server-side encryption used.
+ #
+ # @!attribute [rw] encryption_type
+ # The server-side encryption algorithm used when storing job results
+ # in Amazon S3 (for example, AES256, aws:kms).
+ # @return [String]
+ #
+ # @!attribute [rw] kms_key_id
+ # If the encryption type is `aws:kms`, this optional value specifies
+ # the ID of the symmetric customer managed key to use for encryption
+ # of job results. Amazon S3 only supports symmetric keys. For more
+ # information, see [Using symmetric and asymmetric keys][1] in the
+ # *Amazon Web Services Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # @return [String]
+ #
+ # @!attribute [rw] kms_context
+ # If the encryption type is `aws:kms`, this optional value can be used
+ # to specify the encryption context for the restore results.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption AWS API Documentation
+ #
+ class Encryption < Struct.new(
+ :encryption_type,
+ :kms_key_id,
+ :kms_context)
+ SENSITIVE = [:kms_key_id]
+ include Aws::Structure
+ end
+
+ # Specifies encryption-related information for an Amazon S3 bucket that
+ # is a destination for replicated objects.
+ #
+ # @!attribute [rw] replica_kms_key_id
+ # Specifies the ID (Key ARN or Alias ARN) of the customer managed
+ # Amazon Web Services KMS key stored in Amazon Web Services Key
+ # Management Service (KMS) for the destination bucket. Amazon S3 uses
+ # this key to encrypt replica objects. Amazon S3 only supports
+ # symmetric, customer managed KMS keys. For more information, see
+ # [Using symmetric and asymmetric keys][1] in the *Amazon Web Services
+ # Key Management Service Developer Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration AWS API Documentation
+ #
+ class EncryptionConfiguration < Struct.new(
+ :replica_kms_key_id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # A message that indicates the request is complete and no more messages
+ # will be sent. You should not assume that the request is complete until
+ # the client receives an `EndEvent`.
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EndEvent AWS API Documentation
+ #
+ class EndEvent < Struct.new(
+ :event_type)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for all error elements.
+ #
+ # @!attribute [rw] key
+ # The error key.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # The version ID of the error.
+ # @return [String]
+ #
+ # @!attribute [rw] code
+ # The error code is a string that uniquely identifies an error
+ # condition. It is meant to be read and understood by programs that
+ # detect and handle errors by type.
+ #
+ # **Amazon S3 error codes**
+ #
+ # * * *Code:* AccessDenied
+ #
+ # * *Description:* Access Denied
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* AccountProblem
+ #
+ # * *Description:* There is a problem with your Amazon Web Services
+ # account that prevents the action from completing successfully.
+ # Contact Amazon Web Services Support for further assistance.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* AllAccessDisabled
+ #
+ # * *Description:* All access to this Amazon S3 resource has been
+ # disabled. Contact Amazon Web Services Support for further
+ # assistance.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* AmbiguousGrantByEmailAddress
+ #
+ # * *Description:* The email address you provided is associated with
+ # more than one account.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* AuthorizationHeaderMalformed
+ #
+ # * *Description:* The authorization header you provided is invalid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* BadDigest
+ #
+ # * *Description:* The Content-MD5 you specified did not match what
+ # we received.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* BucketAlreadyExists
+ #
+ # * *Description:* The requested bucket name is not available. The
+ # bucket namespace is shared by all users of the system. Please
+ # select a different name and try again.
+ #
+ # * *HTTP Status Code:* 409 Conflict
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* BucketAlreadyOwnedByYou
+ #
+ # * *Description:* The bucket you tried to create already exists,
+ # and you own it. Amazon S3 returns this error in all Amazon Web
+ # Services Regions except in the North Virginia Region. For legacy
+ # compatibility, if you re-create an existing bucket that you
+ # already own in the North Virginia Region, Amazon S3 returns 200
+ # OK and resets the bucket access control lists (ACLs).
+ #
+ # * *HTTP Status Code:* 409 Conflict (in all Regions except the
+ # North Virginia Region)
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* BucketNotEmpty
+ #
+ # * *Description:* The bucket you tried to delete is not empty.
+ #
+ # * *HTTP Status Code:* 409 Conflict
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* CredentialsNotSupported
+ #
+ # * *Description:* This request does not support credentials.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* CrossLocationLoggingProhibited
+ #
+ # * *Description:* Cross-location logging not allowed. Buckets in
+ # one geographic location cannot log information to a bucket in
+ # another location.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* EntityTooSmall
+ #
+ # * *Description:* Your proposed upload is smaller than the minimum
+ # allowed object size.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* EntityTooLarge
+ #
+ # * *Description:* Your proposed upload exceeds the maximum allowed
+ # object size.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* ExpiredToken
+ #
+ # * *Description:* The provided token has expired.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* IllegalVersioningConfigurationException
+ #
+ # * *Description:* Indicates that the versioning configuration
+ # specified in the request is invalid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* IncompleteBody
+ #
+ # * *Description:* You did not provide the number of bytes specified
+ # by the Content-Length HTTP header.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* IncorrectNumberOfFilesInPostRequest
+ #
+ # * *Description:* POST requires exactly one file upload per
+ # request.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InlineDataTooLarge
+ #
+ # * *Description:* Inline data exceeds the maximum allowed size.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InternalError
+ #
+ # * *Description:* We encountered an internal error. Please try
+ # again.
+ #
+ # * *HTTP Status Code:* 500 Internal Server Error
+ #
+ # * *SOAP Fault Code Prefix:* Server
+ #
+ # * * *Code:* InvalidAccessKeyId
+ #
+ # * *Description:* The Amazon Web Services access key ID you
+ # provided does not exist in our records.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidAddressingHeader
+ #
+ # * *Description:* You must specify the Anonymous role.
+ #
+ # * *HTTP Status Code:* N/A
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidArgument
+ #
+ # * *Description:* Invalid Argument
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidBucketName
+ #
+ # * *Description:* The specified bucket is not valid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidBucketState
+ #
+ # * *Description:* The request is not valid with the current state
+ # of the bucket.
+ #
+ # * *HTTP Status Code:* 409 Conflict
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidDigest
+ #
+ # * *Description:* The Content-MD5 you specified is not valid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidEncryptionAlgorithmError
+ #
+ # * *Description:* The encryption request you specified is not
+ # valid. The valid value is AES256.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidLocationConstraint
+ #
+ # * *Description:* The specified location constraint is not valid.
+ # For more information about Regions, see [How to Select a Region
+ # for Your Buckets][1].
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidObjectState
+ #
+ # * *Description:* The action is not valid for the current state of
+ # the object.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidPart
+ #
+ # * *Description:* One or more of the specified parts could not be
+ # found. The part might not have been uploaded, or the specified
+ # entity tag might not have matched the part's entity tag.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidPartOrder
+ #
+ # * *Description:* The list of parts was not in ascending order.
+ # Parts list must be specified in order by part number.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidPayer
+ #
+ # * *Description:* All access to this object has been disabled.
+ # Please contact Amazon Web Services Support for further
+ # assistance.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidPolicyDocument
+ #
+ # * *Description:* The content of the form does not meet the
+ # conditions specified in the policy document.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidRange
+ #
+ # * *Description:* The requested range cannot be satisfied.
+ #
+ # * *HTTP Status Code:* 416 Requested Range Not Satisfiable
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Please use `AWS4-HMAC-SHA256`.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* SOAP requests must be made over an HTTPS
+ # connection.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Acceleration is not supported
+ # for buckets with non-DNS compliant names.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Acceleration is not supported
+ # for buckets with periods (.) in their names.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Accelerate endpoint only
+ # supports virtual style requests.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Accelerate is not configured
+ # on this bucket.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Accelerate is disabled on this
+ # bucket.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Acceleration is not supported
+ # on this bucket. Contact Amazon Web Services Support for more
+ # information.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidRequest
+ #
+ # * *Description:* Amazon S3 Transfer Acceleration cannot be enabled
+ # on this bucket. Contact Amazon Web Services Support for more
+ # information.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* N/A
+ #
+ # * * *Code:* InvalidSecurity
+ #
+ # * *Description:* The provided security credentials are not valid.
+ #
+ # * *HTTP Status Code:* 403 Forbidden
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidSOAPRequest
+ #
+ # * *Description:* The SOAP request body is invalid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidStorageClass
+ #
+ # * *Description:* The storage class you specified is not valid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidTargetBucketForLogging
+ #
+ # * *Description:* The target bucket for logging does not exist, is
+ # not owned by you, or does not have the appropriate grants for
+ # the log-delivery group.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidToken
+ #
+ # * *Description:* The provided token is malformed or otherwise
+ # invalid.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* InvalidURI
+ #
+ # * *Description:* Couldn't parse the specified URI.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* KeyTooLongError
+ #
+ # * *Description:* Your key is too long.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* MalformedACLError
+ #
+ # * *Description:* The XML you provided was not well-formed or did
+ # not validate against our published schema.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* MalformedPOSTRequest
+ #
+ # * *Description:* The body of your POST request is not well-formed
+ # multipart/form-data.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* MalformedXML
+ #
+ # * *Description:* This happens when the user sends malformed XML
+ # (XML that doesn't conform to the published XSD) for the
+ # configuration. The error message is, "The XML you provided was
+ # not well-formed or did not validate against our published
+ # schema."
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* MaxMessageLengthExceeded
+ #
+ # * *Description:* Your request was too big.
+ #
+ # * *HTTP Status Code:* 400 Bad Request
+ #
+ # * *SOAP Fault Code Prefix:* Client
+ #
+ # * * *Code:* MaxPostPreDataLengthExceededError
+ #
+ # * *Description:* Your POST request fields preceding the upload
+ # file were too large.
+ # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MetadataTooLarge + # + # * *Description:* Your metadata headers exceed the maximum allowed + # metadata size. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MethodNotAllowed + # + # * *Description:* The specified method is not allowed against this + # resource. + # + # * *HTTP Status Code:* 405 Method Not Allowed + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MissingAttachment + # + # * *Description:* A SOAP attachment was expected, but none were + # found. + # + # * *HTTP Status Code:* N/A + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MissingContentLength + # + # * *Description:* You must provide the Content-Length HTTP header. + # + # * *HTTP Status Code:* 411 Length Required + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MissingRequestBodyError + # + # * *Description:* This happens when the user sends an empty XML + # document as a request. The error message is, "Request body is + # empty." + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MissingSecurityElement + # + # * *Description:* The SOAP 1.1 request is missing a security + # element. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* MissingSecurityHeader + # + # * *Description:* Your request is missing a required header. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoLoggingStatusForKey + # + # * *Description:* There is no such thing as a logging status + # subresource for a key. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchBucket + # + # * *Description:* The specified bucket does not exist. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchBucketPolicy + # + # * *Description:* The specified bucket does not have a bucket + # policy. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchKey + # + # * *Description:* The specified key does not exist. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchLifecycleConfiguration + # + # * *Description:* The lifecycle configuration does not exist. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchUpload + # + # * *Description:* The specified multipart upload does not exist. + # The upload ID might be invalid, or the multipart upload might + # have been aborted or completed. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NoSuchVersion + # + # * *Description:* Indicates that the version ID specified in the + # request does not match an existing version. + # + # * *HTTP Status Code:* 404 Not Found + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* NotImplemented + # + # * *Description:* A header you provided implies functionality that + # is not implemented. + # + # * *HTTP Status Code:* 501 Not Implemented + # + # * *SOAP Fault Code Prefix:* Server + # + # * * *Code:* NotSignedUp + # + # * *Description:* Your account is not signed up for the Amazon S3 + # service. You must sign up before you can use Amazon S3. 
You can + # sign up at the following URL: [Amazon S3][2] + # + # * *HTTP Status Code:* 403 Forbidden + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* OperationAborted + # + # * *Description:* A conflicting conditional action is currently in + # progress against this resource. Try again. + # + # * *HTTP Status Code:* 409 Conflict + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* PermanentRedirect + # + # * *Description:* The bucket you are attempting to access must be + # addressed using the specified endpoint. Send all future requests + # to this endpoint. + # + # * *HTTP Status Code:* 301 Moved Permanently + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* PreconditionFailed + # + # * *Description:* At least one of the preconditions you specified + # did not hold. + # + # * *HTTP Status Code:* 412 Precondition Failed + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* Redirect + # + # * *Description:* Temporary redirect. + # + # * *HTTP Status Code:* 307 Moved Temporarily + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* RestoreAlreadyInProgress + # + # * *Description:* Object restore is already in progress. + # + # * *HTTP Status Code:* 409 Conflict + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* RequestIsNotMultiPartContent + # + # * *Description:* Bucket POST must be of the enclosure-type + # multipart/form-data. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* RequestTimeout + # + # * *Description:* Your socket connection to the server was not read + # from or written to within the timeout period. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* RequestTimeTooSkewed + # + # * *Description:* The difference between the request time and the + # server's time is too large. + # + # * *HTTP Status Code:* 403 Forbidden + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* RequestTorrentOfBucketError + # + # * *Description:* Requesting the torrent file of a bucket is not + # permitted. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* SignatureDoesNotMatch + # + # * *Description:* The request signature we calculated does not + # match the signature you provided. Check your Amazon Web Services + # secret access key and signing method. For more information, see + # [REST Authentication][3] and [SOAP Authentication][4] for + # details. + # + # * *HTTP Status Code:* 403 Forbidden + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* ServiceUnavailable + # + # * *Description:* Reduce your request rate. + # + # * *HTTP Status Code:* 503 Service Unavailable + # + # * *SOAP Fault Code Prefix:* Server + # + # * * *Code:* SlowDown + # + # * *Description:* Reduce your request rate. + # + # * *HTTP Status Code:* 503 Slow Down + # + # * *SOAP Fault Code Prefix:* Server + # + # * * *Code:* TemporaryRedirect + # + # * *Description:* You are being redirected to the bucket while DNS + # updates. + # + # * *HTTP Status Code:* 307 Moved Temporarily + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* TokenRefreshRequired + # + # * *Description:* The provided token must be refreshed. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* TooManyBuckets + # + # * *Description:* You have attempted to create more buckets than + # allowed. 
+ # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* UnexpectedContent + # + # * *Description:* This request does not support content. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* UnresolvableGrantByEmailAddress + # + # * *Description:* The email address you provided does not match any + # account on record. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # * * *Code:* UserKeyMustBeSpecified + # + # * *Description:* The bucket POST must contain the specified field + # name. If it is specified, check the order of the fields. + # + # * *HTTP Status Code:* 400 Bad Request + # + # * *SOAP Fault Code Prefix:* Client + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + # [2]: http://aws.amazon.com/s3 + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html + # @return [String] + # + # @!attribute [rw] message + # The error message contains a generic description of the error + # condition in English. It is intended for a human audience. Simple + # programs display the message directly to the end user if they + # encounter an error condition they don't know how or don't care to + # handle. Sophisticated programs with more exhaustive error handling + # and proper internationalization are more likely to ignore the error + # message. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error AWS API Documentation + # + class Error < Struct.new( + :key, + :version_id, + :code, + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The error information. + # + # @!attribute [rw] key + # The object key name to use when a 4XX class error occurs. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument AWS API Documentation + # + class ErrorDocument < Struct.new( + :key) + SENSITIVE = [] + include Aws::Structure + end + + # A container for specifying the configuration for Amazon EventBridge. + # + # @api private + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EventBridgeConfiguration AWS API Documentation + # + class EventBridgeConfiguration < Aws::EmptyStructure; end + + # Optional configuration to replicate existing source bucket objects. + # For more information, see [Replicating Existing Objects][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication + # + # @!attribute [rw] status + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExistingObjectReplication AWS API Documentation + # + class ExistingObjectReplication < Struct.new( + :status) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the Amazon S3 object key name to filter on and whether to + # filter on the suffix or prefix of the key name. 
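+ #
+ # As a hedged sketch (the prefix value below is hypothetical, not
+ # part of the generated docs), a filter rule travels as a simple
+ # name/value pair whose name is either `prefix` or `suffix`:
+ #
+ #     # only match keys that start with "images/"
+ #     { name: "prefix", value: "images/" }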
+ # + # @!attribute [rw] name + # The object key name prefix or suffix identifying one or more objects + # to which the filtering rule applies. The maximum length is 1,024 + # characters. Overlapping prefixes and suffixes are not supported. For + # more information, see [Configuring Event Notifications][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # @return [String] + # + # @!attribute [rw] value + # The value that the filter searches for in object key names. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule AWS API Documentation + # + class FilterRule < Struct.new( + :name, + :value) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] status + # The accelerate configuration of the bucket. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput AWS API Documentation + # + class GetBucketAccelerateConfigurationOutput < Struct.new( + :status) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which the accelerate configuration is + # retrieved. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest AWS API Documentation + # + class GetBucketAccelerateConfigurationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] owner + # Container for the bucket owner's display name and ID. + # @return [Types::Owner] + # + # @!attribute [rw] grants + # A list of grants. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput AWS API Documentation + # + class GetBucketAclOutput < Struct.new( + :owner, + :grants) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # Specifies the S3 bucket whose ACL is being requested. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest AWS API Documentation + # + class GetBucketAclRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] analytics_configuration + # The configuration and any analyses for the analytics filter. + # @return [Types::AnalyticsConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput AWS API Documentation + # + class GetBucketAnalyticsConfigurationOutput < Struct.new( + :analytics_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket from which an analytics configuration is + # retrieved. + # @return [String] + # + # @!attribute [rw] id + # The ID that identifies the analytics configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. 
If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest AWS API Documentation + # + class GetBucketAnalyticsConfigurationRequest < Struct.new( + :bucket, + :id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] cors_rules + # A set of origins and methods (cross-origin access that you want to + # allow). You can add up to 100 rules to the configuration. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput AWS API Documentation + # + class GetBucketCorsOutput < Struct.new( + :cors_rules) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which to get the cors configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest AWS API Documentation + # + class GetBucketCorsRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] server_side_encryption_configuration + # Specifies the default server-side-encryption configuration. + # @return [Types::ServerSideEncryptionConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionOutput AWS API Documentation + # + class GetBucketEncryptionOutput < Struct.new( + :server_side_encryption_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket from which the server-side encryption + # configuration is retrieved. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionRequest AWS API Documentation + # + class GetBucketEncryptionRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] intelligent_tiering_configuration + # Container for S3 Intelligent-Tiering configuration. + # @return [Types::IntelligentTieringConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfigurationOutput AWS API Documentation + # + class GetBucketIntelligentTieringConfigurationOutput < Struct.new( + :intelligent_tiering_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # @return [String] + # + # @!attribute [rw] id + # The ID used to identify the S3 Intelligent-Tiering configuration. 
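+ #
+ # A minimal sketch of passing this ID (bucket name and ID are
+ # hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     resp = s3.get_bucket_intelligent_tiering_configuration(
+ #       bucket: "my-bucket",   # hypothetical bucket
+ #       id: "archive-config"   # hypothetical configuration ID
+ #     )
+ #     resp.intelligent_tiering_configuration.id # => "archive-config"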
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfigurationRequest AWS API Documentation + # + class GetBucketIntelligentTieringConfigurationRequest < Struct.new( + :bucket, + :id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] inventory_configuration + # Specifies the inventory configuration. + # @return [Types::InventoryConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput AWS API Documentation + # + class GetBucketInventoryConfigurationOutput < Struct.new( + :inventory_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the inventory configuration to + # retrieve. + # @return [String] + # + # @!attribute [rw] id + # The ID used to identify the inventory configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest AWS API Documentation + # + class GetBucketInventoryConfigurationRequest < Struct.new( + :bucket, + :id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] rules + # Container for a lifecycle rule. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput AWS API Documentation + # + class GetBucketLifecycleConfigurationOutput < Struct.new( + :rules) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the lifecycle information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest AWS API Documentation + # + class GetBucketLifecycleConfigurationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] rules + # Container for a lifecycle rule. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput AWS API Documentation + # + class GetBucketLifecycleOutput < Struct.new( + :rules) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the lifecycle information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest AWS API Documentation + # + class GetBucketLifecycleRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] location_constraint + # Specifies the Region where the bucket resides. 
For a list of all the + # Amazon S3 supported location constraints by Region, see [Regions and + # Endpoints][1]. Buckets in Region `us-east-1` have a + # LocationConstraint of `null`. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput AWS API Documentation + # + class GetBucketLocationOutput < Struct.new( + :location_constraint) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the location. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest AWS API Documentation + # + class GetBucketLocationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] logging_enabled + # Describes where logs are stored and the prefix that Amazon S3 + # assigns to all log object keys for a bucket. For more information, + # see [PUT Bucket logging][1] in the *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + # @return [Types::LoggingEnabled] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput AWS API Documentation + # + class GetBucketLoggingOutput < Struct.new( + :logging_enabled) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which to get the logging information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest AWS API Documentation + # + class GetBucketLoggingRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] metrics_configuration + # Specifies the metrics configuration. + # @return [Types::MetricsConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput AWS API Documentation + # + class GetBucketMetricsConfigurationOutput < Struct.new( + :metrics_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the metrics configuration to + # retrieve. + # @return [String] + # + # @!attribute [rw] id + # The ID used to identify the metrics configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
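+ #
+ # A hedged sketch of the ownership guard (bucket, ID, and account
+ # number are hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     # raises Aws::S3::Errors::AccessDenied if "my-bucket" is owned
+ #     # by an account other than 111122223333
+ #     s3.get_bucket_metrics_configuration(
+ #       bucket: "my-bucket",
+ #       id: "EntireBucket",
+ #       expected_bucket_owner: "111122223333"
+ #     )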
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest AWS API Documentation + # + class GetBucketMetricsConfigurationRequest < Struct.new( + :bucket, + :id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the notification + # configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest AWS API Documentation + # + class GetBucketNotificationConfigurationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] ownership_controls + # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, + # or ObjectWriter) currently in effect for this Amazon S3 bucket. + # @return [Types::OwnershipControls] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControlsOutput AWS API Documentation + # + class GetBucketOwnershipControlsOutput < Struct.new( + :ownership_controls) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose `OwnershipControls` you want + # to retrieve. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControlsRequest AWS API Documentation + # + class GetBucketOwnershipControlsRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] policy + # The bucket policy as a JSON document. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput AWS API Documentation + # + class GetBucketPolicyOutput < Struct.new( + :policy) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which to get the bucket policy. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest AWS API Documentation + # + class GetBucketPolicyRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] policy_status + # The policy status for the specified bucket. + # @return [Types::PolicyStatus] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatusOutput AWS API Documentation + # + class GetBucketPolicyStatusOutput < Struct.new( + :policy_status) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose policy status you want to + # retrieve. 
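+ #
+ # For illustration (bucket name hypothetical), the status can be
+ # read directly from the response:
+ #
+ #     resp = Aws::S3::Client.new.get_bucket_policy_status(
+ #       bucket: "my-bucket"
+ #     )
+ #     resp.policy_status.is_public # => true or false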
+ # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatusRequest AWS API Documentation + # + class GetBucketPolicyStatusRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] replication_configuration + # A container for replication rules. You can add up to 1,000 rules. + # The maximum size of a replication configuration is 2 MB. + # @return [Types::ReplicationConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput AWS API Documentation + # + class GetBucketReplicationOutput < Struct.new( + :replication_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which to get the replication information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest AWS API Documentation + # + class GetBucketReplicationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] payer + # Specifies who pays for the download and request fees. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput AWS API Documentation + # + class GetBucketRequestPaymentOutput < Struct.new( + :payer) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the payment request + # configuration + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest AWS API Documentation + # + class GetBucketRequestPaymentRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] tag_set + # Contains the tag set. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput AWS API Documentation + # + class GetBucketTaggingOutput < Struct.new( + :tag_set) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the tagging information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
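+ #
+ # A short sketch of reading a bucket's tags (bucket name and
+ # account ID are hypothetical):
+ #
+ #     resp = Aws::S3::Client.new.get_bucket_tagging(
+ #       bucket: "my-bucket",
+ #       expected_bucket_owner: "111122223333" # optional ownership guard
+ #     )
+ #     resp.tag_set.map { |t| "#{t.key}=#{t.value}" } # => e.g. ["env=prod"]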
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest AWS API Documentation + # + class GetBucketTaggingRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] status + # The versioning state of the bucket. + # @return [String] + # + # @!attribute [rw] mfa_delete + # Specifies whether MFA delete is enabled in the bucket versioning + # configuration. This element is only returned if the bucket has been + # configured with MFA delete. If the bucket has never been so + # configured, this element is not returned. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput AWS API Documentation + # + class GetBucketVersioningOutput < Struct.new( + :status, + :mfa_delete) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to get the versioning information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest AWS API Documentation + # + class GetBucketVersioningRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] redirect_all_requests_to + # Specifies the redirect behavior of all requests to a website + # endpoint of an Amazon S3 bucket. + # @return [Types::RedirectAllRequestsTo] + # + # @!attribute [rw] index_document + # The name of the index document for the website (for example + # `index.html`). + # @return [Types::IndexDocument] + # + # @!attribute [rw] error_document + # The object key name of the website error document to use for 4XX + # class errors. + # @return [Types::ErrorDocument] + # + # @!attribute [rw] routing_rules + # Rules that define when a redirect is applied and the redirect + # behavior. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput AWS API Documentation + # + class GetBucketWebsiteOutput < Struct.new( + :redirect_all_requests_to, + :index_document, + :error_document, + :routing_rules) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name for which to get the website configuration. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest AWS API Documentation + # + class GetBucketWebsiteRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] owner + # Container for the bucket owner's display name and ID. + # @return [Types::Owner] + # + # @!attribute [rw] grants + # A list of grants. + # @return [Array] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. 
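+ #
+ # A hedged sketch of inspecting an object ACL (bucket and key are
+ # hypothetical):
+ #
+ #     resp = Aws::S3::Client.new.get_object_acl(
+ #       bucket: "my-bucket", key: "report.csv"
+ #     )
+ #     resp.owner.id # canonical user ID of the owner
+ #     resp.grants.each { |g| puts "#{g.grantee.type}: #{g.permission}" }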
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput AWS API Documentation + # + class GetObjectAclOutput < Struct.new( + :owner, + :grants, + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name that contains the object for which to get the ACL + # information. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] key + # The key of the object for which to get the ACL information. + # @return [String] + # + # @!attribute [rw] version_id + # VersionId used to reference a specific version of the object. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest AWS API Documentation + # + class GetObjectAclRequest < Struct.new( + :bucket, + :key, + :version_id, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] delete_marker + # Specifies whether the object retrieved was (`true`) or was not + # (`false`) a delete marker. If `false`, this response header does not + # appear in the response. + # @return [Boolean] + # + # @!attribute [rw] last_modified + # The creation date of the object. + # @return [Time] + # + # @!attribute [rw] version_id + # The version ID of the object. + # @return [String] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @!attribute [rw] etag + # An ETag is an opaque identifier assigned by a web server to a + # specific version of a resource found at a URL. + # @return [String] + # + # @!attribute [rw] checksum + # The checksum or digest of the object. + # @return [Types::Checksum] + # + # @!attribute [rw] object_parts + # A collection of parts associated with a multipart upload. + # @return [Types::GetObjectAttributesParts] + # + # @!attribute [rw] storage_class + # Provides the storage class information of the object. Amazon S3 + # returns this header for all objects except for S3 Standard storage + # class objects. + # + # For more information, see [Storage Classes][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + # @return [String] + # + # @!attribute [rw] object_size + # The size of the object in bytes. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesOutput AWS API Documentation + # + class GetObjectAttributesOutput < Struct.new( + :delete_marker, + :last_modified, + :version_id, + :request_charged, + :etag, + :checksum, + :object_parts, + :storage_class, + :object_size) + SENSITIVE = [] + include Aws::Structure + end + + # A collection of parts associated with a multipart upload. + # + # @!attribute [rw] total_parts_count + # The total number of parts. + # @return [Integer] + # + # @!attribute [rw] part_number_marker + # The marker for the current part. + # @return [Integer] + # + # @!attribute [rw] next_part_number_marker + # When a list is truncated, this element specifies the last part in + # the list, as well as the value to use for the `PartNumberMarker` + # request parameter in a subsequent request. + # @return [Integer] + # + # @!attribute [rw] max_parts + # The maximum number of parts allowed in the response. + # @return [Integer] + # + # @!attribute [rw] is_truncated + # Indicates whether the returned list of parts is truncated. A value + # of `true` indicates that the list was truncated. A list can be + # truncated if the number of parts exceeds the limit returned in the + # `MaxParts` element. + # @return [Boolean] + # + # @!attribute [rw] parts + # A container for elements related to a particular part. A response + # can contain zero or more `Parts` elements. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesParts AWS API Documentation + # + class GetObjectAttributesParts < Struct.new( + :total_parts_count, + :part_number_marker, + :next_part_number_marker, + :max_parts, + :is_truncated, + :parts) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket that contains the object. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # The object key. + # @return [String] + # + # @!attribute [rw] version_id + # The version ID used to reference a specific version of the object. 
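+ #
+ # A hedged sketch of fetching attributes for a specific version
+ # (bucket, key, and version ID are hypothetical):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     resp = s3.get_object_attributes(
+ #       bucket: "my-bucket",
+ #       key: "report.csv",
+ #       version_id: "3sL4kqtJlcpXrof3vjVBH40Nrjfkd",
+ #       object_attributes: ["ETag", "ObjectSize", "StorageClass"]
+ #     )
+ #     resp.object_size # => size in bytes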
+ # @return [String] + # + # @!attribute [rw] max_parts + # Sets the maximum number of parts to return. + # @return [Integer] + # + # @!attribute [rw] part_number_marker + # Specifies the part after which listing should begin. Only parts with + # higher part numbers will be listed. + # @return [Integer] + # + # @!attribute [rw] sse_customer_algorithm + # Specifies the algorithm to use when encrypting the object (for + # example, AES256). + # @return [String] + # + # @!attribute [rw] sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use + # in encrypting data. This value is used to store the object and then + # it is discarded; Amazon S3 does not store the encryption key. The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check + # to ensure that the encryption key was transmitted without error. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] object_attributes + # An XML header that specifies the fields at the root level that you + # want returned in the response. Fields that you do not specify are + # not returned. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesRequest AWS API Documentation + # + class GetObjectAttributesRequest < Struct.new( + :bucket, + :key, + :version_id, + :max_parts, + :part_number_marker, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :request_payer, + :expected_bucket_owner, + :object_attributes) + SENSITIVE = [:sse_customer_key] + include Aws::Structure + end + + # @!attribute [rw] legal_hold + # The current legal hold status for the specified object. + # @return [Types::ObjectLockLegalHold] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHoldOutput AWS API Documentation + # + class GetObjectLegalHoldOutput < Struct.new( + :legal_hold) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object whose legal hold status you + # want to retrieve. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] key + # The key name for the object whose legal hold status you want to + # retrieve. + # @return [String] + # + # @!attribute [rw] version_id + # The version ID of the object whose legal hold status you want to + # retrieve. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHoldRequest AWS API Documentation + # + class GetObjectLegalHoldRequest < Struct.new( + :bucket, + :key, + :version_id, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] object_lock_configuration + # The specified bucket's Object Lock configuration. + # @return [Types::ObjectLockConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfigurationOutput AWS API Documentation + # + class GetObjectLockConfigurationOutput < Struct.new( + :object_lock_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket whose Object Lock configuration you want to retrieve. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfigurationRequest AWS API Documentation + # + class GetObjectLockConfigurationRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] body + # Object data. + # @return [IO] + # + # @!attribute [rw] delete_marker + # Specifies whether the object retrieved was (true) or was not (false) + # a Delete Marker. If false, this response header does not appear in + # the response. + # @return [Boolean] + # + # @!attribute [rw] accept_ranges + # Indicates that a range of bytes was specified. 
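+ #
+ # For example (sketch; bucket and key hypothetical), a ranged read
+ # returns a partial response whose `content_range` reflects the
+ # slice that was served:
+ #
+ #     resp = Aws::S3::Client.new.get_object(
+ #       bucket: "my-bucket", key: "big.log", range: "bytes=0-99"
+ #     )
+ #     resp.accept_ranges # => "bytes"
+ #     resp.content_range # => "bytes 0-99/12345" (total size varies)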
+ # @return [String] + # + # @!attribute [rw] expiration + # If the object expiration is configured (see PUT Bucket lifecycle), + # the response includes this header. It includes the `expiry-date` and + # `rule-id` key-value pairs providing object expiration information. + # The value of the `rule-id` is URL-encoded. + # @return [String] + # + # @!attribute [rw] restore + # Provides information about object restoration action and expiration + # time of the restored object copy. + # @return [String] + # + # @!attribute [rw] last_modified + # Creation date of the object. + # @return [Time] + # + # @!attribute [rw] content_length + # Size of the body in bytes. + # @return [Integer] + # + # @!attribute [rw] etag + # An entity tag (ETag) is an opaque identifier assigned by a web + # server to a specific version of a resource found at a URL. + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] missing_meta + # This is set to the number of metadata entries not returned in + # `x-amz-meta` headers. This can happen if you create metadata using + # an API like SOAP that supports more flexible metadata than the REST + # API. For example, using SOAP, you can create metadata whose values + # are not legal HTTP headers. + # @return [Integer] + # + # @!attribute [rw] version_id + # Version of the object. 
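+ #
+ # For example (sketch; bucket and key hypothetical), on a bucket
+ # with versioning enabled:
+ #
+ #     resp = Aws::S3::Client.new.get_object(bucket: "my-bucket", key: "a.txt")
+ #     resp.version_id # => ID of the object version actually served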
+ # @return [String] + # + # @!attribute [rw] cache_control + # Specifies caching behavior along the request/reply chain. + # @return [String] + # + # @!attribute [rw] content_disposition + # Specifies presentational information for the object. + # @return [String] + # + # @!attribute [rw] content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the + # media-type referenced by the Content-Type header field. + # @return [String] + # + # @!attribute [rw] content_language + # The language the content is in. + # @return [String] + # + # @!attribute [rw] content_range + # The portion of the object returned in the response. + # @return [String] + # + # @!attribute [rw] content_type + # A standard MIME type describing the format of the object data. + # @return [String] + # + # @!attribute [rw] expires + # The date and time at which the object is no longer cacheable. + # @return [Time] + # + # @!attribute [rw] expires_string + # @return [String] + # + # @!attribute [rw] website_redirect_location + # If the bucket is configured as a website, redirects requests for + # this object to another object in the same bucket or to an external + # URL. Amazon S3 stores the value of this header in the object + # metadata. + # @return [String] + # + # @!attribute [rw] server_side_encryption + # The server-side encryption algorithm used when storing this object + # in Amazon S3 (for example, AES256, aws:kms). + # @return [String] + # + # @!attribute [rw] metadata + # A map of metadata to store with the object in S3. + # @return [Hash] + # + # @!attribute [rw] sse_customer_algorithm + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header to provide + # round-trip message integrity verification of the customer-provided + # encryption key. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If present, specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the object uses an S3 Bucket Key for server-side + # encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] storage_class + # Provides storage class information of the object. Amazon S3 returns + # this header for all objects except for S3 Standard storage class + # objects. + # @return [String] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @!attribute [rw] replication_status + # Amazon S3 can return this if your request involves a bucket that is + # either a source or destination in a replication rule. + # @return [String] + # + # @!attribute [rw] parts_count + # The count of parts this object has. This value is only returned if + # you specify `partNumber` in your request and the object was uploaded + # as a multipart upload. + # @return [Integer] + # + # @!attribute [rw] tag_count + # The number of tags, if any, on the object. 
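+ #
+ # For example (sketch; bucket and key hypothetical), the header may
+ # be absent, so guard for `nil` before using it:
+ #
+ #     resp = Aws::S3::Client.new.get_object(bucket: "my-bucket", key: "a.txt")
+ #     resp.tag_count.to_i # => 0 when the header was not returned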
+ # @return [Integer] + # + # @!attribute [rw] object_lock_mode + # The Object Lock mode currently in place for this object. + # @return [String] + # + # @!attribute [rw] object_lock_retain_until_date + # The date and time when this object's Object Lock will expire. + # @return [Time] + # + # @!attribute [rw] object_lock_legal_hold_status + # Indicates whether this object has an active legal hold. This field + # is only returned if you have permission to view an object's legal + # hold status. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput AWS API Documentation + # + class GetObjectOutput < Struct.new( + :body, + :delete_marker, + :accept_ranges, + :expiration, + :restore, + :last_modified, + :content_length, + :etag, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :missing_meta, + :version_id, + :cache_control, + :content_disposition, + :content_encoding, + :content_language, + :content_range, + :content_type, + :expires, + :expires_string, + :website_redirect_location, + :server_side_encryption, + :metadata, + :sse_customer_algorithm, + :sse_customer_key_md5, + :ssekms_key_id, + :bucket_key_enabled, + :storage_class, + :request_charged, + :replication_status, + :parts_count, + :tag_count, + :object_lock_mode, + :object_lock_retain_until_date, + :object_lock_legal_hold_status) + SENSITIVE = [:ssekms_key_id] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using an Object Lambda access point the hostname takes the form + # *AccessPointName*-*AccountId*.s3-object-lambda.*Region*.amazonaws.com. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] if_match + # Return the object only if its entity tag (ETag) is the same as the + # one specified; otherwise, return a 412 (precondition failed) error. + # @return [String] + # + # @!attribute [rw] if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. + # @return [Time] + # + # @!attribute [rw] if_none_match + # Return the object only if its entity tag (ETag) is different from + # the one specified; otherwise, return a 304 (not modified) error. 
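+ #
+ # A hedged sketch of a conditional GET (bucket, key, and ETag value
+ # are hypothetical; the SDK typically surfaces the 304 as
+ # `Aws::S3::Errors::NotModified`):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     begin
+ #       s3.get_object(bucket: "my-bucket", key: "a.txt",
+ #                     if_none_match: '"9b2cf535f27731c974343645a3985328"')
+ #     rescue Aws::S3::Errors::NotModified
+ #       # the cached copy is still current
+ #     end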
+ # @return [String] + # + # @!attribute [rw] if_unmodified_since + # Return the object only if it has not been modified since the + # specified time; otherwise, return a 412 (precondition failed) error. + # @return [Time] + # + # @!attribute [rw] key + # Key of the object to get. + # @return [String] + # + # @!attribute [rw] range + # Downloads the specified range bytes of an object. For more + # information about the HTTP Range header, see + # [https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1]. + # + # Amazon S3 doesn't support retrieving multiple ranges of data per + # `GET` request. + # + # + # + # + # + # [1]: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + # @return [String] + # + # @!attribute [rw] response_cache_control + # Sets the `Cache-Control` header of the response. + # @return [String] + # + # @!attribute [rw] response_content_disposition + # Sets the `Content-Disposition` header of the response + # @return [String] + # + # @!attribute [rw] response_content_encoding + # Sets the `Content-Encoding` header of the response. + # @return [String] + # + # @!attribute [rw] response_content_language + # Sets the `Content-Language` header of the response. + # @return [String] + # + # @!attribute [rw] response_content_type + # Sets the `Content-Type` header of the response. + # @return [String] + # + # @!attribute [rw] response_expires + # Sets the `Expires` header of the response. + # @return [Time] + # + # @!attribute [rw] version_id + # VersionId used to reference a specific version of the object. + # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # Specifies the algorithm to use to when decrypting the object (for + # example, AES256). + # @return [String] + # + # @!attribute [rw] sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 used to + # encrypt the data. This value is used to decrypt the object when + # recovering it and must match the one used when storing the data. The + # key must be appropriate for use with the algorithm specified in the + # `x-amz-server-side-encryption-customer-algorithm` header. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check + # to ensure that the encryption key was transmitted without error. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] part_number + # Part number of the object being read. This is a positive integer + # between 1 and 10,000. Effectively performs a 'ranged' GET request + # for the part specified. Useful for downloading just a part of an + # object. + # @return [Integer] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] checksum_mode + # To retrieve the checksum, this mode must be enabled. 
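+ #
+ # A minimal sketch (bucket and key hypothetical) of requesting
+ # checksum headers on download:
+ #
+ #     resp = Aws::S3::Client.new.get_object(
+ #       bucket: "my-bucket",
+ #       key: "report.csv",
+ #       checksum_mode: "ENABLED"
+ #     )
+ #     resp.checksum_sha256 # base64 digest, if one was stored with the object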
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest AWS API Documentation + # + class GetObjectRequest < Struct.new( + :bucket, + :if_match, + :if_modified_since, + :if_none_match, + :if_unmodified_since, + :key, + :range, + :response_cache_control, + :response_content_disposition, + :response_content_encoding, + :response_content_language, + :response_content_type, + :response_expires, + :version_id, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :request_payer, + :part_number, + :expected_bucket_owner, + :checksum_mode) + SENSITIVE = [:sse_customer_key] + include Aws::Structure + end + + # @!attribute [rw] retention + # The container element for an object's retention settings. + # @return [Types::ObjectLockRetention] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetentionOutput AWS API Documentation + # + class GetObjectRetentionOutput < Struct.new( + :retention) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object whose retention settings you + # want to retrieve. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] key + # The key name for the object whose retention settings you want to + # retrieve. + # @return [String] + # + # @!attribute [rw] version_id + # The version ID for the object whose retention settings you want to + # retrieve. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetentionRequest AWS API Documentation + # + class GetObjectRetentionRequest < Struct.new( + :bucket, + :key, + :version_id, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] version_id + # The versionId of the object for which you got the tagging + # information. + # @return [String] + # + # @!attribute [rw] tag_set + # Contains the tag set. 
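+ #
+ # A short sketch of reading object tags (bucket and key are
+ # hypothetical):
+ #
+ #     resp = Aws::S3::Client.new.get_object_tagging(
+ #       bucket: "my-bucket", key: "report.csv"
+ #     )
+ #     resp.tag_set.each { |tag| puts "#{tag.key}=#{tag.value}" }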
+ # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput AWS API Documentation + # + class GetObjectTaggingOutput < Struct.new( + :version_id, + :tag_set) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object for which to get the tagging + # information. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Object key for which to get the tagging information. + # @return [String] + # + # @!attribute [rw] version_id + # The versionId of the object for which to get the tagging + # information. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest AWS API Documentation + # + class GetObjectTaggingRequest < Struct.new( + :bucket, + :key, + :version_id, + :expected_bucket_owner, + :request_payer) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] body + # A Bencoded dictionary as defined by the BitTorrent specification + # @return [IO] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput AWS API Documentation + # + class GetObjectTorrentOutput < Struct.new( + :body, + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the object for which to get the + # torrent files. 
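+ #
+ # A minimal sketch of saving the torrent file locally (hypothetical
+ # names, assuming `s3` is a configured `Aws::S3::Client`):
+ #
+ #     resp = s3.get_object_torrent(bucket: "my-bucket", key: "my-key")
+ #     File.binwrite("my-key.torrent", resp.body.read)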
+ # @return [String] + # + # @!attribute [rw] key + # The object key for which to get the information. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest AWS API Documentation + # + class GetObjectTorrentRequest < Struct.new( + :bucket, + :key, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] public_access_block_configuration + # The `PublicAccessBlock` configuration currently in effect for this + # Amazon S3 bucket. + # @return [Types::PublicAccessBlockConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlockOutput AWS API Documentation + # + class GetPublicAccessBlockOutput < Struct.new( + :public_access_block_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose `PublicAccessBlock` + # configuration you want to retrieve. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlockRequest AWS API Documentation + # + class GetPublicAccessBlockRequest < Struct.new( + :bucket, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # Container for S3 Glacier job parameters. + # + # @!attribute [rw] tier + # Retrieval tier at which the restore will be processed. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters AWS API Documentation + # + class GlacierJobParameters < Struct.new( + :tier) + SENSITIVE = [] + include Aws::Structure + end + + # Container for grant information. + # + # @!attribute [rw] grantee + # The person being granted permissions. + # @return [Types::Grantee] + # + # @!attribute [rw] permission + # Specifies the permission given to the grantee. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant AWS API Documentation + # + class Grant < Struct.new( + :grantee, + :permission) + SENSITIVE = [] + include Aws::Structure + end + + # Container for the person being granted permissions. + # + # @!attribute [rw] display_name + # Screen name of the grantee. + # @return [String] + # + # @!attribute [rw] email_address + # Email address of the grantee. + # + # Using email addresses to specify a grantee is only supported in the + # following Amazon Web Services Regions: + # + # * US East (N. Virginia) + # + # * US West (N. 
California)
+ #
+ # * US West (Oregon)
+ #
+ # * Asia Pacific (Singapore)
+ #
+ # * Asia Pacific (Sydney)
+ #
+ # * Asia Pacific (Tokyo)
+ #
+ # * Europe (Ireland)
+ #
+ # * South America (São Paulo)
+ #
+ # For a list of all the Amazon S3 supported Regions and endpoints, see
+ # [Regions and Endpoints][1] in the Amazon Web Services General
+ # Reference.
+ #
+ #
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The canonical user ID of the grantee.
+ # @return [String]
+ #
+ # @!attribute [rw] type
+ # Type of grantee.
+ # @return [String]
+ #
+ # @!attribute [rw] uri
+ # URI of the grantee group.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee AWS API Documentation
+ #
+ class Grantee < Struct.new(
+ :display_name,
+ :email_address,
+ :id,
+ :type,
+ :uri)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The bucket name.
+ #
+ # When using this action with an access point, you must direct
+ # requests to the access point hostname. The access point hostname
+ # takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the
+ # bucket name. For more information about access point ARNs, see
+ # [Using access points][1] in the *Amazon S3 User Guide*.
+ #
+ # When using this action with Amazon S3 on Outposts, you must direct
+ # requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ # takes the form `
+ # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+ # When using this action with S3 on Outposts through the Amazon Web
+ # Services SDKs, you provide the Outposts bucket ARN in place of the
+ # bucket name. For more information about S3 on Outposts ARNs, see
+ # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest AWS API Documentation
+ #
+ class HeadBucketRequest < Struct.new(
+ :bucket,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] delete_marker
+ # Specifies whether the object retrieved was (true) or was not (false)
+ # a Delete Marker. If false, this response header does not appear in
+ # the response.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] accept_ranges
+ # Indicates that a range of bytes was specified.
+ # @return [String]
+ #
+ # @!attribute [rw] expiration
+ # If the object expiration is configured (see PUT Bucket lifecycle),
+ # the response includes this header. It includes the `expiry-date` and
+ # `rule-id` key-value pairs providing object expiration information.
+ # The value of the `rule-id` is URL-encoded.
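+ #
+ # An illustrative (not authoritative) header value:
+ #
+ #     x-amz-expiration: expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="picture-deletion-rule"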
+ # @return [String]
+ #
+ # @!attribute [rw] restore
+ # If the object is an archived object (an object whose storage class
+ # is GLACIER), the response includes this header if either the archive
+ # restoration is in progress (see [RestoreObject][1]) or an archive
+ # copy is already restored.
+ #
+ # If an archive copy is already restored, the header value indicates
+ # when Amazon S3 is scheduled to delete the object copy. For example:
+ #
+ # `x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec
+ # 2012 00:00:00 GMT"`
+ #
+ # If the object restoration is in progress, the header returns the
+ # value `ongoing-request="true"`.
+ #
+ # For more information about archiving objects, see [Transitioning
+ # Objects: General Considerations][2].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations
+ # @return [String]
+ #
+ # @!attribute [rw] archive_status
+ # The archive state of the head object.
+ # @return [String]
+ #
+ # @!attribute [rw] last_modified
+ # Creation date of the object.
+ # @return [Time]
+ #
+ # @!attribute [rw] content_length
+ # Size of the body in bytes.
+ # @return [Integer]
+ #
+ # @!attribute [rw] checksum_crc32
+ # The base64-encoded, 32-bit CRC32 checksum of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_crc32c
+ # The base64-encoded, 32-bit CRC32C checksum of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_sha1
+ # The base64-encoded, 160-bit SHA-1 digest of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_sha256
+ # The base64-encoded, 256-bit SHA-256 digest of the object. This will
+ # only be present if it was uploaded with the object. With multipart
+ # uploads, this may not be a checksum value of the object. For more
+ # information about how checksums are calculated with multipart
+ # uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+ # Guide*.
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] etag + # An entity tag (ETag) is an opaque identifier assigned by a web + # server to a specific version of a resource found at a URL. + # @return [String] + # + # @!attribute [rw] missing_meta + # This is set to the number of metadata entries not returned in + # `x-amz-meta` headers. This can happen if you create metadata using + # an API like SOAP that supports more flexible metadata than the REST + # API. For example, using SOAP, you can create metadata whose values + # are not legal HTTP headers. + # @return [Integer] + # + # @!attribute [rw] version_id + # Version of the object. + # @return [String] + # + # @!attribute [rw] cache_control + # Specifies caching behavior along the request/reply chain. + # @return [String] + # + # @!attribute [rw] content_disposition + # Specifies presentational information for the object. + # @return [String] + # + # @!attribute [rw] content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the + # media-type referenced by the Content-Type header field. + # @return [String] + # + # @!attribute [rw] content_language + # The language the content is in. + # @return [String] + # + # @!attribute [rw] content_type + # A standard MIME type describing the format of the object data. + # @return [String] + # + # @!attribute [rw] expires + # The date and time at which the object is no longer cacheable. + # @return [Time] + # + # @!attribute [rw] expires_string + # @return [String] + # + # @!attribute [rw] website_redirect_location + # If the bucket is configured as a website, redirects requests for + # this object to another object in the same bucket or to an external + # URL. Amazon S3 stores the value of this header in the object + # metadata. + # @return [String] + # + # @!attribute [rw] server_side_encryption + # If the object is stored using server-side encryption either with an + # Amazon Web Services KMS key or an Amazon S3-managed encryption key, + # the response includes this header with the value of the server-side + # encryption algorithm used when storing this object in Amazon S3 (for + # example, AES256, aws:kms). + # @return [String] + # + # @!attribute [rw] metadata + # A map of metadata to store with the object in S3. + # @return [Hash] + # + # @!attribute [rw] sse_customer_algorithm + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header to provide + # round-trip message integrity verification of the customer-provided + # encryption key. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If present, specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the object uses an S3 Bucket Key for server-side + # encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] storage_class + # Provides storage class information of the object. 
Amazon S3 returns
+ # this header for all objects except for S3 Standard storage class
+ # objects.
+ #
+ # For more information, see [Storage Classes][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ # @return [String]
+ #
+ # @!attribute [rw] request_charged
+ # If present, indicates that the requester was successfully charged
+ # for the request.
+ # @return [String]
+ #
+ # @!attribute [rw] replication_status
+ # Amazon S3 can return this header if your request involves a bucket
+ # that is either a source or a destination in a replication rule.
+ #
+ # In replication, you have a source bucket on which you configure
+ # replication and a destination bucket or buckets where Amazon S3
+ # stores object replicas. When you request an object (`GetObject`) or
+ # object metadata (`HeadObject`) from these buckets, Amazon S3 will
+ # return the `x-amz-replication-status` header in the response as
+ # follows:
+ #
+ # * **If requesting an object from the source bucket**, Amazon S3 will
+ # return the `x-amz-replication-status` header if the object in your
+ # request is eligible for replication.
+ #
+ # For example, suppose that in your replication configuration, you
+ # specify object prefix `TaxDocs` requesting Amazon S3 to replicate
+ # objects with key prefix `TaxDocs`. Any objects you upload with
+ # this key name prefix, for example `TaxDocs/document1.pdf`, are
+ # eligible for replication. For any object request with this key
+ # name prefix, Amazon S3 will return the `x-amz-replication-status`
+ # header with value PENDING, COMPLETED, or FAILED indicating object
+ # replication status.
+ #
+ # * **If requesting an object from a destination bucket**, Amazon S3
+ # will return the `x-amz-replication-status` header with value
+ # REPLICA if the object in your request is a replica that Amazon S3
+ # created and there is no replica modification replication in
+ # progress.
+ #
+ # * **When replicating objects to multiple destination buckets**, the
+ # `x-amz-replication-status` header acts differently. The header of
+ # the source object will only return a value of COMPLETED when
+ # replication is successful to all destinations. The header will
+ # remain at value PENDING until replication has completed for all
+ # destinations. If one or more destinations fail replication, the
+ # header will return FAILED.
+ #
+ # For more information, see [Replication][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ # @return [String]
+ #
+ # @!attribute [rw] parts_count
+ # The count of parts this object has. This value is only returned if
+ # you specify `partNumber` in your request and the object was uploaded
+ # as a multipart upload.
+ # @return [Integer]
+ #
+ # @!attribute [rw] object_lock_mode
+ # The Object Lock mode, if any, that's in effect for this object.
+ # This header is only returned if the requester has the
+ # `s3:GetObjectRetention` permission. For more information about S3
+ # Object Lock, see [Object Lock][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ # @return [String]
+ #
+ # @!attribute [rw] object_lock_retain_until_date
+ # The date and time when the Object Lock retention period expires.
+ # This header is only returned if the requester has the
+ # `s3:GetObjectRetention` permission.
+ # @return [Time]
+ #
+ # @!attribute [rw] object_lock_legal_hold_status
+ # Specifies whether a legal hold is in effect for this object.
This + # header is only returned if the requester has the + # `s3:GetObjectLegalHold` permission. This header is not returned if + # the specified version of this object has never had a legal hold + # applied. For more information about S3 Object Lock, see [Object + # Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput AWS API Documentation + # + class HeadObjectOutput < Struct.new( + :delete_marker, + :accept_ranges, + :expiration, + :restore, + :archive_status, + :last_modified, + :content_length, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :etag, + :missing_meta, + :version_id, + :cache_control, + :content_disposition, + :content_encoding, + :content_language, + :content_type, + :expires, + :expires_string, + :website_redirect_location, + :server_side_encryption, + :metadata, + :sse_customer_algorithm, + :sse_customer_key_md5, + :ssekms_key_id, + :bucket_key_enabled, + :storage_class, + :request_charged, + :replication_status, + :parts_count, + :object_lock_mode, + :object_lock_retain_until_date, + :object_lock_legal_hold_status) + SENSITIVE = [:ssekms_key_id] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the object. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] if_match + # Return the object only if its entity tag (ETag) is the same as the + # one specified; otherwise, return a 412 (precondition failed) error. + # @return [String] + # + # @!attribute [rw] if_modified_since + # Return the object only if it has been modified since the specified + # time; otherwise, return a 304 (not modified) error. + # @return [Time] + # + # @!attribute [rw] if_none_match + # Return the object only if its entity tag (ETag) is different from + # the one specified; otherwise, return a 304 (not modified) error. + # @return [String] + # + # @!attribute [rw] if_unmodified_since + # Return the object only if it has not been modified since the + # specified time; otherwise, return a 412 (precondition failed) error. + # @return [Time] + # + # @!attribute [rw] key + # The object key. 
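+ #
+ # A minimal sketch (hypothetical names, assuming `s3` is a configured
+ # `Aws::S3::Client`):
+ #
+ #     meta = s3.head_object(bucket: "my-bucket", key: "reports/2024/summary.csv")
+ #     puts meta.content_length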
+ # @return [String]
+ #
+ # @!attribute [rw] range
+ # Because `HeadObject` returns only the metadata for an object, this
+ # parameter has no effect.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # VersionId used to reference a specific version of the object.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use
+ # in encrypting data. This value is used to store the object and then
+ # it is discarded; Amazon S3 does not store the encryption key. The
+ # key must be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check
+ # to ensure that the encryption key was transmitted without error.
+ # @return [String]
+ #
+ # @!attribute [rw] request_payer
+ # Confirms that the requester knows that they will be charged for the
+ # request. Bucket owners need not specify this parameter in their
+ # requests. For information about downloading objects from Requester
+ # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+ # in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ # @return [String]
+ #
+ # @!attribute [rw] part_number
+ # Part number of the object being read. This is a positive integer
+ # between 1 and 10,000. Effectively performs a 'ranged' HEAD request
+ # for the part specified. Useful for querying the size of the part
+ # and the number of parts in this object.
+ # @return [Integer]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_mode
+ # To retrieve the checksum, this parameter must be enabled.
+ #
+ # In addition, if you enable `ChecksumMode` and the object is
+ # encrypted with Amazon Web Services Key Management Service (Amazon
+ # Web Services KMS), you must have permission to use the `kms:Decrypt`
+ # action for the request to succeed.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest AWS API Documentation
+ #
+ class HeadObjectRequest < Struct.new(
+ :bucket,
+ :if_match,
+ :if_modified_since,
+ :if_none_match,
+ :if_unmodified_since,
+ :key,
+ :range,
+ :version_id,
+ :sse_customer_algorithm,
+ :sse_customer_key,
+ :sse_customer_key_md5,
+ :request_payer,
+ :part_number,
+ :expected_bucket_owner,
+ :checksum_mode)
+ SENSITIVE = [:sse_customer_key]
+ include Aws::Structure
+ end
+
+ # Container for the `Suffix` element.
+ #
+ # @!attribute [rw] suffix
+ # A suffix that is appended to a request that is for a directory on
+ # the website endpoint (for example, if the suffix is index.html and
+ # you make a request to samplebucket/images/, the data that is
+ # returned will be for the object with the key name
+ # images/index.html). The suffix must not be empty and must not
+ # include a slash character.
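+ #
+ # A minimal sketch of setting this suffix (hypothetical bucket name,
+ # assuming `s3` is a configured `Aws::S3::Client`):
+ #
+ #     s3.put_bucket_website(
+ #       bucket: "my-bucket",
+ #       website_configuration: { index_document: { suffix: "index.html" } }
+ #     )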
+ #
+ # Replacement must be made for object keys containing special
+ # characters (such as carriage returns) when using XML requests. For
+ # more information, see [ XML related object key constraints][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument AWS API Documentation
+ #
+ class IndexDocument < Struct.new(
+ :suffix)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container element that identifies who initiated the multipart upload.
+ #
+ # @!attribute [rw] id
+ # If the principal is an Amazon Web Services account, it provides the
+ # Canonical User ID. If the principal is an IAM User, it provides a
+ # user ARN value.
+ # @return [String]
+ #
+ # @!attribute [rw] display_name
+ # Name of the Principal.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator AWS API Documentation
+ #
+ class Initiator < Struct.new(
+ :id,
+ :display_name)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes the serialization format of the object.
+ #
+ # @!attribute [rw] csv
+ # Describes the serialization of a CSV-encoded object.
+ # @return [Types::CSVInput]
+ #
+ # @!attribute [rw] compression_type
+ # Specifies the object's compression format. Valid values: NONE,
+ # GZIP, BZIP2. Default Value: NONE.
+ # @return [String]
+ #
+ # @!attribute [rw] json
+ # Specifies JSON as the object's input serialization format.
+ # @return [Types::JSONInput]
+ #
+ # @!attribute [rw] parquet
+ # Specifies Parquet as the object's input serialization format.
+ # @return [Types::ParquetInput]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization AWS API Documentation
+ #
+ class InputSerialization < Struct.new(
+ :csv,
+ :compression_type,
+ :json,
+ :parquet)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # A container for specifying S3 Intelligent-Tiering filters. The filters
+ # determine the subset of objects to which the rule applies.
+ #
+ # @!attribute [rw] prefix
+ # An object key name prefix that identifies the subset of objects to
+ # which the configuration applies.
+ # @return [String]
+ #
+ # @!attribute [rw] tags
+ # All of these tags must exist in the object's tag set in order for
+ # the configuration to apply.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringAndOperator AWS API Documentation
+ #
+ class IntelligentTieringAndOperator < Struct.new(
+ :prefix,
+ :tags)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Specifies the S3 Intelligent-Tiering configuration for an Amazon S3
+ # bucket.
+ #
+ # For information about the S3 Intelligent-Tiering storage class, see
+ # [Storage class for automatically optimizing frequently and
+ # infrequently accessed objects][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the S3 Intelligent-Tiering configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] filter
+ # Specifies a bucket filter. The configuration only includes objects
+ # that meet the filter's criteria.
+ # @return [Types::IntelligentTieringFilter]
+ #
+ # @!attribute [rw] status
+ # Specifies the status of the configuration.
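+ #
+ # Valid values are `Enabled` and `Disabled`. A minimal write sketch
+ # (hypothetical names, assuming `s3` is a configured
+ # `Aws::S3::Client`):
+ #
+ #     s3.put_bucket_intelligent_tiering_configuration(
+ #       bucket: "my-bucket",
+ #       id: "archive-config",
+ #       intelligent_tiering_configuration: {
+ #         id: "archive-config",
+ #         status: "Enabled",
+ #         tierings: [{ days: 90, access_tier: "ARCHIVE_ACCESS" }]
+ #       }
+ #     )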
+ # @return [String]
+ #
+ # @!attribute [rw] tierings
+ # Specifies the S3 Intelligent-Tiering storage class tier of the
+ # configuration.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringConfiguration AWS API Documentation
+ #
+ class IntelligentTieringConfiguration < Struct.new(
+ :id,
+ :filter,
+ :status,
+ :tierings)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The `Filter` is used to identify objects that the S3
+ # Intelligent-Tiering configuration applies to.
+ #
+ # @!attribute [rw] prefix
+ # An object key name prefix that identifies the subset of objects to
+ # which the rule applies.
+ #
+ # Replacement must be made for object keys containing special
+ # characters (such as carriage returns) when using XML requests. For
+ # more information, see [ XML related object key constraints][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ # @return [String]
+ #
+ # @!attribute [rw] tag
+ # A container for a key-value pair.
+ # @return [Types::Tag]
+ #
+ # @!attribute [rw] and
+ # A conjunction (logical AND) of predicates, which is used in
+ # evaluating a metrics filter. The operator must have at least two
+ # predicates, and an object must match all of the predicates in order
+ # for the filter to apply.
+ # @return [Types::IntelligentTieringAndOperator]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringFilter AWS API Documentation
+ #
+ class IntelligentTieringFilter < Struct.new(
+ :prefix,
+ :tag,
+ :and)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Object is archived and inaccessible until restored.
+ #
+ # @!attribute [rw] storage_class
+ # @return [String]
+ #
+ # @!attribute [rw] access_tier
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InvalidObjectState AWS API Documentation
+ #
+ class InvalidObjectState < Struct.new(
+ :storage_class,
+ :access_tier)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Specifies the inventory configuration for an Amazon S3 bucket. For
+ # more information, see [GET Bucket inventory][1] in the *Amazon S3 API
+ # Reference*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html
+ #
+ # @!attribute [rw] destination
+ # Contains information about where to publish the inventory results.
+ # @return [Types::InventoryDestination]
+ #
+ # @!attribute [rw] is_enabled
+ # Specifies whether the inventory is enabled or disabled. If set to
+ # `True`, an inventory list is generated. If set to `False`, no
+ # inventory list is generated.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] filter
+ # Specifies an inventory filter. The inventory only includes objects
+ # that meet the filter's criteria.
+ # @return [Types::InventoryFilter]
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the inventory configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] included_object_versions
+ # Object versions to include in the inventory list. If set to `All`,
+ # the list includes all the object versions, which adds the
+ # version-related fields `VersionId`, `IsLatest`, and `DeleteMarker`
+ # to the list. If set to `Current`, the list does not contain these
+ # version-related fields.
+ # @return [String]
+ #
+ # @!attribute [rw] optional_fields
+ # Contains the optional fields that are included in the inventory
+ # results.
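+ #
+ # For example (a non-exhaustive illustration), fields such as `Size`,
+ # `LastModifiedDate`, `ETag`, and `StorageClass`.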
+ # @return [Array] + # + # @!attribute [rw] schedule + # Specifies the schedule for generating inventory results. + # @return [Types::InventorySchedule] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration AWS API Documentation + # + class InventoryConfiguration < Struct.new( + :destination, + :is_enabled, + :filter, + :id, + :included_object_versions, + :optional_fields, + :schedule) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the inventory configuration for an Amazon S3 bucket. + # + # @!attribute [rw] s3_bucket_destination + # Contains the bucket name, file format, bucket owner (optional), and + # prefix (optional) where inventory results are published. + # @return [Types::InventoryS3BucketDestination] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination AWS API Documentation + # + class InventoryDestination < Struct.new( + :s3_bucket_destination) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the type of server-side encryption used to encrypt the + # inventory results. + # + # @!attribute [rw] sses3 + # Specifies the use of SSE-S3 to encrypt delivered inventory reports. + # @return [Types::SSES3] + # + # @!attribute [rw] ssekms + # Specifies the use of SSE-KMS to encrypt delivered inventory reports. + # @return [Types::SSEKMS] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption AWS API Documentation + # + class InventoryEncryption < Struct.new( + :sses3, + :ssekms) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies an inventory filter. The inventory only includes objects + # that meet the filter's criteria. + # + # @!attribute [rw] prefix + # The prefix that an object must have to be included in the inventory + # results. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter AWS API Documentation + # + class InventoryFilter < Struct.new( + :prefix) + SENSITIVE = [] + include Aws::Structure + end + + # Contains the bucket name, file format, bucket owner (optional), and + # prefix (optional) where inventory results are published. + # + # @!attribute [rw] account_id + # The account ID that owns the destination S3 bucket. If no account ID + # is provided, the owner is not validated before exporting data. + # + # Although this value is optional, we strongly recommend that you set + # it to help prevent problems if the destination bucket ownership + # changes. + # + # + # @return [String] + # + # @!attribute [rw] bucket + # The Amazon Resource Name (ARN) of the bucket where inventory results + # will be published. + # @return [String] + # + # @!attribute [rw] format + # Specifies the output format of the inventory results. + # @return [String] + # + # @!attribute [rw] prefix + # The prefix that is prepended to all inventory results. + # @return [String] + # + # @!attribute [rw] encryption + # Contains the type of server-side encryption used to encrypt the + # inventory results. + # @return [Types::InventoryEncryption] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination AWS API Documentation + # + class InventoryS3BucketDestination < Struct.new( + :account_id, + :bucket, + :format, + :prefix, + :encryption) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the schedule for generating inventory results. + # + # @!attribute [rw] frequency + # Specifies how frequently inventory results are produced. 
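+ #
+ # Valid values are `Daily` and `Weekly`. A minimal write sketch
+ # (hypothetical names, assuming `s3` is a configured
+ # `Aws::S3::Client`):
+ #
+ #     s3.put_bucket_inventory_configuration(
+ #       bucket: "my-bucket",
+ #       id: "weekly-inventory",
+ #       inventory_configuration: {
+ #         id: "weekly-inventory",
+ #         is_enabled: true,
+ #         included_object_versions: "Current",
+ #         destination: {
+ #           s3_bucket_destination: { bucket: "arn:aws:s3:::my-inventory-bucket", format: "CSV" }
+ #         },
+ #         schedule: { frequency: "Weekly" }
+ #       }
+ #     )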
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule AWS API Documentation
+ #
+ class InventorySchedule < Struct.new(
+ :frequency)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Specifies JSON as the object's input serialization format.
+ #
+ # @!attribute [rw] type
+ # The type of JSON. Valid values: Document, Lines.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONInput AWS API Documentation
+ #
+ class JSONInput < Struct.new(
+ :type)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Specifies JSON as the request's output serialization format.
+ #
+ # @!attribute [rw] record_delimiter
+ # The value used to separate individual records in the output. If no
+ # value is specified, Amazon S3 uses a newline character ('\\n').
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONOutput AWS API Documentation
+ #
+ class JSONOutput < Struct.new(
+ :record_delimiter)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # A container for specifying the configuration for Lambda notifications.
+ #
+ # @!attribute [rw] id
+ # An optional unique identifier for configurations in a notification
+ # configuration. If you don't provide one, Amazon S3 will assign an
+ # ID.
+ # @return [String]
+ #
+ # @!attribute [rw] lambda_function_arn
+ # The Amazon Resource Name (ARN) of the Lambda function that Amazon S3
+ # invokes when the specified event type occurs.
+ # @return [String]
+ #
+ # @!attribute [rw] events
+ # The Amazon S3 bucket event for which to invoke the Lambda function.
+ # For more information, see [Supported Event Types][1] in the *Amazon
+ # S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ # @return [Array]
+ #
+ # @!attribute [rw] filter
+ # Specifies object key name filtering rules. For information about key
+ # name filtering, see [Configuring Event Notifications][1] in the
+ # *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ # @return [Types::NotificationConfigurationFilter]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration AWS API Documentation
+ #
+ class LambdaFunctionConfiguration < Struct.new(
+ :id,
+ :lambda_function_arn,
+ :events,
+ :filter)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for lifecycle rules. You can add as many as 1000 rules.
+ #
+ # @!attribute [rw] rules
+ # Specifies lifecycle configuration rules for an Amazon S3 bucket.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration AWS API Documentation
+ #
+ class LifecycleConfiguration < Struct.new(
+ :rules)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for the expiration for the lifecycle of the object.
+ #
+ # @!attribute [rw] date
+ # Indicates at what date the object is to be moved or deleted. Should
+ # be in GMT ISO 8601 format.
+ # @return [Time]
+ #
+ # @!attribute [rw] days
+ # Indicates the lifetime, in days, of the objects that are subject to
+ # the rule. The value must be a non-zero positive integer.
+ # @return [Integer]
+ #
+ # @!attribute [rw] expired_object_delete_marker
+ # Indicates whether Amazon S3 will remove a delete marker with no
+ # noncurrent versions. If set to true, the delete marker will be
+ # expired; if set to false, the policy takes no action.
This cannot be
+ # specified with Days or Date in a Lifecycle Expiration Policy.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration AWS API Documentation
+ #
+ class LifecycleExpiration < Struct.new(
+ :date,
+ :days,
+ :expired_object_delete_marker)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # A lifecycle rule for individual objects in an Amazon S3 bucket.
+ #
+ # @!attribute [rw] expiration
+ # Specifies the expiration for the lifecycle of the object in the form
+ # of date, days, and whether the object has a delete marker.
+ # @return [Types::LifecycleExpiration]
+ #
+ # @!attribute [rw] id
+ # Unique identifier for the rule. The value cannot be longer than 255
+ # characters.
+ # @return [String]
+ #
+ # @!attribute [rw] prefix
+ # Prefix identifying one or more objects to which the rule applies.
+ # This is no longer used; use `Filter` instead.
+ #
+ # Replacement must be made for object keys containing special
+ # characters (such as carriage returns) when using XML requests. For
+ # more information, see [ XML related object key constraints][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ # @return [String]
+ #
+ # @!attribute [rw] filter
+ # The `Filter` is used to identify objects that a Lifecycle Rule
+ # applies to. A `Filter` must have exactly one of `Prefix`, `Tag`, or
+ # `And` specified. `Filter` is required if the `LifecycleRule` does
+ # not contain a `Prefix` element.
+ # @return [Types::LifecycleRuleFilter]
+ #
+ # @!attribute [rw] status
+ # If 'Enabled', the rule is currently being applied. If
+ # 'Disabled', the rule is not currently being applied.
+ # @return [String]
+ #
+ # @!attribute [rw] transitions
+ # Specifies when an Amazon S3 object transitions to a specified
+ # storage class.
+ # @return [Array]
+ #
+ # @!attribute [rw] noncurrent_version_transitions
+ # Specifies the transition rule for the lifecycle rule that describes
+ # when noncurrent objects transition to a specific storage class. If
+ # your bucket is versioning-enabled (or versioning is suspended), you
+ # can set this action to request that Amazon S3 transition noncurrent
+ # object versions to a specific storage class at a set period in the
+ # object's lifetime.
+ # @return [Array]
+ #
+ # @!attribute [rw] noncurrent_version_expiration
+ # Specifies when noncurrent object versions expire. Upon expiration,
+ # Amazon S3 permanently deletes the noncurrent object versions. You
+ # set this lifecycle configuration action on a bucket that has
+ # versioning enabled (or suspended) to request that Amazon S3 delete
+ # noncurrent object versions at a specific period in the object's
+ # lifetime.
+ # @return [Types::NoncurrentVersionExpiration]
+ #
+ # @!attribute [rw] abort_incomplete_multipart_upload
+ # Specifies the days since the initiation of an incomplete multipart
+ # upload that Amazon S3 will wait before permanently removing all
+ # parts of the upload. For more information, see [ Aborting Incomplete
+ # Multipart Uploads Using a Bucket Lifecycle Policy][1] in the *Amazon
+ # S3 User Guide*.
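+ #
+ # A minimal rule sketch using this element (hypothetical names,
+ # assuming `s3` is a configured `Aws::S3::Client`):
+ #
+ #     s3.put_bucket_lifecycle_configuration(
+ #       bucket: "my-bucket",
+ #       lifecycle_configuration: {
+ #         rules: [{
+ #           id: "abort-stale-mpu",
+ #           status: "Enabled",
+ #           filter: { prefix: "" },
+ #           abort_incomplete_multipart_upload: { days_after_initiation: 7 }
+ #         }]
+ #       }
+ #     )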
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + # @return [Types::AbortIncompleteMultipartUpload] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule AWS API Documentation + # + class LifecycleRule < Struct.new( + :expiration, + :id, + :prefix, + :filter, + :status, + :transitions, + :noncurrent_version_transitions, + :noncurrent_version_expiration, + :abort_incomplete_multipart_upload) + SENSITIVE = [] + include Aws::Structure + end + + # This is used in a Lifecycle Rule Filter to apply a logical AND to two + # or more predicates. The Lifecycle Rule will apply to any object + # matching all of the predicates configured inside the And operator. + # + # @!attribute [rw] prefix + # Prefix identifying one or more objects to which the rule applies. + # @return [String] + # + # @!attribute [rw] tags + # All of these tags must exist in the object's tag set in order for + # the rule to apply. + # @return [Array] + # + # @!attribute [rw] object_size_greater_than + # Minimum object size to which the rule applies. + # @return [Integer] + # + # @!attribute [rw] object_size_less_than + # Maximum object size to which the rule applies. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator AWS API Documentation + # + class LifecycleRuleAndOperator < Struct.new( + :prefix, + :tags, + :object_size_greater_than, + :object_size_less_than) + SENSITIVE = [] + include Aws::Structure + end + + # The `Filter` is used to identify objects that a Lifecycle Rule applies + # to. A `Filter` must have exactly one of `Prefix`, `Tag`, or `And` + # specified. + # + # @!attribute [rw] prefix + # Prefix identifying one or more objects to which the rule applies. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @!attribute [rw] tag + # This tag must exist in the object's tag set in order for the rule + # to apply. + # @return [Types::Tag] + # + # @!attribute [rw] object_size_greater_than + # Minimum object size to which the rule applies. + # @return [Integer] + # + # @!attribute [rw] object_size_less_than + # Maximum object size to which the rule applies. + # @return [Integer] + # + # @!attribute [rw] and + # This is used in a Lifecycle Rule Filter to apply a logical AND to + # two or more predicates. The Lifecycle Rule will apply to any object + # matching all of the predicates configured inside the And operator. + # @return [Types::LifecycleRuleAndOperator] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter AWS API Documentation + # + class LifecycleRuleFilter < Struct.new( + :prefix, + :tag, + :object_size_greater_than, + :object_size_less_than, + :and) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] is_truncated + # Indicates whether the returned list of analytics configurations is + # complete. A value of true indicates that the list is not complete + # and the NextContinuationToken will be provided for a subsequent + # request. + # @return [Boolean] + # + # @!attribute [rw] continuation_token + # The marker that is used as a starting point for this analytics + # configuration list response. 
This value is present if it was sent in
+ # the request.
+ # @return [String]
+ #
+ # @!attribute [rw] next_continuation_token
+ # `NextContinuationToken` is sent when `isTruncated` is true, which
+ # indicates that there are more analytics configurations to list. The
+ # next request must include this `NextContinuationToken`. The token is
+ # obfuscated and is not a usable value.
+ # @return [String]
+ #
+ # @!attribute [rw] analytics_configuration_list
+ # The list of analytics configurations for a bucket.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput AWS API Documentation
+ #
+ class ListBucketAnalyticsConfigurationsOutput < Struct.new(
+ :is_truncated,
+ :continuation_token,
+ :next_continuation_token,
+ :analytics_configuration_list)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket from which analytics configurations are
+ # retrieved.
+ # @return [String]
+ #
+ # @!attribute [rw] continuation_token
+ # The ContinuationToken that represents a placeholder from where this
+ # request should begin.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest AWS API Documentation
+ #
+ class ListBucketAnalyticsConfigurationsRequest < Struct.new(
+ :bucket,
+ :continuation_token,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] is_truncated
+ # Indicates whether the returned list of S3 Intelligent-Tiering
+ # configurations is complete. A value of `true` indicates that the
+ # list is not complete and the `NextContinuationToken` will be
+ # provided for a subsequent request.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] continuation_token
+ # The `ContinuationToken` that represents a placeholder from where
+ # this request should begin.
+ # @return [String]
+ #
+ # @!attribute [rw] next_continuation_token
+ # The marker used to continue this S3 Intelligent-Tiering
+ # configuration listing. Use the `NextContinuationToken` from this
+ # response to continue the listing in a subsequent request. The
+ # continuation token is an opaque value that Amazon S3 understands.
+ # @return [String]
+ #
+ # @!attribute [rw] intelligent_tiering_configuration_list
+ # The list of S3 Intelligent-Tiering configurations for a bucket.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurationsOutput AWS API Documentation
+ #
+ class ListBucketIntelligentTieringConfigurationsOutput < Struct.new(
+ :is_truncated,
+ :continuation_token,
+ :next_continuation_token,
+ :intelligent_tiering_configuration_list)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the Amazon S3 bucket whose configuration you want to
+ # modify or retrieve.
+ # @return [String]
+ #
+ # @!attribute [rw] continuation_token
+ # The `ContinuationToken` that represents a placeholder from where
+ # this request should begin.
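+ #
+ # A pagination sketch (hypothetical bucket name, assuming `s3` is a
+ # configured `Aws::S3::Client`):
+ #
+ #     params = { bucket: "my-bucket" }
+ #     loop do
+ #       resp = s3.list_bucket_intelligent_tiering_configurations(params)
+ #       resp.intelligent_tiering_configuration_list.each { |c| puts c.id }
+ #       break unless resp.is_truncated
+ #       params[:continuation_token] = resp.next_continuation_token
+ #     end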
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurationsRequest AWS API Documentation + # + class ListBucketIntelligentTieringConfigurationsRequest < Struct.new( + :bucket, + :continuation_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] continuation_token + # If sent in the request, the marker that is used as a starting point + # for this inventory configuration list response. + # @return [String] + # + # @!attribute [rw] inventory_configuration_list + # The list of inventory configurations for a bucket. + # @return [Array] + # + # @!attribute [rw] is_truncated + # Tells whether the returned list of inventory configurations is + # complete. A value of true indicates that the list is not complete + # and the NextContinuationToken is provided for a subsequent request. + # @return [Boolean] + # + # @!attribute [rw] next_continuation_token + # The marker used to continue this inventory configuration listing. + # Use the `NextContinuationToken` from this response to continue the + # listing in a subsequent request. The continuation token is an opaque + # value that Amazon S3 understands. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput AWS API Documentation + # + class ListBucketInventoryConfigurationsOutput < Struct.new( + :continuation_token, + :inventory_configuration_list, + :is_truncated, + :next_continuation_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the inventory configurations to + # retrieve. + # @return [String] + # + # @!attribute [rw] continuation_token + # The marker used to continue an inventory configuration listing that + # has been truncated. Use the NextContinuationToken from a previously + # truncated list response to continue the listing. The continuation + # token is an opaque value that Amazon S3 understands. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest AWS API Documentation + # + class ListBucketInventoryConfigurationsRequest < Struct.new( + :bucket, + :continuation_token, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] is_truncated + # Indicates whether the returned list of metrics configurations is + # complete. A value of true indicates that the list is not complete + # and the NextContinuationToken will be provided for a subsequent + # request. + # @return [Boolean] + # + # @!attribute [rw] continuation_token + # The marker that is used as a starting point for this metrics + # configuration list response. This value is present if it was sent in + # the request. + # @return [String] + # + # @!attribute [rw] next_continuation_token + # The marker used to continue a metrics configuration listing that has + # been truncated. Use the `NextContinuationToken` from a previously + # truncated list response to continue the listing. The continuation + # token is an opaque value that Amazon S3 understands. + # @return [String] + # + # @!attribute [rw] metrics_configuration_list + # The list of metrics configurations for a bucket. 
+ # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput AWS API Documentation + # + class ListBucketMetricsConfigurationsOutput < Struct.new( + :is_truncated, + :continuation_token, + :next_continuation_token, + :metrics_configuration_list) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the metrics configurations to + # retrieve. + # @return [String] + # + # @!attribute [rw] continuation_token + # The marker that is used to continue a metrics configuration listing + # that has been truncated. Use the NextContinuationToken from a + # previously truncated list response to continue the listing. The + # continuation token is an opaque value that Amazon S3 understands. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest AWS API Documentation + # + class ListBucketMetricsConfigurationsRequest < Struct.new( + :bucket, + :continuation_token, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] buckets + # The list of buckets owned by the requester. + # @return [Array] + # + # @!attribute [rw] owner + # The owner of the buckets listed. + # @return [Types::Owner] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput AWS API Documentation + # + class ListBucketsOutput < Struct.new( + :buckets, + :owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket to which the multipart upload was initiated. + # Does not return the access point ARN or access point alias if used. + # @return [String] + # + # @!attribute [rw] key_marker + # The key at or after which the listing began. + # @return [String] + # + # @!attribute [rw] upload_id_marker + # Upload ID after which listing began. + # @return [String] + # + # @!attribute [rw] next_key_marker + # When a list is truncated, this element specifies the value that + # should be used for the key-marker request parameter in a subsequent + # request. + # @return [String] + # + # @!attribute [rw] prefix + # When a prefix is provided in the request, this field contains the + # specified prefix. The result contains only keys starting with the + # specified prefix. + # @return [String] + # + # @!attribute [rw] delimiter + # Contains the delimiter you specified in the request. If you don't + # specify a delimiter in your request, this element is absent from the + # response. + # @return [String] + # + # @!attribute [rw] next_upload_id_marker + # When a list is truncated, this element specifies the value that + # should be used for the `upload-id-marker` request parameter in a + # subsequent request. + # @return [String] + # + # @!attribute [rw] max_uploads + # Maximum number of multipart uploads that could have been included in + # the response. + # @return [Integer] + # + # @!attribute [rw] is_truncated + # Indicates whether the returned list of multipart uploads is + # truncated. A value of true indicates that the list was truncated. + # The list can be truncated if the number of multipart uploads exceeds + # the limit allowed or specified by max uploads. 
+ # @return [Boolean] + # + # @!attribute [rw] uploads + # Container for elements related to a particular multipart upload. A + # response can contain zero or more `Upload` elements. + # @return [Array] + # + # @!attribute [rw] common_prefixes + # If you specify a delimiter in the request, then the result returns + # each distinct key prefix containing the delimiter in a + # `CommonPrefixes` element. The distinct key prefixes are returned in + # the `Prefix` child element. + # @return [Array] + # + # @!attribute [rw] encoding_type + # Encoding type used by Amazon S3 to encode object keys in the + # response. + # + # If you specify `encoding-type` request parameter, Amazon S3 includes + # this element in the response, and returns encoded key name values in + # the following response elements: + # + # `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput AWS API Documentation + # + class ListMultipartUploadsOutput < Struct.new( + :bucket, + :key_marker, + :upload_id_marker, + :next_key_marker, + :prefix, + :delimiter, + :next_upload_id_marker, + :max_uploads, + :is_truncated, + :uploads, + :common_prefixes, + :encoding_type) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket to which the multipart upload was initiated. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] delimiter + # Character you use to group keys. + # + # All keys that contain the same string between the prefix, if + # specified, and the first occurrence of the delimiter after the + # prefix are grouped under a single result element, `CommonPrefixes`. + # If you don't specify the prefix parameter, then the substring + # starts at the beginning of the key. The keys that are grouped under + # `CommonPrefixes` result element are not returned elsewhere in the + # response. + # @return [String] + # + # @!attribute [rw] encoding_type + # Requests Amazon S3 to encode the object keys in the response and + # specifies the encoding method to use. An object key may contain any + # Unicode character; however, XML 1.0 parser cannot parse some + # characters, such as characters with an ASCII value from 0 to 10. 
For + # characters that are not supported in XML 1.0, you can add this + # parameter to request that Amazon S3 encode the keys in the response. + # @return [String] + # + # @!attribute [rw] key_marker + # Together with upload-id-marker, this parameter specifies the + # multipart upload after which listing should begin. + # + # If `upload-id-marker` is not specified, only the keys + # lexicographically greater than the specified `key-marker` will be + # included in the list. + # + # If `upload-id-marker` is specified, any multipart uploads for a key + # equal to the `key-marker` might also be included, provided those + # multipart uploads have upload IDs lexicographically greater than the + # specified `upload-id-marker`. + # @return [String] + # + # @!attribute [rw] max_uploads + # Sets the maximum number of multipart uploads, from 1 to 1,000, to + # return in the response body. 1,000 is the maximum number of uploads + # that can be returned in a response. + # @return [Integer] + # + # @!attribute [rw] prefix + # Lists in-progress uploads only for those keys that begin with the + # specified prefix. You can use prefixes to separate a bucket into + # different grouping of keys. (You can think of using prefix to make + # groups in the same way you'd use a folder in a file system.) + # @return [String] + # + # @!attribute [rw] upload_id_marker + # Together with key-marker, specifies the multipart upload after which + # listing should begin. If key-marker is not specified, the + # upload-id-marker parameter is ignored. Otherwise, any multipart + # uploads for a key equal to the key-marker might be included in the + # list only if they have an upload ID lexicographically greater than + # the specified `upload-id-marker`. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest AWS API Documentation + # + class ListMultipartUploadsRequest < Struct.new( + :bucket, + :delimiter, + :encoding_type, + :key_marker, + :max_uploads, + :prefix, + :upload_id_marker, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] is_truncated + # A flag that indicates whether Amazon S3 returned all of the results + # that satisfied the search criteria. If your results were truncated, + # you can make a follow-up paginated request using the NextKeyMarker + # and NextVersionIdMarker response parameters as a starting place in + # another request to return the rest of the results. + # @return [Boolean] + # + # @!attribute [rw] key_marker + # Marks the last key returned in a truncated response. + # @return [String] + # + # @!attribute [rw] version_id_marker + # Marks the last version of the key returned in a truncated response. + # @return [String] + # + # @!attribute [rw] next_key_marker + # When the number of responses exceeds the value of `MaxKeys`, + # `NextKeyMarker` specifies the first key not returned that satisfies + # the search criteria. Use this value for the key-marker request + # parameter in a subsequent request. + # @return [String] + # + # @!attribute [rw] next_version_id_marker + # When the number of responses exceeds the value of `MaxKeys`, + # `NextVersionIdMarker` specifies the first object version not + # returned that satisfies the search criteria. 
Use this value for the + # version-id-marker request parameter in a subsequent request. + # @return [String] + # + # @!attribute [rw] versions + # Container for version information. + # @return [Array] + # + # @!attribute [rw] delete_markers + # Container for an object that is a delete marker. + # @return [Array] + # + # @!attribute [rw] name + # The bucket name. + # @return [String] + # + # @!attribute [rw] prefix + # Selects objects that start with the value supplied by this + # parameter. + # @return [String] + # + # @!attribute [rw] delimiter + # The delimiter grouping the included keys. A delimiter is a character + # that you specify to group keys. All keys that contain the same + # string between the prefix and the first occurrence of the delimiter + # are grouped under a single result element in `CommonPrefixes`. These + # groups are counted as one result against the max-keys limitation. + # These keys are not returned elsewhere in the response. + # @return [String] + # + # @!attribute [rw] max_keys + # Specifies the maximum number of objects to return. + # @return [Integer] + # + # @!attribute [rw] common_prefixes + # All of the keys rolled up into a common prefix count as a single + # return when calculating the number of returns. + # @return [Array] + # + # @!attribute [rw] encoding_type + # Encoding type used by Amazon S3 to encode object key names in the + # XML response. + # + # If you specify encoding-type request parameter, Amazon S3 includes + # this element in the response, and returns encoded key name values in + # the following response elements: + # + # `KeyMarker, NextKeyMarker, Prefix, Key`, and `Delimiter`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput AWS API Documentation + # + class ListObjectVersionsOutput < Struct.new( + :is_truncated, + :key_marker, + :version_id_marker, + :next_key_marker, + :next_version_id_marker, + :versions, + :delete_markers, + :name, + :prefix, + :delimiter, + :max_keys, + :common_prefixes, + :encoding_type) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name that contains the objects. + # @return [String] + # + # @!attribute [rw] delimiter + # A delimiter is a character that you specify to group keys. All keys + # that contain the same string between the `prefix` and the first + # occurrence of the delimiter are grouped under a single result + # element in CommonPrefixes. These groups are counted as one result + # against the max-keys limitation. These keys are not returned + # elsewhere in the response. + # @return [String] + # + # @!attribute [rw] encoding_type + # Requests Amazon S3 to encode the object keys in the response and + # specifies the encoding method to use. An object key may contain any + # Unicode character; however, XML 1.0 parser cannot parse some + # characters, such as characters with an ASCII value from 0 to 10. For + # characters that are not supported in XML 1.0, you can add this + # parameter to request that Amazon S3 encode the keys in the response. + # @return [String] + # + # @!attribute [rw] key_marker + # Specifies the key to start with when listing objects in a bucket. + # @return [String] + # + # @!attribute [rw] max_keys + # Sets the maximum number of keys returned in the response. By default + # the action returns up to 1,000 key names. The response might contain + # fewer keys but will never contain more. 
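+      #
+      #   When a listing is truncated, the next page can be requested with
+      #   the returned markers (a minimal sketch, assuming `s3` is a
+      #   configured `Aws::S3::Client`; the bucket name is hypothetical):
+      #
+      #       resp = s3.list_object_versions(bucket: "example-bucket", max_keys: 100)
+      #       while resp.is_truncated
+      #         resp = s3.list_object_versions(
+      #           bucket: "example-bucket",
+      #           max_keys: 100,
+      #           key_marker: resp.next_key_marker,
+      #           version_id_marker: resp.next_version_id_marker)
+      #       end
+      #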
+      #   If additional keys satisfy the search criteria, but were not
+      #   returned because max-keys was exceeded, the response contains
+      #   <isTruncated>true</isTruncated>. To return the
+      #   additional keys, see key-marker and version-id-marker.
+      #   @return [Integer]
+      #
+      # @!attribute [rw] prefix
+      #   Use this parameter to select only those keys that begin with the
+      #   specified prefix. You can use prefixes to separate a bucket into
+      #   different groupings of keys. (You can think of using prefix to make
+      #   groups in the same way you'd use a folder in a file system.) You
+      #   can use prefix with delimiter to roll up numerous objects into a
+      #   single result under CommonPrefixes.
+      #   @return [String]
+      #
+      # @!attribute [rw] version_id_marker
+      #   Specifies the object version you want to start listing from.
+      #   @return [String]
+      #
+      # @!attribute [rw] expected_bucket_owner
+      #   The account ID of the expected bucket owner. If the bucket is owned
+      #   by a different account, the request fails with the HTTP status code
+      #   `403 Forbidden` (access denied).
+      #   @return [String]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest AWS API Documentation
+      #
+      class ListObjectVersionsRequest < Struct.new(
+        :bucket,
+        :delimiter,
+        :encoding_type,
+        :key_marker,
+        :max_keys,
+        :prefix,
+        :version_id_marker,
+        :expected_bucket_owner)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # @!attribute [rw] is_truncated
+      #   A flag that indicates whether Amazon S3 returned all of the results
+      #   that satisfied the search criteria.
+      #   @return [Boolean]
+      #
+      # @!attribute [rw] marker
+      #   Indicates where in the bucket listing begins. Marker is included in
+      #   the response if it was sent with the request.
+      #   @return [String]
+      #
+      # @!attribute [rw] next_marker
+      #   When the response is truncated (the IsTruncated element value in
+      #   the response is true), you can use the key name in this field as
+      #   the marker in a subsequent request to get the next set of objects.
+      #   Amazon S3 lists objects in alphabetical order. Note: this element
+      #   is returned only if you specify the delimiter request parameter.
+      #   If the response does not include the NextMarker and it is
+      #   truncated, you can use the value of the last Key in the response
+      #   as the marker in the subsequent request to get the next set of
+      #   object keys.
+      #   @return [String]
+      #
+      # @!attribute [rw] contents
+      #   Metadata about each object returned.
+      #   @return [Array]
+      #
+      # @!attribute [rw] name
+      #   The bucket name.
+      #   @return [String]
+      #
+      # @!attribute [rw] prefix
+      #   Keys that begin with the indicated prefix.
+      #   @return [String]
+      #
+      # @!attribute [rw] delimiter
+      #   Causes keys that contain the same string between the prefix and the
+      #   first occurrence of the delimiter to be rolled up into a single
+      #   result element in the `CommonPrefixes` collection. These rolled-up
+      #   keys are not returned elsewhere in the response. Each rolled-up
+      #   result counts as only one return against the `MaxKeys` value.
+      #   @return [String]
+      #
+      # @!attribute [rw] max_keys
+      #   The maximum number of keys returned in the response body.
+      #   @return [Integer]
+      #
+      # @!attribute [rw] common_prefixes
+      #   All of the keys (up to 1,000) rolled up in a common prefix count as
+      #   a single return when calculating the number of returns.
+      #
+      #   A response can contain CommonPrefixes only if you specify a
+      #   delimiter.
+ # + # CommonPrefixes contains all (if there are any) keys between Prefix + # and the next occurrence of the string specified by the delimiter. + # + # CommonPrefixes lists keys that act like subdirectories in the + # directory specified by Prefix. + # + # For example, if the prefix is notes/ and the delimiter is a slash + # (/) as in notes/summer/july, the common prefix is notes/summer/. All + # of the keys that roll up into a common prefix count as a single + # return when calculating the number of returns. + # @return [Array] + # + # @!attribute [rw] encoding_type + # Encoding type used by Amazon S3 to encode object keys in the + # response. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput AWS API Documentation + # + class ListObjectsOutput < Struct.new( + :is_truncated, + :marker, + :next_marker, + :contents, + :name, + :prefix, + :delimiter, + :max_keys, + :common_prefixes, + :encoding_type) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket containing the objects. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] delimiter + # A delimiter is a character you use to group keys. + # @return [String] + # + # @!attribute [rw] encoding_type + # Requests Amazon S3 to encode the object keys in the response and + # specifies the encoding method to use. An object key may contain any + # Unicode character; however, XML 1.0 parser cannot parse some + # characters, such as characters with an ASCII value from 0 to 10. For + # characters that are not supported in XML 1.0, you can add this + # parameter to request that Amazon S3 encode the keys in the response. + # @return [String] + # + # @!attribute [rw] marker + # Marker is where you want Amazon S3 to start listing from. Amazon S3 + # starts listing after this specified key. Marker can be any key in + # the bucket. + # @return [String] + # + # @!attribute [rw] max_keys + # Sets the maximum number of keys returned in the response. By default + # the action returns up to 1,000 key names. The response might contain + # fewer keys but will never contain more. + # @return [Integer] + # + # @!attribute [rw] prefix + # Limits the response to keys that begin with the specified prefix. 
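+      #
+      #   For example, grouping keys under a prefix by delimiter (a minimal
+      #   sketch, assuming `s3` is a configured `Aws::S3::Client`; the
+      #   bucket name is hypothetical):
+      #
+      #       resp = s3.list_objects(
+      #         bucket: "example-bucket", prefix: "notes/", delimiter: "/")
+      #       resp.common_prefixes.map(&:prefix) # => e.g. ["notes/summer/"]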
+ # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that she or he will be charged for + # the list objects request. Bucket owners need not specify this + # parameter in their requests. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest AWS API Documentation + # + class ListObjectsRequest < Struct.new( + :bucket, + :delimiter, + :encoding_type, + :marker, + :max_keys, + :prefix, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] is_truncated + # Set to false if all of the results were returned. Set to true if + # more keys are available to return. If the number of results exceeds + # that specified by MaxKeys, all of the results might not be returned. + # @return [Boolean] + # + # @!attribute [rw] contents + # Metadata about each object returned. + # @return [Array] + # + # @!attribute [rw] name + # The bucket name. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] prefix + # Keys that begin with the indicated prefix. + # @return [String] + # + # @!attribute [rw] delimiter + # Causes keys that contain the same string between the prefix and the + # first occurrence of the delimiter to be rolled up into a single + # result element in the CommonPrefixes collection. These rolled-up + # keys are not returned elsewhere in the response. Each rolled-up + # result counts as only one return against the `MaxKeys` value. + # @return [String] + # + # @!attribute [rw] max_keys + # Sets the maximum number of keys returned in the response. By default + # the action returns up to 1,000 key names. The response might contain + # fewer keys but will never contain more. + # @return [Integer] + # + # @!attribute [rw] common_prefixes + # All of the keys (up to 1,000) rolled up into a common prefix count + # as a single return when calculating the number of returns. + # + # A response can contain `CommonPrefixes` only if you specify a + # delimiter. 
+      #
+      #   `CommonPrefixes` contains all (if there are any) keys between
+      #   `Prefix` and the next occurrence of the string specified by a
+      #   delimiter.
+      #
+      #   `CommonPrefixes` lists keys that act like subdirectories in the
+      #   directory specified by `Prefix`.
+      #
+      #   For example, if the prefix is `notes/` and the delimiter is a slash
+      #   (`/`) as in `notes/summer/july`, the common prefix is
+      #   `notes/summer/`. All of the keys that roll up into a common prefix
+      #   count as a single return when calculating the number of returns.
+      #   @return [Array]
+      #
+      # @!attribute [rw] encoding_type
+      #   Encoding type used by Amazon S3 to encode object key names in the
+      #   XML response.
+      #
+      #   If you specify the encoding-type request parameter, Amazon S3
+      #   includes this element in the response, and returns encoded key name
+      #   values in the following response elements:
+      #
+      #   `Delimiter, Prefix, Key,` and `StartAfter`.
+      #   @return [String]
+      #
+      # @!attribute [rw] key_count
+      #   `KeyCount` is the number of keys returned with this request.
+      #   `KeyCount` is always less than or equal to the `MaxKeys` field.
+      #   For example, if you ask for 50 keys, the result includes at most
+      #   50 keys.
+      #   @return [Integer]
+      #
+      # @!attribute [rw] continuation_token
+      #   If ContinuationToken was sent with the request, it is included in
+      #   the response.
+      #   @return [String]
+      #
+      # @!attribute [rw] next_continuation_token
+      #   `NextContinuationToken` is sent when `isTruncated` is true, which
+      #   means there are more keys in the bucket that can be listed.
+      #   Subsequent list requests to Amazon S3 can be continued with this
+      #   `NextContinuationToken`. `NextContinuationToken` is obfuscated and
+      #   is not a real key.
+      #   @return [String]
+      #
+      # @!attribute [rw] start_after
+      #   If StartAfter was sent with the request, it is included in the
+      #   response.
+      #   @return [String]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output AWS API Documentation
+      #
+      class ListObjectsV2Output < Struct.new(
+        :is_truncated,
+        :contents,
+        :name,
+        :prefix,
+        :delimiter,
+        :max_keys,
+        :common_prefixes,
+        :encoding_type,
+        :key_count,
+        :continuation_token,
+        :next_continuation_token,
+        :start_after)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # @!attribute [rw] bucket
+      #   Bucket name to list.
+      #
+      #   When using this action with an access point, you must direct
+      #   requests to the access point hostname. The access point hostname
+      #   takes the form
+      #   *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+      #   When using this action with an access point through the Amazon Web
+      #   Services SDKs, you provide the access point ARN in place of the
+      #   bucket name. For more information about access point ARNs, see
+      #   [Using access points][1] in the *Amazon S3 User Guide*.
+      #
+      #   When using this action with Amazon S3 on Outposts, you must direct
+      #   requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+      #   takes the form `
+      #   AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+      #   When using this action with S3 on Outposts through the Amazon Web
+      #   Services SDKs, you provide the Outposts bucket ARN in place of the
+      #   bucket name. For more information about S3 on Outposts ARNs, see
+      #   [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
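+      #
+      #   For example, paging through a bucket with continuation tokens (a
+      #   minimal sketch, assuming `s3` is a configured `Aws::S3::Client`;
+      #   the bucket name is hypothetical):
+      #
+      #       resp = s3.list_objects_v2(bucket: "example-bucket", max_keys: 100)
+      #       loop do
+      #         resp.contents.each { |object| puts object.key }
+      #         break unless resp.is_truncated
+      #         resp = s3.list_objects_v2(
+      #           bucket: "example-bucket",
+      #           max_keys: 100,
+      #           continuation_token: resp.next_continuation_token)
+      #       end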
+      #
+      #
+      #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+      #   [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+      #   @return [String]
+      #
+      # @!attribute [rw] delimiter
+      #   A delimiter is a character you use to group keys.
+      #   @return [String]
+      #
+      # @!attribute [rw] encoding_type
+      #   Encoding type used by Amazon S3 to encode object keys in the
+      #   response.
+      #   @return [String]
+      #
+      # @!attribute [rw] max_keys
+      #   Sets the maximum number of keys returned in the response. By default
+      #   the action returns up to 1,000 key names. The response might contain
+      #   fewer keys but will never contain more.
+      #   @return [Integer]
+      #
+      # @!attribute [rw] prefix
+      #   Limits the response to keys that begin with the specified prefix.
+      #   @return [String]
+      #
+      # @!attribute [rw] continuation_token
+      #   ContinuationToken indicates to Amazon S3 that the list is being
+      #   continued on this bucket with a token. ContinuationToken is
+      #   obfuscated and is not a real key.
+      #   @return [String]
+      #
+      # @!attribute [rw] fetch_owner
+      #   The owner field is not present in ListObjectsV2 responses by
+      #   default. If you want the owner field returned with each key in the
+      #   result, set the fetch owner field to true.
+      #   @return [Boolean]
+      #
+      # @!attribute [rw] start_after
+      #   StartAfter is where you want Amazon S3 to start listing from. Amazon
+      #   S3 starts listing after this specified key. StartAfter can be any
+      #   key in the bucket.
+      #   @return [String]
+      #
+      # @!attribute [rw] request_payer
+      #   Confirms that the requester knows that they will be charged for the
+      #   list objects request in V2 style. Bucket owners need not specify
+      #   this parameter in their requests.
+      #   @return [String]
+      #
+      # @!attribute [rw] expected_bucket_owner
+      #   The account ID of the expected bucket owner. If the bucket is owned
+      #   by a different account, the request fails with the HTTP status code
+      #   `403 Forbidden` (access denied).
+      #   @return [String]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request AWS API Documentation
+      #
+      class ListObjectsV2Request < Struct.new(
+        :bucket,
+        :delimiter,
+        :encoding_type,
+        :max_keys,
+        :prefix,
+        :continuation_token,
+        :fetch_owner,
+        :start_after,
+        :request_payer,
+        :expected_bucket_owner)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # @!attribute [rw] abort_date
+      #   If the bucket has a lifecycle rule configured with an action to
+      #   abort incomplete multipart uploads and the prefix in the lifecycle
+      #   rule matches the object name in the request, then the response
+      #   includes this header indicating when the initiated multipart upload
+      #   becomes eligible for an abort operation. For more information, see
+      #   [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle
+      #   Policy][1].
+      #
+      #   The response will also include the `x-amz-abort-rule-id` header
+      #   that provides the ID of the lifecycle configuration rule that
+      #   defines this action.
+      #
+      #
+      #
+      #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+      #   @return [Time]
+      #
+      # @!attribute [rw] abort_rule_id
+      #   This header is returned along with the `x-amz-abort-date` header.
+      #   It identifies the applicable lifecycle configuration rule that
+      #   defines the action to abort incomplete multipart uploads.
+      #   @return [String]
+      #
+      # @!attribute [rw] bucket
+      #   The name of the bucket to which the multipart upload was initiated.
+ # Does not return the access point ARN or access point alias if used. + # @return [String] + # + # @!attribute [rw] key + # Object key for which the multipart upload was initiated. + # @return [String] + # + # @!attribute [rw] upload_id + # Upload ID identifying the multipart upload whose parts are being + # listed. + # @return [String] + # + # @!attribute [rw] part_number_marker + # When a list is truncated, this element specifies the last part in + # the list, as well as the value to use for the part-number-marker + # request parameter in a subsequent request. + # @return [Integer] + # + # @!attribute [rw] next_part_number_marker + # When a list is truncated, this element specifies the last part in + # the list, as well as the value to use for the part-number-marker + # request parameter in a subsequent request. + # @return [Integer] + # + # @!attribute [rw] max_parts + # Maximum number of parts that were allowed in the response. + # @return [Integer] + # + # @!attribute [rw] is_truncated + # Indicates whether the returned list of parts is truncated. A true + # value indicates that the list was truncated. A list can be truncated + # if the number of parts exceeds the limit returned in the MaxParts + # element. + # @return [Boolean] + # + # @!attribute [rw] parts + # Container for elements related to a particular part. A response can + # contain zero or more `Part` elements. + # @return [Array] + # + # @!attribute [rw] initiator + # Container element that identifies who initiated the multipart + # upload. If the initiator is an Amazon Web Services account, this + # element provides the same information as the `Owner` element. If the + # initiator is an IAM User, this element provides the user ARN and + # display name. + # @return [Types::Initiator] + # + # @!attribute [rw] owner + # Container element that identifies the object owner, after the object + # is created. If multipart upload is initiated by an IAM user, this + # element provides the parent account ID and display name. + # @return [Types::Owner] + # + # @!attribute [rw] storage_class + # Class of storage (STANDARD or REDUCED\_REDUNDANCY) used to store the + # uploaded object. + # @return [String] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # The algorithm that was used to create a checksum of the object. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput AWS API Documentation + # + class ListPartsOutput < Struct.new( + :abort_date, + :abort_rule_id, + :bucket, + :key, + :upload_id, + :part_number_marker, + :next_part_number_marker, + :max_parts, + :is_truncated, + :parts, + :initiator, + :owner, + :storage_class, + :request_charged, + :checksum_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket to which the parts are being uploaded. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. 
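+      #
+      #   For example, collecting all parts of an in-progress upload (a
+      #   minimal sketch, assuming `s3` is a configured `Aws::S3::Client`;
+      #   the bucket and key are hypothetical, and `upload_id` comes from a
+      #   prior `create_multipart_upload` call):
+      #
+      #       req = { bucket: "example-bucket", key: "big-file.bin",
+      #               upload_id: upload_id }
+      #       parts = []
+      #       loop do
+      #         resp = s3.list_parts(req)
+      #         parts.concat(resp.parts)
+      #         break unless resp.is_truncated
+      #         req[:part_number_marker] = resp.next_part_number_marker
+      #       end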
+ # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Object key for which the multipart upload was initiated. + # @return [String] + # + # @!attribute [rw] max_parts + # Sets the maximum number of parts to return. + # @return [Integer] + # + # @!attribute [rw] part_number_marker + # Specifies the part after which listing should begin. Only parts with + # higher part numbers will be listed. + # @return [Integer] + # + # @!attribute [rw] upload_id + # Upload ID identifying the multipart upload whose parts are being + # listed. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # The server-side encryption (SSE) algorithm used to encrypt the + # object. This parameter is needed only when the object was created + # using a checksum algorithm. For more information, see [Protecting + # data using SSE-C keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [String] + # + # @!attribute [rw] sse_customer_key + # The server-side encryption (SSE) customer managed key. This + # parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # The MD5 server-side encryption (SSE) customer managed key. This + # parameter is needed only when the object was created using a + # checksum algorithm. For more information, see [Protecting data using + # SSE-C keys][1] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest AWS API Documentation + # + class ListPartsRequest < Struct.new( + :bucket, + :key, + :max_parts, + :part_number_marker, + :upload_id, + :request_payer, + :expected_bucket_owner, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5) + SENSITIVE = [:sse_customer_key] + include Aws::Structure + end + + # Describes where logs are stored and the prefix that Amazon S3 assigns + # to all log object keys for a bucket. For more information, see [PUT + # Bucket logging][1] in the *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html + # + # @!attribute [rw] target_bucket + # Specifies the bucket where you want Amazon S3 to store server access + # logs. You can have your logs delivered to any bucket that you own, + # including the same bucket that is being logged. You can also + # configure multiple buckets to deliver their logs to the same target + # bucket. In this case, you should choose a different `TargetPrefix` + # for each source bucket so that the delivered log files can be + # distinguished by key. + # @return [String] + # + # @!attribute [rw] target_grants + # Container for granting information. + # + # Buckets that use the bucket owner enforced setting for Object + # Ownership don't support target grants. For more information, see + # [Permissions for server access log delivery][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general + # @return [Array] + # + # @!attribute [rw] target_prefix + # A prefix for all log object keys. If you store log files from + # multiple Amazon S3 buckets in a single bucket, you can use a prefix + # to distinguish which log files came from which bucket. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled AWS API Documentation + # + class LoggingEnabled < Struct.new( + :target_bucket, + :target_grants, + :target_prefix) + SENSITIVE = [] + include Aws::Structure + end + + # A metadata key-value pair to store with an object. + # + # @!attribute [rw] name + # Name of the Object. + # @return [String] + # + # @!attribute [rw] value + # Value of the Object. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry AWS API Documentation + # + class MetadataEntry < Struct.new( + :name, + :value) + SENSITIVE = [] + include Aws::Structure + end + + # A container specifying replication metrics-related settings enabling + # replication metrics and events. + # + # @!attribute [rw] status + # Specifies whether the replication metrics are enabled. + # @return [String] + # + # @!attribute [rw] event_threshold + # A container specifying the time threshold for emitting the + # `s3:Replication:OperationMissedThreshold` event. + # @return [Types::ReplicationTimeValue] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Metrics AWS API Documentation + # + class Metrics < Struct.new( + :status, + :event_threshold) + SENSITIVE = [] + include Aws::Structure + end + + # A conjunction (logical AND) of predicates, which is used in evaluating + # a metrics filter. 
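+      #
+      # For example, a filter that combines a prefix with a tag (a minimal
+      # sketch, assuming `s3` is a configured `Aws::S3::Client`; the bucket
+      # name, configuration ID, and tag are hypothetical):
+      #
+      #     s3.put_bucket_metrics_configuration(
+      #       bucket: "example-bucket",
+      #       id: "docs-metrics",
+      #       metrics_configuration: {
+      #         id: "docs-metrics",
+      #         filter: {
+      #           and: {
+      #             prefix: "documents/",
+      #             tags: [{ key: "team", value: "docs" }]
+      #           }
+      #         }
+      #       })
+      #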
The operator must have at least two predicates, and + # an object must match all of the predicates in order for the filter to + # apply. + # + # @!attribute [rw] prefix + # The prefix used when evaluating an AND predicate. + # @return [String] + # + # @!attribute [rw] tags + # The list of tags used when evaluating an AND predicate. + # @return [Array] + # + # @!attribute [rw] access_point_arn + # The access point ARN used when evaluating an `AND` predicate. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator AWS API Documentation + # + class MetricsAndOperator < Struct.new( + :prefix, + :tags, + :access_point_arn) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a metrics configuration for the CloudWatch request metrics + # (specified by the metrics configuration ID) from an Amazon S3 bucket. + # If you're updating an existing metrics configuration, note that this + # is a full replacement of the existing metrics configuration. If you + # don't include the elements you want to keep, they are erased. For + # more information, see [PutBucketMetricsConfiguration][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html + # + # @!attribute [rw] id + # The ID used to identify the metrics configuration. + # @return [String] + # + # @!attribute [rw] filter + # Specifies a metrics configuration filter. The metrics configuration + # will only include objects that meet the filter's criteria. A filter + # must be a prefix, an object tag, an access point ARN, or a + # conjunction (MetricsAndOperator). + # @return [Types::MetricsFilter] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration AWS API Documentation + # + class MetricsConfiguration < Struct.new( + :id, + :filter) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a metrics configuration filter. The metrics configuration + # only includes objects that meet the filter's criteria. A filter must + # be a prefix, an object tag, an access point ARN, or a conjunction + # (MetricsAndOperator). For more information, see + # [PutBucketMetricsConfiguration][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html + # + # @!attribute [rw] prefix + # The prefix used when evaluating a metrics filter. + # @return [String] + # + # @!attribute [rw] tag + # The tag used when evaluating a metrics filter. + # @return [Types::Tag] + # + # @!attribute [rw] access_point_arn + # The access point ARN used when evaluating a metrics filter. + # @return [String] + # + # @!attribute [rw] and + # A conjunction (logical AND) of predicates, which is used in + # evaluating a metrics filter. The operator must have at least two + # predicates, and an object must match all of the predicates in order + # for the filter to apply. + # @return [Types::MetricsAndOperator] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter AWS API Documentation + # + class MetricsFilter < Struct.new( + :prefix, + :tag, + :access_point_arn, + :and) + SENSITIVE = [] + include Aws::Structure + end + + # Container for the `MultipartUpload` for the Amazon S3 object. + # + # @!attribute [rw] upload_id + # Upload ID that identifies the multipart upload. + # @return [String] + # + # @!attribute [rw] key + # Key of the object for which the multipart upload was initiated. 
+ # @return [String] + # + # @!attribute [rw] initiated + # Date and time at which the multipart upload was initiated. + # @return [Time] + # + # @!attribute [rw] storage_class + # The class of storage used to store the object. + # @return [String] + # + # @!attribute [rw] owner + # Specifies the owner of the object that is part of the multipart + # upload. + # @return [Types::Owner] + # + # @!attribute [rw] initiator + # Identifies who initiated the multipart upload. + # @return [Types::Initiator] + # + # @!attribute [rw] checksum_algorithm + # The algorithm that was used to create a checksum of the object. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload AWS API Documentation + # + class MultipartUpload < Struct.new( + :upload_id, + :key, + :initiated, + :storage_class, + :owner, + :initiator, + :checksum_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # The specified bucket does not exist. + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchBucket AWS API Documentation + # + class NoSuchBucket < Aws::EmptyStructure; end + + # The specified key does not exist. + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchKey AWS API Documentation + # + class NoSuchKey < Aws::EmptyStructure; end + + # The specified multipart upload does not exist. + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchUpload AWS API Documentation + # + class NoSuchUpload < Aws::EmptyStructure; end + + # Specifies when noncurrent object versions expire. Upon expiration, + # Amazon S3 permanently deletes the noncurrent object versions. You set + # this lifecycle configuration action on a bucket that has versioning + # enabled (or suspended) to request that Amazon S3 delete noncurrent + # object versions at a specific period in the object's lifetime. + # + # @!attribute [rw] noncurrent_days + # Specifies the number of days an object is noncurrent before Amazon + # S3 can perform the associated action. The value must be a non-zero + # positive integer. For information about the noncurrent days + # calculations, see [How Amazon S3 Calculates When an Object Became + # Noncurrent][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations + # @return [Integer] + # + # @!attribute [rw] newer_noncurrent_versions + # Specifies how many noncurrent versions Amazon S3 will retain. If + # there are this many more recent noncurrent versions, Amazon S3 will + # take the associated action. For more information about noncurrent + # versions, see [Lifecycle configuration elements][1] in the *Amazon + # S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration AWS API Documentation + # + class NoncurrentVersionExpiration < Struct.new( + :noncurrent_days, + :newer_noncurrent_versions) + SENSITIVE = [] + include Aws::Structure + end + + # Container for the transition rule that describes when noncurrent + # objects transition to the `STANDARD_IA`, `ONEZONE_IA`, + # `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or `DEEP_ARCHIVE` + # storage class. 
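+      #
+      #   For example, a lifecycle rule that transitions and then expires
+      #   noncurrent versions (a minimal sketch, assuming `s3` is a
+      #   configured `Aws::S3::Client`; the bucket name, rule ID, and
+      #   periods are hypothetical):
+      #
+      #       s3.put_bucket_lifecycle_configuration(
+      #         bucket: "example-bucket",
+      #         lifecycle_configuration: {
+      #           rules: [{
+      #             id: "retire-old-versions",
+      #             status: "Enabled",
+      #             filter: { prefix: "" },
+      #             noncurrent_version_transitions: [
+      #               { noncurrent_days: 30, storage_class: "GLACIER" }
+      #             ],
+      #             noncurrent_version_expiration: { noncurrent_days: 365 }
+      #           }]
+      #         })
+      #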
If your bucket is versioning-enabled (or versioning is + # suspended), you can set this action to request that Amazon S3 + # transition noncurrent object versions to the `STANDARD_IA`, + # `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or + # `DEEP_ARCHIVE` storage class at a specific period in the object's + # lifetime. + # + # @!attribute [rw] noncurrent_days + # Specifies the number of days an object is noncurrent before Amazon + # S3 can perform the associated action. For information about the + # noncurrent days calculations, see [How Amazon S3 Calculates How Long + # an Object Has Been Noncurrent][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations + # @return [Integer] + # + # @!attribute [rw] storage_class + # The class of storage used to store the object. + # @return [String] + # + # @!attribute [rw] newer_noncurrent_versions + # Specifies how many noncurrent versions Amazon S3 will retain. If + # there are this many more recent noncurrent versions, Amazon S3 will + # take the associated action. For more information about noncurrent + # versions, see [Lifecycle configuration elements][1] in the *Amazon + # S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition AWS API Documentation + # + class NoncurrentVersionTransition < Struct.new( + :noncurrent_days, + :storage_class, + :newer_noncurrent_versions) + SENSITIVE = [] + include Aws::Structure + end + + # A container for specifying the notification configuration of the + # bucket. If this element is empty, notifications are turned off for the + # bucket. + # + # @!attribute [rw] topic_configurations + # The topic to which notifications are sent and the events for which + # notifications are generated. + # @return [Array] + # + # @!attribute [rw] queue_configurations + # The Amazon Simple Queue Service queues to publish messages to and + # the events for which to publish messages. + # @return [Array] + # + # @!attribute [rw] lambda_function_configurations + # Describes the Lambda functions to invoke and the events for which to + # invoke them. + # @return [Array] + # + # @!attribute [rw] event_bridge_configuration + # Enables delivery of events to Amazon EventBridge. + # @return [Types::EventBridgeConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration AWS API Documentation + # + class NotificationConfiguration < Struct.new( + :topic_configurations, + :queue_configurations, + :lambda_function_configurations, + :event_bridge_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] topic_configuration + # This data type is deprecated. A container for specifying the + # configuration for publication of messages to an Amazon Simple + # Notification Service (Amazon SNS) topic when Amazon S3 detects + # specified events. + # @return [Types::TopicConfigurationDeprecated] + # + # @!attribute [rw] queue_configuration + # This data type is deprecated. This data type specifies the + # configuration for publishing messages to an Amazon Simple Queue + # Service (Amazon SQS) queue when Amazon S3 detects specified events. 
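+      #
+      #   For the current API, an equivalent setup can be applied with the
+      #   non-deprecated `NotificationConfiguration` (a minimal sketch,
+      #   assuming `s3` is a configured `Aws::S3::Client`; the bucket name
+      #   is hypothetical):
+      #
+      #       s3.put_bucket_notification_configuration(
+      #         bucket: "example-bucket",
+      #         notification_configuration: {
+      #           # deliver all events to Amazon EventBridge
+      #           event_bridge_configuration: {}
+      #         })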
+ # @return [Types::QueueConfigurationDeprecated] + # + # @!attribute [rw] cloud_function_configuration + # Container for specifying the Lambda notification configuration. + # @return [Types::CloudFunctionConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated AWS API Documentation + # + class NotificationConfigurationDeprecated < Struct.new( + :topic_configuration, + :queue_configuration, + :cloud_function_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies object key name filtering rules. For information about key + # name filtering, see [Configuring Event Notifications][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # + # @!attribute [rw] key + # A container for object key name prefix and suffix filtering rules. + # @return [Types::S3KeyFilter] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter AWS API Documentation + # + class NotificationConfigurationFilter < Struct.new( + :key) + SENSITIVE = [] + include Aws::Structure + end + + # An object consists of data and its descriptive metadata. + # + # @!attribute [rw] key + # The name that you assign to an object. You use the object key to + # retrieve the object. + # @return [String] + # + # @!attribute [rw] last_modified + # Creation date of the object. + # @return [Time] + # + # @!attribute [rw] etag + # The entity tag is a hash of the object. The ETag reflects changes + # only to the contents of an object, not its metadata. The ETag may or + # may not be an MD5 digest of the object data. Whether or not it is + # depends on how the object was created and how it is encrypted as + # described below: + # + # * Objects created by the PUT Object, POST Object, or Copy operation, + # or through the Amazon Web Services Management Console, and are + # encrypted by SSE-S3 or plaintext, have ETags that are an MD5 + # digest of their object data. + # + # * Objects created by the PUT Object, POST Object, or Copy operation, + # or through the Amazon Web Services Management Console, and are + # encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 + # digest of their object data. + # + # * If an object is created by either the Multipart Upload or Part + # Copy operation, the ETag is not an MD5 digest, regardless of the + # method of encryption. If an object is larger than 16 MB, the + # Amazon Web Services Management Console will upload or copy that + # object as a Multipart Upload, and therefore the ETag will not be + # an MD5 digest. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # The algorithm that was used to create a checksum of the object. + # @return [Array] + # + # @!attribute [rw] size + # Size in bytes of the object + # @return [Integer] + # + # @!attribute [rw] storage_class + # The class of storage used to store the object. + # @return [String] + # + # @!attribute [rw] owner + # The owner of the object + # @return [Types::Owner] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object AWS API Documentation + # + class Object < Struct.new( + :key, + :last_modified, + :etag, + :checksum_algorithm, + :size, + :storage_class, + :owner) + SENSITIVE = [] + include Aws::Structure + end + + # This action is not allowed against this storage tier. 
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectAlreadyInActiveTierError AWS API Documentation
+      #
+      class ObjectAlreadyInActiveTierError < Aws::EmptyStructure; end
+
+      # An object identifier is a unique value that identifies an object.
+      #
+      # @!attribute [rw] key
+      #   Key name of the object.
+      #
+      #   Replacement must be made for object keys containing special
+      #   characters (such as carriage returns) when using XML requests. For
+      #   more information, see [ XML related object key constraints][1].
+      #
+      #
+      #
+      #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+      #   @return [String]
+      #
+      # @!attribute [rw] version_id
+      #   VersionId for the specific version of the object to delete.
+      #   @return [String]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier AWS API Documentation
+      #
+      class ObjectIdentifier < Struct.new(
+        :key,
+        :version_id)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # The container element for Object Lock configuration parameters.
+      #
+      # @!attribute [rw] object_lock_enabled
+      #   Indicates whether this bucket has an Object Lock configuration
+      #   enabled. Enable `ObjectLockEnabled` when you apply
+      #   `ObjectLockConfiguration` to a bucket.
+      #   @return [String]
+      #
+      # @!attribute [rw] rule
+      #   Specifies the Object Lock rule for the specified object. Enable
+      #   this rule when you apply `ObjectLockConfiguration` to a bucket.
+      #   Bucket settings require both a mode and a period. The period can be
+      #   either `Days` or `Years` but you must select one. You cannot specify
+      #   `Days` and `Years` at the same time.
+      #   @return [Types::ObjectLockRule]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockConfiguration AWS API Documentation
+      #
+      class ObjectLockConfiguration < Struct.new(
+        :object_lock_enabled,
+        :rule)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # A legal hold configuration for an object.
+      #
+      # @!attribute [rw] status
+      #   Indicates whether the specified object has a legal hold in place.
+      #   @return [String]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockLegalHold AWS API Documentation
+      #
+      class ObjectLockLegalHold < Struct.new(
+        :status)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # A Retention configuration for an object.
+      #
+      # @!attribute [rw] mode
+      #   Indicates the Retention mode for the specified object.
+      #   @return [String]
+      #
+      # @!attribute [rw] retain_until_date
+      #   The date on which this Object Lock Retention will expire.
+      #   @return [Time]
+      #
+      # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRetention AWS API Documentation
+      #
+      class ObjectLockRetention < Struct.new(
+        :mode,
+        :retain_until_date)
+        SENSITIVE = []
+        include Aws::Structure
+      end
+
+      # The container element for an Object Lock rule.
+      #
+      # @!attribute [rw] default_retention
+      #   The default Object Lock retention mode and period that you want to
+      #   apply to new objects placed in the specified bucket. Bucket settings
+      #   require both a mode and a period. The period can be either `Days` or
+      #   `Years` but you must select one. You cannot specify `Days` and
+      #   `Years` at the same time.
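+      #
+      #   For example, applying a default 30-day GOVERNANCE-mode retention
+      #   (a minimal sketch, assuming `s3` is a configured
+      #   `Aws::S3::Client` and that Object Lock was enabled when the
+      #   hypothetical bucket was created):
+      #
+      #       s3.put_object_lock_configuration(
+      #         bucket: "example-bucket",
+      #         object_lock_configuration: {
+      #           object_lock_enabled: "Enabled",
+      #           rule: { default_retention: { mode: "GOVERNANCE", days: 30 } }
+      #         })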
+ # @return [Types::DefaultRetention] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRule AWS API Documentation + # + class ObjectLockRule < Struct.new( + :default_retention) + SENSITIVE = [] + include Aws::Structure + end + + # The source object of the COPY action is not in the active tier and is + # only stored in Amazon S3 Glacier. + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectNotInActiveTierError AWS API Documentation + # + class ObjectNotInActiveTierError < Aws::EmptyStructure; end + + # A container for elements related to an individual part. + # + # @!attribute [rw] part_number + # The part number identifying the part. This value is a positive + # integer between 1 and 10,000. + # @return [Integer] + # + # @!attribute [rw] size + # The size of the uploaded part in bytes. + # @return [Integer] + # + # @!attribute [rw] checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectPart AWS API Documentation + # + class ObjectPart < Struct.new( + :part_number, + :size, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256) + SENSITIVE = [] + include Aws::Structure + end + + # The version of an object. + # + # @!attribute [rw] etag + # The entity tag is an MD5 hash of that version of the object. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # The algorithm that was used to create a checksum of the object. 
+ # @return [Array<String>]
+ #
+ # @!attribute [rw] size
+ # Size in bytes of the object.
+ # @return [Integer]
+ #
+ # @!attribute [rw] storage_class
+ # The class of storage used to store the object.
+ # @return [String]
+ #
+ # @!attribute [rw] key
+ # The object key.
+ # @return [String]
+ #
+ # @!attribute [rw] version_id
+ # Version ID of an object.
+ # @return [String]
+ #
+ # @!attribute [rw] is_latest
+ # Specifies whether the object is (true) or is not (false) the latest
+ # version of an object.
+ # @return [Boolean]
+ #
+ # @!attribute [rw] last_modified
+ # Date and time the object was last modified.
+ # @return [Time]
+ #
+ # @!attribute [rw] owner
+ # Specifies the owner of the object.
+ # @return [Types::Owner]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion AWS API Documentation
+ #
+ class ObjectVersion < Struct.new(
+ :etag,
+ :checksum_algorithm,
+ :size,
+ :storage_class,
+ :key,
+ :version_id,
+ :is_latest,
+ :last_modified,
+ :owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes the location where the restore job's output is stored.
+ #
+ # @!attribute [rw] s3
+ # Describes an S3 location that will receive the results of the
+ # restore request.
+ # @return [Types::S3Location]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation AWS API Documentation
+ #
+ class OutputLocation < Struct.new(
+ :s3)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Describes how results of the Select job are serialized.
+ #
+ # @!attribute [rw] csv
+ # Describes the serialization of CSV-encoded Select results.
+ # @return [Types::CSVOutput]
+ #
+ # @!attribute [rw] json
+ # Specifies JSON as the request's output serialization format.
+ # @return [Types::JSONOutput]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization AWS API Documentation
+ #
+ class OutputSerialization < Struct.new(
+ :csv,
+ :json)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Container for the owner's display name and ID.
+ #
+ # @!attribute [rw] display_name
+ # Container for the display name of the owner.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # Container for the ID of the owner.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner AWS API Documentation
+ #
+ class Owner < Struct.new(
+ :display_name,
+ :id)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The container element for a bucket's ownership controls.
+ #
+ # @!attribute [rw] rules
+ # The container element for an ownership control rule.
+ # @return [Array<Types::OwnershipControlsRule>]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControls AWS API Documentation
+ #
+ class OwnershipControls < Struct.new(
+ :rules)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The container element for an ownership control rule.
+ #
+ # @!attribute [rw] object_ownership
+ # The container element for object ownership for a bucket's ownership
+ # controls.
+ #
+ # BucketOwnerPreferred - Objects uploaded to the bucket change
+ # ownership to the bucket owner if the objects are uploaded with the
+ # `bucket-owner-full-control` canned ACL.
+ #
+ # ObjectWriter - The uploading account will own the object if the
+ # object is uploaded with the `bucket-owner-full-control` canned ACL.
+ #
+ # BucketOwnerEnforced - Access control lists (ACLs) are disabled and
+ # no longer affect permissions. The bucket owner automatically owns
+ # and has full control over every object in the bucket.
The bucket + # only accepts PUT requests that don't specify an ACL or bucket owner + # full control ACLs, such as the `bucket-owner-full-control` canned + # ACL or an equivalent form of this ACL expressed in the XML format. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControlsRule AWS API Documentation + # + class OwnershipControlsRule < Struct.new( + :object_ownership) + SENSITIVE = [] + include Aws::Structure + end + + # Container for Parquet. + # + # @api private + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ParquetInput AWS API Documentation + # + class ParquetInput < Aws::EmptyStructure; end + + # Container for elements related to a part. + # + # @!attribute [rw] part_number + # Part number identifying the part. This is a positive integer between + # 1 and 10,000. + # @return [Integer] + # + # @!attribute [rw] last_modified + # Date and time at which the part was uploaded. + # @return [Time] + # + # @!attribute [rw] etag + # Entity tag returned when the part was uploaded. + # @return [String] + # + # @!attribute [rw] size + # Size in bytes of the uploaded part data. + # @return [Integer] + # + # @!attribute [rw] checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. 
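+ #
+ # For example (a sketch; the bucket, key, and upload ID are
+ # placeholders), part-level checksums can be read back with
+ # `Aws::S3::Client#list_parts`:
+ #
+ #     s3 = Aws::S3::Client.new
+ #     resp = s3.list_parts(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       upload_id: "example-upload-id"
+ #     )
+ #     resp.parts.each do |part|
+ #       # checksum fields are nil for parts uploaded without a checksum
+ #       puts "#{part.part_number}: #{part.checksum_sha256}"
+ #     end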
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part AWS API Documentation + # + class Part < Struct.new( + :part_number, + :last_modified, + :etag, + :size, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256) + SENSITIVE = [] + include Aws::Structure + end + + # The container element for a bucket's policy status. + # + # @!attribute [rw] is_public + # The policy status for this bucket. `TRUE` indicates that this bucket + # is public. `FALSE` indicates that the bucket is not public. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PolicyStatus AWS API Documentation + # + class PolicyStatus < Struct.new( + :is_public) + SENSITIVE = [] + include Aws::Structure + end + + # This data type contains information about progress of an operation. + # + # @!attribute [rw] bytes_scanned + # The current number of object bytes scanned. + # @return [Integer] + # + # @!attribute [rw] bytes_processed + # The current number of uncompressed object bytes processed. + # @return [Integer] + # + # @!attribute [rw] bytes_returned + # The current number of bytes of records payload data returned. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Progress AWS API Documentation + # + class Progress < Struct.new( + :bytes_scanned, + :bytes_processed, + :bytes_returned) + SENSITIVE = [] + include Aws::Structure + end + + # This data type contains information about the progress event of an + # operation. + # + # @!attribute [rw] details + # The Progress event details. + # @return [Types::Progress] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ProgressEvent AWS API Documentation + # + class ProgressEvent < Struct.new( + :details, + :event_type) + SENSITIVE = [] + include Aws::Structure + end + + # The PublicAccessBlock configuration that you want to apply to this + # Amazon S3 bucket. You can enable the configuration options in any + # combination. For more information about when Amazon S3 considers a + # bucket or object public, see [The Meaning of "Public"][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # + # @!attribute [rw] block_public_acls + # Specifies whether Amazon S3 should block public access control lists + # (ACLs) for this bucket and objects in this bucket. Setting this + # element to `TRUE` causes the following behavior: + # + # * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL + # is public. + # + # * PUT Object calls fail if the request includes a public ACL. + # + # * PUT Bucket calls fail if the request includes a public ACL. + # + # Enabling this setting doesn't affect existing policies or ACLs. + # @return [Boolean] + # + # @!attribute [rw] ignore_public_acls + # Specifies whether Amazon S3 should ignore public ACLs for this + # bucket and objects in this bucket. Setting this element to `TRUE` + # causes Amazon S3 to ignore all public ACLs on this bucket and + # objects in this bucket. + # + # Enabling this setting doesn't affect the persistence of any + # existing ACLs and doesn't prevent new public ACLs from being set. 
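+ #
+ # A minimal sketch of applying a full public access block (the bucket
+ # name is a placeholder):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_public_access_block(
+ #       bucket: "example-bucket",
+ #       public_access_block_configuration: {
+ #         block_public_acls: true,
+ #         ignore_public_acls: true,
+ #         block_public_policy: true,
+ #         restrict_public_buckets: true
+ #       }
+ #     )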
+ # @return [Boolean] + # + # @!attribute [rw] block_public_policy + # Specifies whether Amazon S3 should block public bucket policies for + # this bucket. Setting this element to `TRUE` causes Amazon S3 to + # reject calls to PUT Bucket policy if the specified bucket policy + # allows public access. + # + # Enabling this setting doesn't affect existing bucket policies. + # @return [Boolean] + # + # @!attribute [rw] restrict_public_buckets + # Specifies whether Amazon S3 should restrict public bucket policies + # for this bucket. Setting this element to `TRUE` restricts access to + # this bucket to only Amazon Web Service principals and authorized + # users within this account if the bucket has a public policy. + # + # Enabling this setting doesn't affect previously stored bucket + # policies, except that public and cross-account access within any + # public bucket policy, including non-public delegation to specific + # accounts, is blocked. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PublicAccessBlockConfiguration AWS API Documentation + # + class PublicAccessBlockConfiguration < Struct.new( + :block_public_acls, + :ignore_public_acls, + :block_public_policy, + :restrict_public_buckets) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which the accelerate configuration is + # set. + # @return [String] + # + # @!attribute [rw] accelerate_configuration + # Container for setting the transfer acceleration state. + # @return [Types::AccelerateConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest AWS API Documentation + # + class PutBucketAccelerateConfigurationRequest < Struct.new( + :bucket, + :accelerate_configuration, + :expected_bucket_owner, + :checksum_algorithm) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] acl + # The canned ACL to apply to the bucket. + # @return [String] + # + # @!attribute [rw] access_control_policy + # Contains the elements that set the ACL permissions for an object per + # grantee. + # @return [Types::AccessControlPolicy] + # + # @!attribute [rw] bucket + # The bucket to which to apply the ACL. + # @return [String] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the data. This header must + # be used as a message integrity check to verify that the request body + # was not corrupted in transit. 
For more information, go to [RFC
+ # 1864.][1]
+ #
+ # For requests made using the Amazon Web Services Command Line
+ # Interface (CLI) or Amazon Web Services SDKs, this field is
+ # calculated automatically.
+ #
+ #
+ #
+ # [1]: http://www.ietf.org/rfc/rfc1864.txt
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent. Otherwise, Amazon S3 fails the request with the HTTP status
+ # code `400 Bad Request`. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any
+ # provided `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ #
+ # @!attribute [rw] grant_full_control
+ # Allows grantee the read, write, read ACP, and write ACP permissions
+ # on the bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read
+ # Allows grantee to list the objects in the bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_read_acp
+ # Allows grantee to read the bucket ACL.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_write
+ # Allows grantee to create new objects in the bucket.
+ #
+ # For the bucket and object owners of existing objects, also allows
+ # deletions and overwrites of those objects.
+ # @return [String]
+ #
+ # @!attribute [rw] grant_write_acp
+ # Allows grantee to write the ACL for the applicable bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest AWS API Documentation
+ #
+ class PutBucketAclRequest < Struct.new(
+ :acl,
+ :access_control_policy,
+ :bucket,
+ :content_md5,
+ :checksum_algorithm,
+ :grant_full_control,
+ :grant_read,
+ :grant_read_acp,
+ :grant_write,
+ :grant_write_acp,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket for which an analytics configuration is
+ # stored.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The ID that identifies the analytics configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] analytics_configuration
+ # The configuration and any analyses for the analytics filter.
+ # @return [Types::AnalyticsConfiguration]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest AWS API Documentation
+ #
+ class PutBucketAnalyticsConfigurationRequest < Struct.new(
+ :bucket,
+ :id,
+ :analytics_configuration,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # Specifies the bucket impacted by the `cors` configuration.
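+ #
+ # A hedged sketch of a complete request (the bucket name and origins
+ # are placeholders):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_bucket_cors(
+ #       bucket: "example-bucket",
+ #       cors_configuration: {
+ #         cors_rules: [{
+ #           allowed_methods: ["GET", "PUT"],
+ #           allowed_origins: ["https://example.com"],
+ #           allowed_headers: ["*"],
+ #           max_age_seconds: 3600
+ #         }]
+ #       }
+ #     )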
+ # @return [String] + # + # @!attribute [rw] cors_configuration + # Describes the cross-origin access configuration for objects in an + # Amazon S3 bucket. For more information, see [Enabling Cross-Origin + # Resource Sharing][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html + # @return [Types::CORSConfiguration] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the data. This header must + # be used as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, go to [RFC + # 1864.][1] + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest AWS API Documentation + # + class PutBucketCorsRequest < Struct.new( + :bucket, + :cors_configuration, + :content_md5, + :checksum_algorithm, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # Specifies default encryption for a bucket using server-side + # encryption with Amazon S3-managed keys (SSE-S3) or customer managed + # keys (SSE-KMS). For information about the Amazon S3 default + # encryption feature, see [Amazon S3 Default Bucket Encryption][1] in + # the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + # @return [String] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the server-side encryption + # configuration. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. 
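+ #
+ # As an illustrative sketch of the surrounding request (the bucket
+ # name is a placeholder), SSE-S3 default encryption can be configured
+ # like this:
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_bucket_encryption(
+ #       bucket: "example-bucket",
+ #       server_side_encryption_configuration: {
+ #         rules: [{
+ #           apply_server_side_encryption_by_default: { sse_algorithm: "AES256" }
+ #         }]
+ #       }
+ #     )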
+ # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] server_side_encryption_configuration + # Specifies the default server-side-encryption configuration. + # @return [Types::ServerSideEncryptionConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionRequest AWS API Documentation + # + class PutBucketEncryptionRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :server_side_encryption_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose configuration you want to + # modify or retrieve. + # @return [String] + # + # @!attribute [rw] id + # The ID used to identify the S3 Intelligent-Tiering configuration. + # @return [String] + # + # @!attribute [rw] intelligent_tiering_configuration + # Container for S3 Intelligent-Tiering configuration. + # @return [Types::IntelligentTieringConfiguration] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfigurationRequest AWS API Documentation + # + class PutBucketIntelligentTieringConfigurationRequest < Struct.new( + :bucket, + :id, + :intelligent_tiering_configuration) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket where the inventory configuration will be + # stored. + # @return [String] + # + # @!attribute [rw] id + # The ID used to identify the inventory configuration. + # @return [String] + # + # @!attribute [rw] inventory_configuration + # Specifies the inventory configuration. + # @return [Types::InventoryConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest AWS API Documentation + # + class PutBucketInventoryConfigurationRequest < Struct.new( + :bucket, + :id, + :inventory_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to set the configuration. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. 
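+ #
+ # For illustration (the bucket name, rule ID, and prefix are
+ # placeholders), a single expiration rule could be applied like this:
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_bucket_lifecycle_configuration(
+ #       bucket: "example-bucket",
+ #       lifecycle_configuration: {
+ #         rules: [{
+ #           id: "expire-logs",
+ #           status: "Enabled",
+ #           filter: { prefix: "logs/" },
+ #           expiration: { days: 365 }
+ #         }]
+ #       }
+ #     )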
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] lifecycle_configuration + # Container for lifecycle rules. You can add as many as 1,000 rules. + # @return [Types::BucketLifecycleConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest AWS API Documentation + # + class PutBucketLifecycleConfigurationRequest < Struct.new( + :bucket, + :checksum_algorithm, + :lifecycle_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # @return [String] + # + # @!attribute [rw] content_md5 + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] lifecycle_configuration + # @return [Types::LifecycleConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest AWS API Documentation + # + class PutBucketLifecycleRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :lifecycle_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket for which to set the logging parameters. + # @return [String] + # + # @!attribute [rw] bucket_logging_status + # Container for logging status information. + # @return [Types::BucketLoggingStatus] + # + # @!attribute [rw] content_md5 + # The MD5 hash of the `PutBucketLogging` request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. 
For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any
+ # provided `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest AWS API Documentation
+ #
+ class PutBucketLoggingRequest < Struct.new(
+ :bucket,
+ :bucket_logging_status,
+ :content_md5,
+ :checksum_algorithm,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket for which the metrics configuration is set.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The ID used to identify the metrics configuration.
+ # @return [String]
+ #
+ # @!attribute [rw] metrics_configuration
+ # Specifies the metrics configuration.
+ # @return [Types::MetricsConfiguration]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest AWS API Documentation
+ #
+ class PutBucketMetricsConfigurationRequest < Struct.new(
+ :bucket,
+ :id,
+ :metrics_configuration,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] notification_configuration
+ # A container for specifying the notification configuration of the
+ # bucket. If this element is empty, notifications are turned off for
+ # the bucket.
+ # @return [Types::NotificationConfiguration]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @!attribute [rw] skip_destination_validation
+ # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
+ # True or false value.
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest AWS API Documentation
+ #
+ class PutBucketNotificationConfigurationRequest < Struct.new(
+ :bucket,
+ :notification_configuration,
+ :expected_bucket_owner,
+ :skip_destination_validation)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The name of the bucket.
+ # @return [String]
+ #
+ # @!attribute [rw] content_md5
+ # The MD5 hash of the `PutBucketNotification` request body.
+ #
+ # For requests made using the Amazon Web Services Command Line
+ # Interface (CLI) or Amazon Web Services SDKs, this field is
+ # calculated automatically.
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK.
When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] notification_configuration + # The container for the configuration. + # @return [Types::NotificationConfigurationDeprecated] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest AWS API Documentation + # + class PutBucketNotificationRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :notification_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose `OwnershipControls` you want + # to set. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash of the `OwnershipControls` request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] ownership_controls + # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, + # or ObjectWriter) that you want to apply to this Amazon S3 bucket. + # @return [Types::OwnershipControls] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControlsRequest AWS API Documentation + # + class PutBucketOwnershipControlsRequest < Struct.new( + :bucket, + :content_md5, + :expected_bucket_owner, + :ownership_controls) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash of the request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. 
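+ #
+ # A sketch of attaching a read-only policy (the bucket name and
+ # principal ARN are placeholders):
+ #
+ #     require "json"
+ #
+ #     policy = {
+ #       "Version" => "2012-10-17",
+ #       "Statement" => [{
+ #         "Effect" => "Allow",
+ #         "Principal" => { "AWS" => "arn:aws:iam::111122223333:root" },
+ #         "Action" => "s3:GetObject",
+ #         "Resource" => "arn:aws:s3:::example-bucket/*"
+ #       }]
+ #     }
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_bucket_policy(bucket: "example-bucket", policy: JSON.generate(policy))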
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] confirm_remove_self_bucket_access + # Set this parameter to true to confirm that you want to remove your + # permissions to change this bucket policy in the future. + # @return [Boolean] + # + # @!attribute [rw] policy + # The bucket policy as a JSON document. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest AWS API Documentation + # + class PutBucketPolicyRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :confirm_remove_self_bucket_access, + :policy, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the bucket + # @return [String] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] replication_configuration + # A container for replication rules. You can add up to 1,000 rules. + # The maximum size of a replication configuration is 2 MB. + # @return [Types::ReplicationConfiguration] + # + # @!attribute [rw] token + # A token to allow Object Lock to be enabled for an existing bucket. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest AWS API Documentation + # + class PutBucketReplicationRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :replication_configuration, + :token, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name. + # @return [String] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the data. 
You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] request_payment_configuration + # Container for Payer. + # @return [Types::RequestPaymentConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest AWS API Documentation + # + class PutBucketRequestPaymentRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :request_payment_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name. + # @return [String] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the data. You must use this + # header as a message integrity check to verify that the request body + # was not corrupted in transit. For more information, see [RFC + # 1864][1]. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # + # + # + # [1]: http://www.ietf.org/rfc/rfc1864.txt + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] tagging + # Container for the `TagSet` and `Tag` elements. + # @return [Types::Tagging] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
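+ #
+ # A minimal sketch (the bucket name and tag values are placeholders):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_bucket_tagging(
+ #       bucket: "example-bucket",
+ #       tagging: { tag_set: [{ key: "env", value: "production" }] }
+ #     )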
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest AWS API Documentation
+ #
+ class PutBucketTaggingRequest < Struct.new(
+ :bucket,
+ :content_md5,
+ :checksum_algorithm,
+ :tagging,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The bucket name.
+ # @return [String]
+ #
+ # @!attribute [rw] content_md5
+ # The base64-encoded 128-bit MD5 digest of the data. You must use
+ # this header as a message integrity check to verify that the request
+ # body was not corrupted in transit. For more information, see [RFC
+ # 1864][1].
+ #
+ # For requests made using the Amazon Web Services Command Line
+ # Interface (CLI) or Amazon Web Services SDKs, this field is
+ # calculated automatically.
+ #
+ #
+ #
+ # [1]: http://www.ietf.org/rfc/rfc1864.txt
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent. Otherwise, Amazon S3 fails the request with the HTTP status
+ # code `400 Bad Request`. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any
+ # provided `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ #
+ # @!attribute [rw] mfa
+ # The concatenation of the authentication device's serial number, a
+ # space, and the value that is displayed on your authentication
+ # device.
+ # @return [String]
+ #
+ # @!attribute [rw] versioning_configuration
+ # Container for setting the versioning state.
+ # @return [Types::VersioningConfiguration]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest AWS API Documentation
+ #
+ class PutBucketVersioningRequest < Struct.new(
+ :bucket,
+ :content_md5,
+ :checksum_algorithm,
+ :mfa,
+ :versioning_configuration,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] bucket
+ # The bucket name.
+ # @return [String]
+ #
+ # @!attribute [rw] content_md5
+ # The base64-encoded 128-bit MD5 digest of the data. You must use this
+ # header as a message integrity check to verify that the request body
+ # was not corrupted in transit. For more information, see [RFC
+ # 1864][1].
+ #
+ # For requests made using the Amazon Web Services Command Line
+ # Interface (CLI) or Amazon Web Services SDKs, this field is
+ # calculated automatically.
+ #
+ #
+ #
+ # [1]: http://www.ietf.org/rfc/rfc1864.txt
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent.
Otherwise, Amazon S3 fails the request with the HTTP status
+ # code `400 Bad Request`. For more information, see [Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
+ #
+ # If you provide an individual checksum, Amazon S3 ignores any
+ # provided `ChecksumAlgorithm` parameter.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ #
+ # @!attribute [rw] website_configuration
+ # Container for the request.
+ # @return [Types::WebsiteConfiguration]
+ #
+ # @!attribute [rw] expected_bucket_owner
+ # The account ID of the expected bucket owner. If the bucket is owned
+ # by a different account, the request fails with the HTTP status code
+ # `403 Forbidden` (access denied).
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest AWS API Documentation
+ #
+ class PutBucketWebsiteRequest < Struct.new(
+ :bucket,
+ :content_md5,
+ :checksum_algorithm,
+ :website_configuration,
+ :expected_bucket_owner)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] request_charged
+ # If present, indicates that the requester was successfully charged
+ # for the request.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput AWS API Documentation
+ #
+ class PutObjectAclOutput < Struct.new(
+ :request_charged)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] acl
+ # The canned ACL to apply to the object. For more information, see
+ # [Canned ACL][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ # @return [String]
+ #
+ # @!attribute [rw] access_control_policy
+ # Contains the elements that set the ACL permissions for an object per
+ # grantee.
+ # @return [Types::AccessControlPolicy]
+ #
+ # @!attribute [rw] bucket
+ # The bucket name that contains the object to which you want to attach
+ # the ACL.
+ #
+ # When using this action with an access point, you must direct
+ # requests to the access point hostname. The access point hostname
+ # takes the form
+ # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+ # When using this action with an access point through the Amazon Web
+ # Services SDKs, you provide the access point ARN in place of the
+ # bucket name. For more information about access point ARNs, see
+ # [Using access points][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ # @return [String]
+ #
+ # @!attribute [rw] content_md5
+ # The base64-encoded 128-bit MD5 digest of the data. This header must
+ # be used as a message integrity check to verify that the request body
+ # was not corrupted in transit. For more information, go to [RFC
+ # 1864][1].
+ #
+ # For requests made using the Amazon Web Services Command Line
+ # Interface (CLI) or Amazon Web Services SDKs, this field is
+ # calculated automatically.
+ #
+ #
+ #
+ # [1]: http://www.ietf.org/rfc/rfc1864.txt
+ # @return [String]
+ #
+ # @!attribute [rw] checksum_algorithm
+ # Indicates the algorithm used to create the checksum for the object
+ # when using the SDK. This header will not provide any additional
+ # functionality if not using the SDK. When sending this header, there
+ # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+ # sent. Otherwise, Amazon S3 fails the request with the HTTP status
+ # code `400 Bad Request`.
For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] grant_full_control + # Allows grantee the read, write, read ACP, and write ACP permissions + # on the bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_read + # Allows grantee to list the objects in the bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_read_acp + # Allows grantee to read the bucket ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_write + # Allows grantee to create new objects in the bucket. + # + # For the bucket and object owners of existing objects, also allows + # deletions and overwrites of those objects. + # @return [String] + # + # @!attribute [rw] grant_write_acp + # Allows grantee to write the ACL for the applicable bucket. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] key + # Key for which the PUT action was initiated. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] version_id + # VersionId used to reference a specific version of the object. + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
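+ #
+ # A short sketch using a canned ACL (the bucket and key are
+ # placeholders):
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_object_acl(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       acl: "public-read"
+ #     )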
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest AWS API Documentation + # + class PutObjectAclRequest < Struct.new( + :acl, + :access_control_policy, + :bucket, + :content_md5, + :checksum_algorithm, + :grant_full_control, + :grant_read, + :grant_read_acp, + :grant_write, + :grant_write_acp, + :key, + :request_payer, + :version_id, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHoldOutput AWS API Documentation + # + class PutObjectLegalHoldOutput < Struct.new( + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object that you want to place a legal + # hold on. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] key + # The key name for the object that you want to place a legal hold on. + # @return [String] + # + # @!attribute [rw] legal_hold + # Container element for the legal hold configuration you want to apply + # to the specified object. + # @return [Types::ObjectLockLegalHold] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] version_id + # The version ID of the object that you want to place a legal hold on. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. 
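+ #
+ # For illustration (the bucket and key are placeholders), a legal hold
+ # can be turned on like this:
+ #
+ #     s3 = Aws::S3::Client.new
+ #     s3.put_object_legal_hold(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       legal_hold: { status: "ON" }
+ #     )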
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHoldRequest AWS API Documentation + # + class PutObjectLegalHoldRequest < Struct.new( + :bucket, + :key, + :legal_hold, + :request_payer, + :version_id, + :content_md5, + :checksum_algorithm, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfigurationOutput AWS API Documentation + # + class PutObjectLockConfigurationOutput < Struct.new( + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket whose Object Lock configuration you want to create or + # replace. + # @return [String] + # + # @!attribute [rw] object_lock_configuration + # The Object Lock configuration that you want to apply to the + # specified bucket. + # @return [Types::ObjectLockConfiguration] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] token + # A token to allow Object Lock to be enabled for an existing bucket. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfigurationRequest AWS API Documentation + # + class PutObjectLockConfigurationRequest < Struct.new( + :bucket, + :object_lock_configuration, + :request_payer, + :token, + :content_md5, + :checksum_algorithm, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] expiration + # If the expiration is configured for the object (see + # [PutBucketLifecycleConfiguration][1]), the response includes this + # header. It includes the `expiry-date` and `rule-id` key-value pairs + # that provide information about object expiration. The value of the + # `rule-id` is URL-encoded. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + # @return [String] + # + # @!attribute [rw] etag + # Entity tag for the uploaded object. + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # The base64-encoded, 32-bit CRC32C checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # The base64-encoded, 160-bit SHA-1 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # The base64-encoded, 256-bit SHA-256 digest of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. For more + # information about how checksums are calculated with multipart + # uploads, see [ Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums + # @return [String] + # + # @!attribute [rw] server_side_encryption + # If you specified server-side encryption either with an Amazon Web + # Services KMS key or Amazon S3-managed encryption key in your PUT + # request, the response includes this header. It confirms the + # encryption algorithm that Amazon S3 used to encrypt the object. + # @return [String] + # + # @!attribute [rw] version_id + # Version of the object. 
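+ #
+ # These output fields are read straight off the struct returned by
+ # `put_object`; a minimal sketch (hypothetical bucket and key):
+ #
+ #     resp = client.put_object(bucket: "example-bucket",
+ #                              key: "example-key",
+ #                              body: "data")
+ #     resp.etag       # => entity tag of the stored object
+ #     resp.version_id # => set when bucket versioning is enabled
+ #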
+ # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header to provide + # round-trip message integrity verification of the customer-provided + # encryption key. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If `x-amz-server-side-encryption` is present and has the value of + # `aws:kms`, this header specifies the ID of the Amazon Web Services + # Key Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] ssekms_encryption_context + # If present, specifies the Amazon Web Services KMS Encryption Context + # to use for object encryption. The value of this header is a + # base64-encoded UTF-8 string holding JSON with the encryption context + # key-value pairs. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the uploaded object uses an S3 Bucket Key for + # server-side encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput AWS API Documentation + # + class PutObjectOutput < Struct.new( + :expiration, + :etag, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :server_side_encryption, + :version_id, + :sse_customer_algorithm, + :sse_customer_key_md5, + :ssekms_key_id, + :ssekms_encryption_context, + :bucket_key_enabled, + :request_charged) + SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context] + include Aws::Structure + end + + # @!attribute [rw] acl + # The canned ACL to apply to the object. For more information, see + # [Canned ACL][1]. + # + # This action is not supported by Amazon S3 on Outposts. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + # @return [String] + # + # @!attribute [rw] body + # Object data. + # @return [IO] + # + # @!attribute [rw] bucket + # The bucket name to which the PUT action was initiated. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] cache_control + # Can be used to specify caching behavior along the request/reply + # chain. For more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + # @return [String] + # + # @!attribute [rw] content_disposition + # Specifies presentational information for the object. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + # @return [String] + # + # @!attribute [rw] content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the + # media-type referenced by the Content-Type header field. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + # @return [String] + # + # @!attribute [rw] content_language + # The language the content is in. + # @return [String] + # + # @!attribute [rw] content_length + # Size of the body in bytes. This parameter is useful when the size of + # the body cannot be determined automatically. For more information, + # see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + # @return [Integer] + # + # @!attribute [rw] content_md5 + # The base64-encoded 128-bit MD5 digest of the message (without the + # headers) according to RFC 1864. This header can be used as a message + # integrity check to verify that the data is the same data that was + # originally sent. Although it is optional, we recommend using the + # Content-MD5 mechanism as an end-to-end integrity check. For more + # information about REST request authentication, see [REST + # Authentication][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + # @return [String] + # + # @!attribute [rw] content_type + # A standard MIME type describing the format of the contents. For more + # information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. 
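+ #
+ # For the checksum behavior just described, a minimal sketch
+ # (hypothetical names; passing `checksum_algorithm` lets the SDK
+ # compute and send the matching `x-amz-checksum-*` header):
+ #
+ #     client.put_object(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       body: "data",
+ #       checksum_algorithm: "SHA256"
+ #     )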
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32 checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 32-bit CRC32C checksum of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 160-bit SHA-1 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This header + # specifies the base64-encoded, 256-bit SHA-256 digest of the object. + # For more information, see [Checking object integrity][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expires + # The date and time at which the object is no longer cacheable. For + # more information, see + # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21][1]. + # + # + # + # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + # @return [Time] + # + # @!attribute [rw] grant_full_control + # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the + # object. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_read + # Allows grantee to read the object data and its metadata. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_read_acp + # Allows grantee to read the object ACL. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] grant_write_acp + # Allows grantee to write the ACL for the applicable object. + # + # This action is not supported by Amazon S3 on Outposts. + # @return [String] + # + # @!attribute [rw] key + # Object key for which the PUT action was initiated. + # @return [String] + # + # @!attribute [rw] metadata + # A map of metadata to store with the object in S3. + # @return [Hash] + # + # @!attribute [rw] server_side_encryption + # The server-side encryption algorithm used when storing this object + # in Amazon S3 (for example, AES256, aws:kms). 
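+ #
+ # A sketch combining several of the fields above (hypothetical
+ # names; assumes a configured `Aws::S3::Client` named `client`):
+ #
+ #     client.put_object(
+ #       bucket: "example-bucket",
+ #       key: "docs/report.pdf",
+ #       body: File.open("report.pdf", "rb"),
+ #       content_type: "application/pdf",
+ #       cache_control: "max-age=3600",
+ #       metadata: { "project" => "example" },
+ #       server_side_encryption: "AES256"
+ #     )
+ #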
+ # @return [String]
+ #
+ # @!attribute [rw] storage_class
+ # By default, Amazon S3 uses the STANDARD Storage Class to store newly
+ # created objects. The STANDARD storage class provides high durability
+ # and high availability. Depending on performance needs, you can
+ # specify a different Storage Class. Amazon S3 on Outposts only uses
+ # the OUTPOSTS Storage Class. For more information, see [Storage
+ # Classes][1] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ # @return [String]
+ #
+ # @!attribute [rw] website_redirect_location
+ # If the bucket is configured as a website, redirects requests for
+ # this object to another object in the same bucket or to an external
+ # URL. Amazon S3 stores the value of this header in the object
+ # metadata. For information about object metadata, see [Object Key and
+ # Metadata][1].
+ #
+ # In the following example, the request header sets the redirect to an
+ # object (anotherPage.html) in the same bucket:
+ #
+ # `x-amz-website-redirect-location: /anotherPage.html`
+ #
+ # In the following example, the request header sets the object
+ # redirect to another website:
+ #
+ # `x-amz-website-redirect-location: http://www.example.com/`
+ #
+ # For more information about website hosting in Amazon S3, see
+ # [Hosting Websites on Amazon S3][2] and [How to Configure Website
+ # Page Redirects][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_algorithm
+ # Specifies the algorithm to use when encrypting the object (for
+ # example, AES256).
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key
+ # Specifies the customer-provided encryption key for Amazon S3 to use
+ # in encrypting data. This value is used to store the object and then
+ # it is discarded; Amazon S3 does not store the encryption key. The
+ # key must be appropriate for use with the algorithm specified in the
+ # `x-amz-server-side-encryption-customer-algorithm` header.
+ # @return [String]
+ #
+ # @!attribute [rw] sse_customer_key_md5
+ # Specifies the 128-bit MD5 digest of the encryption key according to
+ # RFC 1321. Amazon S3 uses this header for a message integrity check
+ # to ensure that the encryption key was transmitted without error.
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_key_id
+ # If `x-amz-server-side-encryption` is present and has the value of
+ # `aws:kms`, this header specifies the ID of the Amazon Web Services
+ # Key Management Service (Amazon Web Services KMS) symmetric
+ # customer managed key that was used for the object. If you specify
+ # `x-amz-server-side-encryption:aws:kms`, but do not provide
+ # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the
+ # Amazon Web Services managed key to protect the data. If the KMS key
+ # does not exist in the same account issuing the command, you must use
+ # the full ARN and not just the ID.
+ # @return [String]
+ #
+ # @!attribute [rw] ssekms_encryption_context
+ # Specifies the Amazon Web Services KMS Encryption Context to use for
+ # object encryption. The value of this header is a base64-encoded
+ # UTF-8 string holding JSON with the encryption context key-value
+ # pairs.
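+ #
+ # A hedged SSE-KMS sketch using the fields above (hypothetical key
+ # ARN; omit `ssekms_key_id` to fall back to the AWS managed key):
+ #
+ #     client.put_object(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       body: "data",
+ #       server_side_encryption: "aws:kms",
+ #       ssekms_key_id: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"
+ #     )
+ #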
+ # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key for object + # encryption with server-side encryption using AWS KMS (SSE-KMS). + # Setting this header to `true` causes Amazon S3 to use an S3 Bucket + # Key for object encryption with SSE-KMS. + # + # Specifying this header with a PUT action doesn’t affect bucket-level + # settings for S3 Bucket Key. + # @return [Boolean] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] tagging + # The tag-set for the object. The tag-set must be encoded as URL Query + # parameters. (For example, "Key1=Value1") + # @return [String] + # + # @!attribute [rw] object_lock_mode + # The Object Lock mode that you want to apply to this object. + # @return [String] + # + # @!attribute [rw] object_lock_retain_until_date + # The date and time when you want this object's Object Lock to + # expire. Must be formatted as a timestamp parameter. + # @return [Time] + # + # @!attribute [rw] object_lock_legal_hold_status + # Specifies whether a legal hold will be applied to this object. For + # more information about S3 Object Lock, see [Object Lock][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest AWS API Documentation + # + class PutObjectRequest < Struct.new( + :acl, + :body, + :bucket, + :cache_control, + :content_disposition, + :content_encoding, + :content_language, + :content_length, + :content_md5, + :content_type, + :checksum_algorithm, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :expires, + :grant_full_control, + :grant_read, + :grant_read_acp, + :grant_write_acp, + :key, + :metadata, + :server_side_encryption, + :storage_class, + :website_redirect_location, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :ssekms_key_id, + :ssekms_encryption_context, + :bucket_key_enabled, + :request_payer, + :tagging, + :object_lock_mode, + :object_lock_retain_until_date, + :object_lock_legal_hold_status, + :expected_bucket_owner) + SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] + include Aws::Structure + end + + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetentionOutput AWS API Documentation + # + class PutObjectRetentionOutput < Struct.new( + :request_charged) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name that contains the object you want to apply this + # Object Retention configuration to. 
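+ #
+ # For the retention request that follows, a minimal sketch
+ # (hypothetical names; `retain_until_date` takes a `Time`):
+ #
+ #     client.put_object_retention(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       retention: {
+ #         mode: "GOVERNANCE",
+ #         retain_until_date: Time.now + 86_400 # one day, for illustration
+ #       }
+ #     )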
+ # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # @return [String] + # + # @!attribute [rw] key + # The key name for the object that you want to apply this Object + # Retention configuration to. + # @return [String] + # + # @!attribute [rw] retention + # The container element for the Object Retention configuration. + # @return [Types::ObjectLockRetention] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] version_id + # The version ID for the object that you want to apply this Object + # Retention configuration to. + # @return [String] + # + # @!attribute [rw] bypass_governance_retention + # Indicates whether this action should bypass Governance-mode + # restrictions. + # @return [Boolean] + # + # @!attribute [rw] content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetentionRequest AWS API Documentation + # + class PutObjectRetentionRequest < Struct.new( + :bucket, + :key, + :retention, + :request_payer, + :version_id, + :bypass_governance_retention, + :content_md5, + :checksum_algorithm, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] version_id + # The versionId of the object the tag-set was added to. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput AWS API Documentation + # + class PutObjectTaggingOutput < Struct.new( + :version_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Name of the object key. + # @return [String] + # + # @!attribute [rw] version_id + # The versionId of the object that the tag-set will be added to. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash for the request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] tagging + # Container for the `TagSet` and `Tag` elements + # @return [Types::Tagging] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. 
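+ #
+ # A minimal tagging sketch built from these fields (hypothetical
+ # bucket and key):
+ #
+ #     client.put_object_tagging(
+ #       bucket: "example-bucket",
+ #       key: "example-key",
+ #       tagging: { tag_set: [{ key: "Project", value: "Example" }] }
+ #     )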
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest AWS API Documentation + # + class PutObjectTaggingRequest < Struct.new( + :bucket, + :key, + :version_id, + :content_md5, + :checksum_algorithm, + :tagging, + :expected_bucket_owner, + :request_payer) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The name of the Amazon S3 bucket whose `PublicAccessBlock` + # configuration you want to set. + # @return [String] + # + # @!attribute [rw] content_md5 + # The MD5 hash of the `PutPublicAccessBlock` request body. + # + # For requests made using the Amazon Web Services Command Line + # Interface (CLI) or Amazon Web Services SDKs, this field is + # calculated automatically. + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] public_access_block_configuration + # The `PublicAccessBlock` configuration that you want to apply to this + # Amazon S3 bucket. You can enable the configuration options in any + # combination. For more information about when Amazon S3 considers a + # bucket or object public, see [The Meaning of "Public"][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status + # @return [Types::PublicAccessBlockConfiguration] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlockRequest AWS API Documentation + # + class PutPublicAccessBlockRequest < Struct.new( + :bucket, + :content_md5, + :checksum_algorithm, + :public_access_block_configuration, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the configuration for publishing messages to an Amazon + # Simple Queue Service (Amazon SQS) queue when Amazon S3 detects + # specified events. + # + # @!attribute [rw] id + # An optional unique identifier for configurations in a notification + # configuration. If you don't provide one, Amazon S3 will assign an + # ID. + # @return [String] + # + # @!attribute [rw] queue_arn + # The Amazon Resource Name (ARN) of the Amazon SQS queue to which + # Amazon S3 publishes a message when it detects events of the + # specified type. 
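+ #
+ # The `PublicAccessBlock` request documented above can be sketched as
+ # follows (hypothetical bucket; all four flags are optional booleans):
+ #
+ #     client.put_public_access_block(
+ #       bucket: "example-bucket",
+ #       public_access_block_configuration: {
+ #         block_public_acls: true,
+ #         ignore_public_acls: true,
+ #         block_public_policy: true,
+ #         restrict_public_buckets: true
+ #       }
+ #     )
+ #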
+ # @return [String] + # + # @!attribute [rw] events + # A collection of bucket events for which to send notifications + # @return [Array] + # + # @!attribute [rw] filter + # Specifies object key name filtering rules. For information about key + # name filtering, see [Configuring Event Notifications][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html + # @return [Types::NotificationConfigurationFilter] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration AWS API Documentation + # + class QueueConfiguration < Struct.new( + :id, + :queue_arn, + :events, + :filter) + SENSITIVE = [] + include Aws::Structure + end + + # This data type is deprecated. Use [QueueConfiguration][1] for the same + # purposes. This data type specifies the configuration for publishing + # messages to an Amazon Simple Queue Service (Amazon SQS) queue when + # Amazon S3 detects specified events. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html + # + # @!attribute [rw] id + # An optional unique identifier for configurations in a notification + # configuration. If you don't provide one, Amazon S3 will assign an + # ID. + # @return [String] + # + # @!attribute [rw] event + # The bucket event for which to send notifications. + # @return [String] + # + # @!attribute [rw] events + # A collection of bucket events for which to send notifications. + # @return [Array] + # + # @!attribute [rw] queue + # The Amazon Resource Name (ARN) of the Amazon SQS queue to which + # Amazon S3 publishes a message when it detects events of the + # specified type. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated AWS API Documentation + # + class QueueConfigurationDeprecated < Struct.new( + :id, + :event, + :events, + :queue) + SENSITIVE = [] + include Aws::Structure + end + + # The container for the records event. + # + # @!attribute [rw] payload + # The byte array of partial, one or more result records. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RecordsEvent AWS API Documentation + # + class RecordsEvent < Struct.new( + :payload, + :event_type) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies how requests are redirected. In the event of an error, you + # can specify a different error code to return. + # + # @!attribute [rw] host_name + # The host name to use in the redirect request. + # @return [String] + # + # @!attribute [rw] http_redirect_code + # The HTTP redirect code to use on the response. Not required if one + # of the siblings is present. + # @return [String] + # + # @!attribute [rw] protocol + # Protocol to use when redirecting requests. The default is the + # protocol that is used in the original request. + # @return [String] + # + # @!attribute [rw] replace_key_prefix_with + # The object key prefix to use in the redirect request. For example, + # to redirect requests for all pages with prefix `docs/` (objects in + # the `docs/` folder) to `documents/`, you can set a condition block + # with `KeyPrefixEquals` set to `docs/` and in the Redirect set + # `ReplaceKeyPrefixWith` to `/documents`. Not required if one of the + # siblings is present. Can be present only if `ReplaceKeyWith` is not + # provided. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. 
For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @!attribute [rw] replace_key_with + # The specific object key to use in the redirect request. For example, + # redirect request to `error.html`. Not required if one of the + # siblings is present. Can be present only if `ReplaceKeyPrefixWith` + # is not provided. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect AWS API Documentation + # + class Redirect < Struct.new( + :host_name, + :http_redirect_code, + :protocol, + :replace_key_prefix_with, + :replace_key_with) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the redirect behavior of all requests to a website endpoint + # of an Amazon S3 bucket. + # + # @!attribute [rw] host_name + # Name of the host where requests are redirected. + # @return [String] + # + # @!attribute [rw] protocol + # Protocol to use when redirecting requests. The default is the + # protocol that is used in the original request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo AWS API Documentation + # + class RedirectAllRequestsTo < Struct.new( + :host_name, + :protocol) + SENSITIVE = [] + include Aws::Structure + end + + # A filter that you can specify for selection for modifications on + # replicas. Amazon S3 doesn't replicate replica modifications by + # default. In the latest version of replication configuration (when + # `Filter` is specified), you can specify this element and set the + # status to `Enabled` to replicate modifications on replicas. + # + # If you don't specify the `Filter` element, Amazon S3 assumes that the + # replication configuration is the earlier version, V1. In the earlier + # version, this element is not allowed. + # + # + # + # @!attribute [rw] status + # Specifies whether Amazon S3 replicates modifications on replicas. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicaModifications AWS API Documentation + # + class ReplicaModifications < Struct.new( + :status) + SENSITIVE = [] + include Aws::Structure + end + + # A container for replication rules. You can add up to 1,000 rules. The + # maximum size of a replication configuration is 2 MB. + # + # @!attribute [rw] role + # The Amazon Resource Name (ARN) of the Identity and Access Management + # (IAM) role that Amazon S3 assumes when replicating objects. For more + # information, see [How to Set Up Replication][1] in the *Amazon S3 + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html + # @return [String] + # + # @!attribute [rw] rules + # A container for one or more replication rules. A replication + # configuration must have at least one rule and can contain a maximum + # of 1,000 rules. 
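+ #
+ # Looking back at the `Redirect` and `RoutingRule` structures above,
+ # a website redirect might be sketched as (hypothetical bucket):
+ #
+ #     client.put_bucket_website(
+ #       bucket: "example-bucket",
+ #       website_configuration: {
+ #         index_document: { suffix: "index.html" },
+ #         routing_rules: [{
+ #           condition: { key_prefix_equals: "docs/" },
+ #           redirect: { replace_key_prefix_with: "documents/" }
+ #         }]
+ #       }
+ #     )
+ #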
+ # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration AWS API Documentation + # + class ReplicationConfiguration < Struct.new( + :role, + :rules) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies which Amazon S3 objects to replicate and where to store the + # replicas. + # + # @!attribute [rw] id + # A unique identifier for the rule. The maximum value is 255 + # characters. + # @return [String] + # + # @!attribute [rw] priority + # The priority indicates which rule has precedence whenever two or + # more replication rules conflict. Amazon S3 will attempt to replicate + # objects according to all replication rules. However, if there are + # two or more rules with the same destination bucket, then objects + # will be replicated according to the rule with the highest priority. + # The higher the number, the higher the priority. + # + # For more information, see [Replication][1] in the *Amazon S3 User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html + # @return [Integer] + # + # @!attribute [rw] prefix + # An object key name prefix that identifies the object or objects to + # which the rule applies. The maximum prefix length is 1,024 + # characters. To include all objects in a bucket, specify an empty + # string. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @!attribute [rw] filter + # A filter that identifies the subset of objects to which the + # replication rule applies. A `Filter` must specify exactly one + # `Prefix`, `Tag`, or an `And` child element. + # @return [Types::ReplicationRuleFilter] + # + # @!attribute [rw] status + # Specifies whether the rule is enabled. + # @return [String] + # + # @!attribute [rw] source_selection_criteria + # A container that describes additional filters for identifying the + # source objects that you want to replicate. You can choose to enable + # or disable the replication of these objects. Currently, Amazon S3 + # supports only the filter that you can specify for objects created + # with server-side encryption using a customer managed key stored in + # Amazon Web Services Key Management Service (SSE-KMS). + # @return [Types::SourceSelectionCriteria] + # + # @!attribute [rw] existing_object_replication + # @return [Types::ExistingObjectReplication] + # + # @!attribute [rw] destination + # A container for information about the replication destination and + # its configurations including enabling the S3 Replication Time + # Control (S3 RTC). + # @return [Types::Destination] + # + # @!attribute [rw] delete_marker_replication + # Specifies whether Amazon S3 replicates delete markers. If you + # specify a `Filter` in your replication configuration, you must also + # include a `DeleteMarkerReplication` element. If your `Filter` + # includes a `Tag` element, the `DeleteMarkerReplication` `Status` + # must be set to Disabled, because Amazon S3 does not support + # replicating delete markers for tag-based rules. For an example + # configuration, see [Basic Rule Configuration][1]. + # + # For more information about delete marker replication, see [Basic + # Rule Configuration][2]. 
+ # + # If you are using an earlier version of the replication + # configuration, Amazon S3 handles replication of delete markers + # differently. For more information, see [Backward Compatibility][3]. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations + # @return [Types::DeleteMarkerReplication] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule AWS API Documentation + # + class ReplicationRule < Struct.new( + :id, + :priority, + :prefix, + :filter, + :status, + :source_selection_criteria, + :existing_object_replication, + :destination, + :delete_marker_replication) + SENSITIVE = [] + include Aws::Structure + end + + # A container for specifying rule filters. The filters determine the + # subset of objects to which the rule applies. This element is required + # only if you specify more than one filter. + # + # For example: + # + # * If you specify both a `Prefix` and a `Tag` filter, wrap these + # filters in an `And` tag. + # + # * If you specify a filter based on multiple tags, wrap the `Tag` + # elements in an `And` tag. + # + # @!attribute [rw] prefix + # An object key name prefix that identifies the subset of objects to + # which the rule applies. + # @return [String] + # + # @!attribute [rw] tags + # An array of tags containing key and value pairs. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleAndOperator AWS API Documentation + # + class ReplicationRuleAndOperator < Struct.new( + :prefix, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # A filter that identifies the subset of objects to which the + # replication rule applies. A `Filter` must specify exactly one + # `Prefix`, `Tag`, or an `And` child element. + # + # @!attribute [rw] prefix + # An object key name prefix that identifies the subset of objects to + # which the rule applies. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @!attribute [rw] tag + # A container for specifying a tag key and value. + # + # The rule applies only to objects that have the tag in their tag set. + # @return [Types::Tag] + # + # @!attribute [rw] and + # A container for specifying rule filters. The filters determine the + # subset of objects to which the rule applies. This element is + # required only if you specify more than one filter. For example: + # + # * If you specify both a `Prefix` and a `Tag` filter, wrap these + # filters in an `And` tag. + # + # * If you specify a filter based on multiple tags, wrap the `Tag` + # elements in an `And` tag. 
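+ #
+ # Pulling the rule pieces above together, a hedged sketch
+ # (hypothetical role and destination ARNs; the source bucket must
+ # already have versioning enabled):
+ #
+ #     client.put_bucket_replication(
+ #       bucket: "example-source-bucket",
+ #       replication_configuration: {
+ #         role: "arn:aws:iam::111122223333:role/example-replication",
+ #         rules: [{
+ #           id: "example-rule",
+ #           priority: 1,
+ #           status: "Enabled",
+ #           filter: { prefix: "logs/" },
+ #           delete_marker_replication: { status: "Disabled" },
+ #           destination: { bucket: "arn:aws:s3:::example-destination" }
+ #         }]
+ #       }
+ #     )
+ #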
+ # @return [Types::ReplicationRuleAndOperator] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleFilter AWS API Documentation + # + class ReplicationRuleFilter < Struct.new( + :prefix, + :tag, + :and) + SENSITIVE = [] + include Aws::Structure + end + + # A container specifying S3 Replication Time Control (S3 RTC) related + # information, including whether S3 RTC is enabled and the time when all + # objects and operations on objects must be replicated. Must be + # specified together with a `Metrics` block. + # + # @!attribute [rw] status + # Specifies whether the replication time is enabled. + # @return [String] + # + # @!attribute [rw] time + # A container specifying the time by which replication should be + # complete for all objects and operations on objects. + # @return [Types::ReplicationTimeValue] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTime AWS API Documentation + # + class ReplicationTime < Struct.new( + :status, + :time) + SENSITIVE = [] + include Aws::Structure + end + + # A container specifying the time value for S3 Replication Time Control + # (S3 RTC) and replication metrics `EventThreshold`. + # + # @!attribute [rw] minutes + # Contains an integer specifying time in minutes. + # + # Valid value: 15 + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTimeValue AWS API Documentation + # + class ReplicationTimeValue < Struct.new( + :minutes) + SENSITIVE = [] + include Aws::Structure + end + + # Container for Payer. + # + # @!attribute [rw] payer + # Specifies who pays for the download and request fees. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration AWS API Documentation + # + class RequestPaymentConfiguration < Struct.new( + :payer) + SENSITIVE = [] + include Aws::Structure + end + + # Container for specifying if periodic `QueryProgress` messages should + # be sent. + # + # @!attribute [rw] enabled + # Specifies whether periodic QueryProgress frames should be sent. + # Valid values: TRUE, FALSE. Default value: FALSE. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestProgress AWS API Documentation + # + class RequestProgress < Struct.new( + :enabled) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @!attribute [rw] restore_output_path + # Indicates the path in the provided S3 output location where Select + # results will be restored to. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput AWS API Documentation + # + class RestoreObjectOutput < Struct.new( + :request_charged, + :restore_output_path) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name containing the object to restore. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. 
+ # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] key + # Object key for which the action was initiated. + # @return [String] + # + # @!attribute [rw] version_id + # VersionId used to reference a specific version of the object. + # @return [String] + # + # @!attribute [rw] restore_request + # Container for restore job parameters. + # @return [Types::RestoreRequest] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] checksum_algorithm + # Indicates the algorithm used to create the checksum for the object + # when using the SDK. This header will not provide any additional + # functionality if not using the SDK. When sending this header, there + # must be a corresponding `x-amz-checksum` or `x-amz-trailer` header + # sent. Otherwise, Amazon S3 fails the request with the HTTP status + # code `400 Bad Request`. For more information, see [Checking object + # integrity][1] in the *Amazon S3 User Guide*. + # + # If you provide an individual checksum, Amazon S3 ignores any + # provided `ChecksumAlgorithm` parameter. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected bucket owner. If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest AWS API Documentation + # + class RestoreObjectRequest < Struct.new( + :bucket, + :key, + :version_id, + :restore_request, + :request_payer, + :checksum_algorithm, + :expected_bucket_owner) + SENSITIVE = [] + include Aws::Structure + end + + # Container for restore job parameters. + # + # @!attribute [rw] days + # Lifetime of the active copy in days. Do not use with restores that + # specify `OutputLocation`. + # + # The Days element is required for regular restores, and must not be + # provided for select requests. + # @return [Integer] + # + # @!attribute [rw] glacier_job_parameters + # S3 Glacier related parameters pertaining to this job. Do not use + # with restores that specify `OutputLocation`. + # @return [Types::GlacierJobParameters] + # + # @!attribute [rw] type + # Type of restore request. 
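+ #
+ # A hedged sketch of a Glacier restore using these parameters
+ # (hypothetical names; `days` applies to regular restores only):
+ #
+ #     client.restore_object(
+ #       bucket: "example-bucket",
+ #       key: "example-archived-key",
+ #       restore_request: {
+ #         days: 7,
+ #         glacier_job_parameters: { tier: "Standard" }
+ #       }
+ #     )
+ #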
+ # @return [String] + # + # @!attribute [rw] tier + # Retrieval tier at which the restore will be processed. + # @return [String] + # + # @!attribute [rw] description + # The optional description for the job. + # @return [String] + # + # @!attribute [rw] select_parameters + # Describes the parameters for Select job types. + # @return [Types::SelectParameters] + # + # @!attribute [rw] output_location + # Describes the location where the restore job's output is stored. + # @return [Types::OutputLocation] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest AWS API Documentation + # + class RestoreRequest < Struct.new( + :days, + :glacier_job_parameters, + :type, + :tier, + :description, + :select_parameters, + :output_location) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the redirect behavior and when a redirect is applied. For + # more information about routing rules, see [Configuring advanced + # conditional redirects][1] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects + # + # @!attribute [rw] condition + # A container for describing a condition that must be met for the + # specified redirect to apply. For example, 1. If request is for pages + # in the `/docs` folder, redirect to the `/documents` folder. 2. If + # request results in HTTP error 4xx, redirect request to another host + # where you might process the error. + # @return [Types::Condition] + # + # @!attribute [rw] redirect + # Container for redirect information. You can redirect requests to + # another host, to another page, or with another protocol. In the + # event of an error, you can specify a different error code to return. + # @return [Types::Redirect] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule AWS API Documentation + # + class RoutingRule < Struct.new( + :condition, + :redirect) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies lifecycle rules for an Amazon S3 bucket. For more + # information, see [Put Bucket Lifecycle Configuration][1] in the + # *Amazon S3 API Reference*. For examples, see [Put Bucket Lifecycle + # Configuration Examples][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples + # + # @!attribute [rw] expiration + # Specifies the expiration for the lifecycle of the object. + # @return [Types::LifecycleExpiration] + # + # @!attribute [rw] id + # Unique identifier for the rule. The value can't be longer than 255 + # characters. + # @return [String] + # + # @!attribute [rw] prefix + # Object key prefix that identifies one or more objects to which this + # rule applies. + # + # Replacement must be made for object keys containing special + # characters (such as carriage returns) when using XML requests. For + # more information, see [ XML related object key constraints][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + # @return [String] + # + # @!attribute [rw] status + # If `Enabled`, the rule is currently being applied. If `Disabled`, + # the rule is not currently being applied. + # @return [String] + # + # @!attribute [rw] transition + # Specifies when an object transitions to a specified storage class. 
+ # For more information about Amazon S3 lifecycle configuration rules, + # see [Transitioning Objects Using Amazon S3 Lifecycle][1] in the + # *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html + # @return [Types::Transition] + # + # @!attribute [rw] noncurrent_version_transition + # Container for the transition rule that describes when noncurrent + # objects transition to the `STANDARD_IA`, `ONEZONE_IA`, + # `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or `DEEP_ARCHIVE` + # storage class. If your bucket is versioning-enabled (or versioning + # is suspended), you can set this action to request that Amazon S3 + # transition noncurrent object versions to the `STANDARD_IA`, + # `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or + # `DEEP_ARCHIVE` storage class at a specific period in the object's + # lifetime. + # @return [Types::NoncurrentVersionTransition] + # + # @!attribute [rw] noncurrent_version_expiration + # Specifies when noncurrent object versions expire. Upon expiration, + # Amazon S3 permanently deletes the noncurrent object versions. You + # set this lifecycle configuration action on a bucket that has + # versioning enabled (or suspended) to request that Amazon S3 delete + # noncurrent object versions at a specific period in the object's + # lifetime. + # @return [Types::NoncurrentVersionExpiration] + # + # @!attribute [rw] abort_incomplete_multipart_upload + # Specifies the days since the initiation of an incomplete multipart + # upload that Amazon S3 will wait before permanently removing all + # parts of the upload. For more information, see [ Aborting Incomplete + # Multipart Uploads Using a Bucket Lifecycle Policy][1] in the *Amazon + # S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config + # @return [Types::AbortIncompleteMultipartUpload] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule AWS API Documentation + # + class Rule < Struct.new( + :expiration, + :id, + :prefix, + :status, + :transition, + :noncurrent_version_transition, + :noncurrent_version_expiration, + :abort_incomplete_multipart_upload) + SENSITIVE = [] + include Aws::Structure + end + + # A container for object key name prefix and suffix filtering rules. + # + # @!attribute [rw] filter_rules + # A list of containers for the key-value pair that defines the + # criteria for the filter rule. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter AWS API Documentation + # + class S3KeyFilter < Struct.new( + :filter_rules) + SENSITIVE = [] + include Aws::Structure + end + + # Describes an Amazon S3 location that will receive the results of the + # restore request. + # + # @!attribute [rw] bucket_name + # The name of the bucket where the restore results will be placed. + # @return [String] + # + # @!attribute [rw] prefix + # The prefix that is prepended to the restore results for this + # request. + # @return [String] + # + # @!attribute [rw] encryption + # Contains the type of server-side encryption used. + # @return [Types::Encryption] + # + # @!attribute [rw] canned_acl + # The canned ACL to apply to the restore results. + # @return [String] + # + # @!attribute [rw] access_control_list + # A list of grants that control access to the staged results. 
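+ #
+ # The `Rule` structure above corresponds to the older
+ # `put_bucket_lifecycle` call; a hedged sketch with hypothetical
+ # names (new code would normally use
+ # `put_bucket_lifecycle_configuration` instead):
+ #
+ #     client.put_bucket_lifecycle(
+ #       bucket: "example-bucket",
+ #       lifecycle_configuration: {
+ #         rules: [{
+ #           id: "expire-logs",
+ #           prefix: "logs/",
+ #           status: "Enabled",
+ #           expiration: { days: 365 }
+ #         }]
+ #       }
+ #     )
+ #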
+  #   @return [Array<Types::Grant>]
+  #
+  # @!attribute [rw] tagging
+  #   The tag-set that is applied to the restore results.
+  #   @return [Types::Tagging]
+  #
+  # @!attribute [rw] user_metadata
+  #   A list of metadata to store with the restore results in S3.
+  #   @return [Array<Types::MetadataEntry>]
+  #
+  # @!attribute [rw] storage_class
+  #   The class of storage used to store the restore results.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location AWS API Documentation
+  #
+  class S3Location < Struct.new(
+    :bucket_name,
+    :prefix,
+    :encryption,
+    :canned_acl,
+    :access_control_list,
+    :tagging,
+    :user_metadata,
+    :storage_class)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+  #
+  # @!attribute [rw] key_id
+  #   Specifies the ID of the Amazon Web Services Key Management Service
+  #   (Amazon Web Services KMS) symmetric customer managed key to use for
+  #   encrypting inventory reports.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS AWS API Documentation
+  #
+  class SSEKMS < Struct.new(
+    :key_id)
+    SENSITIVE = [:key_id]
+    include Aws::Structure
+  end
+
+  # Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+  #
+  # @api private
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3 AWS API Documentation
+  #
+  class SSES3 < Aws::EmptyStructure; end
+
+  # Specifies the byte range of the object to get the records from. A
+  # record is processed when its first byte is contained by the range.
+  # This parameter is optional, but when specified, it must not be empty.
+  # See RFC 2616, Section 14.35.1 about how to specify the start and end
+  # of the range.
+  #
+  # @!attribute [rw] start
+  #   Specifies the start of the byte range. This parameter is optional.
+  #   Valid values: non-negative integers. The default value is 0. If only
+  #   `start` is supplied, it means scan from that point to the end of the
+  #   file. For example, `<scanrange><start>50</start></scanrange>` means
+  #   scan from byte 50 until the end of the file.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] end
+  #   Specifies the end of the byte range. This parameter is optional.
+  #   Valid values: non-negative integers. The default value is one less
+  #   than the size of the object being queried. If only the End parameter
+  #   is supplied, it is interpreted to mean scan the last N bytes of the
+  #   file. For example, `<scanrange><end>50</end></scanrange>` means scan
+  #   the last 50 bytes.
+  #   @return [Integer]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ScanRange AWS API Documentation
+  #
+  class ScanRange < Struct.new(
+    :start,
+    :end)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # @!attribute [rw] payload
+  #   The array of results.
+  #   @return [Types::SelectObjectContentEventStream]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentOutput AWS API Documentation
+  #
+  class SelectObjectContentOutput < Struct.new(
+    :payload)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Request to filter the contents of an Amazon S3 object based on a
+  # simple Structured Query Language (SQL) statement. In the request,
+  # along with the SQL expression, you must specify a data serialization
+  # format (JSON or CSV) of the object. Amazon S3 uses this to parse
+  # object data into records. It returns only records that match the
+  # specified SQL expression. You must also specify the data serialization
+  # format for the response. For more information, see [S3Select API
+  # Documentation][1].
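+  #
+  # As a sketch of typical usage (an editor's addition; the bucket, key,
+  # and CSV shape are illustrative assumptions):
+  #
+  #     client.select_object_content(
+  #       bucket: 'my-bucket',
+  #       key: 'data.csv',
+  #       expression: "SELECT s._1 FROM S3Object s",
+  #       expression_type: 'SQL',
+  #       input_serialization: { csv: { file_header_info: 'USE' } },
+  #       output_serialization: { csv: {} }
+  #     ) do |stream|
+  #       stream.on_records_event { |event| puts event.payload.read }
+  #     end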
+  #
+  #
+  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+  #
+  # @!attribute [rw] bucket
+  #   The S3 bucket.
+  #   @return [String]
+  #
+  # @!attribute [rw] key
+  #   The object key.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_algorithm
+  #   The server-side encryption (SSE) algorithm used to encrypt the
+  #   object. This parameter is needed only when the object was created
+  #   using a checksum algorithm. For more information, see [Protecting
+  #   data using SSE-C keys][1] in the *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key
+  #   The server-side encryption (SSE) customer managed key. This
+  #   parameter is needed only when the object was created using a
+  #   checksum algorithm. For more information, see [Protecting data using
+  #   SSE-C keys][1] in the *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key_md5
+  #   The MD5 server-side encryption (SSE) customer managed key. This
+  #   parameter is needed only when the object was created using a
+  #   checksum algorithm. For more information, see [Protecting data using
+  #   SSE-C keys][1] in the *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+  #   @return [String]
+  #
+  # @!attribute [rw] expression
+  #   The expression that is used to query the object.
+  #   @return [String]
+  #
+  # @!attribute [rw] expression_type
+  #   The type of the provided expression (for example, SQL).
+  #   @return [String]
+  #
+  # @!attribute [rw] request_progress
+  #   Specifies if periodic request progress information should be
+  #   enabled.
+  #   @return [Types::RequestProgress]
+  #
+  # @!attribute [rw] input_serialization
+  #   Describes the format of the data in the object that is being
+  #   queried.
+  #   @return [Types::InputSerialization]
+  #
+  # @!attribute [rw] output_serialization
+  #   Describes the format of the data that you want Amazon S3 to return
+  #   in response.
+  #   @return [Types::OutputSerialization]
+  #
+  # @!attribute [rw] scan_range
+  #   Specifies the byte range of the object to get the records from. A
+  #   record is processed when its first byte is contained by the range.
+  #   This parameter is optional, but when specified, it must not be
+  #   empty. See RFC 2616, Section 14.35.1 about how to specify the start
+  #   and end of the range.
+  #
+  #   `ScanRange` may be used in the following ways:
+  #
+  #   * `<scanrange><start>50</start><end>100</end></scanrange>` - process
+  #     only the records starting between the bytes 50 and 100 (inclusive,
+  #     counting from zero)
+  #
+  #   * `<scanrange><start>50</start></scanrange>` - process only the
+  #     records starting after the byte 50
+  #
+  #   * `<scanrange><end>50</end></scanrange>` - process only the records
+  #     within the last 50 bytes of the file.
+  #   @return [Types::ScanRange]
+  #
+  # @!attribute [rw] expected_bucket_owner
+  #   The account ID of the expected bucket owner. If the bucket is owned
+  #   by a different account, the request fails with the HTTP status code
+  #   `403 Forbidden` (access denied).
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentRequest AWS API Documentation + # + class SelectObjectContentRequest < Struct.new( + :bucket, + :key, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :expression, + :expression_type, + :request_progress, + :input_serialization, + :output_serialization, + :scan_range, + :expected_bucket_owner) + SENSITIVE = [:sse_customer_key] + include Aws::Structure + end + + # Describes the parameters for Select job types. + # + # @!attribute [rw] input_serialization + # Describes the serialization format of the object. + # @return [Types::InputSerialization] + # + # @!attribute [rw] expression_type + # The type of the provided expression (for example, SQL). + # @return [String] + # + # @!attribute [rw] expression + # The expression that is used to query the object. + # @return [String] + # + # @!attribute [rw] output_serialization + # Describes how the results of the Select job are serialized. + # @return [Types::OutputSerialization] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters AWS API Documentation + # + class SelectParameters < Struct.new( + :input_serialization, + :expression_type, + :expression, + :output_serialization) + SENSITIVE = [] + include Aws::Structure + end + + # Describes the default server-side encryption to apply to new objects + # in the bucket. If a PUT Object request doesn't specify any + # server-side encryption, this default encryption will be applied. If + # you don't specify a customer managed key at configuration, Amazon S3 + # automatically creates an Amazon Web Services KMS key in your Amazon + # Web Services account the first time that you add an object encrypted + # with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for + # SSE-KMS. For more information, see [PUT Bucket encryption][1] in the + # *Amazon S3 API Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html + # + # @!attribute [rw] sse_algorithm + # Server-side encryption algorithm to use for the default encryption. + # @return [String] + # + # @!attribute [rw] kms_master_key_id + # Amazon Web Services Key Management Service (KMS) customer Amazon Web + # Services KMS key ID to use for the default encryption. This + # parameter is allowed if and only if `SSEAlgorithm` is set to + # `aws:kms`. + # + # You can specify the key ID or the Amazon Resource Name (ARN) of the + # KMS key. However, if you are using encryption with cross-account or + # Amazon Web Services service operations you must use a fully + # qualified KMS key ARN. For more information, see [Using encryption + # for cross-account operations][1]. + # + # **For example:** + # + # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` + # + # * Key ARN: + # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` + # + # Amazon S3 only supports symmetric KMS keys and not asymmetric KMS + # keys. For more information, see [Using symmetric and asymmetric + # keys][2] in the *Amazon Web Services Key Management Service + # Developer Guide*. 
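+  #
+  #   A default-encryption rule that uses this field might be set as
+  #   follows (an editor's sketch; the bucket name and key ID are
+  #   placeholders):
+  #
+  #       client.put_bucket_encryption(
+  #         bucket: 'my-bucket',
+  #         server_side_encryption_configuration: {
+  #           rules: [{
+  #             apply_server_side_encryption_by_default: {
+  #               sse_algorithm: 'aws:kms',
+  #               kms_master_key_id: '1234abcd-12ab-34cd-56ef-1234567890ab'
+  #             }
+  #           }]
+  #         }
+  #       )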
+ # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault AWS API Documentation + # + class ServerSideEncryptionByDefault < Struct.new( + :sse_algorithm, + :kms_master_key_id) + SENSITIVE = [:kms_master_key_id] + include Aws::Structure + end + + # Specifies the default server-side-encryption configuration. + # + # @!attribute [rw] rules + # Container for information about a particular server-side encryption + # configuration rule. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration AWS API Documentation + # + class ServerSideEncryptionConfiguration < Struct.new( + :rules) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the default server-side encryption configuration. + # + # @!attribute [rw] apply_server_side_encryption_by_default + # Specifies the default server-side encryption to apply to new objects + # in the bucket. If a PUT Object request doesn't specify any + # server-side encryption, this default encryption will be applied. + # @return [Types::ServerSideEncryptionByDefault] + # + # @!attribute [rw] bucket_key_enabled + # Specifies whether Amazon S3 should use an S3 Bucket Key with + # server-side encryption using KMS (SSE-KMS) for new objects in the + # bucket. Existing objects are not affected. Setting the + # `BucketKeyEnabled` element to `true` causes Amazon S3 to use an S3 + # Bucket Key. By default, S3 Bucket Key is not enabled. + # + # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon + # S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule AWS API Documentation + # + class ServerSideEncryptionRule < Struct.new( + :apply_server_side_encryption_by_default, + :bucket_key_enabled) + SENSITIVE = [] + include Aws::Structure + end + + # A container that describes additional filters for identifying the + # source objects that you want to replicate. You can choose to enable or + # disable the replication of these objects. Currently, Amazon S3 + # supports only the filter that you can specify for objects created with + # server-side encryption using a customer managed key stored in Amazon + # Web Services Key Management Service (SSE-KMS). + # + # @!attribute [rw] sse_kms_encrypted_objects + # A container for filter information for the selection of Amazon S3 + # objects encrypted with Amazon Web Services KMS. If you include + # `SourceSelectionCriteria` in the replication configuration, this + # element is required. + # @return [Types::SseKmsEncryptedObjects] + # + # @!attribute [rw] replica_modifications + # A filter that you can specify for selections for modifications on + # replicas. Amazon S3 doesn't replicate replica modifications by + # default. In the latest version of replication configuration (when + # `Filter` is specified), you can specify this element and set the + # status to `Enabled` to replicate modifications on replicas. + # + # If you don't specify the `Filter` element, Amazon S3 assumes that + # the replication configuration is the earlier version, V1. 
In the
+  #   earlier version, this element is not allowed.
+  #
+  #
+  #   @return [Types::ReplicaModifications]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria AWS API Documentation
+  #
+  class SourceSelectionCriteria < Struct.new(
+    :sse_kms_encrypted_objects,
+    :replica_modifications)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # A container for filter information for the selection of S3 objects
+  # encrypted with Amazon Web Services KMS.
+  #
+  # @!attribute [rw] status
+  #   Specifies whether Amazon S3 replicates objects created with
+  #   server-side encryption using an Amazon Web Services KMS key stored
+  #   in Amazon Web Services Key Management Service.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects AWS API Documentation
+  #
+  class SseKmsEncryptedObjects < Struct.new(
+    :status)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Container for the stats details.
+  #
+  # @!attribute [rw] bytes_scanned
+  #   The total number of object bytes scanned.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] bytes_processed
+  #   The total number of uncompressed object bytes processed.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] bytes_returned
+  #   The total number of bytes of records payload data returned.
+  #   @return [Integer]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Stats AWS API Documentation
+  #
+  class Stats < Struct.new(
+    :bytes_scanned,
+    :bytes_processed,
+    :bytes_returned)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Container for the Stats Event.
+  #
+  # @!attribute [rw] details
+  #   The Stats event details.
+  #   @return [Types::Stats]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StatsEvent AWS API Documentation
+  #
+  class StatsEvent < Struct.new(
+    :details,
+    :event_type)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Specifies data related to access patterns to be collected and made
+  # available to analyze the tradeoffs between different storage classes
+  # for an Amazon S3 bucket.
+  #
+  # @!attribute [rw] data_export
+  #   Specifies how data related to the storage class analysis for an
+  #   Amazon S3 bucket should be exported.
+  #   @return [Types::StorageClassAnalysisDataExport]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis AWS API Documentation
+  #
+  class StorageClassAnalysis < Struct.new(
+    :data_export)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Container for data related to the storage class analysis for an Amazon
+  # S3 bucket for export.
+  #
+  # @!attribute [rw] output_schema_version
+  #   The version of the output schema to use when exporting data. Must be
+  #   `V_1`.
+  #   @return [String]
+  #
+  # @!attribute [rw] destination
+  #   The place to store the data for an analysis.
+  #   @return [Types::AnalyticsExportDestination]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport AWS API Documentation
+  #
+  class StorageClassAnalysisDataExport < Struct.new(
+    :output_schema_version,
+    :destination)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # A container for a key-value name pair.
+  #
+  # @!attribute [rw] key
+  #   Name of the object key.
+  #   @return [String]
+  #
+  # @!attribute [rw] value
+  #   Value of the tag.
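+  #
+  #   For example (an editor's sketch; the names are placeholders), a
+  #   `project=mobile` tag passed to `put_object_tagging` would be
+  #   expressed as:
+  #
+  #       tagging: { tag_set: [{ key: 'project', value: 'mobile' }] }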
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag AWS API Documentation
+  #
+  class Tag < Struct.new(
+    :key,
+    :value)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Container for `TagSet` elements.
+  #
+  # @!attribute [rw] tag_set
+  #   A collection for a set of tags.
+  #   @return [Array<Types::Tag>]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging AWS API Documentation
+  #
+  class Tagging < Struct.new(
+    :tag_set)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Container for granting information.
+  #
+  # Buckets that use the bucket owner enforced setting for Object
+  # Ownership don't support target grants. For more information, see
+  # [Permissions server access log delivery][1] in the *Amazon S3 User
+  # Guide*.
+  #
+  #
+  #
+  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+  #
+  # @!attribute [rw] grantee
+  #   Container for the person being granted permissions.
+  #   @return [Types::Grantee]
+  #
+  # @!attribute [rw] permission
+  #   Logging permissions assigned to the grantee for the bucket.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant AWS API Documentation
+  #
+  class TargetGrant < Struct.new(
+    :grantee,
+    :permission)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # The S3 Intelligent-Tiering storage class is designed to optimize
+  # storage costs by automatically moving data to the most cost-effective
+  # storage access tier, without additional operational overhead.
+  #
+  # @!attribute [rw] days
+  #   The number of consecutive days of no access after which an object
+  #   will be eligible to be transitioned to the corresponding tier. The
+  #   minimum number of days specified for Archive Access tier must be at
+  #   least 90 days and Deep Archive Access tier must be at least 180
+  #   days. The maximum can be up to 2 years (730 days).
+  #   @return [Integer]
+  #
+  # @!attribute [rw] access_tier
+  #   S3 Intelligent-Tiering access tier. See [Storage class for
+  #   automatically optimizing frequently and infrequently accessed
+  #   objects][1] for a list of access tiers in the S3 Intelligent-Tiering
+  #   storage class.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tiering AWS API Documentation
+  #
+  class Tiering < Struct.new(
+    :days,
+    :access_tier)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # A container for specifying the configuration for publication of
+  # messages to an Amazon Simple Notification Service (Amazon SNS) topic
+  # when Amazon S3 detects specified events.
+  #
+  # @!attribute [rw] id
+  #   An optional unique identifier for configurations in a notification
+  #   configuration. If you don't provide one, Amazon S3 will assign an
+  #   ID.
+  #   @return [String]
+  #
+  # @!attribute [rw] topic_arn
+  #   The Amazon Resource Name (ARN) of the Amazon SNS topic to which
+  #   Amazon S3 publishes a message when it detects events of the
+  #   specified type.
+  #   @return [String]
+  #
+  # @!attribute [rw] events
+  #   The Amazon S3 bucket event about which to send notifications. For
+  #   more information, see [Supported Event Types][1] in the *Amazon S3
+  #   User Guide*.
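+  #
+  #   As a sketch (an editor's addition; the topic ARN is a placeholder),
+  #   a configuration for all object-creation events might look like:
+  #
+  #       { topic_arn: 'arn:aws:sns:us-east-1:123456789012:my-topic',
+  #         events: ['s3:ObjectCreated:*'] }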
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+  #   @return [Array<String>]
+  #
+  # @!attribute [rw] filter
+  #   Specifies object key name filtering rules. For information about key
+  #   name filtering, see [Configuring Event Notifications][1] in the
+  #   *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+  #   @return [Types::NotificationConfigurationFilter]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration AWS API Documentation
+  #
+  class TopicConfiguration < Struct.new(
+    :id,
+    :topic_arn,
+    :events,
+    :filter)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # A container for specifying the configuration for publication of
+  # messages to an Amazon Simple Notification Service (Amazon SNS) topic
+  # when Amazon S3 detects specified events. This data type is deprecated.
+  # Use [TopicConfiguration][1] instead.
+  #
+  #
+  #
+  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html
+  #
+  # @!attribute [rw] id
+  #   An optional unique identifier for configurations in a notification
+  #   configuration. If you don't provide one, Amazon S3 will assign an
+  #   ID.
+  #   @return [String]
+  #
+  # @!attribute [rw] events
+  #   A collection of events related to objects.
+  #   @return [Array<String>]
+  #
+  # @!attribute [rw] event
+  #   Bucket event for which to send notifications.
+  #   @return [String]
+  #
+  # @!attribute [rw] topic
+  #   Amazon SNS topic to which Amazon S3 will publish a message to report
+  #   the specified events for the bucket.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated AWS API Documentation
+  #
+  class TopicConfigurationDeprecated < Struct.new(
+    :id,
+    :events,
+    :event,
+    :topic)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # Specifies when an object transitions to a specified storage class. For
+  # more information about Amazon S3 lifecycle configuration rules, see
+  # [Transitioning Objects Using Amazon S3 Lifecycle][1] in the *Amazon S3
+  # User Guide*.
+  #
+  #
+  #
+  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html
+  #
+  # @!attribute [rw] date
+  #   Indicates when objects are transitioned to the specified storage
+  #   class. The date value must be in ISO 8601 format. The time is always
+  #   midnight UTC.
+  #   @return [Time]
+  #
+  # @!attribute [rw] days
+  #   Indicates the number of days after creation when objects are
+  #   transitioned to the specified storage class. The value must be a
+  #   positive integer.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] storage_class
+  #   The storage class to which you want the object to transition.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition AWS API Documentation
+  #
+  class Transition < Struct.new(
+    :date,
+    :days,
+    :storage_class)
+    SENSITIVE = []
+    include Aws::Structure
+  end
+
+  # @!attribute [rw] copy_source_version_id
+  #   The version of the source object that was copied, if you have
+  #   enabled versioning on the source bucket.
+  #   @return [String]
+  #
+  # @!attribute [rw] copy_part_result
+  #   Container for all response elements.
+  #   @return [Types::CopyPartResult]
+  #
+  # @!attribute [rw] server_side_encryption
+  #   The server-side encryption algorithm used when storing this object
+  #   in Amazon S3 (for example, AES256, aws:kms).
+ # @return [String] + # + # @!attribute [rw] sse_customer_algorithm + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header confirming the + # encryption algorithm used. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # If server-side encryption with a customer-provided encryption key + # was requested, the response will include this header to provide + # round-trip message integrity verification of the customer-provided + # encryption key. + # @return [String] + # + # @!attribute [rw] ssekms_key_id + # If present, specifies the ID of the Amazon Web Services Key + # Management Service (Amazon Web Services KMS) symmetric customer + # managed key that was used for the object. + # @return [String] + # + # @!attribute [rw] bucket_key_enabled + # Indicates whether the multipart upload uses an S3 Bucket Key for + # server-side encryption with Amazon Web Services KMS (SSE-KMS). + # @return [Boolean] + # + # @!attribute [rw] request_charged + # If present, indicates that the requester was successfully charged + # for the request. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput AWS API Documentation + # + class UploadPartCopyOutput < Struct.new( + :copy_source_version_id, + :copy_part_result, + :server_side_encryption, + :sse_customer_algorithm, + :sse_customer_key_md5, + :ssekms_key_id, + :bucket_key_enabled, + :request_charged) + SENSITIVE = [:ssekms_key_id] + include Aws::Structure + end + + # @!attribute [rw] bucket + # The bucket name. + # + # When using this action with an access point, you must direct + # requests to the access point hostname. The access point hostname + # takes the form + # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. + # When using this action with an access point through the Amazon Web + # Services SDKs, you provide the access point ARN in place of the + # bucket name. For more information about access point ARNs, see + # [Using access points][1] in the *Amazon S3 User Guide*. + # + # When using this action with Amazon S3 on Outposts, you must direct + # requests to the S3 on Outposts hostname. The S3 on Outposts hostname + # takes the form ` + # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. + # When using this action with S3 on Outposts through the Amazon Web + # Services SDKs, you provide the Outposts bucket ARN in place of the + # bucket name. For more information about S3 on Outposts ARNs, see + # [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + # @return [String] + # + # @!attribute [rw] copy_source + # Specifies the source object for the copy operation. You specify the + # value in one of two formats, depending on whether you want to access + # the source object through an [access point][1]: + # + # * For objects not accessed through an access point, specify the name + # of the source bucket and key of the source object, separated by a + # slash (/). For example, to copy the object `reports/january.pdf` + # from the bucket `awsexamplebucket`, use + # `awsexamplebucket/reports/january.pdf`. The value must be + # URL-encoded. 
+  #
+  #   * For objects accessed through access points, specify the Amazon
+  #     Resource Name (ARN) of the object as accessed through the access
+  #     point, in the format
+  #     `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
+  #     For example, to copy the object `reports/january.pdf` through
+  #     access point `my-access-point` owned by account `123456789012` in
+  #     Region `us-west-2`, use the URL encoding of
+  #     `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
+  #     The value must be URL encoded.
+  #
+  #     Amazon S3 supports copy operations using access points only when
+  #     the source and destination buckets are in the same Amazon Web
+  #     Services Region.
+  #
+  #
+  #
+  #     Alternatively, for objects accessed through Amazon S3 on Outposts,
+  #     specify the ARN of the object as accessed in the format
+  #     `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
+  #     For example, to copy the object `reports/january.pdf` through
+  #     outpost `my-outpost` owned by account `123456789012` in Region
+  #     `us-west-2`, use the URL encoding of
+  #     `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
+  #     The value must be URL-encoded.
+  #
+  #   To copy a specific version of an object, append
+  #   `?versionId=<version-id>` to the value (for example,
+  #   `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
+  #   If you don't specify a version ID, Amazon S3 copies the latest
+  #   version of the source object.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+  #   @return [String]
+  #
+  # @!attribute [rw] copy_source_if_match
+  #   Copies the object if its entity tag (ETag) matches the specified
+  #   tag.
+  #   @return [String]
+  #
+  # @!attribute [rw] copy_source_if_modified_since
+  #   Copies the object if it has been modified since the specified time.
+  #   @return [Time]
+  #
+  # @!attribute [rw] copy_source_if_none_match
+  #   Copies the object if its entity tag (ETag) is different than the
+  #   specified ETag.
+  #   @return [String]
+  #
+  # @!attribute [rw] copy_source_if_unmodified_since
+  #   Copies the object if it hasn't been modified since the specified
+  #   time.
+  #   @return [Time]
+  #
+  # @!attribute [rw] copy_source_range
+  #   The range of bytes to copy from the source object. The range value
+  #   must use the form bytes=first-last, where the first and last are the
+  #   zero-based byte offsets to copy. For example, bytes=0-9 indicates
+  #   that you want to copy the first 10 bytes of the source. You can copy
+  #   a range only if the source object is greater than 5 MB.
+  #   @return [String]
+  #
+  # @!attribute [rw] key
+  #   Object key for which the multipart upload was initiated.
+  #   @return [String]
+  #
+  # @!attribute [rw] part_number
+  #   Part number of part being copied. This is a positive integer between
+  #   1 and 10,000.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] upload_id
+  #   Upload ID identifying the multipart upload whose part is being
+  #   copied.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_algorithm
+  #   Specifies the algorithm to use when encrypting the object (for
+  #   example, AES256).
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key
+  #   Specifies the customer-provided encryption key for Amazon S3 to use
+  #   in encrypting data. This value is used to store the object and then
+  #   it is discarded; Amazon S3 does not store the encryption key. The
+  #   key must be appropriate for use with the algorithm specified in the
+  #   `x-amz-server-side-encryption-customer-algorithm` header.
This must + # be the same encryption key specified in the initiate multipart + # upload request. + # @return [String] + # + # @!attribute [rw] sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check + # to ensure that the encryption key was transmitted without error. + # @return [String] + # + # @!attribute [rw] copy_source_sse_customer_algorithm + # Specifies the algorithm to use when decrypting the source object + # (for example, AES256). + # @return [String] + # + # @!attribute [rw] copy_source_sse_customer_key + # Specifies the customer-provided encryption key for Amazon S3 to use + # to decrypt the source object. The encryption key provided in this + # header must be one that was used when the source object was created. + # @return [String] + # + # @!attribute [rw] copy_source_sse_customer_key_md5 + # Specifies the 128-bit MD5 digest of the encryption key according to + # RFC 1321. Amazon S3 uses this header for a message integrity check + # to ensure that the encryption key was transmitted without error. + # @return [String] + # + # @!attribute [rw] request_payer + # Confirms that the requester knows that they will be charged for the + # request. Bucket owners need not specify this parameter in their + # requests. For information about downloading objects from Requester + # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] + # in the *Amazon S3 User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + # @return [String] + # + # @!attribute [rw] expected_bucket_owner + # The account ID of the expected destination bucket owner. If the + # destination bucket is owned by a different account, the request + # fails with the HTTP status code `403 Forbidden` (access denied). + # @return [String] + # + # @!attribute [rw] expected_source_bucket_owner + # The account ID of the expected source bucket owner. If the source + # bucket is owned by a different account, the request fails with the + # HTTP status code `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest AWS API Documentation + # + class UploadPartCopyRequest < Struct.new( + :bucket, + :copy_source, + :copy_source_if_match, + :copy_source_if_modified_since, + :copy_source_if_none_match, + :copy_source_if_unmodified_since, + :copy_source_range, + :key, + :part_number, + :upload_id, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :copy_source_sse_customer_algorithm, + :copy_source_sse_customer_key, + :copy_source_sse_customer_key_md5, + :request_payer, + :expected_bucket_owner, + :expected_source_bucket_owner) + SENSITIVE = [:sse_customer_key, :copy_source_sse_customer_key] + include Aws::Structure + end + + # @!attribute [rw] server_side_encryption + # The server-side encryption algorithm used when storing this object + # in Amazon S3 (for example, AES256, aws:kms). + # @return [String] + # + # @!attribute [rw] etag + # Entity tag for the uploaded object. + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # The base64-encoded, 32-bit CRC32 checksum of the object. This will + # only be present if it was uploaded with the object. With multipart + # uploads, this may not be a checksum value of the object. 
For more
+  #   information about how checksums are calculated with multipart
+  #   uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+  #   Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_crc32c
+  #   The base64-encoded, 32-bit CRC32C checksum of the object. This will
+  #   only be present if it was uploaded with the object. With multipart
+  #   uploads, this may not be a checksum value of the object. For more
+  #   information about how checksums are calculated with multipart
+  #   uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+  #   Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_sha1
+  #   The base64-encoded, 160-bit SHA-1 digest of the object. This will
+  #   only be present if it was uploaded with the object. With multipart
+  #   uploads, this may not be a checksum value of the object. For more
+  #   information about how checksums are calculated with multipart
+  #   uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+  #   Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_sha256
+  #   The base64-encoded, 256-bit SHA-256 digest of the object. This will
+  #   only be present if it was uploaded with the object. With multipart
+  #   uploads, this may not be a checksum value of the object. For more
+  #   information about how checksums are calculated with multipart
+  #   uploads, see [ Checking object integrity][1] in the *Amazon S3 User
+  #   Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_algorithm
+  #   If server-side encryption with a customer-provided encryption key
+  #   was requested, the response will include this header confirming the
+  #   encryption algorithm used.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key_md5
+  #   If server-side encryption with a customer-provided encryption key
+  #   was requested, the response will include this header to provide
+  #   round-trip message integrity verification of the customer-provided
+  #   encryption key.
+  #   @return [String]
+  #
+  # @!attribute [rw] ssekms_key_id
+  #   If present, specifies the ID of the Amazon Web Services Key
+  #   Management Service (Amazon Web Services KMS) symmetric customer
+  #   managed key that was used for the object.
+  #   @return [String]
+  #
+  # @!attribute [rw] bucket_key_enabled
+  #   Indicates whether the multipart upload uses an S3 Bucket Key for
+  #   server-side encryption with Amazon Web Services KMS (SSE-KMS).
+  #   @return [Boolean]
+  #
+  # @!attribute [rw] request_charged
+  #   If present, indicates that the requester was successfully charged
+  #   for the request.
+  #   @return [String]
+  #
+  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput AWS API Documentation
+  #
+  class UploadPartOutput < Struct.new(
+    :server_side_encryption,
+    :etag,
+    :checksum_crc32,
+    :checksum_crc32c,
+    :checksum_sha1,
+    :checksum_sha256,
+    :sse_customer_algorithm,
+    :sse_customer_key_md5,
+    :ssekms_key_id,
+    :bucket_key_enabled,
+    :request_charged)
+    SENSITIVE = [:ssekms_key_id]
+    include Aws::Structure
+  end
+
+  # @!attribute [rw] body
+  #   Object data.
+  #   @return [IO]
+  #
+  # @!attribute [rw] bucket
+  #   The name of the bucket to which the multipart upload was initiated.
+  #
+  #   When using this action with an access point, you must direct
+  #   requests to the access point hostname. The access point hostname
+  #   takes the form
+  #   *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
+  #   When using this action with an access point through the Amazon Web
+  #   Services SDKs, you provide the access point ARN in place of the
+  #   bucket name. For more information about access point ARNs, see
+  #   [Using access points][1] in the *Amazon S3 User Guide*.
+  #
+  #   When using this action with Amazon S3 on Outposts, you must direct
+  #   requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+  #   takes the form `
+  #   AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
+  #   When using this action with S3 on Outposts through the Amazon Web
+  #   Services SDKs, you provide the Outposts bucket ARN in place of the
+  #   bucket name. For more information about S3 on Outposts ARNs, see
+  #   [Using Amazon S3 on Outposts][2] in the *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+  #   [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+  #   @return [String]
+  #
+  # @!attribute [rw] content_length
+  #   Size of the body in bytes. This parameter is useful when the size of
+  #   the body cannot be determined automatically.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] content_md5
+  #   The base64-encoded 128-bit MD5 digest of the part data. This
+  #   parameter is auto-populated when using the command from the CLI.
+  #   This parameter is required if object lock parameters are specified.
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_algorithm
+  #   Indicates the algorithm used to create the checksum for the object
+  #   when using the SDK. This header will not provide any additional
+  #   functionality if not using the SDK. When sending this header, there
+  #   must be a corresponding `x-amz-checksum` or `x-amz-trailer` header
+  #   sent. Otherwise, Amazon S3 fails the request with the HTTP status
+  #   code `400 Bad Request`. For more information, see [Checking object
+  #   integrity][1] in the *Amazon S3 User Guide*.
+  #
+  #   If you provide an individual checksum, Amazon S3 ignores any
+  #   provided `ChecksumAlgorithm` parameter.
+  #
+  #   This checksum algorithm must be the same for all parts and it must
+  #   match the checksum value supplied in the `CreateMultipartUpload`
+  #   request.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_crc32
+  #   This header can be used as a data integrity check to verify that the
+  #   data received is the same data that was originally sent. This header
+  #   specifies the base64-encoded, 32-bit CRC32 checksum of the object.
+  #   For more information, see [Checking object integrity][1] in the
+  #   *Amazon S3 User Guide*.
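+  #
+  #   Rather than computing this value by hand, you can usually let the
+  #   SDK add it by passing `checksum_algorithm` instead (an editor's
+  #   sketch; the bucket, key, and upload ID are placeholders):
+  #
+  #       client.upload_part(
+  #         bucket: 'my-bucket',
+  #         key: 'big-file',
+  #         upload_id: upload_id,
+  #         part_number: 1,
+  #         body: part_data,
+  #         checksum_algorithm: 'CRC32'
+  #       )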
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_crc32c
+  #   This header can be used as a data integrity check to verify that the
+  #   data received is the same data that was originally sent. This header
+  #   specifies the base64-encoded, 32-bit CRC32C checksum of the object.
+  #   For more information, see [Checking object integrity][1] in the
+  #   *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_sha1
+  #   This header can be used as a data integrity check to verify that the
+  #   data received is the same data that was originally sent. This header
+  #   specifies the base64-encoded, 160-bit SHA-1 digest of the object.
+  #   For more information, see [Checking object integrity][1] in the
+  #   *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+  #   @return [String]
+  #
+  # @!attribute [rw] checksum_sha256
+  #   This header can be used as a data integrity check to verify that the
+  #   data received is the same data that was originally sent. This header
+  #   specifies the base64-encoded, 256-bit SHA-256 digest of the object.
+  #   For more information, see [Checking object integrity][1] in the
+  #   *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+  #   @return [String]
+  #
+  # @!attribute [rw] key
+  #   Object key for which the multipart upload was initiated.
+  #   @return [String]
+  #
+  # @!attribute [rw] part_number
+  #   Part number of part being uploaded. This is a positive integer
+  #   between 1 and 10,000.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] upload_id
+  #   Upload ID identifying the multipart upload whose part is being
+  #   uploaded.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_algorithm
+  #   Specifies the algorithm to use when encrypting the object (for
+  #   example, AES256).
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key
+  #   Specifies the customer-provided encryption key for Amazon S3 to use
+  #   in encrypting data. This value is used to store the object and then
+  #   it is discarded; Amazon S3 does not store the encryption key. The
+  #   key must be appropriate for use with the algorithm specified in the
+  #   `x-amz-server-side-encryption-customer-algorithm` header. This must
+  #   be the same encryption key specified in the initiate multipart
+  #   upload request.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key_md5
+  #   Specifies the 128-bit MD5 digest of the encryption key according to
+  #   RFC 1321. Amazon S3 uses this header for a message integrity check
+  #   to ensure that the encryption key was transmitted without error.
+  #   @return [String]
+  #
+  # @!attribute [rw] request_payer
+  #   Confirms that the requester knows that they will be charged for the
+  #   request. Bucket owners need not specify this parameter in their
+  #   requests. For information about downloading objects from Requester
+  #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
+  #   in the *Amazon S3 User Guide*.
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+  #   @return [String]
+  #
+  # @!attribute [rw] expected_bucket_owner
+  #   The account ID of the expected bucket owner.
If the bucket is owned + # by a different account, the request fails with the HTTP status code + # `403 Forbidden` (access denied). + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest AWS API Documentation + # + class UploadPartRequest < Struct.new( + :body, + :bucket, + :content_length, + :content_md5, + :checksum_algorithm, + :checksum_crc32, + :checksum_crc32c, + :checksum_sha1, + :checksum_sha256, + :key, + :part_number, + :upload_id, + :sse_customer_algorithm, + :sse_customer_key, + :sse_customer_key_md5, + :request_payer, + :expected_bucket_owner) + SENSITIVE = [:sse_customer_key] + include Aws::Structure + end + + # Describes the versioning state of an Amazon S3 bucket. For more + # information, see [PUT Bucket versioning][1] in the *Amazon S3 API + # Reference*. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html + # + # @!attribute [rw] mfa_delete + # Specifies whether MFA delete is enabled in the bucket versioning + # configuration. This element is only returned if the bucket has been + # configured with MFA delete. If the bucket has never been so + # configured, this element is not returned. + # @return [String] + # + # @!attribute [rw] status + # The versioning state of the bucket. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration AWS API Documentation + # + class VersioningConfiguration < Struct.new( + :mfa_delete, + :status) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies website configuration parameters for an Amazon S3 bucket. + # + # @!attribute [rw] error_document + # The name of the error document for the website. + # @return [Types::ErrorDocument] + # + # @!attribute [rw] index_document + # The name of the index document for the website. + # @return [Types::IndexDocument] + # + # @!attribute [rw] redirect_all_requests_to + # The redirect behavior for every request to this bucket's website + # endpoint. + # + # If you specify this property, you can't specify any other property. + # @return [Types::RedirectAllRequestsTo] + # + # @!attribute [rw] routing_rules + # Rules that define when a redirect is applied and the redirect + # behavior. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration AWS API Documentation + # + class WebsiteConfiguration < Struct.new( + :error_document, + :index_document, + :redirect_all_requests_to, + :routing_rules) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] request_route + # Route prefix to the HTTP URL generated. + # @return [String] + # + # @!attribute [rw] request_token + # A single use encrypted token that maps `WriteGetObjectResponse` to + # the end user `GetObject` request. + # @return [String] + # + # @!attribute [rw] body + # The object data. + # @return [IO] + # + # @!attribute [rw] status_code + # The integer status code for an HTTP response of a corresponding + # `GetObject` request. 
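+  #
+  #   In an S3 Object Lambda handler this is typically set alongside the
+  #   route and token from the invocation event (an editor's sketch; the
+  #   event field names are assumptions):
+  #
+  #       ctx = event['getObjectContext']
+  #       client.write_get_object_response(
+  #         request_route: ctx['outputRoute'],
+  #         request_token: ctx['outputToken'],
+  #         body: transformed_body,
+  #         status_code: 200
+  #       )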
+ # + # **Status Codes** + # + # * `200 - OK` + # + # * `206 - Partial Content` + # + # * `304 - Not Modified` + # + # * `400 - Bad Request` + # + # * `401 - Unauthorized` + # + # * `403 - Forbidden` + # + # * `404 - Not Found` + # + # * `405 - Method Not Allowed` + # + # * `409 - Conflict` + # + # * `411 - Length Required` + # + # * `412 - Precondition Failed` + # + # * `416 - Range Not Satisfiable` + # + # * `500 - Internal Server Error` + # + # * `503 - Service Unavailable` + # @return [Integer] + # + # @!attribute [rw] error_code + # A string that uniquely identifies an error condition. Returned in + # the <Code> tag of the error XML response for a corresponding + # `GetObject` call. Cannot be used with a successful `StatusCode` + # header or when the transformed object is provided in the body. All + # error codes from S3 are sentence-cased. The regular expression + # (regex) value is `"^[A-Z][a-zA-Z]+$"`. + # @return [String] + # + # @!attribute [rw] error_message + # Contains a generic description of the error condition. Returned in + # the <Message> tag of the error XML response for a + # corresponding `GetObject` call. Cannot be used with a successful + # `StatusCode` header or when the transformed object is provided in + # body. + # @return [String] + # + # @!attribute [rw] accept_ranges + # Indicates that a range of bytes was specified. + # @return [String] + # + # @!attribute [rw] cache_control + # Specifies caching behavior along the request/reply chain. + # @return [String] + # + # @!attribute [rw] content_disposition + # Specifies presentational information for the object. + # @return [String] + # + # @!attribute [rw] content_encoding + # Specifies what content encodings have been applied to the object and + # thus what decoding mechanisms must be applied to obtain the + # media-type referenced by the Content-Type header field. + # @return [String] + # + # @!attribute [rw] content_language + # The language the content is in. + # @return [String] + # + # @!attribute [rw] content_length + # The size of the content body in bytes. + # @return [Integer] + # + # @!attribute [rw] content_range + # The portion of the object returned in the response. + # @return [String] + # + # @!attribute [rw] content_type + # A standard MIME type describing the format of the object data. + # @return [String] + # + # @!attribute [rw] checksum_crc32 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 32-bit CRC32 checksum of the object + # returned by the Object Lambda function. This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_crc32c + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 32-bit CRC32C checksum of the object + # returned by the Object Lambda function. 
This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha1 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 160-bit SHA-1 digest of the object + # returned by the Object Lambda function. This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] checksum_sha256 + # This header can be used as a data integrity check to verify that the + # data received is the same data that was originally sent. This + # specifies the base64-encoded, 256-bit SHA-256 digest of the object + # returned by the Object Lambda function. This may not match the + # checksum for the object stored in Amazon S3. Amazon S3 will perform + # validation of the checksum values only when the original `GetObject` + # request required checksum validation. For more information about + # checksums, see [Checking object integrity][1] in the *Amazon S3 User + # Guide*. + # + # Only one checksum header can be specified at a time. If you supply + # multiple checksum headers, this request will fail. + # + # + # + # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + # @return [String] + # + # @!attribute [rw] delete_marker + # Specifies whether an object stored in Amazon S3 is (`true`) or is + # not (`false`) a delete marker. + # @return [Boolean] + # + # @!attribute [rw] etag + # An opaque identifier assigned by a web server to a specific version + # of a resource found at a URL. + # @return [String] + # + # @!attribute [rw] expires + # The date and time at which the object is no longer cacheable. + # @return [Time] + # + # @!attribute [rw] expiration + # If the object expiration is configured (see PUT Bucket lifecycle), + # the response includes this header. It includes the `expiry-date` and + # `rule-id` key-value pairs that provide the object expiration + # information. The value of the `rule-id` is URL-encoded. + # @return [String] + # + # @!attribute [rw] last_modified + # The date and time that the object was last modified. + # @return [Time] + # + # @!attribute [rw] missing_meta + # Set to the number of metadata entries not returned in `x-amz-meta` + # headers. This can happen if you create metadata using an API like + # SOAP that supports more flexible metadata than the REST API. For + # example, using SOAP, you can create metadata whose values are not + # legal HTTP headers. 
+  #   @return [Integer]
+  #
+  # @!attribute [rw] metadata
+  #   A map of metadata to store with the object in S3.
+  #   @return [Hash<String,String>]
+  #
+  # @!attribute [rw] object_lock_mode
+  #   Indicates whether an object stored in Amazon S3 has Object Lock
+  #   enabled. For more information about S3 Object Lock, see [Object
+  #   Lock][1].
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+  #   @return [String]
+  #
+  # @!attribute [rw] object_lock_legal_hold_status
+  #   Indicates whether an object stored in Amazon S3 has an active legal
+  #   hold.
+  #   @return [String]
+  #
+  # @!attribute [rw] object_lock_retain_until_date
+  #   The date and time when Object Lock is configured to expire.
+  #   @return [Time]
+  #
+  # @!attribute [rw] parts_count
+  #   The count of parts this object has.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] replication_status
+  #   Indicates whether the request involves a bucket that is either a
+  #   source or a destination in a replication rule. For more information
+  #   about S3 Replication, see [Replication][1].
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
+  #   @return [String]
+  #
+  # @!attribute [rw] request_charged
+  #   If present, indicates that the requester was successfully charged
+  #   for the request.
+  #   @return [String]
+  #
+  # @!attribute [rw] restore
+  #   Provides information about the object restoration operation and the
+  #   expiration time of the restored object copy.
+  #   @return [String]
+  #
+  # @!attribute [rw] server_side_encryption
+  #   The server-side encryption algorithm used when storing requested
+  #   object in Amazon S3 (for example, AES256, aws:kms).
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_algorithm
+  #   Encryption algorithm used if server-side encryption with a
+  #   customer-provided encryption key was specified for object stored in
+  #   Amazon S3.
+  #   @return [String]
+  #
+  # @!attribute [rw] ssekms_key_id
+  #   If present, specifies the ID of the Amazon Web Services Key
+  #   Management Service (Amazon Web Services KMS) symmetric customer
+  #   managed key that was used for the object stored in Amazon S3.
+  #   @return [String]
+  #
+  # @!attribute [rw] sse_customer_key_md5
+  #   128-bit MD5 digest of customer-provided encryption key used in
+  #   Amazon S3 to encrypt data stored in S3. For more information, see
+  #   [Protecting data using server-side encryption with customer-provided
+  #   encryption keys (SSE-C)][1].
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+  #   @return [String]
+  #
+  # @!attribute [rw] storage_class
+  #   Provides storage class information of the object. Amazon S3 returns
+  #   this header for all objects except for S3 Standard storage class
+  #   objects.
+  #
+  #   For more information, see [Storage Classes][1].
+  #
+  #
+  #
+  #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+  #   @return [String]
+  #
+  # @!attribute [rw] tag_count
+  #   The number of tags, if any, on the object.
+  #   @return [Integer]
+  #
+  # @!attribute [rw] version_id
+  #   An ID used to reference a specific version of the object.
+  #   @return [String]
+  #
+  # @!attribute [rw] bucket_key_enabled
+  #   Indicates whether the object stored in Amazon S3 uses an S3 bucket
+  #   key for server-side encryption with Amazon Web Services KMS
+  #   (SSE-KMS).
+ # @return [Boolean]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponseRequest AWS API Documentation
+ #
+ class WriteGetObjectResponseRequest < Struct.new(
+ :request_route,
+ :request_token,
+ :body,
+ :status_code,
+ :error_code,
+ :error_message,
+ :accept_ranges,
+ :cache_control,
+ :content_disposition,
+ :content_encoding,
+ :content_language,
+ :content_length,
+ :content_range,
+ :content_type,
+ :checksum_crc32,
+ :checksum_crc32c,
+ :checksum_sha1,
+ :checksum_sha256,
+ :delete_marker,
+ :etag,
+ :expires,
+ :expiration,
+ :last_modified,
+ :missing_meta,
+ :metadata,
+ :object_lock_mode,
+ :object_lock_legal_hold_status,
+ :object_lock_retain_until_date,
+ :parts_count,
+ :replication_status,
+ :request_charged,
+ :restore,
+ :server_side_encryption,
+ :sse_customer_algorithm,
+ :ssekms_key_id,
+ :sse_customer_key_md5,
+ :storage_class,
+ :tag_count,
+ :version_id,
+ :bucket_key_enabled)
+ SENSITIVE = [:ssekms_key_id]
+ include Aws::Structure
+ end
+
+ # The container for selecting objects from a content event stream.
+ #
+ # EventStream is an Enumerator of Events.
+ # #event_types #=> Array, returns all modeled event types in the stream
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentEventStream AWS API Documentation
+ #
+ class SelectObjectContentEventStream < Enumerator
+
+ def event_types
+ [
+ :records,
+ :stats,
+ :progress,
+ :cont,
+ :end
+ ]
+ end
+
+ end
+
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/waiters.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/waiters.rb
new file mode 100644
index 0000000..71d88b7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sdk-s3-1.121.0/lib/aws-sdk-s3/waiters.rb
@@ -0,0 +1,243 @@
+# frozen_string_literal: true
+
+# WARNING ABOUT GENERATED CODE
+#
+# This file is generated. See the contributing guide for more information:
+# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
+#
+# WARNING ABOUT GENERATED CODE
+
+require 'aws-sdk-core/waiters'
+
+module Aws::S3
+ # Waiters are utility methods that poll for a particular state to occur
+ # on a client. Waiters can fail after a number of attempts at a polling
+ # interval defined for the service client.
+ #
+ # For a list of operations that can be waited for and the
+ # client methods called for each operation, see the table below or the
+ # {Client#wait_until} field documentation for the {Client}.
+ #
+ # # Invoking a Waiter
+ # To invoke a waiter, call #wait_until on a {Client}. The first parameter
+ # is the waiter name, which is specific to the service client and indicates
+ # which operation is being waited for. The second parameter is a hash of
+ # parameters that are passed to the client method called by the waiter,
+ # which varies according to the waiter name.
+ #
+ # # Wait Failures
+ # To catch errors in a waiter, rescue WaiterFailed,
+ # as shown in the following example.
+ #
+ # begin
+ # client.wait_until(...)
+ # rescue Aws::Waiters::Errors::WaiterFailed => error
+ # puts "failed waiting for instance running: #{error.message}"
+ # end
+ #
+ # # Configuring a Waiter
+ # Each waiter has a default polling interval and a maximum number of
+ # attempts it will make before returning control to your program.
+ # To set these values, use the `max_attempts` and `delay` parameters
+ # in your `#wait_until` call.
+ # The following example waits for up to 25 seconds, polling every five seconds.
+ #
+ # client.wait_until(...) do |w|
+ # w.max_attempts = 5
+ # w.delay = 5
+ # end
+ #
+ # To disable wait failures, set the value of either of these parameters
+ # to `nil`.
+ #
+ # # Extending a Waiter
+ # To modify the behavior of waiters, you can register callbacks that are
+ # triggered before each polling attempt and before waiting.
+ #
+ # The following example implements a growing backoff in a waiter
+ # by sleeping for the square of the attempt number before each wait.
+ #
+ # client.wait_until(...) do |w|
+ # w.interval = 0 # disable normal sleep
+ # w.before_wait do |n, resp|
+ # sleep(n ** 2)
+ # end
+ # end
+ #
+ # # Available Waiters
+ #
+ # The following table lists the valid waiter names, the operations they call,
+ # and the default `:delay` and `:max_attempts` values.
+ #
+ # | waiter_name | params | :delay | :max_attempts |
+ # | ----------------- | -------------------- | -------- | ------------- |
+ # | bucket_exists | {Client#head_bucket} | 5 | 20 |
+ # | bucket_not_exists | {Client#head_bucket} | 5 | 20 |
+ # | object_exists | {Client#head_object} | 5 | 20 |
+ # | object_not_exists | {Client#head_object} | 5 | 20 |
+ #
+ module Waiters
+
+ class BucketExists
+
+ # @param [Hash] options
+ # @option options [required, Client] :client
+ # @option options [Integer] :max_attempts (20)
+ # @option options [Integer] :delay (5)
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def initialize(options)
+ @client = options.fetch(:client)
+ @waiter = Aws::Waiters::Waiter.new({
+ max_attempts: 20,
+ delay: 5,
+ poller: Aws::Waiters::Poller.new(
+ operation_name: :head_bucket,
+ acceptors: [
+ {
+ "expected" => 200,
+ "matcher" => "status",
+ "state" => "success"
+ },
+ {
+ "expected" => 301,
+ "matcher" => "status",
+ "state" => "success"
+ },
+ {
+ "expected" => 403,
+ "matcher" => "status",
+ "state" => "success"
+ },
+ {
+ "expected" => 404,
+ "matcher" => "status",
+ "state" => "retry"
+ }
+ ]
+ )
+ }.merge(options))
+ end
+
+ # @option (see Client#head_bucket)
+ # @return (see Client#head_bucket)
+ def wait(params = {})
+ @waiter.wait(client: @client, params: params)
+ end
+
+ # @api private
+ attr_reader :waiter
+
+ end
+
+ class BucketNotExists
+
+ # @param [Hash] options
+ # @option options [required, Client] :client
+ # @option options [Integer] :max_attempts (20)
+ # @option options [Integer] :delay (5)
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def initialize(options)
+ @client = options.fetch(:client)
+ @waiter = Aws::Waiters::Waiter.new({
+ max_attempts: 20,
+ delay: 5,
+ poller: Aws::Waiters::Poller.new(
+ operation_name: :head_bucket,
+ acceptors: [{
+ "expected" => 404,
+ "matcher" => "status",
+ "state" => "success"
+ }]
+ )
+ }.merge(options))
+ end
+
+ # @option (see Client#head_bucket)
+ # @return (see Client#head_bucket)
+ def wait(params = {})
+ @waiter.wait(client: @client, params: params)
+ end
+
+ # @api private
+ attr_reader :waiter
+
+ end
+
+ class ObjectExists
+
+ # @param [Hash] options
+ # @option options [required, Client] :client
+ # @option options [Integer] :max_attempts (20)
+ # @option options [Integer] :delay (5)
+ # @option options [Proc] :before_attempt
+ # @option options [Proc] :before_wait
+ def initialize(options)
+ @client = options.fetch(:client)
+ @waiter = Aws::Waiters::Waiter.new({
+ max_attempts: 20,
+ delay: 5,
+ poller: Aws::Waiters::Poller.new(
+ operation_name: :head_object,
+ acceptors: [
+ {
+ "expected" => 200,
+ "matcher" => "status",
+ "state" => "success"
+ },
+ {
+ "expected" =>
404, + "matcher" => "status", + "state" => "retry" + } + ] + ) + }.merge(options)) + end + + # @option (see Client#head_object) + # @return (see Client#head_object) + def wait(params = {}) + @waiter.wait(client: @client, params: params) + end + + # @api private + attr_reader :waiter + + end + + class ObjectNotExists + + # @param [Hash] options + # @option options [required, Client] :client + # @option options [Integer] :max_attempts (20) + # @option options [Integer] :delay (5) + # @option options [Proc] :before_attempt + # @option options [Proc] :before_wait + def initialize(options) + @client = options.fetch(:client) + @waiter = Aws::Waiters::Waiter.new({ + max_attempts: 20, + delay: 5, + poller: Aws::Waiters::Poller.new( + operation_name: :head_object, + acceptors: [{ + "expected" => 404, + "matcher" => "status", + "state" => "success" + }] + ) + }.merge(options)) + end + + # @option (see Client#head_object) + # @return (see Client#head_object) + def wait(params = {}) + @waiter.wait(client: @client, params: params) + end + + # @api private + attr_reader :waiter + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/CHANGELOG.md new file mode 100644 index 0000000..0e2cd31 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/CHANGELOG.md @@ -0,0 +1,103 @@ +Unreleased Changes +------------------ + +1.5.2 (2022-09-30) +------------------ + +* Issue - Fix an issue where quoted strings with multiple spaces are not trimmed. (#2758) + +1.5.1 (2022-07-19) +------------------ + +* Issue - Fix performance regression when checking if `aws-crt` is available. (#2729) + +1.5.0 (2022-04-20) +------------------ + +* Feature - Use CRT based signers if `aws-crt` is available - provides support for `sigv4a`. + +1.4.0 (2021-09-02) +------------------ + +* Feature - add `signing_algorithm` option with `sigv4` default. + +1.3.0 (2021-09-01) +------------------ + +* Feature - AWS SDK for Ruby no longer supports Ruby runtime versions 1.9, 2.0, 2.1, and 2.2. + +1.2.4 (2021-07-08) +------------------ + +* Issue - Fix usage of `:uri_escape_path` and `:apply_checksum_header` in `Signer`. + +1.2.3 (2021-03-04) +------------------ + +* Issue - Include LICENSE, CHANGELOG, and VERSION files with this gem. + +1.2.2 (2020-08-13) +------------------ + +* Issue - Sort query params with same names by value when signing. (#2376) + +1.2.1 (2020-06-24) +------------------ + +* Issue - Don't overwrite `host` header in sigv4 signer if given. + +1.2.0 (2020-06-17) +------------------ + +* Feature - Bump `aws-eventstream` dependency to `~> 1`. + +1.1.4 (2020-05-28) +------------------ + +* Issue - Don't use `expect` header to compute Signature. + +1.1.3 (2020-04-27) +------------------ + +* Issue - Don't rely on the set? method of credentials. + +1.1.2 (2020-04-17) +------------------ + +* Issue - Raise errors when credentials are not set (nil or empty) + +1.1.1 (2020-02-26) +------------------ + +* Issue - Handle signing for unknown protocols and default ports. + +1.1.0 (2019-03-13) +------------------ + +* Feature - Support signature V4 signing per event. + +1.0.3 (2018-06-28) +------------------ + +* Issue - Reduce memory allocation when generating signatures. + +1.0.2 (2018-02-21) +------------------ + +* Issue - Fix Ruby warning: shadowed local variable "headers". + +1.0.2 (2017-08-31) +------------------ + +* Issue - Update `aws-sigv4` gemspec metadata. 
+ +1.0.1 (2017-07-12) +------------------ + +* Issue - Make UTF-8 encoding explicit in spec test. + + +1.0.0 (2016-11-08) +------------------ + +* Feature - Initial release of the `aws-sigv4` gem. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/VERSION b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/VERSION new file mode 100644 index 0000000..4cda8f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/VERSION @@ -0,0 +1 @@ +1.5.2 diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4.rb new file mode 100644 index 0000000..12ad05e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require_relative 'aws-sigv4/credentials' +require_relative 'aws-sigv4/errors' +require_relative 'aws-sigv4/signature' +require_relative 'aws-sigv4/signer' + +module Aws + module Sigv4 + VERSION = File.read(File.expand_path('../VERSION', __dir__)).strip + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/credentials.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/credentials.rb new file mode 100644 index 0000000..d5f799e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/credentials.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module Aws + module Sigv4 + # Users that wish to configure static credentials can use the + # `:access_key_id` and `:secret_access_key` constructor options. + # @api private + class Credentials + + # @option options [required, String] :access_key_id + # @option options [required, String] :secret_access_key + # @option options [String, nil] :session_token (nil) + def initialize(options = {}) + if options[:access_key_id] && options[:secret_access_key] + @access_key_id = options[:access_key_id] + @secret_access_key = options[:secret_access_key] + @session_token = options[:session_token] + else + msg = "expected both :access_key_id and :secret_access_key options" + raise ArgumentError, msg + end + end + + # @return [String] + attr_reader :access_key_id + + # @return [String] + attr_reader :secret_access_key + + # @return [String, nil] + attr_reader :session_token + + # @return [Boolean] Returns `true` if the access key id and secret + # access key are both set. + def set? + !access_key_id.nil? && + !access_key_id.empty? && + !secret_access_key.nil? && + !secret_access_key.empty? + end + end + + # Users that wish to configure static credentials can use the + # `:access_key_id` and `:secret_access_key` constructor options. 
+ # @api private + class StaticCredentialsProvider + + # @option options [Credentials] :credentials + # @option options [String] :access_key_id + # @option options [String] :secret_access_key + # @option options [String] :session_token (nil) + def initialize(options = {}) + @credentials = options[:credentials] ? + options[:credentials] : + Credentials.new(options) + end + + # @return [Credentials] + attr_reader :credentials + + # @return [Boolean] + def set? + !!credentials && credentials.set? + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/errors.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/errors.rb new file mode 100644 index 0000000..0029072 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/errors.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Aws + module Sigv4 + module Errors + + class MissingCredentialsError < ArgumentError + def initialize(msg = nil) + super(msg || <<-MSG.strip) +missing credentials, provide credentials with one of the following options: + - :access_key_id and :secret_access_key + - :credentials + - :credentials_provider + MSG + end + end + + class MissingRegionError < ArgumentError + def initialize(*args) + super("missing required option :region") + end + end + + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/request.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/request.rb new file mode 100644 index 0000000..da80cc6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/request.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require 'uri' + +module Aws + module Sigv4 + class Request + + # @option options [required, String] :http_method + # @option options [required, HTTP::URI, HTTPS::URI, String] :endpoint + # @option options [Hash] :headers ({}) + # @option options [String, IO] :body ('') + def initialize(options = {}) + @http_method = nil + @endpoint = nil + @headers = {} + @body = '' + options.each_pair do |attr_name, attr_value| + send("#{attr_name}=", attr_value) + end + end + + # @param [String] http_method One of 'GET', 'PUT', 'POST', 'DELETE', 'HEAD', or 'PATCH' + def http_method=(http_method) + @http_method = http_method + end + + # @return [String] One of 'GET', 'PUT', 'POST', 'DELETE', 'HEAD', or 'PATCH' + def http_method + @http_method + end + + # @param [String, HTTP::URI, HTTPS::URI] endpoint + def endpoint=(endpoint) + @endpoint = URI.parse(endpoint.to_s) + end + + # @return [HTTP::URI, HTTPS::URI] + def endpoint + @endpoint + end + + # @param [Hash] headers + def headers=(headers) + @headers = headers + end + + # @return [Hash] + def headers + @headers + end + + # @param [String, IO] body + def body=(body) + @body = body + end + + # @return [String, IO] + def body + @body + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signature.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signature.rb new file mode 100644 index 0000000..d181a66 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signature.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module Aws + module Sigv4 + class Signature + + # @api private + def initialize(options) + options.each_pair do |attr_name, attr_value| + send("#{attr_name}=", attr_value) + end + end + + # @return [Hash] A hash of headers that should + # be applied to the HTTP request. 
+ # Header keys are lower
+ # cased strings and may include the following:
+ #
+ # * 'host'
+ # * 'x-amz-date'
+ # * 'x-amz-security-token'
+ # * 'x-amz-content-sha256'
+ # * 'authorization'
+ #
+ attr_accessor :headers
+
+ # @return [String] For debugging purposes.
+ attr_accessor :canonical_request
+
+ # @return [String] For debugging purposes.
+ attr_accessor :string_to_sign
+
+ # @return [String] For debugging purposes.
+ attr_accessor :content_sha256
+
+ # @return [Hash] Internal data for debugging purposes.
+ attr_accessor :extra
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signer.rb b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signer.rb
new file mode 100644
index 0000000..7411531
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/aws-sigv4-1.5.2/lib/aws-sigv4/signer.rb
@@ -0,0 +1,864 @@
+# frozen_string_literal: true
+
+require 'openssl'
+require 'tempfile'
+require 'time'
+require 'uri'
+require 'set'
+require 'cgi'
+require 'aws-eventstream'
+
+module Aws
+ module Sigv4
+
+ # Utility class for creating AWS signature version 4 signatures. This class
+ # provides two methods for generating signatures:
+ #
+ # * {#sign_request} - Computes a signature of the given request, returning
+ # the hash of headers that should be applied to the request.
+ #
+ # * {#presign_url} - Computes a presigned request with an expiration.
+ # By default, the body of this request is not signed and the request
+ # expires in 15 minutes.
+ #
+ # ## Configuration
+ #
+ # To use the signer, you need to specify the service, region, and credentials.
+ # The service name is normally the endpoint prefix to an AWS service. For
+ # example:
+ #
+ # ec2.us-west-1.amazonaws.com => ec2
+ #
+ # The region is normally the second portion of the endpoint, following
+ # the service name.
+ #
+ # ec2.us-west-1.amazonaws.com => us-west-1
+ #
+ # It is important to have the correct service and region name, or the
+ # signature will be invalid.
+ #
+ # ## Credentials
+ #
+ # The signer requires credentials. You can configure the signer
+ # with static credentials:
+ #
+ # signer = Aws::Sigv4::Signer.new(
+ # service: 's3',
+ # region: 'us-east-1',
+ # # static credentials
+ # access_key_id: 'akid',
+ # secret_access_key: 'secret'
+ # )
+ #
+ # You can also provide refreshing credentials via the `:credentials_provider`.
+ # If you are using the AWS SDK for Ruby, you can use any of the credential
+ # classes:
+ #
+ # signer = Aws::Sigv4::Signer.new(
+ # service: 's3',
+ # region: 'us-east-1',
+ # credentials_provider: Aws::InstanceProfileCredentials.new
+ # )
+ #
+ # Other AWS SDK for Ruby classes that can be provided via `:credentials_provider`:
+ #
+ # * `Aws::Credentials`
+ # * `Aws::SharedCredentials`
+ # * `Aws::InstanceProfileCredentials`
+ # * `Aws::AssumeRoleCredentials`
+ # * `Aws::ECSCredentials`
+ #
+ # A credential provider is any object that responds to `#credentials`
+ # returning another object that responds to `#access_key_id`, `#secret_access_key`,
+ # and `#session_token`.
+ #
+ class Signer
+
+ @@use_crt =
+ begin
+ require 'aws-crt'
+ true
+ rescue LoadError
+ false
+ end
+
+ # @overload initialize(service:, region:, access_key_id:, secret_access_key:, session_token:nil, **options)
+ # @param [String] :service The service signing name, e.g. 's3'.
+ # @param [String] :region The region name, e.g. 'us-east-1'.
+ # @param [String] :access_key_id + # @param [String] :secret_access_key + # @param [String] :session_token (nil) + # + # @overload initialize(service:, region:, credentials:, **options) + # @param [String] :service The service signing name, e.g. 's3'. + # @param [String] :region The region name, e.g. 'us-east-1'. + # @param [Credentials] :credentials Any object that responds to the following + # methods: + # + # * `#access_key_id` => String + # * `#secret_access_key` => String + # * `#session_token` => String, nil + # * `#set?` => Boolean + # + # @overload initialize(service:, region:, credentials_provider:, **options) + # @param [String] :service The service signing name, e.g. 's3'. + # @param [String] :region The region name, e.g. 'us-east-1'. + # @param [#credentials] :credentials_provider An object that responds + # to `#credentials`, returning an object that responds to the following + # methods: + # + # * `#access_key_id` => String + # * `#secret_access_key` => String + # * `#session_token` => String, nil + # * `#set?` => Boolean + # + # @option options [Array] :unsigned_headers ([]) A list of + # headers that should not be signed. This is useful when a proxy + # modifies headers, such as 'User-Agent', invalidating a signature. + # + # @option options [Boolean] :uri_escape_path (true) When `true`, + # the request URI path is uri-escaped as part of computing the canonical + # request string. This is required for every service, except Amazon S3, + # as of late 2016. + # + # @option options [Boolean] :apply_checksum_header (true) When `true`, + # the computed content checksum is returned in the hash of signature + # headers. This is required for AWS Glacier, and optional for + # every other AWS service as of late 2016. + # + # @option options [Symbol] :signing_algorithm (:sigv4) The + # algorithm to use for signing. :sigv4a is only supported when + # `aws-crt` is available. + # + # @option options [Boolean] :omit_session_token (false) + # (Supported only when `aws-crt` is available) If `true`, + # then security token is added to the final signing result, + # but is treated as "unsigned" and does not contribute + # to the authorization signature. + # + # @option options [Boolean] :normalize_path (true) (Supported only when `aws-crt` is available) + # When `true`, the uri paths will be normalized when building the canonical request + def initialize(options = {}) + @service = extract_service(options) + @region = extract_region(options) + @credentials_provider = extract_credentials_provider(options) + @unsigned_headers = Set.new((options.fetch(:unsigned_headers, [])).map(&:downcase)) + @unsigned_headers << 'authorization' + @unsigned_headers << 'x-amzn-trace-id' + @unsigned_headers << 'expect' + @uri_escape_path = options.fetch(:uri_escape_path, true) + @apply_checksum_header = options.fetch(:apply_checksum_header, true) + @signing_algorithm = options.fetch(:signing_algorithm, :sigv4) + @normalize_path = options.fetch(:normalize_path, true) + @omit_session_token = options.fetch(:omit_session_token, false) + + if @signing_algorithm == :sigv4a && !Signer.use_crt? + raise ArgumentError, 'You are attempting to sign a' \ +' request with sigv4a which requires the `aws-crt` gem.'\ +' Please install the gem or add it to your gemfile.' 
+ end
+ end
+
+ # @return [String]
+ attr_reader :service
+
+ # @return [String]
+ attr_reader :region
+
+ # @return [#credentials] Returns an object that responds to
+ # `#credentials`, returning an object that responds to the following
+ # methods:
+ #
+ # * `#access_key_id` => String
+ # * `#secret_access_key` => String
+ # * `#session_token` => String, nil
+ # * `#set?` => Boolean
+ #
+ attr_reader :credentials_provider
+
+ # @return [Set] Returns a set of header names that should not be signed.
+ # All header names have been downcased.
+ attr_reader :unsigned_headers
+
+ # @return [Boolean] When `true` the `x-amz-content-sha256` header will be signed and
+ # returned in the signature headers.
+ attr_reader :apply_checksum_header
+
+ # Computes a version 4 signature. Returns the resultant
+ # signature as a hash of headers to apply to your HTTP request. The given
+ # request is not modified.
+ #
+ # signature = signer.sign_request(
+ # http_method: 'PUT',
+ # url: 'https://domain.com',
+ # headers: {
+ # 'Abc' => 'xyz',
+ # },
+ # body: 'body' # String or IO object
+ # )
+ #
+ # # Apply the following hash of headers to your HTTP request
+ # signature.headers['host']
+ # signature.headers['x-amz-date']
+ # signature.headers['x-amz-security-token']
+ # signature.headers['x-amz-content-sha256']
+ # signature.headers['authorization']
+ #
+ # In addition to computing the signature headers, the canonicalized
+ # request, string to sign and content sha256 checksum are also available.
+ # These values are useful for debugging signature errors returned by AWS.
+ #
+ # signature.canonical_request #=> "..."
+ # signature.string_to_sign #=> "..."
+ # signature.content_sha256 #=> "..."
+ #
+ # @param [Hash] request
+ #
+ # @option request [required, String] :http_method One of
+ # 'GET', 'HEAD', 'PUT', 'POST', 'PATCH', or 'DELETE'
+ #
+ # @option request [required, String, URI::HTTPS, URI::HTTP] :url
+ # The request URI. Must be a valid HTTP or HTTPS URI.
+ #
+ # @option request [optional, Hash] :headers ({}) A hash of headers
+ # to sign. If the 'X-Amz-Content-Sha256' header is set, the `:body`
+ # is optional and will not be read.
+ #
+ # @option request [optional, String, IO] :body ('') The HTTP request body.
+ # A sha256 checksum is computed of the body unless the
+ # 'X-Amz-Content-Sha256' header is set.
+ #
+ # @return [Signature] Returns an instance of {Signature} that has
+ # a `#headers` method. The headers must be applied to your request.
+ #
+ def sign_request(request)
+
+ return crt_sign_request(request) if Signer.use_crt?
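+ # The pure-Ruby signing path below proceeds in three steps:
+ # 1. resolve credentials, the request datetime, and the payload
+ # SHA-256 (honoring any 'x-amz-date' / 'x-amz-content-sha256'
+ # headers already supplied by the caller);
+ # 2. build the canonical request and the string to sign;
+ # 3. derive the signing key with chained HMACs and assemble the
+ # 'authorization' header from the credential scope, the signed
+ # header list, and the computed signature.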
+
+ creds = fetch_credentials
+
+ http_method = extract_http_method(request)
+ url = extract_url(request)
+ headers = downcase_headers(request[:headers])
+
+ datetime = headers['x-amz-date']
+ datetime ||= Time.now.utc.strftime("%Y%m%dT%H%M%SZ")
+ date = datetime[0,8]
+
+ content_sha256 = headers['x-amz-content-sha256']
+ content_sha256 ||= sha256_hexdigest(request[:body] || '')
+
+ sigv4_headers = {}
+ sigv4_headers['host'] = headers['host'] || host(url)
+ sigv4_headers['x-amz-date'] = datetime
+ sigv4_headers['x-amz-security-token'] = creds.session_token if creds.session_token
+ sigv4_headers['x-amz-content-sha256'] ||= content_sha256 if @apply_checksum_header
+
+ headers = headers.merge(sigv4_headers) # merge so we do not modify given headers hash
+
+ # compute signature parts
+ creq = canonical_request(http_method, url, headers, content_sha256)
+ sts = string_to_sign(datetime, creq)
+ sig = signature(creds.secret_access_key, date, sts)
+
+ # apply signature
+ sigv4_headers['authorization'] = [
+ "AWS4-HMAC-SHA256 Credential=#{credential(creds, date)}",
+ "SignedHeaders=#{signed_headers(headers)}",
+ "Signature=#{sig}",
+ ].join(', ')
+
+ # Returning the signature components.
+ Signature.new(
+ headers: sigv4_headers,
+ string_to_sign: sts,
+ canonical_request: creq,
+ content_sha256: content_sha256
+ )
+ end
+
+ # Signs an event and returns the signature headers and the prior signature
+ # used for the next event signing.
+ #
+ # Headers of a sigv4 signed event message contain only 2 headers
+ # * ':chunk-signature'
+ # * computed signature of the event, binary string, 'bytes' type
+ # * ':date'
+ # * millisecond since epoch, 'timestamp' type
+ #
+ # The payload of the sigv4 signed event message contains the eventstream
+ # encoded message, which is serialized based on input and protocol.
+ #
+ # To sign events
+ #
+ # headers_0, signature_0 = signer.sign_event(
+ # prior_signature, # hex-encoded string
+ # payload_0, # binary string (eventstream encoded event 0)
+ # encoder, # Aws::EventStreamEncoder
+ # )
+ #
+ # headers_1, signature_1 = signer.sign_event(
+ # signature_0,
+ # payload_1, # binary string (eventstream encoded event 1)
+ # encoder
+ # )
+ #
+ # The initial prior_signature should be the signature computed at the
+ # initial request.
+ #
+ # Note:
+ #
+ # Since the ':chunk-signature' header value has the bytes type, the signature
+ # value provided needs to be a binary string instead of a hex-encoded string
+ # (like the original signature V4 algorithm). Thus, when returning the
+ # signature value used for the next event signing, the signature value
+ # (a binary string) used at ':chunk-signature' needs to be converted to a
+ # hex-encoded string using #unpack
+ def sign_event(prior_signature, payload, encoder)
+ # Note: CRT does not currently provide event stream signing, so we always use the ruby implementation.
+ creds = fetch_credentials
+ time = Time.now
+ headers = {}
+
+ datetime = time.utc.strftime("%Y%m%dT%H%M%SZ")
+ date = datetime[0,8]
+ headers[':date'] = Aws::EventStream::HeaderValue.new(value: time.to_i * 1000, type: 'timestamp')
+
+ sts = event_string_to_sign(datetime, headers, payload, prior_signature, encoder)
+ sig = event_signature(creds.secret_access_key, date, sts)
+
+ headers[':chunk-signature'] = Aws::EventStream::HeaderValue.new(value: sig, type: 'bytes')
+
+ # Returning signed headers and signature value in hex-encoded string
+ [headers, sig.unpack('H*').first]
+ end
+
+ # Signs a URL with query authentication.
+ # Using query parameters to authenticate
+ # requests is useful when you want to express a request entirely in a URL.
+ # This method is also referred to as presigning a URL.
+ #
+ # See [Authenticating Requests: Using Query Parameters (AWS Signature Version 4)](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) for more information.
+ #
+ # To generate a presigned URL, you must provide an HTTP URI and
+ # the HTTP method.
+ #
+ # url = signer.presign_url(
+ # http_method: 'GET',
+ # url: 'https://my-bucket.s3-us-east-1.amazonaws.com/key',
+ # expires_in: 60
+ # )
+ #
+ # By default, signatures are valid for 15 minutes. You can specify
+ # the number of seconds for the URL to expire in.
+ #
+ # url = signer.presign_url(
+ # http_method: 'GET',
+ # url: 'https://my-bucket.s3-us-east-1.amazonaws.com/key',
+ # expires_in: 3600 # one hour
+ # )
+ #
+ # You can provide a hash of headers that you plan to send with the
+ # request. Every 'X-Amz-*' header you plan to send with the request
+ # **must** be provided, or the signature is invalid. Other headers
+ # are optional, but should be provided for security reasons.
+ #
+ # url = signer.presign_url(
+ # http_method: 'PUT',
+ # url: 'https://my-bucket.s3-us-east-1.amazonaws.com/key',
+ # headers: {
+ # 'X-Amz-Meta-Custom' => 'metadata'
+ # }
+ # )
+ #
+ # @option options [required, String] :http_method The HTTP request method,
+ # e.g. 'GET', 'HEAD', 'PUT', 'POST', 'PATCH', or 'DELETE'.
+ #
+ # @option options [required, String, HTTPS::URI, HTTP::URI] :url
+ # The URI to sign.
+ #
+ # @option options [Hash] :headers ({}) Headers that should
+ # be signed and sent along with the request. All x-amz-*
+ # headers must be present during signing. Other
+ # headers are optional.
+ #
+ # @option options [Integer] :expires_in (900)
+ # How long the presigned URL should be valid for. Defaults
+ # to 15 minutes (900 seconds).
+ #
+ # @option options [optional, String, IO] :body
+ # If the `:body` is set, then a SHA256 hexdigest will be computed of the body.
+ # If `:body_digest` is set, this option is ignored. If neither are set, then
+ # the `:body_digest` will be computed of the empty string.
+ #
+ # @option options [optional, String] :body_digest
+ # The SHA256 hexdigest of the request body. If you wish to send the presigned
+ # request without signing the body, you can pass 'UNSIGNED-PAYLOAD' as the
+ # `:body_digest` in place of passing `:body`.
+ #
+ # @option options [Time] :time (Time.now) Time of the signature.
+ # You should only set this value for testing.
+ #
+ # @return [HTTPS::URI, HTTP::URI]
+ #
+ def presign_url(options)
+
+ return crt_presign_url(options) if Signer.use_crt?
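+ # The pure-Ruby presigning path below mirrors #sign_request, except
+ # that the signing parameters (X-Amz-Algorithm, X-Amz-Credential,
+ # X-Amz-Date, X-Amz-Expires, X-Amz-SignedHeaders, and optionally
+ # X-Amz-Security-Token) are appended to the query string rather than
+ # sent as headers, and the computed signature is appended last as
+ # X-Amz-Signature.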
+
+ creds = fetch_credentials
+
+ http_method = extract_http_method(options)
+ url = extract_url(options)
+
+ headers = downcase_headers(options[:headers])
+ headers['host'] ||= host(url)
+
+ datetime = headers['x-amz-date']
+ datetime ||= (options[:time] || Time.now).utc.strftime("%Y%m%dT%H%M%SZ")
+ date = datetime[0,8]
+
+ content_sha256 = headers['x-amz-content-sha256']
+ content_sha256 ||= options[:body_digest]
+ content_sha256 ||= sha256_hexdigest(options[:body] || '')
+
+ params = {}
+ params['X-Amz-Algorithm'] = 'AWS4-HMAC-SHA256'
+ params['X-Amz-Credential'] = credential(creds, date)
+ params['X-Amz-Date'] = datetime
+ params['X-Amz-Expires'] = extract_expires_in(options)
+ params['X-Amz-Security-Token'] = creds.session_token if creds.session_token
+ params['X-Amz-SignedHeaders'] = signed_headers(headers)
+
+ params = params.map do |key, value|
+ "#{uri_escape(key)}=#{uri_escape(value)}"
+ end.join('&')
+
+ if url.query
+ url.query += '&' + params
+ else
+ url.query = params
+ end
+
+ creq = canonical_request(http_method, url, headers, content_sha256)
+ sts = string_to_sign(datetime, creq)
+ url.query += '&X-Amz-Signature=' + signature(creds.secret_access_key, date, sts)
+ url
+ end
+
+ private
+
+ def canonical_request(http_method, url, headers, content_sha256)
+ [
+ http_method,
+ path(url),
+ normalized_querystring(url.query || ''),
+ canonical_headers(headers) + "\n",
+ signed_headers(headers),
+ content_sha256,
+ ].join("\n")
+ end
+
+ def string_to_sign(datetime, canonical_request)
+ [
+ 'AWS4-HMAC-SHA256',
+ datetime,
+ credential_scope(datetime[0,8]),
+ sha256_hexdigest(canonical_request),
+ ].join("\n")
+ end
+
+ # Compared to the original #string_to_sign of the signature v4 algorithm,
+ # there is no canonical_request concept for an eventstream event;
+ # instead, an event consists of two parts, headers and payload, and
+ # they will be used for computing the digest in #event_string_to_sign
+ #
+ # Note:
+ # While the headers need to be encoded in eventstream format,
+ # the payload used is already eventstream encoded (event without signature),
+ # thus no extra encoding is needed.
+ def event_string_to_sign(datetime, headers, payload, prior_signature, encoder)
+ encoded_headers = encoder.encode_headers(
+ Aws::EventStream::Message.new(headers: headers, payload: payload)
+ )
+ [
+ "AWS4-HMAC-SHA256-PAYLOAD",
+ datetime,
+ credential_scope(datetime[0,8]),
+ prior_signature,
+ sha256_hexdigest(encoded_headers),
+ sha256_hexdigest(payload)
+ ].join("\n")
+ end
+
+ def credential_scope(date)
+ [
+ date,
+ @region,
+ @service,
+ 'aws4_request',
+ ].join('/')
+ end
+
+ def credential(credentials, date)
+ "#{credentials.access_key_id}/#{credential_scope(date)}"
+ end
+
+ def signature(secret_access_key, date, string_to_sign)
+ k_date = hmac("AWS4" + secret_access_key, date)
+ k_region = hmac(k_date, @region)
+ k_service = hmac(k_region, @service)
+ k_credentials = hmac(k_service, 'aws4_request')
+ hexhmac(k_credentials, string_to_sign)
+ end
+
+ # Compared to the original signature v4 algorithm,
+ # the returned signature is a binary string instead of a
+ # hex-encoded string. (Since ':chunk-signature' requires
+ # the 'bytes' type)
+ #
+ # Note:
+ # converting the signature from a binary string to a hex-encoded
+ # string is handled at #sign_event instead.
(Will be used + # as next prior signature for event signing) + def event_signature(secret_access_key, date, string_to_sign) + k_date = hmac("AWS4" + secret_access_key, date) + k_region = hmac(k_date, @region) + k_service = hmac(k_region, @service) + k_credentials = hmac(k_service, 'aws4_request') + hmac(k_credentials, string_to_sign) + end + + + def path(url) + path = url.path + path = '/' if path == '' + if @uri_escape_path + uri_escape_path(path) + else + path + end + end + + def normalized_querystring(querystring) + params = querystring.split('&') + params = params.map { |p| p.match(/=/) ? p : p + '=' } + # From: https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + # Sort the parameter names by character code point in ascending order. + # Parameters with duplicate names should be sorted by value. + # + # Default sort <=> in JRuby will swap members + # occasionally when <=> is 0 (considered still sorted), but this + # causes our normalized query string to not match the sent querystring. + # When names match, we then sort by their values. When values also + # match then we sort by their original order + params.each.with_index.sort do |a, b| + a, a_offset = a + b, b_offset = b + a_name, a_value = a.split('=') + b_name, b_value = b.split('=') + if a_name == b_name + if a_value == b_value + a_offset <=> b_offset + else + a_value <=> b_value + end + else + a_name <=> b_name + end + end.map(&:first).join('&') + end + + def signed_headers(headers) + headers.inject([]) do |signed_headers, (header, _)| + if @unsigned_headers.include?(header) + signed_headers + else + signed_headers << header + end + end.sort.join(';') + end + + def canonical_headers(headers) + headers = headers.inject([]) do |hdrs, (k,v)| + if @unsigned_headers.include?(k) + hdrs + else + hdrs << [k,v] + end + end + headers = headers.sort_by(&:first) + headers.map{|k,v| "#{k}:#{canonical_header_value(v.to_s)}" }.join("\n") + end + + def canonical_header_value(value) + value.gsub(/\s+/, ' ').strip + end + + def host(uri) + # Handles known and unknown URI schemes; default_port nil when unknown. + if uri.default_port == uri.port + uri.host + else + "#{uri.host}:#{uri.port}" + end + end + + # @param [File, Tempfile, IO#read, String] value + # @return [String] + def sha256_hexdigest(value) + if (File === value || Tempfile === value) && !value.path.nil? 
&& File.exist?(value.path) + OpenSSL::Digest::SHA256.file(value).hexdigest + elsif value.respond_to?(:read) + sha256 = OpenSSL::Digest::SHA256.new + loop do + chunk = value.read(1024 * 1024) # 1MB + break unless chunk + sha256.update(chunk) + end + value.rewind + sha256.hexdigest + else + OpenSSL::Digest::SHA256.hexdigest(value) + end + end + + def hmac(key, value) + OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha256'), key, value) + end + + def hexhmac(key, value) + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), key, value) + end + + def extract_service(options) + if options[:service] + options[:service] + else + msg = "missing required option :service" + raise ArgumentError, msg + end + end + + def extract_region(options) + if options[:region] + options[:region] + else + raise Errors::MissingRegionError + end + end + + def extract_credentials_provider(options) + if options[:credentials_provider] + options[:credentials_provider] + elsif options.key?(:credentials) || options.key?(:access_key_id) + StaticCredentialsProvider.new(options) + else + raise Errors::MissingCredentialsError + end + end + + def extract_http_method(request) + if request[:http_method] + request[:http_method].upcase + else + msg = "missing required option :http_method" + raise ArgumentError, msg + end + end + + def extract_url(request) + if request[:url] + URI.parse(request[:url].to_s) + else + msg = "missing required option :url" + raise ArgumentError, msg + end + end + + def downcase_headers(headers) + (headers || {}).to_hash.inject({}) do |hash, (key, value)| + hash[key.downcase] = value + hash + end + end + + def extract_expires_in(options) + case options[:expires_in] + when nil then 900.to_s + when Integer then options[:expires_in].to_s + else + msg = "expected :expires_in to be a number of seconds" + raise ArgumentError, msg + end + end + + def uri_escape(string) + self.class.uri_escape(string) + end + + def uri_escape_path(string) + self.class.uri_escape_path(string) + end + + + def fetch_credentials + credentials = @credentials_provider.credentials + if credentials_set?(credentials) + credentials + else + raise Errors::MissingCredentialsError, + 'unable to sign request without credentials set' + end + end + + # Returns true if credentials are set (not nil or empty) + # Credentials may not implement the Credentials interface + # and may just be credential like Client response objects + # (eg those returned by sts#assume_role) + def credentials_set?(credentials) + !credentials.access_key_id.nil? && + !credentials.access_key_id.empty? && + !credentials.secret_access_key.nil? && + !credentials.secret_access_key.empty? + end + + ### CRT Code + + # the credentials used by CRT must be a + # CRT StaticCredentialsProvider object + def crt_fetch_credentials + creds = fetch_credentials + Aws::Crt::Auth::StaticCredentialsProvider.new( + creds.access_key_id, + creds.secret_access_key, + creds.session_token + ) + end + + def crt_sign_request(request) + creds = crt_fetch_credentials + http_method = extract_http_method(request) + url = extract_url(request) + headers = downcase_headers(request[:headers]) + + datetime = + if headers.include? 
'x-amz-date' + Time.parse(headers.delete('x-amz-date')) + end + + content_sha256 = headers.delete('x-amz-content-sha256') + content_sha256 ||= sha256_hexdigest(request[:body] || '') + + sigv4_headers = {} + sigv4_headers['host'] = headers['host'] || host(url) + + # Modify the user-agent to add usage of crt-signer + # This should be temporary during developer preview only + if headers.include? 'user-agent' + headers['user-agent'] = "#{headers['user-agent']} crt-signer/#{@signing_algorithm}/#{Aws::Sigv4::VERSION}" + sigv4_headers['user-agent'] = headers['user-agent'] + end + + headers = headers.merge(sigv4_headers) # merge so we do not modify given headers hash + + config = Aws::Crt::Auth::SigningConfig.new( + algorithm: @signing_algorithm, + signature_type: :http_request_headers, + region: @region, + service: @service, + date: datetime, + signed_body_value: content_sha256, + signed_body_header_type: @apply_checksum_header ? + :sbht_content_sha256 : :sbht_none, + credentials: creds, + unsigned_headers: @unsigned_headers, + use_double_uri_encode: @uri_escape_path, + should_normalize_uri_path: @normalize_path, + omit_session_token: @omit_session_token + ) + http_request = Aws::Crt::Http::Message.new( + http_method, url.to_s, headers + ) + signable = Aws::Crt::Auth::Signable.new(http_request) + + signing_result = Aws::Crt::Auth::Signer.sign_request(config, signable) + + Signature.new( + headers: sigv4_headers.merge( + downcase_headers(signing_result[:headers]) + ), + string_to_sign: 'CRT_INTERNAL', + canonical_request: 'CRT_INTERNAL', + content_sha256: content_sha256, + extra: {config: config, signable: signable} + ) + end + + def crt_presign_url(options) + creds = crt_fetch_credentials + + http_method = extract_http_method(options) + url = extract_url(options) + headers = downcase_headers(options[:headers]) + headers['host'] ||= host(url) + + datetime = headers.delete('x-amz-date') + datetime ||= (options[:time] || Time.now) + + content_sha256 = headers.delete('x-amz-content-sha256') + content_sha256 ||= options[:body_digest] + content_sha256 ||= sha256_hexdigest(options[:body] || '') + + config = Aws::Crt::Auth::SigningConfig.new( + algorithm: @signing_algorithm, + signature_type: :http_request_query_params, + region: @region, + service: @service, + date: datetime, + signed_body_value: content_sha256, + signed_body_header_type: @apply_checksum_header ? + :sbht_content_sha256 : :sbht_none, + credentials: creds, + unsigned_headers: @unsigned_headers, + use_double_uri_encode: @uri_escape_path, + should_normalize_uri_path: @normalize_path, + omit_session_token: @omit_session_token, + expiration_in_seconds: options.fetch(:expires_in, 900) + ) + http_request = Aws::Crt::Http::Message.new( + http_method, url.to_s, headers + ) + signable = Aws::Crt::Auth::Signable.new(http_request) + + signing_result = Aws::Crt::Auth::Signer.sign_request(config, signable, http_method, url.to_s) + url = URI.parse(signing_result[:path]) + + if options[:extra] && options[:extra].is_a?(Hash) + options[:extra][:config] = config + options[:extra][:signable] = signable + end + url + end + + class << self + + def use_crt? + @@use_crt + end + + # @api private + def uri_escape_path(path) + path.gsub(/[^\/]+/) { |part| uri_escape(part) } + end + + # @api private + def uri_escape(string) + if string.nil? 
+ nil
+ else
+ CGI.escape(string.encode('UTF-8')).gsub('+', '%20').gsub('%7E', '~')
+ end
+ end
+
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/.gemtest b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/.gemtest
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Changelog.md b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Changelog.md
new file mode 100644
index 0000000..9259a3c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Changelog.md
@@ -0,0 +1,107 @@
+# Babosa Changelog
+
+## 1.0.4
+
+* Fix nil being cast to frozen string (https://github.com/norman/babosa/pull/52)
+
+## 1.0.3
+
+* Fix Active Support 6 deprecations (https://github.com/norman/babosa/pull/50)
+
+## 1.0.2
+
+* Fix regression in ActiveSupport UTF8 proxy.
+
+## 1.0.1
+
+* Fix error with tidy_bytes on Rubinius.
+* Simplify Active Support UTF8 proxy.
+* Fix `allow_bangs` argument to to_ruby_method being silently ignored.
+* Raise error when generating an impossible Ruby method name.
+
+## 1.0.0
+
+* Adopt semantic versioning.
+* When using Active Support, require 3.2 or greater.
+* Require Ruby 2.0 or greater.
+* Fix Ruby warnings.
+* Improve support for Ukrainian.
+* Support some additional punctuation characters used by Chinese and others.
+* Add Polish spec.
+* Use native Unicode normalization on Ruby 2.2 in UTF8::DumbProxy.
+* Invoke Ruby-native upcase/downcase in UTF8::DumbProxy.
+* Proxy `tidy_bytes` method to Active Support when possible.
+* Remove SlugString constant.
+
+## 0.3.11
+
+* Add support for Vietnamese.
+
+## 0.3.10
+
+* Fix Macedonian "S/S". Don't `include JRuby` unnecessarily.
+
+## 0.3.9
+
+* Add missing Greek vowels with diaeresis.
+
+## 0.3.8
+
+* Correct and improve Macedonian support.
+
+## 0.3.7
+
+* Fix compatibility with Ruby 1.8.7.
+* Add Swedish support.
+
+## 0.3.6
+
+* Allow multiple transliterators.
+* Add Greek support.
+
+## 0.3.5
+
+* Don't strip underscores from identifiers.
+
+## 0.3.4
+
+* Add Romanian support.
+
+## 0.3.3
+
+* Add Norwegian support.
+
+## 0.3.2
+
+* Improve Macedonian support.
+
+## 0.3.1
+
+* Small fixes to Cyrillic.
+
+## 0.3.0
+
+* Cyrillic support.
+* Improve support for various Unicode spaces and dashes.
+
+## 0.2.2
+
+* Fix for "smart" quote handling.
+
+## 0.2.1
+
+* Implement #empty? for compatibility with Active Support's #blank?.
+
+## 0.2.0
+
+* Add support for Danish.
+* Add method to generate Ruby identifiers.
+* Improve performance.
+
+## 0.1.1
+
+* Add support for Serbian.
+
+## 0.1.0
+
+* Initial extraction from FriendlyId.
diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/MIT-LICENSE b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/MIT-LICENSE
new file mode 100644
index 0000000..74181b4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/MIT-LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2010 Norman Clarke
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/README.md b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/README.md new file mode 100644 index 0000000..f7ce26a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/README.md @@ -0,0 +1,279 @@ +# Babosa + +[![Build Status](https://travis-ci.org/norman/babosa.png?branch=master)](https://travis-ci.org/norman/babosa) + + +Babosa is a library for creating human-friendly identifiers, aka "slugs". It can +also be useful for normalizing and sanitizing data. + +It is an extraction and improvement of the string code from +[FriendlyId](http://github.com/norman/friendly_id). I have released this as a +separate library to help developers who want to create libraries similar to +FriendlyId. + +## Features / Usage + +### Transliterate UTF-8 characters to ASCII + + "Gölcük, Turkey".to_slug.transliterate.to_s #=> "Golcuk, Turkey" + +### Locale sensitive transliteration, with support for many languages + + "Jürgen Müller".to_slug.transliterate.to_s #=> "Jurgen Muller" + "Jürgen Müller".to_slug.transliterate(:german).to_s #=> "Juergen Mueller" + +Currently supported languages include: + +* Bulgarian +* Danish +* German +* Greek +* Macedonian +* Norwegian +* Romanian +* Russian +* Serbian +* Spanish +* Swedish +* Ukrainian + +I'll gladly accept contributions from fluent speakers to support more languages. + +### Strip non-ASCII characters + + "Gölcük, Turkey".to_slug.to_ascii.to_s #=> "Glck, Turkey" + +### Truncate by characters + + "üüü".to_slug.truncate(2).to_s #=> "üü" + +### Truncate by bytes + +This can be useful to ensure the generated slug will fit in a database column +whose length is limited by bytes rather than UTF-8 characters. + + "üüü".to_slug.truncate_bytes(2).to_s #=> "ü" + +### Remove punctuation chars + + "this is, um, **really** cool, huh?".to_slug.word_chars.to_s #=> "this is um really cool huh" + +### All-in-one + + "Gölcük, Turkey".to_slug.normalize.to_s #=> "golcuk-turkey" + +### Other stuff + +#### Using Babosa With FriendlyId 4 + + require "babosa" + + class Person < ActiveRecord::Base + friendly_id :name, use: :slugged + + def normalize_friendly_id(input) + input.to_s.to_slug.normalize(transliterations: :russian).to_s + end + end + +#### Pedantic UTF-8 support + +Babosa goes out of its way to handle [nasty Unicode issues you might never think +you would have](https://github.com/norman/enc/blob/master/equivalence.rb) by +checking, sanitizing and normalizing your string input. + +It will automatically use whatever Unicode library you have loaded before +Babosa, or fall back to a simple built-in library. Supported +Unicode libraries include: + +* Java (only on JRuby of course) +* Active Support +* [Unicode](https://github.com/blackwinter/unicode) +* Built-in + +This built-in module is much faster than Active Support but much slower than +Java or Unicode.
It can only do **very** naive Unicode composition to ensure +that, for example, "é" will always be composed to a single codepoint rather than +an "e" and a "´" - making it safe to use as a hash key. + +But seriously - save yourself the headache and install a real Unicode library. +If you are using Babosa with a language that uses the Cyrillic alphabet, Babosa +requires either Unicode, Active Support or Java. + +#### Ruby Method Names + +Babosa can also generate strings for Ruby method names. (Yes, Ruby 1.9 can use +UTF-8 chars in method names, but you may not want to): + + + "this is a method".to_slug.to_ruby_method! #=> this_is_a_method + "über cool stuff!".to_slug.to_ruby_method! #=> uber_cool_stuff! + + # You can also disallow trailing punctuation chars + "über cool stuff!".to_slug.to_ruby_method(false) #=> uber_cool_stuff + +#### Easy to Extend + +You can add custom transliterators for your language with very little code. For +example here's the transliterator for German: + + # encoding: utf-8 + module Babosa + module Transliterator + class German < Latin + APPROXIMATIONS = { + "ä" => "ae", + "ö" => "oe", + "ü" => "ue", + "Ä" => "Ae", + "Ö" => "Oe", + "Ü" => "Ue" + } + end + end + end + +And a spec (you can use this as a template): + + # encoding: utf-8 + require File.expand_path("../../spec_helper", __FILE__) + + describe Babosa::Transliterator::German do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate Eszett" do + t.transliterate("ß").should eql("ss") + end + + it "should transliterate vowels with umlauts" do + t.transliterate("üöä").should eql("ueoeae") + end + + end + + +### Rails 3.x and higher + +Some of Babosa's functionality was added to Active Support 3.0.0. + +Babosa now differs from ActiveSupport primarily in that it supports non-Latin +strings by default, and has per-locale ASCII transliterations already baked-in. +If you are considering using Babosa with Rails, you may want to first take a +look at Active Support's +[transliterate](http://api.rubyonrails.org/classes/ActiveSupport/Inflector.html#method-i-transliterate) +and +[parameterize](http://api.rubyonrails.org/classes/ActiveSupport/Inflector.html#method-i-parameterize) +to see if they suit your needs. + +### Babosa vs. Stringex + +Babosa provides much of the functionality provided by the +[Stringex](https://github.com/rsl/stringex) gem, but in the subjective opinion +of the author, is for most use cases a better choice. + +#### Fewer Features + +Stringex offers functionality for storing slugs in an Active Record model, like +a simple version of [FriendlyId](http://github.com/norman/friendly_id), in +addition to string processing. Babosa only does string processing. + +#### Less Aggressive Unicode Transliteration + +Stringex uses an aggressive Unicode to ASCII mapping which outputs gibberish for +almost anything but Western European languages and Mandarin Chinese. Babosa +supports only languages for which fluent speakers have provided +transliterations, to ensure that the output makes sense to users. + +#### Unicode Support + +Stringex does no Unicode normalization or validation before transliterating +strings, so if you pass in strings with encoding errors or with different +Unicode normalizations, you'll get unpredictable results.
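+
+As a quick sketch of why this matters (illustrative output, assuming Babosa's
+default Latin transliterator): two different byte sequences for "ü" produce
+the same slug, because input is NFC-normalized before transliteration:
+
+    composed = "\u00FC"    # "ü" as a single codepoint
+    decomposed = "u\u0308" # "u" plus COMBINING DIAERESIS
+    composed == decomposed            #=> false
+    composed.to_slug.normalize.to_s   #=> "u"
+    decomposed.to_slug.normalize.to_s #=> "u"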
+ +#### No Locale Assumptions + +Babosa avoids making assumptions about locales like Stringex does, so it doesn't +offer transliterations like this out of the box: + + "$12 worth of Ruby power".to_url => "12-dollars-worth-of-ruby-power" + +This is because the symbol "$" is used in many Latin American countries for the +peso. Stringex does this in many places, for example, transliterating all Han +characters into Pinyin, effectively treating Japanese text as if it were +Mandarin Chinese. + + +### More info + +Please see the [API docs](http://rubydoc.info/github/norman/babosa/master/frames) and source code for +more info. + +## Getting it + +Babosa can be installed via Rubygems: + + gem install babosa + +You can get the source code from its [Github repository](http://github.com/norman/babosa). + +Babosa is tested to be compatible with Ruby 2.x, JRuby 1.7+, and +Rubinius 2.x. It's probably compatible with other Rubies as well. + +## Reporting bugs + +Please use Babosa's [Github issue +tracker](http://github.com/norman/babosa/issues). + + +## Misc + +"Babosa" means slug in Spanish. + +## Author + +[Norman Clarke](http://njclarke.com) + +## Contributors + +Many thanks to the following people for their help: + +* [Dmitry A. Ilyashevich](https://github.com/dmitry-ilyashevich) - Deprecation fixes +* [anhkind](https://github.com/anhkind) - Vietnamese support +* [Martins Zakis](https://github.com/martins) - Bug fixes +* [Vassilis Rodokanakis](https://github.com/vrodokanakis) - Greek support +* [Peco Danajlovski](https://github.com/Vortex) - Macedonian support +* [Philip Arndt](https://github.com/parndt) - Bug fixes +* [Jonas Forsberg](https://github.com/himynameisjonas) - Swedish support +* [Jaroslav Kalistsuk](https://github.com/jarosan) - Greek support +* [Steven Heidel](https://github.com/stevenheidel) - Bug fixes +* [Edgars Beigarts](https://github.com/ebeigarts) - Support for multiple transliterators +* [Tiberiu C. Turbureanu](https://gitorious.org/~tct) - Romanian support +* [Kim Joar Bekkelund](https://github.com/kjbekkelund) - Norwegian support +* [Alexey Shkolnikov](https://github.com/grlm) - Russian support +* [Martin Petrov](https://github.com/martin-petrov) - Bulgarian support +* [Molte Emil Strange Andersen](https://github.com/molte) - Danish support +* [Milan Dobrota](https://github.com/milandobrota) - Serbian support + +## Copyright + +Copyright (c) 2010-2013 Norman Clarke + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Rakefile b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Rakefile new file mode 100644 index 0000000..5344d2a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/Rakefile @@ -0,0 +1,34 @@ +require "rubygems" +require "rake/testtask" +require "rake/clean" +require "rubygems/package_task" + +task :default => :spec +task :test => :spec + +CLEAN << "pkg" << "doc" << "coverage" << ".yardoc" + +begin + require "yard" + YARD::Rake::YardocTask.new do |t| + t.options = ["--output-dir=doc"] + end +rescue LoadError +end + +begin + desc "Run SimpleCov" + task :coverage do + ENV["COV"] = "true" + Rake::Task["spec"].execute + end +rescue LoadError +end + +gemspec = File.expand_path("../babosa.gemspec", __FILE__) +if File.exist? gemspec + Gem::PackageTask.new(eval(File.read(gemspec))) { |pkg| } +end + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new(:spec) diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa.rb new file mode 100644 index 0000000..31a42fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa.rb @@ -0,0 +1,16 @@ +module Babosa + def self.jruby15? + JRUBY_VERSION >= "1.5" rescue false + end +end + +class String + def to_identifier + Babosa::Identifier.new self + end + alias to_slug to_identifier +end + +require "babosa/transliterator/base" +require "babosa/utf8/proxy" +require "babosa/identifier" diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/identifier.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/identifier.rb new file mode 100644 index 0000000..486b678 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/identifier.rb @@ -0,0 +1,293 @@ +# encoding: utf-8 +module Babosa + + # Codepoints for characters that will be deleted by +#word_chars!+. + STRIPPABLE = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 58, 59, 60, 61, 62, 63, 64, 91, 92, 93, 94, + 96, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 182, 183, 184, + 185, 187, 188, 189, 190, 191, 215, 247, 8203, 8204, 8205, 8239, 65279] + + # This class provides some string-manipulation methods specific to slugs. + # + # Note that this class includes many "bang methods" such as {#clean!} and + # {#normalize!} that perform actions on the string in-place. Each of these + # methods has a corresponding "bangless" method (i.e., +Identifier#clean!+ + # and +Identifier#clean+) which does not appear in the documentation because + # it is generated dynamically. + # + # All of the bang methods return an instance of String, while the bangless + # versions return an instance of Babosa::Identifier, so that calls to methods + # specific to this class can be chained: + # + # string = Identifier.new("hello world") + # string.with_separators! # => "hello-world" + # string.with_separators # => + # + # @see http://www.utf8-chartable.de/unicode-utf8-table.pl?utf8=dec Unicode character table + class Identifier + + Error = Class.new(StandardError) + + attr_reader :wrapped_string + alias to_s wrapped_string + + @@utf8_proxy = if Babosa.jruby15? + UTF8::JavaProxy + elsif defined? 
Unicode::VERSION + UTF8::UnicodeProxy + elsif defined? ActiveSupport + UTF8::ActiveSupportProxy + else + UTF8::DumbProxy + end + + # Return the proxy used for UTF-8 support. + # @see Babosa::UTF8::Proxy + def self.utf8_proxy + @@utf8_proxy + end + + # Set a proxy object used for UTF-8 support. + # @see Babosa::UTF8::Proxy + def self.utf8_proxy=(obj) + @@utf8_proxy = obj + end + + def method_missing(symbol, *args, &block) + @wrapped_string.__send__(symbol, *args, &block) + end + + # @param string [#to_s] The string to use as the basis of the Identifier. + def initialize(string) + @wrapped_string = string.to_s + tidy_bytes! + normalize_utf8! + end + + def ==(value) + @wrapped_string.to_s == value.to_s + end + + def eql?(value) + @wrapped_string == value + end + + def empty? + # included to make this class :respond_to? :empty for compatibility with Active Support's + # #blank? + @wrapped_string.empty? + end + + # Approximate an ASCII string. This works only for Western strings using + # characters that are Roman-alphabet characters + diacritics. Non-letter + # characters are left unmodified. + # + # string = Identifier.new "Łódź, Poland" + # string.transliterate # => "Lodz, Poland" + # string = Identifier.new "日本" + # string.transliterate # => "日本" + # + # You can pass any key(s) from +Characters.approximations+ as arguments. This allows + # for contextual approximations. Various languages are supported, you can see which ones + # by looking at the source of {Babosa::Transliterator::Base}. + # + # string = Identifier.new "Jürgen Müller" + # string.transliterate # => "Jurgen Muller" + # string.transliterate :german # => "Juergen Mueller" + # string = Identifier.new "¡Feliz año!" + # string.transliterate # => "¡Feliz ano!" + # string.transliterate :spanish # => "¡Feliz anio!" + # + # The approximations are a hash, which you can modify if you choose: + # + # # Make Spanish use "nh" rather than "nn" + # Babosa::Transliterator::Spanish::APPROXIMATIONS["ñ"] = "nh" + # + # Notice that this method does not simply convert to ASCII; if you want + # to remove non-ASCII characters such as "¡" and "¿", use {#to_ascii!}: + # + # string.transliterate!(:spanish) # => "¡Feliz anio!" + # string.transliterate! # => "¡Feliz anio!" + # + # @param *kinds + # @return String + def transliterate!(*kinds) + kinds.compact! + kinds = [:latin] if kinds.empty? + kinds.each do |kind| + transliterator = Transliterator.get(kind).instance + @wrapped_string = transliterator.transliterate(@wrapped_string) + end + @wrapped_string + end + + # Converts dashes to spaces, removes leading and trailing spaces, and + # replaces multiple whitespace characters with a single space. + # @return String + def clean! + @wrapped_string = @wrapped_string.gsub("-", " ").squeeze(" ").strip + end + + # Remove any non-word characters. For this library's purposes, this means + # anything other than letters, numbers, spaces, newlines and linefeeds. + # @return String + def word_chars! + @wrapped_string = (unpack("U*") - Babosa::STRIPPABLE).pack("U*") + end + + # Normalize the string for use as a URL slug. Note that in this context, + # +normalize+ means: strip, remove non-letters/numbers, downcase, truncate + # to 255 bytes, and convert whitespace to dashes.
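+ #
+ # A quick sketch of the full pipeline (illustrative; assumes the
+ # default Latin transliterator):
+ #
+ # "Gölcük, Turkey".to_slug.normalize.to_s #=> "golcuk-turkey"
+ #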
+ # @param options + # @return String + def normalize!(options = nil) + options = default_normalize_options.merge(options || {}) + + if translit_option = options[:transliterate] + if translit_option != true + transliterate!(*translit_option) + else + transliterate!(*options[:transliterations]) + end + end + to_ascii! if options[:to_ascii] + clean! + word_chars! + clean! + downcase! + truncate_bytes!(options[:max_length]) + with_separators!(options[:separator]) + end + + # Normalize a string so that it can safely be used as a Ruby method name. + def to_ruby_method!(allow_bangs = true) + leader, trailer = @wrapped_string.strip.scan(/\A(.+)(.)\z/).flatten + leader = leader.to_s.dup + trailer = trailer.to_s.dup + if allow_bangs + trailer.downcase! + trailer.gsub!(/[^a-z0-9!=\\?]/, '') + else + trailer.downcase! + trailer.gsub!(/[^a-z0-9]/, '') + end + id = leader.to_identifier + id.transliterate! + id.to_ascii! + id.clean! + id.word_chars! + id.clean! + @wrapped_string = id.to_s + trailer + if @wrapped_string == "" + raise Error, "Input generates impossible Ruby method name" + end + with_separators!("_") + end + + # Delete any non-ascii characters. + # @return String + def to_ascii! + @wrapped_string = @wrapped_string.gsub(/[^\x00-\x7f]/u, '') + end + + # Truncate the string to +max+ characters. + # @example + # "üéøá".to_identifier.truncate(3) #=> "üéø" + # @return String + def truncate!(max) + @wrapped_string = unpack("U*")[0...max].pack("U*") + end + + # Truncate the string to +max+ bytes. This can be useful for ensuring that + # a UTF-8 string will always fit into a database column with a certain max + # byte length. The resulting string may be less than +max+ if the string must + # be truncated at a multibyte character boundary. + # @example + # "üéøá".to_identifier.truncate_bytes(3) #=> "ü" + # @return String + def truncate_bytes!(max) + return @wrapped_string if @wrapped_string.bytesize <= max + curr = 0 + new = [] + unpack("U*").each do |char| + break if curr > max + char = [char].pack("U") + curr += char.bytesize + if curr <= max + new << char + end + end + @wrapped_string = new.join + end + + # Replaces whitespace with dashes ("-"). + # @return String + def with_separators!(char = "-") + @wrapped_string = @wrapped_string.gsub(/\s/u, char) + end + + # Perform UTF-8 sensitive upcasing. + # @return String + def upcase! + @wrapped_string = @@utf8_proxy.upcase(@wrapped_string) + end + + # Perform UTF-8 sensitive downcasing. + # @return String + def downcase! + @wrapped_string = @@utf8_proxy.downcase(@wrapped_string) + end + + # Perform Unicode composition on the wrapped string. + # @return String + def normalize_utf8! + @wrapped_string = @@utf8_proxy.normalize_utf8(@wrapped_string) + end + + # Attempt to convert characters encoded using CP1252 and ISO-8859-1 to + # UTF-8. + # @return String + def tidy_bytes! + @wrapped_string = @@utf8_proxy.tidy_bytes(@wrapped_string) + end + + %w[transliterate clean downcase word_chars normalize normalize_utf8 + tidy_bytes to_ascii to_ruby_method truncate truncate_bytes upcase + with_separators].each do |method| + class_eval(<<-EOM, __FILE__, __LINE__ + 1) + def #{method}(*args) + send_to_new_instance(:#{method}!, *args) + end + EOM + end + + def to_identifier + self + end + + # The default options for {#normalize!}. Override to set your own defaults. + def default_normalize_options + {:transliterate => true, :max_length => 255, :separator => "-"} + end + + alias approximate_ascii transliterate + alias approximate_ascii! transliterate!
+ alias with_dashes with_separators + alias with_dashes! with_separators! + alias to_slug to_identifier + + private + + # Used as the basis of the bangless methods. + def send_to_new_instance(*args) + id = Identifier.allocate + id.instance_variable_set :@wrapped_string, to_s + id.send(*args) + id + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/base.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/base.rb new file mode 100644 index 0000000..6de2804 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/base.rb @@ -0,0 +1,110 @@ +# encoding: utf-8 + +require 'singleton' + +module Babosa + + module Transliterator + + autoload :Bulgarian, "babosa/transliterator/bulgarian" + autoload :Cyrillic, "babosa/transliterator/cyrillic" + autoload :Danish, "babosa/transliterator/danish" + autoload :German, "babosa/transliterator/german" + autoload :Hindi, "babosa/transliterator/hindi" + autoload :Latin, "babosa/transliterator/latin" + autoload :Macedonian, "babosa/transliterator/macedonian" + autoload :Norwegian, "babosa/transliterator/norwegian" + autoload :Romanian, "babosa/transliterator/romanian" + autoload :Russian, "babosa/transliterator/russian" + autoload :Serbian, "babosa/transliterator/serbian" + autoload :Spanish, "babosa/transliterator/spanish" + autoload :Swedish, "babosa/transliterator/swedish" + autoload :Ukrainian, "babosa/transliterator/ukrainian" + autoload :Greek, "babosa/transliterator/greek" + autoload :Vietnamese, "babosa/transliterator/vietnamese" + autoload :Turkish, "babosa/transliterator/turkish" + + def self.get(symbol) + class_name = symbol.to_s.split("_").map {|a| a.gsub(/\b('?[a-z])/) { $1.upcase }}.join + const_get(class_name) + end + + class Base + include Singleton + + APPROXIMATIONS = { + "×" => "x", + "÷" => "/", + "‐" => "-", + "‑" => "-", + "‒" => "-", + "–" => "-", + "—" => "-", + "―" => "-", + "‘" => "'", + "‛" => "'", + "“" => '"', + "”" => '"', + "„" => '"', + "‟" => '"', + '’' => "'", + '，' => ",", + '。' => ".", + '！' => "!", + '？' => '?', + '、' => ',', + '（' => '(', + '）' => ')', + '【' => '[', + '】' => ']', + '；' => ';', + '：' => ':', + '《' => '<', + '》' => '>', + # various kinds of space characters + "\xc2\xa0" => " ", + "\xe2\x80\x80" => " ", + "\xe2\x80\x81" => " ", + "\xe2\x80\x82" => " ", + "\xe2\x80\x83" => " ", + "\xe2\x80\x84" => " ", + "\xe2\x80\x85" => " ", + "\xe2\x80\x86" => " ", + "\xe2\x80\x87" => " ", + "\xe2\x80\x88" => " ", + "\xe2\x80\x89" => " ", + "\xe2\x80\x8a" => " ", + "\xe2\x81\x9f" => " ", + "\xe3\x80\x80" => " ", + }.freeze + + attr_reader :approximations + + def initialize + if self.class < Base + @approximations = self.class.superclass.instance.approximations.dup + else + @approximations = {} + end + self.class.const_get(:APPROXIMATIONS).inject(@approximations) do |memo, object| + index = object[0].unpack("U").shift + value = object[1].unpack("C*") + memo[index] = value.length == 1 ? value[0] : value + memo + end + @approximations.freeze + end + + # Accepts a single UTF-8 codepoint and returns the ASCII character code + # used as the transliteration value. + def [](codepoint) + @approximations[codepoint] + end + + # Transliterates a string.
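+ # For example, using the base approximations above (illustrative):
+ #
+ # Babosa::Transliterator::Base.instance.transliterate("“smart” quotes")
+ # #=> "\"smart\" quotes"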
+ def transliterate(string) + string.unpack("U*").map {|char| self[char] || char}.flatten.pack("U*") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/bulgarian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/bulgarian.rb new file mode 100644 index 0000000..3c8f943 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/bulgarian.rb @@ -0,0 +1,27 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Bulgarian < Cyrillic + APPROXIMATIONS = { + "Ж" => "J", + "Й" => "I", + "ĐĨ" => "H", + "ĐĻ" => "C", + "ĐŠ" => "Sht", + "ĐĒ" => "U", + "ĐŦ" => "I", + "ĐŽ" => "Iu", + "Đ¯" => "Ia", + "Đļ" => "j", + "Đš" => "i", + "Ņ…" => "h", + "Ņ†" => "c", + "Ņ‰" => "sht", + "ŅŠ" => "u", + "ŅŒ" => "i", + "ŅŽ" => "iu", + "Ņ" => "ia" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/cyrillic.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/cyrillic.rb new file mode 100644 index 0000000..2478af3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/cyrillic.rb @@ -0,0 +1,108 @@ +# encoding: utf-8 +module Babosa + module Transliterator + + # Approximations are based on GOST 7.79, System B: + # http://en.wikipedia.org/wiki/ISO_9#GOST_7.79 + class Cyrillic < Base + APPROXIMATIONS = { + "Ё" => "Yo", + "Ѓ" => "G", + "Є" => "Ye", + "Ї" => "Yi", + "Љ" => "L", + "Њ" => "N", + "Ќ" => "K", + "Ў" => "U", + "Џ" => "Dh", + "А" => "A", + "Б" => "B", + "В" => "V", + "Г" => "G", + "Д" => "D", + "Е" => "E", + "Ж" => "Zh", + "З" => "Z", + "И" => "I", + "Й" => "J", + "К" => "K", + "Л" => "L", + "М" => "M", + "Н" => "N", + "О" => "O", + "П" => "P", + "Đ " => "R", + "ĐĄ" => "S", + "Đĸ" => "T", + "ĐŖ" => "U", + "Ф" => "F", + "ĐĨ" => "X", + "ĐĻ" => "Cz", + "Ч" => "Ch", + "Ш" => "Sh", + "ĐŠ" => "Shh", + "ĐĒ" => "", + "ĐĢ" => "Y", + "ĐŦ" => "", + "Đ­" => "E", + "ĐŽ" => "Yu", + "Đ¯" => "Ya", + "Đ°" => "a", + "Đą" => "b", + "в" => "v", + "Đŗ" => "g", + "Đ´" => "d", + "Đĩ" => "e", + "Đļ" => "zh", + "С" => "z", + "и" => "i", + "Đš" => "j", + "Đē" => "k", + "Đģ" => "l", + "Đŧ" => "m", + "ĐŊ" => "n", + "Đž" => "o", + "Đŋ" => "p", + "Ņ€" => "r", + "Ņ" => "s", + "Ņ‚" => "t", + "Ņƒ" => "u", + "Ņ„" => "f", + "Ņ…" => "x", + "Ņ†" => "cz", + "Ņ‡" => "ch", + "Ņˆ" => "sh", + "Ņ‰" => "shh", + "ŅŠ" => "", + "Ņ‹" => "y", + "ŅŒ" => "", + "Ņ" => "e", + "ŅŽ" => "yu", + "Ņ" => "ya", + "Ņ‘" => "yo", + "Ņ“" => "g", + "Ņ”" => "ye", + "Ņ—" => "yi", + "Ņ™" => "l", + "Ņš" => "n", + "Ņœ" => "k", + "Ņž" => "u", + "ŅŸ" => "dh", + "Ņĸ" => "Ye", + "ŅŖ" => "ye", + "ŅĒ" => "O", + "ŅĢ" => "o", + "Ņ˛" => "Fh", + "Ņŗ" => "fh", + "Ņ´" => "Yh", + "Ņĩ" => "yh", + "Ō" => "G", + "Ō‘" => "g", + } + + def transliterate(string) + super.gsub(/(c)z([ieyj])/) { "#{$1}#{$2}" } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/danish.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/danish.rb new file mode 100644 index 0000000..523e5ce --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/danish.rb @@ -0,0 +1,15 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Danish < Latin + APPROXIMATIONS = { + "ÃĻ" => "ae", + "ø" => "oe", + "ÃĨ" => "aa", + "Ø" => "Oe", + "Å" => "Aa" + } + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/german.rb 
b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/german.rb new file mode 100644 index 0000000..1ae48f7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/german.rb @@ -0,0 +1,15 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class German < Latin + APPROXIMATIONS = { + "ä" => "ae", + "Ãļ" => "oe", + "Ãŧ" => "ue", + "Ä" => "Ae", + "Ö" => "Oe", + "Ü" => "Ue" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/greek.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/greek.rb new file mode 100644 index 0000000..6c7eeb8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/greek.rb @@ -0,0 +1,77 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Greek < Base + APPROXIMATIONS = { + "Α" => "A", + "Ά" => "A", + "Îą" => "a", + "ÎŦ" => "a", + "Β" => "V", + "β" => "v", + "Γ" => "G", + "Îŗ" => "g", + "Δ" => "D", + "δ" => "d", + "Ε" => "E", + "Έ" => "E", + "Îĩ" => "e", + "έ" => "e", + "Ζ" => "Z", + "Îļ" => "z", + "Η" => "I", + "Ή" => "i", + "Ρ" => "i", + "ÎŽ" => "i", + "Θ" => "TH", + "θ" => "th", + "Ι" => "I", + "Ί" => "Ι", + "Î" => "I", + "Κ" => "i", + "ί" => "i", + "ĪŠ" => "i", + "ΐ" => "i", + "Κ" => "K", + "Îē" => "k", + "Λ" => "L", + "Îģ" => "l", + "Μ" => "M", + "Îŧ" => "m", + "Ν" => "N", + "ÎŊ" => "n", + "Ξ" => "KS", + "Ξ" => "ks", + "Ο" => "O", + "Ό" => "O", + "Îŋ" => "o", + "ĪŒ" => "o", + "Π" => "P", + "Ī€" => "p", + "ÎĄ" => "R", + "Ī" => "r", + "ÎŖ" => "S", + "Īƒ" => "s", + "Ī‚" => "s", + "Τ" => "T", + "Ī„" => "t", + "ÎĨ" => "Y", + "Ύ" => "Y", + "Ī…" => "y", + "Ī" => "y", + "Ī‹" => "y", + "ΰ" => "y", + "ÎĻ" => "F", + "Ī†" => "f", + "Χ" => "X", + "Ī‡" => "x", + "Ψ" => "PS", + "Īˆ" => "ps", + "Ί" => "O", + "Ώ" => "O", + "Ī‰" => "o", + "ĪŽ" => "o" + } + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/hindi.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/hindi.rb new file mode 100644 index 0000000..e1c2017 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/hindi.rb @@ -0,0 +1,137 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Hindi < Base + APPROXIMATIONS = { + "ā¤€" => "n", + "ā¤" => "n", + "ā¤‚" => "n", + "ā¤ƒ" => "h", + "ā¤„" => "a", + "ā¤…" => "a", + "ā¤†" => "aa", + "ā¤‡" => "i", + "ā¤ˆ" => "ii", + "ā¤‰" => "u", + "ā¤Š" => "uu", + "ā¤‹" => "ri", + "ā¤Œ" => "lri", + "ā¤" => "e", + "ā¤Ž" => "e", + "ā¤" => "e", + "ā¤" => "ei", + "ā¤‘" => "o", + "ā¤’" => "o", + "ā¤“" => "o", + "ā¤”" => "ou", + "ā¤•" => "k", + "ā¤–" => "kh", + "ā¤—" => "g", + "ā¤˜" => "gh", + "ā¤™" => "d", + "ā¤š" => "ch", + "ā¤›" => "chh", + "ā¤œ" => "j", + "ā¤" => "jh", + "ā¤ž" => "ny", + "ā¤Ÿ" => "tt", + "ā¤ " => "tth", + "ā¤Ą" => "dd", + "ā¤ĸ" => "ddh", + "ā¤Ŗ" => "nn", + "ā¤¤" => "t", + "ā¤Ĩ" => "th", + "ā¤Ļ" => "d", + "ā¤§" => "dh", + "ā¤¨" => "n", + "ā¤Š" => "nnn", + "ā¤Ē" => "p", + "ā¤Ģ" => "ph", + "ā¤Ŧ" => "b", + "ā¤­" => "bh", + "ā¤Ž" => "m", + "ā¤¯" => "y", + "ā¤°" => "r", + "ā¤ą" => "rr", + "ā¤˛" => "l", + "ā¤ŗ" => "ll", + "ā¤´" => "ll", + "ā¤ĩ" => "v", + "ā¤ļ" => "sh", + "ā¤ˇ" => "ss", + "ā¤¸" => "s", + "ā¤š" => "h", + "ā¤ē" => "oe", + "ā¤ģ" => "ooe", + "ā¤ŧ" => "", + "ā¤Ŋ" => "-", + "ā¤ž" => "aa", + "ā¤ŋ" => "i", + "āĨ€" => "ii", + "āĨ" => "u", + "āĨ‚" => "uu", + "āĨƒ" => "r", + "āĨ„" => "rr", + "āĨ…" => "e", + "āĨ†" => "e", + "āĨ‡" => "e", + "āĨˆ" => 
"ai", + "āĨ‰" => "o", + "āĨŠ" => "o", + "āĨ‹" => "o", + "āĨŒ" => "au", + "āĨ" => "", + "āĨŽ" => "e", + "āĨ" => "aw", + "āĨ" => "om", + "āĨ‘" => "", + "āĨ’" => "_", + "āĨ“" => "", + "āĨ”" => "", + "āĨ•" => "ee", + "āĨ–" => "ue", + "āĨ—" => "uue", + "āĨ˜" => "q", + "āĨ™" => "khh", + "āĨš" => "ghh", + "āĨ›" => "za", + "āĨœ" => "dddh", + "āĨ" => "rh", + "āĨž" => "f", + "āĨŸ" => "yy", + "āĨ " => "rri", + "āĨĄ" => "lr", + "āĨĸ" => "l", + "āĨŖ" => "l", + "āĨ¤" => ".", + "āĨĨ" => "..", + "āĨĻ" => "0", + "āĨ§" => "1", + "āĨ¨" => "2", + "āĨŠ" => "3", + "āĨĒ" => "4", + "āĨĢ" => "5", + "āĨŦ" => "6", + "āĨ­" => "7", + "āĨŽ" => "8", + "āĨ¯" => "9", + "āĨ°" => ".", + "āĨą" => ".", + "āĨ˛" => "a", + "āĨŗ" => "oe", + "āĨ´" => "ooe", + "āĨĩ" => "aw", + "āĨļ" => "ue", + "āĨˇ" => "uue", + "āĨ¸" => "dd", + "āĨš" => "zh", + "āĨē" => "y", + "āĨģ" => "gg", + "āĨŧ" => "jj", + "āĨŊ" => "?", + "āĨž" => "ddd", + "āĨŋ" => "bb" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/latin.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/latin.rb new file mode 100644 index 0000000..0ab7997 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/latin.rb @@ -0,0 +1,199 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Latin < Base + + APPROXIMATIONS = { + "À" => "A", + "Á" => "A", + "Â" => "A", + "Ã" => "A", + "Ä" => "A", + "Å" => "A", + "Æ" => "Ae", + "Ç" => "C", + "È" => "E", + "É" => "E", + "Ê" => "E", + "Ë" => "E", + "Ì" => "I", + "Í" => "I", + "Î" => "I", + "Ï" => "I", + "Ð" => "D", + "Ñ" => "N", + "Ò" => "O", + "Ó" => "O", + "Ô" => "O", + "Õ" => "O", + "Ö" => "O", + "Ø" => "O", + "Ù" => "U", + "Ú" => "U", + "Û" => "U", + "Ü" => "U", + "Ý" => "Y", + "Þ" => "Th", + "ß" => "ss", + "à" => "a" , + "ÃĄ" => "a", + "Ãĸ" => "a", + "ÃŖ" => "a", + "ä" => "a", + "ÃĨ" => "a", + "ÃĻ" => "ae", + "ç" => "c" , + "è" => "e", + "Ê" => "e", + "ÃĒ" => "e", + "ÃĢ" => "e", + "ÃŦ" => "i", + "í" => "i", + "ÃŽ" => "i", + "ï" => "i", + "ð" => "d", + "Ãą" => "n", + "Ã˛" => "o", + "Ãŗ" => "o", + "ô" => "o", + "Ãĩ" => "o", + "Ãļ" => "o", + "ø" => "o", + "Ú" => "u", + "Ãē" => "u", + "Ãģ" => "u", + "Ãŧ" => "u", + "ÃŊ" => "y", + "Þ" => "th", + "Ãŋ" => "y", + "Ā" => "A", + "Ă" => "A", + "Ą" => "A", + "Ć" => "C", + "Ĉ" => "C", + "Ċ" => "C", + "Č" => "C", + "Ď" => "D", + "Đ" => "D", + "Ē" => "E", + "Ĕ" => "E", + "Ė" => "E", + "Ę" => "E", + "Ě" => "E", + "Ĝ" => "G", + "Ğ" => "G", + "Ä " => "G", + "Äĸ" => "G", + "Ĥ" => "H", + "ÄĻ" => "H", + "Ĩ" => "I", + "ÄĒ" => "I", + "ÄŦ" => "I", + "ÄŽ" => "I", + "Ä°" => "I", + "IJ" => "Ij", + "Ä´" => "J", + "Äļ" => "K", + "Äš" => "L", + "Äģ" => "L", + "ÄŊ" => "L", + "Äŋ" => "L", + "Ł" => "L", + "Ń" => "N", + "Ņ" => "N", + "Ň" => "N", + "Ŋ" => "Ng", + "Ō" => "O", + "Ŏ" => "O", + "Ő" => "O", + "Œ" => "OE", + "Ŕ" => "R", + "Ŗ" => "R", + "Ř" => "R", + "Ś" => "S", + "Ŝ" => "S", + "Ş" => "S", + "Å " => "S", + "Åĸ" => "T", + "Ť" => "T", + "ÅĻ" => "T", + "Ũ" => "U", + "ÅĒ" => "U", + "ÅŦ" => "U", + "ÅŽ" => "U", + "Å°" => "U", + "Å˛" => "U", + "Å´" => "W", + "Åļ" => "Y", + "Ÿ" => "Y", + "Åš" => "Z", + "Åģ" => "Z", + "ÅŊ" => "Z", + "ā" => "a", + "ă" => "a", + "ą" => "a", + "ć" => "c", + "ĉ" => "c", + "ċ" => "c", + "č" => "c", + "ď" => "d", + "đ" => "d", + "ē" => "e", + "ĕ" => "e", + "ė" => "e", + "ę" => "e", + "ě" => "e", + "ĝ" => "g", + "ğ" => "g", + "ÄĄ" => "g", + "ÄŖ" => "g", + "ÄĨ" => "h", + "ħ" => "h", + "ÄŠ" => "i", + "ÄĢ" => "i", + "Ä­" => "i", + "į" => "i", + "Äą" => "i", + "Äŗ" => "ij", + 
"Äĩ" => "j", + "ġ" => "k", + "ĸ" => "k", + "Äē" => "l", + "Äŧ" => "l", + "Äž" => "l", + "ŀ" => "l", + "ł" => "l", + "ń" => "n", + "ņ" => "n", + "ň" => "n", + "ʼn" => "n", + "ŋ" => "ng", + "ō" => "o", + "ŏ" => "o", + "ő" => "o", + "œ" => "oe", + "ŕ" => "r", + "ŗ" => "r", + "ř" => "r", + "ś" => "s", + "ŝ" => "s", + "ş" => "s", + "ÅĄ" => "s", + "ÅŖ" => "t", + "ÅĨ" => "t", + "ŧ" => "t", + "ÅŠ" => "u", + "ÅĢ" => "u", + "Å­" => "u", + "ů" => "u", + "Åą" => "u", + "Åŗ" => "u", + "Åĩ" => "w", + "Åˇ" => "y", + "Åž" => "z", + "Åē" => "z", + "Åŧ" => "z" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/macedonian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/macedonian.rb new file mode 100644 index 0000000..568f456 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/macedonian.rb @@ -0,0 +1,29 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Macedonian < Cyrillic + APPROXIMATIONS = { + "Ѓ" => "Gj", + "Љ" => "Lj", + "Њ" => "Nj", + "Ќ" => "Kj", + "Џ" => "Dzh", + "Ж" => "Zh", + "ĐĻ" => "C", + "Ѕ" => "Z", + "Ј" => "J", + "ĐĨ" => "H", + "Ņ“" => "gj", + "Ņ™" => "lj", + "Ņš" => "nj", + "Ņœ" => "kj", + "ŅŸ" => "dzh", + "Đļ" => "zh", + "Ņ†" => "c", + "Ņ•" => "z", + "Ņ˜" => "j", + "Ņ…" => "h" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/norwegian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/norwegian.rb new file mode 100644 index 0000000..957feb8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/norwegian.rb @@ -0,0 +1,14 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Norwegian < Latin + APPROXIMATIONS = { + "ø" => "oe", + "ÃĨ" => "aa", + "Ø" => "Oe", + "Å" => "Aa" + } + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/romanian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/romanian.rb new file mode 100644 index 0000000..6a16e1c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/romanian.rb @@ -0,0 +1,13 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Romanian < Latin + APPROXIMATIONS = { + "ș" => "s", + "ț" => "t", + "Ș" => "S", + "Ț" => "T" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/russian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/russian.rb new file mode 100644 index 0000000..65e6de5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/russian.rb @@ -0,0 +1,22 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Russian < Cyrillic + APPROXIMATIONS = { + "Й" => "I", + "М" => "M", + "ĐĨ" => "H", + "ĐĻ" => "Ts", + "Ш" => "Sh", + "ĐŠ" => "Sch", + "ĐŽ" => "U", + "Đ¯" => "Ya", + "Đš" => "i", + "Ņ…" => "h", + "Ņ†" => "ts", + "Ņ‰" => "sch", + "ŅŽ" => "u" + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/serbian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/serbian.rb new file mode 100644 index 0000000..a40b8b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/serbian.rb @@ -0,0 +1,34 @@ +# encoding: utf-8 + +module Babosa + module Transliterator + class Serbian < Latin + APPROXIMATIONS = Cyrillic.const_get(:APPROXIMATIONS).merge({ + "Ð" => 
"Dj", + "Č" => "Ch", + "Å " => "Sh", + "č" => "ch", + "đ" => "dj", + "ÅĄ" => "sh", + "Ћ" => "C", + "ĐĻ" => "C", + "Ч" => "Ch", + "Ђ" => "Dj", + "Џ" => "Dz", + "ĐĨ" => "H", + "Ј" => "J", + "Љ" => "Lj", + "Њ" => "Nj", + "Ņ†" => "c", + "Ņ›" => "c", + "Ņ‡" => "ch", + "Ņ’" => "dj", + "ŅŸ" => "dz", + "Ņ…" => "h", + "Ņ˜" => "j", + "Ņ™" => "lj", + "Ņš" => "nj" + }) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/spanish.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/spanish.rb new file mode 100644 index 0000000..07563c1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/spanish.rb @@ -0,0 +1,9 @@ +# encoding: utf-8 + +module Babosa + module Transliterator + class Spanish < Latin + APPROXIMATIONS = {"Ãą" => "ni", "Ñ" => "Ni"} + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/swedish.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/swedish.rb new file mode 100644 index 0000000..19fdeba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/swedish.rb @@ -0,0 +1,16 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Swedish < Latin + APPROXIMATIONS = { + "ÃĨ" => "aa", + "ä" => "ae", + "Ãļ" => "oe", + "Å" => "Aa", + "Ä" => "Ae", + "Ö" => "Oe" + } + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/turkish.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/turkish.rb new file mode 100644 index 0000000..4279a8e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/turkish.rb @@ -0,0 +1,8 @@ +# encoding: utf-8 + +module Babosa + module Transliterator + class Turkish < Latin + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/ukrainian.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/ukrainian.rb new file mode 100644 index 0000000..a4153f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/ukrainian.rb @@ -0,0 +1,30 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Ukrainian < Cyrillic + APPROXIMATIONS = { + "Г" => "H", + "Đŗ" => "h", + "Ō" => "G", + "Ō‘" => "g", + "Ņ”" => "ie", + "И" => "Y", + "и" => "y", + "І" => "I", + "Ņ–" => "i", + "Ņ—" => "i", + "Й" => "Y", + "Đš" => "i", + "ĐĨ" => "Kh", + "Ņ…" => "kh", + "ĐĻ" => "Ts", + "Ņ†" => 'ts', + "ĐŠ" => "Shch", + "Ņ‰" => "shch", + "ŅŽ" => "iu", + "Ņ" => "ia", + "'" => "" + } + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/vietnamese.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/vietnamese.rb new file mode 100644 index 0000000..80bea3c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/transliterator/vietnamese.rb @@ -0,0 +1,143 @@ +# encoding: utf-8 +module Babosa + module Transliterator + class Vietnamese < Latin + APPROXIMATIONS = { + "à" => "a", + "ÃĄ" => "a", + "áēĄ" => "a", + "áēŖ" => "a", + "ÃŖ" => "a", + "Ãĸ" => "a", + "áē§" => "a", + "áēĨ" => "a", + "áē­" => "a", + "áēŠ" => "a", + "áēĢ" => "a", + "ă" => "a", + "áēą" => "a", + "áē¯" => "a", + "áēˇ" => "a", + "áēŗ" => "a", + "áēĩ" => "a", + "À" => "A", + "Á" => "A", + "áē " => "A", + "áēĸ" => "A", + "Ã" => "A", + "Â" => "A", + "áēĻ" => "A", + "áē¤" => "A", + "áēŦ" => "A", + "áē¨" => "A", + "áēĒ" => "A", + 
"Ă" => "A", + "áē°" => "A", + "áēŽ" => "A", + "áēļ" => "A", + "áē˛" => "A", + "áē´" => "A", + "ÃŦ" => "i", + "í" => "i", + "áģ‹" => "i", + "áģ‰" => "i", + "ÄŠ" => "i", + "Ì" => "I", + "Í" => "I", + "áģŠ" => "I", + "áģˆ" => "I", + "Ĩ" => "I", + "Ú" => "u", + "Ãē" => "u", + "áģĨ" => "u", + "áģ§" => "u", + "ÅŠ" => "u", + "Æ°" => "u", + "áģĢ" => "u", + "áģŠ" => "u", + "áģą" => "u", + "áģ­" => "u", + "áģ¯" => "u", + "Ù" => "U", + "Ú" => "U", + "áģ¤" => "U", + "áģĻ" => "U", + "Ũ" => "U", + "Ư" => "U", + "áģĒ" => "U", + "áģ¨" => "U", + "áģ°" => "U", + "áģŦ" => "U", + "áģŽ" => "U", + "è" => "e", + "Ê" => "e", + "áēš" => "e", + "áēģ" => "e", + "áēŊ" => "e", + "ÃĒ" => "e", + "áģ" => "e", + "áēŋ" => "e", + "áģ‡" => "e", + "áģƒ" => "e", + "áģ…" => "e", + "È" => "E", + "É" => "E", + "áē¸" => "E", + "áēē" => "E", + "áēŧ" => "E", + "Ê" => "E", + "áģ€" => "E", + "áēž" => "E", + "áģ†" => "E", + "áģ‚" => "E", + "áģ„" => "E", + "Ã˛" => "o", + "Ãŗ" => "o", + "áģ" => "o", + "áģ" => "o", + "Ãĩ" => "o", + "ô" => "o", + "áģ“" => "o", + "áģ‘" => "o", + "áģ™" => "o", + "áģ•" => "o", + "áģ—" => "o", + "ÆĄ" => "o", + "áģ" => "o", + "áģ›" => "o", + "áģŖ" => "o", + "áģŸ" => "o", + "áģĄ" => "o", + "Ò" => "O", + "Ó" => "O", + "áģŒ" => "O", + "áģŽ" => "O", + "Õ" => "O", + "Ô" => "O", + "áģ’" => "O", + "áģ" => "O", + "áģ˜" => "O", + "áģ”" => "O", + "áģ–" => "O", + "Æ " => "O", + "áģœ" => "O", + "áģš" => "O", + "áģĸ" => "O", + "áģž" => "O", + "áģ " => "O", + "áģŗ" => "y", + "ÃŊ" => "y", + "áģĩ" => "y", + "áģˇ" => "y", + "áģš" => "y", + "áģ˛" => "Y", + "Ý" => "Y", + "áģ´" => "Y", + "áģļ" => "Y", + "áģ¸" => "Y", + "đ" => "d", + "Đ" => "D" + } + end + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/active_support_proxy.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/active_support_proxy.rb new file mode 100644 index 0000000..d7b4931 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/active_support_proxy.rb @@ -0,0 +1,38 @@ +require 'active_support' +require 'active_support/multibyte/unicode' + +module Babosa + module UTF8 + # A UTF-8 proxy using Active Support's multibyte support. + module ActiveSupportProxy + extend ActiveSupport::Multibyte::Unicode + extend self + + def self.normalize_utf8(string) + normalize(string, :c) + end + + if ActiveSupport::VERSION::MAJOR == 3 + def downcase(string) + ActiveSupport::Multibyte::Chars.new(string).downcase.to_s + end + + def upcase(string) + ActiveSupport::Multibyte::Chars.new(string).upcase.to_s + end + elsif ActiveSupport::VERSION::MAJOR >= 6 + def self.normalize_utf8(string) + string.unicode_normalize(:nfc).to_s + end + + def downcase(string) + string.downcase.to_s + end + + def upcase(string) + string.upcase.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/dumb_proxy.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/dumb_proxy.rb new file mode 100644 index 0000000..0abd175 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/dumb_proxy.rb @@ -0,0 +1,49 @@ +require File.expand_path("../mappings", __FILE__) + +module Babosa + module UTF8 + + # This module provides fallback UTF-8 support when nothing else is + # available. It does case folding for Roman alphabet-based characters + # commonly used by Western European languages and little else, making it + # useless for Russian, Bulgarian, Greek, etc. 
If at all possible, Unicode + # or ActiveSupport should be used instead because they support the full + # UTF-8 character range. + module DumbProxy + extend Proxy + extend self + + def downcase(string) + string.downcase.unpack("U*").map {|char| Mappings::DOWNCASE[char] or char}.flatten.pack("U*") + end + + def upcase(string) + string.upcase.unpack("U*").map {|char| Mappings::UPCASE[char] or char}.flatten.pack("U*") + end + + if ''.respond_to?(:unicode_normalize) + def normalize_utf8(string) + string.unicode_normalize + end + else + # On Ruby 2.2+, the native Unicode normalize method above is used + # instead. This fallback does a very naive Unicode normalization, which + # should work for this library's purposes (i.e., Roman-based codepoints, + # up to U+017E). Do not reuse this as a general solution! Use a real + # library like Unicode or ActiveSupport instead. + def normalize_utf8(string) + codepoints = string.unpack("U*") + new = [] + until codepoints.empty? do + if Mappings::COMPOSITION[codepoints[0..1]] + new << Mappings::COMPOSITION[codepoints.slice!(0,2)] + else + new << codepoints.shift + end + end + new.compact.flatten.pack("U*") + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/java_proxy.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/java_proxy.rb new file mode 100644 index 0000000..68cd807 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/java_proxy.rb @@ -0,0 +1,22 @@ +module Babosa + module UTF8 + # A UTF-8 proxy module using Java's built-in Unicode support. Requires JRuby 1.5+. + module JavaProxy + extend Proxy + extend self + java_import java.text.Normalizer + + def downcase(string) + string.to_java.to_lower_case.to_s + end + + def upcase(string) + string.to_java.to_upper_case.to_s + end + + def normalize_utf8(string) + Normalizer.normalize(string, Normalizer::Form::NFC).to_s + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/mappings.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/mappings.rb new file mode 100644 index 0000000..a6042b1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/mappings.rb @@ -0,0 +1,193 @@ +module Babosa + module UTF8 + + # A small subset of the mappings provided by Unicode.org, limited to Latin + # characters. This is used for Babosa's default "dumb" UTF-8 support.
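+ #
+ # A couple of illustrative lookups (codepoints, not characters):
+ #
+ # Mappings::DOWNCASE[65] #=> 97 ("A" -> "a")
+ # Mappings::COMPOSITION[[117, 776]] #=> 252 ("u" + combining diaeresis -> "ü")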
+ module Mappings + DOWNCASE = Hash[65, 97, 66, 98, 67, 99, 68, 100, 69, 101, 70, 102, + 71, 103, 72, 104, 73, 105, 74, 106, 75, 107, 76, 108, 77, 109, 78, 110, + 79, 111, 80, 112, 81, 113, 82, 114, 83, 115, 84, 116, 85, 117, 86, 118, + 87, 119, 88, 120, 89, 121, 90, 122, 181, 956, 192, 224, 193, 225, 194, + 226, 195, 227, 196, 228, 197, 229, 198, 230, 199, 231, 200, 232, 201, + 233, 202, 234, 203, 235, 204, 236, 205, 237, 206, 238, 207, 239, 208, + 240, 209, 241, 210, 242, 211, 243, 212, 244, 213, 245, 214, 246, 216, + 248, 217, 249, 218, 250, 219, 251, 220, 252, 221, 253, 222, 254, 223, + [115, 115], 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 301, 302, 303, 304, [105, 775], 306, 307, 308, + 309, 310, 311, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, [700, 110], 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 255, + 377, 378, 379, 380, 381, 382] + + UPCASE = DOWNCASE.invert + + COMPOSITION = { + [65,768] => 192, + [65,769] => 193, + [65,770] => 194, + [65,771] => 195, + [65,776] => 196, + [65,778] => 197, + [67,807] => 199, + [69,768] => 200, + [69,769] => 201, + [69,770] => 202, + [69,776] => 203, + [73,768] => 204, + [73,769] => 205, + [73,770] => 206, + [73,776] => 207, + [78,771] => 209, + [79,768] => 210, + [79,769] => 211, + [79,770] => 212, + [79,771] => 213, + [79,776] => 214, + [85,768] => 217, + [85,769] => 218, + [85,770] => 219, + [85,776] => 220, + [89,769] => 221, + [97,768] => 224, + [97,769] => 225, + [97,770] => 226, + [97,771] => 227, + [97,776] => 228, + [97,778] => 229, + [99,807] => 231, + [101,768] => 232, + [101,769] => 233, + [101,770] => 234, + [101,776] => 235, + [105,768] => 236, + [105,769] => 237, + [105,770] => 238, + [105,776] => 239, + [110,771] => 241, + [111,768] => 242, + [111,769] => 243, + [111,770] => 244, + [111,771] => 245, + [111,776] => 246, + [117,768] => 249, + [117,769] => 250, + [117,770] => 251, + [117,776] => 252, + [121,769] => 253, + [121,776] => 255, + [65,772] => 256, + [97,772] => 257, + [65,774] => 258, + [97,774] => 259, + [65,808] => 260, + [97,808] => 261, + [67,769] => 262, + [99,769] => 263, + [67,770] => 264, + [99,770] => 265, + [67,775] => 266, + [99,775] => 267, + [67,780] => 268, + [99,780] => 269, + [68,780] => 270, + [100,780] => 271, + [69,772] => 274, + [101,772] => 275, + [69,774] => 276, + [101,774] => 277, + [69,775] => 278, + [101,775] => 279, + [69,808] => 280, + [101,808] => 281, + [69,780] => 282, + [101,780] => 283, + [71,770] => 284, + [103,770] => 285, + [71,774] => 286, + [103,774] => 287, + [71,775] => 288, + [103,775] => 289, + [71,807] => 290, + [103,807] => 291, + [72,770] => 292, + [104,770] => 293, + [73,771] => 296, + [105,771] => 297, + [73,772] => 298, + [105,772] => 299, + [73,774] => 300, + [105,774] => 301, + [73,808] => 302, + [105,808] => 303, + [73,775] => 304, + [74,770] => 308, + [106,770] => 309, + [75,807] => 310, + [107,807] => 311, + [76,769] => 313, + [108,769] => 314, + [76,807] => 315, + [108,807] => 316, + [76,780] => 317, + [108,780] => 318, + [78,769] => 323, + [110,769] => 324, + [78,807] => 325, + [110,807] => 326, + [78,780] => 327, + [110,780] => 328, + [79,772] => 
332, + [111,772] => 333, + [79,774] => 334, + [111,774] => 335, + [79,779] => 336, + [111,779] => 337, + [82,769] => 340, + [114,769] => 341, + [82,807] => 342, + [114,807] => 343, + [82,780] => 344, + [114,780] => 345, + [83,769] => 346, + [115,769] => 347, + [83,770] => 348, + [115,770] => 349, + [83,807] => 350, + [115,807] => 351, + [83,780] => 352, + [115,780] => 353, + [84,807] => 354, + [116,807] => 355, + [84,780] => 356, + [116,780] => 357, + [85,771] => 360, + [117,771] => 361, + [85,772] => 362, + [117,772] => 363, + [85,774] => 364, + [117,774] => 365, + [85,778] => 366, + [117,778] => 367, + [85,779] => 368, + [117,779] => 369, + [85,808] => 370, + [117,808] => 371, + [87,770] => 372, + [119,770] => 373, + [89,770] => 374, + [121,770] => 375, + [89,776] => 376, + [90,769] => 377, + [122,769] => 378, + [90,775] => 379, + [122,775] => 380, + [90,780] => 381, + [122,780] => 382 + } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/proxy.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/proxy.rb new file mode 100644 index 0000000..2ee36d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/proxy.rb @@ -0,0 +1,125 @@ +module Babosa + module UTF8 + + autoload :JavaProxy, "babosa/utf8/java_proxy" + autoload :UnicodeProxy, "babosa/utf8/unicode_proxy" + autoload :ActiveSupportProxy, "babosa/utf8/active_support_proxy" + autoload :DumbProxy, "babosa/utf8/dumb_proxy" + + # A UTF-8 proxy for Babosa can be any object which responds to the methods in this module. + # The following proxies are provided by Babosa: {ActiveSupportProxy}, {DumbProxy}, {JavaProxy}, and {UnicodeProxy}. + module Proxy + CP1252 = { + 128 => [226, 130, 172], + 129 => nil, + 130 => [226, 128, 154], + 131 => [198, 146], + 132 => [226, 128, 158], + 133 => [226, 128, 166], + 134 => [226, 128, 160], + 135 => [226, 128, 161], + 136 => [203, 134], + 137 => [226, 128, 176], + 138 => [197, 160], + 139 => [226, 128, 185], + 140 => [197, 146], + 141 => nil, + 142 => [197, 189], + 143 => nil, + 144 => nil, + 145 => [226, 128, 152], + 146 => [226, 128, 153], + 147 => [226, 128, 156], + 148 => [226, 128, 157], + 149 => [226, 128, 162], + 150 => [226, 128, 147], + 151 => [226, 128, 148], + 152 => [203, 156], + 153 => [226, 132, 162], + 154 => [197, 161], + 155 => [226, 128, 186], + 156 => [197, 147], + 157 => nil, + 158 => [197, 190], + 159 => [197, 184] + } + + # This is a stub for a method that should return a Unicode-aware + # downcased version of the given string. + def downcase(string) + raise NotImplementedError + end + + # This is a stub for a method that should return a Unicode-aware + # upcased version of the given string. + def upcase(string) + raise NotImplementedError + end + + # This is a stub for a method that should return the Unicode NFC + # normalization of the given string. + def normalize_utf8(string) + raise NotImplementedError + end + + if ''.respond_to?(:scrub) && !defined?(Rubinius) + # Attempt to replace invalid UTF-8 bytes with valid ones. This method + # naively assumes if you have invalid UTF8 bytes, they are either Windows + # CP-1252 or ISO8859-1. In practice this isn't a bad assumption, but may not + # always work. 
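+ #
+ # For example, the stray CP1252 byte 0x93 (a left curly quote) is mapped
+ # to a valid UTF-8 sequence (illustrative):
+ #
+ # tidy_bytes("\x93abc") #=> "“abc"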
+ def tidy_bytes(string) + string.scrub do |bad| + tidy_byte(*bad.bytes).flatten.compact.pack('C*').unpack('U*').pack('U*') + end + end + else + def tidy_bytes(string) + bytes = string.unpack("C*") + conts_expected = 0 + last_lead = 0 + + bytes.each_index do |i| + byte = bytes[i] + is_cont = byte > 127 && byte < 192 + is_lead = byte > 191 && byte < 245 + is_unused = byte > 240 + is_restricted = byte > 244 + + # Impossible or highly unlikely byte? Clean it. + if is_unused || is_restricted + bytes[i] = tidy_byte(byte) + elsif is_cont + # Not expecting continuation byte? Clean up. Otherwise, now expect one less. + conts_expected == 0 ? bytes[i] = tidy_byte(byte) : conts_expected -= 1 + else + if conts_expected > 0 + # Expected continuation, but got ASCII or leading? Clean backwards up to + # the leading byte. + (1..(i - last_lead)).each {|j| bytes[i - j] = tidy_byte(bytes[i - j])} + conts_expected = 0 + end + if is_lead + # Final byte is leading? Clean it. + if i == bytes.length - 1 + bytes[i] = tidy_byte(bytes.last) + else + # Valid leading byte? Expect continuations determined by position of + # first zero bit, with max of 3. + conts_expected = byte < 224 ? 1 : byte < 240 ? 2 : 3 + last_lead = i + end + end + end + end + bytes.empty? ? "" : bytes.flatten.compact.pack("C*").unpack("U*").pack("U*") + end + end + + private + + def tidy_byte(byte) + byte < 160 ? CP1252[byte] : byte < 192 ? [194, byte] : [195, byte - 64] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/unicode_proxy.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/unicode_proxy.rb new file mode 100644 index 0000000..bad7b0e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/utf8/unicode_proxy.rb @@ -0,0 +1,23 @@ +require 'unicode' + +module Babosa + module UTF8 + # A UTF-8 proxy using the Unicode gem. + # @see http://github.com/blackwinter/unicode + module UnicodeProxy + extend Proxy + extend self + def downcase(string) + Unicode.downcase(string) + end + + def upcase(string) + Unicode.upcase(string) + end + + def normalize_utf8(string) + Unicode.normalize_C(string) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/version.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/version.rb new file mode 100644 index 0000000..0d62ed7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/lib/babosa/version.rb @@ -0,0 +1,5 @@ +module Babosa + module Version + STRING = '1.0.4' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/babosa_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/babosa_spec.rb new file mode 100644 index 0000000..5517b90 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/babosa_spec.rb @@ -0,0 +1,158 @@ +# encoding: utf-8 +require File.expand_path("../spec_helper", __FILE__) + +describe Babosa::Identifier do + + it "should respond_to :empty?" do + expect("".to_slug).to respond_to(:empty?) + end + + %w[approximate_ascii clean downcase word_chars normalize to_ascii upcase with_dashes].each do |method| + describe "##{method}" do + it "should work with invalid UTF-8 strings" do + expect {"\x93abc".to_slug.send method}.not_to raise_exception + end + end + end + + describe "#word_chars" do + it "word_chars!
should leave only letters and spaces" do + string = "a*$%^$@!@b$%^&*()*!c" + expect(string.to_slug.word_chars!).to match(/[a-z ]*/i) + end + end + + describe "#transliterate" do + it "should transliterate to ascii" do + (0xC0..0x17E).to_a.each do |codepoint| + ss = [codepoint].pack("U*").to_slug + expect(ss.approximate_ascii!).to match(/[\x0-\x7f]/) + end + end + + it "should transliterate uncomposed utf8" do + string = [117, 776].pack("U*") # "ü" as ASCII "u" plus COMBINING DIAERESIS + expect(string.to_slug.approximate_ascii).to eql("u") + end + + it "should transliterate using multiple transliterators" do + string = "свободное režģis" + expect(string.to_slug.approximate_ascii(:latin, :russian)).to eql("svobodnoe rezgis") + end + end + + describe "#downcase" do + it "should lowercase strings" do + expect("FELIZ AÑO".to_slug.downcase).to eql("feliz año") + end + end + + describe "#upcase" do + it "should uppercase strings" do + expect("feliz año".to_slug.upcase).to eql("FELIZ AÑO") + end + end + + describe "#normalize" do + + it "should allow passing locale as key for :transliterate" do + expect("ö".to_slug.clean.normalize(:transliterate => :german)).to eql("oe") + end + + it "should replace whitespace with dashes" do + expect("a b".to_slug.clean.normalize).to eql("a-b") + end + + it "should replace multiple spaces with 1 dash" do + expect("a b".to_slug.clean.normalize).to eql("a-b") + end + + it "should replace multiple dashes with 1 dash" do + expect("male - female".to_slug.normalize).to eql("male-female") + end + + it "should strip trailing space" do + expect("ab ".to_slug.normalize).to eql("ab") + end + + it "should strip leading space" do + expect(" ab".to_slug.normalize).to eql("ab") + end + + it "should strip trailing slashes" do + expect("ab-".to_slug.normalize).to eql("ab") + end + + it "should strip leading slashes" do + expect("-ab".to_slug.normalize).to eql("ab") + end + + it "should not modify valid name strings" do + expect("a-b-c-d".to_slug.normalize).to eql("a-b-c-d") + end + + it "should not convert underscores" do + expect("hello_world".to_slug.normalize).to eql("hello_world") + end + + it "should work with non roman chars" do + expect("検 索".to_slug.normalize).to eql("検-索") + end + + context "with to_ascii option" do + it "should approximate and strip non ascii" do + ss = "カタカナ: katakana is über cool".to_slug + expect(ss.normalize(:to_ascii => true)).to eql("katakana-is-uber-cool") + end + end + end + + describe "#truncate_bytes" do + it "should by byte length" do + expect("üa".to_slug.truncate_bytes(2)).to eql("ü") + expect("üa".to_slug.truncate_bytes(1)).to eql("") + expect("üa".to_slug.truncate_bytes(100)).to eql("üa") + expect("üéøá".to_slug.truncate_bytes(3)).to eql("ü") + end + end + + describe "#truncate" do + it "should truncate by char length" do + expect("üa".to_slug.truncate(2)).to eql("üa") + expect("üa".to_slug.truncate(1)).to eql("ü") + expect("üa".to_slug.truncate(100)).to eql("üa") + end + end + + describe "#with_dashes" do + it "should not change byte size when replacing spaces" do + expect("".to_slug.with_dashes.bytesize).to eql(0) + expect(" ".to_slug.with_dashes.bytesize).to eql(1) + expect("-abc-".to_slug.with_dashes.bytesize).to eql(5) + expect(" abc ".to_slug.with_dashes.bytesize).to eql(5) + expect(" a bc ".to_slug.with_dashes.bytesize).to eql(7) + end + end + + describe "#to_ruby_method" do + it "should get a string suitable for use as a ruby method" do + expect("¿¿¿hello...
world???".to_slug.to_ruby_method).to eql("hello_world?") + expect("ã‚Ģã‚ŋã‚Ģナ: katakana is Ãŧber cool".to_slug.to_ruby_method).to eql("katakana_is_uber_cool") + expect("ã‚Ģã‚ŋã‚Ģナ: katakana is Ãŧber cool!".to_slug.to_ruby_method).to eql("katakana_is_uber_cool!") + expect("ã‚Ģã‚ŋã‚Ģナ: katakana is Ãŧber cool".to_slug.to_ruby_method(false)).to eql("katakana_is_uber_cool") + end + + it "should optionally remove trailing punctuation" do + expect("ÂŋÂŋÂŋhello... world???".to_slug.to_ruby_method(false)).to eql("hello_world") + end + + it "should raise an error when it would generate an impossible method name" do + # "1".to_identifier.to_ruby_method + expect {"1".to_identifier.to_ruby_method}.to raise_error(Babosa::Identifier::Error) + end + + it "should raise Babosa::Error error when the string is nil" do + expect { "".to_slug.to_ruby_method }.to raise_error(Babosa::Identifier::Error) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/spec_helper.rb new file mode 100644 index 0000000..b4bc947 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/spec_helper.rb @@ -0,0 +1,45 @@ +# coding: utf-8 + +if ENV['COV'] + require 'simplecov' + SimpleCov.start +end + +require 'bundler/setup' +require 'babosa' + +shared_examples_for "a latin transliterator" do + let(:t) { described_class.instance } + + it "should transliterate latin characters" do + string = (0xC0..0x17E).to_a.pack("U*") + expect(t.transliterate(string)).to match(/[\x0-\x7f]/) + end +end + +shared_examples_for "a cyrillic transliterator" do + let(:t) { described_class.instance } + + it "should transliterate cyrillic characters" do + string = "ĐĄĐģавŅŒŅŅ, ОŅ‚ĐĩŅ‡ĐĩŅŅ‚вО ĐŊĐ°ŅˆĐĩ ŅĐ˛ĐžĐąĐžĐ´ĐŊĐžĐĩ" + expect(t.transliterate(string)).to match(/[\x0-\x7f]/) + end +end + +shared_examples_for "a greek transliterator" do + let(:t) { described_class.instance } + + it "should transliterate greek characters" do + string = "ΓÎĩĪÎŧÎąÎŊÎ¯Îą" + expect(t.transliterate(string)).to match(/[\x0-\x7f]/) + end +end + +shared_examples_for "a hindi transliterator" do + let(:t) { described_class.instance } + + it "should transliterate hindi characters" do + string = "ā¤†ā¤Ļā¤ŋā¤¤āĨā¤¯ ā¤¤ā¤žā¤Ēā¤Ąā¤ŧā¤ŋā¤¯ā¤ž" + expect(t.transliterate(string)).to match(/[\x0-\x7f]/) + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/base_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/base_spec.rb new file mode 100644 index 0000000..32168c3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/base_spec.rb @@ -0,0 +1,16 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Base do + + let(:t) {Babosa::Transliterator::Base.instance} + + it "should transliterate 'smart' quotes" do + expect(t.transliterate("’")).to eql("'") + end + + it "should transliterate non-breaking spaces" do + expect(t.transliterate("\xc2\xa0")).to eql(" ") + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/bulgarian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/bulgarian_spec.rb new file mode 100644 index 0000000..cefd8fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/bulgarian_spec.rb @@ -0,0 +1,20 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe 
Babosa::Transliterator::Bulgarian do + + let(:t) { described_class.instance } + it_behaves_like "a cyrillic transliterator" + + it "should transliterate Cyrillic characters" do + examples = { + "Ютия" => "Iutiia", + "Чушка" => "Chushka", + "кьорав" => "kiorav", + "Щъркел" => "Shturkel", + "полицай" => "policai" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/danish_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/danish_spec.rb new file mode 100644 index 0000000..6e4b122 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/danish_spec.rb @@ -0,0 +1,17 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Danish do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "Ærøskøbing" => "Aeroeskoebing", + "Årslev" => "Aarslev" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/german_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/german_spec.rb new file mode 100644 index 0000000..ee204cc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/german_spec.rb @@ -0,0 +1,17 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::German do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate Eszett" do + expect(t.transliterate("ß")).to eql("ss") + end + + it "should transliterate vowels with umlauts" do + expect(t.transliterate("üöä")).to eql("ueoeae") + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/greek_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/greek_spec.rb new file mode 100644 index 0000000..bfe8497 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/greek_spec.rb @@ -0,0 +1,17 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Greek do + + let(:t) { described_class.instance } + it_behaves_like "a greek transliterator" + + it "should transliterate various characters" do + examples = { + "Γερμανία" => "Germania", + "Αυστρία" => "Aystria", + "Ιταλία" => "Italia" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/hindi_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/hindi_spec.rb new file mode 100644 index 0000000..4b2e1a6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/hindi_spec.rb @@ -0,0 +1,17 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Hindi do + + let(:t) { described_class.instance } + it_behaves_like "a hindi transliterator" + + it "should transliterate hindi characters" do + examples = { + "आदित्य" => "aadity", + "सबरीमाला करवाना पायसम" => "sbriimaalaa krvaanaa paaysm", + "सक्रांति आँख" => "skraanti aankh" + } + examples.each
{|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/latin_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/latin_spec.rb new file mode 100644 index 0000000..065bb9f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/latin_spec.rb @@ -0,0 +1,9 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Latin do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/macedonian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/macedonian_spec.rb new file mode 100644 index 0000000..bfa87a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/macedonian_spec.rb @@ -0,0 +1,9 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Macedonian do + + let(:t) { described_class.instance } + it_behaves_like "a cyrillic transliterator" + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/norwegian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/norwegian_spec.rb new file mode 100644 index 0000000..a0a6427 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/norwegian_spec.rb @@ -0,0 +1,18 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Norwegian do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "Øivind" => "Oeivind", + "Bø" => "Boe", + "Åre" => "Aare", + "Håkon" => "Haakon" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/polish_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/polish_spec.rb new file mode 100644 index 0000000..67bd850 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/polish_spec.rb @@ -0,0 +1,14 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Polish do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + expect(t.transliterate("ĄąĆćĘęŁłŃńÓóŚśŹźŻż")).to eql("AaCcEeLlNnOoSsZzZz") + end + +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/romanian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/romanian_spec.rb new file mode 100644 index 0000000..4c578bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/romanian_spec.rb @@ -0,0 +1,19 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Romanian do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "Iași" => "Iasi", + "Mehedinți" => "Mehedinti", + "Țară" => "Tara", + "Șanț" => "Sant" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/russian_spec.rb
b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/russian_spec.rb new file mode 100644 index 0000000..994ce61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/russian_spec.rb @@ -0,0 +1,9 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Russian do + + let(:t) { described_class.instance } + it_behaves_like "a cyrillic transliterator" + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/serbian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/serbian_spec.rb new file mode 100644 index 0000000..fd0b15e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/serbian_spec.rb @@ -0,0 +1,25 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Serbian do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + it_behaves_like "a cyrillic transliterator" + + it "should transliterate Latin characters" do + examples = { + "Đorđe" => "Djordje", + "Inđija" => "Indjija", + "Četiri" => "Chetiri", + "četiri" => "chetiri", + "Škola" => "Shkola", + "škola" => "shkola", + "Ђорђе" => "Djordje", + "Инђија" => "Indjija", + "Школа" => "Shkola", + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/spanish_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/spanish_spec.rb new file mode 100644 index 0000000..9518a29 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/spanish_spec.rb @@ -0,0 +1,13 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Spanish do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate ñ" do + expect(t.transliterate("ñ")).to eql("ni") + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/swedish_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/swedish_spec.rb new file mode 100644 index 0000000..8eb1748 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/swedish_spec.rb @@ -0,0 +1,18 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Swedish do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "Räksmörgås" => "Raeksmoergaas", + "Öre" => "Oere", + "Åre" => "Aare", + "Älskar" => "Aelskar" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/turkish_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/turkish_spec.rb new file mode 100644 index 0000000..ffefb0d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/turkish_spec.rb @@ -0,0 +1,24 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Turkish do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "Nâzım" => "Nazim", + "sükûnet" =>
"sukunet", + "millÃŽleştirmek" => "millilestirmek", + "mÃĒmur" => "memur", + "lôkman" => "lokman", + "yoğurt" => "yogurt", + "şair" => "sair", + "Ä°zmir" => "Izmir", + "yığın" => "yigin", + "çarÅŸÄą" => "carsi" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/ukrainian_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/ukrainian_spec.rb new file mode 100644 index 0000000..8276d8c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/ukrainian_spec.rb @@ -0,0 +1,88 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Ukrainian do + + let(:t) { described_class.instance } + it_behaves_like "a cyrillic transliterator" + + it "should transliterate Cyrillic characters" do + examples = { + "АĐģŅƒŅˆŅ‚Đ°" => "Alushta", + "АĐŊĐ´Ņ€Ņ–Đš" => "Andrii", + "БоŅ€Ņ‰Đ°ĐŗŅ–вĐēĐ°" => "Borshchahivka", + "БоŅ€Đ¸ŅĐĩĐŊĐēĐž" => "Borysenko", + "ВŅ–ĐŊĐŊиŅ†Ņ" => "Vinnytsia", + "ВоĐģОдиĐŧиŅ€" => "Volodymyr", + "ГадŅŅ‡" => "Hadiach", + "БоĐŗĐ´Đ°ĐŊ" => "Bohdan", + "ŌĐ°ĐģĐ°Ō‘Đ°ĐŊ" => "Galagan", + "ŌĐžŅ€Ō‘Đ°ĐŊи" => "Gorgany", + "ДоĐŊĐĩŅ†ŅŒĐē" => "Donetsk", + "ДĐŧиŅ‚Ņ€Đž" => "Dmytro", + "Đ Ņ–вĐŊĐĩ" => "Rivne", + "ОĐģĐĩĐŗ" => "Oleh", + "ЕŅĐŧĐ°ĐŊŅŒ" => "Esman", + "ЄĐŊĐ°ĐēŅ–Ņ”вĐĩ" => "Yenakiieve", + "ГаŅ”виŅ‡" => "Haievych", + "КоŅ€ĐžĐŋ'Ņ”" => "Koropie", + "ЖиŅ‚ĐžĐŧиŅ€" => "Zhytomyr", + "ЖаĐŊĐŊĐ°" => "Zhanna", + "ЖĐĩĐļĐĩĐģŅ–в" => "Zhezheliv", + "ЗаĐēĐ°Ņ€ĐŋĐ°Ņ‚Ņ‚Ņ" => "Zakarpattia", + "КазиĐŧиŅ€Ņ‡ŅƒĐē" => "Kazymyrchuk", + "МĐĩдвиĐŊ" => "Medvyn", + "МиŅ…Đ°ĐšĐģĐĩĐŊĐēĐž" => "Mykhailenko", + "ІваĐŊĐēŅ–в" => "Ivankiv", + "ІваŅ‰ĐĩĐŊĐēĐž" => "Ivashchenko", + "ЇĐļĐ°ĐēĐĩвиŅ‡" => "Yizhakevych", + "КадиŅ—вĐēĐ°" => "Kadyivka", + "МаŅ€'Ņ—ĐŊĐĩ" => "Marine", + "ЙоŅĐ¸ĐŋŅ–вĐēĐ°" => "Yosypivka", + "ĐĄŅ‚Ņ€Đ¸Đš" => "Stryi", + "ОĐģĐĩĐēŅŅ–Đš" => "Oleksii", + "КиŅ—в" => "Kyiv", + "КоваĐģĐĩĐŊĐēĐž" => "Kovalenko", + "ЛĐĩĐąĐĩдиĐŊ" => "Lebedyn", + "ЛĐĩĐžĐŊŅ–Đ´" => "Leonid", + "МиĐēĐžĐģĐ°Ņ—в" => "Mykolaiv", + "МаŅ€Đ¸ĐŊиŅ‡" => "Marynych", + "НŅ–ĐļиĐŊ" => "Nizhyn", + "НаŅ‚Đ°ĐģŅ–Ņ" => "Nataliia", + "ОдĐĩŅĐ°" => "Odesa", + "ОĐŊиŅ‰ĐĩĐŊĐēĐž" => "Onyshchenko", + "ПоĐģŅ‚ава" => "Poltava", + "ПĐĩŅ‚Ņ€Đž" => "Petro", + "Đ ĐĩŅˆĐĩŅ‚иĐģŅ–вĐēĐ°" => "Reshetylivka", + "РийŅ‡Đ¸ĐŊŅŅŒĐēиК" => "Rybchynskyi", + "ĐĄŅƒĐŧи" => "Sumy", + "ĐĄĐžĐģĐžĐŧŅ–Ņ" => "Solomiia", + "ĐĸĐĩŅ€ĐŊĐžĐŋŅ–ĐģŅŒ" => "Ternopil", + "ĐĸŅ€ĐžŅ†ŅŒ" => "Trots", + "ĐŖĐļĐŗĐžŅ€ĐžĐ´" => "Uzhhorod", + "ĐŖĐģŅĐŊĐ°" => "Uliana", + "ФаŅŅ‚Ņ–в" => "Fastiv", + "ФŅ–ĐģŅ–ĐŋŅ‡ŅƒĐē" => "Filipchuk", + "ĐĨĐ°Ņ€ĐēŅ–в" => "Kharkiv", + "ĐĨŅ€Đ¸ŅŅ‚иĐŊĐ°" => "Khrystyna", + "БŅ–ĐģĐ° ĐĻĐĩŅ€Đēва" => "Bila Tserkva", + "ĐĄŅ‚ĐĩŅ†ĐĩĐŊĐēĐž" => "Stetsenko", + "ЧĐĩŅ€ĐŊŅ–вŅ†Ņ–" => "Chernivtsi", + "ШĐĩвŅ‡ĐĩĐŊĐēĐž" => "Shevchenko", + "ШОŅŅ‚ĐēĐ°" => "Shostka", + "КиŅˆĐĩĐŊŅŒĐēи" => "Kyshenky", + "ĐŠĐĩŅ€ĐąŅƒŅ…и" => "Shcherbukhy", + "ГоŅ‰Đ°" => "Hoshcha", + "ГаŅ€Đ°Ņ‰ĐĩĐŊĐēĐž" => "Harashchenko", + "ĐŽŅ€Ņ–Đš" => "Yurii", + "КоŅ€ŅŽĐēŅ–вĐēĐ°" => "Koriukivka", + "Đ¯ĐŗĐžŅ‚иĐŊ" => "Yahotyn", + "Đ¯Ņ€ĐžŅˆĐĩĐŊĐēĐž" => "Yaroshenko", + "КоŅŅ‚ŅĐŊŅ‚иĐŊ" => "Kostiantyn", + "ЗĐŊĐ°Đŧ'ŅĐŊĐēĐ°" => "Znamianka", + "ФĐĩОдОŅŅ–Ņ" => "Feodosiia" + } + examples.each { |k, v| expect(t.transliterate(k)).to eql(v) } + end + +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/vietnamese_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/vietnamese_spec.rb new file 
mode 100644 index 0000000..92b50c6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/transliterators/vietnamese_spec.rb @@ -0,0 +1,18 @@ +# encoding: utf-8 +require File.expand_path("../../spec_helper", __FILE__) + +describe Babosa::Transliterator::Vietnamese do + + let(:t) { described_class.instance } + it_behaves_like "a latin transliterator" + + it "should transliterate various characters" do + examples = { + "làm" => "lam", + "đàn ông" => "dan ong", + "thật" => "that", + "khổ" => "kho" + } + examples.each {|k, v| expect(t.transliterate(k)).to eql(v)} + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/utf8_proxy_spec.rb b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/utf8_proxy_spec.rb new file mode 100644 index 0000000..a203a30 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/babosa-1.0.4/spec/utf8_proxy_spec.rb @@ -0,0 +1,52 @@ +# encoding: utf-8 +require File.expand_path("../spec_helper", __FILE__) + +PROXIES = [Babosa::UTF8::DumbProxy, Babosa::UTF8::ActiveSupportProxy, Babosa::UTF8::UnicodeProxy] +PROXIES << Babosa::UTF8::JavaProxy if Babosa.jruby15? + +PROXIES.each do |proxy| + + describe proxy do + + around do |example| + begin + old_proxy = Babosa::Identifier.utf8_proxy + Babosa::Identifier.utf8_proxy = proxy + example.run + ensure + Babosa::Identifier.utf8_proxy = old_proxy + end + end + + describe "#normalize_utf8" do + it "should normalize to canonical composed" do + # ÅÉÎØÜ + uncomposed_bytes = [65, 204, 138, 69, 204, 129, 73, 204, 130, 195, 152, 85, 204, 136] + composed_bytes = [195, 133, 195, 137, 195, 142, 195, 152, 195, 156] + uncomposed_string = uncomposed_bytes.pack("C*").unpack("U*").pack("U*") + expect(proxy.normalize_utf8(uncomposed_string).unpack("C*")).to eql(composed_bytes) + end + end + + describe "#upcase" do + it "should upcase the string" do + expect(proxy.upcase("åéîøü")).to eql("ÅÉÎØÜ") + expect("åéîøü".to_identifier.upcase).to eql("ÅÉÎØÜ") + end + end + + describe "#downcase" do + it "should downcase the string" do + expect(proxy.downcase("ÅÉÎØÜ")).to eql("åéîøü") + expect("ÅÉÎØÜ".to_identifier.downcase).to eql("åéîøü") + end + end + + describe 'tidy_bytes' do + it 'should fix invalid UTF-8 strings' do + expect(proxy.tidy_bytes("\x93abc")).to eq('“abc') + end + end + + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.github/workflows/ci.yml b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.github/workflows/ci.yml new file mode 100644 index 0000000..54e1f05 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.github/workflows/ci.yml @@ -0,0 +1,45 @@ +name: ci + +on: + pull_request: + + push: + branches: + - master + - '*-stable' + +jobs: + ci: + name: Ruby ${{ matrix.ruby.name }} + + runs-on: ubuntu-20.04 + + strategy: + fail-fast: false + + matrix: + ruby: + - { name: "2.3", value: 2.3.8 } + - { name: "2.4", value: 2.4.10 } + - { name: "2.5", value: 2.5.9 } + - { name: "2.6", value: 2.6.9 } + - { name: "2.7", value: 2.7.5 } + - { name: "3.0", value: 3.0.3 } + - { name: "3.1", value: 3.1.0 } + + steps: + - uses: actions/checkout@v2 + + - name: Setup ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby.value }} + bundler-cache: true + + - name: Run Test + run: bundle exec rake spec + + - name: Test & publish code coverage + uses: paambaati/codeclimate-action@v3.0.0 + env: + CC_TEST_REPORTER_ID: 46c8b29dd6711f35704e7c5a541486cbbf2cff8b2df8ce755bfc09917d3c1cbb diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.gitignore
b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.kick b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.kick new file mode 100644 index 0000000..0686cce --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.kick @@ -0,0 +1,30 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bundle exec bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/[^/]*/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) +ignore(/^tmp/) + diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop.yml new file mode 100644 index 0000000..3ef9f69 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop.yml @@ -0,0 +1,6 @@ +require: + - rubocop-performance + +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..4702a3e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_cocoapods.yml @@ -0,0 +1,151 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Style/Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +Style/SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +Lint/AssignmentInCondition: + Enabled: false + +# Allow backticks +Style/AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +Style/IfUnlessModifier: + Enabled: false + +# No enforced convention here. +Style/SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Style/Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInArrayLiteral: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInHashLiteral: + EnforcedStyleForMultiline: comma + +Layout/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +Style/GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Style/Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. 
+Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +Style/HashSyntax: + EnforcedStyle: hash_rockets + +Style/Lambda: + Enabled: false + +Layout/DotPosition: + EnforcedStyle: trailing + +Style/EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +Lint/AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Lint/Void: + Exclude: + - spec/**/* + +Style/ClassAndModuleChildren: + Exclude: + - spec/**/* + +Lint/UselessComparison: + Exclude: + - spec/**/* + +Lint/RaiseException: + Enabled: false + +Lint/StructNewOverride: + Enabled: false + +Style/HashEachMethods: + Enabled: false + +Style/HashTransformKeys: + Enabled: false + +Style/HashTransformValues: + Enabled: false diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_todo.yml b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_todo.yml new file mode 100644 index 0000000..926b32c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.rubocop_todo.yml @@ -0,0 +1,70 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2016-03-09 18:40:14 -0600 using RuboCop version 0.38.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 3 +Lint/IneffectiveAccessModifier: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Lint/RedundantCopDisableDirective: + Exclude: + - 'spec/command/banner_spec.rb' + +# Offense count: 1 +Performance/FixedSize: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Performance/StringReplacement: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: prefer_alias, prefer_alias_method +Style/Alias: + Exclude: + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: SingleLineConditionsOnly. +Style/ConditionalAssignment: + Exclude: + - 'lib/claide/command/banner.rb' + +# Offense count: 1 +Style/IfInsideElse: + Exclude: + - 'lib/claide/command.rb' + +# Offense count: 9 +# Cop supports --auto-correct. 
+Style/MutableConstant: + Exclude: + - 'lib/claide/ansi.rb' + - 'lib/claide/argument.rb' + - 'lib/claide/command.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'lib/claide/command/argument_suggester.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/RedundantInterpolation: + Exclude: + - 'lib/claide/command/argument_suggester.rb' diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.yardopts b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.yardopts new file mode 100644 index 0000000..a647564 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/.yardopts @@ -0,0 +1 @@ +--markup markdown --protected --charset=utf-8 lib diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/CHANGELOG.md new file mode 100644 index 0000000..2fd0d62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/CHANGELOG.md @@ -0,0 +1,265 @@ +# CLAide Changelog + +## 1.1.0 (2022-01-12) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.3 (2019-08-02) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Correctly handle `--help` flags when using `argv.remainder!` after initialization + [Eric Amorde](https://github.com/amorde), + [tripleCC](https://github.com/tripleCC) + [#87](https://github.com/CocoaPods/CLAide/pull/87) + + +## 1.0.2 (2017-06-06) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Avoid a method redefinition warning when requiring `claide`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.1 (2016-10-10) + +##### Bug Fixes + +* Adds a fix for older versions of Rubygems when CLAide crashes. + [Samuel Giddins](https://github.com/segiddins) + [#73](https://github.com/CocoaPods/CLAide/issues/73) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix circular require of `claide/ansi` in `claide/ansi/string_escaper`. + [bootstraponline](https://github.com/bootstraponline) + [#66](https://github.com/CocoaPods/CLAide/issues/66) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Enhancements + +* Added `Command.option` to easily add a single option to a command class. + [Samuel Giddins](https://github.com/segiddins) + [#64](https://github.com/CocoaPods/CLAide/issues/64) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-03-08) + +##### Bug Fixes + +* Attempt to get the terminal width without shelling out to `tput`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* The plugin manager will now properly activate plugin gems, ensuring all of + their files are requirable. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.1 (2015-07-05) + +##### Bug Fixes + +* Fix a regression when contradictory flags were given in `ARGV` -- the last + flag given will once again be the value returned, and all entries for that key + are removed. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.9.0 (2015-07-02) + +##### Enhancements + +* Properly parse everything in `ARGV` after `--` as an argument. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/CLAide/issues/48) + +* Allow parsing an option that occurs multiple times. 
+ [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.2 (2015-06-27) + +##### Enhancements + +* Add `ARGV#remainder!`, which returns all the remaining arguments, deleting + them from the receiver. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.8.1 (2015-02-25) + +###### Bug Fixes + +* Silence errors while loading plugins. + [Clément Beffa](https://github.com/cl3m) + [#44](https://github.com/CocoaPods/CLAide/issues/44) + + +## 0.8.0 (2014-12-25) + +###### Breaking + +* Removes the `ShellCompletionHelper` along with completion script for ZSH. This is out of the scope of CLAide. + [Eloy Durán](https://github.com/alloy) + [#43](https://github.com/CocoaPods/CLAide/issues/43) + +* Various refactoring replacing “Helper” APIs with specialised classes such as ArgumentSuggester, TextWrapper and PluginManager. + [Eloy Durán](https://github.com/alloy) + +###### Enhancements + +* Added convenience method to invoke commands more easily. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/40) + +* Changes to the PluginManager to handle multiple plugin prefixes, which by default adds the `clad` plugin prefix. + [Eloy Durán](https://github.com/alloy) + +## 0.7.0 (2014-09-11) + +###### Breaking + +* Plugins are now expected to include the `cocoapods-plugin.rb` file in + `./lib`. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +###### Enhancements + +* Improved messages for exceptions generated by plugins. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#28](https://github.com/CocoaPods/CLAide/pull/28) + +* Use the Argument class to describe arguments. + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* Support for argument alternatives and repeatable arguments (ellipsis). + [Olivier Halligon](https://github.com/AliSoftware) + [#33](https://github.com/CocoaPods/CLAide/issues/33) + +* No stack trace if --help and --verbose are combined. + [Marius Rackwitz](https://github.com/mrackwitz) + [#36](https://github.com/CocoaPods/CLAide/issues/36) + + +## 0.6.1 (2014-05-20) + +###### Bug Fixes + +* Respect the ANSI flag for the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#34](https://github.com/CocoaPods/CLAide/issues/34) + +* Underline the colon of the titles of the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.6.0 (2014-05-19) + +###### Enhancements + +* Use an array to describe arguments. + [Fabio Pelosin][fabiopelosin] + [#26](https://github.com/CocoaPods/CLAide/issues/26) + +* Improved layout and contents of help banner + [Fabio Pelosin](https://github.com/fabiopelosin) + [#25](https://github.com/CocoaPods/CLAide/pull/25) + +* Colorize option, arguments, and example commands in the help banner. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#12](https://github.com/CocoaPods/CLAide/issues/12) + +* Add support for ANSI escape sequences. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#17](https://github.com/CocoaPods/CLAide/issues/17) + [#20](https://github.com/CocoaPods/CLAide/pull/20) + [#24](https://github.com/CocoaPods/CLAide/pull/24) + +* Add support for completion script + [Fabio Pelosin](https://github.com/fabiopelosin) + [#19](https://github.com/CocoaPods/CLAide/pull/19) + +* Add support for version logic via the introduction of the `version` class + attribute to the `CLAide::Command` class.
If a value for the attribute is + specified, the `--version` flag is added. The `--version --verbose` flags + include the version of the plugins in the output. + [Fabio Pelosin](https://github.com/fabiopelosin) + [#13](https://github.com/CocoaPods/CLAide/issues/13) + [#14](https://github.com/CocoaPods/CLAide/issues/14) + +## 0.5.0 (2014-03-26) + +###### Enhancements + +* Add an `ignore_in_command_lookup` option to commands, which makes it possible + to have anonymous command classes that are only meant to provide common + functionality, but are otherwise completely ignored during parsing, command + lookup, and help banner printing. + [Eloy Durán](https://github.com/alloy) + +* Deprecate the `color` option in favor of `ansi`. This is more abstract and + can be used for commands that only prettify output by using, for instance, + the bold ANSI code. This applies to the `CLAide` APIs as well. + [Eloy Durán](https://github.com/alloy) + +* Add more hooks that allow the user to customize how to prettify output. + [Eloy Durán](https://github.com/alloy) + +* Word wrap option descriptions to terminal width. + [Eloy Durán](https://github.com/alloy) + [#6](https://github.com/CocoaPods/CLAide/issues/6) + + +## 0.4.0 (2013-11-14) + +###### Enhancements + +* Added support for plugins. + [Les Hill](https://github.com/leshill) + [#1](https://github.com/CocoaPods/CLAide/pull/1) diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile new file mode 100644 index 0000000..c33e9bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile @@ -0,0 +1,22 @@ +source 'https://rubygems.org' + +gemspec + +gem 'rake' + +group :development do + gem 'kicker' + gem 'colored' # for examples +end + +group :spec do + gem 'bacon' + gem 'json', '< 3' + gem 'mocha-on-bacon' + gem 'prettybacon' + + gem 'parallel', '<= 1.19.2' + gem 'rubocop', '<= 0.81.0' + gem 'rubocop-performance', '<= 1.5.2', :require => nil + gem 'simplecov' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile.lock new file mode 100644 index 0000000..22fe1a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Gemfile.lock @@ -0,0 +1,79 @@ +PATH + remote: . + specs: + claide (1.1.0) + +GEM + remote: https://rubygems.org/ + specs: + ast (2.4.2) + bacon (1.2.0) + colored (1.2) + docile (1.1.5) + ffi (1.14.2) + jaro_winkler (1.5.4) + json (2.5.1) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + multi_json (1.10.1) + notify (0.5.2) + parallel (1.19.2) + parser (3.1.0.0) + ast (~> 2.4.1) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (3.0.0) + rake (10.3.2) + rb-fsevent (0.9.4) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.3) + ffi (>= 0.5.0) + rexml (3.2.5) + rubocop (0.81.0) + jaro_winkler (~> 1.5.1) + parallel (~> 1.10) + parser (>= 2.7.0.1) + rainbow (>= 2.2.2, < 4.0) + rexml + ruby-progressbar (~> 1.7) + unicode-display_width (>= 1.4.0, < 2.0) + rubocop-performance (1.5.2) + rubocop (>= 0.71.0) + ruby-progressbar (1.11.0) + simplecov (0.9.1) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + unicode-display_width (1.8.0) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + claide!
+ colored + json (< 3) + kicker + mocha-on-bacon + parallel (<= 1.19.2) + prettybacon + rake + rubocop (<= 0.81.0) + rubocop-performance (<= 1.5.2) + simplecov + +BUNDLED WITH + 2.3.4 diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/LICENSE b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/README.md b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/README.md new file mode 100644 index 0000000..f4ed63d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/README.md @@ -0,0 +1,115 @@ +# Hi, I’m CLAide, your command-line tool aide. + +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/claide/ci)](https://github.com/CocoaPods/claide/actions) +[![Gem Version](https://img.shields.io/gem/v/claide)](https://rubygems.org/gems/claide) + +I was born out of a need for a _simple_ option and command parser, while still +providing an API that allows you to quickly create a full featured command-line +interface. + +## Install + +``` +$ [sudo] gem install claide +``` + + +## Usage + +For full documentation on the API of CLAide, visit [rubydoc.info][docs]. + + +### Argument handling + +At its core, a library, such as myself, needs to parse the parameters specified +by the user. + +Working with parameters is done through the `CLAide::ARGV` class. It takes an +array of parameters and parses them as either flags, options, or arguments. + +| Parameter | Description | | :---: | :---: | | `--milk`, `--no-milk` | A boolean ‘flag’, which may be negated. | | `--sweetener=honey` | An ‘option’ consists of a key, a ‘=’, and a value. | | `tea` | An ‘argument’ is just a value. | + + +Accessing flags, options, and arguments, with the following methods, will also +remove the parameter from the remaining unprocessed parameters. + +```ruby +argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) +argv.shift_argument # => 'tea' +argv.shift_argument # => nil +argv.flag?('milk') # => false +argv.flag?('milk') # => nil +argv.option('sweetener') # => 'honey' +argv.option('sweetener') # => nil +``` + + +In case the requested flag or option is not present, `nil` is returned.
You can + specify a default value to be used as the optional second method parameter: + +```ruby +argv = CLAide::ARGV.new(['tea']) +argv.flag?('milk', true) # => true +argv.option('sweetener', 'sugar') # => 'sugar' +``` + + +Unlike flags and options, accessing all of the arguments can be done in either +a preserving or mutating way: + +```ruby +argv = CLAide::ARGV.new(['tea', 'coffee']) +argv.arguments # => ['tea', 'coffee'] +argv.arguments! # => ['tea', 'coffee'] +argv.arguments # => [] +``` + + +### Command handling + +Commands are actions that a tool can perform. Every command is represented by +its own command class. + +Commands may be nested, in which case they inherit from the ‘super command’ +class. Some of these nested commands may not actually perform any work +themselves, but are rather used as ‘super commands’ _only_, in which case they +are ‘abstract commands’. + +Running commands is typically done through the `CLAide::Command.run(argv)` +method, which performs the following steps: + +1. Parses the given parameters, finds the command class matching the parameters, + and instantiates it with the remaining parameters. It’s each nested command + class’ responsibility to remove the parameters it handles from the remaining + parameters, _before_ calling the `super` implementation. + +2. Asks the command instance to validate its parameters, but only _after_ + calling the `super` implementation. The `super` implementation will show a + help banner in case the `--help` flag is specified, not all parameters were + removed from the parameter list, or the command is an abstract command. + +3. Calls the `run` method on the command instance, where it may do its work. + +4. Catches _any_ uncaught exception and shows it to the user in a meaningful way. + * A `Help` exception triggers a help banner to be shown for the command. + * An exception that includes the `InformativeError` module will show _only_ + the message, unless disabled with the `--verbose` flag; and in red, + depending on the color configuration. + * Any other type of exception will be passed to `Command.report_error(error)` + for custom error reporting (such as the one in [CocoaPods][report-error]). + +In case you want to call commands from _inside_ other commands, you should use +the `CLAide::Command.parse(argv)` method to retrieve an instance of the command +and call `run` on it. Unless you are using user-supplied parameters, there +should not be a need to validate the parameters. + +See the [example][example] for an illustration of how to define commands. + + +[docs]: http://www.rubydoc.info/github/CocoaPods/CLAide/index +[example]: https://github.com/CocoaPods/CLAide/blob/master/examples/make.rb +[report-error]: https://github.com/CocoaPods/CocoaPods/blob/054fe5c861d932219ec40a91c0439a7cfc3a420c/lib/cocoapods/command.rb#L36 diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Rakefile new file mode 100644 index 0000000..dc22070 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/Rakefile @@ -0,0 +1,57 @@ +# encoding: utf-8 + +#-- Bootstrap --------------------------------------------------------------# + +desc 'Initializes your working copy to run the specs' +task :bootstrap do + if system('which bundle') + title 'Installing gems' + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!]
Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + task :default => :spec + + #-- Specs ------------------------------------------------------------------# + + desc 'Run specs' + task :spec do + title 'Running Unit Tests' + files = FileList['spec/**/*_spec.rb'].shuffle.join(' ') + sh "bundle exec bacon #{files}" + + Rake::Task['rubocop'].invoke + end + + #-- Rubocop ----------------------------------------------------------------# + + desc 'Check code against RuboCop rules' + task :rubocop do + sh 'bundle exec rubocop' + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks have been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' \ + "\e[0m" +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/claide.gemspec b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/claide.gemspec new file mode 100644 index 0000000..1f298d3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/claide.gemspec @@ -0,0 +1,23 @@ +# -*- encoding: utf-8 -*- +$:.unshift File.expand_path('../lib', __FILE__) +require File.expand_path('../lib/claide/gem_version', __FILE__) + +Gem::Specification.new do |s| + s.name = "claide" + s.version = CLAide::VERSION + s.license = "MIT" + s.email = ["eloy.de.enige@gmail.com", "fabiopelosin@gmail.com"] + s.homepage = "https://github.com/CocoaPods/CLAide" + s.authors = ["Eloy Duran", "Fabio Pelosin"] + + s.summary = "A small command-line interface framework." + + s.files = `git ls-files -z`.split("\0").reject { |f| f =~ /\A(spec|examples)/i } + + ## Make sure you can build the gem on older versions of RubyGems too: + s.rubygems_version = "1.6.2" + s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= + s.specification_version = 3 if s.respond_to?
:specification_version + + s.required_ruby_version = ">= 2.3.0" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide.rb new file mode 100644 index 0000000..25d2c75 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide.rb @@ -0,0 +1,13 @@ +# encoding: utf-8 + +# The modules of interest are {CLAide::ARGV}, {CLAide::Command}, and +# {CLAide::InformativeError} +# +module CLAide + require 'claide/ansi' + require 'claide/argument' + require 'claide/argv' + require 'claide/command' + require 'claide/help' + require 'claide/informative_error' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi.rb new file mode 100644 index 0000000..0839ed6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi.rb @@ -0,0 +1,126 @@ +# encoding: utf-8 + +require 'claide/ansi/cursor' +require 'claide/ansi/graphics' + +module CLAide + # Provides support for ANSI Escape sequences + # + # For more information see: + # + # - http://ascii-table.com/ansi-escape-sequences.php + # - http://en.wikipedia.org/wiki/ANSI_escape_code + # + # This functionality has been inspired and derived from the following gems: + # + # - colored + # - colorize + # + class ANSI + extend Cursor + extend Graphics + + class << self + # @return [Bool] Whether the string mixin should be disabled to return the + # original string. This method is intended to offer a central location + # where ANSI logic can be disabled without needing to implement + # conditionals across the code base of clients. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # ANSI.disabled = true + # "example".ansi.yellow #=> "example" + # + attr_accessor :disabled + end + + # @return [Hash{Symbol => Fixnum}] The text attributes codes by their + # English name. + # + TEXT_ATTRIBUTES = { + :bold => 1, + :underline => 4, + :blink => 5, + :reverse => 7, + :hidden => 8, + } + + # @return [Hash{Symbol => Fixnum}] The codes to disable a text attribute by + # their name. + # + TEXT_DISABLE_ATTRIBUTES = { + :bold => 21, + :underline => 24, + :blink => 25, + :reverse => 27, + :hidden => 28, + } + + # Return [String] The escape sequence to reset the graphics. + # + RESET_SEQUENCE = "\e[0m" + + # @return [Hash{Symbol => Fixnum}] The colors codes by their English name. + # + COLORS = { + :black => 0, + :red => 1, + :green => 2, + :yellow => 3, + :blue => 4, + :magenta => 5, + :cyan => 6, + :white => 7, + } + + # Return [String] The escape sequence for the default foreground color. + # + DEFAULT_FOREGROUND_COLOR = "\e[39m" + + # Return [String] The escape sequence for the default background color. + # + DEFAULT_BACKGROUND_COLOR = "\e[49m" + + # @return [Fixnum] The code of a key given the map. + # + # @param [Symbol] key + # The key for which the code is needed. + # + # @param [Hash{Symbol => Fixnum}] map + # A hash which associates each code to each key. + # + # @raise If the key is not provided. + # @raise If the key is not present in the map.
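+ # + # @example (an illustrative addition; COLORS and TEXT_ATTRIBUTES are the + # tables defined above) + # + # ANSI.code_for_key(:red, COLORS) # => 1 + # ANSI.code_for_key(:bold, TEXT_ATTRIBUTES) # => 1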
+ # + def self.code_for_key(key, map) + unless key + raise ArgumentError, 'A key must be provided' + end + code = map[key] + unless code + raise ArgumentError, "Unsupported key: `#{key}`" + end + code + end + end +end + +#-- String mixin -------------------------------------------------------------# + +require 'claide/ansi/string_escaper' + +class String + # @return [StringEscaper] An object which provides convenience methods to + # wrap the receiver in ANSI sequences. + # + # @example + # + # "example".ansi.yellow #=> "\e[33mexample\e[39m" + # "example".ansi.on_red #=> "\e[41mexample\e[49m" + # "example".ansi.bold #=> "\e[1mexample\e[21m" + # + def ansi + CLAide::ANSI::StringEscaper.new(self) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb new file mode 100644 index 0000000..acfd5b6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/cursor.rb @@ -0,0 +1,69 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the position + # of the cursor and to erase parts of text. + # + module Cursor + # @return [String] The escape sequence to set the cursor at the + # given line. + # + # @param [Fixnum] line + # The line where to place the cursor. + # + # @param [Fixnum] column + # The column where to place the cursor. + # + def self.set_cursor_position(line = 0, column = 0) + "\e[#{line};#{column}H" + end + + # @return [String] The escape sequence to move the cursor by the + # given amount of lines and columns. + # + # @param [Fixnum] lines + # The amount of lines the cursor should be moved by. + # Negative values indicate up direction and positive ones + # down direction. + # + # @param [Fixnum] columns + # The amount of columns the cursor should be moved by. + # Negative values indicate left direction and positive ones + # right direction. + # + def self.move_cursor(lines, columns = 0) + lines_code = lines < 0 ? 'A' : 'B' + columns_code = columns > 0 ? 'C' : 'D' + "\e[#{lines.abs}#{lines_code};#{columns.abs}#{columns_code}" + end + + # @return [String] The escape sequence to save the cursor position. + # + def self.save_cursor_position + "\e[s" + end + + # @return [String] The escape sequence to restore the cursor to the + # previously saved position. This sequence also clears all the + # output after the position. + # + def self.restore_cursor_position + "\e[u" + end + + # @return [String] The escape sequence to erase the display. + # + def self.erase_display + "\e[2J" + end + + # @return [String] The escape sequence to erase a line from the + # cursor position to the end. + # + def self.erase_line + "\e[K" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb new file mode 100644 index 0000000..e5c2d15 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/graphics.rb @@ -0,0 +1,72 @@ +# encoding: utf-8 + +module CLAide + class ANSI + # Provides support for generating escape sequences relative to the graphic + # mode. + # + module Graphics + # @return [String] The escape sequence for a text attribute. + # + # @param [Symbol] key + # The name of the text attribute. + # + def self.text_attribute(key) + code = ANSI.code_for_key(key, TEXT_ATTRIBUTES) + graphics_mode(code) + end + + # @return [String] The escape sequence for a foreground color.
+ # @return [String] The escape sequence for a foreground color.
+ #
+ # @param [Symbol] key
+ # The name of the color.
+ #
+ def self.foreground_color(key)
+ code = ANSI.code_for_key(key, COLORS) + 30
+ graphics_mode(code)
+ end
+
+ # @return [String] The escape sequence for a background color.
+ #
+ # @param [Symbol] key
+ # The name of the color.
+ #
+ def self.background_color(key)
+ code = ANSI.code_for_key(key, COLORS) + 40
+ graphics_mode(code)
+ end
+
+ # @return [String] The escape sequence for a foreground color using the
+ # xterm-256 format.
+ #
+ # @param [Fixnum] color
+ # The value of the color.
+ #
+ def self.foreground_color_256(color)
+ code = [38, 5, color]
+ graphics_mode(code)
+ end
+
+ # @return [String] The escape sequence for a background color using the
+ # xterm-256 format.
+ #
+ # @param [Fixnum] color
+ # The value of the color.
+ #
+ def self.background_color_256(color)
+ code = [48, 5, color]
+ graphics_mode(code)
+ end
+
+ # @return [String] The escape sequence for a single or a list of codes.
+ #
+ # @param [Fixnum, Array] codes
+ # The code(s).
+ #
+ def self.graphics_mode(codes)
+ codes = Array(codes)
+ "\e[#{codes.join(';')}m"
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb
new file mode 100644
index 0000000..b6f461c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/ansi/string_escaper.rb
@@ -0,0 +1,79 @@
+module CLAide
+ class ANSI
+ # Provides support to wrap strings in ANSI sequences according to the
+ # `ANSI.disabled` setting.
+ #
+ class StringEscaper < String
+ # @param [String] string The string to wrap.
+ #
+ def initialize(string)
+ super
+ end
+
+ # @return [StringEscaper] Wraps a string in the given ANSI sequences,
+ # taking care of handling existing sequences for the same
+ # family of attributes (i.e. attributes terminated by the
+ # same sequence).
+ #
+ def wrap_in_ansi_sequence(open, close)
+ if ANSI.disabled
+ self
+ else
+ gsub!(close, open)
+ insert(0, open).insert(-1, close)
+ end
+ end
+
+ # @return [StringEscaper]
+ #
+ # @param [Array] keys
+ # One or more keys corresponding to ANSI codes to apply to the
+ # string.
+ #
+ def apply(*keys)
+ keys.flatten.each do |key|
+ send(key)
+ end
+ self
+ end
+
+ ANSI::COLORS.each_key do |key|
+ # Defines a method that returns a copy of the receiver wrapped in an
+ # ANSI sequence for each foreground color (e.g. #blue).
+ #
+ # The methods handle nesting of ANSI sequences.
+ #
+ define_method key do
+ open = Graphics.foreground_color(key)
+ close = ANSI::DEFAULT_FOREGROUND_COLOR
+ wrap_in_ansi_sequence(open, close)
+ end
+
+ # Defines a method that returns a copy of the receiver wrapped in an
+ # ANSI sequence for each background color (e.g. #on_blue).
+ #
+ # The methods handle nesting of ANSI sequences.
+ #
+ define_method "on_#{key}" do
+ open = Graphics.background_color(key)
+ close = ANSI::DEFAULT_BACKGROUND_COLOR
+ wrap_in_ansi_sequence(open, close)
+ end
+ end
+
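+ # Editor's note (illustrative, not part of the gem): the generated methods
+ # compose, and #apply performs the same composition from symbols:
+ #
+ #   "warn".ansi.yellow.ansi.bold      #=> "\e[1m\e[33mwarn\e[39m\e[21m"
+ #   "warn".ansi.apply(:yellow, :bold) #   equivalent result
+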
+ ANSI::TEXT_ATTRIBUTES.each_key do |key|
+ # Defines a method that returns a copy of the receiver wrapped in an
+ # ANSI sequence for each text attribute (e.g. #bold).
+ #
+ # The methods handle nesting of ANSI sequences.
+ #
+ define_method key do
+ open = Graphics.text_attribute(key)
+ close_code = TEXT_DISABLE_ATTRIBUTES[key]
+ close = Graphics.graphics_mode(close_code)
+ wrap_in_ansi_sequence(open, close)
+ end
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argument.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argument.rb
new file mode 100644
index 0000000..4d54f29
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argument.rb
@@ -0,0 +1,62 @@
+# encoding: utf-8
+
+module CLAide
+ # This class is used to represent individual arguments to present to
+ # the command help banner
+ #
+ class Argument
+ # The string used for ellipsis / repeatable arguments in the banner
+ #
+ ELLIPSIS = '...'
+
+ # @return [Array]
+ # List of alternate names for the parameters
+ attr_reader :names
+
+ # @return [Boolean]
+ # Indicates if the argument is required (not optional)
+ #
+ attr_accessor :required
+ alias_method :required?, :required
+
+ # @return [Boolean]
+ # Indicates if the argument is repeatable (= can appear multiple
+ # times in the command, which is indicated by '...' in the banner)
+ #
+ attr_accessor :repeatable
+ alias_method :repeatable?, :repeatable
+
+ # @param [String,Array] names
+ # List of the alternative names for the parameter.
+ # For convenience, if there is only one alternative for that
+ # parameter, we can use a String instead of a 1-item Array
+ #
+ # @param [Boolean] required
+ # true if the parameter is required, false if it is optional
+ #
+ # @param [Boolean] repeatable
+ # If true, the argument can appear multiple times in the command.
+ # In that case, an ellipsis will be appended after the argument
+ # in the help banner.
+ #
+ # @example
+ #
+ # # A required parameter that can be either a NAME or URL
+ # Argument.new(%w(NAME URL), true)
+ #
+ def initialize(names, required, repeatable = false)
+ @names = Array(names)
+ @required = required
+ @repeatable = repeatable
+ end
+
+ # @return [Boolean] true on equality
+ #
+ # @param [Argument] other the Argument compared against
+ #
+ def ==(other)
+ other.is_a?(Argument) &&
+ names == other.names && required == other.required
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argv.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argv.rb
new file mode 100644
index 0000000..ecadfaf
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/argv.rb
@@ -0,0 +1,329 @@
+# encoding: utf-8
+
+module CLAide
+ # This class is responsible for parsing the parameters specified by the
+ # user, accessing individual parameters, and keeping state by removing
+ # handled parameters.
+ #
+ class ARGV
+ # @return [ARGV] Coerces an object to the ARGV class if needed.
+ #
+ # @param [Object] argv
+ # The object which should be converted to the ARGV class.
+ #
+ def self.coerce(argv)
+ if argv.is_a?(ARGV)
+ argv
+ else
+ ARGV.new(argv)
+ end
+ end
+
+ # @param [Array<#to_s>] argv
+ # A list of parameters.
+ #
+ def initialize(argv)
+ @entries = Parser.parse(argv)
+ end
+
+ # @return [Boolean] Whether or not there are any remaining unhandled
+ # parameters.
+ #
+ def empty?
+ @entries.empty?
+ end
+
+ # @return [Array] A list of the remaining unhandled parameters, in
+ # the same format the user specified them in.
+ # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder # => ['--no-milk', '--sweetener=honey'] + # + def remainder + @entries.map do |type, (key, value)| + case type + when :arg + key + when :flag + "--#{'no-' if value == false}#{key}" + when :option + "--#{key}=#{value}" + end + end + end + + # @return [Array] A list of the remaining unhandled parameters, in + # the same format the user specified them. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.shift_argument # => 'tea' + # argv.remainder! # => ['--no-milk', '--sweetener=honey'] + # argv.remainder # => [] + # + def remainder! + remainder.tap { @entries.clear } + end + + # @return [Hash] A hash that consists of the remaining flags and options + # and their values. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.options # => { 'milk' => false, 'sweetener' => 'honey' } + # + def options + options = {} + @entries.each do |type, (key, value)| + options[key] = value unless type == :arg + end + options + end + + # @return [Array] A list of the remaining arguments. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white', 'biscuit'] + # + def arguments + @entries.map { |type, value| value if type == :arg }.compact + end + + # @return [Array] A list of the remaining arguments. + # + # @note This version also removes the arguments from the remaining + # parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white', '--no-milk', 'biscuit']) + # argv.arguments # => ['tea', 'white', 'biscuit'] + # argv.arguments! # => ['tea', 'white', 'biscuit'] + # argv.arguments # => [] + # + def arguments! + arguments = [] + while arg = shift_argument + arguments << arg + end + arguments + end + + # @return [String] The first argument in the remaining parameters. + # + # @note This will remove the argument from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', 'white']) + # argv.shift_argument # => 'tea' + # argv.arguments # => ['white'] + # + def shift_argument + if index = @entries.find_index { |type, _| type == :arg } + entry = @entries[index] + @entries.delete_at(index) + entry.last + end + end + + # @return [Boolean, nil] Returns `true` if the flag by the specified `name` + # is among the remaining parameters and is not negated. + # + # @param [String] name + # The name of the flag to look for among the remaining parameters. + # + # @param [Boolean] default + # The value that is returned in case the flag is not among the + # remaining parameters. + # + # @note This will remove the flag from the remaining parameters. + # + # @example + # + # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey']) + # argv.flag?('milk') # => false + # argv.flag?('milk') # => nil + # argv.flag?('milk', true) # => true + # argv.remainder # => ['tea', '--sweetener=honey'] + # + def flag?(name, default = nil) + delete_entry(:flag, name, default, true) + end + + # @return [String, nil] Returns the value of the option by the specified + # `name` is among the remaining parameters. + # + # @param [String] name + # The name of the option to look for among the remaining + # parameters. + # + # @param [String] default + # The value that is returned in case the option is not among the + # remaining parameters. 
+ #
+ # @note This will remove the option from the remaining parameters.
+ #
+ # @example
+ #
+ # argv = CLAide::ARGV.new(['tea', '--no-milk', '--sweetener=honey'])
+ # argv.option('sweetener') # => 'honey'
+ # argv.option('sweetener') # => nil
+ # argv.option('sweetener', 'sugar') # => 'sugar'
+ # argv.remainder # => ['tea', '--no-milk']
+ #
+ def option(name, default = nil)
+ delete_entry(:option, name, default)
+ end
+
+ # @return [Array] Returns an array of all the values of the option
+ # with the specified `name` among the remaining
+ # parameters.
+ #
+ # @param [String] name
+ # The name of the option to look for among the remaining
+ # parameters.
+ #
+ # @note This will remove the option from the remaining parameters.
+ #
+ # @example
+ #
+ # argv = CLAide::ARGV.new(['--ignore=foo', '--ignore=bar'])
+ # argv.all_options('include') # => []
+ # argv.all_options('ignore') # => ['bar', 'foo']
+ # argv.remainder # => []
+ #
+ def all_options(name)
+ options = []
+ while entry = option(name)
+ options << entry
+ end
+ options
+ end
+
+ private
+
+ # @return [Array<Array>] A list of tuples for each
+ # non-consumed parameter, where the first entry is the `type` and
+ # the second entry the actual parsed parameter.
+ #
+ attr_reader :entries
+
+ # @return [Bool, String, Nil] Removes an entry from the entries list and
+ # returns its value or the default value if the entry was not
+ # present.
+ #
+ # @param [Symbol] requested_type
+ # The type of the entry.
+ #
+ # @param [String] requested_key
+ # The key of the entry.
+ #
+ # @param [Bool, String, Nil] default
+ # The value which should be returned if the entry is not present.
+ #
+ # @param [Bool] delete_all
+ # Whether all values matching `requested_type` and `requested_key`
+ # should be deleted.
+ #
+ def delete_entry(requested_type, requested_key, default, delete_all = false)
+ pred = proc do |type, (key, _value)|
+ requested_key == key && requested_type == type
+ end
+ entry = entries.reverse_each.find(&pred)
+ delete_all ? entries.delete_if(&pred) : entries.delete(entry)
+
+ entry.nil? ? default : entry.last.last
+ end
+
+ module Parser
+ # @return [Array<Array>] A list of tuples for each
+ # parameter, where the first entry is the `type` and the second
+ # entry the actual parsed parameter.
+ #
+ # @example
+ #
+ # list = parse(['tea', '--no-milk', '--sweetener=honey'])
+ # list # => [[:arg, "tea"],
+ # [:flag, ["milk", false]],
+ # [:option, ["sweetener", "honey"]]]
+ #
+ def self.parse(argv)
+ entries = []
+ copy = argv.map(&:to_s)
+ double_dash = false
+ while argument = copy.shift
+ next if !double_dash && double_dash = (argument == '--')
+ type = double_dash ? :arg : argument_type(argument)
+ parsed_argument = parse_argument(type, argument)
+ entries << [type, parsed_argument]
+ end
+ entries
+ end
+
+ # @return [Symbol] Returns the type of an argument. The types can be
+ # either: `:arg`, `:flag`, `:option`.
+ #
+ # @param [String] argument
+ # The argument to check.
+ #
+ def self.argument_type(argument)
+ if argument.start_with?('--')
+ if argument.include?('=')
+ :option
+ else
+ :flag
+ end
+ else
+ :arg
+ end
+ end
+
+ # @return [String, Array] Returns the argument itself for
+ # normal arguments (like commands) and a tuple with the key and
+ # the value for options and flags.
+ #
+ # @param [Symbol] type
+ # The type of the argument.
+ #
+ # @param [String] argument
+ # The argument to check.
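+ #
+ # Editor's note (illustrative, not part of the gem): given the types from
+ # `argument_type` above, this dispatches as e.g.:
+ #
+ #   parse_argument(:arg, 'tea')                  #=> 'tea'
+ #   parse_argument(:flag, '--no-milk')           #=> ['milk', false]
+ #   parse_argument(:option, '--sweetener=honey') #=> ['sweetener', 'honey']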
+ # + def self.parse_argument(type, argument) + case type + when :arg + return argument + when :flag + return parse_flag(argument) + when :option + return argument[2..-1].split('=', 2) + end + end + + # @return [String, Array] Returns the parameter + # describing a flag arguments. + # + # @param [String] argument + # The flag argument to check. + # + def self.parse_flag(argument) + if argument.start_with?('--no-') + key = argument[5..-1] + value = false + else + key = argument[2..-1] + value = true + end + [key, value] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command.rb new file mode 100644 index 0000000..6414b5c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command.rb @@ -0,0 +1,669 @@ +# encoding: utf-8 + +require 'claide/command/banner' +require 'claide/command/plugin_manager' +require 'claide/command/argument_suggester' + +module CLAide + # This class is used to build a command-line interface + # + # Each command is represented by a subclass of this class, which may be + # nested to create more granular commands. + # + # Following is an overview of the types of commands and what they should do. + # + # ### Any command type + # + # * Inherit from the command class under which the command should be nested. + # * Set {Command.summary} to a brief description of the command. + # * Override {Command.options} to return the options it handles and their + # descriptions and prepending them to the results of calling `super`. + # * Override {Command#initialize} if it handles any parameters. + # * Override {Command#validate!} to check if the required parameters the + # command handles are valid, or call {Command#help!} in case they’re not. + # + # ### Abstract command + # + # The following is needed for an abstract command: + # + # * Set {Command.abstract_command} to `true`. + # * Subclass the command. + # + # When the optional {Command.description} is specified, it will be shown at + # the top of the command’s help banner. + # + # ### Normal command + # + # The following is needed for a normal command: + # + # * Set {Command.arguments} to the description of the arguments this command + # handles. + # * Override {Command#run} to perform the actual work. + # + # When the optional {Command.description} is specified, it will be shown + # underneath the usage section of the command’s help banner. Otherwise this + # defaults to {Command.summary}. + # + class Command + class << self + # @return [Boolean] Indicates whether or not this command can actually + # perform work of itself, or that it only contains subcommands. + # + attr_accessor :abstract_command + alias_method :abstract_command?, :abstract_command + + # @return [Boolean] Indicates whether or not this command is used during + # command parsing and whether or not it should be shown in the + # help banner or to show its subcommands instead. + # + # Setting this to `true` implies it’s an abstract command. + # + attr_reader :ignore_in_command_lookup + alias_method :ignore_in_command_lookup?, :ignore_in_command_lookup + def ignore_in_command_lookup=(flag) + @ignore_in_command_lookup = self.abstract_command = flag + end + + # @return [String] The subcommand which an abstract command should invoke + # by default. + # + attr_accessor :default_subcommand + + # @return [String] A brief description of the command, which is shown + # next to the command in the help banner of a parent command. 
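+ #
+ # Editor's note: a minimal, hypothetical subclass sketch (names invented
+ # for illustration; not from the gem's own docs):
+ #
+ # class BeverageMaker < CLAide::Command
+ # self.summary = 'Makes delicious beverages'
+ # self.arguments = [CLAide::Argument.new('FLAVOR', false)]
+ # end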
+ #
+ attr_accessor :summary
+
+ # @return [String] A longer description of the command, which is shown
+ # underneath the usage section of the command’s help banner. Any
+ # indentation in this value will be ignored.
+ #
+ attr_accessor :description
+
+ # @return [Array<String>] The prefixes used to search for CLAide plugins.
+ # Plugins are loaded via their `_plugin.rb` file.
+ # Defaults to search for `claide` plugins.
+ #
+ def plugin_prefixes
+ @plugin_prefixes ||= ['claide']
+ end
+ attr_writer :plugin_prefixes
+
+ # @return [Array<Argument>]
+ # A list of arguments the command handles. This is shown
+ # in the usage section of the command’s help banner.
+ # Each Argument in the array represents an argument by its name
+ # (or list of alternatives) and whether it's required or optional
+ #
+ def arguments
+ @arguments ||= []
+ end
+
+ # @param [Array<Argument>] arguments
+ # An array listing the command arguments.
+ # Each Argument object describes the argument by its name
+ # (or list of alternatives) and whether it's required or optional
+ #
+ # @todo Remove deprecation
+ #
+ def arguments=(arguments)
+ if arguments.is_a?(Array)
+ if arguments.empty? || arguments[0].is_a?(Argument)
+ @arguments = arguments
+ else
+ self.arguments_array = arguments
+ end
+ else
+ self.arguments_string = arguments
+ end
+ end
+
+ # @return [Boolean] The default value for {Command#ansi_output}. This
+ # defaults to `true` if `STDOUT` is connected to a TTY and
+ # `String` has the instance methods `#red`, `#green`, and
+ # `#yellow` (which are defined by, for instance, the
+ # [colored](https://github.com/defunkt/colored) gem).
+ #
+ def ansi_output
+ if @ansi_output.nil?
+ @ansi_output = STDOUT.tty?
+ end
+ @ansi_output
+ end
+ attr_writer :ansi_output
+ alias_method :ansi_output?, :ansi_output
+
+ # @return [String] The name of the command. Defaults to a snake-cased
+ # version of the class’ name.
+ #
+ def command
+ @command ||= name.split('::').last.gsub(/[A-Z]+[a-z]*/) do |part|
+ part.downcase << '-'
+ end[0..-2]
+ end
+ attr_writer :command
+
+ # @return [String] The version of the command. This value will be printed
+ # by the `--version` flag if used for the root command.
+ #
+ attr_accessor :version
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @return [String] The full command up-to this command, as it would be
+ # looked up during parsing.
+ #
+ # @note (see #ignore_in_command_lookup)
+ #
+ # @example
+ #
+ # BeverageMaker::Tea.full_command # => "beverage-maker tea"
+ #
+ def self.full_command
+ if superclass == Command
+ ignore_in_command_lookup? ? '' : command
+ else
+ if ignore_in_command_lookup?
+ superclass.full_command
+ else
+ "#{superclass.full_command} #{command}"
+ end
+ end
+ end
+
+ # @return [Bool] Whether this is the root command class
+ #
+ def self.root_command?
+ superclass == CLAide::Command
+ end
+
+ # @return [Array<Class>] A list of all command classes that are nested
+ # under this command.
+ #
+ def self.subcommands
+ @subcommands ||= []
+ end
+
+ # @return [Array<Class>] A list of command classes that are nested under
+ # this command _or_ the subcommands of those command classes in
+ # case the command class should be ignored in command lookup.
+ #
+ def self.subcommands_for_command_lookup
+ subcommands.map do |subcommand|
+ if subcommand.ignore_in_command_lookup?
+ subcommand.subcommands_for_command_lookup + else + subcommand + end + end.flatten + end + + # Searches the list of subcommands that should not be ignored for command + # lookup for a subcommand with the given `name`. + # + # @param [String] name + # The name of the subcommand to be found. + # + # @return [CLAide::Command, nil] The subcommand, if found. + # + def self.find_subcommand(name) + subcommands_for_command_lookup.find { |sc| sc.command == name } + end + + # @visibility private + # + # Automatically registers a subclass as a subcommand. + # + def self.inherited(subcommand) + subcommands << subcommand + end + + DEFAULT_ROOT_OPTIONS = [ + ['--version', 'Show the version of the tool'], + ] + + DEFAULT_OPTIONS = [ + ['--verbose', 'Show more debugging information'], + ['--no-ansi', 'Show output without ANSI codes'], + ['--help', 'Show help banner of specified command'], + ] + + # Should be overridden by a subclass if it handles any options. + # + # The subclass has to combine the result of calling `super` and its own + # list of options. The recommended way of doing this is by concatenating + # to this classes’ own options. + # + # @return [Array] + # + # A list of option name and description tuples. + # + # @example + # + # def self.options + # [ + # ['--verbose', 'Print more info'], + # ['--help', 'Print help banner'], + # ].concat(super) + # end + # + def self.options + if root_command? + DEFAULT_ROOT_OPTIONS + DEFAULT_OPTIONS + else + DEFAULT_OPTIONS + end + end + + # Adds a new option for the current command. + # + # This method can be used in conjunction with overriding `options`. + # + # @return [void] + # + # @example + # + # option '--help', 'Print help banner ' + # + def self.option(name, description) + mod = Module.new do + define_method(:options) do + [ + [name, description], + ].concat(super()) + end + end + extend(mod) + end + private_class_method :option + + # Handles root commands options if appropriate. + # + # @param [ARGV] argv + # The parameters of the command. + # + # @return [Bool] Whether any root command option was handled. + # + def handle_root_options(argv) + return false unless self.class.root_command? + if argv.flag?('version') + print_version + return true + end + false + end + + # Prints the version of the command optionally including plugins. + # + def print_version + puts self.class.version + if verbose? + PluginManager.specifications.each do |spec| + puts "#{spec.name}: #{spec.version}" + end + end + end + + # Instantiates the command class matching the parameters through + # {Command.parse}, validates it through {Command#validate!}, and runs it + # through {Command#run}. + # + # @note The ANSI support is configured before running a command to allow + # the same process to run multiple commands with different + # settings. For example a process with ANSI output enabled might + # want to programmatically invoke another command with the output + # enabled. + # + # @param [Array, ARGV] argv + # A list of parameters. For instance, the standard `ARGV` constant, + # which contains the parameters passed to the program. + # + # @return [void] + # + def self.run(argv = []) + plugin_prefixes.each do |plugin_prefix| + PluginManager.load_plugins(plugin_prefix) + end + + argv = ARGV.coerce(argv) + command = parse(argv) + ANSI.disabled = !command.ansi_output? + unless command.handle_root_options(argv) + command.validate! 
+ command.run + end + rescue Object => exception + handle_exception(command, exception) + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] An instance of the command class that was matched by + # going through the arguments in the parameters and drilling down + # command classes. + # + def self.parse(argv) + argv = ARGV.coerce(argv) + cmd = argv.arguments.first + if cmd && subcommand = find_subcommand(cmd) + argv.shift_argument + subcommand.parse(argv) + elsif abstract_command? && default_subcommand + load_default_subcommand(argv) + else + new(argv) + end + end + + # @param [Array, ARGV] argv + # A list of (remaining) parameters. + # + # @return [Command] Returns the default subcommand initialized with the + # given arguments. + # + def self.load_default_subcommand(argv) + unless subcommand = find_subcommand(default_subcommand) + raise 'Unable to find the default subcommand ' \ + "`#{default_subcommand}` for command `#{self}`." + end + result = subcommand.parse(argv) + result.invoked_as_default = true + result + end + + # Presents an exception to the user in a short manner in case of an + # `InformativeError` or in long form in other cases, + # + # @param [Command, nil] command + # The command from where the exception originated. + # + # @param [Object] exception + # The exception to present. + # + # @return [void] + # + def self.handle_exception(command, exception) + if exception.is_a?(InformativeError) + puts exception.message + if command.nil? || command.verbose? + puts + puts(*exception.backtrace) + end + exit exception.exit_status + else + report_error(exception) + end + end + + # Allows the application to perform custom error reporting, by overriding + # this method. + # + # @param [Exception] exception + # + # An exception that occurred while running a command through + # {Command.run}. + # + # @raise + # + # By default re-raises the specified exception. + # + # @return [void] + # + def self.report_error(exception) + plugins = PluginManager.plugins_involved_in_exception(exception) + unless plugins.empty? + puts '[!] The exception involves the following plugins:' \ + "\n - #{plugins.join("\n - ")}\n".ansi.yellow + end + raise exception + end + + # @visibility private + # + # @param [String] error_message + # The error message to show to the user. + # + # @param [Class] help_class + # The class to use to raise a ‘help’ error. + # + # @raise [Help] + # + # Signals CLAide that a help banner for this command should be shown, + # with an optional error message. + # + # @return [void] + # + def self.help!(error_message = nil, help_class = Help) + raise help_class.new(banner, error_message) + end + + # @visibility private + # + # Returns the banner for the command. + # + # @param [Class] banner_class + # The class to use to format help banners. + # + # @return [String] The banner for the command. + # + def self.banner(banner_class = Banner) + banner_class.new(self).formatted_banner + end + + # @visibility private + # + # Print banner and exit + # + # @note Calling this method exits the current process. + # + # @return [void] + # + def self.banner! + puts banner + exit 0 + end + + #-------------------------------------------------------------------------# + + # Set to `true` if the user specifies the `--verbose` option. + # + # @note + # + # If you want to make use of this value for your own configuration, you + # should check the value _after_ calling the `super` {Command#initialize} + # implementation. 
+ #
+ # @return [Boolean]
+ #
+ # Whether or not backtraces should be included when presenting the user an
+ # exception that includes the {InformativeError} module.
+ #
+ attr_accessor :verbose
+ alias_method :verbose?, :verbose
+
+ # Set to `true` if {Command.ansi_output} returns `true` and the user
+ # did **not** specify the `--no-ansi` option.
+ #
+ # @note (see #verbose)
+ #
+ # @return [Boolean]
+ #
+ # Whether or not to use ANSI codes to prettify output. For instance, by
+ # default {InformativeError} exception messages will be colored red and
+ # subcommands in help banners green.
+ #
+ attr_accessor :ansi_output
+ alias_method :ansi_output?, :ansi_output
+
+ # Set to `true` if initialized with a `--help` flag
+ #
+ # @return [Boolean]
+ #
+ # Whether the command was initialized with argv containing --help
+ #
+ attr_accessor :help_arg
+ alias_method :help?, :help_arg
+
+ # Subclasses should override this method to remove the arguments/options
+ # they support from `argv` _before_ calling `super`.
+ #
+ # The `super` implementation sets the {#verbose} attribute based on whether
+ # or not the `--verbose` option is specified; and the {#ansi_output}
+ # attribute to `false` if {Command.ansi_output} returns `true`, but the
+ # user specified the `--no-ansi` option.
+ #
+ # @param [ARGV, Array] argv
+ #
+ # A list of (user-supplied) params that should be handled.
+ #
+ def initialize(argv)
+ argv = ARGV.coerce(argv)
+ @verbose = argv.flag?('verbose')
+ @ansi_output = argv.flag?('ansi', Command.ansi_output?)
+ @argv = argv
+ @help_arg = argv.flag?('help')
+ end
+
+ # Convenience method.
+ # Instantiate the command and run it with the provided arguments at once.
+ #
+ # @note This method calls validate! on the command before running it but,
+ # contrary to CLAide::Command::run, it does not load plugins nor exit
+ # on failure. It is up to the caller to rescue any possible exception
+ # raised.
+ #
+ # @param [String..., Array] args
+ # The arguments to initialize the command with
+ #
+ # @raise [Help] If validate! fails
+ #
+ def self.invoke(*args)
+ command = new(ARGV.new(args.flatten))
+ command.validate!
+ command.run
+ end
+
+ # @return [Bool] Whether the command was invoked by an abstract command by
+ # default.
+ #
+ attr_accessor :invoked_as_default
+ alias_method :invoked_as_default?, :invoked_as_default
+
+ # Raises a Help exception if the `--help` option is specified, if `argv`
+ # still contains remaining arguments/options by the time it reaches this
+ # implementation, or when called on an ‘abstract command’.
+ #
+ # Subclasses should call `super` _before_ doing their own validation. This
+ # way when the user specifies the `--help` flag a help banner is shown,
+ # instead of possible actual validation errors.
+ #
+ # @raise [Help]
+ #
+ # @return [void]
+ #
+ def validate!
+ banner! if help?
+ unless @argv.empty?
+ argument = @argv.remainder.first
+ help! ArgumentSuggester.new(argument, self.class).suggestion
+ end
+ help! if self.class.abstract_command?
+ end
+
+ # This method should be overridden by the command class to perform its
+ # work.
+ #
+ # @return [void]
+ #
+ def run
+ raise 'A subclass should override the `CLAide::Command#run` method to ' \
+ 'actually perform some work.'
+ end
+
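+ # Editor's note (illustrative, not part of the gem): two ways to start a
+ # hypothetical subclass such as the `BeverageMaker` sketch above:
+ #
+ #   BeverageMaker.run(ARGV)     # full entry point: plugins, validation, exits
+ #   BeverageMaker.invoke('tea') # programmatic; raises instead of exiting
+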
+ protected
+
+ # Returns the class of the invoked command
+ #
+ # @return [Command]
+ #
+ def invoked_command_class
+ if invoked_as_default?
+ self.class.superclass
+ else
+ self.class
+ end
+ end
+
+ # @param [String] error_message
+ # A custom optional error message
+ #
+ # @raise [Help]
+ #
+ # Signals CLAide that a help banner for this command should be shown,
+ # with an optional error message.
+ #
+ # @return [void]
+ #
+ def help!(error_message = nil)
+ invoked_command_class.help!(error_message)
+ end
+
+ # Print banner and exit
+ #
+ # @note Calling this method exits the current process.
+ #
+ # @return [void]
+ #
+ def banner!
+ invoked_command_class.banner!
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # Handle deprecated form of self.arguments as an
+ # Array<Array<String, Symbol>> like in:
+ #
+ # self.arguments = [ ['NAME', :required], ['QUERY', :optional] ]
+ #
+ # @todo Remove deprecated format support
+ #
+ def self.arguments_array=(arguments)
+ warn '[!] The signature of CLAide#arguments has changed. ' \
+ "Use CLAide::Argument (#{self}: `#{arguments}`)".ansi.yellow
+ @arguments = arguments.map do |(name_str, type)|
+ names = name_str.split('|')
+ required = (type == :required)
+ Argument.new(names, required)
+ end
+ end
+
+ # Handle deprecated form of self.arguments as a String, like in:
+ #
+ # self.arguments = 'NAME [QUERY]'
+ #
+ # @todo Remove deprecated format support
+ #
+ def self.arguments_string=(arguments)
+ warn '[!] The specification of arguments as a string has been' \
+ " deprecated #{self}: `#{arguments}`".ansi.yellow
+ @arguments = arguments.split(' ').map do |argument|
+ if argument.start_with?('[')
+ Argument.new(argument.sub(/\[(.*)\]/, '\1').split('|'), false)
+ else
+ Argument.new(argument.split('|'), true)
+ end
+ end
+ end
+
+ # Handle deprecated form of assigning a plugin prefix.
+ #
+ # @todo Remove deprecated form.
+ #
+ def self.plugin_prefix=(prefix)
+ warn '[!] The specification of a singular plugin prefix has been ' \
+ "deprecated. Use `#{self}::plugin_prefixes` instead."
+ plugin_prefixes << prefix
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb
new file mode 100644
index 0000000..a13575c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/argument_suggester.rb
@@ -0,0 +1,99 @@
+# encoding: utf-8
+
+module CLAide
+ class Command
+ class ArgumentSuggester
+ # @param [String] argument
+ # The unrecognized argument for which to make a suggestion.
+ #
+ # @param [Class] command_class
+ # The class of the command which encountered the unrecognized
+ # arguments.
+ #
+ def initialize(argument, command_class)
+ @argument, @command_class = argument, command_class
+ @argument_type = ARGV::Parser.argument_type(@argument)
+ end
+
+ # @return [Array] The list of the valid arguments for a command
+ # according to the type of the argument.
+ #
+ def possibilities
+ case @argument_type
+ when :option, :flag
+ @command_class.options.map(&:first)
+ when :arg
+ @command_class.subcommands_for_command_lookup.map(&:command)
+ end
+ end
+
+ # @return [String] Returns a suggested argument from `possibilities` based
+ # on the `levenshtein_distance` score.
+ #
+ def suggested_argument
+ possibilities.sort_by do |element|
+ self.class.levenshtein_distance(@argument, element)
+ end.first
+ end
+
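+ # Editor's note (illustrative, not part of the gem): for a typo such as
+ # `--verbos`, `suggested_argument` ranks `possibilities` by Levenshtein
+ # distance and the message built below would read:
+ #
+ #   Unknown option: `--verbos`
+ #   Did you mean: --verbose?
+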
+ # @return [String] Returns a message including a suggestion for the given
+ # argument.
+ #
+ def suggestion
+ argument_description = @argument_type == :arg ? 'command' : 'option'
+ if suggestion = suggested_argument
+ pretty_suggestion = self.class.prettify_suggestion(suggestion,
+ @argument_type)
+ "Unknown #{argument_description}: `#{@argument}`\n" \
+ "Did you mean: #{pretty_suggestion}?"
+ else
+ "Unknown #{argument_description}: `#{@argument}`"
+ end
+ end
+
+ # Prettifies the given validation suggestion according to the type.
+ #
+ # @param [String] suggestion
+ # The suggestion to prettify.
+ #
+ # @param [Symbol] argument_type
+ # The type of the suggestion: either `:arg`, `:flag` or `:option`.
+ #
+ # @return [String] A handsome suggestion.
+ #
+ def self.prettify_suggestion(suggestion, argument_type)
+ case argument_type
+ when :option, :flag
+ suggestion = suggestion.to_s
+ suggestion.ansi.blue
+ when :arg
+ suggestion.ansi.green
+ end
+ end
+
+ # Returns the Levenshtein distance between the given strings.
+ # From: http://rosettacode.org/wiki/Levenshtein_distance#Ruby
+ #
+ # @param [String] a
+ # The first string to compare.
+ #
+ # @param [String] b
+ # The second string to compare.
+ #
+ # @return [Fixnum] The distance between the strings.
+ def self.levenshtein_distance(a, b)
+ a, b = a.downcase, b.downcase
+ costs = Array(0..b.length)
+ (1..a.length).each do |i|
+ costs[0], nw = i, i - 1
+ (1..b.length).each do |j|
+ costs[j], nw = [
+ costs[j] + 1, costs[j - 1] + 1, a[i - 1] == b[j - 1] ? nw : nw + 1
+ ].min, costs[j]
+ end
+ end
+ costs[b.length]
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/banner.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/banner.rb
new file mode 100644
index 0000000..d87c699
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/banner.rb
@@ -0,0 +1,307 @@
+# encoding: utf-8
+
+module CLAide
+ class Command
+ # Creates the formatted banner to present as help of the provided command
+ # class.
+ #
+ class Banner
+ # @return [Class] The command for which the banner should be created.
+ #
+ attr_accessor :command
+
+ # @param [Class] command @see command
+ #
+ def initialize(command)
+ @command = command
+ end
+
+ # @return [String] The banner for the command.
+ #
+ def formatted_banner
+ sections = [
+ ['Usage', formatted_usage_description],
+ ['Commands', formatted_subcommand_summaries],
+ ['Options', formatted_options_description],
+ ]
+ banner = sections.map do |(title, body)|
+ [prettify_title("#{title}:"), body] unless body.empty?
+ end.compact.join("\n\n")
+ banner
+ end
+
+ private
+
+ # @!group Banner sections
+ #-----------------------------------------------------------------------#
+
+ # @return [Fixnum] The indentation of the text.
+ #
+ TEXT_INDENT = 6
+
+ # @return [Fixnum] The maximum width of the text.
+ #
+ MAX_WIDTH = TEXT_INDENT + 80
+
+ # @return [Fixnum] The minimum number of spaces between a name and its
+ # description.
+ #
+ DESCRIPTION_SPACES = 3
+
+ # @return [Fixnum] The width of the bullet that precedes a subcommand
+ # name.
+ #
+ SUBCOMMAND_BULLET_SIZE = 2
+
+ # @return [String] The section describing the usage of the command.
+ #
+ def formatted_usage_description
+ message = command.description || command.summary || ''
+ message = TextWrapper.wrap_formatted_text(message,
+ TEXT_INDENT,
+ MAX_WIDTH)
+ message = prettify_message(command, message)
+ "#{signature}\n\n#{message}"
+ end
+
+ # @return [String] The signature of the command.
+ #
+ def signature
+ full_command = command.full_command
+ sub_command = signature_sub_command
+ arguments = signature_arguments
+ result = prettify_signature(full_command, sub_command, arguments)
+ result.insert(0, '$ ')
+ result.insert(0, ' ' * (TEXT_INDENT - '$ '.size))
+ end
+
+ # @return [String] The subcommand indicator of the signature.
+ #
+ def signature_sub_command
+ return '[COMMAND]' if command.default_subcommand
+ return 'COMMAND' if command.subcommands.any?
+ end
+
+ # @return [String] The arguments of the signature.
+ #
+ def signature_arguments
+ command.arguments.map do |arg|
+ names = arg.names.join('|')
+ names.concat(' ' + Argument::ELLIPSIS) if arg.repeatable?
+ arg.required? ? names : "[#{names}]"
+ end.join(' ')
+ end
+
+ # @return [String] The section describing the subcommands of the command.
+ #
+ # @note The plus sign emphasizes that the subcommands are added to
+ # the command. The square brackets convey a sense of direction
+ # and indicate the gravitational force towards the default
+ # command.
+ #
+ def formatted_subcommand_summaries
+ subcommands = subcommands_for_banner
+ subcommands.map do |subcommand|
+ name = subcommand.command
+ bullet = (name == command.default_subcommand) ? '>' : '+'
+ name = "#{bullet} #{name}"
+ pretty_name = prettify_subcommand(name)
+ entry_description(pretty_name, subcommand.summary, name.size)
+ end.join("\n")
+ end
+
+ # @return [String] The section describing the options of the command.
+ #
+ def formatted_options_description
+ options = command.options
+ options.map do |name, description|
+ pretty_name = prettify_option_name(name)
+ entry_description(pretty_name, description, name.size)
+ end.join("\n")
+ end
+
+ # @return [String] The line describing a single entry (subcommand or
+ # option).
+ #
+ def entry_description(name, description, name_width)
+ max_name_width = compute_max_name_width
+ desc_start = max_name_width + (TEXT_INDENT - 2) + DESCRIPTION_SPACES
+ result = ' ' * (TEXT_INDENT - 2)
+ result << name
+ result << ' ' * DESCRIPTION_SPACES
+ result << ' ' * (max_name_width - name_width)
+ result << TextWrapper.wrap_with_indent(description,
+ desc_start,
+ MAX_WIDTH)
+ end
+
+ # @!group Overrides
+ #-----------------------------------------------------------------------#
+
+ # @return [String] A decorated title.
+ #
+ def prettify_title(title)
+ title.ansi.underline
+ end
+
+ # @return [String] A decorated textual representation of the subcommand
+ # name.
+ #
+ def prettify_subcommand(name)
+ name.chomp.ansi.green
+ end
+
+ # @return [String] A decorated textual representation of the option name.
+ #
+ def prettify_option_name(name)
+ name.chomp.ansi.blue
+ end
+
+ # @return [String] A decorated textual representation of the command.
+ #
+ def prettify_signature(command, subcommand, argument)
+ components = [
+ [command, :green],
+ [subcommand, :green],
+ [argument, :magenta],
+ ]
+ components.reduce('') do |memo, (string, ansi_key)|
+ next memo if !string || string.empty?
+ memo << ' ' << string.ansi.apply(ansi_key)
+ end.lstrip
+ end
+
+ # @return [String] A decorated command description.
+ # + def prettify_message(command, message) + message = message.dup + command.arguments.each do |arg| + arg.names.each do |name| + message.gsub!("`#{name.gsub(/\.{3}$/, '')}`", '\0'.ansi.magenta) + end + end + command.options.each do |(name, _description)| + message.gsub!("`#{name}`", '\0'.ansi.blue) + end + message + end + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # @return [Array] The list of the subcommands to use in the + # banner. + # + def subcommands_for_banner + command.subcommands_for_command_lookup.reject do |subcommand| + subcommand.summary.nil? + end.sort_by(&:command) + end + + # @return [Fixnum] The width of the largest command name or of the + # largest option name. Used to align all the descriptions. + # + def compute_max_name_width + widths = [] + widths << command.options.map { |option| option.first.size } + widths << subcommands_for_banner.map do |cmd| + cmd.command.size + SUBCOMMAND_BULLET_SIZE + end.max + widths.flatten.compact.max || 1 + end + + module TextWrapper + # @return [String] Wraps a formatted string (e.g. markdown) by stripping + # heredoc indentation and wrapping by word to the terminal width + # taking into account a maximum one, and indenting the string. + # Code lines (i.e. indented by four spaces) are not wrapped. + # + # @param [String] string + # The string to format. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_formatted_text(string, indent = 0, max_width = 80) + paragraphs = strip_heredoc(string).split("\n\n") + paragraphs = paragraphs.map do |paragraph| + if paragraph.start_with?(' ' * 4) + paragraph.gsub!(/\n/, "\n#{' ' * indent}") + else + paragraph = wrap_with_indent(paragraph, indent, max_width) + end + paragraph.insert(0, ' ' * indent).rstrip + end + paragraphs.join("\n\n") + end + + # @return [String] Wraps a string to the terminal width taking into + # account the given indentation. + # + # @param [String] string + # The string to indent. + # + # @param [Fixnum] indent + # The number of spaces to insert before the string. + # + # @param [Fixnum] max_width + # The maximum width to use to format the string if the terminal + # is too wide. + # + def self.wrap_with_indent(string, indent = 0, max_width = 80) + if terminal_width == 0 + width = max_width + else + width = [terminal_width, max_width].min + end + + full_line = string.gsub("\n", ' ') + available_width = width - indent + space = ' ' * indent + word_wrap(full_line, available_width).split("\n").join("\n#{space}") + end + + # @return [String] Lifted straight from ActionView. Thanks guys! + # + def self.word_wrap(line, line_width) + line.gsub(/(.{1,#{line_width}})(\s+|$)/, "\\1\n").strip + end + + # @return [String] Lifted straight from ActiveSupport. Thanks guys! + # + def self.strip_heredoc(string) + if min = string.scan(/^[ \t]*(?=\S)/).min + string.gsub(/^[ \t]{#{min.size}}/, '') + else + string + end + end + + # @!group Private helpers + #---------------------------------------------------------------------# + + # @return [Fixnum] The width of the current terminal unless being piped. + # + def self.terminal_width + @terminal_width ||= + (!ENV['CLAIDE_DISABLE_AUTO_WRAP'] && + STDOUT.tty? 
&&
+ calculate_terminal_width) || 0
+ end
+
+ def self.calculate_terminal_width
+ require 'io/console'
+ STDOUT.winsize.last
+ rescue LoadError
+ (system('which tput > /dev/null 2>&1') && `tput cols`.to_i) || 0
+ rescue
+ 0
+ end
+ private_class_method :calculate_terminal_width
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb
new file mode 100644
index 0000000..184a5ea
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/command/plugin_manager.rb
@@ -0,0 +1,124 @@
+# encoding: utf-8
+
+module CLAide
+ class Command
+ # Handles plugin-related logic for the `Command` class.
+ #
+ # Plugins are loaded the first time a command is run and are identified by
+ # the prefix specified in the command class. Plugins must adopt the
+ # following conventions:
+ #
+ # - Support being loaded by a file located under the
+ # `lib/#{plugin_prefix}_plugin` relative path.
+ # - Be stored in a folder named after the plugin.
+ #
+ class PluginManager
+ # @return [Hash] The loaded plugins,
+ # grouped by plugin prefix.
+ #
+ def self.loaded_plugins
+ @loaded_plugins ||= {}
+ end
+
+ # @return [Array] Loads plugins via RubyGems looking
+ # for files named after the `PLUGIN_PREFIX_plugin` and returns the
+ # specifications of the gems loaded successfully.
+ # Plugins are required safely.
+ #
+ def self.load_plugins(plugin_prefix)
+ loaded_plugins[plugin_prefix] ||=
+ plugin_gems_for_prefix(plugin_prefix).map do |spec, paths|
+ spec if safe_require(paths)
+ end.compact
+ end
+
+ # @return [Array] The RubyGems specifications for the
+ # loaded plugins.
+ #
+ def self.specifications
+ loaded_plugins.values.flatten.uniq
+ end
+
+ # @return [Array] The RubyGems specifications for the
+ # installed plugins that match the given `plugin_prefix`.
+ #
+ def self.installed_specifications_for_prefix(plugin_prefix)
+ loaded_plugins[plugin_prefix] ||
+ plugin_gems_for_prefix(plugin_prefix).map(&:first)
+ end
+
+ # @return [Array] The list of the plugins whose root path appears
+ # in the backtrace of an exception.
+ #
+ # @param [Exception] exception
+ # The exception to analyze.
+ #
+ def self.plugins_involved_in_exception(exception)
+ specifications.select do |gemspec|
+ exception.backtrace.any? do |line|
+ full_require_paths_for(gemspec).any? do |plugin_path|
+ line.include?(plugin_path)
+ end
+ end
+ end.map(&:name)
+ end
+
+ # @group Helper Methods
+
+ # @return [Array<[Gem::Specification, Array<String>]>]
+ # Returns an array of tuples containing the specifications and
+ # plugin files to require for a given plugin prefix.
+ #
+ def self.plugin_gems_for_prefix(prefix)
+ glob = "#{prefix}_plugin#{Gem.suffix_pattern}"
+ Gem::Specification.latest_specs(true).map do |spec|
+ matches = spec.matches_for_glob(glob)
+ [spec, matches] unless matches.empty?
+ end.compact
+ end
+
+ # Requires the given paths.
+ # If any exception occurs it is caught and an
+ # informative message is printed.
+ #
+ # @param [Array<String>] paths
+ # The paths to require.
+ #
+ # @return [Bool] Whether requiring succeeded.
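+ #
+ # Editor's note (illustrative, not part of the gem; the plugin gem named
+ # below is hypothetical):
+ #
+ #   PluginManager.load_plugins('claide')
+ #   PluginManager.specifications.map(&:name) #=> ['claide-example-plugin']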
+ # + def self.safe_require(paths) + paths.each do |path| + begin + require(path) + rescue Exception => exception # rubocop:disable RescueException + message = "\n---------------------------------------------" + message << "\nError loading plugin file `#{path}`.\n" + message << "\n#{exception.class} - #{exception.message}" + message << "\n#{exception.backtrace.join("\n")}" + message << "\n---------------------------------------------\n" + warn message.ansi.yellow + return false + end + end + + true + end + + def self.full_require_paths_for(gemspec) + if gemspec.respond_to?(:full_require_paths) + return gemspec.full_require_paths + end + + # RubyGems < 2.2 + gemspec.require_paths.map do |require_path| + if require_path.include?(gemspec.full_gem_path) + require_path + else + File.join(gemspec.full_gem_path, require_path) + end + end + end + private_class_method :full_require_paths_for + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/gem_version.rb new file mode 100644 index 0000000..cdb020c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/gem_version.rb @@ -0,0 +1,7 @@ +module CLAide + # @return [String] + # + # CLAide’s version, following [semver](http://semver.org). + # + VERSION = '1.1.0'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/help.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/help.rb new file mode 100644 index 0000000..7d3b183 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/help.rb @@ -0,0 +1,58 @@ +# encoding: utf-8 + +module CLAide + require 'claide/informative_error' + + # The exception class that is raised to indicate a help banner should be + # shown while running {Command.run}. + # + class Help < StandardError + include InformativeError + + # @return [String] The banner containing the usage instructions of the + # command to show in the help. + # + attr_reader :banner + + # @return [String] An optional error message that will be shown before the + # help banner. + # + attr_reader :error_message + + # @param [String] banner @see banner + # @param [String] error_message @see error_message + # + # @note If an error message is provided, the exit status, used to + # terminate the program with, will be set to `1`, otherwise a {Help} + # exception is treated as not being a real error and exits with `0`. + # + def initialize(banner, error_message = nil) + @banner = banner + @error_message = error_message + @exit_status = @error_message.nil? ? 0 : 1 + end + + # @return [String] The optional error message, colored in red if + # {Command.ansi_output} is set to `true`. + # + def formatted_error_message + if error_message + message = "[!] #{error_message}" + prettify_error_message(message) + end + end + + # @return [String] + # + def prettify_error_message(message) + message.ansi.red + end + + # @return [String] The optional error message, combined with the help + # banner of the command. 
+ #
+ def message
+ [formatted_error_message, banner].compact.join("\n\n")
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/informative_error.rb b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/informative_error.rb
new file mode 100644
index 0000000..8a92bd0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/claide-1.1.0/lib/claide/informative_error.rb
@@ -0,0 +1,21 @@
+# encoding: utf-8
+
+module CLAide
+ # Including this module into an exception class will ensure that when raised,
+ # while running {Command.run}, only the message of the exception will be
+ # shown to the user. Unless disabled with the `--verbose` flag.
+ #
+ # In addition, the message will be colored red, if {Command.ansi_output}
+ # is set to `true`.
+ #
+ module InformativeError
+ # @return [Numeric] The exit status code that should be used to terminate
+ # the program with. Defaults to `1`.
+ #
+ attr_writer :exit_status
+
+ def exit_status
+ @exit_status ||= 1
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/CHANGELOG.md
new file mode 100644
index 0000000..9e2f258
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/CHANGELOG.md
@@ -0,0 +1,7920 @@
+# Installation & Update
+
+To install or update CocoaPods see this [guide](https://guides.cocoapods.org/using/index.html).
+
+To install release candidates run `[sudo] gem install cocoapods --pre`
+
+## 1.12.1 (2023-04-18)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Xcode 14.3 fix: Pass the -f option when resolving the path to the symlinked source.
+ [Chris Vasselli](https://github.com/chrisvasselli)
+ [#11828](https://github.com/CocoaPods/CocoaPods/pull/11828)
+ [#11808](https://github.com/CocoaPods/CocoaPods/issues/11808)
+
+* Fix typo in validation for `--validation-dir` help message
+ [Austin Evans](https://github.com/ajevans99)
+ [#11857](https://github.com/CocoaPods/CocoaPods/issues/11857)
+
+* Xcode 14.3 fix: `pod lib lint` warning generation from main.m.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#11846](https://github.com/CocoaPods/CocoaPods/issues/11846)
+
+## 1.12.0 (2023-02-27)
+
+##### Enhancements
+
+* Add ability to specify the `validation-dir` during `lint`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11773](https://github.com/CocoaPods/CocoaPods/pull/11773)
+
+* Correctly handle `.docc` documentation in source_files.
+ [haifengkao](https://github.com/haifengkao)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11438](https://github.com/CocoaPods/CocoaPods/pull/11438)
+ [#10885](https://github.com/CocoaPods/CocoaPods/issues/10885)
+
+* Re-use the same path lists for pods that share the same root.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11417](https://github.com/CocoaPods/CocoaPods/pull/11417)
+
+* Integrate `parallelizable` scheme DSL option.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11399](https://github.com/CocoaPods/CocoaPods/pull/11399)
+
+* Use `${DEVELOPMENT_LANGUAGE}` as the default `CFBundleDevelopmentRegion` value in any generated `Info.plist`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10950](https://github.com/CocoaPods/CocoaPods/pull/10950)
+
+* Fix setting `LD_RUNTIME_SEARCH_PATHS` for aggregate targets that include dynamic xcframeworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11158](https://github.com/CocoaPods/CocoaPods/pull/11158)
+
+* Add method for formatting licenses for acknowledgements generation.
+ [Raihaan Shouhell](https://github.com/res0nance)
+ [#10940](https://github.com/CocoaPods/CocoaPods/pull/10940)
+
+* Add the ability to download pods in parallel
+ [Seth Friedman](https://github.com/sethfri)
+ [#11232](https://github.com/CocoaPods/CocoaPods/pull/11232)
+
+* Include subprojects in the plugin post-install hook context
+ [Eric Amorde](https://github.com/amorde)
+ [#11224](https://github.com/CocoaPods/CocoaPods/pull/11224)
+
+* Ensure the order of slices passed to the `install_xcframework` script (in the "Copy XCFrameworks" script build phase) is stable.
+ [Olivier Halligon](https://github.com/AliSoftware)
+ [#11707](https://github.com/CocoaPods/CocoaPods/pull/11707)
+
+##### Bug Fixes
+
+* Fix incremental installation when a development pod is deleted.
+ [John Szumski](https://github.com/jszumski)
+ [#11438](https://github.com/CocoaPods/CocoaPods/pull/11681)
+
+* Clean sandbox when a pod switches from remote to local.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11213](https://github.com/CocoaPods/CocoaPods/pull/11213)
+
+* Run post install hooks when "skip Pods.xcodeproj generation" option is set
+ [Elton Gao](https://github.com/gyfelton)
+ [#11073](https://github.com/CocoaPods/CocoaPods/pull/11073)
+
+* Change minimal required version of ruby-macho to 2.3.0.
+ [xuzhongping](https://github.com/xuzhongping)
+ [#10390](https://github.com/CocoaPods/CocoaPods/issues/10390)
+
+* Add .gitignores to the banana and snake fixtures
+ [Seth Friedman](https://github.com/sethfri)
+ [#11235](https://github.com/CocoaPods/CocoaPods/pull/11235)
+
+* Fix publishing podspecs with watchOS support on Xcode 14
+ [Justin Martin](https://github.com/justinseanmartin)
+ [#11660](https://github.com/CocoaPods/CocoaPods/pull/11660)
+
+## 1.11.3 (2022-03-16)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix script breaking when attempting to print a warning.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#11251](https://github.com/CocoaPods/CocoaPods/issues/11251)
+
+* Do not consider podspec_repo when analyzing sandbox for changes.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10985](https://github.com/CocoaPods/CocoaPods/pull/10985)
+
+* Rewrite XCFramework slice selection using plist metadata.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#11229](https://github.com/CocoaPods/CocoaPods/pull/11229)
+
+* Fix setting `LD_RUNTIME_SEARCH_PATHS` for aggregate targets that include dynamic xcframeworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#11158](https://github.com/CocoaPods/CocoaPods/pull/11158)
+
+* Add catch for YAML syntax error to prevent crash in `cdn_url?` check.
+ [Kanstantsin Shautsou](https://github.com/KostyaSha)
+ [#11010](https://github.com/CocoaPods/CocoaPods/issues/11010)
+
+* Fix static Swift XCFramework import paths.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#11058](https://github.com/CocoaPods/CocoaPods/issues/10058)
+ [#11093](https://github.com/CocoaPods/CocoaPods/pull/11093)
+
+## 1.11.2 (2021-09-13)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not validate modular header dependencies for pre-built Swift pods.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10912](https://github.com/CocoaPods/CocoaPods/issues/10912)
+
+
+## 1.11.1 (2021-09-12)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Handle spec repo urls with user info when determining if they are CDN.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10941](https://github.com/CocoaPods/CocoaPods/issues/10941)
+
+* Set `INFOPLIST_FILE` build setting to `$(SRCROOT)/App/App-Info.plist` during lint.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10927](https://github.com/CocoaPods/CocoaPods/issues/10927)
+
+* Set `PRODUCT_BUNDLE_IDENTIFIER` for generated app during lint.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10933](https://github.com/CocoaPods/CocoaPods/issues/10933)
+
+
+## 1.11.0 (2021-09-01)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.11.0.rc.1 (2021-08-25)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Correctly process multiple `xcframeworks` a pod provides.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10378](https://github.com/CocoaPods/CocoaPods/issues/10378)
+
+
+## 1.11.0.beta.2 (2021-08-11)
+
+##### Enhancements
+
+* Integrate ODR categories into projects.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10855](https://github.com/CocoaPods/CocoaPods/pull/10855)
+
+##### Bug Fixes
+
+* Pass correct paths for `select_slice` method.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10430](https://github.com/CocoaPods/CocoaPods/issues/10430)
+
+
+## 1.11.0.beta.1 (2021-08-09)
+
+##### Enhancements
+
+* Add support for integrating on demand resources.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [JunyiXie](https://github.com/JunyiXie)
+ [#9606](https://github.com/CocoaPods/CocoaPods/issues/9606)
+ [#10845](https://github.com/CocoaPods/CocoaPods/pull/10845)
+
+* Integrate `project_header_files` specified by specs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9820](https://github.com/CocoaPods/CocoaPods/issues/9820)
+
+* Mark RealityComposer-projects (`.rcproject`) files defined in resources for compilation.
+ [Hendrik von Prince](https://github.com/parallaxe)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10793](https://github.com/CocoaPods/CocoaPods/pull/10793)
+
+* Integrate test specs and app specs of pre-built pods.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10795](https://github.com/CocoaPods/CocoaPods/pull/10795)
+
+* Add support for `before_headers` and `after_headers` script phase DSL (see the sketch below).
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10770](https://github.com/CocoaPods/CocoaPods/issues/10770)
+
+* Fix touch on a missing directory for dSYM copy phase script.
+ [alvarollmenezes](https://github.com/alvarollmenezes)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10488](https://github.com/CocoaPods/CocoaPods/issues/10488)
+
+* Check the podfile sources and plugin sources when printing warnings without explicitly using the master source.
+ [gonghonglou](https://github.com/gonghonglou)
+ [#10764](https://github.com/CocoaPods/CocoaPods/pull/10764)
+
+* Use relative paths in copy dsyms script.
+ [Mickey Knox](https://github.com/knox)
+ [#10583](https://github.com/CocoaPods/CocoaPods/pull/10583)
+
+* Use `OpenURI.open_uri` instead of `open` for URIs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10597](https://github.com/CocoaPods/CocoaPods/issues/10597)
+
+* Set minimum supported Ruby version to 2.6.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#10412](https://github.com/CocoaPods/CocoaPods/pull/10412)
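+
+An editor's sketch of the `before_headers` script phase DSL referenced above; the pod name, phase name, and script are illustrative.
+
+```ruby
+Pod::Spec.new do |s|
+  s.name         = 'MyLib' # hypothetical pod name
+  s.version      = '1.0.0'
+  s.source_files = 'Sources/**/*.{h,m}'
+
+  # Run a shell script before the pod target's 'Copy Headers' build phase.
+  s.script_phase = {
+    :name => 'Generate Headers',
+    :script => 'echo "generating headers before the Copy Headers phase"',
+    :execution_position => :before_headers
+  }
+end
+```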
+
+* Improve compatibility with ActiveSupport 6
+ [Jun Jiang](https://github.com/jasl)
+ [#10364](https://github.com/CocoaPods/CocoaPods/pull/10364)
+
+* Add a `pre_integrate_hook` API (see the Podfile sketch below)
+ [dcvz](https://github.com/dcvz)
+ [#9935](https://github.com/CocoaPods/CocoaPods/pull/9935)
+
+* Rewrite the only place dependent on `typhoeus`.
+ [Jun Jiang](https://github.com/jasl), [Igor Makarov](https://github.com/igor-makarov)
+ [#10346](https://github.com/CocoaPods/CocoaPods/pull/10346)
+
+* Add a `--update-sources` option to `pod repo push` so one can ensure sources are up-to-date.
+ [Elton Gao](https://github.com/gyfelton)
+ [Justin Martin](https://github.com/justinseanmartin)
+
+* Installing a local (`:path`) pod that defines script phases will no longer
+ produce warnings.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Allow building app & test spec targets that depend on a library that uses
+ Swift without requiring an empty Swift file be present.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Add flag to ignore prerelease versions when reporting latest version for outdated pods.
+ [cltnschlosser](https://github.com/cltnschlosser)
+ [#9916](https://github.com/CocoaPods/CocoaPods/pull/9916)
+
+* Add possibility to skip modulemap generation
+ [till0xff](https://github.com/till0xff)
+ [#10235](https://github.com/CocoaPods/CocoaPods/issues/10235)
+
+* Add a `--version` option to `pod spec cat` and `pod spec which` for listing the podspec of a specific version
+ [pietbrauer](https://github.com/pietbrauer)
+ [#10609](https://github.com/CocoaPods/CocoaPods/pull/10609)
+
+##### Bug Fixes
+
+* Fix resource variant groups in static frameworks
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#10834](https://github.com/CocoaPods/CocoaPods/pull/10834)
+ [#10605](https://github.com/CocoaPods/CocoaPods/issues/10605)
+
+* Fix adding embed frameworks script phase to unit test targets if xcframeworks are present.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10652](https://github.com/CocoaPods/CocoaPods/issues/10652)
+
+* Remove unused `install_xcframework_library` code.
+ [Gio Lodi](https://github.com/mokagio)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10831](https://github.com/CocoaPods/CocoaPods/pull/10831)
+
+* Validate vendored library names after they have been expanded.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10832](https://github.com/CocoaPods/CocoaPods/pull/10832)
+
+* Place frameworks from xcframeworks into a unique folder name to avoid duplicate outputs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10106](https://github.com/CocoaPods/CocoaPods/issues/10106)
+
+* Update pod in Pods folder when changing the pod from branch to version in Podfile.
+ [gonghonglou](https://github.com/gonghonglou)
+ [#10825](https://github.com/CocoaPods/CocoaPods/pull/10825)
+
+* Bump addressable dependency to 2.8.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10802](https://github.com/CocoaPods/CocoaPods/issues/10802)
+
+* Dedup bcsymbolmap paths found from multiple vendored frameworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10373](https://github.com/CocoaPods/CocoaPods/issues/10373)
+
+* Correctly filter dependencies for pod variants across different platforms.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10777](https://github.com/CocoaPods/CocoaPods/issues/10777)
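+
+A minimal Podfile sketch of the `pre_integrate_hook` mentioned above, assuming a hypothetical app target `MyApp` and pod `MyLib`:
+
+```ruby
+# Podfile
+platform :ios, '14.0'
+
+target 'MyApp' do # hypothetical user target
+  pod 'MyLib'
+end
+
+pre_integrate do |installer|
+  # Runs after pods are downloaded and installed, but before the
+  # generated projects are integrated into the user's workspace.
+  puts "Integrating #{installer.pod_targets.count} pod target(s)"
+end
+```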
+
+* Generate default `Info.plist` for consumer app during validation.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8570](https://github.com/CocoaPods/CocoaPods/issues/8570)
+
+* Fix lint subspec error when the name of a subspec starts with the pod name.
+ [XianpuMeng](https://github.com/XianpuMeng)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9906](https://github.com/CocoaPods/CocoaPods/issues/9906)
+
+* Update `ruby-macho` gem version to support 1.x and 2.x.
+ [Eric Chamberlain](https://github.com/PeqNP)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10390](https://github.com/CocoaPods/CocoaPods/issues/10390)
+
+* Respect `--configuration` option when analyzing via `pod lib lint --analyze`.
+ [Jenn Magder](https://github.com/jmagman)
+ [#10476](https://github.com/CocoaPods/CocoaPods/issues/10476)
+
+* Do not add dependencies to 'Link Binary With Libraries' phase.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10133](https://github.com/CocoaPods/CocoaPods/pull/10133)
+
+* Ensure cache integrity on concurrent installations.
+ [Erik Blomqvist](https://github.com/codiophile)
+ [#10013](https://github.com/CocoaPods/CocoaPods/issues/10013)
+
+* Force a clean install if installation options change.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#10016](https://github.com/CocoaPods/CocoaPods/pull/10016)
+
+* Correctly detect that a prebuilt pod uses Swift.
+ [Elton Gao](https://github.com/gyfelton)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8649](https://github.com/CocoaPods/CocoaPods/issues/8649)
+
+* Fix: ensure cached spec paths are unique
+ [SolaWing](https://github.com/SolaWing)
+ [#10231](https://github.com/CocoaPods/CocoaPods/issues/10231)
+
+* Set `knownRegions` on generated projects with localized resources to prevent Xcode from re-saving projects to disk.
+ [Eric Amorde](https://github.com/amorde)
+ [#10290](https://github.com/CocoaPods/CocoaPods/pull/10290)
+
+* Serialize schemes that do not need to be rewritten by Xcode.
+ [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.10.2 (2021-07-28)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix errors when archiving a Catalyst app which depends on a pod which uses `header_mappings_dir`.
+ [Thomas Goyne](https://github.com/tgoyne)
+ [#10224](https://github.com/CocoaPods/CocoaPods/pull/10224)
+
+* Fix missing `-ObjC` for static XCFrameworks - take 2
+ [Paul Beusterien](https://github.com/paulb777)
+ [#10459](https://github.com/CocoaPods/CocoaPods/issues/10459)
+
+* Change URL validation failure to a note
+ [Paul Beusterien](https://github.com/paulb777)
+ [#10291](https://github.com/CocoaPods/CocoaPods/issues/10291)
+
+
+## 1.10.1 (2021-01-07)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix library name in LD `-l` flags for XCFrameworks containing libraries
+ [Wes Campaigne](https://github.com/Westacular)
+ [#10165](https://github.com/CocoaPods/CocoaPods/issues/10165)
+
+* Fix file extension replacement for resource paths when using static frameworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#10206](https://github.com/CocoaPods/CocoaPods/issues/10206)
+
+* Fix processing of xcassets resources when pod target is static framework
+ [Federico Trimboli](https://github.com/fedetrim)
+ [#10175](https://github.com/CocoaPods/CocoaPods/pull/10175)
+ [#10170](https://github.com/CocoaPods/CocoaPods/issues/10170)
+
+* Fix missing `-ObjC` for static XCFrameworks
+ [Paul Beusterien](https://github.com/paulb777)
+ [#10234](https://github.com/CocoaPods/CocoaPods/pull/10234)
+
+
+## 1.10.0 (2020-10-20)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Generate the correct LD `-l` flags for XCFrameworks containing libraries
+ [Wes Campaigne](https://github.com/Westacular)
+ [#10071](https://github.com/CocoaPods/CocoaPods/issues/10071)
+
+* Add support for automatically embedding XCFramework debug symbols for XCFrameworks generated with Xcode 12
+ [johntmcintosh](https://github.com/johntmcintosh)
+ [#10111](https://github.com/CocoaPods/CocoaPods/issues/10111)
+
+## 1.10.0.rc.1 (2020-09-15)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix XCFramework slice selection
+ [lowip](https://github.com/lowip)
+ [#10026](https://github.com/CocoaPods/CocoaPods/issues/10026)
+
+* Honor test spec deployment target during validation.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9999](https://github.com/CocoaPods/CocoaPods/pull/9999)
+
+* Ensure that incremental installation is able to set target dependencies for a
+ test spec that uses a custom `app_host_name` that is in a project that is not
+ regenerated.
+ [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.10.0.beta.2 (2020-08-12)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Ensure that static frameworks are not embedded
+ [Bernard Gatt](https://github.com/BernardGatt)
+ [#9943](https://github.com/CocoaPods/CocoaPods/issues/9943)
+
+* Ensure that the non-compilable resource skipping in static frameworks happens only for the pod itself
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9922](https://github.com/CocoaPods/CocoaPods/pull/9922)
+ [#9920](https://github.com/CocoaPods/CocoaPods/issues/9920)
+
+
+## 1.10.0.beta.1 (2020-07-17)
+
+##### Breaking
+
+* Bump minimum Ruby version to 2.3.3 (included with macOS High Sierra)
+ [Eric Amorde](https://github.com/amorde)
+ [#9821](https://github.com/CocoaPods/CocoaPods/issues/9821)
+
+##### Enhancements
+
+* Add the App Clip product symbol to the list of products that need embedding.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9882](https://github.com/CocoaPods/CocoaPods/pull/9882)
+
+* Allow gem to run as root when passing argument flag `--allow-root`
+ [Sean Reinhardt](https://github.com/seanreinhardtapps)
+ [#8929](https://github.com/CocoaPods/CocoaPods/issues/8929)
+
+* Warn users to delete the master specs repo if it's not explicitly used.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9871](https://github.com/CocoaPods/CocoaPods/pull/9871)
+
+* Use User Project's compatibilityVersion instead of objectVersion when
+ deciding when to use xcfilelists.
+ [Sean Reinhardt](https://github.com/seanreinhardtapps)
+ [#9140](https://github.com/CocoaPods/CocoaPods/issues/9140)
+
+* Add a `--configuration` option to `pod lib lint` and `pod spec lint`.
+ [Gereon Steffens](https://github.com/gereons)
+ [#9686](https://github.com/CocoaPods/CocoaPods/issues/9686)
+
+* Add a `post_integrate_hook` API
+ [lucasmpaim](https://github.com/lucasmpaim)
+ [#7432](https://github.com/CocoaPods/CocoaPods/issues/7432)
+
+* Set the `BUILD_LIBRARY_FOR_DISTRIBUTION` build setting if integrating with a target that has the setting set to `YES`.
+ [Juanjo López](https://github.com/juanjonol)
+ [#9232](https://github.com/CocoaPods/CocoaPods/issues/9232)
+
+* Option to lint a specified set of test_specs
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9392](https://github.com/CocoaPods/CocoaPods/pull/9392)
+
+* Add `--use-static-frameworks` lint option
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9632](https://github.com/CocoaPods/CocoaPods/pull/9632)
+
+* Exclude the local spec-repos directory from Time Machine Backups.
+ [Jakob Krigovsky](https://github.com/sonicdoe)
+ [#8308](https://github.com/CocoaPods/CocoaPods/issues/8308)
+
+##### Bug Fixes
+
+* Override Xcode 12 default for erroring on quoted imports in umbrellas.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9902](https://github.com/CocoaPods/CocoaPods/issues/9902)
+
+* Remove bitcode symbol maps from embedded framework bundles
+ [Eric Amorde](https://github.com/amorde)
+ [#9681](https://github.com/CocoaPods/CocoaPods/issues/9681)
+
+* Prevent "source changed" message for every version change when using trunk source
+ [cltnschlosser](https://github.com/cltnschlosser)
+ [#9865](https://github.com/CocoaPods/CocoaPods/issues/9865)
+
+* When pod target is a static framework, save time by copying compiled resources
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9441](https://github.com/CocoaPods/CocoaPods/pull/9441)
+
+* Re-implement `bcsymbolmap` copying to avoid duplicate outputs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [mplorentz](https://github.com/mplorentz)
+ [#9734](https://github.com/CocoaPods/CocoaPods/pull/9734)
+
+* Fix Xcode 11 warning when setting Bundle Identifier in `info_plist`
+ [Sean Reinhardt](https://github.com/seanreinhardtapps)
+ [#9536](https://github.com/CocoaPods/CocoaPods/issues/9536)
+
+* Fix `incompatible encoding regexp match` for linting non-ascii pod name
+ [banjun](https://github.com/banjun)
+ [#9765](https://github.com/CocoaPods/CocoaPods/issues/9765)
+ [#9776](https://github.com/CocoaPods/CocoaPods/pull/9776)
+
+* Fix crash when targets missing in Podfile
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9745](https://github.com/CocoaPods/CocoaPods/pull/9745)
+
+* Fix adding developer library search paths during pod validation.
+ [Nick Entin](https://github.com/NickEntin)
+ [#9736](https://github.com/CocoaPods/CocoaPods/pull/9736)
+
+* Fix an issue that caused multiple xcframework scripts to produce the same output files
+ [Eric Amorde](https://github.com/amorde)
+ [#9670](https://github.com/CocoaPods/CocoaPods/issues/9670)
+ [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720)
+
+* Fix an issue preventing framework user targets with an xcframework dependency from building successfully
+ [Eric Amorde](https://github.com/amorde)
+ [#9525](https://github.com/CocoaPods/CocoaPods/issues/9525)
+ [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720)
+
+* Fix an issue preventing xcframeworks that wrapped static libraries from linking successfully
+ [Eric Amorde](https://github.com/amorde)
+ [#9528](https://github.com/CocoaPods/CocoaPods/issues/9528)
+ [#9720](https://github.com/CocoaPods/CocoaPods/pull/9720)
+
+* Fix setting `swift_version` when the `deduplicate_targets` option is turned off.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9689](https://github.com/CocoaPods/CocoaPods/pull/9689)
+
+* Honor `prefix_header_file=false` for subspecs
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9687](https://github.com/CocoaPods/CocoaPods/pull/9687)
+
+* Do not clean user projects from sandbox.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9683](https://github.com/CocoaPods/CocoaPods/pull/9683)
+
+* Fix mapping of resource paths for app specs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9676](https://github.com/CocoaPods/CocoaPods/pull/9676)
+
+* When preserving pod paths, preserve ALL the paths
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9483](https://github.com/CocoaPods/CocoaPods/pull/9483)
+
+* Re-implement `dSYM` copying and stripping to avoid duplicate outputs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9185](https://github.com/CocoaPods/CocoaPods/issues/9185)
+
+* Add support for running tests through the scheme of the app spec host of a test spec
+ [Eric Amorde](https://github.com/amorde)
+ [#9332](https://github.com/CocoaPods/CocoaPods/issues/9332)
+
+* Fix an issue that prevented variables in test bundle scheme settings from expanding
+ [Eric Amorde](https://github.com/amorde)
+ [#9539](https://github.com/CocoaPods/CocoaPods/pull/9539)
+
+* Fix project path handling issue that caused CMake projects to be incorrect
+ [Paul Beusterien](https://github.com/paulb777)
+ [Andrew](https://github.com/mad-rain)
+ [#6268](https://github.com/CocoaPods/CocoaPods/pull/6268)
+
+* Set `Missing Localizability` setting to `'YES'` to prevent warnings in Xcode 11
+ [Eric Amorde](https://github.com/amorde)
+ [#9612](https://github.com/CocoaPods/CocoaPods/pull/9612)
+
+* Don't crash on non UTF-8 error message
+ [Kenji KATO](https://github.com/katoken-0215)
+ [#9706](https://github.com/CocoaPods/CocoaPods/pull/9706)
+
+* Fix XCFramework slice selection when having more archs in slice than requested with $ARCHS
+ [jerbob92](https://github.com/jerbob92)
+ [#9790](https://github.com/CocoaPods/CocoaPods/pull/9790)
+
+* Don't add app spec dependencies to the parent library's target in Xcode,
+ which was happening when the dependency's project was not being regenerated
+ due to incremental installation.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Add the trunk repo to the default `sources` for the `repo push` command
+ [Elf Sundae](https://github.com/ElfSundae)
+ [#9840](https://github.com/CocoaPods/CocoaPods/pull/9840)
+
+## 1.9.3 (2020-05-29)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.9.2 (2020-05-22)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.9.1 (2020-03-09)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Apply correct `SYSTEM_FRAMEWORK_SEARCH_PATHS` for `XCTUnwrap` fix.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9579](https://github.com/CocoaPods/CocoaPods/pull/9579)
+
+* Fix an issue that caused a build failure with vendored XCFrameworks on macOS
+ [Eric Amorde](https://github.com/amorde)
+ [#9572](https://github.com/CocoaPods/CocoaPods/issues/9572)
+
+* Fix an issue that prevented the correct XCFramework slice from being selected for watchOS extensions
+ [Eric Amorde](https://github.com/amorde)
+ [#9569](https://github.com/CocoaPods/CocoaPods/issues/9569)
+
+
+## 1.9.0 (2020-02-25)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Also apply Xcode 11 `XCTUnwrap` fix to library and framework targets that weakly link `XCTest`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9518](https://github.com/CocoaPods/CocoaPods/pull/9518)
+
+* Fix dSYM handling for XCFrameworks.
+ [Eric Amorde](https://github.com/amorde)
+ [#9530](https://github.com/CocoaPods/CocoaPods/issues/9530)
+
+## 1.9.0.beta.3 (2020-02-04)
+
+##### Enhancements
+
+* PathList optimizations related to file system reads.
+ [manuyavuz](https://github.com/manuyavuz)
+ [#9428](https://github.com/CocoaPods/CocoaPods/pull/9428)
+
+##### Bug Fixes
+
+* Apply Xcode 11 `XCTUnwrap` fix to library and framework targets.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9500](https://github.com/CocoaPods/CocoaPods/pull/9500)
+
+* Fix resources script when building a project from a symlink.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9423](https://github.com/CocoaPods/CocoaPods/issues/9423)
+
+* Fix `pod install` crash on projects with atypical configuration names.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9465](https://github.com/CocoaPods/CocoaPods/pull/9465)
+
+* Fix an issue that caused iOS archives to be invalid when including a vendored XCFramework
+ [Eric Amorde](https://github.com/amorde)
+ [#9458](https://github.com/CocoaPods/CocoaPods/issues/9458)
+
+* Fix a bug where an incremental install missed library resources.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9431](https://github.com/CocoaPods/CocoaPods/pull/9431)
+
+* Fix an issue that caused an incorrect warning to be emitted for CLI targets with static libraries
+ [Eric Amorde](https://github.com/amorde)
+ [#9498](https://github.com/CocoaPods/CocoaPods/issues/9498)
+
+## 1.9.0.beta.2 (2019-12-17)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix validator to properly integrate the project during `lint`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9416](https://github.com/CocoaPods/CocoaPods/pull/9416)
+
+## 1.9.0.beta.1 (2019-12-16)
+
+##### Enhancements
+
+* Support for scheme code coverage.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8921](https://github.com/CocoaPods/CocoaPods/issues/8921)
+
+* Support Swift version variants.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9230](https://github.com/CocoaPods/CocoaPods/pull/9230)
+
+* Configure dependencies per configuration.
+ [Samuel Giddins](https://github.com/segiddins)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9149](https://github.com/CocoaPods/CocoaPods/pull/9149)
+
+* Include Podfile Plugin changes for incremental installation.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#9147](https://github.com/CocoaPods/CocoaPods/pull/9147)
+
+* Integrate `use_frameworks!` linkage DSL.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9099](https://github.com/CocoaPods/CocoaPods/issues/9099)
+
+* Add support for integrating dependency file in user script phases.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9082](https://github.com/CocoaPods/CocoaPods/issues/9082)
+
+* Add support for XCFrameworks using the `vendored_frameworks` Podspec DSL (see the sketch below).
+ [Eric Amorde](https://github.com/amorde)
+ [#9148](https://github.com/CocoaPods/CocoaPods/issues/9148)
+
+##### Bug Fixes
+
+* Move `run_podfile_post_install_hooks` call to execute right before projects are saved.
+ [Yusuf Sobh](https://github.com/yusufoos)
+ [#9379](https://github.com/CocoaPods/CocoaPods/issues/9379)
+
+* Do not apply header mapping copy if the spec does not provide a header mappings dir.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9308](https://github.com/CocoaPods/CocoaPods/issues/9308)
+
+* Fix issue where workspace was missing user project references during incremental installation.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#9237](https://github.com/CocoaPods/CocoaPods/issues/9237)
+
+* Search the user's xcconfigs when figuring out whether to set `APPLICATION_EXTENSION_API_ONLY`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9233](https://github.com/CocoaPods/CocoaPods/issues/9233)
+
+* Always generate a lockfile even if project integration is disabled.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9288](https://github.com/CocoaPods/CocoaPods/issues/9288)
+
+* Fix incremental installation with plugins that include arguments with different ordering.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9170](https://github.com/CocoaPods/CocoaPods/pull/9170)
+
+* Move custom `Copy Headers` script phase for header mappings before `Compile Sources`.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9131](https://github.com/CocoaPods/CocoaPods/pull/9131)
+
+* Don't create a conflicting `LaunchScreen.storyboard` when an app spec contains a file
+ with that name in its `resources`.
+ [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.8.4 (2019-10-16)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not crash if the repos dir is not set up.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9216](https://github.com/CocoaPods/CocoaPods/issues/9216)
+
+## 1.8.3 (2019-10-04)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix crash when running on mounted filesystems
+ [Paul Beusterien](https://github.com/paulb777)
+ [#9200](https://github.com/CocoaPods/CocoaPods/pull/9200)
+
+
+## 1.8.1 (2019-09-27)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.8.0 (2019-09-23)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Include dependent vendored frameworks in linker flags
+ [Alex Coomans](https://github.com/drcapulet)
+ [#9045](https://github.com/CocoaPods/CocoaPods/pull/9045)
+
+* Correctly set deployment target for non-library specs even if the root spec does not specify one.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9153](https://github.com/CocoaPods/CocoaPods/pull/9153)
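+
+A hedged sketch of the XCFramework `vendored_frameworks` DSL referenced above; the pod name and bundle path are illustrative.
+
+```ruby
+Pod::Spec.new do |s|
+  s.name    = 'MyBinaryLib' # hypothetical prebuilt pod
+  s.version = '1.0.0'
+  # An .xcframework bundle can be vendored directly; CocoaPods selects
+  # the appropriate slice for each platform and architecture at build time.
+  s.vendored_frameworks = 'Frameworks/MyBinaryLib.xcframework'
+end
+```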
+
+* Make `APPLICATION_EXTENSION_API_ONLY` build setting not break when performing a cached incremental install.
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#8967](https://github.com/CocoaPods/CocoaPods/issues/8967)
+ [#9141](https://github.com/CocoaPods/CocoaPods/issues/9141)
+ [#9142](https://github.com/CocoaPods/CocoaPods/pull/9142)
+
+## 1.8.0.beta.2 (2019-08-27)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not verify deployment target version during resolution for non-library specs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9105](https://github.com/CocoaPods/CocoaPods/issues/9105)
+
+* Add `USE_RECURSIVE_SCRIPT_INPUTS_IN_SCRIPT_PHASES = YES` to all `.xcconfig`s
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#8073](https://github.com/CocoaPods/CocoaPods/issues/8073)
+ [#9125](https://github.com/CocoaPods/CocoaPods/pull/9125)
+ [cocoapods-integration-specs#248](https://github.com/CocoaPods/cocoapods-integration-specs/pull/248)
+
+* Fix iOS app spec code signing.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9110](https://github.com/CocoaPods/CocoaPods/issues/9110)
+
+* Add Apple Watch device family to resource bundles built for watchOS
+ [Aaron McDaniel](https://github.com/Spilly)
+ [#9075](https://github.com/CocoaPods/CocoaPods/issues/9075)
+
+## 1.8.0.beta.1 (2019-08-05)
+
+##### Enhancements
+
+* Allow Algolia search for CDNSource
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9015](https://github.com/CocoaPods/CocoaPods/issues/9015)
+ [#9046](https://github.com/CocoaPods/CocoaPods/pull/9046)
+ [Core#569](https://github.com/CocoaPods/Core/pull/569)
+
+* Using `repo push` now pushes to the current repo branch (`HEAD`) instead of `master`
+ [Jhonatan Avalos](https://github.com/baguio)
+ [#8630](https://github.com/CocoaPods/CocoaPods/pull/8630)
+
+* Add support for UI test specs with `test_type` value `:ui`
+ [Yavuz Nuzumlali](https://github.com/manuyavuz)
+ [#9002](https://github.com/CocoaPods/CocoaPods/pull/9002)
+ [Core#562](https://github.com/CocoaPods/Core/pull/562)
+
+* Replace git-based `MasterSource` with CDN-based `TrunkSource`
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#8923](https://github.com/CocoaPods/CocoaPods/pull/8923)
+ [Core#552](https://github.com/CocoaPods/Core/pull/552)
+
+* Integrate a pod into a custom project name if specified.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) & [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8939](https://github.com/CocoaPods/CocoaPods/pull/8939)
+
+* Performance optimization for large number of files related to cleaning sandbox directory during installation
+ [hovox](https://github.com/hovox)
+ [#8797](https://github.com/CocoaPods/CocoaPods/issues/8797)
+
+* Add support for Specification Info.plist DSL
+ [Eric Amorde](https://github.com/amorde)
+ [#8753](https://github.com/CocoaPods/CocoaPods/issues/8753)
+ [#3032](https://github.com/CocoaPods/CocoaPods/issues/3032)
+
+* Fix target definition display name for inhibit warnings message.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8935](https://github.com/CocoaPods/CocoaPods/pull/8935)
+
+* Allow using an application defined by an app spec as the app host for a test spec.
+ [jkap](https://github.com/jkap)
+ [Samuel Giddins](https://github.com/segiddins)
+ [#8654](https://github.com/CocoaPods/CocoaPods/pull/8654)
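+
+An editor's sketch of the app-spec-as-app-host wiring described in the entry above; `MyLib`, `DemoApp`, and the paths are illustrative names, not taken from these notes.
+
+```ruby
+Pod::Spec.new do |s|
+  s.name = 'MyLib' # hypothetical pod name
+
+  s.app_spec 'DemoApp' do |app|
+    app.source_files = 'Demo/**/*.swift'
+  end
+
+  s.test_spec 'Tests' do |ts|
+    ts.source_files = 'Tests/**/*.swift'
+    ts.requires_app_host = true
+    # Host the tests in the app produced by the app spec above.
+    ts.app_host_name = 'MyLib/DemoApp'
+    ts.dependency 'MyLib/DemoApp'
+  end
+end
+```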
+
+* Speed up dependency resolution when there are many requirements for the same pod
+ or many versions that do not satisfy the given requirements.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Add support for pods in abstract-only targets to be installed.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Emit a warning when attempting to integrate dynamic frameworks into command line tool targets
+ [Eric Amorde](https://github.com/amorde)
+ [#6493](https://github.com/CocoaPods/CocoaPods/issues/6493)
+
+* Always suggest `pod repo update` on dependency resolution conflict,
+ unless repo update was specifically requested.
+ [Artem Sheremet](https://github.com/dotdoom)
+ [#8768](https://github.com/CocoaPods/CocoaPods/pull/8768)
+
+* Set Default Module for Storyboards in resource bundle targets.
+ [James Treanor](https://github.com/jtreanor)
+ [#8890](https://github.com/CocoaPods/CocoaPods/pull/8890)
+
+* Print correct platform name when inferring target platform.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8916](https://github.com/CocoaPods/CocoaPods/pull/8916)
+
+* Do not re-write sandbox files if they have not changed.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8983](https://github.com/CocoaPods/CocoaPods/pull/8983)
+
+* Added option to skip Pods.xcodeproj generation
+ [Itay Brenner](https://github.com/itaybre)
+ [#8872](https://github.com/CocoaPods/CocoaPods/pull/8872)
+
+##### Bug Fixes
+
+* Update symlink script to prevent duplicate files
+ [Alex Coomans](https://github.com/drcapulet)
+ [#9035](https://github.com/CocoaPods/CocoaPods/pull/9035)
+
+* Use correct `header_mappings_dir` for subspecs
+ [Alex Coomans](https://github.com/drcapulet)
+ [#9019](https://github.com/CocoaPods/CocoaPods/pull/9019)
+
+* Make CDNSource show up in `pod repo env`
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9016](https://github.com/CocoaPods/CocoaPods/pull/9016)
+
+* Fix regenerating aggregate targets for incremental installation.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#9009](https://github.com/CocoaPods/CocoaPods/pull/9009)
+
+* Fix heuristic for determining whether the source URL to be added is CDN
+ [Igor Makarov](https://github.com/igor-makarov)
+ [#9000](https://github.com/CocoaPods/CocoaPods/issues/9000)
+ [#8999](https://github.com/CocoaPods/CocoaPods/issues/8999)
+
+* Fix error when setting `cache_root` from a config file
+ [tripleCC](https://github.com/tripleCC)
+ [#8515](https://github.com/CocoaPods/CocoaPods/issues/8515)
+
+* Set default build configurations for app / test specs when installing with
+ `integrate_targets: false`, ensuring the `Embed Frameworks` and
+ `Copy Resources` scripts will copy the necessary build artifacts.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* No longer show a warning when using an optional include (`#include?`) to
+ include the Pods .xcconfig from the base .xcconfig file
+ [Rob Hudson](https://github.com/robtimp)
+
+* Remove stale podspecs from 'Local Podspecs' when installing non-local counterparts.
+ [Pär Strindevall](https://github.com/parski)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8914](https://github.com/CocoaPods/CocoaPods/pull/8914)
+
+* Fix inheriting search paths for test targets in `init` command.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8868](https://github.com/CocoaPods/CocoaPods/issues/8868)
+
+* Allow detecting `SWIFT_VERSION` build settings from user targets when some
+ xcconfig files are missing.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Only return library itself as a framework path for library specs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9029](https://github.com/CocoaPods/CocoaPods/pull/9029)
+
+* Fix a bug that prevented dependencies in a plugin source from resolving
+ [Eric Amorde](https://github.com/amorde)
+ [#8540](https://github.com/CocoaPods/CocoaPods/issues/8540)
+
+* Store relative project and file paths in the incremental cache.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9041](https://github.com/CocoaPods/CocoaPods/pull/9041)
+
+* Use correct deployment target for test specs and app specs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9040](https://github.com/CocoaPods/CocoaPods/pull/9040)
+
+* Allow overriding custom xcconfig entries set for compiling a library when
+ specifying an app or test spec.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Pass a non-browser user agent for social media validation
+ [Dov Frankel](https://github.com/abbeycode)
+ [CocoaPods/Core#571](https://github.com/CocoaPods/Core/pull/571)
+ [#9053](https://github.com/CocoaPods/CocoaPods/pull/9053)
+ [#9049](https://github.com/CocoaPods/CocoaPods/issues/9049)
+
+* Do not add CocoaPods script phases to targets that have no paths to embed.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9069](https://github.com/CocoaPods/CocoaPods/pull/9069)
+
+
+## 1.7.5 (2019-07-19)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not pass inhibit warnings compiler flags for Swift source files.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#9013](https://github.com/CocoaPods/CocoaPods/issues/9013)
+
+
+## 1.7.4 (2019-07-09)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Handle scheme configuration for specs with unsupported platforms.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8850](https://github.com/CocoaPods/CocoaPods/issues/8850)
+
+
+## 1.7.3 (2019-06-28)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Honor the Swift version set by a dependency pod during lint.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8940](https://github.com/CocoaPods/CocoaPods/issues/8940)
+
+
+## 1.7.2 (2019-06-13)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Prevent crash when configuring schemes for subspecs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8880](https://github.com/CocoaPods/CocoaPods/issues/8880)
+
+* Attempt to use Swift version for dependencies during lint.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8764](https://github.com/CocoaPods/CocoaPods/issues/8764)
+
+
+## 1.7.1 (2019-05-30)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Stabilize product reference UUIDs to fix Xcode crashing with incremental installation.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8845](https://github.com/CocoaPods/CocoaPods/pull/8845)
+
+* Fix a 1.7.0 regression in header directory paths when using static libraries
+ [Eric Amorde](https://github.com/amorde)
+ [#8836](https://github.com/CocoaPods/CocoaPods/issues/8836)
+
+
+## 1.7.0 (2019-05-22)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix 1.7.0.rc.2 regression - Resources need to be added for test specs in library builds
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8812](https://github.com/CocoaPods/CocoaPods/pull/8812)
+
+* Configure schemes regardless of whether they are shared.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8815](https://github.com/CocoaPods/CocoaPods/pull/8815)
+
+* Update dSYM stripping string matcher for 64-bit only dSYMs.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8827](https://github.com/CocoaPods/CocoaPods/issues/8827)
+
+
+## 1.7.0.rc.2 (2019-05-15)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Don't add resources to a second test_spec pod target build phase
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8173](https://github.com/CocoaPods/CocoaPods/issues/8173)
+
+* Fix 1.7.0.rc.1 static library regression for pods with `header_dir` attribute
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8765](https://github.com/CocoaPods/CocoaPods/issues/8765)
+
+* Scope app spec dependent targets when not de-duplicating targets.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8770](https://github.com/CocoaPods/CocoaPods/pull/8770)
+
+* Fix embedding static frameworks in extensions while using `use_frameworks!`
+ [Martin Fiebig](https://github.com/mfiebig)
+ [#8798](https://github.com/CocoaPods/CocoaPods/pull/8798)
+
+
+## 1.7.0.rc.1 (2019-05-02)
+
+##### Enhancements
+
+* Replace Pods project `Dependencies` group with `Development Pods` and `Pods` groups.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8659](https://github.com/CocoaPods/CocoaPods/issues/8659)
+
+* Add an error message for :podspec pods not matching the version in Pods and on disk
+ [orta](https://github.com/orta)
+ [#8676](https://github.com/CocoaPods/CocoaPods/issues/8676)
+
+##### Bug Fixes
+
+* Allow insecure loads in requires_app_host's Info.plist
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8747](https://github.com/CocoaPods/CocoaPods/pull/8747)
+
+* Fix a regression for static libraries with a custom module name
+ [Eric Amorde](https://github.com/amorde)
+ [#8695](https://github.com/CocoaPods/CocoaPods/issues/8695)
+
+* Fix target cache key SPECS key ordering.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8657](https://github.com/CocoaPods/CocoaPods/issues/8657)
+
+* Fix regression not compiling xcdatamodeld files in static frameworks
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8702](https://github.com/CocoaPods/CocoaPods/issues/8702)
+
+* Fix: AppIcon not found when executing `pod lib lint`
+ [Itay Brenner](https://github.com/itaybre)
+ [#8648](https://github.com/CocoaPods/CocoaPods/issues/8648)
+
+
+## 1.7.0.beta.3 (2019-03-28)
+
+##### Enhancements
+
+* Adds support for referring to other podspecs during validation
+ [Orta Therox](https://github.com/orta)
+ [#8536](https://github.com/CocoaPods/CocoaPods/pull/8536)
+
+##### Bug Fixes
+
+* Deintegrate deleted targets even if `incremental_installation` is turned on.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso) & [Doug Mead](https://github.com/dmead28)
+ [#8638](https://github.com/CocoaPods/CocoaPods/pull/8638)
+
+* Reduce the probability of multiple project UUID collisions.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8636](https://github.com/CocoaPods/CocoaPods/pull/8636)
+
+* Resolved an issue that could cause spec repo updates to fail on CI servers.
+ [rpecka](https://github.com/rpecka)
+ [#7317](https://github.com/CocoaPods/CocoaPods/issues/7317)
+
+
+## 1.7.0.beta.2 (2019-03-08)
+
+##### Enhancements
+
+* Integrate `xcfilelist` input/output paths for script phases.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8585](https://github.com/CocoaPods/CocoaPods/pull/8585)
+
+##### Bug Fixes
+
+* Do not warn of `.swift-version` file being deprecated if pod does not use Swift.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8593](https://github.com/CocoaPods/CocoaPods/pull/8593)
+
+* Generate sibling targets for incremental installation.
+ [Sebastian Shanus](https://github.com/sebastianv1) & [Igor Makarov](https://github.com/igor-makarov)
+ [#8577](https://github.com/CocoaPods/CocoaPods/issues/8577)
+
+* Validator should filter out app specs from subspec analysis.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8592](https://github.com/CocoaPods/CocoaPods/pull/8592)
+
+* Do not warn that `--swift-version` parameter is deprecated.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8586](https://github.com/CocoaPods/CocoaPods/pull/8586)
+
+* Include `bcsymbolmap` file output paths into script phase.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8563](https://github.com/CocoaPods/CocoaPods/pull/8563)
+
+* Copy `bcsymbolmap` files into correct destination to avoid invalid app archives
+ [florianbuerger](https://github.com/florianbuerger)
+ [#8558](https://github.com/CocoaPods/CocoaPods/pull/8558)
+
+* Fix: unset GIT_DIR and GIT_WORK_TREE for git operations
+ [tripleCC](https://github.com/tripleCC)
+ [#7958](https://github.com/CocoaPods/CocoaPods/issues/7958)
+
+* Fix crash when running `pod update` with `--sources` and `--project-directory`
+ [tripleCC](https://github.com/tripleCC)
+ [#8565](https://github.com/CocoaPods/CocoaPods/issues/8565)
+
+* Do not use spaces around variable assignment in generated embed framework script
+ [florianbuerger](https://github.com/florianbuerger)
+ [#8548](https://github.com/CocoaPods/CocoaPods/pull/8548)
+
+* Do not link specs into user targets that are only used by app specs.
+ [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.7.0.beta.1 (2019-02-22)
+
+##### Enhancements
+
+* Copy `bcsymbolmap` files of a vendored framework.
+ [dacaiguoguogmail](https://github.com/dacaiguoguogmail)
+ [#8461](https://github.com/CocoaPods/CocoaPods/issues/8461)
+
+* Set the path of development pod groups to root directory of the Pod
+ [Eric Amorde](https://github.com/amorde)
+ [#8445](https://github.com/CocoaPods/CocoaPods/pull/8445)
+ [#8503](https://github.com/CocoaPods/CocoaPods/pull/8503)
+
+* Incremental Pod Installation
+ Enables only regenerating projects for pod targets that have changed since the previous installation.
+ This feature is gated by the `incremental_installation` option (see the Podfile sketch below).
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8319](https://github.com/CocoaPods/CocoaPods/issues/8319)
+
+* Podfile: Add a CDNSource automatically if it's not present, just like git source.
+ Convenience for CDNSource when specified as `source 'https://cdn.jsdelivr.net/cocoa/'` (also shown in the sketch below).
+ If the source doesn't exist, it will be created.
+ [igor-makarov](https://github.com/igor-makarov)
+ [#8362](https://github.com/CocoaPods/CocoaPods/pull/8362)
+
+* Scheme configuration support.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7577](https://github.com/CocoaPods/CocoaPods/issues/7577)
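+
+A hedged Podfile sketch combining the CDN source and the `incremental_installation` option above with the `generate_multiple_pod_projects` option described further below; the target and pod names are illustrative.
+
+```ruby
+# Podfile
+install! 'cocoapods',
+         :incremental_installation => true,
+         :generate_multiple_pod_projects => true
+
+# The CDN source from the entry above; created on demand if missing.
+source 'https://cdn.jsdelivr.net/cocoa/'
+
+target 'MyApp' do # hypothetical user target
+  pod 'MyLib'
+end
+```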
+
+* Add support for `.rb` extension for Podfiles
+ [Eric Amorde](https://github.com/amorde)
+ [#8171](https://github.com/CocoaPods/CocoaPods/issues/8171)
+
+* Add CDN repo Source to allow retrieving specs from a web URL.
+ [igor-makarov](https://github.com/igor-makarov)
+ [#8268](https://github.com/CocoaPods/CocoaPods/issues/8268) (partial beta solution)
+
+* Multi Pod Project Generation Support.
+ Support for splitting the pods project into a subproject per pod target, gated by the `generate_multiple_pod_projects` installation option.
+ [Sebastian Shanus](https://github.com/sebastianv1)
+ [#8253](https://github.com/CocoaPods/CocoaPods/issues/8253)
+
+* Don't add main for app specs.
+ [Derek Ostrander](https://github.com/dostrander)
+ [#8235](https://github.com/CocoaPods/CocoaPods/pull/8235)
+
+* Multiple Swift versions support
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8191](https://github.com/CocoaPods/CocoaPods/issues/8191)
+
+* Adds app spec project integration.
+ [Derek Ostrander](https://github.com/dostrander)
+ [#8158](https://github.com/CocoaPods/CocoaPods/pull/8158)
+
+* Add documentation for the Podfile installation options
+ [Eric Amorde](https://github.com/amorde)
+ [#8198](https://github.com/CocoaPods/CocoaPods/issues/8198)
+ [guides.cocoapods.org #142](https://github.com/CocoaPods/guides.cocoapods.org/issues/142)
+
+##### Bug Fixes
+
+* Clean up old integrated framework references.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8296](https://github.com/CocoaPods/CocoaPods/issues/8296)
+
+* Always update sources specified with the `:source` option when `--repo-update` is specified
+ [Eric Amorde](https://github.com/amorde)
+ [#8421](https://github.com/CocoaPods/CocoaPods/issues/8421)
+
+* Set `showEnvVarsInLog` for script phases only when it's disabled.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8400](https://github.com/CocoaPods/CocoaPods/pull/8400)
+
+* Fix error when executing `pod list --update --verbose`
+ [tripleCC](https://github.com/tripleCC)
+ [#8404](https://github.com/CocoaPods/CocoaPods/pull/8404)
+
+* Remove `manifest` attribute from sandbox.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8260](https://github.com/CocoaPods/CocoaPods/pull/8260)
+
+* Don't have libraries build the app spec.
+ [Derek Ostrander](https://github.com/dostrander)
+ [#8244](https://github.com/CocoaPods/CocoaPods/pull/8244)
+
+* Fix HTTPs -> HTTPS in warning message
+ [CydeWeys](https://github.com/CydeWeys)
+ [#8354](https://github.com/CocoaPods/CocoaPods/issues/8354)
+
+* Add the `FRAMEWORK_SEARCH_PATHS` necessary to import `XCTest` when it is
+ linked as a weak framework.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Treat `USER_HEADER_SEARCH_PATHS` as a plural build setting.
+ [Samuel Giddins](https://github.com/segiddins)
+ [#8451](https://github.com/CocoaPods/CocoaPods/issues/8451)
+
+* Trying to add a spec repo with a `file://` URL on Ruby 2.6 won't fail with a
+ git unknown option error.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Fixed test host delegate methods to not warn about unused arguments.
+ [Jacek Suliga](https://github.com/jmkk)
+ [#8521](https://github.com/CocoaPods/CocoaPods/pull/8521)
+
+
+## 1.6.2 (2019-05-15)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Ensure all embedded pod targets are copied over to the host target.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8608](https://github.com/CocoaPods/CocoaPods/issues/8608)
+
+
+## 1.6.1 (2019-02-21)
+
+##### Enhancements
+
+* Add `--analyze` option for the linters.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8792](https://github.com/CocoaPods/CocoaPods/issues/8792)
+
+##### Bug Fixes
+
+* Properly link system frameworks and weak frameworks into dynamic framework targets.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8493](https://github.com/CocoaPods/CocoaPods/issues/8493)
+
+
+## 1.6.0 (2019-02-07)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.6.0.rc.2 (2019-01-29)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix linking of vendored libraries and frameworks in pod targets
+ [Wes Campaigne](https://github.com/Westacular)
+ [#8453](https://github.com/CocoaPods/CocoaPods/issues/8453)
+
+
+## 1.6.0.rc.1 (2019-01-25)
+
+##### Enhancements
+
+* Generate Info.plist files for static frameworks
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8287](https://github.com/CocoaPods/CocoaPods/issues/8287)
+
+##### Bug Fixes
+
+* Do not force 64-bit architectures on Xcode 10
+ [Eric Amorde](https://github.com/amorde)
+ [#8242](https://github.com/CocoaPods/CocoaPods/issues/8242)
+
+* Fix running test specs that support iOS 8.
+ [Jeff Kelley](https://github.com/SlaunchaMan)
+ [#8286](https://github.com/CocoaPods/CocoaPods/pull/8286)
+
+* Remove linker flags that linked dynamic libraries & frameworks from the build
+ settings for pod targets.
+ [Samuel Giddins](https://github.com/segiddins)
+ [#8314](https://github.com/CocoaPods/CocoaPods/pull/8314)
+
+## 1.6.0.beta.2 (2018-10-17)
+
+##### Enhancements
+
+* Remove contraction from docs to fix rendering on the website.
+ [stevemoser](https://github.com/stevemoser)
+ [#8131](https://github.com/CocoaPods/CocoaPods/pull/8131)
+
+* Provide an installation option to preserve folder structure
+ [dacaiguoguogmail](https://github.com/dacaiguoguogmail)
+ [#8097](https://github.com/CocoaPods/CocoaPods/pull/8097)
+
+* Nest test spec host apps inside that Pod's directory for a cleaner project
+ navigator.
+ [Derek Ostrander](https://github.com/dostrander)
+
+* `mark_ruby_file_ref`: add indent width and tab width config
+ [dacaiguoguogmail](https://github.com/dacaiguoguogmail)
+
+* Print an error that will show up in Xcode's issue navigator upon unexpected
+ failures in the copy resources and embed frameworks script phases.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Validate that all generated `PBXNativeTarget`s contain source files to build,
+ so specs (including test specs) with no source files won't fail at runtime
+ due to the lack of a generated executable.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Print a better prompt message when unable to find a specification.
+ [Xinyu Zhao](https://github.com/X140Yu)
+ [#8064](https://github.com/CocoaPods/CocoaPods/issues/8064)
+
+* Silence warnings in headers for Pods with `inhibit_warnings => true` (see the Podfile sketch below)
+ [Guillaume Algis](https://github.com/guillaumealgis)
+ [#6401](https://github.com/CocoaPods/CocoaPods/pull/6401)
+
+* When resolving a locked dependency, source the spec from the locked
+ specs repository.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Slightly improve resolution speed for Podfiles that contain multiple targets
+ with the same dependencies.
+ [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Don't generate unencrypted source warnings for localhost.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8156](https://github.com/CocoaPods/CocoaPods/issues/8156)
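+
+A short Podfile sketch of the `inhibit_warnings` option referenced above; the target and pod names are illustrative.
+
+```ruby
+# Podfile
+target 'MyApp' do # hypothetical user target
+  # Silence compiler warnings coming from this one dependency...
+  pod 'NoisyLib', :inhibit_warnings => true
+  # ...or silence warnings from every pod in the target:
+  # inhibit_all_warnings!
+end
+```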
+
+* Fix linting when armv7 is included but i386 isn't.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#8129](https://github.com/CocoaPods/CocoaPods/issues/8129)
+
+* Provide an installation option to disable usage of input/output paths.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8073](https://github.com/CocoaPods/CocoaPods/issues/8073)
+
+* Scope prefix header setting to each test spec.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8046](https://github.com/CocoaPods/CocoaPods/pull/8046)
+
+* Don't add incomplete subspec subset targets for extensions.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#7850](https://github.com/CocoaPods/CocoaPods/issues/7850)
+
+* Clear out `MACH_O_TYPE` for unit test bundles that use static frameworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8031](https://github.com/CocoaPods/CocoaPods/issues/8031)
+
+* Fix `weak_frameworks` missing regression.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7872](https://github.com/CocoaPods/CocoaPods/issues/7872)
+
+* Fix line spacing for Swift error message.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8024](https://github.com/CocoaPods/CocoaPods/pull/8024)
+
+* Improve validation for test_specs on specific platforms
+ [icecrystal23](https://github.com/icecrystal23)
+ [#7009](https://github.com/CocoaPods/CocoaPods/issues/7009)
+
+* Fix running `pod outdated` with externally-sourced pods.
+ [Samuel Giddins](https://github.com/segiddins)
+ [#8025](https://github.com/CocoaPods/CocoaPods/issues/8025)
+
+* Remove codesign suppression
+ [Jaehong Kang](https://github.com/sinoru)
+ [#7606](https://github.com/CocoaPods/CocoaPods/issues/7606)
+
+
+## 1.6.0.beta.1 (2018-08-16)
+
+##### Enhancements
+
+* Every test spec will have its own xctest bundle.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [Jenn Kaplan](https://github.com/jkap)
+ [#7908](https://github.com/CocoaPods/CocoaPods/pull/7908)
+
+* Generate a separate app host per pod.
+ [Samuel Giddins](https://github.com/segiddins)
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8005](https://github.com/CocoaPods/CocoaPods/pull/8005)
+
+* Add default launch screen storyboard to test app hosts.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7971](https://github.com/CocoaPods/CocoaPods/pull/7971)
+
+* Always display downloader error message.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7625](https://github.com/CocoaPods/CocoaPods/issues/7625)
+
+* Warn instead of error when linting if `public_header_files` or
+ `private_header_files` do not match any files.
+ [Eric Amorde](https://github.com/amorde)
+ [#7427](https://github.com/CocoaPods/CocoaPods/issues/7427)
+
+* Add `--platforms` parameter to `pod spec lint` and `pod lib lint` to specify
+ which platforms to lint.
+ [Eric Amorde](https://github.com/amorde)
+ [#7783](https://github.com/CocoaPods/CocoaPods/issues/7783)
+
+* Warn if the `git://` protocol is used as the source of a pod.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7705](https://github.com/CocoaPods/CocoaPods/issues/7705)
+
+* Remove all Xcode project state from target objects,
+ improving project generation performance.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7610](https://github.com/CocoaPods/CocoaPods/pull/7610)
+
+* Improve performance of Pods project generation by skipping native targets
+ for which dependent targets have already been added.
+ [Jacek Suliga](https://github.com/jmkk)
+
+* Refactor build settings generation to perform much better on large projects.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Make sure the temporary directory used to download a pod is removed,
+ even if an error is raised.
+ [augustorsouza](https://github.com/augustorsouza)
+
+* Avoid unlocking sources on every `pod install` when there are no
+ plugin post-install hooks for performance reasons.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Change shell script relative paths to use `${PODS_ROOT}` instead of
+ `${SRCROOT}/Pods`.
+ [Whirlwind](https://github.com/Whirlwind)
+ [#7878](https://github.com/CocoaPods/CocoaPods/pull/7878)
+
+* Set the path of the Pods group in the user project.
+ [Whirlwind](https://github.com/Whirlwind)
+ [#7886](https://github.com/CocoaPods/CocoaPods/pull/7886)
+ [#6194](https://github.com/CocoaPods/CocoaPods/issues/6194)
+
+* Add a `--deployment` flag to `pod install` that errors if there are any
+ changes to the Podfile or Lockfile.
+ [Samuel Giddins](https://github.com/segiddins)
+
+* Add `--use-modular-headers` flag to the `pod spec lint`, `pod lib lint`,
+ and `pod repo push` commands.
+ [Eric Amorde](https://github.com/amorde)
+ [#7683](https://github.com/CocoaPods/CocoaPods/issues/7683)
+
+##### Bug Fixes
+
+* Scope embedded pods to their host targets by their configuration.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#8011](https://github.com/CocoaPods/CocoaPods/issues/8011)
+
+* Set the `SWIFT_VERSION` on resource bundle targets that contain compiled
+ sources and use Swift.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7950](https://github.com/CocoaPods/CocoaPods/issues/7950)
+
+* Do not ignore `--no-overwrite` parameter if a commit message is specified.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7926](https://github.com/CocoaPods/CocoaPods/issues/7926)
+
+* Generate `-ObjC` in `OTHER_LDFLAGS` for apps with static frameworks.
+ [Paul Beusterien](https://github.com/paulb777)
+ [#7946](https://github.com/CocoaPods/CocoaPods/pull/7946)
+
+* Do not display that a source was changed if it uses different casing.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7883](https://github.com/CocoaPods/CocoaPods/pull/7883)
+
+* Set `CURRENT_PROJECT_VERSION` for generated app host targets.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7825](https://github.com/CocoaPods/CocoaPods/pull/7825)
+
+* Properly follow symlinks within macOS universal frameworks.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7587](https://github.com/CocoaPods/CocoaPods/issues/7587)
+
+* Validator adds a Swift file if any of the pod targets use Swift.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7738](https://github.com/CocoaPods/CocoaPods/issues/7738)
+
+* Fix `INFOPLIST_FILE` being overridden when set in a podspec's `pod_target_xcconfig`.
+ [Eric Amorde](https://github.com/amorde)
+ [#7530](https://github.com/CocoaPods/CocoaPods/issues/7530)
+
+* Raise an error if user target `SWIFT_VERSION` is missing.
+ [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+ [#7770](https://github.com/CocoaPods/CocoaPods/issues/7770)
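+
+The user-target `SWIFT_VERSION` error above must be fixed in the app project itself. For the pod targets CocoaPods generates, a common hedged `post_install` sketch for pinning a missing Swift version looks like this; the `'4.2'` value is illustrative.
+
+```ruby
+# Podfile
+post_install do |installer|
+  installer.pods_project.targets.each do |target|
+    target.build_configurations.each do |config|
+      # Only pin a Swift version where none is set already.
+      config.build_settings['SWIFT_VERSION'] ||= '4.2'
+    end
+  end
+end
+```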
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7770](https://github.com/CocoaPods/CocoaPods/issues/7770)
+
+* Fix the umbrella header import path when `header_dir` is specified in the
+  podspec and building a static library with modular headers enabled.
+  [chuganzy](https://github.com/chuganzy)
+  [#7724](https://github.com/CocoaPods/CocoaPods/pull/7724)
+
+* Do not symlink headers that belong to test specs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7762](https://github.com/CocoaPods/CocoaPods/pull/7762)
+
+* Do not build pod target if it only contains script phases.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7746](https://github.com/CocoaPods/CocoaPods/issues/7746)
+
+* Do not try to integrate uncreated test native targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7394](https://github.com/CocoaPods/CocoaPods/issues/7394)
+
+* Attempt to parse `SWIFT_VERSION` from xcconfig during target inspection.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7731](https://github.com/CocoaPods/CocoaPods/issues/7731)
+
+* Do not crash when creating build settings for a missing user build configuration.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7698](https://github.com/CocoaPods/CocoaPods/pull/7698)
+
+* Do not overwrite App host info plist when using multiple test specs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7695](https://github.com/CocoaPods/CocoaPods/pull/7695)
+
+* Do not include test dependencies' input and output paths.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7688](https://github.com/CocoaPods/CocoaPods/pull/7688)
+
+* Skip test file accessors for `uses_swift?` and `should_build?` methods.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7671](https://github.com/CocoaPods/CocoaPods/pull/7671)
+
+* When integrating a vendored framework while building pods as static
+  libraries, public headers will be found via `FRAMEWORK_SEARCH_PATHS`
+  instead of via the sandbox headers store.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Improve performance of grouping pods by configuration.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Stop linking frameworks to static libraries to avoid warnings with the new build system.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#7570](https://github.com/CocoaPods/CocoaPods/pull/7570)
+
+* Allow `EXPANDED_CODE_SIGN_IDENTITY` to be unset.
+  [Keith Smiley](https://github.com/keith)
+  [#7708](https://github.com/CocoaPods/CocoaPods/issues/7708)
+
+* Running `pod install` with static library modules no longer causes pods to
+  be recompiled.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* A pod built as a static library linked into multiple targets will only build
+  as a module when all of the targets it is linked into have opted into it.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Use `CP_HOME_DIR` as the base for all default directories.
+  [mcfedr](https://github.com/mcfedr)
+  [#7917](https://github.com/CocoaPods/CocoaPods/pull/7917)
+
+* Exclude 32-bit architectures from Pod targets when the deployment target is
+  iOS 11.0 or higher.
+  [Eric Amorde](https://github.com/amorde)
+  [#7148](https://github.com/CocoaPods/CocoaPods/issues/7148)
+
+* Fail gracefully when the analyzer has dependencies to fetch, but has been
+  told not to fetch them.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Don't generate framework or resource scripts if they will not be used.
+  [Eric Amorde](https://github.com/amorde)
+
+* Fix a crash when loading the `macho` gem in certain environments.
+  [Eric Amorde](https://github.com/amorde)
+  [#7867](https://github.com/CocoaPods/CocoaPods/issues/7867)
+
+
+## 1.5.3 (2018-05-25)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix compatibility with RubyGems 2.7.7.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#7765](https://github.com/CocoaPods/CocoaPods/issues/7765)
+  [#7766](https://github.com/CocoaPods/CocoaPods/issues/7766)
+  [#7763](https://github.com/CocoaPods/CocoaPods/issues/7763)
+
+
+## 1.5.2 (2018-05-09)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.5.1 (2018-05-07)
+
+##### Enhancements
+
+* Improve performance of the dependency resolver by removing duplicates for dependency nodes.
+  [Jacek Suliga](https://github.com/jmkk)
+
+##### Bug Fixes
+
+* Do not include test dependencies' input and output paths.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7688](https://github.com/CocoaPods/CocoaPods/pull/7688)
+
+* Remove [system] declaration attribute from generated module maps.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7589](https://github.com/CocoaPods/CocoaPods/issues/7589)
+
+* Properly namespace Info.plist names during target installation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7611](https://github.com/CocoaPods/CocoaPods/pull/7611)
+
+* Always generate FRAMEWORK_SEARCH_PATHS for vendored_frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7591](https://github.com/CocoaPods/CocoaPods/issues/7591)
+
+* Fix modular header access to `header_dir`s.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7597](https://github.com/CocoaPods/CocoaPods/issues/7597)
+
+* Fix static framework dependent target double linking without `use_frameworks`.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7592](https://github.com/CocoaPods/CocoaPods/issues/7592)
+
+* Make modular header private header access consistent with frameworks and static libraries.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7596](https://github.com/CocoaPods/CocoaPods/issues/7596)
+
+* Inhibit warnings for all dependencies during validation except for the one being validated.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7434](https://github.com/CocoaPods/CocoaPods/issues/7434)
+
+* Prevent duplicated targets from being stripped out from the framework search paths.
+  [Liquidsoul](https://github.com/liquidsoul)
+  [#7644](https://github.com/CocoaPods/CocoaPods/pull/7644)
+
+* Fix `assetcatalog_generated_info.plist` path in copy resources phase.
+  [Maxime Le Moine](https://github.com/MaximeLM)
+  [#7590](https://github.com/CocoaPods/CocoaPods/issues/7590)
+
+
+## 1.5.0 (2018-04-04)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Escape double quotes for module map contents.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7549](https://github.com/CocoaPods/CocoaPods/pull/7549)
+
+* Fix building Swift static library test specs.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Swift static libraries can be used in targets whose search paths are inherited.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.5.0.beta.1 (2018-03-23)
+
+##### Enhancements
+
+* Add `--exclude-pods` option to `pod update` to allow excluding specific pods
+  from update (see the sketch at the end of this list).
+  [Oleksandr Kruk](https://github.com/0mega)
+  [#7334](https://github.com/CocoaPods/CocoaPods/issues/7334)
+
+* Add support for mixed Objective-C and Swift static frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7213](https://github.com/CocoaPods/CocoaPods/issues/7213)
+
+* Improve `pod install` performance for pods with exact file paths rather than glob patterns.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#7473](https://github.com/CocoaPods/CocoaPods/pull/7473)
+
+* Display a message when a pod's source has changed during installation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7464](https://github.com/CocoaPods/CocoaPods/pull/7464)
+
+* Add support for modular header search paths, including "legacy" support.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7412](https://github.com/CocoaPods/CocoaPods/pull/7412)
+
+* Set direct and transitive dependency header search paths for pod targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7116](https://github.com/CocoaPods/CocoaPods/pull/7116)
+
+* Log target names missing host for libraries.
+  [Keith Smiley](https://github.com/keith)
+  [#7346](https://github.com/CocoaPods/CocoaPods/pull/7346)
+
+* Add a `--no-overwrite` flag to `pod repo push` to disable overwriting
+  existing specs that have already been pushed (see the sketch at the end
+  of this list).
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Store which specs repo a pod comes from in the lockfile.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Add `set -u` to the copy frameworks and copy resources scripts.
+  [Keith Smiley](https://github.com/keith)
+  [#7180](https://github.com/CocoaPods/CocoaPods/pull/7180)
+
+* Allow integrating into static library targets without attempting to copy
+  resources or embed frameworks unless `UNLOCALIZED_RESOURCES_FOLDER_PATH`
+  or `FRAMEWORKS_FOLDER_PATH` is set.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Change color scheme of `pod outdated` from red-yellow-green to red-blue-green to be more colorblind-friendly.
+  [iv-mexx](https://github.com/iv-mexx)
+  [#7372](https://github.com/CocoaPods/CocoaPods/issues/7372)
+
+* Add support for integrating Swift pods as static libraries.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#6899](https://github.com/CocoaPods/CocoaPods/issues/6899)
+
+* Document the format of POD_NAMES in `pod update`.
+  [mrh-is](https://github.com/mrh-is)
+
+* Update validator to stream output as xcodebuild runs.
+  [abbeycode](https://github.com/abbeycode)
+  [#7040](https://github.com/CocoaPods/CocoaPods/issues/7040)
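+
+A minimal sketch of the two new command-line options above, driven from
+Ruby. The pod, repo, and podspec names are placeholders, and the
+comma-separated `--exclude-pods` value is an assumption modeled on other
+CocoaPods list flags:
+
+```ruby
+# Update everything except the listed pods:
+system('pod', 'update', '--exclude-pods=Alamofire') or abort('update failed')
+
+# Push a podspec, refusing to overwrite a version that was already pushed:
+system('pod', 'repo', 'push', 'my-specs', 'MyLib.podspec', '--no-overwrite') or
+  abort('push failed')
+```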
+
+##### Bug Fixes
+
+* Create a generic Info.plist file for test targets.
+  Use the Xcode default `PRODUCT_MODULE_NAME` for generated test targets.
+  [Paul Zabelin](https://github.com/paulz)
+  [#7506](https://github.com/CocoaPods/CocoaPods/issues/7506)
+
+* Prevent `xcassets` compilation from stomping over the app's `xcassets`.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7003](https://github.com/CocoaPods/CocoaPods/issues/7003)
+
+* Fix script phase output path for `.xcasset` resources.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7511](https://github.com/CocoaPods/CocoaPods/issues/7511)
+
+* Fix `PRODUCT_MODULE_NAME` for generated test targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7507](https://github.com/CocoaPods/CocoaPods/issues/7507)
+
+* Ensure `SWIFT_VERSION` is set for test-only pod targets during validation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7498](https://github.com/CocoaPods/CocoaPods/issues/7498)
+
+* Fix iOS test native target signing settings.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7504](https://github.com/CocoaPods/CocoaPods/pull/7504)
+
+* Clear input/output paths if they exceed an arbitrary limit.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7362](https://github.com/CocoaPods/CocoaPods/issues/7362)
+
+* Warn instead of throwing an exception when a development pod specifies an invalid license file path.
+  [Eric Amorde](https://github.com/amorde)
+  [#7377](https://github.com/CocoaPods/CocoaPods/issues/7377)
+
+* Better static frameworks transitive dependency error checking.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7352](https://github.com/CocoaPods/CocoaPods/issues/7352)
+
+* Always update input/output paths even if they are empty.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7368](https://github.com/CocoaPods/CocoaPods/pull/7368)
+
+* Unique all available pre-release versions when displaying them.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#7353](https://github.com/CocoaPods/CocoaPods/pull/7353)
+
+* Do not attempt compilation for pods with no sources when skipping import validation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7336](https://github.com/CocoaPods/CocoaPods/issues/7336)
+
+* Avoid adding copy resources and frameworks script phases when those phases
+  would not copy anything.
+  [Keith Smiley](https://github.com/keith)
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Speed up `pod install` times by up to 50% for very large projects.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Avoid dependency resolution conflicts when a pod depends upon a local pod.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fix legacy header search paths that broke due to #7116 and #7412.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7445](https://github.com/CocoaPods/CocoaPods/pull/7445)
+
+* Stop adding header search paths that do not contain any headers.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Do not warn when an HTTP source uses the `file:///` URI scheme.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7460](https://github.com/CocoaPods/CocoaPods/issues/7460)
+
+* Remove bogus `PROVISIONING_PROFILE_SPECIFIER` value from Pods project.
+  [Ruenzuo](https://github.com/Ruenzuo)
+  [#6964](https://github.com/CocoaPods/CocoaPods/issues/6964)
+
+* Fix returning absolute paths from glob, which fixes an issue with static frameworks and public headers.
+  [Morgan McKenzie](https://github.com/rmtmckenzie)
+  [#7463](https://github.com/CocoaPods/CocoaPods/issues/7463)
+
+* Improve messages when integrating Swift pods as static libraries.
+  [Marcelo Fabri](https://github.com/marcelofabri)
+  [#7495](https://github.com/CocoaPods/CocoaPods/issues/7495)
+
+
+## 1.4.0 (2018-01-18)
+
+##### Enhancements
+
+* Show warning when Pod source uses unencrypted HTTP.
+  [KrauseFx](https://github.com/KrauseFx)
+  [#7293](https://github.com/CocoaPods/CocoaPods/issues/7293)
+
+##### Bug Fixes
+
+* Do not include test spec resources and framework paths of dependent targets in test scripts.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7318](https://github.com/CocoaPods/CocoaPods/pull/7318)
+
+* Restore `development_pod_targets` public method in installer.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7292](https://github.com/CocoaPods/CocoaPods/pull/7292)
+
+* Fix resolution when multiple sources provide the same pods, and there are
+  (potential) dependencies between the sources.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#7031](https://github.com/CocoaPods/CocoaPods/issues/7031)
+
+* Ensure that externally-sourced (e.g. local & git) pods are allowed to resolve
+  to prerelease versions.
+  [segiddins](https://github.com/segiddins)
+
+
+## 1.4.0.rc.1 (2017-12-16)
+
+##### Enhancements
+
+* Integrate `swift_version` DSL support into pod targets (see the sketch at
+  the end of this list).
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7134](https://github.com/CocoaPods/CocoaPods/issues/7134)
+
+* Add color indication to output of `pod outdated`.
+  [iv-mexx](https://github.com/iv-mexx)
+  [#7204](https://github.com/CocoaPods/CocoaPods/pull/7204)
+
+* Set syntax of podspecs from development pods to Ruby when appropriate.
+  [Eric Amorde](https://github.com/amorde)
+  [#7278](https://github.com/CocoaPods/CocoaPods/pull/7278)
+
+* Add support for editing the podspec, license, README, and docs of local development pods.
+  [Eric Amorde](https://github.com/amorde)
+  [#7093](https://github.com/CocoaPods/CocoaPods/pull/7093)
+
+* Show warning when SDK provider tries to push a version with an unencrypted HTTP source.
+  [KrauseFx](https://github.com/KrauseFx)
+  [#7250](https://github.com/CocoaPods/CocoaPods/pull/7250)
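+
+A minimal podspec sketch of the `swift_version` DSL above; every name, URL,
+and version here is a placeholder:
+
+```ruby
+Pod::Spec.new do |s|
+  s.name          = 'MyLib'
+  s.version       = '1.0.0'
+  s.summary       = 'Example pod that declares its Swift version.'
+  s.homepage      = 'https://example.com/MyLib'
+  s.license       = { :type => 'MIT' }
+  s.author        = 'Example Author'
+  s.source        = { :git => 'https://example.com/MyLib.git', :tag => s.version.to_s }
+  s.source_files  = 'Sources/**/*.swift'
+
+  # Consulted by the installer and validator when picking the Swift version.
+  s.swift_version = '4.0'
+end
+```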
+
+##### Bug Fixes
+
+* Deduplicate output path file names for resources and frameworks.
+  [Eric Amorde](https://github.com/amorde)
+  [#7259](https://github.com/CocoaPods/CocoaPods/issues/7259)
+
+* Allow installation of a pod with its own Swift version on multiple targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7261](https://github.com/CocoaPods/CocoaPods/pull/7261)
+
+* Quote framework names in OTHER_LDFLAGS.
+  [Tyler Stromberg](https://github.com/AquaGeek)
+  [#7185](https://github.com/CocoaPods/CocoaPods/issues/7185)
+
+* Fix static framework archive regression from #7187.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7225](https://github.com/CocoaPods/CocoaPods/issues/7225)
+
+* Install resource bundles and embed frameworks for every
+  test target's configuration.
+  [Nickolay Tarbayev](https://github.com/tarbayev)
+  [#7012](https://github.com/CocoaPods/CocoaPods/issues/7012)
+
+* Set `SWIFT_VERSION` to test native targets during validation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7216](https://github.com/CocoaPods/CocoaPods/pull/7216)
+
+* Add copied resources' paths to "Copy Pods Resources" output file list.
+  [igor-makarov](https://github.com/igor-makarov)
+  [#6936](https://github.com/CocoaPods/CocoaPods/issues/6936)
+
+* Do not link system frameworks of test specs to library targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7205](https://github.com/CocoaPods/CocoaPods/pull/7205)
+
+* Be more lenient when stripping frameworks and dSYMs for non-fat binaries.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7196](https://github.com/CocoaPods/CocoaPods/issues/7196)
+  [#5854](https://github.com/CocoaPods/CocoaPods/issues/5854)
+
+* Do not display script phases warnings multiple times per platform.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7193](https://github.com/CocoaPods/CocoaPods/pull/7193)
+
+* Fix unnecessary whole project recompilation with static frameworks.
+  [Vladimir Gorbenko](https://github.com/volodg)
+  [#7187](https://github.com/CocoaPods/CocoaPods/issues/7187)
+
+* Prevent passing empty string to git when running `pod repo update --silent`.
+  [Jon Sorrells](https://github.com/jonsorrells)
+  [#7176](https://github.com/CocoaPods/CocoaPods/issues/7176)
+
+* Do not propagate test spec frameworks and libraries into pod target xcconfig.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7172](https://github.com/CocoaPods/CocoaPods/issues/7172)
+
+* Set language to Swift for test native targets if any dependencies use Swift.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7170](https://github.com/CocoaPods/CocoaPods/issues/7170)
+
+* Prevent multiple script phases from stripping vendored dSYM.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7166](https://github.com/CocoaPods/CocoaPods/pull/7166)
+
+* Static library headers should all be `Project` in Xcode header build phase.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#4496](https://github.com/CocoaPods/CocoaPods/issues/4496)
+
+* Fix archiving apps with static frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7158](https://github.com/CocoaPods/CocoaPods/issues/7158)
+
+
+## 1.4.0.beta.2 (2017-10-24)
+
+##### Enhancements
+
+* Integrate execution position for shell script phases (see the sketch at the
+  end of this list).
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7101](https://github.com/CocoaPods/CocoaPods/pull/7101)
+
+* Add support to integrate script phases from podspecs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7092](https://github.com/CocoaPods/CocoaPods/pull/7092)
+
+* Add support for preventing pch file generation with the `skip_pch` podspec attribute.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7044](https://github.com/CocoaPods/CocoaPods/pull/7044)
+
+* Add app host support for test specs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6953](https://github.com/CocoaPods/CocoaPods/issues/6953)
+
+* Add support for resources in source static library frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7100](https://github.com/CocoaPods/CocoaPods/pull/7100)
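+
+A minimal sketch of a podspec-integrated script phase combining the two
+entries above (podspec script phases, #7092, and execution positions,
+#7101). The metadata and the phase itself are placeholders:
+
+```ruby
+Pod::Spec.new do |s|
+  s.name         = 'MyLib'
+  s.version      = '1.0.0'
+  s.summary      = 'Example pod with a script phase.'
+  s.homepage     = 'https://example.com/MyLib'
+  s.license      = { :type => 'MIT' }
+  s.author       = 'Example Author'
+  s.source       = { :git => 'https://example.com/MyLib.git', :tag => '1.0.0' }
+  s.source_files = 'Sources/**/*.{h,m,swift}'
+
+  # :execution_position accepts :before_compile, :after_compile, or :any.
+  s.script_phase = {
+    :name => 'Generate Build Info',
+    :script => 'echo "built at $(date)" > "${PODS_TARGET_SRCROOT}/build_info.txt"',
+    :execution_position => :before_compile
+  }
+end
+```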
+
+##### Bug Fixes
+
+* Copy `.swiftmodule` into static_frameworks to enable access to Swift static frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7140](https://github.com/CocoaPods/CocoaPods/issues/7140)
+
+* Fix docs for prefix header paths.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7149](https://github.com/CocoaPods/CocoaPods/pull/7149)
+
+* Fix integration of `prefix_header_file` with test specs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7147](https://github.com/CocoaPods/CocoaPods/pull/7147)
+
+* Set the default Swift version to 3.2 during validation.
+  [Victor Hugo Barros](https://github.com/heyzooi)
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7136](https://github.com/CocoaPods/CocoaPods/pull/7136)
+
+* Better warning message for which Swift version was used during validation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7121](https://github.com/CocoaPods/CocoaPods/issues/7121)
+
+* Fix static_framework Swift pod dependencies and implement pod access to dependent vendored_framework modules.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7117](https://github.com/CocoaPods/CocoaPods/issues/7117)
+
+* Strip vendored dSYMs during embed script phase.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7111](https://github.com/CocoaPods/CocoaPods/issues/7111)
+
+* Warn when a pod that was added or changed includes script phases.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7110](https://github.com/CocoaPods/CocoaPods/pull/7110)
+
+* Build pod targets with script phases and integrate them properly.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7104](https://github.com/CocoaPods/CocoaPods/pull/7104)
+
+* Do not set a `CODE_SIGN_IDENTITY` for macOS app hosts or xctest bundles.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7103](https://github.com/CocoaPods/CocoaPods/pull/7103)
+
+* Fix framework and resources paths caching.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7068](https://github.com/CocoaPods/CocoaPods/pull/7068)
+
+* Build subspecs in static frameworks without error.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7058](https://github.com/CocoaPods/CocoaPods/pull/7058)
+
+* Ensure `SYMROOT` is properly set for all user configurations.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7081](https://github.com/CocoaPods/CocoaPods/issues/7081)
+
+
+## 1.4.0.beta.1 (2017-09-24)
+
+##### Enhancements
+
+* Do not force include the master spec repo if plugins provide sources.
+  [Eric Amorde](https://github.com/amorde)
+  [#7033](https://github.com/CocoaPods/CocoaPods/pull/7033)
+
+* Add custom shell script integration from Podfile.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6820](https://github.com/CocoaPods/CocoaPods/pull/6820)
+
+* Show full requirement trees when a version conflict is encountered during
+  dependency resolution.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Add support for source static library frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#6811](https://github.com/CocoaPods/CocoaPods/pull/6811)
+
+* Add Private Header support to static frameworks.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#6969](https://github.com/CocoaPods/CocoaPods/pull/6969)
+
+* For source static frameworks, include frameworks from dependent targets and libraries in OTHER_LDFLAGS.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#6988](https://github.com/CocoaPods/CocoaPods/pull/6988)
+
+##### Bug Fixes
+
+* Deduplicate test specs correctly from pod variants and targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7036](https://github.com/CocoaPods/CocoaPods/pull/7036)
+
+* Do not merge `pod_target_xcconfig` from test specs into non-test xcconfigs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7037](https://github.com/CocoaPods/CocoaPods/pull/7037)
+
+* Wrap `$PODS_CONFIGURATION_BUILD_DIR` and `$PODS_BUILD_DIR` with curly braces.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7048](https://github.com/CocoaPods/CocoaPods/pull/7048)
+
+* Fix common paths sometimes being calculated incorrectly.
+  [amorde](https://github.com/amorde)
+  [#7028](https://github.com/CocoaPods/CocoaPods/pull/7028)
+
+* Do not code sign OSX targets for testing bundles.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [#7027](https://github.com/CocoaPods/CocoaPods/pull/7027)
+
+* Ensure a unique ID is generated for each resource bundle.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [#7015](https://github.com/CocoaPods/CocoaPods/pull/7015)
+
+* Do not include settings from file accessors of test specs into aggregate xcconfigs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7019](https://github.com/CocoaPods/CocoaPods/pull/7019)
+
+* Use the resolver to identify which pod targets are test-only.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [Justin Martin](https://github.com/justinseanmartin)
+  [#7014](https://github.com/CocoaPods/CocoaPods/pull/7014)
+
+* Perform code signing on xctest bundles in the Pods project generated by a test spec.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [#7013](https://github.com/CocoaPods/CocoaPods/pull/7013)
+
+* Exclude test resource and framework paths from aggregate targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#7000](https://github.com/CocoaPods/CocoaPods/pull/7000)
+
+* Wrap platform warning message with quotes.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6968](https://github.com/CocoaPods/CocoaPods/pull/6968)
+
+* Wire dependencies for pod targets not part of any aggregate target.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6948](https://github.com/CocoaPods/CocoaPods/pull/6948)
+
+* Fix validation warnings when using `--swift-version`.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#6971](https://github.com/CocoaPods/CocoaPods/pull/6971)
+
+* Fix xcconfig boolean merging when substrings include yes or no.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#6997](https://github.com/CocoaPods/CocoaPods/pull/6997)
+
+* Filter out subset dependent targets from FRAMEWORK_SEARCH_PATHS.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7002](https://github.com/CocoaPods/CocoaPods/pull/7002)
+
+* Propagate HEADER_SEARCH_PATHS settings from search paths.
+  [Paul Beusterien](https://github.com/paulb777)
+  [#7006](https://github.com/CocoaPods/CocoaPods/pull/7006)
+
+
+## 1.3.1 (2017-08-02)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not use `--delete` when copying resources to app target folder.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6927](https://github.com/CocoaPods/CocoaPods/issues/6927)
+
+
+## 1.3.0 (2017-08-02)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Ensure transitive dependencies are linked to test targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6917](https://github.com/CocoaPods/CocoaPods/pull/6917)
+
+* Properly install pod targets with test specs within subspecs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6915](https://github.com/CocoaPods/CocoaPods/pull/6915)
+
+* Add `--skip-tests` support to the `pod repo push` command.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6893](https://github.com/CocoaPods/CocoaPods/pull/6893)
+
+
+## 1.3.0.rc.1 (2017-07-27)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Cache result of resource and framework paths.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6893](https://github.com/CocoaPods/CocoaPods/pull/6893)
+
+* Ensure source URLs are set when a spec has subspecs with dependencies.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6888](https://github.com/CocoaPods/CocoaPods/pull/6888)
+
+
+## 1.3.0.beta.3 (2017-07-19)
+
+##### Enhancements
+
+* Protect rsync tmp files from being deleted if two targets sync at the same time.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [#6873](https://github.com/CocoaPods/CocoaPods/pull/6873)
+
+* Include test schemes within library schemes.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6765](https://github.com/CocoaPods/CocoaPods/issues/6765)
+
+* Truncate extra groups in Development Pods when they are parents of all files.
+  [Eric Amorde](https://github.com/amorde)
+  [#6814](https://github.com/CocoaPods/CocoaPods/pull/6814)
+
+* Do not re-write generated files that have not changed.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [dingjingpisces2015](https://github.com/dingjingpisces2015)
+  [#6825](https://github.com/CocoaPods/CocoaPods/pull/6825)
+
+##### Bug Fixes
+
+* Set the test xcconfig file to resource bundles used only by tests.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6886](https://github.com/CocoaPods/CocoaPods/pull/6886)
+
+* Integrate test targets to embed frameworks and resources.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6828](https://github.com/CocoaPods/CocoaPods/pull/6828)
+
+* Ensure resource bundle and test dependencies are set for test native targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6829](https://github.com/CocoaPods/CocoaPods/pull/6829)
+
+* Provide a better error message when references are missing for non-source files.
+  [David Airapetyan](https://github.com/davidair)
+  [#4887](https://github.com/CocoaPods/CocoaPods/issues/4887)
+
+* Select unique module_name(s) across host target's and embedded targets' pod targets.
+  [Anand Biligiri](https://github.com/abiligiri)
+  [#6711](https://github.com/CocoaPods/CocoaPods/issues/6711)
+
+
+## 1.3.0.beta.2 (2017-06-22)
+
+##### Enhancements
+
+* Add inputs and outputs for resources script phase.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6806](https://github.com/CocoaPods/CocoaPods/pull/6806)
+
+* Simplify logic around framework input and output paths.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6803](https://github.com/CocoaPods/CocoaPods/pull/6803)
+
+* Add inputs and outputs to check manifest lock and embed framework script phases.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6797](https://github.com/CocoaPods/CocoaPods/issues/6797)
+
+##### Bug Fixes
+
+* Remove 0.34 migration for a small boost in `pod install` time.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6783](https://github.com/CocoaPods/CocoaPods/pull/6783)
+
+* Use a cache when figuring out if a pod target is test-only.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6787](https://github.com/CocoaPods/CocoaPods/pull/6787)
+
+
+## 1.3.0.beta.1 (2017-06-06)
+
+##### Enhancements
+
+* Add validator support to run test specs during lint.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6753](https://github.com/CocoaPods/CocoaPods/pull/6753)
+
+* Fix to include proper runtime search paths for test native targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6727](https://github.com/CocoaPods/CocoaPods/pull/6727)
+
+* Aggregate targets should not include pod targets only used by tests.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6726](https://github.com/CocoaPods/CocoaPods/pull/6726)
+
+* Add support for test target creation in the Pods project generator.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6703](https://github.com/CocoaPods/CocoaPods/pull/6703)
+
+* Copy dSYM for vendored frameworks.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#1698](https://github.com/CocoaPods/CocoaPods/issues/1698)
+
+* Prevent the need for a `.swift-version` file in Objective-C pods.
+  [Austin Emmons](https://github.com/atreat)
+  [#6742](https://github.com/CocoaPods/CocoaPods/issues/6742)
+
+* Add an ipc command, `podfile_json`, that converts a Podfile to JSON
+  (see the sketch below).
+  [Dacaiguoguo](https://github.com/dacaiguoguogmail)
+  [#6779](https://github.com/CocoaPods/CocoaPods/pull/6779)
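+
+A minimal sketch of the new ipc command above, called from Ruby. The
+changelog names the command `podfile_json`; the hyphenated command-line
+spelling below and the `Podfile` path are assumptions:
+
+```ruby
+require 'json'
+
+# Ask CocoaPods to serialize the Podfile in the current directory to JSON.
+output = `pod ipc podfile-json Podfile`
+abort('pod ipc failed') unless $?.success?
+
+podfile = JSON.parse(output)
+puts podfile.keys # e.g. "target_definitions", "sources", ...
+```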
+
+##### Bug Fixes
+
+* Link `swiftSwiftOnoneSupport` for test xcconfigs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6769](https://github.com/CocoaPods/CocoaPods/pull/6769)
+
+* Do not double add search paths to test xcconfig from parent.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6768](https://github.com/CocoaPods/CocoaPods/pull/6768)
+
+* Ensure product name for tests is not overridden by custom build settings.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6766](https://github.com/CocoaPods/CocoaPods/pull/6766)
+
+* Do not use the same product name for test targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6762](https://github.com/CocoaPods/CocoaPods/pull/6762)
+
+* Use unique temp folder during lint for parallel execution.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5117](https://github.com/CocoaPods/CocoaPods/issues/5117)
+
+* Stop adding `$(inherited)` for every static library linked.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6756](https://github.com/CocoaPods/CocoaPods/pull/6756)
+
+* Settings for dependent targets should include the parent target for test xcconfigs.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6755](https://github.com/CocoaPods/CocoaPods/pull/6755)
+
+* Only check for valid Swift version for pod targets that use Swift.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6733](https://github.com/CocoaPods/CocoaPods/pull/6733)
+
+* Fix pod install error from 1.2.1 when working with static lib-only projects.
+  [Ben Asher](https://github.com/benasher44)
+  [#6673](https://github.com/CocoaPods/CocoaPods/issues/6673)
+
+* Use `git!` when executing the `push` command, in order to raise an
+  informative error message and set the exit code.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6700](https://github.com/CocoaPods/CocoaPods/pull/6700)
+
+* Make copy resources echoes always return true to work around an issue where
+  Xcode stops handling build script output greater than ~440 characters
+  (rdar://30607704).
+  [postmechanical](https://github.com/postmechanical)
+  [#6595](https://github.com/CocoaPods/CocoaPods/issues/6595)
+
+* Inherit pod-defined values for `SWIFT_ACTIVE_COMPILATION_CONDITIONS`.
+  [Louis D'hauwe](https://github.com/louisdh)
+  [#6629](https://github.com/CocoaPods/CocoaPods/pull/6629)
+
+* Delete extraneous files in rsync destination.
+  [jgavris](https://github.com/jgavris)
+  [#6694](https://github.com/CocoaPods/CocoaPods/pull/6694)
+
+
+## 1.2.1 (2017-04-11)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Avoid cloning the master specs repo when it is not needed for `pod lib lint`.
+  [Alfredo Delli Bovi](https://github.com/adellibovi)
+  [#6154](https://github.com/CocoaPods/CocoaPods/issues/6154)
+
+
+## 1.2.1.rc.1 (2017-04-05)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Fix generating `LD_RUNPATH_SEARCH_PATHS` without `use_frameworks!` but consuming a vendored dynamic artifact.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6596](https://github.com/CocoaPods/CocoaPods/issues/6596)
+
+* Fix building with static lib subprojects (previously only supported framework subprojects).
+  [Ben Asher](https://github.com/benasher44)
+  [#5830](https://github.com/CocoaPods/CocoaPods/issues/5830)
+  [#6306](https://github.com/CocoaPods/CocoaPods/issues/6306)
+
+* Fix regression from #6457 to ensure a correct error message is given when a spec is not found.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6457](https://github.com/CocoaPods/CocoaPods/issues/6457)
+
+* Provide a better error message if a podspec is found but cannot be parsed.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6457](https://github.com/CocoaPods/CocoaPods/issues/6457)
+
+* Only share pod target xcscheme if present during validation.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6558](https://github.com/CocoaPods/CocoaPods/pull/6558)
+
+* Properly compile storyboard for watch device family.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6516](https://github.com/CocoaPods/CocoaPods/issues/6516)
+
+* Support git progress for `pod repo update` and `pod install --repo-update`.
+  [Alfredo Delli Bovi](https://github.com/adellibovi)
+  [#6525](https://github.com/CocoaPods/CocoaPods/issues/6525)
+
+* Return a new exit code (31) when a spec is not found.
+  [Alfredo Delli Bovi](https://github.com/adellibovi)
+  [#6033](https://github.com/CocoaPods/CocoaPods/issues/6033)
+
+* Provide a better error message when a spec is not found.
+  [Alfredo Delli Bovi](https://github.com/adellibovi)
+  [#6033](https://github.com/CocoaPods/CocoaPods/issues/6033)
+
+
+## 1.2.1.beta.1 (2017-03-08)
+
+##### Enhancements
+
+* Use red text when pod installation fails.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6534](https://github.com/CocoaPods/CocoaPods/issues/6534)
+
+* Provide installation option to disable multiple pod sources warnings
+  (see the sketch at the end of this list).
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6497](https://github.com/CocoaPods/CocoaPods/pull/6497)
+
+* Use the colored2 gem instead of colored.
+  [Orta Therox](https://github.com/orta)
+  [xcodeproj#463](https://github.com/CocoaPods/Xcodeproj/pull/463)
+
+* Cache results of dynamic_binary?
+  [Ken Wigginton](https://github.com/hailstorm350)
+  [#6434](https://github.com/CocoaPods/CocoaPods/pull/6434)
+
+* Created `NOMENCLATURE.md` to keep a glossary of the most common terms used in CocoaPods.
+  [Rob Contreras](https://github.com/robcontreras)
+  [#2379](https://github.com/CocoaPods/CocoaPods/pull/2379)
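+
+A minimal Podfile sketch of the installation option above. The
+`warn_for_multiple_pod_sources` key matches the option introduced by
+#6497, but treat the exact name as an assumption on other versions; the
+second source, target, and pod are placeholders:
+
+```ruby
+install! 'cocoapods', :warn_for_multiple_pod_sources => false
+
+source 'https://github.com/CocoaPods/Specs.git'
+source 'https://example.com/my-private-specs.git' # hypothetical second source
+
+target 'MyApp' do
+  pod 'Alamofire'
+end
+```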
+
+##### Bug Fixes
+
+* Ensure Core Data models get added to the compile sources phase for header generation.
+  [Ben Asher](https://github.com/benasher44)
+  [#6259](https://github.com/CocoaPods/CocoaPods/issues/6259)
+
+* Do not crash when attempting to install a pod with no supported targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6465](https://github.com/CocoaPods/CocoaPods/issues/6465)
+
+* Correctly handle `OTHER_LDFLAGS` for targets with inherited search paths and source pods.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6481](https://github.com/CocoaPods/CocoaPods/pull/6481)
+
+* Use `${PODS_PODFILE_DIR_PATH}` for the generated manifest lock script phase.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5499](https://github.com/CocoaPods/CocoaPods/issues/5499)
+
+* Do not generate `UIRequiredDeviceCapabilities` for `tvOS` Info.plists.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6193](https://github.com/CocoaPods/CocoaPods/issues/6193)
+
+* Fix integration with vendored static frameworks and libraries.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6477](https://github.com/CocoaPods/CocoaPods/pull/6477)
+
+* Use `${SRCROOT}` rather than `${PODS_ROOT}` in the generated manifest lock script phase.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5499](https://github.com/CocoaPods/CocoaPods/issues/5499)
+
+* Fix build phase resource references to point at PBXVariantGroups where relevant.
+  [Wes Campaigne](https://github.com/Westacular)
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6373](https://github.com/CocoaPods/CocoaPods/issues/6373)
+
+* Correctly set runtime search paths for OSX unit test bundles when using frameworks.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6435](https://github.com/CocoaPods/CocoaPods/pull/6435)
+
+* Add `--skip-import-validation` to skip linking a pod during lint.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5670](https://github.com/CocoaPods/CocoaPods/issues/5670)
+
+* Updated the colored2 gem (previous version removed from rubygems.org).
+  [Ben Asher](https://github.com/benasher44)
+  [#6533](https://github.com/CocoaPods/CocoaPods/pull/6533)
+
+
+## 1.2.0 (2017-01-28)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Do not link static frameworks to targets that use `inherit! search_paths`.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6065](https://github.com/CocoaPods/CocoaPods/issues/6065)
+
+
+## 1.2.0.rc.1 (2017-01-13)
+
+##### Enhancements
+
+* Show git progress when downloading the CocoaPods Specs repo.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5937](https://github.com/CocoaPods/CocoaPods/issues/5937)
+
+* Move Installer target verification into the Xcode namespace.
+  [Danielle Tomlinson](https://github.com/DanToml)
+  [#5607](https://github.com/CocoaPods/CocoaPods/pull/5607)
+
+##### Bug Fixes
+
+* None.
+
+
+## 1.2.0.beta.3 (2016-12-28)
+
+##### Enhancements
+
+* `pod repo push` now accepts the `--swift-version` argument.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6217](https://github.com/CocoaPods/CocoaPods/issues/6217)
+
+* Output Swift targets when multiple versions of Swift are detected.
+  [Justin Martin](https://github.com/justinseanmartin)
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6191](https://github.com/CocoaPods/CocoaPods/issues/6191)
+
+* [update] Add `--sources` option to only update pods from the specified repos.
+  [Mark Schall](https://github.com/maschall)
+  [#5809](https://github.com/CocoaPods/CocoaPods/pull/5809)
+
+* Add aggregated search paths targets to vendored build settings.
+  [Chris Ortman](https://github.com/chrisortman)
+  [Johannes Plunien](https://github.com/plu)
+  [#5512](https://github.com/CocoaPods/CocoaPods/issues/5512)
+
+* Use fetch and reset rather than a pull when updating specs repos.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#6206](https://github.com/CocoaPods/CocoaPods/pull/6206)
+
+##### Bug Fixes
+
+* Fix default LD_RUNPATH_SEARCH_PATHS for host targets.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#6006](https://github.com/CocoaPods/CocoaPods/issues/6006)
+
+* Fix codesigning issues when targets have spaces.
+  [Sam Gammon](https://github.com/sgammon)
+  [#6153](https://github.com/CocoaPods/CocoaPods/issues/6153)
+
+* Raise an exception if unable to find a reference for a path, and handle symlink references.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5427](https://github.com/CocoaPods/CocoaPods/issues/5427)
+
+* Re-escape backslashes in the embed_frameworks generator.
+  [Harlan Haskins](https://github.com/harlanhaskins)
+  [#6121](https://github.com/CocoaPods/CocoaPods/issues/6121)
+
+* Escape spaces in CONFIGURATION_BUILD_DIR when creating the header folders symlink.
+  [Dmitry Obukhov](https://github.com/stel)
+  [#6146](https://github.com/CocoaPods/CocoaPods/pull/6146)
+
+* Fail gracefully when downloading a podspec in `pod spec lint` fails.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Remove the `const_missing` hack for `Pod::SourcesManager`.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fix a code signing issue causing lint failure on macOS.
+  [Paul Cantrell](https://github.com/pcantrell)
+  [#5645](https://github.com/CocoaPods/CocoaPods/issues/5645)
+
+* Raise an exception when using a git version prior to 1.8.5.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#6078](https://github.com/CocoaPods/CocoaPods/issues/6078)
+
+* Fix framework support for frameworks in sub-projects.
+  [Ben Asher](https://github.com/benasher44)
+  [#6123](https://github.com/CocoaPods/CocoaPods/issues/6123)
+
+* Remove errors that prevent host/extension target mismatches, which Xcode will warn about.
+  [Ben Asher](https://github.com/benasher44)
+  [#6173](https://github.com/CocoaPods/CocoaPods/issues/6173)
+
+
+## 1.2.0.beta.1 (2016-10-28)
+
+##### Enhancements
+
+* Generate `PODS_TARGET_SRCROOT` build setting for each pod target.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5375](https://github.com/CocoaPods/CocoaPods/issues/5375)
+
+* Add support for running CocoaPods on Linux.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Use native Ruby ASCII plist parsing and serialization, removing dependencies
+  on FFI, Xcode, and macOS.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Run codesigning in parallel in the embed frameworks build phase when
+  `COCOAPODS_PARALLEL_CODE_SIGN` is set to `true` (see the sketch below).
+  [Ben Asher](https://github.com/benasher44)
+  [#6088](https://github.com/CocoaPods/CocoaPods/pull/6088)
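+
+A minimal sketch of opting into the parallel code signing above. The
+variable is read by the generated embed-frameworks script at build time,
+so it should be set in the environment that runs the build; the workspace
+and scheme names are placeholders:
+
+```ruby
+ENV['COCOAPODS_PARALLEL_CODE_SIGN'] = 'true'
+system('xcodebuild', '-workspace', 'MyApp.xcworkspace',
+       '-scheme', 'MyApp', 'build') or abort('build failed')
+```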
+
+##### Bug Fixes
+
+* Add target-device tvOS in copy_resources generator.
+  [Konrad Feiler](https://github.com/Bersaelor)
+  [#6052](https://github.com/CocoaPods/CocoaPods/issues/6052)
+
+* Read the correct `SWIFT_VERSION` when generating target XCConfigs.
+  [Ben Asher](https://github.com/benasher44)
+  [#6067](https://github.com/CocoaPods/CocoaPods/issues/6067)
+
+* Don't explicitly set `ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES` to NO.
+  [Ben Asher](https://github.com/benasher44)
+  [#6064](https://github.com/CocoaPods/CocoaPods/issues/6064)
+
+* Redefine FOUNDATION_EXPORT for C-only pods in umbrella header.
+  [Chris Ballinger](https://github.com/chrisballinger)
+  [#6024](https://github.com/CocoaPods/CocoaPods/issues/6024)
+
+
+## 1.1.1 (2016-10-20)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Strip newlines from .swift-version files.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#6059](https://github.com/CocoaPods/CocoaPods/pull/6059)
+
+
+## 1.1.0 (2016-10-19)
+
+##### Enhancements
+
+* Use host target for frameworks of XPC services.
+  [Ingmar Stein](https://github.com/IngmarStein)
+  [#6029](https://github.com/CocoaPods/CocoaPods/pull/6029)
+
+* Use Swift 3.0 by default during validation.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#6042](https://github.com/CocoaPods/CocoaPods/pull/6042)
+
+* Exit with non-zero exit status if `pod repo update` fails.
+  [Uku Loskit](https://github.com/UkuLoskit)
+  [#6037](https://github.com/CocoaPods/CocoaPods/issues/6037)
+
+* The validator has an API for accessing which version of Swift was used.
+  [Orta Therox](https://github.com/orta)
+  [#6049](https://github.com/CocoaPods/CocoaPods/pull/6049)
+
+##### Bug Fixes
+
+* Redefine FOUNDATION_EXPORT for C-only pods in umbrella header.
+  [Chris Ballinger](https://github.com/chrisballinger)
+  [#6024](https://github.com/CocoaPods/CocoaPods/issues/6024)
+
+
+## 1.1.0.rc.3 (2016-10-11)
+
+##### Enhancements
+
+* Cache result of `inhibit_warnings` and `include_in_build_config` to speed up `pod install`.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5934](https://github.com/CocoaPods/CocoaPods/pull/5934)
+
+* Tell users about the .swift-version file on validation failures.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5951](https://github.com/CocoaPods/CocoaPods/pull/5951)
+
+* Improve performance of `PathList.read_file_system`.
+  [Heath Borders](https://github.com/hborders)
+  [#5890](https://github.com/CocoaPods/CocoaPods/issues/5890)
+
+* Cache result of `uses_swift` and `should_build` to speed up `pod install`.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5837](https://github.com/CocoaPods/CocoaPods/pull/5837)
+
+* Remove uses of `cd` in generated scripts.
+  [Ben Asher](https://github.com/benasher44)
+  [#5959](https://github.com/CocoaPods/CocoaPods/pull/5959)
+
+* Error with a helpful message when integrating a pod into targets that have mismatched Swift versions.
+  [Ben Asher](https://github.com/benasher44)
+  [#5984](https://github.com/CocoaPods/CocoaPods/pull/5984)
+
+* Allow users to share pods between Objective-C and Swift targets.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5984](https://github.com/CocoaPods/CocoaPods/pull/5984)
+
+* Allow setting the linting Swift version via `--swift-version=VERSION`.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5989](https://github.com/CocoaPods/CocoaPods/pull/5989)
+
+* Greenify pod install success message.
+  [Stephen Hayes](https://github.com/schayes04)
+  [#5713](https://github.com/CocoaPods/CocoaPods/issues/5713)
+
+* Update EMBEDDED_CONTENT_CONTAINS_SWIFT flag behaviour based on Xcode version.
+  [codymoorhouse](https://github.com/codymoorhouse)
+  [#5732](https://github.com/CocoaPods/CocoaPods/issues/5732)
+
+##### Bug Fixes
+
+* Remove special handling for Messages apps.
+  [Ben Asher](https://github.com/benasher44)
+  [#5860](https://github.com/CocoaPods/CocoaPods/issues/5860)
+
+* Ensure Messages apps have an embed frameworks build phase.
+  [Ben Asher](https://github.com/benasher44)
+  [#5860](https://github.com/CocoaPods/CocoaPods/issues/5860)
+
+* Fix linting of private pods when using libraries.
+  [Stefan Pühringer](https://github.com/b-ray)
+  [#5891](https://github.com/CocoaPods/CocoaPods/issues/5891)
+
+
+## 1.1.0.rc.2 (2016-09-13)
+
+##### Enhancements
+
+* Use the SWIFT_VERSION when linting pods. To lint with Swift 3.0,
+  add a Swift version file: `echo "3.0" >> .swift-version`.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5841](https://github.com/CocoaPods/CocoaPods/pull/5841)
+
+##### Bug Fixes
+
+* Correctly pass Pod::VERSION in `pod lib create`.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5840](https://github.com/CocoaPods/CocoaPods/issues/5840)
+
+
+## 1.1.0.rc.1 (2016-09-10)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Wrap generated import headers with `__OBJC__` to fix C-only pods.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#5291](https://github.com/CocoaPods/CocoaPods/issues/5291)
+
+* Prevent crash when generating acknowledgements when the license type is not specified.
+  [Marcelo Fabri](https://github.com/marcelofabri)
+  [#5826](https://github.com/CocoaPods/CocoaPods/issues/5826)
+
+* Pass full path to App.xcworkspace for spec validation, and use `git -C` for `pod repo push` git ops.
+  [Ben Asher](https://github.com/benasher44)
+  [#5805](https://github.com/CocoaPods/CocoaPods/issues/5805)
+
+
+## 1.1.0.beta.2 (2016-09-03)
+
+##### Enhancements
+
+* Remove references to the pre-1.0 Migrator.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5635](https://github.com/CocoaPods/CocoaPods/pull/5635)
+
+* Improve performance of dependency resolution.
+  [yanzhiwei147](https://github.com/yanzhiwei147)
+  [#5510](https://github.com/CocoaPods/CocoaPods/pull/5510)
+
+* Add support for building Messages applications.
+  [Ben Asher](https://github.com/benasher44)
+  [#5726](https://github.com/CocoaPods/CocoaPods/pull/5726)
+
+* Improved messaging when missing host targets for embedded targets.
+  Improved support for framework-only projects.
+  [Ben Asher](https://github.com/benasher44)
+  [#5733](https://github.com/CocoaPods/CocoaPods/pull/5733)
+
+* Set ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES when appropriate.
+  [Ben Asher](https://github.com/benasher44)
+  [#5732](https://github.com/CocoaPods/CocoaPods/pull/5732)
+
+* Verify that embedded target platform and Swift version match the host.
+  [Ben Asher](https://github.com/benasher44)
+  [#5747](https://github.com/CocoaPods/CocoaPods/pull/5747)
+
+* Pass the version of CocoaPods to `pod lib create`'s configure script.
+  [orta](https://github.com/orta)
+  [#5787](https://github.com/CocoaPods/CocoaPods/pull/5787)
+
+* Improve host target detection for embedded targets
+  in sub-projects.
+  [Ben Asher](https://github.com/benasher44)
+  [#5622](https://github.com/CocoaPods/CocoaPods/issues/5622)
+
+##### Bug Fixes
+
+* Hash scope suffixes if they are over 50 characters to prevent file paths from being too long.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5491](https://github.com/CocoaPods/CocoaPods/issues/5491)
+
+* Fix codesigning identity on watchOS and tvOS targets.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5686](https://github.com/CocoaPods/CocoaPods/issues/5686)
+
+* Fix SWIFT_VERSION not being read when only defined at the project level.
+  [Ben Asher](https://github.com/benasher44)
+  [#5700](https://github.com/CocoaPods/CocoaPods/issues/5700) and [#5737](https://github.com/CocoaPods/CocoaPods/issues/5737)
+
+* Fix analyzer checking the compatibility of an embedded target with a host that has not been added to the Podfile.
+  [Ben Asher](https://github.com/benasher44)
+  [#5783](https://github.com/CocoaPods/CocoaPods/issues/5783)
+
+
+## 1.1.0.beta.1 (2016-07-11)
+
+##### Enhancements
+
+* Move Pods Project generation to an `Xcode` namespace.
+  [Daniel Tomlinson](https://github.com/dantoml)
+  [#5480](https://github.com/CocoaPods/CocoaPods/pull/5480)
+
+* Add the ability to inhibit Swift warnings (see the sketch at the end of
+  this list).
+  [Peter Ryszkiewicz](https://github.com/pRizz)
+  [#5414](https://github.com/CocoaPods/CocoaPods/pull/5414)
+
+* Use `git ls-remote` to skip full clones for branch dependencies.
+  [Juan Civile](https://github.com/champo)
+  [#5376](https://github.com/CocoaPods/CocoaPods/issues/5376)
+
+* [repo/push] Add `--use-json` to convert podspecs to JSON format when pushing.
+  [Mark Schall](https://github.com/maschall)
+  [#5568](https://github.com/CocoaPods/CocoaPods/pull/5568)
+
+* Set 'Allow app extension API only' for Messages extensions.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5558](https://github.com/CocoaPods/CocoaPods/issues/5558)
+
+* Accept `pod repo push` with a URL instead of only a repo name.
+  [Mark Schall](https://github.com/maschall)
+  [#5572](https://github.com/CocoaPods/CocoaPods/pull/5572)
+
+* [Installer] Set the SWIFT_VERSION for CocoaPods-generated targets.
+  [Danielle Tomlinson](https://github.com/DanToml)
+  [#5540](https://github.com/CocoaPods/CocoaPods/pull/5540)
+
+* Print a message when skipping user project integration.
+  [Danielle Tomlinson](https://github.com/dantoml)
+  [#5517](https://github.com/CocoaPods/CocoaPods/issues/5517)
+
+* Show GitHub Issues that could be related to exceptions.
+  [Orta Therox](https://github.com/orta)
+  [#4817](https://github.com/CocoaPods/CocoaPods/issues/4817)
+
+* Improve handling of app extensions, watchOS 1 extensions,
+  and framework targets.
+  [Ben Asher](https://github.com/benasher44)
+  [#4203](https://github.com/CocoaPods/CocoaPods/issues/4203)
+
+* Add a license type to the generated acknowledgements plist file.
+  [Naoto Kaneko](https://github.com/naoty)
+  [#5436](https://github.com/CocoaPods/CocoaPods/pull/5436)
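+
+A minimal Podfile sketch of inhibiting warnings for a single Swift pod,
+per the "inhibit Swift warnings" enhancement above; the target and pod
+names are placeholders:
+
+```ruby
+target 'MyApp' do
+  use_frameworks!
+  # Warnings from this pod's sources are suppressed; other pods keep theirs.
+  pod 'SomeSwiftPod', :inhibit_warnings => true
+end
+```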
+
+##### Bug Fixes
+
+* Fix local pod platform conflict error message.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#5052](https://github.com/CocoaPods/CocoaPods/issues/5052)
+
+* Avoid use of `activesupport` version 5 to stay compatible with macOS system
+  Ruby.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5602](https://github.com/CocoaPods/CocoaPods/issues/5602)
+
+* Fix installing pods with `use_frameworks` when deduplication is disabled.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5481](https://github.com/CocoaPods/CocoaPods/issues/5481)
+
+* Running `pod setup --silent` will now properly silence git output while
+  updating the repository.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fix linting pods that depend upon `XCTest`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5321](https://github.com/CocoaPods/CocoaPods/issues/5321)
+
+* Use `require` instead of `autoload` to solve an issue with loading
+  `fourflusher`.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5445](https://github.com/CocoaPods/CocoaPods/issues/5445)
+
+* Resolve cyclic dependencies when creating pod targets.
+  [Juan Civile](https://github.com/champo)
+  [#5362](https://github.com/CocoaPods/CocoaPods/issues/5362)
+
+* Fix embedding frameworks in UI Testing bundles.
+  [Daniel Tomlinson](https://github.com/dantoml)
+  [#5250](https://github.com/CocoaPods/CocoaPods/issues/5250)
+
+* Ensure attempting to print a path in the error report doesn't itself error.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5541](https://github.com/CocoaPods/CocoaPods/issues/5541)
+
+* Fix linting with Xcode 8.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5529](https://github.com/CocoaPods/CocoaPods/issues/5529)
+
+* Fix linting with Xcode 8 by disabling it entirely.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5528](https://github.com/CocoaPods/CocoaPods/issues/5528)
+
+* Error during install when there are duplicate library names.
+  [Daniel Tomlinson](https://github.com/dantoml)
+  [#4014](https://github.com/CocoaPods/CocoaPods/issues/4014)
+
+* Make the `Check Pods Manifest.lock` script write errors to STDERR and improve
+  POSIX shell compatibility.
+  [Simon Warta](https://github.com/webmaster128)
+  [#5595](https://github.com/CocoaPods/CocoaPods/pull/5595)
+
+
+## 1.0.1 (2016-06-02)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Symlink the header folders in the framework bundle's root directory
+  by a new shell script build phase if `header_mappings_dir` is used
+  with frameworks.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5313](https://github.com/CocoaPods/CocoaPods/issues/5313)
+
+* Remove emojis in Build Phases names, as some third-party tools have trouble with them.
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [#5382](https://github.com/CocoaPods/CocoaPods/pull/5382)
+
+* Ensure `Set` is defined before using it.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5287](https://github.com/CocoaPods/CocoaPods/issues/5287)
+
+* Add `--target-device` to the ibtool invocation for XIBs.
+  [Juan Civile](https://github.com/champo)
+  [#5282](https://github.com/CocoaPods/CocoaPods/issues/5282)
+
+* Fix error when executables cannot be found.
+  [Jan Berkel](https://github.com/jberkel)
+  [#5319](https://github.com/CocoaPods/CocoaPods/pull/5319)
+
+* Avoid removing all files when the root directory contains Unicode characters.
+  [Marc Boquet](https://github.com/marcboquet)
+  [#5294](https://github.com/CocoaPods/CocoaPods/issues/5294)
+
+* Guard against a crash if `pod lib create` has a + character in the name.
+  [William Entriken](https://github.com/fulldecent)
+  [CocoaPods/pod-template#69](https://github.com/CocoaPods/pod-template/issues/69)
+  [William Entriken](https://github.com/fulldecent)
+  [CocoaPods/pod-template#69](https://github.com/CocoaPods/pod-template/issues/69)
+
+* Use target product types to determine whether a target is a test target when
+  running `pod init`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5378](https://github.com/CocoaPods/CocoaPods/issues/5378)
+
+
+## 1.0.0 (2016-05-10)
+
+##### Enhancements
+
+* Validate that resource bundles declared in the podspec contain resources.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5218](https://github.com/CocoaPods/CocoaPods/issues/5218)
+
+* Improvements to the error messaging around missing dependencies.
+  [Orta Therox](https://github.com/orta)
+  [#5260](https://github.com/CocoaPods/CocoaPods/issues/5260)
+
+* Make sharing schemes for development pods an installation option
+  (`share_schemes_for_development_pods`) and disable sharing schemes
+  by default.
+  [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Fix search paths inheritance when there are transitive dependencies.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5264](https://github.com/CocoaPods/CocoaPods/issues/5264)
+
+
+## 1.0.0.rc.2 (2016-05-04)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Handle when an abstract target has no declared platform without crashing.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5236](https://github.com/CocoaPods/CocoaPods/issues/5236)
+
+* Don't recurse into child directories to find podspecs when running
+  `pod spec lint`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5244](https://github.com/CocoaPods/CocoaPods/issues/5244)
+
+
+## 1.0.0.rc.1 (2016-04-30)
+
+##### Enhancements
+
+* The `pod init` command now uses target inheritance for test targets
+  in the generated Podfile.
+  [Orta Therox](https://github.com/orta)
+  [#4714](https://github.com/CocoaPods/CocoaPods/issues/4714)
+
+* Support customized build directories by letting user xcconfig definitions
+  rely on the new overridable alias build variable `PODS_BUILD_DIR`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5217](https://github.com/CocoaPods/CocoaPods/issues/5217)
+
+##### Bug Fixes
+
+* Fix for `pod repo push --help` throwing an error.
+  [Boris Bügling](https://github.com/neonichu)
+  [#5214](https://github.com/CocoaPods/CocoaPods/pull/5214)
+
+* The warning for not having utf-8 set as the default encoding for a
+  terminal now properly respects the `--no-ansi` argument.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#5199](https://github.com/CocoaPods/CocoaPods/pull/5199)
+
+
+## 1.0.0.beta.8 (2016-04-15)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Headers from vendored frameworks no longer end up in the `HEADER_SEARCH_PATH`
+  when using frameworks. They are now assumed to be already present as modular
+  headers in the framework itself.
+  [Mark Spanbroek](https://github.com/markspanbroek)
+  [#5146](https://github.com/CocoaPods/CocoaPods/pull/5146)
+
+* Access to the `Pod::SourcesManager` constant has been restored, though its use
+  is considered deprecated and subject to removal at any time. Migrate to use
+  `Pod::Config.instance.sources_manager` in some manner as soon as possible.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Running `pod repo update --silent` will now properly silence git output while
+  updating the repository.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.0.0.beta.7 (2016-04-15)
+
+##### Enhancements
+
+* When an unknown build configuration is mentioned in the Podfile, CocoaPods
+  will suggest the build configurations found in the user project.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5113](https://github.com/CocoaPods/CocoaPods/issues/5113)
+
+* Improved the error message when a matching spec cannot be found,
+  mentioning that now `pod repo update` is not implicit when running `pod
+  install`.
+  [Orta Therox](https://github.com/orta)
+  [#5135](https://github.com/CocoaPods/CocoaPods/issues/5135)
+
+* Add support for sharded specs directories.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5002](https://github.com/CocoaPods/CocoaPods/issues/5002)
+
+* Pass the build setting `OTHER_CODE_SIGN_FLAGS` to codesign for the generated
+  embed frameworks build phase's script, as Xcode does when signing natively.
+  [Václav Slavík](https://github.com/vslavik)
+  [#5087](https://github.com/CocoaPods/CocoaPods/pull/5087)
+
+##### Bug Fixes
+
+* Sort files from `Dir.glob` explicitly so that case-sensitive file systems
+  produce the same result as case-insensitive file systems.
+  [Soutaro Matsumoto](https://github.com/soutaro)
+
+* Fix build path for resource bundles.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5034](https://github.com/CocoaPods/CocoaPods/issues/5034)
+
+* Rely on `TARGET_BUILD_DIR` instead of `CONFIGURATION_BUILD_DIR` in the
+  generated embed resources build phase's script, so that UI test targets can
+  be run.
+  [seaders](https://github.com/seaders)
+  [#5133](https://github.com/CocoaPods/CocoaPods/issues/5133)
+
+* Ensure that a `CFBundleVersion` is set for resource bundles' Info.plist
+  files.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4897](https://github.com/CocoaPods/CocoaPods/issues/4897)
+
+
+## 1.0.0.beta.6 (2016-03-15)
+
+##### Breaking
+
+* Running `pod install` doesn't imply an automatic spec repo update.
+  The old behavior can be achieved by passing in the option `--repo-update`
+  or running `pod repo update`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5004](https://github.com/CocoaPods/CocoaPods/issues/5004)
+
+* Remove the configuration variable `skip_repo_update` as the default behavior
+  varies now between `pod install` and `pod (update|outdated)`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5017](https://github.com/CocoaPods/CocoaPods/issues/5017)
+
+##### Enhancements
+
+* The master specs repo will no longer perform 'no-op' git fetches. This should
+  help to reduce the load on GitHub's servers.
+  [Daniel Tomlinson](https://github.com/DanielTomlinson)
+  [#5005](https://github.com/CocoaPods/CocoaPods/issues/5005)
+  [#4989](https://github.com/CocoaPods/CocoaPods/issues/4989)
+
+* The specs repos will no longer support shallow clones to reduce CPU load
+  on git servers. Pre-existing shallow clones of the `master` repo will
+  automatically be upgraded to deep clones when the repo is updated.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5016](https://github.com/CocoaPods/CocoaPods/issues/5016)
+
+* The validator will check that all `public_header_files` and
+  `private_header_files` are also present in `source_files`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4936](https://github.com/CocoaPods/CocoaPods/issues/4936)
+
+##### Bug Fixes
+
+* The master specs repository can no longer be added via `pod repo add`, but
+  instead must be done via `pod setup`.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Print a friendly error message when the platform for a target cannot be
+  inferred.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4790](https://github.com/CocoaPods/CocoaPods/issues/4790)
+
+* Rely on `TARGET_BUILD_DIR` instead of `CONFIGURATION_BUILD_DIR` in the
+  generated embed frameworks build phase's script, so that UI test targets can
+  be run.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5022](https://github.com/CocoaPods/CocoaPods/issues/5022)
+
+* Fix build paths for resources bundles.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#5028](https://github.com/CocoaPods/CocoaPods/pull/5028)
+
+* Validate that a Podfile does not declare the same target twice.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#5029](https://github.com/CocoaPods/CocoaPods/issues/5029)
+
+
+## 1.0.0.beta.5 (2016-03-08)
+
+##### Breaking
+
+* Development pods will no longer be implicitly unlocked. This makes CocoaPods respect
+  constraints related to dependencies of development pods in the lockfile.
+
+  If you change the constraints of a dependency of your development pod and want to
+  override the locked version, you will have to use
+  `pod update ${DEPENDENCY_NAME}` manually.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#4211](https://github.com/CocoaPods/CocoaPods/issues/4211)
+  [#4577](https://github.com/CocoaPods/CocoaPods/issues/4577)
+  [#4580](https://github.com/CocoaPods/CocoaPods/issues/4580)
+
+##### Enhancements
+
+* Add the :package: emoji in front of CocoaPods Script Build Phases
+  to quickly and visually differentiate them from other phases.
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [#4985](https://github.com/CocoaPods/CocoaPods/issues/4985)
+
+* Enable syntax highlighting on the Podfile in the generated
+  `Pods.xcodeproj`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4962](https://github.com/CocoaPods/CocoaPods/issues/4962)
+
+##### Bug Fixes
+
+* Fixes paths passed for resources bundles in the copy resources script.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4954](https://github.com/CocoaPods/CocoaPods/pull/4954)
+
+* Fix saying the `master` specs repo exists when it has not been set up.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4955](https://github.com/CocoaPods/CocoaPods/issues/4955)
+
+* Move `${TARGET_DEVICE_ARGS}` out of the quotations for `--sdk` in the
+  `Copy Pods Resources` build phase.
+  [seaders](https://github.com/seaders)
+  [#4940](https://github.com/CocoaPods/CocoaPods/issues/4940)
+
+* Handle when `$PATH` isn't set.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Module maps that are set per-platform will be installed for the correct
+  platform.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4968](https://github.com/CocoaPods/CocoaPods/issues/4968)
+
+
+## 1.0.0.beta.4 (2016-02-24)
+
+##### Enhancements
+
+* Allow deduplication to take effect even when the same pod is used with
+  different sets of subspecs across different platforms.
+  This changes the general naming scheme for scoped pod targets: they are now
+  suffixed based on what makes them distinct among targets for the same root
+  spec, instead of being prefixed by the dependent target.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146)
+
+* Pass `COCOAPODS_VERSION` as an environment variable when invoking the
+  `prepare_command`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4933](https://github.com/CocoaPods/CocoaPods/pull/4933)
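+
+  A minimal podspec sketch (the pod name and echo command are hypothetical;
+  only the `prepare_command` attribute and the `COCOAPODS_VERSION` variable
+  come from this entry):
+
+  ```ruby
+  Pod::Spec.new do |s|
+    s.name    = 'Example' # hypothetical pod
+    s.version = '1.0.0'
+    # Runs while the pod is being fetched; CocoaPods now exports
+    # COCOAPODS_VERSION into this command's environment.
+    s.prepare_command = 'echo "Prepared with CocoaPods $COCOAPODS_VERSION"'
+  end
+  ```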
+
+##### Bug Fixes
+
+* Pods are built by default in another scoping level of the build products
+  directory identified by their name to prevent name clashes among
+  dependencies.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146)
+
+* Fix mixed integrations where static libraries are used alongside frameworks
+  from different target definitions in one Podfile.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4146](https://github.com/CocoaPods/CocoaPods/pull/4146)
+
+* Pass target device arguments to `ibtool` in the copy resources script, fixing
+  compilation of storyboards when targeting versions of iOS prior to iOS 8.
+  [seaders](https://github.com/seaders)
+  [#4913](https://github.com/CocoaPods/CocoaPods/issues/4913)
+
+* Fix `pod repo lint` when passed a path argument.
+  [Boris Bügling](https://github.com/neonichu)
+  [#4883](https://github.com/CocoaPods/CocoaPods/issues/4883)
+
+
+## 1.0.0.beta.3 (2016-02-03)
+
+##### Breaking
+
+* Rename the `xcodeproj` Podfile directive to `project`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [Core#298](https://github.com/CocoaPods/Core/issues/298)
+
+##### Enhancements
+
+* None.
+
+##### Bug Fixes
+
+* Don't try to embed project headers into frameworks.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4819](https://github.com/CocoaPods/CocoaPods/issues/4819)
+
+* Fix a crash in the analyzer when target deduplication is deactivated.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4751](https://github.com/CocoaPods/CocoaPods/issues/4751)
+
+* Handle CoreData mapping models with recursive resource globs.
+  [Eric Firestone](https://github.com/efirestone)
+  [#4809](https://github.com/CocoaPods/CocoaPods/pull/4809)
+
+* Generate a valid xcconfig when the target name includes spaces.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#4783](https://github.com/CocoaPods/CocoaPods/issues/4783)
+
+* Properly add resource files to the resources build phase.
+  [Eric Firestone](https://github.com/efirestone)
+  [#4762](https://github.com/CocoaPods/CocoaPods/issues/4762)
+
+* Fix suggestion of sudo when it actually isn't needed.
+  [Marcel Jackwerth](https://github.com/sirlantis)
+
+* Set the `TARGET_DEVICE_FAMILY` to support both iPhone and iPad for iOS
+  resource bundle targets.
+  [Andy Rifken](https://github.com/arifken)
+
+* Share user schemes of `Pods.xcodeproj` after generating deterministic UUIDs.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Only attempt to `import` a framework during linting if the pod has source
+  files, and is thus being built by CocoaPods.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4823](https://github.com/CocoaPods/CocoaPods/issues/4823)
+
+* Determine whether an external source needs to be fetched when updating a
+  dependency regardless of subspec names.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4821](https://github.com/CocoaPods/CocoaPods/issues/4821)
+
+
+## 1.0.0.beta.2 (2016-01-05)
+
+##### Enhancements
+
+* Present a friendly error suggesting running `pod install` when there are
+  missing local podspecs when running `pod outdated`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4716](https://github.com/CocoaPods/CocoaPods/issues/4716)
+
+* Don't warn about setting base config when identical to current config.
+  [Jed Lewison](https://github.com/jedlewison)
+  [#4722](https://github.com/CocoaPods/CocoaPods/issues/4722)
+
+* Add `user_targets` method to the `UmbrellaTargetDescription` in the
+  post-install hooks context.
+  [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Always fetch a `:podspec` dependency's podspec when it is missing in the
+  `Pods` directory.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4717](https://github.com/CocoaPods/CocoaPods/issues/4717)
+
+* The `Info.plist` file will now be generated properly for resource bundles,
+  setting the proper `CFBundlePackageType` and omitting the `CFBundleExecutable`
+  key.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Xcodeproj#259](https://github.com/CocoaPods/Xcodeproj/issues/259)
+
+* Fix crash when deintegrating due to major version change and there are
+  multiple root-level Xcode projects.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Ensure the `sandbox_root` attribute is set on the pre-install hooks context.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 1.0.0.beta.1 (2015-12-30)
+
+##### Breaking
+
+* The `link_with` Podfile DSL method has been removed in favor of target
+  inheritance.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The `:exclusive => true` Podfile DSL target option has been removed in favor
+  of the `inherit! :search_paths` directive.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The specification of `:head` dependencies has been removed.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4673](https://github.com/CocoaPods/CocoaPods/issues/4673)
+
+* The deprecated `:local` dependency option has been removed in favor of the
+  equivalent `:path` option.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The deprecated `dependency` method in the Podfile DSL has been removed in
+  favor of the equivalent `pod` method.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The deprecated `preferred_dependency` method in the Specification DSL has been
+  removed in favor of the equivalent `default_subspecs` method.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The `docset_url` Specification attribute has been removed.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Core#284](https://github.com/CocoaPods/Core/issues/284)
+
+* Build configuration names are no longer set as pre-processor defines, but
+  rather `POD_CONFIGURATION_$CONFIGURATION_NAME` is defined in order to lessen
+  conflicts with pod code.
+  [#4143](https://github.com/CocoaPods/CocoaPods/issues/4143)
+
+##### Highlighted Enhancements That Need Testing
+
+* The Podfile DSL has been cleaned up, with the removal of confusing options and
+  the introduction of abstract targets, search paths-only inheritance, the
+  specification of installation options, and the removal of head dependencies.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#840](https://github.com/CocoaPods/CocoaPods/issues/840)
+
+##### Enhancements
+
+* Add the ability to add a custom commit message when pushing a spec.
+  [Bart Jacobs](https://github.com/bartjacobs)
+  [#4583](https://github.com/CocoaPods/CocoaPods/issues/4583)
+
+* Added support for `pod env` to print the pod environment without having to crash.
+  [Hemal Shah](https://github.com/hemal)
+  [#3660](https://github.com/CocoaPods/CocoaPods/issues/3660)
+
+* Add support for specifying `:source` with a pod dependency.
+  [Eric Firestone](https://github.com/efirestone)
+  [#4486](https://github.com/CocoaPods/CocoaPods/pull/4486)
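+
+  A minimal Podfile sketch (the pod name, version, and URL are placeholders;
+  only the per-pod `:source` option comes from this entry):
+
+  ```ruby
+  # Resolve this one dependency from an explicit specs repo instead of
+  # the globally configured sources.
+  pod 'Example', '~> 1.0', :source => 'https://github.com/CocoaPods/Specs.git'
+  ```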
+
+* Ask the user to run `pod install` when a resource is not found in the copy
+  resources script.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+
+* Add support to track `.def` sources.
+* Add support to track `.def` files as headers.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#338](https://github.com/CocoaPods/Xcodeproj/pull/338)
+
+* `Pod::Installer::PostInstallHooksContext` now offers access to the `sandbox`
+  object.
+  [Marcelo Fabri](https://github.com/marcelofabri)
+  [#4487](https://github.com/CocoaPods/CocoaPods/pull/4487)
+
+* Improve sorting algorithm for `pod search`.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [cocoapods-search#12](https://github.com/CocoaPods/cocoapods-search/issues/12)
+
+* Improve `pod search` performance while using the `--full` flag.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [cocoapods-search#8](https://github.com/CocoaPods/cocoapods-search/issues/8)
+
+* Improve the message when there is no spec in the repos for a dependency set
+  in the Podfile.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#4430](https://github.com/CocoaPods/CocoaPods/issues/4430)
+
+* Reduce the number of times the user's Xcode project is opened, speeding up
+  installation.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4374](https://github.com/CocoaPods/CocoaPods/issues/4374)
+
+* Improve the performance of `Pod::Installer::Analyzer#generate_pod_targets`.
+  [Daniel Ribeiro](https://github.com/danielribeiro)
+  [#4399](https://github.com/CocoaPods/CocoaPods/pull/4399)
+
+* Framework pods that have a `header_mappings_dir` set will now produce
+  frameworks with headers that respect the nesting.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The validator will now ensure that pods with a `header_mappings_dir` have all
+  of their headers inside that directory.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Pods will be validated with the `-Wincomplete-umbrella` compiler flag to
+  ensure module maps are valid.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3428](https://github.com/CocoaPods/CocoaPods/issues/3428)
+
+* The validator will now attempt to build an app that imports the pod.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2095](https://github.com/CocoaPods/CocoaPods/issues/2095)
+  [#2134](https://github.com/CocoaPods/CocoaPods/issues/2134)
+
+* The `Info.plist` file's `CFBundleIdentifier` is now set via the
+  `PRODUCT_BUNDLE_IDENTIFIER` build setting, consistent with Xcode 7.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4426](https://github.com/CocoaPods/CocoaPods/issues/4426)
+
+* Externally-sourced pods will now have their specifications quickly linted.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Set the deployment target on pods to be that which is defined in the
+  podspec.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4354](https://github.com/CocoaPods/CocoaPods/issues/3454)
+
+* Set a deployment target for resource bundle targets.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3347](https://github.com/CocoaPods/CocoaPods/issues/3347)
+
+* Targets that are no longer integrated with CocoaPods will be properly
+  de-integrated when installation occurs.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Targets that are integrated will be ensured to have all
+  CocoaPods-related settings and phases properly installed.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Total de-integration will happen whenever the major version of CocoaPods
+  changes, ensuring backwards-incompatible changes are properly applied.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The Podfile now allows specifying installation options via the `install!`
+  directive.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Core#151](https://github.com/CocoaPods/Core/issues/151)
+
+* The Podfile now allows marking targets as `abstract` and specifying the pod
+  inheritance mode via the `inherit!` directive.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1249](https://github.com/CocoaPods/CocoaPods/issues/1249)
+  [#1626](https://github.com/CocoaPods/CocoaPods/issues/1626)
+  [#4001](https://github.com/CocoaPods/CocoaPods/issues/4001)
+
+##### Bug Fixes
+
+* Fix compiling of localized resources.
+  [Eric Firestone](https://github.com/efirestone)
+  [#1653](https://github.com/CocoaPods/CocoaPods/issues/1653)
+
+* Fix compiling of asset catalog files inside resource bundles.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#4501](https://github.com/CocoaPods/CocoaPods/issues/4501)
+
+* Prevent the installer from being run inside the sandbox directory.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+
+* Improve the `pod repo lint` error message when no repo is found with the
+  given name.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#4142](https://github.com/CocoaPods/CocoaPods/issues/4142)
+
+* Fix a crash in dependency resolution when running Ruby 2.3.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4345](https://github.com/CocoaPods/CocoaPods/issues/4345)
+
+* Fix handling of localized files in Pods installed as frameworks.
+  [Tim Bodeit](https://github.com/timbodeit)
+  [#2597](https://github.com/CocoaPods/CocoaPods/issues/2597)
+
+* Only include native targets when generating the Podfile in `pod init`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2169](https://github.com/CocoaPods/CocoaPods/issues/2169)
+
+* Ensure that generated `Info.plist` files have a `CFBundleShortVersionString`
+  that is precisely three dot-separated numbers.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4421](https://github.com/CocoaPods/CocoaPods/issues/4421)
+
+* Set the `APPLICATION_EXTENSION_API_ONLY` build setting if integrating with a
+  tvOS extension target, or a target that has the setting set to `YES`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3644](https://github.com/CocoaPods/CocoaPods/issues/3644)
+  [#4393](https://github.com/CocoaPods/CocoaPods/issues/4393)
+
+* Only the root directory of externally-sourced pods will be searched for
+  podspecs.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3683](https://github.com/CocoaPods/CocoaPods/issues/3683)
+
+* Remove the library name's extension when adding it in the "linker flags" build
+  setting to support dynamic libraries.
+  [Andrea Cremaschi](https://github.com/andreacremaschi)
+  [#4468](https://github.com/CocoaPods/CocoaPods/issues/4468)
+
+* Specifying relative subspec names to the linter is now supported.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1917](https://github.com/CocoaPods/CocoaPods/issues/1917)
+
+* Headers used to build a pod will no longer be duplicated for frameworks.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4420](https://github.com/CocoaPods/CocoaPods/issues/4420)
+
+* The `UIRequiredDeviceCapabilities` key is now specified in the `Info.plist`
+  file for tvOS pods built as frameworks.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4514](https://github.com/CocoaPods/CocoaPods/issues/4514)
+
+* Fix Swift code completion for Development Pods by using `realpath` for
+  symlinked source files.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3777](https://github.com/CocoaPods/CocoaPods/issues/3777)
+
+* Avoid the duplicate UUID warning when a Pod is installed for multiple
+  platforms.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4521](https://github.com/CocoaPods/CocoaPods/issues/4521)
+
+* Changing the name of a target in a Podfile will no longer cause warnings about
+  being unable to set the base configuration XCConfig.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Ensure that linking multiple times against the same framework does not trigger
+  the duplicate module name check for frameworks.
+  [Boris Bügling](https://github.com/neonichu)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4550](https://github.com/CocoaPods/CocoaPods/issues/4550)
+
+* Fix lint in Xcode 7.2, which requires `-destination`.
+  [Boris Bügling](https://github.com/neonichu)
+  [#4652](https://github.com/CocoaPods/CocoaPods/pull/4652)
+
+* Empty podfiles / target blocks no longer break the user's Xcode project.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3617](https://github.com/CocoaPods/CocoaPods/issues/3617)
+
+* The pre-processor define for `DEBUG` will be set for all debug-based build
+  configurations when building pods.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4148](https://github.com/CocoaPods/CocoaPods/issues/4148)
+
+
+## 0.39.0 (2015-10-09)
+
+##### Enhancements
+
+* Podfile-specified options are passed to plugins as hashes that treat string
+  and symbol keys identically.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3354](https://github.com/CocoaPods/CocoaPods/issues/3354)
+
+##### Bug Fixes
+
+* Only link dynamic vendored frameworks and libraries of pod dependencies.
+  [Kevin Coleman](https://github.com/kcoleman731)
+  [#4336](https://github.com/CocoaPods/CocoaPods/issues/4336)
+
+
+## 0.39.0.rc.1 (2015-10-05)
+
+##### Enhancements
+
+* Support for adding dependency target vendored libraries and frameworks to
+  build settings.
+  [Kevin Coleman](https://github.com/kcoleman731)
+  [#4278](https://github.com/CocoaPods/CocoaPods/pull/4278)
+
+* Always link the aggregate target as static to the user project.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4137](https://github.com/CocoaPods/CocoaPods/pull/4137)
+
+
+## 0.39.0.beta.5 (2015-10-01)
+
+##### Breaking
+
+* Activesupport 4 is now required, breaking compatibility with applications
+  locked to `3.x.y`.
+
+##### Enhancements
+
+* The `EMBEDDED_CONTENT_CONTAINS_SWIFT` build setting will now be set when
+  appropriate.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The embed frameworks script will no longer manually copy over the Swift
+  runtime libraries on Xcode 7 and later.
+  [Samuel Giddins](https://github.com/segiddins)
+  [earltedly](https://github.com/earltedly)
+  [DJ Tarazona](https://github.com/djtarazona)
+  [#4188](https://github.com/CocoaPods/CocoaPods/issues/4188)
+
+* A post-install summary of the pods installed is now printed.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4124](https://github.com/CocoaPods/CocoaPods/issues/4124)
+
+##### Bug Fixes
+
+* Give a meaningful message for the case where there is no available stable
+  version for a pod, and there is no explicit version requirement.
+  [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz)
+  [#4197](https://github.com/CocoaPods/CocoaPods/issues/4197)
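+
+  For illustration, a hedged Podfile sketch (the pod name and version are
+  hypothetical): when every published version of a pod is a pre-release,
+  an explicit requirement is what the new message points toward.
+
+  ```ruby
+  # A bare `pod 'Example'` would find no stable version to settle on;
+  # naming the pre-release explicitly lets resolution succeed.
+  pod 'Example', '2.0.0.beta.1'
+  ```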
+
+* Use `watchsimulator` when validating pods with the watchOS platform.
+  [Thomas Kollbach](https://github.com/toto)
+  [#4130](https://github.com/CocoaPods/CocoaPods/issues/4130)
+
+* C or C++ preprocessor output files with the `.i` extension now have their
+  compiler flags set correctly.
+  [Andrea Aresu](https://github.com/aaresu)
+
+* Remove the SDKROOT relative search path, as it isn't needed anymore since
+  XCTest.
+  [Boris Bügling](https://github.com/neonichu)
+  [#4219](https://github.com/CocoaPods/CocoaPods/issues/4219)
+
+* The Podfile generated by `pod init` now specifies iOS 8.0 as the default
+  platform and includes `use_frameworks!` for Swift projects.
+  [Jamie Evans](https://github.com/JamieREvans)
+
+* Support for the new `tvos` platform.
+  [Boris Bügling](https://github.com/neonichu)
+  [#4152](https://github.com/CocoaPods/CocoaPods/pull/4152)
+
+* Either generate just one pod target or generate it once for each target
+  definition.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#4034](https://github.com/CocoaPods/CocoaPods/issues/4034)
+
+* Stop setting `DYLIB_CURRENT_VERSION`, `CURRENT_PROJECT_VERSION`, and
+  `DYLIB_COMPATIBILITY_VERSION` for pods integrated as dynamic frameworks.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4083](https://github.com/CocoaPods/CocoaPods/issues/4083)
+
+* The header folder paths for static library pods will be unset, fixing
+  validation when archives are uploaded to iTunes Connect.
+  [Boris Bügling](https://github.com/neonichu)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4119](https://github.com/CocoaPods/CocoaPods/issues/4119)
+
+* Don't require the `platform` attribute for targets without any declared pods
+  when running `pod install --no-integrate`.
+  [Sylvain Guillopé](https://github.com/sguillope)
+  [#3151](https://github.com/CocoaPods/CocoaPods/issues/3151)
+
+* Gracefully handle the exception if creating the repos directory fails due to a
+  system error like a permission issue.
+  [Sylvain Guillopé](https://github.com/sguillope)
+  [#4177](https://github.com/CocoaPods/CocoaPods/issues/4177)
+
+
+## 0.39.0.beta.4 (2015-09-02)
+
+##### Bug Fixes
+
+* Using vendored frameworks without a `Headers` directory will no longer cause a
+  crash.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3967](https://github.com/CocoaPods/CocoaPods/issues/3967)
+
+* Computing the set of transitive dependencies for a pod target,
+  even if the target is scoped, will no longer smash the stack.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4092](https://github.com/CocoaPods/CocoaPods/issues/4092)
+
+* Take into account a specification's `exclude_files` when constructing resource
+  bundles.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4065](https://github.com/CocoaPods/CocoaPods/issues/4065)
+
+* Fix resolving to platform-compatible versions of transitive dependencies.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4084](https://github.com/CocoaPods/CocoaPods/issues/4084)
+
+
+## 0.39.0.beta.3 (2015-08-28)
+
+##### Bug Fixes
+
+* This release fixes a file permissions error when using the RubyGem.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 0.39.0.beta.2 (2015-08-27)
+
+##### Bug Fixes
+
+* Ensure all gem files are readable.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4085](https://github.com/CocoaPods/CocoaPods/issues/4085)
+
+
+## 0.39.0.beta.1 (2015-08-26)
+
+##### Breaking
+
+* The `HEADER_SEARCH_PATHS` will no longer be constructed recursively.
+  [Samuel Giddins](https://github.com/segiddins)
+  [twoboxen](https://github.com/twoboxen)
+  [#1437](https://github.com/CocoaPods/CocoaPods/issues/1437)
+  [#3760](https://github.com/CocoaPods/CocoaPods/issues/3760)
+
+##### Enhancements
+
+* Collapse the namespaced public and private pod xcconfig into one single
+  xcconfig file.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3916](https://github.com/CocoaPods/CocoaPods/pull/3916)
+
+* Add `--sources` option to `push` command.
+  [Dimitris Koutsogiorgas](https://github.com/dnkoutso)
+  [#3912](https://github.com/CocoaPods/CocoaPods/issues/3912)
+
+* Implicitly unlock all local dependencies when installing.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3764](https://github.com/CocoaPods/CocoaPods/issues/3764)
+
+* The resolver error message when a conflict occurred due to platform deployment
+  target mismatches will now explain that.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3926](https://github.com/CocoaPods/CocoaPods/issues/3926)
+
+* Add `:source_provider` hook to allow plugins to provide sources.
+  [Eric Amorde](https://github.com/amorde)
+  [#3190](https://github.com/CocoaPods/CocoaPods/issues/3190)
+  [#3792](https://github.com/CocoaPods/CocoaPods/pull/3792)
+
+* Remove the embed frameworks build phase from target types where it is not
+  required.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3905](https://github.com/CocoaPods/CocoaPods/issues/3905)
+  [#4028](https://github.com/CocoaPods/CocoaPods/pull/4028)
+
+* Add a `--private` option to `pod spec lint`, `pod lib lint`, and
+  `pod repo push` that will ignore warnings that only apply to public
+  specifications and sources.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Core#190](https://github.com/CocoaPods/Core/issues/190)
+  [#2682](https://github.com/CocoaPods/CocoaPods/issues/2682)
+
+* Add support for dynamic `vendored_frameworks` and `vendored_libraries`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1993](https://github.com/CocoaPods/CocoaPods/issues/1993)
+
+##### Bug Fixes
+
+* Build settings specified in a spec's `pod_target_xcconfig` are now applied
+  only to the pod target, for library targets as well.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3906](https://github.com/CocoaPods/CocoaPods/issues/3906)
+
+* Use APPLICATION_EXTENSION_API_ONLY for watchOS 2 extensions.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3920](https://github.com/CocoaPods/CocoaPods/pull/3920)
+
+* Prevent copying resources to the installation directory when `SKIP_INSTALL` is
+  enabled.
+  [Dominique d'Argent](https://github.com/nubbel)
+  [#3971](https://github.com/CocoaPods/CocoaPods/pull/3971)
+
+* Embed frameworks into app and watch extensions.
+  [Boris Bügling](https://github.com/neonichu)
+  [#4004](https://github.com/CocoaPods/CocoaPods/pull/4004)
+
+* Fix missing `$(inherited)` for generated xcconfig `LIBRARY_SEARCH_PATHS`
+  and `HEADER_SEARCH_PATHS` build settings.
+  [Tyler Fox](https://github.com/smileyborg)
+  [#3908](https://github.com/CocoaPods/CocoaPods/issues/3908)
+
+* Fix source locking/unlocking.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#4059](https://github.com/CocoaPods/CocoaPods/issues/4059)
+
+* Include the `-ObjC` linker flag when static `vendored_frameworks` are present.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3870](https://github.com/CocoaPods/CocoaPods/issues/3870)
+  [#3992](https://github.com/CocoaPods/CocoaPods/issues/3992)
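+
+  A hedged podspec sketch of the situation this fixes (the pod name and
+  framework path are hypothetical; `vendored_frameworks` is the real
+  attribute):
+
+  ```ruby
+  Pod::Spec.new do |s|
+    s.name    = 'Example' # hypothetical pod wrapping a prebuilt binary
+    s.version = '1.0.0'
+    # A static framework shipped inside the pod; consumers now get the
+    # -ObjC linker flag so its Objective-C categories are loaded.
+    s.vendored_frameworks = 'Frameworks/Example.framework'
+  end
+  ```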
+
+
+## 0.38.2 (2015-07-25)
+
+##### Bug Fixes
+
+* Fix generation of xcconfig files that specify both `-iquote` and `-isystem`
+  headers.
+  [Russ Bishop](https://github.com/russbishop)
+  [#3893](https://github.com/CocoaPods/CocoaPods/issues/3893)
+
+* Pods integrated as static libraries can no longer be imported as
+  modules, as that change had unexpected side-effects.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3898](https://github.com/CocoaPods/CocoaPods/pull/3898)
+  [#3879](https://github.com/CocoaPods/CocoaPods/issues/3879)
+  [#3888](https://github.com/CocoaPods/CocoaPods/issues/3888)
+  [#3886](https://github.com/CocoaPods/CocoaPods/issues/3886)
+  [#3889](https://github.com/CocoaPods/CocoaPods/issues/3889)
+  [#3884](https://github.com/CocoaPods/CocoaPods/issues/3884)
+
+* Source file locking now happens after plugin and podfile post-install hooks
+  have run.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3529](https://github.com/CocoaPods/CocoaPods/issues/3529)
+
+* Only set project, dylib, and compatibility versions to valid, three integer
+  values.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3887](https://github.com/CocoaPods/CocoaPods/issues/3887)
+
+
+## 0.38.1 (2015-07-23)
+
+##### Enhancements
+
+* Set project, dylib, and compatibility versions when building pods as
+  frameworks.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+
+* Pods integrated as static libraries can now be imported as modules.
+  [Tomas Linhart](https://github.com/TomasLinhart)
+  [#3874](https://github.com/CocoaPods/CocoaPods/issues/3874)
+
+##### Bug Fixes
+
+* Ensure the aggregate `.xcconfig` file only has the settings for the
+  appropriate build configuration.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3842](https://github.com/CocoaPods/CocoaPods/issues/3842)
+
+* Show the correct error when `pod spec lint` finds multiple podspecs, and at
+  least one of them fails linting.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3869](https://github.com/CocoaPods/CocoaPods/issues/3869)
+
+* Set header search paths properly on the user target when `vendored_libraries`
+  Pods are used while integrating Pods as frameworks.
+  [Jonathan MacMillan](https://github.com/perotinus)
+  [#3857](https://github.com/CocoaPods/CocoaPods/issues/3857)
+
+* Only link public headers in the sandbox for Pods that are not being built
+  into dynamic frameworks, when integrating Pods as frameworks.
+  [Jonathan MacMillan](https://github.com/perotinus)
+  [#3867](https://github.com/CocoaPods/CocoaPods/issues/3867)
+
+* Don't lock resource files, only source files.
+  [Mason Glidden](https://github.com/mglidden)
+  [#3557](https://github.com/CocoaPods/CocoaPods/issues/3557)
+
+* Fix copying frameworks when integrating with today extensions.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3819](https://github.com/CocoaPods/CocoaPods/issues/3819)
+
+
+## 0.38.0 (2015-07-18)
+
+##### Enhancements
+
+* Improve the message shown when trying to use Swift Pods without frameworks.
+  Now it includes the offending Pods so that the user can take action to remove
+  the Pods, if they don’t want to move to frameworks yet.
+  [Eloy Durán](https://github.com/alloy)
+  [#3830](https://github.com/CocoaPods/CocoaPods/pull/3830)
+
+##### Bug Fixes
+
+* Properly merge the `user_target_xcconfig`s of multiple subspecs.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3813](https://github.com/CocoaPods/CocoaPods/issues/3813)
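+
+  A hedged podspec sketch (pod, subspec names, and flag values are
+  hypothetical; `user_target_xcconfig` is the real attribute):
+
+  ```ruby
+  Pod::Spec.new do |s|
+    s.name    = 'Example' # hypothetical pod
+    s.version = '1.0.0'
+    # When a user target depends on both subspecs, their settings are
+    # now merged instead of one silently overwriting the other.
+    s.subspec 'Core' do |core|
+      core.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-ObjC' }
+    end
+    s.subspec 'UI' do |ui|
+      ui.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-lz' }
+    end
+  end
+  ```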
+
+
+## 0.38.0.beta.2 (2015-07-05)
+
+##### Enhancements
+
+* The resolver will now take supported platform deployment targets into account
+  when resolving dependencies.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2443](https://github.com/CocoaPods/CocoaPods/issues/2443)
+
+* `Pods.xcodeproj` will now be written with deterministic UUIDs, vastly reducing
+  project churn and merge conflicts. This behavior can be disabled via the new
+  `COCOAPODS_DISABLE_DETERMINISTIC_UUIDS` environment variable.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* [`cocoapods-stats`](https://github.com/CocoaPods/cocoapods-stats)
+  is now a default plugin.
+  [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Ensure that the `prepare_command` is run even when skipping the download
+  cache.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3674](https://github.com/CocoaPods/CocoaPods/issues/3674)
+
+* Public headers inside a directory named `framework` should be linked in the
+  sandbox.
+  [Vincent Isambart](https://github.com/vincentisambart)
+  [#3751](https://github.com/CocoaPods/CocoaPods/issues/3751)
+
+* Properly support targets with spaces in their name in the embed frameworks
+  script.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3754](https://github.com/CocoaPods/CocoaPods/issues/3754)
+
+* Don't add the `-ObjC` linker flag if it's unnecessary.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3537](https://github.com/CocoaPods/CocoaPods/issues/3537)
+
+* Ensure that no duplicate framework or target dependencies are created.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3763](https://github.com/CocoaPods/CocoaPods/issues/3763)
+
+
+## 0.38.0.beta.1 (2015-06-26)
+
+##### Highlighted Enhancement That Needs Testing
+
+* De-duplicate Pod Targets: CocoaPods now recognizes when a dependency is used
+  multiple times across different user targets, but needs to be built only once.
+  The targets in `Pods.xcodeproj` need to be duplicated when one of the following
+  applies:
+  * They are used on different platforms.
+  * They are used with different sets of subspecs.
+  * They have any dependency which needs to be duplicated.
+
+  You can opt out of this behavior installation-wide by setting the following
+  option in your `~/.cocoapods/config.yaml`:
+  ```yaml
+  deduplicate_targets: false
+  ```
+
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3550](https://github.com/CocoaPods/CocoaPods/issues/3550)
+
+##### Breaking
+
+* The CocoaPods environment header has been removed.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2390](https://github.com/CocoaPods/CocoaPods/issues/2390)
+
+* The `Installer` is passed directly to the `pre_install` and `post_install`
+  hooks defined in the Podfile, instead of the previously used
+  `Hooks::InstallerRepresentation`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3648](https://github.com/CocoaPods/CocoaPods/issues/3648)
+
+* Deprecate the `xcconfig` attribute in the Podspec DSL, which is replaced by
+  the new attributes `pod_target_xcconfig` and `user_target_xcconfig`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [CocoaPods#3465](https://github.com/CocoaPods/CocoaPods/issues/3465)
+
+##### Enhancements
+
+* The notice about a new version being available will now include our
+  recommendation of using the latest stable version.
+  [Hugo Tunius](https://github.com/k0nserv)
+  [#3667](https://github.com/CocoaPods/CocoaPods/pull/3667)
+
+* New commands `pod cache list` and `pod cache clean` allow you to see the
+  contents of the cache and clean it.
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [#3508](https://github.com/CocoaPods/CocoaPods/issues/3508)
+
+* The download cache will automatically be reset when changing CocoaPods
+  versions.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3542](https://github.com/CocoaPods/CocoaPods/issues/3542)
+
+* Supports running pre-install hooks in plugins. This happens before the resolver
+  does its work, and offers easy access to the sandbox, podfile and lockfile via a
+  `PreInstallHooksContext` object. This also renames the post-install hooks from
+  `HooksContext` to `PostInstallHooksContext`.
+  [Orta Therox](https://github.com/orta)
+  [#3540](https://github.com/CocoaPods/cocoapods/issues/3409)
+
+* Allow passing additional arguments to `pod lib create`, which then get passed
+  as-is to the `configure` scripts.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2160](https://github.com/CocoaPods/CocoaPods/issues/2160)
+
+* Use `-analyzer-disable-all-checks` to disable the static analyzer for
+  pods with `inhibit_warnings` enabled (requires Xcode >= 6.1).
+  [Dieter Komendera](https://github.com/kommen)
+  [#2402](https://github.com/CocoaPods/CocoaPods/issues/2402)
+
+* Cache globbing in `PathList` to speed up `pod install`.
+  [Vincent Isambart](https://github.com/vincentisambart)
+  [#3699](https://github.com/CocoaPods/CocoaPods/pull/3699)
+
+* CocoaPods will validate your podfile and try to identify problems
+  and conflicts in how you've specified the dependencies.
+  [Hugo Tunius](https://github.com/k0nserv)
+  [#995](https://github.com/CocoaPods/CocoaPods/issues/995)
+
+* `pod update` will now accept root pod names, even when only subspecs are
+  installed.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3689](https://github.com/CocoaPods/CocoaPods/issues/3689)
+
+* Support for the new `watchos` platform.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3681](https://github.com/CocoaPods/CocoaPods/pull/3681)
+
+##### Bug Fixes
+
+* Added recursive support to the public headers of vendored frameworks
+  that are automatically linked in the sandbox. This fixes an issue
+  for framework header directories that contain sub-directories.
+  [Todd Casey](https://github.com/vhariable)
+  [#3161](https://github.com/CocoaPods/CocoaPods/issues/3161)
+
+* Public headers of vendored frameworks are now automatically linked in
+  the sandbox. That allows transitive inclusion of headers from other pods.
+  [Vincent Isambart](https://github.com/vincentisambart)
+  [#3161](https://github.com/CocoaPods/CocoaPods/issues/3161)
+
+* Fixes an issue that prevented static libraries from building.
+  `OTHER_LIBTOOLFLAGS` is no longer set to the value of `OTHER_LDFLAGS`. If you
+  want to create a static library that includes all dependencies for
+  (internal/external) distribution then you should use a tool like
+  `cocoapods-packager`.
+  [Michael Moscardini](https://github.com/themackworth)
+  [#2747](https://github.com/CocoaPods/CocoaPods/issues/2747)
+  [#2704](https://github.com/CocoaPods/CocoaPods/issues/2704)
+
+* The embed frameworks script will now properly filter out symlinks to the
+  directories that are filtered, which fixes an issue when submitting to the
+  Mac App Store.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* The error report template is now more robust against missing executables.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3719](https://github.com/CocoaPods/CocoaPods/issues/3719)
+
+* Attempting to specify a `git` source where a Podspec for the requested pod is
+  not found will have a more helpful error message.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* `pod outdated` will now accept the `--no-repo-update` and `--no-integrate`
+  options.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fixes an issue which prevented using a custom `CONFIGURATION_BUILD_DIR` when
+  integrating CocoaPods via dynamic frameworks.
+  [Tim Rosenblatt](https://github.com/timrosenblatt)
+  [#3675](https://github.com/CocoaPods/CocoaPods/pull/3675)
+
+* Pods frameworks in codesigned Mac apps are now signed.
+  [Nikolaj Schumacher](https://github.com/nschum)
+  [#3646](https://github.com/CocoaPods/CocoaPods/issues/3646)
+
+
+## 0.37.2 (2015-05-27)
+
+##### Enhancements
+
+* Schemes of development pods will now be shared.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3600](https://github.com/CocoaPods/CocoaPods/issues/3600)
+
+* Recognizes incomplete cache when the original download of a pod was
+  interrupted.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3561](https://github.com/CocoaPods/CocoaPods/issues/3561)
+
+* Allow opting out of pod source locking, meaning `pod try` yields editable
+  projects.
+  [Samuel Giddins](https://github.com/segiddins)
+  [cocoapods-try#31](https://github.com/CocoaPods/cocoapods-try/issues/31)
+
+##### Bug Fixes
+
+* `pod repo push` will now find and push JSON podspecs.
+  [#3494](https://github.com/CocoaPods/CocoaPods/issues/3494)
+  [Kyle Fuller](https://github.com/kylef)
+
+* Flush stdin/stderr and wait a bit in `executable`.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3500](https://github.com/CocoaPods/CocoaPods/issues/3500)
+
+
+## 0.37.1 (2015-05-06)
+
+##### Bug Fixes
+
+* [Cache] Fixes a bug that caused a pod which was cached once to not be updated
+  correctly when needed, e.g. for `pod spec lint`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3498](https://github.com/CocoaPods/CocoaPods/issues/3498)
+
+* Only add the "Embed Pods Frameworks" script for application and unit test
+  targets.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3440](https://github.com/CocoaPods/CocoaPods/issues/3440)
+
+* C++ source files with `.cc`, `.cxx` and `.c++` extensions now have their
+  compiler flags set correctly.
+  [Chongyu Zhu](https://github.com/lembacon)
+  [Kyle Fuller](https://github.com/kylef)
+
+* Handle broken symlinks when installing a Pod.
+  [Daniel Barden](https://github.com/dbarden)
+  [#3515](https://github.com/cocoapods/cocoapods/issues/3515)
+
+* Just remove write permissions from files, so executables are unaffected.
+  [Mason Glidden](https://github.com/mglidden)
+  [#3501](https://github.com/CocoaPods/CocoaPods/issues/3501)
+
+* Always copy the generated `Podfile.lock` to `Pods/Manifest.lock` so they are
+  guaranteed to match, character-by-character, after installation.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3502](https://github.com/CocoaPods/CocoaPods/issues/3502)
+
+* Don't generate an umbrella header when a custom module map is specified. This
+  avoids an incomplete module map warning.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Actually allow skipping the download cache by downloading directly to the
+  download target when requested.
+  [Samuel Giddins](https://github.com/segiddins)
+
+
+## 0.37.0 (2015-05-03)
+
+For more details, see 📝 [CocoaPods 0.37](https://blog.cocoapods.org/CocoaPods-0.37/) on our blog.
+
+##### Bug Fixes
+
+* Print the UTF-8 warning to STDERR.
+  [Matt Holgate](https://github.com/mjholgate)
+
+
+## 0.37.0.rc.2 (2015-04-30)
+
+##### Bug Fixes
+
+* Handle caching specs that have subspecs with higher minimum deployment targets
+  without deleting needed source files.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3471](https://github.com/CocoaPods/CocoaPods/issues/3471)
+
+* Automatically detect JSON podspecs in `pod lib lint`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3477](https://github.com/CocoaPods/CocoaPods/issues/3477)
+
+
+## 0.37.0.rc.1 (2015-04-27)
+
+[Core](https://github.com/CocoaPods/Core/compare/0.37.0.beta.1...0.37.0.rc.1)
+[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.24.0...0.24.1)
+
+##### Enhancements
+
+* Add environment variable `COCOAPODS_SKIP_UPDATE_MESSAGE` to disable the new
+  version message.
+  [Andrea Mazzini](https://github.com/andreamazz)
+  [#3364](https://github.com/CocoaPods/CocoaPods/issues/3364)
+
+* Use the user project's object version for the pods project.
+  [Boris Bügling](https://github.com/neonichu)
+  [#253](https://github.com/CocoaPods/Xcodeproj/issues/253)
+
+##### Bug Fixes
+
+* Add `$(inherited)` to the `FRAMEWORK_SEARCH_PATHS` build setting in the
+  xcconfig for aggregate targets.
+  [Tomohiro Kumagai](https://github.com/EZ-NET)
+  [#3429](https://github.com/CocoaPods/CocoaPods/pull/3429)
+
+* Don't crash when the downloader can't find an appropriate podspec in a `git`
+  pod.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3433](https://github.com/CocoaPods/CocoaPods/issues/3433)
+
+* Automatically lock Pod source files after installing.
+  [Mason Glidden](https://github.com/mglidden)
+  [#1154](https://github.com/CocoaPods/CocoaPods/issues/1154)
+
+* Handle subprocesses leaking STDOUT/STDERR pipes by more strictly managing
+  process lifetime and not allowing I/O to block completion of the task.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3101](https://github.com/CocoaPods/CocoaPods/issues/3101)
+
+* Do not create a pod target if `source_files` only contains headers.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3106](https://github.com/CocoaPods/CocoaPods/issues/3106)
+
+* Run a pod's `prepare_command` (if it has one) before it is cleaned in the
+  download cache.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3436](https://github.com/CocoaPods/CocoaPods/issues/3436)
+
+* Don't set the `-fno-objc-arc` compiler flags for files for which the flag
+  makes no sense.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2559](https://github.com/CocoaPods/CocoaPods/issues/2559)
+
+* Also apply a pod's configuration to any resource targets defined by the pod.
+  [Tom Adriaenssen](https://github.com/inferis)
+  [#3463](https://github.com/CocoaPods/CocoaPods/issues/3463)
+
+
+## 0.37.0.beta.1 (2015-04-18)
+
+##### Enhancements
+
+* Allow the specification of custom module map files.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3145](https://github.com/CocoaPods/CocoaPods/issues/3145)
+
+* Show the source URI for local Pod specification repositories in
+  `pod repo list`.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Only show a warning when there is a minimum deployment target mismatch
+  between target and spec, instead of throwing a hard error.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1241](https://github.com/CocoaPods/CocoaPods/issues/1241)
+
+* Add download caching for pods, which speeds up `pod install` and linting,
+  potentially by several orders of magnitude.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2863](https://github.com/CocoaPods/CocoaPods/issues/2863)
+  [#3172](https://github.com/CocoaPods/CocoaPods/issues/3172)
+
+* Add a `--fail-fast` option to both `pod spec lint` and `pod lib lint` that
+  causes the linter to exit as soon as a single subspec or platform fails
+  linting.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+
+* Naïvely prevent base xcconfig warnings for targets that have custom
+  config files set.
+  [Chris Brauchli](https://github.com/cbrauchli)
+  [#2633](https://github.com/CocoaPods/CocoaPods/issues/2633)
+
+* Ensure private headers are declared as such in a framework's generated module
+  map.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2974](https://github.com/CocoaPods/CocoaPods/issues/2974)
+
+##### Bug Fixes
+
+* Do not pass code-sign arguments to xcodebuild when linting OS X targets.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3310](https://github.com/CocoaPods/CocoaPods/issues/3310)
+
+* Fixes an issue showing the URL to remote resources in `pod repo list`.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Fixes a problem with code signing when integrating CocoaPods
+  into a Today Widget extension.
+  [Christian Sampaio](https://github.com/chrisfsampaio)
+  [#3390](https://github.com/CocoaPods/CocoaPods/pull/3390)
+
+
+## 0.36.4 (2015-04-16)
+
+##### Bug Fixes
+
+* Fixes various problems with Pods that use xcasset bundles. Pods that
+  use xcassets can now be used with the `pod :path` option.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1549](https://github.com/CocoaPods/CocoaPods/issues/1549)
+  [#3384](https://github.com/CocoaPods/CocoaPods/pull/3383)
+  [#3358](https://github.com/CocoaPods/CocoaPods/pull/3358)
+
+
+## 0.36.3 (2015-03-31)
+
+##### Bug Fixes
+
+* Fix using the downloader.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3344](https://github.com/CocoaPods/CocoaPods/issues/3344)
+  [#3345](https://github.com/CocoaPods/CocoaPods/issues/3345)
+
+
+## 0.36.2 (2015-03-31)
+
+[Core](https://github.com/CocoaPods/Core/compare/0.36.1...0.36.2)
+
+##### Bug Fixes
+
+* Unique resources passed to the script generator.
+  [Diego Torres](https://github.com/dtorres)
+  [#3315](https://github.com/CocoaPods/CocoaPods/issues/3315)
+  [#3327](https://github.com/CocoaPods/CocoaPods/issues/3327)
+
+* Update the `Manifest.lock` when migrating local podspecs to JSON. This fixes
+  running `pod install` after upgrading to `0.36`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3292](https://github.com/CocoaPods/CocoaPods/issues/3292)
+  [#3299](https://github.com/CocoaPods/CocoaPods/issues/3299)
+
+
+## 0.36.1 (2015-03-27)
+
+[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.23.0...0.23.1)
+
+##### Bug Fixes
+
+* Workarounds (✻) for the resource script's handling of `.xcasset` files.
+  [sodas](https://github.com/sodastsai)
+  [Tony Li](https://github.com/crazytonyli)
+  [Chongyu Zhu](https://github.com/lembacon)
+  [#3247](https://github.com/CocoaPods/CocoaPods/issues/3247)
+  [#3303](https://github.com/CocoaPods/CocoaPods/issues/3303)
+
+* Fix the sanitization of configuration names in the generated target
+  environment header.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3301](https://github.com/CocoaPods/CocoaPods/issues/3301)
+
+> _(✻) Note: these fixes are only temporary to avoid overriding the user project's `xcassets`.
+  We are aware that these workarounds are "too greedy" and thus user projects having different
+  `xcassets` for different targets will still have issues; we ([@AliSoftware](https://github.com/AliSoftware))
+  are working on a deeper fix ([#3263](https://github.com/CocoaPods/CocoaPods/issues/3263)) for the next release._
+
+
+## 0.36.0 (2015-03-11)
+
+[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.22.0...0.23.0)
+
+For more details, see 📝 [CocoaPods 0.36](https://blog.cocoapods.org/CocoaPods-0.36/) on our blog.
+
+##### Enhancements
+
+* Allows Swift pods to have a deployment target under iOS 8.0 if they use
+  XCTest.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3225](https://github.com/CocoaPods/CocoaPods/issues/3225)
+
+##### Bug Fixes
+
+* Include Swift-specific build settings on target creation, i.e. disable
+  optimizations for the debug configuration.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3238](https://github.com/CocoaPods/CocoaPods/issues/3238)
+
+* Only copy explicitly specified xcasset files into the bundle of the integrated
+  target.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3219](https://github.com/CocoaPods/CocoaPods/issues/3219)
+
+* Correctly filter Xcode warnings about the use of dynamic frameworks.
+  [Boris Bügling](https://github.com/neonichu)
+
+* Fixes warnings when the aggregate target doesn't contain any pod target which
+  is built, because `PODS_FRAMEWORK_BUILD_PATH` was added to
+  `FRAMEWORK_SEARCH_PATHS` but never created.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3217](https://github.com/CocoaPods/CocoaPods/issues/3217)
+
+* Allows the usage of `:head` dependencies even when the most recent published
+  version was a pre-release.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3212](https://github.com/CocoaPods/CocoaPods/issues/3212)
+
+* Limit the check for transitive static binaries to those which are directly
+  linked to the user target.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3194](https://github.com/CocoaPods/CocoaPods/issues/3194)
+
+* Lint to prevent dynamic libraries and frameworks from passing with iOS 7.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3193](https://github.com/CocoaPods/CocoaPods/issues/3193)
+
+* Shows an informative error message when there is no base specification found
+  for a `:head` dependency.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3230](https://github.com/CocoaPods/CocoaPods/issues/3230)
+
+* Fix the `OTHER_SWIFT_FLAGS` generated, so it inherits previous definitions.
+  [Daniel Thorpe](https://github.com/danthorpe)
+  [#2983](https://github.com/CocoaPods/CocoaPods/issues/2983)
+
+
+## 0.36.0.rc.1 (2015-02-24)
+
+##### Enhancements
+
+* Set the `APPLICATION_EXTENSION_API_ONLY` build setting if integrating with a
+  watch extension target.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3153](https://github.com/CocoaPods/CocoaPods/issues/3153)
+
+* Build for iOS simulator only during validation. This allows validation without having
+  This allows validation without having
+  provisioning profiles set up.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3083](https://github.com/CocoaPods/CocoaPods/issues/3083)
+  [Swift#13](https://github.com/CocoaPods/swift/issues/13)
+
+* Explicitly inform the user to close the existing project when switching to
+  a workspace for the first time.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2996](https://github.com/CocoaPods/CocoaPods/issues/2996)
+
+* Automatically detect conflicts between framework names.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2943](https://github.com/CocoaPods/CocoaPods/issues/2943)
+
+* Use the proper `TMPDIR` for the CocoaPods process, instead of blindly using
+  `/tmp`.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Let lint fail for Swift pods supporting deployment targets below iOS 8.0.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2963](https://github.com/CocoaPods/CocoaPods/issues/2963)
+
+* Reject installation if a static library is used as a transitive dependency
+  while integrating Pods as frameworks.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2926](https://github.com/CocoaPods/CocoaPods/issues/2926)
+
+* Do not copy Swift standard libraries multiple times.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3131](https://github.com/CocoaPods/CocoaPods/issues/3131)
+
+* Check for the Xcode License Agreement before running commands.
+  [Xavi Matos](https://github.com/CalQL8ed-K-OS)
+  [#3002](https://github.com/CocoaPods/CocoaPods/issues/3002)
+
+* `pod update PODNAME` will update pods in a case-insensitive manner.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2992](https://github.com/CocoaPods/CocoaPods/issues/2992)
+
+* Allow specifying repo names to `pod {spec,lib} lint --sources`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2685](https://github.com/CocoaPods/CocoaPods/issues/2685)
+
+* Require explicit use of `use_frameworks!` for Pods written in Swift.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3029](https://github.com/CocoaPods/CocoaPods/issues/3029)
+
+* Lint as framework automatically. If needed, the `--use-libraries` option
+  allows linting as a static library.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2912](https://github.com/CocoaPods/CocoaPods/issues/2912)
+
+* Added Xcode legacy build location support for the default Pods.xcodeproj.
+  It defaults to `${SRCROOT}/../build` but can be changed in a `post_install`
+  hook by using the `Project#symroot=` writer.
+  [Sam Marshall](https://github.com/samdmarshall)
+
+##### Bug Fixes
+
+* Set `SKIP_INSTALL=YES` for all generated targets to avoid producing
+  *Generic Xcode Archives* on Archive.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#3188](https://github.com/CocoaPods/CocoaPods/issues/3188)
+
+* Added support for `.tpp` C++ header files in specs (previously they were
+  filtered out and symlinks weren't created in the Pods/Headers folder).
+  [Honza Dvorsky](https://github.com/czechboy0)
+  [#3129](https://github.com/CocoaPods/CocoaPods/pull/3129)
+
+* Fixed installation for app-extension targets which had no dependencies
+  configured in the Podfile.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3102](https://github.com/CocoaPods/CocoaPods/issues/3102)
+
+* Correct escaping of resource bundles in 'Copy Pods Resources' script.
+  [Seán Labastille](https://github.com/flufff42)
+  [#3082](https://github.com/CocoaPods/CocoaPods/issues/3082)
+
+* Correctly update sources when calling `pod outdated`, and also respect the
+  `--[no-]repo-update` flag.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3137](https://github.com/CocoaPods/CocoaPods/issues/3137)
+
+* Fix the `OTHER_SWIFT_FLAGS` generated, so `#if COCOAPODS` works in Swift.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2983](https://github.com/CocoaPods/CocoaPods/issues/2983)
+
+* Output a properly-formed `Podfile` when running `pod init` with a target that
+  contains a `'` in its name.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3136](https://github.com/CocoaPods/CocoaPods/issues/3136)
+
+* Remove the stored lockfile checkout source when switching to a development
+  pod.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3141](https://github.com/CocoaPods/CocoaPods/issues/3141)
+
+* Migrate local Ruby podspecs to JSON, allowing updating those pods to work.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#3038](https://github.com/CocoaPods/CocoaPods/issues/3038)
+
+* Remove grep color markup in the embed frameworks script.
+  [Adriano Bonat](https://github.com/tanob)
+  [#3117](https://github.com/CocoaPods/CocoaPods/issues/3117)
+
+* Fixes an issue where `pod ipc list` and `pod ipc podfile` were returning an
+  error.
+  [Kyle Fuller](https://github.com/kylef)
+  [#3134](https://github.com/CocoaPods/CocoaPods/issues/3134)
+
+* Fixes an issue with spaces in the path to the user's developer tools.
+  [Boris Bügling](https://github.com/neonichu)
+  [#3181](https://github.com/CocoaPods/CocoaPods/issues/3181)
+
+
+## 0.36.0.beta.2 (2015-01-28)
+
+[Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.21.0...0.21.2)
+
+##### Breaking
+
+* Changes the default spec repositories used from all configured spec
+  repositories to the master spec repository when no spec repositories
+  are explicitly configured in a Podfile.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2946](https://github.com/CocoaPods/CocoaPods/issues/2946)
+
+##### Enhancements
+
+* Set the `APPLICATION_EXTENSION_API_ONLY` build setting if integrating with an app extension target.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2980](https://github.com/CocoaPods/CocoaPods/issues/2980)
+
+* Xcodebuild warnings will now be reported as `warning` during linting
+  instead of `note`.
+  [Hugo Tunius](https://github.com/K0nserv)
+
+* Copy only the resources required for the current build configuration.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2391](https://github.com/CocoaPods/CocoaPods/issues/2391)
+
+##### Bug Fixes
+
+* Ensure that linting fails if xcodebuild doesn't successfully build your Pod.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2981](https://github.com/CocoaPods/CocoaPods/issues/2981)
+  [cocoapods-trunk#33](https://github.com/CocoaPods/cocoapods-trunk/issues/33)
+
+* Clone the master spec repository when no spec repositories are explicitly
+  defined in the Podfile. This fixes problems using CocoaPods for the first
+  time without any explicit spec repositories.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2946](https://github.com/CocoaPods/CocoaPods/issues/2946)
+
+* Xcodebuild warnings with the string `error` in them will no longer be
+  linted as errors if they are in fact warnings.
+  [Hugo Tunius](https://github.com/K0nserv)
+  [#2579](https://github.com/CocoaPods/CocoaPods/issues/2579)
+
+* Any errors which occur during fetching of external podspecs over HTTP
+  will now be gracefully handled.
+  [Hugo Tunius](https://github.com/K0nserv)
+  [#2823](https://github.com/CocoaPods/CocoaPods/issues/2823)
+
+* When updating spec repositories only update the git sourced repos.
+  [Dustin Clark](https://github.com/clarkda)
+  [#2558](https://github.com/CocoaPods/CocoaPods/issues/2558)
+
+* Pods referenced via the `:podspec` option will have their podspecs properly
+  parsed in the local directory if the path points to a local file.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Fix an issue where using Swift frameworks in an Objective-C host application
+  causes an error because the Swift frameworks were not code signed.
+  [Joseph Ross](https://github.com/jrosssavant)
+  [#3008](https://github.com/CocoaPods/CocoaPods/issues/3008)
+
+
+## 0.36.0.beta.1 (2014-12-25)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.35.0...0.36.0.beta.1)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.35.0...0.36.0.beta.1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.20.2...0.21.0)
+• [CLAide](https://github.com/CocoaPods/CLAide/compare/v0.7.0...0.8.0)
+• [Molinillo](https://github.com/CocoaPods/Molinillo/compare/0.1.2...0.2.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.8.0...0.8.1)
+• [cocoapods-try](https://github.com/CocoaPods/cocoapods-try/compare/0.4.2...0.4.3)
+• [cocoapods-trunk](https://github.com/CocoaPods/cocoapods-trunk/compare/0.4.1...0.5.0)
+• [cocoapods-plugins](https://github.com/CocoaPods/cocoapods-plugins/compare/0.3.2...0.4.0)
+
+##### Highlighted Enhancement That Needs Testing
+
+* Support Frameworks & Swift: CocoaPods now recognizes Swift source files and
+  builds dynamic frameworks when necessary. A project can explicitly
+  opt in via `use_frameworks!` in the Podfile, or if any dependency contains
+  Swift code, all pods for that target will be integrated as frameworks.
+
+  As a pod author, you can change the module name of the built framework by
+  specifying a `module_name` in the podspec. The built frameworks are embedded into
+  the host application with a new shell script build phase in the user's
+  project allowing configuration-dependent pods.
+
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#2835](https://github.com/CocoaPods/CocoaPods/issues/2835)
+
+##### Breaking
+
+* Bundle Resources into Frameworks: Previously all resources were compiled and
+  copied into the `mainBundle`. Now Pods have to use
+  `[NSBundle bundleForClass:<#Class from Pod#>]` to access provided resources
+  relative to the bundle.
+
+  [Boris Bügling](https://github.com/neonichu)
+  [#2730](https://github.com/CocoaPods/CocoaPods/issues/2730)
+
+* Only the hooks specified by usage of the `plugin` directive of the `Podfile`
+  will be run (see the example below). Additionally, plugins that depend on
+  hooks will have to update to specify their 'plugin name' when registering
+  the hooks in order to make it possible for those hooks to be run.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2640](https://github.com/CocoaPods/CocoaPods/issues/2640)
+
+##### Enhancements
+
+* Do not generate targets for Pods without sources.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2918](https://github.com/CocoaPods/CocoaPods/issues/2918)
+
+* Show the name of the source for each hook that is run in verbose mode.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2639](https://github.com/CocoaPods/CocoaPods/issues/2639)
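+
+For example, a minimal sketch of the `plugin` directive mentioned in the
+breaking changes above; `cocoapods-keys` is just a stand-in for any
+hook-providing plugin you have installed:
+
+    # Podfile: only hooks registered by plugins declared this way will be run.
+    plugin 'cocoapods-keys'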
+
+* Move pods' private headers to `Headers/Private` from `Headers/Build`.
+  Since some SCM ignore templates include `build` by default, this makes it
+  easier to check in the `Pods/` directory.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2623](https://github.com/CocoaPods/CocoaPods/issues/2623)
+
+* Validate that a specification's `public_header_files` and
+  `private_header_files` file patterns only match header files.
+  Also, validate that all file patterns, if given, match at least one file.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2914](https://github.com/CocoaPods/CocoaPods/issues/2914)
+
+* The installer now organizes a development pod's source and resource files
+  into subgroups reflecting their organization in the filesystem.
+  [Imre Mihaly](https://github.com/imihaly)
+
+##### Bug Fixes
+
+* Fix updating a pod that has subspec dependencies.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2879](https://github.com/CocoaPods/CocoaPods/issues/2879)
+
+* Restore the `#define`s in the environment header when the `--no-integrate`
+  installation option is used.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2876](https://github.com/CocoaPods/CocoaPods/issues/2876)
+
+* Fix issues when trying to discover the xcodeproj automatically
+  but the current path contains special characters (`[`,`]`,`{`,`}`,`*`,`?`).
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [#2852](https://github.com/CocoaPods/CocoaPods/issues/2852)
+
+* Fix linting subspecs that have a higher deployment target than the root
+  spec.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1919](https://github.com/CocoaPods/CocoaPods/issues/1919)
+
+* Fix the reading of podspecs that come from the `:git`, `:svn`, `:http`, or
+  `:hg` options in your `Podfile` that used context-dependent Ruby code, such as
+  reading a file to determine the specification version.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2875](https://github.com/CocoaPods/CocoaPods/issues/2875)
+
+* Fix the updating of `:git`, `:svn`, and `:hg` dependencies when updating all
+  pods.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2859](https://github.com/CocoaPods/CocoaPods/issues/2859)
+
+* Fix an issue when a user doesn't yet have any spec repositories configured.
+  [Boris Bügling](https://github.com/neonichu)
+
+* Fix an issue updating repositories when another spec repository doesn't
+  have a remote.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2965](https://github.com/CocoaPods/CocoaPods/issues/2965)
+
+
+## 0.35.0 (2014-11-19)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.34.4...0.35.0)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.34.4...0.35.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.19.4...0.20.2)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.7.2...0.8.0)
+
+For more details, see 📝 [CocoaPods 0.35](https://blog.cocoapods.org/CocoaPods-0.35/) on our blog.
+
+##### Enhancements
+
+* Allow the specification of file patterns for the Podspec's `requires_arc`
+  attribute.
+  [Kyle Fuller](https://github.com/kylef)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#532](https://github.com/CocoaPods/CocoaPods/issues/532)
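+
+For example, a podspec might scope ARC to a subset of its files; a minimal
+sketch with hypothetical names and paths:
+
+    Pod::Spec.new do |s|
+      s.name    = 'MyLib'
+      s.version = '1.0.0'
+      # ARC is enabled only for files matching these patterns; other source
+      # files are compiled with -fno-objc-arc.
+      s.requires_arc = ['Classes/Arc/**/*.m', 'Classes/Shared/MyLib.m']
+    end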
+
+* From now on, pods installed directly from their repositories will be recorded
+  in the `Podfile.lock` file and will be guaranteed to be checked-out using the
+  same revision on subsequent installations. Examples of this are when using
+  the `:git`, `:svn`, or `:hg` options in your `Podfile`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1058](https://github.com/CocoaPods/CocoaPods/issues/1058)
+
+##### Bug Fixes
+
+* Fix an output formatting issue with various commands like `pod search`
+  and `pod trunk`.
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [#2603](https://github.com/CocoaPods/CocoaPods/issues/2603)
+
+* Show a helpful error message if the old resolver incorrectly activated a
+  pre-release version that now leads to a version conflict.
+  [Samuel Giddins](https://github.com/segiddins)
+
+* Provides a user friendly message when using `pod spec create` with a
+  repository that doesn't yet have any commits.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2803](https://github.com/CocoaPods/CocoaPods/issues/2803)
+
+* Fixes an issue with integrating into projects where there is a slash in the
+  build configuration name.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2767](https://github.com/CocoaPods/CocoaPods/issues/2767)
+
+* Pods will use `CLANG_ENABLE_OBJC_ARC = 'YES'` instead of
+  `CLANG_ENABLE_OBJC_ARC = 'NO'`. For pods with `requires_arc = false` the
+  `-fno-objc-arc` flag will be specified for all source files.
+  [Hugo Tunius](https://github.com/K0nserv)
+  [#2262](https://github.com/CocoaPods/CocoaPods/issues/2262)
+
+* Fixed an issue where Core Data mapping models were not compiled when
+  copying resources to the main application bundle.
+  [Yan Rabovik](https://github.com/rabovik)
+
+* Fix uninitialized constant Class::YAML crash in some cases.
+  [Tim Shadel](https://github.com/timshadel)
+
+##### Enhancements
+
+* `pod search`, `pod spec which`, `pod spec cat` and `pod spec edit`
+  now use plain text search by default instead of a regex. Especially
+  `pod search UIView+UI` now searches for pods containing exactly `UIView+UI`
+  in their name, not trying to interpret the `+` as a regular expression.
+  _Note: You can still use a regular expression with the new `--regex` flag that has
+  been added to these commands, e.g. `pod search --regex "(NS|UI)Color"`._
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [Core#188](https://github.com/CocoaPods/Core/issues/188)
+
+* Use `--allow-warnings` rather than `--error-only` for pod spec validation.
+  [Daniel Tomlinson](https://github.com/DanielTomlinson)
+  [#2820](https://github.com/CocoaPods/CocoaPods/issues/2820)
+
+## 0.35.0.rc2 (2014-11-06)
+
+##### Enhancements
+
+* Allow the resolver to fail faster when there are unresolvable conflicts
+  involving the Lockfile.
+  [Samuel Giddins](https://github.com/segiddins)
+
+##### Bug Fixes
+
+* Allows pre-release spec versions when a requirement has an external source
+  specified.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2768](https://github.com/CocoaPods/CocoaPods/issues/2768)
+
+* We no longer require git version 1.7.5 or greater.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Fix the usage of `:head` pods.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2789](https://github.com/CocoaPods/CocoaPods/issues/2789)
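+
+For reference, a `:head` dependency is declared in the Podfile like this
+(hypothetical pod name; `:head` fetches the latest state of the pod's
+repository instead of a published version):
+
+    pod 'MyPod', :head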
+
+* Show a more informative message when attempting to lint a spec whose
+  source could not be downloaded.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2667](https://github.com/CocoaPods/CocoaPods/issues/2667)
+  [#2759](https://github.com/CocoaPods/CocoaPods/issues/2759)
+
+## 0.35.0.rc1 (2014-11-02)
+
+##### Highlighted Enhancements That Need Testing
+
+* The `Resolver` has been completely rewritten to use
+  [Molinillo](https://github.com/CocoaPods/Molinillo), an iterative dependency
+  resolution algorithm that automatically resolves version conflicts.
+  The order in which dependencies are declared in the `Podfile` no longer has
+  any effect on the resolution process.
+
+  You should ensure that `pod install`, `pod update` and `pod update [NAME]`
+  work as expected and install the correct versions of your pods during
+  this RC1 release.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#978](https://github.com/CocoaPods/CocoaPods/issues/978)
+  [#2002](https://github.com/CocoaPods/CocoaPods/issues/2002)
+
+##### Breaking
+
+* Support for older versions of Ruby has been dropped and CocoaPods now depends
+  on Ruby 2.0.0 or greater. This is due to the release of Xcode 6.0 which has
+  dropped support for OS X 10.8, which results in the minimum version of
+  Ruby pre-installed on OS X now being 2.0.0.
+
+  If you are using a custom installation of Ruby older than 2.0.0, you
+  will need to update. Or even better, migrate to system Ruby.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Attempts to resolve circular dependencies will now raise an exception.
+  [Samuel Giddins](https://github.com/segiddins)
+  [Molinillo#6](https://github.com/CocoaPods/Molinillo/issues/6)
+
+##### Enhancements
+
+* The use of implicit sources has been un-deprecated. By default, all available
+  spec-repos will be used. There should only be a need to specify explicit
+  sources if you want to specifically _exclude_ certain spec-repos, such as the
+  `master` spec-repo, if you want to declare the order of spec look-up
+  precedence, or if you want other users of a Podfile to automatically have a
+  spec-repo cloned on `pod install`.
+  [Eloy Durán](https://github.com/alloy)
+
+* The `pod push` command has been removed as it has been deprecated in favour of
+  `pod repo push` in CocoaPods 0.33.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+* Refactorings in preparation for framework support, which could break usage
+  of the Hooks API.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#2461](https://github.com/CocoaPods/CocoaPods/issues/2461)
+
+* Implicit dependencies are now locked, so simply running `pod install` will not
+  cause them to be updated when they shouldn't be.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2318](https://github.com/CocoaPods/CocoaPods/issues/2318)
+  [#2506](https://github.com/CocoaPods/CocoaPods/issues/2506)
+
+* Pre-release versions are only considered in the resolution process when there
+  are dependencies that explicitly reference pre-release requirements.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1489](https://github.com/CocoaPods/CocoaPods/issues/1489)
+
+* Only set up the master specs repo if required.
+  [Daniel Tomlinson](https://github.com/DanielTomlinson)
+  [#2562](https://github.com/CocoaPods/CocoaPods/issues/2562)
+
+* `Sandbox::FileAccessor` now optionally includes expanded paths of headers of
+  vendored frameworks in `public_headers`.
+  [Eloy Durán](https://github.com/alloy)
+  [#2722](https://github.com/CocoaPods/CocoaPods/pull/2722)
+
+* Analysis is now halted and the user informed when there are multiple different
+  external sources for dependencies with the same root name.
+  The user is also now warned when there are duplicate dependencies in the
+  Podfile.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2738](https://github.com/CocoaPods/CocoaPods/issues/2738)
+
+* Multiple subspecs that point to the same external dependency will now only
+  cause that external source to be fetched once.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2743](https://github.com/CocoaPods/CocoaPods/issues/2743)
+
+##### Bug Fixes
+
+* Fixes an issue in the `XCConfigIntegrator` where not all targets that need
+  integration were being integrated, but were getting incorrect warnings about
+  the user having specified a custom base configuration.
+  [Eloy Durán](https://github.com/alloy)
+  [#2752](https://github.com/CocoaPods/CocoaPods/issues/2752)
+
+* Do not try to clone spec-repos in `/`.
+  [Eloy Durán](https://github.com/alloy)
+  [#2723](https://github.com/CocoaPods/CocoaPods/issues/2723)
+
+* Improved sanitizing of configuration names which have a numeric prefix.
+  [Steffen Matthischke](https://github.com/HeEAaD)
+  [#2700](https://github.com/CocoaPods/CocoaPods/pull/2700)
+
+* Fixes an issue where headers from a podspec with one platform are exposed to
+  targets with a different platform. The headers are now only exposed to the
+  targets with the same platform.
+  [Michael Melanson](https://github.com/michaelmelanson)
+  [Kyle Fuller](https://github.com/kylef)
+  [#1249](https://github.com/CocoaPods/CocoaPods/issues/1249)
+
+
+## 0.34.4 (2014-10-18)
+
+##### Bug Fixes
+
+* Fixes a crash when running `pod outdated`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2624](https://github.com/CocoaPods/CocoaPods/issues/2624)
+
+* Ensure that external sources (as specified in the `Podfile`) are downloaded
+  when their source is missing, even if their specification is present.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2494](https://github.com/CocoaPods/CocoaPods/issues/2494)
+
+* Fixes an issue where running `pod install/update` while the Xcode project
+  is open can cause the open project to have build failures until Xcode
+  is restarted.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2627](https://github.com/CocoaPods/CocoaPods/issues/2627)
+  [#2665](https://github.com/CocoaPods/CocoaPods/issues/2665)
+
+* Fixes a crash when using file URLs as a source.
+  [Kurry Tran](https://github.com/kurry)
+  [#2683](https://github.com/CocoaPods/CocoaPods/issues/2683)
+
+* Fixes an issue when using pods in static library targets and building with
+  Xcode 6 which requires `OTHER_LIBTOOLFLAGS` instead of `OTHER_LDFLAGS`, thus
+  basically reverting to the previous Xcode behaviour, for now at least.
+  [Kyle Fuller](https://github.com/kylef)
+  [Eloy Durán](https://github.com/alloy)
+  [#2666](https://github.com/CocoaPods/CocoaPods/issues/2666)
+
+* Fixes an issue running the resources script when Xcode is installed to a
+  directory with a space when compiling xcassets.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2684](https://github.com/CocoaPods/CocoaPods/issues/2684)
+
+* Fixes an issue when installing Pods with resources to a target which
+  doesn't have any resources.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2083](https://github.com/CocoaPods/CocoaPods/issues/2083)
+
+* Ensure that git 1.7.5 or newer is installed when running pod.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2651](https://github.com/CocoaPods/CocoaPods/issues/2651)
+
+
+## 0.34.2 (2014-10-08)
+
+##### Enhancements
+
+* Make the output of `pod outdated` show what running `pod update` will do.
+  Takes into account the sources specified in the `Podfile`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2470](https://github.com/CocoaPods/CocoaPods/issues/2470)
+
+* Allows the use of the `GCC_PREPROCESSOR_DEFINITIONS` flag `${inherited}`
+  without emitting a warning.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2577](https://github.com/CocoaPods/CocoaPods/issues/2577)
+
+* Integration with the user project will no longer replace an existing
+  base build configuration.
+  [Robert Jones](https://github.com/redshirtrob)
+  [#1736](https://github.com/CocoaPods/CocoaPods/issues/1736)
+
+##### Bug Fixes
+
+* Improved sanitizing of configuration names to avoid generating invalid
+  preprocessor definitions.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2542](https://github.com/CocoaPods/CocoaPods/issues/2542)
+
+* More robust generation of source names from URLs.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2534](https://github.com/CocoaPods/CocoaPods/issues/2534)
+
+* Allow the `Validator` to only use specific sources.
+  Allows customizable sources for `pod spec lint` and `pod lib lint`,
+  with both defaulting to `master`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2543](https://github.com/CocoaPods/CocoaPods/issues/2543)
+  [cocoapods-trunk#28](https://github.com/CocoaPods/cocoapods-trunk/issues/28)
+
+* Takes into account the sources specified in the `Podfile` when running
+  `pod outdated`.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2553](https://github.com/CocoaPods/CocoaPods/issues/2553)
+
+* Ensures that the master repo is shallow cloned when added via a Podfile
+  `source` directive.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2586](https://github.com/CocoaPods/CocoaPods/issues/2586)
+
+* Ensures that the user project is not saved when there are no
+  user targets integrated.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2561](https://github.com/CocoaPods/CocoaPods/issues/2561)
+  [#2593](https://github.com/CocoaPods/CocoaPods/issues/2593)
+
+* Fix a crash when running `pod install` with an empty target that inherits a
+  pod from a parent target.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2591](https://github.com/CocoaPods/CocoaPods/issues/2591)
+
+* Take into account versions of a Pod from all specified sources when
+  resolving dependencies.
+  [Thomas Visser](https://github.com/Thomvis)
+  [#2556](https://github.com/CocoaPods/CocoaPods/issues/2556)
+
+* Sanitize build configuration names in target environment header macros.
+  [Kra Larivain](https://github.com/olarivain)
+  [#2532](https://github.com/CocoaPods/CocoaPods/pull/2532)
+
+
+## 0.34.1 (2014-09-26)
+
+##### Bug Fixes
+
+* Doesn't take into account the trailing `.git` in repository URLs when
+  trying to find a matching specs repo.
+  [Samuel Giddins](https://github.com/segiddins)
+  [#2526](https://github.com/CocoaPods/CocoaPods/issues/2526)
+
+
+## 0.34.0 (2014-09-26)
+
+For more details, see 📝 [CocoaPods 0.34](https://blog.cocoapods.org/CocoaPods-0.34/) on our blog.
+
+##### Breaking
+
+* Add support for loading podspecs from *only* specific spec-repos via
+  `sources`.
+  By default, when there are no sources specified in a Podfile all
+  source repos will be used. This has always been the case. However, this
+  implicit use of sources is now deprecated. Once you specify specific sources,
+  **no** repos will be included by default. For example:
+
+      source 'https://github.com/artsy/Specs.git'
+      source 'https://github.com/CocoaPods/Specs.git'
+
+  Any source URLs specified that have not yet been added will be cloned before
+  resolution begins.
+  [François Benaiteau](https://github.com/netbe)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Samuel Giddins](https://github.com/segiddins)
+  [#1143](https://github.com/CocoaPods/CocoaPods/pull/1143)
+  [Core#19](https://github.com/CocoaPods/Core/pull/19)
+  [Core#170](https://github.com/CocoaPods/Core/issues/170)
+  [#2515](https://github.com/CocoaPods/CocoaPods/issues/2515)
+
+##### Enhancements
+
+* Added the `pod repo list` command which lists all the repositories.
+  [Luis Ascorbe](https://github.com/lascorbe)
+  [#1455](https://github.com/CocoaPods/CocoaPods/issues/1455)
+
+##### Bug Fixes
+
+* Works around an Xcode issue where linting would fail even though `xcodebuild`
+  actually succeeds. Xcode.app also doesn't fail when this issue occurs, so it's
+  safe for us to do the same.
+  [Kra Larivain](https://github.com/olarivain)
+  [Boris Bügling](https://github.com/neonichu)
+  [Eloy Durán](https://github.com/alloy)
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2394](https://github.com/CocoaPods/CocoaPods/issues/2394)
+  [#2395](https://github.com/CocoaPods/CocoaPods/pull/2395)
+
+* Fixes the detection of JSON podspecs included via `:path`.
+  [laiso](https://github.com/laiso)
+  [#2489](https://github.com/CocoaPods/CocoaPods/pull/2489)
+
+* Fixes an issue where `pod install` would crash during Plist building if any
+  pod has invalid UTF-8 characters in their title or description.
+  [Ladislav Martincik](https://github.com/martincik)
+  [#2482](https://github.com/CocoaPods/CocoaPods/issues/2482)
+
+* Fix crash when the URL of a private GitHub repo is passed to `pod spec
+  create` as an argument.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1543](https://github.com/CocoaPods/CocoaPods/issues/1543)
+
+
+## 0.34.0.rc2 (2014-09-16)
+
+##### Bug Fixes
+
+* Fixes an issue where `pod lib lint` would crash if a podspec couldn't be
+  loaded.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2147](https://github.com/CocoaPods/CocoaPods/issues/2147)
+
+* Fixes an issue where `pod init` would not add `source 'master'` to newly
+  created Podfiles.
+  [Ash Furrow](https://github.com/AshFurrow)
+  [#2473](https://github.com/CocoaPods/CocoaPods/issues/2473)
+
+
+## 0.34.0.rc1 (2014-09-13)
+
+##### Breaking
+
+* The use of the `$PODS_ROOT` environment variable has been deprecated and
+  should not be used. It will be removed in future versions of CocoaPods.
+  [#2449](https://github.com/CocoaPods/CocoaPods/issues/2449)
+
+* Add support for loading podspecs from specific spec-repos _only_, a.k.a. 'sources'.
+  By default, when not specifying any specific sources in your Podfile, the 'master'
+  spec-repo will be used, as was always the case. However, once you specify specific
+  sources the 'master' spec-repo will **not** be included by default.
+  For example:
+
+      source 'private-spec-repo'
+      source 'master'
+
+  [François Benaiteau](https://github.com/netbe)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1143](https://github.com/CocoaPods/CocoaPods/pull/1143)
+  [Core#19](https://github.com/CocoaPods/Core/pull/19)
+
+* The `Pods` directory has been reorganized. This might require manual
+  intervention in projects where files generated by CocoaPods have manually been
+  imported into the user's project (common with the acknowledgements files).
+  [#1055](https://github.com/CocoaPods/CocoaPods/pull/1055)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Michele Titolo](https://github.com/mtitolo)
+
+* Plugins are now expected to include the `cocoapods-plugin.rb` file in
+  `./lib`.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [CLAide#28](https://github.com/CocoaPods/CLAide/pull/28)
+
+* The specification `requires_arc` attribute now defaults to true.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [CocoaPods#267](https://github.com/CocoaPods/CocoaPods/issues/267)
+
+##### Enhancements
+
+* Add support to specify dependencies per build configuration:
+
+      pod 'Lookback', :configurations => ['Debug']
+
+  Currently configurations can only be specified per single Pod.
+  [Joachim Bengtsson](https://github.com/nevyn)
+  [Eloy Durán](https://github.com/alloy)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1791](https://github.com/CocoaPods/CocoaPods/pull/1791)
+  [#1668](https://github.com/CocoaPods/CocoaPods/pull/1668)
+  [#731](https://github.com/CocoaPods/CocoaPods/pull/731)
+
+* Improved performance of git downloads using shallow clone.
+  [Marin Usalj](https://github.com/supermarin)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [cocoapods-downloader#29](https://github.com/CocoaPods/cocoapods-downloader/pull/29)
+
+* Simplify installation: CocoaPods no longer requires the
+  compilation of the troublesome native extensions.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Xcodeproj#168](https://github.com/CocoaPods/Xcodeproj/pull/168)
+  [Xcodeproj#167](https://github.com/CocoaPods/Xcodeproj/issues/167)
+
+* Add hooks for plugins. Currently only the installer hook is supported.
+  A plugin can register itself to be activated after the installation with the
+  following syntax:
+
+      Pod::HooksManager.register(:post_install) do |installer_context|
+        # implementation
+      end
+
+  The `installer_context` is an instance of the `Pod::Installer::HooksContext`
+  class which provides the information about the installation.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Core#132](https://github.com/CocoaPods/Core/pull/132)
+
+* Add support for migrating the sandbox to new versions of CocoaPods.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+* Display an indication for deprecated Pods in the command line search.
+  [Hugo Tunius](https://github.com/k0nserv)
+  [#2180](https://github.com/CocoaPods/CocoaPods/issues/2180)
+
+* Use the CLIntegracon gem for the integration tests.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [#2371](https://github.com/CocoaPods/CocoaPods/issues/2371)
+
+* Include configurations that a user explicitly specifies, in their Podfile,
+  when the `--no-integrate` option is specified.
+  [Eloy Durán](https://github.com/alloy)
+
+* Properly quote the `-isystem` values in the xcconfig files.
+  [Eloy Durán](https://github.com/alloy)
+
+* Remove the installation post install message which presents the CHANGELOG.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Eloy Durán](https://github.com/alloy)
+
+* Add support for user-specified project directories with the
+  `--project-directory` option.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2183](https://github.com/CocoaPods/CocoaPods/issues/2183)
+
+* Now the `plutil` tool is used when available to produce
+  output consistent with Xcode.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+* Indicate the name of the pod whose requirements cannot be satisfied.
+  [Seivan Heidari](https://github.com/seivan)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1938](https://github.com/CocoaPods/CocoaPods/issues/1938)
+
+* Add support for JSON specs to external sources (`:path`, `:git`, etc)
+  options.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2320](https://github.com/CocoaPods/CocoaPods/issues/2320)
+
+* Generate the workspaces using the same output as Xcode.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+
+##### Bug Fixes
+
+* Fix `pod repo push` to first check if a Specs directory exists and if so
+  push there.
+  [Edward Valentini](https://github.com/edwardvalentini)
+  [#2060](https://github.com/CocoaPods/CocoaPods/issues/2060)
+
+* Fix `pod outdated` to not include subspecs.
+  [Ash Furrow](https://github.com/ashfurrow)
+  [#2136](https://github.com/CocoaPods/CocoaPods/issues/2136)
+
+* Always evaluate podspecs from the original podspec directory. This fixes
+  an issue when depending on a pod via `:path` and that pod's podspec uses
+  relative paths.
+  [Kyle Fuller](https://github.com/kylef)
+  [pod-template#50](https://github.com/CocoaPods/pod-template/issues/50)
+
+* Fix spec linting to not warn for missing license file in subspecs.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Core#132](https://github.com/CocoaPods/Core/issues/132)
+
+* Fix `pod init` so that it doesn't recurse when checking for Podfiles.
+  [Paddy O'Brien](https://github.com/tapi)
+  [#2181](https://github.com/CocoaPods/CocoaPods/issues/2181)
+
+* Fix missing XCTest framework in Xcode 6.
+  [Paul Williamson](https://github.com/squarefrog)
+  [#2296](https://github.com/CocoaPods/CocoaPods/issues/2296)
+
+* Support multiple values in `ARCHS`.
+  [Robert Zuber](https://github.com/z00b)
+  [#1904](https://github.com/CocoaPods/CocoaPods/issues/1904)
+
+* Fix static analysis in Xcode 6.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2402](https://github.com/CocoaPods/CocoaPods/issues/2402)
+
+* Fix an issue where a version of a spec will not be locked when using
+  multiple subspecs of a podspec.
+  [Kyle Fuller](https://github.com/kylef)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2135](https://github.com/CocoaPods/CocoaPods/issues/2135)
+
+* Fix an issue using JSON podspecs installed directly from a lib's
+  repository.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2320](https://github.com/CocoaPods/CocoaPods/issues/2320)
+
+* Support and use quotes in the `OTHER_LDFLAGS` of xcconfigs to avoid
+  issues with targets containing a space character in their name.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+
+## 0.33.1 (2014-05-20)
+
+##### Bug Fixes
+
+* Fix `pod spec lint` for `json` podspecs.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2157](https://github.com/CocoaPods/CocoaPods/issues/2157)
+
+* Fixed downloader issues related to `json` podspecs.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2158](https://github.com/CocoaPods/CocoaPods/issues/2158)
+
+* Fixed `--no-ansi` flag in help banners.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#34](https://github.com/CocoaPods/CLAide/issues/34)
+
+
+## 0.33.0 (2014-05-20)
+
+For more details, see 📝 [CocoaPods 0.33](https://blog.cocoapods.org/CocoaPods-0.33/) on our blog.
+
+##### Breaking
+
+* The deprecated `pre_install` and `post_install` hooks of the specification
+  class have been removed.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151)
+  [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153)
+
+##### Enhancements
+
+* Added the `cocoapods-trunk` plugin which introduces the `trunk` subcommand.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151)
+  [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153)
+
+* The `pod push` sub-command has been moved to the `pod repo push` sub-command.
+  Moreover, pushing to the master repo from it has been disabled.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2151](https://github.com/CocoaPods/CocoaPods/issues/2151)
+  [#2153](https://github.com/CocoaPods/CocoaPods/pull/2153)
+
+* Overhauled command line interface. Added support for an auto-completion
+  script. If auto-completion is enabled for your shell you can configure it
+  for CocoaPods with the following command:
+
+      rm -f /usr/local/share/zsh/site-functions/\_pod
+      pod --completion-script > /usr/local/share/zsh/site-functions/\_pod
+      exec zsh
+
+  Currently only the Z shell is supported.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [CLAide#25](https://github.com/CocoaPods/CLAide/issues/25)
+  [CLAide#20](https://github.com/CocoaPods/CLAide/issues/20)
+  [CLAide#19](https://github.com/CocoaPods/CLAide/issues/19)
+  [CLAide#17](https://github.com/CocoaPods/CLAide/issues/17)
+  [CLAide#12](https://github.com/CocoaPods/CLAide/issues/12)
+
+* The `--version` flag is now only supported for the root `pod` command. If
+  used in conjunction with the `--verbose` flag the version of the detected
+  plugins will be printed as well.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [CLAide#13](https://github.com/CocoaPods/CLAide/issues/13)
+  [CLAide#14](https://github.com/CocoaPods/CLAide/issues/14)
+
+* The extremely meta `cocoapods-plugins` plugin is now installed by default,
+  providing information about the available and installed plug-ins.
+  [David Grandinetti](https://github.com/dbgrandi)
+  [Olivier Halligon](https://github.com/AliSoftware)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2092](https://github.com/CocoaPods/CocoaPods/issues/2092)
+
+* Validate the reachability of `social_media_url`, `documentation_url` and
+  `docset_url` in podspecs while linting a specification.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2025](https://github.com/CocoaPods/CocoaPods/issues/2025)
+
+* Print the current version when the repo/lockfile requires a higher version.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2049](https://github.com/CocoaPods/CocoaPods/issues/2049)
+
+* Show `help` when running the `pod` command instead of defaulting to `pod
+  install`.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1771](https://github.com/CocoaPods/CocoaPods/issues/1771)
+
+##### Bug Fixes
+
+* Show the actual executable when external commands fail.
+  [Boris Bügling](https://github.com/neonichu)
+  [#2102](https://github.com/CocoaPods/CocoaPods/issues/2102)
+
+* Fixed support for file references in the workspace generated by CocoaPods.
+  [Kyle Fuller](https://github.com/kylef)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Xcodeproj#150](https://github.com/CocoaPods/Xcodeproj/pull/150)
+
+* Show a helpful error message when reading version information with merge
+  conflict.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#1853](https://github.com/CocoaPods/CocoaPods/issues/1853)
+
+* Show deprecated specs when invoking `pod outdated`.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2003](https://github.com/CocoaPods/CocoaPods/issues/2003)
+
+* Fixes an issue where `pod repo update` may start an un-committed merge.
+  [Kyle Fuller](https://github.com/kylef)
+  [#2024](https://github.com/CocoaPods/CocoaPods/issues/2024)
+
+## 0.32.1 (2014-04-15)
+
+##### Bug Fixes
+
+* Fixed the Podfile `default_subspec` attribute in nested subspecs.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#2050](https://github.com/CocoaPods/CocoaPods/issues/2050)
+
+## 0.32.0 (2014-04-15)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.31.1...0.32.0)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.31.1...0.32.0)
+
+For more details, see 📝 [CocoaPods 0.32](https://blog.cocoapods.org/CocoaPods-0.32/) on our blog.
+
+##### Enhancements
+
+* Allow updating only a list of given pods with `pod update [POD_NAMES...]`.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+  [CocoaPods#760](https://github.com/CocoaPods/CocoaPods/issues/760)
+
+* `pod update` prints the previous version of the updated pods.
+  [Andrea Mazzini](https://github.com/andreamazz)
+  [#2008](https://github.com/CocoaPods/CocoaPods/issues/2008)
+
+* `pod update` falls back to `pod install` if no Lockfile is present.
+  [Marius Rackwitz](https://github.com/mrackwitz)
+
+* File references in the Pods project for development Pods now are absolute if
+  the dependency is specified with an absolute path.
+  [Samuel Ford](https://github.com/samuelwford)
+  [#1042](https://github.com/CocoaPods/CocoaPods/issues/1042)
+
+* Added `deprecated` and `deprecated_in_favor_of` attributes to Specification
+  DSL.
+  [Paul Young](https://github.com/paulyoung)
+  [Core#87](https://github.com/CocoaPods/Core/pull/87)
+
+* Numerous improvements to the validator and to the linter.
+  * Validate the reachability of screenshot URLs in podspecs while linting a
+    specification.
+    [Kyle Fuller](https://github.com/kylef)
+    [#2010](https://github.com/CocoaPods/CocoaPods/issues/2010)
+  * Support HTTP redirects when linting homepage and screenshots.
+    [Boris Bügling](https://github.com/neonichu)
+    [#2027](https://github.com/CocoaPods/CocoaPods/pull/2027)
+  * The linter now checks `framework` and `library` attributes for invalid
+    strings.
+    [Paul Williamson](https://github.com/squarefrog)
+    [Fabio Pelosin](https://github.com/fabiopelosin)
+    [Core#66](https://github.com/CocoaPods/Core/issues/66)
+    [Core#96](https://github.com/CocoaPods/Core/pull/96)
+    [Core#105](https://github.com/CocoaPods/Core/issues/105)
+  * The Linter will not check for comments anymore.
+    [Fabio Pelosin](https://github.com/fabiopelosin)
+    [Core#108](https://github.com/CocoaPods/Core/issues/108)
+  * Removed legacy checks from the linter.
+    [Fabio Pelosin](https://github.com/fabiopelosin)
+    [Core#108](https://github.com/CocoaPods/Core/issues/108)
+  * Added logic to handle subspecs and platform scopes to the linter check of
+    the `requires_arc` attribute.
+    [Fabio Pelosin](https://github.com/fabiopelosin)
+    [CocoaPods#2005](https://github.com/CocoaPods/CocoaPods/issues/2005)
+  * The linter no longer considers a Specification empty if it only specifies the
+    `resource_bundle` attribute.
+    [Joshua Kalpin](https://github.com/Kapin)
+    [#63](https://github.com/CocoaPods/Core/issues/63)
+    [#95](https://github.com/CocoaPods/Core/pull/95)
+
+* `pod lib create` is now using the `configure` file instead of the
+  `_CONFIGURE.rb` file.
+  [Piet Brauer](https://github.com/pietbrauer)
+  [Orta Therox](https://github.com/orta)
+
+* `pod lib create` now disallows any pod name that begins with a `.`.
+  [Dustin Clark](https://github.com/clarkda)
+  [#2026](https://github.com/CocoaPods/CocoaPods/pull/2026)
+  [Core#97](https://github.com/CocoaPods/Core/pull/97)
+  [Core#98](https://github.com/CocoaPods/Core/issues/98)
+
+* Prevent the user from using `pod` commands as root.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1815](https://github.com/CocoaPods/CocoaPods/issues/1815)
+
+* Dependencies declared with external sources now support HTTP downloads and
+  have improved support for all the options supported by the downloader.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+* An informative error message is presented when a merge conflict is detected
+  in a YAML file.
+  [Luis de la Rosa](https://github.com/luisdelarosa)
+  [#69](https://github.com/CocoaPods/Core/issues/69)
+  [#100](https://github.com/CocoaPods/Core/pull/100)
+
+##### Bug Fixes
+
+* Fixed the Podfile `default_subspec` attribute in nested subspecs.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1021](https://github.com/CocoaPods/CocoaPods/issues/1021)
+
+* Warn when including deprecated pods.
+  [Samuel E. Giddins](https://github.com/segiddins)
+  [#2003](https://github.com/CocoaPods/CocoaPods/issues/2003)
+
+
+## 0.31.1 (2014-04-01)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.31.0...0.31.1)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.31.0...0.31.1)
+
+##### Minor Enhancements
+
+* The specification now strips the indentation of the `prefix_header` and
+  `prepare_command` to aid their declaration as a here document (similarly to
+  what it already does with the description).
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [Core#51](https://github.com/CocoaPods/Core/issues/51)
+
+##### Bug Fixes
+
+* Fix linting for Pods which declare a private repo as the source.
+  [Boris Bügling](https://github.com/neonichu)
+  [Core#82](https://github.com/CocoaPods/Core/issues/82)
+
+
+## 0.31.0 (2014-03-31)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.30.0...0.31.0)
+• [CocoaPods-Core](https://github.com/CocoaPods/Core/compare/0.30.0...0.31.0)
+
+For more details, see 📝 [CocoaPods 0.31](https://blog.cocoapods.org/CocoaPods-0.31/) on our blog.
+
+##### Enhancements
+
+* Warnings are not promoted to errors anymore to maximise compatibility with
+  existing libraries.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1629](https://github.com/CocoaPods/CocoaPods/issues/1629)
+
+* Include the versions of the Pods in the output of `pod list`.
+  [Stefan Damm](https://github.com/StefanDamm)
+  [Robert Zuber](https://github.com/z00b)
+  [#1617](https://github.com/CocoaPods/CocoaPods/issues/1617)
+
+* The generated prefix header file will now have unique prefix_header_contents
+  for Pods with subspecs.
+  [Luis de la Rosa](https://github.com/luisdelarosa)
+  [#1449](https://github.com/CocoaPods/CocoaPods/issues/1449)
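+
+For example, a pod with subspecs might declare different
+`prefix_header_contents` per subspec; a minimal sketch with hypothetical
+names, assuming the usual subspec attribute inheritance applies:
+
+    Pod::Spec.new do |s|
+      s.name    = 'MyLib'
+      s.version = '1.0.0'
+      # Shared prefix header content for the whole pod.
+      s.prefix_header_contents = '#import "MyLibCommon.h"'
+      s.subspec 'UI' do |ui|
+        # Additional content contributing to this subspec's prefix header.
+        ui.prefix_header_contents = '#import <UIKit/UIKit.h>'
+      end
+    end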
+
+* The linter will now check the reachability of the homepage of Podspecs during
+  a full lint.
+  [Richard Lee](https://github.com/dlackty)
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1704](https://github.com/CocoaPods/CocoaPods/issues/1704)
+  [Core#70](https://github.com/CocoaPods/Core/pull/70)
+
+* Improved detection of the last version of a specification in `pod spec`
+  subcommands.
+  [Laurent Sansonetti](https://github.com/lrz)
+  [#1953](https://github.com/CocoaPods/CocoaPods/pull/1953)
+
+* Display advised settings for Travis CI in the warning presented when the
+  terminal encoding is not set to UTF-8.
+  [Richard Lee](https://github.com/dlackty)
+  [#1933](https://github.com/CocoaPods/CocoaPods/issues/1933)
+  [#1941](https://github.com/CocoaPods/CocoaPods/pull/1941)
+
+* Unset the `CDPATH` env variable before shelling-out to `prepare_command`.
+  [Marc Boquet](https://github.com/apalancat)
+  [#1943](https://github.com/CocoaPods/CocoaPods/pull/1943)
+
+##### Bug Fixes
+
+* Resolve crash related to the I18n deprecation warning.
+  [Eloy Durán](https://github.com/alloy)
+  [#1950](https://github.com/CocoaPods/CocoaPods/issues/1950)
+
+* Fix compilation issues related to the native Extension of Xcodeproj.
+  [Eloy Durán](https://github.com/alloy)
+
+* Robustness against user Git configuration and against merge commits in `pod
+  repo` subcommands.
+  [Boris Bügling](https://github.com/neonichu)
+  [#1949](https://github.com/CocoaPods/CocoaPods/issues/1949)
+  [#1978](https://github.com/CocoaPods/CocoaPods/pull/1978)
+
+* Gracefully inform the user if the `:head` option is not supported for a given
+  download strategy.
+  [Boris Bügling](https://github.com/neonichu)
+  [#1947](https://github.com/CocoaPods/CocoaPods/issues/1947)
+  [#1958](https://github.com/CocoaPods/CocoaPods/pull/1958)
+
+* Clean up a pod directory if an error occurs while downloading.
+  [Alex Rothenberg](https://github.com/alexrothenberg)
+  [#1842](https://github.com/CocoaPods/CocoaPods/issues/1842)
+  [#1960](https://github.com/CocoaPods/CocoaPods/pull/1960)
+
+* No longer warn for GitHub repositories with OAuth authentication.
+  [Boris Bügling](https://github.com/neonichu)
+  [#1928](https://github.com/CocoaPods/CocoaPods/issues/1928)
+  [Core#77](https://github.com/CocoaPods/Core/pull/77)
+
+* Fix for when using `s.version` as the `:tag` for a git repository in a
+  Podspec.
+  [Joel Parsons](https://github.com/joelparsons)
+  [#1721](https://github.com/CocoaPods/CocoaPods/issues/1721)
+  [Core#72](https://github.com/CocoaPods/Core/pull/72)
+
+* Improved escaping of paths in the Git downloader.
+  [Vladimir Burdukov](https://github.com/chipp)
+  [cocoapods-downloader#14](https://github.com/CocoaPods/cocoapods-downloader/pull/14)
+
+* Podspecs without an explicitly set `requires_arc` attribute no longer pass
+  the lint.
+  [Richard Lee](https://github.com/dlackty)
+  [#1840](https://github.com/CocoaPods/CocoaPods/issues/1840)
+  [Core#71](https://github.com/CocoaPods/Core/pull/71)
+
+* Properly quote headers in the `-isystem` compiler flag of the aggregate
+  targets.
+  [Eloy Durán](https://github.com/alloy)
+  [#1862](https://github.com/CocoaPods/CocoaPods/issues/1862)
+  [#1894](https://github.com/CocoaPods/CocoaPods/pull/1894)
+
+## 0.30.0 (2014-03-29)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.29.0...0.30.0)
+
+For more details, see 📝 [CocoaPods 0.30](https://blog.cocoapods.org/CocoaPods-0.30/) on our blog.
+
+###### Enhancements
+
+* Radically reduce first run pod setup bandwidth by creating a shallow clone of
+  the 'master' repo by default. Use the `--no-shallow` option to perform a full
+  clone instead.
+  [Jeff Verkoeyen](https://github.com/jverkoey)
+  [#1803](https://github.com/CocoaPods/CocoaPods/pull/1803)
+
+* Improves the error message when searching with an invalid regular expression.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Improves `pod init` to save the Xcode project file in the Podfile when one
+  was supplied.
+  [Kyle Fuller](https://github.com/kylef)
+
+* Adds functionality to specify a template URL for the `pod lib create` command.
+  [Piet Brauer](https://github.com/pietbrauer)
+
+###### Bug Fixes
+
+* Fixes a bug with `pod repo remove` silently handling permission errors.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1778](https://github.com/CocoaPods/CocoaPods/issues/1778)
+
+* `pod push` now properly checks that the repo has changed before attempting
+  to commit. This only affected pods with special characters (such as `+`) in
+  their names.
+  [Gordon Fontenot](https://github.com/gfontenot)
+  [#1739](https://github.com/CocoaPods/CocoaPods/pull/1739)
+
+
+## 0.29.0 (2013-12-25)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.28.0...0.29.0)
+• [CocoaPods-core](https://github.com/CocoaPods/Core/compare/0.28.0...0.29.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.2.0...0.3.0)
+
+For more details, see 📝 [CocoaPods 0.29](https://blog.cocoapods.org/CocoaPods-0.29/) on our blog.
+
+###### Breaking
+
+* The command `podfile_info` is now a plugin offered by CocoaPods.
+  As a result, the command has been removed from CocoaPods.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1589](https://github.com/CocoaPods/CocoaPods/issues/1589)
+
+* JSON has been adopted as the format to store specifications. As a result
+  the `pod ipc spec` command returns a JSON representation and the YAML
+  specifications are not supported anymore. JSON specifications adopt the
+  `.podspec.json` extension.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1568](https://github.com/CocoaPods/CocoaPods/pull/1568)
+
+###### Enhancements
+
+* Introduced `pod try`, the easiest way to test the example project of a pod.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1568](https://github.com/CocoaPods/CocoaPods/pull/1568)
+
+* Pod headers are now provided to the user target as a system
+  header. This means that any warnings in a Pod's code will show
+  under its target in Xcode's build navigator, and never under the
+  user target.
+  [Swizzlr](https://github.com/swizzlr)
+  [#1596](https://github.com/CocoaPods/CocoaPods/pull/1596)
+
+* Support LZMA2 compressed tarballs in the downloader.
+  [Kyle Fuller](https://github.com/kylef)
+  [cocoapods-downloader#5](https://github.com/CocoaPods/cocoapods-downloader/pull/5)
+
+* Add Bazaar support for installing directly from a repo.
+  [Fred McCann](https://github.com/fmccann)
+  [#1632](https://github.com/CocoaPods/CocoaPods/pull/1632)
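+
+For example, a Bazaar-backed dependency might be declared in the Podfile like
+this (hypothetical pod name and repository; a sketch assuming the same
+external-source syntax as the `:git` and `:hg` options):
+
+    pod 'MyPod', :bzr => 'lp:mypod'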
+
+* The `pod search QUERY` command now supports regular expressions
+  for the query parameter when searching using the option `--full`.
+  [Florian Hanke](https://github.com/floere)
+  [#1643](https://github.com/CocoaPods/CocoaPods/pull/1643)
+
+* `pod lib lint` now accepts multiple podspecs in the same folder.
+  [kra Larivain/OpenTable](https://github.com/opentable)
+  [#1635](https://github.com/CocoaPods/CocoaPods/pull/1635)
+
+* The `pod push` command will now silently test the upcoming CocoaPods trunk
+  service. The service is only tested when pushing to the master repo and the
+  test doesn't affect the normal workflow.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+* The `pod search QUERY` command now supports searching on cocoapods.org
+  when searching using the option `--web`. Options `--ios` and `--osx` are
+  fully supported.
+  [Florian Hanke](https://github.com/floere)
+  [#1682](https://github.com/CocoaPods/CocoaPods/pull/1682)
+
+* The `pod search QUERY` command now supports multiword queries when using
+  the `--web` option.
+  [Florian Hanke](https://github.com/floere)
+  [#1682](https://github.com/CocoaPods/CocoaPods/pull/1682)
+
+###### Bug Fixes
+
+* Fixed a bug which resulted in `pod lib lint` not being able to find the
+  headers.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1566](https://github.com/CocoaPods/CocoaPods/issues/1566)
+
+* Fixed the developer frameworks search paths so that
+  `$(SDKROOT)/Developer/Library/Frameworks` is used for iOS and
+  `$(DEVELOPER_LIBRARY_DIR)/Frameworks` is used for OS X.
+  [Kevin Wales](https://github.com/kwales)
+  [#1562](https://github.com/CocoaPods/CocoaPods/pull/1562)
+
+* When updating the pod repos, repositories with unreachable remotes
+  are now ignored. This fixes an issue with certain private repositories.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1595](https://github.com/CocoaPods/CocoaPods/pull/1595)
+  [#1571](https://github.com/CocoaPods/CocoaPods/issues/1571)
+
+* The linter will now display an error if a Pod's name contains whitespace.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [Core#39](https://github.com/CocoaPods/Core/pull/39)
+  [#1610](https://github.com/CocoaPods/CocoaPods/issues/1610)
+
+* Having the silent flag enabled in the config will no longer cause issues
+  with `pod search`. In addition, the flag `--silent` is no longer supported
+  for the command.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1627](https://github.com/CocoaPods/CocoaPods/pull/1627)
+
+* The linter will now display an error if a framework ends with `.framework`
+  (e.g. `QuartzCore.framework`).
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1336](https://github.com/CocoaPods/CocoaPods/issues/1336)
+  [Core#45](https://github.com/CocoaPods/Core/pull/45)
+
+* The linter will now display an error if a library ends with `.a` or `.dylib`
+  (e.g. `z.dylib`). It will also display an error if it begins with `lib`
+  (e.g. `libxml`).
+  [Joshua Kalpin](https://github.com/Kapin)
+  [Core#44](https://github.com/CocoaPods/Core/issues/44)
+
+* The ARCHS build setting can come back as an array when more than one
+  architecture is specified.
+  [Carson McDonald](https://github.com/carsonmcdonald)
+  [#1628](https://github.com/CocoaPods/CocoaPods/issues/1628)
+
+* Fixed all issues caused by `/tmp` being a symlink to `/private/tmp`.
+  This affected mostly `pod lib lint`, causing it to fail when the
+  Pod used `prefix_header_*` or when the pod headers imported headers
+  using the namespaced syntax (e.g. `#import <Pod/Header.h>`).
+  [kra Larivain/OpenTable](https://github.com/opentable)
+  [#1514](https://github.com/CocoaPods/CocoaPods/pull/1514)
+
+* Fixed an incorrect path being used in the example app Podfile generated by
+  `pod lib create`.
+  [Eloy Durán](https://github.com/alloy)
+  [cocoapods-try#5](https://github.com/CocoaPods/cocoapods-try/issues/5)
+
+
+## 0.28.0 (2013-11-14)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.27.1...0.28.0)
+• [CocoaPods-core](https://github.com/CocoaPods/Core/compare/0.27.1...0.28.0)
+• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.3.2...0.4.0)
+
+For more details, see 📝 [CocoaPods 0.28](https://blog.cocoapods.org/CocoaPods-0.28/) on our blog.
+
+###### Enhancements
+
+* CLAide now supports gem plugins. An example CocoaPods plugin can be found at
+  [open\_pod\_bay](https://github.com/leshill/open_pod_bay).
+
+  As no promises have been made yet on the APIs, try to fail as
+  gracefully as possible in case a CocoaPods update breaks your usage. In these
+  cases, please also let us know what you would need, so we can take this into
+  account when we do finalize APIs.
+
+  [Les Hill](https://github.com/leshill)
+  [CLAide#1](https://github.com/CocoaPods/CLAide/pull/1)
+  [#959](https://github.com/CocoaPods/CocoaPods/issues/959)
+
+###### Bug Fixes
+
+* Compiling `xcassets` with `actool` now uses `UNLOCALIZED_RESOURCES_FOLDER_PATH`
+  instead of `PRODUCT_NAME.WRAPPER_EXTENSION` as output directory as it is more
+  accurate and allows the project to overwrite `WRAPPER_NAME`.
+  [Marc Knaup](https://github.com/fluidsonic)
+  [#1556](https://github.com/CocoaPods/CocoaPods/pull/1556)
+
+* Added a condition to avoid compiling xcassets when `WRAPPER_EXTENSION`
+  is undefined, as it would be in the case of static libraries. This prevents
+  trying to copy the compiled files to a directory that does not exist.
+  [Noah McCann](https://github.com/nmccann)
+  [#1521](https://github.com/CocoaPods/CocoaPods/pull/1521)
+
+* Added an additional condition to check if `actool` is available when compiling
+  `xcassets`. This prevents build failures of Xcode 5 projects on Travis CI (or
+  with lower Xcode versions).
+  [Michal Konturek](https://github.com/michalkonturek)
+  [#1511](https://github.com/CocoaPods/CocoaPods/pull/1511)
+
+* Added a condition to properly handle universal or Mac apps when compiling
+  xcassets. This prevents build errors in the xcassets compilation stage,
+  particularly when using xctool to build.
+  [Ryan Marsh](https://github.com/ryanwmarsh)
+  [#1594](https://github.com/CocoaPods/CocoaPods/pull/1594)
+
+* Vendored Libraries now correctly affect whether a podspec is considered empty.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [Core#38](https://github.com/CocoaPods/Core/pull/38)
+
+* Vendored Libraries and Vendored Frameworks now have their paths validated correctly.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1567](https://github.com/CocoaPods/CocoaPods/pull/1567)
+
+* Gists are now correctly accepted with https.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [Core#38](https://github.com/CocoaPods/Core/pull/38)
+
+* The `pod push` command is now more specific about the branch it pushes to.
+  [orta](http://orta.github.io)
+  [#1561](https://github.com/CocoaPods/CocoaPods/pull/1561)
+
+* Dtrace files are now properly left unflagged when installing, regardless of configuration.
+  [Swizzlr](https://github.com/swizzlr)
+  [#1560](https://github.com/CocoaPods/CocoaPods/pull/1560)
+
+* Users are now warned if their terminal encoding is not UTF-8. This fixes an issue
+  with a small percentage of pod names that are incompatible with ASCII.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1570](https://github.com/CocoaPods/CocoaPods/pull/1570)
+
+
+## 0.27.1 (2013-10-24)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.26.2...0.27.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.26.2...0.27.1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.13.0...0.14.0)
+
+For more details, see 📝 [CocoaPods 0.27 and improved installation UX](https://blog.cocoapods.org/CocoaPods-0.27-and-improved-installation-UX/) on our blog.
+
+###### Enhancements
+
+* The xcodeproj gem now comes bundled with prebuilt binaries for the Ruby
+  versions that come with OS X 10.8 and 10.9. Users now no longer need to
+  install the Xcode Command Line Tools or deal with the Ruby C header location.
+  [Eloy Durán](https://github.com/alloy)
+  [Xcodeproj#88](https://github.com/CocoaPods/Xcodeproj/issues/88)
+
+* Targets passed to the `link_with` method of the Podfile DSL no longer need
+  to be explicitly passed as an array. `link_with ['target1', 'target2']` can
+  now be written as `link_with 'target1', 'target2'`.
+  [Adam Sharp](https://github.com/sharplet)
+  [Core#30](https://github.com/CocoaPods/Core/pull/30)
+
+* The copy resources script now compiles xcassets resources.
+  [Ulrik Damm](https://github.com/ulrikdamm)
+  [#1427](https://github.com/CocoaPods/CocoaPods/pull/1427)
+
+* `pod repo` now supports a `remove ['repo_name']` command.
+  [Joshua Kalpin](https://github.com/Kapin)
+  [#1493](https://github.com/CocoaPods/CocoaPods/issues/1493)
+  [#1484](https://github.com/CocoaPods/CocoaPods/issues/1484)
+
+###### Bug Fixes
+
+* The architecture is now set in the build settings of the user build
+  configurations.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1450](https://github.com/CocoaPods/CocoaPods/issues/1450)
+  [#1462](https://github.com/CocoaPods/CocoaPods/issues/1462)
+
+* Fixed a crash related to CocoaPods being unable to resolve a unique build
+  setting of a user target with custom build configurations.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1462](https://github.com/CocoaPods/CocoaPods/issues/1462)
+  [#1463](https://github.com/CocoaPods/CocoaPods/issues/1463)
+  [#1457](https://github.com/CocoaPods/CocoaPods/issues/1457)
+
+* Fixed a defect which prevented subspecs from being dependent on a pod with a
+  name closely matching the name of one of the subspec's parents.
+  [Noah McCann](https://github.com/nmccann)
+  [#29](https://github.com/CocoaPods/Core/pull/29)
+
+* The developer dir relative to the SDK is no longer added if testing
+  frameworks are detected in OS X targets, as it doesn't exist, avoiding the
+  related warning in Xcode.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+
+
+## 0.26.2 (2013-10-09)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.26.1...0.26.2)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.26.1...0.26.2)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.11.1...0.13.0)
+
+###### Bug Fixes
+
+* Fixed a crash which caused a failure in `pod lib create` if the name of
+  the Pod included spaces. As spaces are not supported, this is now gracefully
+  handled with an informative message.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1456](https://github.com/CocoaPods/CocoaPods/issues/1456)
+
+* If a user target doesn't specify an architecture, the value specified for the
+  project is used in CocoaPods targets.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1450](https://github.com/CocoaPods/CocoaPods/issues/1450)
+
+* The Pods project now properly configures ARC on all build configurations.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1454](https://github.com/CocoaPods/CocoaPods/issues/1454)
+
+
+## 0.26.1 (2013-10-08)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.25.0...0.26.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.25.0...0.26.1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.11.1...0.12.0)
+
+For more details, see 📝 [CocoaPods 0.26](https://blog.cocoapods.org/CocoaPods-0.26/) on our blog.
+
+###### Enhancements
+
+* CocoaPods now creates and hides the schemes of its targets after every
+  installation. The schemes are not shared because the flag which keeps track
+  of whether they should be visible is a user-only flag. The schemes are still
+  present and, to debug a single Pod, it is possible to make its scheme visible
+  in the Schemes manager of Xcode. This is rarely needed though because the
+  user targets trigger the compilation of the Pod targets.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1185](https://github.com/CocoaPods/CocoaPods/pull/1185)
+
+* Installations which don't integrate a user target (lint subcommands and
+  `--no-integrate` option) now set the architecture of OS X Pod targets to
+  `$(ARCHS_STANDARD_64_BIT)` (Xcode 4 default value for new targets). This
+  fixes lint issues with Xcode 4.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1185](https://github.com/CocoaPods/CocoaPods/pull/1185)
+
+* Further improvements to the organization of the Pods project
+
+  - The project is now sorted by name with groups at the bottom.
+  - Source files are now stored in the root group of the spec, subspecs are not
+    stored in a `Subspec` group anymore, and the products of the Pods are all
+    stored in the products group of the project.
+  - The frameworks are referenced relative to the Developer directory and
+    namespaced per platform.
+
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1389](https://github.com/CocoaPods/CocoaPods/pull/1389)
+  [#1420](https://github.com/CocoaPods/CocoaPods/pull/1420)
+
+* Added the `documentation_url` DSL attribute to the specifications.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1273](https://github.com/CocoaPods/CocoaPods/pull/1273)
+
+###### Bug Fixes
+
+* The search paths of vendored frameworks and libraries are now always
+  specified relatively.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1405](https://github.com/CocoaPods/CocoaPods/pull/1405)
+
+* Fix an issue where CocoaPods would fail to work when used with an older
+  version of the Active Support gem.
+  This fix raises the dependency version to the earliest compatible version
+  of Active Support.
+  [Kyle Fuller](https://github.com/kylef)
+  [#1407](https://github.com/CocoaPods/CocoaPods/issues/1407)
+
+* CocoaPods will no longer attempt to load all the versions of a specification,
+  preventing crashes if some of them are incompatible.
+  [Fabio Pelosin](https://github.com/fabiopelosin)
+  [#1272](https://github.com/CocoaPods/CocoaPods/pull/1272)
+
+
+## 0.25.0 (2013-09-20)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.24.0...0.25.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.24.0...0.25.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.10.1...0.11.0)
+
+###### Enhancements
+
+* Added support for Xcode 5.
+
+  The generated Pods Xcode project is now compatible with `arm64` projects and
+  is updated to use Xcode 5’s default settings, removing all warnings.
+
+  **NOTE to users migrating projects from Xcode 4, or who are still using Xcode 4:**
+  1. The Pods Xcode project now sets the `ONLY_ACTIVE_ARCH` build setting to
+     `YES` in the `Debug` configuration. You _will_ have to set the same on your
+     project/target, otherwise the build _will_ fail.
+  2. Ensure your project/target has an `ARCHS` value set, otherwise the build
+     _will_ fail.
+  3. When building an **iOS** project from the command line, with the `xcodebuild`
+     tool that comes with Xcode 4, you’ll need to completely disable this setting
+     by appending `ONLY_ACTIVE_ARCH=NO` to your build command (a minimal
+     invocation is sketched at the end of this section).
+
+  [#1352](https://github.com/CocoaPods/CocoaPods/pull/1352)
+
+* Speed up project generation in `pod install` and `pod update`.
+
+* The pre and post install hooks that have been deprecated now include the name
+  and version of the spec that’s using them.
+
+###### Bug Fixes
+
+* Only create a single resource bundle for all targets. Prior to this change, a
+  resource bundle included in multiple targets within the project would create
+  identically named targets in the Pods Xcode project, causing identically named
+  schemes to be created on each invocation of `pod install`. All targets that
+  reference a given resource bundle now have dependencies on a single common
+  target.
+
+  [Blake Watters](https://github.com/blakewatters)
+  [#1338](https://github.com/CocoaPods/CocoaPods/issues/1338)
+
+* Solved outstanding issues with CocoaPods resource bundles and Archive builds:
+  1. The rsync task copied symlinks into the App Bundle, producing an invalid
+     app. This change adds `--copy-links` to the rsync invocation to ensure the
+     target files are copied rather than the symlink.
+  2. The Copy Resources script used `TARGET_BUILD_DIR`, which points to the App
+     Archiving folder during an Archive action. Switching to
+     `BUILT_PRODUCTS_DIR` instead ensures that the path is correct for all
+     actions and configurations.
+
+  [Blake Watters](https://github.com/blakewatters)
+  [#1309](https://github.com/CocoaPods/CocoaPods/issues/1309)
+  [#1329](https://github.com/CocoaPods/CocoaPods/issues/1329)
+
+* Ensure resource bundles are copied to the installation location on install actions.
+  [Chris Gummer](https://github.com/chrisgummer)
+  [#1364](https://github.com/CocoaPods/CocoaPods/issues/1364)
+
+* Various bugfixes in Xcodeproj, refer to its [CHANGELOG](https://github.com/CocoaPods/Xcodeproj/blob/0.11.0/CHANGELOG.md)
+  for details.
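+
+As a concrete illustration of point 3 in the Xcode 5 migration note above, a
+command-line build with Xcode 4’s `xcodebuild` might look like this (the
+workspace and scheme names are hypothetical):
+
+    $ xcodebuild -workspace MyApp.xcworkspace -scheme MyApp build ONLY_ACTIVE_ARCH=NO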
+
+
+## 0.24.0 (2013-09-04)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.23.0...0.24.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.23.0...0.24.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.1...0.9.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.1.1...0.2.0)
+
+###### Enhancements
+
+* Added `pod init` command which generates a Podfile according to the
+  targets of the project stored in the working directory and to the templates
+  stored in the `~/.cocoapods/templates` folder. Two templates are supported:
+  - the `Podfile.default` template for regular targets.
+  - and the `Podfile.test` template for test targets.
+  [Ian Ynda-Hummel](https://github.com/ianyh)
+  [#1106](https://github.com/CocoaPods/CocoaPods/issues/1106)
+  [#1045](https://github.com/CocoaPods/CocoaPods/issues/1045)
+
+* CocoaPods will now leverage the [xcproj](https://github.com/0xced/xcproj)
+  command line tool, if available in the path of the user, to touch saved
+  projects. This will result in projects being serialized in the exact format
+  used by Xcode, eliminating merge conflicts and other related issues. To learn
+  more about how to install xcproj see its
+  [readme](https://github.com/0xced/xcproj).
+  [Cédric Luthi](https://github.com/0xced)
+  [#1275](https://github.com/CocoaPods/CocoaPods/issues/1275)
+
+* Rationalized and cleaned up Pods project group structure and path specification.
+
+* Create all necessary build configurations for *Pods.xcodeproj* at the project
+  level. If the user’s project has more than just *Debug* and *Release* build
+  configurations, they may be explicitly specified in the Podfile:
+  `xcodeproj 'MyApp', 'App Store' => :release, 'Debug' => :debug, 'Release' => :release`
+  If build configurations aren’t specified in the Podfile then they will be
+  automatically picked from the user’s project in *Release* mode.
+  These changes ensure that the `libPods.a` static library is not stripped for
+  any configuration, as explained in [#1217](https://github.com/CocoaPods/CocoaPods/pull/1217).
+  [Cédric Luthi](https://github.com/0xced)
+  [#1294](https://github.com/CocoaPods/CocoaPods/issues/1294)
+
+* Added basic support for Bazaar repositories.
+  [Fred McCann](https://github.com/fmccann)
+  [cocoapods-downloader#4](https://github.com/CocoaPods/cocoapods-downloader/pull/4)
+
+###### Bug Fixes
+
+* Fixed crash in `pod spec cat`.
+
+* Use the `TARGET_BUILD_DIR` environment variable for installing resource bundles.
+  [Cédric Luthi](https://github.com/0xced)
+  [#1268](https://github.com/CocoaPods/CocoaPods/issues/1268)
+
+* CoreData versioned models are now properly handled, respecting the contents of
+  the `.xccurrentversion` file.
+  [Ashton-W](https://github.com/Ashton-W)
+  [#1288](https://github.com/CocoaPods/CocoaPods/issues/1288),
+  [Xcodeproj#83](https://github.com/CocoaPods/Xcodeproj/pull/83)
+
+* OS X frameworks are now copied to the Resources folder using rsync to
+  properly overwrite existing files.
+  [Nikolaj Schumacher](https://github.com/nschum)
+  [#1063](https://github.com/CocoaPods/CocoaPods/issues/1063)
+
+* User defined build configurations are now added to the resource bundle
+  targets.
+  [#1309](https://github.com/CocoaPods/CocoaPods/issues/1309)
+
+
+## 0.23.0 (2013-08-08)
+
+
+## 0.23.0.rc1 (2013-08-02)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.3...0.23.0.rc1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.3...0.23.0.rc1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.1...0.9.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.1.1...0.1.2)
+
+###### Enhancements
+
+* Added `prepare_command` attribute to Specification DSL. The prepare command
+  will replace the `pre_install` hook. The `post_install` hook has also been
+  deprecated.
+  [#1247](https://github.com/CocoaPods/CocoaPods/issues/1247)
+
+  The reason we provided Ruby hooks at first was because we wanted to offer
+  the option to make any required configuration possible. By now, however, we
+  have a pretty good idea of the use-cases and are therefore locking down the
+  freedom that was once available. In turn, we’re adding attributes that can
+  replace the most common use-cases. _(See the enhancements directly following
+  this entry for more info)._
+
+  The second reason we need to lock this down is because this is the last
+  remaining obstacle to fully serialize specifications, which we need in order
+  to move to a ‘spec push’ web-service in the future.
+
+* Added `resource_bundles` attribute to the Specification DSL.
+  [#743](https://github.com/CocoaPods/CocoaPods/issues/743)
+  [#1186](https://github.com/CocoaPods/CocoaPods/issues/1186)
+
+* Added `vendored_frameworks` attribute to the Specification DSL.
+  [#809](https://github.com/CocoaPods/CocoaPods/issues/809)
+  [#1075](https://github.com/CocoaPods/CocoaPods/issues/1075)
+
+* Added `vendored_libraries` attribute to the Specification DSL.
+  [#809](https://github.com/CocoaPods/CocoaPods/issues/809)
+  [#1075](https://github.com/CocoaPods/CocoaPods/issues/1075)
+
+* Restructured `.cocoapods` folder to contain repos in a subdirectory.
+  [Ian Ynda-Hummel](https://github.com/ianyh)
+  [#1150](https://github.com/CocoaPods/CocoaPods/issues/1150)
+
+* Improved `pod spec create` template.
+  [#1223](https://github.com/CocoaPods/CocoaPods/issues/1223)
+
+* Added copy&paste-friendly dependency to `pod search`.
+  [#1073](https://github.com/CocoaPods/CocoaPods/issues/1073)
+
+* Improved performance of the installation of Pods with git
+  sources which specify a tag.
+  [#1077](https://github.com/CocoaPods/CocoaPods/issues/1077)
+
+* Core Data `xcdatamodeld` files are now properly referenced from the Pods
+  project.
+  [#1155](https://github.com/CocoaPods/CocoaPods/issues/1155)
+
+* Removed punctuation check from the specification validations.
+  [#1242](https://github.com/CocoaPods/CocoaPods/issues/1242)
+
+* Deprecated the `documentation` attribute of the Specification DSL.
+  [Core#20](https://github.com/CocoaPods/Core/issues/20)
+
+###### Bug Fixes
+
+* Fix copy resource script issue related to filenames with spaces.
+  [Denis Hennessy](https://github.com/dhennessy)
+  [#1231](https://github.com/CocoaPods/CocoaPods/issues/1231)
+
+
+
+## 0.22.3 (2013-07-23)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.2...0.22.3)
+
+###### Enhancements
+
+* Add support for `.xcdatamodel` resource files (in addition to `.xcdatamodeld`).
+  [#1201](https://github.com/CocoaPods/CocoaPods/pull/1201)
+
+###### Bug Fixes
+
+* Always exclude `USE_HEADERMAP` from the user’s project.
+  [#1216](https://github.com/CocoaPods/CocoaPods/issues/1216)
+
+* Use the correct template repo when using the `pod lib create` command.
+  [#1214](https://github.com/CocoaPods/CocoaPods/issues/1214)
+
+* Fixed issue with `pod push` failing when the podspec is unchanged. It will now
+  report `[No change] ExamplePod (0.1.0)` and continue to push other podspecs if
+  they exist. [#1199](https://github.com/CocoaPods/CocoaPods/pull/1199)
+
+* Set `STRIP_INSTALLED_PRODUCT = NO` in the generated Pods project. This allows
+  Xcode to include symbols from CocoaPods in dSYMs during Archive builds.
+  [#1217](https://github.com/CocoaPods/CocoaPods/pull/1217)
+
+* Ensure the resource script doesn’t fail due to the resources list file not
+  existing when trying to delete it.
+  [#1198](https://github.com/CocoaPods/CocoaPods/pull/1198)
+
+* Fix handling of spaces in paths when compiling xcdatamodel(d) files.
+  [#1201](https://github.com/CocoaPods/CocoaPods/pull/1201)
+
+
+
+## 0.22.2 (2013-07-11)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.1...0.22.2)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.1...0.22.2)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.8.0...0.8.1)
+
+###### Enhancements
+
+* The build settings of the Pods project and of its target have been updated to
+  be in line with the new defaults of the future versions of Xcode.
+
+###### Bug fixes
+
+* Specifications defining build settings with the `[*]` syntax are now properly
+  handled.
+  [#1171](https://github.com/CocoaPods/CocoaPods/issues/1171)
+
+* The names of the file references are now properly set, fixing a minor
+  regression introduced by CocoaPods 0.22.1 and more closely matching Xcode’s
+  behaviour.
+
+* The validator now builds the Pods target instead of the first target when
+  actually performing the validation.
+
+* Build settings defined through the `xcconfig` attribute of a `podspec` are now
+  stripped of duplicate values when merged in an aggregate target.
+  [#1189](https://github.com/CocoaPods/CocoaPods/issues/1189)
+
+
+## 0.22.1 (2013-07-03)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.22.0...0.22.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.22.0...0.22.1)
+
+###### Bug fixes
+
+* Fixed a crash related to target dependencies and subspecs.
+  [#1168](https://github.com/CocoaPods/CocoaPods/issues/1168)
+
+
+## 0.22.0 (2013-07-03)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.21.0...0.22.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.21.0...0.22.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.7.1...0.8.0)
+
+###### Enhancements
+
+* Added the `pod lib create` subcommand which allows creating a new Pod
+  adhering to the best practices. The template is still a bit primitive
+  and we encourage users to provide feedback by submitting patches and issues
+  to https://github.com/CocoaPods/CocoaPods.
+  [#850](https://github.com/CocoaPods/CocoaPods/issues/850)
+
+* Added the `pod lib lint` subcommand which allows linting the Pod stored
+  in the working directory (a pod spec in the root is needed). This subcommand
+  is equivalent to the deprecated `pod spec lint --local`.
+  [#850](https://github.com/CocoaPods/CocoaPods/issues/850)
+
+* The dependencies of the targets of the Pods project are now made explicit.
+  [#1165](https://github.com/CocoaPods/CocoaPods/issues/1165)
+
+* The size of the cache used for the git repos is now configurable.
+  For more details see
+  https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/config.rb#L7-L25
+  [#1159](https://github.com/CocoaPods/CocoaPods/issues/1159)
+
+* The copy resources shell script now aborts if any error occurs.
+  [#1098](https://github.com/CocoaPods/CocoaPods/issues/1098)
+
+* The output of shell script build phases no longer includes environment
+  variables, to reduce noise.
+  [#1122](https://github.com/CocoaPods/CocoaPods/issues/1122)
+
+* CocoaPods no longer sets the deprecated `ALWAYS_SEARCH_USER_PATHS` build
+  setting.
+
+###### Bug fixes
+
+* Pods whose head state changes are now correctly detected and reinstalled.
+  [#1160](https://github.com/CocoaPods/CocoaPods/issues/1160)
+
+* Fixed the library representation of the hooks which caused issues with the
+  `#copy_resources_script_path` method.
+  [#1157](https://github.com/CocoaPods/CocoaPods/issues/1157)
+
+* Framework symlinks are now properly preserved by the copy resources script.
+  Thanks to Thomas Dohmke (ashtom) for the fix.
+  [#1063](https://github.com/CocoaPods/CocoaPods/issues/1063)
+
+## 0.21.0 (2013-07-01)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.21.0.rc1...0.21.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.21.0.rc1...0.21.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.7.0...0.7.1)
+
+###### Bug fixes
+
+* Fixed a linter issue related to the dedicated targets change.
+  [#1130](https://github.com/CocoaPods/CocoaPods/issues/1130)
+
+* Fixed xcconfig issues related to Pods including a dot in the name.
+  [#1152](https://github.com/CocoaPods/CocoaPods/issues/1152)
+
+
+## 0.21.0.rc1 (2013-06-18)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.2...0.21.0.rc1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.20.2...0.21.0.rc1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.6.0...0.7.0)
+
+###### Enhancements
+
+* Pods are now built in dedicated targets. This enhancement isolates the build
+  environment of each Pod from other ones, eliminating pollution issues. It also
+  introduces an important architectural improvement which lays the foundation
+  for the upcoming CocoaPods features. Stay tuned! This feature has been
+  implemented by [Jeremy Slater](https://github.com/jasl8r).
+  [#1011](https://github.com/CocoaPods/CocoaPods/issues/1011)
+  [#983](https://github.com/CocoaPods/CocoaPods/issues/983)
+  [#841](https://github.com/CocoaPods/CocoaPods/issues/841)
+
+* Reduced external dependencies and deprecation of Rake::FileList.
+  [#1080](https://github.com/CocoaPods/CocoaPods/issues/1080)
+
+###### Bug fixes
+
+* Fixed a crash due to Podfile.lock containing multiple version requirements for
+  a Pod. [#1076](https://github.com/CocoaPods/CocoaPods/issues/1076)
+
+* Fixed a build error due to the copy resources script using the same temporary
+  file for multiple targets.
+  [#1099](https://github.com/CocoaPods/CocoaPods/issues/1099)
+
+## 0.20.2 (2013-05-26)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.1...0.20.2)
+
+###### Bug fixes
+
+* Ensure that, in a sandbox-pod env, RubyGems loads the CocoaPods gem on system
+  Ruby (1.8.7).
+  [#939](https://github.com/CocoaPods/CocoaPods/issues/939#issuecomment-18396063)
+* Allow sandbox-pod to execute any tool inside the Xcode.app bundle.
+* Allow sandbox-pod to execute any tool inside an rbenv prefix (a minimal
+  invocation is sketched below).
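+
+For the rbenv case above, a hedged sketch of an invocation that should now be
+allowed (assuming rbenv is installed and the selected Ruby has the CocoaPods
+gem; `rbenv exec` runs a command through the active rbenv Ruby):
+
+    $ rbenv exec sandbox-pod install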
+
+## 0.20.1 (2013-05-23)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.20.0...0.20.1)
+• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.3.0...0.3.2)
+
+###### Bug fixes
+
+* Made the sandbox-pod executable visible, as it wasn't correctly configured in the
+  gemspec.
+* Made the sandbox-pod executable actually work when installed as a gem. (In which
+  case every executable is wrapped in a wrapper bin script and the DATA constant
+  can no longer be used.)
+* Required CLAide 0.3.2, as 0.3.0 didn't include all the files in the gemspec
+  and 0.3.1 was not correctly processed by RubyGems.
+
+## 0.20.0 (2013-05-23)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.19.1...0.20.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.19.1...0.20.0)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader/compare/0.1.0...0.1.1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.5...0.6.0)
+• [CLAide](https://github.com/CocoaPods/CLAide/compare/0.2.0...0.3.0)
+
+###### Enhancements
+
+* Introduces an experimental sandbox feature.
+  [#939](https://github.com/CocoaPods/CocoaPods/issues/939)
+
+  Let’s face it, even though we have a great community that spends an amazing
+  amount of time on curating the specifications, the internet can be a hostile
+  place and the community is growing too large to take a naive approach any
+  longer.
+
+  As such, we have started leveraging OS X’s sandbox facilities to disallow
+  unsanctioned operations. This is still very experimental and therefore has to
+  be used explicitly, for now, but that does **not** mean we don’t want you to
+  start using it and **report issues**.
+
+  To use the sandbox, simply use the `sandbox-pod` command instead. E.g.:
+
+    $ sandbox-pod install
+
+  In case of issues, be sure to check `/var/log/system.log` for ‘deny’ messages.
+  For instance, here’s an example where the sandbox denies read access to `/`:
+
+    May 16 00:23:35 Khaos kernel[0]: Sandbox: ruby(98430) deny file-read-data /
+
+  **NOTE**: _The above example is actually one that we know of. We’re not sure
+  yet which process causes this, but there shouldn’t be a need for any process
+  to read data from the root path anyway._
+
+  **NOTE 2**: _At the moment the sandbox is not compatible with the `:path` option
+  when referencing Pods that are not stored within the directory of the Podfile._
+
+* The naked `pod` command now defaults to `pod install`.
+  [#958](https://github.com/CocoaPods/CocoaPods/issues/958)
+
+* CocoaPods will look for the Podfile in the ancestor paths if one is
+  not available in the working directory.
+  [#940](https://github.com/CocoaPods/CocoaPods/issues/940)
+
+* Documentation generation has been removed from CocoaPods as it graduated
+  to CocoaDocs. This decision was taken because CocoaDocs is a much better
+  solution which doesn't clutter Xcode's docsets while still allowing
+  access to the docsets with Xcode and with Dash. Removing this feature
+  keeps the installer leaner and easier to develop and paves the way for the
+  upcoming sandbox. Private pods can use a pre-install hook to generate the
+  documentation. If there is enough demand, this feature might be
+  reintegrated as a plugin (see
+  [#1037](https://github.com/CocoaPods/CocoaPods/issues/1037)).
+
+* Improved performance of the copy resources script and thus build time of
+  the integrated targets. Contribution by [@onato](https://github.com/onato)
+  [#1050](https://github.com/CocoaPods/CocoaPods/issues/1050).
+
+* The changelog for the current version is printed after CocoaPods is
+  installed/updated.
+  [#853](https://github.com/CocoaPods/CocoaPods/issues/853).
+
+
+###### Bug fixes
+
+* Inheriting `inhibit_warnings` per pod is now working.
+  [#1032](https://github.com/CocoaPods/CocoaPods/issues/1032)
+* Fix copy resources script for iOS < 6 and OS X < 10.8 by removing the
+  `--reference-external-strings-file`
+  flag. [#1030](https://github.com/CocoaPods/CocoaPods/pull/1030)
+* Fixed issues with the `:head` option of the Podfile.
+  [#1046](https://github.com/CocoaPods/CocoaPods/issues/1046)
+  [#1039](https://github.com/CocoaPods/CocoaPods/issues/1039)
+
+## 0.19.1 (2013-04-30)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.19.0...0.19.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.19.0...0.19.1)
+
+###### Bug fixes
+
+* Project-level preprocessor macros are not overwritten anymore.
+  [#903](https://github.com/CocoaPods/CocoaPods/issues/903)
+* Unique hash instances for the build settings of the Pods target are now
+  created, resolving interference in the hooks.
+  [#1014](https://github.com/CocoaPods/CocoaPods/issues/1014)
+
+## 0.19.0 (2013-04-30)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.18.1...0.19.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.18.1...0.19.0)
+
+###### Enhancements
+
+* Compile time introspection. Macro definitions which allow inspecting the
+  installed Pods and their version have been introduced in the build
+  environment of the Pod libraries
+  ([example](https://gist.github.com/fabiopelosin/5348551)).
+* CocoaPods now defines the `COCOAPODS=1` macro in the Pod and the Client
+  targets. This is useful for libraries which conditionally expose interfaces.
+  [#903](https://github.com/CocoaPods/CocoaPods/issues/903)
+* Added support for the `private_header_files` attribute of the Specification
+  DSL.
+  [#998](https://github.com/CocoaPods/CocoaPods/issues/998)
+* CocoaPods now defines the deployment target of the Pods project computed as
+  the minimum deployment target of the Pods libraries.
+  [#556](https://github.com/CocoaPods/CocoaPods/issues/556)
+* Added `pod podfile-info` command. Shows a list of used Pods and their info
+  in a project or supplied Podfile.
+  Options: `--all` - with dependencies. `--md` - in Markdown.
+  [#855](https://github.com/CocoaPods/CocoaPods/issues/855)
+* Added `pod help` command. You can still use the old format
+  with the `--help` flag.
+  [#957](https://github.com/CocoaPods/CocoaPods/pull/957)
+* Restored support for Podfiles named `CocoaPods.podfile`. Moreover, the
+  experimental YAML format of the Podfile is now associated with files named
+  `CocoaPods.podfile.yaml`.
+  [#1004](https://github.com/CocoaPods/CocoaPods/pull/1004)
+
+###### Deprecations
+
+* The `:local` flag in the Podfile has been renamed to `:path` and the old syntax
+  has been deprecated.
+  [#971](https://github.com/CocoaPods/CocoaPods/issues/971)
+
+###### Bug fixes
+
+* Fixed an issue related to `pod outdated` and external sources.
+  [#954](https://github.com/CocoaPods/CocoaPods/issues/954)
+* Fixed an issue with .svn folders in the copy resources script.
+  [#972](https://github.com/CocoaPods/CocoaPods/issues/972)
+
+## 0.18.1 (2013-04-10)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.18.0...0.18.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.18.0...0.18.1)
+
+###### Bug fixes
+
+* Fixed a bug introduced in 0.18 which caused compilation issues due to the
+  quoting of the inherited value in the xcconfigs.
+  [#956](https://github.com/CocoaPods/CocoaPods/issues/956)
+* Robustness against user targets including build files with missing file
+  references.
+  [#938](https://github.com/CocoaPods/CocoaPods/issues/938)
+* Partially fixed slow performance from the command line.
+  [#919](https://github.com/CocoaPods/CocoaPods/issues/919)
+
+
+## 0.18.0 (2013-04-08)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.2...0.18.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.2...0.18.0)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.2...0.5.5)
+
+###### Enhancements
+
+* Added the ability to inhibit warnings per pod.
+  Just pass `:inhibit_warnings => true` inline.
+  This feature has been implemented by Marin Usalj (@mneorr).
+  [#10](https://github.com/CocoaPods/Core/pull/10)
+  [#934](https://github.com/CocoaPods/CocoaPods/pull/934)
+* Inhibiting warnings will also suppress the warnings of the static analyzer.
+* A new build phase has been added to check that your
+  installation is in sync with the `Podfile.lock` and fail the build otherwise.
+  The new build phase will not be added automatically to targets already
+  integrated with CocoaPods; for integrating targets manually see [this
+  comment](https://github.com/CocoaPods/CocoaPods/pull/946#issuecomment-16042419).
+  This feature has been implemented by Ullrich Schäfer (@stigi).
+  [#946](https://github.com/CocoaPods/CocoaPods/pull/946)
+* The `pod search` command now accepts the `--ios` and the `--osx` arguments
+  to filter the results by platform.
+  [#625](https://github.com/CocoaPods/CocoaPods/issues/625)
+* The developer frameworks are automatically added if `SenTestingKit` is
+  detected. There is no need to specify them in specifications anymore.
+  [#771](https://github.com/CocoaPods/CocoaPods/issues/771)
+* The `--no-update` argument of the `install`, `update`, and `outdated` subcommands
+  has been renamed to `--no-repo-update`.
+  [#913](https://github.com/CocoaPods/CocoaPods/issues/913)
+
+###### Bug fixes
+
+* Improved handling for Xcode projects containing non-ASCII characters.
+  Special thanks to Cédric Luthi (@0xced), Vincent Isambart (@vincentisambart),
+  and Manfred Stienstra (@Manfred) for helping to develop the workaround.
+  [#926](https://github.com/CocoaPods/CocoaPods/issues/926)
+* Corrected improper configuration of the `PODS_ROOT` xcconfig variable in
+  non-integrating installations.
+  [#918](https://github.com/CocoaPods/CocoaPods/issues/918)
+* Improved support for pre-release versions using dashes.
+  [#935](https://github.com/CocoaPods/CocoaPods/issues/935)
+* Documentation sets are now namespaced by pod, solving improper attribution.
+  [#659](https://github.com/CocoaPods/CocoaPods/issues/659)
+
+
+## 0.17.2 (2013-04-03)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.1...0.17.2)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.1...0.17.2)
+
+###### Bug fixes
+
+* Fix crash related to the specification of the workspace as a relative path.
+  [#920](https://github.com/CocoaPods/CocoaPods/issues/920)
+* Fix an issue related to the `podspec` DSL directive of the Podfile for
+  specifications with internal dependencies.
+  [#928](https://github.com/CocoaPods/CocoaPods/issues/928)
+* Fix crash related to search from the command line.
+  [#929](https://github.com/CocoaPods/CocoaPods/issues/929)
+
+###### Ancillary enhancements
+
+* Enabled the FileList deprecation warning in the Linter.
+* CocoaPods will raise if version requirements are specified for dependencies
+  with external sources.
+* The exclude patterns now handle folders automatically.
+
+
+## 0.17.1 (2013-03-30)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0...0.17.1)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0...0.17.1)
+
+###### Bug fixes
+
+* Always create the CACHE_ROOT directory when performing a search.
+  [#917](https://github.com/CocoaPods/CocoaPods/issues/917)
+
+## 0.17.0 (2013-03-29)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc7...0.17.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc7...0.17.0)
+
+#### GM
+
+###### Bug fixes
+
+* Don’t break when specifying doc options, but not appledoc ones.
+  [#906](https://github.com/CocoaPods/CocoaPods/issues/906)
+* Sort resolved specifications.
+  [#907](https://github.com/CocoaPods/CocoaPods/issues/907)
+* Subspecs do not need to include HEAD information.
+  [#905](https://github.com/CocoaPods/CocoaPods/issues/905)
+
+###### Ancillary enhancements
+
+* Allow the analyzer to do its work without updating sources.
+  [motion-cocoapods#50](https://github.com/HipByte/motion-cocoapods/pull/50)
+
+#### rc7
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc6...0.17.0.rc7)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc6...0.17.0.rc7)
+
+###### Bug fixes
+
+- Fixed an issue which led to the missing declaration of the plural directives
+  of the Specification DSL.
+  [#816](https://github.com/CocoaPods/CocoaPods/issues/816)
+- The resolver now respects the order of specification of the target
+  definitions.
+- Restore usage of cache file to store a cache for expensive stats.
+- Moved declaration of `Pod::FileList` to CocoaPods-core.
+
+###### Ancillary enhancements
+
+- Fine-tuned the Specification linter and the health reporter of repositories.
+- Search results are sorted.
+
+#### rc6
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc5...0.17.0.rc6)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc5...0.17.0.rc6)
+
+###### Bug fixes
+
+- CocoaPods updates the repositories by default.
+  [#872](https://github.com/CocoaPods/CocoaPods/issues/872)
+- Fixed a crash which was present when the Podfile specifies a workspace.
+  [#871](https://github.com/CocoaPods/CocoaPods/issues/871)
+- Fix for a bug which led to a broken installation in paths containing
+  brackets and other glob metacharacters.
+  [#862](https://github.com/CocoaPods/CocoaPods/issues/862)
+- Fix for a bug related to the case of the paths which led to cleaning all files
+  in the directories of the Pods.
+
+
+###### Ancillary enhancements
+
+- CocoaPods now maintains a search index which is updated incrementally instead
+  of analyzing all the specs every time. The search index can be updated
+  manually with the `pod ipc update-search-index` command (see the sketch
+  after this list).
+- Enhancements to the `pod repo lint` command.
+- CocoaPods will no longer create the pre-commit hook in the master repo
+  during setup. If already created, it can be removed by deleting the
+  `~/.cocoapods/master/.git/hooks/pre-commit` path.
+- Improved support for linting and validating the specs repo.
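+
+For reference, the manual index update mentioned above is a single command,
+and the `--help` flag should list the other `ipc` subcommands:
+
+    $ pod ipc update-search-index
+    $ pod ipc --help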
+
+#### rc5
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc4...0.17.0.rc5)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc4...0.17.0.rc5)
+
+###### Bug fixes
+
+- The `--no-clean` argument is no longer ignored by the installer.
+- Proper handling of file patterns ending with a slash.
+- More user errors are raised with an informative message.
+
+#### rc4
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc3...0.17.0.rc4)
+
+###### Bug fixes
+
+- Restored compatibility with `Podfile::TargetDefinition#copy_resources_script_name`
+  in the Podfile hooks.
+- Updated the copy resources script so that it will use base internationalization.
+  [#846](https://github.com/CocoaPods/CocoaPods/issues/846)
+- Robustness against an empty configuration file.
+- Fixed a crash with `pod push`.
+  [#848](https://github.com/CocoaPods/CocoaPods/issues/848)
+- Fixed an issue which led to the creation of a Pods project which would
+  crash Xcode.
+  [#854](https://github.com/CocoaPods/CocoaPods/issues/854)
+- Fixed a crash related to a `PBXVariantGroup` present in the frameworks build
+  phase of client targets.
+  [#859](https://github.com/CocoaPods/CocoaPods/issues/859)
+
+
+###### Ancillary enhancements
+
+- The `podspec` option of the `pod` directive of the Podfile DSL now accepts
+  folders.
+
+#### rc3
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc2...0.17.0.rc3)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.5.0...0.5.1)
+
+###### Bug fixes
+
+- CocoaPods will no longer crash if the license file indicated in the spec
+  doesn't exist.
+- Pre install hooks are called before the Pods are cleaned.
+- Fixed an issue which prevented the inclusion of `OTHER_CFLAGS` and
+  `OTHER_CPLUSPLUSFLAGS` in the release builds of the Pods project.
+- Fixed `pod lint --local`.
+- Fixed the `--allow-warnings` option of `pod push`.
+  [#835](https://github.com/CocoaPods/CocoaPods/issues/835)
+- Added `copy_resources_script_name` to the library representation used in the
+  hooks.
+  [#837](https://github.com/CocoaPods/CocoaPods/issues/837)
+
+###### Ancillary enhancements
+
+- General improvements to `pod ipc`.
+- Added `pod ipc repl` subcommand.
+
+#### rc2
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.17.0.rc1...0.17.0.rc2)
+• [cocoapods-core](https://github.com/CocoaPods/Core/compare/0.17.0.rc1...0.17.0.rc2)
+
+###### Bug fixes
+
+- Restored output coloring.
+- Fixed a crash related to subspecs.
+  [#819](https://github.com/CocoaPods/CocoaPods/issues/819)
+- Git repos were not cached for dependencies with external sources.
+  [#820](https://github.com/CocoaPods/CocoaPods/issues/820)
+- Restored support for directories for the `preserve_patterns` specification
+  attribute.
+  [#823](https://github.com/CocoaPods/CocoaPods/issues/823)
+
+#### rc1
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.4...0.17.0.rc1)
+• [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.3...0.5.0)
+• [cocoapods-core](https://github.com/CocoaPods/Core)
+• [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader)
+
+###### __Notice__
+
+At some point in the future the master repo will be switched to the YAML format of
+specifications. This means that specifications with hooks (or any other kind of
+dynamic logic) will not be accepted. Please let us know if there is need for
+other DSL attributes or any other kind of support.
+
+Currently the following specifications fail to load as they depended on the
+CocoaPods internals and need to be updated:
+
+- LibComponentLogging-pods/0.0.1/LibComponentLogging-pods.podspec
+- RestKit/0.9.3/RestKit.podspec
+- Three20/1.0.11/Three20.podspec
+- ARAnalytics/1.1/ARAnalytics.podspec
+
+Other specifications might present compatibility issues for the reasons
+presented below.
+
+###### __Breaking__
+
+- Subspecs do **not** inherit the file patterns from the parent spec anymore.
+  This feature made the implementation more complicated and was not easy to
+  explain to podspec maintainers. Compatibility can be easily fixed by adding
+  a 'Core' subspec.
+- Support for inline podspecs has been removed.
+- The support for Rake::FileList is being deprecated, in favor of a more
+  consistent DSL. Rake::FileList also presented issues because it would access
+  the file system as soon as it was converted to an array.
+- The hooks architecture has been re-factored and might present
+  incompatibilities (please open an issue if appropriate).
+- The `requires_arc` attribute default value is transitioning from `false` to
+  `true`. In the meantime a value is needed to pass the lint.
+- Deprecated `copy_header_mapping` hook.
+- Deprecated `exclude_header_search_paths` attribute.
+- External sources are not supported in the dependencies of specifications
+  anymore. Actually they never have been supported, they just happened to work.
+
+###### DSL
+
+- Podfile:
+  - It is no longer needed to specify the platform (unless not integrating)
+    as CocoaPods can now infer the platform from the integrated targets.
+- Specification:
+  - `preferred_dependency` has been renamed to `default_subspec`.
+  - Added `exclude_files` attribute.
+  - Added `screenshots` attribute.
+  - Added default values for attributes like `source_files`.
+
+###### Enhancements
+
+- Released preview [documentation](http://docs.cocoapods.org).
+- CocoaPods now has support for working in teams and not committing the Pods
+  folder, as it will keep track of the status of the Pods folder.
+  [#552](https://github.com/CocoaPods/CocoaPods/issues/552)
+- Simplified installation: no specific version of RubyGems is required anymore.
+- The workspace is written only if needed, greatly reducing the occasions in
+  which Xcode asks to revert.
+- The Lockfile is sorted, reducing the SCM noise.
+  [#591](https://github.com/CocoaPods/CocoaPods/issues/591)
+- Added Podfile, Frameworks, and Resources to the Pods project.
+  [#647](https://github.com/CocoaPods/CocoaPods/issues/647)
+  [#588](https://github.com/CocoaPods/CocoaPods/issues/588)
+- Adds new subcommand `pod spec cat NAME` to print a spec file to standard output.
+- Specification hooks are only called when the specification is installed.
+- The `--no-clean` option of the `pod spec lint` command now displays the Pods
+  project for inspection.
+- It is now possible to specify default values for the configuration in
+  `~/.cocoapods/config.yaml` ([default values](https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/config.rb#L17)).
+- CocoaPods now checks the checksums of the installed specifications and
+  reinstalls them if needed.
+- Support for YAML formats of the Podfile and the Specification.
+- Added new command `pod ipc` to provide support for inter-process
+  communication through YAML formats.
+- CocoaPods now detects if the folder of a Pod is empty and reinstalls it.
+  [#534](https://github.com/CocoaPods/CocoaPods/issues/534)
+- Install hooks and the `prefix_header_contents` attribute are supported in subspecs.
+  [#617](https://github.com/CocoaPods/CocoaPods/issues/617)
+- Dashes are now supported in the versions of the Pods.
+  [#293](https://github.com/CocoaPods/CocoaPods/issues/293)
+
+###### Bug fixes
+
+- CocoaPods is not confused anymore by target definitions with different activated subspecs.
+  [#535](https://github.com/CocoaPods/CocoaPods/issues/535)
+- CocoaPods is not confused anymore by dependencies from external sources.
+  [#548](https://github.com/CocoaPods/CocoaPods/issues/548)
+- The git cache will always update against the remote if a tag is requested,
+  resolving issues where library maintainers were updating the tag after a
+  lint and would be confused by CocoaPods using the cached commit for the tag.
+  [#407](https://github.com/CocoaPods/CocoaPods/issues/407)
+  [#596](https://github.com/CocoaPods/CocoaPods/issues/596)
+
+###### Codebase
+
+- Major clean up and refactor of the whole code base.
+- Extracted the core classes into the
+  [cocoapods-core](https://github.com/CocoaPods/Core) gem.
+- Extracted downloader into
+  [cocoapods-downloader](https://github.com/CocoaPods/cocoapods-downloader).
+- Extracted command-line command & option handling into
+  [CLAide](https://github.com/CocoaPods/CLAide).
+
+## 0.16.4 (2013-02-25)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.3...0.16.4)
+
+###### Enhancements
+
+- Add explicit flattening option to `Downloader::Http`: `:flatten => true`.
+  [#814](https://github.com/CocoaPods/CocoaPods/pull/814)
+  [#812](https://github.com/CocoaPods/CocoaPods/issues/812)
+  [#1314](https://github.com/CocoaPods/Specs/pull/1314)
+
+###### Bug fixes
+
+- Explicitly require `date` in the gemspec for Ruby 2.0.0.
+  [34da3f7](https://github.com/CocoaPods/CocoaPods/commit/34da3f792b2a36fafacd4122e29025c9cf2ff38d)
+
+## 0.16.3 (2013-02-20)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.2...0.16.3) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.3...0.5.0)
+
+###### Bug fixes
+
+- Only flatten tarballs, **not** zipballs, from HTTP sources. A zipball can
+  contain single directories in the root that should be preserved, for instance
+  a framework bundle. This reverts part of the change in 0.16.1.
+  **NOTE** This will break some podspecs that were changed after 0.16.1.
+  [#783](https://github.com/CocoaPods/CocoaPods/pull/783)
+  [#727](https://github.com/CocoaPods/CocoaPods/issues/727)
+- Never consider aggregate targets in the user’s project for integration.
+  [#729](https://github.com/CocoaPods/CocoaPods/issues/729)
+  [#784](https://github.com/CocoaPods/CocoaPods/issues/784)
+- Support comments on all build phases, groups and targets in Xcode projects.
+  [#51](https://github.com/CocoaPods/Xcodeproj/pull/51)
+- Ensure default Xcode project values are copied before being used.
+  [b43087c](https://github.com/CocoaPods/Xcodeproj/commit/b43087cb342d8d44b491e702faddf54a222b23c3)
+- Block assertions in Release builds.
+  [#53](https://github.com/CocoaPods/Xcodeproj/pull/53)
+  [#803](https://github.com/CocoaPods/CocoaPods/pull/803)
+  [#802](https://github.com/CocoaPods/CocoaPods/issues/802)
+
+
+###### Enhancements
+
+- Compile Core Data model files.
+  [#795](https://github.com/CocoaPods/CocoaPods/pull/795)
+- Add `Xcodeproj::Differ`, which shows differences between Xcode projects.
+  [308941e](https://github.com/CocoaPods/Xcodeproj/commit/308941eeaa3bca817742c774fd584cc5ab1c8f84)
+
+
+## 0.16.2 (2013-02-02)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.1...0.16.2) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.1...0.4.3)
+
+###### Bug fixes
+
+- Quote storyboard and xib paths in the ‘copy resources’ script.
+  [#740](https://github.com/CocoaPods/CocoaPods/pull/740)
+- Fix use of `podspec` directive in Podfile with no options specified.
+  [#768](https://github.com/CocoaPods/CocoaPods/pull/768)
+- Generate Mac OS X Pods target with the specified deployment target.
+  [#757](https://github.com/CocoaPods/CocoaPods/issues/757)
+- Disable libSystem objects for ARC libs that target older platforms.
+  This applies when the deployment target is set to < iOS 6.0 or OS X 10.8,
+  or not specified at all.
+  [#352](https://github.com/CocoaPods/Specs/issues/352)
+  [#1161](https://github.com/CocoaPods/Specs/pull/1161)
+- Mark header source files as ‘Project’ not ‘Public’.
+  [#747](https://github.com/CocoaPods/CocoaPods/issues/747)
+- Add `PBXGroup` as acceptable `PBXFileReference` value.
+  [#49](https://github.com/CocoaPods/Xcodeproj/pull/49)
+- Make `xcodeproj show` without further arguments actually work.
+  [#45](https://github.com/CocoaPods/Xcodeproj/issues/45)
+
+###### Enhancements
+
+- Added support for pre-download over Mercurial.
+  [#750](https://github.com/CocoaPods/CocoaPods/pull/750)
+
+## 0.16.1 (2013-01-13)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0...0.16.1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.0...0.4.1)
+
+###### Bug fixes
+
+- After unpacking source from an HTTP location, move the source into the parent
+  dir if the archive contained only one child. This is done to make it
+  consistent with how source from other types of locations is described in a
+  podspec.
+  **NOTE** This might break some podspecs that assumed the incorrect layout.
+  [#727](https://github.com/CocoaPods/CocoaPods/issues/727)
+  [#728](https://github.com/CocoaPods/CocoaPods/pull/728)
+- Remove duplicate option in `pod update` command.
+  [#725](https://github.com/CocoaPods/CocoaPods/issues/725)
+- Memory fixes in Xcodeproj.
+  [#43](https://github.com/CocoaPods/Xcodeproj/pull/43)
+
+###### Xcodeproj Enhancements
+
+- Sort contents of xcconfig files by setting name.
+  [#591](https://github.com/CocoaPods/CocoaPods/issues/591)
+- Add helpers to get platform name, deployment target, and frameworks build phases.
+- Take SDKROOT into account when adding frameworks.
+
+## 0.16.0 (2012-11-22)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc5...master)
+
+###### Enhancements
+
+- Use Rake 0.9.4
+  [#657](https://github.com/CocoaPods/CocoaPods/issues/657)
+
+## 0.16.0.rc5 (2012-11-14)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc4...0.16.0.rc5)
+
+###### Deprecated
+
+- The usage of specifications defined in a Podfile is deprecated. Use the
+  `:podspec` option with a file path instead. Complete removal will most
+  probably happen in 0.17.0.
+  [#549](https://github.com/CocoaPods/CocoaPods/issues/549)
+  [#616](https://github.com/CocoaPods/CocoaPods/issues/616)
+  [#525](https://github.com/CocoaPods/CocoaPods/issues/525)
+
+###### Bug fixes
+
+- Always consider inline podspecs as needing installation.
+- Fix detection when the lib has already been integrated with the user’s target.
+  [#643](https://github.com/CocoaPods/CocoaPods/issues/643)
+  [#614](https://github.com/CocoaPods/CocoaPods/issues/614)
+  [#613](https://github.com/CocoaPods/CocoaPods/issues/613)
+
+## 0.16.0.rc4 (2012-11-14)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc3...0.16.0.rc4)
+
+###### Bug fixes
+
+- Fix for Rake 0.9.3
+  [#657](https://github.com/CocoaPods/CocoaPods/issues/657)
+
+## 0.16.0.rc3 (2012-11-02)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc2...0.16.0.rc3) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.4.0.rc1...0.4.0.rc6)
+
+###### Enhancements
+
+- Added support for copying frameworks to the app bundle.
+  [#597](https://github.com/CocoaPods/CocoaPods/pull/597)
+
+###### Bug fixes
+
+- Ignore PBXReferenceProxy while integrating into user project.
+  [#626](https://github.com/CocoaPods/CocoaPods/issues/626)
+- Added support for PBXAggregateTarget and PBXLegacyTarget.
+  [#615](https://github.com/CocoaPods/CocoaPods/issues/615)
+- Added support for PBXReferenceProxy.
+  [#612](https://github.com/CocoaPods/CocoaPods/issues/612)
+
+## 0.16.0.rc2 (2012-10-21)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.16.0.rc1...0.16.0.rc2)
+
+###### Bug fixes
+
+- Fix for the `uninitialized constant Xcodeproj::Constants` error.
+
+## 0.16.0.rc1 (2012-10-21)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.2...0.16.0.rc1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.5...0.4.0.rc1)
+
+###### Enhancements
+
+- Xcodeproj partial rewrite.
+  [#565](https://github.com/CocoaPods/CocoaPods/issues/565)
+  [#561](https://github.com/CocoaPods/CocoaPods/pull/561)
+  - Performance improvements in the `Generating support files` phase.
+  - Better support for editing existing projects and sorting groups.
+
+## 0.15.2 (2012-10-19)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.1...0.15.2)
+
+###### Enhancements
+
+- Added support for `.hh` headers (a podspec sketch follows the 0.15.1 notes
+  below).
+  [#576](https://github.com/CocoaPods/CocoaPods/pull/576)
+
+###### Bug fixes
+
+- Restored support for running CocoaPods without a terminal.
+  [#575](https://github.com/CocoaPods/CocoaPods/issues/575)
+  [#577](https://github.com/CocoaPods/CocoaPods/issues/577)
+- The git cache now always uses a barebones repo, preventing a number of related issues.
+  [#581](https://github.com/CocoaPods/CocoaPods/issues/581)
+  [#569](https://github.com/CocoaPods/CocoaPods/issues/569)
+- Improved fix for the issue that led to empty directories for Pods.
+  [#572](https://github.com/CocoaPods/CocoaPods/issues/572)
+  [#602](https://github.com/CocoaPods/CocoaPods/issues/602)
+- Xcodeproj robustness against invalid values, such as malformed UTF8.
+  [#592](https://github.com/CocoaPods/CocoaPods/issues/592)
+
+## 0.15.1 (2012-10-04)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.15.0...0.15.1)
+
+###### Enhancements
+
+- Show an error if there is a syntax error in the Podfile or Podfile.lock.
+
+###### Bug fixes
+
+- Fixed an issue that led to empty directories for Pods.
+  [#519](https://github.com/CocoaPods/CocoaPods/issues/519)
+  [#568](https://github.com/CocoaPods/CocoaPods/issues/568)
+- Fixed a crash related to the RubyGems version informative.
+  [#570](https://github.com/CocoaPods/CocoaPods/issues/570)
+- Fixed a crash for `pod outdated`.
+  [#567](https://github.com/CocoaPods/CocoaPods/issues/567)
+- Fixed an issue that led to excessively slow sets computation.
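+
+Following up on the `.hh` header support added in 0.15.2 above, a minimal
+podspec sketch that picks up C++-style headers (the pod name and file paths
+are hypothetical):
+
+    Pod::Spec.new do |s|
+      s.name         = 'MyCppPod'
+      s.version      = '0.0.1'
+      s.summary      = 'Example pod with C++-style headers.'
+      # `.hh` files are now matched as headers alongside `.h`.
+      s.source_files = 'Classes/**/*.{h,hh,m,mm}'
+    end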
+
+## 0.15.0 (2012-10-02)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0...0.15.0) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.3...0.3.4)
+
+###### Enhancements
+
+- Pod `install` will update the specs repo only if needed.
+  [#533](https://github.com/CocoaPods/CocoaPods/issues/533)
+- CocoaPods now searches for the highest version of a Pod on all the repos.
+  [#85](https://github.com/CocoaPods/CocoaPods/issues/85)
+- Added a pre install hook to the Podfile and to root specifications.
+  [#486](https://github.com/CocoaPods/CocoaPods/issues/486)
+- Support for the `header_mappings_dir` attribute in subspecs.
+- Added support for linting a Podspec using the files from its folder: `pod spec
+  lint --local`.
+- Refactored UI.
+- Added support for Podfiles named `CocoaPods.podfile`, which allows associating
+  an editor application with them in Mac OS X.
+  [#528](https://github.com/CocoaPods/CocoaPods/issues/528)
+- Added a config option to disable the new version available message.
+  [#448](https://github.com/CocoaPods/CocoaPods/issues/448)
+- Added support for extracting `.tar.bz2` files.
+  [#522](https://github.com/CocoaPods/CocoaPods/issues/522)
+- Improved feedback for errors of repo subcommands.
+  [#505](https://github.com/CocoaPods/CocoaPods/issues/505)
+
+
+###### Bug fixes
+
+- Subspecs namespacing has been restored.
+  [#541](https://github.com/CocoaPods/CocoaPods/issues/541)
+- Improvements to the git cache, which should now be more robust.
+  [#517](https://github.com/CocoaPods/CocoaPods/issues/517)
+  - In certain conditions pod setup would execute twice.
+- The git cache is now updated if a branch is not found.
+  [#514](https://github.com/CocoaPods/CocoaPods/issues/514)
+- Forcing UTF-8 encoding on licenses generation in Ruby 1.9.
+  [#530](https://github.com/CocoaPods/CocoaPods/issues/530)
+- Added support for `.hpp` headers.
+  [#244](https://github.com/CocoaPods/CocoaPods/issues/244)
+
+## 0.14.0 (2012-09-10)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0.rc2...0.14.0) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.2...0.3.3)
+
+###### Bug fixes
+
+- In certain conditions the spec of an external source would have been overridden
+  by the spec in the root of a Pod.
+  [#489](https://github.com/CocoaPods/CocoaPods/issues/489)
+- CocoaPods now uses a recent version of Octokit.
+  [#490](https://github.com/CocoaPods/CocoaPods/issues/490)
+- Fixed a bug that caused Pods with preferred dependencies to always be
+  installed.
+  [Specs#464](https://github.com/CocoaPods/CocoaPods/issues/464)
+- Fixed the Xcode 4.4+ artwork warning.
+  [Specs#508](https://github.com/CocoaPods/CocoaPods/issues/508)
+
+## 0.14.0.rc2 (2012-08-30)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.14.0.rc1...0.14.0.rc2)
+
+###### Bug fixes
+
+- Fix the incorrect name for Pods from external sources with preferred subspecs.
+  [#485](https://github.com/CocoaPods/CocoaPods/issues/485)
+- Prevent duplication of a Pod with a local source and multiple activated specs.
+  [#485](https://github.com/CocoaPods/CocoaPods/issues/485)
+- Fixed the `uninitialized constant Pod::Lockfile::Digest` error.
+  [#484](https://github.com/CocoaPods/CocoaPods/issues/484)
+
+## 0.14.0.rc1 (2012-08-28)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.13.0...0.14.0.rc1) • [Xcodeproj](https://github.com/CocoaPods/Xcodeproj/compare/0.3.1...0.3.2)
+
+###### Enhancements
+
+- Improve the installation process by preserving the installed versions of Pods
+  across installations and machines. A Pod is reinstalled if:
+  - the version required in the Podfile changes and becomes incompatible with
+    the installed one.
+    [#191](https://github.com/CocoaPods/CocoaPods/issues/191)
+  - the external source changes.
+  - the head status changes (from disabled to enabled or vice-versa).
+- Introduce the `pod update` command that installs the dependencies of the
+  Podfile **ignoring** the lockfile `Podfile.lock`.
+  [#131](https://github.com/CocoaPods/CocoaPods/issues/131)
+- Introduce the `pod outdated` command that shows the pods with known updates.
+- Add a `:local` option for dependencies which will use the source files directly
+  from a local directory. This is usually used for libraries that are being
+  developed in parallel to the end product (application/library).
+  [#458](https://github.com/CocoaPods/CocoaPods/issues/458),
+  [#415](https://github.com/CocoaPods/CocoaPods/issues/415),
+  [#156](https://github.com/CocoaPods/CocoaPods/issues/156).
+- Folders of Pods which are no longer required are removed during installation.
+  [#298](https://github.com/CocoaPods/CocoaPods/issues/298)
+- Add meaningful error messages when:
+  - a podspec can’t be found in the root of an external source.
+    [#385](https://github.com/CocoaPods/CocoaPods/issues/385),
+    [#338](https://github.com/CocoaPods/CocoaPods/issues/338),
+    [#337](https://github.com/CocoaPods/CocoaPods/issues/337).
+  - a subspec name is misspelled.
+    [#327](https://github.com/CocoaPods/CocoaPods/issues/327)
+  - an unrecognized command and/or argument is provided.
+- The subversion downloader now does an export instead of a checkout, which
+  makes it play nicer with SCMs that store metadata in each directory.
+  [#245](https://github.com/CocoaPods/CocoaPods/issues/245)
+- The Podfile is now added to the Pods project for convenient editing.
+
+###### Bug fixes
+
+- The git cache now fetches the tags from the remote if it can’t find the
+  reference.
+- Xcodeproj now builds on 10.6.8 and Travis CI without symlinking headers.
+- Only try to install, add source files to the project, and clean a Pod once.
+  [#376](https://github.com/CocoaPods/CocoaPods/issues/376)
+
+###### Notes
+
+- External Pods might be reinstalled due to the migration to the new
+  `Podfile.lock`.
+- The SCM reference of head Pods is not preserved across machines.
+- Pods whose inline specification changed are not detected as modified. As a
+  workaround, remove their folder stored in `Pods`.
+- Pods whose specification changed are not detected as modified. As a
+  workaround, remove their folder stored in `Pods`.
+
+
+## 0.13.0 (2012-08-22)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.12.0...0.13.0)
+
+###### Enhancements
+
+- Add the Podfile `podspec` directive, which allows using the dependencies of a
+  podspec file.
+  [#162](https://github.com/CocoaPods/CocoaPods/issues/162)
+- Check if any of the build settings defined in the xcconfig files is
+  overridden. [#92](https://github.com/CocoaPods/CocoaPods/issues/92)
+- The Linter now checks that there are no compiler flags that disable warnings.
+
+###### Bug fixes
+
+- The final project isn’t affected anymore by the `inhibit_all_warnings!`
+  option.
+- Support for redirects when using a podspec from a URL.
+  [#462](https://github.com/CocoaPods/CocoaPods/issues/462)
+
+
+## 0.12.0 (2012-08-21)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.11.1...0.12.0)
+
+###### Enhancements
+
+- The documentation is generated using the public headers if they are
+  specified.
+- In case of a download failure the installation is aborted and the error
+  message is shown.
+- Git submodules are initialized only if requested.
+- Don’t impose a certain structure of the user’s project by raising if no
+  ‘Frameworks’ group exists.
+  [#431](https://github.com/CocoaPods/CocoaPods/pull/431)
+- Support for GitHub Gists in the linter.
+- Allow specifying ARC settings in subspecs.
+- Add the Podfile `inhibit_all_warnings!` directive, which will inhibit all
+  warnings from the Pods library (see the sketch after the 0.9.1 notes below).
+  [#209](https://github.com/CocoaPods/CocoaPods/issues/209)
+- Make the Pods Xcode project prettier by namespacing subspecs in nested
+  groups. [#466](https://github.com/CocoaPods/CocoaPods/pull/466)
+
+
+## 0.11.1 (2012-08-09)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.11.0...0.11.1)
+
+###### Bug fixes
+
+- Fixed a crash related to subspecs without header files. [#449]
+- Git submodules are loaded after the appropriate reference is checked out and
+  are no longer loaded in the cache. [#451]
+- Fixed SVN support for the head version. [#432]
+
+
+## 0.11.0 (2012-08-08)
+
+[CocoaPods](https://github.com/CocoaPods/CocoaPods/compare/0.10.0...0.11.0)
+
+###### Enhancements
+
+- Added support for public headers. [#440]
+- Added `pod repo lint`. [#423]
+- Improved support for the `:head` option and SVN repositories.
+- When integrating Pods with a project without a "Frameworks" group in the root
+  of the project, raise an informative message.
+  [#431](https://github.com/CocoaPods/CocoaPods/pull/431)
+- Dropped support for legacy `config.ios?` and `config.osx?`.
+
+###### Bug fixes
+
+- The version message now correctly terminates with a 0 exit status.
+- Resolved an issue that led to git error messages in the error report.
+
+
+## 0.10.0 (2012-07-29)
+
+[CocoaPods](http://git.io/4i75YA)
+
+###### Enhancements
+
+- Added a `--local-only` option to `pod push` so that developers can push
+  locally and test before pushing to a remote. [#405](http://git.io/0ILJEw)
+- Added line number information for errors generated in the Podfile.
+  [#408](http://git.io/fWQvMg)
+- Pods stored in git repositories now initialize submodules.
+  [#406](http://git.io/L9ssSw)
+
+###### Bug fixes
+
+- Removed the note about the post install hook from the linter.
+- Improved xcodebuild error detection in the linter.
+- Ensure the git cache exists before updating it, when trying to install the
+  ‘bleeding edge’ of a pod. [#426](http://git.io/d4eqRA)
+- Clean downloaded external pods **after** resolving and activating (sub)specs.
+  [#414](http://git.io/i77q_w)
+- Support `tar.gz` as a filename in an HTTP source. [#428](http://git.io/qhwKkA)
+
+
+## 0.9.2 (2012-07-16)
+
+[CocoaPods](http://git.io/AVlRKg) • [Xcodeproj](http://git.io/xHbc0w)
+
+###### Bug fixes
+
+- When generating the PodsDummy class, make that class unique to each target. [#402](http://git.io/NntYiQ)
+- Raise an informative error message when the platform in the `Podfile` is omitted or incorrect. [#403](http://git.io/k5EcUQ)
+
+
+## 0.9.1 (2012-07-14)
+
+[CocoaPods](http://git.io/_kqAbw)
+
+###### Bug fixes
+
+- CocoaPods 0.9.x needs Xcodeproj 0.3.0.
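+
+As a usage note for the `inhibit_all_warnings!` directive introduced in 0.12.0
+above, here is a minimal Podfile sketch (the pod name is illustrative):
+
+```ruby
+platform :ios
+
+# Silence compiler warnings coming from the Pods library without
+# affecting the warnings of the main project.
+inhibit_all_warnings!
+
+pod 'JSONKit'
+```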
+
+
+## 0.9.0 (2012-07-14)
+
+[CocoaPods](http://git.io/kucJQw) • [Xcodeproj](http://git.io/5eLL8g)
+
+###### Enhancements
+
+- Force downloading the ‘bleeding edge’ version of a pod with the `:head` flag. [#392](http://git.io/t_NVRQ)
+- Support for weak frameworks. [#263](http://git.io/XZDuog)
+- Use double quotes when shelling out. This makes a path like `$HOME/local/lib` work. [#396](http://git.io/DnBzhA)
+
+###### Bug fixes
+
+- Relaxed the linter to accept pods that only specify paths to preserve (like TuneupJS).
+- Gender neutralization of the podfile documentation. [#384](http://git.io/MAsHXg)
+- Exit early when using an old RubyGems version (< 1.4.0). These versions contain subtle bugs
+  related to prerelease version comparisons. Unfortunately, OS X >= 10.7 ships with 1.3.6. [#398](http://git.io/Lr7DoA)
+
+
+## 0.8.0 (2012-07-09)
+
+[CocoaPods](http://git.io/RgMF3w) • [Xcodeproj](http://git.io/KBKE_Q)
+
+###### Breaking change
+
+Syntax change in the Podfile: `dependency` has been replaced by `pod`.
+
+```ruby
+platform :ios
+pod 'JSONKit', '~> 1.4'
+pod 'Reachability', '~> 2.0.4'
+```
+
+###### Bug fixes
+
+- Properly quote all paths given to Git.
+
+
+## 0.7.0 (2012-07-06)
+
+[CocoaPods](http://git.io/Agia6A) • [Xcodeproj](http://git.io/mlqquw)
+
+###### Features
+
+- Added support for branches in git repos.
+- Added support for linting remote files, i.e. `pod spec lint http://raw/file.podspec`.
+- Improved `Spec create template`.
+- The indentation is automatically stripped for podspec strings.
+
+###### Bug fixes
+
+- The default warnings of Xcode are not overridden anymore.
+- Improvements to the detection of the license files.
+- Improvements to `pod spec lint`.
+- CocoaPods is now case insensitive.
+
+
+## 0.6.1 (2012-07-01)
+
+[CocoaPods](http://git.io/45wFjw) • [Xcodeproj](http://git.io/rRA4XQ)
+
+###### Bug fixes
+
+- Switched to the master branch for the specs repo.
+- Fixed a crash with `pod spec lint` related to `preserve_paths`.
+- Fixed a bug that caused subspecs to not inherit the compiler flags of the top level specification.
+- Fixed a bug that caused duplication of system frameworks.
+
+
+## 0.6.0 (2012-07-01)
+
+A full list of all the changes since 0.5.1 can be found [here][6].
+
+
+### Link with specific targets
+
+CocoaPods can now integrate all the targets specified in your `Podfile`.
+
+To specify which target, in your Xcode project, a Pods target should be linked
+with, use the `link_with` method like so:
+
+```ruby
+platform :ios
+
+workspace 'MyWorkspace'
+
+link_with ['MyAppTarget', 'MyOtherAppTarget']
+dependency 'JSONKit'
+
+target :test, :exclusive => true do
+  xcodeproj 'TestProject', 'Test' => :debug
+  link_with 'TestRunnerTarget'
+  dependency 'Kiwi'
+end
+```
+
+_NOTE: As you can see, it can take either one target name or an array of names._
+
+* If no explicit Xcode workspace is specified and only **one** project exists in
+the same directory as the Podfile, then the name of that project is used as the
+workspace’s name.
+
+* If no explicit Xcode project is specified for a target, it will use the Xcode
+project of the parent target. If no target specifies an explicit Xcode project
+and there is only **one** project in the same directory as the Podfile then that
+project will be used.
+
+* If no explicit target is specified, then the Pods target will be linked with
+the first target in your project. So if you only have one target you do not
+need to specify the target to link with; a minimal Podfile relying on these
+defaults is sketched below.
+
+See [#76](https://github.com/CocoaPods/CocoaPods/issues/76) for more info.
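+
+Putting the inference rules above together, a minimal Podfile that names no
+workspace, project, or target explicitly could look like this (a sketch,
+assuming a single Xcode project sits next to the Podfile):
+
+```ruby
+platform :ios
+
+# With a single project in this directory and no explicit link_with,
+# the Pods target is linked with the first target of that project.
+dependency 'JSONKit'
+```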
+
+Finally, CocoaPods will add build configurations to the Pods project for all
+configurations in the other projects in the workspace. By default the
+configurations are based on the `Release` configuration; to base them on the
+`Debug` configuration you will have to explicitly specify them, as is done in
+the following line of the example above:
+
+```ruby
+xcodeproj 'TestProject', 'Test' => :debug
+```
+
+
+### Documentation
+
+CocoaPods will now generate documentation for every library with the
+[`appledoc`][5] tool and install it into Xcode’s documentation viewer.
+
+You can customize the settings used like so:
+
+```ruby
+s.documentation = { :appledoc => ['--product-name', 'My awesome project!'] }
+```
+
+Alternatively, you can specify a URL where an HTML version of the documentation
+can be found:
+
+```ruby
+s.documentation = { :html => 'http://example.com/docs/index.html' }
+```
+
+See [#149](https://github.com/CocoaPods/CocoaPods/issues/149) and
+[#151](https://github.com/CocoaPods/CocoaPods/issues/151) for more info.
+
+
+### Licenses & Documentation
+
+CocoaPods will now generate two 'Acknowledgements' files for each target specified
+in your Podfile which contain the license details for each Pod used in that target
+(assuming details have been specified in the Pod spec).
+
+There is a markdown file, for general consumption, as well as a property list file
+that can be added to a settings bundle for an iOS application.
+
+You don't need to do anything for this to happen, it should just work.
+
+If you're not happy with the default boilerplate text generated for the title, header
+and footnotes in the files, it's possible to customize these by overriding the methods
+that generate the text in your `Podfile` like this:
+
+```ruby
+class ::Pod::Generator::Acknowledgements
+  def header_text
+    "My custom header text"
+  end
+end
+```
+
+You can even go one step further and customize the text on a per target basis by
+checking against the target name, like this:
+
+```ruby
+class ::Pod::Generator::Acknowledgements
+  def header_text
+    if @target_definition.label.end_with?("MyTargetName")
+      "Custom header text for MyTargetName"
+    else
+      "Custom header text for other targets"
+    end
+  end
+end
+```
+
+Finally, here's a list of the methods that are available to override:
+
+```ruby
+header_title
+header_text
+footnote_title
+footnote_text
+```
+
+
+### Introduced two new classes: LocalPod and Sandbox
+
+The Sandbox represents the entire contents of the `POD_ROOT` (normally
+`SOURCE_ROOT/Pods`). A LocalPod represents a pod that has been installed within
+the Sandbox.
+
+These two classes can be used as better homes for various pieces of logic
+currently spread throughout the installation process and provide a better API
+for working with the contents of this directory.
+
+
+### Xcodeproj API
+
+All Xcodeproj APIs are now in `snake_case`, instead of `camelCase`. If you are
+manipulating the project from your Podfile's `post_install` hook, or from a
+podspec, then update these method calls.
+
+
+### Enhancements
+
+* [#188](https://github.com/CocoaPods/CocoaPods/pull/188): The `list` command now
+  displays the specifications introduced in the master repo when it is given the
+  number of days to take into account as an option.
+
+* [#188](https://github.com/CocoaPods/CocoaPods/pull/188): Transferred search
+  layout improvements and options to the `list` command.
+
+* [#166](https://github.com/CocoaPods/CocoaPods/issues/166): Added printing
+  of homepage and source to search results.
+
+* [#177](https://github.com/CocoaPods/CocoaPods/issues/177): Added a `--stat`
+  option to display watchers and forks for pods hosted on GitHub.
+
+* [#177](https://github.com/CocoaPods/CocoaPods/issues/177): Introduced colors
+  and tuned the layout of search.
+
+* [#112](https://github.com/CocoaPods/CocoaPods/issues/112): Introduced a `--push`
+  option to `$ pod setup`. It configures the master spec repository to use the private
+  push URL. The change is preserved in future calls to `$ pod setup`.
+
+* [#153](https://github.com/CocoaPods/CocoaPods/issues/153): It is no longer
+  required to call `$ pod setup`.
+
+* [#163](https://github.com/CocoaPods/CocoaPods/issues/163): Print a template
+  for a new ticket when an error occurs.
+
+* Added a new GitHub-specific downloader that can download repositories as a
+  gzipped tarball.
+
+* No more global state is kept during resolving of dependencies.
+
+* Updated Xcodeproj to have a friendlier API.
+
+
+### Fixes
+
+* [#142](https://github.com/CocoaPods/CocoaPods/issues/142): Xcode 4.3.2 no longer
+  supports passing the `-fobjc-arc` flag to the linker and will fail to build. The
+  addition of this flag was a workaround for a compiler bug in previous versions.
+  This flag is no longer included by default - to keep using this flag, you need to
+  add `set_arc_compatibility_flag!` to your Podfile.
+
+* [#183](https://github.com/CocoaPods/CocoaPods/issues/183): Fix for a
+  `.DS_Store` file in `~/.cocoapods` preventing `$ pod install` from running.
+
+* [#134](https://github.com/CocoaPods/CocoaPods/issues/134): Match the
+  `IPHONEOS_DEPLOYMENT_TARGET` build setting with the `deployment_target` option in
+  the generated Pods project file.
+
+* [#142](https://github.com/CocoaPods/CocoaPods/issues/142): Add `-fobjc-arc` to
+  `OTHER_LDFLAGS` if _any_ pods require ARC.
+
+* [#148](https://github.com/CocoaPods/CocoaPods/issues/148): External encoding
+  set to UTF-8 on Ruby 1.9 to fix a crash caused by non-ASCII characters in pod
+  descriptions.
+
+* Ensure all header search paths are quoted in the xcconfig file.
+
+* Added weak quoting to `ibtool` input paths.
+
+
+## 0.5.0 (2011-11-22)
+
+No longer requires MacRuby. Runs on MRI 1.8.7 (OS X system version) and 1.9.3.
+
+A full list of all the changes since 0.3.0 can be found [here][7].
+
+
+## 0.4.0
+
+Oops, accidentally skipped this version.
+
+
+## 0.3.0 (2011-11-12)
+
+### Multiple targets
+
+Add support for multiple static library targets in the Pods Xcode project with
+different sets of dependencies. This means that you can create a separate
+library which contains all dependencies, including extra ones that you only use
+in, for instance, a debug or test build. [[docs][1]]
+
+```ruby
+# This Podfile will build three static libraries:
+# * libPods.a
+# * libPods-debug.a
+# * libPods-test.a
+
+# This dependency is included in the `default` target, which generates the
+# `libPods.a` library, and all non-exclusive targets.
+dependency 'SSCatalog'
+
+target :debug do
+  # This dependency is only included in the `debug` target, which generates
+  # the `libPods-debug.a` library.
+  dependency 'CocoaLumberjack'
+end
+
+target :test, :exclusive => true do
+  # This dependency is *only* included in the `test` target, which generates
+  # the `libPods-test.a` library.
+  dependency 'Kiwi'
+end
+```
+
+### Install libraries from anywhere
+
+A dependency can take a Git URL if the repo contains a podspec file in its
+root, or a podspec can be loaded from a file or HTTP location. If no podspec is
+available, a specification can be defined inline in the Podfile. [[docs][2]]
+
+```ruby
+# From a spec repo.
+dependency 'SSToolkit'
+
+# Directly from the Pod’s repo (if it contains a podspec).
+dependency 'SSToolkit', :git => 'https://github.com/samsoffes/sstoolkit.git'
+
+# Directly from the Pod’s repo (if it contains a podspec) with a specific commit (or tag).
+dependency 'SSToolkit', :git => 'https://github.com/samsoffes/sstoolkit.git',
+  :commit => '2adcd0f81740d6b0cd4589af98790eee3bd1ae7b'
+
+# From a podspec that's outside a spec repo _and_ the library’s repo. This can be a file or HTTP URL.
+dependency 'SSToolkit', :podspec => 'https://raw.github.com/gist/1353347/ef1800da9c5f5d267a642b8d3950b41174f2a6d7/SSToolkit-0.1.1.podspec'
+
+# If no podspec is available anywhere, you can define one right in your Podfile.
+dependency do |s|
+  s.name = 'SSToolkit'
+  s.version = '0.1.3'
+  s.platform = :ios
+  s.source = { :git => 'https://github.com/samsoffes/sstoolkit.git', :commit => '2adcd0f81740d6b0cd4589af98790eee3bd1ae7b' }
+  s.resources = 'Resources'
+  s.source_files = 'SSToolkit/**/*.{h,m}'
+  s.frameworks = 'QuartzCore', 'CoreGraphics'
+
+  def s.post_install(target)
+    prefix_header = config.project_pods_root + target.prefix_header_filename
+    prefix_header.open('a') do |file|
+      file.puts(%{#ifdef __OBJC__\n#import "SSToolkitDefines.h"\n#endif})
+    end
+  end
+end
+```
+
+### Add a `post_install` hook to the Podfile class
+
+This allows the user to customize, for instance, the generated Xcode project
+_before_ it’s written to disk. [[docs][3]]
+
+```ruby
+# Enable garbage collection support for MacRuby applications.
+post_install do |installer|
+  installer.project.targets.each do |target|
+    target.build_configurations.each do |config|
+      config.build_settings['GCC_ENABLE_OBJC_GC'] = 'supported'
+    end
+  end
+end
+```
+
+### Manifest
+
+Generate a Podfile.lock file next to the Podfile, which contains a manifest of
+your application’s dependencies and their dependencies.
+
+```
+PODS:
+  - JSONKit (1.4)
+  - LibComponentLogging-Core (1.1.4)
+  - LibComponentLogging-NSLog (1.0.2):
+    - LibComponentLogging-Core (>= 1.1.4)
+  - RestKit-JSON-JSONKit (0.9.3):
+    - JSONKit
+    - RestKit (= 0.9.3)
+  - RestKit-Network (0.9.3):
+    - LibComponentLogging-NSLog
+    - RestKit (= 0.9.3)
+  - RestKit-ObjectMapping (0.9.3):
+    - RestKit (= 0.9.3)
+    - RestKit-Network (= 0.9.3)
+
+DOWNLOAD_ONLY:
+  - RestKit (0.9.3)
+
+DEPENDENCIES:
+  - RestKit-JSON-JSONKit
+  - RestKit-ObjectMapping
+```
+
+### Generate Xcode projects from scratch
+
+We no longer ship template projects with the gem, but instead generate them
+programmatically. This code has moved out into its own [Xcodeproj gem][4],
+allowing you to automate Xcode related tasks.
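+
+As a taste of what that enables, a short script along these lines creates a
+project with the gem (a minimal sketch, assuming a reasonably modern Xcodeproj
+release with the `snake_case` API mentioned earlier; the names are
+illustrative):
+
+```ruby
+require 'xcodeproj'
+
+# Create a new project with a single static library target and save it to disk.
+project = Xcodeproj::Project.new('Example.xcodeproj')
+project.new_target(:static_library, 'Example', :ios)
+project.save
+```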
+
+
+
+
+[1]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L151
+[2]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L82
+[3]: https://github.com/CocoaPods/CocoaPods/blob/master/lib/cocoapods/podfile.rb#L185
+[4]: https://github.com/CocoaPods/Xcodeproj
+[5]: https://github.com/tomaz/appledoc
+[6]: https://github.com/CocoaPods/CocoaPods/compare/0.5.1...0.6.0
+[7]: https://github.com/CocoaPods/CocoaPods/compare/0.3.10...0.5.0
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/LICENSE
new file mode 100644
index 0000000..6e8a60d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/LICENSE
@@ -0,0 +1,33 @@
+This project is licensed under the MIT license.
+
+Copyright (c) 2011 Eloy Durán,
+                   Fabio Pelosin,
+                   Samuel Giddins,
+                   Marius Rackwitz,
+                   Kyle Fuller,
+                   Boris Bügling,
+                   Orta Therox,
+                   Olivier Halligon,
+                   Danielle Tomlinson,
+                   Dimitris Koutsogiorgas,
+                   Paul Beusterien, and
+                   Eric Amorde.
+
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/README.md
new file mode 100644
index 0000000..fa1d946
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/README.md
@@ -0,0 +1,81 @@
+![CocoaPods Logo](https://raw.github.com/CocoaPods/shared_resources/master/assets/cocoapods-banner-readme.png)
+
+### CocoaPods: The Cocoa dependency manager
+
+[![Build Status](https://github.com/CocoaPods/CocoaPods/workflows/Specs/badge.svg)](https://github.com/CocoaPods/CocoaPods/actions/workflows/Specs.yml)
+[![Gem Version](https://img.shields.io/gem/v/cocoapods)](https://rubygems.org/gems/cocoapods)
+[![Maintainability](https://api.codeclimate.com/v1/badges/8f0fe544baf2ae1acc2b/maintainability)](https://codeclimate.com/github/CocoaPods/CocoaPods/maintainability)
+[![Test Coverage](https://api.codeclimate.com/v1/badges/8f0fe544baf2ae1acc2b/test_coverage)](https://codeclimate.com/github/CocoaPods/CocoaPods/test_coverage)
+
+CocoaPods manages dependencies for your Xcode projects.
+
+You specify the dependencies for your project in a simple text file: your `Podfile`.
+CocoaPods recursively resolves dependencies between libraries, fetches
+source code for all dependencies, and creates and maintains an Xcode
+workspace to build your project. The latest released Xcode versions and the
+prior version are supported.
+
+Installing and updating CocoaPods is very easy.
Don't miss the [Installation +guide](https://guides.cocoapods.org/using/getting-started.html#installation) and the +[Getting Started guide](https://guides.cocoapods.org/using/getting-started.html). + +## Project Goals + +CocoaPods aims to improve the engagement with, and discoverability +of, third party open-source Cocoa libraries. These +project goals influence and drive the design of CocoaPods: + +- Create and share libraries, and use them in your own projects, + without creating extra work for library authors. Integrate + non-CocoaPods libraries and hack on your own fork of any + CocoaPods library with a simple transparent `Podspec` standard. +- Allow library authors to structure their libraries however they like. +- Save time for library authors by automating a lot of Xcode work not + related to their libraries' functionality. +- Support any source management system. (Currently supported are `git`, + `svn`, `mercurial`, `bazaar`, and various types of archives downloaded over HTTP.) +- Promote a culture of distributed collaboration on pods, but also provide + features only possible with a centralised solution to foster a community. +- Build tools on top of the core Cocoa development system, including those + typically deployed to other operating systems, such as web-services. +- Provide opinionated and automated integration, but make it completely + optional. You may manually integrate your CocoaPods dependencies + into your Xcode project as you see fit, with or without a workspace. +- Solve everyday problems for Cocoa and Xcode developers. + +## Sponsors + +Lovingly sponsored by a collection of companies, see the footer of [CocoaPods.org](https://cocoapods.org) for an up-to-date list. + +## Collaborate + +All CocoaPods development happens on GitHub. Contributions make for good karma and +we [welcome new](https://blog.cocoapods.org/starting-open-source/) contributors with joy. We take contributors seriously, and thus have a +contributor [code of conduct](CODE_OF_CONDUCT.md). + +## Links + +| Link | Description | +| :----- | :------ | +[CocoaPods.org](https://cocoapods.org/) | Homepage and search for Pods. +[@CocoaPods](https://twitter.com/CocoaPods) | Follow CocoaPods on Twitter to stay up to date. +[Blog](https://blog.cocoapods.org) | The CocoaPods blog. +[Mailing List](https://groups.google.com/group/cocoapods) | Feel free to ask any kind of question. +[Guides](https://guides.cocoapods.org) | Everything you want to know about CocoaPods. +[Changelog](https://github.com/CocoaPods/CocoaPods/blob/master/CHANGELOG.md) | See the changes introduced in each CocoaPods version. +[New Pods RSS](https://feeds.cocoapods.org/new-pods.rss) | Don't miss any new Pods. +[Code of Conduct](CODE_OF_CONDUCT.md) | Find out the standards we hold ourselves to. + +## Projects + +CocoaPods is composed of the following projects: + +| Status | Project | Description | Info | +| :-------- | :------ | :--- | :--- | +| [![Build Status](https://github.com/CocoaPods/CocoaPods/workflows/Specs/badge.svg)](https://github.com/CocoaPods/CocoaPods/actions/workflows/Specs.yml) | [CocoaPods](https://github.com/CocoaPods/CocoaPods) | The CocoaPods command line tool. | [guides](https://guides.cocoapods.org) +| [![Build Status](https://github.com/CocoaPods/Core/workflows/Specs/badge.svg)](https://github.com/CocoaPods/Core/actions/workflows/Specs.yml) | [CocoaPods Core](https://github.com/CocoaPods/Core) | Support for working with specifications and podfiles. 
| [docs](https://guides.cocoapods.org/contributing/components.html) +| [![Build Status](https://github.com/CocoaPods/cocoapods-downloader/workflows/Specs/badge.svg)](https://github.com/CocoaPods/cocoapods-downloader/actions/workflows/Specs.yml) |[CocoaPods Downloader](https://github.com/CocoaPods/cocoapods-downloader) | Downloaders for various source types. | [docs](https://www.rubydoc.info/gems/cocoapods-downloader) +| [![Build Status](https://github.com/CocoaPods/Xcodeproj/workflows/Specs/badge.svg)](https://github.com/CocoaPods/Xcodeproj/actions/workflows/Specs.yml) | [Xcodeproj](https://github.com/CocoaPods/Xcodeproj) | Create and modify Xcode projects from Ruby. | [docs](https://www.rubydoc.info/gems/xcodeproj) +| [![Build Status](https://github.com/CocoaPods/CLAide/workflows/ci/badge.svg)](https://github.com/CocoaPods/CLAide/actions/workflows/ci.yml) | [CLAide](https://github.com/CocoaPods/CLAide) | A small command-line interface framework. | [docs](https://www.rubydoc.info/gems/claide) +| [![Build Status](https://github.com/CocoaPods/Molinillo/workflows/test/badge.svg)](https://github.com/CocoaPods/Molinillo/actions/workflows/test.yml) | [Molinillo](https://github.com/CocoaPods/Molinillo) | A powerful generic dependency resolver. | [docs](https://www.rubydoc.info/gems/molinillo) +| | [Master Repo ](https://github.com/CocoaPods/Specs) | Master repository of specifications. | [guides](https://guides.cocoapods.org/making/specs-and-specs-repo.html) diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/pod b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/pod new file mode 100644 index 0000000..47f6a27 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/pod @@ -0,0 +1,56 @@ +#!/usr/bin/env ruby + +if Encoding.default_external != Encoding::UTF_8 + + if ARGV.include? '--no-ansi' + STDERR.puts <<-DOC + WARNING: CocoaPods requires your terminal to be using UTF-8 encoding. + Consider adding the following to ~/.profile: + + export LANG=en_US.UTF-8 + DOC + else + STDERR.puts <<-DOC + \e[33mWARNING: CocoaPods requires your terminal to be using UTF-8 encoding. + Consider adding the following to ~/.profile: + + export LANG=en_US.UTF-8 + \e[0m + DOC + end + +end + +if $PROGRAM_NAME == __FILE__ && !ENV['COCOAPODS_NO_BUNDLER'] + ENV['BUNDLE_GEMFILE'] = File.expand_path('../../Gemfile', __FILE__) + require 'rubygems' + require 'bundler/setup' + $LOAD_PATH.unshift File.expand_path('../../lib', __FILE__) +elsif ENV['COCOAPODS_NO_BUNDLER'] + require 'rubygems' + gem 'cocoapods' +end + +STDOUT.sync = true if ENV['CP_STDOUT_SYNC'] == 'TRUE' + +require 'cocoapods' + +if profile_filename = ENV['COCOAPODS_PROFILE'] + require 'ruby-prof' + reporter = + case (profile_extname = File.extname(profile_filename)) + when '.txt' + RubyProf::FlatPrinterWithLineNumbers + when '.html' + RubyProf::GraphHtmlPrinter + when '.callgrind' + RubyProf::CallTreePrinter + else + raise "Unknown profiler format indicated by extension: #{profile_extname}" + end + File.open(profile_filename, 'w') do |io| + reporter.new(RubyProf.profile { Pod::Command.run(ARGV) }).print(io) + end +else + Pod::Command.run(ARGV) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/sandbox-pod b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/sandbox-pod new file mode 100644 index 0000000..2752885 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/bin/sandbox-pod @@ -0,0 +1,168 @@ +#!/usr/bin/env ruby +# encoding: utf-8 + +# This bin wrapper runs the `pod` command in a OS X sandbox. 
The reason for this +# is to ensure that people can’t use malicious code from pod specifications. +# +# It does this by creating a ‘seatbelt’ profile on the fly and executing the +# given command through `/usr/bin/sandbox-exec`. This profile format is an +# undocumented format, which uses TinyScheme to implement its DSL. +# +# Even though it uses a undocumented format, it’s actually very self-explanatory. +# Because we use a whitelist approach, `(deny default)`, any action that is +# denied is logged to `/var/log/system.log`. So tailing that should provide +# enough information on steps that need to be take to get something to work. +# +# For more information see: +# +# * https://github.com/CocoaPods/CocoaPods/issues/939 +# * http://reverse.put.as/wp-content/uploads/2011/08/The-Apple-Sandbox-BHDC2011-Slides.pdf +# * http://reverse.put.as/wp-content/uploads/2011/08/The-Apple-Sandbox-BHDC2011-Paper.pdf +# * https://github.com/s7ephen/OSX-Sandbox--Seatbelt--Profiles +# * `$ man sandbox-exec` +# * `$ ls /usr/share/sandbox` + +if $0 == __FILE__ + $:.unshift File.expand_path('../../lib', __FILE__) +end + +require 'pathname' +require 'cocoapods/config' +require 'rbconfig' +require 'erb' + +PROFILE_ERB_TEMPLATE = <<-EOS +(version 1) +(debug allow) + +(import "mDNSResponder.sb") + +(allow file-ioctl) +(allow sysctl-read) +(allow mach-lookup) +(allow ipc-posix-shm) +(allow process-fork) +(allow system-socket) + +; TODO make this stricter if possible +(allow network-outbound) + +(allow process-exec + (literal + "<%= pod_bin %>" + "<%= ruby_bin %>" + ) + (regex +<% prefixes.each do |prefix| %> + #"^<%= prefix %>/*" +<% end %> + ) +) + +(allow file-read-metadata) +(allow file-read* + ; This is currenly only added because using `xcodebuild` to build a resource + ; bundle target starts a FSEvents stream on `/`. No idea why that would be + ; needed, but for now it doesn’t seem like a real problem. + (literal "/") + (regex + ; TODO see if we can restrict this more, but it's going to be hard + #"^/Users/[^.]+/*" + ;#"^/Users/[^.]+/.netrc" + ;#"^/Users/[^.]+/.gemrc" + ;#"^/Users/[^.]+/.gem/*" + ;#"^/Users/[^.]+/Library/.*" + #"^/Library/*" + #"^/System/Library/*" + #"^/usr/lib/*" + #"^/usr/share/*" + #"^/private/*" + #"^/dev/*" + #"^<%= ruby_prefix %>" + #"^<%= pod_prefix %>" + #"^<%= xcode_app_path %>" + #"^<%= Pod::Config.instance.repos_dir %>" +<% prefixes.each do |prefix| %> + #"^<%= prefix %>/*" +<% end %> + ) +) + +(allow file-write* + (literal + "/dev/dtracehelper" + "/dev/null" + ) + (regex + #"^<%= Pod::Config.instance.project_root %>" + #"^<%= Pod::Config.instance.repos_dir %>" + #"^/Users/[^.]+/Library/Caches/CocoaPods/*" + #"^/dev/tty" + #"^/private/var" + ) +) + +(deny default) +EOS + +class Profile + def pod_bin + File.expand_path('../pod', __FILE__) + end + + def pod_prefix + File.expand_path('../..', pod_bin) + end + + def ruby_bin + File.join(RbConfig::CONFIG['bindir'], RbConfig::CONFIG['ruby_install_name']) + end + + def ruby_prefix + RbConfig::CONFIG['prefix'] + end + + def prefix_from_bin(bin_name) + unless (path = `which #{bin_name}`.strip).empty? + File.dirname(File.dirname(path)) + end + end + + def prefixes + prefixes = ['/bin', '/usr/bin', '/usr/libexec', xcode_app_path] + prefixes << `brew --prefix`.strip unless `which brew`.strip.empty? 
+ + # From asking people, it seems MacPorts does not have a `prefix` command, like + # Homebrew does, so make an educated guess: + if port_prefix = prefix_from_bin('port') + prefixes << port_prefix + end + + if rbenv_prefix = prefix_from_bin('rbenv') + prefixes << rbenv_prefix + end + + prefixes + end + + def developer_prefix + `xcode-select --print-path`.strip + end + + def xcode_app_path + File.expand_path('../..', developer_prefix) + end + + # TODO: raise SAFE level (0) to 4 if possible. + def generate + ERB.new(PROFILE_ERB_TEMPLATE, 0, '>').result(binding) + end +end + +# Ensure the `pod` bin doesn’t think it needs to use Bundler. +ENV['COCOAPODS_NO_BUNDLER'] = '1' + +profile = Profile.new +# puts profile.generate +command = ['/usr/bin/sandbox-exec', '-p', profile.generate, profile.pod_bin, *ARGV] +exec(*command) diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods.rb new file mode 100644 index 0000000..cd7728c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods.rb @@ -0,0 +1,78 @@ +require 'rubygems' +require 'xcodeproj' + +# It is very likely that we'll need these and as some of those paths will atm +# result in a I18n deprecation warning, we load those here now so that we can +# get rid of that warning. +require 'active_support/core_ext/string/strip' +require 'active_support/core_ext/string/inflections' +require 'active_support/core_ext/array/conversions' +# TODO: check what this actually does by the time we're going to add support for +# other locales. +require 'i18n' +if I18n.respond_to?(:enforce_available_locales=) + I18n.enforce_available_locales = false +end + +module Pod + require 'pathname' + require 'tmpdir' + + require 'cocoapods/gem_version' + require 'cocoapods/version_metadata' + require 'cocoapods-core' + require 'cocoapods/config' + require 'cocoapods/downloader' + + # Loaded immediately after dependencies to ensure proper override of their + # UI methods. + # + require 'cocoapods/user_interface' + + # Indicates an user error. This is defined in cocoapods-core. + # + class Informative < PlainInformative + def message + "[!] 
#{super}".red + end + end + + Xcodeproj::PlainInformative.send(:include, CLAide::InformativeError) + + autoload :AggregateTarget, 'cocoapods/target/aggregate_target' + autoload :Command, 'cocoapods/command' + autoload :Deintegrator, 'cocoapods_deintegrate' + autoload :Executable, 'cocoapods/executable' + autoload :ExternalSources, 'cocoapods/external_sources' + autoload :Installer, 'cocoapods/installer' + autoload :HooksManager, 'cocoapods/hooks_manager' + autoload :PodTarget, 'cocoapods/target/pod_target' + autoload :Project, 'cocoapods/project' + autoload :Resolver, 'cocoapods/resolver' + autoload :Sandbox, 'cocoapods/sandbox' + autoload :Target, 'cocoapods/target' + autoload :Validator, 'cocoapods/validator' + + module Generator + autoload :Acknowledgements, 'cocoapods/generator/acknowledgements' + autoload :Markdown, 'cocoapods/generator/acknowledgements/markdown' + autoload :Plist, 'cocoapods/generator/acknowledgements/plist' + autoload :BridgeSupport, 'cocoapods/generator/bridge_support' + autoload :Constant, 'cocoapods/generator/constant' + autoload :ScriptPhaseConstants, 'cocoapods/generator/script_phase_constants' + autoload :CopyResourcesScript, 'cocoapods/generator/copy_resources_script' + autoload :CopydSYMsScript, 'cocoapods/generator/copy_dsyms_script' + autoload :DummySource, 'cocoapods/generator/dummy_source' + autoload :EmbedFrameworksScript, 'cocoapods/generator/embed_frameworks_script' + autoload :CopyXCFrameworksScript, 'cocoapods/generator/copy_xcframework_script' + autoload :FileList, 'cocoapods/generator/file_list' + autoload :Header, 'cocoapods/generator/header' + autoload :InfoPlistFile, 'cocoapods/generator/info_plist_file' + autoload :ModuleMap, 'cocoapods/generator/module_map' + autoload :PrefixHeader, 'cocoapods/generator/prefix_header' + autoload :UmbrellaHeader, 'cocoapods/generator/umbrella_header' + autoload :AppTargetHelper, 'cocoapods/generator/app_target_helper' + end + + require 'cocoapods/core_overrides' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command.rb new file mode 100644 index 0000000..8c993fd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command.rb @@ -0,0 +1,185 @@ +require 'colored2' +require 'claide' +require 'molinillo/errors' + +module Molinillo + class ResolverError + include CLAide::InformativeError + end +end + +module Pod + class PlainInformative + include CLAide::InformativeError + end + + class Command < CLAide::Command + require 'cocoapods/command/options/repo_update' + require 'cocoapods/command/options/project_directory' + include Options + + require 'cocoapods/command/cache' + require 'cocoapods/command/env' + require 'cocoapods/command/init' + require 'cocoapods/command/install' + require 'cocoapods/command/ipc' + require 'cocoapods/command/lib' + require 'cocoapods/command/list' + require 'cocoapods/command/outdated' + require 'cocoapods/command/repo' + require 'cocoapods/command/setup' + require 'cocoapods/command/spec' + require 'cocoapods/command/update' + + self.abstract_command = true + self.command = 'pod' + self.version = VERSION + self.description = 'CocoaPods, the Cocoa library package manager.' + self.plugin_prefixes = %w(claide cocoapods) + + def self.options + [ + ['--allow-root', 'Allows CocoaPods to run as root'], + ['--silent', 'Show nothing'], + ].concat(super) + end + + def self.run(argv) + ensure_not_root_or_allowed! argv + verify_minimum_git_version! 
+      verify_xcode_license_approved!
+
+      super(argv)
+    ensure
+      UI.print_warnings
+    end
+
+    def self.report_error(exception)
+      case exception
+      when Interrupt
+        puts '[!] Cancelled'.red
+        Config.instance.verbose? ? raise : exit(1)
+      when SystemExit
+        raise
+      else
+        if ENV['COCOA_PODS_ENV'] != 'development'
+          puts UI::ErrorReport.report(exception)
+          UI::ErrorReport.search_for_exceptions(exception)
+          exit 1
+        else
+          raise exception
+        end
+      end
+    end
+
+    # @todo If a command is run inside another one, some settings which were
+    #       true might return false.
+    #
+    # @todo We should probably not even load colored unless needed.
+    #
+    # @todo Move silent flag to CLAide.
+    #
+    # @note It is important that the commands don't override the default
+    #       settings if their flag is missing (i.e. their value is nil)
+    #
+    def initialize(argv)
+      super
+      config.silent = argv.flag?('silent', config.silent)
+      config.allow_root = argv.flag?('allow-root', config.allow_root)
+      config.verbose = self.verbose? unless verbose.nil?
+      unless self.ansi_output?
+        Colored2.disable!
+        String.send(:define_method, :colorize) { |string, _| string }
+      end
+    end
+
+    # Ensures that CocoaPods is not being run as root, unless this is
+    # explicitly allowed.
+    #
+    # @return [void]
+    #
+    def self.ensure_not_root_or_allowed!(argv, uid = Process.uid, is_windows = Gem.win_platform?)
+      root_allowed = argv.include?('--allow-root') || !ENV['COCOAPODS_ALLOW_ROOT'].nil?
+      help! 'You cannot run CocoaPods as root.' unless root_allowed || uid != 0 || is_windows
+    end
+
+    # Ensures that the master spec repo exists.
+    #
+    # @return [void]
+    #
+    def ensure_master_spec_repo_exists!
+      unless config.sources_manager.master_repo_functional?
+        Setup.new(CLAide::ARGV.new([])).run
+      end
+    end
+
+    #-------------------------------------------------------------------------#
+
+    include Config::Mixin
+
+    private
+
+    # Returns a new {Gem::Version} based on the system's `git` version.
+    #
+    # @return [Gem::Version]
+    #
+    def self.git_version
+      raw_version = Executable.capture_command('git', ['--version']).first
+      unless match = raw_version.scan(/\d+\.\d+\.\d+/).first
+        raise "Failed to extract git version from `git --version` (#{raw_version.inspect})"
+      end
+      Gem::Version.new(match)
+    end
+
+    # Checks that the git version is at least 1.8.5.
+    #
+    # @raise If the git version is older than 1.8.5.
+    #
+    # @return [void]
+    #
+    def self.verify_minimum_git_version!
+      if git_version < Gem::Version.new('1.8.5')
+        raise Informative, 'You need at least git version 1.8.5 to use CocoaPods'
+      end
+    end
+
+    # Returns a new {Installer} parametrized from the {Config}.
+    #
+    # @return [Installer]
+    #
+    def installer_for_config
+      Installer.new(config.sandbox, config.podfile, config.lockfile)
+    end
+
+    # Checks that the Podfile exists.
+    #
+    # @raise If the Podfile does not exist.
+    #
+    # @return [void]
+    #
+    def verify_podfile_exists!
+      unless config.podfile
+        raise Informative, "No `Podfile' found in the project directory."
+      end
+    end
+
+    # Checks that the lockfile exists.
+    #
+    # @raise If the lockfile does not exist.
+    #
+    # @return [void]
+    #
+    def verify_lockfile_exists!
+      unless config.lockfile
+        raise Informative, "No `Podfile.lock' found in the project directory, run `pod install'."
+      end
+    end
+
+    def self.verify_xcode_license_approved!
+      if `/usr/bin/xcrun clang 2>&1` =~ /license/ && !$?.success?
+        raise Informative, 'You have not agreed to the Xcode license, which ' \
+          'you must do to use CocoaPods. Agree to the license by running: ' \
+          '`xcodebuild -license`.'
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache.rb
new file mode 100644
index 0000000..fd62ae1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache.rb
@@ -0,0 +1,28 @@
+require 'cocoapods/downloader'
+require 'cocoapods/command/cache/list'
+require 'cocoapods/command/cache/clean'
+
+module Pod
+  class Command
+    class Cache < Command
+      self.abstract_command = true
+      self.summary = 'Manipulate the CocoaPods cache'
+
+      self.description = <<-DESC
+        Manipulate the download cache for pods, like printing the cache content
+        or cleaning the pods cache.
+      DESC
+
+      def initialize(argv)
+        @cache = Downloader::Cache.new(Config.instance.cache_root + 'Pods')
+        super
+      end
+
+      private
+
+      def pod_type(pod_cache_descriptor)
+        pod_cache_descriptor[:release] ? 'Release' : 'External'
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/clean.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/clean.rb
new file mode 100644
index 0000000..aa3563f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/clean.rb
@@ -0,0 +1,90 @@
+module Pod
+  class Command
+    class Cache < Command
+      class Clean < Cache
+        self.summary = 'Remove the cache for pods'
+
+        self.description = <<-DESC
+          Remove the cache for a given pod, or clear the cache completely.
+
+          If there are multiple caches for various versions of the requested pod,
+          you will be asked which one to clean. Use `--all` to clean them all.
+
+          If you do not give a pod `NAME`, you need to specify the `--all`
+          flag (this is to avoid cleaning the whole cache by mistake).
+        DESC
+
+        self.arguments = [
+          CLAide::Argument.new('NAME', false),
+        ]
+
+        def self.options
+          [[
+            '--all', 'Remove all the cached pods without asking'
+          ]].concat(super)
+        end
+
+        def initialize(argv)
+          @pod_name = argv.shift_argument
+          @wipe_all = argv.flag?('all')
+          super
+        end
+
+        def run
+          if @pod_name.nil?
+            # Note: at this point, @wipe_all is always true (thanks to `validate!`).
+            # Remove everything.
+            clear_cache
+          else
+            # Remove only the cache for this pod.
+            cache_descriptors = @cache.cache_descriptors_per_pod[@pod_name]
+            if cache_descriptors.nil?
+              UI.notice("No cache for pod named #{@pod_name} found")
+            elsif cache_descriptors.count > 1 && !@wipe_all
+              # Ask which one to remove.
+              choices = cache_descriptors.map { |c| "#{@pod_name} v#{c[:version]} (#{pod_type(c)})" }
+              index = UI.choose_from_array(choices, 'Which pod cache do you want to remove?')
+              remove_caches([cache_descriptors[index]])
+            else
+              # Remove all the found caches of this pod.
+              remove_caches(cache_descriptors)
+            end
+          end
+        end
+
+        def validate!
+          super
+          if @pod_name.nil? && !@wipe_all
+            # Security measure, to avoid removing the pod cache too aggressively by mistake.
+            help!
'You should either specify a pod name or use the --all flag' + end + end + + private + + # Removes the specified cache + # + # @param [Array] cache_descriptors + # An array of caches to remove, each specified with the same + # hash as cache_descriptors_per_pod especially :spec_file and :slug + # + def remove_caches(cache_descriptors) + cache_descriptors.each do |desc| + UI.message("Removing spec #{desc[:spec_file]} (v#{desc[:version]})") do + FileUtils.rm(desc[:spec_file]) + end + UI.message("Removing cache #{desc[:slug]}") do + FileUtils.rm_rf(desc[:slug]) + end + end + end + + def clear_cache + UI.message("Removing the whole cache dir #{@cache.root}") do + FileUtils.rm_rf(@cache.root) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/list.rb new file mode 100644 index 0000000..7c6fffd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/cache/list.rb @@ -0,0 +1,69 @@ +module Pod + class Command + class Cache < Command + class List < Cache + self.summary = 'List the paths of pod caches for each known pod' + + self.description = <<-DESC + Shows the content of the pods cache as a YAML tree output, organized by pod. + If `NAME` is given, only the caches for that pod will be included in the output. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', false), + ] + + def self.options + [[ + '--short', 'Only print the path relative to the cache root' + ]].concat(super) + end + + def initialize(argv) + @pod_name = argv.shift_argument + @short_output = argv.flag?('short') + super + end + + def run + UI.puts("$CACHE_ROOT: #{@cache.root}") if @short_output + if @pod_name.nil? # Print all + @cache.cache_descriptors_per_pod.each do |pod_name, cache_descriptors| + print_pod_cache_infos(pod_name, cache_descriptors) + end + else # Print only for the requested pod + cache_descriptors = @cache.cache_descriptors_per_pod[@pod_name] + if cache_descriptors.nil? + UI.notice("No cache for pod named #{@pod_name} found") + else + print_pod_cache_infos(@pod_name, cache_descriptors) + end + end + end + + private + + # Prints the list of specs & pod cache dirs for a single pod name. + # + # This output is valid YAML so it can be parsed with 3rd party tools + # + # @param [Array] cache_descriptors + # The various infos about a pod cache. Keys are + # :spec_file, :version, :release and :slug + # + def print_pod_cache_infos(pod_name, cache_descriptors) + UI.puts "#{pod_name}:" + cache_descriptors.each do |desc| + if @short_output + [:spec_file, :slug].each { |k| desc[k] = desc[k].relative_path_from(@cache.root) } + end + UI.puts(" - Version: #{desc[:version]}") + UI.puts(" Type: #{pod_type(desc)}") + UI.puts(" Spec: #{desc[:spec_file]}") + UI.puts(" Pod: #{desc[:slug]}") + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/env.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/env.rb new file mode 100644 index 0000000..fac03b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/env.rb @@ -0,0 +1,66 @@ +require 'cocoapods/user_interface/error_report' + +module Pod + class Command + class Env < Command + self.summary = 'Display pod environment' + self.description = 'Display pod environment.' 
+
+      def self.options
+        options = []
+        options.concat(super.reject { |option, _| option == '--silent' })
+      end
+
+      def initialize(argv)
+        super
+        config.silent = false
+      end
+
+      def run
+        UI.puts report
+      end
+
+      def report
+        <<-EOS
+
+#{stack}
+#{executable_path}
+### Plugins
+
+```
+#{plugins_string}
+```
+#{markdown_podfile}
+EOS
+      end
+
+      def stack
+        UI::ErrorReport.stack
+      end
+
+      def markdown_podfile
+        UI::ErrorReport.markdown_podfile
+      end
+
+      def plugins_string
+        UI::ErrorReport.plugins_string
+      end
+
+      private
+
+      def executable_path
+        <<-EOS
+### Installation Source
+
+```
+Executable Path: #{actual_path}
+```
+EOS
+      end
+
+      def actual_path
+        $PROGRAM_NAME
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/init.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/init.rb
new file mode 100644
index 0000000..1f27c9f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/init.rb
@@ -0,0 +1,122 @@
+require 'xcodeproj'
+require 'active_support/core_ext/string/strip'
+
+module Pod
+  class Command
+    class Init < Command
+      self.summary = 'Generate a Podfile for the current directory'
+      self.description = <<-DESC
+        Creates a Podfile for the current directory if none currently exists. If
+        an `XCODEPROJ` project file is specified or if there is only a single
+        project file in the current directory, targets will be automatically
+        generated based on targets defined in the project.
+
+        It is possible to specify a list of dependencies which will be used by
+        the template in the `Podfile.default` (normal targets) and `Podfile.test`
+        (test targets) files, which should be stored in the
+        `#{Config.instance.templates_dir}` folder.
+      DESC
+      self.arguments = [
+        CLAide::Argument.new('XCODEPROJ', false),
+      ]
+
+      def initialize(argv)
+        @podfile_path = Pathname.pwd + 'Podfile'
+        @project_path = argv.shift_argument
+        @project_paths = Pathname.pwd.children.select { |pn| pn.extname == '.xcodeproj' }
+        super
+      end
+
+      def validate!
+        super
+        raise Informative, 'Existing Podfile found in directory' unless config.podfile_path_in_dir(Pathname.pwd).nil?
+        if @project_path
+          help! "Xcode project at #{@project_path} does not exist" unless File.exist? @project_path
+          project_path = @project_path
+        else
+          raise Informative, 'No Xcode project found, please specify one' unless @project_paths.length > 0
+          raise Informative, 'Multiple Xcode projects found, please specify one' unless @project_paths.length == 1
+          project_path = @project_paths.first
+        end
+        @xcode_project = Xcodeproj::Project.open(project_path)
+      end
+
+      def run
+        @podfile_path.open('w') { |f| f << podfile_template(@xcode_project) }
+      end
+
+      private
+
+      # @param [Xcodeproj::Project] project
+      #        The Xcode project to generate a podfile for.
+      #
+      # @return [String] the text of the Podfile for the provided project
+      #
+      def podfile_template(project)
+        podfile = ''
+        podfile << "project '#{@project_path}'\n\n" if @project_path
+        podfile << <<-PLATFORM.strip_heredoc
+          # Uncomment the next line to define a global platform for your project
+          # platform :ios, '9.0'
+        PLATFORM
+
+        # Split out the targets into app and test targets
+        test_targets, app_targets = project.native_targets.sort_by { |t| t.name.downcase }.partition(&:test_target_type?)
+ + app_targets.each do |app_target| + test_targets_for_app = test_targets.select do |target| + target.name.downcase.start_with?(app_target.name.downcase) + end + podfile << target_module(app_target, test_targets_for_app) + end + + podfile + end + + # @param [PBXNativeTarget] host the native host target for the module. + # + # @param [Array] tests the native test targets for the module. + # + # @return [String] the text for the target module. + # + def target_module(host, tests) + target_module = "\ntarget '#{host.name.gsub(/'/, "\\\\\'")}' do\n" + + target_module << <<-RUBY + # Comment the next line if you don't want to use dynamic frameworks + use_frameworks! + + RUBY + + target_module << template_contents(config.default_podfile_path, ' ', "Pods for #{host.name}\n") + + tests.each do |test| + target_module << "\n target '#{test.name.gsub(/'/, "\\\\\'")}' do\n" + unless Pod::AggregateTarget::EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include?(host.symbol_type) || test.symbol_type == :ui_test_bundle + target_module << " inherit! :search_paths\n" + end + target_module << template_contents(config.default_test_podfile_path, ' ', 'Pods for testing') + target_module << "\n end\n" + end + + target_module << "\nend\n" + end + + # @param [Pathname] path the path of the template to load contents from. + # + # @param [String] prefix the prefix to use for each line. + # + # @param [String] fallback the fallback contents to use if the path for the template does not exist. + # + # @return [String] the template contents for the given path. + # + def template_contents(path, prefix, fallback) + if path.exist? + path.read.chomp.lines.map { |line| "#{prefix}#{line}" }.join("\n") + else + "#{prefix}# #{fallback}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/install.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/install.rb new file mode 100644 index 0000000..27c7086 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/install.rb @@ -0,0 +1,56 @@ +module Pod + class Command + class Install < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Install project dependencies according to versions from a Podfile.lock' + + self.description = <<-DESC + Downloads all dependencies defined in `Podfile` and creates an Xcode + Pods library project in `./Pods`. + + The Xcode project file should be specified in your `Podfile` like this: + + project 'path/to/XcodeProject.xcodeproj' + + If no project is specified, then a search for an Xcode project will + be made. If more than one Xcode project is found, the command will + raise an error. + + This will configure the project to reference the Pods static library, + add a build configuration file, and add a post build script to copy + Pod resources. + + This may return one of several error codes if it encounters problems. + * `1` Generic error code + * `31` Spec not found (i.e out-of-date source repos, mistyped Pod name etc...) + DESC + + def self.options + [ + ['--repo-update', 'Force running `pod repo update` before install'], + ['--deployment', 'Disallow any changes to the Podfile or the Podfile.lock during installation'], + ['--clean-install', 'Ignore the contents of the project cache and force a full pod installation. 
This only ' \ + 'applies to projects that have enabled incremental installation'], + ].concat(super).reject { |(name, _)| name == '--no-repo-update' } + end + + def initialize(argv) + super + @deployment = argv.flag?('deployment', false) + @clean_install = argv.flag?('clean-install', false) + end + + def run + verify_podfile_exists! + installer = installer_for_config + installer.repo_update = repo_update?(:default => false) + installer.update = false + installer.deployment = @deployment + installer.clean_install = @clean_install + installer.install! + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc.rb new file mode 100644 index 0000000..88744dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc.rb @@ -0,0 +1,19 @@ +require 'cocoapods/command/ipc/list' +require 'cocoapods/command/ipc/podfile' +require 'cocoapods/command/ipc/podfile_json' +require 'cocoapods/command/ipc/repl' +require 'cocoapods/command/ipc/spec' +require 'cocoapods/command/ipc/update_search_index' + +module Pod + class Command + class IPC < Command + self.abstract_command = true + self.summary = 'Inter-process communication' + + def output_pipe + STDOUT + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/list.rb new file mode 100644 index 0000000..18fb3d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/list.rb @@ -0,0 +1,40 @@ +module Pod + class Command + class IPC < Command + class List < IPC + self.summary = 'Lists the specifications known to CocoaPods' + self.description = <<-DESC + Prints to STDOUT a YAML dictionary where the keys are the name of the + specifications and each corresponding value is a dictionary with + the following keys: + - defined_in_file + - version + - authors + - summary + - description + - platforms + DESC + + def run + require 'yaml' + sets = config.sources_manager.aggregate.all_sets + result = {} + sets.each do |set| + begin + spec = set.specification + result[spec.name] = { + 'authors' => spec.authors.keys, + 'summary' => spec.summary, + 'description' => spec.description, + 'platforms' => spec.available_platforms.map { |p| p.name.to_s }, + } + rescue DSLError + next + end + end + output_pipe.puts result.to_yaml + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile.rb new file mode 100644 index 0000000..5cbf9bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile.rb @@ -0,0 +1,31 @@ +module Pod + class Command + class IPC < Command + class Podfile < IPC + include ProjectDirectory + + self.summary = 'Converts a Podfile to YAML' + self.description = 'Converts a Podfile to YAML and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A Podfile path is required.' 
unless @path + end + + def run + require 'yaml' + podfile = Pod::Podfile.from_file(@path) + output_pipe.puts podfile.to_yaml + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile_json.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile_json.rb new file mode 100644 index 0000000..a6aca6a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/podfile_json.rb @@ -0,0 +1,30 @@ +module Pod + class Command + class IPC < Command + class PodfileJSON < IPC + include ProjectDirectory + + self.summary = 'Converts a Podfile to JSON' + self.description = 'Converts a Podfile to JSON and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A Podfile path is required.' unless @path + end + + def run + podfile = Pod::Podfile.from_file(@path) + output_pipe.puts podfile.to_hash.to_json + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/repl.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/repl.rb new file mode 100644 index 0000000..75eee94 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/repl.rb @@ -0,0 +1,51 @@ +module Pod + class Command + class IPC < Command + class Repl < IPC + include ProjectDirectory + + END_OF_OUTPUT_SIGNAL = "\n\r".freeze + + self.summary = 'The repl listens to commands on standard input' + self.description = <<-DESC + The repl listens to commands on standard input and prints their + result to standard output. + It accepts all the other ipc subcommands. The repl will signal the + end of output with the the ASCII CR+LF `\\n\\r`. + DESC + + def run + print_version + signal_end_of_output + listen + end + + def print_version + output_pipe.puts "version: '#{Pod::VERSION}'" + end + + def signal_end_of_output + output_pipe.puts(END_OF_OUTPUT_SIGNAL) + STDOUT.flush + end + + def listen + while repl_command = STDIN.gets + execute_repl_command(repl_command) + end + end + + def execute_repl_command(repl_command) + unless repl_command == '\n' + repl_commands = repl_command.split + subcommand = repl_commands.shift.capitalize + arguments = repl_commands + subcommand_class = Pod::Command::IPC.const_get(subcommand) + subcommand_class.new(CLAide::ARGV.new(arguments)).run + signal_end_of_output + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/spec.rb new file mode 100644 index 0000000..9bab8b8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/spec.rb @@ -0,0 +1,29 @@ +module Pod + class Command + class IPC < Command + class Spec < IPC + self.summary = 'Converts a podspec to JSON' + self.description = 'Converts a podspec to JSON and prints it to STDOUT.' + self.arguments = [ + CLAide::Argument.new('PATH', true), + ] + + def initialize(argv) + @path = argv.shift_argument + super + end + + def validate! + super + help! 'A specification path is required.' 
unless @path + end + + def run + require 'json' + spec = Specification.from_file(@path) + output_pipe.puts(spec.to_pretty_json) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/update_search_index.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/update_search_index.rb new file mode 100644 index 0000000..31c0ae5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/ipc/update_search_index.rb @@ -0,0 +1,24 @@ +module Pod + class Command + class IPC < Command + class UpdateSearchIndex < IPC + self.summary = 'Updates the search index' + self.description = <<-DESC + Updates the search index and prints its path to standard output. + The search index is a YAML encoded dictionary where the keys + are the names of the Pods and the values are a dictionary containing + the following information: + - version + - summary + - description + - authors + DESC + + def run + config.sources_manager.updated_search_index + output_pipe.puts(config.sources_manager.search_index_path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib.rb new file mode 100644 index 0000000..72450ad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib.rb @@ -0,0 +1,11 @@ +require 'cocoapods/command/lib/create' +require 'cocoapods/command/lib/lint' + +module Pod + class Command + class Lib < Command + self.abstract_command = true + self.summary = 'Develop pods' + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/create.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/create.rb new file mode 100644 index 0000000..038f842 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/create.rb @@ -0,0 +1,104 @@ +module Pod + class Command + class Lib < Command + class Create < Lib + self.summary = 'Creates a new Pod' + + self.description = <<-DESC + Creates a scaffold for the development of a new Pod named `NAME` + according to the CocoaPods best practices. + If a `TEMPLATE_URL`, pointing to a git repo containing a compatible + template, is specified, it will be used in place of the default one. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def self.options + [ + ['--template-url=URL', 'The URL of the git repo containing a compatible template'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @template_url = argv.option('template-url', TEMPLATE_REPO) + super + @additional_args = argv.remainder! + end + + def validate! + super + help! 'A name for the Pod is required.' unless @name + help! 'The Pod name cannot contain spaces.' if @name =~ /\s/ + help! 'The Pod name cannot contain plusses.' if @name =~ /\+/ + help! "The Pod name cannot begin with a '.'" if @name[0, 1] == '.' 
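+          # Illustrative (hypothetical) names: 'MyPod' passes the checks above,
+          # while 'My Pod', 'My+Pod' and '.MyPod' each trigger help! and abort.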
+ end + + def run + clone_template + configure_template + print_info + end + + private + + #----------------------------------------# + + # !@group Private helpers + + extend Executable + executable :git + + TEMPLATE_REPO = 'https://github.com/CocoaPods/pod-template.git'.freeze + TEMPLATE_INFO_URL = 'https://github.com/CocoaPods/pod-template'.freeze + CREATE_NEW_POD_INFO_URL = 'https://guides.cocoapods.org/making/making-a-cocoapod'.freeze + + # Clones the template from the remote in the working directory using + # the name of the Pod. + # + # @return [void] + # + def clone_template + UI.section("Cloning `#{template_repo_url}` into `#{@name}`.") do + git! ['clone', template_repo_url, @name] + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def configure_template + UI.section("Configuring #{@name} template.") do + Dir.chdir(@name) do + if File.exist?('configure') + system({ 'COCOAPODS_VERSION' => Pod::VERSION }, './configure', @name, *@additional_args) + else + UI.warn 'Template does not have a configure file.' + end + end + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def print_info + UI.puts "\nTo learn more about the template see `#{template_repo_url}`." + UI.puts "To learn more about creating a new pod, see `#{CREATE_NEW_POD_INFO_URL}`." + end + + # Checks if a template URL is given else returns the TEMPLATE_REPO URL + # + # @return String + # + def template_repo_url + @template_url || TEMPLATE_REPO + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/lint.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/lint.rb new file mode 100644 index 0000000..7b9283c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/lib/lint.rb @@ -0,0 +1,148 @@ +module Pod + class Command + class Lib < Command + class Lint < Lib + self.summary = 'Validates a Pod' + + self.description = <<-DESC + Validates the Pod using the files in the working directory. + DESC + + self.arguments = [ + CLAide::Argument.new('PODSPEC_PATHS', false, true), + ] + + def self.options + [ + ['--quick', 'Lint skips checks that would require to download and build the spec'], + ['--allow-warnings', 'Lint validates even if warnings are present'], + ['--subspec=NAME', 'Lint validates only the given subspec'], + ['--no-subspecs', 'Lint skips validation of subspecs'], + ['--no-clean', 'Lint leaves the build directory intact for inspection'], + ['--fail-fast', 'Lint stops on the first failing platform or subspec'], + ['--use-libraries', 'Lint uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--use-static-frameworks', 'Lint uses static frameworks during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + "(defaults to #{Pod::TrunkSource::TRUNK_REPO_URL}). Multiple sources must be comma-delimited"], + ['--platforms=ios,macos', 'Lint against specific platforms (defaults to all platforms supported by the ' \ + 'podspec). Multiple platforms must be comma-delimited'], + ['--private', 'Lint skips checks that apply only to public specs'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used to lint the spec. 
' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--include-podspecs=**/*.podspec', 'Additional ancillary podspecs which are used for linting via :path'], + ['--external-podspecs=**/*.podspec', 'Additional ancillary podspecs which are used for linting '\ + 'via :podspec. If there are --include-podspecs, then these are removed from them'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--test-specs=test-spec1,test-spec2,etc', 'List of test specs to run'], + ['--analyze', 'Validate with the Xcode Static Analysis tool'], + ['--configuration=CONFIGURATION', 'Build using the given configuration (defaults to Release)'], + ['--validation-dir', 'The directory to use for validation. If none is specified a temporary directory will be used.'], + ].concat(super) + end + + def initialize(argv) + @quick = argv.flag?('quick') + @allow_warnings = argv.flag?('allow-warnings') + @clean = argv.flag?('clean', true) + @fail_fast = argv.flag?('fail-fast', false) + @subspecs = argv.flag?('subspecs', true) + @only_subspec = argv.option('subspec') + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @use_static_frameworks = argv.flag?('use-static-frameworks') + @source_urls = argv.option('sources', Pod::TrunkSource::TRUNK_REPO_URL).split(',') + @platforms = argv.option('platforms', '').split(',') + @private = argv.flag?('private', false) + @swift_version = argv.option('swift-version', nil) + @include_podspecs = argv.option('include-podspecs', nil) + @external_podspecs = argv.option('external-podspecs', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @test_specs = argv.option('test-specs', nil)&.split(',') + @analyze = argv.flag?('analyze', false) + @podspecs_paths = argv.arguments! + @configuration = argv.option('configuration', nil) + @validation_dir = argv.option('validation-dir', nil) + super + end + + def validate! + super + end + + def run + UI.puts + podspecs_to_lint.each do |podspec| + validator = Validator.new(podspec, @source_urls, @platforms) + validator.local = true + validator.quick = @quick + validator.no_clean = !@clean + validator.fail_fast = @fail_fast + validator.allow_warnings = @allow_warnings + validator.no_subspecs = !@subspecs || @only_subspec + validator.only_subspec = @only_subspec + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.use_static_frameworks = @use_static_frameworks + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.test_specs = @test_specs + validator.analyze = @analyze + validator.include_podspecs = @include_podspecs + validator.external_podspecs = @external_podspecs + validator.configuration = @configuration + validator.validation_dir = @validation_dir + validator.validate + + unless @clean + UI.puts "Pods workspace available at `#{validator.validation_dir}/App.xcworkspace` for inspection." + UI.puts + end + if validator.validated? + UI.puts "#{validator.spec.name} passed validation.".green + else + spec_name = podspec + spec_name = validator.spec.name if validator.spec + message = "#{spec_name} did not pass validation, due to #{validator.failure_reason}." 
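+              # A sketch of the resulting message (name and reason hypothetical):
+              #   "FooKit did not pass validation, due to 1 error."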
+ + if @clean + message << "\nYou can use the `--no-clean` option to inspect " \ + 'any issue.' + end + raise Informative, message + end + end + end + + private + + #----------------------------------------# + + # !@group Private helpers + + # @return [Pathname] The path of the podspec found in the current + # working directory. + # + # @raise If no podspec is found. + # @raise If multiple podspecs are found. + # + def podspecs_to_lint + if !@podspecs_paths.empty? + Array(@podspecs_paths) + else + podspecs = Pathname.glob(Pathname.pwd + '*.podspec{.json,}') + if podspecs.count.zero? + raise Informative, 'Unable to find a podspec in the working ' \ + 'directory' + end + podspecs + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/list.rb new file mode 100644 index 0000000..1f5f494 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/list.rb @@ -0,0 +1,37 @@ +module Pod + class Command + class List < Command + self.summary = 'List pods' + self.description = 'Lists all available pods.' + + def self.options + [ + ['--update', 'Run `pod repo update` before listing'], + ['--stats', 'Show additional stats (like GitHub watchers and forks)'], + ].concat(super) + end + + def initialize(argv) + @update = argv.flag?('update') + @stats = argv.flag?('stats') + super + end + + def run + update_if_necessary! + + sets = config.sources_manager.aggregate.all_sets + sets.each { |set| UI.pod(set, :name_and_version) } + UI.puts "\n#{sets.count} pods were found" + end + + def update_if_necessary! + UI.section("\nUpdating Spec Repositories\n".yellow) do + Repo::Update.new(CLAide::ARGV.new([])).run + end if @update + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/project_directory.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/project_directory.rb new file mode 100644 index 0000000..d12ab33 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/project_directory.rb @@ -0,0 +1,36 @@ +module Pod + class Command + module Options + # Provides support for commands to take a user-specified `project directory` + # + module ProjectDirectory + module Options + def options + [ + ['--project-directory=/project/dir/', 'The path to the root of the project directory'], + ].concat(super) + end + end + + def self.included(base) + base.extend(Options) + end + + def initialize(argv) + if project_directory = argv.option('project-directory') + @project_directory = Pathname.new(project_directory).expand_path + end + config.installation_root = @project_directory + super + end + + def validate! + super + if @project_directory && !@project_directory.directory? + raise Informative, "`#{@project_directory}` is not a valid directory." 
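+            # e.g. `pod install --project-directory=missing/dir` would abort
+            # here during validation (the path is a hypothetical illustration).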
+ end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/repo_update.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/repo_update.rb new file mode 100644 index 0000000..c6143a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/options/repo_update.rb @@ -0,0 +1,34 @@ +module Pod + class Command + module Options + # Provides support for commands to skip updating the spec repositories. + # + module RepoUpdate + module Options + def options + [ + ['--no-repo-update', 'Skip running `pod repo update` before install'], + ].concat(super) + end + end + + def self.included(base) + base.extend(Options) + end + + def repo_update?(default: false) + if @repo_update.nil? + default + else + @repo_update + end + end + + def initialize(argv) + @repo_update = argv.flag?('repo-update') + super + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/outdated.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/outdated.rb new file mode 100644 index 0000000..3730862 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/outdated.rb @@ -0,0 +1,151 @@ +module Pod + class Command + class Outdated < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Show outdated project dependencies' + + self.description = <<-DESC + Shows the outdated pods in the current Podfile.lock, but only those from + spec repos, not those from local/external sources. + DESC + + def self.options + [ + ['--ignore-prerelease', "Don't consider prerelease versions to be updates"], + ].concat(super) + end + + def initialize(argv) + @ignore_prerelease = argv.flag?('ignore-prerelease') + super + end + + # Run the command + # + def run + if updates.empty? + UI.puts 'No pod updates are available.'.yellow + else + UI.section 'The color indicates what happens when you run `pod update`' do + UI.puts "#{''.green}\t - Will be updated to the newest version" + UI.puts "#{''.blue}\t - Will be updated, but not to the newest version because of specified version in Podfile" + UI.puts "#{''.red}\t - Will not be updated because of specified version in Podfile" + UI.puts '' + end if ansi_output? + UI.section 'The following pod updates are available:' do + updates.each do |(name, from_version, matching_version, to_version)| + color = :blue + if matching_version == to_version + color = :green + elsif from_version == matching_version + color = :red + end + # For the specs, its necessary that to_s is called here even though it is redundant + # https://github.com/CocoaPods/CocoaPods/pull/7204#issuecomment-342512015 + UI.puts "- #{name} #{from_version.to_s.send(color)} -> #{matching_version.to_s.send(color)} " \ + "(latest version #{to_version.to_s})" # rubocop:disable Lint/StringConversionInInterpolation + end + end + end + + if deprecated_pods.any? + UI.section 'The following pods are deprecated:' do + deprecated_pods.each do |spec| + if spec.deprecated_in_favor_of + UI.puts "- #{spec.name}" \ + " (in favor of #{spec.deprecated_in_favor_of})" + else + UI.puts "- #{spec.name}" + end + end + end + end + end + + private + + def analyzer + @analyzer ||= begin + verify_podfile_exists! + Installer::Analyzer.new(config.sandbox, config.podfile, config.lockfile) + end + end + + def updates + @updates ||= begin + ensure_external_podspecs_present! 
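+          # Each element built below is a [name, locked, resolvable, newest]
+          # tuple; one possible entry (versions hypothetical) looks like:
+          #
+          #   ['Alamofire', '5.2.0', '5.4.4', '5.6.1']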
+ spec_sets.map do |set| + spec = set.specification + source_version = set.versions.find { |version| !@ignore_prerelease || !version.prerelease? } + pod_name = spec.root.name + lockfile_version = lockfile.version(pod_name) + if source_version > lockfile_version + matching_spec = unlocked_pods.find { |s| s.name == pod_name } + matching_version = + matching_spec ? matching_spec.version : '(unused)' + [pod_name, lockfile_version, matching_version, source_version] + end + end.compact.uniq + end + end + + def unlocked_pods + @unlocked_pods ||= begin + pods = [] + UI.titled_section('Analyzing dependencies') do + pods = Installer::Analyzer.new(config.sandbox, config.podfile). + analyze(:outdated). + specs_by_target.values.flatten.uniq + end + pods + end + end + + def deprecated_pods + @deprecated_pods ||= begin + spec_sets.map(&:specification).select do |spec| + spec.deprecated || spec.deprecated_in_favor_of + end.compact.uniq + end + end + + def spec_sets + @spec_sets ||= begin + analyzer.send(:update_repositories) if repo_update?(:default => true) + aggregate = Source::Aggregate.new(analyzer.sources) + installed_pods.map do |pod_name| + aggregate.search(Dependency.new(pod_name)) + end.compact.uniq + end + end + + def installed_pods + @installed_pods ||= begin + verify_podfile_exists! + + lockfile.pod_names + end + end + + def lockfile + @lockfile ||= begin + verify_lockfile_exists! + config.lockfile + end + end + + def ensure_external_podspecs_present! + return unless config.podfile + config.podfile.dependencies.each do |dep| + next if dep.external_source.nil? + unless config.sandbox.specification(dep.root_name) + raise Informative, 'You must run `pod install` first to ensure that the ' \ + "podspec for `#{dep.root_name}` has been fetched." + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo.rb new file mode 100644 index 0000000..89078f3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo.rb @@ -0,0 +1,30 @@ +require 'fileutils' +require 'cocoapods/command/repo/add' +require 'cocoapods/command/repo/add_cdn' +require 'cocoapods/command/repo/lint' +require 'cocoapods/command/repo/list' +require 'cocoapods/command/repo/push' +require 'cocoapods/command/repo/remove' +require 'cocoapods/command/repo/update' + +module Pod + class Command + class Repo < Command + self.abstract_command = true + + # @todo should not show a usage banner! + # + self.summary = 'Manage spec-repositories' + self.default_subcommand = 'list' + + #-----------------------------------------------------------------------# + + extend Executable + executable :git + + def dir + config.repos_dir + @name + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add.rb new file mode 100644 index 0000000..5f667a6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add.rb @@ -0,0 +1,102 @@ +module Pod + class Command + class Repo < Command + class Add < Repo + self.summary = 'Add a spec repo' + + self.description = <<-DESC + Clones `URL` in the local spec-repos directory at `#{Config.instance.repos_dir}`. The + remote can later be referred to by `NAME`. 
+ DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('URL', true), + CLAide::Argument.new('BRANCH', false), + ] + + def self.options + [ + ['--progress', 'Show the progress of cloning the spec repository'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @url = argv.shift_argument + @branch = argv.shift_argument + @progress = argv.flag?('progress') + super + end + + def validate! + super + unless @name && @url + help! 'Adding a repo needs a `NAME` and a `URL`.' + end + if @name == 'trunk' + raise Informative, + "Repo name `trunk` is reserved for CocoaPods' main spec repo accessed via CDN." + end + end + + def run + section = "Cloning spec repo `#{@name}` from `#{@url}`" + section << " (branch `#{@branch}`)" if @branch + UI.section(section) do + create_repos_dir + clone_repo + checkout_branch + config.sources_manager.sources([dir.basename.to_s]).each(&:verify_compatibility!) + end + end + + private + + # Creates the repos directory specified in the configuration by + # `config.repos_dir`. + # + # @return [void] + # + # @raise If the directory cannot be created due to a system error. + # + def create_repos_dir + config.repos_dir.mkpath + rescue => e + raise Informative, "Could not create '#{config.repos_dir}', the CocoaPods repo cache directory.\n" \ + "#{e.class.name}: #{e.message}" + end + + # Clones the git spec-repo according to parameters passed to the + # command. + # + # @return [void] + # + def clone_repo + changes = if @progress + { :verbose => true } + else + {} + end + + config.with_changes(changes) do + Dir.chdir(config.repos_dir) do + command = ['clone', @url] + command << '--progress' if @progress + command << '--' << @name + git!(command) + end + end + end + + # Checks out the branch of the git spec-repo if provided. + # + # @return [void] + # + def checkout_branch + Dir.chdir(dir) { git!('checkout', @branch) } if @branch + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add_cdn.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add_cdn.rb new file mode 100644 index 0000000..53d249b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/add_cdn.rb @@ -0,0 +1,58 @@ +module Pod + class Command + class Repo < Command + class AddCDN < Repo + self.summary = 'Add a spec repo backed by a CDN' + + self.description = <<-DESC + Add `URL` to the local spec-repos directory at `#{Config.instance.repos_dir}`. The + remote can later be referred to by `NAME`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('URL', true), + ] + + def initialize(argv) + @name = argv.shift_argument + @url = argv.shift_argument + super + end + + def validate! + super + unless @name && @url + help! 'Adding a repo needs a `NAME` and a `URL`.' + end + if @name == 'master' + raise Informative, + 'To setup the master specs repo, please run `pod setup`.' + end + end + + def run + section = "Adding spec repo `#{@name}` with CDN `#{@url}`" + UI.section(section) do + save_url + config.sources_manager.sources([dir.basename.to_s]).each(&:verify_compatibility!) + end + end + + private + + # Saves the spec-repo URL to a '.url' file. 
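+        # For example (name and URL hypothetical), `pod repo add-cdn my-cdn
+        # https://cdn.example.org/` stores that URL in `<repos_dir>/my-cdn/.url`.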
+ # + # @return [void] + # + def save_url + dir.mkpath + File.open(dir + '.url', 'w') { |file| file.write(@url) } + rescue => e + raise Informative, "Could not create '#{config.repos_dir}', the CocoaPods repo cache directory.\n" \ + "#{e.class.name}: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/lint.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/lint.rb new file mode 100644 index 0000000..c8763cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/lint.rb @@ -0,0 +1,82 @@ +module Pod + class Command + class Repo < Command + class Lint < Repo + self.summary = 'Validates all specs in a repo' + + self.description = <<-DESC + Lints the spec-repo `NAME`. If a directory is provided it is assumed + to be the root of a repo. Finally, if `NAME` is not provided this + will lint all the spec-repos known to CocoaPods. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME DIRECTORY), false), + ] + + def self.options + [ + ['--only-errors', 'Lint presents only the errors'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @only_errors = argv.flag?('only-errors') + super + end + + # Run the command + # + # @todo Part of this logic needs to be ported to cocoapods-core so web + # services can validate the repo. + # + # @todo add UI.print and enable print statements again. + # + def run + sources = if @name + if File.exist?(@name) + [Source.new(Pathname(@name))] + else + config.sources_manager.sources([@name]) + end + else + config.sources_manager.all + end + + sources.each do |source| + source.verify_compatibility! + UI.puts "\nLinting spec repo `#{source.name}`\n".yellow + + validator = Source::HealthReporter.new(source.repo) + validator.pre_check do |_name, _version| + UI.print '.' + end + report = validator.analyze + UI.puts + UI.puts + + report.pods_by_warning.each do |message, versions_by_name| + UI.puts "-> #{message}".yellow + versions_by_name.each { |name, versions| UI.puts " - #{name} (#{versions * ', '})" } + UI.puts + end + + report.pods_by_error.each do |message, versions_by_name| + UI.puts "-> #{message}".red + versions_by_name.each { |name, versions| UI.puts " - #{name} (#{versions * ', '})" } + UI.puts + end + + UI.puts "Analyzed #{report.analyzed_paths.count} podspecs files.\n\n" + if report.pods_by_error.count.zero? + UI.puts 'All the specs passed validation.'.green << "\n\n" + else + raise Informative, "#{report.pods_by_error.count} podspecs failed validation." + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/list.rb new file mode 100644 index 0000000..26d487e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/list.rb @@ -0,0 +1,94 @@ +module Pod + class Command + class Repo < Command + class List < Repo + self.summary = 'List repos' + + self.description = <<-DESC + List the repos from the local spec-repos directory at `#{Config.instance.repos_dir}`. 
+ DESC + + def self.options + [['--count-only', 'Show the total number of repos']].concat(super) + end + + def initialize(argv) + @count_only = argv.flag?('count-only') + super + end + + # @output Examples: + # + # trunk + # - type: CDN + # - URL: https://cdn.cocoapods.org/ + # - path: /Users/lascorbe/.cocoapods/repos/trunk + # + # test + # - type: local copy + # - URL: file:///Users/lascorbe/.cocoapods/repos/test + # - path: /Users/lascorbe/.cocoapods/repos/test + # + def run + sources = config.sources_manager.all + print_sources(sources) unless @count_only + print_count_of_sources(sources) + end + + private + + # Pretty-prints the source at the given path. + # + # @param [Source] source + # The source repository to be printed. + # + # @return [void] + # + def print_source(source) + if source.is_a?(Pod::CDNSource) + UI.puts '- Type: CDN' + elsif source.git? + branch_name, = Executable.capture_command('git', %w(name-rev --name-only HEAD), :capture => :out, :chdir => source.repo) + branch_name.strip! + branch_name = 'unknown' if branch_name.empty? + UI.puts "- Type: git (#{branch_name})" + else + UI.puts "- Type: #{source.type}" + end + + UI.puts "- URL: #{source.url}" + UI.puts "- Path: #{source.repo}" + end + + # Pretty-prints the given sources. + # + # @param [Array] sources + # The sources that should be printed. + # + # @return [void] + # + def print_sources(sources) + sources.each do |source| + UI.title source.name do + print_source(source) + end + end + UI.puts "\n" + end + + # Pretty-prints the number of sources. + # + # @param [Array] sources + # The sources whose count should be printed. + # + # @return [void] + # + def print_count_of_sources(sources) + number_of_repos = sources.length + repo_string = number_of_repos != 1 ? 'repos' : 'repo' + UI.puts "#{number_of_repos} #{repo_string}".green + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/push.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/push.rb new file mode 100644 index 0000000..cdfe3f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/push.rb @@ -0,0 +1,310 @@ +require 'tempfile' +require 'fileutils' +require 'active_support/core_ext/string/inflections' + +module Pod + class Command + class Repo < Command + class Push < Repo + self.summary = 'Push new specifications to a spec-repo' + + self.description = <<-DESC + Validates `NAME.podspec` or `*.podspec` in the current working dir, + creates a directory and version folder for the pod in the local copy of + `REPO` (#{Config.instance.repos_dir}/[REPO]), copies the podspec file into the + version directory, and finally it pushes `REPO` to its remote. + DESC + + self.arguments = [ + CLAide::Argument.new('REPO', true), + CLAide::Argument.new('NAME.podspec', false), + ] + + def self.options + [ + ['--allow-warnings', 'Allows pushing even if there are warnings'], + ['--use-libraries', 'Linter uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + '(defaults to all available repos). 
Multiple sources must be comma-delimited'], + ['--local-only', 'Does not perform the step of pushing REPO to its remote'], + ['--no-private', 'Lint includes checks that apply only to public repos'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--commit-message="Fix bug in pod"', 'Add custom commit message. Opens default editor if no commit ' \ + 'message is specified'], + ['--use-json', 'Convert the podspec to JSON before pushing it to the repo'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used when linting the spec. ' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--no-overwrite', 'Disallow pushing that would overwrite an existing spec'], + ['--update-sources', 'Make sure sources are up-to-date before a push'], + ['--validation-dir', 'The directory to use for validation. If none is specified a temporary directory will be used.'], + ].concat(super) + end + + def initialize(argv) + @allow_warnings = argv.flag?('allow-warnings') + @local_only = argv.flag?('local-only') + @repo = argv.shift_argument + @source = source_for_repo + @source_urls = argv.option('sources', config.sources_manager.all.map(&:url).append(Pod::TrunkSource::TRUNK_REPO_URL).uniq.join(',')).split(',') + @update_sources = argv.flag?('update-sources') + @podspec = argv.shift_argument + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers', false) + @private = argv.flag?('private', true) + @message = argv.option('commit-message') + @commit_message = argv.flag?('commit-message', false) + @use_json = argv.flag?('use-json') + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @allow_overwrite = argv.flag?('overwrite', true) + @validation_dir = argv.option('validation-dir', nil) + super + end + + def validate! + super + help! 'A spec-repo name or url is required.' unless @repo + unless @source && @source.repo.directory? + raise Informative, + "Unable to find the `#{@repo}` repo. " \ + 'If it has not yet been cloned, add it via `pod repo add`.' + end + end + + def run + open_editor if @commit_message && @message.nil? + check_if_push_allowed + update_sources if @update_sources + validate_podspec_files + check_repo_status + update_repo + add_specs_to_repo + push_repo unless @local_only + end + + #---------------------------------------------------------------------# + + private + + # @!group Push sub-steps + + extend Executable + executable :git + + # Open default editor to allow users to enter commit message + # + def open_editor + return if ENV['EDITOR'].nil? + + file = Tempfile.new('cocoapods') + File.chmod(0777, file.path) + file.close + + system("#{ENV['EDITOR']} #{file.path}") + @message = File.read file.path + end + + # Temporary check to ensure that users do not push accidentally private + # specs to the master repo. + # + def check_if_push_allowed + if @source.is_a?(CDNSource) + raise Informative, 'Cannot push to a CDN source, as it is read-only.' + end + + remotes, = Executable.capture_command('git', %w(remote --verbose), :capture => :merge, :chdir => repo_dir) + master_repo_urls = [ + 'git@github.com:CocoaPods/Specs.git', + 'https://github.com/CocoaPods/Specs.git', + ] + is_master_repo = master_repo_urls.any? 
do |url| + remotes.include?(url) + end + + if is_master_repo + raise Informative, 'To push to the CocoaPods master repo use ' \ + "the `pod trunk push` command.\n\nIf you are using a fork of " \ + 'the master repo for private purposes we recommend to migrate ' \ + 'to a clean private repo. To disable this check remove the ' \ + 'remote pointing to the CocoaPods master repo.' + end + end + + # Performs a full lint against the podspecs. + # + def validate_podspec_files + UI.puts "\nValidating #{'spec'.pluralize(count)}".yellow + podspec_files.each do |podspec| + validator = Validator.new(podspec, @source_urls) + validator.allow_warnings = @allow_warnings + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.validation_dir = @validation_dir + begin + validator.validate + rescue => e + raise Informative, "The `#{podspec}` specification does not validate." \ + "\n\n#{e.message}" + end + raise Informative, "The `#{podspec}` specification does not validate." unless validator.validated? + end + end + + # Checks that the repo is clean. + # + # @raise If the repo is not clean. + # + # @todo Add specs for staged and unstaged files. + # + # @todo Gracefully handle the case where source is not under git + # source control. + # + # @return [void] + # + def check_repo_status + porcelain_status, = Executable.capture_command('git', %w(status --porcelain), :capture => :merge, :chdir => repo_dir) + clean = porcelain_status == '' + raise Informative, "The repo `#{@repo}` at #{UI.path repo_dir} is not clean" unless clean + end + + # Updates the git repo against the remote. + # + # @return [void] + # + def update_repo + UI.puts "Updating the `#{@repo}' repo\n".yellow + git!(%W(-C #{repo_dir} pull)) + end + + # Update sources if present + # + # @return [void] + # + def update_sources + return if @source_urls.nil? + @source_urls.each do |source_url| + source = config.sources_manager.source_with_name_or_url(source_url) + dir = source.specs_dir + UI.puts "Updating a source at #{dir} for #{source}" + git!(%W(-C #{dir} pull)) + end + end + + # Commits the podspecs to the source, which should be a git repo. + # + # @note The pre commit hook of the repo is skipped as the podspecs have + # already been linted. + # + # @return [void] + # + def add_specs_to_repo + UI.puts "\nAdding the #{'spec'.pluralize(count)} to the `#{@repo}' repo\n".yellow + podspec_files.each do |spec_file| + spec = Pod::Specification.from_file(spec_file) + output_path = @source.pod_path(spec.name) + spec.version.to_s + message = if @message && !@message.empty? + @message + elsif output_path.exist? + "[Fix] #{spec}" + elsif output_path.dirname.directory? + "[Update] #{spec}" + else + "[Add] #{spec}" + end + + if output_path.exist? && !@allow_overwrite + raise Informative, "#{spec} already exists and overwriting has been disabled." 
+ end + + FileUtils.mkdir_p(output_path) + + if @use_json + json_file_name = "#{spec.name}.podspec.json" + json_file = File.join(output_path, json_file_name) + File.open(json_file, 'w') { |file| file.write(spec.to_pretty_json) } + else + FileUtils.cp(spec_file, output_path) + end + + # only commit if modified + if repo_git('status', '--porcelain').include?(spec.name) + UI.puts " - #{message}" + repo_git('add', spec.name) + repo_git('commit', '--no-verify', '-m', message) + else + UI.puts " - [No change] #{spec}" + end + end + end + + # Pushes the git repo against the remote. + # + # @return [void] + # + def push_repo + UI.puts "\nPushing the `#{@repo}' repo\n".yellow + repo_git('push', 'origin', 'HEAD') + end + + #---------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return result of calling the git! with args in repo_dir + # + def repo_git(*args) + git!(['-C', repo_dir] + args) + end + + # @return [Pathname] The directory of the repository. + # + def repo_dir + @source.specs_dir + end + + # @return [Array] The path of the specifications to push. + # + def podspec_files + if @podspec + path = Pathname(@podspec) + raise Informative, "Couldn't find #{@podspec}" unless path.exist? + [path] + else + files = Pathname.glob('*.podspec{,.json}') + raise Informative, "Couldn't find any podspec files in current directory" if files.empty? + files + end + end + + # @return [Integer] The number of the podspec files to push. + # + def count + podspec_files.count + end + + # Returns source for @repo + # + # @note If URL is invalid or repo doesn't exist, validate! will throw the error + # + # @return [Source] + # + def source_for_repo + config.sources_manager.source_with_name_or_url(@repo) unless @repo.nil? + rescue + nil + end + + #---------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/remove.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/remove.rb new file mode 100644 index 0000000..c8bb550 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/remove.rb @@ -0,0 +1,36 @@ +module Pod + class Command + class Repo < Command + class Remove < Repo + self.summary = 'Remove a spec repo' + + self.description = <<-DESC + Deletes the remote named `NAME` from the local spec-repos directory at `#{Config.instance.repos_dir}`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def validate! + super + help! 'Deleting a repo needs a `NAME`.' unless @name + help! "repo #{@name} does not exist" unless File.directory?(dir) + help! "You do not have permission to delete the #{@name} repository." \ + 'Perhaps try prefixing this command with sudo.' 
unless File.writable?(dir) + end + + def run + UI.section("Removing spec repo `#{@name}`") do + FileUtils.rm_rf(dir) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/update.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/update.rb new file mode 100644 index 0000000..3a68777 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/repo/update.rb @@ -0,0 +1,39 @@ +module Pod + class Command + class Repo < Command + class Update < Repo + self.summary = 'Update a spec repo' + + self.description = <<-DESC + Updates the local clone of the spec-repo `NAME`. If `NAME` is omitted + this will update all spec-repos in `#{Config.instance.repos_dir}`. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', false), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def run + show_output = !config.silent? + config.sources_manager.update(@name, show_output) + exclude_repos_dir_from_backup + end + + private + + # Excludes the repos directory from backups. + # + # @return [void] + # + def exclude_repos_dir_from_backup + config.exclude_from_backup(config.repos_dir) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/setup.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/setup.rb new file mode 100644 index 0000000..830bf36 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/setup.rb @@ -0,0 +1,18 @@ +require 'fileutils' + +module Pod + class Command + class Setup < Command + self.summary = 'Set up the CocoaPods environment' + + self.description = <<-DESC + Set up the CocoaPods environment + DESC + + def run + # Right now, no setup is needed + UI.puts 'Setup completed'.green + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec.rb new file mode 100644 index 0000000..26d1961 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec.rb @@ -0,0 +1,121 @@ +# encoding: utf-8 + +require 'active_support/core_ext/string/inflections' +require 'cocoapods/command/spec/create' +require 'cocoapods/command/spec/lint' +require 'cocoapods/command/spec/which' +require 'cocoapods/command/spec/cat' +require 'cocoapods/command/spec/edit' + +module Pod + class Command + class Spec < Command + self.abstract_command = true + self.summary = 'Manage pod specs' + + #-----------------------------------------------------------------------# + + # @todo some of the following methods can probably move to one of the + # subclasses. + + private + + # @param [String] query the regular expression string to validate + # + # @raise if the query is not a valid regular expression + # + def validate_regex!(query) + /#{query}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + + # @param [String] spec + # The name of the specification. + # + # @param [Bool,String] version_filter + # - If set to false, will return only the spec path for the latest version (the default). + # - If set to true, will return a list of all paths of all the versions of that spec. + # - If set to a String, will return only the spec path for the version specified by that string. 
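+      #   A hedged example (pod name and versions hypothetical):
+      #   get_path_of_spec('FooKit', '1.2.0') #=> path of the 1.2.0 podspec only
+      #   get_path_of_spec('FooKit', true)    #=> newline-joined paths of all versions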
+ # + # @return [Pathname] the absolute path or paths of the given podspec + # + def get_path_of_spec(spec, version_filter = false) + sets = config.sources_manager.search_by_name(spec) + + if sets.count == 1 + set = sets.first + elsif sets.map(&:name).include?(spec) + set = sets.find { |s| s.name == spec } + else + names = sets.map(&:name) * ', ' + raise Informative, "More than one spec found for '#{spec}':\n#{names}" + end + + if version_filter.is_a? String + all_paths_from_set(set, version_filter).split(/\n/).first + elsif version_filter == true + all_paths_from_set(set) + else + best_spec, spec_source = spec_and_source_from_set(set) + pathname_from_spec(best_spec, spec_source) + end + end + + # @return [Pathname] the absolute path of the given spec and source + # + def pathname_from_spec(spec, _source) + Pathname(spec.defined_in_file) + end + + # @return [String] of spec paths one on each line + # + def all_paths_from_set(set, specific_version = nil) + paths = '' + + sources = set.sources + + sources.each do |source| + versions = source.versions(set.name) + + if specific_version + versions = versions.select { |v| v.version == specific_version } + end + + versions.each do |version| + spec = source.specification(set.name, version) + paths += "#{pathname_from_spec(spec, source)}\n" + end + end + + raise Informative, "Can't find spec for #{set.name}." if paths.empty? + + paths + end + + # @return [Specification, Source] the highest known specification with it's source of the given + # set. + # + def spec_and_source_from_set(set) + sources = set.sources + + best_source = best_version = nil + sources.each do |source| + versions = source.versions(set.name) + versions.each do |version| + if !best_version || version > best_version + best_source = source + best_version = version + end + end + end + + if !best_source || !best_version + raise Informative, "Unable to locate highest known specification for `#{set.name}'" + end + + [best_source.specification(set.name, best_version), best_source] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/cat.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/cat.rb new file mode 100644 index 0000000..c3a42c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/cat.rb @@ -0,0 +1,53 @@ +module Pod + class Command + class Spec < Command + class Cat < Spec + self.summary = 'Prints a spec file' + + self.description = <<-DESC + Prints the content of the podspec(s) whose name matches `QUERY` to standard output. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Pick from all versions of the given podspec'], + ['--version', 'Print a specific version of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + @version = argv.option('version') + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + filepath = if @show_all + specs = get_path_of_spec(query, @show_all).split(/\n/) + index = UI.choose_from_array(specs, "Which spec would you like to print [1-#{specs.count}]? 
") + specs[index] + else + get_path_of_spec(query, @version) + end + + UI.puts File.read(filepath) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/create.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/create.rb new file mode 100644 index 0000000..35298b9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/create.rb @@ -0,0 +1,283 @@ + +module Pod + class Command + class Spec < Command + class Create < Spec + self.summary = 'Create spec file stub.' + + self.description = <<-DESC + Creates a PodSpec, in the current working dir, called `NAME.podspec'. + If a GitHub url is passed the spec is prepopulated. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME https://github.com/USER/REPO), false), + ] + + def initialize(argv) + @name_or_url = argv.shift_argument + @url = argv.shift_argument + super + end + + def validate! + super + help! 'A pod name or repo URL is required.' unless @name_or_url + end + + def run + if repo_id_match = (@url || @name_or_url).match(%r{github.com/([^/\.]*\/[^/\.]*)\.*}) + repo_id = repo_id_match[1] + data = github_data_for_template(repo_id) + data[:name] = @name_or_url if @url + UI.puts semantic_versioning_notice(repo_id, data[:name]) if data[:version] == '0.0.1' + else + data = default_data_for_template(@name_or_url) + end + + spec = spec_template(data) + (Pathname.pwd + "#{data[:name]}.podspec").open('w') { |f| f << spec } + UI.puts "\nSpecification created at #{data[:name]}.podspec".green + end + + private + + #--------------------------------------# + + # Templates and GitHub information retrieval for spec create + # + # @todo It would be nice to have a template class that accepts options + # and uses the default ones if not provided. + # @todo The template is outdated. + + def default_data_for_template(name) + { + :name => name, + :version => '0.0.1', + :summary => "A short description of #{name}.", + :homepage => "http://EXAMPLE/#{name}", + :author_name => Executable.capture_command('git', %w(config --get user.name), :capture => :out).first.strip, + :author_email => Executable.capture_command('git', %w(config --get user.email), :capture => :out).first.strip, + :source_url => "http://EXAMPLE/#{name}.git", + :ref_type => ':tag', + :ref => '#{spec.version}', + } + end + + def github_data_for_template(repo_id) + repo = GitHub.repo(repo_id) + raise Informative, "Unable to fetch data for `#{repo_id}`" unless repo + user = GitHub.user(repo['owner']['login']) + raise Informative, "Unable to fetch data for `#{repo['owner']['login']}`" unless user + data = {} + + data[:name] = repo['name'] + data[:summary] = (repo['description'] || '').gsub(/["]/, '\"') + data[:homepage] = (repo['homepage'] && !repo['homepage'].empty?) ? repo['homepage'] : repo['html_url'] + data[:author_name] = user['name'] || user['login'] + data[:author_email] = user['email'] || 'email@address.com' + data[:source_url] = repo['clone_url'] + + data.merge suggested_ref_and_version(repo) + end + + def suggested_ref_and_version(repo) + tags = GitHub.tags(repo['html_url']).map { |tag| tag['name'] } + versions_tags = {} + tags.each do |tag| + clean_tag = tag.gsub(/^v(er)? 
?/, '') + versions_tags[Gem::Version.new(clean_tag)] = tag if Gem::Version.correct?(clean_tag) + end + version = versions_tags.keys.sort.last || '0.0.1' + data = { :version => version } + if version == '0.0.1' + branches = GitHub.branches(repo['html_url']) + master_name = repo['master_branch'] || 'master' + master = branches.find { |branch| branch['name'] == master_name } + raise Informative, "Unable to find any commits on the master branch for the repository `#{repo['html_url']}`" unless master + data[:ref_type] = ':commit' + data[:ref] = master['commit']['sha'] + else + data[:ref_type] = ':tag' + data[:ref] = versions_tags[version] + data[:ref] = '#{spec.version}' if "#{version}" == versions_tags[version] + data[:ref] = 'v#{spec.version}' if "v#{version}" == versions_tags[version] + end + data + end + + def spec_template(data) + <<-SPEC +# +# Be sure to run `pod spec lint #{data[:name]}.podspec' to ensure this is a +# valid spec and to remove all comments including this before submitting the spec. +# +# To learn more about Podspec attributes see https://guides.cocoapods.org/syntax/podspec.html +# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/ +# + +Pod::Spec.new do |spec| + + # ――― Spec Metadata ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # These will help people to find your library, and whilst it + # can feel like a chore to fill in it's definitely to your advantage. The + # summary should be tweet-length, and the description more in depth. + # + + spec.name = "#{data[:name]}" + spec.version = "#{data[:version]}" + spec.summary = "#{data[:summary]}" + + # This description is used to generate tags and improve search results. + # * Think: What does it do? Why did you write it? What is the focus? + # * Try to keep it short, snappy and to the point. + # * Write the description between the DESC delimiters below. + # * Finally, don't worry about the indent, CocoaPods strips it! + spec.description = <<-DESC + DESC + + spec.homepage = "#{data[:homepage]}" + # spec.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif" + + + # ――― Spec License ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Licensing your code is important. See https://choosealicense.com for more info. + # CocoaPods will detect a license file if there is a named LICENSE* + # Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'. + # + + spec.license = "MIT (example)" + # spec.license = { :type => "MIT", :file => "FILE_LICENSE" } + + + # ――― Author Metadata ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Specify the authors of the library, with email addresses. Email addresses + # of the authors are extracted from the SCM log. E.g. $ git log. CocoaPods also + # accepts just a name if you'd rather not provide an email address. + # + # Specify a social_media_url where others can refer to, for example a twitter + # profile URL. + # + + spec.author = { "#{data[:author_name]}" => "#{data[:author_email]}" } + # Or just: spec.author = "#{data[:author_name]}" + # spec.authors = { "#{data[:author_name]}" => "#{data[:author_email]}" } + # spec.social_media_url = "https://twitter.com/#{data[:author_name]}" + + # ――― Platform Specifics ――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # If this Pod runs only on iOS or OS X, then specify the platform and + # the deployment target. You can optionally include the target after the platform. 
+ # + + # spec.platform = :ios + # spec.platform = :ios, "5.0" + + # When using multiple platforms + # spec.ios.deployment_target = "5.0" + # spec.osx.deployment_target = "10.7" + # spec.watchos.deployment_target = "2.0" + # spec.tvos.deployment_target = "9.0" + + + # ――― Source Location ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Specify the location from where the source should be retrieved. + # Supports git, hg, bzr, svn and HTTP. + # + + spec.source = { :git => "#{data[:source_url]}", #{data[:ref_type]} => "#{data[:ref]}" } + + + # ――― Source Code ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # CocoaPods is smart about how it includes source code. For source files + # giving a folder will include any swift, h, m, mm, c & cpp files. + # For header files it will include any header in the folder. + # Not including the public_header_files will make all headers public. + # + + spec.source_files = "Classes", "Classes/**/*.{h,m}" + spec.exclude_files = "Classes/Exclude" + + # spec.public_header_files = "Classes/**/*.h" + + + # ――― Resources ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # A list of resources included with the Pod. These are copied into the + # target bundle with a build phase script. Anything else will be cleaned. + # You can preserve files from being cleaned, please don't preserve + # non-essential files like tests, examples and documentation. + # + + # spec.resource = "icon.png" + # spec.resources = "Resources/*.png" + + # spec.preserve_paths = "FilesToSave", "MoreFilesToSave" + + + # ――― Project Linking ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # Link your library with frameworks, or libraries. Libraries do not include + # the lib prefix of their name. + # + + # spec.framework = "SomeFramework" + # spec.frameworks = "SomeFramework", "AnotherFramework" + + # spec.library = "iconv" + # spec.libraries = "iconv", "xml2" + + + # ――― Project Settings ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― # + # + # If your library depends on compiler flags you can set them in the xcconfig hash + # where they will only apply to your library. If you depend on other Podspecs + # you can include multiple dependencies to ensure it works. + + # spec.requires_arc = true + + # spec.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" } + # spec.dependency "JSONKit", "~> 1.4" + +end + SPEC + end + + def semantic_versioning_notice(repo_id, repo) + <<-EOS + +#{'――― MARKDOWN TEMPLATE ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +I’ve recently added [#{repo}](https://github.com/CocoaPods/Specs/tree/master/#{repo}) to the [CocoaPods](https://github.com/CocoaPods/CocoaPods) package manager repo. + +CocoaPods is a tool for managing dependencies for OSX and iOS Xcode projects and provides a central repository for iOS/OSX libraries. This makes adding libraries to a project and updating them extremely easy and it will help users to resolve dependencies of the libraries they use. + +However, #{repo} doesn't have any version tags. I’ve added the current HEAD as version 0.0.1, but a version tag will make dependency resolution much easier. + +[Semantic version](https://semver.org) tags (instead of plain commit hashes/revisions) allow for [resolution of cross-dependencies](https://github.com/CocoaPods/Specs/wiki/Cross-dependencies-resolution-example). 
+ +In case you didn’t know this yet; you can tag the current HEAD as, for instance, version 1.0.0, like so: + +``` +$ git tag -a 1.0.0 -m "Tag release 1.0.0" +$ git push --tags +``` + +#{'――― TEMPLATE END ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +#{'[!] This repo does not appear to have semantic version tags.'.yellow} + +After commiting the specification, consider opening a ticket with the template displayed above: + - link: https://github.com/#{repo_id}/issues/new + - title: Please add semantic version tags + EOS + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/edit.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/edit.rb new file mode 100644 index 0000000..1261929 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/edit.rb @@ -0,0 +1,87 @@ +module Pod + class Command + class Spec < Command + class Edit < Spec + self.summary = 'Edit a spec file' + + self.description = <<-DESC + Opens the podspec matching `QUERY` to be edited. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Pick from all versions of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + if @show_all + specs = get_path_of_spec(query, @show_all).split(/\n/) + message = "Which spec would you like to edit [1-#{specs.count}]? " + index = UI.choose_from_array(specs, message) + filepath = specs[index] + else + filepath = get_path_of_spec(query) + end + + exec_editor(filepath.to_s) if File.exist? filepath + raise Informative, "#{filepath} doesn't exist." + end + + def which_editor + editor = ENV['EDITOR'] + # If an editor wasn't set, try to pick a sane default + return editor unless editor.nil? + + editors = [ + # Find Sublime Text 2 + 'subl', + # Find Textmate + 'mate', + # Find BBEdit / TextWrangler + 'edit', + # Find Atom + 'atom', + # Default to vim + 'vim', + ] + editor = editors.find { |e| Pod::Executable.which(e) } + return editor if editor + + raise Informative, "Failed to open editor. Set your 'EDITOR' environment variable." + end + + def exec_editor(*args) + return if args.to_s.empty? + safe_exec(which_editor, *args) + end + + def safe_exec(cmd, *args) + # This buys us proper argument quoting and evaluation + # of environment variables in the cmd parameter. + exec('/bin/sh', '-i', '-c', cmd + ' "$@"', '--', *args) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/lint.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/lint.rb new file mode 100644 index 0000000..3eaa8c6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/lint.rb @@ -0,0 +1,153 @@ +module Pod + class Command + class Spec < Command + class Lint < Spec + self.summary = 'Validates a spec file' + + self.description = <<-DESC + Validates `NAME.podspec`. If a `DIRECTORY` is provided, it validates + the podspec files found, including subfolders. 
In case + the argument is omitted, it defaults to the current working dir. + DESC + + self.arguments = [ + CLAide::Argument.new(%w(NAME.podspec DIRECTORY http://PATH/NAME.podspec), false, true), + ] + + def self.options + [ + ['--quick', 'Lint skips checks that would require to download and build the spec'], + ['--allow-warnings', 'Lint validates even if warnings are present'], + ['--subspec=NAME', 'Lint validates only the given subspec'], + ['--no-subspecs', 'Lint skips validation of subspecs'], + ['--no-clean', 'Lint leaves the build directory intact for inspection'], + ['--fail-fast', 'Lint stops on the first failing platform or subspec'], + ['--use-libraries', 'Lint uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--use-static-frameworks', 'Lint uses static frameworks during installation'], + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to pull dependent pods ' \ + "(defaults to #{Pod::TrunkSource::TRUNK_REPO_URL}). Multiple sources must be comma-delimited"], + ['--platforms=ios,macos', 'Lint against specific platforms (defaults to all platforms supported by the ' \ + 'podspec). Multiple platforms must be comma-delimited'], + ['--private', 'Lint skips checks that apply only to public specs'], + ['--swift-version=VERSION', 'The `SWIFT_VERSION` that should be used to lint the spec. ' \ + 'This takes precedence over the Swift versions specified by the spec or a `.swift-version` file'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--test-specs=test-spec1,test-spec2,etc', 'List of test specs to run'], + ['--analyze', 'Validate with the Xcode Static Analysis tool'], + ['--configuration=CONFIGURATION', 'Build using the given configuration (defaults to Release)'], + ['--validation-dir', 'The directory to use for validation. If none is specified a temporary directory will be used.'], + ].concat(super) + end + + def initialize(argv) + @quick = argv.flag?('quick') + @allow_warnings = argv.flag?('allow-warnings') + @clean = argv.flag?('clean', true) + @fail_fast = argv.flag?('fail-fast', false) + @subspecs = argv.flag?('subspecs', true) + @only_subspec = argv.option('subspec') + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @use_static_frameworks = argv.flag?('use-static-frameworks') + @source_urls = argv.option('sources', Pod::TrunkSource::TRUNK_REPO_URL).split(',') + @platforms = argv.option('platforms', '').split(',') + @private = argv.flag?('private', false) + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @test_specs = argv.option('test-specs', nil)&.split(',') + @analyze = argv.flag?('analyze', false) + @podspecs_paths = argv.arguments! 
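+          # Any remaining positional arguments are collected here and resolved
+          # later by #podspecs_to_lint into local files, directories or URLs.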
+ @configuration = argv.option('configuration', nil) + @validation_dir = argv.option('validation-dir', nil) + super + end + + def run + UI.puts + failure_reasons = [] + podspecs_to_lint.each do |podspec| + validator = Validator.new(podspec, @source_urls, @platforms) + validator.quick = @quick + validator.no_clean = !@clean + validator.fail_fast = @fail_fast + validator.allow_warnings = @allow_warnings + validator.no_subspecs = !@subspecs || @only_subspec + validator.only_subspec = @only_subspec + validator.use_frameworks = @use_frameworks + validator.use_modular_headers = @use_modular_headers + validator.use_static_frameworks = @use_static_frameworks + validator.ignore_public_only_results = @private + validator.swift_version = @swift_version + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.test_specs = @test_specs + validator.analyze = @analyze + validator.configuration = @configuration + validator.validation_dir = @validation_dir + validator.validate + failure_reasons << validator.failure_reason + + unless @clean + UI.puts "Pods workspace available at `#{validator.validation_dir}/App.xcworkspace` for inspection." + UI.puts + end + end + + count = podspecs_to_lint.count + UI.puts "Analyzed #{count} #{'podspec'.pluralize(count)}.\n\n" + + failure_reasons.compact! + if failure_reasons.empty? + lint_passed_message = count == 1 ? "#{podspecs_to_lint.first.basename} passed validation." : 'All the specs passed validation.' + UI.puts lint_passed_message.green << "\n\n" + else + raise Informative, if count == 1 + "The spec did not pass validation, due to #{failure_reasons.first}." + else + "#{failure_reasons.count} out of #{count} specs failed validation." + end + end + podspecs_tmp_dir.rmtree if podspecs_tmp_dir.exist? + end + + private + + def podspecs_to_lint + @podspecs_to_lint ||= begin + files = [] + @podspecs_paths << '.' if @podspecs_paths.empty? + @podspecs_paths.each do |path| + if path =~ %r{https?://} + require 'cocoapods/open-uri' + output_path = podspecs_tmp_dir + File.basename(path) + output_path.dirname.mkpath + begin + OpenURI.open_uri(path) do |io| + output_path.open('w') { |f| f << io.read } + end + rescue => e + raise Informative, "Downloading a podspec from `#{path}` failed: #{e}" + end + files << output_path + elsif (pathname = Pathname.new(path)).directory? + files += Pathname.glob(pathname + '*.podspec{.json,}') + raise Informative, 'No specs found in the current directory.' if files.empty? + else + files << (pathname = Pathname.new(path)) + raise Informative, "Unable to find a spec named `#{path}'." unless pathname.exist? 
&& path.include?('.podspec') + end + end + files + end + end + + def podspecs_tmp_dir + Pathname.new(Dir.tmpdir) + 'CocoaPods/Lint_podspec' + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/which.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/which.rb new file mode 100644 index 0000000..d52ef44 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/spec/which.rb @@ -0,0 +1,45 @@ +module Pod + class Command + class Spec < Command + class Which < Spec + self.summary = 'Prints the path of the given spec' + + self.description = <<-DESC + Prints the path of the .podspec file(s) whose name matches `QUERY` + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', false), + ] + + def self.options + [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--show-all', 'Print all versions of the given podspec'], + ['--version', 'Print a specific version of the given podspec'], + ].concat(super) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @show_all = argv.flag?('show-all') + @version = argv.option('version') + @query = argv.shift_argument + @query = @query.gsub('.podspec', '') unless @query.nil? + super + end + + def validate! + super + help! 'A podspec name is required.' unless @query + validate_regex!(@query) if @use_regex + end + + def run + query = @use_regex ? @query : Regexp.escape(@query) + UI.puts get_path_of_spec(query, @show_all || @version) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/update.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/update.rb new file mode 100644 index 0000000..d1d1940 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/command/update.rb @@ -0,0 +1,104 @@ +module Pod + class Command + class Update < Command + include RepoUpdate + include ProjectDirectory + + self.summary = 'Update outdated project dependencies and create new ' \ + 'Podfile.lock' + + self.description = <<-DESC + Updates the Pods identified by the specified `POD_NAMES`, which is a + space-delimited list of pod names. If no `POD_NAMES` are specified, it + updates all the Pods, ignoring the contents of the Podfile.lock. This + command is reserved for the update of dependencies; pod install should + be used to install changes to the Podfile. + DESC + + self.arguments = [ + CLAide::Argument.new('POD_NAMES', false, true), + ] + + def self.options + [ + ["--sources=#{Pod::TrunkSource::TRUNK_REPO_URL}", 'The sources from which to update dependent pods. ' \ + 'Multiple sources must be comma-delimited'], + ['--exclude-pods=podName', 'Pods to exclude during update. Multiple pods must be comma-delimited'], + ['--clean-install', 'Ignore the contents of the project cache and force a full pod installation. This only ' \ + 'applies to projects that have enabled incremental installation'], + ].concat(super) + end + + def initialize(argv) + @pods = argv.arguments! + + @source_urls = argv.option('sources', '').split(',') + @excluded_pods = argv.option('exclude-pods', '').split(',') + @clean_install = argv.flag?('clean-install', false) + @source_pods = @source_urls.flat_map { |url| config.sources_manager.source_with_name_or_url(url).pods } + + super + end + + def run + verify_podfile_exists! 
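+
+        # From here on the work is delegated to the shared Installer; the
+        # branches below only choose between a full and a partial update.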
+        installer = installer_for_config
+        installer.repo_update = repo_update?(:default => true)
+        installer.clean_install = @clean_install
+        if @pods.any? || @excluded_pods.any? || @source_pods.any?
+          verify_lockfile_exists!
+          verify_pods_are_installed!
+          verify_excluded_pods_are_installed!
+
+          @pods += @source_pods.select { |pod| config.lockfile.pod_names.include?(pod) }
+          @pods = config.lockfile.pod_names.dup if @pods.empty?
+          @pods -= @excluded_pods
+
+          installer.update = { :pods => @pods }
+        else
+          UI.puts 'Update all pods'.yellow
+          installer.update = true
+        end
+        installer.install!
+      end
+
+      private
+
+      # Check if all given pods are installed
+      #
+      def verify_pods_are_installed!
+        missing_pods = lockfile_missing_pods(@pods)
+
+        unless missing_pods.empty?
+          message = if missing_pods.length > 1
+                      "Pods `#{missing_pods.join('`, `')}` are not " \
+                        'installed and cannot be updated'
+                    else
+                      "The `#{missing_pods.first}` Pod is not installed " \
+                        'and cannot be updated'
+                    end
+          raise Informative, message
+        end
+      end
+
+      # Check if excluded pods are installed
+      #
+      def verify_excluded_pods_are_installed!
+        missing_pods = lockfile_missing_pods(@excluded_pods)
+
+        unless missing_pods.empty?
+          pluralized_words = missing_pods.length > 1 ? %w(Pods are) : %w(Pod is)
+          message = "Trying to skip `#{missing_pods.join('`, `')}` #{pluralized_words.first} " \
+            "which #{pluralized_words.last} not installed"
+          raise Informative, message
+        end
+      end
+
+      def lockfile_missing_pods(pods)
+        lockfile_roots = config.lockfile.pod_names.map { |pod| Specification.root_name(pod) }
+        pods.map { |pod| Specification.root_name(pod) }.uniq - lockfile_roots
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/config.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/config.rb
new file mode 100644
index 0000000..07e9414
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/config.rb
@@ -0,0 +1,366 @@
+require 'active_support/multibyte/unicode'
+
+module Pod
+  # Stores the global configuration of CocoaPods.
+  #
+  class Config
+    # The default settings for the configuration.
+    #
+    # Users can specify custom settings in `~/.cocoapods/config.yaml`.
+    # An example of the contents of this file might look like:
+    #
+    #     ---
+    #     skip_repo_update: true
+    #     new_version_message: false
+    #
+    DEFAULTS = {
+      :verbose => false,
+      :silent => false,
+      :skip_download_cache => !ENV['COCOAPODS_SKIP_CACHE'].nil?,
+
+      :new_version_message => ENV['COCOAPODS_SKIP_UPDATE_MESSAGE'].nil?,
+
+      :cache_root => Pathname.new(Dir.home) + 'Library/Caches/CocoaPods',
+    }
+
+    # Applies the given changes to the config for the duration of the given
+    # block.
+    #
+    # @param [Hash<#to_sym,Object>] changes
+    #        the changes to merge temporarily with the current config
+    #
+    # @yield [] is called while the changes are applied
+    #
+    def with_changes(changes)
+      old = {}
+      changes.keys.each do |key|
+        key = key.to_sym
+        old[key] = send(key) if respond_to?(key)
+      end
+      configure_with(changes)
+      yield if block_given?
+    ensure
+      configure_with(old)
+    end
+
+    public
+
+    #-------------------------------------------------------------------------#
+
+    # @!group UI
+
+    # @return [Boolean] Whether CocoaPods should provide detailed output about the
+    #         performed actions.
+    #
+    attr_accessor :verbose
+    alias_method :verbose?, :verbose
+
+    # @return [Boolean] Whether CocoaPods should produce no output.
+ # + attr_accessor :silent + alias_method :silent?, :silent + + # @return [Boolean] Whether CocoaPods is allowed to run as root. + # + attr_accessor :allow_root + alias_method :allow_root?, :allow_root + + # @return [Boolean] Whether a message should be printed when a new version of + # CocoaPods is available. + # + attr_accessor :new_version_message + alias_method :new_version_message?, :new_version_message + + #-------------------------------------------------------------------------# + + # @!group Installation + + # @return [Boolean] Whether the installer should skip the download cache. + # + attr_accessor :skip_download_cache + alias_method :skip_download_cache?, :skip_download_cache + + public + + #-------------------------------------------------------------------------# + + # @!group Cache + + # @return [Pathname] The directory where CocoaPods should cache remote data + # and other expensive to compute information. + # + attr_accessor :cache_root + + def cache_root + @cache_root.mkpath unless @cache_root.exist? + @cache_root + end + + public + + #-------------------------------------------------------------------------# + + # @!group Initialization + + def initialize(use_user_settings = true) + configure_with(DEFAULTS) + + unless ENV['CP_HOME_DIR'].nil? + @cache_root = home_dir + 'cache' + end + + if use_user_settings && user_settings_file.exist? + require 'yaml' + user_settings = YAML.load_file(user_settings_file) + configure_with(user_settings) + end + + unless ENV['CP_CACHE_DIR'].nil? + @cache_root = Pathname.new(ENV['CP_CACHE_DIR']).expand_path + end + end + + def verbose + @verbose && !silent + end + + public + + #-------------------------------------------------------------------------# + + # @!group Paths + + # @return [Pathname] the directory where repos, templates and configuration + # files are stored. + # + def home_dir + @home_dir ||= Pathname.new(ENV['CP_HOME_DIR'] || '~/.cocoapods').expand_path + end + + # @return [Pathname] the directory where the CocoaPods sources are stored. + # + def repos_dir + @repos_dir ||= Pathname.new(ENV['CP_REPOS_DIR'] || (home_dir + 'repos')).expand_path + end + + attr_writer :repos_dir + + # @return [Source::Manager] the source manager for the spec repos in `repos_dir` + # + def sources_manager + return @sources_manager if @sources_manager && @sources_manager.repos_dir == repos_dir + @sources_manager = Source::Manager.new(repos_dir) + end + + # @return [Pathname] the directory where the CocoaPods templates are stored. + # + def templates_dir + @templates_dir ||= Pathname.new(ENV['CP_TEMPLATES_DIR'] || (home_dir + 'templates')).expand_path + end + + # @return [Pathname] the root of the CocoaPods installation where the + # Podfile is located. + # + def installation_root + @installation_root ||= begin + current_dir = Pathname.new(Dir.pwd.unicode_normalize(:nfkc)) + current_path = current_dir + until current_path.root? + if podfile_path_in_dir(current_path) + installation_root = current_path + unless current_path == current_dir + UI.puts("[in #{current_path}]") + end + break + else + current_path = current_path.parent + end + end + installation_root || current_dir + end + end + + attr_writer :installation_root + alias_method :project_root, :installation_root + + # @return [Pathname] The root of the sandbox. + # + def sandbox_root + @sandbox_root ||= installation_root + 'Pods' + end + + attr_writer :sandbox_root + alias_method :project_pods_root, :sandbox_root + + # @return [Sandbox] The sandbox of the current project. 
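+    #
+    # @example (illustrative; the actual path depends on #installation_root)
+    #
+    #   config.sandbox.root #=> #<Pathname:/path/to/MyProject/Pods>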
+ # + def sandbox + @sandbox ||= Sandbox.new(sandbox_root) + end + + # @return [Podfile] The Podfile to use for the current execution. + # @return [Nil] If no Podfile is available. + # + def podfile + @podfile ||= Podfile.from_file(podfile_path) if podfile_path + end + attr_writer :podfile + + # @return [Lockfile] The Lockfile to use for the current execution. + # @return [Nil] If no Lockfile is available. + # + def lockfile + @lockfile ||= Lockfile.from_file(lockfile_path) if lockfile_path + end + + # Returns the path of the Podfile. + # + # @note The Podfile can be named either `CocoaPods.podfile.yaml`, + # `CocoaPods.podfile` or `Podfile`. The first two are preferred as + # they allow to specify an OS X UTI. + # + # @return [Pathname] + # @return [Nil] + # + def podfile_path + @podfile_path ||= podfile_path_in_dir(installation_root) + end + + # Returns the path of the Lockfile. + # + # @note The Lockfile is named `Podfile.lock`. + # + def lockfile_path + @lockfile_path ||= installation_root + 'Podfile.lock' + end + + # Returns the path of the default Podfile pods. + # + # @note The file is expected to be named Podfile.default + # + # @return [Pathname] + # + def default_podfile_path + @default_podfile_path ||= templates_dir + 'Podfile.default' + end + + # Returns the path of the default Podfile test pods. + # + # @note The file is expected to be named Podfile.test + # + # @return [Pathname] + # + def default_test_podfile_path + @default_test_podfile_path ||= templates_dir + 'Podfile.test' + end + + # @return [Pathname] The file to use to cache the search data. + # + def search_index_file + cache_root + 'search_index.json' + end + + private + + #-------------------------------------------------------------------------# + + # @!group Private helpers + + # @return [Pathname] The path of the file which contains the user settings. + # + def user_settings_file + home_dir + 'config.yaml' + end + + # Sets the values of the attributes with the given hash. + # + # @param [Hash{String,Symbol => Object}] values_by_key + # The values of the attributes grouped by key. + # + # @return [void] + # + def configure_with(values_by_key) + return unless values_by_key + values_by_key.each do |key, value| + if key.to_sym == :cache_root + value = Pathname.new(value).expand_path + end + instance_variable_set("@#{key}", value) + end + end + + # @return [Array] The filenames that the Podfile can have ordered + # by priority. + # + PODFILE_NAMES = [ + 'CocoaPods.podfile.yaml', + 'CocoaPods.podfile', + 'Podfile', + 'Podfile.rb', + ].freeze + + public + + # Returns the path of the Podfile in the given dir if any exists. + # + # @param [Pathname] dir + # The directory where to look for the Podfile. + # + # @return [Pathname] The path of the Podfile. + # @return [Nil] If not Podfile was found in the given dir + # + def podfile_path_in_dir(dir) + PODFILE_NAMES.each do |filename| + candidate = dir + filename + if candidate.file? + return candidate + end + end + nil + end + + # Excludes the given dir from Time Machine backups. + # + # @param [Pathname] dir + # The directory to exclude from Time Machine backups. + # + # @return [void] + # + def exclude_from_backup(dir) + return if Gem.win_platform? + system('tmutil', 'addexclusion', dir.to_s, %i(out err) => File::NULL) + end + + public + + #-------------------------------------------------------------------------# + + # @!group Singleton + + # @return [Config] the current config instance creating one if needed. 
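+    #
+    # @example (hypothetical usage from client code)
+    #
+    #   Pod::Config.instance.silent = true
+    #   Pod::Config.instance.silent? #=> true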
+ # + def self.instance + @instance ||= new + end + + # Sets the current config instance. If set to nil the config will be + # recreated when needed. + # + # @param [Config, Nil] the instance. + # + # @return [void] + # + class << self + attr_writer :instance + end + + # Provides support for accessing the configuration instance in other + # scopes. + # + module Mixin + def config + Config.instance + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/core_overrides.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/core_overrides.rb new file mode 100644 index 0000000..3daebdf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/core_overrides.rb @@ -0,0 +1 @@ +require 'cocoapods/sources_manager' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader.rb new file mode 100644 index 0000000..c36619f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader.rb @@ -0,0 +1,192 @@ +require 'cocoapods-downloader' +require 'claide/informative_error' +require 'fileutils' +require 'tmpdir' + +module Pod + module Downloader + require 'cocoapods/downloader/cache' + require 'cocoapods/downloader/request' + require 'cocoapods/downloader/response' + + # Downloads a pod from the given `request` to the given `target` location. + # + # @return [Response] The download response for this download. + # + # @param [Request] request + # the request that describes this pod download. + # + # @param [Pathname,Nil] target + # the location to which this pod should be downloaded. If `nil`, + # then the pod will only be cached. + # + # @param [Boolean] can_cache + # whether caching is allowed. + # + # @param [Pathname,Nil] cache_path + # the path used to cache pod downloads. + # + def self.download( + request, + target, + can_cache: true, + cache_path: Config.instance.cache_root + 'Pods' + ) + can_cache &&= !Config.instance.skip_download_cache + + request = preprocess_request(request) + + if can_cache + raise ArgumentError, 'Must provide a `cache_path` when caching.' unless cache_path + cache = Cache.new(cache_path) + result = cache.download_pod(request) + else + raise ArgumentError, 'Must provide a `target` when caching is disabled.' unless target + + require 'cocoapods/installer/pod_source_preparer' + result, = download_request(request, target) + Installer::PodSourcePreparer.new(result.spec, result.location).prepare! + end + + if target && result.location && target != result.location + UI.message "Copying #{request.name} from `#{result.location}` to #{UI.path target}", '> ' do + Cache.read_lock(result.location) do + FileUtils.rm_rf target + FileUtils.cp_r(result.location, target) + end + end + end + result + end + + # Performs the download from the given `request` to the given `target` location. + # + # @return [Response, Hash] + # The download response for this download, and the specifications + # for this download grouped by name. + # + # @param [Request] request + # the request that describes this pod download. + # + # @param [Pathname,Nil] target + # the location to which this pod should be downloaded. If `nil`, + # then the pod will only be cached. + # + def self.download_request(request, target) + result = Response.new + result.checkout_options = download_source(target, request.params) + result.location = target + + if request.released_pod? 
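+        # A released pod's spec is already known, so there is no need to
+        # search the downloaded sources for podspec files.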
+ result.spec = request.spec + podspecs = { request.name => request.spec } + else + podspecs = Sandbox::PodspecFinder.new(target).podspecs + podspecs[request.name] = request.spec if request.spec + podspecs.each do |name, spec| + if request.name == name + result.spec = spec + end + end + end + + [result, podspecs] + end + + private + + # Downloads a pod with the given `params` to `target`. + # + # @param [Pathname] target + # + # @param [Hash] params + # + # @return [Hash] The checkout options required to re-download this exact + # same source. + # + def self.download_source(target, params) + FileUtils.rm_rf(target) + downloader = Downloader.for_target(target, params) + downloader.download + target.mkpath + + if downloader.options_specific? + params + else + downloader.checkout_options + end + end + + # Return a new request after preprocessing by the downloader + # + # @param [Request] request + # the request that needs preprocessing + # + # @return [Request] the preprocessed request + # + def self.preprocess_request(request) + Request.new( + :spec => request.spec, + :released => request.released_pod?, + :name => request.name, + :params => Downloader.preprocess_options(request.params)) + end + + public + + class DownloaderError; include CLAide::InformativeError; end + + class Base + override_api do + def execute_command(executable, command, raise_on_failure = false) + Executable.execute_command(executable, command, raise_on_failure) + rescue CLAide::InformativeError => e + raise DownloaderError, e.message + end + + # Indicates that an action will be performed. The action is passed as a + # block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_action(message) + UI.section(" > #{message}", '', 1) do + yield + end + end + + # Indicates that a minor action will be performed. The action is passed + # as a block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_sub_action(message) + UI.section(" > #{message}", '', 2) do + yield + end + end + + # Prints an UI message. + # + # @param [String] message + # The message associated with the action. + # + # @return [void] + # + def ui_message(message) + UI.puts message + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/cache.rb new file mode 100644 index 0000000..5d990e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/cache.rb @@ -0,0 +1,322 @@ +require 'fileutils' +require 'tmpdir' + +module Pod + module Downloader + # The class responsible for managing Pod downloads, transparently caching + # them in a cache directory. + # + class Cache + # @return [Pathname] The root directory where this cache store its + # downloads. + # + attr_reader :root + + # Initialize a new instance + # + # @param [Pathname,String] root + # see {#root} + # + def initialize(root) + @root = Pathname(root) + ensure_matching_version + end + + # Downloads the Pod from the given `request` + # + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] the response from downloading `request` + # + def download_pod(request) + cached_pod(request) || uncached_pod(request) + rescue Informative + raise + rescue + UI.puts("\n[!] 
Error installing #{request.name}".red) + raise + end + + # @return [Hash>] + # A hash whose keys are the pod name + # And values are a hash with the following keys: + # :spec_file : path to the spec file + # :name : name of the pod + # :version : pod version + # :release : boolean to tell if that's a release pod + # :slug : the slug path where the pod cache is located + # + def cache_descriptors_per_pod + specs_dir = root + 'Specs' + release_specs_dir = specs_dir + 'Release' + return {} unless specs_dir.exist? + + spec_paths = specs_dir.find.select { |f| f.fnmatch('*.podspec.json') } + spec_paths.reduce({}) do |hash, spec_path| + spec = Specification.from_file(spec_path) + hash[spec.name] ||= [] + is_release = spec_path.to_s.start_with?(release_specs_dir.to_s) + request = Downloader::Request.new(:spec => spec, :released => is_release) + hash[spec.name] << { + :spec_file => spec_path, + :name => spec.name, + :version => spec.version, + :release => is_release, + :slug => root + request.slug, + } + hash + end + end + + # Convenience method for acquiring a shared lock to safely read from the + # cache. See `Cache.lock` for more details. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [block] &block + # the block to execute inside the lock. + # + # @return [void] + # + def self.read_lock(location, &block) + Cache.lock(location, File::LOCK_SH, &block) + end + + # Convenience method for acquiring an exclusive lock to safely write to + # the cache. See `Cache.lock` for more details. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [block] &block + # the block to execute inside the lock. + # + # @return [void] + # + def self.write_lock(location, &block) + Cache.lock(location, File::LOCK_EX, &block) + end + + # Creates a .lock file at `location`, aquires a lock of type + # `lock_type`, checks that it is valid, and executes passed block while + # holding on to that lock. Afterwards, the .lock file is deleted, which is + # why validation of the lock is necessary, as you might have a lock on a + # file that doesn't exist on the filesystem anymore. + # + # @param [Pathname] location + # the path to require a lock for. + # + # @param [locking_constant] lock_type + # the type of lock, either exclusive (File::LOCK_EX) or shared + # (File::LOCK_SH). + # + # @return [void] + # + def self.lock(location, lock_type) + raise ArgumentError, 'no block given' unless block_given? + lockfile = "#{location}.lock" + f = nil + loop do + f.close if f + f = File.open(lockfile, File::CREAT, 0o644) + f.flock(lock_type) + break if Cache.valid_lock?(f, lockfile) + end + begin + yield location + ensure + if lock_type == File::LOCK_SH + f.flock(File::LOCK_EX) + File.delete(lockfile) if Cache.valid_lock?(f, lockfile) + else + File.delete(lockfile) + end + f.close + end + end + + # Checks that the lock is on a file that still exists on the filesystem. + # + # @param [File] file + # the actual file that we have a lock for. + # + # @param [String] filename + # the filename of the file that we have a lock for. + # + # @return [Boolean] + # true if `filename` still exists and is the same file as `file` + # + def self.valid_lock?(file, filename) + file.stat.ino == File.stat(filename).ino + rescue Errno::ENOENT + false + end + + private + + # Ensures the cache on disk was created with the same CocoaPods version as + # is currently running. 
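+      # A version mismatch wipes the cache entirely, since the on-disk
+      # layout may change between CocoaPods releases.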
+ # + # @return [Void] + # + def ensure_matching_version + version_file = root + 'VERSION' + version = version_file.read.strip if version_file.file? + + root.rmtree if version != Pod::VERSION && root.exist? + root.mkpath + + version_file.open('w') { |f| f << Pod::VERSION } + end + + # @param [Request] request + # the request to be downloaded. + # + # @param [Hash] slug_opts + # the download options that should be used in constructing the + # cache slug for this request. + # + # @return [Pathname] The path for the Pod downloaded from the given + # `request`. + # + def path_for_pod(request, slug_opts = {}) + root + request.slug(**slug_opts) + end + + # @param [Request] request + # the request to be downloaded. + # + # @param [Hash] slug_opts + # the download options that should be used in constructing the + # cache slug for this request. + # + # @return [Pathname] The path for the podspec downloaded from the given + # `request`. + # + def path_for_spec(request, slug_opts = {}) + path = root + 'Specs' + request.slug(**slug_opts) + Pathname.new(path.to_path + '.podspec.json') + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] The download response for the given `request` that + # was found in the download cache. + # + def cached_pod(request) + cached_spec = cached_spec(request) + path = path_for_pod(request) + + return unless cached_spec && path.directory? + spec = request.spec || cached_spec + Response.new(path, spec, request.params) + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Specification] The cached specification for the given + # `request`. + # + def cached_spec(request) + path = path_for_spec(request) + path.file? && Specification.from_file(path) + rescue JSON::ParserError + nil + end + + # @param [Request] request + # the request to be downloaded. + # + # @return [Response] The download response for the given `request` that + # was not found in the download cache. + # + def uncached_pod(request) + in_tmpdir do |target| + result, podspecs = download(request, target) + result.location = nil + + podspecs.each do |name, spec| + destination = path_for_pod(request, :name => name, :params => result.checkout_options) + copy_and_clean(target, destination, spec) + write_spec(spec, path_for_spec(request, :name => name, :params => result.checkout_options)) + if request.name == name + result.location = destination + end + end + + result + end + end + + def download(request, target) + Downloader.download_request(request, target) + end + + # Performs the given block inside a temporary directory, + # which is removed at the end of the block's scope. + # + # @return [Object] The return value of the given block + # + def in_tmpdir(&blk) + tmpdir = Pathname(Dir.mktmpdir) + blk.call(tmpdir) + ensure + FileUtils.remove_entry(tmpdir, :force => true) if tmpdir && tmpdir.exist? + end + + # Copies the `source` directory to `destination`, cleaning the directory + # of any files unused by `spec`. + # + # @param [Pathname] source + # + # @param [Pathname] destination + # + # @param [Specification] spec + # + # @return [Void] + # + def copy_and_clean(source, destination, spec) + specs_by_platform = group_subspecs_by_platform(spec) + destination.parent.mkpath + Cache.write_lock(destination) do + FileUtils.rm_rf(destination) + FileUtils.cp_r(source, destination) + Pod::Installer::PodSourcePreparer.new(spec, destination).prepare! + Sandbox::PodDirCleaner.new(destination, specs_by_platform).clean! 
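+          # `destination` now holds only the files used by `spec`; everything
+          # else was removed by the cleaner above.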
+ end + end + + def group_subspecs_by_platform(spec) + specs_by_platform = {} + [spec, *spec.recursive_subspecs].each do |ss| + ss.available_platforms.each do |platform| + specs_by_platform[platform] ||= [] + specs_by_platform[platform] << ss + end + end + specs_by_platform + end + + # Writes the given `spec` to the given `path`. + # + # @param [Specification] spec + # the specification to be written. + # + # @param [Pathname] path + # the path the specification is to be written to. + # + # @return [Void] + # + def write_spec(spec, path) + path.dirname.mkpath + Cache.write_lock(path) do + path.open('w') { |f| f.write spec.to_pretty_json } + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/request.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/request.rb new file mode 100644 index 0000000..10229aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/request.rb @@ -0,0 +1,86 @@ +require 'digest' + +module Pod + module Downloader + # This class represents a download request for a given Pod. + # + class Request + # @return [Specification,Nil] The specification for the pod whose download + # is being requested. + # + attr_reader :spec + + # @return [Boolean] Whether this download request is for a released pod. + # + attr_reader :released_pod + alias_method :released_pod?, :released_pod + + # @return [String] The name of the pod whose dowload is being requested. + # + attr_reader :name + + # @return [Hash] The download parameters for this request. + # + attr_reader :params + + # Initialize a new instance + # + # @param [Specification,Nil] spec + # see {#spec} + # + # @param [Boolean] released + # see {#released_pod} + # + # @param [String,Nil] name + # see {#name} + # + # @param [Hash,Nil] params + # see {#params} + # + def initialize(spec: nil, released: false, name: nil, params: false) + @released_pod = released + @spec = spec + @params = spec ? (spec.source && spec.source.dup) : params + @name = spec ? spec.name : name + + validate! + end + + # @param [String] name + # the name of the pod being downloaded. + # + # @param [Hash<#to_s, #to_s>] params + # the download parameters of the pod being downloaded. + # + # @param [Specification] spec + # the specification of the pod being downloaded. + # + # @return [String] The slug used to store the files resulting from this + # download request. + # + def slug(name: self.name, params: self.params, spec: self.spec) + checksum = spec && spec.checksum && '-' << spec.checksum[0, 5] + if released_pod? + "Release/#{name}/#{spec.version}#{checksum}" + else + opts = params.to_a.sort_by(&:first).map { |k, v| "#{k}=#{v}" }.join('-') + digest = Digest::MD5.hexdigest(opts) + "External/#{name}/#{digest}#{checksum}" + end + end + + private + + # Validates that the given request is well-formed. + # + # @return [Void] + # + def validate! + raise ArgumentError, 'Requires a name' unless name + raise ArgumentError, 'Must give a spec for a released download request' if released_pod? && !spec + raise ArgumentError, 'Requires a version if released' if released_pod? 
&& !spec.version + raise ArgumentError, 'Requires params' unless params + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/response.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/response.rb new file mode 100644 index 0000000..9319b8a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/downloader/response.rb @@ -0,0 +1,16 @@ +module Pod + module Downloader + # A response to a download request. + # + # @attr [Pathname] location + # the location where this downloaded pod is stored on disk. + # + # @attr [Specification] spec + # the specification that describes this downloaded pod. + # + # @attr [Hash] checkout_options + # the downloader parameters necessary to recreate this exact download. + # + Response = Struct.new(:location, :spec, :checkout_options) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/executable.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/executable.rb new file mode 100644 index 0000000..4094a84 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/executable.rb @@ -0,0 +1,247 @@ +module Pod + # Module which provides support for running executables. + # + # In a class it can be used as: + # + # extend Executable + # executable :git + # + # This will create two methods `git` and `git!` both accept a command but + # the later will raise on non successful executions. The methods return the + # output of the command. + # + module Executable + # Creates the methods for the executable with the given name. + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def executable(name) + define_method(name) do |*command| + Executable.execute_command(name, Array(command).flatten, false) + end + + define_method(name.to_s + '!') do |*command| + Executable.execute_command(name, Array(command).flatten, true) + end + end + + # Executes the given command displaying it if in verbose mode. + # + # @param [String] executable + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Boolean] raise_on_failure + # Whether it should raise if the command fails. + # + # @raise If the executable could not be located. + # + # @raise If the command fails and the `raise_on_failure` is set to true. + # + # @return [String] the output of the command (STDOUT and STDERR). + # + def self.execute_command(executable, command, raise_on_failure = true) + bin = which!(executable) + + command = command.map(&:to_s) + if File.basename(bin) == 'tar.exe' + # Tar on Windows needs --force-local + command.push('--force-local') + end + full_command = "#{bin} #{command.join(' ')}" + + if Config.instance.verbose? + UI.message("$ #{full_command}") + stdout = Indenter.new(STDOUT) + stderr = Indenter.new(STDERR) + else + stdout = Indenter.new + stderr = Indenter.new + end + + status = popen3(bin, command, stdout, stderr) + stdout = stdout.join + stderr = stderr.join + output = stdout + stderr + unless status.success? + if raise_on_failure + raise Informative, "#{full_command}\n\n#{output}" + else + UI.message("[!] Failed: #{full_command}".red) + end + end + + output + end + + # Returns the absolute path to the binary with the given name on the current + # `PATH`, or `nil` if none is found. + # + # @param [String] program + # The name of the program being searched for. 
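+    #
+    # @example (hypothetical result; the path depends on the local `PATH`)
+    #
+    #   Pod::Executable.which('git') #=> "/usr/bin/git"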
+ # + # @return [String,Nil] The absolute path to the given program, or `nil` if + # it wasn't found in the current `PATH`. + # + def self.which(program) + program = program.to_s + paths = ENV.fetch('PATH') { '' }.split(File::PATH_SEPARATOR) + paths.unshift('./') + paths.uniq! + paths.each do |path| + bin = File.expand_path(program, path) + if Gem.win_platform? + bin += '.exe' + end + if File.file?(bin) && File.executable?(bin) + return bin + end + end + nil + end + + # Returns the absolute path to the binary with the given name on the current + # `PATH`, or raises if none is found. + # + # @param [String] program + # The name of the program being searched for. + # + # @return [String] The absolute path to the given program. + # + def self.which!(program) + which(program).tap do |bin| + raise Informative, "Unable to locate the executable `#{program}`" unless bin + end + end + + # Runs the given command, capturing the desired output. + # + # @param [String] executable + # The binary to use. + # + # @param [Array<#to_s>] command + # The command to send to the binary. + # + # @param [Symbol] capture + # Whether it should raise if the command fails. + # + # @param [Hash] env + # Environment variables to be set for the command. + # + # @raise If the executable could not be located. + # + # @return [(String, Process::Status)] + # The desired captured output from the command, and the status from + # running the command. + # + def self.capture_command(executable, command, capture: :merge, env: {}, **kwargs) + bin = which!(executable) + + require 'open3' + command = command.map(&:to_s) + case capture + when :merge then Open3.capture2e(env, [bin, bin], *command, **kwargs) + when :both then Open3.capture3(env, [bin, bin], *command, **kwargs) + when :out then Open3.capture3(env, [bin, bin], *command, **kwargs).values_at(0, -1) + when :err then Open3.capture3(env, [bin, bin], *command, **kwargs).drop(1) + when :none then Open3.capture3(env, [bin, bin], *command, **kwargs).last + end + end + + # (see Executable.capture_command) + # + # @raise If running the command fails + # + def self.capture_command!(executable, command, **kwargs) + capture_command(executable, command, **kwargs).tap do |result| + result = Array(result) + status = result.last + unless status.success? + output = result[0..-2].join + raise Informative, "#{executable} #{command.join(' ')}\n\n#{output}".strip + end + end + end + + private + + def self.popen3(bin, command, stdout, stderr) + require 'open3' + Open3.popen3(bin, *command) do |i, o, e, t| + reader(o, stdout) + reader(e, stderr) + i.close + + status = t.value + + o.flush + e.flush + sleep(0.01) + + status + end + end + + def self.reader(input, output) + Thread.new do + buf = '' + begin + loop do + buf << input.readpartial(4096) + loop do + string, separator, buf = buf.partition(/[\r\n]/) + if separator.empty? + buf = string + break + end + output << (string << separator) + end + end + rescue EOFError, IOError + output << (buf << $/) unless buf.empty? + end + end + end + + #-------------------------------------------------------------------------# + + # Helper class that allows to write to an {IO} instance taking into account + # the UI indentation level. + # + class Indenter < ::Array + # @return [Fixnum] The indentation level of the UI. + # + attr_reader :indent + + # @return [IO] the {IO} to which the output should be printed. 
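+      #
+      # When no `io` is given the output is only collected (the class is an
+      # `Array` subclass); this is how non-verbose runs buffer command output.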
+ # + attr_reader :io + + # Init a new Indenter + # + # @param [IO] io @see io + # + def initialize(io = nil) + @io = io + @indent = ' ' * UI.indentation_level + end + + # Stores a portion of the output and prints it to the {IO} instance. + # + # @param [String] value + # the output to print. + # + # @return [void] + # + def <<(value) + super + io << "#{indent}#{value}" if io + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources.rb new file mode 100644 index 0000000..389c139 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources.rb @@ -0,0 +1,57 @@ +require 'cocoapods/external_sources/abstract_external_source' +require 'cocoapods/external_sources/downloader_source' +require 'cocoapods/external_sources/path_source' +require 'cocoapods/external_sources/podspec_source' + +module Pod + # Provides support for initializing the correct concrete class of an external + # source. + # + module ExternalSources + # Instantiate a matching {AbstractExternalSource} for a given dependency. + # + # @param [Dependency] dependency + # the dependency + # + # @param [String] podfile_path + # @see AbstractExternalSource#podfile_path + # + # @param [Boolean] can_cache + # @see AbstractExternalSource#can_cache + # + # @return [AbstractExternalSource] an initialized instance of the concrete + # external source class associated with the option specified in the + # hash. + # + def self.from_dependency(dependency, podfile_path, can_cache) + from_params(dependency.external_source, dependency, podfile_path, can_cache) + end + + def self.from_params(params, dependency, podfile_path, can_cache) + name = dependency.root_name + if klass = concrete_class_from_params(params) + klass.new(name, params, podfile_path, can_cache) + else + msg = "Unknown external source parameters for `#{name}`: `#{params}`" + raise Informative, msg + end + end + + # Get the class to represent the defined source type of a dependency + # + # @param [Array] params + # the source params of the dependency + # + # @return [Class] + # + def self.concrete_class_from_params(params) + if params.key?(:podspec) + PodspecSource + elsif params.key?(:path) + PathSource + elsif Downloader.strategy_from_options(params) + DownloaderSource + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/abstract_external_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/abstract_external_source.rb new file mode 100644 index 0000000..80243d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/abstract_external_source.rb @@ -0,0 +1,205 @@ +module Pod + module ExternalSources + # Abstract class that defines the common behaviour of external sources. + # + class AbstractExternalSource + # @return [String] the name of the Pod described by this external source. + # + attr_reader :name + + # @return [Hash{Symbol => String}] the hash representation of the + # external source. + # + attr_reader :params + + # @return [String] the path where the podfile is defined to resolve + # relative paths. + # + attr_reader :podfile_path + + # @return [Boolean] Whether the source is allowed to touch the cache. 
+      #
+      attr_reader :can_cache
+      alias_method :can_cache?, :can_cache
+
+      # Initialize a new instance
+      #
+      # @param [String] name @see #name
+      # @param [Hash] params @see #params
+      # @param [String] podfile_path @see #podfile_path
+      # @param [Boolean] can_cache @see #can_cache
+      #
+      def initialize(name, params, podfile_path, can_cache = true)
+        @name = name
+        @params = params
+        @podfile_path = podfile_path
+        @can_cache = can_cache
+      end
+
+      # @return [Boolean] whether an external source is equal to another
+      #         according to the {#name} and to the {#params}.
+      #
+      def ==(other)
+        return false if other.nil?
+        name == other.name && params == other.params
+      end
+
+      public
+
+      # @!group Subclasses hooks
+
+      # Fetches the external source from the remote according to the params.
+      #
+      # @param [Sandbox] _sandbox
+      #        the sandbox where the specification should be stored.
+      #
+      # @return [void]
+      #
+      def fetch(_sandbox)
+        raise 'Abstract method'
+      end
+
+      # @return [String] a string representation of the source suitable for UI.
+      #
+      def description
+        raise 'Abstract method'
+      end
+
+      protected
+
+      # Return the normalized path for a podspec for a relative declared path.
+      #
+      # @param [String] declared_path
+      #        The path declared in the podfile.
+      #
+      # @return [String] The uri of the podspec appending the name of the file
+      #         and expanding it if necessary.
+      #
+      # @note   The declared path is expanded only if it represents a path
+      #         relative to the file system.
+      #
+      def normalized_podspec_path(declared_path)
+        extension = File.extname(declared_path)
+        if extension == '.podspec' || extension == '.json'
+          path_with_ext = declared_path
+        else
+          path_with_ext = "#{declared_path}/#{name}.podspec"
+        end
+        podfile_dir = File.dirname(podfile_path || '')
+        File.expand_path(path_with_ext, podfile_dir)
+      end
+
+      private
+
+      # @! Subclasses helpers
+
+      # Pre-downloads a Pod, passing the options to the downloader and
+      # informing the sandbox.
+      #
+      # @param [Sandbox] sandbox
+      #        The sandbox where the Pod should be downloaded.
+      #
+      # @note  To prevent a double download of the repository the pod is
+      #        marked as pre-downloaded indicating to the installer that only
+      #        clean operations are needed.
+      #
+      # @todo  The downloader configuration is the same as that of the
+      #        #{PodSourceInstaller} and needs to be kept in sync.
+      #
+      # @return [void]
+      #
+      def pre_download(sandbox)
+        title = "Pre-downloading: `#{name}` #{description}"
+        UI.titled_section(title, :verbose_prefix => '-> ') do
+          target = sandbox.pod_dir(name)
+          begin
+            download_result = Downloader.download(download_request, target, :can_cache => can_cache)
+          rescue Pod::DSLError => e
+            raise Informative, "Failed to load '#{name}' podspec: #{e.message}"
+          rescue => e
+            raise Informative, "Failed to download '#{name}': #{e.message}"
+          end
+
+          spec = download_result.spec
+          raise Informative, "Unable to find a specification for '#{name}'." unless spec
+
+          # since the podspec might be cleaned, we want the checksum to refer
+          # to the json in the sandbox
+          spec.defined_in_file = nil
+
+          store_podspec(sandbox, spec)
+          sandbox.store_pre_downloaded_pod(name)
+          sandbox.store_checkout_source(name, download_result.checkout_options)
+        end
+      end
+
+      def download_request
+        Downloader::Request.new(
+          :name => name,
+          :params => params,
+        )
+      end
+
+      # Stores the podspec in the sandbox and marks it as from an external
+      # source.
+      #
+      # @param [Sandbox] sandbox
+      #        The sandbox where the specification should be stored.
+ # + # @param [Pathname, String, Specification] spec + # The path of the specification or its contents. + # + # @note All the concrete implementations of #{fetch} should invoke this + # method. + # + # @note The sandbox ensures that the podspec exists and that the names + # match. + # + # @return [void] + # + def store_podspec(sandbox, spec, json = false) + begin + spec = case spec + when Pathname + Specification.from_file(spec) + when String + path = "#{name}.podspec" + path << '.json' if json + Specification.from_string(spec, path).tap { |s| s.defined_in_file = nil } + when Specification + spec.dup + else + raise "Unknown spec type: #{spec}" + end + rescue Pod::DSLError => e + raise Informative, "Failed to load '#{name}' podspec: #{e.message}" + end + + validate_podspec(spec) + sandbox.store_podspec(name, spec, true, true) + end + + def validate_podspec(podspec) + defined_in_file = podspec.defined_in_file + podspec.defined_in_file = nil + + validator = validator_for_podspec(podspec) + validator.quick = true + validator.allow_warnings = true + validator.ignore_public_only_results = true + Config.instance.with_changes(:silent => true) do + validator.validate + end + unless validator.validated? + raise Informative, "The `#{name}` pod failed to validate due to #{validator.failure_reason}:\n#{validator.results_message}" + end + ensure + podspec.defined_in_file = defined_in_file + end + + def validator_for_podspec(podspec) + Validator.new(podspec, [], []) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/downloader_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/downloader_source.rb new file mode 100644 index 0000000..0d26b62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/downloader_source.rb @@ -0,0 +1,30 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from a source handled + # by the downloader. Supports all the options of the downloader + # + # @note The podspec must be in the root of the repository and should have a + # name matching the one of the dependency. + # + class DownloaderSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + pre_download(sandbox) + end + + # @see AbstractExternalSource#description + # + def description + strategy = Downloader.strategy_from_options(params) + options = params.dup + url = options.delete(strategy) + result = "from `#{url}`" + options.each do |key, value| + result << ", #{key} `#{value}`" + end + result + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/path_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/path_source.rb new file mode 100644 index 0000000..991547d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/path_source.rb @@ -0,0 +1,55 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from a path local to + # the machine running the installation. + # + class PathSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + title = "Fetching podspec for `#{name}` #{description}" + UI.section(title, '-> ') do + podspec = podspec_path + unless podspec.exist? 
+ raise Informative, "No podspec found for `#{name}` in " \ + "`#{declared_path}`" + end + store_podspec(sandbox, podspec, podspec.extname == '.json') + is_absolute = absolute?(declared_path) + sandbox.store_local_path(name, podspec, is_absolute) + sandbox.remove_checkout_source(name) + end + end + + # @see AbstractExternalSource#description + # + def description + "from `#{declared_path}`" + end + + private + + # @!group Helpers + + # @return [String] The path as declared by the user. + # + def declared_path + result = params[:path] + result.to_s if result + end + + # @return [Pathname] The absolute path of the podspec. + # + def podspec_path + path = Pathname(normalized_podspec_path(declared_path)) + path.exist? ? path : Pathname("#{path}.json") + end + + # @return [Boolean] + # + def absolute?(path) + Pathname(path).absolute? || path.to_s.start_with?('~') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/podspec_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/podspec_source.rb new file mode 100644 index 0000000..02b11a3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/external_sources/podspec_source.rb @@ -0,0 +1,54 @@ +module Pod + module ExternalSources + # Provides support for fetching a specification file from an URL. Can be + # http, file, etc. + # + class PodspecSource < AbstractExternalSource + # @see AbstractExternalSource#fetch + # + def fetch(sandbox) + title = "Fetching podspec for `#{name}` #{description}" + UI.titled_section(title, :verbose_prefix => '-> ') do + podspec_path = Pathname(podspec_uri) + is_json = podspec_path.extname == '.json' + if podspec_path.exist? + store_podspec(sandbox, podspec_path, is_json) + else + require 'cocoapods/open-uri' + begin + OpenURI.open_uri(podspec_uri) { |io| store_podspec(sandbox, io.read, is_json) } + rescue OpenURI::HTTPError => e + status = e.io.status.join(' ') + raise Informative, "Failed to fetch podspec for `#{name}` at `#{podspec_uri}`.\n Error: #{status}" + end + end + end + end + + # @see AbstractExternalSource#description + # + def description + "from `#{params[:podspec]}`" + end + + private + + # @!group Helpers + + # @return [String] The uri of the podspec appending the name of the file + # and expanding it if necessary. + # + # @note If the declared path is expanded only if the represents a path + # relative to the file system. + # + def podspec_uri + declared_path = params[:podspec].to_s + if declared_path =~ %r{^.+://} + declared_path + else + normalized_podspec_path(declared_path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/gem_version.rb new file mode 100644 index 0000000..eba7c9f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/gem_version.rb @@ -0,0 +1,5 @@ +module Pod + # The version of the CocoaPods command line tool. + # + VERSION = '1.12.1'.freeze unless defined? 
Pod::VERSION +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements.rb new file mode 100644 index 0000000..b5a8991 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements.rb @@ -0,0 +1,119 @@ +module Pod + module Generator + class Acknowledgements + # @return [Array] The classes of the acknowledgements generator + # subclasses. + # + def self.generators + [Plist, Markdown] + end + + # @return [Array] the list of the file accessors + # for the specs of the target that needs to generate the + # acknowledgements. + # + attr_reader :file_accessors + + # @param [Array] @see file_accessors. + # + def initialize(file_accessors) + @file_accessors = file_accessors + end + + #-----------------------------------------------------------------------# + + # !@group Configuration + + # @return [String] The title of the acknowledgements file. + # + def header_title + 'Acknowledgements' + end + + # @return [String] A text to present before listing the acknowledgements. + # + def header_text + 'This application makes use of the following third party libraries:' + end + + # @return [String] The title of the foot notes. + # + def footnote_title + '' + end + + # @return [String] the foot notes. + # + def footnote_text + 'Generated by CocoaPods - https://cocoapods.org' + end + + #-----------------------------------------------------------------------# + + private + + # !@group Private methods + + # @return [Array] The root specifications for which the + # acknowledgements should be generated. + # + def specs + file_accessors.map { |accessor| accessor.spec.root }.uniq + end + + # Returns the text of the license for the given spec. + # + # @param [Specification] spec + # the specification for which license is needed. + # + # @return [String] The text of the license. + # @return [Nil] If no license text could be found. + # + def license_text(spec) + return nil unless spec.license + text = spec.license[:text] + unless text + if license_file = spec.license[:file] + license_path = file_accessor(spec).root + license_file + if File.exist?(license_path) + text = IO.read(license_path) + else + UI.warn "Unable to read the license file `#{license_file}` " \ + "for the spec `#{spec}`" + end + elsif license_file = file_accessor(spec).license + text = IO.read(license_file) + end + text = format_license(text) + end + text + end + + # Convenience method for users to format licenses + # + # @param [String] text + # Unformatted license text + # + # @return [String] Formatted license text + # + def format_license(text) + text + end + + protected + + # Returns the file accessor for the given spec. + # + # @param [Specification] spec + # the specification for which the file accessor is needed. + # + # @return [Sandbox::FileAccessor] The file accessor. 
+ # + def file_accessor(spec) + file_accessors.find { |accessor| accessor.spec.root == spec } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/markdown.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/markdown.rb new file mode 100644 index 0000000..6d7d63b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/markdown.rb @@ -0,0 +1,44 @@ +module Pod + module Generator + class Markdown < Acknowledgements + def self.path_from_basepath(path) + Pathname.new(path.dirname + "#{path.basename}.markdown") + end + + def save_as(path) + file = File.new(path, 'w') + file.write(licenses) + file.close + end + + # @return [String] The contents of the acknowledgements in Markdown format. + # + def generate + licenses + end + + def title_from_string(string, level) + unless string.empty? + '#' * level << " #{string}" + end + end + + def string_for_spec(spec) + if (license_text = license_text(spec)) + "\n" << title_from_string(spec.name, 2) << "\n\n" << license_text << "\n" + end + end + + def licenses + licenses_string = "#{title_from_string(header_title, 1)}\n#{header_text}\n" + specs.each do |spec| + if (license = string_for_spec(spec)) + license = license.force_encoding('UTF-8') if license.respond_to?(:force_encoding) + licenses_string += license + end + end + licenses_string += "#{title_from_string(footnote_title, 2)}#{footnote_text}\n" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/plist.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/plist.rb new file mode 100644 index 0000000..5bc1c34 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/acknowledgements/plist.rb @@ -0,0 +1,94 @@ +require 'stringio' + +module Pod + module Generator + class Plist < Acknowledgements + def self.path_from_basepath(path) + Pathname.new(path.dirname + "#{path.basename}.plist") + end + + def save_as(path) + Xcodeproj::Plist.write_to_path(plist_hash, path) + end + + # @return [String] The contents of the plist + # + def generate + plist = Nanaimo::Plist.new(plist_hash, :xml) + contents = StringIO.new + Nanaimo::Writer::XMLWriter.new(plist, :pretty => true, :output => contents, :strict => false).write + contents.string + end + + def plist_hash + { + :Title => plist_title, + :StringsTable => plist_title, + :PreferenceSpecifiers => licenses, + } + end + + def plist_title + 'Acknowledgements' + end + + def licenses + licences_array = [header_hash] + specs.each do |spec| + if (hash = hash_for_spec(spec)) + licences_array << hash + end + end + licences_array << footnote_hash + end + + def hash_for_spec(spec) + if (license = license_text(spec)) + hash = { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(spec.name), + :FooterText => sanitize_encoding(license), + } + hash[:License] = sanitize_encoding(spec.license[:type]) if spec.license[:type] + + hash + end + end + + def header_hash + { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(header_title), + :FooterText => sanitize_encoding(header_text), + } + end + + def footnote_hash + { + :Type => 'PSGroupSpecifier', + :Title => sanitize_encoding(footnote_title), + :FooterText => sanitize_encoding(footnote_text), + } + end + + 
#-----------------------------------------------------------------------# + + private + + # !@group Private methods + + # Returns the sanitized text with UTF-8 invalid characters eliminated. + # + # @param [String] text + # the text we want to sanitize. + # + # @return [String] The sanitized UTF-8 text. + # + def sanitize_encoding(text) + text.encode('UTF-8', :invalid => :replace, :undef => :replace, :replace => '') + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/app_target_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/app_target_helper.rb new file mode 100644 index 0000000..720d702 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/app_target_helper.rb @@ -0,0 +1,363 @@ +module Pod + module Generator + # Stores the common logic for creating app targets within projects including + # generating standard import and main files for app hosts. + # + module AppTargetHelper + # Adds a single app target to the given project with the provided name. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [Symbol] platform_name + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @param [String] product_basename + # The product basename to use for the target, defaults to `name`. + # + # @return [PBXNativeTarget] the new target that was created. + # + def self.add_app_target(project, platform_name, deployment_target, name = 'App', product_basename = nil) + project.new_target(:application, name, platform_name, deployment_target, nil, + nil, product_basename) + end + + # Creates and links an import file for the given pod target and into the given native target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated import file into. + # + # @param [PodTarget] pod_target + # the pod target to use for when generating the contents of the import file. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. + # + def self.add_app_project_import(project, target, pod_target, platform, name = 'App') + source_file = AppTargetHelper.create_app_import_source_file(project, pod_target, platform, name) + group = project[name] || project.new_group(name, name) + source_file_ref = group.new_file(source_file) + target.add_file_references([source_file_ref]) + end + + # Creates and links an empty Swift file for the given target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated import file into. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. 
+ # + def self.add_empty_swift_file(project, target, name = 'App') + swift_file = project.path.dirname.+("#{name}/dummy.swift") + swift_file.parent.mkpath + File.write(swift_file, '') + group = project[name] || project.new_group(name, name) + swift_file_ref = group.new_file(swift_file) + target.add_file_references([swift_file_ref]) + end + + # Creates and links a default app host 'main.m' file. + # + # @param [Project] project + # the Xcodeproj to generate the main file into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated main file into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [Array] the created build file references. + # + def self.add_app_host_main_file(project, target, platform, group, name = 'App') + source_file = AppTargetHelper.create_app_host_main_file(project, platform, name) + source_file_ref = group.new_file(source_file) + target.add_file_references([source_file_ref]) + end + + # Creates a default launchscreen storyboard. + # + # @param [Project] project + # the Xcodeproj to generate the launchscreen storyboard into. + # + # @param [PBXNativeTarget] target + # the native target to link the generated launchscreen storyboard into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name to use for the target, defaults to 'App'. + # + # @return [PBXFileReference] the created file reference of the launchscreen storyboard. + # + def self.add_launchscreen_storyboard(project, target, group, deployment_target, name = 'App') + launch_storyboard_file = AppTargetHelper.create_launchscreen_storyboard_file(project, deployment_target, name) + launch_storyboard_ref = group.new_file(launch_storyboard_file) + target.resources_build_phase.add_file_reference(launch_storyboard_ref) + end + + # Adds the xctest framework search paths into the given target. + # + # @param [PBXNativeTarget] target + # the native target to add XCTest into. + # + # @return [void] + # + def self.add_xctest_search_paths(target) + requires_libs = target.platform_name == :ios && + Version.new(target.deployment_target) < Version.new('12.2') + + target.build_configurations.each do |configuration| + framework_search_paths = configuration.build_settings['FRAMEWORK_SEARCH_PATHS'] ||= '$(inherited)' + framework_search_paths << ' "$(PLATFORM_DIR)/Developer/Library/Frameworks"' + + if requires_libs + library_search_paths = configuration.build_settings['LIBRARY_SEARCH_PATHS'] ||= '$(inherited)' + library_search_paths << ' "$(PLATFORM_DIR)/Developer/usr/lib"' + end + end + end + + # Adds the provided swift version into the given target. + # + # @param [PBXNativeTarget] target + # the native target to add the swift version into. + # + # @param [String] swift_version + # the swift version to set to. + # + # @return [void] + # + def self.add_swift_version(target, swift_version) + raise 'Cannot set empty Swift version to target.' if swift_version.blank? + target.build_configurations.each do |configuration| + configuration.build_settings['SWIFT_VERSION'] = swift_version + end + end + + # Creates a default import file for the given pod target. + # + # @param [Project] project + # the Xcodeproj to generate the target into. 
+ # + # @param [PodTarget] pod_target + # the pod target to use for when generating the contents of the import file. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`, etc. + # + # @param [String] name + # The name of the folder to use and save the generated main file. + # + # @return [Pathname] the new source file that was generated. + # + def self.create_app_import_source_file(project, pod_target, platform, name = 'App') + language = pod_target.uses_swift? ? :swift : :objc + + if language == :swift + source_file = project.path.dirname.+("#{name}/main.swift") + source_file.parent.mkpath + import_statement = pod_target.should_build? && pod_target.defines_module? ? "import #{pod_target.product_module_name}\n" : '' + source_file.open('w') { |f| f << import_statement } + else + source_file = project.path.dirname.+("#{name}/main.m") + source_file.parent.mkpath + import_statement = if pod_target.should_build? && pod_target.defines_module? + "@import #{pod_target.product_module_name};\n" + else + header_name = "#{pod_target.product_module_name}/#{pod_target.product_module_name}.h" + if pod_target.sandbox.public_headers.root.+(header_name).file? + "#import <#{header_name}>\n" + else + '' + end + end + source_file.open('w') do |f| + f << "@import Foundation;\n" + f << "@import UIKit;\n" if platform == :ios || platform == :tvos + f << "@import Cocoa;\n" if platform == :osx + f << "#{import_statement}int main(void) {}\n" + end + end + source_file + end + + # Creates a default launchscreen storyboard file. + # + # @param [Project] project + # the Xcodeproj to generate the launchscreen storyboard into. + # + # @param [String] deployment_target + # the deployment target for the platform. + # + # @param [String] name + # The name of the folder to use and save the generated launchscreen storyboard file. + # + # @return [Pathname] the new launchscreen storyboard file that was generated. + # + def self.create_launchscreen_storyboard_file(project, deployment_target, name = 'App') + launch_storyboard_file = project.path.dirname.+("#{name}/LaunchScreen.storyboard") + launch_storyboard_file.parent.mkpath + if Version.new(deployment_target) >= Version.new('9.0') + File.write(launch_storyboard_file, LAUNCHSCREEN_STORYBOARD_CONTENTS) + else + File.write(launch_storyboard_file, LAUNCHSCREEN_STORYBOARD_CONTENTS_IOS_8) + end + launch_storyboard_file + end + + # Creates a default app host 'main.m' file. + # + # @param [Project] project + # the Xcodeproj to generate the target into. + # + # @param [Symbol] platform + # the platform of the target. Can be `:ios` or `:osx`. + # + # @param [String] name + # The name of the folder to use and save the generated main file. + # + # @return [Pathname] the new source file that was generated. 
+ # + def self.create_app_host_main_file(project, platform, name = 'App') + source_file = project.path.dirname.+("#{name}/main.m") + source_file.parent.mkpath + source_file.open('w') do |f| + case platform + when :ios, :tvos + f << IOS_APP_HOST_MAIN_CONTENTS + when :osx + f << MACOS_APP_HOST_MAIN_CONTENTS + end + end + source_file + end + + IOS_APP_HOST_MAIN_CONTENTS = < +#import + +@interface CPTestAppHostAppDelegate : UIResponder + +@property (nonatomic, strong) UIWindow *window; + +@end + +@implementation CPTestAppHostAppDelegate + +- (BOOL)application:(UIApplication *)__unused application didFinishLaunchingWithOptions:(NSDictionary *)__unused launchOptions +{ + self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]]; + self.window.rootViewController = [UIViewController new]; + + [self.window makeKeyAndVisible]; + + return YES; +} + +@end + +int main(int argc, char *argv[]) +{ + @autoreleasepool + { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([CPTestAppHostAppDelegate class])); + } +} +EOS + + MACOS_APP_HOST_MAIN_CONTENTS = < + +int main(int argc, const char * argv[]) { + return NSApplicationMain(argc, argv); +} +EOS + + LAUNCHSCREEN_STORYBOARD_CONTENTS_IOS_8 = <<-XML.strip_heredoc.freeze + + + + + + + + + + + + + + + + + + + + + + + + + + + + XML + + LAUNCHSCREEN_STORYBOARD_CONTENTS = <<-XML.strip_heredoc.freeze + + + + + + + + + + + + + + + + + + + + + + + + + + XML + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/bridge_support.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/bridge_support.rb new file mode 100644 index 0000000..fc77aba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/bridge_support.rb @@ -0,0 +1,22 @@ +module Pod + module Generator + class BridgeSupport + extend Executable + executable :gen_bridge_metadata + + attr_reader :headers + + def initialize(headers) + @headers = headers + end + + def search_paths + @headers.map { |header| "-I '#{header.dirname}'" }.uniq + end + + def save_as(pathname) + gen_bridge_metadata('-c', search_paths.join(' '), '-o', pathname, *headers) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/constant.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/constant.rb new file mode 100644 index 0000000..f7f96b6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/constant.rb @@ -0,0 +1,19 @@ +module Pod + module Generator + # Generates a constant file. + # + class Constant + def initialize(contents) + @generate = contents + end + + attr_reader :generate + + def save_as(path) + path.open('w') do |f| + f.write(generate) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_dsyms_script.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_dsyms_script.rb new file mode 100644 index 0000000..6dd7797 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_dsyms_script.rb @@ -0,0 +1,56 @@ +module Pod + module Generator + class CopydSYMsScript + # @return [Array] dsym_paths the dSYM paths to include in the script contents. + # + attr_reader :dsym_paths + + # @return [Array] bcsymbolmap_paths the bcsymbolmap paths to include in the script contents. 
+ # + attr_reader :bcsymbolmap_paths + + # Initialize a new instance + # + # @param [Array] dsym_paths @see dsym_paths + # @param [Array] bcsymbolmap_paths @see bcsymbolmap_paths + # + def initialize(dsym_paths, bcsymbolmap_paths) + @dsym_paths = Array(dsym_paths) + @bcsymbolmap_paths = Array(bcsymbolmap_paths) + end + + # Saves the copy dSYMs script to the given pathname. + # + # @param [Pathname] pathname + # The path where the copy dSYMs script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(generate) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The generated contents of the copy dSYMs script. + # + def generate + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} +#{Pod::Generator::ScriptPhaseConstants::STRIP_INVALID_ARCHITECTURES_METHOD} +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_DSYM_METHOD} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_BCSYMBOLMAP_METHOD} + SH + dsym_paths.each do |dsym_path| + script << %(install_dsym "#{dsym_path}"\n) + end + bcsymbolmap_paths.each do |bcsymbolmap_path| + script << %(install_bcsymbolmap "#{bcsymbolmap_path}"\n) + end + script + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_resources_script.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_resources_script.rb new file mode 100644 index 0000000..c674254 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/copy_resources_script.rb @@ -0,0 +1,223 @@ +module Pod + module Generator + class CopyResourcesScript + # @return [Hash{String, Array{String}}] A list of files relative to the + # project pods root, keyed by build configuration. + # + attr_reader :resources_by_config + + # @return [Platform] The platform of the library for which the copy + # resources script is needed. + # + attr_reader :platform + + # Initialize a new instance + # + # @param [Hash{String, Array{String}}] resources_by_config + # @see resources_by_config + # + # @param [Platform] platform + # @see platform + # + def initialize(resources_by_config, platform) + @resources_by_config = resources_by_config + @platform = platform + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the copy resources script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The contents of the copy resources script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [Hash{Symbol=>Version}] The minimum deployment target which + # supports the `--reference-external-strings-file` option for + # the `ibtool` command. + # + EXTERNAL_STRINGS_FILE_MIMINUM_DEPLOYMENT_TARGET = { + :ios => Version.new('6.0'), + :osx => Version.new('10.8'), + :watchos => Version.new('2.0'), + :tvos => Version.new('9.0'), + } + + # @return [Boolean] Whether the external strings file is supported by the + # `ibtool` according to the deployment target of the platform. + # + def use_external_strings_file? + minimum_deployment_target = EXTERNAL_STRINGS_FILE_MIMINUM_DEPLOYMENT_TARGET[platform.name] + platform.deployment_target >= minimum_deployment_target + end + + # @return [String] The install resources shell function. 
+ # + def install_resources_function + if use_external_strings_file? + INSTALL_RESOURCES_FUNCTION + else + INSTALL_RESOURCES_FUNCTION.gsub(' --reference-external-strings-file', '') + end + end + + # @return [String] The contents of the copy resources script. + # + def script + # Define install function + script = install_resources_function + + # Call function for each configuration-dependent resource + resources_by_config.each do |config, resources| + unless resources.empty? + script += %(if [[ "$CONFIGURATION" == "#{config}" ]]; then\n) + resources.each do |resource| + script += %( install_resource "#{resource}"\n) + end + script += "fi\n" + end + end + + script += RSYNC_CALL + script += XCASSETS_COMPILE + script + end + + INSTALL_RESOURCES_FUNCTION = < "$RESOURCES_TO_COPY" + +XCASSET_FILES=() + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +case "${TARGETED_DEVICE_FAMILY:-}" in + 1,2) + TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone" + ;; + 1) + TARGET_DEVICE_ARGS="--target-device iphone" + ;; + 2) + TARGET_DEVICE_ARGS="--target-device ipad" + ;; + 3) + TARGET_DEVICE_ARGS="--target-device tv" + ;; + 4) + TARGET_DEVICE_ARGS="--target-device watch" + ;; + *) + TARGET_DEVICE_ARGS="--target-device mac" + ;; +esac + +install_resource() +{ + if [[ "$1" = /* ]] ; then + RESOURCE_PATH="$1" + else + RESOURCE_PATH="${PODS_ROOT}/$1" + fi + if [[ ! -e "$RESOURCE_PATH" ]] ; then + cat << EOM +error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script. +EOM + exit 1 + fi + case $RESOURCE_PATH in + *\.storyboard) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *\.xib) + echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true + ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \\"$RESOURCE_PATH\\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS} + ;; + *.framework) + echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true + mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + ;; + *.xcdatamodel) + echo "xcrun momc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename 
"$RESOURCE_PATH"`.mom\\"" || true + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom" + ;; + *.xcdatamodeld) + echo "xcrun momc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\\"" || true + xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd" + ;; + *.xcmappingmodel) + echo "xcrun mapc \\"$RESOURCE_PATH\\" \\"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\\"" || true + xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm" + ;; + *.xcassets) + ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH" + XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE") + ;; + *) + echo "$RESOURCE_PATH" || true + echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY" + ;; + esac +} +EOS + + RSYNC_CALL = <] List of xcframeworks to copy + # + attr_reader :xcframeworks + + # @return [Pathname] the root directory of the sandbox + # + attr_reader :sandbox_root + + # @return [Platform] the platform of the target for which this script will run + # + attr_reader :platform + + # Creates a script for copying XCFramework slcies into an intermediate build directory + # + # @param [Array] xcframeworks + # the list of xcframeworks to copy + # + # @param [Pathname] sandbox_root + # the root of the Sandbox into which this script will be installed + # + # @param [Platform] platform + # the platform of the target for which this script will be run + # + def initialize(xcframeworks, sandbox_root, platform) + @xcframeworks = xcframeworks + @sandbox_root = sandbox_root + @platform = platform + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the embed frameworks script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0o755, pathname.to_s) + end + + # @return [String] The contents of the embed frameworks script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [String] The contents of the prepare artifacts script. + # + def script + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} + +#{variant_for_slice} + +#{archs_for_slice} + +copy_dir() +{ + local source="$1" + local destination="$2" + + # Use filter instead of exclude so missing patterns don't throw errors. 
+ echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" \\"${source}*\\" \\"${destination}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}" +} + +SELECT_SLICE_RETVAL="" + +select_slice() { + local xcframework_name="$1" + xcframework_name="${xcframework_name##*/}" + local paths=("${@:2}") + # Locate the correct slice of the .xcframework for the current architectures + local target_path="" + + # Split archs on space so we can find a slice that has all the needed archs + local target_archs=$(echo $ARCHS | tr " " "\\n") + + local target_variant="" + if [[ "$PLATFORM_NAME" == *"simulator" ]]; then + target_variant="simulator" + fi + if [[ ! -z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then + target_variant="maccatalyst" + fi + for i in ${!paths[@]}; do + local matched_all_archs="1" + local slice_archs="$(archs_for_slice "${xcframework_name}/${paths[$i]}")" + local slice_variant="$(variant_for_slice "${xcframework_name}/${paths[$i]}")" + for target_arch in $target_archs; do + if ! [[ "${slice_variant}" == "$target_variant" ]]; then + matched_all_archs="0" + break + fi + + if ! echo "${slice_archs}" | tr " " "\\n" | grep -F -q -x "$target_arch"; then + matched_all_archs="0" + break + fi + done + + if [[ "$matched_all_archs" == "1" ]]; then + # Found a matching slice + echo "Selected xcframework slice ${paths[$i]}" + SELECT_SLICE_RETVAL=${paths[$i]} + break + fi + done +} + +install_xcframework() { + local basepath="$1" + local name="$2" + local package_type="$3" + local paths=("${@:4}") + + # Locate the correct slice of the .xcframework for the current architectures + select_slice "${basepath}" "${paths[@]}" + local target_path="$SELECT_SLICE_RETVAL" + if [[ -z "$target_path" ]]; then + echo "warning: [CP] $(basename ${basepath}): Unable to find matching slice in '${paths[@]}' for the current build architectures ($ARCHS) and platform (${EFFECTIVE_PLATFORM_NAME-${PLATFORM_NAME}})." + return + fi + local source="$basepath/$target_path" + + local destination="#{Pod::Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/${name}" + + if [ ! -d "$destination" ]; then + mkdir -p "$destination" + fi + + copy_dir "$source/" "$destination" + echo "Copied $source to $destination" +} + + SH + xcframeworks.each do |xcframework| + slices = xcframework.slices.select { |f| f.platform.symbolic_name == platform.symbolic_name } + next if slices.empty? + args = install_xcframework_args(xcframework, slices) + script << "install_xcframework #{args}\n" + end + + script << "\n" unless xcframeworks.empty? + script + end + + def shell_escape(value) + "\"#{value}\"" + end + + def install_xcframework_args(xcframework, slices) + root = xcframework.path + args = [shell_escape("${PODS_ROOT}/#{root.relative_path_from(sandbox_root)}")] + args << shell_escape(xcframework.target_name) + is_framework = xcframework.build_type.framework? + args << shell_escape(is_framework ? 
'framework' : 'library') + slices.each do |slice| + args << shell_escape(slice.path.dirname.relative_path_from(root)) + end + args.join(' ') + end + + def variant_for_slice + script = '' + script << "variant_for_slice()\n" + script << "{\n" + script << " case \"$1\" in\n" + xcframeworks.each do |xcframework| + root = xcframework.path + xcframework.slices.each do |slice| + script << " #{shell_escape(root.basename.join(slice.path.dirname.relative_path_from(root)))})\n" + script << " echo \"#{slice.platform_variant}\"\n" + script << " ;;\n" + end + end + script << " esac\n" + script << '}' + end + + def archs_for_slice + script = '' + script << "archs_for_slice()\n" + script << "{\n" + script << " case \"$1\" in\n" + xcframeworks.each do |xcframework| + root = xcframework.path + xcframework.slices.each do |slice| + script << " #{shell_escape(root.basename.join(slice.path.dirname.relative_path_from(root)))})\n" + script << " echo \"#{slice.supported_archs.sort.join(' ')}\"\n" + script << " ;;\n" + end + end + script << " esac\n" + script << '}' + end + + class << self + # @param [Pathname] xcframework_path + # the base path of the .xcframework bundle + # + # @return [Array] all found .dSYM paths + # + def dsym_folder(xcframework_path) + basename = File.basename(xcframework_path, '.xcframework') + dsym_basename = basename + '.dSYMs' + path = xcframework_path.dirname + dsym_basename + Pathname.new(path) if File.directory?(path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/dummy_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/dummy_source.rb new file mode 100644 index 0000000..d04261d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/dummy_source.rb @@ -0,0 +1,31 @@ +module Pod + module Generator + class DummySource + attr_reader :class_name + + def initialize(class_name_identifier) + validated_class_name_identifier = class_name_identifier.gsub(/[^0-9a-z_]/i, '_') + @class_name = "PodsDummy_#{validated_class_name_identifier}" + end + + # @return [String] the string contents of the dummy source file. + # + def generate + result = <<-source.strip_heredoc + #import + @interface #{class_name} : NSObject + @end + @implementation #{class_name} + @end + source + result + end + + def save_as(pathname) + pathname.open('w') do |source| + source.write(generate) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/embed_frameworks_script.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/embed_frameworks_script.rb new file mode 100644 index 0000000..e4956ee --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/embed_frameworks_script.rb @@ -0,0 +1,196 @@ +require 'cocoapods/xcode' + +module Pod + module Generator + class EmbedFrameworksScript + # @return [Hash{String => Array}] Multiple lists of frameworks per + # configuration. + # + attr_reader :frameworks_by_config + + # @return [Hash{String => Array}] Multiple lists of frameworks per + # configuration. 
+ # + attr_reader :xcframeworks_by_config + + # @param [Hash{String => Array] frameworks_by_config + # @see #frameworks_by_config + # + # @param [Hash{String => Array] xcframeworks_by_config + # @see #xcframeworks_by_config + # + def initialize(frameworks_by_config, xcframeworks_by_config) + @frameworks_by_config = frameworks_by_config + @xcframeworks_by_config = xcframeworks_by_config + end + + # Saves the resource script to the given pathname. + # + # @param [Pathname] pathname + # The path where the embed frameworks script should be saved. + # + # @return [void] + # + def save_as(pathname) + pathname.open('w') do |file| + file.puts(script) + end + File.chmod(0755, pathname.to_s) + end + + # @return [String] The contents of the embed frameworks script. + # + def generate + script + end + + private + + # @!group Private Helpers + + # @return [String] The contents of the embed frameworks script. + # + def script + script = <<-SH.strip_heredoc +#{Pod::Generator::ScriptPhaseConstants::DEFAULT_SCRIPT_PHASE_HEADER} +if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then + # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy + # frameworks to, so exit 0 (signalling the script phase was successful). + exit 0 +fi + +echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" +mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + +COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}" +SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}" +BCSYMBOLMAP_DIR="BCSymbolMaps" + + +#{Pod::Generator::ScriptPhaseConstants::RSYNC_PROTECT_TMP_FILES} +# Copies and strips a vendored framework +install_framework() +{ + if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then + local source="${BUILT_PRODUCTS_DIR}/$1" + elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then + local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")" + elif [ -r "$1" ]; then + local source="$1" + fi + + local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" + + if [ -L "${source}" ]; then + echo "Symlinked..." + source="$(readlink -f "${source}")" + fi + + if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then + # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied + find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do + echo "Installing $f" + install_bcsymbolmap "$f" "$destination" + rm "$f" + done + rmdir "${source}/${BCSYMBOLMAP_DIR}" + fi + + # Use filter instead of exclude so missing patterns don't throw errors. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${source}\\" \\"${destination}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}" + + local basename + basename="$(basename -s .framework "$1")" + binary="${destination}/${basename}.framework/${basename}" + + if ! [ -r "$binary" ]; then + binary="${destination}/${basename}" + elif [ -L "${binary}" ]; then + echo "Destination binary is symlinked..." 
+ dirname="$(dirname "${binary}")" + binary="${dirname}/$(readlink "${binary}")" + fi + + # Strip invalid architectures so "fat" simulator / device frameworks work on device + if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then + strip_invalid_archs "$binary" + fi + + # Resign the code if required by the build settings to avoid unstable apps + code_sign_if_enabled "${destination}/$(basename "$1")" + + # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7. + if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then + local swift_runtime_libs + swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\\\/\\(.+dylib\\).*/\\\\1/g | uniq -u) + for lib in $swift_runtime_libs; do + echo "rsync -auv \\"${SWIFT_STDLIB_PATH}/${lib}\\" \\"${destination}\\"" + rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}" + code_sign_if_enabled "${destination}/${lib}" + done + fi +} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_DSYM_METHOD} +#{Pod::Generator::ScriptPhaseConstants::STRIP_INVALID_ARCHITECTURES_METHOD} +#{Pod::Generator::ScriptPhaseConstants::INSTALL_BCSYMBOLMAP_METHOD} +# Signs a framework with the provided identity +code_sign_if_enabled() { + if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then + # Use the current code_sign_identity + echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}" + local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'" + + if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + code_sign_cmd="$code_sign_cmd &" + fi + echo "$code_sign_cmd" + eval "$code_sign_cmd" + fi +} + SH + contents_by_config = Hash.new do |hash, key| + hash[key] = '' + end + frameworks_by_config.each do |config, frameworks| + frameworks.each do |framework| + contents_by_config[config] << %( install_framework "#{framework.source_path}"\n) + end + end + xcframeworks_by_config.each do |config, xcframeworks| + xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.each do |xcframework| + target_name = xcframework.target_name + name = xcframework.name + contents_by_config[config] << %( install_framework "#{Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{target_name}/#{name}.framework"\n) + end + end + script << "\n" unless contents_by_config.empty? + contents_by_config.keys.sort.each do |config| + contents = contents_by_config[config] + next if contents.empty? + script << %(if [[ "$CONFIGURATION" == "#{config}" ]]; then\n) + script << contents + script << "fi\n" + end + script << <<-SH.strip_heredoc + if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then + wait + fi + SH + script + end + + # @param [Xcode::FrameworkPaths] framework_path + # the framework path containing the dSYM + # + # @return [String, Nil] the name of the dSYM binary, if found + # + def dsym_binary_name(framework_path) + return nil if framework_path.dsym_path.nil? 
+ if (path = Pathname.glob(framework_path.dsym_path.join('Contents/Resources/DWARF', '**/*')).first) + File.basename(path) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/file_list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/file_list.rb new file mode 100644 index 0000000..3e617f4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/file_list.rb @@ -0,0 +1,39 @@ +module Pod + module Generator + # Generates an xcfilelist file. + # + class FileList + # @return [Array] The paths of the files in the file list. + # + attr_reader :paths + + # Initialize a new instance + # + # @param [Array] paths + # @see paths + # + def initialize(paths) + @paths = paths + end + + # Generates the contents of the file list. + # + # @return [String] + # + def generate + paths.join("\n") + end + + # Generates and saves the file list to the given path. + # + # @param [Pathname] path + # The path where the file list should be stored. + # + # @return [void] + # + def save_as(path) + path.open('w') { |file_list| file_list.write(generate) } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/header.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/header.rb new file mode 100644 index 0000000..fd4fab8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/header.rb @@ -0,0 +1,103 @@ +module Pod + module Generator + # Generates a header file. + # + # According to the platform the header imports `UIKit/UIKit.h` or + # `Cocoa/Cocoa.h`. + # + class Header + # @return [Symbol] the platform for which the prefix header will be + # generated. + # + attr_reader :platform + + # @return [Array] The list of the headers to import. + # + attr_accessor :imports + + # @return [Array] The list of the modules to import. + # + attr_reader :module_imports + + # Initialize a new instance + # + # @param [Symbol] platform + # @see platform + # + def initialize(platform) + @platform = platform + @imports = [] + @module_imports = [] + end + + # Generates the contents of the header according to the platform. + # + # @note If the platform is iOS an import call to `UIKit/UIKit.h` is + # added to the top of the prefix header. For OS X `Cocoa/Cocoa.h` + # is imported. + # + # @return [String] + # + def generate + result = '' + result << "#ifdef __OBJC__\n" + result << generate_platform_import_header + result << "#else\n" + result << "#ifndef FOUNDATION_EXPORT\n" + result << "#if defined(__cplusplus)\n" + result << "#define FOUNDATION_EXPORT extern \"C\"\n" + result << "#else\n" + result << "#define FOUNDATION_EXPORT extern\n" + result << "#endif\n" + result << "#endif\n" + result << "#endif\n" + result << "\n" + + imports.each do |import| + result << %(#import "#{import}"\n) + end + + unless module_imports.empty? + module_imports.each do |import| + result << %(\n@import #{import}) + end + result << "\n" + end + + result + end + + # Generates and saves the header to the given path. + # + # @param [Pathname] path + # The path where the header should be stored. + # + # @return [void] + # + def save_as(path) + path.open('w') { |header| header.write(generate) } + end + + #-----------------------------------------------------------------------# + + protected + + # Generates the contents of the header according to the platform. 
+ # + # @note If the platform is iOS an import call to `UIKit/UIKit.h` is + # added to the top of the header. For OS X `Cocoa/Cocoa.h` is + # imported. + # + # @return [String] + # + def generate_platform_import_header + case platform.name + when :ios then "#import <UIKit/UIKit.h>\n" + when :tvos then "#import <UIKit/UIKit.h>\n" + when :osx then "#import <Cocoa/Cocoa.h>\n" + else "#import <Foundation/Foundation.h>\n" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/info_plist_file.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/info_plist_file.rb new file mode 100644 index 0000000..894c1c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/info_plist_file.rb @@ -0,0 +1,128 @@ +module Pod + module Generator + # Generates Info.plist files. An Info.plist file is generated for each + # Pod and for each Pod target definition that needs to be built as a + # framework. It states public attributes. + # + class InfoPlistFile + # @return [String] version The version to use when generating this Info.plist file. + # + attr_reader :version + + # @return [Platform] The platform to use when generating this Info.plist file. + # + attr_reader :platform + + # @return [Symbol] the CFBundlePackageType of the target this Info.plist + # file is for. + # + attr_reader :bundle_package_type + + # @return [Hash] any additional entries to include in this Info.plist + # + attr_reader :additional_entries + + # Initialize a new instance + # + # @param [String] version @see #version + # @param [Platform] platform @see #platform + # @param [Symbol] bundle_package_type @see #bundle_package_type + # @param [Hash] additional_entries @see #additional_entries + # + def initialize(version, platform, bundle_package_type = :fmwk, additional_entries = {}) + @version = version + @platform = platform + @bundle_package_type = bundle_package_type + @additional_entries = additional_entries + end + + # Generates and saves the Info.plist to the given path. + # + # @param [Pathname] path + # the path where the Info.plist should be stored. 
+ # + # @return [void] + # + def save_as(path) + contents = generate + path.open('w') do |f| + f.write(contents) + end + end + + # Generates the contents of the Info.plist. + # + # @return [String] + # + def generate + to_plist(info) + end + + private + + def header + <<-PLIST +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> + PLIST + end + + def footer + <<-PLIST +</plist> + PLIST + end + + def to_plist(root) + serialize(root, header) << footer + end + + def serialize(value, output, indentation = 0) + indent = ' ' * indentation + case value + when Array + output << indent << "<array>\n" + value.each { |v| serialize(v, output, indentation + 2) } + output << indent << "</array>\n" + when Hash + output << indent << "<dict>\n" + value.to_a.sort_by(&:first).each do |key, v| + output << indent << ' ' << "<key>#{key}</key>\n" + serialize(v, output, indentation + 2) + end + output << indent << "</dict>\n" + when String + output << indent << "<string>#{value}</string>\n" + when true + output << indent << "<true/>\n" + when false + output << indent << "<false/>\n" + end + output + end + + def info + info = { + 'CFBundleIdentifier' => '${PRODUCT_BUNDLE_IDENTIFIER}', + 'CFBundleInfoDictionaryVersion' => '6.0', + 'CFBundleName' => '${PRODUCT_NAME}', + 'CFBundlePackageType' => bundle_package_type.to_s.upcase, + 'CFBundleShortVersionString' => version, + 'CFBundleSignature' => '????', + 'CFBundleVersion' => '${CURRENT_PROJECT_VERSION}', + 'NSPrincipalClass' => '', + 'CFBundleDevelopmentRegion' => '${PODS_DEVELOPMENT_LANGUAGE}', + } + + info['CFBundleExecutable'] = '${EXECUTABLE_NAME}' if bundle_package_type != :bndl + info['CFBundleVersion'] = '1' if bundle_package_type == :bndl + info['NSPrincipalClass'] = 'NSApplication' if bundle_package_type == :appl && platform == :osx + + info.merge!(additional_entries) + + info + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/module_map.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/module_map.rb new file mode 100644 index 0000000..e3a9ea5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/module_map.rb @@ -0,0 +1,99 @@ +module Pod + module Generator + # Generates LLVM module map files. A module map file is generated for each + # Pod and for each Pod target definition that is built as a framework. It + # specifies a different umbrella header than usual to avoid name conflicts + # with existing headers of the podspec. + # + class ModuleMap + # @return [PodTarget, AggregateTarget] the target the module map is generated for. + # + attr_reader :target + + attr_reader :headers + + Header = Struct.new(:path, :umbrella, :private, :textual, :exclude, :size, :mtime) do + alias_method :private?, :private + def to_s + [ + (:private if private?), + (:textual if textual), + (:umbrella if umbrella), + (:exclude if exclude), + 'header', + %("#{path.to_s.gsub('"', '\"')}"), + attrs, + ].compact.join(' ') + end + + def attrs + attrs = { + 'size' => size, + 'mtime' => mtime, + }.reject { |_k, v| v.nil? } + return nil if attrs.empty? + attrs.to_s + end + end + + # Initialize a new instance + # + # @param [PodTarget, AggregateTarget] target @see target + # + def initialize(target) + @target = target + @headers = [ + Header.new(target.umbrella_header_path.basename, true), + ] + end + + # Generates and saves the module map to the given path. + # + # @param [Pathname] path + # the path where the module map should be stored. 
+ #
+ # @return [void]
+ #
+ def save_as(path)
+ contents = generate
+ path.open('w') do |f|
+ f.write(contents)
+ end
+ end
+
+ # Generates the contents of the module.modulemap file.
+ #
+ # @return [String]
+ #
+ def generate
+ <<-MODULE_MAP.strip_heredoc
+#{module_specifier_prefix}module #{target.product_module_name}#{module_declaration_attributes} {
+ #{headers.join("\n ")}
+
+ export *
+ module * { export * }
+}
+ MODULE_MAP
+ end
+
+ private
+
+ # The prefix to `module` to prepend in the module map.
+ # Ensures that only framework targets have `framework` prepended.
+ #
+ def module_specifier_prefix
+ if target.build_as_framework?
+ 'framework '
+ else
+ ''
+ end
+ end
+
+ # The suffix attributes to `module`.
+ #
+ def module_declaration_attributes
+ ''
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/prefix_header.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/prefix_header.rb
new file mode 100644
index 0000000..34036e6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/prefix_header.rb
@@ -0,0 +1,60 @@
+module Pod
+ module Generator
+ # Generates a prefix header file for a Pods library. The prefix header is
+ # generated according to the platform of the target and the pods.
+ #
+ # According to the platform the prefix header imports `UIKit/UIKit.h` or
+ # `Cocoa/Cocoa.h`.
+ #
+ class PrefixHeader < Header
+ # @return [Array<Sandbox::FileAccessor>] The file accessors for which to generate
+ # the prefix header.
+ #
+ attr_reader :file_accessors
+
+ # Initialize a new instance
+ #
+ # @param [Array<Sandbox::FileAccessor>] file_accessors
+ # @see #file_accessors
+ #
+ # @param [Platform] platform
+ # @see Header#platform
+ #
+ def initialize(file_accessors, platform)
+ @file_accessors = file_accessors
+ super platform
+ end
+
+ # Generates the contents of the prefix header according to the platform
+ # and the pods.
+ #
+ # @note Only unique prefix_header_contents are added to the prefix
+ # header.
+ #
+ # @return [String]
+ #
+ # @todo Subspecs can specify prefix header information too.
+ # @todo Check to see if we have a similar duplication issue with
+ # file_accessor.prefix_header.
+ #
+ def generate
+ result = super
+
+ unique_prefix_header_contents = file_accessors.map do |file_accessor|
+ file_accessor.spec_consumer.prefix_header_contents
+ end.compact.uniq
+
+ unique_prefix_header_contents.each do |prefix_header_contents|
+ result << prefix_header_contents
+ result << "\n"
+ end
+
+ file_accessors.map(&:prefix_header).compact.uniq.each do |prefix_header|
+ result << Pathname(prefix_header).read
+ end
+
+ result
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/script_phase_constants.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/script_phase_constants.rb
new file mode 100644
index 0000000..ff95605
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/script_phase_constants.rb
@@ -0,0 +1,100 @@
+module Pod
+ module Generator
+ module ScriptPhaseConstants
+ DEFAULT_SCRIPT_PHASE_HEADER = <<-SH.strip_heredoc.freeze
+#!/bin/sh
+set -e
+set -u
+set -o pipefail
+
+function on_error {
+ echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
+}
+trap 'on_error $LINENO' ERR
+ SH
+
+ RSYNC_PROTECT_TMP_FILES = <<-SH.strip_heredoc.freeze
+# This protects against multiple targets copying the same framework dependency at the same time.
The solution +# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html +RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????") + SH + + STRIP_INVALID_ARCHITECTURES_METHOD = <<-SH.strip_heredoc.freeze +# Used as a return value for each invocation of `strip_invalid_archs` function. +STRIP_BINARY_RETVAL=0 + +# Strip invalid architectures +strip_invalid_archs() { + binary="$1" + warn_missing_arch=${2:-true} + # Get architectures for current target binary + binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)" + # Intersect them with the architectures we are building for + intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\\n' | sort | uniq -d)" + # If there are no archs supported by this binary then warn the user + if [[ -z "$intersected_archs" ]]; then + if [[ "$warn_missing_arch" == "true" ]]; then + echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)." + fi + STRIP_BINARY_RETVAL=1 + return + fi + stripped="" + for arch in $binary_archs; do + if ! [[ "${ARCHS}" == *"$arch"* ]]; then + # Strip non-valid architectures in-place + lipo -remove "$arch" -output "$binary" "$binary" + stripped="$stripped $arch" + fi + done + if [[ "$stripped" ]]; then + echo "Stripped $binary of architectures:$stripped" + fi + STRIP_BINARY_RETVAL=0 +} + SH + + INSTALL_DSYM_METHOD = <<-SH.strip_heredoc.freeze +# Copies and strips a vendored dSYM +install_dsym() { + local source="$1" + warn_missing_arch=${2:-true} + if [ -r "$source" ]; then + # Copy the dSYM into the targets temp dir. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${source}\\" \\"${DERIVED_FILES_DIR}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}" + + local basename + basename="$(basename -s .dSYM "$source")" + binary_name="$(ls "$source/Contents/Resources/DWARF")" + binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}" + + # Strip invalid architectures from the dSYM. + if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then + strip_invalid_archs "$binary" "$warn_missing_arch" + fi + if [[ $STRIP_BINARY_RETVAL == 0 ]]; then + # Move the stripped file into its final destination. + echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \\"- CVS/\\" --filter \\"- .svn/\\" --filter \\"- .git/\\" --filter \\"- .hg/\\" --filter \\"- Headers\\" --filter \\"- PrivateHeaders\\" --filter \\"- Modules\\" \\"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\\" \\"${DWARF_DSYM_FOLDER_PATH}\\"" + rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}" + else + # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing. 
+ mkdir -p "${DWARF_DSYM_FOLDER_PATH}"
+ touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
+ fi
+ fi
+}
+ SH
+
+ INSTALL_BCSYMBOLMAP_METHOD = <<-SH.strip_heredoc.freeze
+# Copies the bcsymbolmap files of a vendored framework
+install_bcsymbolmap() {
+ local bcsymbolmap_path="$1"
+ local destination="${BUILT_PRODUCTS_DIR}"
+ echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
+ rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
+}
+ SH
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/umbrella_header.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/umbrella_header.rb
new file mode 100644
index 0000000..a3e4a0f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/generator/umbrella_header.rb
@@ -0,0 +1,46 @@
+module Pod
+ module Generator
+ # Generates an umbrella header file for clang modules, which are used by
+ # dynamic frameworks on iOS 8 and OSX 10.10 under the hood.
+ #
+ # If the target is a +PodTarget+, then the umbrella header is required
+ # to make all public headers conveniently available without the need to
+ # write out header declarations for every library header.
+ #
+ class UmbrellaHeader < Header
+ # @return [Target]
+ # the target, which provides the product name
+ attr_reader :target
+
+ # Initialize a new instance
+ #
+ # @param [Target] target
+ # @see target
+ #
+ def initialize(target)
+ super(target.platform)
+ @target = target
+ end
+
+ # Generates the contents of the umbrella header according to the included
+ # pods.
+ #
+ # @return [String]
+ #
+ def generate
+ result = super
+
+ result << "\n"
+
+ result << <<-eos.strip_heredoc
+ FOUNDATION_EXPORT double #{target.product_module_name}VersionNumber;
+ FOUNDATION_EXPORT const unsigned char #{target.product_module_name}VersionString[];
+ eos
+
+ result << "\n"
+
+ result
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/hooks_manager.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/hooks_manager.rb
new file mode 100644
index 0000000..6cb5788
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/hooks_manager.rb
@@ -0,0 +1,132 @@
+require 'active_support/core_ext/hash/indifferent_access'
+
+module Pod
+ # Provides support for the hook system of CocoaPods. The system is designed
+ # especially for plugins. Interested clients can register for notifications
+ # by name.
+ #
+ # The blocks, to prevent compatibility issues, will receive
+ # one and only one argument: a context object. This object should be simple
+ # storage of information (a typed hash). Notification senders are
+ # responsible for indicating the class of the object associated with their
+ # notification name.
+ #
+ # The context object should not remove attribute accessors, so as not to
+ # break compatibility with plugins (this promise will be honoured strictly
+ # from CocoaPods 1.0).
+ #
+ module HooksManager
+ # Represents a single registered hook.
+ #
+ class Hook
+ # @return [String]
+ # The name of the plugin that registered the hook.
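+ #
+ # @example How a plugin typically registers a hook and supplies this
+ #   name (an illustrative sketch; the plugin name and message are
+ #   hypothetical)
+ #
+ #   Pod::HooksManager.register('cocoapods-example', :post_install) do |context|
+ #     Pod::UI.message "Pods installed under #{context.sandbox_root}"
+ #   end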
+ #
+ attr_reader :plugin_name
+
+ # @return [String]
+ # The name of the hook.
+ #
+ attr_reader :name
+
+ # @return [Proc]
+ # The block.
+ #
+ attr_reader :block
+
+ # Initialize a new instance
+ #
+ # @param [String] name @see {#name}.
+ #
+ # @param [String] plugin_name @see {#plugin_name}.
+ #
+ # @param [Proc] block @see {#block}.
+ #
+ def initialize(name, plugin_name, block)
+ raise ArgumentError, 'Missing name' unless name
+ raise ArgumentError, 'Missing plugin_name' unless plugin_name
+ raise ArgumentError, 'Missing block' unless block
+
+ @name = name
+ @plugin_name = plugin_name
+ @block = block
+ end
+ end
+
+ class << self
+ # @return [Hash{Symbol => Array<Hook>}] The list of the hooks that are
+ # registered for each hook name.
+ #
+ attr_reader :registrations
+
+ # Registers a block for the hook with the given name.
+ #
+ # @param [String] plugin_name
+ # The name of the plugin the hook comes from.
+ #
+ # @param [Symbol] hook_name
+ # The name of the notification.
+ #
+ # @param [Proc] block
+ # The block.
+ #
+ def register(plugin_name, hook_name, &block)
+ @registrations ||= {}
+ @registrations[hook_name] ||= []
+ @registrations[hook_name] << Hook.new(hook_name, plugin_name, block)
+ end
+
+ # Returns all the hooks to run for the given event name
+ # and set of whitelisted plugins
+ #
+ # @see #run
+ #
+ # @return [Array<Hook>] the hooks to run
+ #
+ def hooks_to_run(name, whitelisted_plugins = nil)
+ return [] unless registrations
+ hooks = registrations.fetch(name, [])
+ return hooks unless whitelisted_plugins
+ hooks.select { |hook| whitelisted_plugins.key?(hook.plugin_name) }
+ end
+
+ # Runs all the registered blocks for the hook with the given name.
+ #
+ # @param [Symbol] name
+ # The name of the hook.
+ #
+ # @param [Object] context
+ # The context object which should be passed to the blocks.
+ #
+ # @param [Hash] whitelisted_plugins
+ # The plugins that should be run, in the form of a hash keyed by
+ # plugin name, where the values are the custom options that should
+ # be passed to the hook's block if it supports taking a second
+ # argument.
+ #
+ def run(name, context, whitelisted_plugins = nil)
+ raise ArgumentError, 'Missing name' unless name
+ raise ArgumentError, 'Missing options' unless context
+
+ hooks = hooks_to_run(name, whitelisted_plugins)
+ return if hooks.empty?
+
+ UI.message "- Running #{name.to_s.tr('_', ' ')} hooks" do
+ hooks.each do |hook|
+ UI.message "- #{hook.plugin_name} from " \
+ "`#{hook.block.source_location.first}`" do
+ block = hook.block
+ if block.arity > 1
+ user_options = whitelisted_plugins[hook.plugin_name]
+ user_options = user_options.with_indifferent_access if user_options
+ block.call(context, user_options)
+ else
+ block.call(context)
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer.rb
new file mode 100644
index 0000000..d4d7fdb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer.rb
@@ -0,0 +1,1112 @@
+require 'active_support/core_ext/string/inflections'
+require 'fileutils'
+require 'cocoapods/podfile'
+
+module Pod
+ # The Installer is responsible for taking a Podfile and transforming it into
+ # the Pods libraries. It also integrates the user project so the Pods
+ # libraries can be used out of the box.
+ #
+ # The Installer is capable of doing incremental updates to an existing Pod
+ # installation.
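+ #
+ # A minimal programmatic sketch (assumes a configured `Pod::Config`;
+ # running `pod install` is the normal entry point):
+ #
+ #   config = Pod::Config.instance
+ #   installer = Pod::Installer.new(config.sandbox, config.podfile, config.lockfile)
+ #   installer.repo_update = false
+ #   installer.install!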
+ #
+ # The Installer gets the information that it needs mainly from 3 files:
+ #
+ # - Podfile: The specification written by the user that contains
+ # information about targets and Pods.
+ # - Podfile.lock: Contains information about the pods that were previously
+ # installed and in concert with the Podfile provides information about
+ # which specific version of a Pod should be installed. This file is
+ # ignored in update mode.
+ # - Manifest.lock: A file contained in the Pods folder that keeps track of
+ # the pods installed on the local machine. This file is used once the
+ # exact versions of the Pods have been computed to detect if that version
+ # is already installed. This file is not intended to be kept under source
+ # control and is a copy of the Podfile.lock.
+ #
+ # The Installer is designed to work in environments where the Podfile folder
+ # is under source control and environments where it is not. The rest of the
+ # files, like the user project and the workspace, are assumed to be under
+ # source control.
+ #
+ class Installer
+ autoload :Analyzer, 'cocoapods/installer/analyzer'
+ autoload :InstallationOptions, 'cocoapods/installer/installation_options'
+ autoload :PostInstallHooksContext, 'cocoapods/installer/post_install_hooks_context'
+ autoload :PreInstallHooksContext, 'cocoapods/installer/pre_install_hooks_context'
+ autoload :BaseInstallHooksContext, 'cocoapods/installer/base_install_hooks_context'
+ autoload :PostIntegrateHooksContext, 'cocoapods/installer/post_integrate_hooks_context'
+ autoload :PreIntegrateHooksContext, 'cocoapods/installer/pre_integrate_hooks_context'
+ autoload :SourceProviderHooksContext, 'cocoapods/installer/source_provider_hooks_context'
+ autoload :PodfileValidator, 'cocoapods/installer/podfile_validator'
+ autoload :PodSourceDownloader, 'cocoapods/installer/pod_source_downloader'
+ autoload :PodSourceInstaller, 'cocoapods/installer/pod_source_installer'
+ autoload :PodSourcePreparer, 'cocoapods/installer/pod_source_preparer'
+ autoload :UserProjectIntegrator, 'cocoapods/installer/user_project_integrator'
+ autoload :Xcode, 'cocoapods/installer/xcode'
+ autoload :SandboxHeaderPathsInstaller, 'cocoapods/installer/sandbox_header_paths_installer'
+ autoload :SandboxDirCleaner, 'cocoapods/installer/sandbox_dir_cleaner'
+ autoload :ProjectCache, 'cocoapods/installer/project_cache/project_cache'
+ autoload :TargetUUIDGenerator, 'cocoapods/installer/target_uuid_generator'
+
+ include Config::Mixin
+
+ MASTER_SPECS_REPO_GIT_URL = 'https://github.com/CocoaPods/Specs.git'.freeze
+
+ # @return [Sandbox] The sandbox where the Pods should be installed.
+ #
+ attr_reader :sandbox
+
+ # @return [Podfile] The Podfile specification that contains the information
+ # of the Pods that should be installed.
+ #
+ attr_reader :podfile
+
+ # @return [Lockfile] The Lockfile that stores the information about the
+ # Pods previously installed on any machine.
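+ #
+ # @example Loading the Lockfile from disk (a sketch; the path is
+ #   hypothetical)
+ #
+ #   lockfile = Pod::Lockfile.from_file(Pathname.new('Podfile.lock'))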
+ #
+ attr_reader :lockfile
+
+ # Initialize a new instance
+ #
+ # @param [Sandbox] sandbox @see #sandbox
+ # @param [Podfile] podfile @see #podfile
+ # @param [Lockfile] lockfile @see #lockfile
+ #
+ def initialize(sandbox, podfile, lockfile = nil)
+ @sandbox = sandbox || raise(ArgumentError, 'Missing required argument `sandbox`')
+ @podfile = podfile || raise(ArgumentError, 'Missing required argument `podfile`')
+ @lockfile = lockfile
+
+ @use_default_plugins = true
+ @has_dependencies = true
+ @pod_installers = []
+ end
+
+ # @return [Hash, Boolean, nil] Pods that have been requested to be
+ # updated or true if all Pods should be updated.
+ # If all Pods should be updated, the contents of the Lockfile are
+ # not taken into account for deciding what Pods to install.
+ #
+ attr_accessor :update
+
+ # @return [Boolean] Whether it has dependencies. Defaults to true.
+ #
+ attr_accessor :has_dependencies
+ alias_method :has_dependencies?, :has_dependencies
+
+ # @return [Boolean] Whether the spec repos should be updated.
+ #
+ attr_accessor :repo_update
+ alias_method :repo_update?, :repo_update
+
+ # @return [Boolean] Whether default plugins should be used during
+ # installation. Defaults to true.
+ #
+ attr_accessor :use_default_plugins
+ alias_method :use_default_plugins?, :use_default_plugins
+
+ # @return [Boolean] Whether installation should verify that there are no
+ # Podfile or Lockfile changes. Defaults to false.
+ #
+ attr_accessor :deployment
+ alias_method :deployment?, :deployment
+
+ # @return [Boolean] Whether installation should ignore the contents of the project cache
+ # when incremental installation is enabled.
+ #
+ attr_accessor :clean_install
+ alias_method :clean_install?, :clean_install
+
+ #-------------------------------------------------------------------------#
+
+ private
+
+ # @return [Array<PodSourceInstaller>] the pod installers created
+ # while installing pod targets
+ #
+ attr_reader :pod_installers
+
+ # @return [ProjectInstallationCache] The installation cache stored in Pods/.project_cache/installation_cache
+ #
+ attr_reader :installation_cache
+
+ # @return [ProjectMetadataCache] The metadata cache stored in Pods/.project_cache/metadata_cache
+ #
+ attr_reader :metadata_cache
+
+ # @return [ProjectCacheVersion] The version of the project cache stored in Pods/.project_cache/version
+ #
+ attr_reader :project_cache_version
+
+ #-------------------------------------------------------------------------#
+
+ public
+
+ # Installs the Pods.
+ #
+ # The installation process is mostly linear with a few minor complications
+ # to keep in mind:
+ #
+ # - The stored podspecs need to be cleaned before the resolution step
+ # otherwise the sandbox might return an old podspec and not download
+ # the new one from an external source.
+ # - The resolver might trigger the download of Pods from external sources
+ # necessary to retrieve their podspec (unless it is instructed not to
+ # do it).
+ #
+ # @return [void]
+ #
+ def install!
+ prepare
+ resolve_dependencies
+ download_dependencies
+ validate_targets
+ clean_sandbox
+ if installation_options.skip_pods_project_generation?
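+ # Illustrative only: this branch is reached when the Podfile opts out
+ # of project generation, e.g. with the installation option
+ # `install! 'cocoapods', :skip_pods_project_generation => true`.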
+ show_skip_pods_project_generation_message + run_podfile_post_install_hooks + else + integrate + end + write_lockfiles + perform_post_install_actions + end + + def show_skip_pods_project_generation_message + UI.section 'Skipping Pods Project Creation' + UI.section 'Skipping User Project Integration' + end + + def integrate + run_podfile_pre_integrate_hooks + generate_pods_project + if installation_options.integrate_targets? + integrate_user_project + else + UI.section 'Skipping User Project Integration' + end + end + + def analyze_project_cache + user_projects = aggregate_targets.map(&:user_project).compact.uniq + object_version = user_projects.min_by { |p| p.object_version.to_i }.object_version.to_i unless user_projects.empty? + + if !installation_options.incremental_installation + # Run entire installation. + ProjectCache::ProjectCacheAnalysisResult.new(pod_targets, aggregate_targets, {}, + analysis_result.all_user_build_configurations, object_version) + else + UI.message 'Analyzing Project Cache' do + @installation_cache = ProjectCache::ProjectInstallationCache.from_file(sandbox, sandbox.project_installation_cache_path) + @metadata_cache = ProjectCache::ProjectMetadataCache.from_file(sandbox, sandbox.project_metadata_cache_path) + @project_cache_version = ProjectCache::ProjectCacheVersion.from_file(sandbox.project_version_cache_path) + + force_clean_install = clean_install || project_cache_version.version != Version.create(VersionMetadata.project_cache_version) + cache_result = ProjectCache::ProjectCacheAnalyzer.new(sandbox, installation_cache, analysis_result.all_user_build_configurations, + object_version, plugins, pod_targets, aggregate_targets, installation_options.to_h, :clean_install => force_clean_install).analyze + aggregate_targets_to_generate = cache_result.aggregate_targets_to_generate || [] + pod_targets_to_generate = cache_result.pod_targets_to_generate + (aggregate_targets_to_generate + pod_targets_to_generate).each do |target| + UI.message "- Regenerating #{target.label}" + end + cache_result + end + end + end + + def prepare + # Raise if pwd is inside Pods + if Dir.pwd.start_with?(sandbox.root.to_path) + message = 'Command should be run from a directory outside Pods directory.' + message << "\n\n\tCurrent directory is #{UI.path(Pathname.pwd)}\n" + raise Informative, message + end + UI.message 'Preparing' do + deintegrate_if_different_major_version + sandbox.prepare + ensure_plugins_are_installed! + run_plugins_pre_install_hooks + end + end + + # @return [Analyzer] The analyzer used to resolve dependencies + # + def resolve_dependencies + plugin_sources = run_source_provider_hooks + analyzer = create_analyzer(plugin_sources) + + UI.section 'Updating local specs repositories' do + analyzer.update_repositories + end if repo_update? + + UI.section 'Analyzing dependencies' do + analyze(analyzer) + validate_build_configurations + end + + UI.section 'Verifying no changes' do + verify_no_podfile_changes! + verify_no_lockfile_changes! + end if deployment? + + analyzer + end + + def download_dependencies + UI.section 'Downloading dependencies' do + install_pod_sources + run_podfile_pre_install_hooks + clean_pod_sources + end + end + + # Stages the sandbox after analysis. + # + # @param [Sandbox] sandbox + # The sandbox to stage. + # + # @param [Array] pod_targets + # The list of all pod targets. + # + # @return [void] + # + def stage_sandbox(sandbox, pod_targets) + SandboxHeaderPathsInstaller.new(sandbox, pod_targets).install! 
+ end + + #-------------------------------------------------------------------------# + + # @!group Pods Project Generation + + private + + def create_generator(pod_targets_to_generate, aggregate_targets_to_generate, build_configurations, project_object_version, generate_multiple_pod_projects = false) + if generate_multiple_pod_projects + Xcode::MultiPodsProjectGenerator.new(sandbox, aggregate_targets_to_generate, pod_targets_to_generate, + build_configurations, installation_options, config, project_object_version, metadata_cache) + else + Xcode::SinglePodsProjectGenerator.new(sandbox, aggregate_targets_to_generate, pod_targets_to_generate, build_configurations, installation_options, config, project_object_version) + end + end + + # Generates the Xcode project(s) that go inside the `Pods/` directory. + # + def generate_pods_project + stage_sandbox(sandbox, pod_targets) + + cache_analysis_result = analyze_project_cache + pod_targets_to_generate = cache_analysis_result.pod_targets_to_generate + aggregate_targets_to_generate = cache_analysis_result.aggregate_targets_to_generate + + pod_targets_to_generate.each do |pod_target| + pod_target.build_headers.implode_path!(pod_target.headers_sandbox) + sandbox.public_headers.implode_path!(pod_target.headers_sandbox) + end + + create_and_save_projects(pod_targets_to_generate, aggregate_targets_to_generate, + cache_analysis_result.build_configurations, cache_analysis_result.project_object_version) + SandboxDirCleaner.new(sandbox, pod_targets, aggregate_targets).clean! + + update_project_cache(cache_analysis_result, target_installation_results) + end + + def create_and_save_projects(pod_targets_to_generate, aggregate_targets_to_generate, build_configurations, project_object_version) + UI.section 'Generating Pods project' do + generator = create_generator(pod_targets_to_generate, aggregate_targets_to_generate, + build_configurations, project_object_version, + installation_options.generate_multiple_pod_projects) + + pod_project_generation_result = generator.generate! + @target_installation_results = pod_project_generation_result.target_installation_results + @pods_project = pod_project_generation_result.project + # The `pod_target_subprojects` is used for backwards compatibility so that consumers can iterate over + # all pod targets across projects without needing to open each one. + @pod_target_subprojects = pod_project_generation_result.projects_by_pod_targets.keys + @generated_projects = ([pods_project] + pod_target_subprojects || []).compact + @generated_pod_targets = pod_targets_to_generate + @generated_aggregate_targets = aggregate_targets_to_generate || [] + projects_by_pod_targets = pod_project_generation_result.projects_by_pod_targets + + predictabilize_uuids(generated_projects) if installation_options.deterministic_uuids? + stabilize_target_uuids(generated_projects) + + projects_writer = Xcode::PodsProjectWriter.new(sandbox, generated_projects, + target_installation_results.pod_target_installation_results, installation_options) + projects_writer.write! 
do + run_podfile_post_install_hooks + end + + pods_project_pod_targets = pod_targets_to_generate - projects_by_pod_targets.values.flatten + all_projects_by_pod_targets = {} + pods_project_by_targets = { pods_project => pods_project_pod_targets } if pods_project + all_projects_by_pod_targets.merge!(pods_project_by_targets) if pods_project_by_targets + all_projects_by_pod_targets.merge!(projects_by_pod_targets) if projects_by_pod_targets + all_projects_by_pod_targets.each do |project, pod_targets| + generator.configure_schemes(project, pod_targets, pod_project_generation_result) + end + end + end + + def predictabilize_uuids(projects) + UI.message('- Generating deterministic UUIDs') { Xcodeproj::Project.predictabilize_uuids(projects) } + end + + def stabilize_target_uuids(projects) + UI.message('- Stabilizing target UUIDs') { TargetUUIDGenerator.new(projects).generate! } + end + + #-------------------------------------------------------------------------# + + public + + # @!group Installation results + + # @return [Analyzer::AnalysisResult] the result of the analysis performed during installation + # + attr_reader :analysis_result + + # @return [Array] the installation results produced by the pods project + # generator + # + attr_reader :target_installation_results + + # @return [Pod::Project] the `Pods/Pods.xcodeproj` project. + # + attr_reader :pods_project + + # @return [Array] the subprojects nested under pods_project. + # + attr_reader :pod_target_subprojects + + # @return [Array] The model representations of an + # aggregation of pod targets generated for a target definition + # in the Podfile as result of the analyzer. + # + attr_reader :aggregate_targets + + # @return [Array] The model representations of pod targets + # generated as result of the analyzer. + # + attr_reader :pod_targets + + # @return [Array] The list of projects generated from the installation. + # + attr_reader :generated_projects + + # @return [Array] The list of pod targets that were generated from the installation. + # + attr_reader :generated_pod_targets + + # @return [Array] The list of aggregate targets that were generated from the installation. + # + attr_reader :generated_aggregate_targets + + # @return [Array] The specifications that were installed. + # + attr_accessor :installed_specs + + #-------------------------------------------------------------------------# + + private + + # @!group Installation steps + + # Performs the analysis. + # + # @param [Analyzer] analyzer the analyzer to use for analysis + # + # @return [void] + # + def analyze(analyzer = create_analyzer) + @analysis_result = analyzer.analyze + @aggregate_targets = @analysis_result.targets + @pod_targets = @analysis_result.pod_targets + end + + def create_analyzer(plugin_sources = nil) + Analyzer.new(sandbox, podfile, lockfile, plugin_sources, has_dependencies?, update) + end + + # Ensures that the white-listed build configurations are known to prevent + # silent typos. + # + # @raise If an unknown user configuration is found. + # + def validate_build_configurations + whitelisted_configs = pod_targets. + flat_map(&:target_definitions). + flat_map(&:all_whitelisted_configurations). + map(&:downcase). + uniq + all_user_configurations = analysis_result.all_user_build_configurations.keys.map(&:downcase) + + remainder = whitelisted_configs - all_user_configurations + unless remainder.empty? + raise Informative, + "Unknown #{'configuration'.pluralize(remainder.size)} whitelisted: #{remainder.sort.to_sentence}. 
" \ + "CocoaPods found #{all_user_configurations.sort.to_sentence}, did you mean one of these?" + end + end + + # @return [void] Performs a general clean up of the sandbox related to the sandbox state that was + # calculated. For example, pods that were marked for deletion are removed. + # + def clean_sandbox + unless sandbox_state.deleted.empty? + title_options = { :verbose_prefix => '-> '.red } + sandbox_state.deleted.each do |pod_name| + UI.titled_section("Removing #{pod_name}".red, title_options) do + root_name = Specification.root_name(pod_name) + pod_dir = sandbox.local?(root_name) ? nil : sandbox.pod_dir(root_name) + sandbox.clean_pod(pod_name, pod_dir) + end + end + end + + # Check any changed pods that became local pods and used to be remote pods and + # ensure the sandbox is cleaned up. + unless sandbox_state.changed.empty? + sandbox_state.changed.each do |pod_name| + previous_spec_repo = sandbox.manifest.spec_repo(pod_name) + should_clean = !previous_spec_repo.nil? && sandbox.local?(pod_name) + sandbox.clean_pod(pod_name, sandbox.sources_root + Specification.root_name(pod_name)) if should_clean + end + end + end + + # @raise [Informative] If there are any Podfile changes + # + def verify_no_podfile_changes! + return unless analysis_result.podfile_needs_install? + + changed_state = analysis_result.podfile_state.to_s(:states => %i(added deleted changed)) + raise Informative, "There were changes to the podfile in deployment mode:\n#{changed_state}" + end + + # @raise [Informative] If there are any Lockfile changes + # + def verify_no_lockfile_changes! + new_lockfile = generate_lockfile + return if new_lockfile == lockfile + + return unless diff = Xcodeproj::Differ.hash_diff(lockfile.to_hash, new_lockfile.to_hash, :key_1 => 'Old Lockfile', :key_2 => 'New Lockfile') + pretty_diff = YAMLHelper.convert_hash(diff, Lockfile::HASH_KEY_ORDER, "\n\n") + pretty_diff.gsub!(':diff:', 'diff:'.yellow) + + raise Informative, "There were changes to the lockfile in deployment mode:\n#{pretty_diff}" + end + + # Downloads, installs the documentation and cleans the sources of the Pods + # which need to be installed. 
+ # + # @return [void] + # + def install_pod_sources + @downloaded_specs = [] + @installed_specs = [] + pods_to_install = sandbox_state.added | sandbox_state.changed + title_options = { :verbose_prefix => '-> '.green } + + sorted_root_specs = root_specs.sort_by(&:name) + + # Download pods in parallel before installing if the option is set + if installation_options.parallel_pod_downloads + require 'concurrent/executor/fixed_thread_pool' + thread_pool_size = installation_options.parallel_pod_download_thread_pool_size + thread_pool = Concurrent::FixedThreadPool.new(thread_pool_size, :idletime => 300) + + sorted_root_specs.each do |spec| + if pods_to_install.include?(spec.name) + title = section_title(spec, 'Downloading') + UI.titled_section(title.green, title_options) do + thread_pool.post do + download_source_of_pod(spec.name) + end + end + end + end + + thread_pool.shutdown + thread_pool.wait_for_termination + end + + # Install pods, which includes downloading only if parallel_pod_downloads is set to false + sorted_root_specs.each do |spec| + if pods_to_install.include?(spec.name) + title = section_title(spec, 'Installing') + UI.titled_section(title.green, title_options) do + install_source_of_pod(spec.name) + end + else + UI.section("Using #{spec}", title_options[:verbose_prefix]) do + create_pod_installer(spec.name) + end + end + end + end + + def section_title(spec, current_action) + if sandbox_state.changed.include?(spec.name) && sandbox.manifest + current_version = spec.version + previous_version = sandbox.manifest.version(spec.name) + has_changed_version = current_version != previous_version + current_repo = analysis_result.specs_by_source.detect { |key, values| break key if values.map(&:name).include?(spec.name) } + current_repo &&= (Pod::TrunkSource::TRUNK_REPO_NAME if current_repo.name == Pod::TrunkSource::TRUNK_REPO_NAME) || current_repo.url || current_repo.name + previous_spec_repo = sandbox.manifest.spec_repo(spec.name) + has_changed_repo = !previous_spec_repo.nil? && current_repo && !current_repo.casecmp(previous_spec_repo).zero? + title = "#{current_action} #{spec.name} #{spec.version}" + title << " (was #{previous_version} and source changed to `#{current_repo}` from `#{previous_spec_repo}`)" if has_changed_version && has_changed_repo + title << " (was #{previous_version})" if has_changed_version && !has_changed_repo + title << " (source changed to `#{current_repo}` from `#{previous_spec_repo}`)" if !has_changed_version && has_changed_repo + else + title = "#{current_action} #{spec}" + end + title + end + + def create_pod_installer(pod_name) + specs_by_platform = specs_for_pod(pod_name) + + if specs_by_platform.empty? + requiring_targets = pod_targets.select { |pt| pt.recursive_dependent_targets.any? { |dt| dt.pod_name == pod_name } } + message = "Could not install '#{pod_name}' pod" + message += ", depended upon by #{requiring_targets.to_sentence}" unless requiring_targets.empty? + message += '. There is either no platform to build for, or no target to build.' + raise StandardError, message + end + + pod_installer = PodSourceInstaller.new(sandbox, podfile, specs_by_platform, :can_cache => installation_options.clean?) + pod_installers << pod_installer + pod_installer + end + + def create_pod_downloader(pod_name) + specs_by_platform = specs_for_pod(pod_name) + + if specs_by_platform.empty? + requiring_targets = pod_targets.select { |pt| pt.recursive_dependent_targets.any? 
{ |dt| dt.pod_name == pod_name } }
+ message = "Could not download '#{pod_name}' pod"
+ message += ", depended upon by #{requiring_targets.to_sentence}" unless requiring_targets.empty?
+ message += '. There is either no platform to build for, or no target to build.'
+ raise StandardError, message
+ end
+
+ PodSourceDownloader.new(sandbox, podfile, specs_by_platform, :can_cache => installation_options.clean?)
+ end
+
+ # The specifications matching the specified pod name
+ #
+ # @param [String] pod_name the name of the pod
+ #
+ # @return [Hash{Platform => Array<Specification>}] the specifications grouped by platform
+ #
+ def specs_for_pod(pod_name)
+ pod_targets.each_with_object({}) do |pod_target, hash|
+ if pod_target.root_spec.name == pod_name
+ hash[pod_target.platform] ||= []
+ hash[pod_target.platform].concat(pod_target.specs)
+ end
+ end
+ end
+
+ # Installs the Pod. If the resolver indicated that a Pod should be
+ # installed and it exists, it is removed and then reinstalled. In any case,
+ # if the Pod doesn't exist it is installed.
+ #
+ # @return [void]
+ #
+ def install_source_of_pod(pod_name)
+ pod_installer = create_pod_installer(pod_name)
+ pod_installer.install!
+ @installed_specs.concat(pod_installer.specs_by_platform.values.flatten.uniq)
+ end
+
+ # Download the pod unless it is local or has been predownloaded from an
+ # external source.
+ #
+ # @return [void]
+ #
+ def download_source_of_pod(pod_name)
+ return if sandbox.local?(pod_name) || sandbox.predownloaded?(pod_name)
+
+ pod_downloader = create_pod_downloader(pod_name)
+ pod_downloader.download!
+ end
+
+ # Cleans the sources of the Pods if the config instructs to do so.
+ #
+ def clean_pod_sources
+ return unless installation_options.clean?
+ return if installed_specs.empty?
+ pod_installers.each(&:clean!)
+ end
+
+ # Unlocks the sources of the Pods.
+ #
+ def unlock_pod_sources
+ pod_installers.each do |installer|
+ pod_target = pod_targets.find { |target| target.pod_name == installer.name }
+ installer.unlock_files!(pod_target.file_accessors)
+ end
+ end
+
+ # Locks the sources of the Pods if the config instructs to do so.
+ #
+ def lock_pod_sources
+ return unless installation_options.lock_pod_sources?
+ pod_installers.each do |installer|
+ pod_target = pod_targets.find { |target| target.pod_name == installer.name }
+ installer.lock_files!(pod_target.file_accessors)
+ end
+ end
+
+ def validate_targets
+ validator = Xcode::TargetValidator.new(aggregate_targets, pod_targets, installation_options)
+ validator.validate!
+ end
+
+ # Runs the registered callbacks for the plugins pre install hooks.
+ #
+ # @return [void]
+ #
+ def run_plugins_pre_install_hooks
+ context = PreInstallHooksContext.generate(sandbox, podfile, lockfile)
+ HooksManager.run(:pre_install, context, plugins)
+ end
+
+ # Performs any post-installation actions
+ #
+ # @return [void]
+ #
+ def perform_post_install_actions
+ run_plugins_post_install_hooks
+ warn_for_deprecations
+ warn_for_installed_script_phases
+ warn_for_removing_git_master_specs_repo
+ print_post_install_message
+ end
+
+ def print_post_install_message
+ podfile_dependencies = analysis_result.podfile_dependency_cache.podfile_dependencies.size
+ pods_installed = root_specs.size
+ title_options = { :verbose_prefix => '-> '.green }
+ UI.titled_section('Pod installation complete! ' \
+ "There #{podfile_dependencies == 1 ?
'is' : 'are'} #{podfile_dependencies} " \ + "#{'dependency'.pluralize(podfile_dependencies)} from the Podfile " \ + "and #{pods_installed} total #{'pod'.pluralize(pods_installed)} installed.".green, + title_options) + end + + # Runs the registered callbacks for the plugins pre integrate hooks. + # + def run_plugins_pre_integrate_hooks + if any_plugin_pre_integrate_hooks? + context = PreIntegrateHooksContext.generate(sandbox, pods_project, pod_target_subprojects, aggregate_targets) + HooksManager.run(:pre_integrate, context, plugins) + end + end + + # Runs the registered callbacks for the plugins post install hooks. + # + def run_plugins_post_install_hooks + # This short-circuits because unlocking pod sources is expensive + if any_plugin_post_install_hooks? + unlock_pod_sources + + context = PostInstallHooksContext.generate(sandbox, pods_project, pod_target_subprojects, aggregate_targets) + HooksManager.run(:post_install, context, plugins) + end + + lock_pod_sources + end + + # Runs the registered callbacks for the plugins post integrate hooks. + # + def run_plugins_post_integrate_hooks + if any_plugin_post_integrate_hooks? + context = PostIntegrateHooksContext.generate(sandbox, pods_project, pod_target_subprojects, aggregate_targets) + HooksManager.run(:post_integrate, context, plugins) + end + end + + # @return [Boolean] whether there are any plugin pre-integrate hooks to run + # + def any_plugin_pre_integrate_hooks? + HooksManager.hooks_to_run(:pre_integrate, plugins).any? + end + + # @return [Boolean] whether there are any plugin post-install hooks to run + # + def any_plugin_post_install_hooks? + HooksManager.hooks_to_run(:post_install, plugins).any? + end + + # @return [Boolean] whether there are any plugin post-integrate hooks to run + # + def any_plugin_post_integrate_hooks? + HooksManager.hooks_to_run(:post_integrate, plugins).any? + end + + # Runs the registered callbacks for the source provider plugin hooks. + # + # @return [Array] the plugin sources + # + def run_source_provider_hooks + context = SourceProviderHooksContext.generate + HooksManager.run(:source_provider, context, plugins) + context.sources + end + + # Run the deintegrator against all projects in the installation root if the + # current CocoaPods major version part is different than the one in the + # lockfile. + # + # @return [void] + # + def deintegrate_if_different_major_version + return unless lockfile + return if lockfile.cocoapods_version.major == Version.create(VERSION).major + UI.section('Re-creating CocoaPods due to major version update.') do + projects = Pathname.glob(config.installation_root + '*.xcodeproj').map { |path| Xcodeproj::Project.open(path) } + deintegrator = Deintegrator.new + projects.each do |project| + config.with_changes(:silent => true) { deintegrator.deintegrate_project(project) } + project.save if project.dirty? + end + end + end + + # Ensures that all plugins specified in the {#podfile} are loaded. + # + # @return [void] + # + def ensure_plugins_are_installed! + require 'claide/command/plugin_manager' + + loaded_plugins = Command::PluginManager.specifications.map(&:name) + + podfile.plugins.keys.each do |plugin| + unless loaded_plugins.include? plugin + raise Informative, "Your Podfile requires that the plugin `#{plugin}` be installed. Please install it and try installation again." 
+ end + end + end + + DEFAULT_PLUGINS = {} + + # Returns the plugins that should be run, as indicated by the default + # plugins and the podfile's plugins + # + # @return [Hash] The plugins to be used + # + def plugins + if use_default_plugins? + DEFAULT_PLUGINS.merge(podfile.plugins) + else + podfile.plugins + end + end + + # Prints a warning for any pods that are deprecated + # + # @return [void] + # + def warn_for_deprecations + deprecated_pods = root_specs.select do |spec| + spec.deprecated || spec.deprecated_in_favor_of + end + deprecated_pods.each do |spec| + if spec.deprecated_in_favor_of + UI.warn "#{spec.name} has been deprecated in " \ + "favor of #{spec.deprecated_in_favor_of}" + else + UI.warn "#{spec.name} has been deprecated" + end + end + end + + # Prints a warning for any pods that included script phases + # + # @return [void] + # + def warn_for_installed_script_phases + pods_to_install = sandbox_state.added | sandbox_state.changed + pod_targets.group_by(&:pod_name).each do |name, pod_targets| + if pods_to_install.include?(name) && !sandbox.local?(name) + script_phase_count = pod_targets.inject(0) { |sum, target| sum + target.script_phases.count } + unless script_phase_count.zero? + UI.warn "#{name} has added #{script_phase_count} #{'script phase'.pluralize(script_phase_count)}. " \ + 'Please inspect before executing a build. See `https://guides.cocoapods.org/syntax/podspec.html#script_phases` for more information.' + end + end + end + end + + # Prints a warning if the project is not explicitly using the git based master specs repo. + # + # Helps users to delete the git based master specs repo from the repos directory which reduces `--repo-update` + # speed and hopefully reduces Github workload. + # + # @return [void] + # + def warn_for_removing_git_master_specs_repo + return unless installation_options.warn_for_unused_master_specs_repo? + plugin_sources = run_source_provider_hooks + all_sources = podfile.sources + plugin_sources.map(&:url) + master_source = all_sources.find { |source| source == MASTER_SPECS_REPO_GIT_URL } + master_repo = config.sources_manager.all.find { |s| s.url == MASTER_SPECS_REPO_GIT_URL } + if master_source.nil? && !master_repo.nil? + UI.warn 'Your project does not explicitly specify the CocoaPods master specs repo. Since CDN is now used as the' \ + ' default, you may safely remove it from your repos directory via `pod repo remove master`. To suppress this warning' \ + ' please add `warn_for_unused_master_specs_repo => false` to your Podfile.' + end + end + + # @return [Lockfile] The lockfile to write to disk. + # + def generate_lockfile + external_source_pods = analysis_result.podfile_dependency_cache.podfile_dependencies.select(&:external_source).map(&:root_name).uniq + checkout_options = sandbox.checkout_sources.select { |root_name, _| external_source_pods.include? root_name } + Lockfile.generate(podfile, analysis_result.specifications, checkout_options, analysis_result.specs_by_source) + end + + # Writes the Podfile and the lock files. + # + # @return [void] + # + def write_lockfiles + @lockfile = generate_lockfile + + UI.message "- Writing Lockfile in #{UI.path config.lockfile_path}" do + # No need to invoke Sandbox#update_changed_file here since this logic already handles checking if the + # contents of the file are the same. 
+ @lockfile.write_to_disk(config.lockfile_path) + end + + UI.message "- Writing Manifest in #{UI.path sandbox.manifest_path}" do + # No need to invoke Sandbox#update_changed_file here since this logic already handles checking if the + # contents of the file are the same. + @lockfile.write_to_disk(sandbox.manifest_path) + end + end + + # @param [ProjectCacheAnalysisResult] cache_analysis_result + # The cache analysis result for the current installation. + # + # @param [Hash{String => TargetInstallationResult}] target_installation_results + # The installation results for pod targets installed. + # + def update_project_cache(cache_analysis_result, target_installation_results) + return unless installation_cache || metadata_cache + installation_cache.update_cache_key_by_target_label!(cache_analysis_result.cache_key_by_target_label) + installation_cache.update_project_object_version!(cache_analysis_result.project_object_version) + installation_cache.update_build_configurations!(cache_analysis_result.build_configurations) + installation_cache.update_podfile_plugins!(plugins) + installation_cache.update_installation_options!(installation_options.to_h) + installation_cache.save_as(sandbox.project_installation_cache_path) + + metadata_cache.update_metadata!(target_installation_results.pod_target_installation_results || {}, + target_installation_results.aggregate_target_installation_results || {}) + metadata_cache.save_as(sandbox.project_metadata_cache_path) + + cache_version = ProjectCache::ProjectCacheVersion.new(VersionMetadata.project_cache_version) + cache_version.save_as(sandbox.project_version_cache_path) + end + + # Integrates the user projects adding the dependencies on the CocoaPods + # libraries, setting them up to use the xcconfigs and performing other + # actions. This step is also responsible of creating the workspace if + # needed. + # + # @return [void] + # + def integrate_user_project + UI.section "Integrating client #{'project'.pluralize(aggregate_targets.map(&:user_project_path).uniq.count)}" do + installation_root = config.installation_root + integrator = UserProjectIntegrator.new(podfile, sandbox, installation_root, aggregate_targets, generated_aggregate_targets, + :use_input_output_paths => !installation_options.disable_input_output_paths?) + integrator.integrate! + run_podfile_post_integrate_hooks + end + end + + #-------------------------------------------------------------------------# + + private + + # @!group Hooks + + # Runs the pre install hooks of the installed specs and of the Podfile. + # + # @return [void] + # + def run_podfile_pre_install_hooks + UI.message '- Running pre install hooks' do + executed = run_podfile_pre_install_hook + UI.message '- Podfile' if executed + end + end + + # Runs the pre install hook of the Podfile + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_pre_install_hook + podfile.pre_install!(self) + rescue => e + raise Informative, 'An error occurred while processing the pre-install ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the pre integrate hooks of the installed specs and of the Podfile. + # + # @note Pre integrate hooks run _before_ generation of the Pods project. 
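+ #
+ # @example A Podfile `pre_integrate` hook (an illustrative sketch)
+ #
+ #   pre_integrate do |installer|
+ #     puts "About to generate #{installer.pod_targets.count} pod targets"
+ #   end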
+ # + # @return [void] + # + def run_podfile_pre_integrate_hooks + UI.message '- Running pre integrate hooks' do + executed = run_podfile_pre_integrate_hook + UI.message '- Podfile' if executed + end + end + + # Runs the pre integrate hook of the Podfile. + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_pre_integrate_hook + podfile.pre_integrate!(self) + rescue => e + raise Informative, 'An error occurred while processing the pre-integrate ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the post install hooks of the installed specs and of the Podfile. + # + # @note Post install hooks run _before_ saving of project, so that they + # can alter it before it is written to the disk. + # + # @return [void] + # + def run_podfile_post_install_hooks + UI.message '- Running post install hooks' do + executed = run_podfile_post_install_hook + UI.message '- Podfile' if executed + end + end + + # Runs the post install hook of the Podfile + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_post_install_hook + podfile.post_install!(self) + rescue => e + raise Informative, 'An error occurred while processing the post-install ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + + # Runs the post integrate hooks of the installed specs and of the Podfile. + # + # @note Post integrate hooks run _after_ saving of project, so that they + # can alter it after it is written to the disk. + # + # @return [void] + # + def run_podfile_post_integrate_hooks + UI.message '- Running post integrate hooks' do + executed = run_podfile_post_integrate_hook + UI.message '- Podfile' if executed + end + end + + # Runs the post integrate hook of the Podfile. + # + # @raise Raises an informative if the hooks raises. + # + # @return [Boolean] Whether the hook was run. + # + def run_podfile_post_integrate_hook + podfile.post_integrate!(self) + rescue => e + raise Informative, 'An error occurred while processing the post-integrate ' \ + 'hook of the Podfile.' \ + "\n\n#{e.message}\n\n#{e.backtrace * "\n"}" + end + #-------------------------------------------------------------------------# + + public + + # @param [Array] targets + # + # @return [Array] The targets of the development pods generated by + # the installation process. This can be used as a convenience method for external scripts. + # + def development_pod_targets(targets = pod_targets) + targets.select do |pod_target| + sandbox.local?(pod_target.pod_name) + end + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Array] All the root specifications of the + # installation. + # + def root_specs + analysis_result.specifications.map(&:root).uniq + end + + # @return [SpecsState] The state of the sandbox returned by the analyzer. 
+ # + def sandbox_state + analysis_result.sandbox_state + end + + # @return [InstallationOptions] the installation options to use during install + # + def installation_options + podfile.installation_options + end + + #-------------------------------------------------------------------------# + + public + + # @!group Convenience Methods + + def self.targets_from_sandbox(sandbox, podfile, lockfile) + raise Informative, 'You must run `pod install` to be able to generate target information' unless lockfile + + new(sandbox, podfile, lockfile).instance_exec do + plugin_sources = run_source_provider_hooks + analyzer = create_analyzer(plugin_sources) + analyze(analyzer) + if analysis_result.podfile_needs_install? + raise Pod::Informative, 'The Podfile has changed, you must run `pod install`' + elsif analysis_result.sandbox_needs_install? + raise Pod::Informative, 'The `Pods` directory is out-of-date, you must run `pod install`' + end + + aggregate_targets + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer.rb new file mode 100644 index 0000000..d13c190 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer.rb @@ -0,0 +1,1208 @@ +require 'cocoapods/podfile' + +module Pod + class Installer + # Analyzes the Podfile, the Lockfile, and the sandbox manifest to generate + # the information relative to a CocoaPods installation. + # + class Analyzer + include Config::Mixin + + autoload :AnalysisResult, 'cocoapods/installer/analyzer/analysis_result' + autoload :LockingDependencyAnalyzer, 'cocoapods/installer/analyzer/locking_dependency_analyzer' + autoload :PodfileDependencyCache, 'cocoapods/installer/analyzer/podfile_dependency_cache' + autoload :PodVariant, 'cocoapods/installer/analyzer/pod_variant' + autoload :PodVariantSet, 'cocoapods/installer/analyzer/pod_variant_set' + autoload :SandboxAnalyzer, 'cocoapods/installer/analyzer/sandbox_analyzer' + autoload :SpecsState, 'cocoapods/installer/analyzer/specs_state' + autoload :TargetInspectionResult, 'cocoapods/installer/analyzer/target_inspection_result' + autoload :TargetInspector, 'cocoapods/installer/analyzer/target_inspector' + + # @return [String] The version of iOS which requires binaries with only 64-bit architectures + # + IOS_64_BIT_ONLY_VERSION = Version.new('11.0') + + # @return [Integer] The Xcode object version until which 64-bit architectures should be manually specified + # + # Xcode 10 will automatically select the correct architectures based on deployment target + IOS_64_BIT_ONLY_PROJECT_VERSION = 50 + + # @return [Sandbox] The sandbox to use for this analysis. + # + attr_reader :sandbox + + # @return [Podfile] The Podfile specification that contains the information of the Pods that should be installed. + # + attr_reader :podfile + + # @return [Lockfile, nil] The Lockfile, if available, that stores the information about the Pods previously installed. + # + attr_reader :lockfile + + # @return [Array] Sources provided by plugins or `nil`. + # + attr_reader :plugin_sources + + # @return [Boolean] Whether the analysis has dependencies and thus sources must be configured. + # + # @note This is used by the `pod lib lint` command to prevent update of specs when not needed. 
+ # + attr_reader :has_dependencies + alias_method :has_dependencies?, :has_dependencies + + # @return [Hash, Boolean, nil] Pods that have been requested to be updated or true if all Pods should be updated. + # This can be false if no pods should be updated. + # + attr_reader :pods_to_update + + # @return [InstallationOptions] the installation options specified by the Podfile + # + attr_reader :installation_options + + # @return [Source::Manager] the sources manager to use when resolving dependencies + # + attr_reader :sources_manager + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Podfile] podfile @see #podfile + # @param [Lockfile, nil] lockfile @see #lockfile + # @param [Array] plugin_sources @see #plugin_sources + # @param [Boolean] has_dependencies @see #has_dependencies + # @param [Hash, Boolean, nil] pods_to_update @see #pods_to_update + # @param [Source::Manager] sources_manager @see #sources_manager + # + def initialize(sandbox, podfile, lockfile = nil, plugin_sources = nil, has_dependencies = true, + pods_to_update = false, sources_manager = Source::Manager.new(config.repos_dir)) + @sandbox = sandbox + @podfile = podfile + @lockfile = lockfile + @plugin_sources = plugin_sources + @has_dependencies = has_dependencies + @pods_to_update = pods_to_update + @installation_options = podfile.installation_options + @podfile_dependency_cache = PodfileDependencyCache.from_podfile(podfile) + @sources_manager = sources_manager + @path_lists = {} + @result = nil + end + + # Performs the analysis. + # + # The Podfile and the Lockfile provide the information necessary to + # compute which specification should be installed. The manifest of the + # sandbox returns which specifications are installed. + # + # @param [Boolean] allow_fetches + # whether external sources may be fetched + # + # @return [AnalysisResult] + # + def analyze(allow_fetches = true) + return @result if @result + validate_podfile! + validate_lockfile_version! + if installation_options.integrate_targets? + target_inspections = inspect_targets_to_integrate + else + verify_platforms_specified! + target_inspections = {} + end + podfile_state = generate_podfile_state + + store_existing_checkout_options + if allow_fetches == :outdated + # special-cased -- we're only really resolving for outdated, rather than doing a full analysis + elsif allow_fetches == true + fetch_external_sources(podfile_state) + elsif !dependencies_to_fetch(podfile_state).all?(&:local?) + raise Informative, 'Cannot analyze without fetching dependencies since the sandbox is not up-to-date. Run `pod install` to ensure all dependencies have been fetched.' 
\ + "\n The missing dependencies are:\n \t#{dependencies_to_fetch(podfile_state).reject(&:local?).join("\n \t")}" + end + + locked_dependencies = generate_version_locking_dependencies(podfile_state) + resolver_specs_by_target = resolve_dependencies(locked_dependencies) + validate_platforms(resolver_specs_by_target) + specifications = generate_specifications(resolver_specs_by_target) + aggregate_targets, pod_targets = generate_targets(resolver_specs_by_target, target_inspections) + sandbox_state = generate_sandbox_state(specifications) + specs_by_target = resolver_specs_by_target.each_with_object({}) do |rspecs_by_target, hash| + hash[rspecs_by_target[0]] = rspecs_by_target[1].map(&:spec) + end + specs_by_source = Hash[resolver_specs_by_target.values.flatten(1).group_by(&:source).map do |source, specs| + [source, specs.map(&:spec).uniq] + end] + sources.each { |s| specs_by_source[s] ||= [] } + @result = AnalysisResult.new(podfile_state, specs_by_target, specs_by_source, specifications, sandbox_state, + aggregate_targets, pod_targets, @podfile_dependency_cache) + end + + # Updates the git source repositories. + # + def update_repositories + sources.each do |source| + if source.updateable? + sources_manager.update(source.name, true) + else + UI.message "Skipping `#{source.name}` update because the repository is not an updateable repository." + end + end + @specs_updated = true + end + + # Returns the sources used to query for specifications. + # + # When no explicit Podfile sources or plugin sources are defined, this defaults to the master spec repository. + # + # @return [Array] the sources to be used in finding specifications, as specified by the podfile or all + # sources. + # + def sources + @sources ||= begin + sources = podfile.sources + plugin_sources = @plugin_sources || [] + + # Add any sources specified using the :source flag on individual dependencies. + dependency_sources = podfile_dependencies.map(&:podspec_repo).compact + all_dependencies_have_sources = dependency_sources.count == podfile_dependencies.count + + if all_dependencies_have_sources + sources = dependency_sources + elsif has_dependencies? && sources.empty? && plugin_sources.empty? + sources = [Pod::TrunkSource::TRUNK_REPO_URL] + dependency_sources + else + sources += dependency_sources + end + + result = sources.uniq.map do |source_url| + sources_manager.find_or_create_source_with_url(source_url) + end + unless plugin_sources.empty? + result.insert(0, *plugin_sources) + plugin_sources.each do |source| + sources_manager.add_source(source) + end + end + result + end + end + + #-----------------------------------------------------------------------# + + private + + # @!group Configuration + + # @return [Boolean] Whether the version of the dependencies which did not + # change in the Podfile should be locked. + # + def update_mode? + pods_to_update != nil + end + + # @return [Symbol] Whether and how the dependencies in the Podfile + # should be updated. + # + def update_mode + if !pods_to_update + :none + elsif pods_to_update == true + :all + elsif !pods_to_update[:pods].nil? + :selected + end + end + + def podfile_dependencies + @podfile_dependency_cache.podfile_dependencies + end + + #-----------------------------------------------------------------------# + + def validate_podfile! + validator = Installer::PodfileValidator.new(podfile, @podfile_dependency_cache) + validator.validate + + unless validator.valid? 
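+ # Surface every error collected by the validator as a single failure;
+ # non-fatal warnings are printed separately below.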
+ raise Informative, validator.message
+ end
+ validator.warnings.uniq.each { |w| UI.warn(w) }
+ end
+
+ # @!group Analysis steps
+
+ # @note The warning about the version of the Lockfile doesn't use the
+ # `UI.warn` method because it prints the output only at the end
+ # of the installation. At that time CocoaPods could have crashed.
+ #
+ def validate_lockfile_version!
+ if lockfile && lockfile.cocoapods_version > Version.new(VERSION)
+ STDERR.puts '[!] The version of CocoaPods used to generate ' \
+ "the lockfile (#{lockfile.cocoapods_version}) is "\
+ "higher than the version of the current executable (#{VERSION}). " \
+ 'Incompatibility issues may arise.'.yellow
+ end
+ end
+
+ # Compares the {Podfile} with the {Lockfile} in order to detect which
+ # dependencies should be locked.
+ #
+ # @return [SpecsState] the states of the Podfile specs.
+ #
+ # @note As the target definitions share the same sandbox they should have
+ # the same version of a Pod. For this reason this method returns
+ # the name of the Pod (root name of the dependencies) and doesn't
+ # group them by target definition.
+ #
+ def generate_podfile_state
+ if lockfile
+ pods_state = nil
+ UI.section 'Finding Podfile changes' do
+ pods_by_state = lockfile.detect_changes_with_podfile(podfile)
+ pods_state = SpecsState.new(pods_by_state)
+ pods_state.print if config.verbose?
+ end
+ pods_state
+ else
+ state = SpecsState.new
+ state.added.merge(podfile_dependencies.map(&:root_name))
+ state
+ end
+ end
+
+ # Copies the pod targets of any of the app embedded aggregate targets into
+ # their potential host aggregate target, if that potential host aggregate target's
+ # user_target hosts any of the app embedded aggregate targets' user_targets
+ #
+ # @param [AggregateTarget] aggregate_target the aggregate target whose user_target
+ # might host one or more of the embedded aggregate targets' user_targets
+ #
+ # @param [Array] embedded_aggregate_targets the aggregate targets
+ # representing the embedded targets to be integrated
+ #
+ # @param [Boolean] libraries_only if true, only library-type embedded
+ # targets are considered, otherwise, all other types have
+ # their pods copied to their host targets as well (extensions, etc.)
+ #
+ # @return [Hash{String=>Array}] the additional pod targets to include in the host
+ # keyed by their configuration.
+ #
+ def embedded_target_pod_targets_by_host(aggregate_target, embedded_aggregate_targets, libraries_only)
+ return {} if aggregate_target.requires_host_target?
+ aggregate_user_target_uuids = Set.new(aggregate_target.user_targets.map(&:uuid))
+ embedded_pod_targets_by_build_config = Hash.new([].freeze)
+ embedded_aggregate_targets.each do |embedded_aggregate_target|
+ # Skip non libraries in library-only mode
+ next if libraries_only && !embedded_aggregate_target.library?
+ next if aggregate_target.search_paths_aggregate_targets.include?(embedded_aggregate_target)
+ next unless embedded_aggregate_target.user_targets.any? do |embedded_user_target|
+ # You have to ask the host target's project for the host targets of
+ # the embedded target, as opposed to asking user_project for the
+ # embedded targets of the host target. The latter doesn't work when
+ # the embedded target lives in a sub-project. The lines below get
+ # the host target uuids for the embedded target and check to see if
+ # those match any of the user_target uuids in the aggregate_target.
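+ # A non-empty UUID intersection below means this aggregate target's
+ # user target hosts the embedded user target.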
+ host_target_uuids = Set.new(aggregate_target.user_project.host_targets_for_embedded_target(embedded_user_target).map(&:uuid))
+ !aggregate_user_target_uuids.intersection(host_target_uuids).empty?
+ end
+ embedded_aggregate_target.user_build_configurations.each_key do |configuration_name|
+ pod_target_names = Set.new(aggregate_target.pod_targets_for_build_configuration(configuration_name).map(&:name))
+ embedded_pod_targets = embedded_aggregate_target.pod_targets_for_build_configuration(configuration_name).select do |pod_target|
+ if !pod_target_names.include?(pod_target.name) &&
+ aggregate_target.pod_targets.none? { |aggregate_pod_target| (pod_target.specs - aggregate_pod_target.specs).empty? } &&
+ (libraries_only || pod_target.build_as_dynamic?)
+ pod_target.name
+ end
+ end
+ embedded_pod_targets_by_build_config[configuration_name] += embedded_pod_targets
+ end
+ end
+ embedded_pod_targets_by_build_config
+ end
+
+ # Raises an error if there are embedded targets in the Podfile, but
+ # their host targets have not been declared in the Podfile. As it
+ # finds host targets, it collects information on host target types.
+ #
+ # @param [Array] aggregate_targets the generated
+ # aggregate targets
+ #
+ # @param [Array] embedded_aggregate_targets the aggregate targets
+ # representing the embedded targets to be integrated
+ #
+ def analyze_host_targets_in_podfile(aggregate_targets, embedded_aggregate_targets)
+ target_definitions_by_uuid = {}
+ cli_host_with_dynamic_linkage = []
+ cli_product_type = 'com.apple.product-type.tool'
+ # Collect aggregate target definitions by uuid to later look up host target
+ # definitions and verify their compatibility with their embedded targets
+ aggregate_targets.each do |target|
+ target.user_targets.each do |user_target|
+ target_definition = target.target_definition
+ target_definitions_by_uuid[user_target.uuid] = target_definition
+ if user_target.product_type == cli_product_type && target_definition.build_type.linkage == :dynamic
+ cli_host_with_dynamic_linkage << user_target
+ end
+ end
+ end
+ aggregate_target_user_projects = aggregate_targets.map(&:user_project)
+ embedded_targets_missing_hosts = []
+ host_uuid_to_embedded_target_definitions = {}
+ # Search all of the known user projects for each embedded target's hosts
+ embedded_aggregate_targets.each do |target|
+ host_uuids = aggregate_target_user_projects.product(target.user_targets).flat_map do |user_project, user_target|
+ user_project.host_targets_for_embedded_target(user_target).map(&:uuid)
+ end
+ # For each host, keep track of its embedded target definitions
+ # to later verify each embedded target's compatibility with its host,
+ # ignoring the hosts that aren't known to CocoaPods (no target
+ # definitions in the Podfile)
+ host_uuids.each do |uuid|
+ (host_uuid_to_embedded_target_definitions[uuid] ||= []) << target.target_definition if target_definitions_by_uuid.key? uuid
+ end
+ # If none of the hosts are known to CocoaPods (no target definitions
+ # in the Podfile), add it to the list of targets missing hosts
+ embedded_targets_missing_hosts << target unless host_uuids.any? do |uuid|
+ target_definitions_by_uuid.key? uuid
+ end
+ end
+
+ unless cli_host_with_dynamic_linkage.empty?
+ UI.warn "The Podfile contains command line tool target(s) (#{cli_host_with_dynamic_linkage.map(&:name).to_sentence}) which are attempting to integrate dynamic frameworks or libraries."
\
+ "\n" \
+ 'This may not behave as expected, because command line tools are usually distributed as a single binary and cannot contain their own dynamic dependencies.'
+ end
+
+ unless embedded_targets_missing_hosts.empty?
+ embedded_targets_missing_hosts_product_types = Set.new embedded_targets_missing_hosts.flat_map(&:user_targets).map(&:symbol_type)
+ target_names = embedded_targets_missing_hosts.map do |target|
+ target.name.sub('Pods-', '') # Make the target names more recognizable to the user
+ end.join ', '
+ # If the targets missing hosts are only frameworks, then this is likely
+ # a project for doing framework development. In that case, just warn that
+ # the frameworks that these targets depend on won't be integrated anywhere
+ if embedded_targets_missing_hosts_product_types.subset?(Set.new([:framework, :static_library]))
+ UI.warn "The Podfile contains framework or static library targets (#{target_names}), for which the Podfile does not contain host targets (targets which embed the framework)." \
+ "\n" \
+ 'If this project is for doing framework development, you can ignore this message. Otherwise, add a target to the Podfile that embeds these frameworks to make this message go away (e.g. a test target).'
+ else
+ raise Informative, "Unable to find host target(s) for #{target_names}. Please add the host targets for the embedded targets to the Podfile." \
+ "\n" \
+ 'Certain kinds of targets require a host target. A host target is a "parent" target which embeds a "child" target. These are example types of targets that need a host target:' \
+ "\n- Framework" \
+ "\n- App Extension" \
+ "\n- Watch OS 1 Extension" \
+ "\n- Messages Extension (except when used with a Messages Application)"
+ end
+ end
+
+ target_mismatches = []
+ host_uuid_to_embedded_target_definitions.each do |uuid, target_definitions|
+ host_target_definition = target_definitions_by_uuid[uuid]
+ target_definitions.each do |target_definition|
+ unless host_target_definition.uses_frameworks? == target_definition.uses_frameworks?
+ target_mismatches << "- #{host_target_definition.name} (#{host_target_definition.uses_frameworks?}) and #{target_definition.name} (#{target_definition.uses_frameworks?}) do not both set use_frameworks!."
+ end
+ end
+ end
+
+ unless target_mismatches.empty?
+ heading = 'Unable to integrate the following embedded targets with their respective host targets (a host target is a "parent" target which embeds a "child" target like a framework or extension):'
+ raise Informative, heading + "\n\n" + target_mismatches.sort.uniq.join("\n")
+ end
+ end
+
+ # Creates the models that represent the targets generated by CocoaPods.
+ #
+ # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target
+ # mapping of targets to resolved specs (containing information about test usage)
+ #
+ # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections
+ # the user target inspections used to construct the aggregate and pod targets.
+ #
+ # @return [(Array, Array)] the list of aggregate targets generated,
+ # and the list of pod targets generated.
+ #
+ def generate_targets(resolver_specs_by_target, target_inspections)
+ resolver_specs_by_target = resolver_specs_by_target.reject { |td, _| td.abstract?
&& !td.platform } + pod_targets = generate_pod_targets(resolver_specs_by_target, target_inspections) + pod_targets_by_target_definition = group_pod_targets_by_target_definition(pod_targets, resolver_specs_by_target) + aggregate_targets = resolver_specs_by_target.keys.reject(&:abstract?).map do |target_definition| + generate_aggregate_target(target_definition, target_inspections, pod_targets_by_target_definition) + end + aggregate_targets.each do |target| + search_paths_aggregate_targets = aggregate_targets.select do |aggregate_target| + target.target_definition.targets_to_inherit_search_paths.include?(aggregate_target.target_definition) + end + target.search_paths_aggregate_targets.concat(search_paths_aggregate_targets).freeze + end + + aggregate_targets.each do |aggregate_target| + is_app_extension = !(aggregate_target.user_targets.map(&:symbol_type) & + [:app_extension, :watch_extension, :watch2_extension, :tv_extension, :messages_extension]).empty? + is_app_extension ||= aggregate_target.user_targets.any? do |user_target| + user_target.common_resolved_build_setting('APPLICATION_EXTENSION_API_ONLY', :resolve_against_xcconfig => true) == 'YES' + end + if is_app_extension + aggregate_target.mark_application_extension_api_only + aggregate_target.pod_targets.each(&:mark_application_extension_api_only) + end + + build_library_for_distribution = aggregate_target.user_targets.any? do |user_target| + user_target.common_resolved_build_setting('BUILD_LIBRARY_FOR_DISTRIBUTION', :resolve_against_xcconfig => true) == 'YES' + end + if build_library_for_distribution + aggregate_target.mark_build_library_for_distribution + aggregate_target.pod_targets.each(&:mark_build_library_for_distribution) + end + end + + if installation_options.integrate_targets? + # Copy embedded target pods that cannot have their pods embedded as frameworks to + # their host targets, and ensure we properly link library pods to their host targets + embedded_targets = aggregate_targets.select(&:requires_host_target?) + analyze_host_targets_in_podfile(aggregate_targets, embedded_targets) + + use_frameworks_embedded_targets, non_use_frameworks_embedded_targets = embedded_targets.partition(&:build_as_framework?) + aggregate_targets = aggregate_targets.map do |aggregate_target| + # For targets that require dynamic frameworks, we always have to copy their pods to their + # host targets because those frameworks will all be loaded from the host target's bundle + embedded_pod_targets = embedded_target_pod_targets_by_host(aggregate_target, use_frameworks_embedded_targets, false) + + # For targets that don't require dynamic frameworks, we only have to consider library-type + # targets because their host targets will still need to link their pods + embedded_pod_targets.merge!(embedded_target_pod_targets_by_host(aggregate_target, non_use_frameworks_embedded_targets, true)) + + next aggregate_target if embedded_pod_targets.empty? + aggregate_target.merge_embedded_pod_targets(embedded_pod_targets) + end + end + [aggregate_targets, pod_targets] + end + + # Setup the aggregate target for a single user target + # + # @param [TargetDefinition] target_definition + # the target definition for the user target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @param [Hash{TargetDefinition => Array}] pod_targets_by_target_definition + # the pod targets grouped by target. 
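+ #
+ # @note When the Podfile opts out of integration (for example with
+ # `install! 'cocoapods', :integrate_targets => false`), no user
+ # project is inspected and defaults derived from the target
+ # definition are used instead.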
+ # + # @return [AggregateTarget] + # + def generate_aggregate_target(target_definition, target_inspections, pod_targets_by_target_definition) + if installation_options.integrate_targets? + target_inspection = target_inspections[target_definition] + raise "missing inspection for #{target_definition.inspect}" unless target_inspection + target_requires_64_bit = Analyzer.requires_64_bit_archs?(target_definition.platform, target_inspection.project.object_version) + user_project = target_inspection.project + client_root = target_inspection.client_root + user_target_uuids = target_inspection.project_target_uuids + user_build_configurations = target_inspection.build_configurations + archs = target_requires_64_bit ? ['$(ARCHS_STANDARD_64_BIT)'] : target_inspection.archs + else + target_requires_64_bit = Analyzer.requires_64_bit_archs?(target_definition.platform, nil) + user_project = nil + client_root = config.installation_root.realpath + user_target_uuids = [] + user_build_configurations = target_definition.build_configurations || Target::DEFAULT_BUILD_CONFIGURATIONS + archs = target_requires_64_bit ? ['$(ARCHS_STANDARD_64_BIT)'] : [] + end + platform = target_definition.platform + build_configurations = user_build_configurations.keys.concat(target_definition.all_whitelisted_configurations).uniq + pod_targets_for_build_configuration = filter_pod_targets_for_target_definition(target_definition, + pod_targets_by_target_definition, + build_configurations) + build_type = target_definition.uses_frameworks? ? BuildType.static_framework : BuildType.static_library + AggregateTarget.new(sandbox, build_type, user_build_configurations, archs, platform, target_definition, + client_root, user_project, user_target_uuids, pod_targets_for_build_configuration) + end + + # Returns a filtered list of pod targets that should or should not be part of the target definition. Pod targets + # used by tests only are filtered. + # + # @return [Hash{TargetDefinition => Array}] + # + def group_pod_targets_by_target_definition(pod_targets, resolver_specs_by_target) + pod_targets_by_target_definition = Hash.new { |h, td| h[td] = [] } + pod_targets.each do |pod_target| + pod_target.target_definitions.each do |td| + pod_targets_by_target_definition[td] << pod_target + end + end + resolver_specs_by_target.each do |td, resolver_specs| + specs_by_pod_name = resolver_specs.group_by { |s| s.root.name } + specs_by_pod_name.reject! { |_, specs| specs.all?(&:used_by_non_library_targets_only?) } + pod_targets_by_target_definition[td].keep_if { |pod_target| specs_by_pod_name.key?(pod_target.pod_name) } + end + + pod_targets_by_target_definition + end + + # Returns a filtered list of pod targets that should or should not be part of the target definition. Pod targets + # used by tests only are filtered. + # + # @param [TargetDefinition] target_definition + # the target definition to use as the base for filtering + # + # @param [Hash{TargetDefinition => Array}] pod_targets_by_target_definition + # the pod targets grouped by target. + # + # @param [Array] build_configurations + # The list of all build configurations the targets will be built for. + # + # @return [Hash{String => Array}] + # the filtered list of pod targets, grouped by build configuration. 
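+ #
+ # @example (illustrative; `DebugOnlyPod` is a hypothetical pod)
+ # # Given a Podfile entry such as:
+ # # pod 'DebugOnlyPod', :configurations => ['Debug']
+ # # with build configurations ['Debug', 'Release'], the returned hash
+ # # contains the DebugOnlyPod target under 'Debug' but not under 'Release'.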
+ # + def filter_pod_targets_for_target_definition(target_definition, pod_targets_by_target_definition, + build_configurations) + pod_targets_by_build_config = Hash.new([].freeze) + build_configurations.each { |config| pod_targets_by_build_config[config] = [] } + + dependencies_by_root_name = @podfile_dependency_cache.target_definition_dependencies(target_definition).group_by(&:root_name) + + pod_targets_by_target_definition[target_definition].each do |pod_target| + pod_name = pod_target.pod_name + dependencies = dependencies_by_root_name[pod_name] || [] + + build_configurations.each do |configuration_name| + whitelists = dependencies.map do |dependency| + target_definition.pod_whitelisted_for_configuration?(dependency.name, configuration_name) + end.uniq + + case whitelists + when [], [true] then nil + when [false] then next + else + raise Informative, "The subspecs of `#{pod_name}` are linked to " \ + "different build configurations for the `#{target_definition}` " \ + 'target. CocoaPods does not currently support subspecs across ' \ + 'different build configurations.' + end + + pod_targets_by_build_config[configuration_name] << pod_target + end + end + + pod_targets_by_build_config + end + + # Setup the pod targets for an aggregate target. Deduplicates resulting + # targets by grouping by platform and subspec by their root + # to create a {PodTarget} for each spec. + # + # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications grouped by target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @return [Array] + # + def generate_pod_targets(resolver_specs_by_target, target_inspections) + if installation_options.deduplicate_targets? + distinct_targets = resolver_specs_by_target.each_with_object({}) do |dependency, hash| + target_definition, dependent_specs = *dependency + dependent_specs.group_by(&:root).each do |root_spec, resolver_specs| + all_specs = resolver_specs.map(&:spec) + all_specs_by_type = all_specs.group_by(&:spec_type) + library_specs = all_specs_by_type[:library] || [] + test_specs = all_specs_by_type[:test] || [] + app_specs = all_specs_by_type[:app] || [] + build_type = determine_build_type(root_spec, target_definition.build_type) + pod_variant = PodVariant.new(library_specs, test_specs, app_specs, target_definition.platform, build_type) + hash[root_spec] ||= {} + (hash[root_spec][pod_variant] ||= []) << target_definition + pod_variant_spec = hash[root_spec].keys.find { |k| k == pod_variant } + pod_variant_spec.test_specs.concat(test_specs).uniq! + pod_variant_spec.app_specs.concat(app_specs).uniq! + end + end + + # Remap pod variants to a new instance that includes the Swift version since we now have the full set + # of target definitions. + distinct_targets = Hash[distinct_targets.map do |root, target_definitions_by_variant| + variants = Hash[target_definitions_by_variant.map do |variant, target_definitions| + swift_version = determine_swift_version(variant.root_spec, target_definitions) + [variant.scoped_with_swift_version(swift_version), target_definitions] + end] + [root, variants] + end] + + pod_targets = distinct_targets.flat_map do |_root, target_definitions_by_variant| + target_definitions_by_variant.each_value do |target_definitions| + target_definitions.reject!(&:abstract?) unless target_definitions.all?(&:abstract?) 
+ end + suffixes = PodVariantSet.new(target_definitions_by_variant.keys).scope_suffixes + target_definitions_by_variant.map do |variant, target_definitions| + all_specs = variant.specs + variant.test_specs + variant.app_specs + generate_pod_target(target_definitions, variant.build_type, target_inspections, all_specs, + :scope_suffix => suffixes[variant], :swift_version => variant.swift_version) + end + end + + all_specs = resolver_specs_by_target.values.flatten.map(&:spec).uniq.group_by(&:name) + compute_pod_target_dependencies(pod_targets, all_specs) + else + dedupe_cache = {} + resolver_specs_by_target.flat_map do |target_definition, specs| + grouped_specs = specs.group_by(&:root).values.uniq + pod_targets = grouped_specs.flat_map do |pod_specs| + build_type = determine_build_type(pod_specs.first.spec, target_definition.build_type) + swift_version = determine_swift_version(pod_specs.first.spec, [target_definition]) + generate_pod_target([target_definition], build_type, target_inspections, pod_specs.map(&:spec), + :swift_version => swift_version).scoped(dedupe_cache) + end + + compute_pod_target_dependencies(pod_targets, specs.map(&:spec).group_by(&:name)) + end + end + end + + # Compute the dependencies for the set of pod targets. + # + # @param [Array] pod_targets + # pod targets. + # + # @param [Hash{String => Array}] all_specs + # specifications grouped by name. + # + # @return [Array] + # + def compute_pod_target_dependencies(pod_targets, all_specs) + pod_targets_by_name = pod_targets.group_by(&:pod_name).each_with_object({}) do |(name, values), hash| + # Sort the target by the number of activated subspecs, so that + # we prefer a minimal target as transitive dependency. + hash[name] = values.sort_by { |pt| pt.specs.count } + end + + pod_targets.each do |target| + dependencies_by_config = dependencies_for_specs(target.library_specs, target.platform, all_specs) + target.dependent_targets_by_config = Hash[dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + + target.test_dependent_targets_by_spec_name_by_config = target.test_specs.each_with_object({}) do |test_spec, hash| + test_dependencies_by_config = dependencies_for_specs([test_spec], target.platform, all_specs) + test_dependencies_by_config.each { |config, deps| deps.delete_if { |k, _| dependencies_by_config[config].key? k } } + hash[test_spec.name] = Hash[test_dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + end + + target.app_dependent_targets_by_spec_name_by_config = target.app_specs.each_with_object({}) do |app_spec, hash| + app_dependencies_by_config = dependencies_for_specs([app_spec], target.platform, all_specs) + app_dependencies_by_config.each { |config, deps| deps.delete_if { |k, _| dependencies_by_config[config].key? 
k } } + hash[app_spec.name] = Hash[app_dependencies_by_config.map { |k, v| [k, filter_dependencies(v, pod_targets_by_name, target)] }] + end + + target.test_app_hosts_by_spec = target.test_specs.each_with_object({}) do |test_spec, hash| + next unless app_host_name = test_spec.consumer(target.platform).app_host_name + app_host_spec = pod_targets_by_name[Specification.root_name(app_host_name)].flat_map(&:app_specs).find do |pt| + pt.name == app_host_name + end + app_host_dependencies = { app_host_spec.root => [app_host_spec] } + hash[test_spec] = [app_host_spec, filter_dependencies(app_host_dependencies, pod_targets_by_name, target).first] + end + end + end + + def filter_dependencies(dependencies, pod_targets_by_name, target) + dependencies.map do |root_spec, deps| + pod_targets_by_name[root_spec.name].find do |t| + next false if t.platform.symbolic_name != target.platform.symbolic_name || + # In the case of variants we must ensure that the platform this target is meant for is the same + # as the one we are interested in. + t.target_definitions.first.platform != target.target_definitions.first.platform || + # rather than target type or requires_frameworks? since we want to group by what was specified in that + # _target definition_. + t.build_as_framework? != target.build_as_framework? + spec_names = t.specs.map(&:name) + deps.all? { |dep| spec_names.include?(dep.name) } + end + end + end + + # Returns the specs upon which the given specs _directly_ depend. + # + # @note: This is implemented in the analyzer, because we don't have to + # care about the requirements after dependency resolution. + # + # @param [Array] specs + # The specs, whose dependencies should be returned. + # + # @param [Platform] platform + # The platform for which the dependencies should be returned. + # + # @param [Hash{String => Array}] all_specs + # All specifications which are installed alongside. + # + # @return [Hash{Symbol => Set}] + # + def dependencies_for_specs(specs, platform, all_specs) + dependent_specs = { + :debug => Set.new, + :release => Set.new, + } + + if !specs.empty? && !all_specs.empty? + specs.each do |s| + s.dependencies(platform).each do |dep| + all_specs[dep.name].each do |spec| + if spec.non_library_specification? + if s.test_specification? && spec.name == s.consumer(platform).app_host_name && spec.app_specification? + # This needs to be handled separately, since we _don't_ want to treat this as a "normal" dependency + next + end + raise Informative, "`#{s}` depends upon `#{spec}`, which is a `#{spec.spec_type}` spec." + end + + dependent_specs.each do |config, set| + next unless s.dependency_whitelisted_for_configuration?(dep, config) + set << spec + end + end + end + end + end + + Hash[dependent_specs.map { |k, v| [k, (v - specs).group_by(&:root)] }].freeze + end + + # Create a target for each spec group + # + # @param [Array] target_definitions + # the target definitions of the pod target + # + # @param [BuildType] build_type + # the BuildType to use for this pod target. + # + # @param [Hash{TargetDefinition => TargetInspectionResult}] target_inspections + # the user target inspections used to construct the aggregate and pod targets. + # + # @param [Array] specs + # the specifications of an equal root. 
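+ # (library, test, and app specs alike, all sharing the same root pod)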
+ #
+ # @param [String] scope_suffix
+ # @see PodTarget#scope_suffix
+ #
+ # @param [String] swift_version
+ # @see PodTarget#swift_version
+ #
+ # @return [PodTarget]
+ #
+ def generate_pod_target(target_definitions, build_type, target_inspections, specs, scope_suffix: nil,
+ swift_version: nil)
+ target_inspections = target_inspections.select { |t, _| target_definitions.include?(t) }.values
+ object_version = target_inspections.map { |ti| ti.project.object_version }.min
+ target_requires_64_bit = target_definitions.all? { |td| Analyzer.requires_64_bit_archs?(td.platform, object_version) }
+ if !target_inspections.empty?
+ user_build_configurations = target_inspections.map(&:build_configurations).reduce({}, &:merge)
+ archs = if target_requires_64_bit
+ ['$(ARCHS_STANDARD_64_BIT)']
+ else
+ target_inspections.flat_map(&:archs).compact.uniq.sort
+ end
+ else
+ user_build_configurations = Target::DEFAULT_BUILD_CONFIGURATIONS.merge(
+ target_definitions.map { |td| td.build_configurations || {} }.reduce({}, &:merge),
+ )
+ archs = target_requires_64_bit ? ['$(ARCHS_STANDARD_64_BIT)'] : []
+ end
+ platform = determine_platform(specs, target_definitions, build_type)
+ file_accessors = create_file_accessors(specs, platform)
+ PodTarget.new(sandbox, build_type, user_build_configurations, archs, platform, specs, target_definitions,
+ file_accessors, scope_suffix, swift_version)
+ end
+
+ # Creates the file accessors for a given pod.
+ #
+ # @param [Array] specs
+ # the specs to map each file accessor to.
+ #
+ # @param [Platform] platform
+ # the platform to use when generating each file accessor.
+ #
+ # @return [Array]
+ #
+ def create_file_accessors(specs, platform)
+ name = specs.first.name
+ pod_root = sandbox.pod_dir(name)
+ path_list = @path_lists.fetch(pod_root) do |root|
+ path_list = Sandbox::PathList.new(root)
+ @path_lists[root] = path_list
+ end
+ specs.map do |spec|
+ Sandbox::FileAccessor.new(path_list, spec.consumer(platform))
+ end
+ end
+
+ # Calculates and returns the platform to use for the given list of specs and target definitions.
+ #
+ # @note The platform is only determined by the library specs and ignores non-library ones. Subspecs are always
+ # integrated in the same target as the root spec, therefore the max deployment target is always returned
+ # across the specs passed.
+ #
+ # @param [Array] specs
+ # the specs to inspect and calculate the platform for.
+ #
+ # @param [Array] target_definitions
+ # the target definitions these specs are part of.
+ #
+ # @param [BuildType] build_type
+ # the #BuildType used for calculating the platform.
+ #
+ # @return [Platform]
+ #
+ def determine_platform(specs, target_definitions, build_type)
+ library_specs = specs.select(&:library_specification?)
+ platform_name = target_definitions.first.platform.name
+ default = Podfile::TargetDefinition::PLATFORM_DEFAULTS[platform_name]
+ deployment_target = library_specs.map do |library_spec|
+ Version.new(library_spec.deployment_target(platform_name) || default)
+ end.max
+ if platform_name == :ios && build_type.framework?
+ minimum = Version.new('8.0')
+ deployment_target = [deployment_target, minimum].max
+ end
+ Platform.new(platform_name, deployment_target)
+ end
+
+ # Determines the Swift version for the given spec within a list of target definitions. If the pod author has
+ # provided a set of Swift versions supported by their pod then the max Swift version is chosen, unless the target
+ # definitions specify explicit requirements for supported Swift versions.
Otherwise the Swift version is derived
+ # from the target definitions that integrate this pod.
+ #
+ # @param [Specification] spec
+ # the specs to inspect and determine what Swift version to use.
+ #
+ # @param [Array] target_definitions
+ # the target definitions the spec is part of.
+ #
+ # @return [String, nil] the computed Swift version or `nil` if the Swift version could not be determined.
+ #
+ def determine_swift_version(spec, target_definitions)
+ if spec.swift_versions.empty?
+ target_definitions.map(&:swift_version).compact.uniq.first
+ else
+ spec.swift_versions.sort.reverse_each.find do |swift_version|
+ target_definitions.all? do |td|
+ td.supports_swift_version?(swift_version)
+ end
+ end.to_s
+ end
+ end
+
+ # Calculates and returns the #BuildType to use for the given spec. If the spec specifies `static_framework` then
+ # it is honored as long as the host #BuildType also requires its pods to be integrated as frameworks.
+ #
+ # @param [Specification] spec
+ # the spec to determine the #BuildType for.
+ #
+ # @param [BuildType] target_definition_build_type
+ # The desired #BuildType by the target definition that integrates this target. If the pod target spec does
+ # not explicitly specify a `static_framework` #BuildType then the one from the target definition is used.
+ #
+ # @return [BuildType]
+ #
+ def determine_build_type(spec, target_definition_build_type)
+ if target_definition_build_type.framework?
+ root_spec = spec.root
+ root_spec.static_framework ? BuildType.static_framework : target_definition_build_type
+ else
+ BuildType.static_library
+ end
+ end
+
+ # Generates dependencies that require the specific version of the Pods
+ # that haven't changed in the {Lockfile}.
+ #
+ # These dependencies are passed to the {Resolver}, unless the installer
+ # is in update mode, to prevent it from upgrading the Pods that weren't
+ # changed in the {Podfile}.
+ #
+ # @param [SpecState] podfile_state
+ # the state of the podfile for which dependencies have or have not changed, added, deleted or updated.
+ #
+ # @return [Molinillo::DependencyGraph] the dependencies
+ # generated by the lockfile that prevent the resolver from updating
+ # a Pod.
+ #
+ def generate_version_locking_dependencies(podfile_state)
+ if update_mode == :all || !lockfile
+ LockingDependencyAnalyzer.unlocked_dependency_graph
+ else
+ deleted_and_changed = podfile_state.changed + podfile_state.deleted
+ deleted_and_changed += pods_to_update[:pods] if update_mode == :selected
+ local_pod_names = podfile_dependencies.select(&:local?).map(&:root_name)
+ pods_to_unlock = local_pod_names.to_set.delete_if do |pod_name|
+ next unless sandbox_specification = sandbox.specification(pod_name)
+ sandbox_specification.checksum == lockfile.checksum(pod_name)
+ end
+ LockingDependencyAnalyzer.generate_version_locking_dependencies(lockfile, deleted_and_changed, pods_to_unlock)
+ end
+ end
+
+ # Fetches the podspecs of external sources if modifications to the
+ # sandbox are allowed.
+ #
+ # @note In update mode all the external sources are refreshed while in
+ # normal mode they are refreshed only if added or changed in the
+ # Podfile. Moreover, in normal mode specifications for unchanged Pods
+ # which are missing or are generated from a local source are
+ # fetched as well.
+ #
+ # @note It is possible to perform this step before the resolution
+ # process because external sources identify a single specific
+ # version (checkout).
If the other dependencies are not + # compatible with the version reported by the podspec of the + # external source the resolver will raise. + # + # @param [SpecState] podfile_state + # the state of the podfile for which dependencies have or have not changed, added, deleted or updated. + # + # @return [void] + # + def fetch_external_sources(podfile_state) + verify_no_pods_with_different_sources! + deps = dependencies_to_fetch(podfile_state) + pods = pods_to_fetch(podfile_state) + return if deps.empty? + UI.section 'Fetching external sources' do + deps.sort.each do |dependency| + fetch_external_source(dependency, !pods.include?(dependency.root_name)) + end + end + end + + def verify_no_pods_with_different_sources! + deps_with_different_sources = podfile_dependencies.group_by(&:root_name). + select { |_root_name, dependencies| dependencies.map(&:external_source).uniq.count > 1 } + deps_with_different_sources.each do |root_name, dependencies| + raise Informative, 'There are multiple dependencies with different ' \ + "sources for `#{root_name}` in #{UI.path podfile.defined_in_file}:" \ + "\n\n- #{dependencies.map(&:to_s).join("\n- ")}" + end + end + + def fetch_external_source(dependency, use_lockfile_options) + source = if use_lockfile_options && lockfile && checkout_options = lockfile.checkout_options_for_pod_named(dependency.root_name) + ExternalSources.from_params(checkout_options, dependency, podfile.defined_in_file, installation_options.clean?) + else + ExternalSources.from_dependency(dependency, podfile.defined_in_file, installation_options.clean?) + end + source.fetch(sandbox) + end + + def dependencies_to_fetch(podfile_state) + @deps_to_fetch ||= begin + deps_to_fetch = [] + deps_with_external_source = podfile_dependencies.select(&:external_source) + + if update_mode == :all + deps_to_fetch = deps_with_external_source + else + deps_to_fetch = deps_with_external_source.select { |dep| pods_to_fetch(podfile_state).include?(dep.root_name) } + deps_to_fetch_if_needed = deps_with_external_source.select { |dep| podfile_state.unchanged.include?(dep.root_name) } + deps_to_fetch += deps_to_fetch_if_needed.select do |dep| + sandbox.specification_path(dep.root_name).nil? || + !dep.external_source[:path].nil? || + !sandbox.pod_dir(dep.root_name).directory? || + checkout_requires_update?(dep) + end + end + deps_to_fetch.uniq(&:root_name) + end + end + + def checkout_requires_update?(dependency) + return true unless lockfile && sandbox.manifest + locked_checkout_options = lockfile.checkout_options_for_pod_named(dependency.root_name) + sandbox_checkout_options = sandbox.manifest.checkout_options_for_pod_named(dependency.root_name) + locked_checkout_options != sandbox_checkout_options + end + + def pods_to_fetch(podfile_state) + @pods_to_fetch ||= begin + pods_to_fetch = podfile_state.added + podfile_state.changed + if update_mode == :selected + pods_to_fetch += pods_to_update[:pods] + elsif update_mode == :all + pods_to_fetch += podfile_state.unchanged + podfile_state.deleted + end + pods_to_fetch += podfile_dependencies. + select { |dep| Hash(dep.external_source).key?(:podspec) && sandbox.specification_path(dep.root_name).nil? }. 
+ map(&:root_name)
+ pods_to_fetch
+ end
+ end
+
+ def store_existing_checkout_options
+ return unless lockfile
+ podfile_dependencies.select(&:external_source).each do |dep|
+ if checkout_options = lockfile.checkout_options_for_pod_named(dep.root_name)
+ sandbox.store_checkout_source(dep.root_name, checkout_options)
+ end
+ end
+ end
+
+ # Converts the Podfile into a list of specifications grouped by target.
+ #
+ # @note As some dependencies might have external sources the resolver
+ # is aware of the {Sandbox} and interacts with it to download the
+ # podspecs of the external sources. This is necessary because the
+ # resolver needs their specifications to analyze their
+ # dependencies.
+ #
+ # @note The specifications of the external sources which are added,
+ # modified or removed need to be deleted from the sandbox before the
+ # resolution process. Otherwise the resolver might use an
+ # incorrect specification instead of pre-downloading it.
+ #
+ # @note In update mode the resolver is set to always update the specs
+ # from external sources.
+ #
+ # @return [Hash{TargetDefinition => Array}] the specifications
+ # grouped by target.
+ #
+ def resolve_dependencies(locked_dependencies)
+ duplicate_dependencies = podfile_dependencies.group_by(&:name).
+ select { |_name, dependencies| dependencies.count > 1 }
+ duplicate_dependencies.each do |name, dependencies|
+ UI.warn "There are duplicate dependencies on `#{name}` in #{UI.path podfile.defined_in_file}:\n\n" \
+ "- #{dependencies.map(&:to_s).join("\n- ")}"
+ end
+
+ resolver_specs_by_target = nil
+ UI.section "Resolving dependencies of #{UI.path(podfile.defined_in_file) || 'Podfile'}" do
+ resolver = Pod::Resolver.new(sandbox, podfile, locked_dependencies, sources, @specs_updated, :sources_manager => sources_manager)
+ resolver_specs_by_target = resolver.resolve
+ resolver_specs_by_target.values.flatten(1).map(&:spec).each(&:validate_cocoapods_version)
+ end
+ resolver_specs_by_target
+ end
+
+ # Warns for any specification that is incompatible with its target.
+ #
+ # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target
+ # the resolved specifications grouped by target.
+ #
+ def validate_platforms(resolver_specs_by_target)
+ resolver_specs_by_target.each do |target, specs|
+ specs.map(&:spec).each do |spec|
+ next unless target_platform = target.platform
+ unless spec.available_platforms.any? { |p| target_platform.supports?(p) }
+ UI.warn "The platform of the target `#{target.name}` " \
+ "(#{target.platform}) may not be compatible with `#{spec}` which has " \
+ "a minimum requirement of #{spec.available_platforms.join(' - ')}."
+ end
+ end
+ end
+ end
+
+ # Returns the list of all the resolved specifications.
+ #
+ # @param [Hash{TargetDefinition => Array}] resolver_specs_by_target
+ # the resolved specifications grouped by target.
+ #
+ # @return [Array] the list of the specifications.
+ #
+ def generate_specifications(resolver_specs_by_target)
+ resolver_specs_by_target.values.flatten.map(&:spec).uniq
+ end
+
+ # Computes the state of the sandbox with respect to the resolved
+ # specifications.
+ #
+ # @return [SpecsState] the representation of the state of the manifest
+ # specifications.
+ #
+ def generate_sandbox_state(specifications)
+ sandbox_state = nil
+ UI.section 'Comparing resolved specification to the sandbox manifest' do
+ sandbox_analyzer = SandboxAnalyzer.new(sandbox, podfile, specifications, update_mode?)
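+ # The sandbox analyzer diffs the resolved specifications against the
+ # sandbox manifest, classifying each pod as added, changed, deleted or unchanged.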
+ sandbox_state = sandbox_analyzer.analyze
+ sandbox_state.print
+ end
+ sandbox_state
+ end
+
+ class << self
+ # @param [Platform] platform
+ # The platform to build against
+ #
+ # @param [String, nil] object_version
+ # The user project's object version, or nil if not available
+ #
+ # @return [Boolean] Whether the platform requires 64-bit architectures
+ #
+ def requires_64_bit_archs?(platform, object_version)
+ return false unless platform
+ case platform.name
+ when :osx
+ true
+ when :ios
+ if (version = object_version)
+ platform.deployment_target >= IOS_64_BIT_ONLY_VERSION && version.to_i < IOS_64_BIT_ONLY_PROJECT_VERSION
+ else
+ platform.deployment_target >= IOS_64_BIT_ONLY_VERSION
+ end
+ when :watchos
+ false
+ when :tvos
+ false
+ end
+ end
+ end
+
+ #-----------------------------------------------------------------------#
+
+ # @!group Analysis sub-steps
+
+ # Checks whether the platform is specified if not integrating
+ #
+ # @return [void]
+ #
+ def verify_platforms_specified!
+ return if installation_options.integrate_targets?
+ @podfile_dependency_cache.target_definition_list.each do |target_definition|
+ if !target_definition.empty? && target_definition.platform.nil?
+ raise Informative, 'It is necessary to specify the platform in the Podfile if not integrating.'
+ end
+ end
+ end
+
+ # Precompute information for each target_definition in the Podfile
+ #
+ # @note The platforms are computed and added to each target_definition
+ # because it might be necessary to infer the platform from the
+ # user targets.
+ #
+ # @return [Hash{TargetDefinition => TargetInspectionResult}]
+ #
+ def inspect_targets_to_integrate
+ inspection_result = {}
+ UI.section 'Inspecting targets to integrate' do
+ inspectors = @podfile_dependency_cache.target_definition_list.map do |target_definition|
+ next if target_definition.abstract?
+ TargetInspector.new(target_definition, config.installation_root)
+ end.compact
+ inspectors.group_by(&:compute_project_path).each do |project_path, target_inspectors|
+ project = Xcodeproj::Project.open(project_path)
+ target_inspectors.each do |inspector|
+ target_definition = inspector.target_definition
+ results = inspector.compute_results(project)
+ inspection_result[target_definition] = results
+ UI.message('Using `ARCHS` setting to build architectures of ' \
+ "target `#{target_definition.label}`: (`#{results.archs.join('`, `')}`)")
+ end
+ end
+ end
+ inspection_result
+ end
+ end
+ end
+end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/analysis_result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/analysis_result.rb new file mode 100644 index 0000000..267060b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/analysis_result.rb @@ -0,0 +1,87 @@ +module Pod
+ class Installer
+ class Analyzer
+ # A simple container produced after an analysis is completed by the {Analyzer}.
+ #
+ class AnalysisResult
+ # @return [SpecsState] the states of the Podfile specs.
+ #
+ attr_reader :podfile_state
+
+ # @return [Hash{TargetDefinition => Array}] the specifications grouped by target.
+ #
+ attr_reader :specs_by_target
+
+ # @return [Hash{Source => Array}] the specifications grouped by spec repo source.
+ #
+ attr_reader :specs_by_source
+
+ # @return [Array] the specifications of the resolved version of Pods that should be installed.
+ #
+ attr_reader :specifications
+
+ # @return [SpecsState] the states of the {Sandbox} with respect to the resolved specifications.
+ #
+ attr_reader :sandbox_state
+
+ # @return [Array] The aggregate targets created for each {TargetDefinition} from the {Podfile}.
+ #
+ attr_reader :targets
+
+ # @return [Array] The pod targets created for all the aggregate targets.
+ #
+ attr_reader :pod_targets
+
+ # @return [PodfileDependencyCache] the cache of all dependencies in the podfile.
+ #
+ attr_reader :podfile_dependency_cache
+
+ def initialize(podfile_state, specs_by_target, specs_by_source, specifications, sandbox_state, targets, pod_targets,
+ podfile_dependency_cache)
+ @podfile_state = podfile_state
+ @specs_by_target = specs_by_target
+ @specs_by_source = specs_by_source
+ @specifications = specifications
+ @sandbox_state = sandbox_state
+ @targets = targets
+ @pod_targets = pod_targets
+ @podfile_dependency_cache = podfile_dependency_cache
+ end
+
+ # @return [Hash{String=>Symbol}] A hash representing all the user build
+ # configurations across all integration targets. Each key
+ # corresponds to the name of a configuration and its value to
+ # its type (`:debug` or `:release`).
+ #
+ def all_user_build_configurations
+ targets.reduce({}) do |result, target|
+ result.merge(target.user_build_configurations)
+ end
+ end
+
+ # @return [Boolean] Whether an installation should be performed or this
+ # CocoaPods project is already up to date.
+ #
+ def needs_install?
+ podfile_needs_install? || sandbox_needs_install?
+ end
+
+ # @return [Boolean] Whether the podfile has changes with respect to the lockfile.
+ #
+ def podfile_needs_install?
+ state = podfile_state
+ needing_install = state.added.length + state.changed.length + state.deleted.length
+ needing_install > 0
+ end
+
+ # @return [Boolean] Whether the sandbox is in sync with the lockfile.
+ #
+ def sandbox_needs_install?
+ state = sandbox_state
+ needing_install = state.added.length + state.changed.length + state.deleted.length
+ needing_install > 0
+ end
+ end
+ end
+ end
+end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb new file mode 100644 index 0000000..7cd419d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/locking_dependency_analyzer.rb @@ -0,0 +1,103 @@ +require 'molinillo/dependency_graph'
+
+module Pod
+ class Installer
+ class Analyzer
+ # Generates dependencies that require the specific version of the Pods
+ # that haven't changed in the {Lockfile}.
+ module LockingDependencyAnalyzer
+ # Generates dependencies that require the specific version of the Pods
+ # that haven't changed in the {Lockfile}.
+ #
+ # These dependencies are passed to the {Resolver}, unless the installer
+ # is in update mode, to prevent it from upgrading the Pods that weren't
+ # changed in the {Podfile}.
+ #
+ # @param [Lockfile] lockfile the lockfile containing dependency constraints
+ #
+ # @param [Array] pods_to_update
+ # List of pod names which need to be updated because the installer is
+ # in update mode for these pods. Pods in this list and all their recursive dependencies
+ # will not be included in the generated dependency graph
+ #
+ # @param [Array] pods_to_unlock
+ # List of pod names whose version constraints will be removed from the generated dependency graph.
+ # Recursive dependencies of the pods won't be affected. This is currently used to force local pods
+ # to be evaluated again whenever the checksum of the specification of a local pod changes.
+ #
+ # @return [Molinillo::DependencyGraph] the dependencies
+ # generated by the lockfile that prevent the resolver from updating
+ # a Pod.
+ #
+ def self.generate_version_locking_dependencies(lockfile, pods_to_update, pods_to_unlock = [])
+ dependency_graph = Molinillo::DependencyGraph.new
+
+ if lockfile
+ added_dependency_strings = Set.new
+
+ explicit_dependencies = lockfile.dependencies
+ explicit_dependencies.each do |dependency|
+ dependency_graph.add_vertex(dependency.name, dependency, true)
+ end
+
+ pods = lockfile.to_hash['PODS'] || []
+ pods.each do |pod|
+ add_to_dependency_graph(pod, [], dependency_graph, pods_to_unlock, added_dependency_strings)
+ end
+
+ pods_to_update = pods_to_update.flat_map do |u|
+ root_name = Specification.root_name(u).downcase
+ dependency_graph.vertices.each_key.select { |n| Specification.root_name(n).downcase == root_name }
+ end
+
+ pods_to_update.each do |u|
+ dependency_graph.detach_vertex_named(u)
+ end
+
+ dependency_graph.each do |vertex|
+ next unless dep = vertex.payload
+ dep.podspec_repo ||= lockfile.spec_repo(dep.root_name)
+ end
+ end
+
+ dependency_graph
+ end
+
+ # Generates a completely 'unlocked' dependency graph.
+ #
+ # @return [Molinillo::DependencyGraph] an empty dependency
+ # graph
+ #
+ def self.unlocked_dependency_graph
+ Molinillo::DependencyGraph.new
+ end
+
+ private
+
+ def self.add_child_vertex_to_graph(dependency_string, parents, dependency_graph, pods_to_unlock, added_dependency_strings)
+ return unless added_dependency_strings.add?(dependency_string)
+ dependency = Dependency.from_string(dependency_string)
+ if pods_to_unlock.include?(dependency.root_name)
+ dependency = Dependency.new(dependency.name)
+ end
+ vertex = dependency_graph.add_child_vertex(dependency.name, nil, parents, nil)
+ dependency = vertex.payload.merge(dependency) if vertex.payload
+ vertex.payload = dependency
+ dependency
+ end
+
+ def self.add_to_dependency_graph(object, parents, dependency_graph, pods_to_unlock, added_dependency_strings)
+ case object
+ when String
+ add_child_vertex_to_graph(object, parents, dependency_graph, pods_to_unlock, added_dependency_strings)
+ when Hash
+ object.each do |key, value|
+ dependency = add_child_vertex_to_graph(key, parents, dependency_graph, pods_to_unlock, added_dependency_strings)
+ value.each { |v| add_to_dependency_graph(v, [dependency.name], dependency_graph, pods_to_unlock, added_dependency_strings) }
+ end
+ end
+ end
+ end
+ end
+ end
+end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant.rb new file mode 100644 index 0000000..98d2c71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant.rb @@ -0,0 +1,87 @@ +module Pod
+ class Installer
+ class Analyzer
+ # Bundles the information needed to set up a {PodTarget}.
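+ #
+ # @example (illustrative only; `foo_spec` is a hypothetical specification)
+ # # PodVariant.new([foo_spec], [], [], Platform.ios, BuildType.dynamic_framework)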
+ class PodVariant
+ # @return [Array] the spec and subspecs for the target
+ #
+ attr_reader :specs
+
+ # @return [Array] the test specs for the target
+ #
+ attr_reader :test_specs
+
+ # @return [Array] the app specs for the target
+ #
+ attr_reader :app_specs
+
+ # @return [Platform] the platform
+ #
+ attr_reader :platform
+
+ # @return [BuildType] the build type of the target
+ #
+ attr_reader :build_type
+
+ # @return [String] the Swift version of the target.
+ #
+ attr_reader :swift_version
+
+ # Initialize a new instance from its attributes.
+ #
+ # @param [Array] specs @see #specs
+ # @param [Array] test_specs @see #test_specs
+ # @param [Array] app_specs @see #app_specs
+ # @param [Platform] platform @see #platform
+ # @param [BuildType] build_type @see #build_type
+ # @param [String] swift_version @see #swift_version
+ #
+ def initialize(specs, test_specs, app_specs, platform, build_type = BuildType.static_library,
+ swift_version = nil)
+ @specs = specs
+ @test_specs = test_specs
+ @app_specs = app_specs
+ @platform = platform
+ @build_type = build_type
+ @swift_version = swift_version
+ @hash = [specs, platform, build_type, swift_version].hash
+ end
+
+ # @return [Specification] the root specification
+ #
+ def root_spec
+ specs.first.root
+ end
+
+ # @note Non-library specs are intentionally not included as part of the equality for pod variants since a pod
+ # variant should not be affected by the number of test or app specs included.
+ #
+ # @return [Boolean] whether the {PodVariant} is equal to another taking all their attributes into account
+ #
+ def ==(other)
+ self.class == other.class &&
+ build_type == other.build_type &&
+ swift_version == other.swift_version &&
+ platform == other.platform &&
+ specs == other.specs
+ end
+ alias_method :eql?, :==
+
+ # Hashes the instance by all its attributes.
+ #
+ # This adds support to make instances usable as Hash keys.
+ #
+ # @!visibility private
+ attr_reader :hash
+
+ # @param [String] swift_version The swift version to use for this variant.
+ #
+ # @return [PodVariant] A copy of this pod variant with the specified Swift version.
+ #
+ def scoped_with_swift_version(swift_version)
+ PodVariant.new(specs, test_specs, app_specs, platform, build_type, swift_version)
+ end
+ end
+ end
+ end
+end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant_set.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant_set.rb new file mode 100644 index 0000000..c2f3ebd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/pod_variant_set.rb @@ -0,0 +1,175 @@ +require 'set'
+
+module Pod
+ class Installer
+ class Analyzer
+ # Collects all {PodVariant}.
+ class PodVariantSet
+ # @return [Array] the different variants within this set.
+ #
+ attr_reader :variants
+
+ # Initialize a new instance.
+ #
+ # @param [Array] variants @see #variants
+ #
+ def initialize(variants)
+ @variants = variants
+ end
+
+ # Describes what makes each {PodVariant} distinct among the others.
+ #
+ # @return [Hash]
+ #
+ def scope_suffixes
+ return { variants.first => nil } if variants.count == 1
+ Hash[scope_by_specs.map do |variant, scope|
+ require 'digest'
+ scope = Digest::MD5.hexdigest(scope)[0..7] if !scope.nil? && scope.length >= 50
+ [variant, scope]
+ end]
+ end
+
+ # Groups the collection by the result of the block.
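+ # For example, `group_by { |v| v.build_type.linkage }` yields one set per
+ # linkage, as used by #scope_by_linkage below.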
+        #
+        # @param [Block] block
+        # @return [Array<PodVariantSet>]
+        #
+        def group_by(&block)
+          variants.group_by(&block).map { |_, v| PodVariantSet.new(v) }
+        end
+
+        # @private
+        #
+        # Prepends the given scoped {PodVariant}s with another scoping label, if there
+        # was more than one group of {PodVariant}s given.
+        #
+        # @param [Array<Hash<PodVariant, String>>] scoped_variants
+        #        {PodVariant}s, which were grouped by some criterion, which is used
+        #        in the block argument to generate a descriptive label.
+        #
+        # @param [Block] block
+        #        takes a {PodVariant} and returns a scope suffix which is prepended, if
+        #        necessary.
+        #
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_if_necessary(scoped_variants, &block)
+          if scoped_variants.count == 1
+            return scoped_variants.first
+          end
+          Hash[scoped_variants.flat_map do |variants|
+            variants.map do |variant, suffix|
+              prefix = block.call(variant)
+              scope = [prefix, suffix].compact.join('-')
+              [variant, !scope.empty? ? scope : nil]
+            end
+          end]
+        end
+
+        # @private
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_by_build_type
+          scope_if_necessary(group_by { |v| v.build_type.packaging }.map(&:scope_by_linkage)) do |variant|
+            variant.build_type.packaging
+          end
+        end
+
+        # @private
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_by_linkage
+          scope_if_necessary(group_by { |v| v.build_type.linkage }.map(&:scope_by_platform)) do |variant|
+            variant.build_type.linkage
+          end
+        end
+
+        # @private
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_by_platform
+          grouped_variants = group_by { |v| v.platform.name }
+          if grouped_variants.all? { |set| set.variants.count == 1 }
+            # => Platform name
+            platform_name_proc = proc { |v| Platform.string_name(v.platform.symbolic_name).tr(' ', '') }
+          else
+            grouped_variants = group_by(&:platform)
+            # => Platform name + SDK version
+            platform_name_proc = proc { |v| v.platform.to_s.tr(' ', '') }
+          end
+          scope_if_necessary(grouped_variants.map(&:scope_by_swift_version), &platform_name_proc)
+        end
+
+        # @private
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_by_swift_version
+          scope_if_necessary(group_by(&:swift_version).map(&:scope_without_suffix)) do |variant|
+            variant.swift_version ? "Swift#{variant.swift_version}" : ''
+          end
+        end
+
+        # @private
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_by_specs
+          root_spec = variants.first.root_spec
+          specs = [root_spec]
+          specs += if root_spec.default_subspecs.empty?
+                     root_spec.subspecs.compact
+                   else
+                     root_spec.default_subspecs.map do |subspec_name|
+                       root_spec.subspec_by_name("#{root_spec.name}/#{subspec_name}")
+                     end
+                   end
+          default_specs = Set.new(specs)
+          grouped_variants = group_by(&:specs)
+          all_spec_variants = grouped_variants.map { |set| set.variants.first.specs }
+          common_specs = all_spec_variants.map(&:to_set).flatten.inject(&:&)
+          omit_common_specs = common_specs.any? && common_specs.proper_superset?(default_specs)
+          scope_if_necessary(grouped_variants.map(&:scope_by_build_type)) do |variant|
+            specs = variant.specs.to_set
+
+            # The current variant contains all default specs
+            omit_default_specs = default_specs.any? && default_specs.subset?(specs)
+            if omit_default_specs
+              specs -= default_specs
+            end
+
+            # There are common specs, which are different from the default specs
+            if omit_common_specs
+              specs -= common_specs
+            end
+
+            spec_names = specs.map do |spec|
+              spec.root? ? '.root' : spec.name.split('/')[1..-1].join('_')
+            end.sort
+            if spec_names.empty?
+              omit_common_specs ? '.common' : nil
+            else
+              if omit_common_specs
+                spec_names.unshift('.common')
+              elsif omit_default_specs
+                spec_names.unshift('.default')
+              end
+              spec_names.reduce('') do |acc, name|
+                "#{acc}#{acc.empty? || name[0] == '.' ? '' : '-'}#{name}"
+              end
+            end
+          end
+        end
+
+        # @private
+        #
+        # Helps to define scope suffixes recursively.
+        #
+        # @return [Hash<PodVariant, String>]
+        #
+        def scope_without_suffix
+          Hash[variants.map { |v| [v, nil] }]
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb
new file mode 100644
index 0000000..095e3bc
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/podfile_dependency_cache.rb
@@ -0,0 +1,55 @@
+module Pod
+  class Installer
+    class Analyzer
+      # Caches podfile & target definition dependencies, so they do not need to be re-computed
+      # from the internal hash on each access.
+      #
+      class PodfileDependencyCache
+        # @return [Array]
+        #         All the dependencies in the podfile
+        #
+        attr_reader :podfile_dependencies
+
+        def initialize(podfile_dependencies, dependencies_by_target_definition)
+          @podfile_dependencies = podfile_dependencies
+          @dependencies_by_target_definition = dependencies_by_target_definition
+        end
+
+        # Returns the dependencies for the given target definition.
+        #
+        def target_definition_dependencies(target_definition)
+          @dependencies_by_target_definition[target_definition] ||
+            raise(ArgumentError, "dependencies for #{target_definition.inspect} do not exist in the cache")
+        end
+
+        # Returns a list of all of the target definitions in the Podfile.
+        #
+        def target_definition_list
+          @dependencies_by_target_definition.keys
+        end
+
+        # Creates a {PodfileDependencyCache} from the given {Podfile}
+        #
+        # @param [Podfile] podfile
+        #        The {Podfile} from which dependencies should be cached
+        #
+        # @return [PodfileDependencyCache]
+        #         A warmed, immutable cache of all the dependencies in the {Podfile}
+        #
+        def self.from_podfile(podfile)
+          raise ArgumentError, 'Must be initialized with a podfile' unless podfile
+          podfile_dependencies = []
+          dependencies_by_target_definition = {}
+          podfile.target_definition_list.each do |target_definition|
+            deps = target_definition.dependencies.freeze
+            podfile_dependencies.concat deps
+            dependencies_by_target_definition[target_definition] = deps
+          end
+          podfile_dependencies.uniq!
+
+          new(podfile_dependencies.freeze, dependencies_by_target_definition.freeze)
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb
new file mode 100644
index 0000000..109e25b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/sandbox_analyzer.rb
@@ -0,0 +1,268 @@
+module Pod
+  class Installer
+    class Analyzer
+      # Analyze the sandbox to detect which Pods should be removed, and which
+      # ones should be reinstalled.
+      #
+      # The logic is the following:
+      #
+      # Added
+      # - If not present in the sandbox lockfile.
+      # - The directory of the Pod doesn't exist.
+      #
+      # Changed
+      # - The version of the Pod changed.
+      # - The SHA of the specification file changed.
+      # - The specific installed (sub)specs of the same Pod changed.
+      # - The specification is from an external source and the
+      #   installation process is in update mode.
+      # - The directory of the Pod is empty.
+      # - The Pod has been pre-downloaded.
+      #
+      # Removed
+      # - If a specification is present in the lockfile but not in the resolved
+      #   specs.
+      #
+      # Unchanged
+      # - If none of the above conditions match.
+      #
+      class SandboxAnalyzer
+        # @return [Sandbox] The sandbox to analyze.
+        #
+        attr_reader :sandbox
+
+        # @return [Podfile] The Podfile to analyze dependencies.
+        #
+        attr_reader :podfile
+
+        # @return [Array<Specification>] The specifications returned by the
+        #         resolver.
+        #
+        attr_reader :specs
+
+        # @return [Boolean] Whether the installation is performed in update mode.
+        #
+        attr_reader :update_mode
+
+        alias_method :update_mode?, :update_mode
+
+        # Initialize a new SandboxAnalyzer
+        #
+        # @param [Sandbox] sandbox @see sandbox
+        # @param [Podfile] podfile @see podfile
+        # @param [Array<Specification>] specs @see specs
+        # @param [Boolean] update_mode @see update_mode
+        #
+        def initialize(sandbox, podfile, specs, update_mode)
+          @sandbox = sandbox
+          @podfile = podfile
+          @specs = specs
+          @update_mode = update_mode
+        end
+
+        # Performs the analysis to detect the state of the sandbox with respect
+        # to the resolved specifications.
+        #
+        # @return [SpecsState] the state of the sandbox.
+        #
+        def analyze
+          state = SpecsState.new
+          if sandbox_manifest
+            all_names = (resolved_pods + sandbox_pods).uniq.sort
+            all_names.sort.each do |name|
+              state.add_name(name, pod_state(name))
+            end
+          else
+            state.added.merge(resolved_pods)
+          end
+          state
+        end
+
+        #---------------------------------------------------------------------#
+
+        private
+
+        # @!group Pod state
+
+        # Returns the state of the Pod with the given name.
+        #
+        # @param [String] pod
+        #        the name of the Pod.
+        #
+        # @return [Symbol] The state
+        #
+        def pod_state(pod)
+          return :deleted if pod_deleted?(pod)
+          return :added if pod_added?(pod)
+          return :changed if pod_changed?(pod)
+          :unchanged
+        end
+
+        # Returns whether the Pod with the given name should be installed.
+        #
+        # @note A Pod whose folder doesn't exist is considered added.
+        #
+        # @param [String] pod
+        #        the name of the Pod.
+        #
+        # @return [Boolean] Whether the Pod is added.
+        #
+        def pod_added?(pod)
+          return true if resolved_pods.include?(pod) && !sandbox_pods.include?(pod)
+          return true if !sandbox.local?(pod) && !folder_exist?(pod)
+          false
+        end
+
+        # Returns whether the Pod with the given name should be removed from
+        # the installation.
+        #
+        # @param [String] pod
+        #        the name of the Pod.
+        #
+        # @return [Boolean] Whether the Pod is deleted.
+        #
+        def pod_deleted?(pod)
+          return true if !resolved_pods.include?(pod) && sandbox_pods.include?(pod)
+          false
+        end
+
+        # Returns whether the Pod with the given name should be considered
+        # changed and thus should be reinstalled.
+        #
+        # @note In update mode, as there is no way to know if a remote source
+        #       hash changed, the Pods from external
+        #       sources are always marked as changed.
+        #
+        # @note A Pod whose folder is empty is considered changed.
+        #
+        # @param [String] pod
+        #        the name of the Pod.
+        #
+        # @return [Boolean] Whether the Pod is changed.
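+        #
+        # @example (illustrative)
+        #
+        #     # The sandbox manifest records MyPod 1.0.0, but resolution
+        #     # produced 1.1.0:
+        #     pod_changed?('MyPod') # => true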
+ # + def pod_changed?(pod) + spec = root_spec(pod) + return true if spec.version != sandbox_version(pod) + return true if spec.checksum != sandbox_checksum(pod) + return true if resolved_spec_names(pod) != sandbox_spec_names(pod) + podfile_dep = podfile_dependency(pod)&.tap { |dep| dep.podspec_repo = nil } + return true if podfile_dep != sandbox_dependency(pod) + return true if sandbox.predownloaded?(pod) + return true if folder_empty?(pod) + false + end + + #---------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Lockfile] The manifest to use for the sandbox. + # + def sandbox_manifest + sandbox.manifest + end + + #--------------------------------------# + + # @return [Array] The name of the resolved Pods. + # + def resolved_pods + @resolved_pods ||= specs.map { |spec| spec.root.name }.uniq + end + + # @return [Array] The name of the Pods stored in the sandbox + # manifest. + # + def sandbox_pods + @sandbox_pods ||= sandbox_manifest.pod_names.map { |name| Specification.root_name(name) }.uniq + end + + # @return [Array] The name of the resolved specifications + # (includes subspecs). + # + # @param [String] pod + # the name of the Pod. + # + def resolved_spec_names(pod) + specs.select { |s| s.root.name == pod }.map(&:name).uniq.sort + end + + # @return [Array] The name of the specifications stored in the + # sandbox manifest (includes subspecs). + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_spec_names(pod) + sandbox_manifest.pod_names.select { |name| Specification.root_name(name) == pod }.uniq.sort + end + + # @return [Specification] The root specification for the Pod with the + # given name. + # + # @param [String] pod + # the name of the Pod. + # + def root_spec(pod) + specs.find { |s| s.root.name == pod }.root + end + + #--------------------------------------# + + # @return [Version] The version of Pod with the given name stored in + # the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_version(pod) + sandbox_manifest.version(pod) + end + + # @return [String] The checksum of the specification of the Pod with + # the given name stored in the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_checksum(pod) + sandbox_manifest.checksum(pod) + end + + # @return [Dependency, nil] The dependency with the given name stored in the sandbox. + # + # @param [String] pod + # the name of the Pod. + # + def sandbox_dependency(pod) + sandbox_manifest.dependencies.find { |d| d.name == pod } + end + + #--------------------------------------# + + # @return [Dependency, nil] The dependency with the given name from the podfile. + # + # @param [String] pod + # the name of the Pod. + # + def podfile_dependency(pod) + podfile.dependencies.find { |d| d.name == pod } + end + + #--------------------------------------# + + def folder_exist?(pod) + sandbox.pod_dir(pod).exist? + end + + def folder_empty?(pod) + Dir.glob(sandbox.pod_dir(pod) + '*').empty? 
+        end
+
+        #---------------------------------------------------------------------#
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/specs_state.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/specs_state.rb
new file mode 100644
index 0000000..b632d6b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/specs_state.rb
@@ -0,0 +1,108 @@
+require 'set'
+
+module Pod
+  class Installer
+    class Analyzer
+      # This class represents the state of a collection of Pods.
+      #
+      # @note The names of the pods stored by this class are always the **root**
+      #       name of the specification.
+      #
+      # @note The motivation for this class is to ensure that the names of the
+      #       subspecs are added instead of the name of the Pods.
+      #
+      class SpecsState
+        # @return [Set<String>] the names of the pods that were added.
+        #
+        attr_reader :added
+
+        # @return [Set<String>] the names of the pods that were changed.
+        #
+        attr_reader :changed
+
+        # @return [Set<String>] the names of the pods that were deleted.
+        #
+        attr_reader :deleted
+
+        # @return [Set<String>] the names of the pods that were unchanged.
+        #
+        attr_reader :unchanged
+
+        # Initialize a new instance
+        #
+        # @param [Hash{Symbol=>String}] pods_by_state
+        #        The name of the pods grouped by their state
+        #        (`:added`, `:removed`, `:changed` or `:unchanged`).
+        #
+        def initialize(pods_by_state = nil)
+          @added = Set.new
+          @deleted = Set.new
+          @changed = Set.new
+          @unchanged = Set.new
+
+          if pods_by_state
+            {
+              :added => :added,
+              :changed => :changed,
+              :removed => :deleted,
+              :unchanged => :unchanged,
+            }.each do |state, spec_state|
+              Array(pods_by_state[state]).each do |name|
+                add_name(name, spec_state)
+              end
+            end
+          end
+        end
+
+        # Displays the state of each pod.
+        #
+        # @return [void]
+        #
+        def print
+          states = %i(added deleted changed unchanged)
+          lines(states).each do |line|
+            UI.message(line, '', 2)
+          end
+        end
+
+        def to_s(states: %i(added deleted changed unchanged))
+          lines(states).join("\n")
+        end
+
+        # Adds the name of a Pod to the given state.
+        #
+        # @param [String] name
+        #        the name of the Pod.
+        #
+        # @param [Symbol] state
+        #        the state of the Pod.
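+        #
+        # @example (illustrative) subspec names are reduced to their root name:
+        #
+        #     state.add_name('MyPod/Core', :added)
+        #     state.added # => #<Set: {"MyPod"}>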
+        #
+        # @return [void]
+        #
+        def add_name(name, state)
+          send(state) << Specification.root_name(name)
+        end
+
+        private
+
+        # @return [Array<String>] A description of changes for the given states,
+        #         one per line
+        #
+        def lines(states)
+          prefixes = {
+            :added => 'A'.green,
+            :deleted => 'R'.red,
+            :changed => 'M'.yellow,
+            :unchanged => '-',
+          }
+
+          states.flat_map do |state|
+            send(state).sort.map do |pod|
+              prefixes[state.to_sym] + " #{pod}"
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspection_result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspection_result.rb
new file mode 100644
index 0000000..21c72d6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspection_result.rb
@@ -0,0 +1,58 @@
+module Pod
+  class Installer
+    class Analyzer
+      class TargetInspectionResult
+        # @return [TargetDefinition] the target definition, whose project was
+        #         inspected
+        #
+        attr_reader :target_definition
+
+        # @return [Xcodeproj::Project] the user's Xcode project
+        #
+        attr_reader :project
+
+        # @return [Array<String>] the uuids of the user's targets
+        #
+        attr_reader :project_target_uuids
+
+        # @return [Hash{String=>Symbol}] A hash representing the user build
+        #         configurations where each key corresponds to the name of a
+        #         configuration and its value to its type (`:debug` or
+        #         `:release`).
+        #
+        attr_reader :build_configurations
+
+        # @return [Platform] the platform of the user targets
+        #
+        attr_reader :platform
+
+        # @return [Array<String>] the architectures used by the user's targets
+        #
+        attr_reader :archs
+
+        # @return [Pathname] the path to the root of the project containing the user target
+        #
+        attr_reader :client_root
+
+        # Initialize a new instance
+        #
+        # @param [TargetDefinition] target_definition @see #target_definition
+        # @param [Xcodeproj::Project] project @see #project
+        # @param [Array<String>] project_target_uuids @see #project_target_uuids
+        # @param [Hash{String=>Symbol}] build_configurations @see #build_configurations
+        # @param [Platform] platform @see #platform
+        # @param [Array<String>] archs @see #archs
+        #
+        def initialize(target_definition, project, project_target_uuids, build_configurations, platform, archs)
+          @target_definition = target_definition
+          @project = project
+          @project_target_uuids = project_target_uuids
+          @build_configurations = build_configurations
+          @platform = platform
+          @archs = archs
+          @client_root = Pathname.new(project.project_dir + project.root_object.project_dir_path).realpath
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspector.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspector.rb
new file mode 100644
index 0000000..717bd44
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/analyzer/target_inspector.rb
@@ -0,0 +1,258 @@
+require 'active_support/core_ext/array/conversions'
+
+module Pod
+  class Installer
+    class Analyzer
+      class TargetInspector
+        PLATFORM_INFO_URL = 'https://guides.cocoapods.org/syntax/podfile.html#platform'.freeze
+
+        # @return [TargetDefinition] the target definition to inspect
+        #
+        attr_reader :target_definition
+
+        # @return [Pathname] the root of the CocoaPods installation where the
+        #         Podfile is located
+        #
+        attr_reader :installation_root
+
+        # Initialize a new instance
+        #
+        # @param [TargetDefinition] target_definition
+        #        @see #target_definition
+        #
+        # @param [Pathname] installation_root
+        #        @see #installation_root
+        #
+        def initialize(target_definition, installation_root)
+          @target_definition = target_definition
+          @installation_root = installation_root
+        end
+
+        # Inspect the #target_definition
+        #
+        # @raise If no `user_project` is set
+        #
+        # @return [TargetInspectionResult] the result of the inspection of the target definition within the user project
+        #
+        def compute_results(user_project)
+          raise ArgumentError, 'Cannot compute results without a user project set' unless user_project
+
+          targets = compute_targets(user_project)
+          project_target_uuids = targets.map(&:uuid)
+          build_configurations = compute_build_configurations(targets)
+          platform = compute_platform(targets)
+          archs = compute_archs(targets)
+          swift_version = compute_swift_version_from_targets(targets)
+
+          result = TargetInspectionResult.new(target_definition, user_project, project_target_uuids,
+                                              build_configurations, platform, archs)
+          result.target_definition.swift_version = swift_version
+          result
+        end
+
+        # Returns the path of the user project that the #target_definition
+        # should integrate.
+        #
+        # @raise If the project is implicit and there are multiple projects.
+        #
+        # @raise If the path doesn't exist.
+        #
+        # @return [Pathname] the path of the user project.
+        #
+        def compute_project_path
+          if target_definition.user_project_path
+            path = installation_root + target_definition.user_project_path
+            path = "#{path}.xcodeproj" unless File.extname(path) == '.xcodeproj'
+            path = Pathname.new(path)
+            unless path.exist?
+              raise Informative, 'Unable to find the Xcode project ' \
+                "`#{path}` for the target `#{target_definition.label}`."
+            end
+          else
+            xcodeprojs = installation_root.children.select { |e| e.fnmatch('*.xcodeproj') }
+            if xcodeprojs.size == 1
+              path = xcodeprojs.first
+            else
+              raise Informative, 'Could not automatically select an Xcode project. ' \
+                "Specify one in your Podfile like so:\n\n" \
+                " project 'path/to/Project.xcodeproj'\n"
+            end
+          end
+          path
+        end
+
+        #-----------------------------------------------------------------------#
+
+        private
+
+        # Returns a list of the targets from the project of #target_definition
+        # that need to be integrated.
+        #
+        # @note The method first looks if there is a target specified with
+        #       the `link_with` option of the {TargetDefinition}. Otherwise
+        #       it looks for the target that has the same name of the target
+        #       definition. Finally, if no target was found, the first
+        #       encountered target is returned (it is assumed to be the one
+        #       to integrate in simple projects).
+        #
+        # @param [Xcodeproj::Project] user_project
+        #        the user project
+        #
+        # @return [Array<PBXNativeTarget>]
+        #
+        def compute_targets(user_project)
+          native_targets = user_project.native_targets
+          target = native_targets.find { |t| t.name == target_definition.name.to_s }
+          unless target
+            found = native_targets.map { |t| "`#{t.name}`" }.to_sentence
+            raise Informative, "Unable to find a target named `#{target_definition.name}` in project `#{Pathname(user_project.path).basename}`, did find #{found}."
+          end
+          [target]
+        end
+
+        # @param [Array<PBXNativeTarget>] user_targets
+        #        the user's targets of the project of #target_definition.
+        #
+        # @return [Hash{String=>Symbol}] A hash representing the user build
+        #         configurations where each key corresponds to the name of a
+        #         configuration and its value to its type (`:debug` or `:release`).
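+        #         e.g. `{ 'Debug' => :debug, 'AppStore' => :release }`
+        #         (illustrative values; by default any non-'Debug' configuration
+        #         maps to `:release`).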
+        #
+        def compute_build_configurations(user_targets)
+          if user_targets
+            user_targets.flat_map { |t| t.build_configurations.map(&:name) }.each_with_object({}) do |name, hash|
+              hash[name] = name == 'Debug' ? :debug : :release
+            end.merge(target_definition.build_configurations || {})
+          else
+            target_definition.build_configurations || {}
+          end
+        end
+
+        # Computes the platform of the target definition from the user targets,
+        # unless the Podfile specifies one explicitly.
+        #
+        # @param [Array<PBXNativeTarget>] user_targets
+        #        the user's targets of the project of #target_definition.
+        #
+        # @return [Platform] the platform of the user targets.
+        #
+        def compute_platform(user_targets)
+          return target_definition.platform if target_definition.platform
+          name = nil
+          deployment_target = nil
+
+          user_targets.each do |target|
+            name ||= target.platform_name
+            raise Informative, 'Targets with different platforms' unless name == target.platform_name
+            if !deployment_target || deployment_target > Version.new(target.deployment_target)
+              deployment_target = Version.new(target.deployment_target)
+            end
+          end
+
+          unless name
+            raise Informative,
+                  "Unable to determine the platform for the `#{target_definition.name}` target."
+          end
+
+          UI.warn "Automatically assigning platform `#{Platform.string_name(name)}` with version `#{deployment_target}` " \
+            "on target `#{target_definition.name}` because no platform was specified. " \
+            "Please specify a platform for this target in your Podfile. See `#{PLATFORM_INFO_URL}`."
+
+          target_definition.set_platform(name, deployment_target)
+          Platform.new(name, deployment_target)
+        end
+
+        # Computes the architectures relevant for the user's targets.
+        #
+        # @param [Array<PBXNativeTarget>] user_targets
+        #
+        # @return [Array<String>]
+        #
+        def compute_archs(user_targets)
+          user_targets.flat_map do |target|
+            Array(target.common_resolved_build_setting('ARCHS'))
+          end.compact.uniq.sort
+        end
+
+        # Checks if any of the targets for the {TargetDefinition} computed before
+        # by #compute_user_project_targets is recommended to be built as a framework
+        # due to the presence of Swift source code in any of the source build phases.
+        #
+        # @param [TargetDefinition] target_definition
+        #        the target definition
+        #
+        # @param [Array<PBXNativeTarget>] native_targets
+        #        the targets which are checked for presence of Swift source code
+        #
+        # @return [Boolean] Whether the user project targets to integrate into
+        #         use Swift
+        #
+        def compute_recommends_frameworks(target_definition, native_targets)
+          file_predicate = nil
+          file_predicate = proc do |file_ref|
+            if file_ref.respond_to?(:last_known_file_type)
+              file_ref.last_known_file_type == 'sourcecode.swift'
+            elsif file_ref.respond_to?(:files)
+              file_ref.files.any?(&file_predicate)
+            else
+              false
+            end
+          end
+          target_definition.platform.supports_dynamic_frameworks? || native_targets.any? do |target|
+            target.source_build_phase.files.any? do |build_file|
+              file_predicate.call(build_file.file_ref)
+            end
+          end
+        end
+
+        # Compute the Swift version for the target build configurations. If more
+        # than one Swift version is defined for a given target, then it will raise.
+        #
+        # @param [Array<PBXNativeTarget>] targets
+        #        the targets that are checked for Swift versions.
+        #
+        # @return [String] the targets' Swift version, or nil
+        #
+        def compute_swift_version_from_targets(targets)
+          versions_to_targets = targets.inject({}) do |memo, target|
+            # User project may have an xcconfig that specifies the `SWIFT_VERSION`.
+            # Xcodeproj handles that xcconfig either not being set or the file not being present on disk.
+            # After the first integration the xcconfig set is most probably
+            # the one that was generated from CocoaPods. See https://github.com/CocoaPods/CocoaPods/issues/7731 for
+            # more details.
+            versions = target.resolved_build_setting('SWIFT_VERSION', true).values
+            versions.each do |version|
+              memo[version] = [] if memo[version].nil?
+              memo[version] << target.name unless memo[version].include? target.name
+            end
+            memo
+          end
+
+          case versions_to_targets.count
+          when 0
+            nil
+          when 1
+            versions_to_targets.keys.first
+          else
+            target_version_pairs = versions_to_targets.map do |version_names, target_names|
+              target_names.map { |target_name| [target_name, version_names] }
+            end
+
+            sorted_pairs = target_version_pairs.flat_map { |i| i }.sort_by do |target_name, version_name|
+              "#{target_name} #{version_name}"
+            end
+
+            formatted_output = sorted_pairs.map do |target, version_name|
+              "#{target}: Swift #{version_name}"
+            end.join("\n")
+
+            raise Informative, "There may only be up to 1 unique SWIFT_VERSION per target. Found target(s) with multiple Swift versions:\n#{formatted_output}"
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/base_install_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/base_install_hooks_context.rb
new file mode 100644
index 0000000..467fca9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/base_install_hooks_context.rb
@@ -0,0 +1,150 @@
+module Pod
+  class Installer
+    # Context object designed to be used with the HooksManager which describes
+    # the context of the installer.
+    #
+    class BaseInstallHooksContext
+      # @return [Sandbox] The Sandbox for the project.
+      #
+      attr_reader :sandbox
+
+      # @return [String] The path to the sandbox root (`Pods` directory).
+      #
+      attr_reader :sandbox_root
+
+      # @return [Xcodeproj::Project] The Pods Xcode project.
+      #
+      attr_reader :pods_project
+
+      # @return [Array<Xcodeproj::Project>] the subprojects nested under pods_project.
+      #
+      attr_reader :pod_target_subprojects
+
+      # @return [Array<UmbrellaTargetDescription>] The list of
+      #         the CocoaPods umbrella targets generated by the installer.
+      #
+      attr_reader :umbrella_targets
+
+      # Initialize a new instance
+      #
+      # @param [Sandbox] sandbox see #sandbox
+      # @param [String] sandbox_root see #sandbox_root
+      # @param [Xcodeproj::Project] pods_project see #pods_project
+      # @param [Array<Xcodeproj::Project>] pod_target_subprojects see #pod_target_subprojects
+      # @param [Array<UmbrellaTargetDescription>] umbrella_targets see #umbrella_targets
+      #
+      def initialize(sandbox, sandbox_root, pods_project, pod_target_subprojects, umbrella_targets)
+        @sandbox = sandbox
+        @sandbox_root = sandbox_root
+        @pods_project = pods_project
+        @pod_target_subprojects = pod_target_subprojects
+        @umbrella_targets = umbrella_targets
+      end
+
+      # @return [PostInstallHooksContext] Convenience class generator method
+      #
+      # @param [Sandbox] sandbox
+      #        The sandbox
+      #
+      # @param [Project] pods_project
+      #        The pods project.
+      #
+      # @param [Array<Project>] pod_target_subprojects
+      #        The subprojects nested under pods_project.
+      #
+      # @param [Array<AggregateTarget>] aggregate_targets
+      #        The aggregate targets, which will be presented by an adequate
+      #        {UmbrellaTargetDescription} in the generated context.
+      #
+      # @return [HooksContext] Convenience class method to generate the
+      #         static context.
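+      #
+      # @example (illustrative sketch; the plugin name is hypothetical)
+      #
+      #     Pod::HooksManager.register('cocoapods-my-plugin', :post_install) do |context|
+      #       context.umbrella_targets.each { |t| puts t.cocoapods_target_label }
+      #     end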
+      #
+      def self.generate(sandbox, pods_project, pod_target_subprojects, aggregate_targets)
+        umbrella_targets_descriptions = aggregate_targets.map do |umbrella|
+          user_project = umbrella.user_project
+          user_targets = umbrella.user_targets
+          specs = umbrella.specs
+          platform_name = umbrella.platform.name
+          platform_deployment_target = umbrella.platform.deployment_target.to_s
+          cocoapods_target_label = umbrella.label
+          UmbrellaTargetDescription.new(user_project, user_targets, specs, platform_name, platform_deployment_target, cocoapods_target_label)
+        end
+
+        new(sandbox, sandbox.root.to_s, pods_project, pod_target_subprojects, umbrella_targets_descriptions)
+      end
+
+      # @return [Array<Xcodeproj::Project>] all projects generated during installation
+      #
+      def generated_projects
+        [pods_project] + pod_target_subprojects
+      end
+
+      # Pure data class which describes an umbrella target.
+      #
+      class UmbrellaTargetDescription
+        # @return [Xcodeproj::Project] The user project into which this target
+        #         is integrated.
+        #
+        attr_reader :user_project
+
+        # @return [Array<PBXNativeTarget>]
+        #         The list of user targets integrated by this umbrella target.
+        #
+        attr_reader :user_targets
+
+        # @return [Array<Specification>] The list of the
+        #         specifications of the target.
+        #
+        attr_reader :specs
+
+        # @return [Symbol] The platform (either `:ios`, `:watchos`, `:tvos`, or `:osx`).
+        #
+        attr_reader :platform_name
+
+        # @return [String] The deployment target.
+        #
+        attr_reader :platform_deployment_target
+
+        # @return [String] The label for the target.
+        #
+        attr_reader :cocoapods_target_label
+
+        # Initialize a new instance
+        #
+        # @param [Xcodeproj::Project] user_project see #user_project
+        # @param [Array<PBXNativeTarget>] user_targets see #user_targets
+        # @param [Array<Specification>] specs see #specs
+        # @param [Symbol] platform_name see #platform_name
+        # @param [String] platform_deployment_target see #platform_deployment_target
+        # @param [String] cocoapods_target_label see #cocoapods_target_label
+        #
+        def initialize(user_project, user_targets, specs, platform_name, platform_deployment_target, cocoapods_target_label)
+          @user_project = user_project
+          @user_targets = user_targets
+          @specs = specs
+          @platform_name = platform_name
+          @platform_deployment_target = platform_deployment_target
+          @cocoapods_target_label = cocoapods_target_label
+        end
+
+        # @return [String] The path of the user project
+        #         integrated by this target.
+        #
+        def user_project_path
+          user_project.path if user_project
+        end
+
+        # @return [Array<String>] The list of the UUIDs of the
+        #         user targets integrated by this umbrella
+        #         target. They can be used to find the
+        #         targets opening the project with
+        #         Xcodeproj.
+        #
+        def user_target_uuids
+          user_targets.map(&:uuid)
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/installation_options.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/installation_options.rb
new file mode 100644
index 0000000..a23e24f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/installation_options.rb
@@ -0,0 +1,206 @@
+require 'active_support/hash_with_indifferent_access'
+
+module Pod
+  class Installer
+    # Represents the installation options the user can customize via a
+    # `Podfile`.
+    #
+    class InstallationOptions
+      # Parses installation options from a podfile.
+      #
+      # @param [Podfile] podfile the podfile to parse installation options
+      #        from.
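+      #        e.g. given a Podfile containing
+      #        `install! 'cocoapods', :deterministic_uuids => false`,
+      #        `from_podfile(podfile)` yields options whose
+      #        `deterministic_uuids?` is false (illustrative).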
+ # + # @raise [Informative] if `podfile` does not specify a `CocoaPods` + # install. + # + # @return [Self] + # + def self.from_podfile(podfile) + name, options = podfile.installation_method + unless name.downcase == 'cocoapods' + raise Informative, "Currently need to specify a `cocoapods` install, you chose `#{name}`." + end + new(options) + end + + # Defines a new installation option. + # + # @param [#to_s] name the name of the option. + # + # @param default the default value for the option. + # + # @param [Boolean] boolean whether the option has a boolean value. + # + # @return [void] + # + # @!macro [attach] option + # + # @note this option defaults to $2. + # + # @return [Boolean] the $1 $0 for installation. + # + def self.option(name, default, boolean: true) + name = name.to_s + raise ArgumentError, "The `#{name}` option is already defined" if defaults.key?(name) + defaults[name] = default + attr_accessor name + alias_method "#{name}?", name if boolean + end + + # @return [Hash] all known installation options and their + # default values. + # + def self.defaults + @defaults ||= {} + end + + # @return [Array] the names of all known installation options. + # + def self.all_options + defaults.keys + end + + # Initializes the installation options with a hash of options from a + # Podfile. + # + # @param [Hash] options the options to parse. + # + # @raise [Informative] if `options` contains any unknown keys. + # + def initialize(options = {}) + options = ActiveSupport::HashWithIndifferentAccess.new(options) + unknown_keys = options.keys - self.class.all_options.map(&:to_s) + raise Informative, "Unknown installation options: #{unknown_keys.to_sentence}." unless unknown_keys.empty? + self.class.defaults.each do |key, default| + value = options.fetch(key, default) + send("#{key}=", value) + end + end + + # @param [Boolean] include_defaults whether values that match the default + # for their option should be included. Defaults to `true`. + # + # @return [Hash] the options, keyed by option name. + # + def to_h(include_defaults: true) + self.class.defaults.reduce(ActiveSupport::HashWithIndifferentAccess.new) do |hash, (option, default)| + value = send(option) + hash[option] = value if include_defaults || value != default + hash + end + end + + def ==(other) + other.is_a?(self.class) && to_h == other.to_h + end + + alias_method :eql, :== + + def hash + to_h.hash + end + + # Whether to clean the sources of the pods during installation + # + # Cleaning removes any files not used by the pod as specified by the podspec and the platforms + # that the project supports + # + # @see {PodSourceInstaller#clean!} + # + option :clean, true + + # Whether to deduplicate pod targets + # + # Target deduplication adds suffixes to pod targets for the cases where a pod is included + # in multiple targets that have different requirements. For example, a pod named 'MyPod' with a subspec 'SubA' + # that is included in two targets as follows: + # + # target 'MyTargetA' do + # pod 'MyPod/SubA' + # end + # + # target 'MyTargetB' do + # pod 'MyPod' + # end + # + # will result in two Pod targets: `MyPod` and `MyPod-SubA` + # + option :deduplicate_targets, true + + # Whether to generate deterministic UUIDs when creating the Pods project + # + # @see {Xcodeproj#generate_uuid} + # + option :deterministic_uuids, true + + # Whether to integrate the installed pods into the user project + # + # If set to false, Pods will be downloaded and installed to the `Pods/` directory + # but they will not be integrated into your project. 
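+      #
+      # A minimal Podfile sketch using this option (illustrative):
+      #
+      #   install! 'cocoapods', :integrate_targets => false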
+      #
+      option :integrate_targets, true
+
+      # Whether to lock the source files of pods. Xcode will prompt to unlock the files when attempting to modify
+      # their contents
+      #
+      # @note There is a performance penalty to locking the pods during installation. If this is significantly
+      #       impacting the duration of `pod install` for your project, you can try setting this to `false`
+      #
+      option :lock_pod_sources, true
+
+      # Whether to emit a warning when multiple sources contain a Pod with the same name and version
+      #
+      option :warn_for_multiple_pod_sources, true
+
+      # Whether to emit a warning if a project is not explicitly specifying the git-based master specs repo and can
+      # instead use the CDN, which is the default.
+      #
+      option :warn_for_unused_master_specs_repo, true
+
+      # Whether to share Xcode schemes for development pods.
+      #
+      # Schemes for development pods are created automatically but are not shared by default.
+      #
+      option :share_schemes_for_development_pods, false
+
+      # Whether to disable the input & output paths of the CocoaPods script phases (Copy Frameworks & Copy Resources)
+      #
+      # @see https://github.com/CocoaPods/CocoaPods/issues/8073
+      #
+      option :disable_input_output_paths, false
+
+      # Whether to preserve the file structure of all Pods, including externally sourced pods.
+      #
+      # By default, the file structure of Pod sources is preserved only for development pods. Setting
+      # `:preserve_pod_file_structure` to `true` will _always_ preserve the file structure.
+      #
+      option :preserve_pod_file_structure, false
+
+      # Whether to generate a project per pod target. Instead of creating 1 `Pods.xcodeproj`, this option will generate
+      # a project for every pod target that will be nested under the `Pods.xcodeproj`.
+      #
+      option :generate_multiple_pod_projects, false
+
+      # Whether to enable only regenerating targets and their associated projects that have changed
+      # since the previous installation.
+      #
+      option :incremental_installation, false
+
+      # Whether to skip generating the `Pods.xcodeproj` and perform only dependency resolution and downloading.
+      #
+      option :skip_pods_project_generation, false
+
+      # Whether to download pods in parallel before beginning the installation process
+      #
+      option :parallel_pod_downloads, false
+
+      # The size of the thread pool to use when downloading pods in parallel. Only takes effect when
+      # `parallel_pod_downloads` is `true`.
+      #
+      # Default: 40
+      #
+      option(:parallel_pod_download_thread_pool_size, 40, :boolean => false)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_downloader.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_downloader.rb
new file mode 100644
index 0000000..d169fb1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_downloader.rb
@@ -0,0 +1,159 @@
+
+module Pod
+  class Installer
+    # Controller class responsible for downloading the activated specifications
+    # of a single Pod.
+    #
+    # @note This class needs to consider all the activated specs of a Pod.
+    #
+    class PodSourceDownloader
+      UNENCRYPTED_PROTOCOLS = %w(http git).freeze
+
+      # @return [Sandbox] The installation target.
+      #
+      attr_reader :sandbox
+
+      # @return [Podfile] the podfile that should be integrated with the user
+      #         projects.
+      #
+      attr_reader :podfile
+
+      # @return [Hash{Symbol=>Array<Specification>}] The specifications that need to be
+      #         installed grouped by platform.
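+      #         e.g. `{ :ios => [spec] }`, where `spec` is a Specification
+      #         (illustrative).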
+      #
+      attr_reader :specs_by_platform
+
+      # @return [Boolean] Whether the installer is allowed to touch the cache.
+      #
+      attr_reader :can_cache
+      alias can_cache? can_cache
+
+      # Initialize a new instance
+      #
+      # @param [Sandbox] sandbox @see #sandbox
+      # @param [Podfile] podfile @see #podfile
+      # @param [Hash{Symbol=>Array<Specification>}] specs_by_platform @see #specs_by_platform
+      # @param [Boolean] can_cache @see #can_cache
+      #
+      def initialize(sandbox, podfile, specs_by_platform, can_cache: true)
+        @sandbox = sandbox
+        @podfile = podfile
+        @specs_by_platform = specs_by_platform
+        @can_cache = can_cache
+      end
+
+      # @return [String] A string suitable for debugging.
+      #
+      def inspect
+        "<#{self.class} sandbox=#{sandbox.root} pod=#{root_spec.name}>"
+      end
+
+      # @return [String] The name of the pod this downloader is downloading.
+      #
+      def name
+        root_spec.name
+      end
+
+      #-----------------------------------------------------------------------#
+
+      public
+
+      # @!group Downloading
+
+      # Downloads the source of the Pod, storing the resolved checkout options
+      # and the downloaded state in the sandbox.
+      #
+      # @return [void]
+      #
+      def download!
+        verify_source_is_secure(root_spec)
+        download_result = Downloader.download(download_request, root, :can_cache => can_cache?)
+
+        if (specific_source = download_result.checkout_options) && specific_source != root_spec.source
+          sandbox.store_checkout_source(root_spec.name, specific_source)
+        end
+
+        sandbox.store_downloaded_pod(root_spec.name)
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Download Steps
+
+      # Verify the source of the spec is secure, which is used to show a warning to the user if that isn't the case
+      # This method doesn't verify all protocols, but currently only prohibits unencrypted 'http://' and 'git://'
+      # connections.
+      #
+      # @return [void]
+      #
+      def verify_source_is_secure(root_spec)
+        return if root_spec.source.nil? || (root_spec.source[:http].nil? && root_spec.source[:git].nil?)
+        source = if !root_spec.source[:http].nil?
+                   URI(root_spec.source[:http].to_s)
+                 elsif !root_spec.source[:git].nil?
+                   git_source = root_spec.source[:git].to_s
+                   return unless git_source =~ /^#{URI::DEFAULT_PARSER.make_regexp}$/
+                   URI(git_source)
+                 end
+        if UNENCRYPTED_PROTOCOLS.include?(source.scheme) && source.host != 'localhost'
+          UI.warn "'#{root_spec.name}' uses the unencrypted '#{source.scheme}' protocol to transfer the Pod. " \
+            'Please be sure you\'re in a safe network with only trusted hosts. ' \
+            'Otherwise, please reach out to the library author to notify them of this security issue.'
+        end
+      end
+
+      def download_request
+        Downloader::Request.new(
+          :spec => root_spec,
+          :released => released?,
+        )
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Convenience methods.
+
+      # @return [Array<Specification>] the specifications of the Pod used in
+      #         this installation.
+      #
+      def specs
+        specs_by_platform.values.flatten
+      end
+
+      # @return [Specification] the root specification of the Pod.
+      #
+      def root_spec
+        specs.first.root
+      end
+
+      # @return [Pathname] the folder where the source of the Pod is located.
+      #
+      def root
+        sandbox.pod_dir(root_spec.name)
+      end
+
+      # @return [Boolean] whether the source has been pre-downloaded in the
+      #         resolution process to retrieve its podspec.
+      #
+      def predownloaded?
+        sandbox.predownloaded_pods.include?(root_spec.name)
+      end
+
+      # @return [Boolean] whether the pod uses the local option and thus
+      #         CocoaPods should not interfere with the files of the user.
+      #
+      def local?
+        sandbox.local?(root_spec.name)
+      end
+
+      def released?
+        sandbox.specification(root_spec.name) != root_spec
+      end
+
+      #-----------------------------------------------------------------------#
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_installer.rb
new file mode 100644
index 0000000..0f915f6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_installer.rb
@@ -0,0 +1,198 @@
+require 'active_support/core_ext/string/strip'
+
+module Pod
+  class Installer
+    # Controller class responsible for installing the activated specifications
+    # of a single Pod.
+    #
+    # @note This class needs to consider all the activated specs of a Pod.
+    #
+    class PodSourceInstaller
+      # @return [Sandbox] The installation target.
+      #
+      attr_reader :sandbox
+
+      # @return [Podfile] the podfile that should be integrated with the user
+      #         projects.
+      #
+      attr_reader :podfile
+
+      # @return [Hash{Symbol=>Array<Specification>}] The specifications that need to be
+      #         installed grouped by platform.
+      #
+      attr_reader :specs_by_platform
+
+      # @return [Boolean] Whether the installer is allowed to touch the cache.
+      #
+      attr_reader :can_cache
+      alias_method :can_cache?, :can_cache
+
+      # Initialize a new instance
+      #
+      # @param [Sandbox] sandbox @see #sandbox
+      # @param [Podfile] podfile @see #podfile
+      # @param [Hash{Symbol=>Array<Specification>}] specs_by_platform @see #specs_by_platform
+      # @param [Boolean] can_cache @see #can_cache
+      #
+      def initialize(sandbox, podfile, specs_by_platform, can_cache: true)
+        @sandbox = sandbox
+        @podfile = podfile
+        @specs_by_platform = specs_by_platform
+        @can_cache = can_cache
+      end
+
+      # @return [String] A string suitable for debugging.
+      #
+      def inspect
+        "<#{self.class} sandbox=#{sandbox.root} pod=#{root_spec.name}>"
+      end
+
+      # @return [String] The name of the pod this installer is installing.
+      #
+      def name
+        root_spec.name
+      end
+
+      #-----------------------------------------------------------------------#
+
+      public
+
+      # @!group Installation
+
+      # Installs the source of the Pod: downloads it unless it is already
+      # available, prepares local pods, and prunes stale local podspecs.
+      #
+      # @return [void]
+      #
+      def install!
+        download_source unless predownloaded? || local?
+        PodSourcePreparer.new(root_spec, root).prepare! if local?
+        sandbox.remove_local_podspec(name) unless predownloaded? || local? || external?
+      end
+
+      # Cleans the installation if appropriate.
+      #
+      # Cleaning the installation will remove any files that are not used during the build process, based on
+      # the podspec and platforms of the target that the pod is integrated into.
+      #
+      # @see {#clean_installation}
+      #
+      # @return [void]
+      #
+      def clean!
+        clean_installation unless local?
+      end
+
+      # Locks the source files if appropriate.
+      #
+      # @return [void]
+      #
+      def lock_files!(file_accessors)
+        return if local?
+        unlocked_files = source_files(file_accessors).reject { |f| (File.stat(f).mode & 0o200).zero? }
+        FileUtils.chmod('u-w', unlocked_files)
+      end
+
+      # Unlocks the source files if appropriate.
+      #
+      # @return [void]
+      #
+      def unlock_files!(file_accessors)
+        return if local?
+        FileUtils.chmod('u+w', source_files(file_accessors))
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Installation Steps
+
+      # Downloads the source of the Pod.
+      #
+      # @return [void]
+      #
+      def download_source
+        unless downloaded?
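+          # Delegate the fetch to PodSourceDownloader, which also records the
+          # checkout options and downloaded state in the sandbox.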
+          downloader = PodSourceDownloader.new(sandbox, podfile, specs_by_platform, :can_cache => can_cache?)
+          downloader.download!
+        end
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # Removes all the files not needed for the installation according to the
+      # specs by platform.
+      #
+      # @return [void]
+      #
+      def clean_installation
+        cleaner = Sandbox::PodDirCleaner.new(root, specs_by_platform)
+        cleaner.clean!
+      end
+
+      # @!group Convenience methods.
+
+      # @return [Array<Specification>] the specifications of the Pod used in
+      #         this installation.
+      #
+      def specs
+        specs_by_platform.values.flatten
+      end
+
+      # @return [Specification] the root specification of the Pod.
+      #
+      def root_spec
+        specs.first.root
+      end
+
+      # @return [Pathname] the folder where the source of the Pod is located.
+      #
+      def root
+        sandbox.pod_dir(root_spec.name)
+      end
+
+      # @return [Boolean] whether the source has already been downloaded prior
+      #         to the installation process.
+      #
+      def downloaded?
+        sandbox.downloaded_pods.include?(root_spec.name)
+      end
+
+      # @return [Boolean] whether the source has been pre-downloaded in the
+      #         resolution process to retrieve its podspec.
+      #
+      def predownloaded?
+        sandbox.predownloaded_pods.include?(root_spec.name)
+      end
+
+      # @return [Boolean] whether the pod uses the local option and thus
+      #         CocoaPods should not interfere with the files of the user.
+      #
+      def local?
+        sandbox.local?(root_spec.name)
+      end
+
+      # @return [Boolean] whether the pod uses an external source (e.g. :podspec) in the
+      #         resolution process to retrieve its podspec.
+      #
+      def external?
+        @dependencies ||= podfile.dependencies.select(&:external?).map(&:name)
+        @dependencies.include?(root_spec.name)
+      end
+
+      def released?
+        !local? && !predownloaded? && sandbox.specification(root_spec.name) != root_spec
+      end
+
+      # @return [Array<Pathname>] The paths of the source files
+      #
+      def source_files(file_accessors)
+        file_accessors.flat_map(&:source_files)
+      end
+
+      #-----------------------------------------------------------------------#
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_preparer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_preparer.rb
new file mode 100644
index 0000000..c97d645
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pod_source_preparer.rb
@@ -0,0 +1,77 @@
+module Pod
+  class Installer
+    # Controller class responsible for executing the prepare command
+    # of a single Pod.
+    #
+    class PodSourcePreparer
+      # @return [Specification] the root specification of the Pod.
+      #
+      attr_reader :spec
+
+      # @return [Pathname] the folder where the source of the Pod is located.
+      #
+      attr_reader :path
+
+      # Initialize a new instance
+      #
+      # @param [Specification] spec the root specification of the Pod.
+      # @param [Pathname] path the folder where the source of the Pod is located.
+      #
+      def initialize(spec, path)
+        raise "Given spec isn't a root spec, but must be." unless spec.root?
+        @spec = spec
+        @path = path
+      end
+
+      #-----------------------------------------------------------------------#
+
+      public
+
+      # @!group Preparation
+
+      # Executes the prepare command if there is one.
+      #
+      # @return [void]
+      #
+      def prepare!
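+        # Delegates to run_prepare_command, which executes the podspec's
+        # `prepare_command` (if any) from the pod's root directory.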
+        run_prepare_command
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Preparation Steps
+
+      extend Executable
+      executable :bash
+
+      # Runs the prepare command bash script of the spec.
+      #
+      # @note Unsets the `CDPATH` env variable before running the
+      #       shell script to avoid issues with relative paths
+      #       (issue #1694).
+      #
+      # @return [void]
+      #
+      def run_prepare_command
+        return unless spec.prepare_command
+        UI.section(' > Running prepare command', '', 1) do
+          Dir.chdir(path) do
+            begin
+              ENV.delete('CDPATH')
+              ENV['COCOAPODS_VERSION'] = Pod::VERSION
+              prepare_command = spec.prepare_command.strip_heredoc.chomp
+              full_command = "\nset -e\n" + prepare_command
+              bash!('-c', full_command)
+            ensure
+              ENV.delete('COCOAPODS_VERSION')
+            end
+          end
+        end
+      end
+
+      #-----------------------------------------------------------------------#
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/podfile_validator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/podfile_validator.rb
new file mode 100644
index 0000000..e324c7f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/podfile_validator.rb
@@ -0,0 +1,168 @@
+module Pod
+  class Installer
+    # Validate the podfile before installing to catch errors and
+    # problems
+    #
+    class PodfileValidator
+      # @return [Podfile] The podfile being validated
+      #
+      attr_reader :podfile
+
+      # @return [Array<String>] any errors that have occurred during the validation
+      #
+      attr_reader :errors
+
+      # @return [Array<String>] any warnings that have occurred during the validation
+      #
+      attr_reader :warnings
+
+      # Initialize a new instance
+      #
+      # @param [Podfile] podfile
+      #        The podfile to validate
+      #
+      # @param [Analyzer::PodfileDependencyCache] podfile_dependency_cache
+      #        An (optional) cache of all the dependencies in the podfile
+      #
+      def initialize(podfile, podfile_dependency_cache = Analyzer::PodfileDependencyCache.from_podfile(podfile))
+        @podfile = podfile
+        @podfile_dependency_cache = podfile_dependency_cache
+        @errors = []
+        @warnings = []
+        @validated = false
+      end
+
+      # Validates the podfile.
+      # Errors are added to the errors array.
+      #
+      def validate
+        validate_installation_options
+        validate_pod_directives
+        validate_no_abstract_only_pods!
+        validate_dependencies_are_present!
+        validate_no_duplicate_targets!
+
+        @validated = true
+      end
+
+      # Whether the podfile is valid or not.
+      # NOTE: Will execute `validate` if the podfile
+      # has not yet been validated
+      #
+      def valid?
+        validate unless @validated
+
+        @validated && errors.empty?
+      end
+
+      # A message describing any errors in the
+      # validation
+      #
+      def message
+        errors.join("\n")
+      end
+
+      private
+
+      def add_error(error)
+        errors << error
+      end
+
+      def add_warning(warning)
+        warnings << warning
+      end
+
+      def validate_installation_options
+        installation_options = podfile.installation_options
+
+        # Validate `incremental_installation` depends on `generate_multiple_pod_projects`
+        invalid = installation_options.incremental_installation? && installation_options.incremental_installation != installation_options.generate_multiple_pod_projects
+        add_error 'The installation option `incremental_installation` requires the option `generate_multiple_pod_projects` to also be enabled.' if invalid
+      end
+
+      def validate_pod_directives
+        @podfile_dependency_cache.podfile_dependencies.each do |dependency|
+          validate_conflicting_external_sources!(dependency)
+        end
+      end
+
+      def validate_conflicting_external_sources!(dependency)
+        external_source = dependency.external_source
+        return false if external_source.nil?
+
+        available_downloaders = Downloader.downloader_class_by_key.keys
+        specified_downloaders = external_source.select { |key| available_downloaders.include?(key) }
+        if specified_downloaders.size > 1
+          add_error "The dependency `#{dependency.name}` specifies more than one download strategy (#{specified_downloaders.keys.join(', ')}). " \
+            'Only one is allowed.'
+        end
+
+        pod_spec_or_path = external_source[:podspec].present? || external_source[:path].present?
+        if pod_spec_or_path && specified_downloaders.size > 0
+          add_error "The dependency `#{dependency.name}` specifies `podspec` or `path` in combination with other" \
+            ' download strategies. This is not allowed.'
+        end
+      end
+
+      # Warns the user if the podfile is empty.
+      #
+      # @note The workspace is created in any case and all the user projects
+      #       are added to it, however the projects are not integrated as
+      #       there is no way to discern between target definitions which are
+      #       empty and target definitions which just serve the purpose to
+      #       wrap other ones. This is not an issue because empty target
+      #       definitions generate empty libraries.
+      #
+      # @return [void]
+      #
+      def validate_dependencies_are_present!
+        if @podfile_dependency_cache.target_definition_list.all?(&:empty?)
+          add_warning 'The Podfile does not contain any dependencies.'
+        end
+      end
+
+      # Verifies that no dependencies in the Podfile will end up not being built
+      # at all. In other words, all dependencies should belong to a non-abstract
+      # target, or be inherited by a target where `inheritance == complete`.
+      #
+      def validate_no_abstract_only_pods!
+        @podfile_dependency_cache.target_definition_list.each do |target_definition|
+          dependencies = @podfile_dependency_cache.target_definition_dependencies(target_definition)
+          next if dependencies.empty?
+          next unless target_definition.abstract?
+
+          children = target_definition.recursive_children
+          next if children.any? { |child_target_definition| target_definition_inherits?(:parent => target_definition, :child => child_target_definition) }
+
+          add_warning "The abstract target #{target_definition.name} is not inherited by a concrete target, " \
+            "so the following dependencies won't make it into any targets in your project:" \
+            "\n - #{dependencies.map(&:to_s).sort.join("\n - ")}"
+
+          next if target_definition.platform
+
+          add_error "The abstract target #{target_definition.name} must specify a platform since its dependencies are not inherited by a concrete target."
+        end
+      end
+
+      def target_definition_inherits?(parent: nil, child: nil)
+        if parent == child
+          true
+        elsif child.exclusive?
+          false
+        else
+          target_definition_inherits?(:parent => parent, :child => child.parent)
+        end
+      end
+
+      def validate_no_duplicate_targets!
+        @podfile_dependency_cache.target_definition_list.group_by { |td| [td.name, td.user_project_path] }.
+ each do |(name, project), definitions| + next unless definitions.size > 1 + error = "The target `#{name}` is declared multiple times" + error << " for the project `#{project}`" if project + add_error(error << '.') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_install_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_install_hooks_context.rb new file mode 100644 index 0000000..9f128d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_install_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PostInstallHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_integrate_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_integrate_hooks_context.rb new file mode 100644 index 0000000..17e1c3e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/post_integrate_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PostIntegrateHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_install_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_install_hooks_context.rb new file mode 100644 index 0000000..7753abd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_install_hooks_context.rb @@ -0,0 +1,51 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer before analysis has been completed. + # + class PreInstallHooksContext + # @return [Podfile] The Podfile for the project. + # + attr_reader :podfile + + # @return [Sandbox] The Sandbox for the project. + # + attr_reader :sandbox + + # @return [String] The path to the sandbox root (`Pods` directory). + # + attr_reader :sandbox_root + + # @return [Lockfile] The Lockfile for the project. + # + attr_reader :lockfile + + # Initialize a new instance + # + # @param [Sandbox] sandbox see #sandbox + # @param [String] sandbox_root see #sandbox_root + # @param [Podfile] podfile see #podfile + # @param [Lockfile] lockfile see #lockfile + # + def initialize(podfile, sandbox, sandbox_root, lockfile) + @podfile = podfile + @sandbox = sandbox + @sandbox_root = sandbox_root + @lockfile = lockfile + end + + # @param [Sandbox] sandbox see {#sandbox} + # + # @param [Podfile] podfile see {#podfile} + # + # @param [Lockfile] lockfile see {#lockfile} + # + # @return [PreInstallHooksContext] Convenience class method to generate the + # static context. 
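+      # @example (illustrative sketch; the plugin name is hypothetical)
+      #
+      #     Pod::HooksManager.register('cocoapods-my-plugin', :pre_install) do |context|
+      #       puts "Installing into #{context.sandbox_root}"
+      #     end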
+ # + def self.generate(sandbox, podfile, lockfile) + new(podfile, sandbox, sandbox.root.to_s, lockfile) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_integrate_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_integrate_hooks_context.rb new file mode 100644 index 0000000..7393613 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/pre_integrate_hooks_context.rb @@ -0,0 +1,9 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer. + # + class PreIntegrateHooksContext < BaseInstallHooksContext + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache.rb new file mode 100644 index 0000000..eac058c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache.rb @@ -0,0 +1,11 @@ +module Pod + class Installer + module ProjectCache + autoload :ProjectCacheAnalyzer, 'cocoapods/installer/project_cache/project_cache_analyzer' + autoload :ProjectInstallationCache, 'cocoapods/installer/project_cache/project_installation_cache' + autoload :ProjectMetadataCache, 'cocoapods/installer/project_cache/project_metadata_cache' + autoload :ProjectCacheAnalysisResult, 'cocoapods/installer/project_cache/project_cache_analysis_result' + autoload :ProjectCacheVersion, 'cocoapods/installer/project_cache/project_cache_version' + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb new file mode 100644 index 0000000..50de9cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analysis_result.rb @@ -0,0 +1,53 @@ +module Pod + class Installer + module ProjectCache + # The result object from analyzing the project cache. + # + class ProjectCacheAnalysisResult + # @return [Array] + # The list of pod targets that need to be regenerated. + # + attr_reader :pod_targets_to_generate + + # @return [Array] + # The list of aggregate targets that need to be regenerated. This can be nil to indicate that + # aggregate targets should not be regenerated at all; an empty list is a distinct, valid value + # meaning that zero aggregate targets are generated. + # + attr_reader :aggregate_targets_to_generate + + # @return [Hash{String => TargetCacheKey}] + # Updated hash of target cache key by target label for all targets. + # + attr_reader :cache_key_by_target_label + + # @return [Hash{String => Symbol}] + # The build configurations to install with each target. + # + attr_reader :build_configurations + + # @return [Integer] + # The project object version to install with each target. + # + attr_reader :project_object_version + + # Initialize a new instance.
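+ # + # @example Editor's illustrative sketch (not upstream); all argument values are + # assumed to come from a prior cache analysis: + # + # result = ProjectCacheAnalysisResult.new(pod_targets, aggregate_targets, + # cache_key_by_target_label, build_configurations, project_object_version) + # result.pod_targets_to_generate # => the pod targets to (re)generate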
+ # + # @param [Array<PodTarget>] pod_targets_to_generate @see #pod_targets_to_generate + # @param [Array<AggregateTarget>] aggregate_targets_to_generate @see #aggregate_targets_to_generate + # @param [Hash{String => TargetCacheKey}] cache_key_by_target_label @see #cache_key_by_target_label + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # + def initialize(pod_targets_to_generate, aggregate_targets_to_generate, cache_key_by_target_label, + build_configurations, project_object_version) + @pod_targets_to_generate = pod_targets_to_generate + @aggregate_targets_to_generate = aggregate_targets_to_generate + @cache_key_by_target_label = cache_key_by_target_label + @build_configurations = build_configurations + @project_object_version = project_object_version + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb new file mode 100644 index 0000000..ca1756e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_analyzer.rb @@ -0,0 +1,200 @@ +module Pod + class Installer + module ProjectCache + # Analyzes the project cache and computes which pod targets need to be generated. + # + class ProjectCacheAnalyzer + require 'cocoapods/installer/project_cache/project_cache_analysis_result' + + # @return [Sandbox] Project sandbox. + # + attr_reader :sandbox + + # @return [ProjectInstallationCache] The cache of targets that were previously installed. + # + attr_reader :cache + + # @return [Hash{String => Symbol}] The hash of user build configurations. + # + attr_reader :build_configurations + + # @return [Integer] The object version from the user project. + # + attr_reader :project_object_version + + # @return [Hash] The podfile plugins to be run for the installation. + # + attr_reader :podfile_plugins + + # @return [Array] The list of pod targets. + # + attr_reader :pod_targets + + # @return [Array] The list of aggregate targets. + # + attr_reader :aggregate_targets + + # @return [Hash] Hash of installation options. + # + attr_reader :installation_options + + # @return [Boolean] Flag indicating if we want to ignore the cache and force a clean installation. + # + attr_reader :clean_install + + # Initialize a new instance.
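+ # + # @example Editor's illustrative sketch (not upstream); every argument is assumed + # to be prepared by the installer before analysis: + # + # analyzer = ProjectCacheAnalyzer.new(sandbox, cache, build_configurations, + # project_object_version, podfile_plugins, pod_targets, aggregate_targets, + # installation_options, :clean_install => false) + # analyzer.analyze # => ProjectCacheAnalysisResult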
+ # + # @param [Sandbox] sandbox @see #sandbox + # @param [ProjectInstallationCache] cache @see #cache + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # @param [Hash] podfile_plugins @see #podfile_plugins + # @param [Array] pod_targets @see #pod_targets + # @param [Array] aggregate_targets @see #aggregate_targets + # @param [Hash] installation_options @see #installation_options + # @param [Boolean] clean_install @see #clean_install + # + def initialize(sandbox, cache, build_configurations, project_object_version, podfile_plugins, pod_targets, aggregate_targets, installation_options, + clean_install: false) + @sandbox = sandbox + @cache = cache + @build_configurations = build_configurations + @podfile_plugins = podfile_plugins + @pod_targets = pod_targets + @aggregate_targets = aggregate_targets + @project_object_version = project_object_version + @installation_options = installation_options + @clean_install = clean_install + end + + # @return [ProjectCacheAnalysisResult] + # Compares all targets stored against the cache and computes which targets need to be regenerated. + # + def analyze + target_by_label = Hash[(pod_targets + aggregate_targets).map { |target| [target.label, target] }] + cache_key_by_target_label = create_cache_key_mappings(target_by_label) + + full_install_results = ProjectCacheAnalysisResult.new(pod_targets, aggregate_targets, cache_key_by_target_label, + build_configurations, project_object_version) + if clean_install + UI.message 'Ignoring project cache due to the provided `--clean-install` flag.' + return full_install_results + end + + # Bail out early since these properties affect all targets and their associated projects. + if cache.build_configurations != build_configurations || + cache.project_object_version != project_object_version || + YAMLHelper.convert(cache.podfile_plugins) != YAMLHelper.convert(podfile_plugins) || + YAMLHelper.convert(cache.installation_options) != YAMLHelper.convert(installation_options) + UI.message 'Ignoring project cache due to project configuration changes.' + return full_install_results + end + + if project_names_changed?(pod_targets, cache) + UI.message 'Ignoring project cache due to project name changes.'
+ return full_install_results + end + + pod_targets_to_generate = Set[] + aggregate_targets_to_generate = Set[] + added_targets = compute_added_targets(target_by_label, cache_key_by_target_label.keys, cache.cache_key_by_target_label.keys) + added_pod_targets, added_aggregate_targets = added_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(added_pod_targets) + aggregate_targets_to_generate.merge(added_aggregate_targets) + + removed_aggregate_target_labels = compute_removed_targets(cache_key_by_target_label.keys, cache.cache_key_by_target_label.keys) + + changed_targets = compute_changed_targets_from_cache(cache_key_by_target_label, target_by_label, cache) + changed_pod_targets, changed_aggregate_targets = changed_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(changed_pod_targets) + aggregate_targets_to_generate.merge(changed_aggregate_targets) + + dirty_targets = compute_dirty_targets(pod_targets + aggregate_targets) + dirty_pod_targets, dirty_aggregate_targets = dirty_targets.partition { |target| target.is_a?(PodTarget) } + pod_targets_to_generate.merge(dirty_pod_targets) + aggregate_targets_to_generate.merge(dirty_aggregate_targets) + + # Since multi xcodeproj will group targets by PodTarget#project_name into individual projects, we need to + # append these "sibling" targets to the list of targets we need to generate before finalizing the total list, + # otherwise we will end up with missing targets. + # + sibling_pod_targets = compute_sibling_pod_targets(pod_targets, pod_targets_to_generate) + pod_targets_to_generate.merge(sibling_pod_targets) + + # We either return the full list of aggregate targets or none since the aggregate targets go into the + # Pods.xcodeproj and so we need to regenerate all aggregate targets when regenerating Pods.xcodeproj. + total_aggregate_targets_to_generate = unless aggregate_targets_to_generate.empty? && removed_aggregate_target_labels.empty? + aggregate_targets + end + + ProjectCacheAnalysisResult.new(pod_targets_to_generate.to_a, total_aggregate_targets_to_generate, + cache_key_by_target_label, build_configurations, project_object_version) + end + + private + + def create_cache_key_mappings(target_by_label) + Hash[target_by_label.map do |label, target| + case target + when PodTarget + local = sandbox.local?(target.pod_name) + checkout_options = sandbox.checkout_sources[target.pod_name] + [label, TargetCacheKey.from_pod_target(sandbox, target, :is_local_pod => local, + :checkout_options => checkout_options)] + when AggregateTarget + [label, TargetCacheKey.from_aggregate_target(sandbox, target)] + else + raise "[BUG] Unknown target type #{target}" + end + end] + end + + def compute_added_targets(target_by_label, target_labels, cached_target_labels) + (target_labels - cached_target_labels).map do |label| + target_by_label[label] + end + end + + def compute_removed_targets(target_labels, cached_target_labels) + cached_target_labels - target_labels + end + + def compute_changed_targets_from_cache(cache_key_by_target_label, target_by_label, cache) + cache_key_by_target_label.each_with_object([]) do |(label, cache_key), changed_targets| + next unless cache.cache_key_by_target_label[label] + if cache_key.key_difference(cache.cache_key_by_target_label[label]) == :project + changed_targets << target_by_label[label] + end + end + end + + def compute_dirty_targets(targets) + targets.reject do |target| + support_files_dir_exists = File.exist? 
target.support_files_dir + xcodeproj_exists = case target + when PodTarget + File.exist? sandbox.pod_target_project_path(target.project_name) + when AggregateTarget + File.exist? sandbox.project_path + else + raise "[BUG] Unknown target type #{target}" + end + support_files_dir_exists && xcodeproj_exists + end + end + + def compute_sibling_pod_targets(pod_targets, pod_targets_to_generate) + pod_targets_by_project_name = pod_targets.group_by(&:project_name) + pod_targets_to_generate.flat_map { |t| pod_targets_by_project_name[t.project_name] } + end + + def project_names_changed?(pod_targets, cache) + pod_targets.any? do |pod_target| + next unless (target_cache_key = cache.cache_key_by_target_label[pod_target.label]) + target_cache_key.project_name != pod_target.project_name + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_version.rb new file mode 100644 index 0000000..65957a8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_cache_version.rb @@ -0,0 +1,43 @@ +module Pod + class Installer + module ProjectCache + # Object that stores, loads, and holds the version of the project cache. + # + class ProjectCacheVersion + # @return [Version] The version of the project cache. + # + attr_reader :version + + # Initialize a new instance. + # + # @param [Version] version @see #version + # + def initialize(version = Version.create('0')) + @version = version + end + + # Constructs a ProjectCacheVersion from a file. + # + # @param [String] path + # The path of the project cache + # + # @return [ProjectCacheVersion] + # + def self.from_file(path) + return ProjectCacheVersion.new unless File.exist?(path) + cached_version = Version.create(File.read(path)) + ProjectCacheVersion.new(cached_version) + end + + # @return [void] + # + # @param [String] path + # The path of the project cache to save. + # + def save_as(path) + Sandbox.update_changed_file(path, version.to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_installation_cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_installation_cache.rb new file mode 100644 index 0000000..daa6e04 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_installation_cache.rb @@ -0,0 +1,103 @@ +module Pod + class Installer + module ProjectCache + # Represents the cache stored at Pods/.project/installation_cache + # + class ProjectInstallationCache + require 'cocoapods/installer/project_cache/target_cache_key' + + # @return [Hash{String => TargetCacheKey}] + # Stored hash of target cache key objects for every pod target. + # + attr_reader :cache_key_by_target_label + + # @return [Hash{String => Symbol}] + # Build configurations stored in the cache. + # + attr_reader :build_configurations + + # @return [Integer] + # Project object version stored in the cache. + # + attr_reader :project_object_version + + # @return [Hash] + # Podfile plugins used with a particular install. + # + attr_reader :podfile_plugins + + # @return [Hash] + # Configured installation options. + # + attr_reader :installation_options + + # Initializes a new instance.
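+ # + # @example Editor's illustrative sketch (not upstream); `path` is assumed to point + # at the serialized cache file inside the sandbox: + # + # cache = ProjectInstallationCache.from_file(sandbox, path) + # cache.update_project_object_version!(project_object_version) + # cache.save_as(path)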
+ # + # @param [Hash{String => TargetCacheKey}] cache_key_by_target_label @see #cache_key_by_target_label + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [Integer] project_object_version @see #project_object_version + # @param [Hash] podfile_plugins @see #podfile_plugins + # @param [Hash] installation_options @see #installation_options + # + def initialize(cache_key_by_target_label = {}, build_configurations = nil, project_object_version = nil, podfile_plugins = {}, installation_options = {}) + @cache_key_by_target_label = cache_key_by_target_label + @build_configurations = build_configurations + @project_object_version = project_object_version + @podfile_plugins = podfile_plugins + @installation_options = installation_options + end + + def update_cache_key_by_target_label!(cache_key_by_target_label) + @cache_key_by_target_label = cache_key_by_target_label + end + + def update_build_configurations!(build_configurations) + @build_configurations = build_configurations + end + + def update_project_object_version!(project_object_version) + @project_object_version = project_object_version + end + + def update_podfile_plugins!(podfile_plugins) + @podfile_plugins = podfile_plugins + end + + def update_installation_options!(installation_options) + @installation_options = installation_options + end + + def save_as(path) + Pathname(path).dirname.mkpath + Sandbox.update_changed_file(path, YAMLHelper.convert(to_hash)) + end + + def self.from_file(sandbox, path) + return ProjectInstallationCache.new unless File.exist?(path) + contents = YAMLHelper.load_file(path) + cache_keys = contents.fetch('CACHE_KEYS', {}) + cache_key_by_target_label = Hash[cache_keys.map do |name, key_hash| + [name, TargetCacheKey.from_cache_hash(sandbox, key_hash)] + end] + project_object_version = contents['OBJECT_VERSION'] + build_configurations = contents['BUILD_CONFIGURATIONS'] + podfile_plugins = contents['PLUGINS'] + installation_options = contents['INSTALLATION_OPTIONS'] + ProjectInstallationCache.new(cache_key_by_target_label, build_configurations, project_object_version, podfile_plugins, installation_options) + end + + def to_hash + cache_key_contents = Hash[cache_key_by_target_label.map do |label, key| + [label, key.to_h] + end] + contents = { 'CACHE_KEYS' => cache_key_contents } + contents['BUILD_CONFIGURATIONS'] = build_configurations if build_configurations + contents['OBJECT_VERSION'] = project_object_version if project_object_version + contents['PLUGINS'] = podfile_plugins if podfile_plugins + contents['INSTALLATION_OPTIONS'] = installation_options if installation_options + contents + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_metadata_cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_metadata_cache.rb new file mode 100644 index 0000000..1fa8fe6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/project_metadata_cache.rb @@ -0,0 +1,73 @@ +module Pod + class Installer + module ProjectCache + # Represents the metadata cache + # + class ProjectMetadataCache + require 'cocoapods/installer/project_cache/target_metadata.rb' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Hash{String => TargetMetadata}] + # Hash of target metadata, keyed by target label. + # + attr_reader :target_label_by_metadata + + # Initialize a new instance.
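+ # + # @example Editor's illustrative sketch (not upstream); the installation results are + # assumed to come from a completed install: + # + # metadata_cache = ProjectMetadataCache.from_file(sandbox, path) + # metadata_cache.update_metadata!(pod_target_installation_results, aggregate_target_installation_results) + # metadata_cache.save_as(path)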
+ # + # @param [Sandbox] sandbox @see #sandbox + # @param [Hash{String => TargetMetadata}] target_label_by_metadata @see #target_label_by_metadata + # + def initialize(sandbox, target_label_by_metadata = {}) + @sandbox = sandbox + @target_label_by_metadata = target_label_by_metadata + end + + def to_hash + Hash[target_label_by_metadata.map do |target_label, metadata| + [target_label, metadata.to_hash] + end] + end + + # Rewrites the entire cache to the given path. + # + # @param [String] path + # + # @return [void] + # + def save_as(path) + Sandbox.update_changed_file(path, YAMLHelper.convert_hash(to_hash, nil)) + end + + # Updates the metadata cache based on installation results. + # + # @param [Hash{String => TargetInstallationResult}] pod_target_installation_results + # The installation results for pod targets installed. + # + # @param [Hash{String => TargetInstallationResult}] aggregate_target_installation_results + # The installation results for aggregate targets installed. + # + def update_metadata!(pod_target_installation_results, aggregate_target_installation_results) + installation_results = pod_target_installation_results.values + aggregate_target_installation_results.values + installation_results.each do |installation_result| + native_target = installation_result.native_target + target_label_by_metadata[native_target.name] = TargetMetadata.from_native_target(sandbox, native_target) + # App targets need to be added to the cache because they can be used as app hosts for test targets, even if those test targets live inside a different pod (and thus a different project). + installation_result.app_native_targets.each_value do |app_target| + target_label_by_metadata[app_target.name] = TargetMetadata.from_native_target(sandbox, app_target) + end + end + end + + def self.from_file(sandbox, path) + return ProjectMetadataCache.new(sandbox) unless File.exist?(path) + contents = YAMLHelper.load_file(path) + target_label_by_metadata = Hash[contents.map { |target_label, hash| [target_label, TargetMetadata.from_hash(hash)] }] + ProjectMetadataCache.new(sandbox, target_label_by_metadata) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_cache_key.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_cache_key.rb new file mode 100644 index 0000000..7cf4998 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_cache_key.rb @@ -0,0 +1,176 @@ +module Pod + class Installer + module ProjectCache + # Uniquely identifies a Target. + # + class TargetCacheKey + require 'cocoapods/target/pod_target.rb' + require 'cocoapods/target/aggregate_target.rb' + require 'digest' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Symbol] + # The type of target. Either aggregate or pod target. + # + attr_reader :type + + # @return [Hash{String => Object}] + # The hash containing key-value pairs that identify the target. + # + attr_reader :key_hash + + # Initialize a new instance. + # + # @param [Sandbox] sandbox see #sandbox + # @param [Symbol] type @see #type + # @param [Hash{String => Object}] key_hash @see #key_hash + # + def initialize(sandbox, type, key_hash) + @sandbox = sandbox + @type = type + @key_hash = key_hash + end + + # Equality function used to compare TargetCacheKey objects to each other. + # + # @param [TargetCacheKey] other + # Other object to compare itself against.
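+ # + # @example Editor's illustrative sketch (not upstream); both keys are assumed to + # describe the same target label: + # + # new_key = TargetCacheKey.from_pod_target(sandbox, pod_target) + # old_key = cache.cache_key_by_target_label[pod_target.label] + # new_key.key_difference(old_key) # => :none or :project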
+ # + # @return [Symbol] The difference between this and another TargetCacheKey object. + # Symbol :none means no difference. + # + def key_difference(other) + if other.type != type + :project + else + case type + when :pod_target + return :project if (other.key_hash.keys - key_hash.keys).any? + return :project if other.key_hash['CHECKSUM'] != key_hash['CHECKSUM'] + return :project if other.key_hash['SPECS'] != key_hash['SPECS'] + return :project if other.key_hash['PROJECT_NAME'] != key_hash['PROJECT_NAME'] + end + + this_files = key_hash['FILES'] + other_files = other.key_hash['FILES'] + return :project if this_files != other_files + + this_build_settings = key_hash['BUILD_SETTINGS_CHECKSUM'] + other_build_settings = other.key_hash['BUILD_SETTINGS_CHECKSUM'] + return :project if this_build_settings != other_build_settings + + this_checkout_options = key_hash['CHECKOUT_OPTIONS'] + other_checkout_options = other.key_hash['CHECKOUT_OPTIONS'] + return :project if this_checkout_options != other_checkout_options + + :none + end + end + + def to_h + key_hash + end + + # @return [String] + # The name of the project the target belongs to. + # + def project_name + key_hash['PROJECT_NAME'] + end + + # Creates a TargetCacheKey instance from the given hash. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [Hash{String => Object}] key_hash + # The hash used to construct a TargetCacheKey object. + # + # @return [TargetCacheKey] + # + def self.from_cache_hash(sandbox, key_hash) + cache_hash = key_hash.dup + if files = cache_hash['FILES'] + cache_hash['FILES'] = files.sort_by(&:downcase) + end + if specs = cache_hash['SPECS'] + cache_hash['SPECS'] = specs.sort_by(&:downcase) + end + type = cache_hash['CHECKSUM'] ? :pod_target : :aggregate + TargetCacheKey.new(sandbox, type, cache_hash) + end + + # Constructs a TargetCacheKey instance from a PodTarget. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [PodTarget] pod_target + # The pod target used to construct a TargetCacheKey object. + # + # @param [Boolean] is_local_pod + # Whether to also include the pod's local files in the cache key. + # + # @param [Hash] checkout_options + # The checkout options for this pod target.
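+ # + # @example Editor's illustrative sketch (not upstream), mirroring how the analyzer + # builds keys in #create_cache_key_mappings: + # + # TargetCacheKey.from_pod_target(sandbox, pod_target, + # :is_local_pod => sandbox.local?(pod_target.pod_name), + # :checkout_options => sandbox.checkout_sources[pod_target.pod_name])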
+ # + # @return [TargetCacheKey] + # + def self.from_pod_target(sandbox, pod_target, is_local_pod: false, checkout_options: nil) + build_settings = {} + build_settings[pod_target.label.to_s] = Hash[pod_target.build_settings.map do |k, v| + [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] + end] + pod_target.test_spec_build_settings_by_config.each do |name, settings_by_config| + build_settings[name] = Hash[settings_by_config.map { |k, v| [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] }] + end + pod_target.app_spec_build_settings_by_config.each do |name, settings_by_config| + build_settings[name] = Hash[settings_by_config.map { |k, v| [k, Digest::MD5.hexdigest(v.xcconfig.to_s)] }] + end + + contents = { + 'CHECKSUM' => pod_target.root_spec.checksum, + 'SPECS' => pod_target.specs.map(&:to_s).sort_by(&:downcase), + 'BUILD_SETTINGS_CHECKSUM' => build_settings, + 'PROJECT_NAME' => pod_target.project_name, + } + if is_local_pod + relative_file_paths = pod_target.all_files.map { |f| f.relative_path_from(sandbox.root).to_s } + contents['FILES'] = relative_file_paths.sort_by(&:downcase) + end + contents['CHECKOUT_OPTIONS'] = checkout_options if checkout_options + TargetCacheKey.new(sandbox, :pod_target, contents) + end + + # Construct a TargetCacheKey instance from an AggregateTarget. + # + # @param [Sandbox] sandbox The sandbox to use to construct a TargetCacheKey object. + # + # @param [AggregateTarget] aggregate_target + # The aggregate target used to construct a TargetCacheKey object. + # + # @return [TargetCacheKey] + # + def self.from_aggregate_target(sandbox, aggregate_target) + build_settings = {} + aggregate_target.user_build_configurations.keys.each do |configuration| + build_settings[configuration] = Digest::MD5.hexdigest(aggregate_target.build_settings(configuration).xcconfig.to_s) + end + + contents = { + 'BUILD_SETTINGS_CHECKSUM' => build_settings, + } + if aggregate_target.includes_resources? || aggregate_target.includes_on_demand_resources? + relative_resource_file_paths = aggregate_target.resource_paths_by_config.values.flatten.uniq + relative_on_demand_resource_file_paths = aggregate_target.on_demand_resources.map do |res| + res.relative_path_from(sandbox.project_path.dirname).to_s + end + contents['FILES'] = (relative_resource_file_paths + relative_on_demand_resource_file_paths).sort_by(&:downcase) + end + TargetCacheKey.new(sandbox, :aggregate, contents) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_metadata.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_metadata.rb new file mode 100644 index 0000000..62ea254 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/project_cache/target_metadata.rb @@ -0,0 +1,74 @@ +module Pod + class Installer + module ProjectCache + # Metadata used to reconstruct a PBXTargetDependency. + # + class TargetMetadata + # @return [String] + # The label of the native target. + # + attr_reader :target_label + + # @return [String] + # The UUID of the native target installed. + # + attr_reader :native_target_uuid + + # @return [String] + # The path of the container project the native target was installed into. + # + attr_reader :container_project_path + + # Initialize a new instance. 
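+ # + # @example Editor's illustrative sketch (not upstream); round-trips a metadata + # instance through its YAML-friendly hash form: + # + # metadata = TargetMetadata.from_native_target(sandbox, native_target) + # TargetMetadata.from_hash(metadata.to_hash).to_s == metadata.to_s # => true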
+ # + # @param [String] target_label @see #target_label + # @param [String] native_target_uuid @see #native_target_uuid + # @param [String] container_project_path @see #container_project_path + # + def initialize(target_label, native_target_uuid, container_project_path) + @target_label = target_label + @native_target_uuid = native_target_uuid + @container_project_path = container_project_path + end + + def to_hash + { + 'LABEL' => target_label, + 'UUID' => native_target_uuid, + 'PROJECT_PATH' => container_project_path, + } + end + + def to_s + "#{target_label} : #{native_target_uuid} : #{container_project_path}" + end + + # Constructs a TargetMetadata instance from a hash. + # + # @param [Hash] hash + # The hash used to construct a new TargetMetadata instance. + # + # @return [TargetMetadata] + # + def self.from_hash(hash) + TargetMetadata.new(hash['LABEL'], hash['UUID'], hash['PROJECT_PATH']) + end + + # Constructs a TargetMetadata instance from a native target. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [PBXNativeTarget] native_target + # The native target used to construct a TargetMetadata instance. + # + # @return [TargetMetadata] + # + def self.from_native_target(sandbox, native_target) + TargetMetadata.new(native_target.name, native_target.uuid, + native_target.project.path.relative_path_from(sandbox.root).to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_dir_cleaner.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_dir_cleaner.rb new file mode 100644 index 0000000..7152d50 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_dir_cleaner.rb @@ -0,0 +1,105 @@ +module Pod + class Installer + # Cleans up the sandbox directory by removing stale target support files and headers. + # + class SandboxDirCleaner + # @return [Sandbox] The sandbox directory that will be cleaned. + # + attr_reader :sandbox + + # @return [Array] + # The list of all pod targets that will be installed into the Sandbox. + # + attr_reader :pod_targets + + # @return [Array] + # The list of all aggregate targets that will be installed into the Sandbox. + # + attr_reader :aggregate_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] pod_targets @see #pod_targets + # @param [Array] aggregate_targets @see #aggregate_targets + # + def initialize(sandbox, pod_targets, aggregate_targets) + @sandbox = sandbox + @pod_targets = pod_targets + @aggregate_targets = aggregate_targets + end + + def clean! + UI.message('Cleaning up sandbox directory') do + # Clean up Target Support Files Directory + target_support_dirs_to_install = (pod_targets + aggregate_targets).map(&:support_files_dir) + target_support_dirs = sandbox_target_support_dirs + + removed_target_support_dirs = target_support_dirs - target_support_dirs_to_install + removed_target_support_dirs.each { |dir| remove_dir(dir) } + + # Clean up Sandbox Headers Directory + sandbox_private_headers_to_install = pod_targets.flat_map do |pod_target| + if pod_target.header_mappings_by_file_accessor.empty? + [] + else + [pod_target.build_headers.root.join(pod_target.headers_sandbox)] + end + end + sandbox_public_headers_to_install = pod_targets.flat_map do |pod_target| + if pod_target.public_header_mappings_by_file_accessor.empty? 
+ [] + else + [ + sandbox.public_headers.root.join(pod_target.headers_sandbox), + pod_target.module_map_path.dirname, + ].uniq + end + end + + removed_sandbox_public_headers = sandbox_public_headers - sandbox_public_headers_to_install + removed_sandbox_public_headers.each { |path| remove_dir(path) } + + removed_sandbox_private_headers = sandbox_private_headers(pod_targets) - sandbox_private_headers_to_install + removed_sandbox_private_headers.each { |path| remove_dir(path) } + + project_dir_names_to_install = pod_targets.map do |pod_target| + sandbox.pod_target_project_path(pod_target.project_name) + end + project_dir_names = sandbox_project_dir_names - [sandbox.project_path] + user_project_dir_names = aggregate_targets.map(&:user_project_path).uniq + + removed_project_dir_names = project_dir_names - user_project_dir_names - project_dir_names_to_install + removed_project_dir_names.each { |dir| remove_dir(dir) } + end + end + + private + + def sandbox_target_support_dirs + child_directories_of(sandbox.target_support_files_root) + end + + def sandbox_private_headers(pod_targets) + pod_targets.flat_map { |pod_target| child_directories_of(pod_target.build_headers.root) }.uniq + end + + def sandbox_project_dir_names + child_directories_of(sandbox.root).select { |d| d.extname == '.xcodeproj' } + end + + def sandbox_public_headers + child_directories_of(sandbox.public_headers.root) + end + + def child_directories_of(dir) + return [] unless dir.exist? + dir.children.select(&:directory?) + end + + def remove_dir(path) + FileUtils.rm_rf(path) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_header_paths_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_header_paths_installer.rb new file mode 100644 index 0000000..976afb5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/sandbox_header_paths_installer.rb @@ -0,0 +1,45 @@ +module Pod + class Installer + # Adds all the search paths into the sandbox HeaderStore and each pod target's HeaderStore. + # + class SandboxHeaderPathsInstaller + # @return [Sandbox] The sandbox to use for this analysis. + # + attr_reader :sandbox + + # @return [Array] The list of pod targets to analyze. + # + attr_reader :pod_targets + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] pod_targets @see #pod_targets + # + def initialize(sandbox, pod_targets) + @pod_targets = pod_targets + @sandbox = sandbox + end + + def install! + # Link all pod target header search paths into the HeaderStore. + pod_targets.each do |pod_target| + next if pod_target.build_as_framework? && pod_target.should_build? + install_target(pod_target) + end + end + + private + + def install_target(pod_target) + pod_target_header_mappings = pod_target.header_mappings_by_file_accessor.values + public_header_mappings = pod_target.public_header_mappings_by_file_accessor.values + added_build_headers = !pod_target_header_mappings.all?(&:empty?) + added_public_headers = !public_header_mappings.all?(&:empty?) 
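+ # Editor's note (not upstream): a search path is registered below only when at least + # one file accessor actually maps headers, so a pod with no public headers only + # contributes to the private build headers store, and vice versa.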
+ + pod_target.build_headers.add_search_path(pod_target.headers_sandbox, pod_target.platform) if added_build_headers + sandbox.public_headers.add_search_path(pod_target.headers_sandbox, pod_target.platform) if added_public_headers + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/source_provider_hooks_context.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/source_provider_hooks_context.rb new file mode 100644 index 0000000..b312dc0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/source_provider_hooks_context.rb @@ -0,0 +1,34 @@ +module Pod + class Installer + # Context object designed to be used with the HooksManager which describes + # the context of the installer before spec sources have been created + # + class SourceProviderHooksContext + # @return [Array] The source objects to send to the installer + # + attr_reader :sources + + # @return [SourceProviderHooksContext] Convenience class method to generate the + # static context. + # + def self.generate + result = new + result + end + + def initialize + @sources = [] + end + + # @param [Source] source object to be added to the installer + # + # @return [void] + # + def add_source(source) + unless source.nil? + @sources << source + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/target_uuid_generator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/target_uuid_generator.rb new file mode 100644 index 0000000..28badbb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/target_uuid_generator.rb @@ -0,0 +1,34 @@ +module Pod + class Installer + # Generates stable UUIDs for Native Targets. + # + class TargetUUIDGenerator < Xcodeproj::Project::UUIDGenerator + # This method override is used to ONLY generate stable UUIDs for PBXNativeTarget instances and their sibling PBXFileReference + # product reference in the project. Stable native target UUIDs are necessary for incremental installation + # because other projects reference the target and product reference by its UUID in the remoteGlobalIDString field. + # + # @param [Array] projects + # The list of projects used to generate stable target UUIDs. + # + def generate_all_paths_by_objects(projects) + @paths_by_object = {} + projects.each do |project| + project_basename = project.path.basename.to_s + project.objects.each do |object| + @paths_by_object[object] = object.uuid + end + project.targets.each do |target| + @paths_by_object[target] = Digest::MD5.hexdigest(project_basename + target.name).upcase + if target.is_a?
Xcodeproj::Project::Object::PBXNativeTarget + @paths_by_object[target.product_reference] = Digest::MD5.hexdigest(project_basename + 'product_reference' + target.name).upcase + end + end + end + end + + def uuid_for_path(path) + path + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator.rb new file mode 100644 index 0000000..b487c7f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator.rb @@ -0,0 +1,280 @@ +require 'xcodeproj/workspace' +require 'xcodeproj/project' + +require 'active_support/core_ext/string/inflections' +require 'active_support/core_ext/array/conversions' + +module Pod + class Installer + # The {UserProjectIntegrator} integrates the libraries generated by + # TargetDefinitions of the {Podfile} with their corresponding user + # projects. + # + class UserProjectIntegrator + autoload :TargetIntegrator, 'cocoapods/installer/user_project_integrator/target_integrator' + + # @return [Podfile] the podfile that should be integrated with the user + # projects. + # + attr_reader :podfile + + # @return [Sandbox] The sandbox used for this installation. + # + attr_reader :sandbox + + # @return [Pathname] the path of the installation. + # + # @todo This is only used to compute the workspace path in case that it + # should be inferred by the project. If the workspace should be in + # the same directory as the project, this could be removed. + # + attr_reader :installation_root + + # @return [Array] the targets represented in the Podfile. + # + attr_reader :targets + + # @return [Array] the targets that require integration. This will always be equal to or a + # subset of #targets. + # + attr_reader :targets_to_integrate + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? use_input_output_paths + + # Initialize a new instance + # + # @param [Podfile] podfile @see #podfile + # @param [Sandbox] sandbox @see #sandbox + # @param [Pathname] installation_root @see #installation_root + # @param [Array] targets @see #targets + # @param [Array] targets_to_integrate @see #targets_to_integrate + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(podfile, sandbox, installation_root, targets, targets_to_integrate, use_input_output_paths: true) + @podfile = podfile + @sandbox = sandbox + @installation_root = installation_root + @targets = targets + @targets_to_integrate = targets_to_integrate + @use_input_output_paths = use_input_output_paths + end + + # Integrates the user projects associated with the {TargetDefinitions} + # with the Pods project and its products. + # + # @return [void] + # + def integrate! + create_workspace + deintegrated_projects = deintegrate_removed_targets + integrate_user_targets + warn_about_xcconfig_overrides + projects_to_save = (user_projects_to_integrate + deintegrated_projects).uniq + save_projects(projects_to_save) + end + + #-----------------------------------------------------------------------# + + private + + # @!group Integration steps + + # Creates and saves the workspace containing the Pods project and the + # user projects, if needed.
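+ # + # @example Editor's illustrative sketch (not upstream); with a single user project + # `App.xcodeproj` and no explicit `workspace` directive in the Podfile, the + # workspace name is inferred from the project: + # + # workspace_path # => #<Pathname .../App.xcworkspace>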
+ # + # @note If the workspace already contains the projects it is not saved, + # to prevent Xcode from displaying the revert dialog: `Do you want to + # keep the Xcode version or revert to the version on disk?` + # + # @return [void] + # + def create_workspace + all_projects = user_project_paths.sort.push(sandbox.project_path).uniq + file_references = all_projects.map do |path| + relative_path = path.relative_path_from(workspace_path.dirname).to_s + Xcodeproj::Workspace::FileReference.new(relative_path, 'group') + end + + if workspace_path.exist? + workspace = Xcodeproj::Workspace.new_from_xcworkspace(workspace_path) + new_file_references = file_references - workspace.file_references + unless new_file_references.empty? + new_file_references.each { |fr| workspace << fr } + workspace.save_as(workspace_path) + end + + else + UI.notice "Please close any current Xcode sessions and use `#{workspace_path.basename}` for this project from now on." + workspace = Xcodeproj::Workspace.new(*file_references) + workspace.save_as(workspace_path) + end + end + + # Deintegrates the targets of the user projects that are no longer part of the installation. + # + # @return [Array] The list of projects that were deintegrated. + # + def deintegrate_removed_targets + Config.instance.with_changes(:silent => true) do + deintegrator = Deintegrator.new + all_project_targets = user_projects.flat_map(&:native_targets).uniq + all_native_targets = targets.flat_map(&:user_targets).uniq + targets_to_deintegrate = all_project_targets - all_native_targets + targets_to_deintegrate.each do |target| + deintegrator.deintegrate_target(target) + end + return targets_to_deintegrate.map(&:project).select(&:dirty?).uniq + end + end + + # Integrates the targets of the user projects with the libraries + # generated from the {Podfile}. + # + # @note {TargetDefinition}s without dependencies are skipped to prevent + # creating empty libraries for target definitions which are only + # wrappers for others. + # + # @return [void] + # + def integrate_user_targets + target_integrators = targets_to_integrate.sort_by(&:name).map do |target| + TargetIntegrator.new(target, :use_input_output_paths => use_input_output_paths?) + end + target_integrators.each(&:integrate!) + end + + # Saves all user projects. + # + # @param [Array] projects The projects to save. + # + # @return [void] + # + def save_projects(projects) + projects.each do |project| + if project.dirty? + project.save + else + # There is a bug in Xcode where the process of deleting and + # re-creating the xcconfig files used in the build + # configuration causes building the user project to fail until + # Xcode is relaunched. + # + # Touching/saving the project causes Xcode to reload these. + # + # https://github.com/CocoaPods/CocoaPods/issues/2665 + FileUtils.touch(project.path + 'project.pbxproj') + end + end + end + + IGNORED_KEYS = %w(CODE_SIGN_IDENTITY).freeze + INHERITED_FLAGS = %w($(inherited) ${inherited}).freeze + + # Checks whether the settings of the CocoaPods generated xcconfig are + # overridden by the build configuration of a target and prints a + # warning to inform the user if needed.
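+ # + # @example Editor's illustrative sketch (not upstream); hypothetical OTHER_LDFLAGS + # values on a user target's build configuration: + # + # # OTHER_LDFLAGS = -ObjC -> triggers the override warning + # # OTHER_LDFLAGS = $(inherited) -ObjC -> no warning, the CocoaPods value is inherited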
+ # + def warn_about_xcconfig_overrides + targets_to_integrate.each do |aggregate_target| + aggregate_target.user_targets.each do |user_target| + user_target.build_configurations.each do |config| + xcconfig = aggregate_target.xcconfigs[config.name] + if xcconfig + (xcconfig.to_hash.keys - IGNORED_KEYS).each do |key| + target_values = config.build_settings[key] + if target_values && + !INHERITED_FLAGS.any? { |flag| target_values.include?(flag) } + print_override_warning(aggregate_target, user_target, config, key) + end + end + end + end + end + end + end + + private + + # @!group Private Helpers + #-----------------------------------------------------------------------# + + # @return [Pathname] the path where the workspace containing the Pods + # project and the user projects should be saved. + # + def workspace_path + if podfile.workspace_path + declared_path = podfile.workspace_path + path_with_ext = File.extname(declared_path) == '.xcworkspace' ? declared_path : "#{declared_path}.xcworkspace" + podfile_dir = File.dirname(podfile.defined_in_file || '') + absolute_path = File.expand_path(path_with_ext, podfile_dir) + Pathname.new(absolute_path) + elsif user_project_paths.count == 1 + project = user_project_paths.first.basename('.xcodeproj') + installation_root + "#{project}.xcworkspace" + else + raise Informative, 'Could not automatically select an Xcode ' \ + "workspace. Specify one in your Podfile like so:\n\n" \ + " workspace 'path/to/Workspace.xcworkspace'\n" + end + end + + # @return [Array] the projects of all the targets that require integration. + # + # @note Empty target definitions are ignored. + # + def user_projects_to_integrate + targets_to_integrate.map(&:user_project).compact.uniq + end + + # @return [Array] the projects of all the targets regardless of whether they are integrated + # or not. + # + # @note Empty target definitions are ignored. + # + def user_projects + targets.map(&:user_project).compact.uniq + end + + # @return [Array] the paths of all the user projects from all targets regardless of whether they are + # integrated or not. + # + # @note Empty target definitions are ignored. + # + def user_project_paths + targets.map(&:user_project_path).compact.uniq + end + + # Prints a warning informing the user that a build configuration of + # the integrated target is overriding the CocoaPods build settings. + # + # @param [Target::AggregateTarget] aggregate_target + # The umbrella target. + # + # @param [Xcodeproj::PBXNativeTarget] user_target + # The native target. + # + # @param [Xcodeproj::XCBuildConfiguration] config + # The build configuration. + # + # @param [String] key + # The key of the overridden build setting. + # + def print_override_warning(aggregate_target, user_target, config, key) + actions = [ + 'Use the `$(inherited)` flag, or', + 'Remove the build settings from the target.', + ] + message = "The `#{user_target.name} [#{config.name}]` " \ + "target overrides the `#{key}` build setting defined in " \ + "`#{aggregate_target.xcconfig_relative_path(config.name)}'. 
" \ + 'This can lead to problems with the CocoaPods installation' + UI.warn(message, actions) + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator.rb new file mode 100644 index 0000000..caec8dc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator.rb @@ -0,0 +1,815 @@ +require 'active_support/core_ext/string/inflections' +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/target/build_settings' + +module Pod + class Installer + class UserProjectIntegrator + # This class is responsible for integrating the library generated by a + # {TargetDefinition} with its destination project. + # + class TargetIntegrator + autoload :XCConfigIntegrator, 'cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator' + + # @return [String] the string to use as prefix for every build phase added to the user project + # + BUILD_PHASE_PREFIX = '[CP] '.freeze + + # @return [String] the string to use as prefix for every build phase declared by the user within a podfile + # or podspec. + # + USER_BUILD_PHASE_PREFIX = '[CP-User] '.freeze + + # @return [String] the name of the check manifest phase + # + CHECK_MANIFEST_PHASE_NAME = 'Check Pods Manifest.lock'.freeze + + # @return [Array] the symbol types, which require that the pod + # frameworks are embedded in the output directory / product bundle. + # + # @note This does not include :app_extension or :watch_extension because + # these types must have their frameworks embedded in their host targets. + # For messages extensions, this only applies if it's embedded in a messages + # application. + # + EMBED_FRAMEWORK_TARGET_TYPES = [:application, :application_on_demand_install_capable, :unit_test_bundle, + :ui_test_bundle, :watch2_extension, :messages_application].freeze + + # @return [String] the name of the embed frameworks phase + # + EMBED_FRAMEWORK_PHASE_NAME = 'Embed Pods Frameworks'.freeze + + # @return [String] the name of the copy xcframeworks phase + # + COPY_XCFRAMEWORKS_PHASE_NAME = 'Copy XCFrameworks'.freeze + + # @return [String] the name of the copy resources phase + # + COPY_PODS_RESOURCES_PHASE_NAME = 'Copy Pods Resources'.freeze + + # @return [String] the name of the copy dSYM files phase + # + COPY_DSYM_FILES_PHASE_NAME = 'Copy dSYMs'.freeze + + # @return [Integer] the maximum number of input and output paths to use for a script phase + # + MAX_INPUT_OUTPUT_PATHS = 1000 + + # @return [Array] names of script phases that existed in previous versions of CocoaPods + # + REMOVED_SCRIPT_PHASE_NAMES = [ + 'Prepare Artifacts'.freeze, + ].freeze + + # @return [float] Returns Minimum Xcode Compatibility version for FileLists + # + MIN_FILE_LIST_COMPATIBILITY_VERSION = 9.3 + + # @return [String] Returns Minimum Xcode Object version for FileLists + # + MIN_FILE_LIST_OBJECT_VERSION = 50 + + # @return [AggregateTarget] the target that should be integrated. + # + attr_reader :target + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? 
use_input_output_paths + + # Init a new TargetIntegrator + # + # @param [AggregateTarget] target @see #target + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(target, use_input_output_paths: true) + @target = target + @use_input_output_paths = use_input_output_paths + end + + # @private + # + XCFileListConfigKey = Struct.new(:file_list_path, :file_list_relative_path) + + class << self + # @param [Xcodeproj::Project::Object::AbstractObject] object + # + # @return [Boolean] Whether input & output paths for the given object + # should be stored in a file list file. + # + def input_output_paths_use_filelist?(object) + unless object.project.root_object.compatibility_version.nil? + version_match = object.project.root_object.compatibility_version.match(/Xcode ([0-9]*\.[0-9]*)/).to_a + end + if version_match&.at(1).nil? + object.project.object_version.to_i >= MIN_FILE_LIST_OBJECT_VERSION + else + Pod::Version.new(version_match[1]) >= Pod::Version.new(MIN_FILE_LIST_COMPATIBILITY_VERSION) + end + end + + # Sets the input & output paths for the given script build phase. + # + # @param [Xcodeproj::Project::Object::PBXShellScriptBuildPhase] phase + # The phase to set input & output paths on. + # + # @param [Hash] input_paths_by_config + # + # @return [Void] + def set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + if input_output_paths_use_filelist?(phase) + [input_paths_by_config, output_paths_by_config].each do |hash| + hash.each do |file_list, files| + generator = Generator::FileList.new(files) + Xcode::PodsProjectGenerator::TargetInstallerHelper.update_changed_file(generator, file_list.file_list_path) + end + end + + phase.input_paths = nil + phase.output_paths = nil + phase.input_file_list_paths = input_paths_by_config.each_key.map(&:file_list_relative_path).uniq + phase.output_file_list_paths = output_paths_by_config.each_key.map(&:file_list_relative_path).uniq + else + input_paths = input_paths_by_config.values.flatten(1).uniq + output_paths = output_paths_by_config.values.flatten(1).uniq + TargetIntegrator.validate_input_output_path_limit(input_paths, output_paths) + + phase.input_paths = input_paths + phase.output_paths = output_paths + phase.input_file_list_paths = nil + phase.output_file_list_paths = nil + end + end + + # Adds a shell script build phase responsible to copy (embed) the frameworks + # generated by the TargetDefinition to the bundle of the product of the + # targets. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_embed_frameworks_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + EMBED_FRAMEWORK_PHASE_NAME) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Delete a 'Embed Pods Frameworks' Script Build Phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. 
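+ # + # @example Editor's illustrative sketch (not upstream): + # + # TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + # # removes the '[CP] Embed Pods Frameworks' phase when present, a no-op otherwise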
+ # + def remove_embed_frameworks_script_phase_from_target(native_target) + remove_script_phase_from_target(native_target, EMBED_FRAMEWORK_PHASE_NAME) + end + + # Adds a shell script build phase responsible to copy the xcframework slice + # to the intermediate build directory. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_copy_xcframeworks_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + COPY_XCFRAMEWORKS_PHASE_NAME) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + reorder_script_phase(native_target, phase, :before_compile) + end + + # Delete a 'Copy XCFrameworks' Script Build Phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. + # + def remove_copy_xcframeworks_script_phase_from_target(native_target) + remove_script_phase_from_target(native_target, COPY_XCFRAMEWORKS_PHASE_NAME) + end + + # Removes a script phase from a native target by name + # + # @param [PBXNativeTarget] native_target + # The target from which the script phased should be removed + # + # @param [String] phase_name + # The name of the script phase to remove + # + def remove_script_phase_from_target(native_target, phase_name) + build_phase = native_target.shell_script_build_phases.find { |bp| bp.name && bp.name.end_with?(phase_name) } + return unless build_phase.present? + native_target.build_phases.delete(build_phase) + end + + # Adds a shell script build phase responsible to copy the resources + # generated by the TargetDefinition to the bundle of the product of the + # targets. + # + # @param [PBXNativeTarget] native_target + # The native target to add the script phase into. + # + # @param [String] script_path + # The script path to execute as part of this script phase. + # + # @param [Hash] input_paths_by_config + # The input paths (if any) to include for this script phase. + # + # @param [Hash] output_paths_by_config + # The output paths (if any) to include for this script phase. + # + # @return [void] + # + def create_or_update_copy_resources_script_phase_to_target(native_target, script_path, input_paths_by_config = {}, output_paths_by_config = {}) + phase_name = COPY_PODS_RESOURCES_PHASE_NAME + phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + phase_name) + phase.shell_script = %("#{script_path}"\n) + TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Delete a 'Copy Pods Resources' script phase if present + # + # @param [PBXNativeTarget] native_target + # The native target to remove the script phase from. + # + def remove_copy_resources_script_phase_from_target(native_target) + build_phase = native_target.shell_script_build_phases.find { |bp| bp.name && bp.name.end_with?(COPY_PODS_RESOURCES_PHASE_NAME) } + return unless build_phase.present? 
+ native_target.build_phases.delete(build_phase)
+ end
+
+ # Creates or updates a shell script build phase for the given target.
+ #
+ # @param [PBXNativeTarget] native_target
+ # The native target to add the script phase into.
+ #
+ # @param [String] script_phase_name
+ # The name of the script phase to use.
+ #
+ # @param [String] show_env_vars_in_log
+ # The value for showing environment variables in the log during execution of this script phase,
+ # or `nil` to not set the value at all.
+ #
+ # @return [PBXShellScriptBuildPhase] The existing or newly created shell script build phase.
+ #
+ def create_or_update_shell_script_build_phase(native_target, script_phase_name, show_env_vars_in_log = '0')
+ build_phases = native_target.build_phases.grep(Xcodeproj::Project::Object::PBXShellScriptBuildPhase)
+ build_phases.find { |phase| phase.name && phase.name.end_with?(script_phase_name) }.tap { |p| p.name = script_phase_name if p } ||
+ native_target.project.new(Xcodeproj::Project::Object::PBXShellScriptBuildPhase).tap do |phase|
+ UI.message("Adding Build Phase '#{script_phase_name}' to project.") do
+ phase.name = script_phase_name
+ unless show_env_vars_in_log.nil?
+ phase.show_env_vars_in_log = show_env_vars_in_log
+ end
+ native_target.build_phases << phase
+ end
+ end
+ end
+
+ # Updates all user script phases for the given native target, including creating or updating, deleting
+ # and re-ordering.
+ #
+ # @return [void]
+ #
+ def create_or_update_user_script_phases(script_phases, native_target)
+ script_phase_names = script_phases.map { |k| k[:name] }
+ # Delete script phases that are no longer defined for the target.
+ native_target_script_phases = native_target.shell_script_build_phases.select do |bp|
+ !bp.name.nil? && bp.name.start_with?(USER_BUILD_PHASE_PREFIX)
+ end
+ native_target_script_phases.each do |script_phase|
+ script_phase_name_without_prefix = script_phase.name.sub(USER_BUILD_PHASE_PREFIX, '')
+ unless script_phase_names.include?(script_phase_name_without_prefix)
+ native_target.build_phases.delete(script_phase)
+ end
+ end
+ # Create or update the phases that are expected to be present.
+ script_phases.each do |script_phase|
+ name_with_prefix = USER_BUILD_PHASE_PREFIX + script_phase[:name]
+ phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, name_with_prefix, nil)
+ phase.shell_script = script_phase[:script]
+ phase.shell_path = script_phase[:shell_path] || '/bin/sh'
+ phase.input_paths = script_phase[:input_files]
+ phase.output_paths = script_phase[:output_files]
+ phase.input_file_list_paths = script_phase[:input_file_lists]
+ phase.output_file_list_paths = script_phase[:output_file_lists]
+ phase.dependency_file = script_phase[:dependency_file]
+ # At least with Xcode 10 `showEnvVarsInLog` is *NOT* set to any value even if it's checked and it only
+ # gets set to '0' if the user has explicitly disabled this.
+ if (show_env_vars_in_log = script_phase.fetch(:show_env_vars_in_log, '1')) == '0'
+ phase.show_env_vars_in_log = show_env_vars_in_log
+ end
+
+ execution_position = script_phase[:execution_position]
+ reorder_script_phase(native_target, phase, execution_position)
+ end
+ end
+
+ def reorder_script_phase(native_target, script_phase, execution_position)
+ return if execution_position == :any || execution_position.to_s.empty?
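+ # Map the requested position onto an anchor phase type: compile positions anchor
+ # against the sources build phase, header positions against the headers build phase.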
+ target_phase_type = case execution_position + when :before_compile, :after_compile + Xcodeproj::Project::Object::PBXSourcesBuildPhase + when :before_headers, :after_headers + Xcodeproj::Project::Object::PBXHeadersBuildPhase + else + raise ArgumentError, "Unknown execution position `#{execution_position}`" + end + order_before = case execution_position + when :before_compile, :before_headers + true + when :after_compile, :after_headers + false + else + raise ArgumentError, "Unknown execution position `#{execution_position}`" + end + + target_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(target_phase_type) + end + return if target_phase_index.nil? + script_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(Xcodeproj::Project::Object::PBXShellScriptBuildPhase) && !bp.name.nil? && bp.name == script_phase.name + end + if (order_before && script_phase_index > target_phase_index) || + (!order_before && script_phase_index < target_phase_index) + native_target.build_phases.move_from(script_phase_index, target_phase_index) + end + end + + # Script phases can have a limited number of input and output paths due to each one being exported to `env`. + # A large number can cause a build failure because of limitations in `env`. See issue + # https://github.com/CocoaPods/CocoaPods/issues/7362. + # + # @param [Array] input_paths + # The input paths to trim. + # + # @param [Array] output_paths + # The output paths to trim. + # + # @return [void] + # + def validate_input_output_path_limit(input_paths, output_paths) + if (input_paths.count + output_paths.count) > MAX_INPUT_OUTPUT_PATHS + input_paths.clear + output_paths.clear + end + end + + # Returns the resource output paths for all given input paths. + # + # @param [Array] resource_input_paths + # The input paths to map to. + # + # @return [Array] The resource output paths. + # + def resource_output_paths(resource_input_paths) + resource_input_paths.map do |resource_input_path| + base_path = '${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}' + extname = File.extname(resource_input_path) + basename = extname == '.xcassets' ? 'Assets' : File.basename(resource_input_path) + output_extension = Target.output_extension_for_resource(extname) + File.join(base_path, File.basename(basename, extname) + output_extension) + end.uniq + end + + # Returns the framework input paths for the given framework paths + # + # @param [Array] framework_paths + # The target's framework paths to map to input paths. + # + # @param [Array] xcframeworks + # The target's xcframeworks to map to input paths. + # + # @return [Array] The embed frameworks script input paths + # + def embed_frameworks_input_paths(framework_paths, xcframeworks) + input_paths = framework_paths.map(&:source_path) + # Only include dynamic xcframeworks as the input since we will not be copying static xcframework slices + xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.each do |xcframework| + name = xcframework.name + input_paths << "#{Pod::Target::BuildSettings.xcframework_intermediate_dir(xcframework)}/#{name}.framework/#{name}" + end + input_paths + end + + # Returns the framework output paths for the given framework paths + # + # @param [Array] framework_paths + # The framework input paths to map to output paths. + # + # @param [Array] xcframeworks + # The installed xcframeworks. 
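+ #
+ # @example (illustrative) a vendored `Foo.framework` maps to the output path
+ # `${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Foo.framework`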
+ #
+ # @return [Array<String>] The embed framework script output paths
+ #
+ def embed_frameworks_output_paths(framework_paths, xcframeworks)
+ paths = framework_paths.map do |framework_path|
+ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/#{File.basename(framework_path.source_path)}"
+ end.uniq
+ # Static xcframeworks are not copied to the build dir,
+ # so only include dynamic artifacts that will be copied to the build folder
+ xcframework_paths = xcframeworks.select { |xcf| xcf.build_type.dynamic_framework? }.map do |xcframework|
+ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/#{xcframework.name}.framework"
+ end
+ paths + xcframework_paths
+ end
+
+ # Updates a project's native targets to include on demand resources specified by the supplied parameters.
+ # Note that currently, only app-level targets are allowed to include on demand resources.
+ #
+ # @param [Sandbox] sandbox
+ # The sandbox to use for calculating ODR file references.
+ #
+ # @param [Xcodeproj::Project] project
+ # The project to update known asset tags as well as add the ODR group.
+ #
+ # @param [Xcodeproj::PBXNativeTarget, Array<Xcodeproj::PBXNativeTarget>] native_targets
+ # The native targets to integrate on demand resources into.
+ #
+ # @param [Sandbox::FileAccessor, Array<Sandbox::FileAccessor>] file_accessors
+ # The file accessors that provide the ODRs to integrate.
+ #
+ # @param [Xcodeproj::PBXGroup] parent_odr_group
+ # The group to use as the parent to add ODR file references into.
+ #
+ # @param [String] target_odr_group_name
+ # The name to use for the group created that contains the ODR file references.
+ #
+ # @return [void]
+ #
+ def update_on_demand_resources(sandbox, project, native_targets, file_accessors, parent_odr_group,
+ target_odr_group_name)
+ category_to_tags = {}
+ file_accessors = Array(file_accessors)
+ native_targets = Array(native_targets)
+
+ # Target no longer provides ODR references so remove everything related to this target.
+ if file_accessors.all? { |fa| fa.on_demand_resources.empty? }
+ old_target_odr_group = parent_odr_group[target_odr_group_name]
+ old_odr_file_refs = old_target_odr_group&.recursive_children_groups&.each_with_object({}) do |group, hash|
+ hash[group.name] = group.files
+ end || {}
+ native_targets.each do |native_target|
+ native_target.remove_on_demand_resources(old_odr_file_refs)
+ update_on_demand_resources_build_settings(native_target, nil => old_odr_file_refs.keys)
+ end
+ old_target_odr_group&.remove_from_project
+ return
+ end
+
+ target_odr_group = parent_odr_group[target_odr_group_name] || parent_odr_group.new_group(target_odr_group_name)
+ current_file_refs = target_odr_group.recursive_children_groups.flat_map(&:files)
+
+ added_file_refs = file_accessors.flat_map do |file_accessor|
+ target_odr_files_refs = Hash[file_accessor.on_demand_resources.map do |tag, value|
+ tag_group = target_odr_group[tag] || target_odr_group.new_group(tag)
+ category_to_tags[value[:category]] ||= []
+ category_to_tags[value[:category]] << tag
+ resources_file_refs = value[:paths].map do |resource|
+ odr_resource_file_ref = Pathname.new(resource).relative_path_from(sandbox.root)
+ tag_group.find_file_by_path(odr_resource_file_ref.to_s) || tag_group.new_file(odr_resource_file_ref)
+ end
+ [tag, resources_file_refs]
+ end]
+ native_targets.each do |native_target|
+ native_target.add_on_demand_resources(target_odr_files_refs)
+ end
+ target_odr_files_refs.values.flatten
+ end
+
+ # If the target ODR file references were updated, make sure we remove the ones that are no longer present
+ # for the target.
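+ # Set difference: anything referenced on a previous run but not re-added above is stale.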
+ remaining_refs = current_file_refs - added_file_refs + remaining_refs.each do |ref| + native_targets.each do |user_target| + user_target.resources_build_phase.remove_file_reference(ref) + end + ref.remove_from_project + end + target_odr_group.recursive_children_groups.each { |g| g.remove_from_project if g.empty? } + + attributes = project.root_object.attributes + attributes['KnownAssetTags'] = (attributes['KnownAssetTags'] ||= []) | category_to_tags.values.flatten + project.root_object.attributes = attributes + + native_targets.each do |native_target| + update_on_demand_resources_build_settings(native_target, category_to_tags) + end + end + + def update_on_demand_resources_build_settings(native_target, category_to_tags) + %w[ON_DEMAND_RESOURCES_INITIAL_INSTALL_TAGS ON_DEMAND_RESOURCES_PREFETCH_ORDER].each do |category_key| + native_target.build_configurations.each do |c| + key = case category_key + when 'ON_DEMAND_RESOURCES_INITIAL_INSTALL_TAGS' + :initial_install + when 'ON_DEMAND_RESOURCES_PREFETCH_ORDER' + :prefetched + else + :download_on_demand + end + tags_for_category = (c.build_settings[category_key] || '').split + category_to_tags_dup = category_to_tags.dup + tags_to_add = category_to_tags_dup.delete(key) || [] + tags_to_delete = category_to_tags_dup.values.flatten + tags_for_category = (tags_for_category + tags_to_add - tags_to_delete).flatten.compact.uniq + if tags_for_category.empty? + val = c.build_settings.delete(category_key) + native_target.project.mark_dirty! unless val.nil? + else + tags = tags_for_category.join(' ') + unless c.build_settings[category_key] == tags + c.build_settings[category_key] = tags + native_target.project.mark_dirty! + end + end + end + end + end + end + + # Integrates the user project targets. Only the targets that do **not** + # already have the Pods library in their frameworks build phase are + # processed. + # + # @return [void] + # + def integrate! + UI.section(integration_message) do + XCConfigIntegrator.integrate(target, native_targets) + + remove_obsolete_script_phases + add_pods_library + add_embed_frameworks_script_phase + remove_embed_frameworks_script_phase_from_embedded_targets + add_copy_resources_script_phase + add_check_manifest_lock_script_phase + add_user_script_phases + add_on_demand_resources + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class} for target `#{target.label}'>" + end + + private + + # @!group Integration steps + #---------------------------------------------------------------------# + + # Adds spec product reference to the frameworks build phase of the + # {TargetDefinition} integration libraries. Adds a file reference to + # the frameworks group of the project and adds it to the frameworks + # build phase of the targets. + # + # @return [void] + # + def add_pods_library + frameworks = user_project.frameworks_group + native_targets.each do |native_target| + build_phase = native_target.frameworks_build_phase + product_name = target.product_name + + # Delete previously integrated references. 
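+ # (Matches build files whose display name looks like a Pods product, e.g.
+ # `libPods-App.a` or `Pods_App.framework`; the names here are illustrative.)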
+ product_build_files = build_phase.files.select do |build_file| + build_file.display_name =~ Pod::Deintegrator::FRAMEWORK_NAMES + end + + product_build_files.each do |product_file| + next unless product_name != product_file.display_name + UI.message("Removing old product reference `#{product_file.display_name}` from project.") + frameworks.remove_reference(product_file.file_ref) + build_phase.remove_build_file(product_file) + end + + # Find or create and add a reference for the current product type + new_product_ref = frameworks.files.find { |f| f.path == product_name } || + frameworks.new_product_ref_for_target(target.product_basename, target.product_type) + build_phase.build_file(new_product_ref) || + build_phase.add_file_reference(new_product_ref, true) + end + end + + # Find or create a 'Copy Pods Resources' build phase + # + # @return [void] + # + def add_copy_resources_script_phase + unless target.includes_resources? + native_targets.each do |native_target| + TargetIntegrator.remove_copy_resources_script_phase_from_target(native_target) + end + return + end + + script_path = target.copy_resources_script_relative_path + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths + target.resource_paths_by_config.each do |config, resource_paths| + input_paths_key = XCFileListConfigKey.new(target.copy_resources_script_input_files_path(config), + target.copy_resources_script_input_files_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + resource_paths + + output_paths_key = XCFileListConfigKey.new(target.copy_resources_script_output_files_path(config), + target.copy_resources_script_output_files_relative_path) + output_paths_by_config[output_paths_key] = TargetIntegrator.resource_output_paths(resource_paths) + end + end + + native_targets.each do |native_target| + # Static library targets cannot include resources. Skip this phase from being added instead. + next if native_target.symbol_type == :static_library + TargetIntegrator.create_or_update_copy_resources_script_phase_to_target(native_target, script_path, + input_paths_by_config, + output_paths_by_config) + end + end + + # Removes the embed frameworks build phase from embedded targets + # + # @note Older versions of CocoaPods would add this build phase to embedded + # targets. They should be removed on upgrade because embedded targets + # will have their frameworks embedded in their host targets. + # + def remove_embed_frameworks_script_phase_from_embedded_targets + return unless target.requires_host_target? + native_targets.each do |native_target| + if AggregateTarget::EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include? native_target.symbol_type + TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + end + end + end + + # Find or create a 'Embed Pods Frameworks' Copy Files Build Phase + # + # @return [void] + # + def add_embed_frameworks_script_phase + unless target.includes_frameworks? || (target.xcframeworks_by_config.values.flatten.any? { |xcf| xcf.build_type.dynamic_framework? }) + native_targets_to_embed_in.each do |native_target| + TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + end + return + end + + script_path = target.embed_frameworks_script_relative_path + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths? 
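+ # Collect every configuration that contributes frameworks or xcframeworks so that
+ # each one gets its own input/output file-list pair.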
+ configs = Set.new(target.framework_paths_by_config.keys + target.xcframeworks_by_config.keys).sort
+ configs.each do |config|
+ framework_paths = target.framework_paths_by_config[config] || []
+ xcframeworks = target.xcframeworks_by_config[config] || []
+
+ input_paths_key = XCFileListConfigKey.new(target.embed_frameworks_script_input_files_path(config), target.embed_frameworks_script_input_files_relative_path)
+ input_paths_by_config[input_paths_key] = [script_path] + TargetIntegrator.embed_frameworks_input_paths(framework_paths, xcframeworks)
+
+ output_paths_key = XCFileListConfigKey.new(target.embed_frameworks_script_output_files_path(config), target.embed_frameworks_script_output_files_relative_path)
+ output_paths_by_config[output_paths_key] = TargetIntegrator.embed_frameworks_output_paths(framework_paths, xcframeworks)
+ end
+ end
+
+ native_targets_to_embed_in.each do |native_target|
+ TargetIntegrator.create_or_update_embed_frameworks_script_phase_to_target(native_target, script_path, input_paths_by_config, output_paths_by_config)
+ end
+ end
+
+ # Updates all target script phases for the current target, including creating or updating, deleting
+ # and re-ordering.
+ #
+ # @return [void]
+ #
+ def add_user_script_phases
+ native_targets.each do |native_target|
+ TargetIntegrator.create_or_update_user_script_phases(target.target_definition.script_phases, native_target)
+ end
+ end
+
+ # Adds a shell script build phase responsible for checking if the Pods
+ # locked in the Pods/Manifest.lock file are in sync with the Pods defined
+ # in the Podfile.lock.
+ #
+ # @note The build phase is prepended to the build phases in order to fail
+ # fast.
+ #
+ # @return [void]
+ #
+ def add_check_manifest_lock_script_phase
+ phase_name = CHECK_MANIFEST_PHASE_NAME
+ native_targets.each do |native_target|
+ phase = TargetIntegrator.create_or_update_shell_script_build_phase(native_target, BUILD_PHASE_PREFIX + phase_name)
+ native_target.build_phases.unshift(phase).uniq! unless native_target.build_phases.first == phase
+ phase.shell_script = <<-SH.strip_heredoc
+ diff "${PODS_PODFILE_DIR_PATH}/Podfile.lock" "${PODS_ROOT}/Manifest.lock" > /dev/null
+ if [ $? != 0 ] ; then
+ # print error to STDERR
+ echo "error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation." >&2
+ exit 1
+ fi
+ # This output is used by Xcode 'outputs' to avoid re-running this script phase.
+ echo "SUCCESS" > "${SCRIPT_OUTPUT_FILE_0}"
+ SH
+ phase.input_paths = %w(${PODS_PODFILE_DIR_PATH}/Podfile.lock ${PODS_ROOT}/Manifest.lock)
+ phase.output_paths = [target.check_manifest_lock_script_output_file_path]
+ end
+ end
+
+ # @param [Array<String>] removed_phase_names
+ # The names of the script phases that should be removed
+ #
+ def remove_obsolete_script_phases(removed_phase_names = REMOVED_SCRIPT_PHASE_NAMES)
+ native_targets.each do |native_target|
+ removed_phase_names.each do |phase_name|
+ TargetIntegrator.remove_script_phase_from_target(native_target, phase_name)
+ end
+ end
+ end
+
+ def add_on_demand_resources
+ target.pod_targets.each do |pod_target|
+ # When integrating with the user's project we are only interested in integrating ODRs from library specs
+ # and not test specs or app specs.
+ library_file_accessors = pod_target.file_accessors.select { |fa| fa.spec.library_specification? }
+ target_odr_group_name = "#{pod_target.label}-OnDemandResources"
+ # The 'Pods' group is always there for production code; however, for tests it's sometimes not added.
+ # This ensures it's always present and makes it easier for existing and new tests.
+ parent_odr_group = target.user_project.main_group['Pods'] || target.user_project.new_group('Pods')
+ TargetIntegrator.update_on_demand_resources(target.sandbox, target.user_project, target.user_targets,
+ library_file_accessors, parent_odr_group, target_odr_group_name)
+ end
+ end
+
+ private
+
+ # @!group Private Helpers
+ #---------------------------------------------------------------------#
+
+ # @return [Array<PBXNativeTarget>] The list of all the targets that
+ # match the given target.
+ #
+ def native_targets
+ @native_targets ||= target.user_targets
+ end
+
+ # @return [Array<PBXNativeTarget>] The list of all the targets that
+ # require that the pod frameworks are embedded in the output
+ # directory / product bundle.
+ #
+ def native_targets_to_embed_in
+ return [] if target.requires_host_target?
+ native_targets.select do |target|
+ EMBED_FRAMEWORK_TARGET_TYPES.include?(target.symbol_type)
+ end
+ end
+
+ # Read the project from the disk to ensure that it is up to date, as
+ # other TargetIntegrators might have modified it.
+ #
+ # @return [Project]
+ #
+ def user_project
+ target.user_project
+ end
+
+ # @return [Specification::Consumer] the consumer for the specifications.
+ #
+ def spec_consumers
+ @spec_consumers ||= target.pod_targets.map(&:file_accessors).flatten.map(&:spec_consumer)
+ end
+
+ # @return [String] the message that should be displayed for the target
+ # integration.
+ #
+ def integration_message
+ "Integrating target `#{target.name}` " \
+ "(#{UI.path target.user_project_path} project)"
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb
new file mode 100644
index 0000000..f68acbe
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/user_project_integrator/target_integrator/xcconfig_integrator.rb
@@ -0,0 +1,179 @@
+module Pod
+ class Installer
+ class UserProjectIntegrator
+ class TargetIntegrator
+ # Configures a user target to use the CocoaPods xcconfigs, which allow
+ # it to link against the Pods.
+ #
+ class XCConfigIntegrator
+ # Integrates the user target.
+ #
+ # @param [Target::AggregateTarget] pod_bundle
+ # The Pods bundle.
+ #
+ # @param [Array<PBXNativeTarget>] targets
+ # The native targets which should be integrated
+ # with the Pod bundle.
+ #
+ def self.integrate(pod_bundle, targets)
+ targets.each do |target|
+ target.build_configurations.each do |config|
+ set_target_xcconfig(pod_bundle, target, config)
+ end
+ end
+ end
+
+ private
+
+ # @!group Integration steps
+ #-------------------------------------------------------------------#
+
+ # Creates a file reference to the xcconfig generated by
+ # CocoaPods (if needed) and sets it as the base configuration of
+ # the given build configuration of the user target.
+ #
+ # @param [Target::AggregateTarget] pod_bundle
+ # The Pods bundle.
+ #
+ # @param [PBXNativeTarget] target
+ # The native target.
+ #
+ # @param [Xcodeproj::XCBuildConfiguration] config
+ # The build configuration.
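+ #
+ # (This per-configuration step is the core of {XCConfigIntegrator.integrate}; it is
+ # deliberately conservative and warns rather than overwrite a custom base configuration.)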
+ #
+ def self.set_target_xcconfig(pod_bundle, target, config)
+ file_ref = create_xcconfig_ref(pod_bundle, config)
+ path = file_ref.path
+
+ existing = config.base_configuration_reference
+
+ if existing && existing != file_ref
+ if existing.real_path.to_path.start_with?(pod_bundle.sandbox.root.to_path << '/')
+ config.base_configuration_reference = file_ref
+ elsif !xcconfig_includes_target_xcconfig?(config.base_configuration_reference, path)
+ unless existing_config_is_identical_to_pod_config?(existing.real_path, pod_bundle.xcconfig_path(config.name))
+ UI.warn 'CocoaPods did not set the base configuration of your ' \
+ 'project because your project already has a custom ' \
+ 'config set. In order for CocoaPods integration to work at ' \
+ 'all, please either set the base configurations of the target ' \
+ "`#{target.name}` to `#{path}` or include the `#{path}` in your " \
+ "build configuration (#{UI.path(existing.real_path)})."
+ end
+ end
+ elsif config.base_configuration_reference.nil? || file_ref.nil?
+ config.base_configuration_reference = file_ref
+ end
+ end
+
+ private
+
+ # @!group Private helpers
+ #-------------------------------------------------------------------#
+
+ # Prints a warning informing the user that a build configuration of
+ # the integrated target is overriding the CocoaPods build settings.
+ #
+ # @param [Target::AggregateTarget] pod_bundle
+ # The Pods bundle.
+ #
+ # @param [Xcodeproj::PBXNativeTarget] target
+ # The native target.
+ #
+ # @param [Xcodeproj::XCBuildConfiguration] config
+ # The build configuration.
+ #
+ # @param [String] key
+ # The key of the overridden build setting.
+ #
+ def self.print_override_warning(pod_bundle, target, config, key)
+ actions = [
+ 'Use the `$(inherited)` flag, or',
+ 'Remove the build settings from the target.',
+ ]
+ message = "The `#{target.name} [#{config.name}]` " \
+ "target overrides the `#{key}` build setting defined in " \
+ "`#{pod_bundle.xcconfig_relative_path(config.name)}'. " \
+ 'This can lead to problems with the CocoaPods installation'
+ UI.warn(message, actions)
+ end
+
+ # Naively checks to see if a given PBXFileReference imports a given
+ # path.
+ #
+ # @param [PBXFileReference] base_config_ref
+ # A file reference to an `.xcconfig` file.
+ #
+ # @param [String] target_config_path
+ # The path to check for.
+ #
+ SILENCE_WARNINGS_STRING = '// @COCOAPODS_SILENCE_WARNINGS@ //'
+ def self.xcconfig_includes_target_xcconfig?(base_config_ref, target_config_path)
+ return unless base_config_ref && base_config_ref.real_path.file?
+ regex = %r{
+ ^(
+ (\s* # Possible, but unlikely, space before include statement
+ \#include(\?)?\s+ # Include statement
+ ['"] # Open quote
+ (.*\/)? # Possible prefix to path
+ #{Regexp.quote(target_config_path)} # The path should end in the target_config_path
+ ['"] # Close quote
+ )
+ |
+ (#{Regexp.quote(SILENCE_WARNINGS_STRING)}) # Token to treat xcconfig as good and silence pod install warnings
+ )
+ }x
+ base_config_ref.real_path.readlines.find { |line| line =~ regex }
+ end
+
+ # Checks to see if the config files at two paths exist and are identical.
+ #
+ # @param [Pathname] existing_config_path
+ # The existing config path.
+ #
+ # @param [Pathname] pod_config_path
+ # The pod config path.
+ #
+ def self.existing_config_is_identical_to_pod_config?(existing_config_path, pod_config_path)
+ existing_config_path.file? && (!pod_config_path.file? || FileUtils.compare_file(existing_config_path, pod_config_path))
+ end
+
+ # Creates a file reference to the xcconfig generated by
+ # CocoaPods (if needed).
+ # If the Pods group does not exist, create the group and set
+ # its location to the `Pods` directory.
+ # If the file reference exists but its location differs from the
+ # xcconfig's path and the symlink target paths differ, update
+ # its location.
+ #
+ # @param [Target::AggregateTarget] pod_bundle
+ # The Pods bundle.
+ #
+ # @param [Xcodeproj::XCBuildConfiguration] config
+ # The build configuration.
+ #
+ # @return [PBXFileReference] the xcconfig reference.
+ #
+ def self.create_xcconfig_ref(pod_bundle, config)
+ # The Xcode root group's path is absolute; we must compute the path of the sandbox relative to the user project.
+ group_path = pod_bundle.relative_pods_root_path
+ group = config.project['Pods'] || config.project.new_group('Pods', group_path)
+
+ # Support custom user paths for the Pods group and the xcconfig files.
+ group_path = Pathname.new(group.real_path)
+ xcconfig_path = Pathname.new(pod_bundle.xcconfig_path(config.name))
+ path = xcconfig_path.relative_path_from(group_path)
+
+ filename = path.basename.to_s
+ file_ref = group.files.find { |f| f.display_name == filename }
+ if file_ref && file_ref.path != path
+ file_ref_path = Pathname.new(file_ref.real_path)
+ if !file_ref_path.exist? || !xcconfig_path.exist? || file_ref_path.realpath != xcconfig_path.realpath
+ file_ref.path = path.to_s
+ end
+ end
+
+ file_ref || group.new_file(path.to_s)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode.rb
new file mode 100644
index 0000000..51da34c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode.rb
@@ -0,0 +1,11 @@
+module Pod
+ class Installer
+ class Xcode
+ autoload :PodsProjectGenerator, 'cocoapods/installer/xcode/pods_project_generator'
+ autoload :SinglePodsProjectGenerator, 'cocoapods/installer/xcode/single_pods_project_generator'
+ autoload :MultiPodsProjectGenerator, 'cocoapods/installer/xcode/multi_pods_project_generator'
+ autoload :PodsProjectWriter, 'cocoapods/installer/xcode/pods_project_generator/pods_project_writer'
+ autoload :TargetValidator, 'cocoapods/installer/xcode/target_validator'
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb
new file mode 100644
index 0000000..613f19d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/multi_pods_project_generator.rb
@@ -0,0 +1,82 @@
+module Pod
+ class Installer
+ class Xcode
+ # The {MultiPodsProjectGenerator} handles generation of the 'Pods/Pods.xcodeproj' and Xcode projects
+ # for every {PodTarget}. All Pod Target projects are nested under the 'Pods.xcodeproj'.
+ #
+ class MultiPodsProjectGenerator < PodsProjectGenerator
+ # Generates `Pods/Pods.xcodeproj` and all pod target subprojects.
+ #
+ # @return [PodsProjectGeneratorResult]
+ #
+ def generate!
+ # Generate container Pods.xcodeproj.
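+ # (The container holds only the aggregate targets; every pod target lands in its own
+ # nested subproject, created below.)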
+ container_project = create_container_project(aggregate_targets, sandbox.project_path) + + project_paths_by_pod_targets = pod_targets.group_by do |pod_target| + sandbox.pod_target_project_path(pod_target.project_name) + end + projects_by_pod_targets = Hash[project_paths_by_pod_targets.map do |project_path, pod_targets| + project = create_pods_project(pod_targets, project_path, container_project) + [project, pod_targets] + end] + + # Note: We must call `install_file_references` on all pod targets before installing them. + pod_target_installation_results = install_all_pod_targets(projects_by_pod_targets) + aggregate_target_installation_results = install_aggregate_targets_into_project(container_project, aggregate_targets) + target_installation_results = InstallationResults.new(pod_target_installation_results, aggregate_target_installation_results) + + integrate_targets(target_installation_results.pod_target_installation_results) + wire_target_dependencies(target_installation_results) + PodsProjectGeneratorResult.new(container_project, projects_by_pod_targets, target_installation_results) + end + + private + + def create_container_project(aggregate_targets, path) + return unless aggregate_targets + platforms = aggregate_targets.map(&:platform) + ProjectGenerator.new(sandbox, path, [], build_configurations, platforms, + project_object_version, config.podfile_path).generate! + end + + def create_pods_project(pod_targets, path, parent_project) + platforms = pod_targets.map(&:platform) + project = ProjectGenerator.new(sandbox, path, pod_targets, build_configurations, platforms, + project_object_version, false, :pod_target_subproject => true).generate! + # Instead of saving every subproject to disk, we can optimize this by creating a temporary folder + # the file reference can use so that we only have to call `save` once for all projects. + project.path.mkpath + if parent_project + pod_name = pod_name_from_grouping(pod_targets) + is_local = sandbox.local?(pod_name) + parent_project.add_pod_subproject(project, is_local) + end + + install_file_references(project, pod_targets) + project + end + + def install_all_pod_targets(projects_by_pod_targets) + UI.message '- Installing Pod Targets' do + projects_by_pod_targets.each_with_object({}) do |(project, pod_targets), target_installation_results| + target_installation_results.merge!(install_pod_targets(project, pod_targets)) + end + end + end + + def install_aggregate_targets_into_project(project, aggregate_targets) + return {} unless project + install_aggregate_targets(project, aggregate_targets) + end + + def pod_name_from_grouping(pod_targets) + # The presumption here for multi pods project is that we group by `pod_name`, thus the grouping of `pod_targets` + # should share the same `pod_name`. + raise '[BUG] Expected at least 1 pod target' if pod_targets.empty? + pod_targets.first.pod_name + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator.rb new file mode 100644 index 0000000..f122c76 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator.rb @@ -0,0 +1,326 @@ +module Pod + class Installer + class Xcode + # The {PodsProjectGenerator} handles generation of CocoaPods Xcode projects. 
+ # + class PodsProjectGenerator + require 'cocoapods/installer/xcode/pods_project_generator/target_installer_helper' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_integrator' + require 'cocoapods/installer/xcode/pods_project_generator/target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/target_installation_result' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/file_references_installer' + require 'cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer' + require 'cocoapods/installer/xcode/pods_project_generator/project_generator' + require 'cocoapods/installer/xcode/pods_project_generator_result' + require 'cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer' + require 'cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer' + require 'cocoapods/native_target_extension.rb' + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Array] The model representations of an + # aggregation of pod targets generated for a target definition + # in the Podfile. + # + attr_reader :aggregate_targets + + # @return [Array] The model representations of pod targets. + # + attr_reader :pod_targets + + # @return [Hash{String => Symbol}] The build configurations that need to be installed. + # + attr_reader :build_configurations + + # @return [InstallationOptions] the installation options from the Podfile. + # + attr_reader :installation_options + + # @return [Config] the global CocoaPods configuration. + # + attr_reader :config + + # @return [Integer] the object version for the projects we will generate. + # + attr_reader :project_object_version + + # @return [ProjectMetadataCache] the metadata cache used to reconstruct target dependencies. + # + attr_reader :metadata_cache + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Array] aggregate_targets @see #aggregate_targets + # @param [Array] pod_targets @see #pod_targets + # @param [Hash{String => Symbol}] build_configurations @see #build_configurations + # @param [InstallationOptions] installation_options @see #installation_options + # @param [Config] config @see #config + # @param [Integer] project_object_version @see #project_object_version + # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache + # + def initialize(sandbox, aggregate_targets, pod_targets, build_configurations, installation_options, config, + project_object_version, metadata_cache = nil) + @sandbox = sandbox + @aggregate_targets = aggregate_targets + @pod_targets = pod_targets + @build_configurations = build_configurations + @installation_options = installation_options + @config = config + @project_object_version = project_object_version + @metadata_cache = metadata_cache + end + + # Configure schemes for the specified project and pod targets. Schemes for development pods will be shared + # if requested by the integration. + # + # @param [PBXProject] project The project to configure schemes for. + # @param [Array] pod_targets The pod targets within that project to configure their schemes. + # @param [PodsProjectGeneratorResult] generator_result the result of the project generation + # + # @return [void] + # + def configure_schemes(project, pod_targets, generator_result) + pod_targets.each do |pod_target| + share_scheme = pod_target.should_build? 
&& share_scheme_for_development_pod?(pod_target.pod_name) && sandbox.local?(pod_target.pod_name) + configure_schemes_for_pod_target(project, pod_target, share_scheme, generator_result) + end + end + + # @!attribute [Hash{String => TargetInstallationResult}] pod_target_installation_results + # @!attribute [Hash{String => TargetInstallationResult}] aggregate_target_installation_results + InstallationResults = Struct.new(:pod_target_installation_results, :aggregate_target_installation_results) + + private + + def install_file_references(project, pod_targets) + UI.message "- Installing files into #{project.project_name} project" do + installer = FileReferencesInstaller.new(sandbox, pod_targets, project, installation_options.preserve_pod_file_structure) + installer.install! + end + end + + def install_pod_targets(project, pod_targets) + umbrella_headers_by_dir = pod_targets.map do |pod_target| + next unless pod_target.should_build? && pod_target.defines_module? + pod_target.umbrella_header_path + end.compact.group_by(&:dirname) + + pod_target_installation_results = Hash[pod_targets.sort_by(&:name).map do |pod_target| + umbrella_headers_in_header_dir = umbrella_headers_by_dir[pod_target.module_map_path.dirname] + target_installer = PodTargetInstaller.new(sandbox, project, pod_target, umbrella_headers_in_header_dir) + [pod_target.name, target_installer.install!] + end] + + # Hook up system framework dependencies for the pod targets that were just installed. + pod_target_installation_result_values = pod_target_installation_results.values.compact + unless pod_target_installation_result_values.empty? + add_system_framework_dependencies(pod_target_installation_result_values) + end + + pod_target_installation_results + end + + def install_aggregate_targets(project, aggregate_targets) + UI.message '- Installing Aggregate Targets' do + aggregate_target_installation_results = Hash[aggregate_targets.sort_by(&:name).map do |target| + target_installer = AggregateTargetInstaller.new(sandbox, project, target) + [target.name, target_installer.install!] + end] + + aggregate_target_installation_results + end + end + + # @param [Hash{String => InstallationResult}] pod_target_installation_results + # the installations to integrate + # + # @return [void] + # + def integrate_targets(pod_target_installation_results) + pod_installations_to_integrate = pod_target_installation_results.values.select do |pod_target_installation_result| + pod_target = pod_target_installation_result.target + !pod_target_installation_result.test_native_targets.empty? || + !pod_target_installation_result.app_native_targets.empty? || + pod_target.contains_script_phases? || + pod_target.framework_paths.values.flatten.any? { |paths| !paths.dsym_path.nil? } || + pod_target.xcframeworks.values.any? { |xcframeworks| !xcframeworks.empty? } + end + return if pod_installations_to_integrate.empty? + + UI.message '- Integrating targets' do + use_input_output_paths = !installation_options.disable_input_output_paths + pod_installations_to_integrate.each do |pod_target_installation_result| + PodTargetIntegrator.new(pod_target_installation_result, :use_input_output_paths => use_input_output_paths).integrate! 
+ end + end + end + + def add_system_framework_dependencies(pod_target_installation_results) + sorted_installation_results = pod_target_installation_results.sort_by do |pod_target_installation_result| + pod_target_installation_result.target.name + end + sorted_installation_results.each do |target_installation_result| + pod_target = target_installation_result.target + next unless pod_target.should_build? + next if pod_target.build_as_static? + pod_target.file_accessors.each do |file_accessor| + native_target = target_installation_result.native_target_for_spec(file_accessor.spec) + add_system_frameworks_to_native_target(native_target, file_accessor) + end + end + end + + # Adds a target dependency for each pod spec to each aggregate target and + # links the pod targets among each other. + # + # @param [Array[Hash{String=>TargetInstallationResult}]] target_installation_results + # the installation results that were produced when all targets were installed. This includes + # pod target installation results and aggregate target installation results. + # + # @return [void] + # + def wire_target_dependencies(target_installation_results) + pod_target_installation_results_hash = target_installation_results.pod_target_installation_results + aggregate_target_installation_results_hash = target_installation_results.aggregate_target_installation_results + + AggregateTargetDependencyInstaller.new(sandbox, aggregate_target_installation_results_hash, + pod_target_installation_results_hash, metadata_cache).install! + + PodTargetDependencyInstaller.new(sandbox, pod_target_installation_results_hash, metadata_cache).install! + end + + # @param [String] pod The root name of the development pod. + # + # @return [Boolean] whether the scheme for the given development pod should be + # shared. + # + def share_scheme_for_development_pod?(pod) + case dev_pods_to_share = installation_options.share_schemes_for_development_pods + when TrueClass, FalseClass, NilClass + dev_pods_to_share + when Array + dev_pods_to_share.any? { |dev_pod| dev_pod === pod } # rubocop:disable Style/CaseEquality + else + raise Informative, 'Unable to handle share_schemes_for_development_pods ' \ + "being set to #{dev_pods_to_share.inspect} -- please set it to true, " \ + 'false, or an array of pods to share schemes for.' + end + end + + #------------------------------------------------------------------------# + + # @! group Private Helpers + + def add_system_frameworks_to_native_target(native_target, file_accessor) + file_accessor.spec_consumer.frameworks.each do |framework| + native_target.add_system_framework(framework) + end + end + + # @param [Project] project + # the project of the pod target + # + # @param [Pod::PodTarget] pod_target + # the pod target for which to configure schemes + # + # @param [Boolean] share_scheme + # whether the created schemes should be shared + # + # @param [PodsProjectGeneratorResult] generator_result + # the project generation result + # + def configure_schemes_for_pod_target(project, pod_target, share_scheme, generator_result) + # Ignore subspecs because they do not provide a scheme configuration due to the fact that they are always + # merged with the root spec scheme. 
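+ # A scheme configuration, when present, is a Hash along these lines (keys taken from
+ # the accesses below; the values shown are illustrative):
+ # { :launch_arguments => ['-verbose'], :environment_variables => { 'API_ENV' => 'staging' },
+ # :code_coverage => true, :parallelizable => false, :build_configurations => { 'Run' => 'Debug' } }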
+ specs = [pod_target.root_spec] + pod_target.test_specs + pod_target.app_specs + hosted_test_specs_by_host = Hash.new do |hash, key| + hash[key] = [] + end + pod_target.test_app_hosts_by_spec.each do |spec, (host_spec, host_target)| + if host_target == pod_target + hosted_test_specs_by_host[host_spec] << spec + end + end + is_custom_host = !hosted_test_specs_by_host.empty? + specs.each do |spec| + scheme_name = pod_target.spec_label(spec) + scheme_configuration = pod_target.scheme_for_spec(spec) + if !scheme_configuration.empty? || is_custom_host + scheme_path = Xcodeproj::XCScheme.user_data_dir(project.path) + "#{scheme_name}.xcscheme" + scheme = Xcodeproj::XCScheme.new(scheme_path) + command_line_arguments = scheme.launch_action.command_line_arguments + scheme_configuration.fetch(:launch_arguments, []).each do |launch_arg| + command_line_arguments.assign_argument(:argument => launch_arg, :enabled => true) + end + scheme.launch_action.command_line_arguments = command_line_arguments + environment_variables = scheme.launch_action.environment_variables + scheme_configuration.fetch(:environment_variables, {}).each do |k, v| + environment_variables.assign_variable(:key => k, :value => v) + end + scheme.launch_action.environment_variables = environment_variables + if scheme_configuration.key?(:code_coverage) + scheme.test_action.code_coverage_enabled = scheme_configuration[:code_coverage] + end + if scheme_configuration.key?(:parallelizable) + scheme.test_action.testables.each { |testable| testable.parallelizable = scheme_configuration[:parallelizable] } + end + set_scheme_build_configurations(scheme, scheme_configuration.fetch(:build_configurations, {})) + + hosted_test_specs_by_host[spec].each do |hosted_spec| + # We are an app spec which hosts this test spec. + # Include the test specs's test bundle within our scheme's test action + native_target = generator_result.native_target_for_spec(hosted_spec) + testable = Xcodeproj::XCScheme::TestAction::TestableReference.new(native_target) + scheme.test_action.add_testable(testable) + end + + if spec.test_specification? + # Default to using the test bundle to expand variables + native_target_for_expansion = generator_result.native_target_for_spec(spec) + macro_expansion = Xcodeproj::XCScheme::MacroExpansion.new(native_target_for_expansion) + scheme.launch_action.add_macro_expansion(macro_expansion) + end + scheme.save! + end + Xcodeproj::XCScheme.share_scheme(project.path, scheme_name) if share_scheme + end + end + + # @param [Xcodeproj::XCSheme] scheme + # scheme to apply configuration to + # + # @param [Hash{String => String}] configuration + # action => build configuration to use for the action + # + # @return [void] + # + def set_scheme_build_configurations(scheme, configuration) + configuration.each do |k, v| + unless @build_configurations.include?(v) + raise Informative, "Unable to set `#{v}` as a build configuration as " \ + "it doesn't match with any of your projects build configurations." 
+ end + + case k + when 'Run' + scheme.launch_action.build_configuration = v + when 'Test' + scheme.test_action.build_configuration = v + when 'Analyze' + scheme.analyze_action.build_configuration = v + when 'Archive' + scheme.archive_action.build_configuration = v + else + raise Informative, "#{k} is not a valid scheme action " \ + "only one of ['run', 'test', 'analyze', 'archive'] is available" + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb new file mode 100644 index 0000000..7e26e9e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_dependency_installer.rb @@ -0,0 +1,66 @@ +module Pod + class Installer + class Xcode + # Wires up the dependencies for aggregate targets from the target installation results + # + class AggregateTargetDependencyInstaller + require 'cocoapods/native_target_extension.rb' + + # @return [Hash{String => TargetInstallationResult}] The target installation results for pod targets. + # + attr_reader :pod_target_installation_results + + # @return [Hash{String => TargetInstallationResult}] The target installation results for aggregate targets. + # + attr_reader :aggregate_target_installation_results + + # @return [ProjectMetadataCache] The project metadata cache. + # + attr_reader :metadata_cache + + # @return [Sandbox] The sandbox used for this installation. + # + attr_reader :sandbox + + # Initialize a new instance. + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Hash{String => TargetInstallationResult}] aggregate_target_installation_results @see #aggregate_target_installation_results + # @param [Hash{String => TargetInstallationResult}] pod_target_installation_results @see #pod_target_installation_results + # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache + # + def initialize(sandbox, aggregate_target_installation_results, pod_target_installation_results, metadata_cache) + @sandbox = sandbox + @aggregate_target_installation_results = aggregate_target_installation_results + @pod_target_installation_results = pod_target_installation_results + @metadata_cache = metadata_cache + end + + def install! + aggregate_target_installation_results.values.each do |aggregate_target_installation_result| + aggregate_target = aggregate_target_installation_result.target + aggregate_native_target = aggregate_target_installation_result.native_target + project = aggregate_native_target.project + # Wire up dependencies that are part of inherit search paths for this aggregate target. + aggregate_target.search_paths_aggregate_targets.each do |search_paths_target| + aggregate_native_target.add_dependency(aggregate_target_installation_results[search_paths_target.name].native_target) + end + # Wire up all pod target dependencies to aggregate target. 
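+ # Prefer a freshly installed native target; otherwise fall back to the metadata cache
+ # for targets that were installed by a previous (incremental) run.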
+ aggregate_target.pod_targets.each do |pod_target| + if pod_target_installation_result = pod_target_installation_results[pod_target.name] + pod_target_native_target = pod_target_installation_result.native_target + aggregate_native_target.add_dependency(pod_target_native_target) + else + # Hit the cache + is_local = sandbox.local?(pod_target.pod_name) + cached_dependency = metadata_cache.target_label_by_metadata[pod_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, aggregate_native_target, cached_dependency) + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb new file mode 100644 index 0000000..93ff4a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/aggregate_target_installer.rb @@ -0,0 +1,192 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Creates the targets which aggregate the Pods libraries in the Pods + # project and the relative support files. + # + class AggregateTargetInstaller < TargetInstaller + # @return [AggregateTarget] @see TargetInstaller#target + # + attr_reader :target + + # Creates the target in the Pods project and the relative support files. + # + # @return [TargetInstallationResult] the result of the installation of this target. + # + def install! + UI.message "- Installing target `#{target.name}` #{target.platform}" do + native_target = add_target + create_support_files_dir + create_support_files_group + create_xcconfig_file(native_target) + if target.build_as_framework? + create_info_plist_file(target.info_plist_path, native_target, target.version, target.platform) + create_module_map(native_target) + create_umbrella_header(native_target) + elsif target.uses_swift? + create_module_map(native_target) + create_umbrella_header(native_target) + end + # Because embedded targets live in their host target, CocoaPods + # copies all of the embedded target's pod_targets to its host + # targets. Having this script for the embedded target would + # cause an App Store rejection because frameworks cannot be + # embedded in embedded targets. + # + create_embed_frameworks_script if embed_frameworks_script_required? + create_bridge_support_file(native_target) + create_copy_resources_script if target.includes_resources? + create_acknowledgements + create_dummy_source(native_target) + clean_support_files_temp_dir + TargetInstallationResult.new(target, native_target) + end + end + + #-----------------------------------------------------------------------# + + private + + # @return [TargetDefinition] the target definition of the library. + # + def target_definition + target.target_definition + end + + # Ensure that vendored static frameworks and libraries are not linked + # twice to the aggregate target, which shares the xcconfig of the user + # target. 
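+ # (Hence `OTHER_LDFLAGS` and `OTHER_LIBTOOLFLAGS` are blanked out in the settings below.)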
+ #
+ def custom_build_settings
+ settings = {
+ 'CODE_SIGN_IDENTITY[sdk=appletvos*]' => '',
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]' => '',
+ 'CODE_SIGN_IDENTITY[sdk=watchos*]' => '',
+ 'MACH_O_TYPE' => 'staticlib',
+ 'OTHER_LDFLAGS' => '',
+ 'OTHER_LIBTOOLFLAGS' => '',
+ 'PODS_ROOT' => '$(SRCROOT)',
+ 'PRODUCT_BUNDLE_IDENTIFIER' => 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}',
+ 'SKIP_INSTALL' => 'YES',
+
+ # Needed to ensure that static libraries won't try to embed the swift stdlib,
+ # since there's nowhere to embed it for a static library.
+ # Not necessary for dynamic frameworks either, since the aggregate targets are never shipped
+ # on their own, and are always further embedded into an app target.
+ 'ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES' => 'NO',
+ }
+ super.merge(settings)
+ end
+
+ # @return [Boolean] whether this target requires an `Embed Frameworks` script phase
+ #
+ def embed_frameworks_script_required?
+ includes_dynamic_xcframeworks = target.xcframeworks_by_config.values.flatten.map(&:build_type).any?(&:dynamic_framework?)
+ (target.includes_frameworks? || includes_dynamic_xcframeworks) && !target.requires_host_target?
+ end
+
+ # Creates the group that holds the references to the support files
+ # generated by this installer.
+ #
+ # @return [void]
+ #
+ def create_support_files_group
+ parent = project.support_files_group
+ name = target.name
+ dir = target.support_files_dir
+ @support_files_group = parent.new_group(name, dir)
+ end
+
+ # Generates the contents of the xcconfig file and saves it to disk.
+ #
+ # @param [PBXNativeTarget] native_target
+ # the native target whose build configurations should reference the generated xcconfig file.
+ #
+ # @return [void]
+ #
+ def create_xcconfig_file(native_target)
+ native_target.build_configurations.each do |configuration|
+ next unless target.user_build_configurations.key?(configuration.name)
+ path = target.xcconfig_path(configuration.name)
+ build_settings = target.build_settings(configuration.name)
+ update_changed_file(build_settings, path)
+ target.xcconfigs[configuration.name] = build_settings.xcconfig
+ xcconfig_file_ref = add_file_to_support_group(path)
+ configuration.base_configuration_reference = xcconfig_file_ref
+ end
+ end
+
+ # Generates the bridge support metadata if requested by the {Podfile}.
+ #
+ # @note The bridge support metadata is added to the resources of the
+ # target because it is needed for environments interpreted at
+ # runtime.
+ #
+ # @param [PBXNativeTarget] native_target
+ # the native target to add the bridge support file into.
+ #
+ # @return [void]
+ #
+ def create_bridge_support_file(native_target)
+ if target.podfile.generate_bridge_support?
+ path = target.bridge_support_path
+ headers = native_target.headers_build_phase.files.map { |bf| sandbox.root + bf.file_ref.path }
+ generator = Generator::BridgeSupport.new(headers)
+ update_changed_file(generator, path)
+ add_file_to_support_group(path)
+ end
+ end
+
+ # Creates a script that copies the resources to the bundle of the client
+ # target.
+ #
+ # @note The bridge support file needs to be created before the prefix
+ # header, otherwise it will not be added to the resources script.
+ #
+ # @return [void]
+ #
+ def create_copy_resources_script
+ path = target.copy_resources_script_path
+ generator = Generator::CopyResourcesScript.new(target.resource_paths_by_config, target.platform)
+ update_changed_file(generator, path)
+ add_file_to_support_group(path)
+ end
+
+ # Creates a script that embeds the frameworks into the bundle of the client
+ # target.
+ # + # @note We can't use Xcode default link libraries phase, because + # we need to ensure that we only copy the frameworks which are + # relevant for the current build configuration. + # + # @return [void] + # + def create_embed_frameworks_script + path = target.embed_frameworks_script_path + generator = Generator::EmbedFrameworksScript.new(target.framework_paths_by_config, target.xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + + # Generates the acknowledgement files (markdown and plist) for the target. + # + # @return [void] + # + def create_acknowledgements + basepath = target.acknowledgements_basepath + Generator::Acknowledgements.generators.each do |generator_class| + path = generator_class.path_from_basepath(basepath) + file_accessors = target.pod_targets.map(&:file_accessors).flatten + generator = generator_class.new(file_accessors) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + #-----------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb new file mode 100644 index 0000000..dfffa0b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/app_host_installer.rb @@ -0,0 +1,154 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Installs an app host target to a given project. + # + class AppHostInstaller + include TargetInstallerHelper + + # @return [Sandbox] + # The sandbox used for this installation. + # + attr_reader :sandbox + + # @return [Pod::Project] + # The project to install the app host into. + # + attr_reader :project + + # @return [Platform] the platform to use for this app host. + # + attr_reader :platform + + # @return [String] the name of the sub group. + # + attr_reader :subgroup_name + + # @return [String] the name of the group the app host installer will be installing within. + # + attr_reader :group_name + + # @return [String] the name of the app target label that will be used. + # + attr_reader :app_target_label + + # @return [Boolean] whether the app host installer should add main.m + # + attr_reader :add_main + + # @return [Boolean] whether the app host installer should add a launch screen storyboard + # + attr_reader :add_launchscreen_storyboard + + # @return [Hash] Info.plist entries for the app host + # + attr_reader :info_plist_entries + + # @return [String] product_basename + # The product basename to use for the target. 
+ # + attr_reader :product_basename + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Pod::Project] project @see #project + # @param [Platform] platform @see #platform + # @param [String] subgroup_name @see #subgroup_name + # @param [String] group_name @see #group_name + # @param [String] app_target_label see #app_target_label + # @param [Boolean] add_main see #add_main + # @param [Hash] info_plist_entries see #info_plist_entries + # @param [String] product_basename see #product_basename + # + def initialize(sandbox, project, platform, subgroup_name, group_name, app_target_label, add_main: true, + add_launchscreen_storyboard: platform == :ios, info_plist_entries: {}, product_basename: nil) + @sandbox = sandbox + @project = project + @platform = platform + @subgroup_name = subgroup_name + @group_name = group_name + @app_target_label = app_target_label + @add_main = add_main + @add_launchscreen_storyboard = add_launchscreen_storyboard + @info_plist_entries = info_plist_entries + @product_basename = product_basename || app_target_label + target_group = project.pod_group(group_name) + @group = target_group[subgroup_name] || target_group.new_group(subgroup_name) + end + + # @return [PBXNativeTarget] the app host native target that was installed. + # + def install! + platform_name = platform.name + app_host_target = Pod::Generator::AppTargetHelper.add_app_target(project, platform_name, deployment_target, + app_target_label, product_basename) + app_host_target.build_configurations.each do |configuration| + configuration.build_settings['PRODUCT_NAME'] = product_basename + configuration.build_settings['PRODUCT_BUNDLE_IDENTIFIER'] = 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}' + if platform == :osx + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' + elsif platform == :ios + configuration.build_settings['CODE_SIGN_IDENTITY'] = 'iPhone Developer' + end + configuration.build_settings['CURRENT_PROJECT_VERSION'] = '1' + end + + Pod::Generator::AppTargetHelper.add_app_host_main_file(project, app_host_target, platform_name, @group, app_target_label) if add_main + Pod::Generator::AppTargetHelper.add_launchscreen_storyboard(project, app_host_target, @group, deployment_target, app_target_label) if add_launchscreen_storyboard + create_info_plist_file_with_sandbox(sandbox, app_host_info_plist_path, app_host_target, '1.0.0', platform, + :appl, :additional_entries => additional_info_plist_entries) + @group.new_file(app_host_info_plist_path) + app_host_target + end + + private + + ADDITIONAL_INFO_PLIST_ENTRIES = { + 'NSAppTransportSecurity' => { + 'NSAllowsArbitraryLoads' => true, + }, + }.freeze + + ADDITIONAL_IOS_INFO_PLIST_ENTRIES = { + 'UILaunchStoryboardName' => 'LaunchScreen', + 'UISupportedInterfaceOrientations' => %w( + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + ), + 'UISupportedInterfaceOrientations~ipad' => %w( + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + ), + }.freeze + + # @return [Hash] the additional Info.plist entries to be included + # + def additional_info_plist_entries + result = {} + result.merge!(ADDITIONAL_INFO_PLIST_ENTRIES) + result.merge!(ADDITIONAL_IOS_INFO_PLIST_ENTRIES) if platform == :ios + result.merge!(info_plist_entries) if info_plist_entries + result + end + + # @return [Pathname] The absolute path of the Info.plist to use for an app host. 
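+          #
+          # @example Illustrative result, assuming the Pods project lives at
+          #   `Pods/Pods.xcodeproj`, the subgroup is `App Hosts` and the label
+          #   is a hypothetical `AppHost-Foo-Unit-Tests`:
+          #
+          #     app_host_info_plist_path
+          #     # => Pods/App Hosts/AppHost-Foo-Unit-Tests-Info.plist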
+          #
+          def app_host_info_plist_path
+            project.path.dirname.+(subgroup_name).+("#{app_target_label}-Info.plist")
+          end
+
+          # @return [String] The deployment target.
+          #
+          def deployment_target
+            platform.deployment_target.to_s
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb
new file mode 100644
index 0000000..9cf07ae
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/file_references_installer.rb
@@ -0,0 +1,363 @@
+module Pod
+  class Installer
+    class Xcode
+      class PodsProjectGenerator
+        # Controller class responsible for installing the file references of the
+        # specifications in the Pods project.
+        #
+        class FileReferencesInstaller
+          # Regex for extracting the region portion of a localized file path. Ex. `Resources/en.lproj` --> `en`
+          LOCALIZATION_REGION_FILEPATTERN_REGEX = /(\/|^)(?<region>[^\/]*?)\.lproj(\/|$)/
+
+          # @return [Sandbox] The sandbox of the installation.
+          #
+          attr_reader :sandbox
+
+          # @return [Array<PodTarget>] The pod targets of the installation.
+          #
+          attr_reader :pod_targets
+
+          # @return [Project] The project to install the file references into.
+          #
+          attr_reader :pods_project
+
+          # @return [Boolean] add support for preserving the file structure of externally sourced pods, in addition to local pods.
+          #
+          attr_reader :preserve_pod_file_structure
+
+          # Initialize a new instance
+          #
+          # @param [Sandbox] sandbox @see #sandbox
+          # @param [Array<PodTarget>] pod_targets @see #pod_targets
+          # @param [Project] pods_project @see #pods_project
+          # @param [Boolean] preserve_pod_file_structure @see #preserve_pod_file_structure
+          #
+          def initialize(sandbox, pod_targets, pods_project, preserve_pod_file_structure = false)
+            @sandbox = sandbox
+            @pod_targets = pod_targets
+            @pods_project = pods_project
+            @preserve_pod_file_structure = preserve_pod_file_structure
+          end
+
+          # Installs the file references.
+          #
+          # @return [void]
+          #
+          def install!
+            refresh_file_accessors
+            prepare_pod_groups
+            add_source_files_references
+            add_frameworks_bundles
+            add_vendored_libraries
+            add_resources
+            add_developer_files unless sandbox.development_pods.empty?
+            link_headers
+          end
+
+          #-----------------------------------------------------------------------#
+
+          private
+
+          # @!group Installation Steps
+
+          # Reads the file accessors contents from the file system.
+          #
+          # @note The contents of the file accessors are modified by the clean
+          #       step of the #{PodSourceInstaller} and by the pre install hooks.
+          #
+          # @return [void]
+          #
+          def refresh_file_accessors
+            file_accessors.reject do |file_accessor|
+              pod_name = file_accessor.spec.name
+              sandbox.local?(pod_name)
+            end.map(&:path_list).uniq.each(&:read_file_system)
+          end
+
+          # Prepares the main groups to which all files will be added for the respective target
+          #
+          def prepare_pod_groups
+            file_accessors.each do |file_accessor|
+              pod_name = file_accessor.spec.name
+              next unless sandbox.local?(pod_name)
+              root_name = Specification.root_name(pod_name)
+              path = file_accessor.root
+              group = pods_project.group_for_spec(root_name)
+              group.set_path(path) unless group.path == path
+            end
+          end
+
+          # Adds the source files of the Pods to the Pods project.
+          #
+          # @note The source files are grouped by Pod and in turn by subspec
+          #       (recursively).
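+          #
+          # @example Resulting group hierarchy for a hypothetical subspec
+          #   `Foo/Networking` (shape illustrative):
+          #
+          #     Pods
+          #     └── Foo
+          #         └── Networking
+          #             └── Client.swift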
+ # + # @return [void] + # + def add_source_files_references + UI.message '- Adding source files' do + add_file_accessors_paths_to_pods_group(:source_files, nil, true) + end + end + + # Adds the bundled frameworks to the Pods project + # + # @return [void] + # + def add_frameworks_bundles + UI.message '- Adding frameworks' do + add_file_accessors_paths_to_pods_group(:vendored_frameworks, :frameworks) + end + end + + # Adds the bundled libraries to the Pods project + # + # @return [void] + # + def add_vendored_libraries + UI.message '- Adding libraries' do + add_file_accessors_paths_to_pods_group(:vendored_libraries, :frameworks) + end + end + + # Adds the resources of the Pods to the Pods project. + # + # @note The source files are grouped by Pod and in turn by subspec + # (recursively) in the resources group. + # + # @return [void] + # + def add_resources + UI.message '- Adding resources' do + refs = add_file_accessors_paths_to_pods_group(:resources, :resources, true) + refs.concat add_file_accessors_paths_to_pods_group(:resource_bundle_files, :resources, true) + add_known_regions(refs) + end + end + + def add_developer_files + UI.message '- Adding development pod helper files' do + file_accessors.each do |file_accessor| + pod_name = file_accessor.spec.name + next unless sandbox.local?(pod_name) + root_name = Specification.root_name(pod_name) + paths = file_accessor.developer_files + next if paths.empty? + group = pods_project.group_for_spec(root_name, :developer) + paths.each do |path| + ref = pods_project.add_file_reference(path, group, false) + if path.extname == '.podspec' + pods_project.mark_ruby_file_ref(ref) + end + end + end + end + end + + # Creates the link to the headers of the Pod in the sandbox. + # + # @return [void] + # + def link_headers + UI.message '- Linking headers' do + pod_targets.each do |pod_target| + # When integrating Pod as frameworks, built Pods are built into + # frameworks, whose headers are included inside the built + # framework. Those headers do not need to be linked from the + # sandbox. + next if pod_target.build_as_framework? && pod_target.should_build? + + pod_target_header_mappings = pod_target.header_mappings_by_file_accessor.values + pod_target_header_mappings.each do |header_mappings| + header_mappings.each do |namespaced_path, files| + pod_target.build_headers.add_files(namespaced_path, files) + end + end + + public_header_mappings = pod_target.public_header_mappings_by_file_accessor.values + public_header_mappings.each do |header_mappings| + header_mappings.each do |namespaced_path, files| + sandbox.public_headers.add_files(namespaced_path, files) + end + end + end + end + end + + #-----------------------------------------------------------------------# + + private + + # @!group Private Helpers + + # @return [Array] The file accessors for all the + # specs platform combinations. + # + def file_accessors + @file_accessors ||= pod_targets.flat_map(&:file_accessors).compact + end + + # Adds file references to the list of the paths returned by the file + # accessor with the given key to the given group of the Pods project. + # + # @param [Symbol] file_accessor_key + # The method of the file accessor which would return the list of + # the paths. + # + # @param [Symbol] group_key + # The key of the group of the Pods project. + # + # @param [Boolean] reflect_file_system_structure + # Whether organizing a local pod's files in subgroups inside + # the pod's group is allowed. 
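+          #
+          # @example The installation steps above drive this helper like so
+          #   (the arguments mirror the real call sites; behaviour shown is
+          #   illustrative):
+          #
+          #     add_file_accessors_paths_to_pods_group(:vendored_frameworks, :frameworks)
+          #     add_file_accessors_paths_to_pods_group(:resources, :resources, true)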
+ # + # @return [Array] the added file references + # + def add_file_accessors_paths_to_pods_group(file_accessor_key, group_key = nil, reflect_file_system_structure = false) + file_accessors.flat_map do |file_accessor| + paths = file_accessor.send(file_accessor_key) + paths = allowable_project_paths(paths) + next [] if paths.empty? + + pod_name = file_accessor.spec.name + preserve_pod_file_structure_flag = (sandbox.local?(pod_name) || preserve_pod_file_structure) && reflect_file_system_structure + base_path = preserve_pod_file_structure_flag ? common_path(paths) : nil + actual_group_key = preserve_pod_file_structure_flag ? nil : group_key + group = pods_project.group_for_spec(pod_name, actual_group_key) + paths.map do |path| + pods_project.add_file_reference(path, group, preserve_pod_file_structure_flag, base_path) + end + end + end + + # Filters a list of paths down to those paths which can be added to + # the Xcode project. Some paths are intermediates and only their children + # should be added, while some paths are treated as bundles and their + # children should not be added directly. + # + # @param [Array] paths + # The paths to files or directories on disk. + # + # @return [Array] The paths which can be added to the Xcode project + # + def allowable_project_paths(paths) + lproj_paths = Set.new + lproj_paths_with_files = Set.new + + # Remove all file ref under .docc folder, but preserve the .docc folder + paths = merge_to_docc_folder(paths) + + allowable_paths = paths.select do |path| + path_str = path.to_s + + # We add the directory for a Core Data model, but not the items in it. + next if path_str =~ /.*\.xcdatamodeld\/.+/i + + # We add the directory for a Core Data migration mapping, but not the items in it. + next if path_str =~ /.*\.xcmappingmodel\/.+/i + + # We add the directory for an asset catalog, but not the items in it. + next if path_str =~ /.*\.xcassets\/.+/i + + if path_str =~ /\.lproj(\/|$)/i + # If the element is an .lproj directory then save it and potentially + # add it later if we don't find any contained items. + if path_str =~ /\.lproj$/i && path.directory? + lproj_paths << path + next + end + + # Collect the paths for the .lproj directories that contain files. + lproj_path = /(^.*\.lproj)\/.*/i.match(path_str)[1] + lproj_paths_with_files << Pathname(lproj_path) + + # Directories nested within an .lproj directory are added as file + # system references so their contained items are not added directly. + next if path.dirname.dirname == lproj_path + end + + true + end + + # Only add the path for the .lproj directories that do not have anything + # within them added as well. This generally happens if the glob within the + # resources directory was not a recursive glob. + allowable_paths + lproj_paths.subtract(lproj_paths_with_files).to_a + end + + # Returns a Pathname of the nearest parent from which all the given paths descend. + # Converts each Pathname to a list of path components and finds the longest common prefix + # + # @param [Array] paths + # The paths to files or directories on disk. Must be absolute paths + # + # @return [Pathname] Pathname of the nearest parent shared by paths, or nil if none exists + # + def common_path(paths) + return nil if paths.empty? + strs = paths.map do |path| + unless path.absolute? 
+                raise ArgumentError, "Paths must be absolute #{path}"
+              end
+              path.dirname.to_s
+            end
+            min, max = strs.minmax
+            min = min.split('/')
+            max = max.split('/')
+            idx = min.size.times { |i| break i if min[i] != max[i] }
+            result = Pathname.new(min[0...idx].join('/'))
+            # Don't consider "/" a common path
+            return result unless result.to_s == '' || result.to_s == '/'
+          end
+
+          # Adds the known localization regions to the root of the project
+          #
+          # @param [Array] file_references the resource file references
+          #
+          def add_known_regions(file_references)
+            pattern = LOCALIZATION_REGION_FILEPATTERN_REGEX
+            regions = file_references.map do |ref|
+              if (match = ref.path.to_s.match(pattern))
+                match[:region]
+              end
+            end.compact
+
+            pods_project.root_object.known_regions = (pods_project.root_object.known_regions | regions).sort
+          end
+
+          #-----------------------------------------------------------------------#
+        end
+      end
+    end
+  end
+end
+
+# If we have a non-empty .docc folder, remove all paths under the folder
+# but keep the folder itself
+#
+# @param [Array] paths the paths to inspect
+#
+# @return [Array] The resulting list of paths.
+#
+def merge_to_docc_folder(paths)
+  docc_paths_with_files = Set.new
+  allowable_paths = paths.select do |path|
+    path_str = path.to_s
+
+    if path_str =~ /\.docc(\/|$)/i
+
+      # we only want folders that contain files, so skip the bare directories
+      next if path.directory?
+
+      # remove everything after ".docc", but keep ".docc"
+      folder_path = path_str.split("\.docc")[0] + "\.docc"
+
+      docc_paths_with_files << Pathname(folder_path)
+      next
+
+    end
+    true
+  end
+
+  allowable_paths + docc_paths_with_files.to_a
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb
new file mode 100644
index 0000000..90ae39c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_dependency_installer.rb
@@ -0,0 +1,195 @@
+module Pod
+  class Installer
+    class Xcode
+      # Wires up the dependencies between targets from the target installation results
+      #
+      class PodTargetDependencyInstaller
+        require 'cocoapods/native_target_extension.rb'
+
+        # @return [Sandbox] The sandbox used for this installation.
+        #
+        attr_reader :sandbox
+
+        # @return [TargetInstallationResults] The target installation results for pod targets.
+        #
+        attr_reader :pod_target_installation_results
+
+        # @return [ProjectMetadataCache] The metadata cache for targets.
+        #
+        attr_reader :metadata_cache
+
+        # Initialize a new instance.
+        #
+        # @param [Sandbox] sandbox @see #sandbox
+        # @param [TargetInstallationResults] pod_target_installation_results @see #pod_target_installation_results
+        # @param [ProjectMetadataCache] metadata_cache @see #metadata_cache
+        #
+        def initialize(sandbox, pod_target_installation_results, metadata_cache)
+          @sandbox = sandbox
+          @pod_target_installation_results = pod_target_installation_results
+          @metadata_cache = metadata_cache
+        end
+
+        def install!
+          # Wire up pod targets
+          pod_target_installation_results.values.each do |pod_target_installation_result|
+            pod_target = pod_target_installation_result.target
+            native_target = pod_target_installation_result.native_target
+            project = native_target.project
+
+            # First, wire up all resource bundles.
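+            # (Illustration, for a hypothetical pod `Foo` with a `FooAssets`
+            # resource bundle: the bundle target becomes a dependency of
+            # `Foo`'s native target and, when `Foo` builds as a dynamic
+            # framework, the bundle product is also copied into `Foo`'s
+            # resources phase.)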
+ wire_resource_bundle_targets(pod_target_installation_result.resource_bundle_targets, + native_target, pod_target) + # Wire up all dependencies to this pod target, if any. + wire_target_dependencies(pod_target, native_target, project, pod_target_installation_results, + metadata_cache) + + # Wire up test native targets. + unless pod_target_installation_result.test_native_targets.empty? + wire_test_native_targets(pod_target, pod_target_installation_result, pod_target_installation_results, + project, metadata_cache) + end + + # Wire up app native targets. + unless pod_target_installation_result.app_native_targets.empty? + wire_app_native_targets(pod_target, pod_target_installation_result, pod_target_installation_results, + project, metadata_cache) + end + end + end + + private + + def wire_resource_bundle_targets(resource_bundle_targets, native_target, pod_target) + resource_bundle_targets.each do |resource_bundle_target| + native_target.add_dependency(resource_bundle_target) + if pod_target.build_as_dynamic_framework? && pod_target.should_build? + native_target.add_resources([resource_bundle_target.product_reference]) + end + end + end + + def wire_target_dependencies(pod_target, native_target, project, pod_target_installation_results, metadata_cache) + dependent_targets = pod_target.dependent_targets + dependent_targets.each do |dependent_target| + is_local = sandbox.local?(dependent_target.pod_name) + if installation_result = pod_target_installation_results[dependent_target.name] + dependent_project = installation_result.native_target.project + if dependent_project != project + project.add_pod_subproject(dependent_project, is_local) + end + native_target.add_dependency(installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, native_target, cached_dependency) + end + end + end + + def wire_test_native_targets(pod_target, installation_result, pod_target_installation_results, project, metadata_cache) + installation_result.test_specs_by_native_target.each do |test_native_target, test_spec| + resource_bundle_native_targets = installation_result.test_resource_bundle_targets[test_spec.name] || [] + resource_bundle_native_targets.each do |test_resource_bundle_target| + test_native_target.add_dependency(test_resource_bundle_target) + end + + test_dependent_targets = pod_target.test_dependent_targets_by_spec_name.fetch(test_spec.name, []).+([pod_target]).uniq + test_dependent_targets.each do |test_dependent_target| + is_local = sandbox.local?(test_dependent_target.pod_name) + if dependency_installation_result = pod_target_installation_results[test_dependent_target.name] + dependent_test_project = dependency_installation_result.native_target.project + if dependent_test_project != project + project.add_pod_subproject(dependent_test_project, is_local) + end + test_native_target.add_dependency(dependency_installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[test_dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, test_native_target, cached_dependency) + end + end + + if app_host_target_label = pod_target.app_host_target_label(test_spec) + app_host_pod_target_label, app_host_target_label = *app_host_target_label + 
wire_test_native_target_app_host(test_native_target, pod_target, pod_target_installation_results, project, metadata_cache, app_host_pod_target_label, app_host_target_label) + end + end + end + + def wire_test_native_target_app_host(test_native_target, pod_target, pod_target_installation_results, project, metadata_cache, app_host_pod_target_label, app_host_target_label) + if dependency_installation_result = pod_target_installation_results[app_host_pod_target_label] + unless app_native_target = dependency_installation_result.app_host_target_labelled(app_host_target_label) + raise Informative, "Did not find target with label #{app_host_target_label} in the set of targets installed for #{app_host_pod_target_label}." + end + + dependent_test_project = app_native_target.project + if dependent_test_project != project + project.add_subproject_reference(dependent_test_project, project.dependencies_group) + end + + app_host_target_names = app_native_target.resolved_build_setting('PRODUCT_NAME', true) + test_native_target.build_configurations.each do |configuration| + app_host_target_name = app_host_target_names[configuration.name] || target.name + case test_native_target.symbol_type + when :unit_test_bundle + test_host = "$(BUILT_PRODUCTS_DIR)/#{app_host_target_name}.app/" + test_host << 'Contents/MacOS/' if pod_target.platform == :osx + test_host << app_host_target_name.to_s + configuration.build_settings['BUNDLE_LOADER'] = '$(TEST_HOST)' + configuration.build_settings['TEST_HOST'] = test_host + when :ui_test_bundle + configuration.build_settings['TEST_TARGET_NAME'] = app_host_target_name + end + end + target_attributes = project.root_object.attributes['TargetAttributes'] || {} + target_attributes[test_native_target.uuid.to_s] = { 'TestTargetID' => app_native_target.uuid.to_s } + project.root_object.attributes['TargetAttributes'] = target_attributes + test_native_target.add_dependency(app_native_target) + elsif cached_dependency = metadata_cache.target_label_by_metadata[app_host_target_label] + # Hit the cache + project.add_cached_subproject_reference(sandbox, cached_dependency, project.dependencies_group) + Project.add_cached_dependency(sandbox, test_native_target, cached_dependency) + else + raise "Expected to either have an installation or cache result for #{app_host_target_label} (from pod #{app_host_pod_target_label}) " \ + "for target #{test_native_target.name} in project #{project.project_name}" + end + end + + def wire_app_native_targets(pod_target, installation_result, pod_target_installation_results, project, metadata_cache) + installation_result.app_specs_by_native_target.each do |app_native_target, app_spec| + resource_bundle_native_targets = installation_result.app_resource_bundle_targets[app_spec.name] || [] + resource_bundle_native_targets.each do |app_resource_bundle_target| + app_native_target.add_dependency(app_resource_bundle_target) + end + + app_dependent_targets = pod_target.app_dependent_targets_by_spec_name.fetch(app_spec.name, []).unshift(pod_target).uniq + app_dependent_targets.each do |app_dependent_target| + is_local = sandbox.local?(app_dependent_target.pod_name) + if dependency_installation_result = pod_target_installation_results[app_dependent_target.name] + resource_bundle_native_targets = dependency_installation_result.app_resource_bundle_targets[app_spec.name] + unless resource_bundle_native_targets.nil? 
+ resource_bundle_native_targets.each do |app_resource_bundle_target| + app_native_target.add_dependency(app_resource_bundle_target) + end + end + dependency_project = dependency_installation_result.native_target.project + if dependency_project != project + project.add_pod_subproject(dependency_project, is_local) + end + app_native_target.add_dependency(dependency_installation_result.native_target) + else + # Hit the cache + cached_dependency = metadata_cache.target_label_by_metadata[app_dependent_target.label] + project.add_cached_pod_subproject(sandbox, cached_dependency, is_local) + Project.add_cached_dependency(sandbox, app_native_target, cached_dependency) + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb new file mode 100644 index 0000000..e32574d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_installer.rb @@ -0,0 +1,1245 @@ +require 'active_support/core_ext/array' +require 'active_support/core_ext/string/inflections' +require 'cocoapods/xcode' + +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Creates the target for the Pods libraries in the Pods project and the + # relative support files. + # + class PodTargetInstaller < TargetInstaller + require 'cocoapods/installer/xcode/pods_project_generator/app_host_installer' + + # @return [Array] Array of umbrella header paths in the headers directory + # + attr_reader :umbrella_header_paths + + # @return [PodTarget] @see TargetInstaller#target + # + attr_reader :target + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see TargetInstaller#sandbox + # @param [Pod::Project] project @see TargetInstaller#project + # @param [PodTarget] target @see TargetInstaller#target + # @param [Array] umbrella_header_paths @see #umbrella_header_paths + # + def initialize(sandbox, project, target, umbrella_header_paths = nil) + super(sandbox, project, target) + @umbrella_header_paths = umbrella_header_paths + end + + # Creates the target in the Pods project and the relative support files. + # + # @return [TargetInstallationResult] the result of the installation of this target. + # + def install! + UI.message "- Installing target `#{target.name}` #{target.platform}" do + create_support_files_dir + library_file_accessors = target.file_accessors.select { |fa| fa.spec.library_specification? } + test_file_accessors = target.file_accessors.select { |fa| fa.spec.test_specification? } + app_file_accessors = target.file_accessors.select { |fa| fa.spec.app_specification? } + + native_target = if target.should_build? + add_target + else + # For targets that should not be built (e.g. pre-built vendored frameworks etc), we add a placeholder + # PBXAggregateTarget that will be used to wire up dependencies later. 
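+                            # (Illustration: a pod that only ships a pre-built vendored
+                            # `.xcframework` compiles nothing itself, yet other targets
+                            # still declare dependencies on it; the aggregate target
+                            # gives those dependency edges a node to attach to.)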
+ add_placeholder_target + end + + resource_bundle_targets = add_resources_bundle_targets(library_file_accessors).values.flatten + + test_native_targets = add_test_targets + test_app_host_targets = add_test_app_host_targets + test_resource_bundle_targets = add_resources_bundle_targets(test_file_accessors) + + app_native_targets = add_app_targets + app_resource_bundle_targets = add_resources_bundle_targets(app_file_accessors) + + add_files_to_build_phases(native_target, test_native_targets, app_native_targets) + targets_to_validate = test_native_targets + app_native_targets.values + targets_to_validate << native_target if target.should_build? + validate_targets_contain_sources(targets_to_validate) + validate_xcframeworks if target.should_build? + + create_copy_xcframeworks_script unless target.xcframeworks.values.all?(&:empty?) + + create_xcconfig_file(native_target, resource_bundle_targets) + create_test_xcconfig_files(test_native_targets, test_resource_bundle_targets) + create_app_xcconfig_files(app_native_targets, app_resource_bundle_targets) + + if target.should_build? && target.defines_module? && !skip_modulemap?(target.library_specs) + create_module_map(native_target) do |generator| + generator.headers.concat module_map_additional_headers + end + create_umbrella_header(native_target) do |generator| + generator.imports += library_file_accessors.flat_map do |file_accessor| + header_dir = if !target.build_as_framework? && dir = file_accessor.spec_consumer.header_dir + Pathname.new(dir) + end + + file_accessor.public_headers.map do |public_header| + public_header = if header_mappings_dir(file_accessor) + public_header.relative_path_from(header_mappings_dir(file_accessor)) + else + public_header.basename + end + if header_dir + public_header = header_dir.join(public_header) + end + public_header + end + end + end + end + + if target.should_build? && target.build_as_framework? + unless skip_info_plist?(native_target) + create_info_plist_file(target.info_plist_path, native_target, target.version, target.platform, + :additional_entries => target.info_plist_entries) + end + create_build_phase_to_symlink_header_folders(native_target) + end + + if target.should_build? && target.build_as_library? && target.uses_swift? + add_swift_library_compatibility_header_phase(native_target) + end + + project_directory = project.path.dirname + + if target.should_build? && !skip_pch?(target.library_specs) + path = target.prefix_header_path + create_prefix_header(path, library_file_accessors, target.platform, native_target, project_directory) + add_file_to_support_group(path) + end + unless skip_pch?(target.test_specs) + target.test_specs.each do |test_spec| + path = target.prefix_header_path_for_spec(test_spec) + test_spec_consumer = test_spec.consumer(target.platform) + test_native_target = test_native_target_from_spec(test_spec_consumer.spec, test_native_targets) + create_prefix_header(path, test_file_accessors, target.platform, test_native_target, project_directory) + add_file_to_support_group(path) + end + end + unless skip_pch?(target.app_specs) + target.app_specs.each do |app_spec| + path = target.prefix_header_path_for_spec(app_spec) + app_spec_consumer = app_spec.consumer(target.platform) + app_native_target = app_native_targets[app_spec_consumer.spec] + create_prefix_header(path, app_file_accessors, target.platform, app_native_target, project_directory) + add_file_to_support_group(path) + end + end + create_dummy_source(native_target) if target.should_build? 
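+            # (Illustration: for a hypothetical pod `Foo` the dummy file is a
+            # one-class `Foo-dummy.m`; it guarantees the target always has at
+            # least one object file to link, even when every real source file
+            # is excluded for the current platform or configuration.)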
+            create_copy_dsyms_script
+            clean_support_files_temp_dir
+            TargetInstallationResult.new(target, native_target, resource_bundle_targets,
+                                         test_native_targets, test_resource_bundle_targets, test_app_host_targets,
+                                         app_native_targets, app_resource_bundle_targets)
+          end
+        end
+
+        private
+
+        # Adds the target for the library to the Pods project with the
+        # appropriate build configurations.
+        #
+        # @note Overrides the superclass implementation to remove settings that are set in the pod target xcconfig
+        #
+        # @return [PBXNativeTarget] the native target that was added.
+        #
+        def add_target
+          super.tap do |native_target|
+            remove_pod_target_xcconfig_overrides_from_target(target.build_settings, native_target)
+          end
+        end
+
+        # Removes overrides of the `pod_target_xcconfig` settings from the target's
+        # build configurations.
+        #
+        # @param [Hash{Symbol => Pod::Target::BuildSettings}] build_settings_by_config the build settings by config
+        #        of the target.
+        #
+        # @param [PBXNativeTarget] native_target
+        #        the native target to remove pod target xcconfig overrides from.
+        #
+        # @return [Void]
+        #
+        #
+        def remove_pod_target_xcconfig_overrides_from_target(build_settings_by_config, native_target)
+          native_target.build_configurations.each do |configuration|
+            build_settings = build_settings_by_config[target.user_build_configurations[configuration.name]]
+            unless build_settings.nil?
+              build_settings.merged_pod_target_xcconfigs.each_key do |setting|
+                configuration.build_settings.delete(setting)
+              end
+            end
+          end
+        end
+
+        # @param [Array] specs
+        #        the specs to check to determine whether `.pch` generation should be skipped or not.
+        #
+        # @return [Boolean] Whether the target should build a pch file.
+        #
+        def skip_pch?(specs)
+          specs.any? { |spec| spec.root.prefix_header_file.is_a?(FalseClass) }
+        end
+
+        def skip_modulemap?(specs)
+          specs.any? { |spec| spec.module_map.is_a?(FalseClass) }
+        end
+
+        # True if info.plist generation should be skipped
+        #
+        # @param [PBXNativeTarget] native_target
+        #
+        # @return [Boolean] Whether the target should build an Info.plist file
+        #
+        def skip_info_plist?(native_target)
+          existing_setting = native_target.resolved_build_setting('INFOPLIST_FILE', true).values.compact
+          !existing_setting.empty?
+        end
+
+        # Remove the default headers folder path settings for static library pod
+        # targets.
+        #
+        # @return [Hash{String => String}]
+        #
+        def custom_build_settings
+          settings = super
+          unless target.build_as_framework?
+            settings['PRIVATE_HEADERS_FOLDER_PATH'] = ''
+            settings['PUBLIC_HEADERS_FOLDER_PATH'] = ''
+          end
+
+          settings['PRODUCT_NAME'] = target.product_basename
+          settings['PRODUCT_MODULE_NAME'] = target.product_module_name
+
+          settings['CODE_SIGN_IDENTITY[sdk=appletvos*]'] = ''
+          settings['CODE_SIGN_IDENTITY[sdk=iphoneos*]'] = ''
+          settings['CODE_SIGN_IDENTITY[sdk=watchos*]'] = ''
+
+          settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = '$(inherited) '
+
+          if target.swift_version
+            settings['SWIFT_VERSION'] = target.swift_version
+          end
+
+          if info_plist_bundle_id
+            settings['PRODUCT_BUNDLE_IDENTIFIER'] = info_plist_bundle_id
+          end
+
+          settings
+        end
+
+        # @return [String] Bundle Identifier found in the custom Info.plist entries
+        #
+        def info_plist_bundle_id
+          return @plist_bundle_id if defined?(@plist_bundle_id)
+          unless target.info_plist_entries.nil?
+            @plist_bundle_id = target.info_plist_entries['CFBundleIdentifier']
+            unless @plist_bundle_id.nil?
+ message = "The `#{target.name}` target " \ + "sets a Bundle Identifier of `#{@plist_bundle_id}` in it's info.plist file. " \ + 'The Bundle Identifier should be set using pod_target_xcconfig: ' \ + "s.pod_target_xcconfig = { 'PRODUCT_BUNDLE_IDENTIFIER': '#{@plist_bundle_id}' }`." + UI.warn message + end + @plist_bundle_id + end + end + + # Filters the given resource file references discarding empty paths which are + # added by their parent directory. This will also include references to the parent [PBXVariantGroup] + # for all resources underneath it. + # + # @param [Array] resource_file_references + # The array of all resource file references to filter. + # + # @yield_param [Array} The filtered resource file references to be installed + # in the copy resources phase. + # + # @yield_param [Array} The filtered resource file references to be installed + # in the compile sources phase. + # + # @note Core Data model directories (.xcdatamodeld) and RealityKit projects (.rcproject) + # used to be added to the `Copy Resources` build phase like all other resources, + # since they would compile correctly in either the resources or compile phase. In + # recent versions of xcode, there's an exception for data models that generate + # headers. These need to be added to the compile sources phase of a real + # target for the headers to be built in time for code in the target to + # use them. These kinds of models generally break when added to resource + # bundles. + # + def filter_resource_file_references(resource_file_references) + file_references = resource_file_references.map do |resource_file_reference| + ref = project.reference_for_path(resource_file_reference) + + # Some nested files are not directly present in the Xcode project, such as the contents + # of an .xcdatamodeld directory. These files are implicitly included by including their + # parent directory. + next if ref.nil? + + # For variant groups, the variant group itself is added, not its members. + next ref.parent if ref.parent.is_a?(Xcodeproj::Project::Object::PBXVariantGroup) + + ref + end.compact.uniq + compile_phase_matcher = lambda { |ref| !(ref.path =~ /.*\.(xcdatamodeld|rcproject)/i).nil? } + compile_phase_refs, resources_phase_refs = file_references.partition(&compile_phase_matcher) + yield compile_phase_refs, resources_phase_refs + end + + #-----------------------------------------------------------------------# + + # Adds the build files of the pods to the target and adds a reference to + # the frameworks of the Pods. + # + # @note The Frameworks are used only for presentation purposes as the + # xcconfig is the authoritative source about their information. + # + # @note Core Data model directories (.xcdatamodeld) defined in the `resources` + # property are currently added to the `Copy Resources` build phase like + # all other resources. The Xcode UI adds these to the `Compile Sources` + # build phase, but they will compile correctly either way. + # + # @return [void] + # + def add_files_to_build_phases(library_native_target, test_native_targets, app_native_targets) + target.file_accessors.each do |file_accessor| + consumer = file_accessor.spec_consumer + + native_target = case consumer.spec.spec_type + when :library + library_native_target + when :test + test_native_target_from_spec(consumer.spec, test_native_targets) + when :app + app_native_targets[consumer.spec] + else + raise ArgumentError, "Unknown spec type #{consumer.spec.spec_type}." 
+ end + + next if native_target.is_a?(Xcodeproj::Project::Object::PBXAggregateTarget) + + headers = file_accessor.headers + public_headers = file_accessor.public_headers.map(&:realpath) + project_headers = file_accessor.project_headers.map(&:realpath) + private_headers = file_accessor.private_headers.map(&:realpath) + other_source_files = file_accessor.other_source_files + + { + true => file_accessor.arc_source_files, + false => file_accessor.non_arc_source_files, + }.each do |arc, source_files| + next if source_files.empty? + source_files = source_files - headers - other_source_files + swift_source_files, non_swift_source_files = source_files.partition { |file| file.extname == '.swift' } + { + :objc => non_swift_source_files, + :swift => swift_source_files, + }.each do |language, files| + compiler_flags = compiler_flags_for_consumer(consumer, arc, language) + file_refs = project_file_references_array(files, 'source') + native_target.add_file_references(file_refs, compiler_flags) + end + end + + header_file_refs = project_file_references_array(headers, 'header') + native_target.add_file_references(header_file_refs) do |build_file| + add_header(file_accessor, build_file, public_headers, project_headers, private_headers, native_target) + end + + other_file_refs = project_file_references_array(other_source_files, 'other source') + native_target.add_file_references(other_file_refs, nil) + + next unless target.build_as_framework? + + filter_resource_file_references(file_accessor.resources.flatten) do |compile_phase_refs, resource_phase_refs| + native_target.add_file_references(compile_phase_refs, nil) + + if target.build_as_static_framework? && consumer.spec.library_specification? + resource_phase_refs = resource_phase_refs.select do |ref| + filename = ref.name || ref.path + Target.resource_extension_compilable?(File.extname(filename)) + end + end + + native_target.add_resources(resource_phase_refs) + end + end + end + + # Adds the test targets for the library to the Pods project with the + # appropriate build configurations. + # + # @return [Array] the test native targets created. + # + def add_test_targets + target.test_specs.map do |test_spec| + spec_consumer = test_spec.consumer(target.platform) + test_type = spec_consumer.test_type + product_type = target.product_type_for_test_type(test_type) + name = target.test_target_label(test_spec) + platform_name = target.platform.name + language = target.uses_swift_for_spec?(test_spec) ? :swift : :objc + product_basename = target.product_basename_for_spec(test_spec) + embedded_content_contains_swift = target.dependent_targets_for_test_spec(test_spec).any?(&:uses_swift?) + test_native_target = project.new_target(product_type, name, platform_name, + target.deployment_target_for_non_library_spec(test_spec), nil, + language, product_basename) + test_native_target.product_reference.name = name + + target.user_build_configurations.each do |bc_name, type| + test_native_target.add_build_configuration(bc_name, type) + end + + test_native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + # target_installer will automatically add an empty `OTHER_LDFLAGS`. For test + # targets those are set via a test xcconfig file instead. + configuration.build_settings.delete('OTHER_LDFLAGS') + # target_installer will automatically set the product name to the module name if the target + # requires frameworks. 
For tests we always use the test target name as the product name + # irrelevant to whether we use frameworks or not. + configuration.build_settings['PRODUCT_NAME'] = name + # target_installer sets 'MACH_O_TYPE' for static frameworks ensure this does not propagate + # to test target. + configuration.build_settings.delete('MACH_O_TYPE') + # Use xcode default product module name, which is $(PRODUCT_NAME:c99extidentifier) + # this gives us always valid name that is distinct from the parent spec module name + # which allow tests to use either import or @testable import to access the parent framework + configuration.build_settings.delete('PRODUCT_MODULE_NAME') + # We must codesign iOS XCTest bundles that contain binary frameworks to allow them to be launchable in the simulator + unless target.platform == :osx + configuration.build_settings['CODE_SIGNING_REQUIRED'] = 'YES' + configuration.build_settings['CODE_SIGNING_ALLOWED'] = 'YES' + end + # For macOS we do not code sign the XCTest bundle because we do not code sign the frameworks either. + if target.platform == :osx + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' + elsif target.platform == :ios + configuration.build_settings['CODE_SIGN_IDENTITY'] = 'iPhone Developer' + end + # Ensure swift stdlib gets copied in if needed, even when the target contains no swift files, + # because a dependency uses swift + configuration.build_settings['ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES'] = 'YES' if embedded_content_contains_swift + end + + remove_pod_target_xcconfig_overrides_from_target(target.test_spec_build_settings_by_config[test_spec.name], test_native_target) + + # Test native targets also need frameworks and resources to be copied over to their xctest bundle. + create_test_target_embed_frameworks_script(test_spec) + create_test_target_copy_resources_script(test_spec) + + # Generate vanilla Info.plist for test target similar to the one Xcode generates for new test target. + # This creates valid test bundle accessible at the runtime, allowing tests to load bundle resources + # defined in podspec. + additional_entries = spec_consumer.info_plist + path = target.info_plist_path_for_spec(test_spec) + create_info_plist_file(path, test_native_target, '1.0', target.platform, :bndl, :additional_entries => additional_entries) + + test_native_target + end + end + + # Adds the test app host targets for the library to the Pods project with the + # appropriate build configurations. + # + # @return [Array] the app host targets created. + # + def add_test_app_host_targets + target.test_spec_consumers.reject(&:requires_app_host?).select(&:app_host_name).each do |test_spec_consumer| + raise Informative, "`#{target.label}-#{test_spec_consumer.test_type}-Tests` manually specifies an app host but has not specified `requires_app_host = true`." + end + + target.test_spec_consumers.select(&:requires_app_host?).reject(&:app_host_name).group_by { |consumer| target.app_host_target_label(consumer.spec) }. + map do |(_, target_name), _| + AppHostInstaller.new(sandbox, project, target.platform, target_name, target.pod_name, target_name).install! + end + end + + # Adds the app targets for the library to the Pods project with the + # appropriate build configurations. 
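+          #
+          # @example Shape of the returned mapping for a hypothetical app
+          #   spec `Foo/App` (illustrative):
+          #
+          #     add_app_targets
+          #     # => { <Specification Foo/App> => <PBXNativeTarget Foo-App> }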
+ # + # @return [Hash{Specification => PBXNativeTarget}] the app native targets created, keyed by their app spec + # + def add_app_targets + target.app_specs.each_with_object({}) do |app_spec, hash| + spec_consumer = app_spec.consumer(target.platform) + spec_name = app_spec.parent.name + subspec_name = target.subspec_label(app_spec) + app_target_label = target.app_target_label(app_spec) + platform = Platform.new(target.platform.symbolic_name, target.deployment_target_for_non_library_spec(app_spec)) + info_plist_entries = spec_consumer.info_plist + resources = target.file_accessors.find { |fa| fa.spec == app_spec }.resources + add_launchscreen_storyboard = resources.none? { |resource| resource.basename.to_s == 'LaunchScreen.storyboard' } && platform.name == :ios + embedded_content_contains_swift = target.dependent_targets_for_app_spec(app_spec).any?(&:uses_swift?) + app_native_target = AppHostInstaller.new(sandbox, project, platform, subspec_name, spec_name, + app_target_label, :add_main => false, + :add_launchscreen_storyboard => add_launchscreen_storyboard, + :info_plist_entries => info_plist_entries, + :product_basename => target.product_basename_for_spec(app_spec)).install! + + app_native_target.product_reference.name = app_target_label + target.user_build_configurations.each do |bc_name, type| + app_native_target.add_build_configuration(bc_name, type) + end + + app_native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + + # target_installer will automatically add an empty `OTHER_LDFLAGS`. For app + # targets those are set via an app xcconfig file instead. + configuration.build_settings.delete('OTHER_LDFLAGS') + # target_installer will automatically set the product name to the module name if the target + # requires frameworks. For apps we always use the app target name as the product name + # irrelevant to whether we use frameworks or not. + configuration.build_settings['PRODUCT_NAME'] = app_target_label + # target_installer sets 'MACH_O_TYPE' for static frameworks ensure this does not propagate + # to app target. + configuration.build_settings.delete('MACH_O_TYPE') + # Use xcode default product module name, which is $(PRODUCT_NAME:c99extidentifier) + # this gives us always valid name that is distinct from the parent spec module name + # which allow the app to use import to access the parent framework + configuration.build_settings.delete('PRODUCT_MODULE_NAME') + + # We must codesign iOS app bundles that contain binary frameworks to allow them to be launchable in the simulator + unless target.platform == :osx + configuration.build_settings['CODE_SIGNING_REQUIRED'] = 'YES' + configuration.build_settings['CODE_SIGNING_ALLOWED'] = 'YES' + end + # For macOS we do not code sign the appbundle because we do not code sign the frameworks either. + configuration.build_settings['CODE_SIGN_IDENTITY'] = '' if target.platform == :osx + # For iOS, we delete the target_installer empty values that get set for libraries since CocoaPods will + # code sign the libraries manually but for apps this is not true. 
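+              # (Illustration: with the empty per-SDK CODE_SIGN_IDENTITY
+              # overrides removed below, the app spec target keeps the
+              # `iPhone Developer` identity applied by AppHostInstaller
+              # instead of building unsigned like the libraries do.)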
+ if target.platform == :ios + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=appletvos*]') + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=iphoneos*]') + configuration.build_settings.delete('CODE_SIGN_IDENTITY[sdk=watchos*]') + end + # Ensure swift stdlib gets copied in if needed, even when the target contains no swift files, + # because a dependency uses swift + configuration.build_settings['ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES'] = 'YES' if embedded_content_contains_swift + end + + remove_pod_target_xcconfig_overrides_from_target(target.app_spec_build_settings_by_config[app_spec.name], app_native_target) + + create_app_target_embed_frameworks_script(app_spec) + create_app_target_copy_resources_script(app_spec) + add_resources_to_target(resources, app_native_target) + + hash[app_spec] = app_native_target + end + end + + # Adds the resources to the compile resources phase of the target. + # + # @param [Array] paths the paths to add to the target. + # + # @param [PBXNativeTarget] target the target resources are added to. + # + # @return [Boolean] whether any compile phase references were added. + # + def add_resources_to_target(paths, target) + filter_resource_file_references(paths) do |compile_phase_refs, resource_phase_refs| + # Resource bundles are only meant to have resources, so install everything + # into the resources phase. See note in filter_resource_file_references. + target.add_resources(resource_phase_refs + compile_phase_refs) + !compile_phase_refs.empty? + end + end + + # Adds the resources of the Pods to the Pods project. + # + # @note The source files are grouped by Pod and in turn by subspec + # (recursively) in the resources group. + # + # @param [Array] file_accessors + # the file accessors list to generate resource bundles for. + # + # @return [Hash{String=>Array}] the resource bundle native targets created. + # + def add_resources_bundle_targets(file_accessors) + file_accessors.each_with_object({}) do |file_accessor, hash| + hash[file_accessor.spec.name] = file_accessor.resource_bundles.map do |bundle_name, paths| + label = target.resources_bundle_target_label(bundle_name) + resource_bundle_target = project.new_resources_bundle(label, file_accessor.spec_consumer.platform_name, nil, bundle_name) + resource_bundle_target.product_reference.name = label + contains_compile_phase_refs = add_resources_to_target(paths, resource_bundle_target) + + target.user_build_configurations.each do |bc_name, type| + resource_bundle_target.add_build_configuration(bc_name, type) + end + resource_bundle_target.deployment_target = if file_accessor.spec.non_library_specification? + target.deployment_target_for_non_library_spec(file_accessor.spec) + else + deployment_target + end + # Create Info.plist file for bundle + path = target.info_plist_path + path.dirname.mkdir unless path.dirname.exist? + info_plist_path = path.dirname + "ResourceBundle-#{bundle_name}-#{path.basename}" + create_info_plist_file(info_plist_path, resource_bundle_target, target.version, target.platform, :bndl) + + resource_bundle_target.build_configurations.each do |configuration| + configuration.build_settings['PRODUCT_NAME'] = bundle_name + # Do not set the CONFIGURATION_BUILD_DIR for resource bundles that are only meant for test targets. + # This is because the test target itself also does not set this configuration build dir and it expects + # all bundles to be copied from the default path. + unless file_accessor.spec.test_specification? 
+ configuration.build_settings['CONFIGURATION_BUILD_DIR'] = target.configuration_build_dir('$(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)') + end + + # Set the 'IBSC_MODULE' build settings for resource bundles so that Storyboards and Xibs can load + # classes from the parent module. + configuration.build_settings['IBSC_MODULE'] = target.product_module_name + + # Xcode 14.x throws an error about code signing on resource bundles, turn it off for now. + configuration.build_settings['CODE_SIGNING_ALLOWED'] = 'NO' + + # Set the `SWIFT_VERSION` build setting for resource bundles that could have resources that get + # compiled such as an `xcdatamodeld` file which has 'Swift' as its code generation language. + if contains_compile_phase_refs && file_accessors.any? { |fa| target.uses_swift_for_spec?(fa.spec) } + configuration.build_settings['SWIFT_VERSION'] = target.swift_version + end + + # Set the correct device family for this bundle, based on the platform + device_family_by_platform = { + :ios => '1,2', + :tvos => '3', + :watchos => '1,2,4', + } + + if (family = device_family_by_platform[target.platform.name]) + configuration.build_settings['TARGETED_DEVICE_FAMILY'] = family + end + end + + remove_pod_target_xcconfig_overrides_from_target(target.build_settings_by_config_for_spec(file_accessor.spec), resource_bundle_target) + + resource_bundle_target + end + end + end + + # Generates the contents of the xcconfig file and saves it to disk. + # + # @param [PBXNativeTarget] native_target + # the native target to link the xcconfig file into. + # + # @param [Array] resource_bundle_targets + # the additional resource bundle targets to link the xcconfig file into. + # + # @return [void] + # + def create_xcconfig_file(native_target, resource_bundle_targets) + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path(config) + update_changed_file(target.build_settings[config], path) + xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle targets. + apply_xcconfig_file_ref_to_targets([native_target] + resource_bundle_targets, xcconfig_file_ref, names) + end + end + + # Generates the contents of the xcconfig file used for each test target type and saves it to disk. + # + # @param [Array] test_native_targets + # the test native target to link the xcconfig file into. + # + # @param [Hash{String=>Array}] test_resource_bundle_targets + # the additional test resource bundle targets to link the xcconfig file into. + # + # @return [void] + # + def create_test_xcconfig_files(test_native_targets, test_resource_bundle_targets) + target.test_specs.each do |test_spec| + spec_consumer = test_spec.consumer(target.platform) + test_type = spec_consumer.test_type + test_native_target = test_native_target_from_spec(spec_consumer.spec, test_native_targets) + + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path("#{test_type.capitalize}-#{target.subspec_label(test_spec)}.#{config}") + test_spec_build_settings = target.build_settings_for_spec(test_spec, :configuration => config) + update_changed_file(test_spec_build_settings, path) + test_xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle test targets related to this test spec. 
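+                # (Illustrative naming, for a hypothetical pod `Foo` with a
+                # `Tests` unit test spec in the Debug configuration: the file
+                # written above lands in the target's support files directory
+                # with a name along the lines of `Foo.unit-Tests.debug.xcconfig`.)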
+ scoped_test_resource_bundle_targets = test_resource_bundle_targets[test_spec.name] + apply_xcconfig_file_ref_to_targets([test_native_target] + scoped_test_resource_bundle_targets, test_xcconfig_file_ref, names) + end + end + end + + # Creates a script that copies the resources to the bundle of the test target. + # + # @param [Specification] test_spec + # The test spec to create the copy resources script for. + # + # @return [void] + # + def create_test_target_copy_resources_script(test_spec) + path = target.copy_resources_script_path_for_spec(test_spec) + host_target_spec_names = target.app_host_dependent_targets_for_spec(test_spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + resource_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), resources_by_config| + resources_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end + end + unless resource_paths_by_config.each_value.all?(&:empty?) + generator = Generator::CopyResourcesScript.new(resource_paths_by_config, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that embeds the frameworks to the bundle of the test target. + # + # @param [Specification] test_spec + # The test spec to create the embed frameworks script for. + # + # @return [void] + # + def create_test_target_embed_frameworks_script(test_spec) + path = target.embed_frameworks_script_path_for_spec(test_spec) + host_target_spec_names = target.app_host_dependent_targets_for_spec(test_spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + framework_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + xcframeworks_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_test_spec(test_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << test_spec.name if pod_target == target + pod_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + unless framework_paths_by_config.each_value.all?(&:empty?) && xcframeworks_by_config.each_value.all?(&:empty?) + generator = Generator::EmbedFrameworksScript.new(framework_paths_by_config, xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Generates the contents of the xcconfig file used for each app target type and saves it to disk. + # + # @param [Hash{Specification => PBXNativeTarget}] app_native_targets + # the app native targets to link the xcconfig file into. 
+ # + # @param [Hash{String=>Array}] app_resource_bundle_targets + # the additional app resource bundle targets to link the xcconfig file into. + # + # @return [void] + # + def create_app_xcconfig_files(app_native_targets, app_resource_bundle_targets) + target.app_specs.each do |app_spec| + spec_consumer = app_spec.consumer(target.platform) + app_native_target = app_native_targets[spec_consumer.spec] + + target.user_config_names_by_config_type.each do |config, names| + path = target.xcconfig_path("#{target.subspec_label(app_spec)}.#{config}") + app_spec_build_settings = target.build_settings_for_spec(app_spec, :configuration => config) + update_changed_file(app_spec_build_settings, path) + app_xcconfig_file_ref = add_file_to_support_group(path) + + # also apply the private config to resource bundle app targets related to this app spec. + scoped_app_resource_bundle_targets = app_resource_bundle_targets[app_spec.name] + apply_xcconfig_file_ref_to_targets([app_native_target] + scoped_app_resource_bundle_targets, app_xcconfig_file_ref, names) + end + end + end + + # Creates a script that copies the resources to the bundle of the app target. + # + # @param [Specification] app_spec + # The app spec to create the copy resources script for. + # + # @return [void] + # + def create_app_target_copy_resources_script(app_spec) + path = target.copy_resources_script_path_for_spec(app_spec) + resource_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), resources_by_config| + pod_targets = target.dependent_targets_for_app_spec(app_spec, :configuration => config) + resources_by_config[config_name] = pod_targets.flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end + end + unless resource_paths_by_config.each_value.all?(&:empty?) + generator = Generator::CopyResourcesScript.new(resource_paths_by_config, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that embeds the frameworks to the bundle of the app target. + # + # @param [Specification] app_spec + # The app spec to create the embed frameworks script for. + # + # @return [void] + # + def create_app_target_embed_frameworks_script(app_spec) + path = target.embed_frameworks_script_path_for_spec(app_spec) + framework_paths_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_app_spec(app_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + xcframeworks_by_config = target.user_build_configurations.each_with_object({}) do |(config_name, config), paths_by_config| + paths_by_config[config_name] = target.dependent_targets_for_app_spec(app_spec, :configuration => config).flat_map do |pod_target| + spec_paths_to_include = pod_target.library_specs.map(&:name) + spec_paths_to_include << app_spec.name if pod_target == target + pod_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact.uniq + end + end + + unless framework_paths_by_config.each_value.all?(&:empty?) 
&& xcframeworks_by_config.each_value.all?(&:empty?) + generator = Generator::EmbedFrameworksScript.new(framework_paths_by_config, xcframeworks_by_config) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that copies and strips vendored dSYMs and bcsymbolmaps. + # + # @return [void] + # + def create_copy_dsyms_script + dsym_paths = PodTargetInstaller.dsym_paths(target) + bcsymbolmap_paths = PodTargetInstaller.bcsymbolmap_paths(target) + path = target.copy_dsyms_script_path + unless dsym_paths.empty? && bcsymbolmap_paths.empty? + generator = Generator::CopydSYMsScript.new(dsym_paths, bcsymbolmap_paths) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + end + + # Creates a script that copies the appropriate xcframework slice to the build dir. + # + # @note We can't use Xcode default link libraries phase, because + # we need to ensure that we only copy the frameworks which are + # relevant for the current build configuration. + # + # @return [void] + # + def create_copy_xcframeworks_script + path = target.copy_xcframeworks_script_path + generator = Generator::CopyXCFrameworksScript.new(target.xcframeworks.values.flatten, sandbox.root, target.platform) + update_changed_file(generator, path) + add_file_to_support_group(path) + end + + # Creates a build phase which links the versioned header folders + # of the OS X framework into the framework bundle's root directory. + # This is only necessary because the way how headers are copied + # via custom copy file build phases in combination with + # header_mappings_dir interferes with xcodebuild's expectations + # about the existence of private or public headers. + # + # @param [PBXNativeTarget] native_target + # the native target to add the script phase into. + # + # @return [void] + # + def create_build_phase_to_symlink_header_folders(native_target) + # This is required on iOS for Catalyst, which uses macOS framework layouts + return unless (target.platform.name == :osx || target.platform.name == :ios) && any_header_mapping_dirs? + + build_phase = native_target.new_shell_script_build_phase('Create Symlinks to Header Folders') + build_phase.shell_script = <<-eos.strip_heredoc + cd "$CONFIGURATION_BUILD_DIR/$WRAPPER_NAME" || exit 1 + if [ ! -d Versions ]; then + # Not a versioned framework, so no need to do anything + exit 0 + fi + + public_path="${PUBLIC_HEADERS_FOLDER_PATH\#\$CONTENTS_FOLDER_PATH/}" + if [ ! -f "$public_path" ]; then + ln -fs "${PUBLIC_HEADERS_FOLDER_PATH\#$WRAPPER_NAME/}" "$public_path" + fi + + private_path="${PRIVATE_HEADERS_FOLDER_PATH\#\$CONTENTS_FOLDER_PATH/}" + if [ ! -f "$private_path" ]; then + ln -fs "${PRIVATE_HEADERS_FOLDER_PATH\#\$WRAPPER_NAME/}" "$private_path" + fi + eos + end + + ENABLE_OBJECT_USE_OBJC_FROM = { + :ios => Version.new('6'), + :osx => Version.new('10.8'), + :watchos => Version.new('2.0'), + :tvos => Version.new('9.0'), + }.freeze + + # Returns the compiler flags for the source files of the given specification. + # + # The following behavior is regarding the `OS_OBJECT_USE_OBJC` flag. When + # set to `0`, it will allow code to use `dispatch_release()` on >= iOS 6.0 + # and OS X 10.8. + # + # * New libraries that do *not* require ARC don’t need to care about this + # issue at all. + # + # * New libraries that *do* require ARC _and_ have a deployment target of + # >= iOS 6.0 or OS X 10.8: + # + # These no longer use `dispatch_release()` and should *not* have the + # `OS_OBJECT_USE_OBJC` flag set to `0`. 
+ # + # **Note:** this means that these libraries *have* to specify the + # deployment target in order to function well. + # + # * New libraries that *do* require ARC, but have a deployment target of + # < iOS 6.0 or OS X 10.8: + # + # These contain `dispatch_release()` calls and as such need the + # `OS_OBJECT_USE_OBJC` flag set to `1`. + # + # **Note:** libraries that do *not* specify a platform version are + # assumed to have a deployment target of < iOS 6.0 or OS X 10.8. + # + # For more information, see: https://opensource.apple.com/source/libdispatch/libdispatch-228.18/os/object.h + # + # @param [Specification::Consumer] consumer + # The consumer for the specification for which the compiler flags + # are needed. + # + # @param [Boolean] arc + # Whether the arc is enabled or not. + # + # @param [Symbol] language + # The language these compiler warnings are for. Can be either :objc or :swift. + # + # @return [String] The compiler flags. + # + def compiler_flags_for_consumer(consumer, arc, language) + flags = consumer.compiler_flags.dup + if !arc && language == :objc + flags << '-fno-objc-arc' + else + platform_name = consumer.platform_name + spec_deployment_target = consumer.spec.deployment_target(platform_name) + if spec_deployment_target.nil? || Version.new(spec_deployment_target) < ENABLE_OBJECT_USE_OBJC_FROM[platform_name] + flags << '-DOS_OBJECT_USE_OBJC=0' + end + end + if target.inhibit_warnings? && language == :objc + flags << '-w -Xanalyzer -analyzer-disable-all-checks' + end + flags * ' ' + end + + def apply_xcconfig_file_ref_to_targets(targets, xcconfig_file_ref, configurations) + targets.each do |config_target| + config_target.build_configurations.each do |configuration| + next unless configurations.include?(configuration.name) + configuration.base_configuration_reference = xcconfig_file_ref + end + end + end + + def create_module_map(native_target) + return super(native_target) unless custom_module_map + + path = target.module_map_path_to_write + UI.message "- Copying module map file to #{UI.path(path)}" do + contents = custom_module_map.read + unless target.build_as_framework? + contents.gsub!(/^(\s*)framework\s+(module[^{}]+){/, '\1\2{') + end + generator = Generator::Constant.new(contents) + update_changed_file(generator, path) + add_file_to_support_group(path) + + linked_path = target.module_map_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + relative_path = target.module_map_path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['MODULEMAP_FILE'] = relative_path.to_s + end + end + end + + def module_map_additional_headers + return [] unless umbrella_header_paths + + other_paths = umbrella_header_paths - [target.umbrella_header_path] + other_paths.map do |module_map_path| + # exclude other targets umbrella headers, to avoid + # incomplete umbrella warnings + Generator::ModuleMap::Header.new(module_map_path.basename, nil, nil, nil, true) + end + end + + def create_umbrella_header(native_target) + super(native_target) unless custom_module_map + end + + def custom_module_map + @custom_module_map ||= target.file_accessors.first.module_map + end + + def project_file_references_array(files, file_type) + error_message_for_missing_reference = lambda do |sf, target| + "Unable to find #{file_type} ref for `#{sf.basename}` for target `#{target.name}`." 
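+          # A runnable stand-in (Gem::Version substitutes for Pod's Version class) for
+          # the deployment-target rule documented above: ARC code whose deployment
+          # target predates iOS 6 / macOS 10.8 still uses dispatch_release(), so it
+          # needs -DOS_OBJECT_USE_OBJC=0. Values are hypothetical.
+          example_enable_from = { :ios => Gem::Version.new('6'), :osx => Gem::Version.new('10.8') }
+          example_deployment_target = '5.1' # as declared by a hypothetical podspec
+          example_flags = []
+          if example_deployment_target.nil? ||
+             Gem::Version.new(example_deployment_target) < example_enable_from[:ios]
+            example_flags << '-DOS_OBJECT_USE_OBJC=0'
+          end
+          example_flags * ' ' # => "-DOS_OBJECT_USE_OBJC=0"; Array#* with a string joins like Array#join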
+ end + + # Remove all file ref under .docc folder, but preserve the .docc folder + files = merge_to_docc_folder(files) + files.map do |sf| + begin + project.reference_for_path(sf).tap do |ref| + raise Informative, error_message_for_missing_reference.call(sf, target) unless ref + end + rescue Errno::ENOENT + # Normalize the error for Ruby < 2.7. Ruby 2.7 can crash on a different call of real path compared + # to older versions. This ensures that the error message is consistent. + raise Informative, error_message_for_missing_reference.call(sf, target) + end + end + end + + def any_header_mapping_dirs? + return @any_header_mapping_dirs if defined?(@any_header_mapping_dirs) + @any_header_mapping_dirs = target.file_accessors.any? { |fa| fa.spec_consumer.header_mappings_dir } + end + + def header_mappings_dir(file_accessor) + @header_mappings_dirs ||= {} + return @header_mappings_dirs[file_accessor] if @header_mappings_dirs.key?(file_accessor) + @header_mappings_dirs[file_accessor] = if dir = file_accessor.spec_consumer.header_mappings_dir + file_accessor.path_list.root + dir + end + end + + def add_header(file_accessor, build_file, public_headers, project_headers, private_headers, native_target) + file_ref = build_file.file_ref + acl = if !target.build_as_framework? # Headers are already rooted at ${PODS_ROOT}/Headers/P*/[pod]/... + 'Project' + elsif public_headers.include?(file_ref.real_path) + 'Public' + elsif project_headers.include?(file_ref.real_path) + 'Project' + elsif private_headers.include?(file_ref.real_path) + 'Private' + else + 'Project' + end + + if target.build_as_framework? && !header_mappings_dir(file_accessor).nil? && acl != 'Project' + relative_path = if mapping_dir = header_mappings_dir(file_accessor) + file_ref.real_path.relative_path_from(mapping_dir) + else + file_ref.real_path.relative_path_from(file_accessor.path_list.root) + end + compile_build_phase_index = native_target.build_phases.index do |bp| + bp.is_a?(Xcodeproj::Project::Object::PBXSourcesBuildPhase) + end + sub_dir = relative_path.dirname + copy_phase_name = "Copy #{sub_dir} #{acl} Headers" + copy_phase = native_target.copy_files_build_phases.find { |bp| bp.name == copy_phase_name } || + native_target.new_copy_files_build_phase(copy_phase_name) + native_target.build_phases.move(copy_phase, compile_build_phase_index - 1) unless compile_build_phase_index.nil? + copy_phase.symbol_dst_subfolder_spec = :products_directory + copy_phase.dst_path = "$(#{acl.upcase}_HEADERS_FOLDER_PATH)/#{sub_dir}" + copy_phase.add_file_reference(file_ref, true) + else + build_file.settings ||= {} + build_file.settings['ATTRIBUTES'] = [acl] + end + end + + def support_files_group + pod_name = target.pod_name + dir = target.support_files_dir + project.pod_support_files_group(pod_name, dir) + end + + def test_native_target_from_spec(spec, test_native_targets) + test_target_label = target.test_target_label(spec) + test_native_targets.find do |test_native_target| + test_native_target.name == test_target_label + end + end + + # Adds a placeholder native target for the library to the Pods project with the + # appropriate build configurations. + # + # @return [PBXAggregateTarget] the native target that was added. + # + def add_placeholder_target + native_target = project.new_aggregate_target(target.label, [], target.platform.name, deployment_target) + target.user_build_configurations.each do |bc_name, type| + native_target.add_build_configuration(bc_name, type) + end + unless target.archs.empty? 
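+          # Simplified, self-contained restatement of the visibility decision in
+          # add_header above (the project_headers case is folded into the default):
+          # frameworks sort headers into Public/Private/Project, while static
+          # libraries always use 'Project' because ${PODS_ROOT}/Headers exposes them.
+          def example_header_acl(build_as_framework, real_path, public_headers, private_headers)
+            return 'Project' unless build_as_framework
+            return 'Public'  if public_headers.include?(real_path)
+            return 'Private' if private_headers.include?(real_path)
+            'Project'
+          end
+          example_header_acl(true, '/tmp/MyPod/MyPod.h', ['/tmp/MyPod/MyPod.h'], []) # => "Public"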
+ native_target.build_configurations.each do |configuration| + configuration.build_settings['ARCHS'] = target.archs + end + end + native_target + end + + # Adds a shell script phase, intended only for library targets that contain swift, + # to copy the ObjC compatibility header (the -Swift.h file that the swift compiler generates) + # to the built products directory. Additionally, the script phase copies the module map, appending a `.Swift` + # submodule that references the (moved) compatibility header. Since the module map has been moved, the umbrella header + # is _also_ copied, so that it is sitting next to the module map. This is necessary for a successful archive build. + # + # @param [PBXNativeTarget] native_target + # the native target to add the Swift static library script phase into. + # + # @return [Void] + # + def add_swift_library_compatibility_header_phase(native_target) + if custom_module_map + raise Informative, 'Using Swift static libraries with custom module maps is currently not supported. ' \ + "Please build `#{target.label}` as a framework or remove the custom module map." + end + + build_phase = native_target.new_shell_script_build_phase('Copy generated compatibility header') + + relative_module_map_path = target.module_map_path.relative_path_from(target.sandbox.root) + relative_umbrella_header_path = target.umbrella_header_path.relative_path_from(target.sandbox.root) + + build_phase.shell_script = <<-SH.strip_heredoc + COMPATIBILITY_HEADER_PATH="${BUILT_PRODUCTS_DIR}/Swift Compatibility Header/${PRODUCT_MODULE_NAME}-Swift.h" + MODULE_MAP_PATH="${BUILT_PRODUCTS_DIR}/${PRODUCT_MODULE_NAME}.modulemap" + + ditto "${DERIVED_SOURCES_DIR}/${PRODUCT_MODULE_NAME}-Swift.h" "${COMPATIBILITY_HEADER_PATH}" + ditto "${PODS_ROOT}/#{relative_module_map_path}" "${MODULE_MAP_PATH}" + ditto "${PODS_ROOT}/#{relative_umbrella_header_path}" "${BUILT_PRODUCTS_DIR}" + printf "\\n\\nmodule ${PRODUCT_MODULE_NAME}.Swift {\\n header \\"${COMPATIBILITY_HEADER_PATH}\\"\\n requires objc\\n}\\n" >> "${MODULE_MAP_PATH}" + SH + build_phase.input_paths = %W( + ${DERIVED_SOURCES_DIR}/${PRODUCT_MODULE_NAME}-Swift.h + ${PODS_ROOT}/#{relative_module_map_path} + ${PODS_ROOT}/#{relative_umbrella_header_path} + ) + build_phase.output_paths = %W( + ${BUILT_PRODUCTS_DIR}/${PRODUCT_MODULE_NAME}.modulemap + ${BUILT_PRODUCTS_DIR}/#{relative_umbrella_header_path.basename} + ${BUILT_PRODUCTS_DIR}/Swift\ Compatibility\ Header/${PRODUCT_MODULE_NAME}-Swift.h + ) + end + + def validate_targets_contain_sources(native_targets) + native_targets.each do |native_target| + next unless native_target.source_build_phase.files.empty? + raise Informative, "Unable to install the `#{target.label}` pod, because the `#{native_target}` target in Xcode would have no sources to compile." + end + end + + # Raises if a vendored xcframework contains frameworks of mixed linkage or mixed packaging + # + def validate_xcframeworks + target.xcframeworks.each_value do |xcframeworks| + xcframeworks.each do |xcframework| + if xcframework.slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it does not contain any binaries." + end + if xcframework.build_type.dynamic_library? + raise Informative, <<-MSG.strip_heredoc + Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it contains dynamic libraries which are not supported. + Use dynamic frameworks for dynamic linking instead. + MSG + end + if xcframework.build_type.static_library? 
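+          # For illustration: the printf in the script phase above appends a `.Swift`
+          # submodule to the copied module map so Objective-C code can import the
+          # generated compatibility header through the module. With a hypothetical
+          # module name, the appended entry looks like this:
+          example_module = 'MyPod'
+          example_submodule = <<~MODULEMAP
+            module #{example_module}.Swift {
+              header "${BUILT_PRODUCTS_DIR}/Swift Compatibility Header/#{example_module}-Swift.h"
+              requires objc
+            }
+          MODULEMAP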
+ binary_names = xcframework.slices.map { |slice| File.basename(slice.binary_path, File.extname(slice.binary_path)) }.uniq + if binary_names.size > 1 + raise Informative, <<-MSG.strip_heredoc + Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}` because it contains static libraries + with differing binary names: #{binary_names.to_sentence}. + MSG + end + end + dynamic_slices, static_slices = xcframework.slices.partition(&:dynamic?) + if !dynamic_slices.empty? && !static_slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}`, because it contains both static and dynamic frameworks." + end + library_slices, framework_slices = xcframework.slices.partition(&:library?) + if !library_slices.empty? && !framework_slices.empty? + raise Informative, "Unable to install vendored xcframework `#{xcframework.name}` for Pod `#{target.label}`, because it contains both libraries and frameworks." + end + end + end + end + + #-----------------------------------------------------------------------# + + class << self + # @param [PodTarget] target the target to be installed + # + # @return [Array] the dSYM paths for the given target + # + def dsym_paths(target) + dsym_paths = target.framework_paths.values.flatten.reject { |fmwk_path| fmwk_path.dsym_path.nil? }.map(&:dsym_path) + dsym_paths.concat(target.xcframeworks.values.flatten.flat_map { |xcframework| xcframework_dsyms(xcframework.path) }) + dsym_paths.map do |dsym_path| + dsym_pathname = Pathname(dsym_path) + dsym_path = "${PODS_ROOT}/#{dsym_pathname.relative_path_from(target.sandbox.root)}" unless dsym_pathname.relative? + dsym_path + end + end + + # @param [PodTarget] target the target to be installed + # + # @return [Array] the bcsymbolmap paths for the given target + # + def bcsymbolmap_paths(target) + target.framework_paths.values.flatten.reject do |fmwk_path| + fmwk_path.bcsymbolmap_paths.nil? + end.flat_map(&:bcsymbolmap_paths).uniq + end + + # @param [Pathname] xcframework_path + # the base path of the .xcframework bundle + # + # @return [Array] all found .dSYM paths + # + def xcframework_dsyms(xcframework_path) + basename = File.basename(xcframework_path, '.xcframework') + dsym_basename = basename + '.dSYMs' + path = xcframework_path.dirname + dsym_basename + if File.directory?(path) + Dir.glob(path + '*.dSYM') + else + [] + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb new file mode 100644 index 0000000..a1df9bb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pod_target_integrator.rb @@ -0,0 +1,312 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # This class is responsible for integrating a pod target. This includes integrating + # the test targets included by each pod target. + # + class PodTargetIntegrator + # @return [TargetInstallationResult] the installation result of the target that should be integrated. + # + attr_reader :target_installation_result + + # @return [Boolean] whether to use input/output paths for build phase scripts + # + attr_reader :use_input_output_paths + alias use_input_output_paths? 
use_input_output_paths + + # Initialize a new instance + # + # @param [TargetInstallationResult] target_installation_result @see #target_installation_result + # @param [Boolean] use_input_output_paths @see #use_input_output_paths + # + def initialize(target_installation_result, use_input_output_paths: true) + @target_installation_result = target_installation_result + @use_input_output_paths = use_input_output_paths + end + + # Integrates the pod target. + # + # @return [void] + # + def integrate! + UI.section(integration_message) do + target_installation_result.non_library_specs_by_native_target.each do |native_target, spec| + add_embed_frameworks_script_phase(native_target, spec) + add_copy_resources_script_phase(native_target, spec) + add_on_demand_resources(native_target, spec) if spec.app_specification? + UserProjectIntegrator::TargetIntegrator.create_or_update_user_script_phases(script_phases_for_specs(spec), native_target) + end + add_copy_dsyms_script_phase(target_installation_result.native_target) + add_copy_xcframeworks_script_phase(target_installation_result.native_target) + UserProjectIntegrator::TargetIntegrator.create_or_update_user_script_phases(script_phases_for_specs(target.library_specs), target_installation_result.native_target) + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class} for target `#{target.label}'>" + end + + private + + # @!group Integration steps + #---------------------------------------------------------------------# + + # Find or create a 'Copy Pods Resources' build phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the copy resources script + # + # @param [Pod::Specification] spec + # the specification to integrate + # + # @return [void] + # + def add_copy_resources_script_phase(native_target, spec) + script_path = "${PODS_ROOT}/#{target.copy_resources_script_path_for_spec(spec).relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + dependent_targets = if spec.test_specification? + target.dependent_targets_for_test_spec(spec) + else + target.dependent_targets_for_app_spec(spec) + end + host_target_spec_names = target.app_host_dependent_targets_for_spec(spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + resource_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.resource_paths.values_at(*spec_paths_to_include).flatten.compact + end.uniq + + if use_input_output_paths? && !resource_paths.empty? 
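+          # Runnable sketch (hypothetical sandbox layout) of the `script_path`
+          # interpolation above: paths are rewritten relative to ${PODS_ROOT} so the
+          # generated project does not hard-code an absolute checkout location.
+          require 'pathname'
+          example_root = Pathname.new('/workspace/Pods')
+          example_script = Pathname.new('/workspace/Pods/Target Support Files/MyPod/MyPod-resources.sh')
+          "${PODS_ROOT}/#{example_script.relative_path_from(example_root)}"
+          # => "${PODS_ROOT}/Target Support Files/MyPod/MyPod-resources.sh"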
+ input_file_list_path = target.copy_resources_script_input_files_path_for_spec(spec) + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + resource_paths + + output_file_list_path = target.copy_resources_script_output_files_path_for_spec(spec) + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = UserProjectIntegrator::TargetIntegrator.resource_output_paths(resource_paths) + end + + if resource_paths.empty? + UserProjectIntegrator::TargetIntegrator.remove_copy_resources_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_copy_resources_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Find or create a 'Embed Pods Frameworks' Run Script Build Phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the embed frameworks script + # + # @param [Pod::Specification] spec + # the specification to integrate + # + # @return [void] + # + def add_embed_frameworks_script_phase(native_target, spec) + script_path = "${PODS_ROOT}/#{target.embed_frameworks_script_path_for_spec(spec).relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + dependent_targets = if spec.test_specification? + target.dependent_targets_for_test_spec(spec) + else + target.dependent_targets_for_app_spec(spec) + end + host_target_spec_names = target.app_host_dependent_targets_for_spec(spec).flat_map do |pt| + pt.specs.map(&:name) + end.uniq + framework_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.framework_paths.values_at(*spec_paths_to_include).flatten.compact + end.uniq + xcframework_paths = dependent_targets.flat_map do |dependent_target| + spec_paths_to_include = dependent_target.library_specs.map(&:name) + spec_paths_to_include -= host_target_spec_names + spec_paths_to_include << spec.name if dependent_target == target + dependent_target.xcframeworks.values_at(*spec_paths_to_include).flatten.compact + end.uniq + + if use_input_output_paths? && !framework_paths.empty? || !xcframework_paths.empty? 
+ input_file_list_path = target.embed_frameworks_script_input_files_path_for_spec(spec) + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths_by_config[input_paths_key] = [script_path] + UserProjectIntegrator::TargetIntegrator.embed_frameworks_input_paths(framework_paths, xcframework_paths) + + output_file_list_path = target.embed_frameworks_script_output_files_path_for_spec(spec) + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = UserProjectIntegrator::TargetIntegrator.embed_frameworks_output_paths(framework_paths, xcframework_paths) + end + + if framework_paths.empty? && xcframework_paths.empty? + UserProjectIntegrator::TargetIntegrator.remove_embed_frameworks_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_embed_frameworks_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Find or create a 'Prepare Artifacts' Run Script Build Phase + # + # @param [PBXNativeTarget] native_target + # the native target for which to add the prepare artifacts script + # + # @return [void] + # + def add_copy_xcframeworks_script_phase(native_target) + script_path = "${PODS_ROOT}/#{target.copy_xcframeworks_script_path.relative_path_from(target.sandbox.root)}" + + input_paths_by_config = {} + output_paths_by_config = {} + + xcframeworks = target.xcframeworks.values.flatten + + if use_input_output_paths? && !xcframeworks.empty? + input_file_list_path = target.copy_xcframeworks_script_input_files_path + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths = input_paths_by_config[input_paths_key] = [script_path] + + framework_paths = xcframeworks.map { |xcf| "${PODS_ROOT}/#{xcf.path.relative_path_from(target.sandbox.root)}" } + input_paths.concat framework_paths + + output_file_list_path = target.copy_xcframeworks_script_output_files_path + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths_by_config[output_paths_key] = xcframeworks.map do |xcf| + "#{Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{xcf.target_name}/#{xcf.name}.framework" + end + end + + if xcframeworks.empty? + UserProjectIntegrator::TargetIntegrator.remove_copy_xcframeworks_script_phase_from_target(native_target) + else + UserProjectIntegrator::TargetIntegrator.create_or_update_copy_xcframeworks_script_phase_to_target( + native_target, script_path, input_paths_by_config, output_paths_by_config) + end + end + + # Adds a script phase that copies and strips dSYMs that are part of this target. Note this only deals with + # vendored dSYMs. 
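+          # Illustration of the output paths computed above for the copy-xcframeworks
+          # phase (values hypothetical; the literal variable below stands in for
+          # Target::BuildSettings::XCFRAMEWORKS_BUILD_DIR_VARIABLE): the script copies
+          # only the slice matching the current configuration into this location.
+          example_target_name = 'MyPod'
+          example_xcf_name = 'MyKit'
+          "${PODS_XCFRAMEWORKS_BUILD_DIR}/#{example_target_name}/#{example_xcf_name}.framework"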
+ # + # @param [PBXNativeTarget] native_target + # the native target for which to add the copy dSYM files build phase. + # + # @return [void] + # + def add_copy_dsyms_script_phase(native_target) + script_path = "${PODS_ROOT}/#{target.copy_dsyms_script_path.relative_path_from(target.sandbox.root)}" + dsym_paths = PodTargetInstaller.dsym_paths(target) + bcsymbolmap_paths = PodTargetInstaller.bcsymbolmap_paths(target) + + if dsym_paths.empty? && bcsymbolmap_paths.empty? + script_phase = native_target.shell_script_build_phases.find do |bp| + bp.name && bp.name.end_with?(UserProjectIntegrator::TargetIntegrator::COPY_DSYM_FILES_PHASE_NAME) + end + native_target.build_phases.delete(script_phase) if script_phase.present? + return + end + + phase_name = UserProjectIntegrator::TargetIntegrator::BUILD_PHASE_PREFIX + UserProjectIntegrator::TargetIntegrator::COPY_DSYM_FILES_PHASE_NAME + phase = UserProjectIntegrator::TargetIntegrator.create_or_update_shell_script_build_phase(native_target, phase_name) + phase.shell_script = %("#{script_path}"\n) + + input_paths_by_config = {} + output_paths_by_config = {} + if use_input_output_paths? + input_file_list_path = target.copy_dsyms_script_input_files_path + input_file_list_relative_path = "${PODS_ROOT}/#{input_file_list_path.relative_path_from(target.sandbox.root)}" + input_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(input_file_list_path, input_file_list_relative_path) + input_paths = input_paths_by_config[input_paths_key] = [] + input_paths.concat([dsym_paths, *bcsymbolmap_paths].flatten.compact) + + output_file_list_path = target.copy_dsyms_script_output_files_path + output_file_list_relative_path = "${PODS_ROOT}/#{output_file_list_path.relative_path_from(target.sandbox.root)}" + output_paths_key = UserProjectIntegrator::TargetIntegrator::XCFileListConfigKey.new(output_file_list_path, output_file_list_relative_path) + output_paths = output_paths_by_config[output_paths_key] = [] + + dsym_output_paths = dsym_paths.map { |dsym_path| "${DWARF_DSYM_FOLDER_PATH}/#{File.basename(dsym_path)}" } + bcsymbolmap_output_paths = bcsymbolmap_paths.map { |bcsymbolmap_path| "${DWARF_DSYM_FOLDER_PATH}/#{File.basename(bcsymbolmap_path)}" } + output_paths.concat([dsym_output_paths, *bcsymbolmap_output_paths].flatten.compact) + end + + UserProjectIntegrator::TargetIntegrator.set_input_output_paths(phase, input_paths_by_config, output_paths_by_config) + end + + # Adds the ODRs that are related to this app spec. This includes the app spec dependencies as well as the ODRs + # coming from the app spec itself. + # + # @param [Xcodeproj::PBXNativeTarget] native_target + # the native target for which to add the ODR file references into. + # + # @param [Specification] app_spec + # the app spec to integrate ODRs for. + # + # @return [void] + # + def add_on_demand_resources(native_target, app_spec) + dependent_targets = target.dependent_targets_for_app_spec(app_spec) + parent_odr_group = native_target.project.group_for_spec(app_spec.name) + + # Add ODRs of the app spec dependencies first. + dependent_targets.each do |pod_target| + file_accessors = pod_target.file_accessors.select do |fa| + fa.spec.library_specification? || + fa.spec.test_specification? 
&& pod_target.test_app_hosts_by_spec[fa.spec]&.first == app_spec + end + target_odr_group_name = "#{pod_target.label}-OnDemandResources" + UserProjectIntegrator::TargetIntegrator.update_on_demand_resources(target.sandbox, native_target.project, + native_target, file_accessors, + parent_odr_group, target_odr_group_name) + end + + # Now add the ODRs of our own app spec declaration. + file_accessor = target.file_accessors.find { |fa| fa.spec == app_spec } + target_odr_group_name = "#{target.subspec_label(app_spec)}-OnDemandResources" + UserProjectIntegrator::TargetIntegrator.update_on_demand_resources(target.sandbox, native_target.project, + native_target, file_accessor, + parent_odr_group, target_odr_group_name) + end + + # @return [String] the message that should be displayed for the target + # integration. + # + def integration_message + "Integrating target `#{target.name}`" + end + + # @return [PodTarget] the target part of the installation result. + # + def target + target_installation_result.target + end + + # @param [Specification, Array] specs + # the specs to return script phrases from. + # + # @return [ArrayString>] an array of all combined script phases from the specs. + # + def script_phases_for_specs(specs) + Array(specs).flat_map { |spec| spec.consumer(target.platform).script_phases } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb new file mode 100644 index 0000000..e190631 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/pods_project_writer.rb @@ -0,0 +1,90 @@ +module Pod + class Installer + class Xcode + class PodsProjectWriter + # @return [Sandbox] sandbox + # The Pods sandbox instance. + # + attr_reader :sandbox + + # @return [Array] projects + # The list project to write. + # + attr_reader :projects + + # @return [Hash] pod_target_installation_results + # Hash of pod target name to installation results. + # + attr_reader :pod_target_installation_results + + # @return [InstallationOptions] installation_options + # + attr_reader :installation_options + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Project] projects @see #project + # @param [Hash] pod_target_installation_results @see #pod_target_installation_results + # @param [InstallationOptions] installation_options @see #installation_options + # + def initialize(sandbox, projects, pod_target_installation_results, installation_options) + @sandbox = sandbox + @projects = projects + @pod_target_installation_results = pod_target_installation_results + @installation_options = installation_options + end + + # Writes projects to disk. + # + # @yield If provided, this block will execute right before writing the projects to disk. + # + def write! + cleanup_projects(projects) + + projects.each do |project| + library_product_types = [:framework, :dynamic_library, :static_library] + results_by_native_target = Hash[pod_target_installation_results.map do |_, result| + [result.native_target, result] + end] + project.recreate_user_schemes(false) do |scheme, target| + next unless target.respond_to?(:symbol_type) + next unless library_product_types.include? 
target.symbol_type + installation_result = results_by_native_target[target] + next unless installation_result + installation_result.test_native_targets.each do |test_native_target| + scheme.add_test_target(test_native_target) + end + end + end + + yield if block_given? + + save_projects(projects) + end + + private + + # Cleans up projects before writing. + # + def cleanup_projects(projects) + projects.each do |project| + [project.pods, project.support_files_group, + project.development_pods, project.dependencies_group].each { |group| group.remove_from_project if group.empty? } + end + end + + # Sorts and then saves projects which writes them to disk. + # + def save_projects(projects) + projects.each do |project| + project.sort(:groups_position => :below) + UI.message "- Writing Xcode project file to #{UI.path project.path}" do + project.save + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb new file mode 100644 index 0000000..26ad7be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/project_generator.rb @@ -0,0 +1,120 @@ +module Pod + class Installer + class Xcode + # Responsible for creating and preparing a Pod::Project instance + # + class ProjectGenerator + # @return [Sandbox] sandbox + # The Pods sandbox instance. + # + attr_reader :sandbox + + # @return [String] path + # Path of the project. + # + attr_reader :path + + # @return [Array] pod_targets + # Array of pod targets this project includes. + # + attr_reader :pod_targets + + # @return [Hash{String=>Symbol}] A hash representing all the user build + # configurations across all integration targets. Each key + # corresponds to the name of a configuration and its value to + # its type (`:debug` or `:release`). + # + attr_reader :build_configurations + + # @return [Array] The list of all platforms this project supports. + # + attr_reader :platforms + + # @return [Integer] Object version for the Xcode project. + # + attr_reader :object_version + + # @return [String] Path to the Podfile included in the project. + # + attr_reader :podfile_path + + # @return [Boolean] Bool indicating if this project is a pod target subproject. + # Used by `generate_multiple_pod_projects` installation option. + # + attr_reader :pod_target_subproject + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [String] path @see #path + # @param [Array] pod_targets @see #pod_targets + # @param [Hash{String=>Symbol}] build_configurations @see #build_configurations + # @param [Array] platforms @see #platforms + # @param [Integer] object_version @see #object_version + # @param [String] podfile_path @see #podfile_path + # + def initialize(sandbox, path, pod_targets, build_configurations, platforms, + object_version, podfile_path = nil, pod_target_subproject: false) + @sandbox = sandbox + @path = path + @pod_targets = pod_targets + @build_configurations = build_configurations + @platforms = platforms + @object_version = object_version + @podfile_path = podfile_path + @pod_target_subproject = pod_target_subproject + end + + public + + # @return [Project] Generated and prepared project. + # + def generate! 
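+          # Hypothetical usage sketch (invented values) showing how an installer-side
+          # caller would drive this class: construct it with the sandbox state, call
+          # generate!, then write the produced project to disk.
+          example_generator = ProjectGenerator.new(sandbox, path, pod_targets,
+                                                   { 'Debug' => :debug, 'Release' => :release },
+                                                   platforms, 51, podfile_path)
+          example_project = example_generator.generate!
+          example_project.save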
+ project = create_project(path, object_version, pod_target_subproject) + prepare(sandbox, project, pod_targets, build_configurations, platforms, podfile_path) + project + end + + private + + def create_project(path, object_version, pod_target_subproject) + object_version ||= Xcodeproj::Constants::DEFAULT_OBJECT_VERSION + Pod::Project.new(path, false, object_version, :pod_target_subproject => pod_target_subproject) + end + + def prepare(sandbox, project, pod_targets, build_configurations, platforms, podfile_path) + UI.message "- Creating #{project.project_name} project" do + build_configurations.each do |name, type| + project.add_build_configuration(name, type) + end + # Reset symroot just in case the user has added a new build configuration other than 'Debug' or 'Release'. + project.symroot = Pod::Project::LEGACY_BUILD_ROOT + pod_names = pod_targets.map(&:pod_name).uniq + pod_names.each do |pod_name| + local = sandbox.local?(pod_name) + path = sandbox.pod_dir(pod_name) + was_absolute = sandbox.local_path_was_absolute?(pod_name) + project.add_pod_group(pod_name, path, local, was_absolute) + end + if podfile_path + project.add_podfile(podfile_path) + end + osx_deployment_target = platforms.select { |p| p.name == :osx }.map(&:deployment_target).min + ios_deployment_target = platforms.select { |p| p.name == :ios }.map(&:deployment_target).min + watchos_deployment_target = platforms.select { |p| p.name == :watchos }.map(&:deployment_target).min + tvos_deployment_target = platforms.select { |p| p.name == :tvos }.map(&:deployment_target).min + project.build_configurations.each do |build_configuration| + build_configuration.build_settings['MACOSX_DEPLOYMENT_TARGET'] = osx_deployment_target.to_s if osx_deployment_target + build_configuration.build_settings['IPHONEOS_DEPLOYMENT_TARGET'] = ios_deployment_target.to_s if ios_deployment_target + build_configuration.build_settings['WATCHOS_DEPLOYMENT_TARGET'] = watchos_deployment_target.to_s if watchos_deployment_target + build_configuration.build_settings['TVOS_DEPLOYMENT_TARGET'] = tvos_deployment_target.to_s if tvos_deployment_target + build_configuration.build_settings['STRIP_INSTALLED_PRODUCT'] = 'NO' + build_configuration.build_settings['CLANG_ENABLE_OBJC_ARC'] = 'YES' + build_configuration.build_settings['CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED'] = 'YES' + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb new file mode 100644 index 0000000..3e1d55e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installation_result.rb @@ -0,0 +1,140 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # A simple container produced after a target installation is completed. + # + class TargetInstallationResult + # @return [Target] target + # The target this installation result is for. + # + attr_reader :target + + # @return [PBXNativeTarget] native_target + # The native target that was produced for this target. + # + attr_reader :native_target + + # @return [Array] resource_bundle_targets + # The resource bundle targets that were produced for this target. Can be empty if the target had + # no resource bundles. 
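+          # Stand-in sketch for the deployment-target selection in `prepare` above:
+          # each OS gets the *lowest* deployment target found among the pod platforms,
+          # so every pod target in the project can still build. Hypothetical values.
+          example_platform = Struct.new(:name, :deployment_target)
+          example_platforms = [example_platform.new(:ios, Gem::Version.new('12.0')),
+                               example_platform.new(:ios, Gem::Version.new('11.0'))]
+          example_ios_min = example_platforms.select { |p| p.name == :ios }
+                                             .map(&:deployment_target).min
+          example_ios_min.to_s # => "11.0", stored as IPHONEOS_DEPLOYMENT_TARGET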
+ # + attr_reader :resource_bundle_targets + + # @return [Array] test_native_targets + # The test native targets that were produced for this target. Can be empty if there were no test + # native targets created (e.g. no test specs present). + # + attr_reader :test_native_targets + + # @return [Hash{String=>Array}] test_resource_bundle_targets + # The test resource bundle targets that were produced for this target keyed by test spec name. + # Can be empty if the target had no resource bundles for any tests. + # + attr_reader :test_resource_bundle_targets + + # @return [Array] test_app_host_targets + # The test app host native targets that were produced for this target. Can be empty. + # + attr_reader :test_app_host_targets + + # @return [Hash{Specification => PBXNativeTarget}] app_native_targets + # The app native targets that were produced for this target. Can be empty if there were no app + # native targets created (e.g. no app specs present). + # + attr_reader :app_native_targets + + # @return [Hash{String=>Array}] app_resource_bundle_targets + # The app resource bundle targets that were produced for this target keyed by app spec name. + # Can be empty if the target had no resource bundles for any apps. + # + attr_reader :app_resource_bundle_targets + + # Initialize a new instance + # + # @param [Target] target @see #target + # @param [PBXNativeTarget] native_target @see #native_target + # @param [Array] resource_bundle_targets @see #resource_bundle_targets + # @param [Array] test_native_targets @see #test_native_targets + # @param [Hash{String=>Array}] test_resource_bundle_targets @see #test_resource_bundle_targets + # @param [Array] test_app_host_targets @see #test_app_host_targets + # @param [Hash{Specification => PBXNativeTarget}] app_native_targets @see #app_native_targets + # @param [Hash{String=>Array}] app_resource_bundle_targets @see #app_resource_bundle_targets + # + def initialize(target, native_target, resource_bundle_targets = [], test_native_targets = [], + test_resource_bundle_targets = {}, test_app_host_targets = [], + app_native_targets = {}, app_resource_bundle_targets = {}) + @target = target + @native_target = native_target + @resource_bundle_targets = resource_bundle_targets + @test_native_targets = test_native_targets + @test_resource_bundle_targets = test_resource_bundle_targets + @test_app_host_targets = test_app_host_targets + @app_native_targets = app_native_targets + @app_resource_bundle_targets = app_resource_bundle_targets + end + + # Returns the corresponding native target to use based on the provided specification. + # + # @param [Specification] spec + # The specification to base from in order to find the native target. + # + # @return [PBXNativeTarget, Nil] the native target to use or `nil` if none is found. + # + def native_target_for_spec(spec) + return native_target if spec.library_specification? + return test_native_target_from_spec(spec) if spec.test_specification? + app_native_target_from_spec(spec) if spec.app_specification? + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the test native targets and the value + # is the test spec associated with this native target. + # + def test_specs_by_native_target + test_specs_by_native_target = Hash[target.test_specs.map { |spec| [test_native_target_from_spec(spec), spec] }] + test_specs_by_native_target.delete_if { |k, _| k.nil? 
} + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the app native targets and the value + # is the app spec associated with this native target. + # + def app_specs_by_native_target + app_specs_by_native_target = Hash[target.app_specs.map { |spec| [app_native_target_from_spec(spec), spec] }] + app_specs_by_native_target.delete_if { |k, _| k.nil? } + end + + # @return [Hash{PBXNativeTarget => Specification}] a hash where the keys are the native targets and the value + # is the non-library spec associated with this native target. + # + def non_library_specs_by_native_target + test_specs_by_native_target.merge(app_specs_by_native_target) + end + + # @param label [String] the label of the app host target. + # + # @return [PBXNativeTarget] the app host target with the given target label. + # + def app_host_target_labelled(label) + app_native_targets.values.find do |app_native_target| + app_native_target.name == label + end || test_app_host_targets.find do |app_native_target| + app_native_target.name == label + end + end + + private + + def test_native_target_from_spec(spec) + test_native_targets.find do |test_native_target| + test_native_target.name == target.test_target_label(spec) + end + end + + def app_native_target_from_spec(spec) + app_native_targets[spec] + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb new file mode 100644 index 0000000..353e3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer.rb @@ -0,0 +1,257 @@ +require 'stringio' + +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # Controller class responsible of creating and configuring the static + # library target in Pods project. It also creates the support file needed + # by the target. + # + class TargetInstaller + include TargetInstallerHelper + + # @return [Sandbox] sandbox + # The sandbox where the support files should be generated. + # + attr_reader :sandbox + + # @return [Pod::Project] + # The project to install the target into. + # + attr_reader :project + + # @return [Target] target + # The library whose target needs to be generated. + # + attr_reader :target + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see #sandbox + # @param [Pod::Project] project @see #project + # @param [Target] target @see #target + # + def initialize(sandbox, project, target) + @sandbox = sandbox + @project = project + @target = target + end + + private + + #-----------------------------------------------------------------------# + + # @!group Installation steps + + # Adds the target for the library to the Pods project with the + # appropriate build configurations. + # + # @note The `PODS_HEADERS_SEARCH_PATHS` overrides the xcconfig. + # + # @return [PBXNativeTarget] the native target that was added. + # + def add_target + product_type = target.product_type + name = target.label + platform = target.platform.name + language = target.uses_swift? ? 
:swift : :objc + native_target = project.new_target(product_type, name, platform, deployment_target, nil, language, target.product_basename) + native_target.product_reference.name = name + + target.user_build_configurations.each do |bc_name, type| + native_target.add_build_configuration(bc_name, type) + end + + native_target.build_configurations.each do |configuration| + configuration.build_settings.merge!(custom_build_settings) + end + + native_target + end + + # @return [String] The deployment target. + # + def deployment_target + target.platform.deployment_target.to_s + end + + # Returns the customized build settings which are overridden in the build + # settings of the user target. + # + # @return [Hash{String => String}] + # + def custom_build_settings + settings = {} + + unless target.archs.empty? + settings['ARCHS'] = target.archs + end + + if target.build_as_static_framework? + settings['MACH_O_TYPE'] = 'staticlib' + elsif target.build_as_static_library? + settings.merge!('OTHER_LDFLAGS' => '', 'OTHER_LIBTOOLFLAGS' => '') + end + + settings + end + + # Creates the directory where to store the support files of the target. + # + def create_support_files_dir + target.support_files_dir.mkpath + end + + # Remove temp file whose store .prefix/config/dummy file. + # + def clean_support_files_temp_dir + support_files_temp_dir.rmtree if support_files_temp_dir.exist? + end + + # @return [String] The temp file path to store temporary files. + # + def support_files_temp_dir + sandbox.target_support_files_dir('generated_files_tmp') + end + + # Creates the Info.plist file which sets public framework attributes + # + # @param [Pathname] path + # the path to save the generated Info.plist file. + # + # @param [PBXNativeTarget] native_target + # the native target to link the generated Info.plist file into. + # + # @param [Version] version + # the version to use for when generating this Info.plist file. + # + # @param [Platform] platform + # the platform to use for when generating this Info.plist file. + # + # @param [Symbol] bundle_package_type + # the CFBundlePackageType of the target this Info.plist file is for. + # + # @param [Hash] additional_entries + # additional entries for the generated Info.plist + # + # @return [void] + # + def create_info_plist_file(path, native_target, version, platform, bundle_package_type = :fmwk, additional_entries: {}) + create_info_plist_file_with_sandbox(@sandbox, path, native_target, version, platform, bundle_package_type, + :additional_entries => additional_entries) + add_file_to_support_group(path) + end + + # Creates the module map file which ensures that the umbrella header is + # recognized with a customized path + # + # @param [PBXNativeTarget] native_target + # the native target to link the module map file into. + # + # @return [void] + # + def create_module_map(native_target) + path = target.module_map_path_to_write + UI.message "- Generating module map file at #{UI.path(path)}" do + generator = Generator::ModuleMap.new(target) + yield generator if block_given? 
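+          # Recap sketch of `custom_build_settings` above (hypothetical flag): a pod
+          # built as a *static framework* keeps the .framework wrapper but must link
+          # as a static archive, which MACH_O_TYPE=staticlib requests from Xcode.
+          example_settings = {}
+          example_build_as_static_framework = true
+          example_settings['MACH_O_TYPE'] = 'staticlib' if example_build_as_static_framework
+          example_settings # => {"MACH_O_TYPE"=>"staticlib"}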
+ update_changed_file(generator, path) + add_file_to_support_group(path) + + linked_path = target.module_map_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + relative_path_string = target.module_map_path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['MODULEMAP_FILE'] = relative_path_string + end + end + end + + # Generates a header which ensures that all header files are exported + # in the module map + # + # @param [PBXNativeTarget] native_target + # the native target to link the umbrella header file into. + # + # @yield_param [Generator::UmbrellaHeader] + # yielded once to configure the imports + # + # @return [void] + # + def create_umbrella_header(native_target) + path = target.umbrella_header_path_to_write + UI.message "- Generating umbrella header at #{UI.path(path)}" do + generator = Generator::UmbrellaHeader.new(target) + yield generator if block_given? + update_changed_file(generator, path) + + # Add the file to the support group and the native target, + # so it will been added to the header build phase + file_ref = add_file_to_support_group(path) + build_file = native_target.headers_build_phase.add_file_reference(file_ref) + + linked_path = target.umbrella_header_path + if path != linked_path + linked_path.dirname.mkpath + source = path.relative_path_from(linked_path.dirname) + FileUtils.ln_sf(source, linked_path) + end + + acl = target.build_as_framework? ? 'Public' : 'Project' + build_file.settings ||= {} + build_file.settings['ATTRIBUTES'] = [acl] + end + end + + # Generates a dummy source file for each target so libraries that contain + # only categories build. + # + # @param [PBXNativeTarget] native_target + # the native target to link the dummy source file into. + # + # @return [void] + # + def create_dummy_source(native_target) + path = target.dummy_source_path + UI.message "- Generating dummy source at #{UI.path(path)}" do + generator = Generator::DummySource.new(target.label) + update_changed_file(generator, path) + file_reference = add_file_to_support_group(path) + native_target.source_build_phase.add_file_reference(file_reference) + end + end + + private + + #-----------------------------------------------------------------------# + + # @!group Private helpers. + + # @return [PBXGroup] the group where the file references to the support + # files should be stored. + # + attr_reader :support_files_group + + # Adds a reference to the given file in the support group of this target. + # + # @param [Pathname] path + # The path of the file to which the reference should be added. + # + # @return [PBXFileReference] the file reference of the added file. 
+ # + def add_file_to_support_group(path) + support_files_group.new_file(path) + end + + #-----------------------------------------------------------------------# + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb new file mode 100644 index 0000000..72b16b6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator/target_installer_helper.rb @@ -0,0 +1,110 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + module TargetInstallerHelper + # @param [Generator] generator + # the generator to use for generating the content. + # + # @param [Pathname] path + # the pathname to save the content into. + # + # Saves the content the provided path unless the path exists and the contents are exactly the same. + # + def update_changed_file(generator, path) + if path.exist? + contents = generator.generate.to_s + content_stream = StringIO.new(contents) + identical = File.open(path, 'rb') { |f| FileUtils.compare_stream(f, content_stream) } + return if identical + + File.open(path, 'w') { |f| f.write(contents) } + else + path.dirname.mkpath + generator.save_as(path) + end + end + + # Creates the Info.plist file which sets public framework attributes + # + # @param [Sandbox] sandbox @see #sandbox + # The sandbox where the generated Info.plist file should be saved. + # + # @param [Pathname] path + # the path to save the generated Info.plist file. + # + # @param [PBXNativeTarget] native_target + # the native target to link the generated Info.plist file into. + # + # @param [String] version + # the version to use for when generating this Info.plist file. + # + # @param [Platform] platform + # the platform to use for when generating this Info.plist file. + # + # @param [Symbol] bundle_package_type + # the CFBundlePackageType of the target this Info.plist file is for. + # + # @param [Hash] additional_entries + # any additional entries to include in this Info.plist file. + # + # @param [String] build_setting_value + # an optional value to set for the `INFOPLIST_FILE` build setting on the + # native target. If none is specified then the value is calculated from the + # Info.plist path relative to the sandbox root. + # + # @return [void] + # + def create_info_plist_file_with_sandbox(sandbox, path, native_target, version, platform, + bundle_package_type = :fmwk, additional_entries: {}, + build_setting_value: nil) + UI.message "- Generating Info.plist file at #{UI.path(path)}" do + generator = Generator::InfoPlistFile.new(version, platform, bundle_package_type, additional_entries) + update_changed_file(generator, path) + + build_setting_value ||= path.relative_path_from(sandbox.root).to_s + native_target.build_configurations.each do |c| + c.build_settings['INFOPLIST_FILE'] = build_setting_value + end + end + end + + # Creates a prefix header file which imports `UIKit` or `Cocoa` according + # to the platform of the target. This file also include any prefix header + # content reported by the specification of the pods. + # + # @param [Pathname] path + # the path to generate the prefix header for. + # + # @param [Array] file_accessors + # the file accessors to use for this prefix header that point to a path of a prefix header. 
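+          # Self-contained sketch (hypothetical file) of the write-avoidance pattern in
+          # `update_changed_file` above: compare freshly generated bytes with what is
+          # already on disk and skip the write when identical, keeping mtimes stable so
+          # Xcode sees no spurious changes.
+          require 'fileutils'
+          require 'stringio'
+          require 'pathname'
+          example_path = Pathname.new('/tmp/MyPod.modulemap')
+          example_contents = "framework module MyPod {}\n"
+          example_identical = example_path.exist? &&
+                              File.open(example_path, 'rb') { |f| FileUtils.compare_stream(f, StringIO.new(example_contents)) }
+          File.write(example_path, example_contents) unless example_identical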
+ # + # @param [Platform] platform + # the platform to use for this prefix header. + # + # @param [PBXNativeTarget] native_target + # the native target on which the prefix header should be configured for. + # + # @param [Pathname] project_directory + # the directory containing the project of the target + # + # @return [void] + # + def create_prefix_header(path, file_accessors, platform, native_target, project_directory) + generator = Generator::PrefixHeader.new(file_accessors, platform) + update_changed_file(generator, path) + + relative_path = path.relative_path_from(project_directory).to_s + native_target.build_configurations.each do |c| + c.build_settings['GCC_PREFIX_HEADER'] = relative_path + end + end + + module_function :update_changed_file + module_function :create_info_plist_file_with_sandbox + module_function :create_prefix_header + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator_result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator_result.rb new file mode 100644 index 0000000..7280d02 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/pods_project_generator_result.rb @@ -0,0 +1,54 @@ +module Pod + class Installer + class Xcode + class PodsProjectGenerator + # A simple container produced after a pod project generation is completed. + # + class PodsProjectGeneratorResult + # @return [Project] project + # + attr_reader :project + + # @return [Hash{Project => Array}] Project by pod targets map + # + attr_reader :projects_by_pod_targets + + # @return [InstallationResults] target installation results + # + attr_reader :target_installation_results + + # Initialize a new instance + # + # @param [Project] project @see #project + # @param [Hash{Project => Array}] projects_by_pod_targets @see #projects_by_pod_targets + # @param [InstallationResults] target_installation_results @see #target_installation_results + # + def initialize(project, projects_by_pod_targets, target_installation_results) + @project = project + @projects_by_pod_targets = projects_by_pod_targets + @target_installation_results = target_installation_results + end + + # @param [Pod::Specification] spec + # A spec which was included in the generated project + # + # @return [Xcodeproj::PBXNativeTarget] the native target for the spec + # + def native_target_for_spec(spec) + installation_results_by_spec[spec.root].native_target_for_spec(spec) + end + + private + + def installation_results_by_spec + @target_installation_results_by_spec ||= begin + target_installation_results.pod_target_installation_results.values.each_with_object({}) do |installation_results, hash| + hash[installation_results.target.root_spec] = installation_results + end + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/single_pods_project_generator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/single_pods_project_generator.rb new file mode 100644 index 0000000..1287727 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/single_pods_project_generator.rb @@ -0,0 +1,38 @@ +module Pod + class Installer + class Xcode + # The {SinglePodsProjectGenerator} handles generation of the 'Pods/Pods.xcodeproj' + # + class SinglePodsProjectGenerator < PodsProjectGenerator + # Generates single `Pods/Pods.xcodeproj`. 
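+ # All pod targets and aggregate targets are installed into this single
+ # project, which is why the `projects_by_pod_targets` map of the returned
+ # result is left empty.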
+ # + # @return [PodsProjectGeneratorResult] + # + def generate! + project_path = sandbox.project_path + platforms = aggregate_targets.map(&:platform) + project_generator = ProjectGenerator.new(sandbox, project_path, pod_targets, build_configurations, + platforms, project_object_version, config.podfile_path) + project = project_generator.generate! + install_file_references(project, pod_targets) + + pod_target_installation_results = install_all_pod_targets(project, pod_targets) + aggregate_target_installation_results = install_aggregate_targets(project, aggregate_targets) + target_installation_results = InstallationResults.new(pod_target_installation_results, aggregate_target_installation_results) + + integrate_targets(target_installation_results.pod_target_installation_results) + wire_target_dependencies(target_installation_results) + PodsProjectGeneratorResult.new(project, {}, target_installation_results) + end + + private + + def install_all_pod_targets(project, pod_targets) + UI.message '- Installing Pod Targets' do + install_pod_targets(project, pod_targets) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/target_validator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/target_validator.rb new file mode 100644 index 0000000..738e3b7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/installer/xcode/target_validator.rb @@ -0,0 +1,170 @@ +module Pod + class Installer + class Xcode + # The {Xcode::TargetValidator} ensures that the pod and aggregate target + # configuration is valid for installation. + # + class TargetValidator + # @return [Array] The aggregate targets that should be validated. + # + attr_reader :aggregate_targets + + # @return [Array] The pod targets that should be validated. + # + attr_reader :pod_targets + + # @return [InstallationOptions] The installation options used during this installation. + # + attr_reader :installation_options + + # Create a new TargetValidator with aggregate and pod targets to + # validate. + # + # @param [Array] aggregate_targets #see #aggregate_targets + # @param [Array] pod_targets see #pod_targets + # @param [InstallationOptions] installation_options see #installation_options + # + def initialize(aggregate_targets, pod_targets, installation_options) + @aggregate_targets = aggregate_targets + @pod_targets = pod_targets + @installation_options = installation_options + end + + # Perform the validation steps for the provided aggregate and pod + # targets. + # + def validate! + verify_no_duplicate_framework_and_library_names + verify_no_static_framework_transitive_dependencies + verify_swift_pods_swift_version + verify_swift_pods_have_module_dependencies + verify_no_multiple_project_names if installation_options.generate_multiple_pod_projects? + end + + private + + def verify_no_duplicate_framework_and_library_names + aggregate_targets.each do |aggregate_target| + aggregate_target.user_build_configurations.each_key do |config| + pod_targets = aggregate_target.pod_targets_for_build_configuration(config) + file_accessors = pod_targets.flat_map(&:file_accessors).select { |fa| fa.spec.library_specification? } + + frameworks = file_accessors.flat_map(&:vendored_frameworks).uniq.map(&:basename) + frameworks += pod_targets.select { |pt| pt.should_build? && pt.build_as_framework? 
}.map(&:product_module_name).uniq + verify_no_duplicate_names(frameworks, aggregate_target.label, 'frameworks') + + libraries = file_accessors.flat_map(&:vendored_libraries).uniq.map(&:basename) + libraries += pod_targets.select { |pt| pt.should_build? && pt.build_as_library? }.map(&:product_name) + verify_no_duplicate_names(libraries, aggregate_target.label, 'libraries') + end + end + end + + def verify_no_duplicate_names(names, label, type) + duplicates = names.group_by { |n| n.to_s.downcase }.select { |_, v| v.size > 1 }.keys + + unless duplicates.empty? + raise Informative, "The '#{label}' target has " \ + "#{type} with conflicting names: #{duplicates.to_sentence}." + end + end + + def verify_no_static_framework_transitive_dependencies + aggregate_targets.each do |aggregate_target| + aggregate_target.user_build_configurations.each_key do |config| + pod_targets = aggregate_target.pod_targets_for_build_configuration(config) + built_targets, unbuilt_targets = pod_targets.partition(&:should_build?) + dynamic_pod_targets = built_targets.select(&:build_as_dynamic?) + + dependencies = dynamic_pod_targets.flat_map(&:dependent_targets).uniq + depended_upon_targets = unbuilt_targets & dependencies + + static_libs = depended_upon_targets.flat_map(&:file_accessors).flat_map(&:vendored_static_artifacts) + unless static_libs.empty? + raise Informative, "The '#{aggregate_target.label}' target has " \ + "transitive dependencies that include statically linked binaries: (#{static_libs.to_sentence})" + end + + static_deps = dynamic_pod_targets.flat_map(&:recursive_dependent_targets).uniq.select(&:build_as_static?) + unless static_deps.empty? + raise Informative, "The '#{aggregate_target.label}' target has " \ + "transitive dependencies that include statically linked binaries: (#{static_deps.flat_map(&:name).to_sentence})" + end + end + end + end + + def verify_swift_pods_swift_version + error_message_for_target_definition = lambda do |target_definition| + "`#{target_definition.name}` (Swift #{target_definition.swift_version})" + end + swift_pod_targets = pod_targets.select(&:uses_swift?) + error_messages = swift_pod_targets.map do |swift_pod_target| + # Legacy targets that do not specify Swift versions derive their Swift version from the target definitions + # they are integrated with. An error is displayed if the target definition Swift versions collide or none + # of target definitions specify the `SWIFT_VERSION` attribute. + if swift_pod_target.spec_swift_versions.empty? + swift_target_definitions = swift_pod_target.target_definitions.reject { |target| target.swift_version.blank? } + next if swift_target_definitions.uniq(&:swift_version).count == 1 + if swift_target_definitions.empty? + "- `#{swift_pod_target.name}` does not specify a Swift version and none of the targets " \ + "(#{swift_pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence}) integrating it have the " \ + '`SWIFT_VERSION` attribute set. Please contact the author or set the `SWIFT_VERSION` attribute in at ' \ + 'least one of the targets that integrate this pod.' + else + target_errors = swift_target_definitions.map(&error_message_for_target_definition).to_sentence + "- `#{swift_pod_target.name}` is integrated by multiple targets that use a different Swift version: #{target_errors}." + end + elsif !swift_pod_target.swift_version.nil? && swift_pod_target.swift_version.empty? 
+ "- `#{swift_pod_target.name}` does not specify a Swift version (#{swift_pod_target.spec_swift_versions.map { |v| "`#{v}`" }.to_sentence}) " \ + "that is satisfied by any of targets (#{swift_pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence}) integrating it." + end + end.compact + + unless error_messages.empty? + raise Informative, "Unable to determine Swift version for the following pods:\n\n#{error_messages.join("\n")}" + end + end + + def verify_swift_pods_have_module_dependencies + error_messages = [] + pod_targets.each do |pod_target| + next unless pod_target.uses_swift? && pod_target.should_build? + + non_module_dependencies = [] + pod_target.dependent_targets.each do |dependent_target| + next if !dependent_target.should_build? || dependent_target.defines_module? + non_module_dependencies << dependent_target.name + end + + next if non_module_dependencies.empty? + + error_messages << "The Swift pod `#{pod_target.name}` depends upon #{non_module_dependencies.map { |d| "`#{d}`" }.to_sentence}, " \ + "which #{non_module_dependencies.count == 1 ? 'does' : 'do'} not define modules. " \ + 'To opt into those targets generating module maps '\ + '(which is necessary to import them from Swift when building as static libraries), ' \ + 'you may set `use_modular_headers!` globally in your Podfile, '\ + 'or specify `:modular_headers => true` for particular dependencies.' + end + return if error_messages.empty? + + raise Informative, 'The following Swift pods cannot yet be integrated '\ + "as static libraries:\n\n#{error_messages.join("\n\n")}" + end + + def verify_no_multiple_project_names + error_messages = pod_targets.map do |pod_target| + project_names = pod_target.target_definitions.map { |td| td.project_name_for_pod(pod_target.pod_name) }.compact.uniq + next unless project_names.count > 1 + "- `#{pod_target.name}` specifies multiple project names (#{project_names.map { |pn| "`#{pn}`" }.to_sentence}) " \ + "in different targets (#{pod_target.target_definitions.map { |td| "`#{td.name}`" }.to_sentence})." + end.compact + return if error_messages.empty? + + raise Informative, 'The following pods cannot be integrated:' \ + "\n\n#{error_messages.join("\n\n")}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/native_target_extension.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/native_target_extension.rb new file mode 100644 index 0000000..912918d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/native_target_extension.rb @@ -0,0 +1,60 @@ +module Pod + class Project + # Adds a dependency on the given metadata cache. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [AbstractTarget] target + # The parent target used to add a cached dependency. + # + # @param [MetadataCache] metadata + # The metadata holding all the required metadata to construct a target as a dependency. 
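+ # @example A usage sketch; the receiver and arguments are hypothetical
+ #   Pod::Project.add_cached_dependency(sandbox, aggregate_target, cached_metadata)
+ #   # adds a PBXTargetDependency wired through a PBXContainerItemProxy,
+ #   # unless an equivalent dependency is already present on the target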
+ # + # @return [void] + # + def self.add_cached_dependency(sandbox, target, metadata) + return if dependency_for_cached_target?(sandbox, target, metadata) + container_proxy = target.project.new(Xcodeproj::Project::PBXContainerItemProxy) + + subproject_reference = target.project.reference_for_path(sandbox.root + metadata.container_project_path) + raise ArgumentError, "add_dependency received target (#{target}) that belongs to a project that is not this project (#{self}) and is not a subproject of this project" unless subproject_reference + container_proxy.container_portal = subproject_reference.uuid + + container_proxy.proxy_type = Xcodeproj::Constants::PROXY_TYPES[:native_target] + container_proxy.remote_global_id_string = metadata.native_target_uuid + container_proxy.remote_info = metadata.target_label + + dependency = target.project.new(Xcodeproj::Project::PBXTargetDependency) + dependency.name = metadata.target_label + dependency.target_proxy = container_proxy + + target.dependencies << dependency + end + + # Checks whether this target has a dependency on the given target. + # + # @param [Sandbox] sandbox + # The sandbox used for this installation. + # + # @param [AbstractTarget] target + # The parent target used to add a cached dependency. + # + # @param [TargetMetadata] cached_target + # the target to search for. + # + # @return [Boolean] + # + def self.dependency_for_cached_target?(sandbox, target, cached_target) + target.dependencies.find do |dep| + if dep.target_proxy.remote? + subproject_reference = target.project.reference_for_path(sandbox.root + cached_target.container_project_path) + uuid = subproject_reference.uuid if subproject_reference + dep.target_proxy.remote_global_id_string == cached_target.native_target_uuid && dep.target_proxy.container_portal == uuid + else + dep.target.uuid == cached_target.native_target_uuid + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/open-uri.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/open-uri.rb new file mode 100644 index 0000000..c9a711d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/open-uri.rb @@ -0,0 +1,33 @@ +# rubocop:disable Naming/FileName + +require 'open-uri' + +# Allow OpenURI to follow http to https redirects. +# +module OpenURI + # Whether {#open} should follow a redirect. + # + # Inspiration from: https://gist.github.com/1271420 + # Relevant issue: https://bugs.ruby-lang.org/issues/3719 + # Source here: https://github.com/ruby/ruby/blob/trunk/lib/open-uri.rb + # + # This test is intended to forbid a redirection from http://... to + # file:///etc/passwd, file:///dev/zero, etc. CVE-2011-1521 + # https to http redirect is also forbidden intentionally. + # It avoids sending secure cookie or referrer by non-secure HTTP protocol. + # (RFC 2109 4.3.1, RFC 2965 3.3, RFC 2616 15.1.3) + # However this is ad hoc. It should be extensible/configurable. 
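+ # @example Redirects allowed and forbidden by the check below
+ #   OpenURI.redirectable?(URI('http://example.com'), URI('https://example.com'))  # => true
+ #   OpenURI.redirectable?(URI('https://example.com'), URI('http://example.com'))  # => false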
+ # + # @param [URI::Generic] uri1 + # the origin uri from where the redirect origins + # + # @param [URI::Generic] uri2 + # the target uri where to where the redirect points to + # + # @return [Boolean] + # + def self.redirectable?(uri1, uri2) + uri1.scheme.downcase == uri2.scheme.downcase || + (/\A(?:http|ftp)\z/i =~ uri1.scheme && /\A(?:https?|ftp)\z/i =~ uri2.scheme) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/podfile.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/podfile.rb new file mode 100644 index 0000000..2282a4c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/podfile.rb @@ -0,0 +1,13 @@ +require 'cocoapods-core/podfile' + +module Pod + class Podfile + autoload :InstallationOptions, 'cocoapods/installer/installation_options' + + # @return [Pod::Installer::InstallationOptions] the installation options specified in the Podfile + # + def installation_options + @installation_options ||= Pod::Installer::InstallationOptions.from_podfile(self) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/project.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/project.rb new file mode 100644 index 0000000..3291372 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/project.rb @@ -0,0 +1,544 @@ +require 'xcodeproj' +require 'active_support/core_ext/string/inflections' + +module Pod + # The Pods project. + # + # Model class which provides helpers for working with the Pods project + # through the installation process. + # + class Project < Xcodeproj::Project + # @return [PBXGroup] The group for the support files of the aggregate + # targets. + # + attr_reader :support_files_group + + # @return [PBXGroup] The group for the Pods. + # + attr_reader :pods + + # @return [PBXGroup] The group for Development Pods. + # + attr_reader :development_pods + + # @return [PBXGroup] The group for dependencies. + # Used by #generate_multiple_pod_projects installation option. + # + attr_reader :dependencies_group + + # @return [Boolean] Bool indicating if this project is a pod target subproject. + # Used by `generate_multiple_pod_projects` installation option. + # + attr_reader :pod_target_subproject + alias pod_target_subproject? pod_target_subproject + + # @return [String] The basename of the project path without .xcodeproj extension. + # + attr_reader :project_name + + # Initialize a new instance + # + # @param [Pathname, String] path @see Xcodeproj::Project#path + # @param [Boolean] skip_initialization Whether the project should be initialized from scratch. + # @param [Int] object_version Object version to use for serialization, defaults to Xcode 3.2 compatible. + # + def initialize(path, skip_initialization = false, + object_version = Xcodeproj::Constants::DEFAULT_OBJECT_VERSION, pod_target_subproject: false) + @uuid_prefix = Digest('SHA256').hexdigest(File.basename(path)).upcase + super(path, skip_initialization, object_version) + @support_files_group = new_group('Targets Support Files') + @refs_by_absolute_path = {} + @variant_groups_by_path_and_name = {} + @pods = new_group('Pods') + @development_pods = new_group('Development Pods') + @dependencies_group = new_group('Dependencies') + @pod_target_subproject = pod_target_subproject + @project_name = Pathname(path).basename('.*').to_s + self.symroot = LEGACY_BUILD_ROOT + end + + # Generates a list of new UUIDs that created objects can be assigned. 
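+ # Each UUID is produced as `format('%.6s%07X0', prefix, counter)`: the
+ # first six characters of the SHA-256 digest of the project basename,
+ # a seven-digit hexadecimal counter, and a trailing zero,
+ # e.g. `3C87A1000002A0`.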
+ # + # @note Overridden to generate UUIDs in a much faster way, since we don't need to check for collisions + # (as the Pods project is regenerated each time, and thus all UUIDs will have come from this method) + # + # @param [Integer] count + # The number of UUIDs to generate + # + # @return [Void] + # + def generate_available_uuid_list(count = 100) + start = @generated_uuids.size + uniques = Array.new(count) { |i| format('%.6s%07X0', @uuid_prefix, start + i) } + @generated_uuids += uniques + @available_uuids += uniques + end + + public + + # @!group Legacy Xcode build root + #-------------------------------------------------------------------------# + + LEGACY_BUILD_ROOT = '${SRCROOT}/../build' + + # @param [String] symroot + # The build root that is used when Xcode is configured to not use the + # workspace’s build root. Defaults to `${SRCROOT}/../build`. + # + # @return [void] + # + def symroot=(symroot) + root_object.build_configuration_list.build_configurations.each do |config| + config.build_settings['SYMROOT'] = symroot + end + end + + public + + # @!group Pod Groups + #-------------------------------------------------------------------------# + + # Creates a new group for the Pod with the given name and configures its + # path. + # + # @param [String] pod_name + # The name of the Pod. + # + # @param [#to_s] path + # The path to the root of the Pod. + # + # @param [Boolean] development + # Whether the group should be added to the Development Pods group. + # + # @param [Boolean] absolute + # Whether the path of the group should be set as absolute. + # + # @return [PBXGroup] The new group. + # + def add_pod_group(pod_name, path, development = false, absolute = false) + raise '[BUG]' if pod_group(pod_name) + + parent_group = + if pod_target_subproject + main_group + else + development ? development_pods : pods + end + source_tree = absolute ? :absolute : :group + + group = parent_group.new_group(pod_name, path, source_tree) + group + end + + # Creates a new subproject reference for the given project and configures its + # group location. + # + # @param [Project] project + # The subproject to be added. + # + # @param [Boolean] development + # Whether the project should be added to the Development Pods group. + # For projects where `pod_target_subproject` is enabled, all subprojects are added into the Dependencies group. + # + # @return [PBXFileReference] The new file reference. + # + def add_pod_subproject(project, development = false) + parent_group = group_for_subproject_reference(development) + add_subproject_reference(project, parent_group) + end + + # Creates a new subproject reference for the given cached metadata and configures its + # group location. + # + # @param [Sandbox] sandbox + # The sandbox used for installation. + # + # @param [TargetMetadata] metadata + # The project metadata to be added. + # + # @param [Boolean] development + # Whether the project should be added to the Development Pods group. + # For projects where `pod_target_subproject` is enabled, all subprojects are added into the Dependencies group. + # + # @return [PBXFileReference] The new file reference. + # + def add_cached_pod_subproject(sandbox, metadata, development = false) + parent_group = group_for_subproject_reference(development) + add_cached_subproject_reference(sandbox, metadata, parent_group) + end + + # @return [Array] Returns all the group of the Pods. 
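+ # For a pod target subproject this is every child of the main group;
+ # otherwise it is the union of the children of the `Pods` and
+ # `Development Pods` groups.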
+ # + def pod_groups + if pod_target_subproject + main_group.children.objects + else + pods.children.objects + development_pods.children.objects + end + end + + # Returns the group for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [PBXGroup] The group. + # + def pod_group(pod_name) + pod_groups.find { |group| group.name == pod_name } + end + + # @return [Hash] The names of the specification subgroups by key. + # + SPEC_SUBGROUPS = { + :resources => 'Resources', + :frameworks => 'Frameworks', + :developer => 'Pod', + } + + # Returns the group for the specification with the give name creating it if + # needed. + # + # @param [String] spec_name + # The full name of the specification. + # + # @return [PBXGroup] The group. + # + def group_for_spec(spec_name, subgroup_key = nil) + pod_name = Specification.root_name(spec_name) + group = pod_group(pod_name) + raise "[Bug] Unable to locate group for Pod named `#{pod_name}`" unless group + if spec_name != pod_name + subspecs_names = spec_name.gsub(pod_name + '/', '').split('/') + subspecs_names.each do |name| + group = group[name] || group.new_group(name) + end + end + + if subgroup_key + subgroup_name = SPEC_SUBGROUPS[subgroup_key] + raise ArgumentError, "Unrecognized subgroup key `#{subgroup_key}`" unless subgroup_name + group = group[subgroup_name] || group.new_group(subgroup_name) + end + + group + end + + # Returns the support files group for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [PBXGroup] The group. + # + def pod_support_files_group(pod_name, dir) + group = pod_group(pod_name) + support_files_group = group['Support Files'] + unless support_files_group + support_files_group = group.new_group('Support Files', dir) + end + support_files_group + end + + public + + # @!group File references + #-------------------------------------------------------------------------# + + # Adds a file reference to given path as a child of the given group. + # + # @param [Array] absolute_path + # The path of the file. + # + # @param [PBXGroup] group + # The group for the new file reference. + # + # @param [Boolean] reflect_file_system_structure + # Whether group structure should reflect the file system structure. + # If yes, where needed, intermediate groups are created, similar to + # how mkdir -p operates. + # + # @param [Pathname] base_path + # The base path for newly created groups when reflect_file_system_structure is true. + # If nil, the provided group's real_path is used. + # + # @return [PBXFileReference] The new file reference. + # + def add_file_reference(absolute_path, group, reflect_file_system_structure = false, base_path = nil) + file_path_name = absolute_path.is_a?(Pathname) ? absolute_path : Pathname(absolute_path) + if ref = reference_for_path(file_path_name) + return ref + end + + group = group_for_path_in_group(file_path_name, group, reflect_file_system_structure, base_path) + ref = group.new_file(file_path_name.realpath) + @refs_by_absolute_path[file_path_name.to_s] = ref + end + + # @!group File references + #-------------------------------------------------------------------------# + + # Adds a file reference for a project as a child of the given group. + # + # @param [Project] project + # The project to add as a subproject reference. + # + # @param [PBXGroup] group + # The group for the new subproject reference. + # + # @return [PBXFileReference] The new file reference. 
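+ # @example A usage sketch; `pod_subproject` is hypothetical
+ #   ref = project.add_subproject_reference(pod_subproject, project.dependencies_group)
+ #   ref.name # => the subproject basename without the `.xcodeproj` extension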
+ # + def add_subproject_reference(project, group) + new_subproject_file_reference(project.path, group) + end + + # Adds a file reference for a cached project as a child of the given group. + # + # @param [Sandbox] sandbox + # The sandbox used for installation. + # + # @param [MetadataCache] metadata + # The metadata holding the required properties to create a subproject reference. + # + # @param [PBXGroup] group + # The group for the new subproject reference. + # + # @return [PBXFileReference] The new file reference. + # + def add_cached_subproject_reference(sandbox, metadata, group) + new_subproject_file_reference(sandbox.root + metadata.container_project_path, group) + end + + # Returns the file reference for the given absolute path. + # + # @param [#to_s] absolute_path + # The absolute path of the file whose reference is needed. + # + # @return [PBXFileReference] The file reference. + # @return [Nil] If no file reference could be found. + # + def reference_for_path(absolute_path) + absolute_path = absolute_path.is_a?(Pathname) ? absolute_path : Pathname(absolute_path) + unless absolute_path.absolute? + raise ArgumentError, "Paths must be absolute #{absolute_path}" + end + + refs_by_absolute_path[absolute_path.to_s] ||= refs_by_absolute_path[absolute_path.realpath.to_s] + end + + # Adds a file reference to the Podfile. + # + # @param [#to_s] podfile_path + # The path of the Podfile. + # + # @return [PBXFileReference] The new file reference. + # + def add_podfile(podfile_path) + new_file(podfile_path, :project).tap do |podfile_ref| + mark_ruby_file_ref(podfile_ref) + end + end + + # Sets the syntax of the provided file reference to be Ruby, in the case that + # the file does not already have a ".rb" file extension (ex. the Podfile) + # + # @param [PBXFileReference] file_ref + # The file reference to change + # + def mark_ruby_file_ref(file_ref) + file_ref.xc_language_specification_identifier = 'xcode.lang.ruby' + file_ref.explicit_file_type = 'text.script.ruby' + file_ref.last_known_file_type = 'text' + file_ref.tab_width = '2' + file_ref.indent_width = '2' + end + + # Adds a new build configuration to the project and populates it with + # default settings according to the provided type. + # + # @note This method extends the original Xcodeproj implementation to + # include a preprocessor definition named after the build + # setting. This is done to support the TargetEnvironmentHeader + # specification of Pods available only on certain build + # configurations. + # + # @param [String] name + # The name of the build configuration. + # + # @param [Symbol] type + # The type of the build configuration used to populate the build + # settings, must be :debug or :release. + # + # @return [XCBuildConfiguration] The new build configuration. + # + def add_build_configuration(name, type) + build_configuration = super + settings = build_configuration.build_settings + definitions = settings['GCC_PREPROCESSOR_DEFINITIONS'] || ['$(inherited)'] + defines = [defininition_for_build_configuration(name)] + defines << 'DEBUG' if type == :debug + defines.each do |define| + value = "#{define}=1" + unless definitions.include?(value) + definitions.unshift(value) + end + end + settings['GCC_PREPROCESSOR_DEFINITIONS'] = definitions + + if type == :debug + settings['SWIFT_ACTIVE_COMPILATION_CONDITIONS'] = 'DEBUG' + end + + build_configuration + end + + # @param [String] name + # The name of the build configuration. + # + # @return [String] The preprocessor definition to set for the configuration. 
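+ # @example The mapping applied to a configuration name
+ #   defininition_for_build_configuration('App Store-Release')
+ #   # => 'POD_CONFIGURATION_APP_STORE_RELEASE'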
+ # + def defininition_for_build_configuration(name) + "POD_CONFIGURATION_#{name.underscore}".gsub(/[^a-zA-Z0-9_]/, '_').upcase + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # @return [Hash{String => PBXFileReference}] The file references grouped + # by absolute path. + # + attr_reader :refs_by_absolute_path + + # @return [Hash{[Pathname, String] => PBXVariantGroup}] The variant groups + # grouped by absolute path of parent dir and name. + # + attr_reader :variant_groups_by_path_and_name + + # Returns the group for an absolute file path in another group. + # Creates subgroups to reflect the file system structure if + # reflect_file_system_structure is set to true. + # Makes a variant group if the path points to a localized file inside a + # *.lproj directory. To support Apple Base Internationalization, the same + # variant group is returned for interface files and strings files with + # the same name. + # + # @param [Pathname] absolute_pathname + # The pathname of the file to get the group for. + # + # @param [PBXGroup] group + # The parent group used as the base of the relative path. + # + # @param [Boolean] reflect_file_system_structure + # Whether group structure should reflect the file system structure. + # If yes, where needed, intermediate groups are created, similar to + # how mkdir -p operates. + # + # @param [Pathname] base_path + # The base path for the newly created group. If nil, the provided group's real_path is used. + # + # @return [PBXGroup] The appropriate group for the filepath. + # Can be PBXVariantGroup, if the file is localized. + # + def group_for_path_in_group(absolute_pathname, group, reflect_file_system_structure, base_path = nil) + unless absolute_pathname.absolute? + raise ArgumentError, "Paths must be absolute #{absolute_pathname}" + end + unless base_path.nil? || base_path.absolute? + raise ArgumentError, "Paths must be absolute #{base_path}" + end + + relative_base = base_path.nil? ? group.real_path : base_path.realdirpath + relative_pathname = absolute_pathname.relative_path_from(relative_base) + relative_dir = relative_pathname.dirname + + # Add subgroups for directories, but treat .lproj as a file + if reflect_file_system_structure + path = relative_base + relative_dir.each_filename do |name| + break if name.to_s.downcase.include? '.lproj' + next if name == '.' + # Make sure groups have the correct absolute path set, as intermittent + # directories may not be included in the group structure + path += name + group = group.children.find { |c| c.display_name == name } || group.new_group(name, path) + end + end + + # Turn files inside .lproj directories into a variant group + if relative_dir.basename.to_s.downcase.include? '.lproj' + group_name = variant_group_name(absolute_pathname) + lproj_parent_dir = absolute_pathname.dirname.dirname + group = @variant_groups_by_path_and_name[[lproj_parent_dir, group_name]] ||= + group.new_variant_group(group_name, lproj_parent_dir) + end + + group + end + + # Returns the name to be used for a the variant group for a file at a given path. + # The path must be localized (within an *.lproj directory). + # + # @param [Pathname] path The localized path to get a variant group name for. + # + # @return [String] The variant group name. + # + def variant_group_name(path) + unless path.to_s.downcase.include?('.lproj/') + raise ArgumentError, 'Only localized resources can be added to variant groups.' 
+ end + + # When using Base Internationalization for XIBs and Storyboards a strings + # file is generated with the same name as the XIB/Storyboard in each .lproj + # directory: + # Base.lproj/MyViewController.xib + # fr.lproj/MyViewController.strings + # + # In this scenario we want the variant group to be the same as the XIB or Storyboard. + # + # Base Internationalization: https://developer.apple.com/library/ios/documentation/MacOSX/Conceptual/BPInternational/InternationalizingYourUserInterface/InternationalizingYourUserInterface.html + if path.extname.downcase == '.strings' + %w(.xib .storyboard).each do |extension| + possible_interface_file = path.dirname.dirname + 'Base.lproj' + path.basename.sub_ext(extension) + return possible_interface_file.basename.to_s if possible_interface_file.exist? + end + end + + path.basename.to_s + end + + def new_subproject_file_reference(project_path, group) + if ref = reference_for_path(project_path) + return ref + end + + # We call into the private function `FileReferencesFactory.new_file_reference` instead of `FileReferencesFactory.new_reference` + # because it delegates into `FileReferencesFactory.new_subproject` which has the extra behavior of opening the Project which + # is an expensive operation for large projects. + # + ref = Xcodeproj::Project::FileReferencesFactory.send(:new_file_reference, group, project_path, :group) + ref.name = Pathname(project_path).basename('.*').to_s + ref.include_in_index = nil + + attribute = PBXProject.references_by_keys_attributes.find { |attrb| attrb.name == :project_references } + project_reference = ObjectDictionary.new(attribute, group.project.root_object) + project_reference[:project_ref] = ref + root_object.project_references << project_reference + refs_by_absolute_path[project_path.to_s] = ref + ref + end + + # Returns the parent group a new subproject reference should belong to. + # + def group_for_subproject_reference(development) + if pod_target_subproject + dependencies_group + else + development ? development_pods : pods + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver.rb new file mode 100644 index 0000000..60c0fc6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver.rb @@ -0,0 +1,600 @@ +require 'molinillo' +require 'cocoapods/podfile' + +module Pod + class NoSpecFoundError < Informative + def exit_status + @exit_status ||= 31 + end + end + + # The resolver is responsible of generating a list of specifications grouped + # by target for a given Podfile. + # + class Resolver + require 'cocoapods/resolver/lazy_specification' + require 'cocoapods/resolver/resolver_specification' + + # @return [Sandbox] the Sandbox used by the resolver to find external + # dependencies. + # + attr_reader :sandbox + + # @return [Podfile] the Podfile used by the resolver. + # + attr_reader :podfile + + # @return [Array] the list of dependencies locked to a specific + # version. + # + attr_reader :locked_dependencies + + # @return [Array] The list of the sources which will be used for + # the resolution. + # + attr_reader :sources + + # @return [Boolean] Whether the resolver has sources repositories up-to-date. + # + attr_reader :specs_updated + alias specs_updated? 
specs_updated + + # @return [Source::Manager] the manager to use for dependency resolution + # + attr_reader :sources_manager + + # Init a new Resolver + # + # @param [Sandbox] sandbox @see sandbox + # @param [Podfile] podfile @see podfile + # @param [Array] locked_dependencies @see locked_dependencies + # @param [Array, Source] sources @see sources + # @param [Boolean] specs_updated @see specs_updated + # @param [PodfileDependencyCache] podfile_dependency_cache the podfile dependency cache to use + # within this Resolver. + # + def initialize(sandbox, podfile, locked_dependencies, sources, specs_updated, + podfile_dependency_cache: Installer::Analyzer::PodfileDependencyCache.from_podfile(podfile), + sources_manager: Config.instance.sources_manager) + @sandbox = sandbox + @podfile = podfile + @locked_dependencies = locked_dependencies + @sources = Array(sources) + @specs_updated = specs_updated + @podfile_dependency_cache = podfile_dependency_cache + @sources_manager = sources_manager + @platforms_by_dependency = Hash.new { |h, k| h[k] = [] } + + @cached_sets = {} + @podfile_requirements_by_root_name = @podfile_dependency_cache.podfile_dependencies.group_by(&:root_name).each_value { |a| a.map!(&:requirement).freeze }.freeze + @search = {} + @validated_platforms = Set.new + end + + #-------------------------------------------------------------------------# + + public + + # @!group Resolution + + # Identifies the specifications that should be installed. + # + # @return [Hash{TargetDefinition => Array}] resolver_specs_by_target + # the resolved specifications that need to be installed grouped by target + # definition. + # + def resolve + dependencies = @podfile_dependency_cache.target_definition_list.flat_map do |target| + @podfile_dependency_cache.target_definition_dependencies(target).each do |dep| + next unless target.platform + @platforms_by_dependency[dep].push(target.platform) + end + end.uniq + @platforms_by_dependency.each_value(&:uniq!) + @activated = Molinillo::Resolver.new(self, self).resolve(dependencies, locked_dependencies) + resolver_specs_by_target + rescue Molinillo::ResolverError => e + handle_resolver_error(e) + end + + # @return [Hash{Podfile::TargetDefinition => Array}] + # returns the resolved specifications grouped by target. + # + # @note The returned specifications can be subspecs. + # + def resolver_specs_by_target + @resolver_specs_by_target ||= {}.tap do |resolver_specs_by_target| + @podfile_dependency_cache.target_definition_list.each do |target| + next if target.abstract? && !target.platform + + # can't use vertex.root? since that considers _all_ targets + explicit_dependencies = @podfile_dependency_cache.target_definition_dependencies(target).map(&:name).to_set + + used_by_aggregate_target_by_spec_name = {} + used_vertices_by_spec_name = {} + + # it's safe to make a single pass here since we iterate in topological order, + # so all of the predecessors have been visited before we get to a node. + # #tsort returns no-children vertices first, and we want them last (i.e. we want no-parent vertices first) + @activated.tsort.reverse_each do |vertex| + spec_name = vertex.name + explicitly_included = explicit_dependencies.include?(spec_name) + if explicitly_included || vertex.incoming_edges.any? 
{ |edge| used_vertices_by_spec_name.key?(edge.origin.name) && edge_is_valid_for_target_platform?(edge, target.platform) } + validate_platform(vertex.payload, target) + used_vertices_by_spec_name[spec_name] = vertex + used_by_aggregate_target_by_spec_name[spec_name] = vertex.payload.library_specification? && + (explicitly_included || vertex.predecessors.any? { |predecessor| used_by_aggregate_target_by_spec_name.fetch(predecessor.name, false) }) + end + end + + resolver_specs_by_target[target] = used_vertices_by_spec_name.each_value. + map do |vertex| + payload = vertex.payload + non_library = !used_by_aggregate_target_by_spec_name.fetch(vertex.name) + spec_source = payload.respond_to?(:spec_source) && payload.spec_source + ResolverSpecification.new(payload, non_library, spec_source) + end. + sort_by(&:name) + end + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Specification Provider + + include Molinillo::SpecificationProvider + + # Returns (and caches) the specification that satisfy the given dependency. + # + # @return [Array] the specifications that satisfy the given + # `dependency`. + # + # @param [Dependency] dependency the dependency that is being searched for. + # + def search_for(dependency) + @search[dependency] ||= begin + additional_requirements = if locked_requirement = requirement_for_locked_pod_named(dependency.name) + [locked_requirement] + else + Array(@podfile_requirements_by_root_name[dependency.root_name]) + end + + specifications_for_dependency(dependency, additional_requirements).freeze + end + end + + # Returns the dependencies of `specification`. + # + # @return [Array] all dependencies of `specification`. + # + # @param [Specification] specification the specification whose own + # dependencies are being asked for. + # + def dependencies_for(specification) + root_name = Specification.root_name(specification.name) + specification.all_dependencies.map do |dependency| + if dependency.root_name == root_name + dependency.dup.tap { |d| d.specific_version = specification.version } + else + dependency + end + end + end + + # Returns the name for the given `dependency`. + # + # @return [String] the name for the given `dependency`. + # + # @param [Dependency] dependency the dependency whose name is being + # queried. + # + def name_for(dependency) + dependency.name + end + + # @return [String] the user-facing name for a {Podfile}. + # + def name_for_explicit_dependency_source + 'Podfile' + end + + # @return [String] the user-facing name for a {Lockfile}. + # + def name_for_locking_dependency_source + 'Podfile.lock' + end + + # Determines whether the given `requirement` is satisfied by the given + # `spec`, in the context of the current `activated` dependency graph. + # + # @return [Boolean] whether `requirement` is satisfied by `spec` in the + # context of the current `activated` dependency graph. + # + # @param [Dependency] requirement the dependency in question. + # + # @param [Molinillo::DependencyGraph] activated the current dependency + # graph in the resolution process. + # + # @param [Specification] spec the specification in question. 
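+ # A possibility is accepted only when its version satisfies the
+ # requirement, its prerelease status is permitted for the pod's root
+ # name in the current graph, and it is platform-compatible with the
+ # targets that require it.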
+ # + def requirement_satisfied_by?(requirement, activated, spec) + version = spec.version + return false unless requirement.requirement.satisfied_by?(version) + return false unless valid_possibility_version_for_root_name?(requirement, activated, spec) + return false unless spec_is_platform_compatible?(activated, requirement, spec) + true + end + + def valid_possibility_version_for_root_name?(requirement, activated, spec) + return true if prerelease_requirement = requirement.prerelease? || requirement.external_source || !spec.version.prerelease? + + activated.each do |vertex| + next unless vertex.payload + next unless Specification.root_name(vertex.name) == requirement.root_name + + prerelease_requirement ||= vertex.requirements.any? { |r| r.prerelease? || r.external_source } + + if vertex.payload.respond_to?(:version) + return true if vertex.payload.version == spec.version + break + end + end + + prerelease_requirement + end + private :valid_possibility_version_for_root_name? + + # Sort dependencies so that the ones that are easiest to resolve are first. + # Easiest to resolve is (usually) defined by: + # 1) Is this dependency already activated? + # 2) How relaxed are the requirements? + # 3) Are there any conflicts for this dependency? + # 4) How many possibilities are there to satisfy this dependency? + # + # @return [Array] the sorted dependencies. + # + # @param [Array] dependencies the unsorted dependencies. + # + # @param [Molinillo::DependencyGraph] activated the dependency graph of + # currently activated specs. + # + # @param [{String => Array}] conflicts the current conflicts. + # + def sort_dependencies(dependencies, activated, conflicts) + dependencies.sort_by! do |dependency| + name = name_for(dependency) + [ + activated.vertex_named(name).payload ? 0 : 1, + dependency.external_source ? 0 : 1, + dependency.prerelease? ? 0 : 1, + conflicts[name] ? 0 : 1, + search_for(dependency).count, + ] + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Resolver UI + + include Molinillo::UI + + # The UI object the resolver should use for displaying user-facing output. + # + # @return [UserInterface] the normal CocoaPods UI object. + # + def output + UI + end + + # Called before resolution starts. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def before_resolution + end + + # Called after resolution ends. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def after_resolution + end + + # Called during resolution to indicate progress. + # + # Completely silence this, as we show nothing. + # + # @return [Void] + # + def indicate_progress + end + + #-------------------------------------------------------------------------# + + private + + # !@ Resolution context + + # @return [Hash Set>] A cache that keeps tracks of the sets + # loaded by the resolution process. + # + # @note Sets store the resolved dependencies and return the highest + # available specification found in the sources. This is done + # globally and not per target definition because there can be just + # one Pod installation, so different version of the same Pods for + # target definitions are not allowed. + # + attr_reader :cached_sets + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # Returns available specifications which satisfy requirements of given dependency + # and additional requirements. 
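+ # The dependency's own requirements and the additional requirements are
+ # merged and de-duplicated before the cached set for the pod is queried
+ # for every specification that satisfies them.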
+ # + # @param [Dependency] dependency + # The dependency whose requirements will be satisfied. + # + # @param [Array] additional_requirements + # List of additional requirements which should also be satisfied. + # + # @return [Array] List of specifications satisfying given requirements. + # + def specifications_for_dependency(dependency, additional_requirements = []) + requirement_list = dependency.requirement.as_list + additional_requirements.flat_map(&:as_list) + requirement_list.uniq! + requirement = Requirement.new(requirement_list) + find_cached_set(dependency). + all_specifications(warn_for_multiple_pod_sources, requirement). + map { |s| s.subspec_by_name(dependency.name, false, true) }. + compact + end + + # @return [Set] Loads or returns a previously initialized set for the Pod + # of the given dependency. + # + # @param [Dependency] dependency + # The dependency for which the set is needed. + # + # @return [Set] the cached set for a given dependency. + # + def find_cached_set(dependency) + name = dependency.root_name + cached_sets[name] ||= begin + if dependency.external_source + spec = sandbox.specification(name) + unless spec + raise StandardError, '[Bug] Unable to find the specification ' \ + "for `#{dependency}`." + end + set = Specification::Set::External.new(spec) + else + set = create_set_from_sources(dependency) + end + + unless set + raise Molinillo::NoSuchDependencyError.new(dependency) # rubocop:disable Style/RaiseArgs + end + + set + end + end + + # @return [Requirement, Nil] + # The {Requirement} that locks the dependency with name `name` in + # {#locked_dependencies}. + # + def requirement_for_locked_pod_named(name) + if vertex = locked_dependencies.vertex_named(name) + if dependency = vertex.payload + dependency.requirement + end + end + end + + # @return [Set] Creates a set for the Pod of the given dependency from the + # sources. The set will contain all versions from all sources that + # include the Pod. + # + # @param [Dependency] dependency + # The dependency for which the set is needed. + # + def create_set_from_sources(dependency) + aggregate_for_dependency(dependency).search(dependency) + end + + # @return [Source::Aggregate] The aggregate of the {#sources}. + # + def aggregate_for_dependency(dependency) + if dependency && dependency.podspec_repo + sources_manager.aggregate_for_dependency(dependency) + elsif (locked_vertex = @locked_dependencies.vertex_named(dependency.name)) && (locked_dependency = locked_vertex.payload) && locked_dependency.podspec_repo + sources_manager.aggregate_for_dependency(locked_dependency) + else + @aggregate ||= Source::Aggregate.new(sources) + end + end + + # Ensures that a specification is compatible with the platform of a target. + # + # @raise If the specification is not supported by the target. + # + # @return [void] + # + def validate_platform(spec, target) + return unless target_platform = target.platform + return unless @validated_platforms.add?([spec.object_id, target_platform]) + unless spec.available_platforms.any? { |p| target_platform.to_sym == p.to_sym } + raise Informative, "The platform of the target `#{target.name}` " \ + "(#{target.platform}) is not compatible with `#{spec}`, which does " \ + "not support `#{target.platform.string_name}`." + end + end + + # Handles errors that come out of a {Molinillo::Resolver}. + # + # @return [void] + # + # @param [Molinillo::ResolverError] error + # + def handle_resolver_error(error) + message = error.message + type = Informative + unless specs_updated? 
+ specs_update_message = "\n * out-of-date source repos which you can update with `pod repo update` or with `pod install --repo-update`." + end + case error + when Molinillo::VersionConflict + message = error.message_with_trees( + :solver_name => 'CocoaPods', + :possibility_type => 'pod', + :version_for_spec => lambda(&:version), + :additional_message_for_conflict => lambda do |o, name, conflict| + local_pod_parent = conflict.requirement_trees.flatten.reverse.find(&:local?) + if local_pod_parent && !specifications_for_dependency(conflict.requirement).empty? && !conflict.possibility && conflict.locked_requirement + # Conflict was caused by a requirement from a local dependency. + # Tell user to use `pod update`. + o << "\n\nYou have either:#{specs_update_message}" \ + "\n * changed the constraints of dependency `#{name}` inside your development pod `#{local_pod_parent.name}`." \ + "\n You should run `pod update #{name}` to apply changes you've made." + elsif !conflict.possibility && conflict.locked_requirement && conflict.locked_requirement.external_source && conflict.locked_requirement.external_source[:podspec] && + conflict.requirement && conflict.requirement.external_source && conflict.requirement.external_source[:podspec] + # The internal version of the Podspec doesn't match the external definition of a podspec + o << "\nIt seems like you've changed the version of the dependency `#{name}` " \ + "and it differs from the version stored in `Pods/Local Podspecs`.\nYou should run `pod update #{name} --no-repo-update` to apply " \ + 'changes made locally.' + elsif (conflict.possibility && conflict.possibility.version.prerelease?) && + (conflict.requirement && !( + conflict.requirement.prerelease? || + conflict.requirement.external_source) + ) + # Conflict was caused by not specifying an explicit version for the requirement #[name], + # and there is no available stable version satisfying constraints for the requirement. + o << "\nThere are only pre-release versions available satisfying the following requirements:\n" + conflict.requirements.values.flatten.uniq.each do |r| + unless search_for(r).empty? + o << "\n\t'#{name}', '#{r.requirement}'\n" + end + end + o << "\nYou should explicitly specify the version in order to install a pre-release version" + elsif !conflict.existing + conflicts = conflict.requirements.values.flatten.uniq + found_conflicted_specs = conflicts.reject { |c| search_for(c).empty? } + if found_conflicted_specs.empty? + # There are no existing specification inside any of the spec repos with given requirements. + type = NoSpecFoundError + dependencies = conflicts.count == 1 ? 'dependency' : 'dependencies' + o << "\nNone of your spec sources contain a spec satisfying "\ + "the #{dependencies}: `#{conflicts.join(', ')}`." \ + "\n\nYou have either:#{specs_update_message}" \ + "\n * mistyped the name or version." \ + "\n * not added the source repo that hosts the Podspec to your Podfile." + + else + o << "\nSpecs satisfying the `#{conflicts.join(', ')}` dependency were found, " \ + 'but they required a higher minimum deployment target.' + end + end + end, + ) + when Molinillo::NoSuchDependencyError + message += <<-EOS + + +You have either:#{specs_update_message} + * mistyped the name or version. + * not added the source repo that hosts the Podspec to your Podfile. 
+ EOS + end + raise type.new(message).tap { |e| e.set_backtrace(error.backtrace) } + end + + # Returns whether the given spec is platform-compatible with the dependency + # graph, taking into account the dependency that has required the spec. + # + # @param [Molinillo::DependencyGraph] dependency_graph + # + # @param [Dependency] dependency + # + # @param [Specification] spec + # + # @return [Boolean] + # + def spec_is_platform_compatible?(dependency_graph, dependency, spec) + # This is safe since a pod will only be in locked dependencies if we're + # using the same exact version + return true if locked_dependencies.vertex_named(spec.name) + + vertex = dependency_graph.vertex_named(dependency.name) + predecessors = vertex.recursive_predecessors.select(&:root?) + predecessors << vertex if vertex.root? + platforms_to_satisfy = predecessors.flat_map(&:explicit_requirements).flat_map { |r| @platforms_by_dependency[r] }.uniq + + available_platforms = spec.available_platforms + + platforms_to_satisfy.all? do |platform_to_satisfy| + available_platforms.all? do |spec_platform| + next true unless spec_platform.name == platform_to_satisfy.name + # For non library specs all we care is to match by the platform name, not to satisfy the version. + next true if spec.non_library_specification? + platform_to_satisfy.supports?(spec_platform) + end + end + end + + class EdgeAndPlatform + def initialize(edge, target_platform) + @edge = edge + @target_platform = target_platform + end + attr_reader :edge, :target_platform + + def eql?(other) + edge.equal?(other.edge) && target_platform.eql?(other.target_platform) + end + + def hash + edge.object_id ^ target_platform.hash + end + end + private_constant :EdgeAndPlatform + + # Whether the given `edge` should be followed to find dependencies for the + # given `target_platform`. + # + # @return [Boolean] + # + def edge_is_valid_for_target_platform?(edge, target_platform) + @edge_validity ||= Hash.new do |hash, edge_and_platform| + e = edge_and_platform.edge + platform = edge_and_platform.target_platform + requirement_name = e.requirement.name + + hash[edge_and_platform] = e.origin.payload.all_dependencies(platform).any? do |dep| + dep.name == requirement_name + end + end + + @edge_validity[EdgeAndPlatform.new(edge, target_platform)] + end + + # @return [Boolean] whether to emit a warning when a pod is found in multiple sources + # + def warn_for_multiple_pod_sources + podfile.installation_options.warn_for_multiple_pod_sources + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/lazy_specification.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/lazy_specification.rb new file mode 100644 index 0000000..474e6a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/lazy_specification.rb @@ -0,0 +1,88 @@ +require 'delegate' +module Pod + class Specification + class Set + class SpecWithSource < DelegateClass(Specification) + attr_reader :spec_source + def initialize(spec, source) + super(spec) + @spec_source = source + end + + undef is_a? 
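+ # `is_a?` is undefined so that type checks are forwarded through the
+ # DelegateClass to the wrapped Specification rather than being answered
+ # by the wrapper itself.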
+      end
+
+      class LazySpecification < DelegateClass(Specification)
+        attr_reader :name, :version, :spec_source
+
+        def initialize(name, version, spec_source)
+          @name = name
+          @version = version
+          @spec_source = spec_source
+        end
+
+        def subspec_by_name(name = nil, raise_if_missing = true, include_non_library_specifications = false)
+          subspec =
+            if !name || name == self.name
+              self
+            else
+              specification.subspec_by_name(name, raise_if_missing, include_non_library_specifications)
+            end
+          return unless subspec
+
+          SpecWithSource.new subspec, spec_source
+        end
+
+        def specification
+          @specification ||= spec_source.specification(name, version.version)
+        end
+        alias __getobj__ specification
+
+        undef is_a?
+      end
+
+      class External
+        def all_specifications(_warn_for_multiple_pod_sources, requirement)
+          if requirement.satisfied_by? specification.version
+            [specification]
+          else
+            []
+          end
+        end
+      end
+
+      # returns the highest versioned spec last
+      def all_specifications(warn_for_multiple_pod_sources, requirement)
+        @all_specifications ||= {}
+        @all_specifications[requirement] ||= begin
+          sources_by_version = {}
+          versions_by_source.each do |source, versions|
+            versions.each do |v|
+              next unless requirement.satisfied_by?(v)
+
+              (sources_by_version[v] ||= []) << source
+            end
+          end
+
+          if warn_for_multiple_pod_sources
+            duplicate_versions = sources_by_version.select { |_version, sources| sources.count > 1 }
+
+            duplicate_versions.each do |version, sources|
+              UI.warn "Found multiple specifications for `#{name} (#{version})`:\n" +
+                sources.
+                map { |s| s.specification_path(name, version) }.
+                map { |v| "- #{v}" }.join("\n")
+            end
+          end
+
+          # sort versions from high to low
+          sources_by_version.sort_by(&:first).flat_map do |version, sources|
+            # within each version, we want the preferred (first-specified) source
+            # to be the _last_ one
+            sources.reverse_each.map { |source| LazySpecification.new(name, version, source) }
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/resolver_specification.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/resolver_specification.rb
new file mode 100644
index 0000000..d88c5eb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/resolver/resolver_specification.rb
@@ -0,0 +1,41 @@
+module Pod
+  class Resolver
+    # A small container that wraps a resolved specification for a given target definition. Additional metadata
+    # is included here, such as whether the specification is only used by tests.
+    #
+    class ResolverSpecification
+      # @return [Specification] the specification that was resolved
+      #
+      attr_reader :spec
+
+      # @return [Source] the spec repo source the specification came from
+      #
+      attr_reader :source
+
+      # @return [Boolean] whether this resolved specification is used by non-library targets.
+      #
+      attr_reader :used_by_non_library_targets_only
+      alias used_by_non_library_targets_only? used_by_non_library_targets_only
+
+      def initialize(spec, used_by_non_library_targets_only, source)
+        @spec = spec
+        @used_by_non_library_targets_only = used_by_non_library_targets_only
+        @source = source
+      end
+
+      def name
+        spec.name
+      end
+
+      def root
+        spec.root
+      end
+
+      def ==(other)
+        self.class == other.class &&
+          spec == other.spec &&
+          used_by_non_library_targets_only? == other.used_by_non_library_targets_only?
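+        # Note that `source` does not participate in the comparison above, so
+        # two resolver specifications differing only in spec repo compare equal.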
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox.rb
new file mode 100644
index 0000000..6e0c8fd
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox.rb
@@ -0,0 +1,506 @@
+require 'fileutils'
+
+module Pod
+  # The sandbox provides support for the directory that CocoaPods uses for an
+  # installation. In this directory the Pods projects, the support files and
+  # the sources of the Pods are stored.
+  #
+  # CocoaPods assumes it has full control of the sandbox.
+  #
+  # Once completed the sandbox will have the following file structure:
+  #
+  #     Pods
+  #     |
+  #     +-- Headers
+  #     |   +-- Private
+  #     |   |   +-- [Pod Name]
+  #     |   +-- Public
+  #     |       +-- [Pod Name]
+  #     |
+  #     +-- Local Podspecs
+  #     |   +-- External Sources
+  #     |   +-- Normal Sources
+  #     |
+  #     +-- Target Support Files
+  #     |   +-- [Target Name]
+  #     |       +-- Pods-acknowledgements.markdown
+  #     |       +-- Pods-acknowledgements.plist
+  #     |       +-- Pods-dummy.m
+  #     |       +-- Pods-prefix.pch
+  #     |       +-- Pods.xcconfig
+  #     |
+  #     +-- [Pod Name]
+  #     |
+  #     +-- Manifest.lock
+  #     |
+  #     +-- Pods.xcodeproj
+  #  (if installation option 'generate_multiple_pod_projects' is enabled)
+  #     |
+  #     +-- PodTarget1.xcodeproj
+  #     |
+  #    ...
+  #     |
+  #     +-- PodTargetN.xcodeproj
+  #
+  #
+  class Sandbox
+    autoload :FileAccessor, 'cocoapods/sandbox/file_accessor'
+    autoload :HeadersStore, 'cocoapods/sandbox/headers_store'
+    autoload :PathList, 'cocoapods/sandbox/path_list'
+    autoload :PodDirCleaner, 'cocoapods/sandbox/pod_dir_cleaner'
+    autoload :PodspecFinder, 'cocoapods/sandbox/podspec_finder'
+
+    # @return [Pathname] the root of the sandbox.
+    #
+    attr_reader :root
+
+    # @return [HeadersStore] the header directory for the user targets.
+    #
+    attr_reader :public_headers
+
+    # Initialize a new instance
+    #
+    # @param [String, Pathname] root @see #root
+    #
+    def initialize(root)
+      FileUtils.mkdir_p(root)
+      @root = Pathname.new(root).realpath
+      @public_headers = HeadersStore.new(self, 'Public', :public)
+      @predownloaded_pods = []
+      @downloaded_pods = []
+      @checkout_sources = {}
+      @development_pods = {}
+      @pods_with_absolute_path = []
+      @stored_podspecs = {}
+    end
+
+    # @return [Lockfile] the manifest which contains the information about the
+    #         installed pods, or `nil` if one is not present.
+    #
+    def manifest
+      @manifest ||= begin
+        Lockfile.from_file(manifest_path) if manifest_path.exist?
+      end
+    end
+
+    # Removes the files of the Pod with the given name from the sandbox.
+    #
+    # @param [String] name The name of the pod, which is used to calculate additional paths to clean.
+    # @param [String] pod_dir The directory of the pod to clean.
+    #
+    # @return [void]
+    #
+    def clean_pod(name, pod_dir)
+      pod_dir.rmtree if pod_dir&.exist?
+      podspec_path = specification_path(name)
+      podspec_path.rmtree if podspec_path&.exist?
+      pod_target_project_path = pod_target_project_path(name)
+      pod_target_project_path.rmtree if pod_target_project_path&.exist?
+    end
+
+    # Prepares the sandbox for a new installation, removing any file that will
+    # be regenerated and ensuring that the directories exist.
+    #
+    def prepare
+      FileUtils.mkdir_p(headers_root)
+      FileUtils.mkdir_p(sources_root)
+      FileUtils.mkdir_p(specifications_root)
+      FileUtils.mkdir_p(target_support_files_root)
+    end
+
+    # @return [String] a string representation suitable for debugging.
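+    #
+    # @example Illustrative output (the root path depends on the project)
+    #
+    #   Pod::Sandbox.new('Pods').inspect
+    #   #=> "#<Pod::Sandbox> with root /path/to/Pods"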
+ # + def inspect + "#<#{self.class}> with root #{root}" + end + + #-------------------------------------------------------------------------# + + public + + # @!group Paths + + # @return [Pathname] the path of the manifest. + # + def manifest_path + root + 'Manifest.lock' + end + + # @return [Pathname] the path of the Pods project. + # + def project_path + root + 'Pods.xcodeproj' + end + + # @return [Pathname] the path of the installation cache. + # + def project_installation_cache_path + root.join('.project_cache', 'installation_cache.yaml') + end + + # @return [Pathname] the path of the metadata cache. + # + def project_metadata_cache_path + root.join('.project_cache', 'metadata_cache.yaml') + end + + # @return [Pathname] the path of the version cache. + # + def project_version_cache_path + root.join('.project_cache', 'version') + end + + # @param [String] pod_target_name + # Name of the pod target used to generate the path of its Xcode project. + # + # @return [Pathname] the path of the project for a pod target. + # + def pod_target_project_path(pod_target_name) + root + "#{pod_target_name}.xcodeproj" + end + + # Returns the path for the directory where the support files of + # a target are stored. + # + # @param [String] name + # The name of the target. + # + # @return [Pathname] the path of the support files. + # + def target_support_files_dir(name) + target_support_files_root + name + end + + # Returns the path where the Pod with the given name is stored, taking into + # account whether the Pod is locally sourced. + # + # @param [String] name + # The name of the Pod. + # + # @return [Pathname] the path of the Pod. + # + def pod_dir(name) + root_name = Specification.root_name(name) + if local?(root_name) + Pathname.new(development_pods[root_name].dirname) + else + sources_root + root_name + end + end + + # Returns true if the path as originally specified was absolute. + # + # @param [String] name + # + # @return [Boolean] true if originally absolute + # + def local_path_was_absolute?(name) + @pods_with_absolute_path.include? name + end + + # @return [Pathname] The directory where headers are stored. + # + def headers_root + root + 'Headers' + end + + # @return [Pathname] The directory where the downloaded sources of + # the Pods are stored. + # + def sources_root + root + end + + # @return [Pathname] the path for the directory where the + # specifications are stored. + # + def specifications_root + root + 'Local Podspecs' + end + + # @return [Pathname] The directory where the files generated by + # CocoaPods to support the umbrella targets are stored. + # + def target_support_files_root + root + 'Target Support Files' + end + + #-------------------------------------------------------------------------# + + public + + # @!group Specification store + + # Returns the specification for the Pod with the given name. + # + # @param [String] name + # the name of the Pod for which the specification is requested. + # + # @return [Specification] the specification if the file is found. + # + def specification(name) + @stored_podspecs[name] ||= if file = specification_path(name) + original_path = development_pods[name] + Specification.from_file(original_path || file) + end + end + + # Returns the path of the specification for the Pod with the + # given name, if one is stored. + # + # @param [String] name + # the name of the Pod for which the podspec file is requested. + # + # @return [Pathname] the path or nil. + # @return [Nil] if the podspec is not stored. 
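+    #
+    # @example Looking up a stored podspec (pod name is illustrative)
+    #
+    #   sandbox.specification_path('Alamofire')
+    #   #=> #<Pathname:Pods/Local Podspecs/Alamofire.podspec.json>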
+ # + def specification_path(name) + name = Specification.root_name(name) + path = specifications_root + "#{name}.podspec" + if path.exist? + path + else + path = specifications_root + "#{name}.podspec.json" + if path.exist? + path + end + end + end + + # Stores a specification in the `Local Podspecs` folder. + # + # @param [String] name + # the name of the pod + # + # @param [String, Pathname, Specification] podspec + # The contents of the specification (String) or the path to a + # podspec file (Pathname). + # + # @return [void] + # + # + def store_podspec(name, podspec, _external_source = false, json = false) + file_name = json ? "#{name}.podspec.json" : "#{name}.podspec" + output_path = specifications_root + file_name + + spec = + case podspec + when String + Sandbox.update_changed_file(output_path, podspec) + Specification.from_file(output_path) + when Pathname + unless podspec.exist? + raise Informative, "No podspec found for `#{name}` in #{podspec}" + end + FileUtils.copy(podspec, output_path) + Specification.from_file(podspec) + when Specification + raise ArgumentError, 'can only store Specification objects as json' unless json + Sandbox.update_changed_file(output_path, podspec.to_pretty_json) + podspec.dup + else + raise ArgumentError, "Unknown type for podspec: #{podspec.inspect}" + end + + # we force the file to be the file in the sandbox, so specs that have been serialized to + # json maintain a consistent checksum. + # this is safe to do because `spec` is always a clean instance + spec.defined_in_file = output_path + + unless spec.name == name + raise Informative, "The name of the given podspec `#{spec.name}` doesn't match the expected one `#{name}`" + end + @stored_podspecs[spec.name] = spec + end + + #-------------------------------------------------------------------------# + + public + + # @!group Pods information + + # Marks a Pod as pre-downloaded + # + # @param [String] name + # The name of the Pod. + # + # @return [void] + # + def store_pre_downloaded_pod(name) + root_name = Specification.root_name(name) + predownloaded_pods << root_name + end + + # @return [Array] The names of the pods that have been + # pre-downloaded from an external source. + # + attr_reader :predownloaded_pods + + # Checks if a Pod has been pre-downloaded by the resolver in order to fetch + # the podspec. + # + # @param [String] name + # The name of the Pod. + # + # @return [Boolean] Whether the Pod has been pre-downloaded. + # + def predownloaded?(name) + root_name = Specification.root_name(name) + predownloaded_pods.include?(root_name) + end + + #--------------------------------------# + + # Marks a Pod as downloaded + # + # @param [String] name + # The name of the Pod. + # + # @return [void] + # + def store_downloaded_pod(name) + root_name = Specification.root_name(name) + downloaded_pods << root_name + end + + # Checks if a Pod has been downloaded before the installation + # process. + # + # @param [String] name + # The name of the Pod. + # + # @return [Boolean] Whether the Pod has been downloaded. + # + def downloaded?(name) + root_name = Specification.root_name(name) + downloaded_pods.include?(root_name) + end + + # @return [Array] The names of the pods that have been + # downloaded before the installation process begins. + # These are distinct from the pre-downloaded pods in + # that these do not necessarily come from external + # sources, and are only downloaded right before + # installation if the parallel_pod_downloads option is on. 
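+    #
+    # @example Download state is keyed by root name (pod name is illustrative)
+    #
+    #   sandbox.store_downloaded_pod('Alamofire/Core')
+    #   sandbox.downloaded?('Alamofire') #=> true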
+    #
+    attr_reader :downloaded_pods
+
+    #--------------------------------------#
+
+    # Stores the checkout source of a Pod.
+    #
+    # @param [String] name
+    #        The name of the Pod.
+    #
+    # @param [Hash] source
+    #        The hash which contains the options as returned by the
+    #        downloader.
+    #
+    # @return [void]
+    #
+    def store_checkout_source(name, source)
+      root_name = Specification.root_name(name)
+      checkout_sources[root_name] = source
+    end
+
+    # Removes the checkout source of a Pod.
+    #
+    # @param [String] name
+    #        The name of the Pod.
+    #
+    # @return [void]
+    #
+    def remove_checkout_source(name)
+      root_name = Specification.root_name(name)
+      checkout_sources.delete(root_name)
+    end
+
+    # Removes the local podspec of a Pod.
+    #
+    # @param [String] name
+    #        The name of the Pod.
+    #
+    # @return [void]
+    #
+    def remove_local_podspec(name)
+      local_podspec = specification_path(name)
+      FileUtils.rm(local_podspec) if local_podspec
+    end
+
+    # @return [Hash{String=>Hash}] The options necessary to recreate the exact
+    #         checkout of a given Pod grouped by its name.
+    #
+    attr_reader :checkout_sources
+
+    #--------------------------------------#
+
+    # Stores the local path of a Pod.
+    #
+    # @param [String] name
+    #        The name of the Pod.
+    #
+    # @param [Pathname, String] path
+    #        The path to the local Podspec
+    #
+    # @param [Boolean] was_absolute
+    #        True if the specified local path was absolute.
+    #
+    # @return [void]
+    #
+    def store_local_path(name, path, was_absolute = false)
+      root_name = Specification.root_name(name)
+      path = Pathname.new(path) unless path.is_a?(Pathname)
+      development_pods[root_name] = path
+      @pods_with_absolute_path << root_name if was_absolute
+    end
+
+    # @return [Hash{String=>Pathname}] The path of the Pods' podspecs with a local source
+    #         grouped by their root name.
+    #
+    attr_reader :development_pods
+
+    # Checks whether a Pod is locally sourced.
+    #
+    # @param [String] name
+    #        The name of the Pod.
+    #
+    # @return [Boolean] Whether the Pod is locally sourced.
+    #
+    def local?(name)
+      !local_podspec(name).nil?
+    end
+
+    # @param [String] name
+    #        The name of a locally specified Pod
+    #
+    # @return [Pathname] Path to the local Podspec of the Pod
+    #
+    def local_podspec(name)
+      root_name = Specification.root_name(name)
+      development_pods[root_name]
+    end
+
+    # @!group Convenience Methods
+
+    # Writes a file if it does not exist or if its contents have changed.
+    #
+    # @param [Pathname] path
+    #        The path to read from and write to.
+    #
+    # @param [String] contents
+    #        The contents to write if they do not match or the file does not exist.
+    #
+    # @return [void]
+    #
+    def self.update_changed_file(path, contents)
+      if path.exist?
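+        # Stream-compare the existing file with the new contents; identical
+        # files are left untouched so their modification times stay stable.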
+ content_stream = StringIO.new(contents) + identical = File.open(path, 'rb') { |f| FileUtils.compare_stream(f, content_stream) } + return if identical + end + File.open(path, 'w') { |f| f.write(contents) } + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/file_accessor.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/file_accessor.rb new file mode 100644 index 0000000..10d1bc4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/file_accessor.rb @@ -0,0 +1,532 @@ +require 'cocoapods/xcode/linkage_analyzer' + +module Pod + class Sandbox + # Resolves the file patterns of a specification against its root directory, + # taking into account any exclude pattern and the default extensions to use + # for directories. + # + # @note The FileAccessor always returns absolute paths. + # + class FileAccessor + HEADER_EXTENSIONS = Xcodeproj::Constants::HEADER_FILES_EXTENSIONS + SOURCE_FILE_EXTENSIONS = (%w(.m .mm .i .c .cc .cxx .cpp .c++ .swift) + HEADER_EXTENSIONS).uniq.freeze + + GLOB_PATTERNS = { + :readme => 'readme{*,.*}'.freeze, + :license => 'licen{c,s}e{*,.*}'.freeze, + :source_files => "*{#{SOURCE_FILE_EXTENSIONS.join(',')}}".freeze, + :public_header_files => "*{#{HEADER_EXTENSIONS.join(',')}}".freeze, + :podspecs => '*.{podspec,podspec.json}'.freeze, + :docs => 'doc{s}{*,.*}/**/*'.freeze, + }.freeze + + # @return [Sandbox::PathList] the directory where the source of the Pod + # is located. + # + attr_reader :path_list + + # @return [Specification::Consumer] the consumer of the specification for + # which the file patterns should be resolved. + # + attr_reader :spec_consumer + + # Initialize a new instance + # + # @param [Sandbox::PathList, Pathname] path_list @see #path_list + # @param [Specification::Consumer] spec_consumer @see #spec_consumer + # + def initialize(path_list, spec_consumer) + if path_list.is_a?(PathList) + @path_list = path_list + else + @path_list = PathList.new(path_list) + end + @spec_consumer = spec_consumer + + unless @spec_consumer + raise Informative, 'Attempt to initialize File Accessor without a specification consumer.' + end + end + + # @return [Pathname] the directory which contains the files of the Pod. + # + def root + path_list.root if path_list + end + + # @return [Specification] the specification. + # + def spec + spec_consumer.spec + end + + # @return [Specification] the platform used to consume the specification. + # + def platform_name + spec_consumer.platform_name + end + + # @return [String] A string suitable for debugging. + # + def inspect + "<#{self.class} spec=#{spec.name} platform=#{platform_name} root=#{root}>" + end + + #-----------------------------------------------------------------------# + + public + + # @!group Paths + + # @return [Array] the source files of the specification. + # + def source_files + paths_for_attribute(:source_files) + end + + # @return [Array] the source files of the specification that + # use ARC. + # + def arc_source_files + case spec_consumer.requires_arc + when TrueClass + source_files + when FalseClass + [] + else + paths_for_attribute(:requires_arc) & source_files + end + end + + # @return [Array] the source files of the specification that + # do not use ARC. + # + def non_arc_source_files + source_files - arc_source_files + end + + # @return [Array] the headers of the specification. 
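+      #
+      # @example Headers are the source files with a header extension
+      #   (illustrative result for a hypothetical `PodKit` pod)
+      #
+      #   accessor.headers.map { |path| path.basename.to_s }
+      #   #=> ["PodKit.h", "PodKit+Internal.h"]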
+ # + def headers + extensions = HEADER_EXTENSIONS + source_files.select { |f| extensions.include?(f.extname) } + end + + # @param [Boolean] include_frameworks + # Whether or not to include the headers of the vendored frameworks. + # Defaults to not include them. + # + # @return [Array] the public headers of the specification. + # + def public_headers(include_frameworks = false) + public_headers = public_header_files + project_headers = project_header_files + private_headers = private_header_files + if public_headers.nil? || public_headers.empty? + header_files = headers + else + header_files = public_headers + end + header_files += vendored_frameworks_headers if include_frameworks + header_files - project_headers - private_headers + end + + # @return [Array] The project headers of the specification. + # + def project_headers + project_header_files + end + + # @return [Array] The private headers of the specification. + # + def private_headers + private_header_files + end + + # @return [Array] the resources of the specification. + # + def resources + paths_for_attribute(:resources, true) + end + + # @return [Array] the files of the specification to preserve. + # + def preserve_paths + paths_for_attribute(:preserve_paths, true) + end + + # @return [Array] The paths of the framework bundles that come + # shipped with the Pod. + # + def vendored_frameworks + paths_for_attribute(:vendored_frameworks, true) + end + + # @return [Array] The paths of the dynamic framework bundles + # that come shipped with the Pod. + # + def vendored_dynamic_frameworks + (vendored_frameworks - vendored_xcframeworks).select do |framework| + Xcode::LinkageAnalyzer.dynamic_binary?(framework + framework.basename('.*')) + end + end + + # @return [Array] The paths of the static xcframework bundles + # that come shipped with the Pod. + # + def vendored_static_xcframeworks + vendored_xcframeworks.select do |path| + Xcode::XCFramework.new(spec.name, path).build_type == BuildType.static_framework + end + end + + # @return [Array] The paths of the dynamic xcframework bundles + # that come shipped with the Pod. + # + def vendored_dynamic_xcframeworks + vendored_xcframeworks.select do |path| + Xcode::XCFramework.new(spec.name, path).build_type == BuildType.dynamic_framework + end + end + + # @return [Array] The paths of the static (fake) framework + # bundles that come shipped with the Pod. + # + def vendored_static_frameworks + vendored_frameworks - vendored_dynamic_frameworks - vendored_xcframeworks + end + + # @return [Array] The paths of vendored .xcframework bundles + # that come shipped with the Pod. + # + def vendored_xcframeworks + vendored_frameworks.select do |framework| + File.extname(framework) == '.xcframework' + end + end + + # @param [Array] file_accessors + # The list of all file accessors to compute. + # + # @return [Array] The list of all file accessors that a target will integrate into the project. + # + def self.all_files(file_accessors) + files = [ + file_accessors.map(&:vendored_frameworks), + file_accessors.map(&:vendored_libraries), + file_accessors.map(&:resource_bundle_files), + file_accessors.map(&:license), + file_accessors.map(&:prefix_header), + file_accessors.map(&:preserve_paths), + file_accessors.map(&:readme), + file_accessors.map(&:resources), + file_accessors.map(&:on_demand_resources_files), + file_accessors.map(&:source_files), + file_accessors.map(&:module_map), + ] + files.flatten.compact.uniq + end + + # @param [Pathname] framework + # The vendored framework to search into. 
+ # @return [Pathname] The path of the header directory of the + # vendored framework. + # + def self.vendored_frameworks_headers_dir(framework) + dir = framework + 'Headers' + dir.directory? ? dir.realpath : dir + end + + # @param [Pathname] framework + # The vendored framework to search into. + # @return [Array] The paths of the headers included in the + # vendored framework. + # + def self.vendored_frameworks_headers(framework) + headers_dir = vendored_frameworks_headers_dir(framework) + Pathname.glob(headers_dir + '**/' + GLOB_PATTERNS[:public_header_files]) + end + + # @param [String] target_name + # The target name this .xcframework belongs to + # + # @param [Pathname] framework_path + # The path to the .xcframework + # + # @return [Array] The paths to all the headers included in the + # vendored xcframework + # + def self.vendored_xcframework_headers(target_name, framework_path) + xcframework = Xcode::XCFramework.new(target_name, framework_path) + xcframework.slices.flat_map do |slice| + vendored_frameworks_headers(slice.path) + end + end + + # @return [Array] The paths of the framework headers that come + # shipped with the Pod. + # + def vendored_frameworks_headers + paths = (vendored_frameworks - vendored_xcframeworks).flat_map do |framework| + self.class.vendored_frameworks_headers(framework) + end.uniq + paths.concat Array.new(vendored_xcframeworks.flat_map do |framework| + self.class.vendored_xcframework_headers(spec.name, framework) + end) + paths + end + + # @return [Array] The paths of the library bundles that come + # shipped with the Pod. + # + def vendored_libraries + paths_for_attribute(:vendored_libraries) + end + + # @return [Array] The paths of the dynamic libraries + # that come shipped with the Pod. + # + def vendored_dynamic_libraries + vendored_libraries.select do |library| + Xcode::LinkageAnalyzer.dynamic_binary?(library) + end + end + + # @return [Array] The paths of the static libraries + # that come shipped with the Pod. + # + def vendored_static_libraries + vendored_libraries - vendored_dynamic_libraries + end + + # @return [Array] The paths of the dynamic binary artifacts + # that come shipped with the Pod. + # + def vendored_dynamic_artifacts + vendored_dynamic_libraries + vendored_dynamic_frameworks + end + + # @return [Array] The paths of the static binary artifacts + # that come shipped with the Pod. + # + def vendored_static_artifacts + vendored_static_libraries + vendored_static_frameworks + vendored_static_xcframeworks + end + + # @return [Hash{String => Array}] A hash that describes the + # resource bundles of the Pod. The keys represent the name of + # the bundle while the values the path of the resources. + # + def resource_bundles + result = {} + spec_consumer.resource_bundles.each do |name, file_patterns| + paths = expanded_paths(file_patterns, + :exclude_patterns => spec_consumer.exclude_files, + :include_dirs => true) + result[name] = paths + end + result + end + + # @return [Array] The paths of the files which should be + # included in resources bundles by the Pod. + # + def resource_bundle_files + resource_bundles.values.flatten + end + + # @return [Hash{String => Hash] The expanded paths of the on demand resources specified + # keyed by their tag including their category. 
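+      #
+      # @example Shape of the returned hash (tag and paths are illustrative)
+      #
+      #   accessor.on_demand_resources
+      #   #=> { 'Level1' => { :paths => [<Pathname>], :category => :initial_install } }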
+      #
+      def on_demand_resources
+        result = {}
+        spec_consumer.on_demand_resources.each do |tag_name, file_patterns|
+          paths = expanded_paths(file_patterns[:paths],
+                                 :exclude_patterns => spec_consumer.exclude_files,
+                                 :include_dirs => true)
+          result[tag_name] = { :paths => paths, :category => file_patterns[:category] }
+        end
+        result
+      end
+
+      # @return [Array<Pathname>] The expanded paths of the on demand resources.
+      #
+      def on_demand_resources_files
+        on_demand_resources.values.flat_map { |v| v[:paths] }
+      end
+
+      # @return [Pathname] The path of the prefix header file of the specification.
+      #
+      def prefix_header
+        if file = spec_consumer.prefix_header_file
+          path_list.root + file
+        end
+      end
+
+      # @return [Pathname, nil] The path of the auto-detected README file.
+      #
+      def readme
+        path_list.glob([GLOB_PATTERNS[:readme]]).first
+      end
+
+      # @return [Pathname] The path of the license file as indicated in the
+      #         specification or auto-detected.
+      #
+      def license
+        spec_license || path_list.glob([GLOB_PATTERNS[:license]]).first
+      end
+
+      # @return [Pathname, Nil] The path of the custom module map file of the
+      #         specification, if specified.
+      def module_map
+        if module_map = spec_consumer.module_map
+          path_list.root + module_map
+        end
+      end
+
+      # @return [Array<Pathname>] The paths of auto-detected podspecs
+      #
+      def specs
+        path_list.glob([GLOB_PATTERNS[:podspecs]])
+      end
+
+      # @return [Array<Pathname>] The paths of auto-detected docs
+      #
+      def docs
+        path_list.glob([GLOB_PATTERNS[:docs]])
+      end
+
+      # @return [Pathname] The path of the license file specified in the
+      #         specification, if it exists
+      #
+      def spec_license
+        if file = spec_consumer.license[:file]
+          absolute_path = root + file
+          absolute_path if File.exist?(absolute_path)
+        end
+      end
+
+      # @return [Array<Pathname>] Paths to include for local pods to assist in development
+      #
+      def developer_files
+        podspecs = specs
+        result = [module_map, prefix_header]
+
+        if license_path = spec_consumer.license[:file]
+          license_path = root + license_path
+          unless File.exist?(license_path)
+            UI.warn "A license was specified in podspec `#{spec.name}` but the file does not exist - #{license_path}"
+          end
+        end
+
+        if podspecs.size <= 1
+          result += [license, readme, podspecs, docs]
+        else
+          # Manually add non-globbing files since there are multiple podspecs in the same folder
+          result << podspec_file
+          if license_file = spec_license
+            absolute_path = root + license_file
+            result << absolute_path if File.exist?(absolute_path)
+          end
+        end
+        result.compact.flatten.sort
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Private paths
+
+      # @return [Array<Pathname>] The paths of the user-specified public header
+      #         files.
+      #
+      def public_header_files
+        paths_for_attribute(:public_header_files)
+      end
+
+      # @return [Array<Pathname>] The paths of the user-specified project header
+      #         files.
+      #
+      def project_header_files
+        paths_for_attribute(:project_header_files)
+      end
+
+      # @return [Array<Pathname>] The paths of the user-specified private header
+      #         files.
+      #
+      def private_header_files
+        paths_for_attribute(:private_header_files)
+      end
+
+      # @return [Pathname] The path of the podspec matching @spec
+      #
+      def podspec_file
+        specs.lazy.select { |p| File.basename(p.to_s, '.*') == spec.name }.first
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Private helpers
+
+      # Returns the list of the paths found in the file system for the
+      # attribute with given name. It takes into account any dir pattern and
+      # any file excluded in the specification.
+      #
+      # @param [Symbol] attribute
+      #        the name of the attribute.
+      #
+      # @return [Array<Pathname>] the paths.
+      #
+      def paths_for_attribute(attribute, include_dirs = false)
+        file_patterns = spec_consumer.send(attribute)
+        options = {
+          :exclude_patterns => spec_consumer.exclude_files,
+          :dir_pattern => GLOB_PATTERNS[attribute],
+          :include_dirs => include_dirs,
+        }
+        expanded_paths(file_patterns, options)
+      end
+
+      # Matches the given patterns to the files present in the root of the path
+      # list.
+      #
+      # @param [Array<String>] patterns
+      #        The patterns to expand.
+      #
+      # @param [Hash] options
+      #        The options to use to expand the patterns to file paths.
+      #
+      # @option options [String] :dir_pattern
+      #         The pattern to add to directories.
+      #
+      # @option options [Array<String>] :exclude_patterns
+      #         The exclude patterns to pass to the PathList.
+      #
+      # @option options [Boolean] :include_dirs
+      #         Whether directories should be also included or just plain
+      #         files.
+      #
+      # @raise [Informative] If the pod does not exist.
+      #
+      # @return [Array<Pathname>] A list of the paths.
+      #
+      def expanded_paths(patterns, options = {})
+        return [] if patterns.empty?
+        path_list.glob(patterns, options).flatten.compact.uniq
+      end
+
+      #-----------------------------------------------------------------------#
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/headers_store.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/headers_store.rb
new file mode 100644
index 0000000..9442419
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/headers_store.rb
@@ -0,0 +1,163 @@
+module Pod
+  class Sandbox
+    # Provides support for managing a header directory. It also keeps track of
+    # the header search paths.
+    #
+    class HeadersStore
+      SEARCH_PATHS_KEY = Struct.new(:platform_name, :target_name, :use_modular_headers)
+
+      # @return [Pathname] the absolute path of this header directory.
+      #
+      def root
+        sandbox.headers_root + @relative_path
+      end
+
+      # @return [Sandbox] the sandbox where this header directory is stored.
+      #
+      attr_reader :sandbox
+
+      # @param [Sandbox] @see #sandbox
+      #
+      # @param [String] relative_path
+      #        the relative path to the sandbox root and hence to the Pods
+      #        project.
+      #
+      # @param [Symbol] visibility_scope
+      #        the header visibility scope to use in this store. Can be `:private` or `:public`.
+      #
+      def initialize(sandbox, relative_path, visibility_scope)
+        @sandbox = sandbox
+        @relative_path = relative_path
+        @search_paths = []
+        @search_paths_cache = {}
+        @visibility_scope = visibility_scope
+      end
+
+      # @param [Platform] platform
+      #        the platform for which the header search paths should be
+      #        returned.
+      #
+      # @param [String] target_name
+      #        the target for which the header search paths should be
+      #        returned. Can be `nil`, in which case all headers that match the platform
+      #        will be returned.
+      #
+      # @param [Boolean] use_modular_headers
+      #        whether the search paths generated should use modular (stricter) style.
+      #
+      # @return [Array<String>] All the search paths of the header directory in
+      #         xcconfig format. The paths are specified relative to the pods
+      #         root with the `${PODS_ROOT}` variable.
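+      #
+      # @example Illustrative result, assuming the public headers store and an
+      #   `Alamofire` pod registered for iOS
+      #
+      #   sandbox.public_headers.search_paths(Platform.ios, 'Alamofire')
+      #   #=> ["${PODS_ROOT}/Headers/Public", "${PODS_ROOT}/Headers/Public/Alamofire"]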
+ # + def search_paths(platform, target_name = nil, use_modular_headers = false) + key = SEARCH_PATHS_KEY.new(platform.name, target_name, use_modular_headers) + if (cached = @search_paths_cache[key]) + return cached + end + search_paths = @search_paths.select do |entry| + matches_platform = entry[:platform] == platform.name + matches_target = target_name.nil? || (File.basename(entry[:path]) == target_name) + matches_platform && matches_target + end + headers_dir = root.relative_path_from(sandbox.root).dirname + @search_paths_cache[key] = search_paths.flat_map do |entry| + paths = [] + paths << "${PODS_ROOT}/#{headers_dir}/#{@relative_path}" if !use_modular_headers || @visibility_scope == :public + paths << "${PODS_ROOT}/#{headers_dir}/#{entry[:path]}" if !use_modular_headers || @visibility_scope == :private + paths + end.tap(&:uniq!).freeze + end + + # Removes the entire root directory. + # + # @return [void] + # + def implode! + root.rmtree if root.exist? + end + + # Removes the directory at the given path relative to the root. + # + # @param [Pathname] path + # The path used to join with #root and remove. + # + # @return [void] + # + def implode_path!(path) + path = root.join(path) + path.rmtree if path.exist? + end + + #-----------------------------------------------------------------------# + + public + + # @!group Adding headers + + # Adds headers to the directory. + # + # @param [Pathname] namespace + # the path where the header file should be stored relative to the + # headers directory. + # + # @param [Array] relative_header_paths + # the path of the header file relative to the Pods project + # (`PODS_ROOT` variable of the xcconfigs). + # + # @note This method does _not_ add the files to the search paths. + # + # @return [Array] + # + def add_files(namespace, relative_header_paths) + root.join(namespace).mkpath unless relative_header_paths.empty? + relative_header_paths.map do |relative_header_path| + add_file(namespace, relative_header_path, :mkdir => false) + end + end + + # Adds a header to the directory. + # + # @param [Pathname] namespace + # the path where the header file should be stored relative to the + # headers directory. + # + # @param [Pathname] relative_header_path + # the path of the header file relative to the Pods project + # (`PODS_ROOT` variable of the xcconfigs). + # + # @note This method does _not_ add the file to the search paths. + # + # @return [Pathname] + # + def add_file(namespace, relative_header_path, mkdir: true) + namespaced_path = root + namespace + namespaced_path.mkpath if mkdir + + absolute_source = (sandbox.root + relative_header_path) + source = absolute_source.relative_path_from(namespaced_path) + if Gem.win_platform? + FileUtils.ln(absolute_source, namespaced_path, :force => true) + else + FileUtils.ln_sf(source, namespaced_path) + end + namespaced_path + relative_header_path.basename + end + + # Adds an header search path to the sandbox. + # + # @param [Pathname] path + # the path to add. 
+ # + # @param [String] platform + # the platform the search path applies to + # + # @return [void] + # + def add_search_path(path, platform) + @search_paths << { :platform => platform.name, :path => File.join(@relative_path, path) } + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/path_list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/path_list.rb new file mode 100644 index 0000000..7d85a01 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/path_list.rb @@ -0,0 +1,242 @@ +require 'active_support/multibyte/unicode' +require 'find' + +module Pod + class Sandbox + # The PathList class is designed to perform multiple glob matches against + # a given directory. Basically, it generates a list of all the children + # paths and matches the globs patterns against them, resulting in just one + # access to the file system. + # + # @note A PathList once it has generated the list of the paths this is + # updated only if explicitly requested by calling + # {#read_file_system} + # + class PathList + # @return [Pathname] The root of the list whose files and directories + # are used to perform the matching operations. + # + attr_reader :root + + # Initialize a new instance + # + # @param [Pathname] root @see #root + # + def initialize(root) + root_dir = root.to_s.unicode_normalize(:nfkc) + @root = Pathname.new(root_dir) + @glob_cache = {} + end + + # @return [Array] The list of absolute the path of all the files + # contained in {root}. + # + def files + read_file_system unless @files + @files + end + + # @return [Array] The list of absolute the path of all the + # directories contained in {root}. + # + def dirs + read_file_system unless @dirs + @dirs + end + + # @return [void] Reads the file system and populates the files and paths + # lists. + # + def read_file_system + unless root.exist? + raise Informative, "Attempt to read non existent folder `#{root}`." + end + dirs = [] + files = [] + root_length = root.cleanpath.to_s.length + File::SEPARATOR.length + escaped_root = escape_path_for_glob(root) + Dir.glob(escaped_root + '**/*', File::FNM_DOTMATCH).each do |f| + directory = File.directory?(f) + # Ignore `.` and `..` directories + next if directory && f =~ /\.\.?$/ + + f = f.slice(root_length, f.length - root_length) + next if f.nil? + + (directory ? dirs : files) << f + end + + dirs.sort_by!(&:upcase) + files.sort_by!(&:upcase) + + @dirs = dirs + @files = files + @glob_cache = {} + end + + #-----------------------------------------------------------------------# + + public + + # @!group Globbing + + # Similar to {glob} but returns the absolute paths. + # + # @param [String,Array] patterns + # @see #relative_glob + # + # @param [Hash] options + # @see #relative_glob + # + # @return [Array] + # + def glob(patterns, options = {}) + cache_key = options.merge(:patterns => patterns) + @glob_cache[cache_key] ||= relative_glob(patterns, options).map { |p| root.join(p) } + end + + # The list of relative paths that are case insensitively matched by a + # given pattern. This method emulates {Dir#glob} with the + # {File::FNM_CASEFOLD} option. + # + # @param [String,Array] patterns + # A single {Dir#glob} like pattern, or a list of patterns. + # + # @param [Hash] options + # + # @option options [String] :dir_pattern + # An optional pattern to append to a pattern, if it is the path + # to a directory. 
+      #
+      # @option options [Array<String>] :exclude_patterns
+      #         Exclude specific paths given by those patterns.
+      #
+      # @option options [Boolean] :include_dirs
+      #         Whether directories should also be taken into account for
+      #         matching.
+      #
+      # @return [Array<Pathname>]
+      #
+      def relative_glob(patterns, options = {})
+        return [] if patterns.empty?
+
+        dir_pattern = options[:dir_pattern]
+        exclude_patterns = options[:exclude_patterns]
+        include_dirs = options[:include_dirs]
+
+        if include_dirs
+          full_list = files + dirs
+        else
+          full_list = files
+        end
+        patterns_array = Array(patterns)
+        exact_matches = (full_list & patterns_array).to_set
+
+        unless patterns_array.empty?
+          list = patterns_array.flat_map do |pattern|
+            if exact_matches.include?(pattern)
+              pattern
+            else
+              if directory?(pattern) && dir_pattern
+                pattern += '/' unless pattern.end_with?('/')
+                pattern += dir_pattern
+              end
+              expanded_patterns = dir_glob_equivalent_patterns(pattern)
+              full_list.select do |path|
+                expanded_patterns.any? do |p|
+                  File.fnmatch(p, path, File::FNM_CASEFOLD | File::FNM_PATHNAME)
+                end
+              end
+            end
+          end
+        end
+
+        list = list.map { |path| Pathname.new(path) }
+        if exclude_patterns
+          exclude_options = { :dir_pattern => '**/*', :include_dirs => include_dirs }
+          list -= relative_glob(exclude_patterns, exclude_options)
+        end
+        list
+      end
+
+      #-----------------------------------------------------------------------#
+
+      private
+
+      # @!group Private helpers
+
+      # @return [Boolean] Whether a path is a directory. The result of this
+      #         method is computed without accessing the file system and is
+      #         case insensitive.
+      #
+      # @param [String, Pathname] sub_path The path that could be a directory.
+      #
+      def directory?(sub_path)
+        sub_path = sub_path.to_s.downcase.sub(/\/$/, '')
+        dirs.any? { |dir| dir.downcase == sub_path }
+      end
+
+      # @return [Array<String>] An array of patterns converted from a
+      #         {Dir.glob} pattern to patterns that {File.fnmatch} can handle.
+      #         This is used by the {#relative_glob} method to emulate
+      #         {Dir.glob}.
+      #
+      #   The expansion provides support for:
+      #
+      #   - Literals
+      #
+      #       dir_glob_equivalent_patterns('{file1,file2}.{h,m}')
+      #       => ["file1.h", "file1.m", "file2.h", "file2.m"]
+      #
+      #   - Matching the direct children of a directory with `**`
+      #
+      #       dir_glob_equivalent_patterns('Classes/**/file.m')
+      #       => ["Classes/**/file.m", "Classes/file.m"]
+      #
+      # @param [String] pattern A {Dir#glob} like pattern.
+      #
+      def dir_glob_equivalent_patterns(pattern)
+        pattern = pattern.gsub('/**/', '{/**/,/}')
+        values_by_set = {}
+        pattern.scan(/\{[^}]*\}/) do |set|
+          values = set.gsub(/[{}]/, '').split(',')
+          values_by_set[set] = values
+        end
+
+        if values_by_set.empty?
+          [pattern]
+        else
+          patterns = [pattern]
+          values_by_set.each do |set, values|
+            patterns = patterns.flat_map do |old_pattern|
+              values.map do |value|
+                old_pattern.gsub(set, value)
+              end
+            end
+          end
+          patterns
+        end
+      end
+
+      # Escapes the glob metacharacters from a given path so it can be used in
+      # Dir#glob and similar methods.
+      #
+      # @note See CocoaPods/CocoaPods#862.
+      #
+      # @param [String, Pathname] path
+      #        The path to escape.
+      #
+      # @return [Pathname] The escaped path.
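+      #
+      # @example Escaping glob metacharacters (path is illustrative)
+      #
+      #   escape_path_for_glob(Pathname.new('Pods/Kit [beta]'))
+      #   #=> #<Pathname:Pods/Kit \[beta\]>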
+ # + def escape_path_for_glob(path) + result = path.to_s + characters_to_escape = ['[', ']', '{', '}', '?', '*'] + characters_to_escape.each do |character| + result.gsub!(character, "\\#{character}") + end + Pathname.new(result) + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/pod_dir_cleaner.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/pod_dir_cleaner.rb new file mode 100644 index 0000000..c47cf96 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/pod_dir_cleaner.rb @@ -0,0 +1,71 @@ +module Pod + class Sandbox + class PodDirCleaner + attr_reader :root + attr_reader :specs_by_platform + + def initialize(root, specs_by_platform) + @root = root + @specs_by_platform = specs_by_platform + end + + # Removes all the files not needed for the installation according to the + # specs by platform. + # + # @return [void] + # + def clean! + clean_paths.each { |path| FileUtils.rm_rf(path) } if root.exist? + end + + private + + # @return [Array] the file accessors for all the + # specifications on their respective platform. + # + def file_accessors + @file_accessors ||= specs_by_platform.flat_map do |platform, specs| + specs.flat_map { |spec| Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) } + end + end + + # @return [Sandbox::PathList] The path list for this Pod. + # + def path_list + @path_list ||= Sandbox::PathList.new(root) + end + + # Finds the absolute paths, including hidden ones, of the files + # that are not used by the pod and thus can be safely deleted. + # + # @note Implementation detail: Don't use `Dir#glob` as there is an + # unexplained issue (#568, #572 and #602). + # + # @todo The paths are down-cased for the comparison as issues similar + # to #602 lead the files not being matched and so cleaning all + # the files. This solution might create side effects. + # + # @return [Array] The paths that can be deleted. + # + def clean_paths + cached_used = used_files.map(&:downcase) + glob_options = File::FNM_DOTMATCH | File::FNM_CASEFOLD + files = Pathname.glob(root + '**/*', glob_options).map(&:to_s) + cached_used_set = cached_used.to_set + files.reject do |candidate| + candidate = candidate.downcase + candidate.end_with?('.', '..') || cached_used_set.include?(candidate) || cached_used.any? do |path| + path.include?(candidate) || candidate.include?(path) + end + end + end + + # @return [Array] The absolute path of all the files used by the + # specifications (according to their platform) of this Pod. 
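+      #
+      # @example Illustrative paths for a hypothetical pod checkout
+      #
+      #   used_files
+      #   #=> ["/.../Pods/PodKit/LICENSE", "/.../Pods/PodKit/Source/PodKit.h"]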
+      #
+      def used_files
+        FileAccessor.all_files(file_accessors).map(&:to_s)
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/podspec_finder.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/podspec_finder.rb
new file mode 100644
index 0000000..6a23cfe
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sandbox/podspec_finder.rb
@@ -0,0 +1,23 @@
+module Pod
+  class Sandbox
+    class PodspecFinder
+      attr_reader :root
+
+      def initialize(root)
+        @root = root
+      end
+
+      def podspecs
+        return @specs_by_name if @specs_by_name
+        @specs_by_name = {}
+        spec_files = Pathname.glob(root + '{,*}.podspec{,.json}')
+        spec_files.sort_by { |p| -p.to_path.split(File::SEPARATOR).size }.each do |file|
+          spec = Specification.from_file(file)
+          spec.validate_cocoapods_version
+          @specs_by_name[spec.name] = spec
+        end
+        @specs_by_name
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sources_manager.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sources_manager.rb
new file mode 100644
index 0000000..15ab7d8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/sources_manager.rb
@@ -0,0 +1,221 @@
+require 'cocoapods-core/source'
+require 'cocoapods/open-uri'
+require 'netrc'
+require 'set'
+require 'rest'
+require 'yaml'
+
+module Pod
+  class Source
+    class Manager
+      # Returns the source whose {Source#url} is equal to `url`, adding the repo
+      # in a manner similar to `pod repo add` if it is not found.
+      #
+      # @raise  If no source with the given `url` could be created.
+      #
+      # @return [Source] The source whose {Source#url} is equal to `url`.
+      #
+      # @param [String] url
+      #        The URL of the source.
+      #
+      def find_or_create_source_with_url(url)
+        source_with_url(url) || create_source_with_url(url)
+      end
+
+      # Adds the source whose {Source#url} is equal to `url`,
+      # in a manner similar to `pod repo add` if it is not found.
+      #
+      # @raise  If no source with the given `url` could be created.
+      #
+      # @return [Source] The source whose {Source#url} is equal to `url`.
+      #
+      # @param [String] url
+      #        The URL of the source.
+      #
+      def create_source_with_url(url)
+        name = name_for_url(url)
+        is_cdn = cdn_url?(url)
+
+        # Hack to ensure that `repo add` output is shown.
+        previous_title_level = UI.title_level
+        UI.title_level = 0
+
+        begin
+          if is_cdn
+            Command::Repo::AddCDN.parse([name, url]).run
+          else
+            Command::Repo::Add.parse([name, url]).run
+          end
+        rescue Informative => e
+          message = "Unable to add a source with url `#{url}` " \
+            "named `#{name}`.\n"
+          message << "(#{e})\n" if Config.instance.verbose?
+          message << 'You can try adding it manually in ' \
+            "`#{Config.instance.repos_dir}` or via `pod repo add`."
+          raise Informative, message
+        ensure
+          UI.title_level = previous_title_level
+        end
+        source = source_with_url(url)
+
+        raise "Unable to create a source with URL #{url}" unless source
+
+        source
+      end
+
+      # Determines whether `url` is a CocoaPods CDN URL.
+      #
+      # @return [Boolean] whether `url` is a CocoaPods CDN URL.
+      #
+      # @param [String] url
+      #        The URL of the source.
+      #
+      def cdn_url?(url)
+        return false unless url =~ %r{^https?:\/\/}
+
+        uri_options = {}
+
+        netrc_info = Netrc.read
+        uri = URI.parse(url)
+        return false unless uri.userinfo.nil?
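+        # Read credentials for the host from ~/.netrc, if any, so CDNs behind
+        # HTTP basic auth can still serve the CocoaPods-version.yml probe.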
+ + netrc_host = uri.host + credentials = netrc_info[netrc_host] + uri_options[:http_basic_authentication] = credentials if credentials + + response = OpenURI.open_uri(url.chomp('/') + '/CocoaPods-version.yml', uri_options) + response_hash = YAML.load(response.read) # rubocop:disable Security/YAMLLoad + response_hash.is_a?(Hash) && !Source::Metadata.new(response_hash).latest_cocoapods_version.nil? + rescue Psych::SyntaxError, ::OpenURI::HTTPError, SocketError + return false + rescue => e + raise Informative, "Couldn't determine repo type for URL: `#{url}`: #{e}" + end + + # Returns the source whose {Source#name} or {Source#url} is equal to the + # given `name_or_url`. + # + # @return [Source] The source whose {Source#name} or {Source#url} is equal to the + # given `name_or_url`. + # + # @param [String] name_or_url + # The name or the URL of the source. + # + def source_with_name_or_url(name_or_url) + all.find { |s| s.name == name_or_url } || + find_or_create_source_with_url(name_or_url) + end + + # @return [Pathname] The path where the search index should be stored. + # + def search_index_path + @search_index_path ||= Config.instance.search_index_file + end + + # @!group Updating Sources + + # Updates the local clone of the spec-repo with the given name or of all + # the git repos if the name is omitted. + # + # @param [String] source_name + # + # @param [Boolean] show_output + # + # @return [void] + # + def update(source_name = nil, show_output = false) + if source_name + sources = [updateable_source_named(source_name)] + else + sources = updateable_sources + end + + changed_spec_paths = {} + + # Do not perform an update if the repos dir has not been setup yet. + return unless repos_dir.exist? + + # Create the Spec_Lock file if needed and lock it so that concurrent + # repo updates do not cause each other to fail + File.open("#{repos_dir}/Spec_Lock", File::CREAT) do |f| + f.flock(File::LOCK_EX) + sources.each do |source| + UI.section "Updating spec repo `#{source.name}`" do + changed_source_paths = source.update(show_output) + changed_spec_paths[source] = changed_source_paths if changed_source_paths.count > 0 + source.verify_compatibility! + end + end + end + # Perform search index update operation in background. + update_search_index_if_needed_in_background(changed_spec_paths) + end + + # Adds the provided source to the list of sources + # + # @param [Source] source the source to add + # + def add_source(source) + all << source unless all.any? { |s| s.url == source || s.name == source.name } + end + end + + extend Executable + executable :git + + def repo_git(args, include_error: false) + Executable.capture_command('git', ['-C', repo] + args, + :capture => include_error ? :merge : :out, + :env => { + 'GIT_CONFIG' => nil, + 'GIT_DIR' => nil, + 'GIT_WORK_TREE' => nil, + } + ). + first.strip + end + + def update_git_repo(show_output = false) + Config.instance.with_changes(:verbose => show_output) do + args = %W(-C #{repo} fetch origin) + args.push('--progress') if show_output + git!(args) + current_branch = git!(%W(-C #{repo} rev-parse --abbrev-ref HEAD)).strip + git!(%W(-C #{repo} reset --hard origin/#{current_branch})) + end + rescue + raise Informative, 'CocoaPods was not able to update the ' \ + "`#{name}` repo. If this is an unexpected issue " \ + 'and persists you can inspect it running ' \ + '`pod repo update --verbose`' + end + end + + class TrunkSource + def verify_compatibility! 
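+      # Run the standard compatibility check first, then surface a notice if
+      # the trunk metadata advertises a newer CocoaPods release than the one
+      # currently running.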
+ super + latest_cocoapods_version = metadata.latest_cocoapods_version && Gem::Version.create(metadata.latest_cocoapods_version) + return unless Config.instance.new_version_message? && + latest_cocoapods_version && + latest_cocoapods_version > Gem::Version.new(Pod::VERSION) + + rc = latest_cocoapods_version.prerelease? + install_message = !Pathname(__FILE__).dirname.writable? ? 'sudo ' : '' + install_message << 'gem install cocoapods' + install_message << ' --pre' if rc + message = [ + '', + "CocoaPods #{latest_cocoapods_version} is available.".green, + "To update use: `#{install_message}`".green, + ("[!] This is a test version we'd love you to try.".yellow if rc), + '', + 'For more information, see https://blog.cocoapods.org ' \ + 'and the CHANGELOG for this version at ' \ + "https://github.com/CocoaPods/CocoaPods/releases/tag/#{latest_cocoapods_version}".green, + '', + '', + ].compact.join("\n") + UI.puts(message) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target.rb new file mode 100644 index 0000000..237f915 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target.rb @@ -0,0 +1,378 @@ +require 'cocoapods/target/build_settings' + +module Pod + # Model class which describes a Pods target. + # + # The Target class stores and provides the information necessary for + # working with a target in the Podfile and its dependent libraries. + # This class is used to represent both the targets and their libraries. + # + class Target + DEFAULT_VERSION = '1.0.0'.freeze + DEFAULT_NAME = 'Default'.freeze + DEFAULT_BUILD_CONFIGURATIONS = { 'Release' => :release, 'Debug' => :debug }.freeze + + # @return [Sandbox] The sandbox where the Pods should be installed. + # + attr_reader :sandbox + + # @return [Hash{String=>Symbol}] A hash representing the user build + # configurations where each key corresponds to the name of a + # configuration and its value to its type (`:debug` or `:release`). + # + attr_reader :user_build_configurations + + # @return [Array] The value for the ARCHS build setting. + # + attr_reader :archs + + # @return [Platform] the platform of this target. + # + attr_reader :platform + + # @return [BuildSettings] the build settings for this target. + # + attr_reader :build_settings + + # @return [BuildType] the build type for this target. + # + attr_reader :build_type + private :build_type + + # @return [Boolean] whether the target can be linked to app extensions only. + # + attr_reader :application_extension_api_only + + # @return [Boolean] whether the target must be compiled with Swift's library + # evolution support, necessary for XCFrameworks. + # + attr_reader :build_library_for_distribution + + # Initialize a new target + # + # @param [Sandbox] sandbox @see #sandbox + # @param [BuildType] build_type @see #build_type + # @param [Hash{String=>Symbol}] user_build_configurations @see #user_build_configurations + # @param [Array] archs @see #archs + # @param [Platform] platform @see #platform + # + def initialize(sandbox, build_type, user_build_configurations, archs, platform) + @sandbox = sandbox + @user_build_configurations = user_build_configurations + @archs = archs + @platform = platform + @build_type = build_type + + @application_extension_api_only = false + @build_library_for_distribution = false + @build_settings = create_build_settings + end + + # @return [String] the name of the library. 
+ # + def name + label + end + + alias to_s name + + # @return [String] the label for the target. + # + def label + DEFAULT_NAME + end + + # @return [String] The version associated with this target + # + def version + DEFAULT_VERSION + end + + # @return [Boolean] Whether the target uses Swift code + # + def uses_swift? + false + end + + # @return [Boolean] whether the target is built dynamically + # + def build_as_dynamic? + build_type.dynamic? + end + + # @return [Boolean] whether the target is built as a dynamic framework + # + def build_as_dynamic_framework? + build_type.dynamic_framework? + end + + # @return [Boolean] whether the target is built as a dynamic library + # + def build_as_dynamic_library? + build_type.dynamic_library? + end + + # @return [Boolean] whether the target is built as a framework + # + def build_as_framework? + build_type.framework? + end + + # @return [Boolean] whether the target is built as a library + # + def build_as_library? + build_type.library? + end + + # @return [Boolean] whether the target is built statically + # + def build_as_static? + build_type.static? + end + + # @return [Boolean] whether the target is built as a static framework + # + def build_as_static_framework? + build_type.static_framework? + end + + # @return [Boolean] whether the target is built as a static library + # + def build_as_static_library? + build_type.static_library? + end + + # @deprecated Prefer {build_as_static_framework?}. + # + # @return [Boolean] Whether the target should build a static framework. + # + def static_framework? + build_as_static_framework? + end + + # @return [String] the name to use for the source code module constructed + # for this target, and which will be used to import the module in + # implementation source files. + # + def product_module_name + c99ext_identifier(label) + end + + # @return [String] the name of the product. + # + def product_name + if build_as_framework? + framework_name + else + static_library_name + end + end + + # @return [String] the name of the product excluding the file extension or + # a product type specific prefix, depends on #requires_frameworks? + # and #product_module_name or #label. + # + def product_basename + if build_as_framework? + product_module_name + else + label + end + end + + # @return [String] the name of the framework, depends on #label. + # + # @note This may not depend on #requires_frameworks? indirectly as it is + # used for migration. + # + def framework_name + "#{product_module_name}.framework" + end + + # @return [String] the name of the library, depends on #label. + # + # @note This may not depend on #requires_frameworks? indirectly as it is + # used for migration. + # + def static_library_name + "lib#{label}.a" + end + + # @return [Symbol] either :framework or :static_library, depends on + # #build_as_framework?. + # + def product_type + build_as_framework? ? :framework : :static_library + end + + # @return [String] A string suitable for debugging. + # + def inspect + "#<#{self.class} name=#{name}>" + end + + #-------------------------------------------------------------------------# + + # @!group Framework support + + # @deprecated Prefer {build_as_framework?}. + # + # @return [Boolean] whether the generated target needs to be implemented + # as a framework + # + def requires_frameworks? + build_as_framework? 
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Support files
+
+ # @return [Pathname] the folder where to store the support files of this
+ # library.
+ #
+ def support_files_dir
+ sandbox.target_support_files_dir(name)
+ end
+
+ # @param [String] variant
+ # The variant of the xcconfig. Used to differentiate build
+ # configurations.
+ #
+ # @return [Pathname] the absolute path of the xcconfig file.
+ #
+ def xcconfig_path(variant = nil)
+ if variant
+ support_files_dir + "#{label}.#{variant.to_s.gsub(File::SEPARATOR, '-').downcase}.xcconfig"
+ else
+ support_files_dir + "#{label}.xcconfig"
+ end
+ end
+
+ # @return [Pathname] the absolute path of the header file which contains
+ # the exported foundation constants with framework version
+ # information and all headers, which should be exported in the
+ # module map.
+ #
+ def umbrella_header_path
+ module_map_path.parent + "#{label}-umbrella.h"
+ end
+
+ def umbrella_header_path_to_write
+ module_map_path_to_write.parent + "#{label}-umbrella.h"
+ end
+
+ # @return [Pathname] the absolute path of the LLVM module map file that
+ # defines the module structure for the compiler.
+ #
+ def module_map_path
+ module_map_path_to_write
+ end
+
+ # @!private
+ #
+ # @return [Pathname] the absolute path of the module map file that
+ # CocoaPods writes. This can be different from `module_map_path`
+ # if the module map gets symlinked.
+ #
+ def module_map_path_to_write
+ basename = "#{label}.modulemap"
+ support_files_dir + basename
+ end
+
+ # @return [Pathname] the absolute path of the bridge support file.
+ #
+ def bridge_support_path
+ support_files_dir + "#{label}.bridgesupport"
+ end
+
+ # @return [Pathname] the absolute path of the Info.plist file.
+ #
+ def info_plist_path
+ support_files_dir + "#{label}-Info.plist"
+ end
+
+ # @return [Hash] additional entries for the generated Info.plist
+ #
+ def info_plist_entries
+ {}
+ end
+
+ # @return [Pathname] the path of the dummy source generated by CocoaPods
+ #
+ def dummy_source_path
+ support_files_dir + "#{label}-dummy.m"
+ end
+
+ # Mark the target as extension-only.
+ # Translates to APPLICATION_EXTENSION_API_ONLY = YES in the build settings.
+ #
+ def mark_application_extension_api_only
+ @application_extension_api_only = true
+ end
+
+ # Compiles the target with Swift's library evolution support, necessary to
+ # build XCFrameworks.
+ # Translates to BUILD_LIBRARY_FOR_DISTRIBUTION = YES in the build settings.
+ #
+ def mark_build_library_for_distribution
+ @build_library_for_distribution = true
+ end
+
+ # @return [Pathname] The absolute path of the prepare artifacts script.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_path
+ support_files_dir + "#{label}-artifacts.sh"
+ end
+
+ # Returns an extension in the target that corresponds to the
+ # resource's input extension.
+ #
+ # @param [String] input_extension
+ # The input extension to map to.
+ #
+ # @return [String] The output extension.
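+ # For example, '.xib' maps to '.nib' and '.xcdatamodeld' to '.momd';
+ # extensions without a compiled counterpart are returned unchanged.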
+ # + def self.output_extension_for_resource(input_extension) + case input_extension + when '.storyboard' then '.storyboardc' + when '.xib' then '.nib' + when '.xcdatamodel' then '.mom' + when '.xcdatamodeld' then '.momd' + when '.xcmappingmodel' then '.cdm' + when '.xcassets' then '.car' + else input_extension + end + end + + def self.resource_extension_compilable?(input_extension) + output_extension_for_resource(input_extension) != input_extension && input_extension != '.xcassets' + end + + #-------------------------------------------------------------------------# + + private + + # Transforms the given string into a valid +identifier+ after C99ext + # standard, so that it can be used in source code where escaping of + # ambiguous characters is not applicable. + # + # @param [String] name + # any name, which may contain leading numbers, spaces or invalid + # characters. + # + # @return [String] + # + def c99ext_identifier(name) + name.gsub(/^([0-9])/, '_\1').gsub(/[^a-zA-Z0-9_]/, '_') + end + + def create_build_settings + BuildSettings.new(self) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/aggregate_target.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/aggregate_target.rb new file mode 100644 index 0000000..081b642 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/aggregate_target.rb @@ -0,0 +1,558 @@ +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/xcode/xcframework' + +module Pod + # Stores the information relative to the target used to cluster the targets + # of the single Pods. The client targets will then depend on this one. + # + class AggregateTarget < Target + # Product types where the product's frameworks must be embedded in a host target + # + EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES = [:app_extension, :framework, :static_library, :messages_extension, + :watch_extension, :xpc_service].freeze + + # @return [TargetDefinition] the target definition of the Podfile that + # generated this target. + # + attr_reader :target_definition + + # @return [Pathname] the folder where the client is stored used for + # computing the relative paths. If integrating it should be the + # folder where the user project is stored, otherwise it should + # be the installation root. + # + attr_reader :client_root + + # @return [Xcodeproj::Project] the user project that this target will + # integrate as identified by the analyzer. + # + attr_reader :user_project + + # @return [Array] the list of the UUIDs of the user targets that + # will be integrated by this target as identified by the analyzer. + # + # @note The target instances are not stored to prevent editing different + # instances. + # + attr_reader :user_target_uuids + + # @return [Hash] Map from configuration name to + # configuration file for the target + # + # @note The configurations are generated by the {TargetInstaller} and + # used by {UserProjectIntegrator} to check for any overridden + # values. + # + attr_reader :xcconfigs + + # @return [Array] The dependencies for this target. + # + attr_reader :pod_targets + + # @return [Array] The aggregate targets whose pods this + # target must be able to import, but will not directly link against. 
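+ # (These typically come from Podfile targets that inherit via
+ # `inherit! :search_paths`.)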
+ #
+ attr_reader :search_paths_aggregate_targets
+
+ # Initialize a new instance
+ #
+ # @param [Sandbox] sandbox @see Target#sandbox
+ # @param [BuildType] build_type @see Target#build_type
+ # @param [Hash{String=>Symbol}] user_build_configurations @see Target#user_build_configurations
+ # @param [Array] archs @see Target#archs
+ # @param [Platform] platform @see Target#platform
+ # @param [TargetDefinition] target_definition @see #target_definition
+ # @param [Pathname] client_root @see #client_root
+ # @param [Xcodeproj::Project] user_project @see #user_project
+ # @param [Array] user_target_uuids @see #user_target_uuids
+ # @param [Hash{String=>Array}] pod_targets_for_build_configuration @see #pod_targets_for_build_configuration
+ #
+ def initialize(sandbox, build_type, user_build_configurations, archs, platform, target_definition, client_root,
+ user_project, user_target_uuids, pod_targets_for_build_configuration)
+ super(sandbox, build_type, user_build_configurations, archs, platform)
+ raise "Can't initialize an AggregateTarget without a TargetDefinition!" if target_definition.nil?
+ raise "Can't initialize an AggregateTarget with an abstract TargetDefinition!" if target_definition.abstract?
+ @target_definition = target_definition
+ @client_root = client_root
+ @user_project = user_project
+ @user_target_uuids = user_target_uuids
+ @pod_targets_for_build_configuration = pod_targets_for_build_configuration
+ @pod_targets = pod_targets_for_build_configuration.values.flatten.uniq
+ @search_paths_aggregate_targets = []
+ @xcconfigs = {}
+ end
+
+ # Merges this aggregate target with additional pod targets that are part of embedded aggregate targets.
+ #
+ # @param [Hash{String=>Array}] embedded_pod_targets_for_build_configuration
+ # The pod targets to merge with.
+ #
+ # @return [AggregateTarget] a new instance of this aggregate target with additional pod targets to be used from
+ # pod targets of embedded aggregate targets.
+ #
+ def merge_embedded_pod_targets(embedded_pod_targets_for_build_configuration)
+ merged = @pod_targets_for_build_configuration.merge(embedded_pod_targets_for_build_configuration) do |_, before, after|
+ (before + after).uniq
+ end
+ AggregateTarget.new(sandbox, build_type, user_build_configurations, archs, platform,
+ target_definition, client_root, user_project, user_target_uuids, merged).tap do |aggregate_target|
+ aggregate_target.search_paths_aggregate_targets.concat(search_paths_aggregate_targets).freeze
+ aggregate_target.mark_application_extension_api_only if application_extension_api_only
+ aggregate_target.mark_build_library_for_distribution if build_library_for_distribution
+ end
+ end
+
+ def build_settings(configuration_name = nil)
+ if configuration_name
+ @build_settings[configuration_name] ||
+ raise(ArgumentError, "#{self} does not contain a build setting for the #{configuration_name.inspect} configuration, only #{@build_settings.keys.inspect}")
+ else
+ @build_settings.each_value.first ||
+ raise(ArgumentError, "#{self} does not contain any build settings")
+ end
+ end
+
+ # @return [Boolean] True if the user_target refers to a
+ # library (framework, static or dynamic lib).
+ #
+ def library?
+ # Without a user_project, we can't say for sure
+ # that this is a library
+ return false if user_project.nil?
+ symbol_types = user_targets.map(&:symbol_type).uniq
+ unless symbol_types.count == 1
+ raise ArgumentError, "Expected single kind of user_target for #{name}. Found #{symbol_types.join(', ')}."
+ end
+ [:framework, :dynamic_library, :static_library].include? symbol_types.first
+ end
+
+ # @return [Boolean] True if the user_target's pods are
+ # for an extension and must be embedded in a host
+ # target, otherwise false.
+ #
+ def requires_host_target?
+ # If we don't have a user_project, then we can't
+ # glean any info about how this target is going to
+ # be integrated, so return false since we can't know
+ # for sure that this target refers to an extension
+ # target that would require a host target
+ return false if user_project.nil?
+ symbol_types = user_targets.map(&:symbol_type).uniq
+ unless symbol_types.count == 1
+ raise ArgumentError, "Expected single kind of user_target for #{name}. Found #{symbol_types.join(', ')}."
+ end
+ EMBED_FRAMEWORKS_IN_HOST_TARGET_TYPES.include?(symbol_types[0])
+ end
+
+ # @return [String] the label for the target.
+ #
+ def label
+ target_definition.label.to_s
+ end
+
+ # @return [Podfile] The podfile which declares the dependency
+ #
+ def podfile
+ target_definition.podfile
+ end
+
+ # @return [Pathname] the path of the user project that this target will
+ # integrate as identified by the analyzer.
+ #
+ def user_project_path
+ user_project.path if user_project
+ end
+
+ # List all user targets that will be integrated by this #target.
+ #
+ # @return [Array]
+ #
+ def user_targets
+ return [] unless user_project
+ user_target_uuids.map do |uuid|
+ native_target = user_project.objects_by_uuid[uuid]
+ unless native_target
+ raise Informative, '[Bug] Unable to find the target with ' \
+ "the `#{uuid}` UUID for the `#{self}` integration library"
+ end
+ native_target
+ end
+ end
+
+ # @param [String] build_configuration The build configuration for which
+ # the pod targets should be returned.
+ #
+ # @return [Array] the pod targets for the given build
+ # configuration.
+ #
+ def pod_targets_for_build_configuration(build_configuration)
+ @pod_targets_for_build_configuration[build_configuration] || []
+ end
+
+ # @return [Array] The specifications used by this aggregate target.
+ #
+ def specs
+ pod_targets.flat_map(&:specs)
+ end
+
+ # @return [Hash{Symbol => Array}] The specifications for each
+ # build configuration.
+ #
+ def specs_by_build_configuration
+ result = {}
+ user_build_configurations.each_key do |build_configuration|
+ result[build_configuration] = pod_targets_for_build_configuration(build_configuration).
+ flat_map(&:specs)
+ end
+ result
+ end
+
+ # @return [Array] The consumers of the Pod.
+ #
+ def spec_consumers
+ specs.map { |spec| spec.consumer(platform) }
+ end
+
+ # @return [Boolean] Whether the target uses Swift code
+ #
+ def uses_swift?
+ pod_targets.any?(&:uses_swift?)
+ end
+
+ # @return [Boolean] Whether the target contains any resources
+ #
+ def includes_resources?
+ !resource_paths_by_config.each_value.all?(&:empty?)
+ end
+
+ # @return [Boolean] Whether the target contains any on demand resources
+ #
+ def includes_on_demand_resources?
+ !on_demand_resources.empty?
+ end
+
+ # @return [Boolean] Whether the target contains frameworks to be embedded into
+ # the user target
+ #
+ def includes_frameworks?
+ !framework_paths_by_config.each_value.all?(&:empty?)
+ end
+
+ # @return [Boolean] Whether the target contains xcframeworks to be embedded into
+ # the user target
+ #
+ def includes_xcframeworks?
+ !xcframeworks_by_config.each_value.all?(&:empty?)
+ end
+
+ # @return [Hash{String => Array}] The vendored dynamic artifacts and framework target
+ # input and output paths grouped by config
+ #
+ def framework_paths_by_config
+ @framework_paths_by_config ||= begin
+ framework_paths_by_config = {}
+ user_build_configurations.each_key do |config|
+ relevant_pod_targets = pod_targets_for_build_configuration(config)
+ framework_paths_by_config[config] = relevant_pod_targets.flat_map do |pod_target|
+ library_specs = pod_target.library_specs.map(&:name)
+ pod_target.framework_paths.values_at(*library_specs).flatten.compact.uniq
+ end
+ end
+ framework_paths_by_config
+ end
+ end
+
+ # @return [Hash{String => Array}] The vendored xcframeworks to be embedded
+ # into the user target, grouped by config
+ #
+ def xcframeworks_by_config
+ @xcframeworks_by_config ||= begin
+ xcframeworks_by_config = {}
+ user_build_configurations.each_key do |config|
+ relevant_pod_targets = pod_targets_for_build_configuration(config)
+ xcframeworks_by_config[config] = relevant_pod_targets.flat_map do |pod_target|
+ library_specs = pod_target.library_specs.map(&:name)
+ pod_target.xcframeworks.values_at(*library_specs).flatten.compact.uniq
+ end
+ end
+ xcframeworks_by_config
+ end
+ end
+
+ # @return [Array] Uniqued On Demand Resources for this target.
+ #
+ # @note On Demand Resources are not separated by config as they are integrated directly into the user's target via
+ # the resources build phase.
+ #
+ def on_demand_resources
+ @on_demand_resources ||= begin
+ pod_targets.flat_map do |pod_target|
+ library_file_accessors = pod_target.file_accessors.select { |fa| fa.spec.library_specification? }
+ library_file_accessors.flat_map(&:on_demand_resources_files)
+ end.uniq
+ end
+ end
+
+ # @return [Hash{String => Array}] Uniqued Resources grouped by config
+ #
+ def resource_paths_by_config
+ @resource_paths_by_config ||= begin
+ relevant_pod_targets = pod_targets.reject do |pod_target|
+ pod_target.should_build? && pod_target.build_as_dynamic_framework?
+ end
+ user_build_configurations.each_key.each_with_object({}) do |config, resources_by_config|
+ targets = relevant_pod_targets & pod_targets_for_build_configuration(config)
+ resources_by_config[config] = targets.flat_map do |pod_target|
+ library_specs = pod_target.library_specs.map(&:name)
+ resource_paths = pod_target.resource_paths.values_at(*library_specs).flatten
+
+ if pod_target.build_as_static_framework?
+ built_product_dir = Pathname.new(pod_target.build_product_path('${BUILT_PRODUCTS_DIR}'))
+ resource_paths = resource_paths.map do |resource_path|
+ extname = File.extname(resource_path)
+ if self.class.resource_extension_compilable?(extname)
+ output_extname = self.class.output_extension_for_resource(extname)
+ output_path_components = Pathname(resource_path).each_filename.select { |component| File.extname(component) == '.lproj' }
+ output_path_components << File.basename(resource_path)
+ built_product_dir.join(*output_path_components).sub_ext(output_extname).to_s
+ else
+ resource_path
+ end
+ end
+ end
+
+ resource_paths << bridge_support_file
+ resource_paths.compact.uniq
+ end
+ end
+ end
+ end
+
+ # @return [Pathname] the path of the bridge support file relative to the
+ # sandbox or `nil` if bridge support is disabled.
+ #
+ def bridge_support_file
+ bridge_support_path.relative_path_from(sandbox.root) if podfile.generate_bridge_support?
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Support files
+
+ # @return [Pathname] The absolute path of the acknowledgements file.
+ #
+ # @note The acknowledgements generators add the extension according to
+ # the file type.
+ #
+ def acknowledgements_basepath
+ support_files_dir + "#{label}-acknowledgements"
+ end
+
+ # @return [Pathname] The absolute path of the copy resources script.
+ #
+ def copy_resources_script_path
+ support_files_dir + "#{label}-resources.sh"
+ end
+
+ # @return [Pathname] The absolute path of the embed frameworks script.
+ #
+ def embed_frameworks_script_path
+ support_files_dir + "#{label}-frameworks.sh"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the copy resources script input file list.
+ #
+ def copy_resources_script_input_files_path(configuration)
+ support_files_dir + "#{label}-resources-#{configuration}-input-files.xcfilelist"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the copy resources script output file list.
+ #
+ def copy_resources_script_output_files_path(configuration)
+ support_files_dir + "#{label}-resources-#{configuration}-output-files.xcfilelist"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the embed frameworks script input file list.
+ #
+ def embed_frameworks_script_input_files_path(configuration)
+ support_files_dir + "#{label}-frameworks-#{configuration}-input-files.xcfilelist"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the embed frameworks script output file list.
+ #
+ def embed_frameworks_script_output_files_path(configuration)
+ support_files_dir + "#{label}-frameworks-#{configuration}-output-files.xcfilelist"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the prepare artifacts script input file list.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_input_files_path(configuration)
+ support_files_dir + "#{label}-artifacts-#{configuration}-input-files.xcfilelist"
+ end
+
+ # @param [String] configuration the configuration this path is for.
+ #
+ # @return [Pathname] The absolute path of the prepare artifacts script output file list.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_output_files_path(configuration)
+ support_files_dir + "#{label}-artifacts-#{configuration}-output-files.xcfilelist"
+ end
+
+ # @return [String] The output file path of the check manifest lock script.
+ #
+ def check_manifest_lock_script_output_file_path
+ "$(DERIVED_FILE_DIR)/#{label}-checkManifestLockResult.txt"
+ end
+
+ # @return [Pathname] The relative path of the Pods directory from user project's directory.
+ #
+ def relative_pods_root_path
+ sandbox.root.relative_path_from(client_root)
+ end
+
+ # @return [String] The xcconfig path of the root from the `$(SRCROOT)`
+ # variable of the user's project.
+ #
+ def relative_pods_root
+ "${SRCROOT}/#{relative_pods_root_path}"
+ end
+
+ # @return [String] The path of the Podfile directory relative to the
+ # root of the user project.
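+ # For a Podfile sitting next to the user project this resolves to
+ # `${SRCROOT}/.` (illustrative).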
+ #
+ def podfile_dir_relative_path
+ podfile_path = target_definition.podfile.defined_in_file
+ return "${SRCROOT}/#{podfile_path.relative_path_from(client_root).dirname}" unless podfile_path.nil?
+ # Fallback to the standard path if the Podfile is not represented by a file.
+ '${PODS_ROOT}/..'
+ end
+
+ # @param [String] config_name The build configuration name to get the xcconfig for
+ # @return [String] The path of the xcconfig file relative to the root of
+ # the user project.
+ #
+ def xcconfig_relative_path(config_name)
+ xcconfig_path(config_name).relative_path_from(client_root).to_s
+ end
+
+ # @return [String] The path of the copy resources script relative to the
+ # root of the Pods project.
+ #
+ def copy_resources_script_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_path)}"
+ end
+
+ # @return [String] The path of the copy resources script input file list
+ # relative to the root of the Pods project.
+ #
+ def copy_resources_script_input_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_input_files_path('${CONFIGURATION}'))}"
+ end
+
+ # @return [String] The path of the copy resources script output file list
+ # relative to the root of the Pods project.
+ #
+ def copy_resources_script_output_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(copy_resources_script_output_files_path('${CONFIGURATION}'))}"
+ end
+
+ # @return [String] The path of the embed frameworks script relative to the
+ # root of the Pods project.
+ #
+ def embed_frameworks_script_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_path)}"
+ end
+
+ # @return [String] The path of the embed frameworks script input file list
+ # relative to the root of the Pods project.
+ #
+ def embed_frameworks_script_input_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_input_files_path('${CONFIGURATION}'))}"
+ end
+
+ # @return [String] The path of the embed frameworks script output file list
+ # relative to the root of the Pods project.
+ #
+ def embed_frameworks_script_output_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(embed_frameworks_script_output_files_path('${CONFIGURATION}'))}"
+ end
+
+ # @return [String] The path of the prepare artifacts script relative to the
+ # root of the Pods project.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_path)}"
+ end
+
+ # @return [String] The path of the prepare artifacts script input file list
+ # relative to the root of the Pods project.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_input_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_input_files_path('${CONFIGURATION}'))}"
+ end
+
+ # @return [String] The path of the prepare artifacts script output file list
+ # relative to the root of the Pods project.
+ #
+ # @deprecated
+ #
+ # @todo Remove in 2.0
+ #
+ def prepare_artifacts_script_output_files_relative_path
+ "${PODS_ROOT}/#{relative_to_pods_root(prepare_artifacts_script_output_files_path('${CONFIGURATION}'))}"
+ end
+
+ private
+
+ # @!group Private Helpers
+ #-------------------------------------------------------------------------#
+
+ # Computes the relative path of a sandboxed file from the `$(PODS_ROOT)`
+ # variable of the Pods project.
+ #
+ # @param [Pathname] path
+ # A relative path from the root of the sandbox.
+ # + # @return [String] The computed path. + # + def relative_to_pods_root(path) + path.relative_path_from(sandbox.root).to_s + end + + def create_build_settings + settings = {} + + user_build_configurations.each do |configuration_name, configuration| + settings[configuration_name] = BuildSettings::AggregateTargetSettings.new(self, configuration_name, :configuration => configuration) + end + + settings + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/build_settings.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/build_settings.rb new file mode 100644 index 0000000..0513479 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/build_settings.rb @@ -0,0 +1,1390 @@ +# frozen_string_literal: true + +module Pod + class Target + # @since 1.5.0 + class BuildSettings + #-------------------------------------------------------------------------# + + # @!group Constants + + # @return [Set] + # The build settings that should be treated as arrays, rather than strings. + # + PLURAL_SETTINGS = %w( + ALTERNATE_PERMISSIONS_FILES + ARCHS + BUILD_VARIANTS + EXCLUDED_SOURCE_FILE_NAMES + FRAMEWORK_SEARCH_PATHS + GCC_PREPROCESSOR_DEFINITIONS + GCC_PREPROCESSOR_DEFINITIONS_NOT_USED_IN_PRECOMPS + HEADER_SEARCH_PATHS + INCLUDED_SOURCE_FILE_NAMES + INFOPLIST_PREPROCESSOR_DEFINITIONS + LD_RUNPATH_SEARCH_PATHS + LIBRARY_SEARCH_PATHS + LOCALIZED_STRING_MACRO_NAMES + OTHER_CFLAGS + OTHER_CPLUSPLUSFLAGS + OTHER_LDFLAGS + OTHER_SWIFT_FLAGS + REZ_SEARCH_PATHS + SECTORDER_FLAGS + SWIFT_ACTIVE_COMPILATION_CONDITIONS + SWIFT_INCLUDE_PATHS + SYSTEM_FRAMEWORK_SEARCH_PATHS + SYSTEM_HEADER_SEARCH_PATHS + USER_HEADER_SEARCH_PATHS + WARNING_CFLAGS + WARNING_LDFLAGS + ).to_set.freeze + + # @return [String] + # The variable for the configuration build directory used when building pod targets. + # + CONFIGURATION_BUILD_DIR_VARIABLE = '${PODS_CONFIGURATION_BUILD_DIR}' + + # @return [String] + # The variable for the configuration intermediate frameworks directory used for building pod targets + # that contain vendored xcframeworks. + # + XCFRAMEWORKS_BUILD_DIR_VARIABLE = '${PODS_XCFRAMEWORKS_BUILD_DIR}' + + #-------------------------------------------------------------------------# + + # @!group DSL + + # Creates a method that calculates a part of the build settings for the {#target}. + # + # @!visibility private + # + # @param [Symbol,String] method_name + # The name of the method to define + # + # @param [Boolean] build_setting + # Whether the method name should be added (upcased) to {.build_setting_names} + # + # @param [Boolean] memoized + # Whether the method should be memoized + # + # @param [Boolean] sorted + # Whether the return value should be sorted + # + # @param [Boolean] uniqued + # Whether the return value should be uniqued + # + # @param [Boolean] compacted + # Whether the return value should be compacted + # + # @param [Boolean] frozen + # Whether the return value should be frozen + # + # @param [Boolean, Symbol] from_search_paths_aggregate_targets + # If truthy, the method from {Aggregate} that should be used to concatenate build settings from + # {::Pod::AggregateTarget#search_paths_aggregate_target} + # + # @param [Symbol] from_pod_targets_to_link + # If truthy, the `_to_import` values from `BuildSettings#pod_targets_to_link` will be concatenated + # + # @param [Block] implementation + # + # @macro [attach] define_build_settings_method + # @!method $1 + # + # The `$1` build setting for the {#target}. 
+ # + # The return value from this method will be: `${1--1}`. + # + def self.define_build_settings_method(method_name, build_setting: false, + memoized: false, sorted: false, uniqued: false, compacted: false, frozen: true, + from_search_paths_aggregate_targets: false, from_pod_targets_to_link: false, + &implementation) + + memoized_key = "#{self}##{method_name}" + + (@build_settings_names ||= Set.new) << method_name.to_s.upcase if build_setting + + raw_method_name = :"_raw_#{method_name}" + define_method(raw_method_name, &implementation) + private(raw_method_name) + + dup_before_freeze = frozen && (from_pod_targets_to_link || from_search_paths_aggregate_targets || uniqued || sorted) + + define_method(method_name) do + if memoized + retval = @__memoized.fetch(memoized_key, :not_found) + return retval if :not_found != retval + end + + retval = send(raw_method_name) + if retval.nil? + @__memoized[memoized_key] = retval if memoized + return + end + + retval = retval.dup if dup_before_freeze && retval.frozen? + + retval.concat(pod_targets_to_link.flat_map { |pod_target| pod_target.build_settings_for_spec(pod_target.root_spec, :configuration => configuration_name).public_send("#{method_name}_to_import") }) if from_pod_targets_to_link + retval.concat(search_paths_aggregate_target_pod_target_build_settings.flat_map(&from_search_paths_aggregate_targets)) if from_search_paths_aggregate_targets + + retval.compact! if compacted + retval.uniq! if uniqued + retval.sort! if sorted + retval.freeze if frozen + + @__memoized[memoized_key] = retval if memoized + + retval + end + end + private_class_method :define_build_settings_method + + # @param [XCFramework] xcframework the xcframework slice that will be copied to the intermediates dir + # + # @return [String] the path to the directory containing the xcframework slice + # + def self.xcframework_intermediate_dir(xcframework) + "#{XCFRAMEWORKS_BUILD_DIR_VARIABLE}/#{xcframework.target_name}" + end + + class << self + #-------------------------------------------------------------------------# + + # @!group Public API + + # @return [Set] a set of all the build settings names that will + # be present in the #xcconfig + # + attr_reader :build_settings_names + end + + #-------------------------------------------------------------------------# + + # @!group Public API + + # @return [Target] + # The target this build settings object is generating build settings for + # + attr_reader :target + + # Initialize a new instance + # + # @param [Target] target + # see {#target} + # + def initialize(target) + @target = target + @__memoized = {} + end + + def initialize_copy(other) + super + @__memoized = {} + end + + # @return [Xcodeproj::Config] + define_build_settings_method :xcconfig, :memoized => true do + settings = add_inherited_to_plural(to_h) + Xcodeproj::Config.new(settings) + end + + alias generate xcconfig + + # Saves the generated xcconfig to the given path + # + # @return [Xcodeproj::Config] + # + # @see #xcconfig + # + # @param [String,Pathname] path + # The path the xcconfig will be saved to + # + def save_as(path) + xcconfig.save_as(path) + end + + #-------------------------------------------------------------------------# + + # @!group Build System + + # @return [String] + define_build_settings_method :pods_build_dir, :build_setting => true do + '${BUILD_DIR}' + end + + # @return [String] + define_build_settings_method :pods_configuration_build_dir, :build_setting => true do + '${PODS_BUILD_DIR}/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)' + end + + 
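# @return [String]
+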
define_build_settings_method :pods_xcframeworks_build_dir, :build_setting => true do + '$(PODS_CONFIGURATION_BUILD_DIR)/XCFrameworkIntermediates' + end + + # @return [String] + define_build_settings_method :use_recursive_script_inputs_in_script_phases, :build_setting => true do + 'YES' + end + + #-------------------------------------------------------------------------# + + # @!group Code Signing + + # @return [String] + define_build_settings_method :code_sign_identity, :build_setting => true do + return unless target.build_as_dynamic? + return unless target.platform.to_sym == :osx + '' + end + + #-------------------------------------------------------------------------# + + # @!group Frameworks + + # @return [Array] + define_build_settings_method :frameworks do + [] + end + + # @return [Array] + define_build_settings_method :weak_frameworks do + [] + end + + # @return [Array] + define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true do + framework_search_paths_to_import_developer_frameworks(frameworks + weak_frameworks) + end + + # @param [Array] frameworks + # The list of framework names + # + # @return [Array] + # the `FRAMEWORK_SEARCH_PATHS` needed to import developer frameworks + def framework_search_paths_to_import_developer_frameworks(frameworks) + if frameworks.include?('XCTest') || frameworks.include?('SenTestingKit') + %w[ $(PLATFORM_DIR)/Developer/Library/Frameworks ] + else + [] + end + end + + #-------------------------------------------------------------------------# + + # @!group Libraries + + # @return [Array] + define_build_settings_method :libraries do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :gcc_preprocessor_definitions, :build_setting => true do + %w( COCOAPODS=1 ) + end + + # @return [Array] + define_build_settings_method :other_cflags, :build_setting => true, :memoized => true do + module_map_files.map { |f| "-fmodule-map-file=#{f}" } + end + + # @return [Array] + define_build_settings_method :module_map_files do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Swift + + # @return [Boolean] + # Whether `OTHER_SWIFT_FLAGS` should be generated when the target + # does not use swift. + # + def other_swift_flags_without_swift? + false + end + + # @return [Array] + define_build_settings_method :other_swift_flags, :build_setting => true, :memoized => true do + return unless target.uses_swift? || other_swift_flags_without_swift? + flags = %w(-D COCOAPODS) + flags.concat module_map_files.flat_map { |f| ['-Xcc', "-fmodule-map-file=#{f}"] } + flags + end + + #-------------------------------------------------------------------------# + + # @!group Linking + + # @return [Boolean] + define_build_settings_method :requires_objc_linker_flag? do + false + end + + # @return [Boolean] + define_build_settings_method :requires_fobjc_arc? do + false + end + + # Xcode 12 turns on this warning by default which is problematic for CocoaPods-generated + # imports which use double-quoted paths. 
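+ # (A generated umbrella header, for instance, imports pod headers as
+ # `#import "SomeHeader.h"`; the header name here is hypothetical, shown
+ # for illustration.)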
+ # @return [Boolean]
+ define_build_settings_method :clang_warn_quoted_include_in_framework_header, :build_setting => true do
+ 'NO'
+ end
+
+ # @return [Array]
+ # the `LD_RUNPATH_SEARCH_PATHS` needed for dynamically linking the {#target}
+ #
+ # @param [Boolean] requires_host_target
+ #
+ # @param [Boolean] test_bundle
+ #
+ def _ld_runpath_search_paths(requires_host_target: false, test_bundle: false, uses_swift: false)
+ paths = []
+ if uses_swift
+ paths << '/usr/lib/swift'
+ paths << '$(PLATFORM_DIR)/Developer/Library/Frameworks' if test_bundle
+ end
+ if target.platform.symbolic_name == :osx
+ paths << "'@executable_path/../Frameworks'"
+ paths << if test_bundle
+ "'@loader_path/../Frameworks'"
+ else
+ "'@loader_path/Frameworks'"
+ end
+ paths << '${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}' if uses_swift
+ else
+ paths << "'@executable_path/Frameworks'"
+ paths << "'@loader_path/Frameworks'"
+ paths << "'@executable_path/../../Frameworks'" if requires_host_target
+ end
+ paths
+ end
+ private :_ld_runpath_search_paths
+
+ # @return [Array]
+ define_build_settings_method :other_ldflags, :build_setting => true, :memoized => true do
+ ld_flags = []
+ ld_flags << '-ObjC' if requires_objc_linker_flag?
+ if requires_fobjc_arc?
+ ld_flags << '-fobjc-arc'
+ end
+ libraries.each { |l| ld_flags << %(-l"#{l}") }
+ frameworks.each { |f| ld_flags << '-framework' << %("#{f}") }
+ weak_frameworks.each { |f| ld_flags << '-weak_framework' << %("#{f}") }
+ ld_flags
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Private Methods
+
+ private
+
+ # @return [Hash{String => String|Array}]
+ def to_h
+ hash = {}
+ self.class.build_settings_names.sort.each do |setting|
+ hash[setting] = public_send(setting.downcase)
+ end
+ hash
+ end
+
+ # @return [Hash{String => String}]
+ def add_inherited_to_plural(hash)
+ Hash[hash.map do |key, value|
+ next [key, '$(inherited)'] if value.nil?
+ if PLURAL_SETTINGS.include?(key)
+ raise ArgumentError, "#{key} is a plural setting, cannot have #{value.inspect} as its value" unless value.is_a? Array
+
+ value = "$(inherited) #{quote_array(value)}"
+ else
+ raise ArgumentError, "#{key} is not a plural setting, cannot have #{value.inspect} as its value" unless value.is_a? String
+ end
+
+ [key, value]
+ end]
+ end
+
+ # @return [Array]
+ #
+ # @param [Array] array
+ #
+ def quote_array(array)
+ array.map do |element|
+ case element
+ when /\A([\w-]+?)=(.+)\z/
+ key = Regexp.last_match(1)
+ value = Regexp.last_match(2)
+ value = %("#{value}") if value =~ /[^\w\d]/
+ %(#{key}=#{value})
+ when /[\$\[\]\ ]/
+ %("#{element}")
+ else
+ element
+ end
+ end.join(' ')
+ end
+
+ # @param [Hash] xcconfig_values_by_consumer_by_key
+ #
+ # @param [#to_s] attribute
+ # The name of the attribute being merged
+ #
+ # @return [Hash]
+ #
+ def merged_xcconfigs(xcconfig_values_by_consumer_by_key, attribute, overriding: {})
+ xcconfig_values_by_consumer_by_key.each_with_object(overriding.dup) do |(key, values_by_consumer), xcconfig|
+ uniq_values = values_by_consumer.values.uniq
+ values_are_bools = uniq_values.all? { |v| v.is_a?(String) && v =~ /\A(yes|no)\z/i }
+ if values_are_bools
+ # Boolean build settings
+ if uniq_values.count > 1
+ UI.warn "Can't merge #{attribute} for pod targets: " \
+ "#{values_by_consumer.keys.map(&:name)}. Boolean build " \
+ "setting #{key} has different values."
+ else
+ xcconfig[key] = uniq_values.first
+ end
+ elsif PLURAL_SETTINGS.include? key
+ # Plural build settings
+ if xcconfig.key?(key)
+ overridden = xcconfig[key]
+ uniq_values.prepend(overridden)
+ end
+ xcconfig[key] = uniq_values.uniq.join(' ')
+ elsif uniq_values.count > 1
+ # Singular build settings
+ UI.warn "Can't merge #{attribute} for pod targets: " \
+ "#{values_by_consumer.keys.map(&:name)}. Singular build " \
+ "setting #{key} has different values."
+ else
+ xcconfig[key] = uniq_values.first
+ end
+ end
+ end
+
+ # Merges the spec-defined xcconfig into the derived xcconfig,
+ # overriding any singular settings and merging plural settings.
+ #
+ # @param [Hash] spec_xcconfig_hash the merged xcconfig defined in the spec.
+ #
+ # @param [Xcodeproj::Config] xcconfig the config to merge into.
+ #
+ # @return [Xcodeproj::Config] the merged config.
+ #
+ def merge_spec_xcconfig_into_xcconfig(spec_xcconfig_hash, xcconfig)
+ plural_configs, singular_configs = spec_xcconfig_hash.partition { |k, _v| PLURAL_SETTINGS.include?(k) }.map { |a| Hash[a] }
+ xcconfig.attributes.merge!(singular_configs)
+ xcconfig.merge!(plural_configs)
+ xcconfig
+ end
+
+ # Filters out pod targets whose `specs` are a subset of
+ # another target's.
+ #
+ # @param [Array] pod_targets
+ #
+ # @return [Array]
+ #
+ def select_maximal_pod_targets(pod_targets)
+ subset_targets = []
+ pod_targets.uniq.group_by(&:pod_name).each do |_pod_name, targets|
+ targets.combination(2) do |a, b|
+ if (a.specs - b.specs).empty?
+ subset_targets << a
+ elsif (b.specs - a.specs).empty?
+ subset_targets << b
+ end
+ end
+ end
+ pod_targets - subset_targets
+ end
+
+ # @param [String] target_name the name of the target this xcframework belongs to
+ #
+ # @param [Pathname,String] path the path to the xcframework bundle
+ #
+ # @return [Xcode::XCFramework] the xcframework at the given path
+ #
+ def load_xcframework(target_name, path)
+ Xcode::XCFramework.new(target_name, path)
+ end
+
+ # A subclass that generates build settings for a {PodTarget}
+ class PodTargetSettings < BuildSettings
+ #-------------------------------------------------------------------------#
+
+ # @!group Public API
+
+ # @see BuildSettings.build_settings_names
+ def self.build_settings_names
+ @build_settings_names | BuildSettings.build_settings_names
+ end
+
+ # @return [Boolean]
+ # whether settings are being generated for a test bundle
+ #
+ attr_reader :test_xcconfig
+ alias test_xcconfig? test_xcconfig
+
+ # @return [Boolean]
+ # whether settings are being generated for an application bundle
+ #
+ attr_reader :app_xcconfig
+ alias app_xcconfig? app_xcconfig
+
+ # @return [Boolean]
+ # whether settings are being generated for a library bundle
+ #
+ attr_reader :library_xcconfig
+ alias library_xcconfig? library_xcconfig
+
+ def non_library_xcconfig?
+ !library_xcconfig?
+ end
+
+ # @return [Specification]
+ # The non-library specification these build settings are for or `nil`.
+ #
+ attr_reader :non_library_spec
+
+ # Initializes a new instance
+ #
+ # @param [PodTarget] target
+ # see {#target}
+ #
+ # @param [Specification] non_library_spec
+ # see {#non_library_spec}
+ #
+ # @param [Symbol] configuration
+ # see {#configuration}
+ #
+ def initialize(target, non_library_spec = nil, configuration: nil)
+ super(target)
+ if @non_library_spec = non_library_spec
+ @test_xcconfig = non_library_spec.test_specification?
+ @app_xcconfig = non_library_spec.app_specification?
+ @xcconfig_spec_type = non_library_spec.spec_type
+ @library_xcconfig = false
+ else
+ @test_xcconfig = @app_xcconfig = false
+ @xcconfig_spec_type = :library
+ @library_xcconfig = true
+ end
+ (@configuration = configuration) || raise("No configuration for #{self}.")
+ end
+
+ # @return [Xcodeproj::Config]
+ define_build_settings_method :xcconfig, :memoized => true do
+ xcconfig = super()
+ merge_spec_xcconfig_into_xcconfig(merged_pod_target_xcconfigs, xcconfig)
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Paths
+
+ # @return [String]
+ define_build_settings_method :pods_root, :build_setting => true do
+ '${SRCROOT}'
+ end
+
+ # @return [String]
+ define_build_settings_method :pods_target_srcroot, :build_setting => true do
+ target.pod_target_srcroot
+ end
+
+ # @return [String]
+ define_build_settings_method :pods_development_language, :build_setting => true do
+ '${DEVELOPMENT_LANGUAGE}'
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Frameworks
+
+ # @return [Array]
+ define_build_settings_method :consumer_frameworks, :memoized => true do
+ spec_consumers.flat_map(&:frameworks)
+ end
+
+ # @return [Array]
+ define_build_settings_method :frameworks, :memoized => true, :sorted => true, :uniqued => true do
+ return [] if target.build_as_static? && library_xcconfig?
+
+ frameworks = []
+ frameworks.concat consumer_frameworks
+ if library_xcconfig?
+ # We know that this library target is being built dynamically based
+ # on the guard above, so include any vendored static frameworks and vendored xcframeworks.
+ if target.should_build?
+ frameworks.concat vendored_static_frameworks.map { |l| File.basename(l, '.framework') }
+ frameworks.concat vendored_xcframeworks.
+ select { |xcf| xcf.build_type.static_framework? }.
+ map(&:name).
+ uniq
+
+ # Include direct dynamic dependencies in the linker flags. We used to add those in the 'Link Binary With Libraries'
+ # phase but we no longer do since we cannot differentiate between debug or release configurations within
+ # that phase.
+ frameworks.concat target.dependent_targets_by_config[@configuration].flat_map { |pt| pt.build_settings[@configuration].dynamic_frameworks_to_import }
+ else
+ # Also include any vendored dynamic frameworks of dependencies.
+ frameworks.concat dependent_targets.reject(&:should_build?).flat_map { |pt| pt.build_settings[@configuration].dynamic_frameworks_to_import }
+ end
+ else
+ frameworks.concat dependent_targets_to_link.flat_map { |pt| pt.build_settings[@configuration].frameworks_to_import }
+ end
+
+ frameworks
+ end
+
+ # @return [Array]
+ define_build_settings_method :static_frameworks_to_import, :memoized => true do
+ static_frameworks_to_import = []
+ static_frameworks_to_import.concat vendored_static_frameworks.map { |f| File.basename(f, '.framework') } unless target.should_build? && target.build_as_dynamic?
+ unless target.should_build? && target.build_as_dynamic?
+ static_frameworks_to_import.concat vendored_xcframeworks.
+ select { |xcf| xcf.build_type.static_framework? }.
+ map(&:name).
+ uniq
+ end
+ static_frameworks_to_import << target.product_basename if target.should_build? && target.build_as_static_framework?
+ static_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :dynamic_frameworks_to_import, :memoized => true do + dynamic_frameworks_to_import = vendored_dynamic_frameworks.map { |f| File.basename(f, '.framework') } + dynamic_frameworks_to_import.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.dynamic_framework? }. + map(&:name). + uniq + dynamic_frameworks_to_import << target.product_basename if target.should_build? && target.build_as_dynamic_framework? + dynamic_frameworks_to_import.concat consumer_frameworks + dynamic_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :weak_frameworks, :memoized => true do + return [] if target.build_as_static? && library_xcconfig? + + weak_frameworks = spec_consumers.flat_map(&:weak_frameworks) + weak_frameworks.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].weak_frameworks_to_import } + weak_frameworks + end + + # @return [Array] + define_build_settings_method :frameworks_to_import, :memoized => true, :sorted => true, :uniqued => true do + static_frameworks_to_import + dynamic_frameworks_to_import + end + + # @return [Array] + define_build_settings_method :weak_frameworks_to_import, :memoized => true, :sorted => true, :uniqued => true do + spec_consumers.flat_map(&:weak_frameworks) + end + + # @return [Array] + define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + paths = super().dup + paths.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].framework_search_paths_to_import } + paths.concat framework_search_paths_to_import + paths.delete(target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)) if library_xcconfig? + paths + end + + # @return [String] + define_build_settings_method :framework_header_search_path, :memoized => true do + return unless target.build_as_framework? + "#{target.build_product_path}/Headers" + end + + # @return [Array] + define_build_settings_method :vendored_framework_search_paths, :memoized => true do + search_paths = [] + search_paths.concat file_accessors. + flat_map(&:vendored_frameworks). + map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + xcframework_intermediates = vendored_xcframeworks. + select { |xcf| xcf.build_type.framework? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) }. + uniq + search_paths.concat xcframework_intermediates + search_paths + end + + # @return [Array] + define_build_settings_method :framework_search_paths_to_import, :memoized => true do + paths = framework_search_paths_to_import_developer_frameworks(consumer_frameworks) + paths.concat vendored_framework_search_paths + return paths unless target.build_as_framework? && target.should_build? 
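+ # When the pod itself builds as a framework, its own configuration
+ # build dir must be searchable as well (appended below).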
+
+ paths + [target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)]
+ end
+
+ # @return [Array]
+ define_build_settings_method :vendored_static_frameworks, :memoized => true do
+ file_accessors.flat_map(&:vendored_static_frameworks)
+ end
+
+ # @return [Array]
+ define_build_settings_method :vendored_dynamic_frameworks, :memoized => true do
+ file_accessors.flat_map(&:vendored_dynamic_frameworks)
+ end
+
+ # @return [Array]
+ define_build_settings_method :vendored_xcframeworks, :memoized => true do
+ file_accessors.flat_map do |file_accessor|
+ file_accessor.vendored_xcframeworks.map { |path| load_xcframework(file_accessor.spec.name, path) }
+ end
+ end
+
+ # @return [Array]
+ define_build_settings_method :system_framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do
+ return ['$(PLATFORM_DIR)/Developer/Library/Frameworks'] if should_apply_xctunwrap_fix?
+ []
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Libraries
+
+ # Converts an array of library path references to just the names to use to
+ # link each library, e.g. from '/path/to/libSomething.a' to 'Something'
+ #
+ # @param [Array] libraries
+ #
+ # @return [Array]
+ #
+ def linker_names_from_libraries(libraries)
+ libraries.map { |l| File.basename(l, File.extname(l)).sub(/\Alib/, '') }
+ end
+
+ # @return [Array]
+ define_build_settings_method :libraries, :memoized => true, :sorted => true, :uniqued => true do
+ return [] if library_xcconfig? && target.build_as_static?
+
+ libraries = []
+ if non_library_xcconfig? || target.build_as_dynamic?
+ libraries.concat linker_names_from_libraries(vendored_static_libraries)
+ libraries.concat libraries_to_import
+ xcframework_libraries = vendored_xcframeworks.
+ select { |xcf| xcf.build_type.static_library? }.
+ flat_map { |xcf| linker_names_from_libraries([xcf.slices.first.binary_path]) }.
+ uniq
+ libraries.concat xcframework_libraries
+ end
+ if non_library_xcconfig?
+ libraries.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].dynamic_libraries_to_import }
+ libraries.concat dependent_targets_to_link.flat_map { |pt| pt.build_settings[@configuration].static_libraries_to_import }
+ end
+ libraries
+ end
+
+ # @return [Array]
+ define_build_settings_method :static_libraries_to_import, :memoized => true do
+ static_libraries_to_import = []
+ unless target.should_build? && target.build_as_dynamic?
+ static_libraries_to_import.concat linker_names_from_libraries(vendored_static_libraries)
+ xcframework_libraries = vendored_xcframeworks.
+ select { |xcf| xcf.build_type.static_library? }.
+ flat_map { |xcf| linker_names_from_libraries([xcf.slices.first.binary_path]) }.
+ uniq
+ static_libraries_to_import.concat linker_names_from_libraries(xcframework_libraries)
+ end
+ static_libraries_to_import << target.product_basename if target.should_build? && target.build_as_static_library?
+ static_libraries_to_import
+ end
+
+ # @return [Array]
+ define_build_settings_method :dynamic_libraries_to_import, :memoized => true do
+ dynamic_libraries_to_import = linker_names_from_libraries(vendored_dynamic_libraries)
+ dynamic_libraries_to_import.concat spec_consumers.flat_map(&:libraries)
+ dynamic_libraries_to_import << target.product_basename if target.should_build? && target.build_as_dynamic_library?
+ dynamic_libraries_to_import + end + + # @return [Array] + define_build_settings_method :libraries_to_import, :memoized => true, :sorted => true, :uniqued => true do + static_libraries_to_import + dynamic_libraries_to_import + end + + # @return [Array] + define_build_settings_method :library_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + library_search_paths = should_apply_xctunwrap_fix? ? ['$(PLATFORM_DIR)/Developer/usr/lib'] : [] + return library_search_paths if library_xcconfig? && target.build_as_static? + + library_search_paths.concat library_search_paths_to_import.dup + library_search_paths.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_dynamic_library_search_paths } + if library_xcconfig? + library_search_paths.delete(target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)) + else + library_search_paths.concat(dependent_targets.flat_map { |pt| pt.build_settings[@configuration].library_search_paths_to_import }) + end + + library_search_paths + end + + # @return [Array] + define_build_settings_method :vendored_static_libraries, :memoized => true do + file_accessors.flat_map(&:vendored_static_libraries) + end + + # @return [Array] + define_build_settings_method :vendored_dynamic_libraries, :memoized => true do + file_accessors.flat_map(&:vendored_dynamic_libraries) + end + + # @return [Array] + define_build_settings_method :vendored_static_library_search_paths, :memoized => true do + paths = vendored_static_libraries.map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + paths.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.static_library? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) } + paths + end + + # @return [Array] + define_build_settings_method :vendored_dynamic_library_search_paths, :memoized => true do + paths = vendored_dynamic_libraries.map { |f| File.join '${PODS_ROOT}', f.dirname.relative_path_from(target.sandbox.root) } + paths.concat vendored_xcframeworks. + select { |xcf| xcf.build_type.dynamic_library? }. + map { |xcf| BuildSettings.xcframework_intermediate_dir(xcf) } + paths + end + + # @return [Array] + define_build_settings_method :library_search_paths_to_import, :memoized => true do + search_paths = vendored_static_library_search_paths + vendored_dynamic_library_search_paths + if target.uses_swift? || other_swift_flags_without_swift? + search_paths << '/usr/lib/swift' + search_paths << '${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}' + search_paths << '$(PLATFORM_DIR)/Developer/Library/Frameworks' if test_xcconfig? + end + return search_paths if target.build_as_framework? || !target.should_build? + + search_paths << target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE) + end + + #-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :module_map_files, :memoized => true do + dependent_targets.map { |pt| pt.build_settings[@configuration].module_map_file_to_import }.compact.sort + end + + # @return [Array] + define_build_settings_method :module_map_file_to_import, :memoized => true do + return unless target.should_build? + return if target.build_as_framework? # framework module maps are automatically discovered + return unless target.defines_module? + + if target.uses_swift? 
+ # for swift, we have a custom build phase that copies in the module map, appending the .Swift module
+ "${PODS_CONFIGURATION_BUILD_DIR}/#{target.label}/#{target.product_module_name}.modulemap"
+ else
+ "${PODS_ROOT}/#{target.module_map_path.relative_path_from(target.sandbox.root)}"
+ end
+ end
+
+ # @return [Array]
+ define_build_settings_method :header_search_paths, :build_setting => true, :memoized => true, :sorted => true do
+ paths = target.header_search_paths(:include_dependent_targets_for_test_spec => test_xcconfig? && non_library_spec, :include_dependent_targets_for_app_spec => app_xcconfig? && non_library_spec, :configuration => @configuration)
+
+ dependent_vendored_xcframeworks = []
+ dependent_vendored_xcframeworks.concat vendored_xcframeworks
+ dependent_vendored_xcframeworks.concat dependent_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_xcframeworks }
+ paths.concat dependent_vendored_xcframeworks.
+ select { |xcf| xcf.build_type.static_library? }.
+ map { |xcf| "#{BuildSettings.xcframework_intermediate_dir(xcf)}/Headers" }.
+ compact
+ paths
+ end
+
+ # @return [Array]
+ define_build_settings_method :public_header_search_paths, :memoized => true, :sorted => true do
+ target.header_search_paths(:include_dependent_targets_for_test_spec => test_xcconfig? && non_library_spec, :include_dependent_targets_for_app_spec => app_xcconfig? && non_library_spec, :include_private_headers => false, :configuration => @configuration)
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Swift
+
+ # @see BuildSettings#other_swift_flags_without_swift?
+ def other_swift_flags_without_swift?
+ return false if library_xcconfig?
+
+ target.uses_swift_for_spec?(non_library_spec)
+ end
+
+ # @return [Array]
+ define_build_settings_method :other_swift_flags, :build_setting => true, :memoized => true do
+ return unless target.uses_swift? || other_swift_flags_without_swift?
+
+ flags = super()
+ flags << '-suppress-warnings' if target.inhibit_warnings? && library_xcconfig?
+ if !target.build_as_framework? && target.defines_module? && library_xcconfig?
+ flags.concat %w( -import-underlying-module -Xcc -fmodule-map-file=${SRCROOT}/${MODULEMAP_FILE} )
+ end
+ flags
+ end
+
+ # @return [Array]
+ define_build_settings_method :swift_include_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do
+ paths = dependent_targets.flat_map { |pt| pt.build_settings[@configuration].swift_include_paths_to_import }
+ paths.concat swift_include_paths_to_import if non_library_xcconfig?
+ vendored_static_library_search_paths = dependent_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_static_library_search_paths }
+ paths.concat vendored_static_library_search_paths
+ paths.concat ['$(PLATFORM_DIR)/Developer/usr/lib'] if should_apply_xctunwrap_fix?
+ paths
+ end
+
+ # @return [Array]
+ define_build_settings_method :swift_include_paths_to_import, :memoized => true do
+ return [] unless target.uses_swift? && !target.build_as_framework?
+
+ [target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE)]
+ end
+
+ #-------------------------------------------------------------------------#
+
+ # @!group Linking
+
+ # @return [Boolean] whether the `-ObjC` linker flag is required.
+ #
+ # @note this is only true when generating build settings for a test
+ # or app bundle
+ #
+ def requires_objc_linker_flag?
+ test_xcconfig? || app_xcconfig?
+ end
+
+ # @return [Boolean] whether the `-fobjc-arc` linker flag is required.
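+ # (Driven by the Podfile's `set_arc_compatibility_flag!` option, as
+ # checked below.)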
+ # + define_build_settings_method :requires_fobjc_arc?, :memoized => true do + target.podfile.set_arc_compatibility_flag? && + file_accessors.any? { |fa| fa.spec_consumer.requires_arc? } + end + + # @return [Array] + define_build_settings_method :ld_runpath_search_paths, :build_setting => true, :memoized => true do + return if library_xcconfig? + _ld_runpath_search_paths(:test_bundle => test_xcconfig?, + :uses_swift => other_swift_flags_without_swift? || dependent_targets.any?(&:uses_swift?)) + end + + #-------------------------------------------------------------------------# + + # @!group Packaging + + # @return [String] + define_build_settings_method :skip_install, :build_setting => true do + 'YES' + end + + # @return [String] + define_build_settings_method :product_bundle_identifier, :build_setting => true do + 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}' + end + + # @return [String] + define_build_settings_method :configuration_build_dir, :build_setting => true, :memoized => true do + return if non_library_xcconfig? + target.configuration_build_dir(CONFIGURATION_BUILD_DIR_VARIABLE) + end + + # @return [String] + define_build_settings_method :application_extension_api_only, :build_setting => true, :memoized => true do + target.application_extension_api_only ? 'YES' : nil + end + + # @return [String] + define_build_settings_method :build_library_for_distribution, :build_setting => true, :memoized => true do + target.build_library_for_distribution ? 'YES' : nil + end + + #-------------------------------------------------------------------------# + + # @!group Target Properties + + # @return [Array] + define_build_settings_method :dependent_targets, :memoized => true do + select_maximal_pod_targets( + if test_xcconfig? + target.dependent_targets_for_test_spec(non_library_spec, :configuration => @configuration) + elsif app_xcconfig? + target.dependent_targets_for_app_spec(non_library_spec, :configuration => @configuration) + else + target.recursive_dependent_targets(:configuration => @configuration) + end, + ) + end + + # @return [Array] + define_build_settings_method :dependent_targets_to_link, :memoized => true do + if test_xcconfig? + # we're embedding into an app defined by an app spec + host_targets = target.app_host_dependent_targets_for_spec(non_library_spec, :configuration => @configuration) + dependent_targets - host_targets + else + dependent_targets + end + end + + # Returns the +pod_target_xcconfig+ for the pod target and its spec + # consumers grouped by keys + # + # @return [Hash{String,Hash{Target,String}] + # + def pod_target_xcconfig_values_by_consumer_by_key + spec_consumers.each_with_object({}) do |spec_consumer, hash| + spec_consumer.pod_target_xcconfig.each do |k, v| + (hash[k] ||= {})[spec_consumer] = v + end + end + end + + # Merges the +pod_target_xcconfig+ for all pod targets into a + # single hash and warns on conflicting definitions. + # + # @return [Hash{String, String}] + # + define_build_settings_method :merged_pod_target_xcconfigs, :memoized => true do + merged_xcconfigs(pod_target_xcconfig_values_by_consumer_by_key, :pod_target_xcconfig, + :overriding => non_library_xcconfig? ? target.build_settings[@configuration].merged_pod_target_xcconfigs : {}) + end + + # @return [Array] + define_build_settings_method :file_accessors, :memoized => true do + if non_library_xcconfig? 
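+            # test and app spec settings only see the file accessors that
+            # belong to that one non-library spec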
+            target.file_accessors.select { |fa| non_library_spec == fa.spec }
+          else
+            target.file_accessors.select { |fa| fa.spec.spec_type == @xcconfig_spec_type }
+          end
+        end
+
+        # @return [Array<Specification::Consumer>]
+        define_build_settings_method :spec_consumers, :memoized => true do
+          if non_library_xcconfig?
+            target.spec_consumers.select { |sc| non_library_spec == sc.spec }
+          else
+            target.spec_consumers.select { |sc| sc.spec.spec_type == @xcconfig_spec_type }
+          end
+        end
+
+        # Xcode 11 causes an issue with frameworks or libraries before 12.2 deployment target that link or are part of
+        # test bundles that use XCTUnwrap. Apple has provided an official workaround for this.
+        #
+        # @see https://developer.apple.com/documentation/xcode_release_notes/xcode_11_release_notes
+        #
+        # @return [Boolean] Whether to apply the fix or not.
+        #
+        define_build_settings_method :should_apply_xctunwrap_fix?, :memoized => true do
+          library_xcconfig? &&
+            target.platform.name == :ios &&
+            Version.new(target.platform.deployment_target) < Version.new('12.2') &&
+            (frameworks_to_import + weak_frameworks_to_import).uniq.include?('XCTest')
+        end
+
+        #-------------------------------------------------------------------------#
+      end
+
+      # A subclass that generates build settings for an `AggregateTarget`.
+      class AggregateTargetSettings < BuildSettings
+        #-------------------------------------------------------------------------#
+
+        # @!group Public API
+
+        # @see BuildSettings.build_settings_names
+        def self.build_settings_names
+          @build_settings_names | BuildSettings.build_settings_names
+        end
+
+        # @return [Symbol]
+        #         The build configuration these settings will be used for
+        attr_reader :configuration_name
+
+        # Initializes a new instance
+        #
+        # @param [AggregateTarget] target
+        #        see {#target}
+        #
+        # @param [Symbol] configuration_name
+        #        see {#configuration_name}
+        #
+        def initialize(target, configuration_name, configuration: nil)
+          super(target)
+          @configuration_name = configuration_name
+          (@configuration = configuration) || raise("No configuration for #{self}.")
+        end
+
+        # @return [Xcodeproj::Config] xcconfig
+        define_build_settings_method :xcconfig, :memoized => true do
+          xcconfig = super()
+          merge_spec_xcconfig_into_xcconfig(merged_user_target_xcconfigs, xcconfig)
+        end
+
+        #-------------------------------------------------------------------------#
+
+        # @!group Paths
+
+        # @return [String]
+        define_build_settings_method :pods_podfile_dir_path, :build_setting => true, :memoized => true do
+          target.podfile_dir_relative_path
+        end
+
+        # @return [String]
+        define_build_settings_method :pods_root, :build_setting => true, :memoized => true do
+          target.relative_pods_root
+        end
+
+        #-------------------------------------------------------------------------#
+
+        # @!group Frameworks
+
+        # @return [Array<String>]
+        define_build_settings_method :frameworks, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :dynamic_frameworks_to_import do
+          []
+        end
+
+        # @return [Array<String>]
+        define_build_settings_method :weak_frameworks, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :weak_frameworks do
+          []
+        end
+
+        # @return [Array<String>]
+        define_build_settings_method :framework_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :framework_search_paths_to_import do
+          []
+        end
+
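+        # A hypothetical usage sketch (receiver and configuration name are
+        # assumed for illustration, not prescribed by this file): aggregate
+        # settings are resolved per user build configuration, e.g.
+        #
+        #   settings = aggregate_target.build_settings('Debug')
+        #   settings.framework_search_paths # => merged from all pod targets,
+        #                                   #    sorted and de-duplicated
+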
#-------------------------------------------------------------------------# + + # @!group Libraries + + # @return [Array] + define_build_settings_method :libraries, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :dynamic_libraries_to_import do + [] + end + + # @return [Array] + define_build_settings_method :library_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :vendored_dynamic_library_search_paths do + [] + end + + #-------------------------------------------------------------------------# + + # @!group Clang + + # @return [Array] + define_build_settings_method :header_search_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true do + paths = [] + + if !target.build_as_framework? || !pod_targets.all?(&:should_build?) + paths.concat target.sandbox.public_headers.search_paths(target.platform) + end + + # Make frameworks headers discoverable with any syntax (quotes, + # brackets, @import, etc.) + paths.concat pod_targets. + select { |pt| pt.build_as_framework? && pt.should_build? }. + map { |pt| pt.build_settings[@configuration].framework_header_search_path } + + xcframework_library_headers = pod_targets.flat_map { |pt| pt.build_settings[@configuration].vendored_xcframeworks }. + select { |xcf| xcf.build_type.static_library? }. + map { |xcf| "#{BuildSettings.xcframework_intermediate_dir(xcf)}/Headers" }. + compact + + paths.concat xcframework_library_headers + + paths.concat target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).header_search_paths } + + paths + end + + # @return [Array] + define_build_settings_method :other_cflags, :build_setting => true, :memoized => true do + flags = super() + + pod_targets_inhibiting_warnings = pod_targets.select(&:inhibit_warnings?) + + silenced_headers = [] + silenced_frameworks = [] + pod_targets_inhibiting_warnings.each do |pt| + if pt.build_as_framework? && pt.should_build? + silenced_headers.append pt.build_settings[@configuration].framework_header_search_path + else + silenced_headers.concat pt.build_settings[@configuration].public_header_search_paths + end + silenced_frameworks.concat pt.build_settings[@configuration].framework_search_paths_to_import + end + + flags += silenced_headers.uniq.flat_map { |p| ['-isystem', p] } + flags += silenced_frameworks.uniq.flat_map { |p| ['-iframework', p] } + + flags + end + + # @return [Array] + define_build_settings_method :module_map_files, :memoized => true, :sorted => true, :uniqued => true, :compacted => true, :from_search_paths_aggregate_targets => :module_map_file_to_import do + pod_targets.map { |pt| pt.build_settings[@configuration].module_map_file_to_import } + end + + #-------------------------------------------------------------------------# + + # @!group Swift + + # @see BuildSettings#other_swift_flags_without_swift? + def other_swift_flags_without_swift? + module_map_files.any? + end + + # @return [Array] + define_build_settings_method :swift_include_paths, :build_setting => true, :memoized => true, :sorted => true, :uniqued => true, :from_pod_targets_to_link => true, :from_search_paths_aggregate_targets => :swift_include_paths_to_import do + [] + end + + # @return [String] + define_build_settings_method :always_embed_swift_standard_libraries, :build_setting => true, :memoized => true do + return unless must_embed_swift? 
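+          # Swift 2.3 (EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION, defined below)
+          # is the cutoff: this setting covers 2.3 and later, while older
+          # versions fall back to EMBEDDED_CONTENT_CONTAINS_SWIFT instead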
+ return if target_swift_version < EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + 'YES' + end + + # @return [String] + define_build_settings_method :embedded_content_contains_swift, :build_setting => true, :memoized => true do + return unless must_embed_swift? + return if target_swift_version >= EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + 'YES' + end + + # @return [Boolean] + define_build_settings_method :must_embed_swift?, :memoized => true do + !target.requires_host_target? && pod_targets.any?(&:uses_swift?) + end + + #-------------------------------------------------------------------------# + + # @!group Linking + + # @return [Array] + define_build_settings_method :ld_runpath_search_paths, :build_setting => true, :memoized => true, :uniqued => true do + return unless pod_targets.any?(&:build_as_dynamic?) || any_vendored_dynamic_artifacts? + symbol_type = target.user_targets.map(&:symbol_type).uniq.first + test_bundle = symbol_type == :octest_bundle || symbol_type == :unit_test_bundle || symbol_type == :ui_test_bundle + _ld_runpath_search_paths(:requires_host_target => target.requires_host_target?, :test_bundle => test_bundle, + :uses_swift => pod_targets.any?(&:uses_swift?)) + end + + # @return [Boolean] + define_build_settings_method :any_vendored_dynamic_artifacts?, :memoized => true do + pod_targets.any? do |pt| + pt.file_accessors.any? do |fa| + !fa.vendored_dynamic_artifacts.empty? || !fa.vendored_dynamic_xcframeworks.empty? + end + end + end + + # @return [Boolean] + define_build_settings_method :any_vendored_static_artifacts?, :memoized => true do + pod_targets.any? do |pt| + pt.file_accessors.any? do |fa| + !fa.vendored_static_artifacts.empty? + end + end + end + + # @return [Boolean] + define_build_settings_method :requires_objc_linker_flag?, :memoized => true do + pod_targets.any?(&:build_as_static?) || + any_vendored_static_artifacts? + end + + # @return [Boolean] + define_build_settings_method :requires_fobjc_arc?, :memoized => true do + target.podfile.set_arc_compatibility_flag? && + target.spec_consumers.any?(&:requires_arc?) + end + + #-------------------------------------------------------------------------# + + # @!group Target Properties + + # @return [Version] the SWIFT_VERSION of the target being integrated + # + define_build_settings_method :target_swift_version, :memoized => true, :frozen => false do + swift_version = target.target_definition.swift_version + swift_version = nil if swift_version.blank? + Version.new(swift_version) + end + + EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION = Version.new('2.3') + private_constant :EMBED_STANDARD_LIBRARIES_MINIMUM_VERSION + + # Returns the {PodTarget}s which are active for the current + # configuration name. 
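+        # (for example, a pod whitelisted via
+        # `pod 'Foo', :configurations => ['Debug']` is excluded from the
+        # `Release` settings; `Foo` is a placeholder name)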
+ # + # @return [Array] + # + define_build_settings_method :pod_targets, :memoized => true do + target.pod_targets_for_build_configuration(configuration_name) + end + + # @return [Array] + define_build_settings_method :pod_targets_to_link, :memoized => true do + pod_targets - + target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).pod_targets_to_link } + end + + # @return [Array] + define_build_settings_method :search_paths_aggregate_target_pod_target_build_settings, :memoized => true, :uniqued => true do + pod_targets = target.search_paths_aggregate_targets.flat_map { |at| at.build_settings(configuration_name).pod_targets } + pod_targets = select_maximal_pod_targets(pod_targets) + pod_targets.map { |pt| pt.build_settings[@configuration] } + end + + # Returns the +user_target_xcconfig+ for all pod targets and their spec + # consumers grouped by keys + # + # @return [Hash{String,Hash{Target,String}] + # + def user_target_xcconfig_values_by_consumer_by_key + targets = (pod_targets + target.search_paths_aggregate_targets.flat_map(&:pod_targets)).uniq + targets.each_with_object({}) do |target, hash| + target.spec_consumers.each do |spec_consumer| + spec_consumer.user_target_xcconfig.each do |k, v| + # TODO: Need to decide how we are going to ensure settings like these + # are always excluded from the user's project. + # + # See https://github.com/CocoaPods/CocoaPods/issues/1216 + next if k == 'USE_HEADERMAP' + (hash[k] ||= {})[spec_consumer] = v + end + end + end + end + + # Merges the +user_target_xcconfig+ for all pod targets into a + # single hash and warns on conflicting definitions. + # + # @return [Hash{String, String}] + # + define_build_settings_method :merged_user_target_xcconfigs, :memoized => true do + merged_xcconfigs(user_target_xcconfig_values_by_consumer_by_key, :user_target_xcconfig) + end + + #-------------------------------------------------------------------------# + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/pod_target.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/pod_target.rb new file mode 100644 index 0000000..5686145 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/target/pod_target.rb @@ -0,0 +1,1168 @@ +require 'cocoapods/xcode/framework_paths' +require 'cocoapods/xcode/xcframework' + +module Pod + # Stores the information relative to the target used to compile a single Pod. + # A pod can have one or more activated spec, subspecs and test specs. + # + class PodTarget < Target + # @return [Array] the spec, subspecs and test specs of the target. + # + attr_reader :specs + + # @return [Array] All of the test specs within this target. + # Subset of #specs. + # + attr_reader :test_specs + + # @return [Array] All of the specs within this target that are library specs. + # Subset of #specs. + # + attr_reader :library_specs + + # @return [Array] All of the specs within this target that are app specs. + # Subset of #specs. + # + attr_reader :app_specs + + # @return [Array] the target definitions of the Podfile + # that generated this target. + # + attr_reader :target_definitions + + # @return [Array] the file accessors for the + # specifications of this target. + # + attr_reader :file_accessors + + # @return [String] the suffix used for this target when deduplicated. May be `nil`. + # + # @note This affects the value returned by #configuration_build_dir + # and accessors relying on this as #build_product_path. 
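+    # (e.g. a copy scoped to the `Pods-App` target definition is labelled
+    # `PodName-Pods-App`; see #label. Names are illustrative.)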
+ # + attr_reader :scope_suffix + + # @return [HeadersStore] the header directory for the target. + # + attr_reader :build_headers + + # @return [Array] the targets that this target has a dependency + # upon. + # + attr_reader :dependent_targets + attr_reader :dependent_targets_by_config + + # @deprecated + def dependent_targets=(dependent_targets) + @dependent_targets = dependent_targets + @dependent_targets_by_config = { :debug => dependent_targets, :release => dependent_targets } + end + + def dependent_targets_by_config=(dependent_targets_by_config) + @dependent_targets_by_config = dependent_targets_by_config + @dependent_targets = dependent_targets_by_config.each_value.reduce([], &:|) + end + + # @return [Hash{String=>Array}] all target dependencies by test spec name. + # + attr_reader :test_dependent_targets_by_spec_name + attr_reader :test_dependent_targets_by_spec_name_by_config + + # @deprecated + def test_dependent_targets_by_spec_name=(test_dependent_targets_by_spec_name) + @test_dependent_targets_by_spec_name = test_dependent_targets_by_spec_name + @test_dependent_targets_by_spec_name_by_config = Hash[test_dependent_targets_by_spec_name.map do |k, v| + [k, { :debug => v, :release => v }] + end] + end + + def test_dependent_targets_by_spec_name_by_config=(test_dependent_targets_by_spec_name_by_config) + @test_dependent_targets_by_spec_name_by_config = test_dependent_targets_by_spec_name_by_config + @test_dependent_targets_by_spec_name = Hash[test_dependent_targets_by_spec_name_by_config.map do |k, v| + [k, v.each_value.reduce(Set.new, &:|).to_a] + end] + end + + # @return [Hash{String=>Array}] all target dependencies by app spec name. + # + attr_reader :app_dependent_targets_by_spec_name + attr_reader :app_dependent_targets_by_spec_name_by_config + + # @deprecated + def app_dependent_targets_by_spec_name=(app_dependent_targets_by_spec_name) + @app_dependent_targets_by_spec_name = app_dependent_targets_by_spec_name + @app_dependent_targets_by_spec_name_by_config = Hash[app_dependent_targets_by_spec_name.map do |k, v| + [k, { :debug => v, :release => v }] + end] + end + + def app_dependent_targets_by_spec_name_by_config=(app_dependent_targets_by_spec_name_by_config) + @app_dependent_targets_by_spec_name_by_config = app_dependent_targets_by_spec_name_by_config + @app_dependent_targets_by_spec_name = Hash[app_dependent_targets_by_spec_name_by_config.map do |k, v| + [k, v.each_value.reduce(Set.new, &:|).to_a] + end] + end + + # @return [Hash{Specification => (Specification,PodTarget)}] tuples of app specs and pod targets by test spec. + # + attr_accessor :test_app_hosts_by_spec + + # @return [Hash{String => BuildSettings}] the test spec build settings for this target. + # + attr_reader :test_spec_build_settings + attr_reader :test_spec_build_settings_by_config + + # @return [Hash{String => BuildSettings}] the app spec build settings for this target. + # + attr_reader :app_spec_build_settings + attr_reader :app_spec_build_settings_by_config + + # @return [String] the Swift version for this target. 
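+    # (a plain version string such as `'5.0'`; example value)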
+ # + attr_reader :swift_version + + # Initialize a new instance + # + # @param [Sandbox] sandbox @see Target#sandbox + # @param [BuildType] build_type @see Target#build_type + # @param [Hash{String=>Symbol}] user_build_configurations @see Target#user_build_configurations + # @param [Array] archs @see Target#archs + # @param [Platform] platform @see Target#platform + # @param [Array] specs @see #specs + # @param [Array] target_definitions @see #target_definitions + # @param [Array] file_accessors @see #file_accessors + # @param [String] scope_suffix @see #scope_suffix + # @param [String] swift_version @see #swift_version + # + def initialize(sandbox, build_type, user_build_configurations, archs, platform, specs, target_definitions, + file_accessors = [], scope_suffix = nil, swift_version = nil) + super(sandbox, build_type, user_build_configurations, archs, platform) + raise "Can't initialize a PodTarget without specs!" if specs.nil? || specs.empty? + raise "Can't initialize a PodTarget without TargetDefinition!" if target_definitions.nil? || target_definitions.empty? + raise "Can't initialize a PodTarget with an empty string scope suffix!" if scope_suffix == '' + @specs = specs.dup.freeze + @target_definitions = target_definitions + @file_accessors = file_accessors + @scope_suffix = scope_suffix + @swift_version = swift_version + all_specs_by_type = @specs.group_by(&:spec_type) + @library_specs = all_specs_by_type[:library] || [] + @test_specs = all_specs_by_type[:test] || [] + @app_specs = all_specs_by_type[:app] || [] + @build_headers = Sandbox::HeadersStore.new(sandbox, 'Private', :private) + self.dependent_targets = [] + self.test_dependent_targets_by_spec_name = Hash[test_specs.map { |ts| [ts.name, []] }] + self.app_dependent_targets_by_spec_name = Hash[app_specs.map { |as| [as.name, []] }] + @test_app_hosts_by_spec = {} + @build_config_cache = {} + @test_spec_build_settings_by_config = create_test_build_settings_by_config + @app_spec_build_settings_by_config = create_app_build_settings_by_config + end + + # Scopes the current target based on the existing pod targets within the cache. + # + # @param [Hash{Array => PodTarget}] cache + # the cached target for a previously scoped target. + # + # @return [Array] a scoped copy for each target definition. 
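+    # @example (illustrative, names assumed)
+    #   # one target shared by two target definitions becomes two scoped copies:
+    #   pod_target.scoped.map(&:label) # => ["Foo-Pods-App", "Foo-Pods-AppTests"]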
+ # + def scoped(cache = {}) + target_definitions.map do |target_definition| + cache_key = [specs, target_definition] + cache[cache_key] ||= begin + target = PodTarget.new(sandbox, build_type, user_build_configurations, archs, platform, specs, + [target_definition], file_accessors, target_definition.label, swift_version) + scope_dependent_targets = ->(dependent_targets) do + dependent_targets.flat_map do |pod_target| + pod_target.scoped(cache).select { |pt| pt.target_definitions == [target_definition] } + end + end + + target.dependent_targets_by_config = Hash[dependent_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }] + target.test_dependent_targets_by_spec_name_by_config = Hash[test_dependent_targets_by_spec_name_by_config.map do |spec_name, test_pod_targets_by_config| + [spec_name, Hash[test_pod_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }]] + end] + target.app_dependent_targets_by_spec_name_by_config = Hash[app_dependent_targets_by_spec_name_by_config.map do |spec_name, app_pod_targets_by_config| + [spec_name, Hash[app_pod_targets_by_config.map { |k, v| [k, scope_dependent_targets[v]] }]] + end] + target.test_app_hosts_by_spec = Hash[test_app_hosts_by_spec.map do |spec, (app_host_spec, app_pod_target)| + [spec, [app_host_spec, app_pod_target.scoped(cache).find { |pt| pt.target_definitions == [target_definition] }]] + end] + target + end + end + end + + # @return [String] the label for the target. + # + def label + if scope_suffix.nil? || scope_suffix[0] == '.' + "#{root_spec.name}#{scope_suffix}" + else + "#{root_spec.name}-#{scope_suffix}" + end + end + + # @return [Array] The list of all files tracked. + # + def all_files + Sandbox::FileAccessor.all_files(file_accessors) + end + + # @return [Pathname] the pathname for headers in the sandbox. + # + def headers_sandbox + Pathname.new(pod_name) + end + + # @return [Hash{FileAccessor => Hash}] Hash of file accessors by header mappings. + # + def header_mappings_by_file_accessor + valid_accessors = file_accessors.reject { |fa| fa.spec.non_library_specification? } + Hash[valid_accessors.map do |file_accessor| + # Private headers will always end up in Pods/Headers/Private/PodA/*.h + # This will allow for `""` imports to work. + [file_accessor, header_mappings(file_accessor, file_accessor.headers)] + end] + end + + # @return [Hash{FileAccessor => Hash}] Hash of file accessors by public header mappings. + # + def public_header_mappings_by_file_accessor + valid_accessors = file_accessors.reject { |fa| fa.spec.non_library_specification? } + Hash[valid_accessors.map do |file_accessor| + # Public headers on the other hand will be added in Pods/Headers/Public/PodA/PodA/*.h + # The extra folder is intentional in order for `<>` imports to work. + [file_accessor, header_mappings(file_accessor, file_accessor.public_headers)] + end] + end + + # @return [Array] the Swift versions supported. Might be empty if the author has not + # specified any versions, most likely due to legacy reasons. + # + def spec_swift_versions + root_spec.swift_versions + end + + # @return [Podfile] The podfile which declares the dependency. + # + def podfile + target_definitions.first.podfile + end + + # @return [String] the project name derived from the target definitions that integrate this pod. If none is + # specified then the name of the pod is used by default. + # + # @note The name is guaranteed to be the same across all target definitions and is validated by the target + # validator during installation. 
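+    # (e.g. `pod 'Foo', :project_name => 'Shared'` in the Podfile groups the
+    # pod under a `Shared` pods project when multiple pod projects are
+    # generated; pod and project names here are hypothetical)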
+ # + def project_name + target_definitions.map { |td| td.project_name_for_pod(pod_name) }.compact.first || pod_name + end + + # @return [String] The name to use for the source code module constructed + # for this target, and which will be used to import the module in + # implementation source files. + # + def product_module_name + root_spec.module_name + end + + # @param [Specification] spec the specification + # + # @return [String] the product basename of the specification's target + def product_basename_for_spec(spec) + user_specified = build_settings_by_config_for_spec(spec). + each_value. + map { |settings| settings.merged_pod_target_xcconfigs['PRODUCT_NAME'] }. + compact. + uniq + + if user_specified.size == 1 + user_specified.first + else + spec_label(spec) + end + end + + # @return [Boolean] Whether or not this target should be built. + # + # A target should not be built if it has no source files. + # + def should_build? + return @should_build if defined? @should_build + accessors = file_accessors.select { |fa| fa.spec.library_specification? } + source_files = accessors.flat_map(&:source_files) + source_files -= accessors.flat_map(&:headers) + @should_build = !source_files.empty? + end + + # @return [Array] the specification consumers for + # the target. + # + def spec_consumers + specs.map { |spec| spec.consumer(platform) } + end + + # @return [Array] the test specification consumers for + # the target. + # + def test_spec_consumers + test_specs.map { |test_spec| test_spec.consumer(platform) } + end + + # @return [Array] the app specification consumers for + # the target. + # + def app_spec_consumers + app_specs.map { |app_spec| app_spec.consumer(platform) } + end + + # Checks whether the target itself plus its specs uses Swift code. + # This check excludes source files from non library specs. + # Note that if a target does not need to be built (no source code), + # we fallback to check whether it indicates a swift version. + # + # @return [Boolean] Whether the target uses Swift code. + # + def uses_swift? + return @uses_swift if defined? @uses_swift + @uses_swift = (!should_build? && !spec_swift_versions.empty?) || + file_accessors.select { |a| a.spec.library_specification? }.any? do |file_accessor| + uses_swift_for_spec?(file_accessor.spec) + end + end + + # Checks whether a specification uses Swift or not. + # + # @param [Specification] spec + # The spec to query against. + # + # @return [Boolean] Whether the target uses Swift code within the requested non library spec. + # + def uses_swift_for_spec?(spec) + @uses_swift_for_spec_cache ||= {} + return @uses_swift_for_spec_cache[spec.name] if @uses_swift_for_spec_cache.key?(spec.name) + @uses_swift_for_spec_cache[spec.name] = begin + file_accessor = file_accessors.find { |fa| fa.spec == spec } + raise "[Bug] Unable to find file accessor for spec `#{spec.inspect}` in pod target `#{label}`" unless file_accessor + file_accessor.source_files.any? { |sf| sf.extname == '.swift' } + end + end + + # @return [Boolean] Whether the target defines a "module" + # (and thus will need a module map and umbrella header). + # + # @note Static library targets can temporarily opt in to this behavior by setting + # `DEFINES_MODULE = YES` in their specification's `pod_target_xcconfig`. + # + def defines_module? + return @defines_module if defined?(@defines_module) + return @defines_module = true if uses_swift? || build_as_framework? + + explicit_target_definitions = target_definitions.select { |td| td.dependencies.any? 
{ |d| d.root_name == pod_name } }
+      tds_by_answer = explicit_target_definitions.group_by { |td| td.build_pod_as_module?(pod_name) }
+
+      if tds_by_answer.size > 1
+        UI.warn "Unable to determine whether to build `#{label}` as a module due to a conflict " \
+          "between the following target definitions:\n\t- #{tds_by_answer.map do |a, td|
+            "`#{td.to_sentence}` #{a ? "requires `#{label}` as a module" : "does not require `#{label}` as a module"}"
+          end.join("\n\t- ")}\n\n" \
+          "Defaulting to skip building `#{label}` as a module."
+      elsif tds_by_answer.keys.first == true || target_definitions.all? { |td| td.build_pod_as_module?(pod_name) }
+        return @defines_module = true
+      end
+
+      @defines_module = library_specs.any? { |s| s.consumer(platform).pod_target_xcconfig['DEFINES_MODULE'] == 'YES' }
+    end
+
+    # @return [Array<Hash{Symbol=>String}>] An array of hashes where each hash represents a single script phase.
+    #
+    def script_phases
+      spec_consumers.flat_map(&:script_phases)
+    end
+
+    # @return [Boolean] Whether the target contains any script phases.
+    #
+    def contains_script_phases?
+      !script_phases.empty?
+    end
+
+    # @return [Boolean] Whether the target has any test specifications.
+    #
+    def contains_test_specifications?
+      !test_specs.empty?
+    end
+
+    # @return [Boolean] Whether the target has any app specifications.
+    #
+    def contains_app_specifications?
+      !app_specs.empty?
+    end
+
+    # @return [Hash{String=>Array<Xcode::FrameworkPaths>}] The vendored and non vendored framework paths this target
+    #         depends upon keyed by spec name. For the root spec and subspecs the framework path of the target itself
+    #         is included.
+    #
+    def framework_paths
+      @framework_paths ||= begin
+        file_accessors.each_with_object({}) do |file_accessor, hash|
+          frameworks = file_accessor.vendored_dynamic_artifacts.map do |framework_path|
+            relative_path_to_sandbox = framework_path.relative_path_from(sandbox.root)
+            framework_source = "${PODS_ROOT}/#{relative_path_to_sandbox}"
+            # Until this can be configured, assume the dSYM file uses the file name as the framework.
+            # See https://github.com/CocoaPods/CocoaPods/issues/1698
+            dsym_name = "#{framework_path.basename}.dSYM"
+            dsym_path = Pathname.new("#{framework_path.dirname}/#{dsym_name}")
+            dsym_source = if dsym_path.exist?
+                            "${PODS_ROOT}/#{relative_path_to_sandbox}.dSYM"
+                          end
+            dirname = framework_path.dirname
+            bcsymbolmap_paths = if dirname.exist?
+                                  Dir.chdir(dirname) do
+                                    Dir.glob('*.bcsymbolmap').map do |bcsymbolmap_file_name|
+                                      bcsymbolmap_path = dirname + bcsymbolmap_file_name
+                                      "${PODS_ROOT}/#{bcsymbolmap_path.relative_path_from(sandbox.root)}"
+                                    end
+                                  end
+                                end
+            Xcode::FrameworkPaths.new(framework_source, dsym_source, bcsymbolmap_paths)
+          end
+          if file_accessor.spec.library_specification? && should_build? && build_as_dynamic_framework?
+            frameworks << Xcode::FrameworkPaths.new(build_product_path('${BUILT_PRODUCTS_DIR}'))
+          end
+          hash[file_accessor.spec.name] = frameworks
+        end
+      end
+    end
+
+    # @return [Hash{String=>Array<Xcode::XCFramework>}] The vendored xcframeworks this target
+    #         depends upon keyed by spec name.
+    #
+    def xcframeworks
+      @xcframeworks ||= begin
+        file_accessors.each_with_object({}) do |file_accessor, hash|
+          frameworks = file_accessor.vendored_xcframeworks.map do |framework_path|
+            Xcode::XCFramework.new(file_accessor.spec.name, framework_path)
+          end
+          hash[file_accessor.spec.name] = frameworks
+        end
+      end
+    end
+
+    # @return [Hash{String=>Array<String>}] The resource and resource bundle paths this target depends upon keyed by
+    #         spec name.
Resource (not resource bundles) paths can vary depending on the type of spec: + # - App specs _always_ get their resource paths added to "Copy Bundle Resources" phase from + # [PodTargetInstaller] therefore their resource paths are never included here. + # - Test specs may have their resource paths added to "Copy Bundle Resources" if the target itself is + # built as a framework, which is again checked and handled by PodTargetInstaller. If that is true then + # the resource paths are not included, otherwise they are included and handled via the CocoaPods copy + # resources script phase. + # - Library specs _do not_ have per-configuration CocoaPods copy resources script phase and their resource + # paths will be added to "Copy Bundle Resources" phase if the target is built as a framework because + # it supports it. We always include the resource paths for library specs because they are also + # integrated to the user target. + # + def resource_paths + @resource_paths ||= begin + file_accessors.each_with_object({}) do |file_accessor, hash| + resource_paths = if file_accessor.spec.app_specification? || (file_accessor.spec.test_specification? && build_as_framework?) + [] + else + file_accessor.resources.map do |res| + "${PODS_ROOT}/#{res.relative_path_from(sandbox.project_path.dirname)}" + end + end + prefix = Pod::Target::BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE + prefix = configuration_build_dir unless file_accessor.spec.test_specification? + resource_bundle_paths = file_accessor.resource_bundles.keys.map { |name| "#{prefix}/#{name.shellescape}.bundle" } + hash[file_accessor.spec.name] = (resource_paths + resource_bundle_paths).map(&:to_s) + end + end + end + + # @param [Specification] spec The non library spec to calculate the deployment target for. + # + # @return [String] The deployment target to use for the non library spec. If the non library spec explicitly + # specifies one then this is the one used otherwise the one that was determined by the analyzer is used. + # + def deployment_target_for_non_library_spec(spec) + raise ArgumentError, 'Must be a non library spec.' unless spec.non_library_specification? + spec.deployment_target(platform.name.to_s) || platform.deployment_target.to_s + end + + # Returns the corresponding native product type to use given the test type. + # This is primarily used when creating the native targets in order to produce the correct test bundle target + # based on the type of tests included. + # + # @param [Symbol] test_type + # The test type to map to provided by the test specification DSL. + # + # @return [Symbol] The native product type to use. + # + def product_type_for_test_type(test_type) + case test_type + when :unit + :unit_test_bundle + when :ui + :ui_test_bundle + else + raise ArgumentError, "Unknown test type `#{test_type}`." + end + end + + # Returns the label to use for the given test type. + # This is used to generate native target names for test specs. + # + # @param [Symbol] test_type + # The test type to map to provided by the test specification DSL. + # + # @return [String] The native product type to use. + # + def label_for_test_type(test_type) + case test_type + when :unit + 'Unit' + when :ui + 'UI' + else + raise ArgumentError, "Unknown test type `#{test_type}`." + end + end + + # @return [Specification] The root specification for the target. + # + def root_spec + @root_spec ||= specs.first.root + end + + # @return [String] The name of the Pod that this target refers to. 
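+    # (the root spec name, e.g. `'Foo'` even when the target was built from a
+    # `Foo/Core` subspec; names are illustrative)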
+ # + def pod_name + root_spec.name + end + + # @return [Pathname] the absolute path of the LLVM module map file that + # defines the module structure for the compiler. + # + def module_map_path + basename = "#{label}.modulemap" + if build_as_framework? + super + elsif file_accessors.any?(&:module_map) + build_headers.root + product_module_name + basename + else + sandbox.public_headers.root + product_module_name + basename + end + end + + # @return [Pathname] the absolute path of the prefix header file. + # + def prefix_header_path + support_files_dir + "#{label}-prefix.pch" + end + + # @return [Hash] the additional entries to add to the generated Info.plist + # + def info_plist_entries + root_spec.info_plist + end + + # @param [String] bundle_name + # The name of the bundle product, which is given by the +spec+. + # + # @return [String] The derived name of the resource bundle target. + # + def resources_bundle_target_label(bundle_name) + "#{label}-#{bundle_name}" + end + + # @param [Specification] subspec + # The subspec to use for producing the label. + # + # @return [String] The derived name of the target. + # + def subspec_label(subspec) + raise ArgumentError, 'Must not be a root spec' if subspec.root? + subspec.name.split('/')[1..-1].join('-').to_s + end + + # @param [Specification] test_spec + # The test spec to use for producing the test label. + # + # @return [String] The derived name of the test target. + # + def test_target_label(test_spec) + "#{label}-#{label_for_test_type(test_spec.test_type)}-#{subspec_label(test_spec)}" + end + + # @param [Specification] app_spec + # The app spec to use for producing the app label. + # + # @return [String] The derived name of the app target. + # + def app_target_label(app_spec) + "#{label}-#{subspec_label(app_spec)}" + end + + # @param [Specification] test_spec + # the test spec to use for producing the app host target label. + # + # @return [(String,String)] a tuple, where the first item is the PodTarget#label of the pod target that defines the + # app host, and the second item is the target name of the app host + # + def app_host_target_label(test_spec) + app_spec, app_target = test_app_hosts_by_spec[test_spec] + + if app_spec + [app_target.name, app_target.app_target_label(app_spec)] + elsif test_spec.consumer(platform).requires_app_host? + [name, "AppHost-#{label}-#{label_for_test_type(test_spec.test_type)}-Tests"] + end + end + + # @param [Specification] spec + # the spec to return app host dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app host dependent targets for. + # + # @return [Array] the app host dependent targets for the given spec. + # + def app_host_dependent_targets_for_spec(spec, configuration: nil) + return [] unless spec.test_specification? && spec.consumer(platform).test_type == :unit + app_host_info = test_app_hosts_by_spec[spec] + if app_host_info.nil? + [] + else + app_spec, app_target = *app_host_info + app_target.dependent_targets_for_app_spec(app_spec, :configuration => configuration) + end + end + + def spec_label(spec) + case spec.spec_type + when :library then label + when :test then test_target_label(spec) + when :app then app_target_label(spec) + else raise ArgumentError, "Unhandled spec type #{spec.spec_type.inspect} for #{spec.inspect}" + end + end + # for backwards compatibility + alias non_library_spec_label spec_label + + # @param [Specification] spec + # The spec to return scheme configuration for. 
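+    # (e.g. a test spec declaring
+    #   `test_spec.scheme = { :environment_variables => { 'HOST' => 'localhost' } }`
+    # in its podspec; an illustrative value for the spec DSL surfaced here)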
+ # + # @return [Hash] The scheme configuration used or empty if none is specified. + # + def scheme_for_spec(spec) + return {} if (spec.library_specification? && !spec.root?) || spec.available_platforms.none? do |p| + p.name == platform.name + end + spec.consumer(platform).scheme + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script for the given spec. + # + def copy_resources_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources.sh" + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script input file list for the given spec. + # + def copy_resources_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this copy resources script path is for. + # + # @return [Pathname] The absolute path of the copy resources script output file list for the given spec. + # + def copy_resources_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-resources-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script for the given spec. + # + def embed_frameworks_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks.sh" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script input file list for the given spec. + # + def embed_frameworks_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this embed frameworks script path is for. + # + # @return [Pathname] The absolute path of the embed frameworks script output file list for the given spec. + # + def embed_frameworks_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-frameworks-output-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy xcframeworks script. + # + def copy_xcframeworks_script_path + support_files_dir + "#{label}-xcframeworks.sh" + end + + # @return [String] The path of the copy xcframeworks input files file list + # + def copy_xcframeworks_script_input_files_path + support_files_dir + "#{label}-xcframeworks-input-files.xcfilelist" + end + + # @return [String] The path of the copy xcframeworks output files file list + # + def copy_xcframeworks_script_output_files_path + support_files_dir + "#{label}-xcframeworks-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script for the given spec. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts.sh" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script input file list for the given spec. 
+ # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_input_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts-input-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this script path is for. + # + # @return [Pathname] The absolute path of the prepare artifacts script output file list for the given spec. + # + # @deprecated + # + # @todo Remove in 2.0 + # + def prepare_artifacts_script_output_files_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-artifacts-output-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy dSYMs script. + # + def copy_dsyms_script_path + support_files_dir + "#{label}-copy-dsyms.sh" + end + + # @return [Pathname] The absolute path of the copy dSYM script phase input file list. + # + def copy_dsyms_script_input_files_path + support_files_dir + "#{label}-copy-dsyms-input-files.xcfilelist" + end + + # @return [Pathname] The absolute path of the copy dSYM script phase output file list. + # + def copy_dsyms_script_output_files_path + support_files_dir + "#{label}-copy-dsyms-output-files.xcfilelist" + end + + # @param [Specification] spec + # The spec this Info.plist path is for. + # + # @return [Pathname] The absolute path of the Info.plist for the given spec. + # + def info_plist_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-Info.plist" + end + + # @param [Specification] spec + # The spec this prefix header path is for. + # + # @return [Pathname] the absolute path of the prefix header file for the given spec. + # + def prefix_header_path_for_spec(spec) + support_files_dir + "#{spec_label(spec)}-prefix.pch" + end + + # @return [Array] The names of the Pods on which this target + # depends. + # + def dependencies + spec_consumers.flat_map do |consumer| + consumer.dependencies.map { |dep| Specification.root_name(dep.name) } + end.uniq + end + + # Returns all dependent targets of this target. If a configuration is passed then the list can be scoped to a given + # configuration. + # + # @param [String] configuration + # The configuration to return the dependent targets for or `nil` if all configurations should be included. + # + # @return [Array] the recursive targets that this target has a dependency upon. + # + def recursive_dependent_targets(configuration: nil) + @recursive_dependent_targets ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_dependent_targets(Set.new, :configuration => config).delete(self).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a + hash + end + @recursive_dependent_targets.fetch(configuration) { raise ArgumentError, "No configuration #{configuration} for #{self}, known configurations are #{@recursive_dependent_targets.keys}" } + end + + def _add_recursive_dependent_targets(set, configuration: nil) + if defined?(@recursive_dependent_targets) + return set.merge(@recursive_dependent_targets[configuration]) + end + dependent_targets = configuration ? dependent_targets_by_config[configuration] : self.dependent_targets + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + protected :_add_recursive_dependent_targets + + # @param [Specification] test_spec + # the test spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the test dependent targets for. 
+ # + # @return [Array] the recursive targets that this target has a + # test dependency upon. + # + def recursive_test_dependent_targets(test_spec, configuration: nil) + @recursive_test_dependent_targets ||= {} + @recursive_test_dependent_targets[test_spec] ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_test_dependent_targets(test_spec, Set.new, :configuration => config).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a.freeze + hash + end + @recursive_test_dependent_targets[test_spec][configuration] + end + + def _add_recursive_test_dependent_targets(test_spec, set, configuration: nil) + raise ArgumentError, 'Must give a test spec' unless test_spec + dependent_targets = configuration ? test_dependent_targets_by_spec_name_by_config[test_spec.name][configuration] : test_dependent_targets_by_spec_name[test_spec.name] + raise ArgumentError, "Unable to find deps for #{test_spec} for config #{configuration.inspect} (out of #{test_dependent_targets_by_spec_name_by_config.inspect})" unless dependent_targets + + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + private :_add_recursive_test_dependent_targets + + # @param [Specification] test_spec + # the test spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the test dependent targets for. + # + # @return [Array] the canonical list of dependent targets this target has a dependency upon. + # This list includes the target itself as well as its recursive dependent and test dependent targets. + # + def dependent_targets_for_test_spec(test_spec, configuration: nil) + [self, *recursive_dependent_targets(:configuration => configuration), *recursive_test_dependent_targets(test_spec, :configuration => configuration)].uniq + end + + # @param [Specification] app_spec + # the app spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app dependent targets for. + # + # @return [Array] the recursive targets that this target has a + # app dependency upon. + # + def recursive_app_dependent_targets(app_spec, configuration: nil) + @recursive_app_dependent_targets ||= {} + @recursive_app_dependent_targets[app_spec] ||= begin + hash = Hash[config_variants.map do |config| + [config, _add_recursive_app_dependent_targets(app_spec, Set.new, :configuration => config).to_a.freeze] + end] + hash[nil] = hash.each_value.reduce(Set.new, &:|).to_a.freeze + hash + end + @recursive_app_dependent_targets[app_spec][configuration] + end + + def _add_recursive_app_dependent_targets(app_spec, set, configuration: nil) + raise ArgumentError, 'Must give a app spec' unless app_spec + dependent_targets = configuration ? app_dependent_targets_by_spec_name_by_config[app_spec.name][configuration] : app_dependent_targets_by_spec_name[app_spec.name] + raise ArgumentError, "Unable to find deps for #{app_spec} for config #{configuration.inspect} #{app_dependent_targets_by_spec_name_by_config.inspect}" unless dependent_targets + + dependent_targets.each do |target| + target._add_recursive_dependent_targets(set, :configuration => configuration) if set.add?(target) + end + + set + end + private :_add_recursive_app_dependent_targets + + # @param [Specification] app_spec + # the app spec to scope dependencies for + # + # @param [String] configuration + # the configuration to retrieve the app dependent targets for. 
+ # + # @return [Array] the canonical list of dependent targets this target has a dependency upon. + # This list includes the target itself as well as its recursive dependent and app dependent targets. + # + def dependent_targets_for_app_spec(app_spec, configuration: nil) + [self, *recursive_dependent_targets(:configuration => configuration), *recursive_app_dependent_targets(app_spec, :configuration => configuration)].uniq + end + + # Checks if warnings should be inhibited for this pod. + # + # @return [Boolean] + # + def inhibit_warnings? + return @inhibit_warnings if defined? @inhibit_warnings + whitelists = target_definitions.map do |target_definition| + target_definition.inhibits_warnings_for_pod?(root_spec.name) + end.uniq + + if whitelists.empty? + @inhibit_warnings = false + false + elsif whitelists.count == 1 + @inhibit_warnings = whitelists.first + whitelists.first + else + UI.warn "The pod `#{pod_name}` is linked to different targets " \ + "(#{target_definitions.map { |td| "`#{td.name}`" }.to_sentence}), which contain different " \ + 'settings to inhibit warnings. CocoaPods does not currently ' \ + 'support different settings and will fall back to your preference ' \ + 'set in the root target definition.' + @inhibit_warnings = podfile.root_target_definitions.first.inhibits_warnings_for_pod?(root_spec.name) + end + end + + # @param [String] dir + # The directory (which might be a variable) relative to which + # the returned path should be. This must be used if the + # $CONFIGURATION_BUILD_DIR is modified. + # + # @return [String] The absolute path to the configuration build dir + # + def configuration_build_dir(dir = BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE) + "#{dir}/#{label}" + end + + # @param [String] dir + # @see #configuration_build_dir + # + # @return [String] The absolute path to the build product + # + def build_product_path(dir = BuildSettings::CONFIGURATION_BUILD_DIR_VARIABLE) + "#{configuration_build_dir(dir)}/#{product_name}" + end + + # @return [String] The source path of the root for this target relative to `$(PODS_ROOT)` + # + def pod_target_srcroot + "${PODS_ROOT}/#{sandbox.pod_dir(pod_name).relative_path_from(sandbox.root)}" + end + + # @return [String] The version associated with this target + # + def version + version = root_spec.version + [version.major, version.minor, version.patch].join('.') + end + + # @param [Boolean] include_dependent_targets_for_test_spec + # whether to include header search paths for test dependent targets + # + # @param [Boolean] include_dependent_targets_for_app_spec + # whether to include header search paths for app dependent targets + # + # @param [Boolean] include_private_headers + # whether to include header search paths for private headers of this + # target + # + # @param [String] configuration + # the configuration to return header search paths for or `nil` for all configurations. + # + # @return [Array] The set of header search paths this target uses. 
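+    # @example (illustrative) entries are sandbox-relative, along the lines of
+    #   ["${PODS_ROOT}/Headers/Private/Foo", "${PODS_ROOT}/Headers/Public/Foo"]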
+ # + def header_search_paths(include_dependent_targets_for_test_spec: nil, include_dependent_targets_for_app_spec: nil, + include_private_headers: true, configuration: nil) + header_search_paths = [] + header_search_paths.concat(build_headers.search_paths(platform, nil, false)) if include_private_headers + header_search_paths.concat(sandbox.public_headers.search_paths(platform, pod_name, uses_modular_headers?)) + dependent_targets = recursive_dependent_targets(:configuration => configuration) + if include_dependent_targets_for_test_spec + dependent_targets += recursive_test_dependent_targets(include_dependent_targets_for_test_spec, :configuration => configuration) + end + if include_dependent_targets_for_app_spec + dependent_targets += recursive_app_dependent_targets(include_dependent_targets_for_app_spec, :configuration => configuration) + end + dependent_targets.uniq.each do |dependent_target| + header_search_paths.concat(sandbox.public_headers.search_paths(platform, dependent_target.pod_name, defines_module? && dependent_target.uses_modular_headers?(false))) + end + header_search_paths.uniq + end + + # @param [Specification] spec the specification to return build settings for. + # + # @param [String] configuration the configuration to scope the build settings. + # + # @return [BuildSettings::PodTargetSettings] The build settings for the given spec + # + def build_settings_for_spec(spec, configuration: nil) + raise ArgumentError, 'Must give configuration' unless configuration + configuration = user_build_configurations[configuration] if user_build_configurations.key?(configuration) + build_settings_by_config_for_spec(spec)[configuration] || raise(ArgumentError, "No build settings for #{spec} (configuration #{configuration.inspect}) (known configurations #{config_variants})") + end + + def build_settings_by_config_for_spec(spec) + case spec.spec_type + when :test then test_spec_build_settings_by_config[spec.name] + when :app then app_spec_build_settings_by_config[spec.name] + else build_settings + end || raise(ArgumentError, "No build settings for #{spec}") + end + + def user_config_names_by_config_type + user_build_configurations.each_with_object({}) do |(user, type), hash| + hash[type] ||= [] + hash[type] << user + end.each_value(&:freeze).freeze + end + + protected + + # Returns whether the pod target should use modular headers. + # + # @param [Boolean] only_if_defines_modules + # whether the use of modular headers should require the target to define a module + # + # @note This must return false when a pod has a `header_mappings_dir` or `header_dir`, + # as that allows the spec to customize the header structure, and + # therefore it might not be expecting the module name to be prepended + # to imports at all. + # + def uses_modular_headers?(only_if_defines_modules = true) + return false if only_if_defines_modules && !defines_module? + return @uses_modular_headers if defined? @uses_modular_headers + @uses_modular_headers = spec_consumers.none?(&:header_mappings_dir) && spec_consumers.none?(&:header_dir) + end + + private + + def config_variants + if user_build_configurations.empty? 
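+        # no user configurations are known yet, so assume the two default
+        # Xcode configurations to guarantee settings objects exist for both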
+ %i(debug release) + else + user_build_configurations.values.uniq + end + end + + def create_build_settings + Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, nil, :configuration => config)] + end] + end + + def create_test_build_settings_by_config + Hash[test_specs.map do |test_spec| + [test_spec.name, Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, test_spec, :configuration => config)] + end]] + end] + end + + def create_app_build_settings_by_config + Hash[app_specs.map do |app_spec| + [app_spec.name, Hash[config_variants.map do |config| + [config, BuildSettings::PodTargetSettings.new(self, app_spec, :configuration => config)] + end]] + end] + end + + # Computes the destination sub-directory in the sandbox + # + # @param [Sandbox::FileAccessor] file_accessor + # The consumer file accessor for which the headers need to be + # linked. + # + # @param [Array] headers + # The absolute paths of the headers which need to be mapped. + # + # @return [Hash{Pathname => Array}] A hash containing the + # headers folders as the keys and the absolute paths of the + # header files as the values. + # + def header_mappings(file_accessor, headers) + consumer = file_accessor.spec_consumer + header_mappings_dir = consumer.header_mappings_dir + dir = headers_sandbox + dir += consumer.header_dir if consumer.header_dir + + mappings = {} + headers.each do |header| + next if header.to_s.include?('.framework/') + + sub_dir = dir + if header_mappings_dir + relative_path = header.relative_path_from(file_accessor.path_list.root + header_mappings_dir) + sub_dir += relative_path.dirname + end + mappings[sub_dir] ||= [] + mappings[sub_dir] << header + end + mappings + end + + # @!group Deprecated APIs + # ----------------------------------------------------------------------- # + + public + + # @deprecated Use `test_app_hosts_by_spec` instead. + # + # @todo Remove in 2.0 + # + # @return [Hash{String => (Specification,PodTarget)}] tuples of app specs and pod targets by test spec name. + # + def test_app_hosts_by_spec_name + Hash[test_app_hosts_by_spec.map do |spec, value| + [spec.name, value] + end] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface.rb new file mode 100644 index 0000000..50b49ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface.rb @@ -0,0 +1,463 @@ +require 'cocoapods/user_interface/error_report' +require 'cocoapods/user_interface/inspector_reporter' + +module Pod + # Provides support for UI output. It provides support for nested sections of + # information and for a verbose mode. + # + module UserInterface + require 'colored2' + + @title_colors = %w( yellow green ) + @title_level = 0 + @indentation_level = 2 + @treat_titles_as_messages = false + @warnings = [] + + class << self + include Config::Mixin + + attr_accessor :indentation_level + attr_accessor :title_level + attr_accessor :warnings + + # @return [IO] IO object to which UI output will be directed. + # + attr_accessor :output_io + + # @return [Boolean] Whether the wrapping of the strings to the width of the + # terminal should be disabled. + # + attr_accessor :disable_wrap + alias_method :disable_wrap?, :disable_wrap + + # Prints a title taking an optional verbose prefix and + # a relative indentation valid for the UI action in the passed + # block. 
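+      # (a sketch of intended use:
+      #   `UI.section('Installing dependencies') { UI.message('Using Foo (1.0.0)') }`
+      # prints the title, then indents the nested message; example text only)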
+      #
+      # In verbose mode titles are printed with a color according
+      # to their level. In normal mode titles are printed only if
+      # they have nesting level smaller than 2.
+      #
+      # @todo Refactor to title (for always visible titles like search)
+      #       and sections (titles that represent collapsible sections).
+      #
+      # @param [String] title
+      #        The title to print
+      #
+      # @param [String] verbose_prefix
+      #        See #message
+      #
+      # @param [FixNum] relative_indentation
+      #        The indentation level relative to the current,
+      #        when the message is printed.
+      #
+      def section(title, verbose_prefix = '', relative_indentation = 0)
+        if config.verbose?
+          title(title, verbose_prefix, relative_indentation)
+        elsif title_level < 1
+          puts title
+        end
+
+        self.indentation_level += relative_indentation
+        self.title_level += 1
+        yield if block_given?
+      ensure
+        self.indentation_level -= relative_indentation
+        self.title_level -= 1
+      end
+
+      # In verbose mode it shows the sections and the contents.
+      # In normal mode it just prints the title.
+      #
+      # @return [void]
+      #
+      def titled_section(title, options = {})
+        relative_indentation = options[:relative_indentation] || 0
+        verbose_prefix = options[:verbose_prefix] || ''
+        if config.verbose?
+          title(title, verbose_prefix, relative_indentation)
+        else
+          puts title
+        end
+
+        self.indentation_level += relative_indentation
+        self.title_level += 1
+        yield if block_given?
+      ensure
+        self.indentation_level -= relative_indentation
+        self.title_level -= 1
+      end
+
+      # A title, as opposed to a section, is always visible.
+      #
+      # @param [String] title
+      #        The title to print
+      #
+      # @param [String] verbose_prefix
+      #        See #message
+      #
+      # @param [FixNum] relative_indentation
+      #        The indentation level relative to the current,
+      #        when the message is printed.
+      #
+      def title(title, verbose_prefix = '', relative_indentation = 2)
+        if @treat_titles_as_messages
+          message(title, verbose_prefix)
+        else
+          title = verbose_prefix + title if config.verbose?
+          title = "\n#{title}" if @title_level < 2
+          if (color = @title_colors[@title_level])
+            title = title.send(color)
+          end
+          puts "#{title}"
+        end
+
+        self.indentation_level += relative_indentation
+        self.title_level += 1
+        yield if block_given?
+      ensure
+        self.indentation_level -= relative_indentation
+        self.title_level -= 1
+      end
+
+      # Prints a verbose message taking an optional verbose prefix and
+      # a relative indentation valid for the UI action in the passed
+      # block.
+      #
+      # @todo Clean interface.
+      #
+      # @param [String] message
+      #        The message to print.
+      #
+      # @param [String] verbose_prefix
+      #        See #message
+      #
+      # @param [FixNum] relative_indentation
+      #        The indentation level relative to the current,
+      #        when the message is printed.
+      #
+      # @yield  The action, this block is always executed.
+      #
+      # @return [void]
+      #
+      def message(message, verbose_prefix = '', relative_indentation = 2)
+        message = verbose_prefix + message if config.verbose?
+        puts_indented message if config.verbose?
+
+        self.indentation_level += relative_indentation
+        yield if block_given?
+      ensure
+        self.indentation_level -= relative_indentation
+      end
+
+      # Prints info to the user. The info is always displayed.
+      # It respects the current indentation level only in verbose
+      # mode.
+      #
+      # Any title printed in the optional block is treated as a message.
+      #
+      # @param [String] message
+      #        The message to print.
+      #
+      def info(message)
+        indentation = config.verbose? ? self.indentation_level : 0
+        indented = wrap_string(message, indentation)
+        puts(indented)
+
+        self.indentation_level += 2
+        @treat_titles_as_messages = true
+        yield if block_given?
+      ensure
+        @treat_titles_as_messages = false
+        self.indentation_level -= 2
+      end
+
+      # Prints an important message to the user.
+      #
+      # @param [String] message The message to print.
+      #
+      # @return [void]
+      #
+      def notice(message)
+        puts("\n[!] #{message}".green)
+      end
+
+      # Returns a string containing the relative location of a path from the
+      # Podfile. The returned path is quoted. If the argument is nil it
+      # returns the empty string.
+      #
+      # @param [#to_str] pathname
+      #        The path to print.
+      #
+      def path(pathname)
+        if pathname
+          from_path = config.podfile_path.dirname if config.podfile_path
+          from_path ||= Pathname.pwd
+          path = begin
+            Pathname(pathname).relative_path_from(from_path)
+          rescue
+            pathname
+          end
+          "`#{path}`"
+        else
+          ''
+        end
+      end
+
+      # Prints the textual representation of a given set.
+      #
+      # @param [Set] set
+      #        the set that should be presented.
+      #
+      # @param [Symbol] mode
+      #        the presentation mode, either `:normal` or `:name_and_version`.
+      #
+      def pod(set, mode = :normal)
+        if mode == :name_and_version
+          puts_indented "#{set.name} #{set.versions.first.version}"
+        else
+          pod = Specification::Set::Presenter.new(set)
+          title = "-> #{pod.name} (#{pod.version})"
+          if pod.spec.deprecated?
+            title += " #{pod.deprecation_description}"
+            colored_title = title.red
+          else
+            colored_title = title.green
+          end
+
+          title(colored_title, '', 1) do
+            puts_indented pod.summary if pod.summary
+            puts_indented "pod '#{pod.name}', '~> #{pod.version}'"
+            labeled('Homepage', pod.homepage)
+            labeled('Source', pod.source_url)
+            labeled('Versions', pod.versions_by_source)
+            if mode == :stats
+              labeled('Authors', pod.authors) if pod.authors =~ /,/
+              labeled('Author', pod.authors) if pod.authors !~ /,/
+              labeled('License', pod.license)
+              labeled('Platform', pod.platform)
+              labeled('Stars', pod.github_stargazers)
+              labeled('Forks', pod.github_forks)
+            end
+            labeled('Subspecs', pod.subspecs)
+          end
+        end
+      end
+
+      # Prints a message with a label.
+      #
+      # @param [String] label
+      #        The label to print.
+      #
+      # @param [#to_s] value
+      #        The value to print.
+      #
+      # @param [FixNum] justification
+      #        The justification of the label.
+      #
+      def labeled(label, value, justification = 12)
+        if value
+          title = "- #{label}:"
+          if value.is_a?(Array)
+            lines = [wrap_string(title, self.indentation_level)]
+            value.each do |v|
+              lines << wrap_string("- #{v}", self.indentation_level + 2)
+            end
+            puts lines.join("\n")
+          else
+            puts wrap_string(title.ljust(justification) + "#{value}", self.indentation_level)
+          end
+        end
+      end
+
+      # Prints a message respecting the current indentation level and
+      # wrapping it to the terminal width if necessary.
+      #
+      # @param [String] message
+      #        The message to print.
+      #
+      def puts_indented(message = '')
+        indented = wrap_string(message, self.indentation_level)
+        puts(indented)
+      end
+
+      # Prints the stored warnings. This method is intended to be called at the
+      # end of the execution of the binary.
+      #
+      # @return [void]
+      #
+      def print_warnings
+        STDOUT.flush
+        warnings.each do |warning|
+          next if warning[:verbose_only] && !config.verbose?
+          STDERR.puts("\n[!] #{warning[:message]}".yellow)
+          warning[:actions].each do |action|
+            string = "- #{action}"
+            string = wrap_string(string, 4)
+            puts(string)
+          end
+        end
+      end
+
+      # Presents a choice among the elements of an array to the user.
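+      #
+      # @example Editor's illustrative sketch (not upstream documentation;
+      #          the array contents and prompt string are hypothetical).
+      #
+      #   index = UI.choose_from_array(%w(CDN git), 'Which source type?')
+      #   # Prints "1: CDN" and "2: git", then the prompt, reads a 1-based
+      #   # answer from $stdin, and returns the zero-based index.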
+      #
+      # @param [Array<#to_s>] array
+      #        The list of the elements among which the user should make
+      #        their choice.
+      #
+      # @param [String] message
+      #        The message to display to the user.
+      #
+      # @return [Fixnum] The index of the chosen array item.
+      #
+      def choose_from_array(array, message)
+        array.each_with_index do |item, index|
+          UI.puts "#{index + 1}: #{item}"
+        end
+
+        UI.puts message
+
+        index = UI.gets.chomp.to_i - 1
+        if index < 0 || index > array.count - 1
+          raise Informative, "#{index + 1} is invalid [1-#{array.count}]"
+        else
+          index
+        end
+      end
+
+      public
+
+      # @!group Basic methods
+      #-----------------------------------------------------------------------#
+
+      # prints a message followed by a new line unless config is silent.
+      #
+      # @param [String] message
+      #        The message to print.
+      #
+      def puts(message = '')
+        return if config.silent?
+        begin
+          (output_io || STDOUT).puts(message)
+        rescue Errno::EPIPE
+          exit 0
+        end
+      end
+
+      # prints a message followed by a new line unless config is silent.
+      #
+      # @param [String] message
+      #        The message to print.
+      #
+      def print(message)
+        return if config.silent?
+        begin
+          (output_io || STDOUT).print(message)
+        rescue Errno::EPIPE
+          exit 0
+        end
+      end
+
+      # gets input from $stdin
+      #
+      def gets
+        $stdin.gets
+      end
+
+      # Stores an important warning to the user, optionally followed by
+      # actions that the user should take. To print them use {#print_warnings}.
+      #
+      # @param [String] message The message to print.
+      # @param [Array] actions The actions that the user should take.
+      # @param [Boolean] verbose_only
+      #        Restrict the appearance of the warning to verbose mode only
+      #
+      # @return [void]
+      #
+      def warn(message, actions = [], verbose_only = false)
+        warnings << { :message => message, :actions => actions, :verbose_only => verbose_only }
+      end
+
+      # Pipes all output inside the given block to a pager.
+      #
+      # @yield Code block in which inputs to {#puts} and {#print} methods will be printed to the pager.
+      #
+      def with_pager
+        prev_handler = Signal.trap('INT', 'IGNORE')
+        IO.popen((ENV['PAGER'] || 'less -R'), 'w') do |io|
+          UI.output_io = io
+          yield
+        end
+      ensure
+        Signal.trap('INT', prev_handler)
+        UI.output_io = nil
+      end
+
+      private
+
+      # @!group Helpers
+      #-----------------------------------------------------------------------#
+
+      # Wraps a string taking into account the width of the terminal and an
+      # optional indent. Adapted from
+      # https://macromates.com/blog/2006/wrapping-text-with-regular-expressions/
+      #
+      # @param [String] string The string to wrap
+      #
+      # @param [Fixnum] indent The number of spaces used to indent the result.
+      #
+      # @return [String] The formatted string.
+      #
+      # @note If CocoaPods is not being run in a terminal or the width of the
+      # terminal is too small a width of 80 is assumed.
+      #
+      def wrap_string(string, indent = 0)
+        if disable_wrap
+          (' ' * indent) + string
+        else
+          first_space = ' ' * indent
+          indented = CLAide::Command::Banner::TextWrapper.wrap_with_indent(string, indent, 9999)
+          first_space + indented
+        end
+      end
+    end
+  end
+  UI = UserInterface
+
+  #---------------------------------------------------------------------------#
+
+  # Redirects cocoapods-core UI.
+  #
+  module CoreUI
+    class << self
+      def puts(message)
+        UI.puts message
+      end
+
+      def print(message)
+        UI.print(message)
+      end
+
+      def warn(message)
+        UI.warn message
+      end
+    end
+  end
+end
+
+#---------------------------------------------------------------------------#
+
+module Xcodeproj
+  # Redirects xcodeproj UI.
+ # + module UserInterface + def self.puts(message) + ::Pod::UI.puts message + end + + def self.warn(message) + ::Pod::UI.warn message + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/error_report.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/error_report.rb new file mode 100644 index 0000000..aa03e65 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/error_report.rb @@ -0,0 +1,204 @@ +# encoding: UTF-8 + +require 'rbconfig' +require 'cgi' +require 'gh_inspector' + +module Pod + module UserInterface + module ErrorReport + class << self + def report(exception) + <<-EOS + +#{'――― MARKDOWN TEMPLATE ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +### Command + +``` +#{original_command} +``` + +#{report_instructions} + +#{stack} +### Plugins + +``` +#{plugins_string} +``` +#{markdown_podfile} +### Error + +``` +#{exception.class} - #{exception.message.force_encoding('UTF-8')} +#{exception.backtrace.join("\n") if exception.backtrace} +``` + +#{'――― TEMPLATE END ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――'.reversed} + +#{'[!] Oh no, an error occurred.'.red} +#{error_from_podfile(exception)} +#{'Search for existing GitHub issues similar to yours:'.yellow} +#{issues_url(exception)} + +#{'If none exists, create a ticket, with the template displayed above, on:'.yellow} +https://github.com/CocoaPods/CocoaPods/issues/new + +#{'Be sure to first read the contributing guide for details on how to properly submit a ticket:'.yellow} +https://github.com/CocoaPods/CocoaPods/blob/master/CONTRIBUTING.md + +Don't forget to anonymize any private data! + +EOS + end + + def report_instructions + <<-EOS +### Report + +* What did you do? + +* What did you expect to happen? + +* What happened instead? +EOS + end + + def stack + parts = { + 'CocoaPods' => Pod::VERSION, + 'Ruby' => RUBY_DESCRIPTION, + 'RubyGems' => Gem::VERSION, + 'Host' => host_information, + 'Xcode' => xcode_information, + 'Git' => git_information, + 'Ruby lib dir' => RbConfig::CONFIG['libdir'], + 'Repositories' => repo_information, + } + justification = parts.keys.map(&:size).max + + str = <<-EOS +### Stack + +``` +EOS + parts.each do |name, value| + str << name.rjust(justification) + str << ' : ' + str << Array(value).join("\n" << (' ' * (justification + 3))) + str << "\n" + end + + str << "```\n" + end + + def plugins_string + plugins = installed_plugins + max_name_length = plugins.keys.map(&:length).max + plugins.map do |name, version| + "#{name.ljust(max_name_length)} : #{version}" + end.sort.join("\n") + end + + def markdown_podfile + return '' unless Config.instance.podfile_path && Config.instance.podfile_path.exist? 
+ <<-EOS + +### Podfile + +```ruby +#{Config.instance.podfile_path.read.strip} +``` +EOS + end + + def search_for_exceptions(exception) + inspector = GhInspector::Inspector.new 'cocoapods', 'cocoapods' + message_delegate = UserInterface::InspectorReporter.new + inspector.search_exception exception, message_delegate + rescue => e + warn "Searching for inspections failed: #{e}" + nil + end + + private + + def `(other) + super + rescue Errno::ENOENT => e + "Unable to find an executable (#{e})" + end + + def pathless_exception_message(message) + message.gsub(/- \(.*\):/, '-') + end + + def error_from_podfile(error) + if error.message =~ /Podfile:(\d*)/ + "\nIt appears to have originated from your Podfile at line #{Regexp.last_match[1]}.\n" + end + end + + def remove_color(string) + string.gsub(/\e\[(\d+)m/, '') + end + + def issues_url(exception) + message = remove_color(pathless_exception_message(exception.message)) + 'https://github.com/CocoaPods/CocoaPods/search?q=' \ + "#{CGI.escape(message)}&type=Issues" + end + + def host_information + product, version, build = `sw_vers`.strip.split("\n").map { |line| line.split(':').last.strip } + "#{product} #{version} (#{build})" + end + + def xcode_information + version, build = `xcodebuild -version`.strip.split("\n").map { |line| line.split(' ').last } + "#{version} (#{build})" + end + + def git_information + `git --version`.strip.split("\n").first + end + + def installed_plugins + CLAide::Command::PluginManager.specifications. + reduce({}) { |hash, s| hash.tap { |h| h[s.name] = s.version.to_s } } + end + + def repo_information + Config.instance.sources_manager.all.map do |source| + repo = source.repo + if source.is_a?(Pod::CDNSource) + "#{repo.basename} - CDN - #{source.url}" + elsif source.git? + sha = git_hash(source) + "#{repo.basename} - git - #{source.url} @ #{sha}" + else + "#{repo.basename} - #{source.type}" + end + end + end + + def original_command + "#{$PROGRAM_NAME} #{ARGV.join(' ')}" + end + + private + + # @param [Source] source + # a git source + # + # @return [String] the current git SHA + def git_hash(source) + Dir.chdir(source.repo) do + `git rev-parse HEAD 2>&1` + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/inspector_reporter.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/inspector_reporter.rb new file mode 100644 index 0000000..76057f4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/user_interface/inspector_reporter.rb @@ -0,0 +1,102 @@ +require 'addressable' +require 'uri' + +module Pod + module UserInterface + # Redirects GH-issues delegate callbacks to CocoaPods UI methods. + # + class InspectorReporter + # Called just as the investigation has begun. + # Lets the user know that it's looking for an issue. + # + # @param [GhInspector::Inspector] inspector + # The current inspector + # + # @return [void] + # + def inspector_started_query(_, inspector) + UI.puts "Looking for related issues on #{inspector.repo_owner}/#{inspector.repo_name}..." + end + + # Called once the inspector has received a report with more than one issue, + # showing the top 3 issues, and offering a link to see more. 
+      #
+      # @param [GhInspector::InspectionReport] report
+      #        The report containing the list of issues
+      #
+      # @return [void]
+      #
+      def inspector_successfully_received_report(report, _)
+        report.issues[0..2].each { |issue| print_issue_full(issue) }
+
+        if report.issues.count > 3
+          UI.puts "and #{report.total_results - 3} more at:"
+          UI.puts report.url
+        end
+      end
+
+      # Called once the report has been received, but when there are no issues found.
+      #
+      # @param [GhInspector::Inspector] inspector
+      #        The current inspector
+      #
+      # @return [void]
+      #
+      def inspector_received_empty_report(_, inspector)
+        UI.puts 'Found no similar issues. To create a new issue, please visit:'
+        UI.puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/issues/new"
+      end
+
+      # Called when there have been networking issues in creating the report.
+      #
+      # @param [Error] error
+      #        The error returned during networking
+      #
+      # @param [String] query
+      #        The original search query
+      #
+      # @param [GhInspector::Inspector] inspector
+      #        The current inspector
+      #
+      # @return [void]
+      #
+      def inspector_could_not_create_report(error, query, inspector)
+        safe_query = Addressable::URI.escape query
+        UI.puts 'Could not access the GitHub API, you may have better luck via the website.'
+        UI.puts "https://github.com/#{inspector.repo_owner}/#{inspector.repo_name}/search?q=#{safe_query}&type=Issues&utf8=✓"
+        UI.puts "Error: #{error.name}"
+      end
+
+      private
+
+      def print_issue_full(issue)
+        safe_url = Addressable::URI.escape issue.html_url
+        UI.puts " - #{issue.title}"
+        UI.puts "   #{safe_url} [#{issue.state}] [#{issue.comments} comment#{issue.comments == 1 ? '' : 's'}]"
+        UI.puts "   #{pretty_date(issue.updated_at)}"
+        UI.puts ''
+      end
+
+      # Taken from https://stackoverflow.com/questions/195740/how-do-you-do-relative-time-in-rails
+      def pretty_date(date_string)
+        date = Time.parse(date_string)
+        a = (Time.now - date).to_i
+
+        case a
+        when 0 then 'just now'
+        when 1 then 'a second ago'
+        when 2..59 then a.to_s + ' seconds ago'
+        when 60..119 then 'a minute ago' # 120 = 2 minutes
+        when 120..3540 then (a / 60).to_i.to_s + ' minutes ago'
+        when 3541..7100 then 'an hour ago' # 3600 = 1 hour
+        when 7101..82_800 then ((a + 99) / 3600).to_i.to_s + ' hours ago'
+        when 82_801..172_000 then 'a day ago' # 86400 = 1 day
+        when 172_001..518_400 then ((a + 800) / (60 * 60 * 24)).to_i.to_s + ' days ago'
+        when 518_400..1_036_800 then 'a week ago'
+        when 1_036_801..4_147_204 then ((a + 180_000) / (60 * 60 * 24 * 7)).to_i.to_s + ' weeks ago'
+        else date.strftime('%d %b %Y')
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/validator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/validator.rb
new file mode 100644
index 0000000..1e3daa8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/validator.rb
@@ -0,0 +1,1172 @@
+require 'active_support/core_ext/array'
+require 'active_support/core_ext/string/inflections'
+
+module Pod
+  # Validates a Specification.
+  #
+  # Extends the Linter from the Core to add additional checks which require
+  # the LocalPod and the Installer.
+  #
+  # In detail it checks that the file patterns defined by the user actually
+  # match at least one file and that the Pod builds, by installing it without
+  # integration and building the project with xcodebuild.
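+  #
+  # @example Editor's illustrative sketch of driving the validator from Ruby
+  #          (not part of the upstream gem; the podspec path and source URL
+  #          are hypothetical).
+  #
+  #   validator = Validator.new('Foo.podspec', %w(https://cdn.cocoapods.org/))
+  #   validator.quick = true # skip the download/build checks
+  #   validator.validate
+  #   puts validator.results_message unless validator.validated?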
+ # + class Validator + include Config::Mixin + + # The default version of Swift to use when linting pods + # + DEFAULT_SWIFT_VERSION = '4.0'.freeze + + # The valid platforms for linting + # + VALID_PLATFORMS = Platform.all.freeze + + # @return [Specification::Linter] the linter instance from CocoaPods + # Core. + # + attr_reader :linter + + # Initialize a new instance + # + # @param [Specification, Pathname, String] spec_or_path + # the Specification or the path of the `podspec` file to lint. + # + # @param [Array] source_urls + # the Source URLs to use in creating a {Podfile}. + # + # @param [Array] platforms + # the platforms to lint. + # + def initialize(spec_or_path, source_urls, platforms = []) + @use_frameworks = true + @linter = Specification::Linter.new(spec_or_path) + @source_urls = if @linter.spec && @linter.spec.dependencies.empty? && @linter.spec.recursive_subspecs.all? { |s| s.dependencies.empty? } + [] + else + source_urls.map { |url| config.sources_manager.source_with_name_or_url(url) }.map(&:url) + end + + @platforms = platforms.map do |platform| + result = case platform.to_s.downcase + # Platform doesn't recognize 'macos' as being the same as 'osx' when initializing + when 'macos' then Platform.macos + else Platform.new(platform, nil) + end + unless valid_platform?(result) + raise Informative, "Unrecognized platform `#{platform}`. Valid platforms: #{VALID_PLATFORMS.join(', ')}" + end + result + end + end + + #-------------------------------------------------------------------------# + + # @return [Specification] the specification to lint. + # + def spec + @linter.spec + end + + # @return [Pathname] the path of the `podspec` file where {#spec} is + # defined. + # + def file + @linter.file + end + + # Returns a list of platforms to lint for a given Specification + # + # @param [Specification] spec + # The specification to lint + # + # @return [Array] platforms to lint for the given specification + # + def platforms_to_lint(spec) + return spec.available_platforms if @platforms.empty? + + # Validate that the platforms specified are actually supported by the spec + results = @platforms.map do |platform| + matching_platform = spec.available_platforms.find { |p| p.name == platform.name } + unless matching_platform + raise Informative, "Platform `#{platform}` is not supported by specification `#{spec}`." + end + matching_platform + end.uniq + + results + end + + # @return [Sandbox::FileAccessor] the file accessor for the spec. + # + attr_accessor :file_accessor + + #-------------------------------------------------------------------------# + + # Lints the specification adding a {Result} for any + # failed check to the {#results} list. + # + # @note This method shows immediately which pod is being processed and + # overrides the printed line once the result is known. + # + # @return [Boolean] whether the specification passed validation. + # + def validate + @results = [] + + # Replace default spec with a subspec if asked for + a_spec = spec + if spec && @only_subspec + subspec_name = @only_subspec.start_with?("#{spec.root.name}/") ? @only_subspec : "#{spec.root.name}/#{@only_subspec}" + a_spec = spec.subspec_by_name(subspec_name, true, true) + @subspec_name = a_spec.name + end + + UI.print " -> #{a_spec ? a_spec.name : file.basename}\r" unless config.silent? + $stdout.flush + + perform_linting + perform_extensive_analysis(a_spec) if a_spec && !quick + + UI.puts ' -> '.send(result_color) << (a_spec ? a_spec.to_s : file.basename.to_s) + print_results + validated? 
+    end
+
+    # Prints the result of the validation to the user.
+    #
+    # @return [void]
+    #
+    def print_results
+      UI.puts results_message
+    end
+
+    def results_message
+      message = ''
+      results.each do |result|
+        if result.platforms == [:ios]
+          platform_message = '[iOS] '
+        elsif result.platforms == [:osx]
+          platform_message = '[OSX] '
+        elsif result.platforms == [:watchos]
+          platform_message = '[watchOS] '
+        elsif result.platforms == [:tvos]
+          platform_message = '[tvOS] '
+        end
+
+        subspecs_message = ''
+        if result.is_a?(Result)
+          subspecs = result.subspecs.uniq
+          if subspecs.count > 2
+            subspecs_message = '[' + subspecs[0..2].join(', ') + ', and more...] '
+          elsif subspecs.count > 0
+            subspecs_message = '[' + subspecs.join(',') + '] '
+          end
+        end
+
+        case result.type
+        when :error   then type = 'ERROR'
+        when :warning then type = 'WARN'
+        when :note    then type = 'NOTE'
+        else raise "#{result.type}" end
+        message << " - #{type.ljust(5)} | #{platform_message}#{subspecs_message}#{result.attribute_name}: #{result.message}\n"
+      end
+      message << "\n"
+    end
+
+    def failure_reason
+      results_by_type = results.group_by(&:type)
+      results_by_type.default = []
+      return nil if validated?
+      reasons = []
+      if (size = results_by_type[:error].size) && size > 0
+        reasons << "#{size} #{'error'.pluralize(size)}"
+      end
+      if !allow_warnings && (size = results_by_type[:warning].size) && size > 0
+        reason = "#{size} #{'warning'.pluralize(size)}"
+        pronoun = size == 1 ? 'it' : 'them'
+        reason << " (but you can use `--allow-warnings` to ignore #{pronoun})" if reasons.empty?
+        reasons << reason
+      end
+      if results.all?(&:public_only)
+        reasons << 'all results apply only to public specs, but you can use ' \
+                   '`--private` to ignore them if linting the specification for a private pod'
+      end
+
+      reasons.to_sentence
+    end
+
+    #-------------------------------------------------------------------------#
+
+    # @!group Configuration
+
+    # @return [Boolean] whether the validation should skip the checks that
+    #         require the download of the library.
+    #
+    attr_accessor :quick
+
+    # @return [Boolean] whether the linter should not clean up temporary files
+    #         for inspection.
+    #
+    attr_accessor :no_clean
+
+    # @return [Boolean] whether the linter should fail as soon as the first build
+    #         variant causes an error. Helpful e.g. for multi-platform specs
+    #         or specs with subspecs.
+    #
+    attr_accessor :fail_fast
+
+    # @return [Boolean] whether the validation should be performed against the root of
+    #         the podspec instead of its original source.
+    #
+    # @note   Uses the `:path` option of the Podfile.
+    #
+    attr_accessor :local
+    alias_method :local?, :local
+
+    # @return [Boolean] Whether the validator should fail on warnings, or only on errors.
+    #
+    attr_accessor :allow_warnings
+
+    # @return [String] name of the subspec to check, if nil all subspecs are checked.
+    #
+    attr_accessor :only_subspec
+
+    # @return [Boolean] Whether the validator should validate all subspecs.
+    #
+    attr_accessor :no_subspecs
+
+    # @return [Boolean] Whether the validator should skip building and running tests.
+    #
+    attr_accessor :skip_tests
+
+    # @return [Array] List of test_specs to run. If nil, all tests are run (unless skip_tests is specified).
+    #
+    attr_accessor :test_specs
+
+    # @return [Boolean] Whether the validator should run Xcode Static Analysis.
+    #
+    attr_accessor :analyze
+
+    # @return [Boolean] Whether frameworks should be used for the installation.
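+    #
+    # @note Defaults to `true` (see #initialize). Editor's illustrative
+    #       sketch, with a hypothetical `validator` instance, of linting
+    #       against static libraries instead:
+    #
+    #     validator.use_frameworks = false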
+    #
+    attr_accessor :use_frameworks
+
+    # @return [Boolean] Whether modular headers should be used for the installation.
+    #
+    attr_accessor :use_modular_headers
+
+    # @return [Boolean] Whether static frameworks should be used for the installation.
+    #
+    attr_accessor :use_static_frameworks
+
+    # @return [Boolean] Whether attributes that affect only public sources
+    #         should be skipped.
+    #
+    attr_accessor :ignore_public_only_results
+
+    # @return [String] A glob for podspecs to be used during building of
+    #         the local Podfile via :path.
+    #
+    attr_accessor :include_podspecs
+
+    # @return [String] A glob for podspecs to be used during building of
+    #         the local Podfile via :podspec.
+    #
+    attr_accessor :external_podspecs
+
+    attr_accessor :skip_import_validation
+    alias_method :skip_import_validation?, :skip_import_validation
+
+    attr_accessor :configuration
+
+    #-------------------------------------------------------------------------#
+
+    # !@group Lint results
+
+    #
+    #
+    attr_reader :results
+
+    # @return [Boolean]
+    #
+    def validated?
+      result_type != :error && (result_type != :warning || allow_warnings)
+    end
+
+    # @return [Symbol] The type which should be used to display the result.
+    #         One of: `:error`, `:warning`, `:note`.
+    #
+    def result_type
+      applicable_results = results
+      applicable_results = applicable_results.reject(&:public_only?) if ignore_public_only_results
+      types = applicable_results.map(&:type).uniq
+      if types.include?(:error) then :error
+      elsif types.include?(:warning) then :warning
+      else :note
+      end
+    end
+
+    # @return [Symbol] The color which should be used to display the result.
+    #         One of: `:green`, `:yellow`, `:red`.
+    #
+    def result_color
+      case result_type
+      when :error   then :red
+      when :warning then :yellow
+      else :green end
+    end
+
+    # @return [Pathname] the temporary directory used by the linter.
+    #
+    def validation_dir
+      @validation_dir ||= Pathname(Dir.mktmpdir(['CocoaPods-Lint-', "-#{spec.name}"]))
+    end
+
+    def validation_dir=(validation_dir)
+      @validation_dir = Pathname(validation_dir) unless validation_dir.nil?
+    end
+
+    # @return [String] The SWIFT_VERSION that should be used to validate the pod. This is set by passing the
+    #         `--swift-version` parameter during validation.
+    #
+    attr_accessor :swift_version
+
+    # @return [String] the SWIFT_VERSION within the .swift-version file or nil.
+    #
+    def dot_swift_version
+      return unless file
+      swift_version_path = file.dirname + '.swift-version'
+      return unless swift_version_path.exist?
+      swift_version_path.read.strip
+    end
+
+    # @return [String] The derived Swift version to use for validation. The order of precedence is as follows:
+    #         - The `--swift-version` parameter is always checked first and honored if passed.
+    #         - The `swift_versions` DSL attribute within the podspec, in which case the latest version is always chosen.
+    #         - The Swift version within the `.swift-version` file if present.
+    #         - If none of the above are set then the `#DEFAULT_SWIFT_VERSION` is used.
+    #
+    def derived_swift_version
+      @derived_swift_version ||= begin
+        if !swift_version.nil?
+          swift_version
+        elsif version = spec.swift_versions.max || dot_swift_version
+          version.to_s
+        else
+          DEFAULT_SWIFT_VERSION
+        end
+      end
+    end
+
+    # @return [Boolean] Whether any of the pod targets that are part of this validator use Swift.
+    #
+    def uses_swift?
+      @installer.pod_targets.any?(&:uses_swift?)
+    end
+
+    #-------------------------------------------------------------------------#
+
+    private
+
+    # !@group Lint steps
+
+    #
+    #
+    def perform_linting
+      linter.lint
+      @results.concat(linter.results.to_a)
+    end
+
+    # Perform analysis for a given spec (or subspec)
+    #
+    def perform_extensive_analysis(spec)
+      if spec.non_library_specification?
+        error('spec', "Validating a non library spec (`#{spec.name}`) is not supported.")
+        return false
+      end
+      validate_homepage(spec)
+      validate_screenshots(spec)
+      validate_social_media_url(spec)
+      validate_documentation_url(spec)
+      validate_source_url(spec)
+
+      platforms = platforms_to_lint(spec)
+
+      valid = platforms.send(fail_fast ? :all? : :each) do |platform|
+        UI.message "\n\n#{spec} - Analyzing on #{platform} platform.".green.reversed
+        @consumer = spec.consumer(platform)
+        setup_validation_environment
+        begin
+          create_app_project
+          download_pod
+          check_file_patterns
+          install_pod
+          validate_swift_version
+          add_app_project_import
+          validate_vendored_dynamic_frameworks
+          build_pod
+          test_pod unless skip_tests
+        ensure
+          tear_down_validation_environment
+        end
+        validated?
+      end
+      return false if fail_fast && !valid
+      perform_extensive_subspec_analysis(spec) unless @no_subspecs
+    rescue => e
+      message = e.to_s
+      message << "\n" << e.backtrace.join("\n") << "\n" if config.verbose?
+      error('unknown', "Encountered an unknown error (#{message}) during validation.")
+      false
+    end
+
+    # Recursively perform the extensive analysis on all subspecs
+    #
+    def perform_extensive_subspec_analysis(spec)
+      spec.subspecs.reject(&:non_library_specification?).send(fail_fast ? :all? : :each) do |subspec|
+        @subspec_name = subspec.name
+        perform_extensive_analysis(subspec)
+      end
+    end
+
+    # @return [Consumer] the consumer for the current platform being validated
+    #
+    attr_accessor :consumer
+
+    # @return [String, Nil] the name of the current subspec being validated, or nil if none
+    #
+    attr_accessor :subspec_name
+
+    # Performs validation of a URL
+    #
+    def validate_url(url, user_agent = nil)
+      resp = Pod::HTTP.validate_url(url, user_agent)
+
+      if !resp
+        warning('url', "There was a problem validating the URL #{url}.", true)
+      elsif !resp.success?
+        note('url', "The URL (#{url}) is not reachable.", true)
+      end
+
+      resp
+    end
+
+    # Performs validations related to the `homepage` attribute.
+    #
+    def validate_homepage(spec)
+      if spec.homepage
+        validate_url(spec.homepage)
+      end
+    end
+
+    # Performs validations related to the `screenshots` attribute.
+    #
+    def validate_screenshots(spec)
+      spec.screenshots.compact.each do |screenshot|
+        response = validate_url(screenshot)
+        if response && !(response.headers['content-type'] && response.headers['content-type'].first =~ /image\/.*/i)
+          warning('screenshot', "The screenshot #{screenshot} is not a valid image.")
+        end
+      end
+    end
+
+    # Performs validations related to the `social_media_url` attribute.
+    #
+    def validate_social_media_url(spec)
+      validate_url(spec.social_media_url, 'CocoaPods') if spec.social_media_url
+    end
+
+    # Performs validations related to the `documentation_url` attribute.
+    #
+    def validate_documentation_url(spec)
+      validate_url(spec.documentation_url) if spec.documentation_url
+    end
+
+    # Performs validations related to the `source` -> `http` attribute (if it exists)
+    #
+    def validate_source_url(spec)
+      return if spec.source.nil? || spec.source[:http].nil?
+      url = URI(spec.source[:http])
+      return if url.scheme == 'https' || url.scheme == 'file'
+      warning('http', "The URL (`#{url}`) doesn't use the encrypted HTTPS protocol. " \
+              'It is crucial for Pods to be transferred over a secure protocol to protect your users from man-in-the-middle attacks. '\
+              'This will be an error in future releases. Please update the URL to use https.')
+    end
+
+    # Performs validation for the version of Swift used during validation.
+    #
+    # An error will be displayed if the user has provided a `swift_versions` attribute within the podspec but is also
+    # using either the `--swift-version` parameter or a `.swift-version` file with a Swift version that is not declared
+    # within the attribute.
+    #
+    # The user will be warned that the default version of Swift was used if the following things are true:
+    #   - The project uses Swift at all
+    #   - The user did not supply a Swift version via a parameter
+    #   - There is no `swift_versions` attribute set within the specification
+    #   - There is no `.swift-version` file present either.
+    #
+    def validate_swift_version
+      return unless uses_swift?
+      spec_swift_versions = spec.swift_versions.map(&:to_s)
+
+      unless spec_swift_versions.empty?
+        message = nil
+        if !dot_swift_version.nil? && !spec_swift_versions.include?(dot_swift_version)
+          message = "Specification `#{spec.name}` specifies inconsistent `swift_versions` (#{spec_swift_versions.map { |s| "`#{s}`" }.to_sentence}) compared to the one present in your `.swift-version` file (`#{dot_swift_version}`). " \
+                    'Please remove the `.swift-version` file which is now deprecated and only use the `swift_versions` attribute within your podspec.'
+        elsif !swift_version.nil? && !spec_swift_versions.include?(swift_version)
+          message = "Specification `#{spec.name}` specifies inconsistent `swift_versions` (#{spec_swift_versions.map { |s| "`#{s}`" }.to_sentence}) compared to the one passed during lint (`#{swift_version}`)."
+        end
+        unless message.nil?
+          error('swift', message)
+          return
+        end
+      end
+
+      if swift_version.nil? && spec.swift_versions.empty?
+        if !dot_swift_version.nil?
+          # The user will be warned to delete the `.swift-version` file in favor of the `swift_versions` DSL attribute.
+          # This is intentionally not a lint warning since we do not want to break existing setups and instead just soft
+          # deprecate this slowly.
+          #
+          UI.warn 'Usage of the `.swift-version` file has been deprecated! Please delete the file and use the ' \
+            "`swift_versions` attribute within your podspec instead.\n".yellow
+        else
+          warning('swift',
+                  'The validator used ' \
+                  "Swift `#{DEFAULT_SWIFT_VERSION}` by default because no Swift version was specified. " \
+                  'To specify a Swift version during validation, add the `swift_versions` attribute in your podspec. ' \
+                  'Note that usage of a `.swift-version` file is now deprecated.')
+        end
+      end
+    end
+
+    def setup_validation_environment
+      validation_dir.rmtree if validation_dir.exist?
+      validation_dir.mkpath
+      @original_config = Config.instance.clone
+      config.installation_root = validation_dir
+      config.silent = !config.verbose
+    end
+
+    def tear_down_validation_environment
+      clean! unless no_clean
+      Config.instance = @original_config
+    end
+
+    def clean!
+      validation_dir.rmtree
+    end
+
+    # @return [String] The deployment target of the library spec.
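+    #
+    # @example Editor's illustrative sketch (not upstream documentation):
+    #          with `use_frameworks` enabled, an iOS deployment target lower
+    #          than 8.0 is raised to the dynamic-framework minimum.
+    #
+    #   # podspec declares `s.ios.deployment_target = '7.0'`
+    #   deployment_target # => "8.0"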
+    #
+    def deployment_target
+      deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name)
+      if consumer.platform_name == :ios && use_frameworks
+        minimum = Version.new('8.0')
+        deployment_target = [Version.new(deployment_target), minimum].max.to_s
+      end
+      deployment_target
+    end
+
+    def download_pod
+      test_spec_names = consumer.spec.test_specs.select { |ts| ts.supported_on_platform?(consumer.platform_name) }.map(&:name)
+      podfile = podfile_from_spec(consumer.platform_name, deployment_target, use_frameworks, test_spec_names, use_modular_headers, use_static_frameworks)
+      sandbox = Sandbox.new(config.sandbox_root)
+      @installer = Installer.new(sandbox, podfile)
+      @installer.use_default_plugins = false
+      @installer.has_dependencies = !spec.dependencies.empty?
+      %i(prepare resolve_dependencies download_dependencies write_lockfiles).each { |m| @installer.send(m) }
+      @file_accessor = @installer.pod_targets.flat_map(&:file_accessors).find { |fa| fa.spec.name == consumer.spec.name }
+    end
+
+    def create_app_project
+      app_project = Xcodeproj::Project.new(validation_dir + 'App.xcodeproj')
+      app_target = Pod::Generator::AppTargetHelper.add_app_target(app_project, consumer.platform_name, deployment_target)
+      sandbox = Sandbox.new(config.sandbox_root)
+      info_plist_path = app_project.path.dirname.+('App/App-Info.plist')
+      Pod::Installer::Xcode::PodsProjectGenerator::TargetInstallerHelper.create_info_plist_file_with_sandbox(sandbox,
+                                                                                                            info_plist_path,
+                                                                                                            app_target,
+                                                                                                            '1.0.0',
+                                                                                                            Platform.new(consumer.platform_name),
+                                                                                                            :appl,
+                                                                                                            :build_setting_value => '$(SRCROOT)/App/App-Info.plist')
+      Pod::Generator::AppTargetHelper.add_swift_version(app_target, derived_swift_version)
+      app_target.build_configurations.each do |config|
+        # Lint will fail if an AppIcon is set but no image with that name is found
+        # Happens only with Static Frameworks enabled but shouldn't be set anyway
+        config.build_settings.delete('ASSETCATALOG_COMPILER_APPICON_NAME')
+        # Ensure this is set generally but we have seen an issue with ODRs:
+        # see: https://github.com/CocoaPods/CocoaPods/issues/10933
+        config.build_settings['PRODUCT_BUNDLE_IDENTIFIER'] = 'org.cocoapods.${PRODUCT_NAME:rfc1034identifier}'
+      end
+      app_project.save
+      app_project.recreate_user_schemes
+    end
+
+    def add_app_project_import
+      app_project = Xcodeproj::Project.open(validation_dir + 'App.xcodeproj')
+      app_target = app_project.targets.first
+      pod_target = validation_pod_target
+      Pod::Generator::AppTargetHelper.add_app_project_import(app_project, app_target, pod_target, consumer.platform_name)
+      Pod::Generator::AppTargetHelper.add_xctest_search_paths(app_target) if @installer.pod_targets.any? { |pt| pt.spec_consumers.any? { |c| c.frameworks.include?('XCTest') || c.weak_frameworks.include?('XCTest') } }
+      Pod::Generator::AppTargetHelper.add_empty_swift_file(app_project, app_target) if @installer.pod_targets.any?(&:uses_swift?)
+      app_project.save
+      Xcodeproj::XCScheme.share_scheme(app_project.path, 'App')
+      # Share the pods xcscheme only if it exists. For pre-built vendored pods there is no xcscheme generated.
+      Xcodeproj::XCScheme.share_scheme(@installer.pods_project.path, pod_target.label) if shares_pod_target_xcscheme?(pod_target)
+    end
+
+    # Returns the pod target for the pod being validated. Installation must have occurred before this can be invoked.
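+    #
+    # @example Editor's illustrative sketch (not upstream documentation):
+    #
+    #   download_pod # populates @installer
+    #   validation_pod_target.pod_name # => spec.root.name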
+    #
+    def validation_pod_target
+      @installer.pod_targets.find { |pt| pt.pod_name == spec.root.name }
+    end
+
+    # It creates a podfile in memory and builds a library containing the pod
+    # for all available platforms with xcodebuild.
+    #
+    def install_pod
+      %i(validate_targets generate_pods_project integrate_user_project
+         perform_post_install_actions).each { |m| @installer.send(m) }
+
+      deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name)
+      configure_pod_targets(@installer.target_installation_results)
+      validate_dynamic_framework_support(@installer.aggregate_targets, deployment_target)
+      @installer.pods_project.save
+    end
+
+    # @param [Array] target_installation_results
+    #        The installation results to configure
+    #
+    def configure_pod_targets(target_installation_results)
+      target_installation_results.first.values.each do |pod_target_installation_result|
+        pod_target = pod_target_installation_result.target
+        native_target = pod_target_installation_result.native_target
+        native_target.build_configuration_list.build_configurations.each do |build_configuration|
+          (build_configuration.build_settings['OTHER_CFLAGS'] ||= '$(inherited)') << ' -Wincomplete-umbrella'
+          if pod_target.uses_swift?
+            # The Swift version for the target being validated can be overridden by `--swift-version` or the
+            # `.swift-version` file so we always use the derived Swift version.
+            #
+            # For dependencies, if the derived Swift version is supported then it is the one used. Otherwise, the Swift
+            # version for dependencies is inferred by the target that is integrating them.
+            swift_version = if pod_target == validation_pod_target
+                              derived_swift_version
+                            else
+                              pod_target.spec_swift_versions.map(&:to_s).find do |v|
+                                v == derived_swift_version
+                              end || pod_target.swift_version
+                            end
+            build_configuration.build_settings['SWIFT_VERSION'] = swift_version
+          end
+        end
+        pod_target_installation_result.test_specs_by_native_target.each do |test_native_target, test_spec|
+          if pod_target.uses_swift_for_spec?(test_spec)
+            test_native_target.build_configuration_list.build_configurations.each do |build_configuration|
+              swift_version = pod_target == validation_pod_target ? derived_swift_version : pod_target.swift_version
+              build_configuration.build_settings['SWIFT_VERSION'] = swift_version
+            end
+          end
+        end
+      end
+    end
+
+    # Produces an error if dynamic frameworks were requested but are not supported by the deployment target
+    #
+    # @param [Array] aggregate_targets
+    #        The aggregate targets installed by the installer
+    #
+    # @param [String,Version] deployment_target
+    #        The deployment target of the installation
+    #
+    def validate_dynamic_framework_support(aggregate_targets, deployment_target)
+      return unless consumer.platform_name == :ios
+      return unless deployment_target.nil? || Version.new(deployment_target).major < 8
+      aggregate_targets.each do |target|
+        if target.pod_targets.any?(&:uses_swift?)
+          uses_xctest = target.spec_consumers.any? { |c| (c.frameworks + c.weak_frameworks).include? 'XCTest' }
+          error('swift', 'Swift support uses dynamic frameworks and is therefore only supported on iOS > 8.') unless uses_xctest
+        end
+      end
+    end
+
+    def validate_vendored_dynamic_frameworks
+      deployment_target = spec.subspec_by_name(subspec_name).deployment_target(consumer.platform_name)
+
+      unless file_accessor.nil?
+        dynamic_frameworks = file_accessor.vendored_dynamic_frameworks
+        dynamic_libraries = file_accessor.vendored_dynamic_libraries
+        if (dynamic_frameworks.count > 0 || dynamic_libraries.count > 0) && consumer.platform_name == :ios &&
+           (deployment_target.nil? || Version.new(deployment_target).major < 8)
+          error('dynamic', 'Dynamic frameworks and libraries are only supported on iOS 8.0 and onwards.')
+        end
+      end
+    end
+
+    # Performs platform-specific analysis. It requires downloading the source
+    # at each iteration
+    #
+    # @note Xcode warnings are treated as notes because the spec maintainer
+    #       might not be the author of the library
+    #
+    # @return [void]
+    #
+    def build_pod
+      if !xcodebuild_available?
+        UI.warn "Skipping compilation with `xcodebuild` because it can't be found.\n".yellow
+      else
+        UI.message "\nBuilding with `xcodebuild`.\n".yellow do
+          scheme = if skip_import_validation?
+                     validation_pod_target.label if validation_pod_target.should_build?
+                   else
+                     'App'
+                   end
+          if scheme.nil?
+            UI.warn "Skipping compilation with `xcodebuild` because target contains no sources.\n".yellow
+          else
+            requested_configuration = configuration ? configuration : 'Release'
+            if analyze
+              output = xcodebuild('analyze', scheme, requested_configuration, :deployment_target => deployment_target)
+              find_output = Executable.execute_command('find', [validation_dir, '-name', '*.html'], false)
+              if find_output != ''
+                message = 'Static Analysis failed.'
+                message += ' You can use `--verbose` for more information.' unless config.verbose?
+                message += ' You can use `--no-clean` to save a reproducible build environment.' unless no_clean
+                error('build_pod', message)
+              end
+            else
+              output = xcodebuild('build', scheme, requested_configuration, :deployment_target => deployment_target)
+            end
+            parsed_output = parse_xcodebuild_output(output)
+            translate_output_to_linter_messages(parsed_output)
+          end
+        end
+      end
+    end
+
+    # Builds and runs all test sources associated with the current specification being validated.
+    #
+    # @note Xcode warnings are treated as notes because the spec maintainer
+    #       might not be the author of the library
+    #
+    # @return [void]
+    #
+    def test_pod
+      if !xcodebuild_available?
+        UI.warn "Skipping test validation with `xcodebuild` because it can't be found.\n".yellow
+      else
+        UI.message "\nTesting with `xcodebuild`.\n".yellow do
+          pod_target = validation_pod_target
+          all_test_specs = consumer.spec.test_specs
+          unless test_specs.nil?
+            test_spec_names = all_test_specs.map(&:base_name)
+            all_test_specs.select! { |test_spec| test_specs.include? test_spec.base_name }
+            test_specs.each do |test_spec|
+              unless test_spec_names.include? test_spec
+                UI.warn "Requested test spec `#{test_spec}` does not exist in the podspec. Existing test specs are `#{test_spec_names}`"
+              end
+            end
+          end
+          all_test_specs.each do |test_spec|
+            if !test_spec.supported_on_platform?(consumer.platform_name)
+              UI.warn "Skipping test spec `#{test_spec.name}` on platform `#{consumer.platform_name}` since it is not supported.\n".yellow
+            else
+              scheme = @installer.target_installation_results.first[pod_target.name].native_target_for_spec(test_spec)
+              output = xcodebuild('test', scheme, 'Debug', :deployment_target => test_spec.deployment_target(consumer.platform_name))
+              parsed_output = parse_xcodebuild_output(output)
+              translate_output_to_linter_messages(parsed_output)
+            end
+          end
+        end
+      end
+    end
+
+    def xcodebuild_available?
+      !Executable.which('xcodebuild').nil? && ENV['COCOAPODS_VALIDATOR_SKIP_XCODEBUILD'].nil?
+    end
+
+    FILE_PATTERNS = %i(source_files resources preserve_paths vendored_libraries
+                       vendored_frameworks public_header_files
+                       project_header_files private_header_files resource_bundles).freeze
+
+    # It checks that every file pattern specified in a spec yields
+    # at least one file. It requires the pods to be already present
+    # in the current working directory under Pods/spec.name.
+    #
+    # @return [void]
+    #
+    def check_file_patterns
+      FILE_PATTERNS.each do |attr_name|
+        if respond_to?("_validate_#{attr_name}", true)
+          send("_validate_#{attr_name}")
+        else
+          validate_nonempty_patterns(attr_name, :error)
+        end
+      end
+
+      _validate_header_mappings_dir
+      if consumer.spec.root?
+        _validate_license
+        _validate_module_map
+      end
+    end
+
+    # Validates that the file patterns in `attr_name` match at least one file.
+    #
+    # @param [String,Symbol] attr_name the name of the attribute to check (ex. :public_header_files)
+    #
+    # @param [String,Symbol] message_type the type of message to send if the patterns are empty (ex. :error)
+    #
+    def validate_nonempty_patterns(attr_name, message_type)
+      return unless !file_accessor.spec_consumer.send(attr_name).empty? && file_accessor.send(attr_name).empty?
+
+      add_result(message_type, 'file patterns', "The `#{attr_name}` pattern did not match any file.")
+    end
+
+    def _validate_vendored_libraries
+      file_accessor.vendored_libraries.each do |lib|
+        basename = File.basename(lib)
+        lib_name = basename.downcase
+        unless lib_name.end_with?('.a', '.dylib') && lib_name.start_with?('lib')
+          warning('vendored_libraries', "`#{basename}` does not match the expected library name format `lib[name].a` or `lib[name].dylib`")
+        end
+      end
+      validate_nonempty_patterns(:vendored_libraries, :warning)
+    end
+
+    def _validate_project_header_files
+      _validate_header_files(:project_header_files)
+      validate_nonempty_patterns(:project_header_files, :warning)
+    end
+
+    def _validate_private_header_files
+      _validate_header_files(:private_header_files)
+      validate_nonempty_patterns(:private_header_files, :warning)
+    end
+
+    def _validate_public_header_files
+      _validate_header_files(:public_header_files)
+      validate_nonempty_patterns(:public_header_files, :warning)
+    end
+
+    def _validate_license
+      unless file_accessor.license || spec.license && (spec.license[:type] == 'Public Domain' || spec.license[:text])
+        warning('license', 'Unable to find a license file')
+      end
+    end
+
+    def _validate_module_map
+      if spec.module_map
+        unless file_accessor.module_map.exist?
+          error('module_map', 'Unable to find the specified module map file.')
+        end
+        unless file_accessor.module_map.extname == '.modulemap'
+          relative_path = file_accessor.module_map.relative_path_from file_accessor.root
+          error('module_map', "Unexpected file extension for modulemap file (#{relative_path}).")
+        end
+      end
+    end
+
+    def _validate_resource_bundles
+      file_accessor.resource_bundles.each do |bundle, resource_paths|
+        next unless resource_paths.empty?
+        error('file patterns', "The `resource_bundles` pattern for `#{bundle}` did not match any file.")
+      end
+    end
+
+    # Ensures that a list of header files only contains header files.
+    #
+    def _validate_header_files(attr_name)
+      header_files = file_accessor.send(attr_name)
+      non_header_files = header_files.
+        select { |f| !Sandbox::FileAccessor::HEADER_EXTENSIONS.include?(f.extname) }.
+        map { |f| f.relative_path_from(file_accessor.root) }
+      unless non_header_files.empty?
+ error(attr_name, "The pattern matches non-header files (#{non_header_files.join(', ')}).") + end + non_source_files = header_files - file_accessor.source_files + unless non_source_files.empty? + error(attr_name, 'The pattern includes header files that are not listed ' \ + "in source_files (#{non_source_files.join(', ')}).") + end + end + + def _validate_header_mappings_dir + return unless header_mappings_dir = file_accessor.spec_consumer.header_mappings_dir + absolute_mappings_dir = file_accessor.root + header_mappings_dir + unless absolute_mappings_dir.directory? + error('header_mappings_dir', "The header_mappings_dir (`#{header_mappings_dir}`) is not a directory.") + end + non_mapped_headers = file_accessor.headers. + reject { |h| h.to_path.start_with?(absolute_mappings_dir.to_path) }. + map { |f| f.relative_path_from(file_accessor.root) } + unless non_mapped_headers.empty? + error('header_mappings_dir', "There are header files outside of the header_mappings_dir (#{non_mapped_headers.join(', ')}).") + end + end + + #-------------------------------------------------------------------------# + + private + + # !@group Result Helpers + + def error(*args) + add_result(:error, *args) + end + + def warning(*args) + add_result(:warning, *args) + end + + def note(*args) + add_result(:note, *args) + end + + def translate_output_to_linter_messages(parsed_output) + parsed_output.each do |message| + # Checking the error for `InputFile` is to work around an Xcode + # issue where linting would fail even though `xcodebuild` actually + # succeeds. Xcode.app also doesn't fail when this issue occurs, so + # it's safe for us to do the same. + # + # For more details see https://github.com/CocoaPods/CocoaPods/issues/2394#issuecomment-56658587 + # + if message.include?("'InputFile' should have") + next + end + + if message =~ /\S+:\d+:\d+: error:/ + error('xcodebuild', message) + elsif message =~ /\S+:\d+:\d+: warning:/ + warning('xcodebuild', message) + else + note('xcodebuild', message) + end + end + end + + def shares_pod_target_xcscheme?(pod_target) + Pathname.new(@installer.pods_project.path + pod_target.label).exist? + end + + def add_result(type, attribute_name, message, public_only = false) + result = results.find do |r| + r.type == type && r.attribute_name && r.message == message && r.public_only? == public_only + end + unless result + result = Result.new(type, attribute_name, message, public_only) + results << result + end + result.platforms << consumer.platform_name if consumer + result.subspecs << subspec_name if subspec_name && !result.subspecs.include?(subspec_name) + end + + # Specialized Result to support subspecs aggregation + # + class Result < Specification::Linter::Results::Result + def initialize(type, attribute_name, message, public_only = false) + super(type, attribute_name, message, public_only) + @subspecs = [] + end + + attr_reader :subspecs + end + + #-------------------------------------------------------------------------# + + private + + # !@group Helpers + + # @return [Array] an array of source URLs used to create the + # {Podfile} used in the linting process + # + attr_reader :source_urls + + # @param [String] platform_name + # the name of the platform, which should be declared + # in the Podfile. + # + # @param [String] deployment_target + # the deployment target, which should be declared in + # the Podfile. 
+    #
+    # @param [Boolean] use_frameworks
+    #        whether frameworks should be used for the installation
+    #
+    # @param [Array] test_spec_names
+    #        the test spec names to include in the podfile.
+    #
+    # @return [Podfile] a podfile that requires the specification on the
+    #         current platform.
+    #
+    # @note   The generated podfile takes into account whether the linter is
+    #         in local mode.
+    #
+    def podfile_from_spec(platform_name, deployment_target, use_frameworks = true, test_spec_names = [], use_modular_headers = false, use_static_frameworks = false)
+      name = subspec_name || spec.name
+      podspec = file.realpath
+      local = local?
+      urls = source_urls
+
+      additional_podspec_pods = external_podspecs ? Dir.glob(external_podspecs) : []
+      additional_path_pods = (include_podspecs ? Dir.glob(include_podspecs) : []).select { |path| spec.name != Specification.from_file(path).name } - additional_podspec_pods
+
+      Pod::Podfile.new do
+        install! 'cocoapods', :deterministic_uuids => false, :warn_for_unused_master_specs_repo => false
+        # By default inhibit warnings for all pods, except the one being validated.
+        inhibit_all_warnings!
+        urls.each { |u| source(u) }
+        target 'App' do
+          if use_static_frameworks
+            use_frameworks!(:linkage => :static)
+          else
+            use_frameworks!(use_frameworks)
+          end
+          use_modular_headers! if use_modular_headers
+          platform(platform_name, deployment_target)
+          if local
+            pod name, :path => podspec.dirname.to_s, :inhibit_warnings => false
+          else
+            pod name, :podspec => podspec.to_s, :inhibit_warnings => false
+          end
+
+          additional_path_pods.each do |podspec_path|
+            podspec_name = File.basename(podspec_path, '.*')
+            pod podspec_name, :path => File.dirname(podspec_path)
+          end
+
+          additional_podspec_pods.each do |podspec_path|
+            podspec_name = File.basename(podspec_path, '.*')
+            pod podspec_name, :podspec => podspec_path
+          end
+
+          test_spec_names.each do |test_spec_name|
+            if local
+              pod test_spec_name, :path => podspec.dirname.to_s, :inhibit_warnings => false
+            else
+              pod test_spec_name, :podspec => podspec.to_s, :inhibit_warnings => false
+            end
+          end
+        end
+      end
+    end
+
+    # Parse the Xcode build output to identify the lines which are relevant
+    # to the linter.
+    #
+    # @param [String] output the output generated by the xcodebuild tool.
+    #
+    # @note The indentation and the temporary path are stripped from the
+    #       lines.
+    #
+    # @return [Array] the lines that are relevant to the linter.
+    #
+    def parse_xcodebuild_output(output)
+      lines = output.split("\n")
+      selected_lines = lines.select do |l|
+        l.include?('error: ') && (l !~ /errors? generated\./) && (l !~ /error: \(null\)/) ||
+          l.include?('warning: ') && (l !~ /warnings? generated\./) && (l !~ /frameworks only run on iOS 8/) ||
+          l.include?('note: ') && (l !~ /expanded from macro/)
+      end
+      selected_lines.map do |l|
+        new = l.force_encoding('UTF-8').gsub(%r{#{validation_dir}/Pods/}, '')
+        new.gsub!(/^ */, ' ')
+      end
+    end
+
+    # @return [String] Executes xcodebuild in the current working directory and
+    #         returns its output (both STDOUT and STDERR).
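+    #
+    # @example Editor's illustrative sketch (not upstream documentation) of
+    #          the roughly assembled invocation for an iOS pod; the simulator
+    #          destination argument is supplied by Fourflusher:
+    #
+    #   xcodebuild clean build -workspace App.xcworkspace -scheme App \
+    #     -configuration Release CODE_SIGN_IDENTITY=- -sdk iphonesimulator ...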
+ # + def xcodebuild(action, scheme, configuration, deployment_target:) + require 'fourflusher' + command = %W(clean #{action} -workspace #{File.join(validation_dir, 'App.xcworkspace')} -scheme #{scheme} -configuration #{configuration}) + case consumer.platform_name + when :osx, :macos + command += %w(CODE_SIGN_IDENTITY=) + when :ios + command += %w(CODE_SIGN_IDENTITY=- -sdk iphonesimulator) + command += Fourflusher::SimControl.new.destination(:oldest, 'iOS', deployment_target) + xcconfig = consumer.pod_target_xcconfig + if xcconfig + archs = xcconfig['VALID_ARCHS'] + if archs && (archs.include? 'armv7') && !(archs.include? 'i386') && (archs.include? 'x86_64') + # Prevent Xcodebuild from testing the non-existent i386 simulator if armv7 is specified without i386 + command += %w(ARCHS=x86_64) + end + end + when :watchos + command += %w(CODE_SIGN_IDENTITY=- -sdk watchsimulator) + when :tvos + command += %w(CODE_SIGN_IDENTITY=- -sdk appletvsimulator) + command += Fourflusher::SimControl.new.destination(:oldest, 'tvOS', deployment_target) + end + + if analyze + command += %w(CLANG_ANALYZER_OUTPUT=html CLANG_ANALYZER_OUTPUT_DIR=analyzer) + end + + begin + _xcodebuild(command, true) + rescue => e + message = 'Returned an unsuccessful exit code.' + message += ' You can use `--verbose` for more information.' unless config.verbose? + error('xcodebuild', message) + e.message + end + end + + # Executes the given command in the current working directory. + # + # @return [String] The output of the given command + # + def _xcodebuild(command, raise_on_failure = false) + Executable.execute_command('xcodebuild', command, raise_on_failure) + end + + # Whether the platform with the specified name is valid + # + # @param [Platform] platform + # The platform to check + # + # @return [Boolean] True if the platform is valid + # + def valid_platform?(platform) + VALID_PLATFORMS.any? { |p| p.name == platform.name } + end + + # Whether the platform is supported by the specification + # + # @param [Platform] platform + # The platform to check + # + # @param [Specification] spec + # The specification which must support the provided platform + # + # @return [Boolean] Whether the platform is supported by the specification + # + def supported_platform?(platform, spec) + available_platforms = spec.available_platforms + + available_platforms.any? { |p| p.name == platform.name } + end + + # Whether the provided name matches the platform + # + # @param [Platform] platform + # The platform + # + # @param [String] name + # The name to check against the provided platform + # + def platform_name_match?(platform, name) + [platform.name, platform.string_name].any? 
{ |n| n.casecmp(name) == 0 } + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/version_metadata.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/version_metadata.rb new file mode 100644 index 0000000..4e9b53c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/version_metadata.rb @@ -0,0 +1,26 @@ +module Pod + module VersionMetadata + CACHE_VERSION = '003'.freeze + + def self.gem_version + Pod::VERSION + end + + def self.project_cache_version + [ + gem_version, + cocoapods_sha, + 'project-cache', + CACHE_VERSION, + ].compact.join('.') + end + + def self.cocoapods_sha + return unless gemspec = Gem.loaded_specs['cocoapods'] + return unless source = gemspec.source + return unless source.respond_to?(:revision) + source.revision + end + private_class_method :cocoapods_sha + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode.rb new file mode 100644 index 0000000..847f5f5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode.rb @@ -0,0 +1,7 @@ +module Pod + module Xcode + autoload :LinkageAnalyzer, 'cocoapods/xcode/linkage_analyzer' + autoload :XCFramework, 'cocoapods/xcode/xcframework' + autoload :FrameworkPaths, 'cocoapods/xcode/framework_paths' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/framework_paths.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/framework_paths.rb new file mode 100644 index 0000000..9c33462 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/framework_paths.rb @@ -0,0 +1,54 @@ +module Pod + module Xcode + class FrameworkPaths + # @return [String] the path to the .framework + # + attr_reader :source_path + + # @return [String, Nil] the dSYM path, if one exists + # + attr_reader :dsym_path + + # @return [Array, Nil] the bcsymbolmap files path array, if one exists + # + attr_reader :bcsymbolmap_paths + + def initialize(source_path, dsym_path = nil, bcsymbolmap_paths = nil) + @source_path = source_path + @dsym_path = dsym_path + @bcsymbolmap_paths = bcsymbolmap_paths + end + + def ==(other) + if other.class == self.class + other.source_path == @source_path && other.dsym_path == @dsym_path && other.bcsymbolmap_paths == @bcsymbolmap_paths + else + false + end + end + + alias eql? == + + def hash + [source_path, dsym_path, bcsymbolmap_paths].hash + end + + def all_paths + [source_path, dsym_path, bcsymbolmap_paths].flatten.compact + end + + # @param [Pathname] path the path to the `.framework` bundle + # + # @return [FrameworkPaths] the path of the framework with dsym & bcsymbolmap paths, if found + # + def self.from_path(path) + dsym_name = "#{path.basename}.dSYM" + dsym_path = Pathname.new("#{path.dirname}/#{dsym_name}") + dsym_path = nil unless dsym_path.exist? 
+        bcsymbolmap_paths = Pathname.glob(path.dirname, '*.bcsymbolmap')
+
+        FrameworkPaths.new(path, dsym_path, bcsymbolmap_paths)
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/linkage_analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/linkage_analyzer.rb
new file mode 100644
index 0000000..270623e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/linkage_analyzer.rb
@@ -0,0 +1,22 @@
+require 'macho'
+
+module Pod
+  module Xcode
+    class LinkageAnalyzer
+      # @param [Pathname] binary
+      #        The file to be checked for being a dynamic Mach-O binary.
+      #
+      # @return [Boolean] Whether `binary` can be dynamically linked.
+      #
+      def self.dynamic_binary?(binary)
+        @cached_dynamic_binary_results ||= {}
+        return @cached_dynamic_binary_results[binary] unless @cached_dynamic_binary_results[binary].nil?
+        return false unless binary.file?
+
+        @cached_dynamic_binary_results[binary] = MachO.open(binary).dylib?
+      rescue MachO::MachOError
+        @cached_dynamic_binary_results[binary] = false
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework.rb
new file mode 100644
index 0000000..42f58dd
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework.rb
@@ -0,0 +1,99 @@
+# frozen_string_literal: true
+
+require 'cocoapods/xcode/xcframework/xcframework_slice'
+
+module Pod
+  module Xcode
+    class XCFramework
+      # @return [String] target_name the target name this XCFramework belongs to
+      #
+      attr_reader :target_name
+
+      # @return [Pathname] path the path to the .xcframework on disk
+      #
+      attr_reader :path
+
+      # @return [Pod::Version] the format version of the .xcframework
+      #
+      attr_reader :format_version
+
+      # @return [Array<XCFramework::Slice>] the slices contained inside this .xcframework
+      #
+      attr_reader :slices
+
+      # @return [Hash] the contents of the parsed plist
+      #
+      attr_reader :plist
+
+      # Initializes an XCFramework instance with a path on disk
+      #
+      # @param [String] target_name @see target_name
+      # @param [Pathname, String] path @see path
+      #
+      # @return [XCFramework] the xcframework at the given path
+      #
+      def initialize(target_name, path)
+        @target_name = target_name
+        @path = Pathname.new(path).tap do |p|
+          raise 'Absolute path is required' unless p.absolute?
+        end
+
+        @plist = Xcodeproj::Plist.read_from_path(plist_path)
+        parse_plist_contents
+      end
+
+      # @return [Pathname] the path to the Info.plist
+      #
+      def plist_path
+        path + 'Info.plist'
+      end
+
+      # @return [String] the basename of the framework
+      #
+      def name
+        File.basename(path, '.xcframework')
+      end
+
+      # @return [Boolean] true if any slices use dynamic linkage
+      #
+      def includes_dynamic_slices?
+        build_type.dynamic?
+      end
+
+      # @return [Boolean] true if any slices use static linkage
+      #
+      def includes_static_slices?
+        build_type.static?
+ end + + # @return [Pod::BuildType] the build type of the contained slices + # + # @note As CocoaPods does not support mixed packaging nor linkage for xcframework slices, + # we pick the first slice and assume all are the same + # + def build_type + @build_type ||= slices.first.build_type + end + + private + + def parse_plist_contents + @format_version = Pod::Version.new(plist['XCFrameworkFormatVersion']) + @slices = plist['AvailableLibraries'].map do |library| + identifier = library['LibraryIdentifier'] + relative_path = library['LibraryPath'] + archs = library['SupportedArchitectures'] + platform_name = library['SupportedPlatform'] + platform_variant = library['SupportedPlatformVariant'] + headers = library['HeadersPath'] + + slice_root = path.join(identifier) + slice_path = slice_root.join(relative_path) + headers = slice_root.join(headers) unless headers.nil? + XCFramework::Slice.new(slice_path, identifier, archs, platform_name, :platform_variant => platform_variant, :headers => headers) + end.sort_by(&:identifier) + raise Informative, "XCFramework at #{path} does not contain any frameworks or libraries." if slices.empty? + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework/xcframework_slice.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework/xcframework_slice.rb new file mode 100644 index 0000000..db3621f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-1.12.1/lib/cocoapods/xcode/xcframework/xcframework_slice.rb @@ -0,0 +1,144 @@ +require 'cocoapods/xcode/linkage_analyzer' + +module Pod + module Xcode + class XCFramework + class Slice + # @return [Pathname] the path to the .framework, .a or .dylib of this slice + # + attr_reader :path + + # @return [Array] list of supported architectures + # + attr_reader :supported_archs + + # @return [String] the framework identifier + # + attr_reader :identifier + + # @return [Platform] the supported platform + # + attr_reader :platform + + # @return [Symbol] the platform variant. Either :simulator or nil + # + attr_reader :platform_variant + + # @return [Pathname] the path to the headers + # + attr_reader :headers + + def initialize(path, identifier, archs, platform, platform_variant: nil, headers: path.join('Headers')) + @path = path + @identifier = identifier + @supported_archs = archs + @platform = Pod::Platform.new(platform) + @platform_variant = platform_variant.to_sym unless platform_variant.nil? + @headers = headers + end + + # @return [String] the name of the framework + # + def name + @name ||= begin + case package_type + when :framework + File.basename(path, '.framework') + when :library + ext = File.extname(path) + case ext + when '.a', '.dylib' + result = File.basename(path).gsub(/^lib/, '') + result[0] = result.downcase[0] + result + else + raise Informative, "Invalid package type `#{package_type}`" + end + else + raise Informative, "Invalid package type `#{package_type}`" + end + end + end + + # @return [Boolean] true if this is a slice built for simulator + # + def simulator_variant? 
+          @platform_variant == :simulator
+        end
+
+        # @return [Symbol] the package type of the slice - either :framework or :library
+        #
+        def package_type
+          @package_type ||= begin
+            ext = File.extname(path)
+            case ext
+            when '.framework'
+              :framework
+            when '.a', '.dylib'
+              :library
+            else
+              raise Informative, "Invalid XCFramework slice type `#{ext}`"
+            end
+          end
+        end
+
+        # @return [Boolean] true if this slice is a framework, not a library
+        #
+        def framework?
+          build_type.framework?
+        end
+
+        # @return [Boolean] true if this slice is a library, not a framework
+        #
+        def library?
+          build_type.library?
+        end
+
+        # @return [Boolean] true if this slice contains a statically-linked binary
+        #
+        def static?
+          build_type.static?
+        end
+
+        # @return [Boolean] true if this slice contains a dynamically-linked binary
+        #
+        def dynamic?
+          build_type.dynamic?
+        end
+
+        # @return [BuildType] the build type of the binary
+        #
+        def build_type
+          @build_type ||= begin
+            linkage = Xcode::LinkageAnalyzer.dynamic_binary?(binary_path) ? :dynamic : :static
+            ext = File.extname(path)
+            packaging = case ext
+                        when '.framework'
+                          :framework
+                        when '.a', '.dylib'
+                          :library
+                        else
+                          raise Informative, "Invalid XCFramework slice type `#{ext}`"
+                        end
+            BuildType.new(:linkage => linkage, :packaging => packaging)
+          end
+        end
+
+        # @return [Pathname] the path to the bundled binary
+        #
+        def binary_path
+          @binary_path ||= begin
+            case package_type
+            when :framework
+              path + name
+            when :library
+              path
+            else
+              raise Informative, "Invalid package type `#{package_type}`"
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/LICENSE
new file mode 100644
index 0000000..f25cc61
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2011 - 2012 Eloy Durán
+Copyright (c) 2012 Fabio Pelosin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
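As a usage note for the slice API above: `Slice#build_type` funnels every slice into a `Pod::BuildType` value object, which is added later in this diff under `cocoapods-core/build_type.rb`. A minimal sketch of that API, assuming the vendored `cocoapods-core` gem is on the load path:

```ruby
require 'cocoapods-core'

# The four linkage/packaging combinations have convenience constructors.
build_type = Pod::BuildType.dynamic_framework
puts build_type            # => "dynamic framework"
puts build_type.dynamic?   # => true
puts build_type.framework? # => true

# Equality is structural, based only on linkage and packaging.
puts build_type == Pod::BuildType.new(:linkage => :dynamic, :packaging => :framework) # => true
```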
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/README.md
new file mode 100644
index 0000000..5133f40
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/README.md
@@ -0,0 +1,42 @@
+# CocoaPods Core
+
+[![Build Status](https://github.com/CocoaPods/Core/workflows/Specs/badge.svg)](https://github.com/CocoaPods/Core/actions/workflows/Specs.yml)
+[![Test Coverage](https://api.codeclimate.com/v1/badges/91a2d70b9ed977815c66/test_coverage)](https://codeclimate.com/github/CocoaPods/Core/test_coverage)
+[![Maintainability](https://api.codeclimate.com/v1/badges/91a2d70b9ed977815c66/maintainability)](https://codeclimate.com/github/CocoaPods/Core/maintainability)
+
+The CocoaPods-Core gem provides support to work with the models of CocoaPods.
+It is intended to be used in place of the CocoaPods gem when the installation
+of the dependencies is not needed. Therefore, it is suitable for web services.
+
+Provides support for working with the following models:
+
+- `Pod::Specification` - [Podspec Syntax Reference](https://guides.cocoapods.org/syntax/podspec.html).
+- `Pod::Podfile` - [Podfile Syntax Reference](https://guides.cocoapods.org/syntax/podfile.html).
+- `Pod::Source` - collections of podspec files like the [CocoaPods Spec repo](https://github.com/CocoaPods/Specs).
+
+The gem also provides support for ancillary features like
+`Pod::Specification::Set::Presenter` suitable for presenting descriptions of
+Pods and the `Specification::Linter`, which ensures the validity of podspec
+files.
+
+## Installation
+
+```
+$ [sudo] gem install cocoapods-core
+```
+
+The `cocoapods-core` gem requires Ruby 2.6.0 or later.
+
+## Collaborate
+
+All CocoaPods development happens on GitHub; there is a repository for
+[CocoaPods](https://github.com/CocoaPods/CocoaPods) and one for the [CocoaPods
+specs](https://github.com/CocoaPods/Specs). Contributing patches or Pods is
+really easy and gratifying.
+
+Follow [@CocoaPods](http://twitter.com/CocoaPods) to get up-to-date
+information about what's going on in the CocoaPods world.
+
+## License
+
+This gem and CocoaPods are available under the MIT license.
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core.rb
new file mode 100644
index 0000000..c9045b8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core.rb
@@ -0,0 +1,40 @@
+# The Pod module name-spaces all the classes of CocoaPods.
+#
+module Pod
+  require 'cocoapods-core/gem_version'
+
+  # Indicates a runtime error **not** caused by a bug.
+  #
+  class PlainInformative < StandardError; end
+
+  # Indicates a user error.
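+  # (e.g. an invalid Podfile or an unreachable source). Callers generally
+  # rescue `PlainInformative` to report either kind of failure as a plain
+  # message instead of a crash with a backtrace.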
+ # + class Informative < PlainInformative; end + + require 'pathname' + require 'cocoapods-core/vendor' + + autoload :Version, 'cocoapods-core/version' + autoload :Requirement, 'cocoapods-core/requirement' + autoload :Dependency, 'cocoapods-core/dependency' + + autoload :CoreUI, 'cocoapods-core/core_ui' + autoload :DSLError, 'cocoapods-core/standard_error' + autoload :GitHub, 'cocoapods-core/github' + autoload :HTTP, 'cocoapods-core/http' + autoload :Lockfile, 'cocoapods-core/lockfile' + autoload :Metrics, 'cocoapods-core/metrics' + autoload :Platform, 'cocoapods-core/platform' + autoload :Podfile, 'cocoapods-core/podfile' + autoload :Source, 'cocoapods-core/source' + autoload :CDNSource, 'cocoapods-core/cdn_source' + autoload :TrunkSource, 'cocoapods-core/trunk_source' + autoload :Specification, 'cocoapods-core/specification' + autoload :StandardError, 'cocoapods-core/standard_error' + autoload :YAMLHelper, 'cocoapods-core/yaml_helper' + autoload :BuildType, 'cocoapods-core/build_type' + + # TODO: Fix + # + Spec = Specification +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/build_type.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/build_type.rb new file mode 100644 index 0000000..0c36d71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/build_type.rb @@ -0,0 +1,121 @@ +module Pod + class BuildType + # @return [Array] known packaging options. + # + KNOWN_PACKAGING_OPTIONS = %i(library framework).freeze + + # @return [Array] known linking options. + # + KNOWN_LINKAGE_OPTIONS = %i(static dynamic).freeze + + # @return [Symbol] the packaging for this build type, one of #KNOWN_PACKAGING_OPTIONS + # + attr_reader :packaging + + # @return [Symbol] the linkage for this build type, one of #KNOWN_LINKAGE_OPTIONS + # + attr_reader :linkage + + attr_reader :hash + + def initialize(linkage: :static, packaging: :library) + unless KNOWN_LINKAGE_OPTIONS.include?(linkage) + raise ArgumentError, "Invalid linkage option #{linkage.inspect}, valid options are #{KNOWN_LINKAGE_OPTIONS.inspect}" + end + unless KNOWN_PACKAGING_OPTIONS.include?(packaging) + raise ArgumentError, "Invalid packaging option #{packaging.inspect}, valid options are #{KNOWN_PACKAGING_OPTIONS.inspect}" + end + @packaging = packaging + @linkage = linkage + @hash = packaging.hash ^ linkage.hash + end + + # @return [BuildType] the build type for a dynamic library + def self.dynamic_library + new(:linkage => :dynamic, :packaging => :library) + end + + # @return [BuildType] the build type for a static library + # + def self.static_library + new(:linkage => :static, :packaging => :library) + end + + # @return [BuildType] the build type for a dynamic framework + # + def self.dynamic_framework + new(:linkage => :dynamic, :packaging => :framework) + end + + # @return [BuildType] the build type for a static framework + # + def self.static_framework + new(:linkage => :static, :packaging => :framework) + end + + # @return [Boolean] whether the target is built dynamically + # + def dynamic? + linkage == :dynamic + end + + # @return [Boolean] whether the target is built statically + # + def static? + linkage == :static + end + + # @return [Boolean] whether the target is built as a framework + # + def framework? + packaging == :framework + end + + # @return [Boolean] whether the target is built as a library + # + def library? 
+ packaging == :library + end + + # @return [Boolean] whether the target is built as a dynamic framework + # + def dynamic_framework? + dynamic? && framework? + end + + # @return [Boolean] whether the target is built as a dynamic library + # + def dynamic_library? + dynamic? && library? + end + + # @return [Boolean] whether the target is built as a static framework + # + def static_framework? + static? && framework? + end + + # @return [Boolean] whether the target is built as a static library + # + def static_library? + static? && library? + end + + def to_s + "#{linkage} #{packaging}" + end + + def to_hash + { :linkage => linkage, :packaging => packaging } + end + + def inspect + "#<#{self.class} linkage=#{linkage} packaging=#{packaging}>" + end + + def ==(other) + linkage == other.linkage && + packaging == other.packaging + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/cdn_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/cdn_source.rb new file mode 100644 index 0000000..9345a9f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/cdn_source.rb @@ -0,0 +1,501 @@ +require 'cocoapods-core/source' +require 'rest' +require 'concurrent' +require 'netrc' +require 'addressable' + +module Pod + # Subclass of Pod::Source to provide support for CDN-based Specs repositories + # + class CDNSource < Source + include Concurrent + + MAX_NUMBER_OF_RETRIES = (ENV['COCOAPODS_CDN_MAX_NUMBER_OF_RETRIES'] || 5).to_i + # Single thread executor for all network activity. + HYDRA_EXECUTOR = Concurrent::SingleThreadExecutor.new + + private_constant :HYDRA_EXECUTOR + + # @param [String] repo The name of the repository + # + def initialize(repo) + @check_existing_files_for_update = false + # Optimization: we initialize startup_time when the source is first initialized + # and then test file modification dates against it. Any file that was touched + # after the source was initialized, is considered fresh enough. + @startup_time = Time.new + + @version_arrays_by_fragment_by_name = {} + + super(repo) + end + + # @return [String] The URL of the source. + # + def url + @url ||= File.read(repo.join('.url')).chomp.chomp('/') + '/' + end + + # @return [String] The type of the source. + # + def type + 'CDN' + end + + def refresh_metadata + if metadata.nil? + unless repo.exist? + debug "CDN: Repo #{name} does not exist!" + return + end + + specs_dir.mkpath + download_file('CocoaPods-version.yml') + end + + super + end + + def preheat_existing_files + files_to_update = files_definitely_to_update + deprecated_local_podspecs - ['deprecated_podspecs.txt'] + debug "CDN: #{name} Going to update #{files_to_update.count} files" + + concurrent_requests_catching_errors do + # Queue all tasks first + loaders = files_to_update.map do |file| + download_file_async(file) + end + # Block and wait for all to complete running on Hydra + Promises.zip_futures_on(HYDRA_EXECUTOR, *loaders).wait! + end + end + + def files_definitely_to_update + Pathname.glob(repo.join('**/*.{txt,yml}')).map { |f| f.relative_path_from(repo).to_s } + end + + def deprecated_local_podspecs + download_file('deprecated_podspecs.txt') + local_file('deprecated_podspecs.txt', &:to_a). + map { |f| Pathname.new(f.chomp) }. + select { |f| repo.join(f).exist? } + end + + # @return [Pathname] The directory where the specs are stored. 
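The queue-first, wait-later shape used by `preheat_existing_files` above is plain `concurrent-ruby`. A minimal standalone sketch of the same pattern (not part of the vendored sources; the file names are invented):

```ruby
require 'concurrent'

executor = Concurrent::SingleThreadExecutor.new

# Queue all tasks first...
futures = %w(a.txt b.txt c.txt).map do |file|
  Concurrent::Promises.future_on(executor) { "downloaded #{file}" }
end

# ...then block until every future has resolved, as the CDN source does.
Concurrent::Promises.zip_futures_on(executor, *futures).wait!
futures.each { |future| puts future.value! }
```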
+ # + def specs_dir + @specs_dir ||= repo + 'Specs' + end + + # @!group Querying the source + #-------------------------------------------------------------------------# + + # @return [Array] the list of the name of all the Pods. + # + def pods + download_file('all_pods.txt') + local_file('all_pods.txt', &:to_a).map(&:chomp) + end + + # @return [Array] all the available versions for the Pod, sorted + # from highest to lowest. + # + # @param [String] name + # the name of the Pod. + # + def versions(name) + return nil unless specs_dir + raise ArgumentError, 'No name' unless name + + fragment = pod_shard_fragment(name) + + ensure_versions_file_loaded(fragment) + + return @versions_by_name[name] unless @versions_by_name[name].nil? + + pod_path_actual = pod_path(name) + pod_path_relative = relative_pod_path(name) + + return nil if @version_arrays_by_fragment_by_name[fragment][name].nil? + + concurrent_requests_catching_errors do + loaders = [] + + @versions_by_name[name] ||= @version_arrays_by_fragment_by_name[fragment][name].map do |version| + # Optimization: ensure all the podspec files at least exist. The correct one will get refreshed + # in #specification_path regardless. + podspec_version_path_relative = Pathname.new(version).join("#{name}.podspec.json") + + unless pod_path_actual.join(podspec_version_path_relative).exist? + # Queue all podspec download tasks first + loaders << download_file_async(pod_path_relative.join(podspec_version_path_relative).to_s) + end + + begin + Version.new(version) if version[0, 1] != '.' + rescue ArgumentError + raise Informative, 'An unexpected version directory ' \ + "`#{version}` was encountered for the " \ + "`#{pod_path_actual}` Pod in the `#{name}` repository." + end + end.compact.sort.reverse + + # Block and wait for all to complete running on Hydra + Promises.zip_futures_on(HYDRA_EXECUTOR, *loaders).wait! + end + + @versions_by_name[name] + end + + # Returns the path of the specification with the given name and version. + # + # @param [String] name + # the name of the Pod. + # + # @param [Version,String] version + # the version for the specification. + # + # @return [Pathname] The path of the specification. + # + def specification_path(name, version) + raise ArgumentError, 'No name' unless name + raise ArgumentError, 'No version' unless version + unless versions(name).include?(Version.new(version)) + raise StandardError, "Unable to find the specification #{name} " \ + "(#{version}) in the #{self.name} source." + end + + podspec_version_path_relative = Pathname.new(version.to_s).join("#{name}.podspec.json") + relative_podspec = relative_pod_path(name).join(podspec_version_path_relative).to_s + download_file(relative_podspec) + pod_path(name).join(podspec_version_path_relative) + end + + # @return [Array] all the specifications contained by the + # source. + # + def all_specs + raise Informative, "Can't retrieve all the specs for a CDN-backed source, it will take forever" + end + + # @return [Array] the sets of all the Pods. + # + def pod_sets + raise Informative, "Can't retrieve all the pod sets for a CDN-backed source, it will take forever" + end + + # @!group Searching the source + #-------------------------------------------------------------------------# + + # @return [Set] a set for a given dependency. The set is identified by the + # name of the dependency and takes into account subspecs. + # + # @note This method is optimized for fast lookups by name, i.e. 
it does
+    #         *not* require iterating through {#pod_sets}
+    #
+    # @todo   Rename to #load_set
+    #
+    def search(query)
+      unless specs_dir
+        raise Informative, "Unable to find a source named: `#{name}`"
+      end
+      if query.is_a?(Dependency)
+        query = query.root_name
+      end
+
+      fragment = pod_shard_fragment(query)
+
+      ensure_versions_file_loaded(fragment)
+
+      version_arrays_by_name = @version_arrays_by_fragment_by_name[fragment] || {}
+
+      found = version_arrays_by_name[query].nil? ? nil : query
+
+      if found
+        set = set(query)
+        set if set.specification_name == query
+      end
+    end
+
+    # @return [Array<Set>] The list of the sets that contain the search term.
+    #
+    # @param  [String] query
+    #         the search term. Can be a regular expression.
+    #
+    # @param  [Boolean] full_text_search
+    #         whether the search is performed with Algolia full-text search.
+    #
+    # @note   Full-text search requires loading the specification for each pod,
+    #         and is therefore not supported.
+    #
+    def search_by_name(query, full_text_search = false)
+      if full_text_search
+        require 'algoliasearch'
+        begin
+          algolia_result = algolia_search_index.search(query, :attributesToRetrieve => 'name')
+          names = algolia_result['hits'].map { |r| r['name'] }
+          names.map { |n| set(n) }.reject { |s| s.versions.compact.empty? }
+        rescue Algolia::AlgoliaError => e
+          raise Informative, "CDN: #{name} - Cannot perform full-text search because Algolia returned an error: #{e}"
+        end
+      else
+        super(query)
+      end
+    end
+
+    # Check update dates for all existing files.
+    # Does not download non-existing specs, since a CDN-backed repo is updated live.
+    #
+    # @param  [Boolean] show_output
+    #
+    # @return [Array<String>] Always returns an empty array, as it cannot know
+    #         everything that actually changed.
+    #
+    def update(_show_output)
+      @check_existing_files_for_update = true
+      begin
+        preheat_existing_files
+      ensure
+        @check_existing_files_for_update = false
+      end
+      []
+    end
+
+    def updateable?
+      true
+    end
+
+    def git?
+      false
+    end
+
+    def indexable?
+      false
+    end
+
+    private
+
+    def ensure_versions_file_loaded(fragment)
+      return if !@version_arrays_by_fragment_by_name[fragment].nil? && !@check_existing_files_for_update
+
+      # Index file that contains all the versions for all the pods in the shard.
+      # We use those because you can't get a directory listing from a CDN.
+      index_file_name = index_file_name_for_fragment(fragment)
+      download_file(index_file_name)
+      versions_raw = local_file(index_file_name, &:to_a).map(&:chomp)
+      @version_arrays_by_fragment_by_name[fragment] = versions_raw.reduce({}) do |hash, row|
+        row = row.split('/')
+        pod = row.shift
+        versions = row
+
+        hash[pod] = versions
+        hash
+      end
+    end
+
+    def algolia_search_index
+      @index ||= begin
+        require 'algoliasearch'
+
+        raise Informative, "Cannot perform full-text search in repo #{name} because it's missing Algolia config" if download_file('AlgoliaSearch.yml').nil?
+        algolia_config = YAMLHelper.load_string(local_file('AlgoliaSearch.yml', &:read))
+
+        client = Algolia::Client.new(:application_id => algolia_config['application_id'], :api_key => algolia_config['api_key'])
+        Algolia::Index.new(algolia_config['index'], client)
+      end
+    end
+
+    def index_file_name_for_fragment(fragment)
+      fragment_joined = fragment.join('_')
+      fragment_joined = '_' + fragment_joined unless fragment.empty?
+ "all_pods_versions#{fragment_joined}.txt" + end + + def pod_shard_fragment(pod_name) + metadata.path_fragment(pod_name)[0..-2] + end + + def local_file_okay?(partial_url) + file_path = repo.join(partial_url) + File.exist?(file_path) && File.size(file_path) > 0 + end + + def local_file(partial_url) + file_path = repo.join(partial_url) + File.open(file_path) do |file| + yield file if block_given? + end + end + + def relative_pod_path(pod_name) + pod_path(pod_name).relative_path_from(repo) + end + + def download_file(partial_url) + # Block the main thread waiting for Hydra to finish + # + # Used for single-file downloads + download_file_async(partial_url).wait! + end + + def download_file_async(partial_url) + file_remote_url = Addressable::URI.encode(url + partial_url.to_s) + path = repo + partial_url + + file_okay = local_file_okay?(partial_url) + if file_okay + if @startup_time < File.mtime(path) + debug "CDN: #{name} Relative path: #{partial_url} modified during this run! Returning local" + return Promises.fulfilled_future(partial_url, HYDRA_EXECUTOR) + end + + unless @check_existing_files_for_update + debug "CDN: #{name} Relative path: #{partial_url} exists! Returning local because checking is only performed in repo update" + return Promises.fulfilled_future(partial_url, HYDRA_EXECUTOR) + end + end + + path.dirname.mkpath + + etag_path = path.sub_ext(path.extname + '.etag') + + etag = File.read(etag_path) if file_okay && File.exist?(etag_path) + debug "CDN: #{name} Relative path: #{partial_url}, has ETag? #{etag}" unless etag.nil? + + download_and_save_with_retries_async(partial_url, file_remote_url, etag) + end + + def download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries = MAX_NUMBER_OF_RETRIES) + path = repo + partial_url + etag_path = path.sub_ext(path.extname + '.etag') + + download_task = download_typhoeus_impl_async(file_remote_url, etag).then do |response| + case response.response_code + when 301, 302 + redirect_location = response.headers['location'] + debug "CDN: #{name} Redirecting from #{file_remote_url} to #{redirect_location}" + download_and_save_with_retries_async(partial_url, redirect_location, etag) + when 304 + debug "CDN: #{name} Relative path not modified: #{partial_url}" + # We need to update the file modification date, as it is later used for freshness + # optimization. See #initialize for more information. + FileUtils.touch path + partial_url + when 200 + File.open(path, 'w') { |f| f.write(response.response_body.force_encoding('UTF-8')) } + + etag_new = response.headers['etag'] unless response.headers.nil? + debug "CDN: #{name} Relative path downloaded: #{partial_url}, save ETag: #{etag_new}" + File.open(etag_path, 'w') { |f| f.write(etag_new) } unless etag_new.nil? 
+          partial_url
+        when 404
+          debug "CDN: #{name} Relative path couldn't be downloaded: #{partial_url} Response: #{response.response_code}"
+          nil
+        when 502, 503, 504
+          # Retryable HTTP errors, usually related to server overloading
+          if retries <= 1
+            raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}"
+          else
+            debug "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}, retries: #{retries - 1}"
+            exponential_backoff_async(retries).then do
+              download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries - 1)
+            end
+          end
+        when 0
+          # Non-HTTP errors, usually network layer
+          if retries <= 1
+            raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.return_message}"
+          else
+            debug "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.return_message}, retries: #{retries - 1}"
+            exponential_backoff_async(retries).then do
+              download_and_save_with_retries_async(partial_url, file_remote_url, etag, retries - 1)
+            end
+          end
+        else
+          raise Informative, "CDN: #{name} URL couldn't be downloaded: #{file_remote_url} Response: #{response.response_code} #{response.response_body}"
+        end
+      end
+
+      # Calling `Future#run` flattens the chained futures created by retries or redirects
+      #
+      # Does not, in fact, run the task - that is already happening in Hydra at this point
+      download_task.run
+    end
+
+    def exponential_backoff_async(retries)
+      sleep_async(backoff_time(retries))
+    end
+
+    def backoff_time(retries)
+      current_retry = MAX_NUMBER_OF_RETRIES - retries
+      4 * 2**current_retry
+    end
+
+    def sleep_async(seconds)
+      # Async sleep to avoid blocking either the main or the Hydra thread
+      Promises.schedule_on(HYDRA_EXECUTOR, seconds)
+    end
+
+    def download_typhoeus_impl_async(file_remote_url, etag)
+      require 'typhoeus'
+
+      # Create a request that prefers HTTP/2 - protocol negotiation ultimately
+      # picks the highest version supported by both sides.
+      # When debugging with proxy, use the following extra options:
+      # :proxy => 'http://localhost:8888',
+      # :ssl_verifypeer => false,
+      # :ssl_verifyhost => 0,
+      request = Typhoeus::Request.new(
+        file_remote_url,
+        :method => :get,
+        :http_version => :httpv2_0,
+        :timeout => 10,
+        :connecttimeout => 10,
+        :accept_encoding => 'gzip',
+        :netrc => :optional,
+        :netrc_file => Netrc.default_path,
+        :headers => etag.nil? ? {} : { 'If-None-Match' => etag },
+      )
+
+      future = Promises.resolvable_future_on(HYDRA_EXECUTOR)
+      queue_request(request)
+      request.on_complete do |response|
+        future.fulfill(response)
+      end
+
+      # This `Future` should never reject, network errors are exposed on `Typhoeus::Response`
+      future
+    end
+
+    def debug(message)
+      if defined?(Pod::UI)
+        Pod::UI.message(message)
+      else
+        CoreUI.puts(message)
+      end
+    end
+
+    def concurrent_requests_catching_errors
+      yield
+    rescue MultipleErrors => e
+      # aggregated error message from `Concurrent`
+      errors = e.errors
+      raise Informative, "CDN: #{name} Repo update failed - #{e.errors.size} error(s):\n#{errors.join("\n")}"
+    end
+
+    def queue_request(request)
+      @hydra ||= Typhoeus::Hydra.new
+
+      # Queue the request into the Hydra (libcurl reactor).
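+      # Typhoeus::Hydra wraps a libcurl multi handle, so queued requests are
+      # multiplexed over shared connections; nothing is transferred until the
+      # reactor is cycled via #run below.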
+ @hydra.queue(request) + + # Cycle the reactor on a separate thread + # + # The way it works is that if more requests are queued while Hydra is in the `#run` + # method, it will keep executing them + # + # The upcoming calls to `#run` will simply run empty. + HYDRA_EXECUTOR.post(@hydra, &:run) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/core_ui.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/core_ui.rb new file mode 100644 index 0000000..d6c9377 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/core_ui.rb @@ -0,0 +1,19 @@ +module Pod + # Manages the UI output so dependent gems can customize it. + # + module CoreUI + def self.puts(message) + STDOUT.puts message + end + + def self.print(message) + STDOUT.print(message) + end + + def self.warn(message) + STDERR.puts message + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/dependency.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/dependency.rb new file mode 100644 index 0000000..294c19f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/dependency.rb @@ -0,0 +1,408 @@ +module Pod + # The Dependency allows to specify dependencies of a {Podfile} or a + # {Specification} on a Pod. It stores the name of the dependency, version + # requirements and external sources information. + # + # This class is based on the dependency class of RubyGems and mimics its + # implementation with adjustments specific to CocoaPods. RubyGems is + # available under the + # [MIT license](https://github.com/rubygems/rubygems/blob/master/MIT.txt). + # + class Dependency + # @return [String] The name of the Pod described by this dependency. + # + attr_accessor :name + + # @return [Hash{Symbol=>String}] a hash describing the external source + # where the pod should be fetched. The external source has to + # provide its own {Specification} file. + # + attr_accessor :external_source + + # @return [String] The source URL of the podspec repo to use to resolve + # this dependency. If not set then the standard source list + # should be used to resolve the dependency. + attr_accessor :podspec_repo + + # @overload initialize(name, requirements) + # + # @param [String] name + # the name of the Pod. + # + # @param [Array, Version, String, Requirement] requirements + # an array specifying the version requirements of the + # dependency. + # + # @example Initialization with version requirements. + # + # Dependency.new('AFNetworking') + # Dependency.new('AFNetworking', '~> 1.0') + # Dependency.new('AFNetworking', '>= 0.5', '< 0.7') + # + # @overload initialize(name, external_source) + # + # @param [String] name + # the name of the Pod. + # + # @param [Hash] external_source + # a hash describing the external source. + # + # @example Initialization with an external source. + # + # Dependency.new('libPusher', {:git => 'example.com/repo.git'}) + # Dependency.new('libPusher', {:path => 'path/to/folder'}) + # Dependency.new('libPusher', {:podspec => 'example.com/libPusher.podspec'}) + # + # @overload initialize(name, requirements, podspec_repo) + # + # @param [String] name + # the name of the Pod. + # + # @param [Array, Version, String, Requirement] requirements + # an array specifying the version requirements of the + # dependency. 
+ # + # @param [Hash] podspec_repo + # The URL of the specific podspec repo to resolve this dependency from. + # + # @example Initialization with a specific podspec repo + # + # Dependency.new('Artsy+UILabels', '~> 1.0', :source => 'https://github.com/Artsy/Specs.git') + # + def initialize(name = nil, *requirements) + if requirements.last.is_a?(Hash) + additional_params = requirements.pop.select { |_, v| !v.nil? } + additional_params = nil if additional_params.empty? + + if additional_params && @podspec_repo = additional_params[:source] + # This dependency specifies the exact source podspec repo to use. + additional_params.delete(:source) + unless additional_params.empty? + raise Informative, 'A dependency with a specified podspec repo may ' \ + "not include other source parameters (#{name})." + end + elsif @external_source = additional_params + unless requirements.empty? + raise Informative, 'A dependency with an external source may not ' \ + "specify version requirements (#{name})." + end + end + + elsif requirements.last == :head + raise Informative, '`:head` dependencies have been removed. Please use ' \ + "normal external source dependencies (`:git => 'GIT_REPO_URL'`) " \ + "instead of `:head` for `#{name}`." + end + + if requirements.length == 1 && requirements.first.is_a?(Requirement) + requirements = requirements.first + end + @name = name + @requirement = Requirement.create(requirements) + @specific_requirement ||= nil + @external_source ||= nil + end + + # @return [Version] whether the dependency points to a specific version. + # + attr_reader :specific_version + + # @return [Requirement] the requirement of this dependency (a set of + # one or more version restrictions). + # + def requirement + @specific_requirement || @requirement + end + + # @param [Version] version the specific version to point to + # + def specific_version=(version) + @specific_version = version + @specific_requirement = if version + Requirement.new(Version.new(version.version)) + end + end + + # @return [Boolean] whether the dependency points to a subspec. + # + def subspec_dependency? + @name.include?('/') + end + + # @return [Boolean] whether the dependency points to an external source. + # + def external? + !@external_source.nil? + end + + # @return [Boolean] whether the dependency points to a local path. + # + def local? + if external_source + external_source[:path] + end + end + + # Creates a new dependency with the name of the top level spec and the same + # version requirements. + # + # @note This is used by the {Specification::Set} class to merge + # dependencies and resolve the required version of a Pod regardless + # what particular specification (subspecs or top level) is + # required. + # + # @todo This should not use `dup`. The `name` property should be an + # attr_reader. + # + # @return [Dependency] a dependency with the same versions requirements + # that is guaranteed to point to a top level specification. + # + def to_root_dependency + dep = dup + dep.name = root_name + dep + end + + # Returns the name of the Pod that the dependency is pointing to. + # + # @note In case this is a dependency for a subspec, e.g. + # 'RestKit/Networking', this returns 'RestKit', which is what the + # Pod::Source needs to know to retrieve the correct {Specification} + # from disk. + # + # @return [String] the name of the Pod. + # + def root_name + subspec_dependency? ? @name.split('/').first : @name + end + + # Checks if a dependency would be satisfied by the requirements of another + # dependency. 
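+    # For example, `Dependency.new('A', '~> 1.0')` is compatible with
+    # `Dependency.new('A', '= 1.0.2')`, because the pinned version `1.0.2`
+    # satisfies `~> 1.0`.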
+    #
+    # @param  [Dependency] other
+    #         the other dependency.
+    #
+    # @note   This is used by the Lockfile to check if a stored dependency is
+    #         still compatible with the Podfile.
+    #
+    # @return [Boolean] whether the dependency is compatible with the given one.
+    #
+    def compatible?(other)
+      return false unless name == other.name
+      return false unless external_source == other.external_source
+
+      other.requirement.requirements.all? do |_operator, version|
+        requirement.satisfied_by? Version.new(version)
+      end
+    end
+
+    # @return [Boolean] whether the dependency is equal to another taking into
+    #         account the loaded specification, the head options and the
+    #         external source.
+    #
+    def ==(other)
+      self.class == other.class &&
+        name == other.name &&
+        external_source == other.external_source &&
+        podspec_repo == other.podspec_repo &&
+        requirement == other.requirement
+    end
+    alias_method :eql?, :==
+
+    # @return [Fixnum] The hash value based on the name and on the
+    #         requirements.
+    #
+    def hash
+      name.hash ^ requirement.hash
+    end
+
+    # @return [Fixnum] How the dependency should be sorted with respect to
+    #         another one according to its name.
+    #
+    def <=>(other)
+      name <=> other.name
+    end
+
+    # Merges the version requirements of the dependency with another one.
+    #
+    # @param  [Dependency] other
+    #         the other dependency to merge with.
+    #
+    # @note   If one of the dependencies specifies an external source or is head,
+    #         the resulting dependency preserves these attributes.
+    #
+    # @return [Dependency] a dependency (not necessarily a new instance) that
+    #         also includes the version requirements of the given one.
+    #
+    def merge(other)
+      unless name == other.name
+        raise ArgumentError, "#{self} and #{other} have different names"
+      end
+
+      default = Requirement.default
+      self_req = requirement
+      other_req = other.requirement
+
+      req = if other_req == default
+              self_req
+            elsif self_req == default
+              other_req
+            else
+              self_req.as_list.concat(other_req.as_list)
+            end
+
+      opts = {}
+
+      if external_source || other.external_source
+        opts.
+          merge!(external_source || {}).
+          merge!(other.external_source || {})
+
+        req_to_set = req
+        req = []
+      end
+
+      if podspec_repo && other.podspec_repo && podspec_repo != other.podspec_repo
+        raise ArgumentError, "#{self} and #{other} have different podspec repos"
+      end
+
+      if repo = podspec_repo || other.podspec_repo
+        opts[:source] = repo
+      end
+
+      self.class.new(name, *req, opts).tap do |dep|
+        dep.instance_variable_set(:@requirement, Requirement.create(req_to_set)) if req_to_set
+      end
+    end
+
+    # Whether the dependency has any pre-release requirements
+    #
+    # @return [Boolean] Whether the dependency has any pre-release requirements
+    #
+    def prerelease?
+      return @prerelease if defined?(@prerelease)
+      @prerelease = requirement.requirements.any? { |_op, version| version.prerelease? }
+    end
+
+    # Checks whether the dependency would be satisfied by the specification
+    # with the given name and version.
+    #
+    # @param  [String] name
+    #         The proposed name.
+    #
+    # @param  [String, Version] version
+    #         The proposed version.
+    #
+    # @return [Boolean] Whether the dependency is satisfied.
+    #
+    def match?(name, version)
+      return false unless self.name == name
+      return true if requirement.none?
+ requirement.satisfied_by?(Version.new(version)) + end + + #-------------------------------------------------------------------------# + + # !@group String representation + + # Creates a string representation of the dependency suitable for + # serialization and de-serialization without loss of information. The + # string is also suitable for UI. + # + # @note This representation is used by the {Lockfile}. + # + # @example Output examples + # + # "libPusher" + # "libPusher (= 1.0)" + # "libPusher (~> 1.0.1)" + # "libPusher (> 1.0, < 2.0)" + # "libPusher (from `www.example.com')" + # "libPusher (defined in Podfile)" + # "RestKit/JSON" + # + # @return [String] the representation of the dependency. + # + def to_s + version = '' + if external? + version << external_source_description(external_source) + elsif requirement != Requirement.default + version << requirement.to_s + end + result = @name.dup + result << " (#{version})" unless version.empty? + result + end + + # Generates a dependency from its string representation. + # + # @param [String] string + # The string that describes the dependency generated from + # {#to_s}. + # + # @note The information about external sources is not completely + # serialized in the string representation and should be stored a + # part by clients that need to create a dependency equal to the + # original one. + # + # @return [Dependency] the dependency described by the string. + # + def self.from_string(string) + match_data = string.match(/((?:\s?[^\s(])+)( (?:.*))?/) + name = match_data[1] + version = match_data[2] + version = version.gsub(/[()]/, '') if version + case version + when nil, /from `(.*)(`|')/ + Dependency.new(name) + else + version_requirements = version.split(',') if version + Dependency.new(name, version_requirements) + end + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "<#{self.class} name=#{name} requirements=#{requirement} " \ + "source=#{podspec_repo || 'nil'} external_source=#{external_source || 'nil'}>" + end + + #--------------------------------------# + + private + + # Creates a string representation of the external source suitable for UI. + # + # @example Output examples + # + # "from `www.example.com/libPusher.git', tag `v0.0.1'" + # "from `www.example.com/libPusher.podspec'" + # "from `~/path/to/libPusher'" + # + # @todo Improve the description for Mercurial and Subversion. + # + # @return [String] the description of the external source. + # + def external_source_description(source) + if source.key?(:git) + desc = "`#{source[:git]}`" + desc << ", commit `#{source[:commit]}`" if source[:commit] + desc << ", branch `#{source[:branch]}`" if source[:branch] + desc << ", tag `#{source[:tag]}`" if source[:tag] + elsif source.key?(:hg) + desc = "`#{source[:hg]}`" + elsif source.key?(:svn) + desc = "`#{source[:svn]}`" + elsif source.key?(:podspec) + desc = "`#{source[:podspec]}`" + elsif source.key?(:path) + desc = "`#{source[:path]}`" + else + desc = "`#{source}`" + end + "from #{desc}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/gem_version.rb new file mode 100644 index 0000000..3e57ed9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/gem_version.rb @@ -0,0 +1,5 @@ +module Pod + # The version of the cocoapods-core. + # + CORE_VERSION = '1.12.1'.freeze unless defined? 
Pod::CORE_VERSION +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/github.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/github.rb new file mode 100644 index 0000000..c3b0b7e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/github.rb @@ -0,0 +1,161 @@ +module Pod + # Allows to access information about the GitHub repos. + # + # This class is stored in Core because it might be used by web services. + # + module GitHub + # Returns the information of a user. + # + # @param [String] login + # The name of the user. + # + # @return [Hash] The data of user. + # + def self.user(login) + peform_request("https://api.github.com/users/#{login}") + end + + # Returns the information of a repo. + # + # @param [String] url + # The URL of the repo. + # + # @return [Hash] The hash containing the data as reported by GitHub. + # + def self.repo(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}") + end + end + + # Returns the tags of a repo. + # + # @param [String] url @see #repo + # + # @return [Array] The list of the tags. + # + def self.tags(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}/tags") + end + end + + # Returns the branches of a repo. + # + # @param [String] url @see #repo + # + # @return [Array] The list of the branches. + # + def self.branches(url) + if repo_id = normalized_repo_id(url) + peform_request("https://api.github.com/repos/#{repo_id}/branches") + end + end + + # Returns the contents of a file or directory in a repository. + # + # @param [String] url @see #repo + # + # @param [#to_s] path + # The path for which the contents are needed. + # + # @param [String] branch + # The branch for which to fetch the contents of the path. + # + # @return [Array] The list of the files and of the directories if the given + # path is a directory. + # + # @return [Hash] The contents of the file (usually base64 encoded). + # + def self.contents(url, path = nil, branch = nil) + if repo_id = normalized_repo_id(url) + request_url = "https://api.github.com/repos/#{repo_id}/contents" + request_url << "/#{path}" if path + request_url << "?ref=#{branch}" if branch + peform_request(request_url) + end + end + + # Returns whether the repository has been updated since a given commit. + # If the request fails, the response will be true as the API is still in + # beta and likely to change. + # + # @param [String] url @see #repo + # + # @param [String] commit + # The current HEAD commit. + # + # @return [Boolean] Whether the repository has been updated since the commit. + # + def self.modified_since_commit(url, commit) + return true unless repo_id = normalized_repo_id(url) + require 'rest' + request_url = "https://api.github.com/repos/#{repo_id}/commits/master" + headers = { + 'User-Agent' => 'CocoaPods', + 'Accept' => 'application/vnd.github.v3.sha', + 'If-None-Match' => %("#{commit}"), + } + + begin + response = REST.get(request_url, headers) + code = response.status_code + code != 304 + rescue + raise Informative, "Failed to connect to GitHub to update the #{repo_id} specs repo - Please check if you are offline, or that GitHub is down" + end + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # Returns the repo ID as it is or converting a GitHub URL. + # + # @param [String] url_or_id + # A repo ID or the URL of the repo. 
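+    #        e.g. `https://github.com/CocoaPods/Specs.git` is narrowed down
+    #        to the repo ID `CocoaPods/Specs` by `repo_id_from_url` below.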
+ # + # @return [String] the repo ID. + # + def self.normalized_repo_id(url_or_id) + repo_id_from_url(url_or_id) || url_or_id + end + + # Returns the repo ID given it's URL. + # + # @param [String] url + # The URL of the repo. + # + # @return [String] the repo ID. + # @return [Nil] if the given url is not a valid github repo url. + # + def self.repo_id_from_url(url) + url[%r{github.com[/:]([^/]*/(?:(?!\.git)[^/])*)\.*}, 1] + end + + # Performs a get request with the given URL. + # + # @param [String] url + # The URL of the resource. + # + # @return [Array, Hash] The information of the resource as Ruby objects. + # + def self.peform_request(url) + require 'rest' + require 'json' + headers = { 'User-Agent' => 'CocoaPods' } + response = REST.get(url, headers) + body = JSON.parse(response.body) + if response.ok? + body + else + CoreUI.warn "Request to #{url} failed - #{response.status_code}" + CoreUI.warn body['message'] + nil + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/http.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/http.rb new file mode 100644 index 0000000..bf5f0b9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/http.rb @@ -0,0 +1,86 @@ +require 'uri' + +module Pod + # Handles HTTP requests + # + module HTTP + # Resolve potential redirects and return the final URL. + # + # @return [string] + # + def self.get_actual_url(url, user_agent = nil) + redirects = 0 + + loop do + response = perform_head_request(url, user_agent) + + if [301, 302, 303, 307, 308].include? response.status_code + location = response.headers['location'].first + + if location =~ %r{://} + url = location + else + url = URI.join(url, location).to_s + end + + redirects += 1 + else + break + end + + break unless redirects < MAX_HTTP_REDIRECTS + end + + url + end + + # Performs validation of a URL + # + # @return [REST::response] + # + def self.validate_url(url, user_agent = nil) + return nil unless url =~ /^#{URI.regexp}$/ + + begin + url = get_actual_url(url, user_agent) + resp = perform_head_request(url, user_agent) + rescue SocketError, URI::InvalidURIError, REST::Error, REST::Error::Connection + resp = nil + end + + resp + end + + #-------------------------------------------------------------------------# + + private + + # Does a HEAD request and in case of any errors a GET request + # + # @return [REST::response] + # + def self.perform_head_request(url, user_agent) + require 'rest' + + user_agent ||= USER_AGENT + + resp = ::REST.head(url, 'User-Agent' => user_agent) + + if resp.status_code >= 400 + resp = ::REST.get(url, 'User-Agent' => user_agent, + 'Range' => 'bytes=0-0') + + if resp.status_code >= 400 + resp = ::REST.get(url, 'User-Agent' => user_agent) + end + end + + resp + end + + MAX_HTTP_REDIRECTS = 3 + USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/538.43.40 (KHTML, like Gecko) Version/8.0 Safari/538.43.40' + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/lockfile.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/lockfile.rb new file mode 100644 index 0000000..6ef672b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/lockfile.rb @@ -0,0 +1,539 @@ +module Pod + # The Lockfile stores 
information about the pods that were installed by + # CocoaPods. + # + # It is used in combination with the Podfile to resolve the exact version of + # the Pods that should be installed (i.e. to prevent `pod install` from + # upgrading dependencies). + # + # Moreover it is used as a manifest of an installation to detect which Pods + # need to be installed or removed. + # + class Lockfile + # @todo The symbols should be converted to a String and back to symbol + # when reading (EXTERNAL SOURCES Download options) + + # @return [String] the hash used to initialize the Lockfile. + # + attr_reader :internal_data + + # @param [Hash] hash + # a hash representation of the Lockfile. + # + def initialize(hash) + @internal_data = hash + end + + # Loads a lockfile form the given path. + # + # @note This method returns nil if the given path doesn't exists. + # + # @raise If there is a syntax error loading the YAML data. + # + # @param [Pathname] path + # the path where the lockfile is serialized. + # + # @return [Lockfile] a new lockfile. + # + def self.from_file(path) + return nil unless path.exist? + hash = YAMLHelper.load_file(path) + unless hash && hash.is_a?(Hash) + raise Informative, "Invalid Lockfile in `#{path}`" + end + lockfile = Lockfile.new(hash) + lockfile.defined_in_file = path + lockfile + end + + # @return [String] the file where the Lockfile is serialized. + # + attr_accessor :defined_in_file + + # @return [Boolean] Whether the Podfiles are equal. + # + def ==(other) + other && to_hash == other.to_hash + end + + # @return [String] a string representation suitable for debugging. + # + def inspect + "#<#{self.class}>" + end + + #-------------------------------------------------------------------------# + + # !@group Accessing the Data + + public + + # @return [Array] the names of the installed Pods. + # + def pod_names + generate_pod_names_and_versions unless @pod_names + @pod_names + end + + # Returns the version of the given Pod. + # + # @param [String] pod_name The name of the Pod (root name of the specification). + # + # @return [Version] The version of the pod. + # + # @return [Nil] If there is no version stored for the given name. + # + def version(pod_name) + version = pod_versions[pod_name] + return version if version + root_name = pod_versions.keys.find do |name| + Specification.root_name(name) == pod_name + end + pod_versions[root_name] + end + + # Returns the source of the given Pod. + # + # @param [String] pod_name The name of the Pod (root name of the specification). + # + # @return [String] The source of the pod. + # + # @return [Nil] If there is no source stored for the given name. + # + def spec_repo(pod_name) + spec_repos_by_pod[pod_name] + end + + # Returns the checksum for the given Pod. + # + # @param [String] name The name of the Pod (root name of the specification). + # + # @return [String] The checksum of the specification for the given Pod. + # + # @return [Nil] If there is no checksum stored for the given name. + # + def checksum(name) + checksum_data[name] + end + + # @return [Array] the dependencies of the Podfile used for the + # last installation. + # + # @note It includes only the dependencies explicitly required in the + # podfile and not those triggered by the Resolver. 
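+    #
+    # @example (illustrative; values match the hash shown in #to_hash)
+    #   lockfile.dependencies.map(&:to_s)
+    #   # => ["BananaLib (~> 1.0)", "JSONKit (from `path/JSONKit.podspec`)"]
+    #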
+ def dependencies + unless @dependencies + data = internal_data['DEPENDENCIES'] || [] + @dependencies = data.map do |string| + dep = Dependency.from_string(string) + dep.external_source = external_sources_data[dep.root_name] + dep + end + end + @dependencies + end + + # Returns pod names grouped by the spec repo they were sourced from. + # + # @return [Hash>] A hash, where the keys are spec + # repo source URLs (or names), and the values are arrays of pod names. + # + # @note It does not include pods that come from "external sources". + # + def pods_by_spec_repo + @pods_by_spec_repo ||= internal_data['SPEC REPOS'] || {} + end + + # Generates a dependency that requires the exact version of the Pod with the + # given name. + # + # @param [String] name + # the name of the Pod + # + # @note The generated dependencies used are by the Resolver from + # upgrading a Pod during an installation. + # + # @raise If there is no version stored for the given name. + # + # @return [Array] the generated dependency. + # + def dependencies_to_lock_pod_named(name) + deps = dependencies.select { |d| d.root_name == name } + if deps.empty? + raise StandardError, "Attempt to lock the `#{name}` Pod without a " \ + 'known dependency.' + end + + deps.map do |dep| + version = version(dep.name) + locked_dependency = dep.dup + locked_dependency.specific_version = version + locked_dependency + end + end + + # Returns the specific checkout options for the external source of the pod + # with the given name. + # + # @example Output + # {:commit => "919903db28535c3f387c4bbaa6a3feae4428e993" + # :git => "https://github.com/luisdelarosa/AFRaptureXMLRequestOperation.git"} + # + # @return [Hash] a hash of the checkout options for the external source of + # the pod with the given name. + # + # @param [String] name + # the name of the Pod. + # + def checkout_options_for_pod_named(name) + checkout_options_data[name] + end + + # @return [Version] The version of CocoaPods which generated this lockfile. + # + def cocoapods_version + Version.new(internal_data['COCOAPODS']) + end + + #--------------------------------------# + + # !@group Accessing the internal data. + + private + + # @return [Array Array[String]}>] the pods installed + # and their dependencies. + # + def generate_pod_names_and_versions + @pod_names = [] + @pod_versions = {} + + return unless pods = internal_data['PODS'] + pods.each do |pod| + pod = pod.keys.first unless pod.is_a?(String) + name, version = Spec.name_and_version_from_string(pod) + @pod_names << name + @pod_versions[name] = version + end + end + + # @return [Hash{String => Hash}] a hash where the name of the pods are the + # keys and the values are the external source hash the dependency + # that required the pod. + # + def external_sources_data + @external_sources_data ||= internal_data['EXTERNAL SOURCES'] || {} + end + + # @return [Hash{String => Hash}] a hash where the name of the pods are the + # keys and the values are a hash of specific checkout options. + # + def checkout_options_data + @checkout_options_data ||= internal_data['CHECKOUT OPTIONS'] || {} + end + + # @return [Hash{String => Version}] a Hash containing the name of the root + # specification of the installed Pods as the keys and their + # corresponding {Version} as the values. + # + def pod_versions + generate_pod_names_and_versions unless @pod_versions + @pod_versions + end + + # @return [Hash{String => Version}] A Hash containing the checksums of the + # specification by the name of their root. 
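+    #
+    # @example Shape of the data (illustrative; checksum taken from the
+    #          example in #to_hash)
+    #   { 'BananaLib' => '439d9f683377ecf4a27de43e8cf3bce6be4df97b' }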
+ # + def checksum_data + internal_data['SPEC CHECKSUMS'] || {} + end + + # @return [Hash{String => String}] A hash containing the spec repo used for the specification + # by the name of the root spec. + # + def spec_repos_by_pod + @spec_repos_by_pod ||= pods_by_spec_repo.each_with_object({}) do |(spec_repo, pods), spec_repos_by_pod| + pods.each do |pod| + spec_repos_by_pod[pod] = spec_repo + end + end + end + + #-------------------------------------------------------------------------# + + # !@group Comparison with a Podfile + + public + + # Analyzes the {Lockfile} and detects any changes applied to the {Podfile} + # since the last installation. + # + # For each Pod, it detects one state among the following: + # + # - added: Pods that weren't present in the Podfile. + # - changed: Pods that were present in the Podfile but changed: + # - Pods whose version is not compatible anymore with Podfile, + # - Pods that changed their external options. + # - removed: Pods that were removed form the Podfile. + # - unchanged: Pods that are still compatible with Podfile. + # + # @param [Podfile] podfile + # the podfile that should be analyzed. + # + # @return [Hash{Symbol=>Array[Strings]}] a hash where pods are grouped + # by the state in which they are. + # + # @todo Why do we look for compatibility instead of just comparing if the + # two dependencies are equal? + # + def detect_changes_with_podfile(podfile) + result = {} + [:added, :changed, :removed, :unchanged].each { |k| result[k] = [] } + + installed_deps = {} + dependencies.each do |dep| + name = dep.root_name + installed_deps[name] ||= dependencies_to_lock_pod_named(name) + end + + installed_deps = installed_deps.values.flatten(1).group_by(&:name) + + podfile_dependencies = podfile.dependencies + podfile_dependencies_by_name = podfile_dependencies.group_by(&:name) + + all_dep_names = (dependencies + podfile_dependencies).map(&:name).uniq + all_dep_names.each do |name| + installed_dep = installed_deps[name] + installed_dep &&= installed_dep.first + podfile_dep = podfile_dependencies_by_name[name] + podfile_dep &&= podfile_dep.first + + if installed_dep.nil? then key = :added + elsif podfile_dep.nil? then key = :removed + elsif podfile_dep.compatible?(installed_dep) then key = :unchanged + else key = :changed + end + result[key] << name + end + result + end + + #-------------------------------------------------------------------------# + + # !@group Serialization + + public + + # Writes the Lockfile to the given path. + # + # @param [Pathname] path + # the path where the lockfile should be saved. + # + # @return [void] + # + def write_to_disk(path) + path.dirname.mkpath unless path.dirname.exist? + self.defined_in_file = path + # rubocop:disable Lint/RescueException + # rubocop:disable Lint/HandleExceptions + begin + existing = Lockfile.from_file(path) + return if existing == self + rescue Exception + end + path.open('w') { |f| f.write(to_yaml) } + # rubocop:enable Lint/HandleExceptions + # rubocop:enable Lint/RescueException + end + + # @return [Hash{String=>Array,Hash,String}] a hash representation of the + # Lockfile. 
+ # + # @example Output + # + # { + # 'PODS' => [ { BananaLib (1.0) => [monkey (< 1.0.9, ~> 1.0.1)] }, + # "JSONKit (1.4)", + # "monkey (1.0.8)"] + # 'DEPENDENCIES' => [ "BananaLib (~> 1.0)", + # "JSONKit (from `path/JSONKit.podspec`)" ], + # 'EXTERNAL SOURCES' => { "JSONKit" => { :podspec => path/JSONKit.podspec } }, + # 'SPEC CHECKSUMS' => { "BananaLib" => "439d9f683377ecf4a27de43e8cf3bce6be4df97b", + # "JSONKit", "92ae5f71b77c8dec0cd8d0744adab79d38560949" }, + # 'PODFILE CHECKSUM' => "439d9f683377ecf4a27de43e8cf3bce6be4df97b", + # 'COCOAPODS' => "0.17.0" + # } + # + # + def to_hash + hash = {} + internal_data.each do |key, value| + hash[key] = value unless value.nil? || value.empty? + end + hash + end + + # @return [Array] The order in which the hash keys should appear in + # a serialized Lockfile. + # + HASH_KEY_ORDER = [ + 'PODS', + 'DEPENDENCIES', + 'SPEC REPOS', + 'EXTERNAL SOURCES', + 'CHECKOUT OPTIONS', + 'SPEC CHECKSUMS', + 'PODFILE CHECKSUM', + 'COCOAPODS', + ].map(&:freeze).freeze + + # @return [String] the YAML representation of the Lockfile, used for + # serialization. + # + # @note Empty root keys are discarded. + # + # @note The YAML string is prettified. + # + def to_yaml + YAMLHelper.convert_hash(to_hash, HASH_KEY_ORDER, "\n\n") + end + + #-------------------------------------------------------------------------# + + class << self + # !@group Generation + + public + + # Generates a hash representation of the Lockfile generated from a given + # Podfile and the list of resolved Specifications. This representation is + # suitable for serialization. + # + # @param [Podfile] podfile + # the podfile that should be used to generate the lockfile. + # + # @param [Array] specs + # an array containing the podspec that were generated by + # resolving the given podfile. + # + # @return [Lockfile] a new lockfile. + # + def generate(podfile, specs, checkout_options, spec_repos = {}) + hash = { + 'PODS' => generate_pods_data(specs), + 'DEPENDENCIES' => generate_dependencies_data(podfile), + 'SPEC REPOS' => generate_spec_repos(spec_repos), + 'EXTERNAL SOURCES' => generate_external_sources_data(podfile), + 'CHECKOUT OPTIONS' => checkout_options, + 'SPEC CHECKSUMS' => generate_checksums(specs), + 'PODFILE CHECKSUM' => podfile.checksum, + 'COCOAPODS' => CORE_VERSION, + } + Lockfile.new(hash) + end + + #--------------------------------------# + + private + + # !@group Private helpers + + # Generates the list of the installed Pods and their dependencies. + # + # @note The dependencies of iOS and OS X version of the same pod are + # merged. + # + # @todo Specifications should be stored per platform, otherwise they + # list dependencies which actually might not be used. + # + # @return [Array] the generated data. + # + # @example Output + # [ {"BananaLib (1.0)"=>["monkey (< 1.0.9, ~> 1.0.1)"]}, + # "monkey (1.0.8)" ] + # + # + def generate_pods_data(specs) + pods_and_deps_merged = specs.reduce({}) do |result, spec| + name = spec.to_s + result[name] ||= [] + result[name].concat(spec.all_dependencies.map(&:to_s)) + result + end + + pod_and_deps = pods_and_deps_merged.map do |name, deps| + deps.empty? ? name : { name => YAMLHelper.sorted_array(deps.uniq) } + end + YAMLHelper.sorted_array(pod_and_deps) + end + + # Generates the list of the dependencies of the Podfile. + # + # @example Output + # [ "BananaLib (~> 1.0)", + # "JSONKit (from `path/JSONKit.podspec')" ] + # + # @return [Array] the generated data. 
+ # + def generate_dependencies_data(podfile) + YAMLHelper.sorted_array(podfile.dependencies.map(&:to_s)) + end + + # Generates the hash of spec repo sources used in the Podfile. + # + # @example Output + # { "https://github.com/cocoapods/cocoapods.git" => ["Alamofire", "Moya"] } + # + def generate_spec_repos(spec_repos) + Hash[spec_repos.map do |source, specs| + next unless source + next if specs.empty? + key = source.url || source.name + + # save `trunk` as 'trunk' so that the URL itself can be changed without lockfile churn + key = Pod::TrunkSource::TRUNK_REPO_NAME if source.name == Pod::TrunkSource::TRUNK_REPO_NAME + + value = specs.map { |s| s.root.name }.uniq + [key, YAMLHelper.sorted_array(value)] + end.compact] + end + + # Generates the information of the external sources. + # + # @example Output + # { "JSONKit"=>{:podspec=>"path/JSONKit.podspec"} } + # + # @return [Hash] a hash where the keys are the names of the pods and + # the values store the external source hashes of each + # dependency. + # + def generate_external_sources_data(podfile) + deps = podfile.dependencies.select(&:external?) + deps = deps.sort { |d, other| d.name <=> other.name } + sources = {} + deps.each { |d| sources[d.root_name] = d.external_source } + sources + end + + # Generates the relative to the checksum of the specifications. + # + # @example Output + # { + # "BananaLib"=>"9906b267592664126923875ce2c8d03824372c79", + # "JSONKit"=>"92ae5f71b77c8dec0cd8d0744adab79d38560949" + # } + # + # @return [Hash] a hash where the keys are the names of the root + # specifications and the values are the SHA1 digest of the + # podspec file. + # + def generate_checksums(specs) + checksums = {} + specs.select(&:defined_in_file).each do |spec| + checksums[spec.root.name] = spec.checksum + end + checksums + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/metrics.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/metrics.rb new file mode 100644 index 0000000..814008f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/metrics.rb @@ -0,0 +1,47 @@ +module Pod + # Allows to access metrics about pods. + # + # This class is stored in Core because it might be used by web services. + # + module Metrics + # Returns the metrics of a pod. + # + # @param [String] name + # The name of the pod. + # + # @return [Hash] The metrics for the pod. + # + def self.pod(name) + peform_request("http://metrics.cocoapods.org/api/v1/pods/#{name}") + end + + private + + # @!group Private helpers + #-------------------------------------------------------------------------# + + # Performs a get request with the given URL. + # + # @param [String] url + # The URL of the resource. + # + # @return [Array, Hash] The information of the resource as Ruby objects. + # + def self.peform_request(url) + require 'rest' + require 'json' + headers = { 'User-Agent' => "CocoaPods #{Pod::CORE_VERSION}" } + response = REST.get(url, headers) + body = JSON.parse(response.body) + if response.ok? 
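+        # A 2xx response: return the parsed JSON body as Ruby objects.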
+ body + else + CoreUI.warn "Request to #{url} failed - #{response.status_code}" + CoreUI.warn body['message'] + nil + end + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/platform.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/platform.rb new file mode 100644 index 0000000..8fa515f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/platform.rb @@ -0,0 +1,246 @@ +module Pod + # A Platform describes an SDK name and deployment target. + # + class Platform + # @return [Symbol, String] the name of the SDK represented by the platform. + # + attr_reader :symbolic_name + alias_method :name, :symbolic_name + + # @return [Version] the deployment target of the platform. + # + attr_reader :deployment_target + + # Constructs a platform from either another platform or by + # specifying the symbolic name and optionally the deployment target. + # + # @overload initialize(name, deployment_target) + # + # @param [Symbol, String] name + # the name of platform. + # + # @param [String, Version] deployment_target + # the optional deployment. + # + # @note If the deployment target is not provided a default deployment + # target will not be assigned. + # + # @example Initialization with symbol + # + # Platform.new(:ios) + # Platform.new(:ios, '4.3') + # + # @overload initialize(platform) + # + # @param [Platform] platform + # Another {Platform}. + # + # @example Initialization with another platform + # + # platform = Platform.new(:ios) + # Platform.new(platform) + # + def initialize(input, target = nil) + if input.is_a? Platform + @symbolic_name = input.name + @deployment_target = input.deployment_target + else + # Allow `Platform.new('macos')` to be equivalent to `Platform.macos` + if input == 'macos' + input = 'osx' + end + @symbolic_name = input.to_sym + target = target[:deployment_target] if target.is_a?(Hash) + @deployment_target = Version.create(target) + end + end + + # Convenience method to initialize an iOS platform. + # + # @return [Platform] an iOS platform. + # + def self.ios + new :ios + end + + # Convenience method to initialize an OS X platform. + # + # @return [Platform] an OS X platform. + # + def self.osx + new :osx + end + + # Convenience method to initialize a macOS platform. + # + # @return [Platform] a macOS platform. + # + def self.macos + osx + end + + # Convenience method to initialize a tvOS platform. + # + # @return [Platform] a tvOS platform. + # + def self.tvos + new :tvos + end + + # Convenience method to initialize a watchOS platform. + # + # @return [Platform] a watchOS platform. + # + def self.watchos + new :watchos + end + + # Convenience method to get all available platforms. + # + # @return [Array] list of platforms. + # + def self.all + [ios, osx, watchos, tvos] + end + + # Checks if a platform is equivalent to another one or to a symbol + # representation. + # + # @param [Platform, Symbol] other + # the other platform to check. + # + # @note If a symbol is passed the comparison does not take into account + # the deployment target. + # + # @return [Boolean] whether two platforms are the equivalent. + # + def ==(other) + if other.is_a?(Symbol) + @symbolic_name == other + else + (name == other.name) && (deployment_target == other.deployment_target) + end + end + + # (see #==) + alias_method :eql?, :== + + # Hashes the instance by the platform name and deployment target. 
+ # + # This adds support to make instances usable as Hash keys. + # + # @!visibility private + def hash + name.hash ^ deployment_target.hash + end + + # Checks whether a platform supports another one. + # + # In the context of operating system SDKs, a platform supports another + # one if they have the same name and the other platform has a minor or + # equal deployment target. + # + # @return [Boolean] whether the platform supports another platform. + # + def supports?(other) + other = Platform.new(other) + if other.deployment_target && deployment_target + (other.name == name) && (other.deployment_target <= deployment_target) + else + other.name == name + end + end + + # @return [String] a string representation that includes the deployment + # target. + # + def to_s + s = self.class.string_name(@symbolic_name) + s << " #{deployment_target}" if deployment_target + s + end + + # @return [String] the debug representation. + # + def inspect + "#<#{self.class.name} name=#{name.inspect} " \ + "deployment_target=#{deployment_target.inspect}>" + end + + # @return [Symbol] a symbol representing the name of the platform. + # + def to_sym + name + end + + # Compares the platform first by name and the by deployment_target for + # sorting. + # + # @param [Platform] other + # The other platform to compare. + # + # @return [Fixnum] -1, 0, or +1 depending on whether the receiver is less + # than, equal to, or greater than other. + # + def <=>(other) + name_sort = name.to_s <=> other.name.to_s + if name_sort.zero? + deployment_target <=> other.deployment_target + else + name_sort + end + end + + # @return [Boolean] whether the platform requires legacy architectures for + # iOS. + # + def requires_legacy_ios_archs? + if name == :ios + deployment_target && (deployment_target < Version.new('4.3')) + else + false + end + end + + # @return [Boolean] whether the platform supports dynamic frameworks. + # + def supports_dynamic_frameworks? + if name == :ios + deployment_target && (deployment_target >= Version.new(8.0)) + else + true + end + end + + # @return [String] The string that describes the #symbolic_name. + # + def string_name + self.class.string_name(symbolic_name) + end + + # @return [String] The string that describes the #symbolic_name, + # which doesn't contain spaces and is so safe to use in + # paths which might not be quoted or escaped consequently. + def safe_string_name + string_name.tr(' ', '') + end + + # Converts the symbolic name of a platform to a string name suitable to be + # presented to the user. + # + # @param [Symbol] symbolic_name + # the symbolic name of a platform. + # + # @return [String] The string that describes the name of the given symbol. + # + def self.string_name(symbolic_name) + case symbolic_name + when :ios then 'iOS' + when :osx then 'macOS' + when :watchos then 'watchOS' + when :tvos then 'tvOS' + else symbolic_name.to_s + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile.rb new file mode 100644 index 0000000..02ca15f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile.rb @@ -0,0 +1,436 @@ +require 'cocoapods-core/podfile/dsl' +require 'cocoapods-core/podfile/target_definition' + +module Pod + # The Podfile is a specification that describes the dependencies of the + # targets of an Xcode project. + # + # It supports its own DSL and is stored in a file named `Podfile`. 
+ # + # The Podfile creates a hierarchy of target definitions that store the + # information necessary to generate the CocoaPods libraries. + # + class Podfile + # @!group DSL support + + include Pod::Podfile::DSL + + #-------------------------------------------------------------------------# + + class StandardError < ::StandardError; end + + #-------------------------------------------------------------------------# + + # @return [Pathname] the path used to load the Podfile. It is nil + # if the Podfile was generated programmatically. + # + attr_accessor :defined_in_file + + # @param [Pathname] defined_in_file + # the path of the podfile. + # + # @param [Proc] block + # an optional block that configures the Podfile through the DSL. + # + # @example Creating a Podfile. + # + # platform :ios, "6.0" + # target :my_app do + # pod "AFNetworking", "~> 1.0" + # end + # + def initialize(defined_in_file = nil, internal_hash = {}, &block) + self.defined_in_file = defined_in_file + @internal_hash = internal_hash + if block + default_target_def = TargetDefinition.new('Pods', self) + default_target_def.abstract = true + @root_target_definitions = [default_target_def] + @current_target_definition = default_target_def + instance_eval(&block) + else + @root_target_definitions = [] + end + end + + # @return [String] a string useful to represent the Podfile in a message + # presented to the user. + # + def to_s + 'Podfile' + end + + #-------------------------------------------------------------------------# + + public + + # @!group Working with a Podfile + + # @return [Hash{Symbol,String => TargetDefinition}] the target definitions + # of the Podfile stored by their name. + # + def target_definitions + Hash[target_definition_list.map { |td| [td.name, td] }] + end + + # @return [Array] all target definitions in the Podfile. + # + def target_definition_list + root_target_definitions.map { |td| [td, td.recursive_children] }.flatten + end + + # @return [Array] The root target definitions. + # + attr_accessor :root_target_definitions + + # @return [Array] the dependencies of all of the target + # definitions. + # + def dependencies + target_definition_list.map(&:dependencies).flatten.uniq + end + + #-------------------------------------------------------------------------# + + public + + # @!group Attributes + + # @return [Array] The names of the sources. + # + def sources + get_hash_value('sources') || [] + end + + # @return [Hash] The plugins, keyed by name. + # + def plugins + get_hash_value('plugins') || {} + end + + # @return [String] the path of the workspace if specified by the user. + # + def workspace_path + path = get_hash_value('workspace') + if path + if File.extname(path) == '.xcworkspace' + path + else + "#{path}.xcworkspace" + end + end + end + + # @return [Boolean] whether the podfile should generate a BridgeSupport + # metadata document. + # + def generate_bridge_support? + get_hash_value('generate_bridge_support') + end + + # @return [Boolean] whether the -fobjc-arc flag should be added to the + # OTHER_LD_FLAGS. + # + def set_arc_compatibility_flag? + get_hash_value('set_arc_compatibility_flag') + end + + # @return [(String,Hash)] the installation strategy and installation options + # to be used during installation. + # + def installation_method + get_hash_value('installation_method', 'name' => 'cocoapods', 'options' => {}). 
+ values_at('name', 'options') + end + + #-------------------------------------------------------------------------# + + public + + # @!group Hooks + + # Calls the pre install callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Boolean] whether a pre install callback was specified and it was + # called. + # + def pre_install!(installer) + if @pre_install_callback + @pre_install_callback.call(installer) + true + else + false + end + end + + # Calls the pre integrate callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Boolean] whether a pre integrate callback was specified and it was + # called. + # + def pre_integrate!(installer) + if @pre_integrate_callback + @pre_integrate_callback.call(installer) + true + else + false + end + end + + # Calls the post install callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Boolean] whether a post install callback was specified and it was + # called. + # + def post_install!(installer) + if @post_install_callback + @post_install_callback.call(installer) + true + else + false + end + end + + # Calls the post integrate callback if defined. + # + # @param [Pod::Installer] installer + # the installer that is performing the installation. + # + # @return [Boolean] whether a post install callback was specified and it was + # called. + # + def post_integrate!(installer) + if @post_integrate_callback + @post_integrate_callback.call(installer) + true + else + false + end + end + + #-------------------------------------------------------------------------# + + public + + # @!group Representations + + # @return [Array] The keys used by the hash representation of the Podfile. + # + HASH_KEYS = %w( + installation_method + workspace + sources + plugins + set_arc_compatibility_flag + generate_bridge_support + target_definitions + ).freeze + + # @return [Hash] The hash representation of the Podfile. + # + def to_hash + hash = {} + hash['target_definitions'] = root_target_definitions.map(&:to_hash) + hash.merge!(internal_hash) + hash + end + + # @return [String] The YAML representation of the Podfile. + # + def to_yaml + require 'cocoapods-core/yaml_helper' + "---\n" << YAMLHelper.convert_hash(to_hash, HASH_KEYS) + end + + # @return [String] The SHA1 digest of the file in which the Podfile + # is defined. + # + # @return [Nil] If the podfile is not defined in a file. + # + def checksum + @checksum ||= begin + unless defined_in_file.nil? + require 'digest' + checksum = Digest::SHA1.hexdigest(File.read(defined_in_file)) + checksum = checksum.encode('UTF-8') if checksum.respond_to?(:encode) + checksum + end + end + end + + def ==(other) + self.class == other.class && + to_hash == other.to_hash + end + + # @!group Class methods + #-------------------------------------------------------------------------# + + # Initializes a Podfile from the file with the given path. + # + # @param [Pathname] path + # the path from where the Podfile should be loaded. + # + # @return [Podfile] the generated Podfile. + # + def self.from_file(path) + path = Pathname.new(path) + unless path.exist? + raise Informative, "No Podfile exists at path `#{path}`." + end + + case path.extname + when '', '.podfile', '.rb' + Podfile.from_ruby(path) + when '.yaml' + Podfile.from_yaml(path) + else + raise Informative, "Unsupported Podfile format `#{path}`." 
+ end + end + + # Configures a new Podfile from the given ruby string. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. + # + # @param [String] contents + # The ruby string which will configure the Podfile with the DSL. + # + # @return [Podfile] the new Podfile + # + def self.from_ruby(path, contents = nil) + contents ||= File.open(path, 'r:utf-8', &:read) + + # Work around for Rubinius incomplete encoding in 1.9 mode + if contents.respond_to?(:encoding) && contents.encoding.name != 'UTF-8' + contents.encode!('UTF-8') + end + + if contents.tr!('“”‘’‛', %(""''')) + # Changes have been made + CoreUI.warn "Smart quotes were detected and ignored in your #{path.basename}. " \ + 'To avoid issues in the future, you should not use ' \ + 'TextEdit for editing it. If you are not using TextEdit, ' \ + 'you should turn off smart quotes in your editor of choice.' + end + + podfile = Podfile.new(path) do + # rubocop:disable Lint/RescueException + begin + # rubocop:disable Security/Eval + eval(contents, nil, path.to_s) + # rubocop:enable Security/Eval + rescue Exception => e + message = "Invalid `#{path.basename}` file: #{e.message}" + raise DSLError.new(message, path, e, contents) + end + # rubocop:enable Lint/RescueException + end + podfile + end + + # Configures a new Podfile from the given YAML representation. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. + # + # @return [Podfile] the new Podfile + # + def self.from_yaml(path) + string = File.open(path, 'r:utf-8', &:read) + # Work around for Rubinius incomplete encoding in 1.9 mode + if string.respond_to?(:encoding) && string.encoding.name != 'UTF-8' + string.encode!('UTF-8') + end + hash = YAMLHelper.load_string(string) + from_hash(hash, path) + end + + # Configures a new Podfile from the given hash. + # + # @param [Hash] hash + # The hash which contains the information of the Podfile. + # + # @param [Pathname] path + # The path from which the Podfile is loaded. + # + # @return [Podfile] the new Podfile + # + def self.from_hash(hash, path = nil) + internal_hash = hash.dup + target_definitions = internal_hash.delete('target_definitions') || [] + podfile = Podfile.new(path, internal_hash) + target_definitions.each do |definition_hash| + definition = TargetDefinition.from_hash(definition_hash, podfile) + podfile.root_target_definitions << definition + end + podfile + end + + #-------------------------------------------------------------------------# + + private + + # @!group Private helpers + + # @return [Hash] The hash which store the attributes of the Podfile. + # + attr_accessor :internal_hash + + # Set a value in the internal hash of the Podfile for the given key. + # + # @param [String] key + # The key for which to store the value. + # + # @param [Object] value + # The value to store. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [void] + # + def set_hash_value(key, value) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = value + end + + # Returns the value for the given key in the internal hash of the Podfile. + # + # @param [String] key + # The key for which the value is needed. + # + # @param default + # The default value to return if the internal hash has no entry for + # the given `key`. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [Object] The value for the key. 
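+    #
+    # @example (illustrative; assumes a `plugin 'slather'` declaration)
+    #   get_hash_value('plugins', {}) # => { 'slather' => {} }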
+ # + def get_hash_value(key, default = nil) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash.fetch(key, default) + end + + # @return [TargetDefinition] The current target definition to which the DSL + # commands apply. + # + attr_accessor :current_target_definition + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/dsl.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/dsl.rb new file mode 100644 index 0000000..bfdcb69 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/dsl.rb @@ -0,0 +1,997 @@ +module Pod + class Podfile + # The methods defined in this file and the order of the methods is + # relevant for the documentation generated on + # https://github.com/CocoaPods/guides.cocoapods.org. + + # The Podfile is a specification that describes the dependencies of the + # targets of one or more Xcode projects. + # + # A Podfile can be very simple: + # + # target 'MyApp' + # pod 'AFNetworking', '~> 1.0' + # + # An example of a more complex Podfile can be: + # + # platform :ios, '9.0' + # inhibit_all_warnings! + # + # target 'MyApp' do + # pod 'ObjectiveSugar', '~> 0.5' + # + # target 'MyAppTests' do + # inherit! :search_paths + # pod 'OCMock', '~> 2.0.1' + # end + # end + # + # post_install do |installer| + # installer.pods_project.targets.each do |target| + # puts "#{target.name}" + # end + # end + # + module DSL + # @!group Root Options + # Configuration that applies to the Podfile as a whole. + # + # * `install!` declares the installation method and options to be used + # during installation. + + # Specifies the installation method and options to be used when + # CocoaPods installs this Podfile. + # + # The first parameter indicates the installation method to use; + # next parameters indicate installation options. + # + # For now the only accepted installation method is `'cocoapods'`, so + # you'll always use this value for the first parameter; but more + # installation methods might be available in future versions. + # + # @param [String] installation_method + # the name of the installation strategy. + # + # @param [Hash] options + # the installation options. + # + # @example Specifying custom CocoaPods installation options + # + # install! 'cocoapods', + # :deterministic_uuids => false, + # :integrate_targets => false + # + # @return [void] + # + def install!(installation_method, options = {}) + unless current_target_definition.root? + raise Informative, 'The installation method can only be set at the root level of the Podfile.' + end + + set_hash_value('installation_method', 'name' => installation_method, 'options' => options) + end + + # Raises a warning when CocoaPods is run using the Global Gemset. + # A Semantic version can be supplied to warn if the bundler version + # does not match the required version. + # + # @param [String] version + # The required bundler version, in semantic version format. + # + # @example + # + # ensure_bundler! + # + # @example + # + # ensure_bundler! '~> 2.0.0' + # + # @return [void] + # + def ensure_bundler!(version = nil) + unless current_target_definition.root? + raise Informative, 'The Ensure Bundler check can only be set at the root level of the Podfile.' + end + unless %w(BUNDLE_BIN_PATH BUNDLE_GEMFILE).all? 
{ |key| ENV.key?(key) } + raise Informative, "CocoaPods was invoked from Global Gemset.\nPlease re-run using: `bundle exec pod #{ARGV.join(' ')}`" + end + unless ENV['BUNDLER_VERSION'].nil? || Requirement.create(version).satisfied_by?(Version.new(ENV['BUNDLER_VERSION'])) + raise Informative, "The installed Bundler version: #{ENV['BUNDLER_VERSION']} does not match the required version: #{version}" + end + end + + #-----------------------------------------------------------------------# + + # @!group Dependencies + # The Podfile specifies the dependencies of each user target. + # + # * `pod` is the way to declare a specific dependency. + # * `podspec` provides an easy API for the creation of podspecs. + # * `target` is how you scope your dependencies to specific + # targets in your Xcode projects. + + #-----------------------------------------------------------------------# + + # Specifies a dependency of the project. + # + # A dependency requirement is defined by the name of the Pod and + # optionally a list of version requirements. + # + # When starting out with a project it is likely that you will want to use + # the latest version of a Pod. If this is the case, simply omit the + # version requirements. + # + # pod 'SSZipArchive' + # + # Later on in the project you may want to freeze to a specific version of + # a Pod, in which case you can specify that version number. + # + # pod 'Objection', '0.9' + # + # Besides no version, or a specific one, it is also possible to use + # operators: + # + # * `= 0.1` Version 0.1. + # * `> 0.1` Any version higher than 0.1. + # * `>= 0.1` Version 0.1 and any higher version. + # * `< 0.1` Any version lower than 0.1. + # * `<= 0.1` Version 0.1 and any lower version. + # * `~> 0.1.2` Version 0.1.2 and the versions up to 0.2, not including 0.2. + # This operator works based on _the last component_ that you + # specify in your version requirement. The example is equal to + # `>= 0.1.2` combined with `< 0.2.0` and will always match the + # latest known version matching your requirements. + # * `~> 0` Version 0 and the versions up to 1, not including 1. + # * `~> 0.1.3-beta.0` Beta and release versions for 0.1.3, release versions + # up to 0.2 excluding 0.2. Components separated by a dash (-) + # will not be considered for the version requirement. + # + # A list of version requirements can be specified for even more fine + # grained control. + # + # For more information, regarding versioning policy, see: + # + # * [Semantic Versioning](http://semver.org) + # * [RubyGems Versioning Policies](http://guides.rubygems.org/patterns/#semantic-versioning) + # + # ------ + # + # ### Build configurations + # + # By default dependencies are installed in all the build configurations + # of the target. For debug purposes or for other reasons, they can be + # only enabled on a list of build configurations. + # + # pod 'PonyDebugger', :configurations => ['Debug', 'Beta'] + # + # Alternatively, you can specify to have it included on a single build + # configuration. + # + # pod 'PonyDebugger', :configuration => 'Debug' + # + # Note that transitive dependencies are included in all configurations + # and you have to manually specify build configurations for them as well in + # case this is not desired. 
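+      #
+      # For example (illustrative; the dependency name is hypothetical): if
+      # `PonyDebugger` pulled in a transitive dependency such as
+      # `SocketRocket`, that dependency stays in all build configurations
+      # unless it is scoped explicitly as well:
+      #
+      #   pod 'PonyDebugger', :configurations => ['Debug']
+      #   pod 'SocketRocket', :configurations => ['Debug']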
+ # + # ------ + # + # ### Modular Headers + # + # If you would like to use modular headers per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :modular_headers => true + # + # Additionally, when you use the `use_modular_headers!` attribute, + # you can exclude a particular Pod from modular headers using the following: + # + # pod 'SSZipArchive', :modular_headers => false + # + # ------ + # + # ### Source + # + # By default the sources specified at the global level are searched in the order + # they are specified for a dependency match. This behaviour can be altered + # for a specific dependency by specifying the source with the dependency: + # + # pod 'PonyDebugger', :source => 'https://cdn.cocoapods.org/' + # + # In this case only the specified source will be searched for the dependency + # and any global sources ignored. + # + # ------ + # + # ### Subspecs + # + # When installing a Pod via its name, it will install all of the + # default subspecs defined in the podspec. + # + # You may install a specific subspec using the following: + # + # pod 'QueryKit/Attribute' + # + # You may specify a collection of subspecs to be installed as follows: + # + # pod 'QueryKit', :subspecs => ['Attribute', 'QuerySet'] + # + # ### Test Specs + # + # Test specs can be optionally included via the `:testspecs` option. By default, + # none of a Pod's test specs are included. + # + # You may specify a list of test spec names to install using the following: + # + # pod 'AFNetworking', :testspecs => ['UnitTests', 'SomeOtherTests'] + # + # The values provided to `:testspecs` correspond to the name provided to the + # `test_spec` DSL attribute in a Podspec. + # + # ------ + # + # Dependencies can be obtained also from external sources. + # + # + # ### Using the files from a local path. + # + # If you would like to use develop a Pod in tandem with its client + # project you can use the `path` option. + # + # pod 'AFNetworking', :path => '~/Documents/AFNetworking' + # + # Using this option CocoaPods will assume the given folder to be the + # root of the Pod and will link the files directly from there in the + # Pods project. This means that your edits will persist to CocoaPods + # installations. + # + # The referenced folder can be a checkout of your your favourite SCM or + # even a git submodule of the current repository. + # + # Note that the `podspec` of the Pod file is expected to be in the + # folder. + # + # + # ### From a podspec in the root of a library repository. + # + # Sometimes you may want to use the bleeding edge version of a Pod. Or a + # specific revision. If this is the case, you can specify that with your + # pod declaration. + # + # To use the `master` branch of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git' + # + # + # To use a different branch of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :branch => 'dev' + # + # + # To use a tag of the repository: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :tag => '0.7.0' + # + # + # Or specify a commit: + # + # pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :commit => '082f8319af' + # + # It is important to note, though, that this means that the version will + # have to satisfy any other dependencies on the Pod by other Pods. 
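+      #
+      # For example (illustrative): pinning
+      #
+      #   pod 'AFNetworking', :git => 'https://github.com/gowalla/AFNetworking.git', :tag => '0.7.0'
+      #
+      # resolves AFNetworking to exactly 0.7.0, so another Pod that requires,
+      # say, `AFNetworking (~> 1.0)` could no longer be satisfied.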
+ # + # The `podspec` file is expected to be in the root of the repository, + # if this library does not have a `podspec` file in its repository + # yet, you will have to use one of the approaches outlined in the + # sections below. + # + # + # ### From a podspec outside a spec repository, for a library without podspec. + # + # If a podspec is available from another source outside of the library’s + # repository. Consider, for instance, a podspec available via HTTP: + # + # pod 'JSONKit', :podspec => 'https://example.com/JSONKit.podspec' + # + # + # @note This method allow a nil name and the raises to be more + # informative. + # + # @return [void] + # + def pod(name = nil, *requirements) + unless name + raise StandardError, 'A dependency requires a name.' + end + + current_target_definition.store_pod(name, *requirements) + end + + # Use just the dependencies of a Pod defined in the given podspec file. + # If no arguments are passed the first podspec in the root of the Podfile + # is used. It is intended to be used by the project of a library. Note: + # this does not include the sources derived from the podspec just the + # CocoaPods infrastructure. + # + # @example + # podspec + # + # @example + # podspec :name => 'QuickDialog' + # + # @example + # podspec :path => '/Documents/PrettyKit/PrettyKit.podspec' + # + # @param [Hash {Symbol=>String}] options + # the path where to load the {Specification}. If not provided + # the first podspec in the directory of the Podfile is used. + # + # @option options [String] :path + # the path of the podspec file + # + # @option options [String] :name + # the name of the podspec + # + # @note This method uses the dependencies declared for the + # platform of the target definition. + # + # + # @note This method requires that the Podfile has a non nil value for + # {#defined_in_file} unless the path option is used. + # + # @return [void] + # + def podspec(options = nil) + current_target_definition.store_podspec(options) + end + + # Defines a CocoaPods target and scopes dependencies defined + # within the given block. A target should correspond to an Xcode target. + # By default the target includes the dependencies defined outside of + # the block, unless instructed not to `inherit!` them. + # + # @param [Symbol, String] name + # the name of the target. + # + # @example Defining a target + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # end + # + # @example Defining a test target accessing SSZipArchive pod from its parent + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # + # target 'ZipAppTests' do + # inherit! :search_paths + # pod 'Nimble' + # end + # end + # + # @example Defining a target applies Pods to multiple targets via its parent target + # + # target 'ShowsApp' do + # pod 'ShowsKit' + # + # # Has its own copy of ShowsKit + ShowTVAuth + # target 'ShowsTV' do + # pod 'ShowTVAuth' + # end + # + # # Has its own copy of Specta + Expecta + # # and has access to ShowsKit via the app + # # that the test target is bundled into + # + # target 'ShowsTests' do + # inherit! :search_paths + # pod 'Specta' + # pod 'Expecta' + # end + # end + # + # @return [void] + # + def target(name, options = nil) + if options + raise Informative, "Unsupported options `#{options}` for " \ + "target `#{name}`." + end + + parent = current_target_definition + definition = TargetDefinition.new(name, parent) + self.current_target_definition = definition + yield if block_given? 
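+      # Restore the parent definition even when the block raises, so that
+      # sibling `target` blocks are unaffected by a failure in this one.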
+ ensure + self.current_target_definition = parent + end + + # Adds a script phase to be integrated with this target. A script phase can be used to execute an arbitrary + # script that can use all Xcode environment variables during execution. A target may include multiple script + # phases which they will be added in the order they were declared. Deleting a script phase will effectively remove + # it from the target if it has been added previously. + # + # @example + # script_phase :name => 'HelloWorldScript', :script => 'echo "Hello World"' + # + # @example + # script_phase :name => 'HelloWorldScript', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' + # + # @param [Hash] options + # the options for this script phase. + # + # @option options [String] :name + # the name of the script phase. This option is required. + # + # @option options [String] :script + # the body of the script to execute. This option is required. + # + # @option options [String] :shell_path + # the shell path to use for this script phase, for example `/usr/bin/ruby` to use Ruby for this phase. + # + # @option options [Array] :input_files + # the input paths to use for this script phase. This is used by Xcode to determine whether to re-execute + # this script phase if the input paths have changed or not. + # + # @option options [Array] :output_files + # the output paths to use for this script phase. This is used by Xcode to avoid re-executing this script + # phase if none of the output paths have changed. + # + # @option options [Array] :input_file_lists + # the input file lists to use for this script phase. This is used by Xcode to determine whether to + # re-execute this script phase if the input paths have changed or not. + # + # @option options [Array] :output_file_lists + # the output file lists to use for this script phase. This is used by Xcode to avoid re-executing this + # script phase if none of the output paths have changed. + # + # @option options [Boolean] :show_env_vars_in_log + # whether this script phase should output the environment variables during execution. + # + # @option options [Symbol] :execution_position + # specifies the position of which this script phase should be executed. The currently supported values are: + # `:before_compile`, `:after_compile` and `:any` which is the default. + # + # @option options [String] :dependency_file + # specifies the dependency file to use for this script phase. + # + # @return [void] + # + def script_phase(options) + raise Informative, 'Script phases can only be added within target definitions.' if current_target_definition.root? + raise Informative, 'Script phases cannot be added to abstract targets.' if current_target_definition.abstract? + current_target_definition.store_script_phase(options) + end + + # Defines a new abstract target that can be used for convenient + # target dependency inheritance. + # + # @param [Symbol, String] name + # the name of the target. 
+ # + # @example Defining an abstract target + # + # abstract_target 'Networking' do + # pod 'AlamoFire' + # + # target 'Networking App 1' + # target 'Networking App 2' + # end + # + # @example Defining an abstract_target wrapping Pods to multiple targets + # + # # Note: There are no targets called "Shows" in any of this workspace's Xcode projects + # abstract_target 'Shows' do + # pod 'ShowsKit' + # + # # The target ShowsiOS has its own copy of ShowsKit (inherited) + ShowWebAuth (added here) + # target 'ShowsiOS' do + # pod 'ShowWebAuth' + # end + # + # # The target ShowsTV has its own copy of ShowsKit (inherited) + ShowTVAuth (added here) + # target 'ShowsTV' do + # pod 'ShowTVAuth' + # end + # + # # Our tests target has its own copy of + # # our testing frameworks, and has access + # # to ShowsKit as well because it is + # # a child of the abstract target 'Shows' + # + # target 'ShowsTests' do + # inherit! :search_paths + # pod 'Specta' + # pod 'Expecta' + # end + # end + # + # @return [void] + # + def abstract_target(name) + target(name) do + abstract! + yield if block_given? + end + end + + # Denotes that the current target is abstract, and thus will not directly + # link against an Xcode target. + # + # @return [void] + # + def abstract!(abstract = true) + current_target_definition.abstract = abstract + end + + # Sets the inheritance mode for the current target. + # + # @param [Symbol] inheritance + # the inheritance mode to set. + # + # **Available Modes:** + # + `:complete` The target inherits all + # behaviour from the parent. + # + `:none` The target inherits none of + # the behaviour from the parent. + # + `:search_paths` The target inherits + # the search paths of the parent only. + # + # + # @example Inheriting only search paths + # + # target 'App' do + # target 'AppTests' do + # inherit! :search_paths + # end + # end + # + # @return [void] + # + def inherit!(inheritance) + current_target_definition.inheritance = inheritance + end + + #-----------------------------------------------------------------------# + + # @!group Target configuration + # These settings are used to control the CocoaPods generated project. + # + # This starts out simply with stating what `platform` you are working + # on. `xcodeproj` allows you to state specifically which project to + # link with. + + #-----------------------------------------------------------------------# + + # Specifies the platform for which a static library should be built. + # + # CocoaPods provides a default deployment target if one is not specified. + # The current default values are `4.3` for iOS, `10.6` for OS X, `9.0` for tvOS + # and `2.0` for watchOS. + # + # If the deployment target requires it (iOS < `4.3`), `armv6` + # architecture will be added to `ARCHS`. + # + # @param [Symbol] name + # the name of platform, can be either `:osx` for OS X, `:ios` + # for iOS, `:tvos` for tvOS, or `:watchos` for watchOS. + # + # @param [String, Version] target + # The optional deployment. If not provided a default value + # according to the platform name will be assigned. + # + # @example Specifying the platform + # + # platform :ios, '4.0' + # platform :ios + # + # @return [void] + # + def platform(name, target = nil) + # Support for deprecated options parameter + target = target[:deployment_target] if target.is_a?(Hash) + current_target_definition.set_platform!(name, target) + end + + # Specifies the Xcode project that contains the target that the Pods + # library should be linked with. 
+ # + # ----- + # + # If none of the target definitions specify an explicit project + # and there is only **one** project in the same directory as the Podfile + # then that project will be used. + # + # It is possible also to specify whether the build settings of your + # custom build configurations should be modelled after the release or + # the debug presets. To do so you need to specify a hash where the name + # of each build configuration is associated to either `:release` or + # `:debug`. + # + # @param [String] path + # the path of the project to link with + # + # @param [Hash{String => symbol}] build_configurations + # a hash where the keys are the name of the build + # configurations in your Xcode project and the values are + # Symbols that specify if the configuration should be based on + # the `:debug` or `:release` configuration. If no explicit + # mapping is specified for a configuration in your project, it + # will default to `:release`. + # + # @example Specifying the user project + # + # # This Target can be found in a Xcode project called `FastGPS` + # target 'MyGPSApp' do + # project 'FastGPS' + # ... + # end + # + # # Same Podfile, multiple Xcodeprojects + # target 'MyNotesApp' do + # project 'FastNotes' + # ... + # end + # + # @example Using custom build configurations + # + # project 'TestProject', 'Mac App Store' => :release, 'Test' => :debug + # + # @return [void] + # + def project(path, build_configurations = {}) + current_target_definition.user_project_path = path + current_target_definition.build_configurations = build_configurations + end + + # @!visibility private + # + # @deprecated #{xcodeproj} was renamed to #{project}. + # + # `xcodeproj` is deprecated in [1.0](http://blog.cocoapods.org/CocoaPods-1.0/) and has been renamed to `project`. + # For pre-1.0 versions use `xcodeproj`. + # + def xcodeproj(*args) + CoreUI.warn '`xcodeproj` was renamed to `project`. Please update your Podfile accordingly.' + project(*args) + end + + # @!visibility private + # + # @deprecated linking a single target with multiple Xcode targets is no + # longer supported. Use an {#abstract_target} and target + # inheritance instead. + # + # `link_with` is deprecated in [1.0](http://blog.cocoapods.org/CocoaPods-1.0/) in + # favour of `abstract_target` and target inheritance instead. + # + def link_with(*) + raise Informative, 'The specification of `link_with` in the Podfile ' \ + 'is now unsupported, please use target blocks instead.' + end + + # Inhibits **all** the warnings from the CocoaPods libraries. + # + # ------ + # + # This attribute is inherited by child target definitions. + # + # If you would like to inhibit warnings per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :inhibit_warnings => true + # + # Additionally, when you use `inhibit_all_warnings!` attribute, + # you can exclude a particular Pod from being inhibited using the following: + # + # pod 'SSZipArchive', :inhibit_warnings => false + # + def inhibit_all_warnings! + current_target_definition.inhibit_all_warnings = true + end + + # Use modular headers for all CocoaPods static libraries. + # + # ------ + # + # This attribute is inherited by child target definitions. 
+ # + # If you would like to use modular headers per Pod you can use the + # following syntax: + # + # pod 'SSZipArchive', :modular_headers => true + # + # Additionally, when you use the `use_modular_headers!` attribute, + # you can exclude a particular Pod from modular headers using the following: + # + # pod 'SSZipArchive', :modular_headers => false + # + def use_modular_headers! + current_target_definition.use_modular_headers_for_all_pods = true + end + + # Use frameworks instead of static libraries for Pods. When using frameworks, you may also specify the `:linkage` + # style to use, either `:static` or `:dynamic`. + # + # ------ + # + # This attribute is inherited by child target definitions. + # + # @param [Boolean, Hash] option + # The option to use for configuring packaging and linkage style. + # + # @example + # + # target 'MyApp' do + # use_frameworks! + # pod 'AFNetworking', '~> 1.0' + # end + # + # @example + # + # target 'MyApp' do + # use_frameworks! :linkage => :dynamic + # pod 'AFNetworking', '~> 1.0' + # end + # + # target 'ZipApp' do + # use_frameworks! :linkage => :static + # pod 'SSZipArchive' + # end + # + # @return [void] + # + def use_frameworks!(option = true) + current_target_definition.use_frameworks!(option) + end + + # Specifies the Swift version requirements this target definition supports. + # + # **Note** These requirements are inherited from the parent, if specified and if none + # are specified at the root level then all versions are considered to be supported. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements this target supports. + # + # @example + # + # target 'MyApp' do + # supports_swift_versions '>= 3.0', '< 4.0' + # pod 'AFNetworking', '~> 1.0' + # end + # + # @example + # + # supports_swift_versions '>= 3.0', '< 4.0' + # + # target 'MyApp' do + # pod 'AFNetworking', '~> 1.0' + # end + # + # target 'ZipApp' do + # pod 'SSZipArchive' + # end + # + # @return [void] + # + def supports_swift_versions(*requirements) + current_target_definition.store_swift_version_requirements(*requirements) + end + + #-----------------------------------------------------------------------# + + # @!group Workspace + # + # This group list the options to configure workspace and to set global + # settings. + + #-----------------------------------------------------------------------# + + # Specifies the Xcode workspace that should contain all the projects. + # + # ----- + # + # If no explicit Xcode workspace is specified and only **one** project + # exists in the same directory as the Podfile, then the name of that + # project is used as the workspace’s name. + # + # @param [String] path + # path of the workspace. + # + # @example Specifying a workspace + # + # workspace 'MyWorkspace' + # + # @return [void] + # + def workspace(path) + set_hash_value('workspace', path.to_s) + end + + # Specifies that a BridgeSupport metadata document should be generated + # from the headers of all installed Pods. + # + # ----- + # + # This is for scripting languages such as [MacRuby](http://macruby.org), + # [Nu](http://programming.nu/index), and + # [JSCocoa](http://inexdo.com/JSCocoa), which use it to bridge types, + # functions, etc. + # + # @return [void] + # + def generate_bridge_support! + set_hash_value('generate_bridge_support', true) + end + + # Specifies that the -fobjc-arc flag should be added to the + # `OTHER_LD_FLAGS`. + # + # ----- + # + # This is used as a workaround for a compiler bug with non-ARC projects + # (see #142). 
This was originally done automatically but libtool as of + # Xcode 4.3.2 no longer seems to support the `-fobjc-arc` flag. Therefore + # it now has to be enabled explicitly using this method. + # + # Support for this method might be dropped in CocoaPods `1.0`. + # + # @return [void] + # + def set_arc_compatibility_flag! + set_hash_value('set_arc_compatibility_flag', true) + end + + #-----------------------------------------------------------------------# + + # @!group Sources + # + # The Podfile retrieves specs from a given list of sources (repositories). + # + # Sources are __global__ and they are not stored per target definition. + + #-----------------------------------------------------------------------# + + # Specifies the location of specs + # + # ----- + # + # Use this method to specify sources. The order of the sources is + # relevant. CocoaPods will use the highest version of a Pod of the first + # source which includes the Pod (regardless whether other sources have a + # higher version). + # + # The official CocoaPods source is implicit. + # Once you specify another source, then it will need to be included. + # + # @param [String] source + # The URL of a specs repository. + # + # @example Specifying to first use the Artsy repository and then the CocoaPods Master Repository + # + # source 'https://github.com/artsy/Specs.git' + # source 'https://cdn.cocoapods.org/' + # + # @return [void] + # + def source(source) + hash_sources = get_hash_value('sources') || [] + hash_sources << source + set_hash_value('sources', hash_sources.uniq) + end + + #-----------------------------------------------------------------------# + + # @!group Hooks + # The Podfile provides hooks that will be called during the + # installation process. + # + # Hooks are __global__ and not stored per target definition. + + #-----------------------------------------------------------------------# + + # Specifies the plugins that should be used during installation. + # + # ----- + # + # Use this method to specify a plugin that should be used during + # installation, along with the options that should be passed to the plugin + # when it is invoked. + # + # @param [String] name + # The name of the plugin. + # + # @param [Hash] options + # The optional options that should be passed to the plugin when + # its hooks are invoked. + # + # @example Specifying to use the `slather` and `cocoapods-keys` plugins. + # + # plugin 'cocoapods-keys', :keyring => 'Eidolon' + # plugin 'slather' + # + # @return [void] + # + def plugin(name, options = {}) + hash_plugins = get_hash_value('plugins') || {} + (hash_plugins[name] ||= {}).merge!(options.deep_stringify_keys) + set_hash_value('plugins', hash_plugins) + end + + # This hook allows you to make any changes to the Pods after they have + # been downloaded but before they are installed. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Defining a pre-install hook in a Podfile. + # + # pre_install do |installer| + # # Do something fancy! + # end + # + # + def pre_install(&block) + @pre_install_callback = block + end + + # This hook allows you to make changes before the project is written + # to disk. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Customizing the dependencies before integration + # + # pre_integrate do |installer| + # # perform some changes on dependencies + # end + # + # @return [void] + # + def pre_integrate(&block) + raise Informative, 'Specifying multiple `pre_integrate` hooks is unsupported.' 
if @pre_integrate_callback + @pre_integrate_callback = block + end + + # This hook allows you to make any last changes to the generated Xcode + # project before it is written to disk, or any other tasks you might want + # to perform. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Customizing the build settings of all targets + # + # post_install do |installer| + # installer.pods_project.targets.each do |target| + # target.build_configurations.each do |config| + # config.build_settings['GCC_ENABLE_OBJC_GC'] = 'supported' + # end + # end + # end + # + # @return [void] + # + def post_install(&block) + raise Informative, 'Specifying multiple `post_install` hooks is unsupported.' if @post_install_callback + @post_install_callback = block + end + + # This hook allows you to make changes after the project is written + # to disk. + # + # It receives the [Pod::Installer] as its only argument. + # + # @example Customizing the build settings of all targets + # + # post_integrate do |installer| + # # some change after project write to disk + # end + # + # @return [void] + # + def post_integrate(&block) + raise Informative, 'Specifying multiple `post_integrate` hooks is unsupported.' if @post_integrate_callback + @post_integrate_callback = block + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/target_definition.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/target_definition.rb new file mode 100644 index 0000000..26dcc5d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/podfile/target_definition.rb @@ -0,0 +1,1181 @@ +module Pod + class Podfile + # The TargetDefinition stores the information of a CocoaPods static + # library. The target definition can be linked with one or more targets of + # the user project. + # + # Target definitions can be nested and by default inherit the dependencies + # of the parent. + # + class TargetDefinition + # @return [TargetDefinition, Podfile] the parent target definition or the + # Podfile if the receiver is root. + # + attr_reader :parent + + # @param [String, Symbol] + # name @see name + # + # @param [TargetDefinition] parent + # @see parent + # + def initialize(name, parent, internal_hash = nil) + @internal_hash = internal_hash || {} + @parent = parent + @children = [] + @label = nil + self.name ||= name + if parent.is_a?(TargetDefinition) + parent.children << self + end + end + + # @return [Array] the children target definitions. + # + attr_reader :children + + # @return [Array] the targets definition descending + # from this one. + # + def recursive_children + (children + children.map(&:recursive_children)).flatten + end + + # @return [Boolean] Whether the target definition is root. + # + def root? + parent.is_a?(Podfile) || parent.nil? + end + + # @return [TargetDefinition] The root target definition. + # + def root + if root? + self + else + parent.root + end + end + + # @return [Podfile] The podfile that contains the specification for this + # target definition. + # + def podfile + root.parent + end + + # @return [Array] The list of the dependencies of the target + # definition including the inherited ones. + # + def dependencies + if exclusive? + non_inherited_dependencies + else + non_inherited_dependencies + parent.dependencies + end + end + + # @return [Array] the targets from which this target + # definition should inherit only search paths. 
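+      #
+      #         As a sketch (target names hypothetical): for a chain `App` >
+      #         `AppTests` where `AppTests` declares `inherit! :search_paths`,
+      #         the list computed for `AppTests` contains `App`, so the headers
+      #         of `App`'s pods can be found without linking those pods again.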
+ # + def targets_to_inherit_search_paths + can_inherit = !root? && matches_platform?(parent) + if inheritance == 'search_paths' # && can_inherit + parent.targets_to_inherit_search_paths << parent + elsif can_inherit + parent.targets_to_inherit_search_paths + else + [] + end + end + + # @return [Array] The list of the dependencies of the target definition, + # excluding inherited ones. + # + def non_inherited_dependencies + pod_dependencies.concat(podspec_dependencies) + end + + # @return [Boolean] Whether the target definition has at least one + # dependency, excluding inherited ones. + # + def empty? + non_inherited_dependencies.empty? + end + + # @return [String] The label of the target definition according to its + # name. + # + def label + @label ||= if root? && name == 'Pods' + 'Pods' + elsif exclusive? || parent.nil? + "Pods-#{name}" + else + "#{parent.label}-#{name}" + end + end + + alias_method :to_s, :label + + # @return [String] A string representation suitable for debug. + # + def inspect + "#<#{self.class} label=#{label}>" + end + + #-----------------------------------------------------------------------# + + public + + # @!group Attributes + + # @return [String] the path of the project this target definition should + # link with. + # + def name + get_hash_value('name') + end + + # Sets the path of the user project this target definition should link + # with. + # + # @param [String] name + # The path of the project. + # + # @return [void] + # + def name=(name) + @label = nil + set_hash_value('name', name) + end + + #--------------------------------------# + + # @return [Boolean] whether this target definition is abstract. + # + def abstract? + get_hash_value('abstract', root?) + end + + # Sets whether this target definition is abstract. + # + # @param [Boolean] abstract + # whether this target definition is abstract. + # + # @return [void] + # + def abstract=(abstract) + set_hash_value('abstract', abstract) + end + + #--------------------------------------# + + # @return [String] the inheritance mode for this target definition. + # + def inheritance + get_hash_value('inheritance', 'complete') + end + + # Sets the inheritance mode for this target definition. + # + # @param [#to_s] inheritance + # the inheritance mode for this target definition. + # + # @raise [Informative] if this target definition is a root target + # definition or if the `inheritance` value is unknown. + # + # @return [void] + # + def inheritance=(inheritance) + inheritance = inheritance.to_s + unless %w(none search_paths complete).include?(inheritance) + raise Informative, "Unrecognized inheritance option `#{inheritance}` specified for target `#{name}`." + end + if root? + raise Informative, 'Cannot set inheritance for the root target definition.' + end + if abstract? + raise Informative, 'Cannot set inheritance for abstract target definition.' + end + set_hash_value('inheritance', inheritance) + end + + #--------------------------------------# + + # Returns whether the target definition should inherit the dependencies + # of the parent. + # + # @note A target is always `exclusive` if it is root. + # + # @note A target is always `exclusive` if the `platform` does + # not match the parent's `platform`. + # + # @return [Boolean] whether is exclusive. + # + def exclusive? + if root? + true + else + !matches_platform?(parent) || (inheritance != 'complete') + end + end + + # @param [TargetDefinition, Nil] target_definition + # the target definition to check for platform compatibility. 
+ # + # @return [Boolean] + # whether this target definition matches the platform of + # `target_definition`. + # + def matches_platform?(target_definition) + return false unless target_definition + return true if target_definition.platform == platform + !target_definition.platform && target_definition.abstract? + end + + #--------------------------------------# + + # @return [String] the path of the project this target definition should + # link with. + # + def user_project_path + path = get_hash_value('user_project_path') + if path + Pathname(path).sub_ext('.xcodeproj').to_path + else + parent.user_project_path unless root? + end + end + + # Sets the path of the user project this target definition should link + # with. + # + # @param [String] path + # The path of the project. + # + # @return [void] + # + def user_project_path=(path) + set_hash_value('user_project_path', path) + end + + #--------------------------------------# + + # @return [Hash{String => symbol}] A hash where the keys are the name of + # the build configurations and the values a symbol that + # represents their type (`:debug` or `:release`). + # + def build_configurations + if root? + get_hash_value('build_configurations') + else + get_hash_value('build_configurations') || parent.build_configurations + end + end + + # Sets the build configurations for this target. + # + # @return [Hash{String => Symbol}] hash + # A hash where the keys are the name of the build configurations + # and the values the type. + # + # @return [void] + # + def build_configurations=(hash) + set_hash_value('build_configurations', hash) unless hash.empty? + end + + #--------------------------------------# + + # @return [Array] The list of the script phases of the target definition. + # + def script_phases + get_hash_value('script_phases') || [] + end + + #--------------------------------------# + + # @return [String] The project name to use for the given pod name or `nil` if none specified. + # + # @note When querying for a subspec then use the root pod spec name instead as this is what's stored. + # + def project_name_for_pod(pod_name) + if root? + raw_project_names_hash[pod_name] + else + raw_project_names_hash[pod_name] || parent.project_name_for_pod(pod_name) + end + end + + #--------------------------------------# + # + # @return [Boolean] whether the target definition should inhibit warnings + # for a single pod. If inhibit_all_warnings is true, it will + # return true for any asked pod. + # + def inhibits_warnings_for_pod?(pod_name) + if Array(inhibit_warnings_hash['not_for_pods']).include?(pod_name) + false + elsif inhibit_warnings_hash['all'] + true + elsif !root? && parent.inhibits_warnings_for_pod?(pod_name) + true + else + Array(inhibit_warnings_hash['for_pods']).include? pod_name + end + end + + # Sets whether the target definition should inhibit the warnings during + # compilation for all pods. + # + # @param [Boolean] flag + # Whether the warnings should be suppressed. + # + # @return [void] + # + def inhibit_all_warnings=(flag) + raw_inhibit_warnings_hash['all'] = flag + end + + # Inhibits warnings for a specific pod during compilation. + # + # @param [String] pod_name + # Name of the pod for which the warnings will be inhibited or not. + # + # @param [Boolean] should_inhibit + # Whether the warnings should be inhibited or not for given pod. 
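+      #
+      # @example A minimal sketch (pod names hypothetical)
+      #
+      #   set_inhibit_warnings_for_pod('PodA', true)  # recorded under 'for_pods'
+      #   set_inhibit_warnings_for_pod('PodB', false) # recorded under 'not_for_pods'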
+ # + # @return [void] + # + def set_inhibit_warnings_for_pod(pod_name, should_inhibit) + hash_key = case should_inhibit + when true + 'for_pods' + when false + 'not_for_pods' + when nil + return + else + raise ArgumentError, "Got `#{should_inhibit.inspect}`, should be a boolean" + end + raw_inhibit_warnings_hash[hash_key] ||= [] + raw_inhibit_warnings_hash[hash_key] << pod_name + end + + #--------------------------------------# + + # The (desired) build type for the pods integrated in this target definition. Defaults to static libraries and can + # only be overridden through Pod::Podfile::DSL#use_frameworks!. + # + # @return [BuildType] + # + def build_type + value = get_hash_value('uses_frameworks', root? ? BuildType.static_library : parent.build_type) + case value + when true, false + value ? BuildType.dynamic_framework : BuildType.static_library + when Hash + BuildType.new(:linkage => value.fetch(:linkage), :packaging => value.fetch(:packaging)) + when BuildType + value + else + raise ArgumentError, "Got `#{value.inspect}`, should be a boolean, hash or BuildType." + end + end + + # Sets whether the target definition's pods should be built as frameworks. + # + # @param [Boolean, Hash] option + # Whether pods that are integrated in this target should be built as frameworks. If the option is a + # boolean then the value affects both packaging and linkage styles. If set to true, then dynamic frameworks + # are used and if it's set to false, then static libraries are used. If the option is a hash then + # `:framework` packaging is implied and the user configures the `:linkage` style to use. + # + # @return [void] + # + def use_frameworks!(option = true) + value = case option + when true, false + option ? BuildType.dynamic_framework : BuildType.static_library + when Hash + BuildType.new(:linkage => option.fetch(:linkage), :packaging => :framework) + else + raise ArgumentError, "Got `#{option.inspect}`, should be a boolean or hash." + end + set_hash_value('uses_frameworks', value.to_hash) + end + + # @return [Boolean] whether the target definition pods should be built as frameworks. + # + def uses_frameworks? + if internal_hash['uses_frameworks'].nil? + root? ? false : parent.uses_frameworks? + else + build_type.framework? + end + end + + #--------------------------------------# + + # Sets the Swift version that the target definition should use. + # + # @param [String] version + # The Swift version that the target definition should use. + # + # @return [void] + # + def swift_version=(version) + set_hash_value('swift_version', version) + end + + # @return [String] the Swift version that the target definition should + # use. + # + def swift_version + get_hash_value('swift_version') + end + + # @return [Array] the Swift version requirements this target definition enforces. + # + def swift_version_requirements + get_hash_value('swift_version_requirements') + end + + # Queries the target if a version of Swift is supported or not. + # + # @param [Version] swift_version + # The Swift version to query against. + # + # @return [Boolean] Whether the target accepts the specified Swift version. + # + def supports_swift_version?(swift_version) + if swift_version_requirements.nil? + root? || parent.supports_swift_version?(swift_version) + else + Requirement.create(swift_version_requirements).satisfied_by?(swift_version) + end + end + + #--------------------------------------# + + # Whether a specific pod should be linked to the target when building for + # a specific configuration. 
If a pod has not been explicitly whitelisted
+      #        for any configuration, it is implicitly whitelisted.
+      #
+      # @param [String] pod_name
+      #        The pod that we're querying about inclusion for in the given
+      #        configuration.
+      #
+      # @param [String] configuration_name
+      #        The configuration that we're querying about inclusion of the
+      #        pod in.
+      #
+      # @note Build configurations are compared case-insensitively in
+      #       CocoaPods.
+      #
+      # @return [Boolean] flag
+      #         Whether the pod should be linked with the target
+      #
+      def pod_whitelisted_for_configuration?(pod_name, configuration_name)
+        found = false
+        configuration_pod_whitelist.each do |configuration, pods|
+          if pods.include?(pod_name)
+            found = true
+            if configuration.downcase == configuration_name.to_s.downcase
+              return true
+            end
+          end
+        end
+        !found && (root? || (inheritance != 'none' && parent.pod_whitelisted_for_configuration?(pod_name, configuration_name)))
+      end
+
+      # Whitelists a pod for a specific configuration. If a pod is whitelisted
+      # for any configuration, it will only be linked with the target in the
+      # configuration(s) specified. If it is not whitelisted for any
+      # configuration, it is implicitly included in all configurations.
+      #
+      # @param [String] pod_name
+      #        The pod that should be included in the given configuration.
+      #
+      # @param [String, Symbol] configuration_name
+      #        The configuration that the pod should be included in
+      #
+      # @note Build configurations are stored as a String.
+      #
+      # @return [void]
+      #
+      def whitelist_pod_for_configuration(pod_name, configuration_name)
+        configuration_name = configuration_name.to_s
+        list = raw_configuration_pod_whitelist[configuration_name] ||= []
+        list << pod_name
+      end
+
+      # @return [Array] unique list of all configurations for which
+      #         pods have been whitelisted.
+      #
+      def all_whitelisted_configurations
+        parent_configurations = (root? || inheritance == 'none') ? [] : parent.all_whitelisted_configurations
+        (configuration_pod_whitelist.keys + parent_configurations).uniq
+      end
+
+      #--------------------------------------#
+
+      def raw_use_modular_headers_hash
+        get_hash_value('use_modular_headers', {})
+      end
+      private :raw_use_modular_headers_hash
+
+      # Returns the use_modular_headers hash pre-populated with default values.
+      #
+      # @return [Hash] Hash with :all key for building all
+      #         pods as modules, :for_pods key for building as module per Pod,
+      #         and :not_for_pods key for not building as module per Pod.
+      #
+      def use_modular_headers_hash
+        raw_hash = raw_use_modular_headers_hash
+        if exclusive?
+          raw_hash
+        else
+          parent_hash = parent.send(:use_modular_headers_hash).dup
+          if parent_hash['not_for_pods']
+            # Remove pods that are set to not use modular headers inside parent
+            # if they are set to use modular headers inside current target.
+            parent_hash['not_for_pods'] -= Array(raw_hash['for_pods'])
+          end
+          if parent_hash['for_pods']
+            # Remove pods that are set to use modular headers inside parent if they are set to not use modular headers inside current target.
+            parent_hash['for_pods'] -= Array(raw_hash['for_pods'])
+          end
+          if raw_hash['all']
+            # Clean pods that are set to not use modular headers inside parent if use_modular_headers! was set.
+            parent_hash['not_for_pods'] = nil
+          end
+          parent_hash.merge(raw_hash) do |_, l, r|
+            Array(l).concat(r).uniq
+          end
+        end
+      end
+
+      # @return [Boolean] whether the target definition should use modular headers
+      #         for a single pod. If use_modular_headers! is true, it will
+      #         return true for any asked pod.
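+      #
+      #         As a sketch (pod name hypothetical): after `use_modular_headers!`,
+      #         this returns true for any queried pod except one recorded under
+      #         'not_for_pods' via `pod 'SomePod', :modular_headers => false`.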
+ # + def build_pod_as_module?(pod_name) + if Array(use_modular_headers_hash['not_for_pods']).include?(pod_name) + false + elsif use_modular_headers_hash['all'] + true + elsif !root? && parent.build_pod_as_module?(pod_name) + true + else + Array(use_modular_headers_hash['for_pods']).include? pod_name + end + end + + # Sets whether the target definition should use modular headers for all pods. + # + # @param [Boolean] flag + # Whether the warnings should be suppressed. + # + # @return [void] + # + def use_modular_headers_for_all_pods=(flag) + raw_use_modular_headers_hash['all'] = flag + end + + # Use modular headers for a specific pod during compilation. + # + # @param [String] pod_name + # Name of the pod for which modular headers will be used. + # + # @param [Boolean] flag + # Whether modular headers should be used. + # + # @return [void] + # + def set_use_modular_headers_for_pod(pod_name, flag) + hash_key = case flag + when true + 'for_pods' + when false + 'not_for_pods' + when nil + return + else + raise ArgumentError, "Got `#{flag.inspect}`, should be a boolean" + end + raw_use_modular_headers_hash[hash_key] ||= [] + raw_use_modular_headers_hash[hash_key] << pod_name + end + + #--------------------------------------# + + PLATFORM_DEFAULTS = { :ios => '4.3', :osx => '10.6', :tvos => '9.0', :watchos => '2.0' }.freeze + + # @return [Platform] the platform of the target definition. + # + # @note If no deployment target has been specified a default value is + # provided. + # + def platform + name_or_hash = get_hash_value('platform') + if name_or_hash + if name_or_hash.is_a?(Hash) + name = name_or_hash.keys.first.to_sym + target = name_or_hash.values.first + else + name = name_or_hash.to_sym + end + target ||= PLATFORM_DEFAULTS[name] + Platform.new(name, target) + else + parent.platform unless root? + end + end + + # Sets the platform of the target definition. + # + # @param [Symbol] name + # The name of the platform. + # + # @param [String] target + # The deployment target of the platform. + # + # @raise When the name of the platform is unsupported. + # + # @return [void] + # + def set_platform(name, target = nil) + name = :osx if name == :macos + unless [:ios, :osx, :tvos, :watchos].include?(name) + raise StandardError, "Unsupported platform `#{name}`. Platform " \ + 'must be `:ios`, `:osx`, `:macos`, `:tvos`, or `:watchos`.' + end + + if target + value = { name.to_s => target } + else + value = name.to_s + end + set_hash_value('platform', value) + end + + # Sets the platform of the target definition. + # + # @see #set_platform + # + # @raise When the target definition already has a platform set. + # + # @return [void] + # + def set_platform!(name, target = nil) + raise StandardError, "The target `#{label}` already has a platform set." if get_hash_value('platform') + set_platform(name, target) + end + + # Stores the Swift version requirements to be used for this target. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements this target supports. + # + # @return [void] + # + def store_swift_version_requirements(*requirements) + set_hash_value('swift_version_requirements', requirements.flatten.map(&:to_s)) + end + + #--------------------------------------# + + # Stores the dependency for a Pod with the given name. + # + # @param [String] name + # The name of the Pod + # + # @param [Array] requirements + # The requirements and the options of the dependency. + # + # @note The dependencies are stored as an array. 
To simplify the YAML + # representation if they have requirements they are represented + # as a Hash, otherwise only the String of the name is added to + # the array. + # + # @todo This needs urgently a rename. + # + # @return [void] + # + def store_pod(name, *requirements) + return if parse_subspecs(name, requirements) # This parse method must be called first + parse_inhibit_warnings(name, requirements) + parse_modular_headers(name, requirements) + parse_configuration_whitelist(name, requirements) + parse_project_name(name, requirements) + + if requirements && !requirements.empty? + pod = { name => requirements } + else + pod = name + end + + get_hash_value('dependencies', []) << pod + nil + end + + #--------------------------------------# + + # Stores the podspec whose dependencies should be included by the + # target. + # + # @param [Hash] options + # The options used to find the podspec (either by name or by + # path). If nil the podspec is auto-detected (i.e. the first one + # in the folder of the Podfile) + # + # @note The storage of this information is optimized for YAML + # readability. + # + # @todo This urgently needs a rename. + # + # @return [void] + # + def store_podspec(options = nil) + options ||= {} + unless options.keys.all? { |key| [:name, :path, :subspecs, :subspec].include?(key) } + raise StandardError, 'Unrecognized options for the podspec ' \ + "method `#{options}`" + end + if subspec_name = options[:subspec] + unless subspec_name.is_a?(String) + raise StandardError, "Option `:subspec => #{subspec_name.inspect}` should be a String" + end + end + if subspec_names = options[:subspecs] + if !subspec_names.is_a?(Array) || !subspec_names.all? { |name| name.is_a? String } + raise StandardError, "Option `:subspecs => #{subspec_names.inspect}` " \ + 'should be an Array of Strings' + end + end + options[:autodetect] = true if !options.include?(:name) && !options.include?(:path) + get_hash_value('podspecs', []) << options + end + + #--------------------------------------# + + # Stores the script phase to add for this target definition. + # + # @param [Hash] options + # The options to use for this script phase. The required keys + # are: `:name`, `:script`, while the optional keys are: + # `:shell_path`, `:input_files`, `:output_files`, `:show_env_vars_in_log`, `:execution_position` and + # `:dependency_file`. + # + # @return [void] + # + def store_script_phase(options) + option_keys = options.keys + unrecognized_keys = option_keys - Specification::ALL_SCRIPT_PHASE_KEYS + unless unrecognized_keys.empty? + raise StandardError, "Unrecognized options `#{unrecognized_keys}` in shell script `#{options[:name]}` within `#{name}` target. " \ + "Available options are `#{Specification::ALL_SCRIPT_PHASE_KEYS}`." + end + missing_required_keys = Specification::SCRIPT_PHASE_REQUIRED_KEYS - option_keys + unless missing_required_keys.empty? + raise StandardError, "Missing required shell script phase options `#{missing_required_keys.join(', ')}`" + end + script_phases_hash = get_hash_value('script_phases', []) + if script_phases_hash.map { |script_phase_options| script_phase_options[:name] }.include?(options[:name]) + raise StandardError, "Script phase with name `#{options[:name]}` name already present for target `#{name}`." 
+ end + options[:execution_position] = :any unless options.key?(:execution_position) + unless Specification::EXECUTION_POSITION_KEYS.include?(options[:execution_position]) + raise StandardError, "Invalid execution position value `#{options[:execution_position]}` in shell script `#{options[:name]}` within `#{name}` target. " \ + "Available options are `#{Specification::EXECUTION_POSITION_KEYS}`." + end + script_phases_hash << options + end + + #-----------------------------------------------------------------------# + + public + + # @!group Representations + + # @return [Array] The keys used by the hash representation of the + # target definition. + # + HASH_KEYS = %w( + name + platform + podspecs + exclusive + link_with + link_with_first_target + inhibit_warnings + use_modular_headers + user_project_path + build_configurations + project_names + dependencies + script_phases + children + configuration_pod_whitelist + uses_frameworks + swift_version_requirements + inheritance + abstract + swift_version + ).freeze + + # @return [Hash] The hash representation of the target definition. + # + def to_hash + hash = internal_hash.dup + unless children.empty? + hash['children'] = children.map(&:to_hash) + end + hash + end + + # Configures a new target definition from the given hash. + # + # @param [Hash] the hash which contains the information of the + # Podfile. + # + # @return [TargetDefinition] the new target definition + # + def self.from_hash(hash, parent) + internal_hash = hash.dup + children_hashes = internal_hash.delete('children') || [] + definition = TargetDefinition.new(nil, parent, internal_hash) + children_hashes.each do |child_hash| + TargetDefinition.from_hash(child_hash, definition) + end + definition + end + + #-----------------------------------------------------------------------# + + protected + + # @!group Private helpers + + # @return [Array] + # + attr_writer :children + + # @return [Hash] The hash which store the attributes of the target + # definition. + # + attr_accessor :internal_hash + + private + + # Set a value in the internal hash of the target definition for the given + # key. + # + # @param [String] key + # The key for which to store the value. + # + # @param [Object] value + # The value to store. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [void] + # + def set_hash_value(key, value) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = value + end + + # Returns the value for the given key in the internal hash of the target + # definition. + # + # @param [String] key + # The key for which the value is needed. + # + # @param [Object] base_value + # The value to set if they key is nil. Useful for collections. + # + # @raise [StandardError] If the key is not recognized. + # + # @return [Object] The value for the key. + # + def get_hash_value(key, base_value = nil) + unless HASH_KEYS.include?(key) + raise StandardError, "Unsupported hash key `#{key}`" + end + internal_hash[key] = base_value if internal_hash[key].nil? + internal_hash[key] + end + + def raw_inhibit_warnings_hash + get_hash_value('inhibit_warnings', {}) + end + private :raw_inhibit_warnings_hash + + # Returns the inhibit_warnings hash pre-populated with default values. + # + # @return [Hash] Hash with :all key for inhibiting all + # warnings, :for_pods key for inhibiting warnings per Pod, + # and :not_for_pods key for not inhibiting warnings per Pod. 
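+      #
+      #         Illustrative shape after merging with the parent (the values
+      #         are made up for the sketch):
+      #
+      #             { 'all' => false, 'for_pods' => ['PodA'], 'not_for_pods' => ['PodB'] }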
+ # + def inhibit_warnings_hash + inhibit_hash = raw_inhibit_warnings_hash + if exclusive? + inhibit_hash + else + parent_hash = parent.send(:inhibit_warnings_hash).dup + if parent_hash['not_for_pods'] + # Remove pods that are set to not inhibit inside parent if they are set to inhibit inside current target. + parent_hash['not_for_pods'] -= Array(inhibit_hash['for_pods']) + end + if parent_hash['for_pods'] + # Remove pods that are set to inhibit inside parent if they are set to not inhibit inside current target. + parent_hash['for_pods'] -= Array(inhibit_hash['for_pods']) + end + if inhibit_hash['all'] + # Clean pods that are set to not inhibit inside parent if inhibit_all_warnings! was set. + parent_hash['not_for_pods'] = nil + inhibit_hash.delete('all') if parent_hash['all'] + end + parent_hash.merge(inhibit_hash) do |_, l, r| + Array(l).concat(r).uniq + end + end + end + + def raw_configuration_pod_whitelist + get_hash_value('configuration_pod_whitelist', {}) + end + private :raw_configuration_pod_whitelist + + # Returns the configuration_pod_whitelist hash + # + # @return [Hash] Hash with configuration name as key, + # array of pod names to be linked in builds with that configuration + # as value. + # + def configuration_pod_whitelist + whitelist_hash = raw_configuration_pod_whitelist + if exclusive? + whitelist_hash + else + parent.send(:configuration_pod_whitelist).merge(whitelist_hash) { |_, l, r| Array(l).concat(r).uniq } + end + end + + # @return [Array] The dependencies specified by the user for + # this target definition. + # + def pod_dependencies + pods = get_hash_value('dependencies') || [] + pods.map do |name_or_hash| + if name_or_hash.is_a?(Hash) + name = name_or_hash.keys.first + requirements = name_or_hash.values.first + Dependency.new(name, *requirements) + else + Dependency.new(name_or_hash) + end + end + end + + # @return [Array] The dependencies inherited by the podspecs. + # + # @note The podspec directive is intended to include the dependencies of + # a spec in the project where it is developed. For this reason the + # spec, or any of it subspecs, cannot be included in the + # dependencies. Otherwise it would generate a chicken-and-egg + # problem. + # + def podspec_dependencies + podspecs = get_hash_value('podspecs') || [] + podspecs.map do |options| + file = podspec_path_from_options(options) + spec = Specification.from_file(file) + subspec_names = options[:subspecs] || options[:subspec] + specs = if subspec_names.blank? + [spec] + else + subspec_names = [subspec_names] if subspec_names.is_a?(String) + subspec_names.map { |subspec_name| spec.subspec_by_name("#{spec.name}/#{subspec_name}") } + end + specs.map do |subspec| + all_specs = [subspec, *subspec.recursive_subspecs] + all_deps = all_specs.map { |s| s.dependencies(platform) }.flatten + all_deps.reject { |dep| dep.root_name == subspec.root.name } + end.flatten + end.flatten.uniq + end + + # The path of the podspec with the given options. + # + # @param [Hash] options + # The options to use for finding the podspec. The supported keys + # are: `:name`, `:path`, `:autodetect`. + # + # @return [Pathname] The path. + # + def podspec_path_from_options(options) + if path = options[:path] + if File.basename(path).include?('.podspec') + path_with_ext = path + else + path_with_ext = "#{path}.podspec" + end + path_without_tilde = path_with_ext.gsub('~', ENV['HOME']) + podfile.defined_in_file.dirname + path_without_tilde + elsif name = options[:name] + name = File.basename(name).include?('.podspec') ? 
name : "#{name}.podspec" + podfile.defined_in_file.dirname + name + elsif options[:autodetect] + glob_pattern = podfile.defined_in_file.dirname + '*.podspec{,.json}' + path = Pathname.glob(glob_pattern).first + unless path + raise Informative, 'Could not locate a podspec in the current directory. '\ + 'You can specify the path via the path option.' + end + + path + end + end + + # Removes :inhibit_warnings from the requirements list, and adds + # the pod's name into internal hash for disabling warnings. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :inhibit_warnings is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_inhibit_warnings(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + should_inhibit = options.delete(:inhibit_warnings) + pod_name = Specification.root_name(name) + set_inhibit_warnings_for_pod(pod_name, should_inhibit) + + requirements.pop if options.empty? + end + + # Removes :modular_headers from the requirements list, and adds + # the pods name into internal hash for modular headers. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :modular_headers is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_modular_headers(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + defines_module = options.delete(:modular_headers) + pod_name = Specification.root_name(name) + set_use_modular_headers_for_pod(pod_name, defines_module) + + requirements.pop if options.empty? + end + + # Removes :project_name from the requirements list, and adds + # the pods name into internal hash. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :project_name is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_project_name(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + project_name = options.delete(:project_name) + pod_name = Specification.root_name(name) + raw_project_names_hash[pod_name] = project_name if project_name + + requirements.pop if options.empty? + end + + def raw_project_names_hash + get_hash_value('project_names', {}) + end + private :raw_project_names_hash + + # Removes :configurations or :configuration from the requirements list, + # and adds the pod's name into the internal hash for which pods should be + # linked in which configuration only. + # + # @param [String] name The name of the pod + # + # @param [Array] requirements + # If :configurations is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [void] + # + def parse_configuration_whitelist(name, requirements) + options = requirements.last + return requirements unless options.is_a?(Hash) + + configurations = options.delete(:configurations) + configurations ||= options.delete(:configuration) + Array(configurations).each do |configuration| + whitelist_pod_for_configuration(name, configuration) + end + requirements.pop if options.empty? + end + + # Removes :subspecs and :testspecs from the requirements list, and stores the pods + # with the given subspecs or test specs as dependencies. 
+ # + # @param [String] name + # + # @param [Array] requirements + # If :subspecs is the only key in the hash, the hash + # should be destroyed because it confuses Gem::Dependency. + # + # @return [Boolean] Whether new subspecs were added + # + def parse_subspecs(name, requirements) + options = requirements.last + return false unless options.is_a?(Hash) + + subspecs = options.delete(:subspecs) + test_specs = options.delete(:testspecs) + app_specs = options.delete(:appspecs) + + subspecs.each do |ss| + store_pod("#{name}/#{ss}", *requirements.dup) + end if subspecs + + test_specs.each do |ss| + requirements_copy = requirements.map(&:dup) + store_pod("#{name}/#{ss}", *requirements_copy) + end if test_specs + + app_specs.each do |as| + requirements_copy = requirements.map(&:dup) + store_pod("#{name}/#{as}", *requirements_copy) + end if app_specs + + requirements.pop if options.empty? + !subspecs.nil? + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/requirement.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/requirement.rb new file mode 100644 index 0000000..23fbe45 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/requirement.rb @@ -0,0 +1,104 @@ +module Pod + # A Requirement is a set of one or more version restrictions of a + # {Dependency}. + # + # It is based on the RubyGems class adapted to support CocoaPods specific + # information. + # + # @todo Move support about external sources and head information here from + # the Dependency class. + # + class Requirement < Pod::Vendor::Gem::Requirement + quoted_operators = OPS.keys.map { |k| Regexp.quote k }.join '|' + + # @return [Regexp] The regular expression used to validate input strings. + # + PATTERN = /\A\s*(#{quoted_operators})?\s*(#{Version::VERSION_PATTERN})\s*\z/ + + DefaultRequirement = ['>=', Version.new(0)] # rubocop:disable Naming/ConstantName + + #-------------------------------------------------------------------------# + + # Factory method to create a new requirement. + # + # @param [Requirement, Version, Array, String, Nil] input + # The input used to create the requirement. + # + # @return [Requirement] A new requirement. + # + def self.create(input) + case input + when Requirement + input + when Version, Array + new(input) + else + if input.respond_to? :to_str + new([input.to_str]) + else + default + end + end + end + + # @return [Requirement] The default requirement. + # + def self.default + new('>= 0') + end + + # Parses the given object returning a tuple where the first entry is an + # operator and the second a version. If not operator is provided it + # defaults to `=`. + # + # @param [String, Version] input + # The input passed to create the requirement. + # + # @return [Array] A tuple representing the requirement. + # + def self.parse(input) + return ['=', input] if input.is_a?(Version) + + unless PATTERN =~ input.to_s + raise ArgumentError, "Illformed requirement `#{input.inspect}`" + end + + operator = Regexp.last_match[1] || '=' + version = Version.new(Regexp.last_match[2]) + [operator, version] + end + + # Constructs a requirement from `requirements`. + # + # @param [String, Version, Array, Array] requirements + # The set of requirements + # + # @note Duplicate requirements are ignored. 
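+    #
+    #         For example (sketch): `Requirement.new('>= 1.0', '>= 1.0')` keeps a
+    #         single `['>=', Version.new('1.0')]` tuple after deduplication.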
+ # + # @note An empty set of `requirements` is the same as `">= 0"` + # + def initialize(*requirements) + requirements = requirements.flatten + requirements.compact! + requirements.uniq! + + @requirements = if requirements.empty? + [DefaultRequirement] + else + requirements.map! { |r| self.class.parse r } + end + end + + # + # @return [Boolean] true if this pod has no requirements. + # + def none? + if @requirements.size == 1 + @requirements[0] == DefaultRequirement + else + false + end + end + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source.rb new file mode 100644 index 0000000..294da72 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source.rb @@ -0,0 +1,476 @@ +require 'cocoapods-core/source/acceptor' +require 'cocoapods-core/source/aggregate' +require 'cocoapods-core/source/health_reporter' +require 'cocoapods-core/source/manager' +require 'cocoapods-core/source/metadata' + +module Pod + # The Source class is responsible to manage a collection of podspecs. + # + # The backing store of the podspecs collection is an implementation detail + # abstracted from the rest of CocoaPods. + # + # The default implementation uses a git repo as a backing store, where the + # podspecs are namespaced as: + # + # "#{SPEC_NAME}/#{VERSION}/#{SPEC_NAME}.podspec" + # + class Source + # The default branch in which the specs are stored + DEFAULT_SPECS_BRANCH = 'master'.freeze + + # @return [Pod::Source::Metadata] The metadata for this source. + # + attr_reader :metadata + + # @param [Pathname, String] repo @see #repo. + # + def initialize(repo) + @repo = Pathname(repo).expand_path + @versions_by_name = {} + refresh_metadata + end + + # @return [String] The name of the source. + # + def name + repo.basename.to_s + end + + # @return [String] The URL of the source. + # + # @note In the past we had used `git ls-remote --get-url`, but this could + # lead to an issue when finding a source based on its URL when `git` + # is configured to rewrite URLs with the `url..insteadOf` + # option. See https://github.com/CocoaPods/CocoaPods/issues/2724. + # + def url + @url ||= begin + remote = repo_git(%w(config --get remote.origin.url)) + if !remote.empty? + remote + elsif (repo + '.git').exist? + "file://#{repo}/.git" + end + end + end + + # @return [String] The type of the source. + # + def type + git? ? 'git' : 'file system' + end + + alias_method :to_s, :name + + # @return [Integer] compares a source with another one for sorting + # purposes. + # + # @note Source are compared by the alphabetical order of their name, and + # this convention should be used in any case where sources need to + # be disambiguated. + # + def <=>(other) + name <=> other.name + end + + # @return [String] A description suitable for debugging. + # + def inspect + "#<#{self.class} name:#{name} type:#{type}>" + end + + # @!group Paths + #-------------------------------------------------------------------------# + + # @return [Pathname] The path where the source is stored. + # + attr_reader :repo + + # @return [Pathname] The directory where the specs are stored. + # + # @note In previous versions of CocoaPods they used to be stored in + # the root of the repo. This lead to issues, especially with + # the GitHub interface and now they are stored in a dedicated + # folder. 
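+    #
+    #         With a sharded repo the resulting paths have this shape (the pod
+    #         name and shard letters are illustrative):
+    #
+    #             Specs/a/b/c/AFNetworking/4.0.0/AFNetworking.podspec.json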
+ # + def specs_dir + @specs_dir ||= begin + specs_sub_dir = repo + 'Specs' + if specs_sub_dir.exist? + specs_sub_dir + elsif repo.exist? + repo + end + end + end + + # @param [String] name The name of the pod. + # + # @return [Pathname] The path at which the specs for the given pod are + # stored. + # + def pod_path(name) + specs_dir.join(*metadata.path_fragment(name)) + end + + # @return [Pathname] The path at which source metadata is stored. + # + def metadata_path + repo + 'CocoaPods-version.yml' + end + + public + + # @!group Querying the source + #-------------------------------------------------------------------------# + + # @return [Array] the list of the name of all the Pods. + # + # + def pods + unless specs_dir + raise Informative, "Unable to find a source named: `#{name}`" + end + glob = specs_dir.join('*/' * metadata.prefix_lengths.size, '*') + Pathname.glob(glob).reduce([]) do |pods, entry| + pods << entry.basename.to_s if entry.directory? + pods + end.sort + end + + # Returns pod names for given array of specification paths. + # + # @param [Array] spec_paths + # Array of file path names for specifications. Path strings should be relative to the source path. + # + # @return [Array] the list of the name of Pods corresponding to specification paths. + # + def pods_for_specification_paths(spec_paths) + spec_paths.map do |path| + absolute_path = repo + path + relative_path = absolute_path.relative_path_from(specs_dir) + # The first file name returned by 'each_filename' is the pod name + relative_path.each_filename.first + end + end + + # @return [Array] all the available versions for the Pod, sorted + # from highest to lowest. + # + # @param [String] name + # the name of the Pod. + # + def versions(name) + return nil unless specs_dir + raise ArgumentError, 'No name' unless name + pod_dir = pod_path(name) + return unless pod_dir.exist? + @versions_by_name[name] ||= pod_dir.children.map do |v| + next nil unless v.directory? + basename = v.basename.to_s + next unless basename[0, 1] != '.' + begin + Version.new(basename) + rescue ArgumentError + raise Informative, 'An unexpected version directory ' \ + "`#{basename}` was encountered for the " \ + "`#{pod_dir}` Pod in the `#{name}` repository." + end + end.compact.sort.reverse + end + + # @return [Specification] the specification for a given version of Pod. + # + # @param @see specification_path + # + def specification(name, version) + Specification.from_file(specification_path(name, version)) + end + + # Returns the path of the specification with the given name and version. + # + # @param [String] name + # the name of the Pod. + # + # @param [Version,String] version + # the version for the specification. + # + # @return [Pathname] The path of the specification. + # + def specification_path(name, version) + raise ArgumentError, 'No name' unless name + raise ArgumentError, 'No version' unless version + path = pod_path(name) + version.to_s + specification_path = path + "#{name}.podspec.json" + unless specification_path.exist? + specification_path = path + "#{name}.podspec" + end + unless specification_path.exist? + raise StandardError, "Unable to find the specification #{name} " \ + "(#{version}) in the #{self.name} source." + end + specification_path + end + + # @return [Array] all the specifications contained by the + # source. 
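+    #
+    # Loading every podspec is considerably slower than lookups by name, so
+    # this is best reserved for one-off tooling. A usage sketch (the repo path
+    # is hypothetical):
+    #
+    #     source = Pod::Source.new('~/.cocoapods/repos/my-specs')
+    #     source.all_specs.group_by(&:name).keys.sort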
+ # + def all_specs + glob = specs_dir.join('*/' * metadata.prefix_lengths.size, '*', '*', '*.podspec{.json,}') + specs = Pathname.glob(glob).map do |path| + begin + Specification.from_file(path) + rescue + CoreUI.warn "Skipping `#{path.relative_path_from(repo)}` because the " \ + 'podspec contains errors.' + next + end + end + specs.compact + end + + # Returns the set for the Pod with the given name. + # + # @param [String] pod_name + # The name of the Pod. + # + # @return [Sets] the set. + # + def set(pod_name) + Specification::Set.new(pod_name, self) + end + + # @return [Array] the sets of all the Pods. + # + def pod_sets + pods.map { |pod_name| set(pod_name) } + end + + public + + # @!group Searching the source + #-------------------------------------------------------------------------# + + # @return [Set] a set for a given dependency. The set is identified by the + # name of the dependency and takes into account subspecs. + # + # @note This method is optimized for fast lookups by name, i.e. it does + # *not* require iterating through {#pod_sets} + # + # @todo Rename to #load_set + # + def search(query) + unless specs_dir + raise Informative, "Unable to find a source named: `#{name}`" + end + if query.is_a?(Dependency) + query = query.root_name + end + + if (versions = @versions_by_name[query]) && !versions.empty? + set = set(query) + return set if set.specification_name == query + end + + found = [] + Pathname.glob(pod_path(query)) do |path| + next unless Dir.foreach(path).any? { |child| child != '.' && child != '..' } + found << path.basename.to_s + end + + if [query] == found + set = set(query) + set if set.specification_name == query + end + end + + # @return [Array] The list of the sets that contain the search term. + # + # @param [String] query + # the search term. Can be a regular expression. + # + # @param [Boolean] full_text_search + # whether the search should be limited to the name of the Pod or + # should include also the author, the summary, and the description. + # + # @note full text search requires to load the specification for each pod, + # hence is considerably slower. + # + # @todo Rename to #search + # + def search_by_name(query, full_text_search = false) + regexp_query = /#{query}/i + if full_text_search + pod_sets.reject do |set| + texts = [] + begin + s = set.specification + texts << s.name + texts += s.authors.keys + texts << s.summary + texts << s.description + rescue + CoreUI.warn "Skipping `#{set.name}` because the podspec " \ + 'contains errors.' + end + texts.grep(regexp_query).empty? + end + else + names = pods.grep(regexp_query) + names.map { |pod_name| set(pod_name) } + end + end + + # Returns the set of the Pod whose name fuzzily matches the given query. + # + # @param [String] query + # The query to search for. + # + # @return [Set] The name of the Pod. + # @return [Nil] If no Pod with a suitable name was found. + # + def fuzzy_search(query) + require 'fuzzy_match' + pod_name = FuzzyMatch.new(pods).find(query) + if pod_name + search(pod_name) + end + end + + # @!group Updating the source + #-------------------------------------------------------------------------# + + # Updates the local clone of the source repo. + # + # @param [Boolean] show_output + # + # @return [Array] changed_spec_paths + # Returns the list of changed spec paths. + # + def update(show_output) + return [] if unchanged_github_repo? 
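+      # Record the current HEAD before pulling: the changed-spec list returned
+      # below is a `git diff --name-only` between this commit and the new HEAD.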
+ prev_commit_hash = git_commit_hash + update_git_repo(show_output) + @versions_by_name.clear + refresh_metadata + if version = metadata.last_compatible_version(Version.new(CORE_VERSION)) + tag = "v#{version}" + CoreUI.warn "Using the `#{tag}` tag of the `#{name}` source because " \ + "it is the last version compatible with CocoaPods #{CORE_VERSION}." + repo_git(['checkout', tag]) + end + diff_until_commit_hash(prev_commit_hash) + end + + def updateable? + git? + end + + def git? + repo.join('.git').exist? && !repo_git(%w(rev-parse HEAD)).empty? + end + + def indexable? + true + end + + def verify_compatibility! + return if metadata.compatible?(CORE_VERSION) + + version_msg = if metadata.minimum_cocoapods_version == metadata.maximum_cocoapods_version + metadata.minimum_cocoapods_version + else + "#{metadata.minimum_cocoapods_version} - #{metadata.maximum_cocoapods_version}" + end + raise Informative, "The `#{name}` repo requires " \ + "CocoaPods #{version_msg} (currently using #{CORE_VERSION})\n" \ + 'Update CocoaPods, or checkout the appropriate tag in the repo.' + end + + public + + # @!group Representations + #-------------------------------------------------------------------------# + + # @return [Hash{String=>{String=>Specification}}] the static representation + # of all the specifications grouped first by name and then by + # version. + # + def to_hash + hash = {} + all_specs.each do |spec| + hash[spec.name] ||= {} + hash[spec.name][spec.version.version] = spec.to_hash + end + hash + end + + # @return [String] the YAML encoded {to_hash} representation. + # + def to_yaml + require 'yaml' + to_hash.to_yaml + end + + private + + # @group Private Helpers + #-------------------------------------------------------------------------# + + # Loads the specification for the given Pod gracefully. + # + # @param [String] name + # the name of the Pod. + # + # @return [Specification] The specification for the last version of the + # Pod. + # @return [Nil] If the spec could not be loaded. + # + def load_spec_gracefully(name) + versions = versions(name) + version = versions.sort.last if versions + specification(name, version) if version + rescue Informative + Pod::CoreUI.warn "Skipping `#{name}` because the podspec " \ + 'contains errors.' + nil + end + + def refresh_metadata + @metadata = Metadata.from_file(metadata_path) + end + + def git_commit_hash + repo_git(%w(rev-parse HEAD)) + end + + def update_git_repo(show_output = false) + repo_git(['checkout', git_tracking_branch]) + output = repo_git(%w(pull --ff-only), :include_error => true) + CoreUI.puts output if show_output + end + + def git_tracking_branch + path = repo.join('.git', 'cocoapods_branch') + path.file? ? path.read.strip : DEFAULT_SPECS_BRANCH + end + + def diff_until_commit_hash(commit_hash) + repo_git(%W(diff --name-only #{commit_hash}..HEAD)).split("\n") + end + + def repo_git(args, include_error: false) + command = "env -u GIT_CONFIG git -C \"#{repo}\" " << args.join(' ') + command << ' 2>&1' if include_error + (`#{command}` || '').strip + end + + def unchanged_github_repo? 
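+      # Only GitHub-hosted repos can answer this cheaply via the GitHub API;
+      # for any other host this returns nil and the caller performs a full pull.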
+ return unless url =~ /github.com/ + !GitHub.modified_since_commit(url, git_commit_hash) + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/acceptor.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/acceptor.rb new file mode 100644 index 0000000..fb05c9f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/acceptor.rb @@ -0,0 +1,170 @@ +module Pod + class Source + # Checks whether a podspec can be accepted by a source. The check takes + # into account the introduction of 0.0.1 version if there are already + # tagged ones or whether there is change in the source. + # + class Acceptor + # @return [Source] the source where the podspec should be added. + # + attr_reader :source + + # @param [Pathname] repo @see Source#repo. + # + def initialize(repo) + @source = Source.new(repo) + end + + public + + # @!group Actions + #-----------------------------------------------------------------------# + + # Checks whether the given specification can be accepted. + # + # @return [Array] A list of errors. If the list is empty the + # specification should be accepted. + # + def analyze(spec, previous_spec = nil) + errors = [] + check_spec_source_change(spec, errors) + check_if_untagged_version_is_acceptable(spec, previous_spec, errors) + check_commit_change_for_untagged_version(spec, previous_spec, errors) + check_dependencies(spec, errors) + errors + end + + # Checks whether the specification at the given path can be accepted. + # + # @return [Array] A list of errors. If the list is empty the + # specification should be accepted. + # + def analyze_path(spec_path) + spec = Specification.from_file(spec_path) + analyze(spec) + rescue + ['Unable to load the specification.'] + end + + private + + # @!group Private helpers + #-----------------------------------------------------------------------# + + # Checks whether the source of the proposed specification is different + # from the one of the reference specification. + # + # @note HTTP Sources are ignored as they change per version. + # + # @return [void] + # + def check_spec_source_change(spec, errors) + require 'cocoapods-core/http' + + return unless spec + return if spec.source[:http] + return unless reference_spec(spec) + keys = Spec::DSL::SOURCE_KEYS.keys + source = spec.source.values_at(*keys).compact.first + old_source = reference_spec(spec).source.values_at(*keys).compact.first + unless source == old_source + source = HTTP.get_actual_url(source) + old_source = HTTP.get_actual_url(old_source) + unless source == old_source + message = "The source of the spec doesn't match with the recorded " + message << "ones. Source: `#{source}`. Previous: `#{old_source}`.\n " + message << 'Please contact the specs repo maintainers if the ' + message << 'library changed location.' + errors << message + end + end + end + + # Checks there are already tagged specifications if the specification has + # a git source and doesn't specify a tag (i.e. rejects 0.0.1 specs if + # they are not admissible anymore). + # + # @return [void] + # + def check_if_untagged_version_is_acceptable(spec, previous_spec, errors) + return if !spec.source[:git] || spec.source[:tag] + return unless related_specifications(spec) + return if previous_spec + has_tagged_spec = related_specifications(spec).any? 
do |s| + s.version != '0.0.1' + end + if has_tagged_spec + errors << 'There is already at least one versioned specification ' \ + 'so untagged versions cannot be accepted.' + end + end + + # If the previous specification for the given file is passed it is + # checked for any attempt to update the commit of a 0.0.1 version. + # + # @return [void] + # + def check_commit_change_for_untagged_version(spec, previous_spec, errors) + return unless previous_spec + return unless spec.version == Version.new('0.0.1') + unless spec.source[:commit] == previous_spec.source[:commit] + errors << 'Attempt to rewrite the commit of a 0.0.1 version.' + end + end + + # Checks that there is a specification available for each of the + # dependencies of the given specification. + # + # @return [void] + # + def check_dependencies(spec, errors) + spec.dependencies.each do |dep| + set = source.search(dep) + unless set && set.specification + errors << "Unable to find a specification for the `#{dep}` " \ + 'dependency.' + end + end + end + + private + + # @!group Source helpers + #-----------------------------------------------------------------------# + + # Returns the specifications related to the given spec. + # + # @param [Specification] spec + # The specification for which the siblings specs are needed. + # + # @return [Array] The other specifications of the Pod. + # + # @return [Nil] If there are no other specifications stored. + # + def related_specifications(spec) + versions = source.versions(spec.name) + return unless versions + specs = versions.sort.map { |v| source.specification(spec.name, v) } + specs.delete(spec) + specs + end + + # Returns the most representative specification for the Pod of the given + # spec. + # + # @param [Specification] spec + # The specification for which the representative spec is needed. + # + # @return [Specification] The specification with the highest version. + # + # @return [Nil] If there are no other specifications stored. + # + def reference_spec(spec) + specs = related_specifications(spec) + specs.last if specs + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/aggregate.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/aggregate.rb new file mode 100644 index 0000000..f78ae95 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/aggregate.rb @@ -0,0 +1,218 @@ +module Pod + class Source + # The Aggregate manages a directory of sources repositories. + # + class Aggregate + # @return [Array] The ordered list of sources. + # + attr_reader :sources + + # @param [Array] repos_dirs @see Sources + # + def initialize(sources) + raise "Cannot initialize an aggregate with a nil source: (#{sources})" if sources.include?(nil) + @sources = sources + end + + # @return [Array] the names of all the pods available. + # + def all_pods + sources.map(&:pods).flatten.uniq + end + + # @return [Array] The sets for all the pods available. + # + # @note Implementation detail: The sources don't cache their values + # because they might change in response to an update. Therefore + # this method to preserve performance caches the values before + # processing them. 
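+ #
+ # @example Listing every set across two sources (an illustrative
+ # sketch; `source_a` and `source_b` stand in for real
+ # Source instances)
+ #
+ # aggregate = Pod::Source::Aggregate.new([source_a, source_b])
+ # aggregate.all_sets.map(&:name) #=> ["AFNetworking", "SnapKit", ...]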
+ # + def all_sets + pods_by_source = {} + sources.each do |source| + pods_by_source[source] = source.pods + end + pods = pods_by_source.values.flatten.uniq + + pods.map do |pod| + pod_sources = sources.select { |s| pods_by_source[s].include?(pod) } + pod_sources = pod_sources.compact + Specification::Set.new(pod, pod_sources) + end + end + + # Returns a set configured with the source which contains the highest + # version in the aggregate. + # + # @param [String] name + # The name of the Pod. + # + # @return [Set] The most representative set for the Pod with the given + # name. Returns nil if no representative source found containing a pod with given name. + # + def representative_set(name) + representative_source = nil + highest_version = nil + sources.each do |source| + source_versions = source.versions(name) + if source_versions + source_version = source_versions.first + if highest_version.nil? || (highest_version < source_version) + highest_version = source_version + representative_source = source + end + end + end + representative_source ? Specification::Set.new(name, representative_source) : nil + end + + public + + # @!group Search + #-----------------------------------------------------------------------# + + # @return [Set, nil] a set for a given dependency including all the + # {Source} that contain the Pod. If no sources containing the + # Pod where found it returns nil. + # + # @raise If no source including the set can be found. + # + # @see Source#search + # + def search(dependency) + found_sources = sources.select { |s| s.search(dependency) } + unless found_sources.empty? + Specification::Set.new(dependency.root_name, found_sources) + end + end + + # @return [Array] the sets that contain the search term. + # + # @raise If no source including the set can be found. + # + # @todo Clients should raise not this method. + # + # @see Source#search_by_name + # + def search_by_name(query, full_text_search = false) + pods_by_source = {} + result = [] + sources.each do |s| + source_pods = s.search_by_name(query, full_text_search) + pods_by_source[s] = source_pods.map(&:name) + end + + root_spec_names = pods_by_source.values.flatten.uniq + root_spec_names.each do |pod| + result_sources = sources.select do |source| + pods_by_source[source].include?(pod) + end + + result << Specification::Set.new(pod, result_sources) + end + + if result.empty? + extra = ', author, summary, or description' if full_text_search + raise Informative, 'Unable to find a pod with name' \ + "#{extra} matching `#{query}'" + end + result + end + + public + + # @!group Search Index + #-----------------------------------------------------------------------# + + # Generates from scratch the search data for given source. + # This operation can take a considerable amount of time + # (seconds) as it needs to evaluate the most representative podspec + # for each Pod. + # + # @param [Source] source + # The source from which a search index will be generated. + # + # @return [Hash{String=>Hash}] The search data for the source. + # + def generate_search_index_for_source(source) + generate_search_index_for_sets(source.pod_sets) + end + + # Generates from scratch the search data for changed specifications in given source. + # + # @param [Source] source + # The source from which a search index will be generated. + # @param [Array] spec_paths + # Array of file path names for corresponding changed specifications. + # + # @return [Hash{String=>Hash}] The search data for changed specifications. 
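+ #
+ # @example Re-indexing two changed podspecs (an illustrative sketch;
+ # the paths are hypothetical and relative to the repo root)
+ #
+ # paths = %w(Specs/A/1.0.0/A.podspec.json Specs/B/2.0.0/B.podspec.json)
+ # aggregate.generate_search_index_for_changes_in_source(source, paths)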
+ #
+ def generate_search_index_for_changes_in_source(source, spec_paths)
+ pods = source.pods_for_specification_paths(spec_paths)
+ sets = pods.map do |pod|
+ Specification::Set.new(pod, source)
+ end
+ generate_search_index_for_sets(sets)
+ end
+
+ private
+
+ # @!group Private helpers
+ #-----------------------------------------------------------------------#
+
+ # Generates search data for a given array of sets.
+ def generate_search_index_for_sets(sets)
+ result = {}
+ sets.each do |set|
+ word_list_from_set(set).each do |w|
+ (result[w] ||= []).push(set.name)
+ end
+ end
+ result
+ end
+
+ # Returns the vocabulary extracted from the most representative
+ # specification of the set. The vocabulary contains words from the
+ # following information:
+ #
+ # - version
+ # - summary
+ # - description
+ # - authors
+ #
+ # @param [Set] set
+ # The set for which the information is needed.
+ #
+ # @note If the specification can't be loaded, an empty array is
+ # returned and a warning is printed.
+ #
+ # @note For compatibility with non-Ruby clients, strings are used
+ # instead of symbols for the keys.
+ #
+ # @return [Array] An array of words contained by the set's search-related information.
+ #
+ def word_list_from_set(set)
+ spec = set.specification
+ word_list = [set.name.dup]
+ if spec.summary
+ word_list += spec.summary.split
+ end
+ if spec.description
+ word_list += spec.description.split
+ end
+ if spec.authors
+ spec.authors.each_pair do |k, v|
+ word_list += k.split if k
+ word_list += v.split if v
+ end
+ end
+ word_list.uniq
+ rescue
+ CoreUI.warn "Skipping `#{set.name}` because the podspec contains " \
+ 'errors.'
+ []
+ end
+
+ #-----------------------------------------------------------------------#
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/health_reporter.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/health_reporter.rb
new file mode 100644
index 0000000..9c834ce
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/health_reporter.rb
@@ -0,0 +1,192 @@
+module Pod
+ class Source
+ # Checks a source for errors and warnings.
+ #
+ class HealthReporter
+ # @return [Source] the source to check.
+ #
+ attr_reader :source
+
+ # @param [Pathname] repo @see Source#repo.
+ #
+ def initialize(repo)
+ @source = Source.new(repo)
+ @errors = {}
+ @linter_results = {}
+ end
+
+ public
+
+ # @!group Configuration
+ #-----------------------------------------------------------------------#
+
+ # Allows specifying an optional callback which is called before
+ # analysing every spec. Suitable for UI.
+ #
+ # @param [Proc] block
+ # A callback which is called before checking any
+ # specification. It receives the name and the version of the
+ # spec.
+ #
+ # @return [void]
+ #
+ def pre_check(&block)
+ @pre_check_callback = block
+ end
+
+ public
+
+ # @!group Actions
+ #-----------------------------------------------------------------------#
+
+ # Analyzes all the specification files in the source.
+ #
+ # @return [HealthReport] A report which contains the information about the
+ # state of the source.
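+ #
+ # @example Checking a local spec repo clone (an illustrative sketch;
+ # `repo_path` is a hypothetical Pathname to the clone)
+ #
+ # reporter = Pod::Source::HealthReporter.new(repo_path)
+ # reporter.pre_check { |name, version| print '.' }
+ # report = reporter.analyze
+ # report.pods_by_error.each_key { |message| puts message }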
+ #
+ def analyze
+ @report = HealthReport.new(source)
+
+ source.pods.each do |name|
+ source.versions(name).each do |version|
+ @pre_check_callback.call(name, version) if @pre_check_callback
+ spec_path = source.specification_path(name, version)
+ spec = lint_spec(name, version, spec_path)
+ check_spec_path(name, version, spec) if spec
+ report.analyzed_paths << spec_path
+ end
+ end
+
+ check_stray_specs
+ report
+ end
+
+ # @return [HealthReport] The report produced by the analysis.
+ #
+ attr_reader :report
+
+ private
+
+ # @!group Private helpers
+ #-----------------------------------------------------------------------#
+
+ # Checks the validity of the specification with the linter.
+ #
+ # @param [String] name
+ # The name of the Pod.
+ #
+ # @param [Version] version
+ # The version of the specification.
+ #
+ # @param [Pathname] spec_path
+ # The path of the specification to check.
+ #
+ # @return [Specification] The specification loaded by the linter.
+ # @return [Nil] If the specification raised an error during evaluation.
+ #
+ def lint_spec(name, version, spec_path)
+ linter = Specification::Linter.new(spec_path)
+ linter.lint
+ linter.results.each do |result|
+ next if result.public_only?
+ report.add_message(result.type, result.message, name, version)
+ end
+ linter.spec
+ end
+
+ # Ensures that the name and the version of the specification correspond
+ # to the ones expected by the repo given its path.
+ #
+ # @param [String] name
+ # The name of the Pod.
+ #
+ # @param [Version] version
+ # The version of the specification.
+ #
+ # @param [Specification] spec
+ # The specification to check.
+ #
+ # @return [void]
+ #
+ def check_spec_path(name, version, spec)
+ unless spec.name == name && spec.version.to_s == version.to_s
+ message = "Incorrect path #{spec.defined_in_file}"
+ report.add_message(:error, message, name, spec.version)
+ end
+ end
+
+ # Checks for any stray specification in the repo.
+ #
+ # @return [void]
+ #
+ def check_stray_specs
+ all_paths = Pathname.glob(source.repo + '**/*.podspec{,.json}')
+ stray_specs = all_paths - report.analyzed_paths
+ stray_specs.each do |path|
+ report.add_message(:error, 'Stray spec', path)
+ end
+ end
+
+ #-----------------------------------------------------------------------#
+
+ # Encapsulates the information about the state of a repo.
+ #
+ class HealthReport
+ # @return [Source] the source analyzed.
+ #
+ attr_reader :source
+
+ # @param [Source] source @see source.
+ #
+ def initialize(source)
+ @source = source
+ @analyzed_paths = []
+ @pods_by_error = {}
+ @pods_by_warning = {}
+ end
+
+ # @return [Array] The list of the analyzed paths.
+ #
+ attr_accessor :analyzed_paths
+
+ # @return [Hash{ String => Hash }] The pods (the version grouped by
+ # name) grouped by an error message.
+ #
+ attr_accessor :pods_by_error
+
+ # @return [Hash{ String => Hash }] The pods (the version grouped by
+ # name) grouped by a warning message.
+ #
+ attr_accessor :pods_by_warning
+
+ # Adds a message with the given type for the specification with the
+ # given name and version.
+ #
+ # @param [Symbol] type
+ # The type of message. Either `:error` or `:warning`.
+ #
+ # @param [String] message
+ # The contents of the message.
+ #
+ # @param [String] spec_name
+ # The name of the Pod.
+ #
+ # @param [String] spec_version
+ # The version of the specification.
+ # + # @return [void] + # + def add_message(type, message, spec_name, spec_version = nil) + pods = send(:"pods_by_#{type}") + pods[message] ||= {} + pods[message][spec_name] ||= [] + pods[message][spec_name] << spec_version + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/manager.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/manager.rb new file mode 100644 index 0000000..394a916 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/manager.rb @@ -0,0 +1,488 @@ +require 'public_suffix' + +module Pod + class Source + class Manager + # @return [Pathname] The directory that contains the source repo + # directories. + # + attr_reader :repos_dir + + def initialize(repos_dir) + @repos_dir = Pathname(repos_dir).expand_path + end + + # @return [Array] The source repo directories. + # + def source_repos + return [] unless repos_dir.exist? + repos_dir.children.select(&:directory?).sort_by { |d| d.basename.to_s.downcase } + end + + # @return [Source::Aggregate] The aggregate of all the sources with the + # known Pods. + # + def aggregate + aggregate_with_repos(source_repos) + end + + # @return [Source::Aggregate] The aggregate of the sources from repos. + # + # @param [Dependency] dependency + # The dependency for which to find or build the appropriate. + # aggregate. If the dependency specifies a source podspec repo + # then only that source will be used, otherwise all sources + # will be used. + # + def aggregate_for_dependency(dependency) + return aggregate if dependency.podspec_repo.nil? + + source = source_with_url(dependency.podspec_repo) || source_with_name(dependency.podspec_repo) + return aggregate if source.nil? + + aggregate_with_repos([source.repo]) + end + + # @return [Array] The list of the sources with the given names. + # + # @param [Array<#to_s>] names + # The names of the sources. + # + def sources(names) + dirs = names.map { |name| source_dir(name) } + dirs.map { |repo| source_from_path(repo) } + end + + # @return [Array] The list of all the sources known to this + # installation of CocoaPods. + # + def all + aggregate.sources + end + + # @return [Array] The list of all the non-indexable sources known to this + # installation of CocoaPods. + # + def all_non_indexable + aggregate.sources.reject(&:indexable?) + end + + # @return [Array] The CocoaPods Master Repo source. + # + def master + sources([Pod::TrunkSource::TRUNK_REPO_NAME]).select { |s| s.repo.directory? } + end + + # @!group Master repo + + # @return [Pathname] The path of the master repo. + # + def master_repo_dir + source_dir(Pod::TrunkSource::TRUNK_REPO_NAME) + end + + # @return [Boolean] Checks if the master repo is usable. + # + # @note Note this is used to automatically setup the master repo if + # needed. + # + def master_repo_functional? + return false unless master_repo = master.first + master_repo.metadata.compatible?(CORE_VERSION) + end + + # Search the appropriate sources to match the set for the given dependency. + # + # @return [Set, nil] a set for a given dependency including all the + # {Source} that contain the Pod. If no sources containing the + # Pod where found it returns nil. + # + # @raise If no source can be found that includes the dependency. 
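+ #
+ # @example Resolving the set for a dependency (an illustrative sketch)
+ #
+ # manager = Pod::Source::Manager.new(Pathname('~/.cocoapods/repos').expand_path)
+ # set = manager.search(Pod::Dependency.new('AFNetworking'))
+ # set.name if set #=> "AFNetworking"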
+ # + def search(dependency) + aggregate_for_dependency(dependency).search(dependency) + end + + # Search all the sources with the given search term. + # + # @param [String] query + # The search term. + # + # @param [Boolean] full_text_search + # Whether the search should be limited to the name of the Pod or + # should include also the author, the summary, and the + # description. + # + # @raise If no source including the set can be found. + # + # @return [Array] The sets that contain the search term. + # + def search_by_name(query, full_text_search = false) + query_word_regexps = query.split.map { |word| /#{word}/i } + if full_text_search + query_word_results_hash = {} + updated_search_index.each_value do |word_spec_hash| + word_spec_hash.each_pair do |word, spec_names| + query_word_regexps.each do |query_word_regexp| + set = (query_word_results_hash[query_word_regexp] ||= Set.new) + set.merge(spec_names) if word =~ query_word_regexp + end + end + end + found_set_names = query_word_results_hash.values.reduce(:&) + found_set_names ||= [] + + sets_from_non_indexable = all_non_indexable.map { |s| s.search_by_name(query, true) }.flatten + + found_set_names += sets_from_non_indexable.map(&:name).flatten.uniq + + sets = found_set_names.map do |name| + aggregate.representative_set(name) + end + + # Remove nil values because representative_set return nil if no pod is found in any of the sources. + sets.compact! + else + sets = aggregate.search_by_name(query, false) + end + if sets.empty? + extra = ', author, summary, or description' if full_text_search + raise Informative, "Unable to find a pod with name#{extra} " \ + "matching `#{query}`" + end + sorted_sets(sets, query_word_regexps) + end + + # Returns given set array by sorting it in-place. + # + # @param [Array] sets + # Array of sets to be sorted. + # + # @param [Array] query_word_regexps + # Array of regexp objects for user query. + # + # @return [Array] Given sets parameter itself after sorting it in-place. + # + def sorted_sets(sets, query_word_regexps) + sets.sort_by! do |set| + pre_match_length = nil + found_query_index = nil + found_query_count = 0 + query_word_regexps.each_with_index do |q, idx| + if (m = set.name.match(/#{q}/i)) + pre_match_length ||= m.pre_match.length + found_query_index ||= idx + found_query_count += 1 + end + end + pre_match_length ||= 1000 + found_query_index ||= 1000 + [-found_query_count, pre_match_length, found_query_index, set.name.downcase] + end + sets + end + + # Returns the search data. If a saved search data exists, retrieves it from file and returns it. + # Else, creates the search data from scratch, saves it to file system, and returns it. + # Search data is grouped by source repos. For each source, it contains a hash where keys are words + # and values are the pod names containing corresponding word. + # + # For each source, list of unique words are generated from the following spec information. + # - version + # - summary + # - description + # - authors + # + # @return [Hash{String => Hash{String => Array}}] The up to date search data. + # + def updated_search_index + index = stored_search_index || {} + indexable_sources.each do |source| + source_name = source.name + unless index[source_name] + CoreUI.print "Creating search index for spec repo '#{source_name}'.." + index[source_name] = aggregate.generate_search_index_for_source(source) + CoreUI.puts ' Done!' 
+ end + end + save_search_index(index) + index + end + + # Updates the stored search index if there are changes in spec repos while updating them. + # Update is performed incrementally. Only the changed pods' search data is re-generated and updated. + # @param [Hash{Source => Array}] changed_spec_paths + # A hash containing changed specification paths for each source. + # + def update_search_index_if_needed(changed_spec_paths) + search_index = stored_search_index + return unless search_index + changed_spec_paths.each_pair do |source, spec_paths| + next unless source.indexable? + index_for_source = search_index[source.name] + next unless index_for_source && !spec_paths.empty? + updated_pods = source.pods_for_specification_paths(spec_paths) + + new_index = aggregate.generate_search_index_for_changes_in_source(source, spec_paths) + # First traverse search_index and update existing words + # Remove traversed words from new_index after adding to search_index, + # so that only non existing words will remain in new_index after enumeration completes. + index_for_source.each_pair do |word, _| + if new_index[word] + index_for_source[word] |= new_index[word] + new_index.delete(word) + else + index_for_source[word] -= updated_pods + end + end + + # Now add non existing words remained in new_index to search_index + index_for_source.merge!(new_index) + end + save_search_index(search_index) + end + + # Updates search index for changed pods in background + # @param [Hash{Source => Array}] changed_spec_paths + # A hash containing changed specification paths for each source. + # + def update_search_index_if_needed_in_background(changed_spec_paths) + if Gem.win_platform? + update_search_index_if_needed(changed_spec_paths) + return + end + Process.fork do + Process.daemon + update_search_index_if_needed(changed_spec_paths) + exit + end + end + + # Returns the search data stored in the file system. + # If existing data in the file system is not valid, returns nil. + # + def stored_search_index + @updated_search_index ||= begin + if search_index_path.exist? + require 'json' + begin + index = JSON.parse(search_index_path.read) + unless index # JSON.parse("null") => nil + search_index_path.delete + return nil + end + + index if index.is_a?(Hash) # TODO: should we also check if hash has correct hierarchy? + rescue JSON::ParserError + search_index_path.delete + nil + end + end + end + end + + # Stores given search data in the file system. + # @param [Hash] index + # Index to be saved in file system + # + def save_search_index(index) + require 'json' + @updated_search_index = index + search_index_path.open('w') do |io| + io.write(@updated_search_index.to_json) + end + end + + # Allows to clear the search index. + # + attr_writer :updated_search_index + + # @return [Pathname] The path where the search index should be stored. + # + attr_accessor :search_index_path + + private + + # @return [Source] The Source at a given path. + # + # @param [Pathname] path + # The local file path to one podspec repo. + # + def source_from_path(path) + @sources_by_path ||= Hash.new do |hash, key| + hash[key] = case + when key.basename.to_s == Pod::TrunkSource::TRUNK_REPO_NAME + TrunkSource.new(key) + when (key + '.url').exist? + CDNSource.new(key) + else + Source.new(key) + end + end + @sources_by_path[path] + end + + # @return [Source::Aggregate] The aggregate of the sources from repos. + # + # @param [Array] repos + # The local file paths to one or more podspec repo caches. 
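+ #
+ # @note The result is memoized per list of repos, so repeated calls
+ # with the same repos reuse the same Aggregate instance.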
+ # + def aggregate_with_repos(repos) + sources = repos.map { |path| source_from_path(path) } + @aggregates_by_repos ||= {} + @aggregates_by_repos[repos] ||= Source::Aggregate.new(sources) + end + + # @return [Source] The source with the given name. + # + # @param [String] name + # The name of the source. + # + def source_with_name(name) + source = sources([name]).first + return nil unless source.repo.exist? + source + end + + # @return [Source] The updateable source with the given name. If no updateable source + # with given name is found it raises. + # + # @param [String] name + # The name of the source. + # + def updateable_source_named(name) + specified_source = source_with_name(name) + unless specified_source + raise Informative, "Unable to find the `#{name}` repo." + end + unless specified_source.updateable? + raise Informative, "The `#{name}` repo is not a updateable repo." + end + specified_source + end + + # @return [Source] The list of the updateable sources. + # + def updateable_sources + all.select(&:updateable?) + end + + # @return [Source] The list of the indexable sources. + # + def indexable_sources + all.select(&:indexable?) + end + + # @return [Pathname] The path of the source with the given name. + # + # @param [String] name + # The name of the source. + # + def source_dir(name) + repos_dir + name + end + + # @return [Source] The source whose {Source#url} is equal to `url`. + # + # @param [String] url + # The URL of the source. + # + def source_with_url(url) + url = canonic_url(url) + url = 'https://github.com/cocoapods/specs' if url =~ %r{github.com[:/]+cocoapods/specs} + all.find do |source| + source.url && canonic_url(source.url) == url + end + end + + def canonic_url(url) + url.downcase.gsub(/\.git$/, '').gsub(%r{\/$}, '') + end + + # Returns a suitable repository name for `url`. + # + # @example A GitHub.com URL + # + # name_for_url('https://github.com/Artsy/Specs.git') + # # "artsy" + # name_for_url('https://github.com/Artsy/Specs.git') + # # "artsy-1" + # + # @example A non-Github.com URL + # + # name_for_url('https://sourceforge.org/Artsy/Specs.git') + # # sourceforge-artsy + # + # @example A file URL + # + # name_for_url('file:///Artsy/Specs.git') + # # artsy + # + # @param [#to_s] url + # The URL of the source. + # + # @return [String] A suitable repository name for `url`. + # + def name_for_url(url) + base_from_host_and_path = lambda do |host, path| + if host && !host.empty? 
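+ # Prefer the registrable second-level domain (e.g. "sourceforge"
+ # for sourceforge.org) over the raw host when deriving the name.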
+ domain = PublicSuffix.parse(host) rescue nil + base = [domain&.sld || host] + base = [] if base == %w(github) + else + base = [] + end + + path = path.gsub(/.git$/, '').gsub(%r{^/}, '').split('/') + path.pop if path.last == 'specs' + + (base + path).join('-') + end + + valid_url = lambda do |url| + url =~ URI.regexp && (URI(url) rescue false) + end + + valid_scp_url = lambda do |url| + valid_url['scp://' + url] + end + + url = url.to_s.downcase + + case url + when %r{https://#{Regexp.quote(trunk_repo_hostname)}}i + # Main CDN repo + base = Pod::TrunkSource::TRUNK_REPO_NAME + when valid_url + # HTTPS URL or something similar + url = valid_url[url] + base = base_from_host_and_path[url.host, url.path] + when valid_scp_url + # SCP-style URLs for private git repos + url = valid_scp_url[url] + base = base_from_host_and_path[url.host, url.path] + when %r{(?:git|ssh|https?|[a-z0-9_-]+@([-\w.]+)):(\/\/)?(.*?)(\.git)?(\/?|\#[-\d\w._]+?)$}i + # Additional SCP-style URLs for private git repos + host, _, path = Regexp.last_match.captures + base = base_from_host_and_path[host, path] + else + # This is nearly impossible, with all the previous cases + raise Informative, "Couldn't determine repo name for URL: #{url}" + end + + name = base + (1..).each do |i| + break unless source_dir(name).exist? + name = "#{base}-#{i}" + end + name + end + + # Returns hostname for for `trunk` URL. + # + def trunk_repo_hostname + URI.parse(TrunkSource::TRUNK_REPO_URL).host.downcase.freeze + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/metadata.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/metadata.rb new file mode 100644 index 0000000..7186302 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/source/metadata.rb @@ -0,0 +1,79 @@ +autoload :Digest, 'digest/md5' +require 'active_support/hash_with_indifferent_access' +require 'active_support/core_ext/hash/indifferent_access' + +module Pod + class Source + class Metadata + attr_reader :minimum_cocoapods_version + attr_reader :maximum_cocoapods_version + attr_reader :latest_cocoapods_version + attr_reader :prefix_lengths + attr_reader :last_compatible_versions + + def initialize(hash = {}) + hash = hash.with_indifferent_access + @minimum_cocoapods_version = hash['min'] + @minimum_cocoapods_version &&= Pod::Version.new(@minimum_cocoapods_version) + @maximum_cocoapods_version = hash['max'] + @maximum_cocoapods_version &&= Pod::Version.new(@maximum_cocoapods_version) + @latest_cocoapods_version = hash['last'] + @latest_cocoapods_version &&= Pod::Version.new(@latest_cocoapods_version) + @prefix_lengths = Array(hash['prefix_lengths']).map!(&:to_i) + @last_compatible_versions = Array(hash['last_compatible_versions']).map(&Pod::Version.method(:new)).sort + end + + def self.from_file(file) + hash = file.file? ? YAMLHelper.load_file(file) : {} + new(hash) + end + + def to_hash + hash = ActiveSupport::HashWithIndifferentAccess.new + hash['min'] = @minimum_cocoapods_version.to_s if @minimum_cocoapods_version + hash['max'] = @maximum_cocoapods_version.to_s if @maximum_cocoapods_version + hash['last'] = @latest_cocoapods_version.to_s if @latest_cocoapods_version + hash['prefix_lengths'] = @prefix_lengths unless @prefix_lengths.empty? + hash['last_compatible_versions'] = @last_compatible_versions.map(&:to_s) unless @last_compatible_versions.empty? + hash + end + + def path_fragment(pod_name, version = nil) + prefixes = if prefix_lengths.empty? 
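+ # No prefix sharding is configured, so podspecs live directly
+ # under the pod name.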
+ [] + else + hashed = Digest::MD5.hexdigest(pod_name) + prefix_lengths.map do |length| + hashed.slice!(0, length) + end + end + prefixes.concat([pod_name, version]).compact + end + + def last_compatible_version(target_version) + return unless minimum_cocoapods_version + return if minimum_cocoapods_version <= target_version + @last_compatible_versions.reverse.bsearch { |v| v <= target_version }.tap do |version| + raise Informative, 'Unable to find compatible version' unless version + end + end + + # Returns whether a source is compatible with the current version of + # CocoaPods. + # + # @param [Pathname] dir + # The directory where the source is stored. + # + # @return [Boolean] whether the source is compatible. + # + def compatible?(version) + bin_version = Gem::Version.new(version) + supports_min = !minimum_cocoapods_version || + (bin_version >= Gem::Version.new(minimum_cocoapods_version)) + supports_max = !maximum_cocoapods_version || + (bin_version <= Gem::Version.new(maximum_cocoapods_version)) + supports_min && supports_max + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification.rb new file mode 100644 index 0000000..fa9c267 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification.rb @@ -0,0 +1,847 @@ +require 'active_support/core_ext/string/strip.rb' + +require 'cocoapods-core/specification/consumer' +require 'cocoapods-core/specification/dsl' +require 'cocoapods-core/specification/linter' +require 'cocoapods-core/specification/root_attribute_accessors' +require 'cocoapods-core/specification/set' +require 'cocoapods-core/specification/json' + +module Pod + # The Specification provides a DSL to describe a Pod. A pod is defined as a + # library originating from a source. A specification can support detailed + # attributes for modules of code through subspecs. + # + # Usually it is stored in files with `podspec` extension. + # + class Specification + include Pod::Specification::DSL + include Pod::Specification::DSL::Deprecations + include Pod::Specification::RootAttributesAccessors + include Pod::Specification::JSONSupport + + # @return [Specification] the parent of the specification unless the + # specification is a root. + # + attr_reader :parent + + # @return [Integer] the cached hash value for this spec. + # + attr_reader :hash_value + + # @param [Specification] parent @see parent + # + # @param [String] name + # the name of the specification. + # + # @param [Boolean] test_specification + # Whether the specification is a test specification + # + # @param [Boolean] app_specification + # Whether the specification is an app specification + # + def initialize(parent = nil, name = nil, test_specification = false, app_specification: false) + raise StandardError, "#{self} can not be both an app and test specification." if test_specification && app_specification + @attributes_hash = {} + @subspecs = [] + @consumers = {} + @parent = parent + @hash_value = nil + @test_specification = test_specification + @app_specification = app_specification + attributes_hash['name'] = name + attributes_hash['test_type'] = :unit if test_specification + + yield self if block_given? 
+ end + + def initialize_copy(other) + super + + @subspecs = @subspecs.map do |subspec| + subspec = subspec.dup + subspec.instance_variable_set :@parent, self + subspec + end + end + + # @return [Hash] the hash that stores the information of the attributes of + # the specification. + # + attr_accessor :attributes_hash + + # @return [Array] The subspecs of the specification. + # + attr_accessor :subspecs + + # @return [Boolean] If this specification is a test specification. + # + attr_accessor :test_specification + alias_method :test_specification?, :test_specification + + # @return [Boolean] If this specification is an app specification. + # + attr_accessor :app_specification + alias_method :app_specification?, :app_specification + + # Checks if a specification is equal to the given one according its name + # and to its version. + # + # @param [Specification] other + # the specification to compare with. + # + # @todo Not sure if comparing only the name and the version is the way to + # go. This is used by the installer to group specifications by root + # spec. + # + # @return [Boolean] Whether the specifications are equal. + # + def ==(other) + other.is_a?(self.class) && + name == other.name && + version == other.version + end + + alias_method :eql?, :== + + # Return the hash value for this specification according to its attributes + # hash. + # + # @note This function must have the property that a.eql?(b) implies + # a.hash == b.hash. + # + # @note This method is used by the Hash class. + # + # @return [Fixnum] The hash value. + # + def hash + if @hash_value.nil? + @hash_value = (name.hash * 53) ^ version.hash + end + @hash_value + end + + # @return [String] A string suitable for representing the specification in + # clients. + # + def to_s + specified_version = raw_version || '' + if name && !specified_version.empty? + "#{name} (#{specified_version})" + elsif name + name + else + 'No-name' + end + end + + # @return [String] A string suitable for debugging. + # + def inspect + "#<#{self.class.name} name=#{name.inspect}>" + end + + # @param [String] string_representation + # the string that describes a {Specification} generated from + # {Specification#to_s}. + # + # @example Input examples + # + # "libPusher (1.0)" + # "RestKit/JSON (1.0)" + # + # @return [Array] the name and the version of a + # pod. + # + def self.name_and_version_from_string(string_representation) + match_data = string_representation.match(/\A((?:\s?[^\s(])+)(?: \((.+)\))?\Z/) + unless match_data + raise Informative, 'Invalid string representation for a ' \ + "specification: `#{string_representation}`. " \ + 'The string representation should include the name and ' \ + 'optionally the version of the Pod.' + end + name = match_data[1] + vers = Version.new(match_data[2]) + [name, vers] + end + + # Returns the root name of a specification. + # + # @param [String] the name of a specification or of a subspec. + # + # @return [String] the root name + # + def self.root_name(full_name) + if index = full_name.index('/') + full_name.slice(0, index) + else + full_name + end + end + + # Returns the module name of a specification + # + # @return [String] the module name + # + def module_name + attributes_hash['module_name'] || + c99ext_identifier(attributes_hash['header_dir']) || + c99ext_identifier(attributes_hash['name']) + end + + private + + # Transforms the given string into a valid +identifier+ after C99ext + # standard, so that it can be used in source code where escaping of + # ambiguous characters is not applicable. 
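+ #
+ # @example (an illustrative sketch)
+ #
+ # c99ext_identifier('123 Bad-Name!') #=> "_123_Bad_Name_"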
+ # + # @param [String] name + # any name, which may contain leading numbers, spaces or invalid + # characters. + # + # @return [String] + # + def c99ext_identifier(name) + return nil if name.nil? + I18n.transliterate(name).gsub(/^([0-9])/, '_\1'). + gsub(/[^a-zA-Z0-9_]/, '_').gsub(/_+/, '_') + end + + # @return [Object, Nil] + # the raw value specified for the version attribute, or nil + # + def raw_version + root.attributes_hash['version'] + end + + #-------------------------------------------------------------------------# + + public + + # @!group Hierarchy + + # @return [Specification] The root specification or itself if it is root. + # + def root + parent ? parent.root : self + end + + # @return [Boolean] whether the specification is root. + # + def root? + parent.nil? + end + + # @return [Boolean] whether the specification is a subspec. + # + def subspec? + !parent.nil? + end + + #-------------------------------------------------------------------------# + + public + + # @return [Symbol] Spec type of the current spec. + # + # @note see Attribute#SUPPORTED_SPEC_TYPES for the list of available spec_types. + # + def spec_type + return :app if app_specification? + return :test if test_specification? + + :library + end + + # @!group Dependencies & Subspecs + + # @return [Boolean] If this specification is a library specification. + # + # @note a library specification is a specification that is not of type app or test. + # + def library_specification? + !app_specification? && !test_specification? + end + + # @return [Boolean] If this specification is not a library specification. + # + # @note see #library_specification? + # + def non_library_specification? + !library_specification? + end + + # @return [Symbol] the test type supported if this is a test specification. + # + def test_type + attributes_hash['test_type'].to_sym + end + + # @return [Array] the list of all the test subspecs of + # a specification. + # + def test_specs + subspecs.select(&:test_specification?) + end + + # @return [Array] the list of all the app subspecs of + # a specification. + # + def app_specs + subspecs.select(&:app_specification?) + end + + # @return [Array] the list of all the non libary (app or test) subspecs of + # a specification. + # + def non_library_specs + subspecs.select(&:non_library_specification?) + end + + # @return [Array] the recursive list of all the subspecs of + # a specification. + # + def recursive_subspecs + mapper = lambda do |spec| + spec.subspecs.map do |subspec| + [subspec, *mapper.call(subspec)] + end.flatten + end + mapper.call(self) + end + + # Returns the subspec with the given name or the receiver if the name is + # nil or equal to the name of the receiver. + # + # @param [String] relative_name + # the relative name of the subspecs starting from the receiver + # including the name of the receiver. + # + # @param [Boolean] raise_if_missing + # whether an exception should be raised if no specification named + # `relative_name` is found. + # + # @example Retrieving a subspec + # + # s.subspec_by_name('Pod/subspec').name #=> 'subspec' + # + # @return [Specification] the subspec with the given name or self. + # + def subspec_by_name(relative_name, raise_if_missing = true, include_non_library_specifications = false) + if relative_name.nil? || relative_name == base_name + self + elsif relative_name.downcase == base_name.downcase + raise Informative, "Trying to access a `#{relative_name}` " \ + "specification from `#{base_name}`, which has a different case." 
+ else + remainder = relative_name[base_name.size + 1..-1] + subspec_name = remainder.split('/').shift + subspec = subspecs.find { |s| s.base_name == subspec_name && (include_non_library_specifications || !s.non_library_specification?) } + unless subspec + if raise_if_missing + raise Informative, 'Unable to find a specification named ' \ + "`#{relative_name}` in `#{name} (#{version})`." + else + return nil + end + end + subspec.subspec_by_name(remainder, raise_if_missing, include_non_library_specifications) + end + end + + # @return [Array, Symbol] the name(s) of the default subspecs if provided or :none for no default subspecs. + # + def default_subspecs + # TODO: remove singular form and update the JSON specs. + value = Array(attributes_hash['default_subspecs'] || attributes_hash['default_subspec']) + first = value.first + if first == :none || first == 'none' + first.to_sym + else + value + end + end + + # Returns the dependencies on subspecs. + # + # @note A specification has a dependency on either the + # {#default_subspecs} or each of its children subspecs that are + # compatible with its platform. + # + # @param [Platform] platform + # return only dependencies supported on the given platform. + # + # @return [Array] the dependencies on subspecs. + # + def subspec_dependencies(platform = nil) + specs = if default_subspecs.empty? + subspecs.compact.reject(&:non_library_specification?) + elsif default_subspecs == :none + [] + else + default_subspecs.map do |subspec_name| + root.subspec_by_name("#{name}/#{subspec_name}") + end + end + if platform + specs = specs.select { |s| s.supported_on_platform?(platform) } + end + specs.map { |s| Dependency.new(s.name, version) } + end + + # Returns the dependencies on other Pods or subspecs of other Pods. + # + # @param [Platform] platform + # return only dependencies supported on the given platform. + # + # @note External dependencies are inherited by subspecs + # + # @return [Array] the dependencies on other Pods. + # + def dependencies(platform = nil) + if platform + consumer(platform).dependencies || [] + else + available_platforms.map do |spec_platform| + consumer(spec_platform).dependencies + end.flatten.uniq + end + end + + # @return [Array] all the dependencies of the specification. + # + def all_dependencies(platform = nil) + dependencies(platform) + subspec_dependencies(platform) + end + + # Returns whether a dependency is whitelisted for the given configuration. + # + # @param [Pod::Dependency] dependency + # the dependency verify. + # + # @param [Symbol, String] configuration + # the configuration to check against. + # + # @return [Boolean] whether the dependency is whitelisted or not. + # + def dependency_whitelisted_for_configuration?(dependency, configuration) + inherited = -> { root? ? true : parent.dependency_whitelisted_for_configuration?(dependency, configuration) } + + return inherited[] unless configuration_whitelist = attributes_hash['configuration_pod_whitelist'] + return inherited[] unless whitelist_for_pod = configuration_whitelist[dependency.name] + + whitelist_for_pod.include?(configuration.to_s.downcase) + end + + # Returns a consumer to access the multi-platform attributes. + # + # @param [String, Symbol, Platform] platform + # the platform of the consumer + # + # @return [Specification::Consumer] the consumer for the given platform + # + def consumer(platform) + platform = platform.to_sym + @consumers[platform] ||= Consumer.new(self, platform) + end + + # @return [Bool, String] The prefix_header_file value. 
+ # + def prefix_header_file + attributes_hash['prefix_header_file'] + end + + # @return [ArrayString}>] The script_phases value. + # + def script_phases + script_phases = attributes_hash['script_phases'] || [] + script_phases.map do |script_phase| + phase = Specification.convert_keys_to_symbol(script_phase) + phase[:execution_position] = if phase.key?(:execution_position) + phase[:execution_position].to_sym + else + :any + end + phase + end + end + + # @return [Hash] The on demand resources value. + # + def on_demand_resources + attributes_hash['on_demand_resources'] || {} + end + + # @return [Hash] The scheme value. + # + def scheme + value = attributes_hash['scheme'] || {} + Specification.convert_keys_to_symbol(value, :recursive => false) + end + + # @return [Hash] The Info.plist value. + # + def info_plist + attributes_hash['info_plist'] || {} + end + + #-------------------------------------------------------------------------# + + public + + # @!group DSL helpers + + # @return [Boolean] whether the specification should use a directory as its + # source. + # + def local? + return true if source[:path] + false + end + + # @return [Boolean] whether the specification is supported in the given + # platform. + # + # @overload supported_on_platform?(platform) + # + # @param [Platform] platform + # the platform which is checked for support. + # + # @overload supported_on_platform?(symbolic_name, deployment_target) + # + # + def supported_on_platform?(*platform) + platform = Platform.new(*platform) + available_platforms.any? { |available| platform.supports?(available) } + end + + # @return [Array] The platforms that the Pod is supported on. + # + # @note If no platform is specified, this method returns all known + # platforms. + # + def available_platforms + names = supported_platform_names + names = PLATFORMS if names.empty? + names.map { |name| Platform.new(name, deployment_target(name)) } + end + + # Returns the deployment target for the specified platform. + # + # @param [String] platform_name + # the symbolic name of the platform. + # + # @return [String] the deployment target + # @return [Nil] if not deployment target was specified for the platform. + # + def deployment_target(platform_name) + result = platform_hash[platform_name.to_s] + result ||= parent.deployment_target(platform_name) if parent + result + end + + protected + + # @return [Array[Symbol]] the symbolic name of the platform in which the + # specification is supported. + # + # @return [Nil] if the specification is supported on all the known + # platforms. + # + def supported_platform_names + result = platform_hash.keys + if result.empty? && parent + result = parent.supported_platform_names + end + result + end + + # @return [Hash] the normalized hash which represents the platform + # information. + # + def platform_hash + case value = attributes_hash['platforms'] + when String + { value => nil } + when Array + result = {} + value.each do |a_value| + result[a_value] = nil + end + result + when Hash + value + else + {} + end + end + + public + + #-------------------------------------------------------------------------# + + # @!group DSL attribute writers + + # Sets the value for the attribute with the given name. + # + # @param [Symbol] name + # the name of the attribute. + # + # @param [Object] value + # the value to store. + # + # @param [Symbol] platform_name + # If provided the attribute is stored only for the given platform. + # + # @note If the provides value is Hash the keys are converted to a string. 
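+ #
+ # @example Storing an attribute for iOS only (an illustrative sketch)
+ #
+ # spec.store_attribute('source_files', 'Classes/**/*.{h,m}', :ios)
+ # spec.attributes_hash['ios']['source_files'] #=> "Classes/**/*.{h,m}"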
+ # + # @return void + # + def store_attribute(name, value, platform_name = nil) + name = name.to_s + value = Specification.convert_keys_to_string(value) if value.is_a?(Hash) + value = value.strip_heredoc.strip if value.respond_to?(:strip_heredoc) + if platform_name + platform_name = platform_name.to_s + attributes_hash[platform_name] ||= {} + attributes_hash[platform_name][name] = value + else + attributes_hash[name] = value + end + end + + # Defines the setters methods for the attributes providing support for the + # Ruby DSL. + # + DSL.attributes.values.each do |a| + define_method(a.writer_name) do |value| + store_attribute(a.name, value) + end + + if a.writer_singular_form + alias_method(a.writer_singular_form, a.writer_name) + end + end + + # Converts the keys of the given hash to a string. + # + # @param [Object] value + # the value that needs to be stripped from the Symbols. + # + # @param [Boolean] recursive + # whether to convert keys of nested hashes. + # + # @return [Hash] the hash with the keys as strings instead of symbols. + # + def self.convert_keys_to_string(value, recursive: true) + return unless value + result = {} + value.each do |key, subvalue| + subvalue = Specification.convert_keys_to_string(subvalue) if recursive && subvalue.is_a?(Hash) + result[key.to_s] = subvalue + end + result + end + + # Converts the keys of the given hash to a symbol. + # + # @param [Object] value + # the value that needs to be stripped from the Strings. + # + # @param [Boolean] recursive + # whether to convert keys of nested hashes. + # + # @return [Hash] the hash with the keys as symbols instead of strings. + # + def self.convert_keys_to_symbol(value, recursive: true) + return unless value + result = {} + value.each do |key, subvalue| + subvalue = Specification.convert_keys_to_symbol(subvalue) if recursive && subvalue.is_a?(Hash) + result[key.to_sym] = subvalue + end + result + end + + #-------------------------------------------------------------------------# + + public + + # @!group File representation + + # @return [String] The SHA1 digest of the file in which the specification + # is defined. + # + # @return [Nil] If the specification is not defined in a file. + # + def checksum + @checksum ||= begin + if root? + unless defined_in_file.nil? + require 'digest' + checksum = Digest::SHA1.hexdigest(File.read(defined_in_file)) + checksum = checksum.encode('UTF-8') if checksum.respond_to?(:encode) + checksum + end + else + root.checksum + end + end + end + + # @return [String] the path where the specification is defined, if loaded + # from a file. + # + def defined_in_file + root? ? @defined_in_file : root.defined_in_file + end + + # Loads a specification form the given path. + # + # @param [Pathname, String] path + # the path of the `podspec` file. + # + # @param [String] subspec_name + # the name of the specification that should be returned. If it is + # nil returns the root specification. + # + # @raise If the file doesn't return a Pods::Specification after + # evaluation. + # + # @return [Specification] the specification + # + def self.from_file(path, subspec_name = nil) + path = Pathname.new(path) + unless path.exist? + raise Informative, "No podspec exists at path `#{path}`." + end + + string = File.open(path, 'r:utf-8', &:read) + # Work around for Rubinius incomplete encoding in 1.9 mode + if string.respond_to?(:encoding) && string.encoding.name != 'UTF-8' + string.encode!('UTF-8') + end + + from_string(string, path, subspec_name) + end + + # Loads a specification with the given string. 
+ # The specification is evaluated in the context of `path`. + # + # @param [String] spec_contents + # A string describing a specification. + # + # @param [Pathname, String] path @see from_file + # @param [String] subspec_name @see from_file + # + # @return [Specification] the specification + # + def self.from_string(spec_contents, path, subspec_name = nil) + path = Pathname.new(path).expand_path + spec = nil + case path.extname + when '.podspec' + Dir.chdir(path.parent.directory? ? path.parent : Dir.pwd) do + spec = ::Pod._eval_podspec(spec_contents, path) + unless spec.is_a?(Specification) + raise Informative, "Invalid podspec file at path `#{path}`." + end + end + when '.json' + spec = Specification.from_json(spec_contents, path) + else + raise Informative, "Unsupported specification format `#{path.extname}` for spec at `#{path}`." + end + + spec.defined_in_file = path + spec.subspec_by_name(subspec_name, true) + end + + # Sets the path of the `podspec` file used to load the specification. + # + # @param [String] file + # the `podspec` file. + # + # @return [void] + # + # @visibility private + # + def defined_in_file=(file) + unless root? + raise StandardError, 'Defined in file can be set only for root specs.' + end + @defined_in_file = file + end + + # Sets the name of the `podspec`. + # + # @param [String] name + # the `podspec` name. + # + # @return [void] + # + # @visibility private + # + def name=(name) + @hash_value = nil + attributes_hash['name'] = name + end + + # Sets the version of the `podspec`. + # + # @param [String] version + # the `podspec` version. + # + # @return [void] + # + # @visibility private + # + def version=(version) + @hash_value = nil + store_attribute(:version, version) + @version = nil + end + + # @!group Validation + + # Validates the cocoapods_version in the specification against the current version of Core. + # It will raise an Informative error if the version is not satisfied. + # + def validate_cocoapods_version + unless cocoapods_version.satisfied_by?(Version.create(CORE_VERSION)) + raise Informative, "`#{name}` requires CocoaPods version `#{cocoapods_version}`, " \ + "which is not satisfied by your current version, `#{CORE_VERSION}`." + end + end + end + + #---------------------------------------------------------------------------# + + # @visibility private + # + # Evaluates the given string in the namespace of the Pod module. + # + # @param [String] string + # The string containing the Ruby description of the Object to + # evaluate. + # + # @param [Pathname] path + # The path where the object to evaluate is stored. + # + # @return [Object] it can return any object but, is expected to be called on + # `podspec` files that should return a #{Specification}. 
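+ #
+ # @example (an illustrative sketch; `Pod::Spec` is the DSL alias for
+ # `Pod::Specification`)
+ #
+ # Pod._eval_podspec("Pod::Spec.new { |s| s.name = 'Demo' }", Pathname('Demo.podspec'))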
+  #
+  #
+  def self._eval_podspec(string, path)
+    # rubocop:disable Security/Eval
+    eval(string, nil, path.to_s)
+    # rubocop:enable Security/Eval
+
+  # rubocop:disable Lint/RescueException
+  rescue Exception => e
+    # rubocop:enable Lint/RescueException
+    message = "Invalid `#{path.basename}` file: #{e.message}"
+    raise DSLError.new(message, path, e, string)
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/consumer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/consumer.rb
new file mode 100644
index 0000000..e469504
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/consumer.rb
@@ -0,0 +1,515 @@
+require 'cocoapods-core/specification/root_attribute_accessors'
+
+module Pod
+  class Specification
+    # Allows convenient programmatic access to a Specification.
+    #
+    # It takes care of:
+    #
+    # - standardizing the attributes
+    # - handling multi-platform values
+    # - handling default values
+    # - handling automatic container wrapping of values
+    # - handling inherited values
+    #
+    # This class allows storing the values of the attributes of a
+    # Specification as specified in the DSL. The benefits are reduced
+    # reliance on metaprogramming to access the attributes and the
+    # possibility of serializing a specification back exactly as defined in
+    # a file.
+    #
+    class Consumer
+      # @return [Specification] The specification to consume.
+      #
+      attr_reader :spec
+
+      # @return [Symbol] The name of the platform for which the specification
+      #         needs to be consumed.
+      #
+      attr_reader :platform_name
+
+      # @param  [Specification] spec @see spec
+      # @param  [Symbol, Platform] platform
+      #         The platform for which the specification needs to be consumed.
+      #
+      def initialize(spec, platform)
+        @spec = spec
+        @platform_name = platform.is_a?(Symbol) ? platform : platform.name
+
+        unless spec.supported_on_platform?(platform)
+          raise StandardError, "#{self} is not compatible with #{platform}."
+        end
+      end
+
+      # Creates a method to access the contents of the attribute.
+      #
+      # @param  [Symbol] name
+      #         the name of the attribute.
+      #
+      # @macro  [attach]
+      #         @!method $1
+      #
+      def self.spec_attr_accessor(name)
+        define_method(name) do
+          value_for_attribute(name)
+        end
+      end
+
+      DSL::RootAttributesAccessors.instance_methods.each do |root_accessor|
+        define_method(root_accessor) do
+          spec.root.send(root_accessor)
+        end
+      end
+
+      #-----------------------------------------------------------------------#
+
+      # @!group Regular attributes
+
+      # @return [String] The name of the specification.
+      #
+      spec_attr_accessor :name
+
+      # @return [Boolean] Whether the source files of the specification
+      #         require compilation with ARC.
+      #
+      spec_attr_accessor :requires_arc
+      alias_method :requires_arc?, :requires_arc
+
+      # @return [Array<String>] A list of frameworks that the user’s target
+      #         needs to link against.
+      #
+      spec_attr_accessor :frameworks
+
+      # @return [Array<String>] A list of frameworks that the user’s target
+      #         needs to **weakly** link against.
+      #
+      spec_attr_accessor :weak_frameworks
+
+      # @return [Array<String>] A list of libraries that the user’s target
+      #         needs to link against.
+      #
+      spec_attr_accessor :libraries
+
+      # @return [Array<String>] the list of compiler flags needed by the
+      #         specification files.
+      #
+      spec_attr_accessor :compiler_flags
+
+      # @return [Hash{String => String}] the xcconfig flags for the current
+      #         specification for the pod target.
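+      #
+      #   For example, a spec setting both the legacy `xcconfig` attribute
+      #   and `pod_target_xcconfig` (hypothetical values) yields the merged
+      #   hash:
+      #
+      #     s.xcconfig            = { 'OTHER_LDFLAGS' => '-lObjC' }
+      #     s.pod_target_xcconfig = { 'SWIFT_VERSION' => '5.0' }
+      #     # consumer.pod_target_xcconfig
+      #     #=> { 'OTHER_LDFLAGS' => '-lObjC', 'SWIFT_VERSION' => '5.0' }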
+ # + def pod_target_xcconfig + attr = Specification::DSL.attributes[:pod_target_xcconfig] + merge_values(attr, value_for_attribute(:xcconfig), value_for_attribute(:pod_target_xcconfig)) + end + + # @return [Hash{String => String}] the xcconfig flags for the current + # specification for the user target. + # + def user_target_xcconfig + attr = Specification::DSL.attributes[:user_target_xcconfig] + merge_values(attr, value_for_attribute(:xcconfig), value_for_attribute(:user_target_xcconfig)) + end + + # @return [Hash{String => String}] the Info.plist values for the current specification + # + spec_attr_accessor :info_plist + + # @return [String] The contents of the prefix header. + # + spec_attr_accessor :prefix_header_contents + + # @return [String] The path of the prefix header file. + # + spec_attr_accessor :prefix_header_file + + # @return [String] the module name. + # + spec_attr_accessor :module_name + + # @return [String] the path of the module map file. + # + spec_attr_accessor :module_map + + # @return [String] the headers directory. + # + spec_attr_accessor :header_dir + + # @return [String] the directory from where to preserve the headers + # namespacing. + # + spec_attr_accessor :header_mappings_dir + + #-----------------------------------------------------------------------# + + # @!group Test Support + + # @return [Boolean] Whether this test specification requires an app host. + # + spec_attr_accessor :requires_app_host + alias_method :requires_app_host?, :requires_app_host + + # @return [String] Name of the app host this spec requires + # + spec_attr_accessor :app_host_name + + # @return [Symbol] the test type supported by this specification. + # + spec_attr_accessor :test_type + + #-----------------------------------------------------------------------# + + # @!group File patterns + + # @return [Array] the source files of the Pod. + # + spec_attr_accessor :source_files + + # @return [Array] the public headers of the Pod. + # + spec_attr_accessor :public_header_files + + # @return [Array] the project headers of the Pod. + # + spec_attr_accessor :project_header_files + + # @return [Array] the private headers of the Pod. + # + spec_attr_accessor :private_header_files + + # @return [Array] The paths of the framework bundles shipped with + # the Pod. + # + spec_attr_accessor :vendored_frameworks + + # @return [Array] The paths of the libraries shipped with the + # Pod. + # + spec_attr_accessor :vendored_libraries + + # @return [Hash{String => Array}] hash where the keys are the tags of + # the on demand resources and the values are their relative file + # patterns. + # + spec_attr_accessor :on_demand_resources + + # @return [Hash{String=>String}]] hash where the keys are the names of + # the resource bundles and the values are their relative file + # patterns. + # + spec_attr_accessor :resource_bundles + + # @return [ArrayString}>] An array of hashes where each hash + # represents a script phase. + # + spec_attr_accessor :script_phases + + # @return [Hash] A hash that contains the scheme configuration. + # + spec_attr_accessor :scheme + + # @return [Array] A hash where the key represents the + # paths of the resources to copy and the values the paths of + # the resources that should be copied. + # + spec_attr_accessor :resources + + # @return [Array] The file patterns that the + # Pod should ignore. + # + spec_attr_accessor :exclude_files + + # @return [Array] The paths that should be not + # cleaned. 
+ # + spec_attr_accessor :preserve_paths + + #-----------------------------------------------------------------------# + + # @return [Array] the dependencies on other Pods. + # + def dependencies + value = value_for_attribute(:dependencies) + value.map do |name, requirements| + Dependency.new(name, requirements) + end + end + + # Raw values need to be prepared as soon as they are read so they can be + # safely merged to support multi platform attributes and inheritance + #-----------------------------------------------------------------------# + + # Returns the value for the attribute with the given name for the + # specification. It takes into account inheritance, multi-platform + # attributes and default values. + # + # @param [Symbol] attr_name + # The name of the attribute. + # + # @return [String, Array, Hash] the value for the attribute. + # + def value_for_attribute(attr_name) + attr = Specification::DSL.attributes[attr_name] + value = value_with_inheritance(spec, attr) + value = attr.default(platform_name) if value.nil? + value = attr.container.new if value.nil? && attr.container + value + end + + # Returns the value of a given attribute taking into account inheritance. + # + # @param [Specification] the_spec + # the specification for which the value is needed. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @return [String, Array, Hash] the value for the attribute. + # + def value_with_inheritance(the_spec, attr) + value = raw_value_for_attribute(the_spec, attr) + if the_spec.root? || !attr.inherited? + return value + end + + parent_value = value_with_inheritance(the_spec.parent, attr) + merge_values(attr, parent_value, value) + end + + # Returns the value of a given attribute taking into account multi + # platform values. + # + # @param [Specification] the_spec + # the specification for which the value is needed. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @return [String, Array, Hash] The value for an attribute. + # + def raw_value_for_attribute(the_spec, attr) + value = the_spec.attributes_hash[attr.name.to_s] + value = prepare_value(attr, value) + + if attr.multi_platform? + if platform_hash = the_spec.attributes_hash[platform_name.to_s] + platform_value = platform_hash[attr.name.to_s] + platform_value = prepare_value(attr, platform_value) + value = merge_values(attr, value, platform_value) + end + end + value + end + + # Merges the values of an attribute, either because the attribute is + # multi platform or because it is inherited. + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @param [String, Array, Hash] existing_value + # the current value (the value of the parent or non-multiplatform + # value). + # + # @param [String, Array, Hash] new_value + # the value to append (the value of the spec or the + # multi-platform value). + # + # @return [String, Array, Hash] The merged value. + # + def merge_values(attr, existing_value, new_value) + return existing_value if new_value.nil? + return new_value if existing_value.nil? + + if attr.types.include?(TrueClass) + new_value.nil? ? 
existing_value : new_value + elsif attr.container == Array + r = [*existing_value] + [*new_value] + r.compact + elsif attr.container == Hash + existing_value.merge(new_value) do |_, old, new| + merge_hash_value(attr, old, new) + end + else + new_value + end + end + + # Wraps a value in an Array if needed and calls the prepare hook to + # allow further customization of a value before storing it in the + # instance variable. + # + # @note Only array containers are wrapped. To automatically wrap + # values for attributes with hash containers a prepare hook + # should be used. + # + # @return [Object] the customized value of the original one if no + # prepare hook was defined. + # + def prepare_value(attr, value) + if attr.container == Array + value = if value.is_a?(Hash) + [value] + else + [*value].compact + end + end + + hook_name = prepare_hook_name(attr) + if self.respond_to?(hook_name, true) + send(hook_name, value) + else + value + end + end + + private + + # Merges two values in a hash together based on the needs of the attribute + # + # @param [Specification::DSL::Attribute] attr + # the attribute for which that value is needed. + # + # @param [Object] old the value from the original hash + # + # @param [Object] new the value from the new hash + # + # @return [Object] the merged value + # + def merge_hash_value(attr, old, new) + case attr.name + when :info_plist + new + when ->(name) { spec.non_library_specification? && [:pod_target_xcconfig, :user_target_xcconfig, :xcconfig].include?(name) } + new + else + if new.is_a?(Array) || old.is_a?(Array) + r = Array(old) + Array(new) + r.compact + else + old + ' ' + new + end + end + end + + # @!group Preparing Values + #-----------------------------------------------------------------------# + + # @return [String] the name of the prepare hook for this attribute. + # + # @note The hook is called after the value has been wrapped in an + # array (if needed according to the container) but before + # validation. + # + def prepare_hook_name(attr) + "_prepare_#{attr.name}" + end + + # Converts the prefix header to a string if specified as an array. + # + # @param [String, Array] value. + # The value of the attribute as specified by the user. + # + # @return [String] the prefix header. + # + def _prepare_prefix_header_contents(value) + if value + value = value.join("\n") if value.is_a?(Array) + value.strip_heredoc.chomp + end + end + + # Converts the test type value from a string to a symbol. + # + # @param [String, Symbol] value. + # The value of the test type attributed as specified by the user. + # + # @return [Symbol] the test type as a symbol. + # + def _prepare_test_type(value) + if value + value.to_sym + end + end + + # Converts the array of hashes (script phases) where keys are strings into symbols. + # + # @param [ArrayString}>] value. + # The value of the attribute as specified by the user. + # + # @return [ArrayString}>] the script phases array with symbols for each hash instead of strings. + # + def _prepare_script_phases(value) + if value + value.map do |script_phase| + if script_phase.is_a?(Hash) + phase = Specification.convert_keys_to_symbol(script_phase) + phase[:execution_position] = if phase.key?(:execution_position) + phase[:execution_position].to_sym + else + :any + end + phase + end + end.compact + end + end + + # Converts the a scheme where keys are strings into symbols. + # + # @param [Hash] value. + # The value of the attribute as specified by the user. 
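+      #
+      #   For example (hypothetical input), only the top-level keys are
+      #   symbolised; nested hashes keep their string keys:
+      #
+      #     { 'code_coverage' => true, 'environment_variables' => { 'K' => 'V' } }
+      #     #=> { :code_coverage => true, :environment_variables => { 'K' => 'V' } }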
+ # + # @return [Hash] the scheme with symbols as keys instead of strings or `nil` if the value is not a hash. + # + def _prepare_scheme(value) + Specification.convert_keys_to_symbol(value, :recursive => false) if value && value.is_a?(Hash) + end + + # Ensures that the file patterns of the on demand resources are contained in + # an array. + # + # @param [String, Array, Hash] value. + # The value of the attribute as specified by the user. + # + # @return [Hash] the on demand resources. + # + def _prepare_on_demand_resources(value) + result = {} + if value + value.each do |key, patterns| + case patterns + when String, Array + result[key] = { :paths => [*patterns].compact, :category => :download_on_demand } + when Hash + patterns = Specification.convert_keys_to_symbol(patterns, :recursive => false) + result[key] = { :paths => [*patterns[:paths]].compact, :category => patterns.fetch(:category, :download_on_demand).to_sym } + else + raise StandardError, "Unknown on demand resource value type `#{patterns}`." + end + end + end + result + end + + # Ensures that the file patterns of the resource bundles are contained in + # an array. + # + # @param [String, Array, Hash] value. + # The value of the attribute as specified by the user. + # + # @return [Hash] the resources. + # + def _prepare_resource_bundles(value) + result = {} + if value + value.each do |key, patterns| + result[key] = [*patterns].compact + end + end + result + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl.rb new file mode 100644 index 0000000..e3364b9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl.rb @@ -0,0 +1,1895 @@ +require 'cocoapods-core/specification/dsl/attribute_support' +require 'cocoapods-core/specification/dsl/attribute' +require 'cocoapods-core/specification/dsl/platform_proxy' + +module Pod + class Specification + #- NOTE ------------------------------------------------------------------# + # The order of the methods defined in this file and the order of the + # methods is relevant for the documentation generated on the + # CocoaPods/cocoapods.github.com repository. + #-------------------------------------------------------------------------# + + # A specification describes a version of Pod library. It includes details + # about where the source should be fetched from, what files to use, the + # build settings to apply, and other general metadata such as its name, + # version, and description. + # + # --- + # + # A stub specification file can be generated by the [pod spec + # create](http://guides.cocoapods.org/terminal/commands.html#pod_spec_create) command. + # + # --- + # + # The specification DSL provides great flexibility and dynamism. Moreover, + # the DSL adopts the + # [convention over configuration](http://en.wikipedia.org/wiki/Convention_over_configuration) + # and thus it can be very simple: + # + # Pod::Spec.new do |spec| + # spec.name = 'Reachability' + # spec.version = '3.1.0' + # spec.license = { :type => 'BSD' } + # spec.homepage = 'https://github.com/tonymillion/Reachability' + # spec.authors = { 'Tony Million' => 'tonymillion@gmail.com' } + # spec.summary = 'ARC and GCD Compatible Reachability Class for iOS and OS X.' 
+ # spec.source = { :git => 'https://github.com/tonymillion/Reachability.git', :tag => 'v3.1.0' } + # spec.source_files = 'Reachability.{h,m}' + # spec.framework = 'SystemConfiguration' + # end + # + # Or it can be quite detailed: + # + # Pod::Spec.new do |spec| + # spec.name = 'Reachability' + # spec.version = '3.1.0' + # spec.license = { :type => 'BSD' } + # spec.homepage = 'https://github.com/tonymillion/Reachability' + # spec.authors = { 'Tony Million' => 'tonymillion@gmail.com' } + # spec.summary = 'ARC and GCD Compatible Reachability Class for iOS and OS X.' + # spec.source = { :git => 'https://github.com/tonymillion/Reachability.git', :tag => 'v3.1.0' } + # spec.module_name = 'Rich' + # spec.swift_version = '4.0' + # + # spec.ios.deployment_target = '9.0' + # spec.osx.deployment_target = '10.10' + # + # spec.source_files = 'Reachability/common/*.swift' + # spec.ios.source_files = 'Reachability/ios/*.swift', 'Reachability/extensions/*.swift' + # spec.osx.source_files = 'Reachability/osx/*.swift' + # + # spec.framework = 'SystemConfiguration' + # spec.ios.framework = 'UIKit' + # spec.osx.framework = 'AppKit' + # + # spec.dependency 'SomeOtherPod' + # end + # + module DSL + extend Pod::Specification::DSL::AttributeSupport + + # Deprecations must be required after include AttributeSupport + require 'cocoapods-core/specification/dsl/deprecations' + + #-----------------------------------------------------------------------# + + # @!group Root specification + # + # A ‘root’ specification stores the information about the specific + # version of a library. + # + # The attributes in this group can only be written to on the ‘root’ + # specification, **not** on the ‘sub-specifications’. + # + # --- + # + # The attributes listed in this group are the only one which are + # required by a podspec. + # + # The attributes of the other groups are offered to refine the podspec + # and follow a convention over configuration approach. A root + # specification can describe these attributes either directly of + # through ‘[sub-specifications](#subspec)’. + + #-----------------------------------------------------------------------# + + # @!method name=(name) + # + # The name of the Pod. + # + # @example + # + # spec.name = 'AFNetworking' + # + # @param [String] name + # the name of the pod. + # + attribute :name, + :required => true, + :inherited => false, + :multi_platform => false + + #------------------# + + # @!method version=(version) + # + # The version of the Pod. CocoaPods follows + # [semantic versioning](http://semver.org). + # + # @example + # + # spec.version = '0.0.1' + # + # @param [String] version + # the version of the Pod. + # + root_attribute :version, + :required => true + + #------------------# + + # @!method swift_versions=(version) + # + # The versions of Swift that the specification supports. A version of '4' will be treated as + # '4.0' by CocoaPods and not '4.1' or '4.2'. + # + # **Note** The Swift compiler mostly accepts major versions and sometimes will honor minor versions. + # While CocoaPods allows specifying a minor or patch version it might not be honored fully by the Swift compiler. 
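+      #
+      #   For instance, `spec.swift_version = '4'` is padded to `'4.0'`; it
+      #   is not interpreted as "any Swift 4.x".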
+ # + # @example + # + # spec.swift_versions = ['3.0'] + # + # @example + # + # spec.swift_versions = ['3.0', '4.0', '4.2'] + # + # @example + # + # spec.swift_version = '3.0' + # + # @example + # + # spec.swift_version = '3.0', '4.0' + # + # @param [String, Array] swift_versions + # + root_attribute :swift_versions, + :container => Array, + :singularize => true + + #-----------------------------------------------------------------------# + + # @!method cocoapods_version=(cocoapods_version) + # + # The version of CocoaPods that the specification supports. + # + # @example + # + # spec.cocoapods_version = '>= 0.36' + # + # @param [String] cocoapods_version + # the CocoaPods version that the specification supports. + # CocoaPods follows [semantic versioning](http://semver.org). + # + root_attribute :cocoapods_version + + #------------------# + + # @!method authors=(authors) + # + # The name and email addresses of the library maintainers, not the + # Podspec maintainer. + # + # @example + # + # spec.author = 'Darth Vader' + # + # @example + # + # spec.authors = 'Darth Vader', 'Wookiee' + # + # @example + # + # spec.authors = { 'Darth Vader' => 'darthvader@darkside.com', + # 'Wookiee' => 'wookiee@aggrrttaaggrrt.com' } + # + # @param [String, Hash{String=>String}] authors + # the list of the authors of the library and their emails. + # + root_attribute :authors, + :types => [String, Array, Hash], + :container => Hash, + :required => true, + :singularize => true + + #------------------# + + # @!method social_media_url=(social_media_url) + # + # The URL for the social media contact of the Pod, CocoaPods web + # services can use this. + # + # For example, the @CocoaPodsFeed notifications will include the + # Twitter handle (shortening the description) if the URL is relative to + # Twitter. This does **not** necessarily have to be a Twitter URL, but + # only those are included in the Twitter @CocoaPodsFeed notifications. + # + # @example + # + # spec.social_media_url = 'https://twitter.com/cocoapods' + # + # @example + # + # spec.social_media_url = 'https://groups.google.com/forum/#!forum/cocoapods' + # + # @param [String] social_media_url + # the social media URL. + # + root_attribute :social_media_url + + #------------------# + + # The keys accepted by the license attribute. + # + LICENSE_KEYS = [:type, :file, :text].freeze + + # @!method license=(license) + # + # The license of the Pod. + # + # --- + # + # Unless the source contains a file named `LICENSE.*` or `LICENCE.*`, + # the path of the license file **or** the integral text of the notice + # commonly used for the license type must be specified. + # If a license file is specified, it either must be without a file + # extensions or be one of `txt`, `md`, or `markdown`. + # + # This information is used by CocoaPods to generate acknowledgement + # files (markdown and plist) which can be used in the acknowledgements + # section of the final application. + # + # @example + # + # spec.license = 'MIT' + # + # @example + # + # spec.license = { :type => 'MIT', :file => 'MIT-LICENSE.txt' } + # + # @example + # + # spec.license = { :type => 'MIT', :text => <<-LICENSE + # Copyright 2012 + # Permission is granted to... + # LICENSE + # } + # + # @param [String] license + # The type of the license + # + # @overload license=(license) + # @param [String, Hash{Symbol=>String}] license + # @option license [String] :type license type + # @option license [String] :file file containing full license text. 
Supports txt, md, and markdown + # @option license [String] :text full license text + # + root_attribute :license, + :container => Hash, + :keys => LICENSE_KEYS, + :required => true + + #------------------# + + # @!method homepage=(homepage) + # + # The URL of the homepage of the Pod. + # + # @example + # + # spec.homepage = 'http://www.example.com' + # + # @param [String] homepage + # the URL of the homepage of the Pod. + # + root_attribute :homepage, + :required => true + + #------------------# + + # @!method readme=(readme) + # + # The URL for the README markdown file for this pod version. + # + # @example + # + # spec.readme = 'https://www.example.com/Pod-1.5-README.md' + # + # @param [String] readme + # the readme markdown URL. + # + root_attribute :readme + + #------------------# + + # @!method changelog=(changelog) + # + # The URL for the CHANGELOG markdown file for this pod version. + # + # @example + # + # spec.changelog = 'https://www.example.com/Pod-1.5-CHANGELOG.md' + # + # @param [String] changelog + # the changelog markdown URL. + # + root_attribute :changelog + + #------------------# + + # The keys accepted by the hash of the source attribute. + # + SOURCE_KEYS = { + :git => [:tag, :branch, :commit, :submodules].freeze, + :svn => [:folder, :tag, :revision].freeze, + :hg => [:revision].freeze, + :http => [:flatten, :type, :sha256, :sha1, :headers].freeze, + }.freeze + + # @!method source=(source) + # + # The location from where the library should be retrieved. + # + # @example Specifying a Git source with a tag. This is how most OSS Podspecs work. + # + # spec.source = { :git => 'https://github.com/AFNetworking/AFNetworking.git', + # :tag => spec.version.to_s } + # + # @example Using a tag prefixed with 'v' and submodules. + # + # spec.source = { :git => 'https://github.com/typhoon-framework/Typhoon.git', + # :tag => "v#{spec.version}", :submodules => true } + # + # @example Using Subversion with a tag. + # + # spec.source = { :svn => 'http://svn.code.sf.net/p/polyclipping/code', :tag => '4.8.8' } + # + # @example Using Mercurial with the same revision as the spec's semantic version string. + # + # spec.source = { :hg => 'https://bitbucket.org/dcutting/hyperbek', :revision => "#{s.version}" } + # + # @example Using HTTP to download a compressed file of the code. It supports zip, tgz, bz2, txz and tar. + # + # spec.source = { :http => 'http://dev.wechatapp.com/download/sdk/WeChat_SDK_iOS_en.zip' } + # + # @example Using HTTP to download a file using a hash to verify the download. It supports sha1 and sha256. + # + # spec.source = { :http => 'http://dev.wechatapp.com/download/sdk/WeChat_SDK_iOS_en.zip', + # :sha1 => '7e21857fe11a511f472cfd7cfa2d979bd7ab7d96' } + # + # + # @overload source=(git) + # @param [Hash] git + # @option git [String] :git git source URI + # @option git [String] :tag version tag + # @option git [Boolean] :submodules Whether to checkout submodules + # @option git [String] :branch branch name + # @option git [String] :commit commit hash + # + # @overload source=(svn) + # @param [Hash] svn + # @option svn [String] :svn svn source URI + # @option svn [String] :tag version tag + # @option svn [String] :folder folder + # @option svn [String] :revision revision + # + # @overload source=(hg) + # @param [Hash] hg + # @option hg [String] :hg mercurial source URI + # @option hg [String] :revision revision + # + # @overload source=(http) + # @param [Hash] http + # @option http [String] :http compressed source URL + # @option http [String] :type file type. 
Supports zip, tgz, bz2, txz and tar
+      #   @option  http [String] :sha1 SHA hash. Supports SHA1 and SHA256
+      #
+      root_attribute :source,
+                     :container => Hash,
+                     :keys => SOURCE_KEYS,
+                     :required => true
+
+      #------------------#
+
+      # @!method summary=(summary)
+      #
+      #   A short (maximum 140 characters) description of the Pod.
+      #
+      #   ---
+      #
+      #   The description should be short, yet informative. It represents the
+      #   tag line of the Pod and there is no need to specify that a Pod is a
+      #   library (they always are).
+      #
+      #   The summary is expected to be properly capitalised and to contain
+      #   the correct punctuation.
+      #
+      #   @example
+      #
+      #     spec.summary = 'Computes the meaning of life.'
+      #
+      #   @param  [String] summary
+      #           A short description of the Pod.
+      #
+      root_attribute :summary,
+                     :required => true
+
+      #------------------#
+
+      # @!method description=(description)
+      #
+      #   A description of the Pod more detailed than the summary.
+      #
+      #   @example
+      #
+      #     spec.description = <<-DESC
+      #       Computes the meaning of life.
+      #       Features:
+      #       1. Is self aware
+      #       ...
+      #       42. Likes candies.
+      #     DESC
+      #
+      #   @param  [String] description
+      #           A longer description of the Pod.
+      #
+      root_attribute :description
+
+      #------------------#
+
+      # @!method screenshots=(screenshots)
+      #
+      #   A list of URLs to images showcasing the Pod. Intended for
+      #   UI-oriented libraries. CocoaPods recommends the usage of the `gif`
+      #   format.
+      #
+      #   @example
+      #
+      #     spec.screenshot = 'http://dl.dropbox.com/u/378729/MBProgressHUD/1.png'
+      #
+      #   @example
+      #
+      #     spec.screenshots = [ 'http://dl.dropbox.com/u/378729/MBProgressHUD/1.png',
+      #                          'http://dl.dropbox.com/u/378729/MBProgressHUD/2.png' ]
+      #
+      #   @param  [String] screenshots
+      #           A URL for the screenshot of the Pod.
+      #
+      root_attribute :screenshots,
+                     :singularize => true,
+                     :container => Array
+
+      #------------------#
+
+      # @!method documentation_url=(documentation_url)
+      #
+      #   An optional URL for the documentation of the Pod which will be
+      #   honoured by CocoaPods web properties. Leaving it blank will default
+      #   to a CocoaDocs generated URL for your library.
+      #
+      #   @example
+      #
+      #     spec.documentation_url = 'http://www.example.com/docs.html'
+      #
+      #   @param  [String] documentation_url
+      #           The link of the web documentation of the Pod.
+      #
+      root_attribute :documentation_url
+
+      #------------------#
+
+      # @!method prepare_command=(command)
+      #
+      #   A bash script that will be executed after the Pod is downloaded.
+      #   This command can be used to create, delete and modify any file
+      #   downloaded and will be run before any paths for other file
+      #   attributes of the specification are collected.
+      #
+      #   This command is executed before the Pod is cleaned and before the
+      #   Pods project is created. The working directory is the root of the
+      #   Pod.
+      #
+      #   If the pod is installed with the `:path` option this command will
+      #   not be executed.
+      #
+      #   @example
+      #
+      #     spec.prepare_command = 'ruby build_files.rb'
+      #
+      #   @example
+      #
+      #     spec.prepare_command = <<-CMD
+      #       sed -i 's/MyNameSpacedHeader/Header/g' ./**/*.h
+      #       sed -i 's/MyNameOtherSpacedHeader/OtherHeader/g' ./**/*.h
+      #     CMD
+      #
+      #   @param  [String] command
+      #           the prepare command of the pod.
+      #
+      root_attribute :prepare_command
+
+      #------------------#
+
+      # @!method static_framework=(flag)
+      #
+      #   Indicates that, if use_frameworks! is specified, the
+      #   pod should include a static library framework.
+      #
+      #   @example
+      #
+      #     spec.static_framework = true
+      #
+      #   @param [Boolean] flag
+      #          Indicates that, if use_frameworks!
is specified, the + # pod should include a static library framework. + # + root_attribute :static_framework, + :types => [TrueClass, FalseClass], + :default_value => false + + #------------------# + + # @!method deprecated=(flag) + # + # Whether the library has been deprecated. + # + # @example + # + # spec.deprecated = true + # + # @param [Boolean] flag + # whether the library has been deprecated. + # + root_attribute :deprecated, + :types => [TrueClass, FalseClass], + :default_value => false + + # @!method deprecated_in_favor_of=(deprecated_in_favor_of) + # + # The name of the Pod that this one has been deprecated in favor of. + # + # @example + # + # spec.deprecated_in_favor_of = 'NewMoreAwesomePod' + # + # @param [String] deprecated_in_favor_of + # the name of the Pod that this one has been deprecated in + # favor of. + # + root_attribute :deprecated_in_favor_of + + #-----------------------------------------------------------------------# + + # @!group Platform + # + # A specification should indicate the platform and the correspondent + # deployment targets on which the library is supported. + # + # If not defined in a subspec the attributes of this group inherit the + # value of the parent. + + #-----------------------------------------------------------------------# + + # The names of the platforms supported by the specification class. + # + PLATFORMS = [:osx, :ios, :tvos, :watchos].freeze + + # @todo This currently is not used in the Ruby DSL. + # + attribute :platforms, + :container => Hash, + :keys => PLATFORMS, + :multi_platform => false, + :inherited => true + + # The platform on which this Pod is supported. Leaving this blank + # means the Pod is supported on all platforms. When supporting multiple + # platforms you should use deployment_target below instead. + # + # @example + # + # spec.platform = :osx, '10.8' + # + # @example + # + # spec.platform = :ios + # + # @example + # + # spec.platform = :osx + # + # @param [Array] args + # A tuple where the first value is the name of the platform, + # (either `:ios` or `:osx`) and the second is the deployment + # target. + # + def platform=(args) + name, deployment_target = args + name = :osx if name.to_s == 'macos' + attributes_hash['platforms'] = if name + { name.to_s => deployment_target } + else + {} + end + end + + #------------------# + + # The minimum deployment targets of the supported platforms. + # + # As opposed to the `platform` attribute, the `deployment_target` + # attribute allows to specify multiple platforms on which this pod + # is supported — specifying a different deployment target for each. + # + # @example + # + # spec.ios.deployment_target = '6.0' + # + # @example + # + # spec.osx.deployment_target = '10.8' + # + # @param [String] _args + # The deployment target of the platform. + # + def deployment_target=(*_args) + raise Informative, 'The deployment target can be declared only per ' \ + 'platform.' + end + + #-----------------------------------------------------------------------# + + # @!group Build settings + # + # In this group are listed the attributes related to the configuration + # of the build environment that should be used to build the library. + # + # If not defined in a subspec the attributes of this group inherit the + # value of the parent. + + #-----------------------------------------------------------------------# + + # @todo This currently is not used in the Ruby DSL. 
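+      #
+      #   (Internally, `dependency` below stores its arguments in this hash,
+      #   e.g. `attributes_hash['dependencies'] #=> { 'AFNetworking' => ['~> 1.0'] }`
+      #   for a hypothetical pod name and requirement.)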
+ # + attribute :dependencies, + :container => Hash, + :inherited => true + + # Any dependency on other Pods or to a ‘sub-specification’. + # + # --- + # + # Dependencies can specify versions requirements. The use of the optimistic + # version indicator `~>` is recommended because it provides good + # control over the version without being too restrictive. For example, + # `~> 1.0.1` is equivalent to `>= 1.0.1` combined with `< 1.1`. Similarly, + # `~> 1.0` will match `1.0`, `1.0.1`, `1.1`, but will not upgrade to `2.0`. + # + # Pods with overly restrictive dependencies limit their compatibility with + # other Pods. + # + # @example + # spec.dependency 'AFNetworking', '~> 1.0' + # + # @example + # spec.dependency 'AFNetworking', '~> 1.0', :configurations => ['Debug'] + # + # @example + # spec.dependency 'AFNetworking', '~> 1.0', :configurations => :debug + # + # @example + # spec.dependency 'RestKit/CoreData', '~> 0.20.0' + # + # @example + # spec.ios.dependency 'MBProgressHUD', '~> 0.5' + # + def dependency(*args) + name, *version_requirements = args + if name == self.name + raise Informative, "A specification can't require itself as a " \ + 'subspec' + end + if @parent + composed_name = '' + @parent.name.split('/').each do |component| + composed_name << component + if name == composed_name + raise Informative, "A subspec can't require one of its " \ + 'parents specifications' + else + composed_name << '/' + end + end + end + + configurations_option = version_requirements.find { |option| option.is_a?(Hash) && option.key?(:configurations) } + whitelisted_configurations = if configurations_option + version_requirements.delete(configurations_option) + Array(configurations_option.delete(:configurations)).map { |c| c.to_s.downcase } + end + + dependency_options = version_requirements.reject { |req| req.is_a?(String) } + dependency_options.each do |dependency_option| + if dependency_option.is_a?(Hash) + if !dependency_option[:path].nil? + raise Informative, 'Podspecs cannot specify the source of dependencies. The `:path` option is not supported.'\ + ' `:path` can be used in the Podfile instead to override global dependencies.' + elsif !dependency_option[:git].nil? + raise Informative, 'Podspecs cannot specify the source of dependencies. The `:git` option is not supported.'\ + ' `:git` can be used in the Podfile instead to override global dependencies.' + end + end + + raise Informative, "Unsupported version requirements. #{version_requirements.inspect} is not valid." + end + + attributes_hash['dependencies'] ||= {} + attributes_hash['dependencies'][name] = version_requirements + + unless whitelisted_configurations.nil? + if (extras = whitelisted_configurations - %w(debug release)) && !extras.empty? + raise Informative, "Only `Debug` & `Release` are allowed under configurations for dependency on `#{name}`. " \ + "Found #{extras.map { |configuration| "`#{configuration}`" }.to_sentence}." + end + attributes_hash['configuration_pod_whitelist'] ||= {} + attributes_hash['configuration_pod_whitelist'][name] = whitelisted_configurations + end + end + + def dependency=(args) + joined = args.join('\', \'') + arguments = "\'#{joined}\'" + raise Informative, "Cannot assign value to `dependency`. Did you mean: `dependency #{arguments}`?" + end + + #------------------# + + # @!method info_plist=(info_plist) + # + # Key-Value pairs to add to the generated `Info.plist`. + # + # The values will be merged with the default values that + # CocoaPods generates, overriding any duplicates. 
+ # + # For library specs, the values will be merged into the generated Info.plist + # for libraries that are integrated using frameworks. It will have no effect + # for static libraries. + # + # Subspecs (other than app and test specs) are not supported. + # + # For app specs, the values will be merged into the application host's `Info.plist`. + # + # For test specs, the values will be merged into the test bundle's `Info.plist`. + # + # @example + # + # spec.info_plist = { + # 'CFBundleIdentifier' => 'com.myorg.MyLib', + # 'MY_VAR' => 'SOME_VALUE' + # } + # + # @param [Hash] info_plist + # The Info.plist values for the Pod. + # + attribute :info_plist, + :container => Hash, + :inherited => false + + #------------------# + + # @!method requires_arc=(flag) + # + # `requires_arc` allows you to specify which source_files use ARC. + # This can either be the files which support ARC, or true to indicate + # all of the source_files use ARC. + # + # Files which do not use ARC will have the `-fno-objc-arc` compiler + # flag. + # + # The default value of this attribute is `true`. + # + # @example + # + # spec.requires_arc = false + # + # @example + # + # spec.requires_arc = 'Classes/Arc' + # + # @example + # + # spec.requires_arc = ['Classes/*ARC.m', 'Classes/ARC.mm'] + # + # @param [Bool, String, Array] flag + # whether the source files require ARC. + # + attribute :requires_arc, + :types => [TrueClass, FalseClass, String, Array], + :file_patterns => true, + :default_value => true, + :inherited => true + + #------------------# + + # @!method frameworks=(*frameworks) + # + # A list of system frameworks that the user’s target needs to link + # against. + # + # @example + # + # spec.ios.framework = 'CFNetwork' + # + # @example + # + # spec.frameworks = 'QuartzCore', 'CoreData' + # + # @param [String, Array] frameworks + # A list of framework names. + # + attribute :frameworks, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method weak_frameworks=(*frameworks) + # + # A list of frameworks that the user’s target needs to **weakly** link + # against. + # + # @example + # + # spec.weak_framework = 'Twitter' + # + # @example + # + # spec.weak_frameworks = 'Twitter', 'SafariServices' + # + # @param [String, Array] weak_frameworks + # A list of frameworks names. + # + attribute :weak_frameworks, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method libraries=(*libraries) + # + # A list of system libraries that the user’s target (application) needs to + # link against. + # + # @example + # + # spec.ios.library = 'xml2' + # + # @example + # + # spec.libraries = 'xml2', 'z' + # + # @param [String, Array] libraries + # A list of library names. + # + attribute :libraries, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method compiler_flags=(flags) + # + # A list of flags which should be passed to the compiler. + # + # @example + # + # spec.compiler_flags = '-DOS_OBJECT_USE_OBJC=0', '-Wno-format' + # + # @param [String, Array] flags + # A list of flags. + # + attribute :compiler_flags, + :container => Array, + :singularize => true, + :inherited => true + + #------------------# + + # @!method pod_target_xcconfig=(value) + # + # Any flag to add to the final __private__ pod target xcconfig file. 
+ # + # @example + # + # spec.pod_target_xcconfig = { 'OTHER_LDFLAGS' => '-lObjC' } + # + # @param [Hash{String => String}] value + # Key-value pairs representing build settings. + # + attribute :pod_target_xcconfig, + :container => Hash, + :inherited => true + + # @!method user_target_xcconfig=(value) + # + # Specifies flags to add to the final aggregate target xcconfig file, + # which propagates to non-overridden and inheriting build settings to + # the integrated user targets. + # + # --- + # + # This attribute is __not recommended__ as Pods should not pollute the + # build settings of the user project and this can cause conflicts. + # + # Multiple definitions for build settings that take multiple values + # will be merged. The user is warned on conflicting definitions for + # custom build settings and build settings that take only one value. + # + # Typically clang compiler flags or precompiler macro definitions go + # in here if they are required when importing the pod in the user + # target. Note that, this influences not only the compiler view of the + # public interface of your pod, but also all other integrated pods + # alongside to yours. You should always prefer [`pod_target_xcconfig`]( + # http://guides.cocoapods.org/syntax/podspec.html#pod_target_xcconfig), + # which can contain the same settings, but only influence the + # toolchain when compiling your pod target. + # + # @example + # + # spec.user_target_xcconfig = { 'MY_SUBSPEC' => 'YES' } + # + # @param [Hash{String => String}] value + # Key-value pairs representing build settings. + # + attribute :user_target_xcconfig, + :container => Hash, + :inherited => true + + #------------------# + + # @!method prefix_header_contents=(content) + # + # Any content to inject in the prefix header of the pod project. + # + # --- + # + # This attribute is __not recommended__ as Pods should not pollute the + # prefix header of other libraries or of the user project. + # + # @example + # + # spec.prefix_header_contents = '#import ' + # + # @example + # + # spec.prefix_header_contents = '#import ', '#import ' + # + # @param [String] content + # The contents of the prefix header. + # + attribute :prefix_header_contents, + :types => [Array, String], + :inherited => true + + #------------------# + + # @!method prefix_header_file=(path) + # + # A path to a prefix header file to inject in the prefix header of the + # pod project. + # `false` indicates that the default CocoaPods prefix header should not + # be generated. + # `true` is the default and indicates that the default CocoaPods prefix + # header should be generated. + # + # --- + # + # The file path options is __not recommended__ as Pods should not + # pollute the prefix header of other libraries or of the user project. + # + # + # @example + # + # spec.prefix_header_file = 'iphone/include/prefix.pch' + # + # @example + # + # spec.prefix_header_file = false + # + # @param [Bool, String] path + # The path to the prefix header file or whether to disable + # prefix_header generation. + # + attribute :prefix_header_file, + :types => [TrueClass, FalseClass, String], + :inherited => true + + #------------------# + + # @!method module_name=(name) + # + # The name to use for the framework / clang module which + # will be generated for this specification instead of the + # default (header_dir if set, otherwise the specification + # name). + # + # @example + # + # spec.module_name = 'Three20' + # + # @param [String] name + # the module name. 
+ # + root_attribute :module_name + + #------------------# + + # @!method header_dir=(dir) + # + # The directory where to store the headers files so they don't break + # includes. + # + # @example + # + # spec.header_dir = 'Three20Core' + # + # @param [String] dir + # the headers directory. + # + attribute :header_dir, + :inherited => true + + #------------------# + + # @!method header_mappings_dir=(dir) + # + # A directory from where to preserve the folder structure for the + # headers files. If not provided the headers files are flattened. + # + # @example + # + # spec.header_mappings_dir = 'src/include' + # + # @param [String] dir + # the directory from where to preserve the headers namespacing. + # + attribute :header_mappings_dir, + :inherited => true + + #------------------# + + SCRIPT_PHASE_REQUIRED_KEYS = [:name, :script].freeze + + SCRIPT_PHASE_OPTIONAL_KEYS = [:shell_path, :input_files, :output_files, :input_file_lists, :output_file_lists, + :show_env_vars_in_log, :execution_position, :dependency_file].freeze + + EXECUTION_POSITION_KEYS = [:before_compile, :after_compile, :before_headers, :after_headers, :any].freeze + + ALL_SCRIPT_PHASE_KEYS = (SCRIPT_PHASE_REQUIRED_KEYS + SCRIPT_PHASE_OPTIONAL_KEYS).freeze + + # @!method script_phases=(*script_phases) + # + # This attribute allows to define a script phase to execute as part of compilation of the Pod. + # Unlike a prepare command, script phases execute as part of `xcodebuild` they can also utilize all environment + # variables that are set during compilation. + # + # A Pod can provide multiple script phases to execute and they will be added in the order they were + # declared and after taking into consideration their execution position setting. + # + # **Note** In order to provide visibility and awareness of the contents of all script phases, + # a warning will be presented to the user upon installing your pod if it includes any script phases. + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"' } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', :execution_position => :before_compile } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', + # :input_files => ['/path/to/input_file.txt'], :output_files => ['/path/to/output_file.txt'] + # } + # + # @example + # + # spec.script_phase = { :name => 'Hello World', :script => 'echo "Hello World"', + # :input_file_lists => ['/path/to/input_files.xcfilelist'], :output_file_lists => ['/path/to/output_files.xcfilelist'] + # } + # + # @example + # + # spec.script_phases = [ + # { :name => 'Hello World', :script => 'echo "Hello World"' }, + # { :name => 'Hello Ruby World', :script => 'puts "Hello World"', :shell_path => '/usr/bin/ruby' }, + # ] + # + # @param [ArrayString}>] script_phases + # An array of hashes where each hash represents a single script phase. + # + attribute :script_phases, + :types => [Hash], + :container => Array, + :singularize => true + + #-----------------------------------------------------------------------# + + # @!group File patterns + # + # Podspecs should be located at the **root** of the repository, and paths + # to files should be specified **relative** to the root of the repository + # as well. File patterns do not support traversing the parent directory ( `..` ). 
+ # File patterns may contain the following wildcard patterns: + # + # --- + # + # ### Pattern: * + # + # Matches any file. Can be restricted by other values in the glob. + # + # * `*` will match all files + # * `c*` will match all files beginning with `c` + # * `*c` will match all files ending with `c` + # * `*c*` will match all files that have `c` in them (including at the + # beginning or end) + # + # Equivalent to `/.*/x` in regexp. + # + # **Note** this will not match Unix-like hidden files (dotfiles). In + # order to include those in the match results, you must use something + # like `{*,.*}`. + # + # --- + # + # ### Pattern: ** + # + # Matches directories recursively. + # + # --- + # + # ### Pattern: ? + # + # Matches any one character. Equivalent to `/.{1}/` in regexp. + # + # --- + # + # ### Pattern: [set] + # + # Matches any one character in set. + # + # Behaves exactly like character sets in Regexp, including set negation + # (`[^a-z]`). + # + # --- + # + # ### Pattern: {p,q} + # + # Matches either literal `p` or literal `q`. + # + # Matching literals may be more than one character in length. More than + # two literals may be specified. + # + # Equivalent to pattern alternation in regexp. + # + # --- + # + # ### Pattern: \ + # + # Escapes the next meta-character. + # + # --- + # + # ### Examples + # + # Consider these to be evaluated in the source root of + # [JSONKit](https://github.com/johnezang/JSONKit). + # + # "JSONKit.?" #=> ["JSONKit.h", "JSONKit.m"] + # "*.[a-z][a-z]" #=> ["CHANGELOG.md", "README.md"] + # "*.[^m]*" #=> ["JSONKit.h"] + # "*.{h,m}" #=> ["JSONKit.h", "JSONKit.m"] + # "*" #=> ["CHANGELOG.md", "JSONKit.h", "JSONKit.m", "README.md"] + + #-----------------------------------------------------------------------# + + # @!method source_files=(source_files) + # + # The source files of the Pod. + # + # @example + # + # spec.source_files = 'Classes/**/*.{h,m}' + # + # @example + # + # spec.source_files = 'Classes/**/*.{h,m}', 'More_Classes/**/*.{h,m}' + # + # @param [String, Array] source_files + # the source files of the Pod. + # + attribute :source_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method public_header_files=(public_header_files) + # + # A list of file patterns that should be used as public headers. + # + # --- + # + # These patterns are matched against the source files to include headers + # that will be exposed to the user’s project and + # from which documentation will be generated. When the library is built, + # these headers will appear in the build directory. If no public headers + # are specified then **all** the headers in source_files are considered + # public. + # + # @example + # + # spec.public_header_files = 'Headers/Public/*.h' + # + # @param [String, Array] public_header_files + # the public headers of the Pod. + # + attribute :public_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method project_header_files=(project_header_files) + # + # A list of file patterns that should be used to mark project headers. + # + # --- + # + # These patterns are matched against the public headers (or all the + # headers if no public headers have been specified) to exclude those + # headers which should not be exposed to the user project and which + # should not be used to generate the documentation. When the library + # is built, these headers will _not_ appear in the build directory. 
+ # + # + # @example + # + # spec.project_header_files = 'Headers/Project/*.h' + # + # @param [String, Array] project_header_files + # the project headers of the Pod. + # + attribute :project_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method private_header_files=(private_header_files) + # + # A list of file patterns that should be used to mark private headers. + # + # --- + # + # These patterns are matched against the public headers (or all the + # headers if no public headers have been specified) to exclude those + # headers which should not be exposed to the user project and which + # should not be used to generate the documentation. When the library + # is built, these headers will appear in the build directory. + # + # Header files that are not listed as neither public nor project or private will + # be treated as private, but in addition will not appear in the build + # directory at all. + # + # + # @example + # + # spec.private_header_files = 'Headers/Private/*.h' + # + # @param [String, Array] private_header_files + # the private headers of the Pod. + # + attribute :private_header_files, + :container => Array, + :file_patterns => true + + #------------------# + + # @!method vendored_frameworks=(*frameworks) + # + # The paths of the framework bundles that come shipped with the Pod. Supports both `.framework` and `.xcframework` bundles. + # The frameworks will be made available to the Pod and to the consumers of the pod. + # + # @example + # + # spec.ios.vendored_frameworks = 'Frameworks/MyFramework.framework' + # + # @example + # + # spec.vendored_frameworks = 'MyFramework.framework', 'TheirFramework.xcframework' + # + # @param [String, Array] vendored_frameworks + # A list of framework bundles paths. + # + attribute :vendored_frameworks, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method vendored_libraries=(*frameworks) + # + # The paths of the libraries that come shipped with the Pod. The libraries will be available to the Pod and the + # consumers of the Pod. + # + # @example + # + # spec.ios.vendored_library = 'Libraries/libProj4.a' + # + # @example + # + # spec.vendored_libraries = 'libProj4.a', 'libJavaScriptCore.a' + # + # @param [String, Array] vendored_libraries + # A list of library paths. + # + attribute :vendored_libraries, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # The keys accepted by the category attribute for each on demand resource entry. + # + ON_DEMAND_RESOURCES_CATEGORY_KEYS = [:download_on_demand, :prefetched, :initial_install].freeze + + # @!method on_demand_resources=(on_demand_resources) + # + # A hash of on demand resources that should be copied into the target bundle. Resources specified here + # will automatically become part of the resources build phase of the target this pod is integrated into. + # + # If no category is specified then `:download_on_demand` is used as the default. + # + # @note + # + # Tags specified by pods are _always_ managed by CocoaPods. If a tag is renamed, changed or deleted then + # CocoaPods will update the tag within the targets the pod was integrated into. It is highly recommended not to + # share the same tags for your project as the ones used by the pods your project consumes. 
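+      #
+      #   Whichever of the forms below is used, each entry is normalised by
+      #   the consumer to a hash of the form
+      #   `{ :paths => [...], :category => :download_on_demand }` (see
+      #   `_prepare_on_demand_resources`).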
+ # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => 'file1.png' + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => ['file1.png', 'file2.png'] + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => { :paths => ['file1.png', 'file2.png'], :category => :download_on_demand } + # } + # + # @example + # + # s.on_demand_resources = { + # 'Tag1' => { :paths => ['file1.png', 'file2.png'], :category => :initial_install } + # } + # + # @param [Hash{String=>String}, Hash{String=>Array}, Hash{String=>Hash}] on_demand_resources + # The on demand resources shipped with the Pod. + # + attribute :on_demand_resources, + :types => [String, Array, Hash], + :container => Hash, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method resource_bundles=(*resource_bundles) + # + # This attribute allows to define the name and the file of the resource + # bundles which should be built for the Pod. They are specified as a + # hash where the keys represent the name of the bundles and the values + # the file patterns that they should include. + # + # For building the Pod as a static library, we strongly **recommend** + # library developers to adopt resource bundles as there can be name + # collisions using the resources attribute. + # + # The names of the bundles should at least include the name of the Pod + # to minimise the chance of name collisions. + # + # To provide different resources per platform namespaced bundles *must* + # be used. + # + # @example + # + # spec.ios.resource_bundle = { 'MapBox' => 'MapView/Map/Resources/*.png' } + # + # @example + # + # spec.resource_bundles = { + # 'MapBox' => ['MapView/Map/Resources/*.png'], + # 'MapBoxOtherResources' => ['MapView/Map/OtherResources/*.png'] + # } + # + # @param [Hash{String=>String}, Hash{String=>Array}] resource_bundles + # A hash where the keys are the names of the resource bundles + # and the values are their relative file patterns. + # + attribute :resource_bundles, + :types => [String, Array], + :container => Hash, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method resources=(resources) + # + # A list of resources that should be copied into the target bundle. + # + # For building the Pod as a static library, we strongly **recommend** + # library developers to adopt [resource bundles](http://guides.cocoapods.org/syntax/podspec.html#resource_bundles) + # as there can be name collisions using the resources attribute. + # Moreover, resources specified with this attribute are copied + # directly to the client target and therefore they are not + # optimised by Xcode. + # + # @example + # + # spec.resource = 'Resources/HockeySDK.bundle' + # + # @example + # + # spec.resources = ['Images/*.png', 'Sounds/*'] + # + # @param [String, Array] resources + # The resources shipped with the Pod. + # + attribute :resources, + :container => Array, + :file_patterns => true, + :singularize => true + + #------------------# + + # @!method exclude_files=(exclude_files) + # + # A list of file patterns that should be excluded from the other + # file patterns. + # + # @example + # + # spec.ios.exclude_files = 'Classes/osx' + # + # @example + # + # spec.exclude_files = 'Classes/**/unused.{h,m}' + # + # @param [String, Array] exclude_files + # the file patterns that the Pod should ignore. 
+ #
+ attribute :exclude_files,
+ :container => Array,
+ :file_patterns => true
+
+ #------------------#
+
+ # @!method preserve_paths=(preserve_paths)
+ #
+ # Any file that should **not** be removed after being downloaded.
+ #
+ # ---
+ #
+ # By default, CocoaPods removes all files that are not matched by any
+ # of the other file patterns.
+ #
+ # @example
+ #
+ # spec.preserve_path = 'IMPORTANT.txt'
+ #
+ # @example
+ #
+ # spec.preserve_paths = 'Frameworks/*.framework'
+ #
+ # @param [String, Array] preserve_paths
+ # the paths that should not be cleaned.
+ #
+ attribute :preserve_paths,
+ :container => Array,
+ :file_patterns => true,
+ :singularize => true
+
+ #------------------#
+
+ # @!method module_map=(module_map)
+ #
+ # The module map file that should be used when this pod is integrated as
+ # a framework.
+ #
+ # `false` indicates that the default CocoaPods `modulemap` file should not
+ # be generated.
+ #
+ # `true` is the default and indicates that the default CocoaPods
+ # `modulemap` file should be generated.
+ #
+ # ---
+ #
+ # By default, CocoaPods creates a module map file based upon the public
+ # headers in a specification.
+ #
+ # @example
+ #
+ # spec.module_map = 'source/module.modulemap'
+ #
+ # @example
+ #
+ # spec.module_map = false
+ #
+ # @param [String, Bool] module_map
+ # the path to the module map file that should be used
+ # or whether to disable module_map generation.
+ #
+ attribute :module_map,
+ :types => [TrueClass, FalseClass, String],
+ :root_only => true
+
+ #-----------------------------------------------------------------------#
+
+ # @!group Subspecs
+ #
+ # A library can specify a dependency on either another library, a
+ # subspec of another library, or a subspec of itself.
+
+ #-----------------------------------------------------------------------#
+
+ # Represents the specification for a module of the library.
+ #
+ # ---
+ #
+ # Subspecs participate in a dual hierarchy.
+ #
+ # On one side, a specification automatically inherits as a dependency all
+ # its child ‘sub-specifications’ (unless a default subspec is
+ # specified).
+ #
+ # On the other side, a ‘sub-specification’ inherits the value of the
+ # attributes of its parents, so common values for attributes can be
+ # specified in the ancestors.
+ #
+ # Although it sounds complicated, in practice it means that subspecs
+ # generally do what you would expect:
+ #
+ # pod 'ShareKit', '2.0'
+ #
+ # Installs ShareKit with all the sharers like `ShareKit/Evernote`,
+ # `ShareKit/Facebook`, etc., as they are defined as subspecs.
+ #
+ # pod 'ShareKit/Twitter', '2.0'
+ # pod 'ShareKit/Pinboard', '2.0'
+ #
+ # Installs ShareKit with only the source files for `ShareKit/Twitter`,
+ # `ShareKit/Pinboard`. Note that, in this case, the ‘sub-specifications’
+ # to compile need the source files, the dependencies, and the other
+ # attributes defined by the root specification. CocoaPods is smart enough
+ # to handle any issues arising from duplicate attributes.
+ #
+ # @example Subspecs with different source files.
+ #
+ # subspec 'Twitter' do |sp|
+ # sp.source_files = 'Classes/Twitter'
+ # end
+ #
+ # subspec 'Pinboard' do |sp|
+ # sp.source_files = 'Classes/Pinboard'
+ # end
+ #
+ # @example Subspecs referencing dependencies to other subspecs.
+ # + # Pod::Spec.new do |s| + # s.name = 'RestKit' + # + # s.subspec 'Core' do |cs| + # cs.dependency 'RestKit/ObjectMapping' + # cs.dependency 'RestKit/Network' + # cs.dependency 'RestKit/CoreData' + # end + # + # s.subspec 'ObjectMapping' do |os| + # end + # end + # + # @example Nested subspecs. + # + # Pod::Spec.new do |s| + # s.name = 'Root' + # + # s.subspec 'Level_1' do |sp| + # sp.subspec 'Level_2' do |ssp| + # end + # end + # end + # + def subspec(name, &block) + subspec = Specification.new(self, name, &block) + @subspecs << subspec + subspec + end + + # The list of the test types currently supported. + # + SUPPORTED_TEST_TYPES = [:unit, :ui].freeze + + # The test type this specification supports. This only applies to test specifications. + # + # --- + # + # @example + # + # test_spec.test_type = :unit + # + # @example + # + # test_spec.test_type = 'unit' + # + # @param [Symbol, String] type + # The test type to use. + # + attribute :test_type, + :types => [Symbol, String], + :multi_platform => false, + :spec_types => [:test] + + # @!method requires_app_host=(flag) + # + # Whether a test specification requires an app host to run tests. This only applies to test specifications. + # + # @example + # + # test_spec.requires_app_host = true + # + # @param [Boolean] flag + # whether a test specification requires an app host to run tests. + # + attribute :requires_app_host, + :types => [TrueClass, FalseClass], + :default_value => false, + :spec_types => [:test] + + # @!method app_host_name=(name) + # + # The app specification to use as an app host, if necessary. + # + # @note + # + # You must depend on that app spec using `test_spec.dependency 'PodName'`. + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.test_spec do |test_spec| + # test_spec.source_files = 'NSAttributedString+CCLFormatTests.m' + # test_spec.requires_app_host = true + # test_spec.app_host_name = 'NSAttributedString+CCLFormat/App' + # test_spec.dependency 'NSAttributedString+CCLFormat/App' + # end + # + # spec.app_spec 'App' do |app_spec| + # app_spec.source_files = 'NSAttributedString+CCLFormat.m' + # app_spec.dependency 'AFNetworking' + # end + # end + # + # @param [String] name + # The app specification to use as an app host, if necessary. + # + attribute :app_host_name, + :types => [String], + :spec_types => [:test] + + SCHEME_KEYS = [:launch_arguments, :environment_variables, :code_coverage, :parallelizable, :build_configurations].freeze + + # @!method scheme=(flag) + # + # Specifies the scheme configuration to be used for this specification. + # + # --- + # + # @example + # + # spec.scheme = { :launch_arguments => ['Arg1'] } + # + # @example + # + # spec.scheme = { :launch_arguments => ['Arg1', 'Arg2'], :environment_variables => { 'Key1' => 'Val1'} } + # + # @param [Hash] scheme + # the scheme configuration to be used for this specification. + # + attribute :scheme, + :types => [Hash], + :container => Hash, + :keys => SCHEME_KEYS + + # Represents a test specification for the library. Here you can place all + # your tests for your podspec along with the test dependencies. 
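+ # The name defaults to 'Tests' (see the definition below), so a pod named
+ # 'MyPod' gets a test subspec named 'MyPod/Tests' (the pod name here is
+ # hypothetical).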
+ # + # --- + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.test_spec do |test_spec| + # test_spec.source_files = 'NSAttributedString+CCLFormatTests.m' + # test_spec.dependency 'Expecta' + # end + # end + # + def test_spec(name = 'Tests', &block) + subspec = Specification.new(self, name, true, &block) + @subspecs << subspec + subspec + end + + # Represents an app specification for the library. Here you can place all + # your app source files for your podspec along with the app dependencies. + # + # --- + # + # @example + # + # Pod::Spec.new do |spec| + # spec.name = 'NSAttributedString+CCLFormat' + # + # spec.app_spec do |app_spec| + # app_spec.source_files = 'NSAttributedString+CCLFormat.m' + # app_spec.dependency 'AFNetworking' + # end + # end + # + def app_spec(name = 'App', &block) + appspec = Specification.new(self, name, :app_specification => true, &block) + @subspecs << appspec + appspec + end + + #------------------# + + # @!method default_subspecs=(subspec_array) + # + # An array of subspecs names that should be used as preferred dependency. + # If not specified, a specification requires all of its subspecs as + # dependencies. + # + # You may use the value `:none` to specify that none of the subspecs are + # required to compile this pod and that all subspecs are optional. + # + # --- + # + # A Pod should make available the full library by default. Users can + # fine tune their dependencies, and exclude unneeded subspecs, once + # their requirements are known. Therefore, this attribute is rarely + # needed. It is intended to be used to select a default if there are + # ‘sub-specifications’ which provide alternative incompatible + # implementations, or to exclude modules rarely needed (especially if + # they trigger dependencies on other libraries). + # + # @example + # + # spec.default_subspec = 'Core' + # + # @example + # + # spec.default_subspecs = 'Core', 'UI' + # + # @example + # + # spec.default_subspecs = :none + # + # @param [Array, String, Symbol] subspec_names + # An array of subspec names that should be inherited as + # dependency. + # + root_attribute :default_subspecs, + :container => Array, + :types => [Array, String, Symbol], + :singularize => true + + #-----------------------------------------------------------------------# + + # @!group Multi-Platform support + # + # A specification can store values which are specific to only one + # platform. + # + # --- + # + # For example one might want to store resources which are specific to + # only iOS projects. + # + # spec.resources = 'Resources/**/*.png' + # spec.ios.resources = 'Resources_ios/**/*.png' + + #-----------------------------------------------------------------------# + + # Provides support for specifying iOS attributes. + # + # @example + # spec.ios.source_files = 'Classes/ios/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def ios + PlatformProxy.new(self, :ios) + end + + # Provides support for specifying OS X attributes. + # + # @example + # spec.osx.source_files = 'Classes/osx/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def osx + PlatformProxy.new(self, :osx) + end + + alias macos osx + + # Provides support for specifying tvOS attributes. + # + # @example + # spec.tvos.source_files = 'Classes/tvos/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. 
+ # + def tvos + PlatformProxy.new(self, :tvos) + end + + # Provides support for specifying watchOS attributes. + # + # @example + # spec.watchos.source_files = 'Classes/watchos/**/*.{h,m}' + # + # @return [PlatformProxy] the proxy that will set the attributes. + # + def watchos + PlatformProxy.new(self, :watchos) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute.rb new file mode 100644 index 0000000..9c76b92 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute.rb @@ -0,0 +1,206 @@ +module Pod + class Specification + module DSL + # A Specification attribute stores the information of an attribute. It + # also provides logic to implement any required logic. + # + class Attribute + require 'active_support/inflector/inflections' + + # Spec types currently supported. + # + SUPPORTED_SPEC_TYPES = [:library, :app, :test].freeze + + # @return [Symbol] the name of the attribute. + # + attr_reader :name + + # Returns a new attribute initialized with the given options. + # + # Attributes by default are: + # + # - inherited + # - multi-platform + # + # @param [Symbol] name @see name + # + # @param [Hash{Symbol=>Object}] options + # The options for configuring the attribute (see Options + # group). + # + # @raise If there are unrecognized options. + # + def initialize(name, options) + @name = name + + @multi_platform = options.delete(:multi_platform) { true } + @root_only = options.delete(:root_only) { false } + @spec_types = options.delete(:spec_types) { SUPPORTED_SPEC_TYPES } + @inherited = options.delete(:inherited) { @root_only } + @required = options.delete(:required) { false } + @singularize = options.delete(:singularize) { false } + @file_patterns = options.delete(:file_patterns) { false } + @container = options.delete(:container) { nil } + @keys = options.delete(:keys) { nil } + @default_value = options.delete(:default_value) { nil } + @ios_default = options.delete(:ios_default) { nil } + @osx_default = options.delete(:osx_default) { nil } + @types = options.delete(:types) { [String] } + + unless options.empty? + raise StandardError, "Unrecognized options: #{options} for #{self}" + end + unless (@spec_types - SUPPORTED_SPEC_TYPES).empty? + raise StandardError, "Unrecognized spec type option: #{@spec_types} for #{self}" + end + end + + # @return [String] A string representation suitable for UI. + # + def to_s + "Specification attribute `#{name}`" + end + + # @return [String] A string representation suitable for debugging. + # + def inspect + "<#{self.class} name=#{name} types=#{types} " \ + "multi_platform=#{multi_platform?}>" + end + + #---------------------------------------------------------------------# + + # @!group Options + + # @return [Array] the list of the classes of the values + # supported by the attribute writer. If not specified defaults + # to #{String}. + # + attr_reader :types + + # @return [Array] the list of the classes of the values + # supported by the attribute, including the container. + # + def supported_types + @supported_types ||= @types.dup.push(container).compact + end + + # @return [Class] if defined it can be #{Array} or #{Hash}. It is used + # as default initialization value and to automatically wrap + # other values to arrays. 
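+ # For example, with `:container => Array`, a bare `'file.h'` value is
+ # wrapped to `['file.h']` (an illustrative sketch of the wrapping rule,
+ # not an exhaustive description).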
+ # + attr_reader :container + + # @return [Array, Hash] the list of the accepted keys for an attribute + # wrapped by a Hash. + # + # @note A hash is accepted to group the keys associated only with + # certain keys (see the source attribute of a Spec). + # + attr_reader :keys + + # @return [Object] if the attribute follows configuration over + # convention it can specify a default value. + # + # @note The default value is not automatically wrapped and should be + # specified within the container if any. + # + attr_reader :default_value + + # @return [Object] similar to #{default_value} but for iOS. + # + attr_reader :ios_default + + # @return [Object] similar to #{default_value} but for OS X. + # + attr_reader :osx_default + + # @return [Boolean] whether the specification should be considered invalid + # if a value for the attribute is not specified. + # + def required? + @required + end + + # @return [Boolean] whether the attribute should be specified only on the + # root specification. + # + def root_only? + @root_only + end + + # @return [Boolean] whether the attribute should be specified only on + # test specifications. + # + def test_only? + @spec_types == [:test] + end + + # @return [Boolean] whether the attribute is multi-platform and should + # work in conjunction with #{PlatformProxy}. + # + def multi_platform? + @multi_platform + end + + # @return [Boolean] whether there should be a singular alias for the + # attribute writer. + # + def singularize? + @singularize + end + + # @return [Boolean] whether the attribute describes file patterns. + # + # @note This is mostly used by the linter. + # + def file_patterns? + @file_patterns + end + + # @return [Boolean] defines whether the attribute reader should join the + # values with the parent. + # + # @note Attributes stored in wrappers are always inherited. + # + def inherited? + @inherited + end + + #---------------------------------------------------------------------# + + # @!group Accessors support + + # Returns the default value for the attribute. + # + # @param [Symbol] platform + # the platform for which the default value is requested. + # + # @return [Object] The default value. + # + def default(platform = nil) + if platform && multi_platform? + platform_value = ios_default if platform == :ios + platform_value = osx_default if platform == :osx + platform_value || default_value + else + default_value + end + end + + # @return [String] the name of the setter method for the attribute. + # + def writer_name + "#{name}=" + end + + # @return [String] an aliased attribute writer offered for convenience + # on the DSL. + # + def writer_singular_form + "#{name.to_s.singularize}=" if singularize? + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute_support.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute_support.rb new file mode 100644 index 0000000..c8ebc13 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/attribute_support.rb @@ -0,0 +1,74 @@ +module Pod + class Specification + module DSL + # @return [Array] The attributes of the class. + # + class << self + attr_reader :attributes + end + + # This module provides support for storing the runtime information of the + # {Specification} DSL. + # + module AttributeSupport + # Defines a root attribute for the extended class. 
+ # + # Root attributes make sense only in root specification, they never are + # multi-platform, they don't have inheritance, and they never have a + # default value. + # + # @param [Symbol, String] name + # The name of the attribute. + # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + def root_attribute(name, options = {}) + options[:root_only] = true + options[:multi_platform] = false + store_attribute(name, options) + end + + # Defines an attribute for the extended class. + # + # Regular attributes in general support inheritance and multi-platform + # values, so resolving them for a given specification is not trivial. + # + # @param [Symbol, String] name + # The name of the attribute. + # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + def attribute(name, options = {}) + store_attribute(name, options) + end + + #---------------------------------------------------------------------# + + # Creates an attribute with the given name and options and stores it in + # the {DSL.attributes} hash. + # + # @param [String] name + # The name of the attribute. + # + # @param [Hash] options + # The options used to initialize the attribute. + # + # @return [void] + # + # @visibility private + # + def store_attribute(name, options) + attr = Attribute.new(name, options) + @attributes ||= {} + @attributes[name] = attr + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/deprecations.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/deprecations.rb new file mode 100644 index 0000000..3d44734 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/deprecations.rb @@ -0,0 +1,20 @@ +module Pod + class Specification + module DSL + # Provides warning and errors for the deprecated attributes of the DSL. + # + module Deprecations + DSL.attribute :xcconfig, + :container => Hash, + :inherited => true + + def xcconfig=(value) + self.pod_target_xcconfig = value + self.user_target_xcconfig = value + CoreUI.warn "[#{self}] `xcconfig` has been split into "\ + '`pod_target_xcconfig` and `user_target_xcconfig`.' + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/platform_proxy.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/platform_proxy.rb new file mode 100644 index 0000000..63db751 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/dsl/platform_proxy.rb @@ -0,0 +1,82 @@ +module Pod + class Specification + module DSL + # The PlatformProxy works in conjunction with Specification#_on_platform. + # It provides support for a syntax like `spec.ios.source_files = 'file'`. + # + class PlatformProxy + # @return [Specification] the specification for this platform proxy. + # + attr_accessor :spec + + # @return [Symbol] the platform described by this proxy. Can be either + # `:ios` or `:osx`. + # + attr_reader :platform + + # @param [Specification] spec @see spec + # @param [Symbol] platform @see platform + # + def initialize(spec, platform) + @spec = spec + @platform = platform + end + + # Defines a setter method for each attribute of the specification + # class, that forwards the message to the {#specification} using the + # {Specification#on_platform} method. 
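+ # For example, a hypothetical `spec.ios.compiler_flags = '-DDEBUG'` call
+ # is dispatched through `method_missing` below and stored scoped to the
+ # `:ios` platform.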
+ # + # @return [void] + # + def method_missing(meth, *args, &block) + return super unless attribute = attribute_for_method(meth) + raise NoMethodError, "#{attribute} cannot be set per-platform" unless attribute.multi_platform? + spec.store_attribute(attribute.name, args.first, platform) + end + + # @!visibility private + # + def respond_to_missing?(method, include_all) + attribute = attribute_for_method(method) + (attribute && attribute.multi_platform?) || super + end + + # Allows to add dependency for the platform. + # + # @return [void] + # + def dependency(*args) + name, *version_requirements = args + platform_name = platform.to_s + platform_hash = spec.attributes_hash[platform_name] || {} + platform_hash['dependencies'] ||= {} + platform_hash['dependencies'][name] = version_requirements + spec.attributes_hash[platform_name] = platform_hash + end + + # Allows to set the deployment target for the platform. + # + # @return [void] + # + def deployment_target=(value) + platform_name = platform.to_s + spec.attributes_hash['platforms'] ||= {} + spec.attributes_hash['platforms'][platform_name] = value + end + + private + + def attribute_for_method(method) + method = method.to_sym + Specification::DSL.attributes.values.find do |attribute| + if attribute.writer_name.to_sym == method + true + elsif attribute.writer_singular_form + attribute.writer_singular_form.to_sym == method + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/json.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/json.rb new file mode 100644 index 0000000..ba3c477 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/json.rb @@ -0,0 +1,111 @@ +module Pod + class Specification + module JSONSupport + # @return [String] the json representation of the specification. + # + def to_json(*a) + require 'json' + to_hash.to_json(*a) << "\n" + end + + # @return [String] the pretty json representation of the specification. + # + def to_pretty_json(*a) + require 'json' + JSON.pretty_generate(to_hash, *a) << "\n" + end + + #-----------------------------------------------------------------------# + + # @return [Hash] the hash representation of the specification including + # subspecs. + # + def to_hash + hash = attributes_hash.dup + if root? || available_platforms != parent.available_platforms + platforms = Hash[available_platforms.map { |p| [p.name.to_s, p.deployment_target && p.deployment_target.to_s] }] + hash['platforms'] = platforms + end + specs_by_type = subspecs.group_by(&:spec_type) + all_appspecs = specs_by_type[:app] || [] + all_testspecs = specs_by_type[:test] || [] + all_subspecs = specs_by_type[:library] || [] + + hash.delete('testspecs') + hash['testspecs'] = all_testspecs.map(&:to_hash) unless all_testspecs.empty? + hash.delete('appspecs') + hash['appspecs'] = all_appspecs.map(&:to_hash) unless all_appspecs.empty? + hash.delete('subspecs') + hash['subspecs'] = all_subspecs.map(&:to_hash) unless all_subspecs.empty? + + # Since CocoaPods 1.7 version the DSL has changed to be pluralized. When we serialize a podspec to JSON with + # 1.7, ensure that we also include the singular version in the hash to maintain backwards compatibility with + # < 1.7 versions. We also delete this key and re-add it to ensure it gets added at the end. + hash.delete('swift_version') + hash['swift_version'] = swift_version.to_s unless swift_version.nil? 
+
+ hash
+ end
+ end
+
+ # Configures a new specification from the given JSON representation.
+ #
+ # @param [String] json the JSON encoded hash which contains the information
+ # of the specification.
+ #
+ # @return [Specification] the specification
+ #
+ def self.from_json(json, path="")
+ require 'json'
+ begin
+ hash = JSON.parse(json)
+ from_hash(hash)
+ rescue JSON::ParserError => e
+ if path != ""
+ raise e.class, "Failed to parse JSON at file: '#{path}'.\n\n#{e.message}"
+ else raise
+ end
+ end
+ end
+
+ # Configures a new specification from the given hash.
+ #
+ # @param [Hash] hash the hash which contains the information of the
+ # specification.
+ #
+ # @param [Specification] parent the parent of the specification unless the
+ # specification is a root.
+ #
+ # @return [Specification] the specification
+ #
+ def self.from_hash(hash, parent = nil, test_specification: false, app_specification: false)
+ attributes_hash = hash.dup
+ spec = Spec.new(parent, nil, test_specification, :app_specification => app_specification)
+ subspecs = attributes_hash.delete('subspecs')
+ testspecs = attributes_hash.delete('testspecs')
+ appspecs = attributes_hash.delete('appspecs')
+
+ ## backwards compatibility with 1.3.0
+ spec.test_specification = !attributes_hash['test_type'].nil?
+
+ spec.attributes_hash = attributes_hash
+ spec.subspecs.concat(subspecs_from_hash(spec, subspecs, false, false))
+ spec.subspecs.concat(subspecs_from_hash(spec, testspecs, true, false))
+ spec.subspecs.concat(subspecs_from_hash(spec, appspecs, false, true))
+
+ spec
+ end
+
+ def self.subspecs_from_hash(spec, subspecs, test_specification, app_specification)
+ return [] if subspecs.nil?
+ subspecs.map do |s_hash|
+ Specification.from_hash(s_hash, spec,
+ :test_specification => test_specification,
+ :app_specification => app_specification)
+ end
+ end
+
+ #-----------------------------------------------------------------------#
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter.rb
new file mode 100644
index 0000000..91e7bf1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter.rb
@@ -0,0 +1,582 @@
+require 'cocoapods-core/specification/linter/result'
+require 'cocoapods-core/specification/linter/analyzer'
+
+module Pod
+ class Specification
+ # The Linter checks specifications for errors and warnings.
+ #
+ # It is designed not only to guarantee the formal functionality of a
+ # specification, but also to support the maintenance of sources.
+ #
+ class Linter
+ # @return [Specification] the specification to lint.
+ #
+ attr_reader :spec
+
+ # @return [Pathname] the path of the `podspec` file where {#spec} is
+ # defined.
+ #
+ attr_reader :file
+
+ attr_reader :results
+
+ # @param [Specification, Pathname, String] spec_or_path
+ # the Specification or the path of the `podspec` file to lint.
+ #
+ def initialize(spec_or_path)
+ if spec_or_path.is_a?(Specification)
+ @spec = spec_or_path
+ @file = @spec.defined_in_file
+ else
+ @file = Pathname.new(spec_or_path)
+ begin
+ @spec = Specification.from_file(@file)
+ rescue => e
+ @spec = nil
+ @raise_message = e.message
+ end
+ end
+ end
+
+ # Lints the specification, adding a {Result} for any failed check to the
+ # {#results} object.
+ #
+ # @return [Boolean] whether the specification passed validation.
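+ #
+ # @example Illustrative usage (the podspec path is hypothetical)
+ #
+ # linter = Pod::Specification::Linter.new('Foo.podspec')
+ # passed = linter.lint
+ # linter.results.each { |result| puts result } unless passed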
+ #
+ def lint
+ @results = Results.new
+ if spec
+ validate_root_name
+ check_required_attributes
+ check_requires_arc_attribute
+ run_root_validation_hooks
+ perform_all_specs_analysis
+ else
+ results.add_error('spec', "The specification defined in `#{file}` "\
+ "could not be loaded.\n\n#{@raise_message}")
+ end
+ results.empty?
+ end
+
+ #-----------------------------------------------------------------------#
+
+ # @!group Lint results
+
+ public
+
+ # @return [Array] all the errors generated by the Linter.
+ #
+ def errors
+ @errors ||= results.select { |r| r.type == :error }
+ end
+
+ # @return [Array] all the warnings generated by the Linter.
+ #
+ def warnings
+ @warnings ||= results.select { |r| r.type == :warning }
+ end
+
+ #-----------------------------------------------------------------------#
+
+ private
+
+ # @!group Lint steps
+
+ # Checks that the spec's root name matches the filename.
+ #
+ # @return [void]
+ #
+ def validate_root_name
+ if spec.root.name && file
+ acceptable_names = [
+ spec.root.name + '.podspec',
+ spec.root.name + '.podspec.json',
+ ]
+ names_match = acceptable_names.include?(file.basename.to_s)
+ unless names_match
+ results.add_error('name', 'The name of the spec should match the ' \
+ 'name of the file.')
+ end
+ end
+ end
+
+ # Generates a warning if the requires_arc attribute has true or false string values.
+ #
+ # @return [void]
+ #
+ def check_requires_arc_attribute
+ attribute = DSL.attributes.values.find { |attr| attr.name == :requires_arc }
+ if attribute
+ value = spec.send(attribute.name)
+ if value == 'true' || value == 'false'
+ results.add_warning('requires_arc', value + ' is considered to be the name of a file.')
+ end
+ end
+ end
+
+ # Checks that every required attribute has a value.
+ #
+ # @return [void]
+ #
+ def check_required_attributes
+ attributes = DSL.attributes.values.select(&:required?)
+ attributes.each do |attr|
+ begin
+ value = spec.send(attr.name)
+ unless value && (!value.respond_to?(:empty?) || !value.empty?)
+ if attr.name == :license
+ results.add_warning('attributes', 'Missing required attribute ' \
+ "`#{attr.name}`.")
+ else
+ results.add_error('attributes', 'Missing required attribute ' \
+ "`#{attr.name}`.")
+ end
+ end
+ rescue => exception
+ results.add_error('attributes', "Unable to parse attribute `#{attr.name}` due to error: #{exception}")
+ end
+ end
+ end
+
+ # Runs the validation hook for root only attributes.
+ #
+ # @return [void]
+ #
+ def run_root_validation_hooks
+ attributes = DSL.attributes.values.select(&:root_only?)
+ run_validation_hooks(attributes, spec)
+ end
+
+ # Runs validations for multi-platform attributes, activating a consumer
+ # for each platform of every spec.
+ #
+ # @return [void]
+ #
+ def perform_all_specs_analysis
+ all_specs = [spec, *spec.recursive_subspecs]
+ all_specs.each do |current_spec|
+ current_spec.available_platforms.each do |platform|
+ @consumer = Specification::Consumer.new(current_spec, platform)
+ results.consumer = @consumer
+ run_all_specs_validation_hooks
+ analyzer = Analyzer.new(@consumer, results)
+ results = analyzer.analyze
+ @consumer = nil
+ results.consumer = nil
+ end
+ end
+ end
+
+ # @return [Specification::Consumer] the current consumer.
+ #
+ attr_accessor :consumer
+
+ # Runs the validation hook for the attributes that are not root only.
+ #
+ # @return [void]
+ #
+ def run_all_specs_validation_hooks
+ attributes = DSL.attributes.values.reject(&:root_only?)
+ run_validation_hooks(attributes, consumer)
+ end
+
+ # Runs the validation hook for each attribute.
+ # + # @note Hooks are called only if there is a value for the attribute as + # required attributes are already checked by the + # {#check_required_attributes} step. + # + # @return [void] + # + def run_validation_hooks(attributes, target) + attributes.each do |attr| + validation_hook = "_validate_#{attr.name}" + next unless respond_to?(validation_hook, true) + begin + value = target.send(attr.name) + next unless value + send(validation_hook, value) + rescue => e + results.add_error(attr.name, "Unable to validate due to exception: #{e}") + end + end + end + + #-----------------------------------------------------------------------# + + private + + # Performs validations related to the `name` attribute. + # + def _validate_name(name) + if name =~ %r{/} + results.add_error('name', 'The name of a spec should not contain ' \ + 'a slash.') + end + + if name =~ /\s/ + results.add_error('name', 'The name of a spec should not contain ' \ + 'whitespace.') + end + + if name[0, 1] == '.' + results.add_error('name', 'The name of a spec should not begin' \ + ' with a period.') + end + end + + # @!group Root spec validation helpers + + # Performs validations related to the `authors` attribute. + # + def _validate_authors(a) + if a.is_a? Hash + if a == { 'YOUR NAME HERE' => 'YOUR EMAIL HERE' } + results.add_error('authors', 'The authors have not been updated ' \ + 'from default') + end + end + end + + # Performs validations related to the `version` attribute. + # + def _validate_version(v) + if v.to_s.empty? + results.add_error('version', 'A version is required.') + end + end + + # Performs validations related to the `module_name` attribute. + # + def _validate_module_name(m) + unless m.nil? || m =~ /^[a-z_][0-9a-z_]*$/i + results.add_error('module_name', 'The module name of a spec' \ + ' should be a valid C99 identifier.') + end + end + + # Performs validations related to the `summary` attribute. + # + def _validate_summary(s) + if s.length > 140 + results.add_warning('summary', 'The summary should be a short ' \ + 'version of `description` (max 140 characters).') + end + if s =~ /A short description of/ + results.add_warning('summary', 'The summary is not meaningful.') + end + end + + # Performs validations related to the `description` attribute. + # + def _validate_description(d) + if d == spec.summary + results.add_warning('description', 'The description is equal to' \ + ' the summary.') + end + + if d.strip.empty? + results.add_error('description', 'The description is empty.') + elsif spec.summary && d.length < spec.summary.length + results.add_warning('description', 'The description is shorter ' \ + 'than the summary.') + end + end + + # Performs validations related to the `homepage` attribute. + # + def _validate_homepage(h) + return unless h.is_a?(String) + if h =~ %r{http://EXAMPLE} + results.add_warning('homepage', 'The homepage has not been updated' \ + ' from default') + end + end + + # Performs validations related to the `frameworks` attribute. + # + def _validate_frameworks(frameworks) + if frameworks_invalid?(frameworks) + results.add_error('frameworks', 'A framework should only be' \ + ' specified by its name') + end + end + + # Performs validations related to the `weak frameworks` attribute. + # + def _validate_weak_frameworks(frameworks) + if frameworks_invalid?(frameworks) + results.add_error('weak_frameworks', 'A weak framework should only be' \ + ' specified by its name') + end + end + + # Performs validations related to the `libraries` attribute. 
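+ # (For instance, a hypothetical `spec.libraries = 'z', 'xml2'` passes,
+ # while `'libz.a'` would trip both the extension and the `lib` prefix
+ # checks below.)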
+ # + def _validate_libraries(libs) + libs.each do |lib| + lib = lib.downcase + if lib.end_with?('.a') || lib.end_with?('.dylib') + results.add_error('libraries', 'Libraries should not include the' \ + ' extension ' \ + "(`#{lib}`)") + end + + if lib.start_with?('lib') + results.add_error('libraries', 'Libraries should omit the `lib`' \ + ' prefix ' \ + " (`#{lib}`)") + end + + if lib.include?(',') + results.add_error('libraries', 'Libraries should not include comas ' \ + "(`#{lib}`)") + end + end + end + + # Performs validations related to the `license` attribute. + # + def _validate_license(l) + type = l[:type] + file = l[:file] + if type.nil? + results.add_warning('license', 'Missing license type.') + end + if type && type.delete(' ').delete("\n").empty? + results.add_warning('license', 'Invalid license type.') + end + if type && type =~ /\(example\)/ + results.add_error('license', 'Sample license type.') + end + if file && Pathname.new(file).extname !~ /^(\.(txt|md|markdown|))?$/i + results.add_error('license', 'Invalid file type') + end + end + + # Performs validations related to the `source` attribute. + # + def _validate_source(s) + return unless s.is_a?(Hash) + if git = s[:git] + tag, commit = s.values_at(:tag, :commit) + version = spec.version.to_s + + if git =~ %r{http://EXAMPLE} + results.add_error('source', 'The Git source still contains the ' \ + 'example URL.') + end + if commit && commit.downcase =~ /head/ + results.add_error('source', 'The commit of a Git source cannot be' \ + ' `HEAD`.') + end + if tag && !tag.to_s.include?(version) + results.add_warning('source', 'The version should be included in' \ + ' the Git tag.') + end + if tag.nil? + results.add_warning('source', 'Git sources should specify a tag.', true) + end + end + + perform_github_source_checks(s) + check_git_ssh_source(s) + end + + # Performs validations related to the `deprecated_in_favor_of` attribute. + # + def _validate_deprecated_in_favor_of(d) + if spec.root.name == Specification.root_name(d) + results.add_error('deprecated_in_favor_of', 'a spec cannot be ' \ + 'deprecated in favor of itself') + end + end + + # Performs validations related to the `test_type` attribute. + # + def _validate_test_type(t) + supported_test_types = Specification::DSL::SUPPORTED_TEST_TYPES.map(&:to_s) + unless supported_test_types.include?(t.to_s) + results.add_error('test_type', "The test type `#{t}` is not supported. " \ + "Supported test type values are #{supported_test_types}.") + end + end + + def _validate_app_host_name(n) + unless consumer.requires_app_host? + results.add_error('app_host_name', '`requires_app_host` must be set to ' \ + '`true` when `app_host_name` is specified.') + end + + unless consumer.dependencies.map(&:name).include?(n) + results.add_error('app_host_name', "The app host name (#{n}) specified by `#{consumer.spec.name}` could " \ + 'not be found. You must explicitly declare a dependency on that app spec.') + end + end + + # Performs validations related to the `script_phases` attribute. + # + def _validate_script_phases(s) + s.each do |script_phase| + keys = script_phase.keys + unrecognized_keys = keys - Specification::ALL_SCRIPT_PHASE_KEYS + unless unrecognized_keys.empty? + results.add_error('script_phases', "Unrecognized option(s) `#{unrecognized_keys.join(', ')}` in script phase `#{script_phase[:name]}`. 
" \ + "Available options are `#{Specification::ALL_SCRIPT_PHASE_KEYS.join(', ')}`.") + end + missing_required_keys = Specification::SCRIPT_PHASE_REQUIRED_KEYS - keys + unless missing_required_keys.empty? + results.add_error('script_phases', "Missing required shell script phase options `#{missing_required_keys.join(', ')}` in script phase `#{script_phase[:name]}`.") + end + unless Specification::EXECUTION_POSITION_KEYS.include?(script_phase[:execution_position]) + results.add_error('script_phases', "Invalid execution position value `#{script_phase[:execution_position]}` in shell script `#{script_phase[:name]}`. " \ + "Available options are `#{Specification::EXECUTION_POSITION_KEYS.join(', ')}`.") + end + end + end + + # Performs validations related to the `on_demand_resources` attribute. + # + def _validate_on_demand_resources(h) + h.values.each do |value| + unless Specification::ON_DEMAND_RESOURCES_CATEGORY_KEYS.include?(value[:category]) + results.add_error('on_demand_resources', "Invalid on demand resources category value `#{value[:category]}`. " \ + "Available options are `#{Specification::ON_DEMAND_RESOURCES_CATEGORY_KEYS.join(', ')}`.") + end + end + end + + # Performs validation related to the `scheme` attribute. + # + def _validate_scheme(s) + unless s.empty? + if consumer.spec.subspec? && consumer.spec.library_specification? + results.add_error('scheme', 'Scheme configuration is not currently supported for subspecs.') + return + end + if s.key?(:launch_arguments) && !s[:launch_arguments].is_a?(Array) + results.add_error('scheme', 'Expected an array for key `launch_arguments`.') + end + if s.key?(:environment_variables) && !s[:environment_variables].is_a?(Hash) + results.add_error('scheme', 'Expected a hash for key `environment_variables`.') + end + if s.key?(:code_coverage) && ![true, false].include?(s[:code_coverage]) + results.add_error('scheme', 'Expected a boolean for key `code_coverage`.') + end + if s.key?(:parallelizable) && ![true, false].include?(s[:parallelizable]) + results.add_error('scheme', 'Expected a boolean for key `parallelizable`.') + end + if s.key?(:build_configurations) && !s[:build_configurations].is_a?(Hash) + results.add_error('scheme', 'Expected a hash for key `build_configurations`.') + end + end + end + + # Performs validations related to github sources. + # + def perform_github_source_checks(s) + require 'uri' + + if git = s[:git] + return unless git =~ /^#{URI.regexp}$/ + git_uri = URI.parse(git) + if git_uri.host + perform_github_uri_checks(git, git_uri) if git_uri.host.end_with?('github.com') + end + end + end + + def perform_github_uri_checks(git, git_uri) + if git_uri.host.start_with?('www.') + results.add_warning('github_sources', 'Github repositories should ' \ + 'not use `www` in their URL.') + end + unless git.end_with?('.git') + results.add_warning('github_sources', 'Github repositories ' \ + 'should end in `.git`.') + end + unless git_uri.scheme == 'https' + results.add_warning('github_sources', 'Github repositories ' \ + 'should use an `https` link.', true) + end + end + + # Performs validations related to SSH sources + # + def check_git_ssh_source(s) + if git = s[:git] + if git =~ %r{\w+\@(\w|\.)+\:(/\w+)*} + results.add_warning('source', 'Git SSH URLs will NOT work for ' \ + 'people behind firewalls configured to only allow HTTP, ' \ + 'therefore HTTPS is preferred.', true) + end + end + end + + # Performs validations related to the `social_media_url` attribute. 
+ # + def _validate_social_media_url(s) + if s =~ %r{https://twitter.com/EXAMPLE} + results.add_warning('social_media_url', 'The social media URL has ' \ + 'not been updated from the default.') + end + end + + # Performs validations related to the `readme` attribute. + # + def _validate_readme(s) + if s =~ %r{https://www.example.com/README} + results.add_warning('readme', 'The readme has ' \ + 'not been updated from the default.') + end + end + + # Performs validations related to the `changelog` attribute. + # + def _validate_changelog(s) + if s =~ %r{https://www.example.com/CHANGELOG} + results.add_warning('changelog', 'The changelog has ' \ + 'not been updated from the default.') + end + end + + # @param [Hash,Object] value + # + def _validate_info_plist(value) + return if value.empty? + if consumer.spec.subspec? && consumer.spec.library_specification? + results.add_error('info_plist', 'Info.plist configuration is not currently supported for subspecs.') + end + end + + #-----------------------------------------------------------------------# + + # @!group All specs validation helpers + + private + + # Performs validations related to the `compiler_flags` attribute. + # + def _validate_compiler_flags(flags) + if flags.join(' ').split(' ').any? { |flag| flag.start_with?('-Wno') } + results.add_warning('compiler_flags', 'Warnings must not be disabled' \ + '(`-Wno compiler` flags).') + end + end + + # Returns whether the frameworks are valid + # + # @param frameworks [Array] + # The frameworks to be validated + # + # @return [Boolean] true if a framework contains any + # non-alphanumeric character or includes an extension. + # + def frameworks_invalid?(frameworks) + frameworks.any? do |framework| + framework_regex = /[^\w\-\+]/ + framework =~ framework_regex + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/analyzer.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/analyzer.rb new file mode 100644 index 0000000..a3478a0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/analyzer.rb @@ -0,0 +1,218 @@ +require 'cocoapods-core/specification/linter/result' + +module Pod + class Specification + class Linter + class Analyzer + def initialize(consumer, results) + @consumer = consumer + @results = results + @results.consumer = @consumer + end + + # Analyzes the consumer adding a {Result} for any failed check to + # the {#results} object. + # + # @return [Results] the results of the analysis. + # + def analyze + check_attributes + validate_file_patterns + check_if_spec_is_empty + results + end + + private + + attr_reader :consumer + + attr_reader :results + + # @return [Array] Keys that are valid but have been deprecated. + # + DEPRECATED_KEYS = ['swift_version'].freeze + + # @return [Array] Keys that are only used for internal purposes. + # + INTERNAL_KEYS = ['configuration_pod_whitelist'].freeze + + # Checks the attributes hash for any unknown key which might be the + # result of a misspelling in a JSON file. + # + # @note Sub-keys are not checked per-platform as + # there is no attribute supporting this combination. + # + # @note The keys of sub-keys are not checked as they are only used by + # the `source` attribute and they are subject + # to change according to the support in the + # `cocoapods-downloader` gem. 
+ # + def check_attributes + attributes_keys = Pod::Specification::DSL.attributes.keys.map(&:to_s) + platform_keys = Specification::DSL::PLATFORMS.map(&:to_s) + valid_keys = attributes_keys + platform_keys + DEPRECATED_KEYS + INTERNAL_KEYS + attributes_hash = consumer.spec.attributes_hash + keys = attributes_hash.keys + Specification::DSL::PLATFORMS.each do |platform| + if attributes_hash[platform.to_s] + keys += attributes_hash[platform.to_s].keys + end + end + unknown_keys = keys - valid_keys + + unknown_keys.each do |key| + results.add_warning('attributes', "Unrecognized `#{key}` key.") + end + + Pod::Specification::DSL.attributes.each do |_key, attribute| + declared_value = consumer.spec.attributes_hash[attribute.name.to_s] + validate_attribute_occurrence(attribute, declared_value) + validate_attribute_type(attribute, declared_value) + if attribute.name != :platforms + value = value_for_attribute(attribute) + validate_attribute_value(attribute, value) if value + end + end + end + + # Checks the attributes that represent file patterns. + # + # @todo Check the attributes hash directly. + # + def validate_file_patterns + attributes = DSL.attributes.values.select(&:file_patterns?) + attributes.each do |attrb| + patterns = consumer.send(attrb.name) + + if patterns.is_a?(Hash) + patterns = patterns.values.flatten(1) + end + + if patterns.respond_to?(:each) + patterns.each do |pattern| + pattern = pattern[:paths].join if attrb.name == :on_demand_resources + if pattern.respond_to?(:start_with?) && pattern.start_with?('/') + results.add_error('File Patterns', 'File patterns must be ' \ + "relative and cannot start with a slash (#{attrb.name}).") + end + end + end + end + end + + # Check empty subspec attributes + # + def check_if_spec_is_empty + methods = %w( source_files on_demand_resources resources resource_bundles preserve_paths + dependencies vendored_libraries vendored_frameworks ) + empty_patterns = methods.all? { |m| consumer.send(m).empty? } + empty = empty_patterns && consumer.spec.subspecs.empty? + if empty + results.add_error('File Patterns', "The #{consumer.spec} spec is " \ + 'empty (no source files, resources, resource_bundles, ' \ + 'preserve paths, vendored_libraries, vendored_frameworks, ' \ + 'dependencies, nor subspecs).') + end + end + + private + + # Returns the own or inherited (if applicable) value of the + # given attribute. + # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + # + # @return [mixed] + # + def value_for_attribute(attribute) + if attribute.root_only? + consumer.spec.send(attribute.name) + else + consumer.send(attribute.name) if consumer.respond_to?(attribute.name) + end + rescue => e + results.add_error('attributes', "Unable to validate `#{attribute.name}` (#{e}).") + nil + end + + # Validates that root attributes don't occur in subspecs. + # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + + # @param [Object] value + # The value of the attribute. + # + def validate_attribute_occurrence(attribute, value) + if attribute.root_only? && !value.nil? && !consumer.spec.root? + results.add_error('attributes', "Can't set `#{attribute.name}` attribute for " \ + "subspecs (in `#{consumer.spec.name}`).") + end + if attribute.test_only? && !value.nil? && !consumer.spec.test_specification? + results.add_error('attributes', "Attribute `#{attribute.name}` can only be set " \ + "within test specs (in `#{consumer.spec.name}`).") + end + end + + # Validates the given value for the given attribute. 
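+ # Dispatches on the shape of `attribute.keys`: an Array is treated as a
+ # flat whitelist of keys, while a Hash describes a primary key with
+ # dependent sub-keys (as used by the `source` attribute).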
+ # + # @param [Spec::DSL::Attribute] attribute + # The attribute. + # + # @param [Object] value + # The value of the attribute. + # + def validate_attribute_value(attribute, value) + if attribute.keys.is_a?(Array) + validate_attribute_array_keys(attribute, value) + elsif attribute.keys.is_a?(Hash) + validate_attribute_hash_keys(attribute, value) + end + end + + def validate_attribute_type(attribute, value) + return unless value + types = attribute.supported_types + if types.none? { |klass| value.class == klass } + results.add_error('attributes', 'Unacceptable type ' \ + "`#{value.class}` for `#{attribute.name}`. Allowed values: `#{types.inspect}`.") + end + end + + def validate_attribute_array_keys(attribute, value) + unknown_keys = value.keys.map(&:to_s) - attribute.keys.map(&:to_s) + unknown_keys.each do |unknown_key| + results.add_warning('keys', "Unrecognized `#{unknown_key}` key for " \ + "`#{attribute.name}` attribute.") + end + end + + def validate_attribute_hash_keys(attribute, value) + unless value.is_a?(Hash) + results.add_error(attribute.name, "Unsupported type `#{value.class}`, expected `Hash`") + return + end + major_keys = value.keys & attribute.keys.keys + if major_keys.count.zero? + results.add_warning('keys', "Missing primary key for `#{attribute.name}` " \ + 'attribute. The acceptable ones are: ' \ + "`#{attribute.keys.keys.map(&:to_s).sort.join(', ')}`.") + elsif major_keys.count == 1 + acceptable = attribute.keys[major_keys.first] || [] + unknown = value.keys - major_keys - acceptable + unless unknown.empty? + results.add_warning('keys', "Incompatible `#{unknown.sort.join(', ')}` " \ + "key(s) with `#{major_keys.first}` primary key for " \ + "`#{attribute.name}` attribute.") + end + else + sorted_keys = major_keys.map(&:to_s).sort + results.add_warning('keys', "Incompatible `#{sorted_keys.join(', ')}` " \ + "keys for `#{attribute.name}` attribute.") + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/result.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/result.rb new file mode 100644 index 0000000..1843a97 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/linter/result.rb @@ -0,0 +1,128 @@ +module Pod + class Specification + class Linter + class Results + public + + class Result + # @return [Symbol] the type of result. + # + attr_reader :type + + # @return[String] the name of the attribute associated with result. + # + attr_reader :attribute_name + + # @return [String] the message associated with result. + # + attr_reader :message + + # @return [Boolean] whether the result only applies to public specs. + # + attr_reader :public_only + alias_method :public_only?, :public_only + + # @param [Symbol] type @see type + # @param [String] message @see message + # + def initialize(type, attribute_name, message, public_only = false) + @type = type + @attribute_name = attribute_name + @message = message + @public_only = public_only + @platforms = [] + end + + # @return [Array] the platforms where this result was + # generated. + # + attr_reader :platforms + + # @return [String] a string representation suitable for UI output. + # + def to_s + r = "[#{type.to_s.upcase}] [#{attribute_name}] #{message}" + if platforms != Specification::PLATFORMS + platforms_names = platforms.uniq.map do |p| + Platform.string_name(p) + end + r << " [#{platforms_names * ' - '}]" unless platforms.empty? 
+ end + r + end + end + + def initialize + @results = [] + @consumer = nil + end + + include Enumerable + + def each + results.each { |r| yield r } + end + + def empty? + results.empty? + end + + # @return [Specification::Consumer] the current consumer. + # + attr_accessor :consumer + + # Adds an error result with the given message. + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_error(attribute_name, message, public_only = false) + add_result(:error, attribute_name, message, public_only) + end + + # Adds a warning result with the given message. + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_warning(attribute_name, message, public_only = false) + add_result(:warning, attribute_name, message, public_only) + end + + private + + # @return [Array] all of the generated results. + # + attr_reader :results + + # Adds a result of the given type with the given message. If there is a + # current platform it is added to the result. If a result with the same + # type and the same message is already available the current platform is + # added to the existing result. + # + # @param [Symbol] type + # The type of the result (`:error`, `:warning`). + # + # @param [String] message + # The message of the result. + # + # @return [void] + # + def add_result(type, attribute_name, message, public_only) + result = results.find do |r| + r.type == type && r.attribute_name == attribute_name && r.message == message && r.public_only? == public_only + end + unless result + result = Result.new(type, attribute_name, message, public_only) + results << result + end + result.platforms << @consumer.platform_name if @consumer + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/root_attribute_accessors.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/root_attribute_accessors.rb new file mode 100644 index 0000000..705d8b1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/root_attribute_accessors.rb @@ -0,0 +1,226 @@ +module Pod + class Specification + module DSL + # Provides the accessors methods for the root attributes. Root attributes + # do not support multi-platform values and inheritance. + # + module RootAttributesAccessors + # @return [String] The name of the specification *not* including the + # names of the parents, in case of ‘sub-specifications’. + # + def base_name + attributes_hash['name'] + end + + # @return [String] The name of the specification including the names of + # the parents, in case of ‘sub-specifications’. + # + def name + parent ? "#{parent.name}/#{base_name}" : base_name + end + + # @return [Bool, String, Array] The requires_arc value. + # + def requires_arc + attributes_hash['requires_arc'] + end + + # @return [Version] The version of the Pod. + # + def version + if root? + @version ||= Version.new(attributes_hash['version']) + else + @version ||= root.version + end + end + + # @deprecated in favor of #swift_versions + # + # @return [Version] The Swift version specified by the specification. + # + def swift_version + swift_versions.last + end + + # @return [Array] The Swift versions supported by the specification. + # + def swift_versions + @swift_versions ||= begin + swift_versions = Array(attributes_hash['swift_versions']).dup + # Pre 1.7.0, the DSL was singularized as it supported only a single version of Swift. 
In 1.7.0 the DSL + # is now pluralized always and a specification can support multiple versions of Swift. This ensures + # we parse the old JSON serialized format and include it as part of the Swift versions supported. + swift_versions << attributes_hash['swift_version'] unless attributes_hash['swift_version'].nil? + swift_versions.map { |swift_version| Version.new(swift_version) }.uniq.sort + end + end + + # @return [Requirement] The CocoaPods version required to use the specification. + # + def cocoapods_version + @cocoapods_version ||= Requirement.create(attributes_hash['cocoapods_version']) + end + + # @return [Hash] a hash containing the authors as the keys and their + # email address as the values. + # + # @note The value is coerced to a hash with a nil email if needed. + # + # @example Possible values + # + # { 'Author' => 'email@host.com' } + # [ 'Author', { 'Author_2' => 'email@host.com' } ] + # [ 'Author', 'Author_2' ] + # 'Author' + # + def authors + authors = attributes_hash['authors'] + if authors.is_a?(Hash) + authors + elsif authors.is_a?(Array) + result = {} + authors.each do |name_or_hash| + if name_or_hash.is_a?(String) + result[name_or_hash] = nil + else + result.merge!(name_or_hash) + end + end + result + elsif authors.is_a?(String) + { authors => nil } + end + end + + # @return [String] The social media URL. + # + def social_media_url + attributes_hash['social_media_url'] + end + + # @return [String] The readme. + # + def readme + attributes_hash['readme'] + end + + # @return [String] The changelog. + # + def changelog + attributes_hash['changelog'] + end + + # @return [Hash] A hash containing the license information of the Pod. + # + # @note The indentation is stripped from the license text. + # + def license + license = attributes_hash['license'] + if license.is_a?(String) + { :type => license } + elsif license.is_a?(Hash) + license = Specification.convert_keys_to_symbol(license) + license[:text] = license[:text].strip_heredoc if license[:text] + license + else + {} + end + end + + # @return [String] The URL of the homepage of the Pod. + # + def homepage + attributes_hash['homepage'] + end + + # @return [Hash{Symbol=>String}] The location from where the library + # should be retrieved. + # + def source + value = attributes_hash['source'] + if value && value.is_a?(Hash) + Specification.convert_keys_to_symbol(value) + else + value + end + end + + # @return [String] A short description of the Pod. + # + def summary + summary = attributes_hash['summary'] + summary.strip_heredoc.chomp if summary + end + + # @return [String] A longer description of the Pod. + # + # @note The indentation is stripped from the description. + # + def description + description = attributes_hash['description'] + description.strip_heredoc.chomp if description + end + + # @return [Array] The list of the URL for the screenshots of + # the Pod. + # + # @note The value is coerced to an array. + # + def screenshots + value = attributes_hash['screenshots'] + [*value] + end + + # @return [String, Nil] The documentation URL of the Pod if specified. + # + def documentation_url + attributes_hash['documentation_url'] + end + + # @return [String, Nil] The prepare command of the Pod if specified. + # + def prepare_command + command = attributes_hash['prepare_command'] + command.strip_heredoc.chomp if command + end + + # @return [Boolean] Indicates, that if use_frameworks! is specified, the + # framework should include a static library. 
+        #
+        def static_framework
+          attributes_hash['static_framework']
+        end
+
+        # @return [Boolean] Whether the Pod has been deprecated.
+        #
+        def deprecated
+          attributes_hash['deprecated']
+        end
+
+        # @return [String] The name of the Pod that this one has been
+        #         deprecated in favor of.
+        #
+        def deprecated_in_favor_of
+          attributes_hash['deprecated_in_favor_of']
+        end
+
+        # @return [Boolean] Whether the Pod is deprecated, either in favor of
+        #         some other Pod or simply deprecated.
+        #
+        def deprecated?
+          deprecated || !deprecated_in_favor_of.nil?
+        end
+
+        # @return [String, Nil] The custom module map file of the Pod,
+        #         if specified.
+        #
+        def module_map
+          attributes_hash['module_map']
+        end
+
+        #---------------------------------------------------------------------#
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set.rb
new file mode 100644
index 0000000..397e60e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set.rb
@@ -0,0 +1,177 @@
+require 'active_support/core_ext/array/conversions'
+require 'cocoapods-core/specification/set/presenter'
+
+module Pod
+  class Specification
+    # A Specification::Set is responsible for handling all the specifications of
+    # a Pod. This class stores the information of the dependencies that required
+    # a Pod in the resolution process.
+    #
+    # @note The order in which the sets are provided is used to select a
+    #       specification if multiple are available for a given version.
+    #
+    # @note The set class is not, and should not be, aware of the backing store
+    #       of a Source.
+    #
+    class Set
+      # @return [String] the name of the Pod.
+      #
+      attr_reader :name
+
+      # @return [Array] the sources that contain the specifications for
+      #         the available versions of a Pod.
+      #
+      attr_reader :sources
+
+      # @param [String] name
+      #        the name of the Pod.
+      #
+      # @param [Array,Source] sources
+      #        the sources that contain a Pod.
+      #
+      def initialize(name, sources = [])
+        @name = name
+        @sources = Array(sources)
+      end
+
+      # @return [Specification] the top level specification of the Pod for the
+      #         {#required_version}.
+      #
+      # @note If multiple sources have a specification for the
+      #       {#required_version}, the order in which they are provided
+      #       is used to disambiguate.
+      #
+      def specification
+        unless highest_version_spec_path
+          raise Informative, "Could not find the highest version for `#{name}`. "\
+            "This could be due to an empty #{name} directory in a local repository."
+        end
+
+        Specification.from_file(highest_version_spec_path)
+      end
+
+      # @return [Specification] the top level specification for this set for any version.
+      #
+      def specification_name
+        versions_by_source.each do |source, versions|
+          next unless version = versions.first
+          return source.specification(name, version).name
+        end
+        nil
+      end
+
+      # @return [Array] the paths to specifications for the given
+      #         version
+      #
+      def specification_paths_for_version(version)
+        sources = @sources.select { |source| versions_by_source[source].include?(version) }
+        sources.map { |source| source.specification_path(name, version) }
+      end
+
+      # @return [Array] all the available versions for the Pod, sorted
+      #         from highest to lowest.
+      #
+      def versions
+        @versions ||= versions_by_source.values.flatten.uniq.sort.reverse
+      end
+
+      # @return [Version] The highest known version of the specification.
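+      #
+      # @example (hypothetical versions):
+      #
+      #   set.versions        # => [Version.new('1.6'), Version.new('1.3.3')]
+      #   set.highest_version # => Version.new('1.6')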
+ # + def highest_version + versions.first + end + + # @return [Pathname] The path of the highest version. + # + # @note If multiple sources have a specification for the + # {#required_version}, the order in which they are provided + # is used to disambiguate. + # + def highest_version_spec_path + @highest_version_spec_path ||= specification_paths_for_version(highest_version).first + end + + # @return [Hash{Source => Version}] all the available versions for the + # Pod grouped by source. + # + def versions_by_source + @versions_by_source ||= sources.each_with_object({}) do |source, result| + result[source] = source.versions(name) + end + end + + def ==(other) + self.class == other.class && + @name == other.name && + @sources.map(&:name) == other.sources.map(&:name) + end + + def to_s + "#<#{self.class.name} for `#{name}' available at `#{sources.map(&:name).join(', ')}'>" + end + alias_method :inspect, :to_s + + # Returns a hash representation of the set composed by dumb data types. + # + # @example + # + # "name" => "CocoaLumberjack", + # "versions" => { "master" => [ "1.6", "1.3.3"] }, + # "highest_version" => "1.6", + # "highest_version_spec" => 'REPO/CocoaLumberjack/1.6/CocoaLumberjack.podspec' + # + # @return [Hash] The hash representation. + # + def to_hash + versions = versions_by_source.reduce({}) do |memo, (source, version)| + memo[source.name] = version.map(&:to_s) + memo + end + { + 'name' => name, + 'versions' => versions, + 'highest_version' => highest_version.to_s, + 'highest_version_spec' => highest_version_spec_path.to_s, + } + end + + #-----------------------------------------------------------------------# + + # The Set::External class handles Pods from external sources. Pods from + # external sources don't use the {Source} and are initialized by a given + # specification. + # + # @note External sources *don't* support subspecs. + # + class External < Set + attr_reader :specification + + def initialize(spec) + @specification = spec.root + super(@specification.name) + end + + def ==(other) + self.class == other.class && specification == other.specification + end + + def versions + [specification.version] + end + end + + #-----------------------------------------------------------------------# + + # The Set::Head class handles Pods in head mode. Pods in head + # mode don't use the {Source} and are initialized by a given + # specification. + # + class Head < External + def initialize(spec) + super + specification.version.head = true + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set/presenter.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set/presenter.rb new file mode 100644 index 0000000..3f7dd58 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/specification/set/presenter.rb @@ -0,0 +1,203 @@ +require 'active_support/core_ext/array/conversions' + +module Pod + class Specification + class Set + # Provides support for presenting a Pod described by a {Set} in a + # consistent way across clients of CocoaPods-Core. + # + class Presenter + # @return [Set] the set that should be presented. + # + attr_reader :set + + # @param [Set] set @see #set. + # + def initialize(set) + @set = set + end + + #---------------------------------------------------------------------# + + # @!group Set Information + + # @return [String] the name of the Pod. 
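+        #
+        # @example (hypothetical set):
+        #
+        #   presenter.name # => 'CocoaLumberjack'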
+        #
+        def name
+          @set.name
+        end
+
+        # @return [Version] the highest version available for the Pod.
+        #
+        def version
+          @set.versions.first
+        end
+
+        # @return [Array] all the available versions, in descending
+        #         order.
+        #
+        def versions
+          @set.versions
+        end
+
+        # @return [String] all the versions available sorted from the highest
+        #         to the lowest.
+        #
+        # @example Return example
+        #
+        #   "1.5pre, 1.4 [master repo] - 1.4 [test_repo repo]"
+        #
+        # @note This method orders the sources by name.
+        #
+        def versions_by_source
+          result = []
+          versions_by_source = @set.versions_by_source
+          @set.sources.sort.each do |source|
+            versions = versions_by_source[source]
+            result << "#{versions.map(&:to_s) * ', '} [#{source.name} repo]"
+          end
+          result * ' - '
+        end
+
+        # @return [Array] The names of the sources that contain the Pod,
+        #         sorted alphabetically.
+        #
+        def sources
+          @set.sources.map(&:name).sort
+        end
+
+        #---------------------------------------------------------------------#
+
+        # @!group Specification Information
+
+        # @return [Specification] the specification of the {Set}. If no
+        #         version requirements were passed to the set it returns the
+        #         highest available version.
+        #
+        def spec
+          @spec ||= @set.specification
+        end
+
+        # @return [String] the list of the authors of the Pod in sentence
+        #         format.
+        #
+        # @example Output example
+        #
+        #   "Author 1, Author 2 and Author 3"
+        #
+        def authors
+          return '' unless spec.authors
+          spec.authors.keys.to_sentence
+        end
+
+        # @return [String] the homepage of the pod.
+        #
+        def homepage
+          spec.homepage
+        end
+
+        # @return [String] a short description of the Pod, expected to be 140
+        #         characters long.
+        #
+        def summary
+          spec.summary
+        end
+
+        # @return [String] the description of the Pod; if no description is
+        #         available the summary is returned.
+        #
+        def description
+          spec.description || spec.summary
+        end
+
+        # @return [String] A string that describes the deprecation of the pod.
+        #         If the pod is deprecated in favor of another pod it will contain
+        #         information about that. If the pod is not deprecated, returns nil.
+        #
+        # @example Output example
+        #
+        #   "[DEPRECATED]"
+        #   "[DEPRECATED in favor of NewAwesomePod]"
+        #
+        def deprecation_description
+          if spec.deprecated?
+            description = '[DEPRECATED'
+            description += if spec.deprecated_in_favor_of.nil?
+                             ']'
+                           else
+                             " in favor of #{spec.deprecated_in_favor_of}]"
+                           end
+
+            description
+          end
+        end
+
+        # @return [String] the URL of the source of the Pod.
+        #
+        def source_url
+          url_keys = [:git, :svn, :http, :hg, :path]
+          key = spec.source.keys.find { |k| url_keys.include?(k) }
+          key ? spec.source[key] : 'No source url'
+        end
+
+        # @return [String] the platforms supported by the Pod.
+        #
+        # @example
+        #
+        #   "iOS"
+        #   "iOS - OS X"
+        #
+        def platform
+          sorted_platforms = spec.available_platforms.sort do |a, b|
+            a.to_s.downcase <=> b.to_s.downcase
+          end
+          sorted_platforms.join(' - ')
+        end
+
+        # @return [String] the type of the license of the Pod.
+        #
+        # @example
+        #
+        #   "MIT"
+        #
+        def license
+          spec.license[:type] if spec.license
+        end
+
+        # @return [Array] an array containing all the subspecs of the Pod.
+        #
+        def subspecs
+          (spec.recursive_subspecs.any? && spec.recursive_subspecs) || nil
+        end
+
+        #---------------------------------------------------------------------#
+
+        # @!group Statistics
+
+        # @return [Integer] the GitHub stars of the repo of the Pod.
+        #
+        def github_stargazers
+          github_metrics['stargazers']
+        end
+
+        # @return [Integer] the GitHub forks of the repo of the Pod.
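+        #
+        # @example (hypothetical metrics):
+        #
+        #   presenter.github_forks # => 42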
+ # + def github_forks + github_metrics['forks'] + end + + #---------------------------------------------------------------------# + + # @!group Private Helpers + + def metrics + @metrics ||= Metrics.pod(name) || {} + end + + def github_metrics + metrics['github'] || {} + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/standard_error.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/standard_error.rb new file mode 100644 index 0000000..bdd9405 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/standard_error.rb @@ -0,0 +1,108 @@ +module Pod + # Namespaces all the errors raised by CocoaPods. + # + class StandardError < ::StandardError; end + + #-------------------------------------------------------------------------# + + # Wraps an exception raised by a DSL file in order to show to the user the + # contents of the line that raised the exception. + # + class DSLError < Informative + # @return [String] the description that should be presented to the user. + # + attr_reader :description + + # @return [String] the path of the dsl file that raised the exception. + # + attr_reader :dsl_path + + # @return [Exception] the exception raised by the + # evaluation of the dsl file. + # + attr_reader :underlying_exception + + # @param [Exception] underlying_exception @see underlying_exception + # @param [String] dsl_path @see dsl_path + # + def initialize(description, dsl_path, underlying_exception, contents = nil) + @description = description + @dsl_path = dsl_path + @underlying_exception = underlying_exception + @contents = contents + end + + # @return [String] the contents of the DSL that cause the exception to + # be raised. + # + def contents + @contents ||= begin + dsl_path && File.exist?(dsl_path) && File.read(dsl_path) + end + end + + # The message of the exception reports the content of podspec for the + # line that generated the original exception. + # + # @example Output + # + # Invalid podspec at `RestKit.podspec` - undefined method + # `exclude_header_search_paths=' for # + # + # from spec-repos/master/RestKit/0.9.3/RestKit.podspec:36 + # ------------------------------------------- + # # because it would break: #import + # > ns.exclude_header_search_paths = 'Code/RestKit.h' + # end + # ------------------------------------------- + # + # @return [String] the message of the exception. + # + def message + @message ||= begin + trace_line, description = parse_line_number_from_description + + m = "\n[!] #{description}.\n" + m = m.red if m.respond_to?(:red) + + backtrace = underlying_exception.backtrace + return m unless backtrace && dsl_path && contents + + trace_line = backtrace.find { |l| l.include?(dsl_path.to_s) } || trace_line + return m unless trace_line + line_numer = trace_line.split(':')[1].to_i - 1 + return m unless line_numer + + lines = contents.lines + indent = ' # ' + indicator = indent.tr('#', '>') + first_line = (line_numer.zero?) 
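+          # Only print the surrounding context lines when the offending line
+          # is not the first or the last line of the DSL file.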
+ last_line = (line_numer == (lines.count - 1)) + + m << "\n" + m << "#{indent}from #{trace_line.gsub(/:in.*$/, '')}\n" + m << "#{indent}-------------------------------------------\n" + m << "#{indent}#{lines[line_numer - 1]}" unless first_line + m << "#{indicator}#{lines[line_numer]}" + m << "#{indent}#{lines[line_numer + 1]}" unless last_line + m << "\n" unless m.end_with?("\n") + m << "#{indent}-------------------------------------------\n" + end + end + + private + + def parse_line_number_from_description + description = self.description + if dsl_path && description =~ /((#{Regexp.quote File.expand_path(dsl_path)}|#{Regexp.quote dsl_path.to_s}):\d+)/ + trace_line = Regexp.last_match[1] + description = description.sub(/#{Regexp.quote trace_line}:\s*/, '') + if description =~ /^\s*\^\z/ + description = description.lines[0..-3].join.chomp + end + end + [trace_line, description] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/trunk_source.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/trunk_source.rb new file mode 100644 index 0000000..ce7a268 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/trunk_source.rb @@ -0,0 +1,14 @@ +module Pod + class TrunkSource < CDNSource + # On-disk master repo name + TRUNK_REPO_NAME = 'trunk'.freeze + + # Remote CDN repo URL + TRUNK_REPO_URL = 'https://cdn.cocoapods.org/'.freeze + + def url + @url ||= TRUNK_REPO_URL + super + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor.rb new file mode 100644 index 0000000..b1656b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor.rb @@ -0,0 +1,50 @@ +module Pod + # Namespaces the vendored modules. + # + module Vendor + # Namespaces the classes of RubyGems used by CocoaPods. + # + # CocoaPods needs to vendor RubyGems because OS X ships with `v1.3.6` which + # has a couple of bugs related to the comparison of pre-release versions. + # + # E.g. https://github.com/CocoaPods/CocoaPods/issues/398 + # + # The following classes are copied from RubyGems `v2.6.3`. The changes + # performed to the source files are the following: + # + # - Namespaced in `Pod::Vendor` + # - commented all the `require` calls + # - replaced `::Gem` with `Pod::Vendor::Gem` + # + module Gem + require 'cocoapods-core/vendor/version' + require 'cocoapods-core/vendor/requirement' + + #-----------------------------------------------------------------------# + # RubyGems License # + # https://github.com/rubygems/rubygems/blob/master/MIT.txt # + #-----------------------------------------------------------------------# + + # Copyright (c) Chad Fowler, Rich Kilmer, Jim Weirich and others. + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # 'Software'), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. 
+ # + # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/requirement.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/requirement.rb new file mode 100644 index 0000000..d9ee99f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/requirement.rb @@ -0,0 +1,288 @@ +# frozen_string_literal: true +module Pod::Vendor + + # require "rubygems/version" + # require "rubygems/deprecate" + + # If we're being loaded after yaml was already required, then + # load our yaml + workarounds now. + # Gem.load_yaml if defined? ::YAML + + ## + # A Requirement is a set of one or more version restrictions. It supports a + # few (=, !=, >, <, >=, <=, ~>) different restriction operators. + # + # See Gem::Version for a description on how versions and requirements work + # together in RubyGems. + + class Gem::Requirement + OPS = { #:nodoc: + "=" => lambda { |v, r| v == r }, + "!=" => lambda { |v, r| v != r }, + ">" => lambda { |v, r| v > r }, + "<" => lambda { |v, r| v < r }, + ">=" => lambda { |v, r| v >= r }, + "<=" => lambda { |v, r| v <= r }, + "~>" => lambda { |v, r| v >= r && v.release < r.bump } + } + + SOURCE_SET_REQUIREMENT = Struct.new(:for_lockfile).new "!" # :nodoc: + + quoted = OPS.keys.map { |k| Regexp.quote k }.join "|" + PATTERN_RAW = "\\s*(#{quoted})?\\s*(#{Gem::Version::VERSION_PATTERN})\\s*" # :nodoc: + + ## + # A regular expression that matches a requirement + + PATTERN = /\A#{PATTERN_RAW}\z/ + + ## + # The default requirement matches any version + + DefaultRequirement = [">=", Gem::Version.new(0)] + + ## + # Raised when a bad requirement is encountered + + class BadRequirementError < ArgumentError; end + + ## + # Factory method to create a Gem::Requirement object. Input may be + # a Version, a String, or nil. Intended to simplify client code. + # + # If the input is "weird", the default version requirement is + # returned. + + def self.create input + case input + when Gem::Requirement then + input + when Gem::Version, Array then + new input + when '!' then + source_set + else + if input.respond_to? :to_str then + new [input.to_str] + else + default + end + end + end + + ## + # A default "version requirement" can surely _only_ be '>= 0'. + + def self.default + new '>= 0' + end + + ### + # A source set requirement, used for Gemfiles and lockfiles + + def self.source_set # :nodoc: + SOURCE_SET_REQUIREMENT + end + + ## + # Parse +obj+, returning an [op, version] pair. +obj+ can + # be a String or a Gem::Version. + # + # If +obj+ is a String, it can be either a full requirement + # specification, like ">= 1.2", or a simple version number, + # like "1.2". 
+ # + # parse("> 1.0") # => [">", Gem::Version.new("1.0")] + # parse("1.0") # => ["=", Gem::Version.new("1.0")] + # parse(Gem::Version.new("1.0")) # => ["=, Gem::Version.new("1.0")] + + def self.parse obj + return ["=", obj] if Gem::Version === obj + + unless PATTERN =~ obj.to_s + raise BadRequirementError, "Illformed requirement [#{obj.inspect}]" + end + + if $1 == ">=" && $2 == "0" + DefaultRequirement + else + [$1 || "=", Gem::Version.new($2)] + end + end + + ## + # An array of requirement pairs. The first element of the pair is + # the op, and the second is the Gem::Version. + + attr_reader :requirements #:nodoc: + + ## + # Constructs a requirement from +requirements+. Requirements can be + # Strings, Gem::Versions, or Arrays of those. +nil+ and duplicate + # requirements are ignored. An empty set of +requirements+ is the + # same as ">= 0". + + def initialize *requirements + requirements = requirements.flatten + requirements.compact! + requirements.uniq! + + if requirements.empty? + @requirements = [DefaultRequirement] + else + @requirements = requirements.map! { |r| self.class.parse r } + end + end + + ## + # Concatenates the +new+ requirements onto this requirement. + + def concat new + new = new.flatten + new.compact! + new.uniq! + new = new.map { |r| self.class.parse r } + + @requirements.concat new + end + + ## + # Formats this requirement for use in a Gem::RequestSet::Lockfile. + + def for_lockfile # :nodoc: + return if [DefaultRequirement] == @requirements + + list = requirements.sort_by { |_, version| + version + }.map { |op, version| + "#{op} #{version}" + }.uniq + + " (#{list.join ', '})" + end + + ## + # true if this gem has no requirements. + + def none? + if @requirements.size == 1 + @requirements[0] == DefaultRequirement + else + false + end + end + + ## + # true if the requirement is for only an exact version + + def exact? + return false unless @requirements.size == 1 + @requirements[0][0] == "=" + end + + def as_list # :nodoc: + requirements.map { |op, version| "#{op} #{version}" }.sort + end + + def hash # :nodoc: + requirements.sort.hash + end + + def marshal_dump # :nodoc: + fix_syck_default_key_in_requirements + + [@requirements] + end + + def marshal_load array # :nodoc: + @requirements = array[0] + + fix_syck_default_key_in_requirements + end + + def yaml_initialize(tag, vals) # :nodoc: + vals.each do |ivar, val| + instance_variable_set "@#{ivar}", val + end + + Gem.load_yaml + fix_syck_default_key_in_requirements + end + + def init_with coder # :nodoc: + yaml_initialize coder.tag, coder.map + end + + def to_yaml_properties # :nodoc: + ["@requirements"] + end + + def encode_with coder # :nodoc: + coder.add 'requirements', @requirements + end + + ## + # A requirement is a prerelease if any of the versions inside of it + # are prereleases + + def prerelease? + requirements.any? { |r| r.last.prerelease? } + end + + def pretty_print q # :nodoc: + q.group 1, 'Gem::Requirement.new(', ')' do + q.pp as_list + end + end + + ## + # True if +version+ satisfies this Requirement. + + def satisfied_by? version + raise ArgumentError, "Need a Gem::Version: #{version.inspect}" unless + Gem::Version === version + # #28965: syck has a bug with unquoted '=' YAML.loading as YAML::DefaultKey + requirements.all? { |op, rv| (OPS[op] || OPS["="]).call version, rv } + end + + alias :=== :satisfied_by? + alias :=~ :satisfied_by? + + ## + # True if the requirement will not always match the latest version. + + def specific? 
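+      # Anything with more than one clause, or whose single clause is not an
+      # open-ended `>`/`>=` constraint, can rule out some future version.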
+ return true if @requirements.length > 1 # GIGO, > 1, > 2 is silly + + not %w[> >=].include? @requirements.first.first # grab the operator + end + + def to_s # :nodoc: + as_list.join ", " + end + + def == other # :nodoc: + Gem::Requirement === other and to_s == other.to_s + end + + private + + def fix_syck_default_key_in_requirements # :nodoc: + Gem.load_yaml + + # Fixup the Syck DefaultKey bug + @requirements.each do |r| + if r[0].kind_of? Gem::SyckDefaultKey + r[0] = "=" + end + end + end + end + + class Gem::Version + # This is needed for compatibility with older yaml + # gemspecs. + + Requirement = Gem::Requirement # :nodoc: + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/version.rb new file mode 100644 index 0000000..3e454ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/vendor/version.rb @@ -0,0 +1,377 @@ +# frozen_string_literal: true +module Pod::Vendor + + ## + # The Version class processes string versions into comparable + # values. A version string should normally be a series of numbers + # separated by periods. Each part (digits separated by periods) is + # considered its own number, and these are used for sorting. So for + # instance, 3.10 sorts higher than 3.2 because ten is greater than + # two. + # + # If any part contains letters (currently only a-z are supported) then + # that version is considered prerelease. Versions with a prerelease + # part in the Nth part sort less than versions with N-1 + # parts. Prerelease parts are sorted alphabetically using the normal + # Ruby string sorting rules. If a prerelease part contains both + # letters and numbers, it will be broken into multiple parts to + # provide expected sort behavior (1.0.a10 becomes 1.0.a.10, and is + # greater than 1.0.a9). + # + # Prereleases sort between real releases (newest to oldest): + # + # 1. 1.0 + # 2. 1.0.b1 + # 3. 1.0.a.2 + # 4. 0.9 + # + # If you want to specify a version restriction that includes both prereleases + # and regular releases of the 1.x series this is the best way: + # + # s.add_dependency 'example', '>= 1.0.0.a', '< 2.0.0' + # + # == How Software Changes + # + # Users expect to be able to specify a version constraint that gives them + # some reasonable expectation that new versions of a library will work with + # their software if the version constraint is true, and not work with their + # software if the version constraint is false. In other words, the perfect + # system will accept all compatible versions of the library and reject all + # incompatible versions. + # + # Libraries change in 3 ways (well, more than 3, but stay focused here!). + # + # 1. The change may be an implementation detail only and have no effect on + # the client software. + # 2. The change may add new features, but do so in a way that client software + # written to an earlier version is still compatible. + # 3. The change may change the public interface of the library in such a way + # that old software is no longer compatible. + # + # Some examples are appropriate at this point. Suppose I have a Stack class + # that supports a push and a pop method. + # + # === Examples of Category 1 changes: + # + # * Switch from an array based implementation to a linked-list based + # implementation. + # * Provide an automatic (and transparent) backing store for large stacks. 
+  #
+  # === Examples of Category 2 changes might be:
+  #
+  # * Add a depth method to return the current depth of the stack.
+  # * Add a top method that returns the current top of stack (without
+  #   changing the stack).
+  # * Change push so that it returns the item pushed (previously it
+  #   had no usable return value).
+  #
+  # === Examples of Category 3 changes might be:
+  #
+  # * Changes pop so that it no longer returns a value (you must use
+  #   top to get the top of the stack).
+  # * Rename the methods to push_item and pop_item.
+  #
+  # == RubyGems Rational Versioning
+  #
+  # * Versions shall be represented by three non-negative integers, separated
+  #   by periods (e.g. 3.1.4). The first integer is the "major" version
+  #   number, the second integer is the "minor" version number, and the third
+  #   integer is the "build" number.
+  #
+  # * A category 1 change (implementation detail) will increment the build
+  #   number.
+  #
+  # * A category 2 change (backwards compatible) will increment the minor
+  #   version number and reset the build number.
+  #
+  # * A category 3 change (incompatible) will increment the major version
+  #   number and reset the minor and build numbers.
+  #
+  # * Any "public" release of a gem should have a different version. Normally
+  #   that means incrementing the build number. This means a developer can
+  #   generate builds all day long, but as soon as they make a public release,
+  #   the version must be updated.
+  #
+  # === Examples
+  #
+  # Let's work through a project lifecycle using our Stack example from above.
+  #
+  # Version 0.0.1:: The initial Stack class is released.
+  # Version 0.0.2:: Switched to a linked-list implementation because it is
+  #                 cooler.
+  # Version 0.1.0:: Added a depth method.
+  # Version 1.0.0:: Added top and made pop return nil
+  #                 (pop used to return the old top item).
+  # Version 1.1.0:: push now returns the value pushed (it used to
+  #                 return nil).
+  # Version 1.1.1:: Fixed a bug in the linked list implementation.
+  # Version 1.1.2:: Fixed a bug introduced in the last fix.
+  #
+  # Client A needs a stack with basic push/pop capability. They write to the
+  # original interface (no top), so their version constraint looks like:
+  #
+  #   gem 'stack', '>= 0.0'
+  #
+  # Essentially, any version is OK with Client A. An incompatible change to
+  # the library will cause them grief, but they are willing to take the chance
+  # (we call Client A optimistic).
+  #
+  # Client B is just like Client A except for two things: (1) They use the
+  # depth method and (2) they are worried about future
+  # incompatibilities, so they write their version constraint like this:
+  #
+  #   gem 'stack', '~> 0.1'
+  #
+  # The depth method was introduced in version 0.1.0, so that version
+  # or anything later is fine, as long as the version stays below version 1.0
+  # where incompatibilities are introduced. We call Client B pessimistic
+  # because they are worried about incompatible future changes (it is OK to be
+  # pessimistic!).
+  #
+  # == Preventing Version Catastrophe:
+  #
+  # From: http://blog.zenspider.com/2008/10/rubygems-howto-preventing-cata.html
+  #
+  # Let's say you're depending on the fnord gem version 2.y.z. If you
+  # specify your dependency as ">= 2.0.0" then you're good, right? What
+  # happens if fnord 3.0 comes out and it isn't backwards compatible
+  # with 2.y.z? Your stuff will break as a result of using ">=". The
+  # better route is to specify your dependency with an "approximate" version
+  # specifier ("~>").
They're a tad confusing, so here is how the dependency + # specifiers work: + # + # Specification From ... To (exclusive) + # ">= 3.0" 3.0 ... ∞ + # "~> 3.0" 3.0 ... 4.0 + # "~> 3.0.0" 3.0.0 ... 3.1 + # "~> 3.5" 3.5 ... 4.0 + # "~> 3.5.0" 3.5.0 ... 3.6 + # "~> 3" 3.0 ... 4.0 + # + # For the last example, single-digit versions are automatically extended with + # a zero to give a sensible result. + + class Gem::Version + autoload :Requirement, 'rubygems/requirement' + + include Comparable + + VERSION_PATTERN = '[0-9]+(?>\.[0-9a-zA-Z]+)*(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?' # :nodoc: + ANCHORED_VERSION_PATTERN = /\A\s*(#{VERSION_PATTERN})?\s*\z/ # :nodoc: + + ## + # A string representation of this Version. + + def version + @version.dup + end + + alias to_s version + + ## + # True if the +version+ string matches RubyGems' requirements. + + def self.correct? version + version.to_s =~ ANCHORED_VERSION_PATTERN + end + + ## + # Factory method to create a Version object. Input may be a Version + # or a String. Intended to simplify client code. + # + # ver1 = Version.create('1.3.17') # -> (Version object) + # ver2 = Version.create(ver1) # -> (ver1) + # ver3 = Version.create(nil) # -> nil + + def self.create input + if self === input then # check yourself before you wreck yourself + input + elsif input.nil? then + nil + else + new input + end + end + + @@all = {} + + def self.new version # :nodoc: + return super unless Gem::Version == self + + @@all[version] ||= super + end + + ## + # Constructs a Version from the +version+ string. A version string is a + # series of digits or ASCII letters separated by dots. + + def initialize version + raise ArgumentError, "Malformed version number string #{version}" unless + self.class.correct?(version) + + @version = version.to_s.strip.gsub("-",".pre.") + @segments = nil + end + + ## + # Return a new version object where the next to the last revision + # number is one greater (e.g., 5.3.1 => 5.4). + # + # Pre-release (alpha) parts, e.g, 5.3.1.b.2 => 5.4, are ignored. + + def bump + @bump ||= begin + segments = self.segments + segments.pop while segments.any? { |s| String === s } + segments.pop if segments.size > 1 + + segments[-1] = segments[-1].succ + self.class.new segments.join(".") + end + end + + ## + # A Version is only eql? to another version if it's specified to the + # same precision. Version "1.0" is not the same as version "1". + + def eql? other + self.class === other and @version == other._version + end + + def hash # :nodoc: + @version.hash + end + + def init_with coder # :nodoc: + yaml_initialize coder.tag, coder.map + end + + def inspect # :nodoc: + "#<#{self.class} #{version.inspect}>" + end + + ## + # Dump only the raw version string, not the complete object. It's a + # string for backwards (RubyGems 1.3.5 and earlier) compatibility. + + def marshal_dump + [version] + end + + ## + # Load custom marshal format. It's a string for backwards (RubyGems + # 1.3.5 and earlier) compatibility. + + def marshal_load array + initialize array[0] + end + + def yaml_initialize(tag, map) # :nodoc: + @version = map['version'] + @segments = nil + @hash = nil + end + + def to_yaml_properties # :nodoc: + ["@version"] + end + + def encode_with coder # :nodoc: + coder.add 'version', @version + end + + ## + # A version is considered a prerelease if it contains a letter. + + def prerelease? + unless instance_variable_defined? 
:@prerelease + @prerelease = !!(@version =~ /[a-zA-Z]/) + end + @prerelease + end + + def pretty_print q # :nodoc: + q.text "Gem::Version.new(#{version.inspect})" + end + + ## + # The release for this version (e.g. 1.2.0.a -> 1.2.0). + # Non-prerelease versions return themselves. + + def release + @release ||= if prerelease? + segments = self.segments + segments.pop while segments.any? { |s| String === s } + self.class.new segments.join('.') + else + self + end + end + + def segments # :nodoc: + _segments.dup + end + + ## + # A recommended version for use with a ~> Requirement. + + def approximate_recommendation + segments = self.segments + + segments.pop while segments.any? { |s| String === s } + segments.pop while segments.size > 2 + segments.push 0 while segments.size < 2 + + "~> #{segments.join(".")}" + end + + ## + # Compares this version with +other+ returning -1, 0, or 1 if the + # other version is larger, the same, or smaller than this + # one. Attempts to compare to something that's not a + # Gem::Version return +nil+. + + def <=> other + return unless Gem::Version === other + return 0 if @version == other._version + + lhsegments = _segments + rhsegments = other._segments + + lhsize = lhsegments.size + rhsize = rhsegments.size + limit = (lhsize > rhsize ? lhsize : rhsize) - 1 + + i = 0 + + while i <= limit + lhs, rhs = lhsegments[i] || 0, rhsegments[i] || 0 + i += 1 + + next if lhs == rhs + return -1 if String === lhs && Numeric === rhs + return 1 if Numeric === lhs && String === rhs + + return lhs <=> rhs + end + + return 0 + end + + protected + + def _version + @version + end + + def _segments + # segments is lazy so it can pick up version values that come from + # old marshaled versions, which don't go through marshal_load. + # since this version object is cached in @@all, its @segments should be frozen + + @segments ||= @version.scan(/[0-9]+|[a-z]+/i).map do |s| + /^\d+$/ =~ s ? s.to_i : s + end.freeze + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/version.rb new file mode 100644 index 0000000..b65b3ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/version.rb @@ -0,0 +1,239 @@ +module Pod + # The Version class stores information about the version of a + # {Specification}. + # + # It is based on the RubyGems class adapted to support head information. + # + # ### From RubyGems: + # + # The Version class processes string versions into comparable + # values. A version string should normally be a series of numbers + # separated by periods. Each part (digits separated by periods) is + # considered its own number, and these are used for sorting. So for + # instance, 3.10 sorts higher than 3.2 because ten is greater than + # two. + # + # If any part contains letters (currently only a-z are supported) then + # that version is considered prerelease. Versions with a prerelease + # part in the Nth part sort less than versions with N-1 + # parts. Prerelease parts are sorted alphabetically using the normal + # Ruby string sorting rules. If a prerelease part contains both + # letters and numbers, it will be broken into multiple parts to + # provide expected sort behavior (1.0.a10 becomes 1.0.a.10, and is + # greater than 1.0.a9). + # + # Prereleases sort between real releases (newest to oldest): + # + # 1. 1.0 + # 2. 1.0.b1 + # 3. 1.0.a.2 + # 4. 
0.9 + # + class Version < Pod::Vendor::Gem::Version + # Override the constants defined by the superclass to add: + # - Semantic Versioning prerelease support (with a dash). E.g.: 1.0.0-alpha1 + # - Semantic Versioning metadata support (with a +) E.g: 1.0.0+96ef7ed + # + # For more info, see: http://semver.org + # + METADATA_PATTERN = '(\+[0-9a-zA-Z\-\.]+)' + VERSION_PATTERN = "[0-9]+(\\.[0-9a-zA-Z\\-]+)*#{METADATA_PATTERN}?" + ANCHORED_VERSION_PATTERN = /\A\s*(#{VERSION_PATTERN})*\s*\z/ + + # @param [String,Version] version + # A string representing a version, or another version. + # + def initialize(version) + raise ArgumentError, "Malformed version number string #{version}" unless + self.class.correct?(version) + + @version = version.to_s.strip + end + + # An instance that represents version 0. + # + ZERO = new('0') + + # @return [String] a string representation suitable for debugging. + # + def inspect + "<#{self.class} version=#{version}>" + end + + # @return [Boolean] indicates whether or not the version is a prerelease. + # + # @note Prerelease Pods can contain a hyphen and/or a letter (conforms to + # Semantic Versioning instead of RubyGems). + # + # For more info, see: http://semver.org + # + def prerelease? + return @prerelease if defined?(@prerelease) + comparable_version = @version.sub(/#{METADATA_PATTERN}$/, '') + @prerelease = comparable_version =~ /[a-zA-Z\-]/ + end + + # @return [Boolean] Whether a string representation is correct. + # + def self.correct?(version) + version.to_s =~ ANCHORED_VERSION_PATTERN + end + + #-------------------------------------------------------------------------# + + # @!group Semantic Versioning + + SEMVER_PATTERN = "[0-9]+(\\.[0-9]+(\\.[0-9]+(-[0-9A-Za-z\\-\\.]+)?#{METADATA_PATTERN}?)?)?" + ANCHORED_SEMANTIC_VERSION_PATTERN = /\A\s*(#{SEMVER_PATTERN})*\s*\z/ + + # @return [Boolean] Whether the version conforms to the Semantic Versioning + # specification (2.0.0-rc.1). + # + # @note This comparison is lenient. + # + # @note It doesn't support build identifiers. + # + def semantic? + version.to_s =~ ANCHORED_SEMANTIC_VERSION_PATTERN + end + + # @return [Fixnum] The semver major identifier. + # + def major + numeric_segments[0].to_i + end + + # @return [Fixnum] The semver minor identifier. + # + def minor + numeric_segments[1].to_i + end + + # @return [Fixnum] The semver patch identifier. + # + def patch + numeric_segments[2].to_i + end + + # Compares the versions for sorting. + # + # @param [Version] other + # The other version to compare. + # + # @return [Fixnum] -1, 0, or +1 depending on whether the receiver is less + # than, equal to, or greater than other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def <=>(other) + comparison = compare_segments(other) + comparison == 0 ? version <=> other.version : comparison + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is equal to other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def ==(other) + compare_segments(other) == 0 + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is greater than or equal to other. 
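+    #
+    # @example (a pre-release sorts before its release):
+    #
+    #   Version.new('1.2.0') >= Version.new('1.2.0-beta.1') # => true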
+ # + # @note Attempts to compare something that's not a {Version} return nil + # + def >=(other) + comparison = compare_segments(other) + comparison >= 0 + end + + # @private + # + # Compares the versions for equality. + # + # @param [Version] other + # The other version to compare. + # + # @return [Boolean] whether the receiver is less than or equal to other. + # + # @note Attempts to compare something that's not a {Version} return nil + # + def <=(other) + comparison = compare_segments(other) + comparison <= 0 + end + + protected + + # This overrides the Gem::Version implementation of `_segments` to drop the + # metadata from comparisons as per http://semver.org/#spec-item-10 + # + def _segments + # segments is lazy so it can pick up version values that come from + # old marshaled versions, which don't go through marshal_load. + # since this version object is cached in @@all, its @segments should be frozen + + @segments ||= @version.sub(/#{METADATA_PATTERN}$/, '').scan(/[0-9]+|[a-z]+/i).map do |s| + /^\d+$/ =~ s ? s.to_i : s + end.freeze + end + + def numeric_segments + @numeric_segments ||= segments.take_while { |s| s.is_a?(Numeric) }.reverse_each.drop_while { |s| s == 0 }.reverse + end + + def prerelease_segments + @prerelease_segments ||= segments.drop_while { |s| s.is_a?(Numeric) } + end + + def compare_segments(other) + return unless other.is_a?(Pod::Version) + return 0 if @version == other.version + + compare = proc do |segments, other_segments, is_pre_release| + limit = [segments.size, other_segments.size].max + + 0.upto(limit) do |i| + lhs = segments[i] + rhs = other_segments[i] + + next if lhs == rhs + # If it's pre-release and the first segment, then + # this is a special case because a segment missing + # means that one is not a pre-release version + if is_pre_release && i == 0 + return 1 if lhs.nil? + return -1 if rhs.nil? + else + return -1 if lhs.nil? + return 1 if rhs.nil? + end + + if comparison = lhs <=> rhs + return comparison + end + end + end + + compare[numeric_segments, other.numeric_segments, false] + compare[prerelease_segments, other.prerelease_segments, true] + 0 + end + + #-------------------------------------------------------------------------# + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/yaml_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/yaml_helper.rb new file mode 100644 index 0000000..287f05e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-core-1.12.1/lib/cocoapods-core/yaml_helper.rb @@ -0,0 +1,323 @@ +require 'yaml' + +module Pod + # Converts objects to their YAML representation. + # + # This class was created for the need having control on how the YAML is + # representation is generated. In details it provides: + # + # - sorting for hashes in ruby 1.8.x + # - ability to hint the sorting of the keys of a dictionary when converting + # it. In this case the keys are also separated by an additional new line + # feed for readability. + # + # @note This class misses important features necessary for a correct YAML + # serialization and thus it is safe to use only for the Lockfile. + # The missing features include: + # - Strings are never quoted even when ambiguous. + # + # @todo Remove any code required solely for Ruby 1.8.x. + # + class YAMLHelper + class << self + # Returns the YAML representation of the given object. If the given object + # is a Hash, it accepts an optional hint for sorting the keys. 
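+      #
+      # @example (illustrative):
+      #
+      #   YAMLHelper.convert(%w(b a)) # => "- a\n- b\n"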
+ # + # @param [String, Symbol, Array, Hash] object + # the object to convert + # + # @param [Array] hash_keys_hint + # an array to use as a hint for sorting the keys of the object to + # convert if it is a hash. + # + # @return [String] the YAML representation of the given object. + # + def convert(value) + result = process_according_to_class(value) + result << "\n" + end + + def convert_hash(value, hash_keys_hint, line_separator = "\n") + result = process_hash(value, hash_keys_hint, line_separator) + result << "\n" + end + + # Loads a YAML string and provide more informative + # error messages in special cases like merge conflict. + # + # @param [String] yaml_string + # The YAML String to be loaded + # + # @param [Pathname] file_path + # The (optional) file path to be used for read for the YAML file + # + # @return [Hash, Array] the Ruby YAML representaton + # + def load_string(yaml_string, file_path = nil) + YAML.load(yaml_string) + rescue + if yaml_has_merge_error?(yaml_string) + raise Informative, yaml_merge_conflict_msg(yaml_string, file_path) + else + raise Informative, yaml_parsing_error_msg(yaml_string, file_path) + end + end + + # Loads a YAML file and leans on the #load_string imp + # to do error detection + # + # @param [Pathname] file_path + # The file path to be used for read for the YAML file + # + # @return [Hash, Array] the Ruby YAML representaton + # + def load_file(file_path) + load_string(File.read(file_path), file_path) + end + + #-----------------------------------------------------------------------# + + private + + # Implementation notes: + # + # - each of the methods returns a YAML partial without an ending new + # line. + # - if a partial needs to be indented is responsibility of the method + # using it. + # + # --- + + # @!group Private Helpers + + # @return [String] the YAML representation of the given object. + # + def process_according_to_class(value, hash_keys_hint = nil) + case value + when Array then process_array(value) + when Hash then process_hash(value, hash_keys_hint) + when String then process_string(value) + else YAML.dump(value, :line_width => 2**31 - 1).sub(/\A---/, '').sub(/[.]{3}\s*\Z/, '') + end.strip + end + + # Converts an array to YAML after sorting it. + # + # @param [Array] array + # the array to convert. + # + # @return [String] the YAML representation of the given object. + # + def process_array(array) + return '[]' if array.empty? + + result = sorted_array(array).map do |array_value| + processed = process_according_to_class(array_value) + case array_value + when Array, Hash + if array_value.size > 1 + processed = processed.gsub(/^.*/).to_a + head = processed.shift + processed.map { |s| " #{s}" }.prepend(head).join("\n") + else + processed + end + else + processed + end + end + "- #{result.join("\n- ").strip}" + end + + # Converts a hash to YAML after sorting its keys. Optionally accepts a + # hint for sorting the keys. + # + # @note If a hint for sorting the keys is provided the array is assumed + # to be the root object and the keys are separated by an + # additional new line feed for readability. + # + # @note If the value of a given key is a String it displayed inline, + # otherwise it is displayed below and indented. + # + # @param [Hash] hash + # the hash to convert. + # + # @return [String] the YAML representation of the given object. + # + def process_hash(hash, hash_keys_hint = nil, line_separator = "\n") + return '{}' if hash.empty? 
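+
+        # Order the keys (honouring the optional hint), then emit one
+        # `key: value` line per entry, indenting nested collections.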
+        keys = sorted_array_with_hint(hash.keys, hash_keys_hint)
+        key_lines = keys.map do |key|
+          key_value = hash[key]
+          processed = process_according_to_class(key_value)
+          processed_key = process_according_to_class(key)
+          case key_value
+          when Hash, Array
+            key_partial_yaml = processed.lines.map { |line| "  #{line}" } * ''
+            "#{processed_key}:\n#{key_partial_yaml}"
+          else
+            "#{processed_key}: #{processed}"
+          end
+        end
+        key_lines * line_separator
+      end
+
+      # Check for merge errors in a YAML string.
+      #
+      # @param [String] yaml_string
+      #        A YAML string to evaluate
+      #
+      # @return [Boolean] Whether a merge error was detected.
+      #
+      def yaml_has_merge_error?(yaml_string)
+        yaml_string.include?('<<<<<<< HEAD')
+      end
+
+      # Error message describing that a merge conflict was found
+      # while parsing the YAML.
+      #
+      # @param [String] yaml
+      #        Offending YAML
+      #
+      # @param [Pathname] path
+      #        The (optional) offending path
+      #
+      # @return [String] The Error Message
+      #
+      def yaml_merge_conflict_msg(yaml, path = nil)
+        err = 'ERROR: Parsing unable to continue due '
+        err += "to merge conflicts present in:\n"
+        err += "the file located at #{path}\n" if path
+        err + "#{yaml}"
+      end
+
+      # Error message describing a general error that happened
+      # while parsing the YAML.
+      #
+      # @param [String] yaml
+      #        Offending YAML
+      #
+      # @param [Pathname] path
+      #        The (optional) offending path
+      #
+      # @return [String] The Error Message
+      #
+      def yaml_parsing_error_msg(yaml, path = nil)
+        err = 'ERROR: Parsing unable to continue due '
+        err += "to parsing error:\n"
+        err += "contained in the file located at #{path}\n" if path
+        err + "#{yaml}"
+      end
+
+      #-----------------------------------------------------------------------#
+
+      # @!group Array Sorting
+
+      # Sorts an array using another one as a sort hint. All the values of the
+      # hint which appear in the array will be returned respecting the order in
+      # the hint. If any other key is present in the original array they are
+      # sorted using the {#sorted_array} method.
+      #
+      # @param [Array] array
+      #        The array which needs to be sorted.
+      #
+      # @param [Array] sort_hint
+      #        The array which should be used to sort the keys.
+      #
+      # @return [Array] The sorted Array.
+      #
+      def sorted_array_with_hint(array, sort_hint)
+        if sort_hint
+          hinted = sort_hint & array
+          remaining = array - sort_hint
+          hinted + sorted_array(remaining)
+        else
+          sorted_array(array)
+        end
+      end
+
+      public
+
+      # Sorts an array according to the string representation of its values.
+      # This method allows sorting arrays which contain strings or hashes.
+      #
+      # @note If the value contained in the array is another Array or a Hash
+      #       the first value of the collection is used for sorting, as this
+      #       method is more useful for arrays which contain a collection
+      #       composed of one object.
+      #
+      # @todo This stuff is here only because the Lockfile intermixes strings
+      #       and hashes for the `PODS` key. The Lockfile should be more
+      #       consistent.
+      #
+      # @return [Array] The sorted array.
+      #
+      def sorted_array(array)
+        array.each_with_index.sort_by do |element, index|
+          [sorting_string(element), index]
+        end.map(&:first)
+      end
+
+      private
+
+      # Returns the string representation of a value useful for sorting.
+      #
+      # @param [String, Symbol, Array, Hash] value
+      #        The value which needs to be sorted
+      #
+      # @return [String] A string useful to compare the value with other ones.
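+      #
+      # @example
+      #
+      #   sorting_string(:Foo)          # => "foo"
+      #   sorting_string('Zed' => nil)  # => "zed"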
+ # + def sorting_string(value) + return '' unless value + case value + when String then value.downcase + when Symbol then sorting_string(value.to_s) + when Array then sorting_string(value.first) + when Hash then value.keys.map { |key| key.to_s.downcase }.sort.first + else raise ArgumentError, "Cannot sort #{value.inspect}" + end + end + + RESOLVED_TAGS = Regexp.union( + 'null', 'Null', 'NULL', '~', '', # resolve to null + 'true', 'True', 'TRUE', 'false', 'False', 'FALSE', # bool + 'yes', 'Yes', 'YES', 'no', 'No', 'NO', # yes/no + 'on', 'On', 'ON', 'off', 'Off', 'OFF', # no/off + /[-+]?[0-9]+/, # base 10 int + /00[0-7]+/, # base 8 int + /0x[0-9a-fA-F]+/, # base 16 int + /[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?/, # float + /[-+]?\.(inf|Inf|INF)/, # infinity + /\.(nan|NaN|NAN)/ # NaN + ) + private_constant :RESOLVED_TAGS + + INDICATOR_START_CHARS = %w(- ? : , [ ] { } # & * ! | > ' " % @ `).freeze + INDICATOR_START = /\A#{Regexp.union(INDICATOR_START_CHARS)}/.freeze + private_constant :INDICATOR_START_CHARS, :INDICATOR_START + + RESOLVED_TAGS_PATTERN = /\A#{Regexp.union(RESOLVED_TAGS)}\z/.freeze + private_constant :RESOLVED_TAGS_PATTERN + + VALID_PLAIN_SCALAR_STRING = %r{\A + [\w&&[^#{INDICATOR_START_CHARS}]] # valid first character + [\w/\ \(\)~<>=\.:`,-]* # all characters allowed after the first one + \z}ox.freeze + private_constant :VALID_PLAIN_SCALAR_STRING + + def process_string(string) + case string + when RESOLVED_TAGS_PATTERN + "'#{string}'" + when /\A\s*\z/, INDICATOR_START, /:\z/ + string.inspect + when VALID_PLAIN_SCALAR_STRING + string + else + string.inspect + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/LICENSE new file mode 100644 index 0000000..5a7b88b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015 Kyle Fuller + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/README.md new file mode 100644 index 0000000..1fde3ed --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/README.md @@ -0,0 +1,56 @@ +# cocoapods-deintegrate + +A CocoaPods plugin to remove and deintegrate CocoaPods from your project. +Removing all traces of CocoaPods from an Xcode project. 
+
+## Installation
+
+```bash
+$ [sudo] gem install cocoapods-deintegrate
+```
+
+## Usage
+
+Running `pod deintegrate` will deintegrate your Xcode project from
+CocoaPods. Before running you should ensure you have a backup of your project.
+
+```bash
+$ pod deintegrate
+Deintegrating Palaver.xcodeproj
+Deintegrating target Palaver
+Deleted 1 'Copy Pods Resources' build phases.
+Deleted 1 'Check Pods Manifest.lock' build phases.
+Removing Pod libraries from build phase:
+- libPods-Palaver.a
+Deleting Pod file references from project
+- libPods-Palaver.a
+- libPods-PalaverTests.a
+- Pods-Palaver.debug.xcconfig
+- Pods-Palaver.release.xcconfig
+- Pods-Palaver.ad hoc.xcconfig
+- Pods-PalaverTests.debug.xcconfig
+- Pods-PalaverTests.release.xcconfig
+- Pods-PalaverTests.ad hoc.xcconfig
+Deleted 1 `Pod` groups from project.
+Deintegrating target PalaverTests
+Deleted 1 'Copy Pods Resources' build phases.
+Deleted 1 'Check Pods Manifest.lock' build phases.
+
+Project has been deintegrated. No traces of CocoaPods left in project.
+Note: The workspace referencing the Pods project still remains.
+```
+
+The only things that will remain are as follows:
+
+- Podfile, Podfile.lock
+- Workspace
+
+### Credits
+
+This CocoaPods plugin was created by [Kyle Fuller](http://kylefuller.co.uk/)
+([@kylefuller](https://twitter.com/kylefuller)).
+
+### License
+
+cocoapods-deintegrate is released under the MIT license. See [LICENSE](LICENSE).
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb
new file mode 100644
index 0000000..d792a13
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/command/deintegrate.rb
@@ -0,0 +1,53 @@
+module Pod
+  class Command
+    # @CocoaPods 1.0.0.beta.1
+    #
+    class Deintegrate < Command
+      include ProjectDirectory
+
+      self.summary = 'Deintegrate CocoaPods from your project'
+      self.description = <<-DESC
+        Deintegrate your project from CocoaPods, removing all traces
+        of CocoaPods from your Xcode project.
+
+        If no xcodeproj is specified, then a search for an Xcode project
+        will be made in the current directory.
+      DESC
+      self.arguments = [
+        CLAide::Argument.new('XCODE_PROJECT', false),
+      ]
+
+      def initialize(argv)
+        path = argv.shift_argument
+        @project_path = Pathname.new(path) if path
+        super
+      end
+
+      def validate!
+        super
+
+        unless @project_path
+          xcodeprojs = Pathname.glob('*.xcodeproj')
+          @project_path = xcodeprojs.first if xcodeprojs.size == 1
+        end
+
+        help! 'A valid Xcode project file is required.' unless @project_path
+        help! "#{@project_path} does not exist." unless @project_path.exist?
+        unless @project_path.directory? && (@project_path + 'project.pbxproj').exist?
+          help! "#{@project_path} is not a valid Xcode project."
+        end
+
+        @project = Xcodeproj::Project.open(@project_path)
+      end
+
+      def run
+        # We don't traverse a Podfile and try to de-integrate each target.
+        # Instead, we're just deintegrating anything CP could have done to a
+        # project. This is so that it will clean stale and modified projects.
+ deintegrator = Deintegrator.new + deintegrator.deintegrate_project(@project) + @project.save + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb new file mode 100644 index 0000000..e77e334 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrate/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsDeintegrate + VERSION = '1.0.5'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb new file mode 100644 index 0000000..e3827dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods/deintegrator.rb @@ -0,0 +1,149 @@ +module Pod + class Deintegrator + include Config::Mixin + + FRAMEWORK_NAMES = /^(libPods.*\.a)|(Pods.*\.framework)$/i + XCCONFIG_NAMES = /^Pods.*\.xcconfig$/i + + def deintegrate_project(project) + UI.section("Deintegrating #{UI.path project.path}") do + project.native_targets.each do |target| + deintegrate_target(target) + end + end + + delete_pods_file_references(project) + remove_sandbox + + UI.puts + UI.puts('Project has been deintegrated. No traces of CocoaPods left in project.'.green) + UI.puts('Note: The workspace referencing the Pods project still remains.') + end + + def deintegrate_target(target) + UI.section("Deintegrating target `#{target.name}`") do + deintegrate_shell_script_phase(target, 'Copy Pods Resources') + deintegrate_shell_script_phase(target, 'Check Pods Manifest.lock') + deintegrate_shell_script_phase(target, 'Embed Pods Frameworks') + deintegrate_user_shell_script_phases(target) + deintegrate_pods_libraries(target) + deintegrate_configuration_file_references(target) + end + end + + def remove_sandbox + pods_directory = config.sandbox.root + if pods_directory.exist? + UI.puts("Removing #{UI.path pods_directory} directory.") + pods_directory.rmtree + end + end + + def deintegrate_pods_libraries(target) + # `frameworks_build_phases` returns but does not automatically create this build phase + # when we are deintegrating. It's a bit of a weird API, but that's what Xcodeproj gives + # us. + frameworks_build_phase = target.frameworks_build_phases + return if frameworks_build_phase.nil? + + pods_build_files = frameworks_build_phase.files.select do |build_file| + build_file.display_name =~ FRAMEWORK_NAMES + end + + unless pods_build_files.empty? + UI.section('Removing Pod libraries from build phase:') do + pods_build_files.each do |build_file| + UI.puts("- #{build_file.display_name}") + if build_file.file_ref.build_files.count == 1 + build_file.file_ref.remove_from_project + end + frameworks_build_phase.remove_build_file(build_file) + end + end + end + end + + def deintegrate_user_shell_script_phases(target) + user_script_phases = target.shell_script_build_phases.select do |phase| + next unless phase.name + phase.name.start_with?('[CP-User] ') + end + + unless user_script_phases.empty? + user_script_phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{user_script_phases.count} user build phases.") + end + end + + def deintegrate_shell_script_phase(target, phase_name) + phases = target.shell_script_build_phases.select do |phase| + phase.name && phase.name =~ /#{Regexp.escape(phase_name)}\z$/ + end + + unless phases.empty?
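+ # Drop each matching phase from the target; the removal is only + # persisted once the caller saves the project.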
+ phases.each do |phase| + target.build_phases.delete(phase) + end + + UI.puts("Deleted #{phases.count} '#{phase_name}' build phases.") + end + end + + def delete_empty_group(project, group_name) + groups = project.main_group.recursive_children_groups.select do |group| + group.name == group_name && group.children.empty? + end + + unless groups.empty? + groups.each(&:remove_from_project) + UI.puts "Deleted #{groups.count} empty `#{group_name}` groups from project." + end + end + + def deintegrate_configuration_file_references(target) + config_files = target.build_configurations.map do |config| + config_file = config.base_configuration_reference + config_file if config_file && config_file.name =~ XCCONFIG_NAMES + end.compact + unless config_files.empty? + UI.section('Deleting configuration file references') do + config_files.each do |file_reference| + UI.puts("- #{file_reference.name}") + file_reference.remove_from_project + end + end + end + end + + def delete_pods_file_references(project) + # The following implementation covers both files and empty groups, so it + # should catch cases where a user has changed the structure manually. + + groups = project.main_group.recursive_children_groups + groups << project.main_group + + pod_files = groups.flat_map do |group| + group.files.select do |obj| + obj.name =~ XCCONFIG_NAMES || + obj.path =~ /^(libPods.*\.a)|(Pods_.*\.framework)$/i + end + end + + unless pod_files.empty? + UI.section('Deleting Pod file references from project') do + pod_files.each do |file_reference| + UI.puts("- #{file_reference.name || file_reference.path}") + file_reference.remove_from_project + end + end + end + + # Delete the empty `Pods` and `Frameworks` groups if they exist + delete_empty_group(project, 'Pods') + delete_empty_group(project, 'Frameworks') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb new file mode 100644 index 0000000..f20a484 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_deintegrate.rb @@ -0,0 +1,2 @@ +require 'cocoapods/deintegrate/gem_version' +require 'cocoapods/deintegrator' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..8049ee2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-deintegrate-1.0.5/lib/cocoapods_plugin.rb @@ -0,0 +1,4 @@ +module Pod + require 'cocoapods_deintegrate' + require 'cocoapods/command/deintegrate' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/LICENSE new file mode 100644 index 0000000..7c1df53 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2011 - 2012 Eloy Durán +Copyright (c) 2012 Fabio Pelosin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies
or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/README.markdown b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/README.markdown new file mode 100644 index 0000000..5d93a5a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/README.markdown @@ -0,0 +1,81 @@ +# Downloader + +A small library for downloading files from remotes into a folder. + +[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/CocoaPods-Downloader/Spec)](https://github.com/CocoaPods/cocoapods-downloader/actions) +[![Gem Version](https://img.shields.io/gem/v/cocoapods-downloader)](https://rubygems.org/gems/cocoapods-downloader) +[![Maintainability](https://api.codeclimate.com/v1/badges/2253ffb0c2c98e4d1c71/maintainability)](https://codeclimate.com/github/CocoaPods/cocoapods-downloader/maintainability) + +## Install + +``` +$ [sudo] gem install cocoapods-downloader +``` + +## Usage + +```ruby +require 'cocoapods-downloader' + +target_path = './Downloads/MyDownload' +options = { :git => 'example.com' } +options = Pod::Downloader.preprocess_options(options) +downloader = Pod::Downloader.for_target(target_path, options) +downloader.cache_root = '~/Library/Caches/APPNAME' +downloader.max_cache_size = 500 +downloader.download +downloader.checkout_options #=> { :git => 'example.com', :commit => 'd7f410490dabf7a6bde665ba22da102c3acf1bd9' } +``` + +The downloader class supports the following option keys: + +- git: commit, tag, branch, submodules +- svn: revision, tag, folder, externals +- hg: revision, tag, branch +- http: type, flatten +- scp: type, flatten +- bzr: revision, tag + +The downloader also provides hooks which allow you to customize its output or the way in which the commands are executed: + +```ruby +require 'cocoapods-downloader' + +module Pod + module Downloader + class Base + + override_api do + def self.execute_command(executable, command, raise_on_failure = false) + puts "Will download" + super + end + + def self.ui_action(ui_message) + puts ui_message.green + yield + end + end + + end + end +end +``` + +## Extraction + +This gem was extracted from [CocoaPods](https://github.com/CocoaPods/CocoaPods). Refer also to that repository for its history and contributors. + +## Collaborate + +All CocoaPods development happens on GitHub; there is a repository for [CocoaPods](https://github.com/CocoaPods/CocoaPods) and one for the [CocoaPods specs](https://github.com/CocoaPods/Specs). Contributing patches or Pods is really easy and gratifying, and for a lot of people it is their first time contributing. + +Follow [@CocoaPods](http://twitter.com/CocoaPods) to get up to date information about what's going on in the CocoaPods world. + +## Development + +You need to have `svn`, `bzr`, `hg` and `git` installed to run the specs. Some specs require `hdiutil` and will only run on macOS. + +## License + +This gem and CocoaPods are available under the MIT license.
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader.rb new file mode 100644 index 0000000..0dd759a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader.rb @@ -0,0 +1,102 @@ +module Pod + module Downloader + require 'cocoapods-downloader/gem_version' + require 'cocoapods-downloader/api' + require 'cocoapods-downloader/api_exposable' + require 'cocoapods-downloader/base' + + autoload :Bazaar, 'cocoapods-downloader/bazaar' + autoload :Git, 'cocoapods-downloader/git' + autoload :Http, 'cocoapods-downloader/http' + autoload :Mercurial, 'cocoapods-downloader/mercurial' + autoload :Scp, 'cocoapods-downloader/scp' + autoload :Subversion, 'cocoapods-downloader/subversion' + + # Denotes the error generated by a Downloader + # + class DownloaderError < StandardError; end + + # @return [Hash{Symbol=>Class}] The concrete classes of the supported + # strategies by key. + # + def self.downloader_class_by_key + { + :bzr => Bazaar, + :git => Git, + :hg => Mercurial, + :http => Http, + :scp => Scp, + :svn => Subversion, + } + end + + # Identifies the concrete strategy for the given options. + # + # @param [Hash{Symbol}] options + # The options for which a strategy is needed. + # + # @return [Symbol] The symbol associated with a concrete strategy. + # @return [Nil] If no suitable concrete strategy could be selected. + # + def self.strategy_from_options(options) + common = downloader_class_by_key.keys & options.keys + if common.count == 1 + common.first + end + end + + # @return [Downloader::Base] A concrete downloader according to the + # options. + # + def self.for_target(target_path, options) + options = options_to_sym(options) + + if target_path.nil? + raise DownloaderError, 'No target path provided.' + end + + strategy, klass = class_for_options(options) + + url = options[strategy] + sub_options = options.dup + sub_options.delete(strategy) + + klass.new(target_path, url, sub_options) + end + + # Have the concrete strategy preprocess the options. + # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options = options_to_sym(options) + + _, klass = class_for_options(options) + klass.preprocess_options(options) + end + + private_class_method + + def self.options_to_sym(options) + Hash[options.map { |k, v| [k.to_sym, v] }] + end + + def self.class_for_options(options) + if options.nil? || options.empty? + raise DownloaderError, 'No source URL provided.' + end + + strategy = strategy_from_options(options) + unless strategy + raise DownloaderError, 'Unsupported download strategy ' \ + "`#{options.inspect}`." + end + + # Explicit return for multiple params, rubocop thinks it's useless but it's not + return strategy, downloader_class_by_key[strategy] # rubocop:disable Style/RedundantReturn + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api.rb new file mode 100644 index 0000000..9375f10 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api.rb @@ -0,0 +1,73 @@ +module Pod + module Downloader + # The Downloader::API module allows adapting the Downloader to + # the UI of other gems.
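+ # + # A hedged sketch following the README's override pattern (the log + # prefix below is purely illustrative): + # + # Pod::Downloader::Git.override_api do + # def ui_message(message) + # puts "[downloader] #{message}" + # end + # end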
+ # + module API + # Executes the given command. + # @return [String] the output of the command. + # + def execute_command(executable, command, raise_on_failure = false) + require 'shellwords' + command = command.map(&:to_s).map(&:shellescape).join(' ') + output = `#{executable} #{command} 2>&1` + check_exit_code!(executable, command, output) if raise_on_failure + puts output + output + end + + # Checks if the just executed command completed successfully. + # + # @raise If the command failed. + # + # @return [void] + # + def check_exit_code!(executable, command, output) + if $?.exitstatus != 0 + raise DownloaderError, "Error on `#{executable} #{command}`.\n#{output}" + end + end + + # Indicates that an action will be performed. The action is passed as a + # block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_action(message) + puts message + yield + end + + # Indicates that a minor action will be performed. The action is passed as + # a block. + # + # @param [String] message + # The message associated with the action. + # + # @yield The action, this block is always executed. + # + # @return [void] + # + def ui_sub_action(message) + puts message + yield + end + + # Prints a UI message. + # + # @param [String] message + # The message associated with the action. + # + # @return [void] + # + def ui_message(message) + puts message + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api_exposable.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api_exposable.rb new file mode 100644 index 0000000..62066db --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/api_exposable.rb @@ -0,0 +1,23 @@ +module Pod + module Downloader + module APIExposable + def expose_api(mod = nil, &block) + if mod.nil? + if block.nil? + raise "Either a module or a block that's used to create a module is required." + else + mod = Module.new(&block) + end + elsif mod && block + raise 'Only a module *or* a block is required, not both.' + end + include mod + # TODO: Try to find a nicer way to do this + # See https://github.com/CocoaPods/cocoapods-downloader/pull/57 + extend mod + end + + alias override_api expose_api + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/base.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/base.rb new file mode 100644 index 0000000..c7f54b1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/base.rb @@ -0,0 +1,194 @@ +require 'shellwords' + +class Pathname + # @return [String] a version of the path that is escaped to be safe to use in + # a shell. + def shellescape + to_s.shellescape + end +end + +module Pod + module Downloader + # The base class defines the common behaviour of the downloaders. + # + # @abstract Subclass and implement {#download}. + # + # @private + # + class Base + extend APIExposable + expose_api API + + # @abstract Override in subclasses. + # + # @return [Array] the options accepted by the concrete class. + # + def self.options + [] + end + + # @return [Pathname] the destination folder for the download. + # + attr_reader :target_path + + # @return [String] the url of the remote source.
+ # + attr_reader :url + + # @return [Hash={Symbol=>String}] options specific to each concrete + # downloader. + # + attr_reader :options + + # @param [String, Pathname] target_path @see target_path + # @param [String] url @see url + # @param [Hash={Symbol=>String}] options @see options + # + def initialize(target_path, url, options) + require 'pathname' + @target_path = Pathname.new(target_path) + @url = url + @options = options + + unrecognized_options = options.keys - self.class.options + unless unrecognized_options.empty? + raise DownloaderError, "Unrecognized options `#{unrecognized_options}`" + end + end + + # @return [String] the name of the downloader. + # + # @example Downloader::Mercurial name + # + # "Mercurial" + # + def name + self.class.name.split('::').last + end + + #-----------------------------------------------------------------------# + + # @!group Downloading + + # Downloads the revision specified in the option of a source. If no + # revision is specified it falls back to {#download_head}. + # + # @return [void] + # + def download + validate_input + ui_action("#{name} download") do + target_path.mkpath + download! + end + end + + # Downloads the head revision of a source. + # + # @todo Spec for raise. + # + # @return [void] + # + def download_head + ui_action("#{name} HEAD download") do + if head_supported? + download_head! + else + raise DownloaderError, "The `#{name}` downloader does not support " \ + 'the HEAD option.' + end + end + end + + # @return [Bool] Whether the downloader supports the head download + # strategy. + # + def head_supported? + respond_to?(:download_head!, true) + end + + # @return [Bool] Whether the options provided completely identify a source + # or could lead to the download of different files in the future. + # + def options_specific? + true + end + + # @return [Hash{Symbol=>String}] The options that would allow + # re-downloading the exact files. + # + def checkout_options + raise 'Abstract method' + end + + # Provides a before-download check for safety of the options in the + # concrete downloader. + # + # @return [void] + # + def validate_input + end + + # Returns a User-Agent string that identifies HTTP network requests as + # originating from CocoaPods. + # Contains version numbers from the CocoaPods Gem and the cocoapods-downloader Gem. + # + # @param [module] base_module The Base CocoaPods Module to retrieve the version number from. + # @return [String] the User-Agent string. + # + def self.user_agent_string(base_module = Pod) + pods_version = base_module.const_defined?('VERSION') ? "CocoaPods/#{base_module::VERSION} " : '' + "#{pods_version}cocoapods-downloader/#{Pod::Downloader::VERSION}" + end + + #-----------------------------------------------------------------------# + + # Defines two methods for an executable, based on its name. The bang + # version raises if the executable terminates with a non-zero exit code. + # + # For example + # + # executable :git + # + # generates + # + # def git(command) + # execute_command("git", command, false) + # end + + # + # def git!(command) + # execute_command("git", command, true) + # end + + # + # @param [Symbol] name + # the name of the executable. + # + # @return [void] + # + def self.executable(name) + define_method(name) do |*command| + execute_command(name.to_s, command.flatten, false) + end + + define_method(name.to_s + '!') do |*command| + execute_command(name.to_s, command.flatten, true) + end + end + + # Preprocess download options. + # + # Usage of this method is optional.
Concrete strategies should not + # assume options are preprocessed for correct execution. + # + # @param [Hash] options + # The request options to preprocess + # + # @return [Hash] the new options + # + def self.preprocess_options(options) + options + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/bazaar.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/bazaar.rb new file mode 100644 index 0000000..dee53db --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/bazaar.rb @@ -0,0 +1,60 @@ +module Pod + module Downloader + class Bazaar < Base + def self.options + [:revision, :tag] + end + + def options_specific? + !options[:revision].nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:bzr] = url + options[:revision] = `bzr revno`.chomp + options + end + end + + private + + # @group Private Helpers + #-----------------------------------------------------------------------# + + executable :bzr + + def download! + if options[:tag] + download_revision!(options[:tag]) + elsif options[:revision] + download_revision!(options[:revision]) + else + download_head! + end + end + + def download_head! + bzr! 'branch', url, *dir_opts, target_path + end + + def download_revision!(rev) + bzr! 'branch', url, *dir_opts, '-r', rev, @target_path + end + + # @return [String] The command line flags to use according to whether the + # target path exists. + # + def dir_opts + if @target_path.exist? + %w(--use-existing-dir) + else + [] + end + end + + #-----------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/gem_version.rb new file mode 100644 index 0000000..1381e16 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/gem_version.rb @@ -0,0 +1,8 @@ +module Pod + module Downloader + # @return [String] Downloader’s version, following + # [semver](http://semver.org). + # + VERSION = '1.6.3'.freeze + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/git.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/git.rb new file mode 100644 index 0000000..a247644 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/git.rb @@ -0,0 +1,170 @@ +module Pod + module Downloader + # Concrete Downloader class that provides support for specifications with + # git sources. + # + class Git < Base + def self.options + [:commit, :tag, :branch, :submodules] + end + + def options_specific? + !(options[:commit] || options[:tag]).nil? + end + + def checkout_options + options = {} + options[:git] = url + options[:commit] = target_git('rev-parse', 'HEAD').chomp + options[:submodules] = true if self.options[:submodules] + options + end + + def self.preprocess_options(options) + return options unless options[:branch] + + input = [options[:git], options[:commit]].map(&:to_s) + invalid = input.compact.any? { |value| value.start_with?('--') || value.include?(' --') } + raise DownloaderError, "Provided unsafe input for git #{options}."
if invalid + + command = ['ls-remote', + '--', + options[:git], + options[:branch]] + + output = Git.execute_command('git', command) + match = commit_from_ls_remote output, options[:branch] + + return options if match.nil? + + options[:commit] = match + options.delete(:branch) + + options + end + + # Matches a commit from the branches reported by git ls-remote. + # + # @note When there is a branch and tag with the same name, it will match + # the branch, since `refs/heads` is sorted before `refs/tags`. + # + # @param [String] output + # The output from git ls-remote. + # + # @param [String] branch_name + # The desired branch to match a commit to. + # + # @return [String] commit hash string, or nil if no match found + # + def self.commit_from_ls_remote(output, branch_name) + return nil if branch_name.nil? + encoded_branch_name = branch_name.dup.force_encoding(Encoding::ASCII_8BIT) + match = %r{([a-z0-9]*)\trefs\/(heads|tags)\/#{Regexp.quote(encoded_branch_name)}}.match(output) + match[1] unless match.nil? + end + + private_class_method :commit_from_ls_remote + + private + + # @!group Base class hooks + + def download! + clone + checkout_commit if options[:commit] + end + + # @return [void] Checks out the HEAD of the git source in the destination + # path. + # + def download_head! + clone(true) + end + + # @!group Download implementations + + executable :git + + # Clones the repo. If possible the repo will be shallowly cloned. + # + # @note The `:commit` option requires a specific strategy as it is not + # possible to specify the commit to the `clone` command. + # + # @note `--branch` command line option can also take tags and detaches + # the HEAD. + # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + def clone(force_head = false, shallow_clone = true) + ui_sub_action('Git download') do + begin + git! clone_arguments(force_head, shallow_clone) + update_submodules + rescue DownloaderError => e + if e.message =~ /^fatal:.*does not support (--depth|shallow capabilities)$/im + clone(force_head, false) + else + raise + end + end + end + end + + def update_submodules + return unless options[:submodules] + target_git %w(submodule update --init --recursive) + end + + # The arguments to pass to `git` to clone the repo. + # + # @param [Bool] force_head + # If any specific option should be ignored and the HEAD of the + # repo should be cloned. + # + # @param [Bool] shallow_clone + # Whether a shallow clone of the repo should be attempted, if + # possible given the specified {#options}. + # + # @return [Array] arguments to pass to `git` to clone the repo. + # + def clone_arguments(force_head, shallow_clone) + command = ['clone', url, target_path, '--template='] + + if shallow_clone && !options[:commit] + command += %w(--single-branch --depth 1) + end + + unless force_head + if tag_or_branch = options[:tag] || options[:branch] + command += ['--branch', tag_or_branch] + end + end + + command + end + + # Checks out a specific commit of the cloned repo. + # + def checkout_commit + target_git 'checkout', '--quiet', options[:commit] + update_submodules + end + + def target_git(*args) + git!(['-C', target_path] + args) + end + + def validate_input + input = [url, options[:branch], options[:commit], options[:tag]].map(&:to_s) + invalid = input.compact.any? 
{ |value| value.start_with?('--') || value.include?(' --') } + raise DownloaderError, "Provided unsafe input for git #{options}." if invalid + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/http.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/http.rb new file mode 100644 index 0000000..90e13dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/http.rb @@ -0,0 +1,34 @@ +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Http < RemoteFile + USER_AGENT_HEADER = 'User-Agent'.freeze + + private + + executable :curl + + def download_file(full_filename) + parameters = ['-f', '-L', '-o', full_filename, url, '--create-dirs', '--netrc-optional', '--retry', '2'] + parameters << user_agent_argument if headers.nil? || + headers.none? { |header| header.casecmp(USER_AGENT_HEADER).zero? } + + headers.each do |h| + parameters << '-H' + parameters << h + end unless headers.nil? + + curl! parameters + end + + # Returns a cURL command flag to add the CocoaPods User-Agent. + # + # @return [String] cURL command -A flag and User-Agent. + # + def user_agent_argument + "-A '#{Http.user_agent_string}'" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/mercurial.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/mercurial.rb new file mode 100644 index 0000000..5582383 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/mercurial.rb @@ -0,0 +1,60 @@ +module Pod + module Downloader + class Mercurial < Base + def self.options + [:revision, :tag, :branch] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:hg] = url + options[:revision] = `hg --debug id -i`.chomp + options + end + end + + private + + executable :hg + + def download! + if options[:revision] + download_revision! + elsif options[:tag] + download_tag! + elsif options[:branch] + download_branch! + else + download_head! + end + end + + def download_head! + hg! 'clone', url, @target_path + end + + def download_revision! + hg! 'clone', url, '--rev', options[:revision], @target_path + end + + def download_tag! + hg! 'clone', url, '--updaterev', options[:tag], @target_path + end + + def download_branch! + hg! 'clone', url, '--updaterev', options[:branch], @target_path + end + + def validate_input + input = [url, options[:revision], options[:branch], options[:tag]].map(&:to_s) + invalid = input.compact.any? { |value| value.start_with?('--') || value.include?(' --') } + raise DownloaderError, "Provided unsafe input for hg #{options}." 
+ if invalid + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/remote_file.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/remote_file.rb new file mode 100644 index 0000000..3388342 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/remote_file.rb @@ -0,0 +1,176 @@ +require 'fileutils' +require 'uri' +require 'zlib' + +module Pod + module Downloader + class RemoteFile < Base + def self.options + [:type, :flatten, :sha1, :sha256, :headers] + end + + class UnsupportedFileTypeError < StandardError; end + + private + + executable :unzip + executable :tar + executable :hdiutil + + attr_accessor :filename, :download_path + + def download! + @filename = filename_with_type(type) + @download_path = target_path + @filename + download_file(@download_path) + verify_checksum(@download_path) + extract_with_type(@download_path, type) + end + + def type + if options[:type] + options[:type].to_sym + else + type_with_url(url) + end + end + + def headers + options[:headers] + end + + # @note The archive is flattened if it contains only one folder and its + # extension is either `tgz`, `tar`, `tbz`, `txz`, or the options + # specify it. + # + # @return [Bool] Whether the archive should be flattened if it contains + # only one folder. + # + def should_flatten? + if options.key?(:flatten) + options[:flatten] + elsif [:tgz, :tar, :tbz, :txz].include?(type) + true # those archives flatten by default + else + false # all others (.zip and .dmg) default not to flatten + end + end + + def type_with_url(url) + case URI.parse(url).path + when /\.zip$/ + :zip + when /\.(tgz|tar\.gz)$/ + :tgz + when /\.tar$/ + :tar + when /\.(tbz|tar\.bz2)$/ + :tbz + when /\.(txz|tar\.xz)$/ + :txz + when /\.dmg$/ + :dmg + end + end + + def filename_with_type(type = :zip) + case type + when :zip, :tgz, :tar, :tbz, :txz, :dmg + "file.#{type}" + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + end + + def download_file(_full_filename) + raise NotImplementedError + end + + def extract_with_type(full_filename, type = :zip) + unpack_from = full_filename + unpack_to = @target_path + + case type + when :zip + unzip! unpack_from, '-d', unpack_to + when :tar, :tgz, :tbz, :txz + tar! 'xf', unpack_from, '-C', unpack_to + when :dmg + extract_dmg(unpack_from, unpack_to) + else + raise UnsupportedFileTypeError, "Unsupported file type: #{type}" + end + + # If the archive is a tarball and it only contained a folder, move its + # contents to the target (#727) + # + if should_flatten? + contents = target_path.children + contents.delete(target_path + @filename) + entry = contents.first + if contents.count == 1 && entry.directory? + tmp_entry = entry.sub_ext("#{entry.extname}.tmp") + begin + FileUtils.move(entry, tmp_entry) + FileUtils.move(tmp_entry.children, target_path) + ensure + FileUtils.remove_entry(tmp_entry) + end + end + end + + FileUtils.rm(unpack_from) if File.exist?(unpack_from) + end + + def extract_dmg(unpack_from, unpack_to) + require 'rexml/document' + plist_s = hdiutil! 'attach', '-plist', '-nobrowse', unpack_from, '-mountrandom', unpack_to + plist = REXML::Document.new plist_s + xpath = '//key[.="mount-point"]/following-sibling::string' + mount_point = REXML::XPath.first(plist, xpath).text + FileUtils.cp_r(Dir.glob(mount_point + '/*'), unpack_to) + hdiutil!
'detach', mount_point + end + + def compare_hash(filename, hasher, hash) + incremental_hash = hasher.new + + File.open(filename, 'rb') do |file| + buf = '' + incremental_hash << buf while file.read(1024, buf) + end + + computed_hash = incremental_hash.hexdigest + + if computed_hash != hash + raise DownloaderError, 'Verification checksum was incorrect, ' \ + "expected #{hash}, got #{computed_hash}" + end + end + + # Verify that the downloaded file matches a sha1 hash + # + def verify_sha1_hash(filename, hash) + require 'digest/sha1' + compare_hash(filename, Digest::SHA1, hash) + end + + # Verify that the downloaded file matches a sha256 hash + # + def verify_sha256_hash(filename, hash) + require 'digest/sha2' + compare_hash(filename, Digest::SHA2, hash) + end + + # Verify that the downloaded file matches the hash if set + # + def verify_checksum(filename) + if options[:sha256] + verify_sha256_hash(filename, options[:sha256]) + elsif options[:sha1] + verify_sha1_hash(filename, options[:sha1]) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/scp.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/scp.rb new file mode 100644 index 0000000..c8a92f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/scp.rb @@ -0,0 +1,30 @@ +require 'uri' +require 'cocoapods-downloader/remote_file' + +module Pod + module Downloader + class Scp < RemoteFile + DEFAULT_PORT = 22 + + private + + executable :scp + + def download_file(full_filename) + scp! '-P', port, '-q', source, full_filename + end + + def source + "#{uri.user ? uri.user + '@' : ''}#{uri.host}:'#{uri.path}'" + end + + def port + uri.port || DEFAULT_PORT + end + + def uri + @uri ||= URI.parse(url) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/subversion.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/subversion.rb new file mode 100644 index 0000000..a7ff2ab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-downloader-1.6.3/lib/cocoapods-downloader/subversion.rb @@ -0,0 +1,69 @@ +module Pod + module Downloader + class Subversion < Base + def self.options + [:revision, :tag, :folder, :externals, :checkout] + end + + def options_specific? + !(options[:revision] || options[:tag]).nil? + end + + def checkout_options + Dir.chdir(target_path) do + options = {} + options[:svn] = url + options[:revision] = @exported_revision + options + end + end + + private + + executable :svn + + def download! + output = svn!(*subcommand, *reference_url, @target_path) + store_exported_revision(output) + end + + def download_head! 
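+ # Exports (or checks out) the repository trunk when no tag or revision + # was requested.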
+ output = svn!(*subcommand, *trunk_url, @target_path) + store_exported_revision(output) + end + + def store_exported_revision(output) + output =~ /Exported revision ([0-9]+)\./ + @exported_revision = Regexp.last_match[1] if Regexp.last_match + end + + def subcommand + result = if options[:checkout] + %w(checkout) + else + %w(export) + end + + result += %w(--non-interactive --trust-server-cert --force) + result << '--ignore-externals' if options[:externals] == false + result + end + + def reference_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/tags/' << options[:tag] if options[:tag] + result = [result] + result << '-r' << options[:revision] if options[:revision] + result + end + + def trunk_url + result = url.dup + result << '/' << options[:folder] if options[:folder] + result << '/trunk' + [result] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.gitignore b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.gitignore new file mode 100644 index 0000000..40855cf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.gitignore @@ -0,0 +1,40 @@ +*.gem +*.rbc +/.config +/coverage/ +/InstalledFiles +/pkg/ +/spec/reports/ +/test/tmp/ +/test/version_tmp/ +/tmp/ + +## Specific to RubyMotion: +.dat* +.repl_history +build/ + +## Documentation cache and generated files: +/.yardoc/ +/_yardoc/ +/doc/ +/rdoc/ + +## Environment normalisation: +/.bundle/ +/lib/bundler/man/ + +# for a library or gem, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# Gemfile.lock +# .ruby-version +# .ruby-gemset + +# unless supporting rvm < 1.11.0 or doing something fancy, ignore this: +.rvmrc + +/coverage/ + +# RubyMine Editor +.idea + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml new file mode 100644 index 0000000..7583205 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop.yml @@ -0,0 +1,4 @@ +inherit_from: + - .rubocop_cocoapods.yml + + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..9104ebd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.rubocop_cocoapods.yml @@ -0,0 +1,116 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +TrailingComma: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! 
+GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.travis.yml new file mode 100644 index 0000000..282bfc7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/.travis.yml @@ -0,0 +1,24 @@ +# Sets Travis to run the Ruby specs on OS X machines to be as close as possible +# to the user environment. +# +language: objective-c +addons: + code_climate: + repo_token: 2926ae7ea0b2a6ced8b0d67efa235769ab85de1d9c9f6702f40d80bacec3c9c4 + +env: + - RVM_RUBY_VERSION=system + # - RVM_RUBY_VERSION=1.8.7-p358 + +before_install: + - export LANG=en_US.UTF-8 + - curl http://curl.haxx.se/ca/cacert.pem -o /usr/local/share/cacert.pem + - source ~/.rvm/scripts/rvm + - if [[ $RVM_RUBY_VERSION != 'system' ]]; then rvm install $RVM_RUBY_VERSION; fi + - rvm use $RVM_RUBY_VERSION + - if [[ $RVM_RUBY_VERSION == 'system' ]]; then sudo gem install bundler --no-ri --no-rdoc; else gem install bundler --no-ri --no-rdoc; fi + +install: + - sudo bundle install --without=documentation + +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md new file mode 100644 index 0000000..4d1a245 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/CHANGELOG.md @@ -0,0 +1,102 @@ +# Cocoapods::Plugins Changelog + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +This version contains no changes. + + +## 1.0.0.beta.1 (2015-12-30) + +This version contains no changes. + + +## 0.4.2 (2015-04-03) + + +## 0.4.1 (2015-02-25) + +* Added the `pod plugins installed` subcommand. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.4.0 (2014-12-25) + +* Added the `pod plugins publish` subcommand. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.3.2 (2014-11-02) + +* Switch to using cocoapods-plugins JSON file instead of from Cocoapods.org's repo. 
+ [542919](https://github.com/CocoaPods/cocoapods-plugins/commit/542919902e611c33bb0e02848037474529ddd0f9) + [Florian Hanke](https://github.com/floere) + + +## 0.3.1 (2014-09-12) + +* Restore compatibility with Ruby 1.8.7. + [#30](https://github.com/CocoaPods/cocoapods-plugins/issues/30) + [Fabio Pelosin](https://github.com/fabiopelosin) + +## 0.3.0 (2014-09-11) + +* Added a reminder to add plugin to `plugins.json` once released. + [#27](https://github.com/CocoaPods/cocoapods-plugins/issues/27) + [Olivier Halligon](https://github.com/AliSoftware) + +* Print out the version of plugins when invoked with `--verbose`. + [#16](https://github.com/CocoaPods/cocoapods-plugins/issues/16) + [David Grandinetti](https://github.com/dbgrandi) + +## 0.2.0 (2014-05-20) + +* Migrating to new syntax of CLAide::Command#arguments. + [#23](https://github.com/CocoaPods/cocoapods-plugins/issues/23) + [Olivier Halligon](https://github.com/AliSoftware) + +* Printing URL of template used. + [#21](https://github.com/CocoaPods/cocoapods-plugins/issues/21) + [Olivier Halligon](https://github.com/AliSoftware) + +* `create` subcommand now prefixes the given name if not already. + [#20](https://github.com/CocoaPods/cocoapods-plugins/issues/20) + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.1.1 (2014-05-02) + +* Making `pod plugins` an abstract command, with `list` the default subcommand. + [#11](https://github.com/CocoaPods/cocoapods-plugins/issues/11) + [#12](https://github.com/CocoaPods/cocoapods-plugins/issues/12) + [Olivier Halligon](https://github.com/AliSoftware) + +* Added `search` subcommand to search plugins by name, author and description. + [#6](https://github.com/CocoaPods/cocoapods-plugins/issues/6) + [Olivier Halligon](https://github.com/AliSoftware) + +* Refactoring and improved output formatting. + [#8](https://github.com/CocoaPods/cocoapods-plugins/issues/8) + [#10](https://github.com/CocoaPods/cocoapods-plugins/issues/10) + [#13](https://github.com/CocoaPods/cocoapods-plugins/issues/13) + [Olivier Halligon](https://github.com/AliSoftware) + +* Fixing coding conventions and RuboCop offenses. + [#17](https://github.com/CocoaPods/cocoapods-plugins/issues/17) + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.1.0 (2014-04-22) + +* Initial implementation. + [David Grandinetti](https://github.com/dbgrandi) + +* Added `create` subcommand to create an empty project for a new plugin. 
+ [#6](https://github.com/CocoaPods/cocoapods-plugins/issues/6) + [Boris Bügling](https://github.com/neonichu) diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile new file mode 100644 index 0000000..23f47e7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile @@ -0,0 +1,18 @@ +source 'https://rubygems.org' + +gemspec + +group :development do + gem 'cocoapods', :git => 'https://github.com/CocoaPods/CocoaPods.git', :branch => 'master' + gem 'cocoapods-core', :git => 'https://github.com/CocoaPods/Core.git', :branch => 'master' + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + + gem 'bacon' + gem 'mocha-on-bacon' + gem 'prettybacon' + gem 'vcr' + gem 'webmock' + + gem 'codeclimate-test-reporter', :require => nil + gem 'rubocop' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock new file mode 100644 index 0000000..de064d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Gemfile.lock @@ -0,0 +1,134 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: 00927807580554b7d3485d673c90386d3fd8fde0 + branch: master + specs: + claide (0.8.1) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 06f9a9870f1fc205f06c6d0193502fd7cf0241ef + branch: master + specs: + cocoapods (0.36.3) + activesupport (>= 3.2.15) + claide (~> 0.8.1) + cocoapods-core (= 0.36.3) + cocoapods-downloader (~> 0.9.0) + cocoapods-plugins (= 1.0.0) + cocoapods-trunk (~> 0.6.0) + cocoapods-try (~> 0.4.3) + colored (~> 1.2) + escape (~> 0.0.4) + molinillo (~> 0.2.1) + nap (~> 0.8) + open4 (~> 1.3) + xcodeproj (~> 0.23.1) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 11f3eee2008e822e5af0b01866a5a0b376c930a7 + branch: master + specs: + cocoapods-core (0.36.3) + activesupport (>= 3.2.15) + fuzzy_match (~> 2.0.4) + nap (~> 0.8.0) + +PATH + remote: .
+ specs: + cocoapods-plugins (1.0.0) + nap + +GEM + remote: https://rubygems.org/ + specs: + activesupport (4.2.1) + i18n (~> 0.7) + json (~> 1.7, >= 1.7.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + addressable (2.3.7) + ast (2.0.0) + astrolabe (1.3.0) + parser (>= 2.2.0.pre.3, < 3.0) + bacon (1.2.0) + cocoapods-downloader (0.9.0) + cocoapods-trunk (0.6.0) + nap (>= 0.8) + netrc (= 0.7.8) + cocoapods-try (0.4.3) + codeclimate-test-reporter (0.4.0) + simplecov (>= 0.7.1, < 1.0.0) + colored (1.2) + crack (0.4.2) + safe_yaml (~> 1.0.0) + docile (1.1.5) + escape (0.0.4) + fuzzy_match (2.0.4) + i18n (0.7.0) + json (1.8.2) + metaclass (0.0.4) + minitest (5.5.1) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.2.3) + multi_json (1.10.1) + nap (0.8.0) + netrc (0.7.8) + open4 (1.3.4) + parser (2.2.0.3) + ast (>= 1.1, < 3.0) + powerpack (0.1.0) + prettybacon (0.0.2) + bacon (~> 1.2) + rainbow (2.0.0) + rake (10.3.2) + rubocop (0.29.1) + astrolabe (~> 1.3) + parser (>= 2.2.0.1, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.4) + ruby-progressbar (1.7.5) + safe_yaml (1.0.4) + simplecov (0.9.0) + docile (~> 1.1.0) + multi_json + simplecov-html (~> 0.8.0) + simplecov-html (0.8.0) + thread_safe (0.3.5) + tzinfo (1.2.2) + thread_safe (~> 0.1) + vcr (2.9.3) + webmock (1.20.4) + addressable (>= 2.3.6) + crack (>= 0.3.2) + xcodeproj (0.23.1) + activesupport (>= 3) + colored (~> 1.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-plugins! + codeclimate-test-reporter + mocha-on-bacon + prettybacon + rake + rubocop + vcr + webmock + +BUNDLED WITH + 1.11.2 diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/LICENSE new file mode 100644 index 0000000..0d932da --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 David Grandinetti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/README.md new file mode 100644 index 0000000..a38af09 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/README.md @@ -0,0 +1,44 @@ +# Cocoapods plugins + +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-plugins/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-plugins) +[![Coverage](https://img.shields.io/codeclimate/coverage/github/CocoaPods/cocoapods-plugins.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-plugins) +[![Code Climate](https://img.shields.io/codeclimate/github/CocoaPods/cocoapods-plugins.svg?style=flat)](https://codeclimate.com/github/CocoaPods/cocoapods-plugins) + +CocoaPods plugin which shows info about available CocoaPods plugins or helps you get started developing a new plugin. Yeah, it's very meta. + +## Installation + + $ gem install cocoapods-plugins + +## Usage + +##### List installed plugins + + $ pod plugins installed + +List all installed CocoaPods plugins with their respective version (and pre_install/post_install hooks, if any). + +##### List known plugins + + $ pod plugins list + +List all known CocoaPods plugins (according to the list hosted on `http://github.com/CocoaPods/cocoapods-plugins`). + +##### Search plugins + + $ pod plugins search QUERY + +Search plugins whose name contains the given text (ignoring case). With `--full`, it searches not only by name but also by author and description. + +##### Create a new plugin + + $ pod plugins create NAME [TEMPLATE_URL] + +Create a scaffold for the development of a new plugin according to the CocoaPods best practices. +If a `TEMPLATE_URL`, pointing to a git repo containing a compatible template, is specified, it will be used in place of the default one. + +## Get your plugin listed + + $ pod plugins publish + +Create an issue in the `cocoapods-plugins` GitHub repository to ask for your plugin to be added to the official list (with the proper JSON fragment to be added to `plugins.json` so we just have to copy/paste it). diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Rakefile new file mode 100644 index 0000000..ce44a28 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/Rakefile @@ -0,0 +1,88 @@ +# Bootstrap +#-----------------------------------------------------------------------------# + +task :bootstrap do + if system('which bundle') + sh 'bundle install' + else + $stderr.puts "\033[0;31m" \ + "[!] 
Please install the bundler gem manually:\n" \ + ' $ [sudo] gem install bundler' \ + "\e[0m" + exit 1 + end +end + +begin + + require 'bundler/gem_tasks' + + task :default => 'spec' + + # Spec + #-----------------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + start_time = Time.now + sh "bundle exec bacon #{specs('**')}" + duration = Time.now - start_time + puts "Tests completed in #{duration}s" + Rake::Task['rubocop'].invoke + Rake::Task['validate_json'].invoke + end + + def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') + end + + # Rubocop + #-----------------------------------------------------------------------------# + + desc 'Checks code style' + task :rubocop do + require 'rubocop' + cli = RuboCop::CLI.new + result = cli.run(FileList['{spec,lib}/**/*.rb']) + abort('RuboCop failed!') unless result == 0 + end + + # plugins.json + #----------------------------------------------------------------------------# + + desc 'Validates plugins.json' + task :validate_json do + require 'json' + require 'pathname' + + puts 'Validating plugins.json' + + json_file = Pathname(__FILE__).parent + 'plugins.json' + json = json_file.read + plugins = JSON.load(json) + abort('Invalid JSON in plugins.json') unless plugins + keys = %w(gem name author social_media_url url description) + optional_keys = %w(social_media_url) + errors = plugins['plugins'].reduce([]) do |errors, plugin| + extra_keys = plugin.keys - keys + unless extra_keys.empty? + errors << "plugin `#{plugin['name']}` has extra keys #{extra_keys}" + end + (keys - optional_keys).each do |key| + unless plugin[key] + errors << "plugin `#{plugin['name']}` is missing key `#{key}`" + end + end + errors + end + unless errors.empty? + abort("Invalid plugins.json:\n\n#{errors.join("\n")}") + end + end + +rescue LoadError + $stderr.puts "\033[0;31m" \ + '[!] Some Rake tasks have been disabled because the environment' \ + ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' \ + "\e[0m" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec new file mode 100644 index 0000000..555c86f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/cocoapods-plugins.gemspec @@ -0,0 +1,31 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_plugins.rb' + +Gem::Specification.new do |spec| + spec.name = 'cocoapods-plugins' + spec.version = CocoapodsPlugins::VERSION + spec.authors = ['David Grandinetti', 'Olivier Halligon'] + spec.summary = %q{CocoaPods plugin which shows info about available CocoaPods plugins.} + spec.description = <<-DESC + This CocoaPods plugin shows information about all available CocoaPods plugins + (yes, this is very meta!).
+ This CP plugin adds the "pod plugins" command to CocoaPods so that you can list + all plugins (registered in the reference JSON hosted at CocoaPods/cocoapods-plugins) + DESC + spec.homepage = 'https://github.com/cocoapods/cocoapods-plugins' + spec.license = 'MIT' + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ['lib'] + + spec.add_runtime_dependency 'nap' + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' + + spec.required_ruby_version = '>= 2.0.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..4ef5c44 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/plugins' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb new file mode 100644 index 0000000..cb8a09e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/cocoapods_plugins.rb @@ -0,0 +1,5 @@ +# The namespace of the Cocoapods plugins plugin. +# +module CocoapodsPlugins + VERSION = '1.0.0'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb new file mode 100644 index 0000000..e1a0715 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_helper.rb @@ -0,0 +1,120 @@ +require 'pod/command/gem_index_cache' + +module Pod + class Command + # This module is used by Command::PluginsHelper to download the Gem + # Specification data, check if a Gem is installed, and provide info + # on all versions of a Gem. + # + module GemHelper + # A GemIndexCache to manage downloading/caching the spec index. + # + @cache = nil + + # Getter for GemIndexCache + # + # @return [GemIndexCache] a new or memoized GemIndexCache + # + def self.cache + @cache ||= GemIndexCache.new + end + + # Instantiate a cache and download the spec index if it has + # not already been done. + # + def self.download_and_cache_specs + cache.download_and_cache_specs + end + + # Tells if a gem is installed + # + # @param [String] gem_name + # The name of the plugin gem to test + # + # @param [String] version_string + # An optional version string, used to check if a specific + # version of a gem is installed + # + # @return [Bool] true if the gem is installed, false otherwise. + # + def self.gem_installed?(gem_name, version_string = nil) + version = Gem::Version.new(version_string) if version_string + + if Gem::Specification.respond_to?(:find_all_by_name) + gems = Gem::Specification.find_all_by_name(gem_name) + return !gems.empty? unless version + gems.each { |gem| return true if gem.version == version } + false + else + dep = Gem::Dependency.new(gem_name, version_string) + !Gem.source_index.search(dep).empty? + end + end + + # Get the version of a gem that is installed locally. If more than + # one version is installed, this returns the first version found, + # which MAY not be the highest/newest version. + # + # @return [String] The version of the gem that is installed, + # or nil if it is not installed. 
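+ # + # @example Illustrative usage; the actual result depends on the gems installed locally: + # GemHelper.installed_version('cocoapods-try') # => e.g. "1.1.0", or nil if absent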
+ # + def self.installed_version(gem_name) + if Gem::Specification.respond_to?(:find_all_by_name) + gem = Gem::Specification.find_all_by_name(gem_name).first + else + dep = Gem::Dependency.new(gem_name) + gem = Gem.source_index.search(dep).first + end + gem ? gem.version.to_s : nil + end + + # Create a string containing all versions of a plugin, + # colored to indicate if a specific version is installed + # locally. + # + # @param [String] plugin_name + # The name of the plugin gem + # + # @param [GemIndexCache] index_cache + # Optional index cache can be passed in, otherwise + # the module instance is used. + # + # @return [String] a string containing a comma separated + # concatenation of all versions of a plugin + # that were found on rubygems.org + # + def self.versions_string(plugin_name, index_cache = cache) + name_tuples = index_cache.specs_with_name(plugin_name) + sorted_versions = name_tuples.sort_by(&:version) + version_strings = colorize_versions(sorted_versions) + version_strings.join ', ' + end + + #----------------# + + private + + # Colorize an Array of version strings so versions that are installed + # are green and uninstalled versions are yellow. + # + # @param [Array] versions + # sorted array of Gem::NameTuples representing all versions of + # a plugin gem. + # + # @return [Array] An array of strings, each one being the version + # string of the same plugin + # + def self.colorize_versions(versions) + colored_strings = [] + versions.reverse_each do |name_tuple| + if gem_installed?(name_tuple.name, name_tuple.version.to_s) + colored_strings << name_tuple.version.to_s.green + else + colored_strings << name_tuple.version.to_s.yellow + end + end + colored_strings + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb new file mode 100644 index 0000000..b9c667a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/gem_index_cache.rb @@ -0,0 +1,87 @@ +require 'pod/command/gem_helper' + +module Pod + class Command + # This class is used by Command::GemHelper to download the Gem + # Specification index from rubygems.org and provide info about + # the index. + # + class GemIndexCache + # A memoized hash of all the rubygem specs. If it is nil, the specs will + # be downloaded, which can take a few seconds. + # + # @return [Hash] The hash of all rubygems + # + def specs + @specs ||= download_specs + end + + # Alias to make the initial caching process more readable. + # + alias_method :download_and_cache_specs, :specs + + # Get an Array of Gem::NameTuple objects that match a given + # spec name. + # + # @param [String] name + # The name of the gem to match on (e.g. 'cocoapods-try') + # + # @return [Array] Array of Gem::NameTuple that match the name + # + def specs_with_name(name) + matching_specs = specs.select do |spec| + spec[0].name == name + end + + name_tuples = [] + matching_specs.each do |(name_tuple, _)| + name_tuples << name_tuple + end + + name_tuples + end + + #----------------# + + private + + # Force-download the rubygem spec index + # + # @return [Hash] The hash of all rubygems + # + def download_specs + UI.puts 'Downloading Rubygem specification index...' + fetcher = Gem::SpecFetcher.fetcher + results, errors = fetcher.available_specs(:released) + + unless errors.empty?
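+ # Fetch errors are reported through UI and the index degrades to an + # empty list instead of raising, so version lookups simply come back empty.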
+ UI.puts 'Error downloading Rubygem specification index: ' + + errors.first.error.to_s + return [] + end + + flatten_fetcher_results(results) + end + + # Flatten the dictionary returned from Gem::SpecFetcher + # to a simple array. + # + # @param [Hash] results + # the hash returned from the call to + # Gem::SpecFetcher.available_specs() + # + # @return [Array] Array of all spec results + # + def flatten_fetcher_results(results) + specs = [] + results.each do |source, source_specs| + source_specs.each do |tuple| + specs << [tuple, source] + end + end + + specs + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb new file mode 100644 index 0000000..632b10e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins.rb @@ -0,0 +1,30 @@ +require 'rest' +require 'json' + +# The CocoaPods namespace +# +module Pod + class Command + # The pod plugins command. + # + class Plugins < Command + require 'pod/command/plugins/list' + require 'pod/command/plugins/search' + require 'pod/command/plugins/create' + require 'pod/command/plugins/publish' + require 'pod/command/plugins/installed' + + self.abstract_command = true + self.default_subcommand = 'list' + + self.summary = 'Show available CocoaPods plugins' + self.description = <<-DESC + Lists or searches the available CocoaPods plugins + and shows whether you have them installed or not. + + Also allows you to quickly create a new CocoaPods + plugin using a provided template. + DESC + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb new file mode 100644 index 0000000..17344e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/create.rb @@ -0,0 +1,120 @@ +require 'pod/command/plugins_helper' + +module Pod + class Command + class Plugins + # The create subcommand. Used to create a new plugin using either the + # default template (CocoaPods/cocoapods-plugin-template) or a custom + # template + # + class Create < Plugins + NAME_PREFIX = 'cocoapods-' + + self.summary = 'Creates a new plugin' + self.description = <<-DESC + Creates a scaffold for the development of a new plugin + named `NAME` according to the CocoaPods best practices. + + If a `TEMPLATE_URL`, pointing to a git repo containing a + compatible template, is specified, it will be used + in place of the default one. + DESC + + self.arguments = [ + CLAide::Argument.new('NAME', true), + CLAide::Argument.new('TEMPLATE_URL', false), + ] + + def initialize(argv) + @name = argv.shift_argument + unless @name.nil? || @name.empty? || @name.start_with?(NAME_PREFIX) + @name = NAME_PREFIX + @name.dup + end + @template_url = argv.shift_argument + super + end + + def validate! + super + if @name.nil? || @name.empty? + help! 'A name for the plugin is required.' + end + + help! 'The plugin name cannot contain spaces.'
if @name.match(/\s/) + end + + def run + clone_template + configure_template + show_reminder + end + + #----------------------------------------# + + private + + # @!group Private helpers + + extend Executable + executable :git + + TEMPLATE_BASE_URL = 'https://github.com/CocoaPods/' + TEMPLATE_REPO = TEMPLATE_BASE_URL + 'cocoapods-plugin-template.git' + TEMPLATE_INFO_URL = TEMPLATE_BASE_URL + 'cocoapods-plugin-template' + + # Clones the template from the remote in the working directory using + # the name of the plugin. + # + # @return [void] + # + def clone_template + UI.section("-> Creating `#{@name}` plugin") do + UI.notice "using template '#{template_repo_url}'" + command = ['clone', template_repo_url, @name] + if method(:git!).arity == -1 + git! command + else + # TODO: delete this conditional and use the other branch when + # 0.5.0 is released + require 'shellwords' + git! command.map(&:to_s).map(&:shellescape).join(' ') + end + end + end + + # Runs the template configuration utilities. + # + # @return [void] + # + def configure_template + UI.section('-> Configuring template') do + Dir.chdir(@name) do + if File.file? 'configure' + system "./configure #{@name}" + else + UI.warn 'Template does not have a configure file.' + end + end + end + end + + # Returns the custom template URL if one was given, else the default + # TEMPLATE_REPO URL + # + # @return [String] + # + def template_repo_url + @template_url || TEMPLATE_REPO + end + + # Shows a reminder to the plugin author to make a Pull Request + # in order to update plugins.json once the plugin is released + # + def show_reminder + repo = PluginsHelper::PLUGINS_JSON_REPO + UI.notice "Don't forget to create a Pull Request on #{repo}\n" \ + ' to add your plugin to the plugins.json file once it is released!' + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb new file mode 100644 index 0000000..e777b8f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/installed.rb @@ -0,0 +1,92 @@ + +module Pod + class Command + class Plugins + # The `installed` subcommand. + # Used to list all installed plugins. + # + class Installed < Plugins + self.summary = 'List plugins installed on your machine' + self.description = <<-DESC + List all installed plugins and their + respective version. + DESC + + def self.options + # Silent mode is meaningless for this command as + # the command's only purpose is to print information + super.reject { |option, _| option == '--silent' } + end + + def run + plugins = CLAide::Command::PluginManager.specifications + + UI.title 'Installed CocoaPods Plugins:' do + if verbose? + print_verbose_list(plugins) + else + print_compact_list(plugins) + end + end + end + + private + + # Print the given plugins as a compact list, one line + # per plugin with only its name & version + # + # @param [Array] plugins + # The list of plugins to print + # + def print_compact_list(plugins) + max_length = plugins.map { |p| p.name.length }.max + plugins.each do |plugin| + name_just = plugin.name.ljust(max_length) + hooks = registered_hooks(plugin) + hooks_list = '' + unless hooks.empty?
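+            # e.g. " (pre_install hook)" or " (pre_install and post_install hooks)"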
suffix = 'hook'.pluralize(hooks.count) + hooks_list = " (#{hooks.to_sentence} #{suffix})" + end + UI.puts_indented " - #{name_just} : #{plugin.version}#{hooks_list}" + end + end + + # Print the given plugins as a verbose list, with name, version, + # homepage and summary for each plugin. + # + # @param [Array] plugins + # The list of plugins to print + # + def print_verbose_list(plugins) + plugins.each do |plugin| + hooks = registered_hooks(plugin) + + UI.title(plugin.name) + UI.labeled('Version', plugin.version.to_s) + UI.labeled('Hooks', hooks) unless hooks.empty? + unless plugin.homepage.empty? + UI.labeled('Homepage', plugin.homepage) + end + UI.labeled('Summary', plugin.summary) + end + end + + # Names of the registered hook(s) (if any) for the given plugin + # + # @return [Array] + # Names of the hooks the given plugin did register for. + # + def registered_hooks(plugin) + registrations = Pod::HooksManager.registrations + return [] if registrations.nil? + + registrations.reduce([]) do |list, (name, hooks)| + list.push(name) if hooks.any? { |h| h.plugin_name == plugin.name } + list + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb new file mode 100644 index 0000000..e773165 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/list.rb @@ -0,0 +1,33 @@ +require 'pod/command/plugins_helper' +require 'pod/command/gem_helper' + +module Pod + class Command + class Plugins + # The list subcommand. Used to list all known plugins + # + class List < Plugins + self.summary = 'List all known plugins' + self.description = <<-DESC + List all known plugins (according to the list + hosted on github.com/CocoaPods/cocoapods-plugins) + DESC + + def self.options + super.reject { |option, _| option == '--silent' } + end + + def run + plugins = PluginsHelper.known_plugins + GemHelper.download_and_cache_specs if self.verbose? + + UI.title 'Available CocoaPods Plugins:' do + plugins.each do |plugin| + PluginsHelper.print_plugin plugin, self.verbose? + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb new file mode 100644 index 0000000..b10ac79 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/publish.rb @@ -0,0 +1,76 @@ +require 'json' +require 'cgi' + +module Pod + class Command + class Plugins + # The publish subcommand. Used to request to add a plugin + # to the official list of plugins + # + class Publish < Plugins + self.summary = 'Request to add the plugin to the official plugins list' + self.description = <<-DESC + This command is only useful for developers of CocoaPods plugins. + + It opens a new GitHub issue to request adding the plugin + currently being developed to the list of official plugins. + + The current directory is expected to have one (and only one) + `.gemspec` file describing the CocoaPods plugin gem. + DESC + + def initialize(argv) + @gemspec_files = Dir.glob('*.gemspec') + super + end + + def validate! + super + if @gemspec_files.count > 1 + help! 'There is more than one gemspec in the current directory.' + elsif @gemspec_files.empty? + help! 'No `.gemspec` file found in the current directory.'
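+          # Exactly one gemspec is required: #run reads the plugin metadata + # from it to build the plugins.json entry for the GitHub issue.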
end + end + + def run + gemspec = Gem::Specification.load(@gemspec_files.first) + unless gemspec.name.start_with?('cocoapods-') + UI.notice 'Your gem name should start with `cocoapods-` to be ' \ + 'loaded as a plugin by CocoaPods' + end + + json = json_from_gemspec(gemspec) + + title = "[plugins.json] Add #{gemspec.name}" + body = 'Please add the following entry to the `plugins.json` file:' \ + "\n\n```\n#{json}\n```" + open_new_issue_url(title, body) + end + + private + + def json_from_gemspec(gemspec) + JSON.pretty_generate( + :gem => gemspec.name, + :name => pretty_name_from_gemname(gemspec.name), + :author => gemspec.authors.join(', '), + :url => gemspec.homepage, + :description => gemspec.summary || gemspec.description, + ) + end + + def pretty_name_from_gemname(gemname) + gemname.split('-').map(&:capitalize).join(' '). + gsub(/cocoapods/i, 'CocoaPods') + end + + def open_new_issue_url(title, body) + url = 'https://github.com/CocoaPods/cocoapods-plugins/issues/new?' \ + "title=#{CGI.escape(title)}&body=#{CGI.escape(body)}" + `open "#{url}"` + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb new file mode 100644 index 0000000..bfc6718 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins/search.rb @@ -0,0 +1,58 @@ +require 'pod/command/plugins_helper' +require 'pod/command/gem_helper' + +module Pod + class Command + class Plugins + # The search subcommand. + # Used to search for a plugin in the list of known plugins, + # searching the name, author, and description fields + # + class Search < Plugins + self.summary = 'Search for known plugins' + self.description = <<-DESC + Searches plugins whose 'name' contains the given `QUERY`. + `QUERY` is a regular expression, ignoring case. + + With `--full`, it also searches by 'author' and 'description'. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', true), + ] + + def self.options + [ + ['--full', 'Search by name, author, and description'], + ].concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + @full_text_search = argv.flag?('full') + @query = argv.shift_argument unless argv.arguments.empty? + super + end + + def validate! + super + help! 'A search query is required.' if @query.nil? || @query.empty? + begin + /#{@query}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + end + + def run + plugins = PluginsHelper.matching_plugins(@query, @full_text_search) + GemHelper.download_and_cache_specs if self.verbose? + + UI.title "Available CocoaPods Plugins matching '#{@query}':" + plugins.each do |plugin| + PluginsHelper.print_plugin plugin, self.verbose?
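+          # Matches are printed with the shared PluginsHelper formatting; + # with --verbose a Versions line is added from the cached spec index.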
+ end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb new file mode 100644 index 0000000..2a627ec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/lib/pod/command/plugins_helper.rb @@ -0,0 +1,137 @@ +require 'pod/command/gem_helper' + +module Pod + class Command + # This module is used by Command::Plugins::List + # and Command::Plugins::Search to download and parse + # the JSON describing the plugins list and manipulate it + # + module PluginsHelper + PLUGINS_JSON_REPO_NAME = 'CocoaPods/cocoapods-plugins' + PLUGINS_JSON_REPO = 'https://github.com/' + PLUGINS_JSON_REPO_NAME + PLUGINS_JSON_REL_URL = '/master/plugins.json' + + PLUGINS_RAW_URL = 'https://raw.githubusercontent.com/' \ + + PLUGINS_JSON_REPO_NAME + PLUGINS_JSON_REL_URL + + # Force-download the JSON + # + # @return [Hash] The hash representing the JSON with all known plugins + # + def self.download_json + UI.puts 'Downloading Plugins list...' + response = REST.get(PLUGINS_RAW_URL) + if response.ok? + parse_json(response.body) + else + raise Informative, 'Could not download plugins list ' \ + "from cocoapods-plugins: #{response.inspect}" + end + end + + # The list of all known plugins, according to + # the JSON hosted on github's cocoapods-plugins + # + # @return [Array] all known plugins, as listed in the downloaded JSON + # + def self.known_plugins + json = download_json + json['plugins'] + end + + # Filter plugins to return only matching ones + # + # @param [String] query + # A query string that corresponds to a valid RegExp pattern. + # + # @param [Bool] full_text_search + # false only searches in the plugin's name. + # true searches in the plugin's name, author and description. + # + # @return [Array] all plugins matching the query + # + def self.matching_plugins(query, full_text_search) + query_regexp = /#{query}/i + known_plugins.reject do |plugin| + texts = [plugin['name']] + if full_text_search + texts << plugin['author'] if plugin['author'] + texts << plugin['description'] if plugin['description'] + end + texts.grep(query_regexp).empty? + end + end + + # Display information about a plugin + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @param [Bool] verbose + # If true, will also print the author of the plugins. + # Defaults to false. + # + def self.print_plugin(plugin, verbose = false) + plugin_colored_name = plugin_title(plugin) + + UI.title(plugin_colored_name, '', 1) do + UI.puts_indented plugin['description'] + ljust = verbose ? 16 : 11 + UI.labeled('Gem', plugin['gem'], ljust) + UI.labeled('URL', plugin['url'], ljust) + print_verbose_plugin(plugin, ljust) if verbose + end + end + + #----------------# + + private + + # Smaller helper to print out the verbose details + # for a plugin. + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @param [Integer] ljust + # The left justification that is passed into UI.labeled + # + def self.print_verbose_plugin(plugin, ljust) + UI.labeled('Author', plugin['author'], ljust) + unless GemHelper.cache.specs.empty? 
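+        # The Versions line can only be shown when the calling command has + # populated the spec index via GemHelper.download_and_cache_specs + # (which `list` and `search` only do in --verbose mode).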
+ versions = GemHelper.versions_string(plugin['gem']) + UI.labeled('Versions', versions, ljust) + end + end + + # Parse the given JSON data, handling parsing errors if any + # + # @param [String] json_str + # The string representation of the JSON to parse + # + def self.parse_json(json_str) + JSON.parse(json_str) + rescue JSON::ParserError => e + raise Informative, "Invalid plugins list from cocoapods-plugins: #{e}" + end + + # Format the title line to print the plugin info with print_plugin + # coloring it according to whether the plugin is installed or not + # + # @param [Hash] plugin + # The hash describing the plugin + # + # @return [String] The formatted and colored title + # + def self.plugin_title(plugin) + plugin_name = "-> #{plugin['name']}" + if GemHelper.gem_installed?(plugin['gem']) + plugin_name += " (#{GemHelper.installed_version(plugin['gem'])})" + plugin_name.green + else + plugin_name.yellow + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/plugins.json b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/plugins.json new file mode 100644 index 0000000..2f6f985 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/plugins.json @@ -0,0 +1,245 @@ +{ + "plugins": [ + { + "gem": "cocoapods-appledoc", + "name": "AppleDoc", + "author": "Kyle Fuller", + "social_media_url": "http://twitter.com/kylefuller", + "url": "https://github.com/CocoaPods/cocoapods-appledoc", + "description": "Generates docset and documentation for a pod." + }, + { + "gem": "cocoapods-deploy", + "name": "Deploy", + "author": "James Campbell", + "social_media_url": "https://twitter.com/jcampbell_05", + "url": "https://github.com/jcampbell05/cocoapods-deploy", + "description": "Deploys dependencies for a CocoaPods project without needing to clone the repo (Similar to Bundler's `--deployment`)." + }, + { + "gem": "cocoapods-rome", + "name": "Rome", + "author": "Boris Bügling", + "social_media_url": "https://twitter.com/neonichu", + "url": "https://github.com/neonichu/rome", + "description": "Rome makes it easy to build a list of frameworks for consumption outside of Xcode, e.g. for a Swift script." + }, + { + "gem": "cocoapods-deintegrate", + "name": "Deintegrate", + "author": "Kyle Fuller", + "social_media_url": "http://twitter.com/kylefuller", + "url": "https://github.com/kylef/cocoapods-deintegrate", + "description": "Deintegrates a project from CocoaPods." + }, + { + "gem": "cocoapods-dependencies", + "name": "Pod Dependencies", + "author": "Samuel E. Giddins", + "social_media_url": "http://twitter.com/segiddins", + "url": "https://github.com/segiddins/cocoapods-dependencies", + "description": "Shows a project's CocoaPod dependency graph." + }, + { + "gem": "cocoapods-browser", + "name": "Pod browser", + "author": "Toshihiro Morimoto", + "social_media_url": "http://twitter.com/dealforest", + "url": "https://github.com/dealforest/cocoapods-browser", + "description": "Open a pod's homepage in the browser." + }, + { + "gem": "cocoapods-check_latest", + "name": "Check Latest", + "author": "Yuji Nakayama", + "social_media_url": "http://twitter.com/nkym37", + "url": "https://github.com/yujinakayama/cocoapods-check_latest", + "description": "Checks if the latest version of a pod is up to date."
+ }, + { + "gem": "cocoapods-docs", + "name": "Pod docs", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/CocoaPods/cocoapods-docs", + "description": "Convenient access to the documentation of a Pod via cocoadocs.org." + }, + { + "gem": "cocoapods-docstats", + "name": "docstats", + "author": "Boris Bügling", + "social_media_url": "http://twitter.com/NeoNacho", + "url": "https://github.com/neonichu/cocoapods-docstats", + "description": "Showing documentation metrics of Pods." + }, + { + "gem": "cocoapods-open", + "name": "open", + "author": "Les Hill", + "social_media_url": "http://twitter.com/leshill", + "url": "https://github.com/leshill/open_pod_bay", + "description": "Open a pod’s workspace." + }, + { + "gem": "cocoapods-podfile_info", + "name": "Pod info", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/cocoapods/cocoapods-podfile_info", + "description": "Shows information on installed Pods." + }, + { + "gem": "cocoapods-repo-svn", + "name": "repo-svn", + "author": "Dusty Clarkda", + "social_media_url": "http://twitter.com/_clarkda", + "url": "https://github.com/clarkda/cocoapods-repo-svn", + "description": "Adds subversion support to manage spec-repositories." + }, + { + "gem": "cocoapods-repo-hg", + "name": "repo-hg", + "author": "Dusty Clarkda", + "social_media_url": "http://twitter.com/_clarkda", + "url": "https://github.com/clarkda/cocoapods-repo-hg", + "description": "Adds mercurial support to manage spec-repositories." + }, + { + "gem": "cocoapods-try", + "name": "Pod try", + "author": "CocoaPods Dev Team", + "social_media_url": "http://twitter.com/CocoaPods", + "url": "https://github.com/CocoaPods/cocoapods-try", + "description": "Quickly try the demo project of a Pod." + }, + { + "gem": "cocoapods-watch", + "name": "Pod watch", + "author": "Marin Usalj", + "url": "https://github.com/supermarin/cocoapods-watch", + "description": "Watch for Podfile changes and run pod install." + }, + { + "gem": "cocoapods-roulette", + "name": "Pods Roulette", + "author": "Heiko Behrens, Marcel Jackwerth", + "url": "https://github.com/sirlantis/cocoapods-roulette", + "description": "Builds an empty project with three random pods." + }, + { + "gem": "cocoapods-sorted-search", + "name": "Sorted Search", + "author": "Denys Telezhkin", + "url": "https://github.com/DenHeadless/cocoapods-sorted-search", + "description": "Adds a sort subcommand for pod search to sort search results by amount of stars, forks, or github activity." + }, + { + "gem": "cocoapods-release", + "name": "Release", + "author": "Oliver Letterer", + "social_media_url": "https://twitter.com/oletterer", + "url": "https://github.com/Sparrow-Labs/cocoapods-release", + "description": "Tags and releases pods for you." + }, + { + "gem": "cocoapods-clean", + "name": "cocoapods clean", + "author": "Luca Querella", + "url": "https://github.com/BendingSpoons/cocoapods-clean", + "description": "Remove Podfile.lock, Pods/ and *.xcworkspace." + }, + { + "gem": "cocoapods-keys", + "name": "CocoaPods Keys", + "author": "Orta Therox, Samuel E. Giddins", + "url": "https://github.com/orta/cocoapods-keys", + "description": "Store sensitive data in your Mac's keychain, that will be installed into your app's source code via the Pods library."
+ }, + { + "gem": "cocoapods-packager", + "name": "CocoaPods Packager", + "author": "Kyle Fuller, Boris Bügling", + "url": "https://github.com/CocoaPods/cocoapods-packager", + "description": "Generate a framework or static library from a podspec." + }, + { + "gem": "cocoapods-links", + "name": "CocoaPods Links", + "author": "Mike Owens", + "social_media_url": "https://twitter.com/mikejowens", + "url": "https://github.com/mowens/cocoapods-links", + "description": "A CocoaPods plugin to manage local development pods" + }, + { + "gem": "cocoapods-prune-localizations", + "name": "CocoaPods Prune Localizations", + "author": "Diego Torres", + "social_media_url": "https://twitter.com/dtorres", + "url": "https://github.com/dtorres/cocoapods-prune-localizations", + "description": "Upon running pod install, this plugin will remove unused localizations by your project" + }, + { + "gem": "cocoapods-readonly", + "name": "CocoaPods Readonly", + "author": "Mason Glidden", + "url": "https://github.com/Yelp/cocoapods-readonly", + "description": "Developers switching from submodules are used to modifying library source files from within Xcode. This locks those files as needed so Xcode warns you when attempting to edit them." + }, + { + "gem": "cocoapods-thumbs", + "name": "CocoaPods Thumbs", + "author": "Pablo Bendersky", + "url": "https://github.com/quadion/cocoapods-thumbs", + "description": "Use cocoapods-thumbs to check upvotes or downvotes of Podspecs from your peers based on past experiences." + }, + { + "gem": "cocoapods-blacklist", + "name": "CocoaPods Blacklist", + "author": "David Grandinetti", + "url": "https://github.com/yahoo/cocoapods-blacklist", + "description": "Check if a project is using a banned version of a pod. Handy for security audits." + }, + { + "gem": "cocoapods-superdeintegrate", + "name": "CocoaPods Superdeintegrate", + "author": "Ash Furrow", + "url": "https://github.com/ashfurrow/cocoapods-superdeintegrate", + "description": "Deletes the CocoaPods cache, your derived data folder, and makes sure that your Pods directory is gone." + }, + { + "gem": "cocoapods-archive", + "name": "CocoaPods Archive", + "author": "fjbelchi, alexito4", + "url": "https://github.com/fjbelchi/cocoapods-archive", + "description": "cocoapods-archive plugin that archives your project" + }, + { + "gem": "cocoapods-check", + "name": "CocoaPods Check", + "author": "Matt Di Iorio", + "url": "https://github.com/square/cocoapods-check", + "description": "Displays differences between locked and installed Pods" + }, + { + "gem": "cocoapods-acknowledgements", + "name": "CocoaPods Acknowledgements", + "author": "Fabio Pelosin, Orta Therox, Marcelo Fabri", + "url": "https://github.com/CocoaPods/cocoapods-acknowledgements", + "description": "CocoaPods plugin that generates an acknowledgements plist to make it easy to create tools to use in apps." + }, + { + "gem": "cocoapods-generator", + "name": "CocoaPods Generator", + "author": "äģŽæƒ", + "url": "https://github.com/zhzhy/cocoapods-generator", + "description": "Add files to empty target from *.podspec, such as souce files, libraries, frameworks, resources and so on." + }, + { + "gem": "cocoapods-debug", + "name": "CocoaPods Debug", + "author": "Samuel Giddins", + "url": "https://github.com/segiddins/cocoapods-debug", + "description": "A simple plugin to ease debugging CocoaPods."
+ } + ] +} diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb new file mode 100644 index 0000000..45c4d17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_helper_spec.rb @@ -0,0 +1,40 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::GemHelper do + before do + UI.output = '' + end + + after do + mocha_teardown + end + + it 'detects if a gem is installed' do + Command::GemHelper.gem_installed?('bacon').should.be.true + Command::GemHelper.gem_installed?('fake-fake-fake-gem').should.be.false + end + + it 'detects if a specific version of a gem is installed' do + Command::GemHelper.gem_installed?('bacon', Bacon::VERSION).should.be.true + impossibacon = Gem::Version.new(Bacon::VERSION).bump + Command::GemHelper.gem_installed?('bacon', impossibacon).should.be.false + end + + it 'creates a version list that includes all versions of a single gem' do + spec2 = Gem::NameTuple.new('cocoapods-plugins', Gem::Version.new('0.2.0')) + spec1 = Gem::NameTuple.new('cocoapods-plugins', Gem::Version.new('0.1.0')) + response = [{ 1 => [spec2, spec1] }, []] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache = Command::GemIndexCache.new + @cache.download_and_cache_specs + versions_string = + Command::GemHelper.versions_string('cocoapods-plugins', @cache) + versions_string.should.include('0.2.0') + versions_string.should.include('0.1.0') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb new file mode 100644 index 0000000..34f63ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/gem_index_cache_spec.rb @@ -0,0 +1,37 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::GemIndexCache do + before do + @cache = Command::GemIndexCache.new + UI.output = '' + end + + after do + mocha_teardown + end + + it 'notifies the user that it is downloading the spec index' do + response = [{}, []] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache.download_and_cache_specs + UI.output.should.include('Downloading Rubygem specification index...') + UI.output.should.not.include('Error downloading Rubygem specification') + end + + it 'notifies the user when getting the spec index fails' do + error = Gem::RemoteFetcher::FetchError.new('no host', 'bad url') + wrapper_error = stub(:error => error) + response = [[], [wrapper_error]] + Gem::SpecFetcher.any_instance.stubs(:available_specs).returns(response) + + @cache.download_and_cache_specs + @cache.specs.should.be.empty? 
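+      # A failed fetch leaves the cache empty and reports the error via UI + # instead of raising.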
+ UI.output.should.include('Downloading Rubygem specification index...') + UI.output.should.include('Error downloading Rubygem specification') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb new file mode 100644 index 0000000..fa3e7cf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/create_spec.rb @@ -0,0 +1,89 @@ +require File.expand_path('../../../spec_helper', __FILE__) +require 'tmpdir' + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins::Create do + extend SpecHelper::PluginsCreateCommand + + before do + UI.output = '' + end + + it 'registers itself' do + Command.parse(%w(plugins create)). + should.be.instance_of Command::Plugins::Create + end + + #--- Validation + + it 'should require a name is passed in' do + @command = create_command + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/A name for the plugin is required./) + end + + it 'should require a non-empty name is passed in' do + @command = create_command('') + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/A name for the plugin is required./) + end + + it 'should require the name does not have spaces' do + @command = create_command('my gem') + should.raise(CLAide::Help) do + @command.validate! + end.message.should.match(/The plugin name cannot contain spaces./) + end + + #--- Naming + + it 'should prefix the given name if not already' do + @command = create_command('unprefixed') + Dir.mktmpdir do |tmpdir| + Dir.chdir(tmpdir) do + @command.run + end + end + UI.output.should.include('Creating `cocoapods-unprefixed` plugin') + end + + it 'should not prefix the name if already prefixed' do + @command = create_command('cocoapods-prefixed') + Dir.mktmpdir do |tmpdir| + Dir.chdir(tmpdir) do + @command.run + end + end + UI.output.should.include('Creating `cocoapods-prefixed` plugin') + end + + #--- Template download + + it 'should download the default template repository' do + @command = create_command('cocoapods-banana') + + template_repo = 'https://github.com/CocoaPods/' \ + 'cocoapods-plugin-template.git' + git_command = ['clone', template_repo, 'cocoapods-banana'] + @command.expects(:git!).with(git_command) + @command.expects(:configure_template) + @command.run + UI.output.should.include('Creating `cocoapods-banana` plugin') + end + + it 'should download the passed in template repository' do + alt_repo = 'https://github.com/CocoaPods/' \ + 'cocoapods-banana-plugin-template.git' + @command = create_command('cocoapods-banana', alt_repo) + + @command.expects(:git!).with(['clone', alt_repo, 'cocoapods-banana']) + @command.expects(:configure_template) + @command.run + UI.output.should.include('Creating `cocoapods-banana` plugin') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb new file mode 100644 index 0000000..4f27be1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins/installed_spec.rb @@ -0,0 +1,140 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins::Installed do + extend SpecHelper::PluginsStubs + + def stub_plugins(plugins_and_hooks) + specs = [] + 
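+      # Hash of hook name => [Hook] used to stub Pod::HooksManager.registrations below.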
registrations = {} + plugins_and_hooks.each do |(plugin_name, hooks)| + # Load Plugin GemSpec + fixture_path = fixture("#{plugin_name}.gemspec") + specs.push Gem::Specification.load(fixture_path.to_s) + # Fill hook registrations hash + Array(hooks).each do |hook_name| + registrations[hook_name] ||= [] + hook = Pod::HooksManager::Hook.new(hook_name, plugin_name, {}) + registrations[hook_name] << hook + end + end + + Pod::HooksManager.stubs(:registrations).returns(registrations) + CLAide::Command::PluginManager.stubs(:specifications).returns(specs) + end + + before do + UI.output = '' + end + + it 'registers itself' do + Command.parse(%w(plugins installed)). + should.be.instance_of Command::Plugins::Installed + end + + #--- Output printing + + describe 'Compact List' do + before do + @command = Pod::Command::Plugins::Installed.new CLAide::ARGV.new([]) + end + + it 'no hooks' do + stub_plugins('cocoapods-foo1' => nil, 'cocoapods-foo2' => nil) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1') + UI.output.should.include(' - cocoapods-foo2 : 2.0.2') + UI.output.should.not.include('pre_install') + UI.output.should.not.include('post_install') + end + + it 'one hook' do + stub_plugins( + 'cocoapods-foo1' => :pre_install, + 'cocoapods-foo2' => :post_install, + ) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1 ' \ + '(pre_install hook)') + UI.output.should.include(' - cocoapods-foo2 : 2.0.2 ' \ + '(post_install hook)') + end + + it 'two hooks' do + stub_plugins('cocoapods-foo1' => [:pre_install, :post_install]) + + @command.run + UI.output.should.include(' - cocoapods-foo1 : 2.0.1 ' \ + '(pre_install and post_install hooks)') + end + end + + describe 'Verbose List' do + before do + verbose_args = CLAide::ARGV.new(['--verbose']) + @command = Pod::Command::Plugins::Installed.new verbose_args + end + + it 'no hooks' do + stub_plugins('cocoapods-foo1' => nil, 'cocoapods-foo2' => nil) + + @command.run + + UI.output.should.include < :pre_install, + 'cocoapods-foo2' => :post_install, + ) + + @command.run + UI.output.should.include < [:pre_install, :post_install]) + + @command.run + UI.output.should.include < CocoaPods Fake Gem') + UI.output.should.include('-> CocoaPods Searchable Fake Gem') + UI.output.should.not.include('-> Bacon') + end + + it 'should filter plugins by name, author, description with full search' do + stub_plugins_json_request + @command = search_command('--full', 'search') + @command.run + UI.output.should.include('-> CocoaPods Fake Gem') + UI.output.should.include('-> CocoaPods Searchable Fake Gem') + UI.output.should.not.include('-> Bacon') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb new file mode 100644 index 0000000..166fd60 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_helper_spec.rb @@ -0,0 +1,33 @@ +require File.expand_path('../spec_helper', File.dirname(__FILE__)) + +# The CocoaPods namespace +# +module Pod + describe Command::PluginsHelper do + extend SpecHelper::PluginsStubs + + it 'downloads the json file' do + stub_plugins_json_request + json = Command::PluginsHelper.download_json + json.should.not.be.nil? + json.should.be.is_a? Hash + json['plugins'].size.should.eql? 
3 + end + + it 'handles empty/bad JSON' do + stub_plugins_json_request 'This is not JSON' + expected_error = /Invalid plugins list from cocoapods-plugins/ + should.raise(Pod::Informative) do + Command::PluginsHelper.download_json + end.message.should.match(expected_error) + end + + it 'notifies the user if the download fails' do + stub_plugins_json_request '', [404, 'Not Found'] + expected_error = /Could not download plugins list from cocoapods-plugins/ + should.raise(Pod::Informative) do + Command::PluginsHelper.download_json + end.message.should.match(expected_error) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb new file mode 100644 index 0000000..f15b1cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/command/plugins_spec.rb @@ -0,0 +1,20 @@ +require File.expand_path('../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe Command::Plugins do + before do + argv = CLAide::ARGV.new([]) + @command = Command::Plugins.new(argv) + end + + it 'registers itself and uses the default subcommand' do + Command.parse(%w(plugins)).should.be.instance_of Command::Plugins::List + end + + it 'exists' do + @command.should.not.be.nil? + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec new file mode 100644 index 0000000..6d27dbc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo1.gemspec @@ -0,0 +1,10 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'cocoapods-foo1' + spec.version = '2.0.1' + spec.authors = ['Author 1'] + spec.summary = 'Gem Summary 1' + spec.description = 'Gem Description 1' + spec.homepage = 'https://github.com/proper-man/cocoapods-foo1' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec new file mode 100644 index 0000000..ed0428f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/cocoapods-foo2.gemspec @@ -0,0 +1,9 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'cocoapods-foo2' + spec.version = '2.0.2' + spec.authors = ['Author 1', 'Author 2'] + spec.description = 'Gem Description 2' + spec.homepage = 'https://github.com/proper-man/cocoapods-foo2' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json new file mode 100644 index 0000000..ebe0d5c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/plugins.json @@ -0,0 +1,22 @@ +{ + "plugins":[ + { + "gem":"cocoapods-fake-fake-fake-1", + "name":"CocoaPods Fake Gem", + "url":"https://github.com/CocoaPods/cocoapods-fake-1", + "description":"A Pod that should not exist and should only be found by full search" + }, + { + "gem":"bacon", + "name":"Bacon", + "url":"https://github.com/chneukirchen/bacon", + "description":"A minimal RSpec clone." 
+ }, + { + "gem":"cocoapods-fake-fake-fake-2", + "name":"CocoaPods Searchable Fake Gem", + "url":"https://github.com/CocoaPods/cocoapods-fake-2", + "description":"A Pod that should not exist but should be found with search" + } + ] +} \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec new file mode 100644 index 0000000..abd90e6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/fixtures/unprefixed.gemspec @@ -0,0 +1,10 @@ +# coding: utf-8 +Gem::Specification.new do |spec| + spec.name = 'unprefixed-plugin' + spec.version = '1.2.3' + spec.authors = ['Author 1', 'Author 2'] + spec.summary = 'Gem Summary' + spec.description = 'Gem Description' + spec.homepage = 'https://github.com/messy-man/unprefixed-plugins' + spec.license = 'MIT' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb new file mode 100644 index 0000000..8a3720f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-plugins-1.0.0/spec/spec_helper.rb @@ -0,0 +1,122 @@ +# Set up coverage analysis +#-----------------------------------------------------------------------------# + +require 'codeclimate-test-reporter' +CodeClimate::TestReporter.configure do |config| + config.logger.level = Logger::WARN +end +CodeClimate::TestReporter.start + +# Set up +#-----------------------------------------------------------------------------# + +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$LOAD_PATH.unshift((ROOT + 'lib').to_s) +$LOAD_PATH.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' + +require 'webmock' +include WebMock::API + +require 'cocoapods' +require 'cocoapods_plugin' + +# VCR +#--------------------------------------# + +require 'vcr' +VCR.configure do |c| + c.cassette_library_dir = ROOT + 'spec/fixtures/vcr_cassettes' + c.hook_into :webmock + c.ignore_hosts 'codeclimate.com' +end + +#-----------------------------------------------------------------------------# + +# The CocoaPods namespace +# +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end +end + +#-----------------------------------------------------------------------------# + +# Bacon namespace +# +module Bacon + # Add a fixture helper to the Bacon Context + class Context + ROOT = ::ROOT + 'spec/fixtures' + + def fixture(name) + ROOT + name + end + end +end + +#-----------------------------------------------------------------------------# + +# SpecHelper namespace +# +module SpecHelper + # Add this as an extension into the Search and List specs + # to help stub the plugins.json request + module PluginsStubs + def stub_plugins_json_request(json = nil, status = 200) + body = json || File.read(fixture('plugins.json')) + stub_request(:get, Pod::Command::PluginsHelper::PLUGINS_RAW_URL). 
+ to_return(:status => status, :body => body, :headers => {}) + end + end + + # Add this as an extension into the Create specs + module PluginsCreateCommand + def create_command(*args) + Pod::Command::Plugins::Create.new CLAide::ARGV.new(args) + end + end + + # Add this as an extension into the Search specs + module PluginsSearchCommand + def search_command(*args) + Pod::Command::Plugins::Search.new CLAide::ARGV.new(args) + end + end + + module PluginsPublishCommand + def publish_command + Pod::Command::Plugins::Publish.new CLAide::ARGV.new [] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml new file mode 100644 index 0000000..bd3bb06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.github/workflows/Specs.yml @@ -0,0 +1,42 @@ +name: Specs + +jobs: + specs: + strategy: + matrix: + os: [ubuntu-16.04] + ruby: [2.6, 2.7, 3.0] + + name: ${{ matrix.os }} / Ruby ${{ matrix.ruby }} + runs-on: ${{ matrix.os }} + steps: + - name: Checkout git + uses: actions/checkout@v1 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + if: ${{ matrix.ruby != 'system' }} + with: + ruby-version: ${{ matrix.ruby }} + + - name: Update git submodules + run: git submodule update --init --recursive + + - name: Run bundle install + run: | + gem install bundler -v "~> 1.17" + bundle install --jobs 4 --retry 3 --without debugging documentation + + - name: Run Specs + run: bundle exec rake specs + +on: + push: + branches: + - "master" + - "*-stable" + pull_request: + branches: + - master + - "*-stable" + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.gitignore b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.gitignore new file mode 100644 index 0000000..54a36d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/.gitignore @@ -0,0 +1,39 @@ +*.gem +*.rbc +/.config +/coverage/ +/InstalledFiles +/pkg/ +/spec/reports/ +/test/tmp/ +/test/version_tmp/ +/tmp/ + +## Specific to RubyMotion: +.dat* +.repl_history +build/ + +## Documentation cache and generated files: +/.yardoc/ +/_yardoc/ +/doc/ +/rdoc/ + +## Environment normalisation: +/.bundle/ +/lib/bundler/man/ + +# for a library or gem, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# Gemfile.lock +# .ruby-version +# .ruby-gemset + +# unless supporting rvm < 1.11.0 or doing something fancy, ignore this: +.rvmrc + +/coverage/ + +# RubyMine Editor +.idea diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/CHANGELOG.md new file mode 100644 index 0000000..34b4660 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/CHANGELOG.md @@ -0,0 +1,83 @@ +# Cocoapods::Search Changelog + +## 1.0.1 (2021-08-13) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix running with `--web`. + [Samuel Giddins](https://github.com/segiddins) + [#25](https://github.com/CocoaPods/cocoapods-search/issues/25) + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-04-14) + +##### Bug Fixes + +* Compatibility with CocoaPods 1.0.0.beta.7. 
+ [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* Perform full search as default, add `--simple` option to search only by + name. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#13](https://github.com/CocoaPods/cocoapods-search/issues/13) + +* Add support for tvOS and any possible future platforms. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#11](https://github.com/CocoaPods/cocoapods-search/issues/11) + +##### Bug Fixes + +* Print output in reverse order. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + +* Perform regexp escape on individual query words before joining them. + [Muhammed Yavuz Nuzumlalı](https://github.com/manuyavuz) + [#8](https://github.com/CocoaPods/cocoapods-search/issues/8) + + +## 0.1.0 (2015-09-03) + +* Version number must not collide with old gem called cocoapods-search 0.0.7 + + +## 0.0.1 (2015-09-03) + +* Initial implementation. This version is an extraction from [CocoaPods](https://github.com/CocoaPods/CocoaPods). + +Original creators: +[Eloy Durán](https://github.com/alloy) +[Fabio Pelosin](https://github.com/fabiopelosin) + +Extractor: +[Emma Koszinowski](http://github.com/emkosz) diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile new file mode 100644 index 0000000..0f7f267 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile @@ -0,0 +1,13 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in cocoapods-search.gemspec +gemspec + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'bacon' + gem 'mocha-on-bacon' + gem 'prettybacon' +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile.lock new file mode 100644 index 0000000..5db12c3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Gemfile.lock @@ -0,0 +1,126 @@ +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 2e285ae6be8aadf0e6319a51626d5176c47e0ede + branch: master + specs: + cocoapods (1.11.0.beta.2) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.11.0.beta.2) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.4.0, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (= 1.0.1) + cocoapods-trunk (>= 1.4.0, < 2.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 1.0, < 3.0) + xcodeproj (>= 1.21.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 0a0394afabd9c5f0838fc044e1c817024499dace + branch: master + specs: + cocoapods-core (1.11.0.beta.2) + activesupport (>= 5.0, < 7) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + +PATH + remote: .
+ specs: + cocoapods-search (1.0.1) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.3) + activesupport (6.1.4) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + atomos (0.1.3) + bacon (1.2.0) + claide (1.0.3) + cocoapods-deintegrate (1.0.4) + cocoapods-downloader (1.4.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-trunk (1.5.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + cocoapods-try (1.2.0) + colored2 (3.1.2) + concurrent-ruby (1.1.9) + escape (0.0.4) + ethon (0.14.0) + ffi (>= 1.15.0) + ffi (1.15.3) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + httpclient (2.8.3) + i18n (1.8.10) + concurrent-ruby (~> 1.0) + json (2.5.1) + minitest (5.14.4) + mocha (1.13.0) + mocha-on-bacon (0.2.3) + mocha (>= 0.13.0) + molinillo (0.8.0) + nanaimo (0.3.0) + nap (1.1.0) + netrc (0.11.0) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (4.0.6) + rake (13.0.6) + rexml (3.2.5) + ruby-macho (2.5.1) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + xcodeproj (1.21.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.3.0) + rexml (~> 3.2.4) + zeitwerk (2.4.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + cocoapods! + cocoapods-core! + cocoapods-search! + mocha-on-bacon + prettybacon + rake + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/LICENSE.txt new file mode 100644 index 0000000..288b8fe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2015 Eloy Durán, Fabio Pelosin +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/README.md new file mode 100644 index 0000000..1e8a609 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/README.md @@ -0,0 +1,106 @@ +# cocoapods-search + +[![Build Status](https://travis-ci.org/CocoaPods/cocoapods-search.svg)](https://travis-ci.org/CocoaPods/cocoapods-search) + +A CocoaPods plugin that allows you to search multiple pod spec repositories for specific pods matching a query.
cocoapods-search is by default included in CocoaPods. + +## Installation + +If you have CocoaPods, you already have cocoapods-search installed by default. If not, you can also install it as a separate gem as follows. + + $ gem install cocoapods-search + +## Usage + +Search for pods by using the pod search command as follows. + + $ pod search QUERY + +e.g. + + $ pod search networkin + + -> ACSNetworking (0.0.1) + On the basis of AFNetworking encapsulation. + pod 'ACSNetworking', '~> 0.0.1' + - Homepage: https://github.com/Hyosung/ACSNetworking + - Source: https://github.com/Hyosung/ACSNetworking.git + - Versions: 0.0.1 [master repo] + + + -> AFNetworking (2.5.4) + A delightful iOS and OS X networking framework. + pod 'AFNetworking', '~> 2.5.4' + - Homepage: https://github.com/AFNetworking/AFNetworking + - Source: https://github.com/AFNetworking/AFNetworking.git + - Versions: 2.5.4, 2.5.3, 2.5.2, 2.5.1, 2.5.0, 2.4.1, 2.4.0, 2.3.1, 2.3.0, 2.2.4, 2.2.3, 2.2.2, 2.2.1, 2.2.0, 2.1.0, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-RC3, + 2.0.0-RC2, 2.0.0-RC1, 1.3.4, 1.3.3, 1.3.2, 1.3.1, 1.3.0, 1.2.1, 1.2.0, 1.1.0, 1.0.1, 1.0, 1.0RC3, 1.0RC2, 1.0RC1, 0.10.1, 0.10.0, 0.9.2, 0.9.1, 0.9.0, 0.7.0, + 0.5.1 [master repo] + - Subspecs: + - AFNetworking/Serialization (2.5.4) + - AFNetworking/Security (2.5.4) + - AFNetworking/Reachability (2.5.4) + - AFNetworking/NSURLConnection (2.5.4) + - AFNetworking/NSURLSession (2.5.4) + - AFNetworking/UIKit (2.5.4) + + + -> AFNetworking+AutoRetry (0.0.5) + Auto Retries for AFNetworking requests + pod 'AFNetworking+AutoRetry', '~> 0.0.5' + - Homepage: https://github.com/shaioz/AFNetworking-AutoRetry + - Source: https://github.com/shaioz/AFNetworking-AutoRetry.git + - Versions: 0.0.5, 0.0.4, 0.0.3, 0.0.2, 0.0.1 [master repo] + + ... + + +### Options + +You can use the following options with the search command. + +| Flag | Description | +|----------- |-------------| +| `--regex` | Interpret the `QUERY` as a regular expression | +| `--full` | Search by name, summary, and description | +| `--stats` | Show additional stats (like GitHub watchers and forks) | +| `--ios` | Restricts the search to Pods supported on iOS | +| `--osx` | Restricts the search to Pods supported on OS X | +| `--watchos` | Restricts the search to Pods supported on Watch OS | +| `--web` | Opens a new search on cocoapods.org | + + +e.g. + + $ pod search video --osx + + -> AMCoreAudio (2.0.7) + AMCoreAudio is a Swift wrapper for Apple's CoreAudio framework + pod 'AMCoreAudio', '~> 2.0.7' + - Homepage: https://github.com/rnine/AMCoreAudio + - Source: https://github.com/rnine/AMCoreAudio.git + - Versions: 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0, 1.5, 1.4.3, 1.4.2, 1.4.1, 1.4, 1.3.2, 1.3.1, 1.3, 1.2, 1.1, 1.0.1, 1.0 [master repo] + + + -> AppleCoreAudioUtilityClasses@thehtb (2013.09.17) + A git mirror of Apple's Core Audio Utility Classes for better versioning and with clang/llvm fixes.
+ pod 'AppleCoreAudioUtilityClasses@thehtb', '~> 2013.09.17' + - Homepage: https://github.com/thehtb/AppleCoreAudioUtilityClasses + - Source: https://github.com/thehtb/AppleCoreAudioUtilityClasses.git + - Versions: 2013.09.17, 2013.2.18, 2013.1.2 [master repo] + - Subspecs: + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAProcess (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CAAutoDisposer (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CABitOperations (2013.09.17) + - AppleCoreAudioUtilityClasses@thehtb/PublicUtility/CASpectralProcessor (2013.09.17) + + + -> AudioKit (2.1.1) + Open-source audio synthesis, processing, & analysis platform. + pod 'AudioKit', '~> 2.1.1' + - Homepage: http://audiokit.io/ + - Source: https://github.com/audiokit/AudioKit.git + - Versions: 2.1.1, 2.0.1, 2.0, 1.3, 1.2-01, 1.2 [master repo] + + ... diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Rakefile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Rakefile new file mode 100644 index 0000000..c34b828 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/Rakefile @@ -0,0 +1,13 @@ +require 'bundler/gem_tasks' + +def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') +end + +desc 'Runs all the specs' +task :specs do + sh "bundle exec bacon #{specs('**')}" +end + +task :default => :specs + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec new file mode 100644 index 0000000..80d106f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/cocoapods-search.gemspec @@ -0,0 +1,25 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods-search/gem_version.rb' + +Gem::Specification.new do |spec| + spec.name = 'cocoapods-search' + spec.version = CocoapodsSearch::VERSION + spec.authors = ['Eloy Durán', 'Fabio Pelosin', 'Emma Koszinowski'] + spec.email = ['eloy.de.enige@gmail.com', 'fabiopelosin@gmail.com', 'emkosz@gmail.com'] + spec.description = %q{Search for pods.} + spec.summary = %q{Searches for pods, ignoring case, whose name matches `QUERY`.
If the + `--full` option is specified, this will also search in the summary and + description of the pods.} + spec.homepage = 'https://github.com/CocoaPods/cocoapods-search' + spec.license = 'MIT' + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ['lib'] + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb new file mode 100644 index 0000000..b955220 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search.rb @@ -0,0 +1 @@ +require 'cocoapods-search/gem_version' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb new file mode 100644 index 0000000..61b9cb3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command/search' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb new file mode 100644 index 0000000..4cc195d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/command/search.rb @@ -0,0 +1,115 @@ +module Pod + class Command + # @CocoaPods 0.0.2 + # + class Search < Command + self.summary = 'Search for pods' + + self.description = <<-DESC + Searches for pods, ignoring case, whose name, summary, description, or authors match `QUERY`. If the + `--simple` option is specified, this will only search in the names of the pods. + DESC + + self.arguments = [ + CLAide::Argument.new('QUERY', true), + ] + + def self.options + options = [ + ['--regex', 'Interpret the `QUERY` as a regular expression'], + ['--simple', 'Search only by name'], + ['--stats', 'Show additional stats (like GitHub watchers and forks)'], + ['--web', 'Searches on cocoapods.org'], + ] + options += Platform.all.map do |platform| + ["--#{platform.name.to_s}", "Restricts the search to Pods supported on #{Platform.string_name(platform.to_sym)}"] + end + options << ['--no-pager', 'Do not pipe search results into a pager'] + options.concat(super.reject { |option, _| option == '--silent' }) + end + + def initialize(argv) + @use_regex = argv.flag?('regex') + @simple_search = argv.flag?('simple') + @stats = argv.flag?('stats') + @web = argv.flag?('web') + @platform_filters = Platform.all.map do |platform| + argv.flag?(platform.name.to_s) ? platform.to_sym : nil + end.compact + @query = argv.arguments! unless argv.arguments.empty? + config.silent = false + @use_pager = argv.flag?('pager', true) + super + end + + def validate! + super + help! 'A search query is required.' unless @query + + unless @web || !@use_regex + begin + /#{@query.join(' ').strip}/ + rescue RegexpError + help! 'A valid regular expression is required.' + end + end + end + + def run + ensure_master_spec_repo_exists! + if @web + web_search + else + local_search + end + end + + def sources_manager + defined?(Pod::SourcesManager) ? 
Pod::SourcesManager : config.sources_manager + end + + def web_search + queries = @platform_filters.map do |platform| + "on:#{platform}" + end + queries += @query + query_parameter = queries.compact.flatten.join(' ') + url = "https://cocoapods.org/?q=#{CGI.escape(query_parameter).gsub('+', '%20')}" + UI.puts("Opening #{url}") + Executable.execute_command(:open, [url]) + end + + def local_search + query_regex = @query.reduce([]) { |result, q| + result << (@use_regex ? q : Regexp.escape(q)) + }.join(' ').strip + + sets = sources_manager.search_by_name(query_regex, !@simple_search) + + @platform_filters.each do |platform| + sets.reject! { |set| !set.specification.available_platforms.map(&:name).include?(platform) } + end + + if(@use_pager) + UI.with_pager { print_sets(sets) } + else + print_sets(sets) + end + end + + def print_sets(sets) + sets.each do |set| + begin + if @stats + UI.pod(set, :stats) + else + UI.pod(set, :normal) + end + rescue DSLError + UI.warn "Skipping `#{set.name}` because the podspec contains errors." + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb new file mode 100644 index 0000000..6364df6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods-search/gem_version.rb @@ -0,0 +1,3 @@ +module CocoapodsSearch + VERSION = '1.0.1'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..678d204 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'cocoapods-search/command' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb new file mode 100644 index 0000000..f3d4334 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/command/search_spec.rb @@ -0,0 +1,148 @@ +require File.expand_path('../../spec_helper', __FILE__) + +module Pod + describe Command::Search do + extend SpecHelper::TemporaryRepos + + describe 'Search' do + it 'registers itself' do + Command.parse(%w{ search }).should.be.instance_of Command::Search + end + + it 'runs with correct parameters' do + lambda { run_command('search', 'JSON') }.should.not.raise + lambda { run_command('search', 'JSON', '--simple') }.should.not.raise + end + + it 'complains for wrong parameters' do + lambda { run_command('search') }.should.raise CLAide::Help + lambda { run_command('search', 'too', '--wrong') }.should.raise CLAide::Help + lambda { run_command('search', '--wrong') }.should.raise CLAide::Help + end + + it 'searches for a pod with name matching the given query ignoring case' do + output = run_command('search', 'json', '--simple') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given query ignoring case' do + output = run_command('search', 'engelhart') + output.should.include? 'JSONKit' + end + + it 'searches for a pod with name, summary, or description matching the given multi-word query ignoring case' do + output = run_command('search', 'very', 'high', 'performance') + output.should.include?
'JSONKit' + end + + it 'prints search results in order' do + output = run_command('search', 'lib') + output.should.match /BananaLib.*JSONKit/m + end + + it 'restricts the search to Pods supported on iOS' do + output = run_command('search', 'BananaLib', '--ios') + output.should.include? 'BananaLib' + Specification.any_instance.stubs(:available_platforms).returns([Platform.osx]) + output = run_command('search', 'BananaLib', '--ios') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on OS X' do + output = run_command('search', 'BananaLib', '--osx') + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on Watch OS' do + output = run_command('search', 'a', '--watchos') + output.should.include? 'Realm' + output.should.not.include? 'BananaLib' + end + + it 'restricts the search to Pods supported on tvOS' do + output = run_command('search', 'n', '--tvos') + output.should.include? 'monkey' + output.should.not.include? 'BananaLib' + end + + it 'outputs with the silent parameter' do + output = run_command('search', 'BananaLib', '--silent') + output.should.include? 'BananaLib' + end + + it 'shows a friendly message when locally searching with invalid regex' do + lambda { run_command('search', '--regex', '+') }.should.raise CLAide::Help + end + + it 'does not try to validate the query as a regex with plain-text search' do + lambda { run_command('search', '+') }.should.not.raise CLAide::Help + end + + it 'uses regex search when asked for regex mode' do + output = run_command('search', '--regex', 'Ba(na)+Lib') + output.should.include? 'BananaLib' + output.should.not.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'JSONKit' + end + + it 'uses plain-text search when not asked for regex mode' do + output = run_command('search', 'Pod+With+Plus+Signs') + output.should.include? 'Pod+With+Plus+Signs' + output.should.not.include? 'BananaLib' + end + end + + describe 'option --web' do + extend SpecHelper::TemporaryRepos + + it 'searches with invalid regex' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=NSAttributedString%2BCCLFormat']) + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'should url encode search queries' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=NSAttributedString%2BCCLFormat']) + run_command('search', '--web', 'NSAttributedString+CCLFormat') + end + + it 'searches the web via the open! 
command' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=bananalib']) + run_command('search', '--web', 'bananalib') + end + + it 'includes option --osx correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aosx%20bananalib']) + run_command('search', '--web', '--osx', 'bananalib') + end + + it 'includes option --ios correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20bananalib']) + run_command('search', '--web', '--ios', 'bananalib') + end + + it 'includes option --watchos correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Awatchos%20bananalib']) + run_command('search', '--web', '--watchos', 'bananalib') + end + + it 'includes option --tvos correctly' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Atvos%20bananalib']) + run_command('search', '--web', '--tvos', 'bananalib') + end + + it 'includes any new platform option correctly' do + Platform.stubs(:all).returns([Platform.ios, Platform.tvos, Platform.new('whateveros')]) + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Awhateveros%20bananalib']) + run_command('search', '--web', '--whateveros', 'bananalib') + end + + it 'does not matter in which order the ios/osx options are set' do + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib']) + run_command('search', '--web', '--ios', '--osx', 'bananalib') + + Executable.expects(:execute_command).with(:open, ['https://cocoapods.org/?q=on%3Aios%20on%3Aosx%20bananalib']) + run_command('search', '--web', '--osx', '--ios', 'bananalib') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec new file mode 100644 index 0000000..881ed82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/BananaLib/1.0/BananaLib.podspec @@ -0,0 +1,21 @@ +Pod::Spec.new do |s| + s.name = 'BananaLib' + s.version = '1.0' + s.authors = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' } + s.homepage = 'http://banana-corp.local/banana-lib.html' + s.summary = 'Chunky bananas!' + s.description = 'Full of chunky bananas.' + s.platform = :ios + + s.source = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' } + s.source_files = 'Classes/*.{h,m}', 'Vendor' + s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' } + s.prefix_header_file = 'Classes/BananaLib.pch' + s.resources = "Resources/*.png" + s.dependency 'monkey', '~> 1.0.1', '< 1.0.9' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' 
+ } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec new file mode 100644 index 0000000..687fe5b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/1.4/JSONKit.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '1.4' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' + s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :tag => 'v1.4' } + + s.source_files = 'JSONKit.*' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec new file mode 100644 index 0000000..d9f2c9d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/JSONKit/999.999.999/JSONKit.podspec @@ -0,0 +1,12 @@ +Pod::Spec.new do |s| + s.name = 'JSONKit' + s.version = '999.999.999' + s.license = 'BSD / Apache License, Version 2.0' + s.summary = 'A Very High Performance Objective-C JSON Library.' + s.homepage = 'https://github.com/johnezang/JSONKit' + s.author = 'John Engelhart' + s.source = { :git => 'https://github.com/johnezang/JSONKit.git', :commit => '0aff3deb5e1bb2bbc88a83fd71c8ad5550185cce' } + + s.source_files = 'JSONKit.*' + s.compiler_flags = '-Wno-deprecated-objc-isa-usage', '-Wno-format' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec new file mode 100644 index 0000000..ef91e61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/OrangeFramework/0.1.0/OrangeFramework.podspec @@ -0,0 +1,16 @@ +Pod::Spec.new do |s| + s.name = "OrangeFramework" + s.version = "0.1.0" + s.author = { "Swiftest Orang-Utan" => "swiftest@orang.utan.local" } + s.summary = "Fresh juice!" + s.description = "Blends fresh orange juice." 
+ s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://utan.local/orange-framework.git", :tag => s.version.to_s } + s.license = 'MIT' + + s.platform = :ios, '8.0' + + s.source_files = 'Source/Juicer.swift' + + s.frameworks = 'UIKit' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec new file mode 100644 index 0000000..9aa9123 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Pod+With+Plus+Signs/1.0/Pod+With+Plus+Signs.podspec @@ -0,0 +1,17 @@ +Pod::Spec.new do |s| + s.name = 'Pod+With+Plus+Signs' + s.version = '1.0' + s.authors = 'Evil Corp' + s.homepage = 'http://evil-corp.local/pod_with_plus_signs.html' + s.summary = 'Messing with special chars' + s.description = 'I love messing up with special chars in my pod name! Mouahahahahaa (evil laugh)' + s.platform = :ios + + s.source = { :git => 'http://evil-corp.local/pod_with_plus_signs.git', :tag => '1.0' } + s.source_files = 'Classes/*.{h,m}' + s.license = { + :type => 'MIT', + :file => 'LICENSE', + :text => 'Permission is hereby granted ...' + } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec new file mode 100644 index 0000000..14f9360 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/Realm/0.94/Realm.podspec @@ -0,0 +1,18 @@ +Pod::Spec.new do |s| + s.name = 'Realm' + s.version = '0.94' + s.authors = 'Realm', { 'Realm' => 'help@realm.io' } + s.homepage = 'https://realm.io/' + s.summary = 'Realm is a modern data framework & database for iOS & OS X.' + s.description = 'The Realm database, for Objective-C. (If you want to use Realm from Swift, see the “RealmSwift” pod.)\n\nRealm is a mobile database: a replacement for Core Data & SQLite. You can use it on iOS & OS X. Realm is not an ORM on top SQLite: instead it uses its own persistence engine, built for simplicity (& speed). Learn more and get help at https://realm.io' + s.platform = :watchos + + s.source = { :git => 'https://github.com/realm/realm-cocoa.git', :tag => 'v0.94.0' } + s.source_files = 'Realm/*.{m,mm}', 'Realm/ObjectStore/*.cpp' + s.xcconfig = { 'CLANG_CXX_LANGUAGE_STANDARD": "compiler-default' => 'OTHER_CPLUSPLUSFLAGS": "-std=c++1y $(inherited)' } + s.prefix_header_file = 'Classes/Realm.pch' + s.license = { + :type => 'Apache 2.0', + :file => 'LICENSE' + } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec new file mode 100644 index 0000000..60d4234 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/fixtures/spec-repos/test_repo/monkey/1.0.2/monkey.podspec @@ -0,0 +1,11 @@ +Pod::Spec.new do |s| + s.name = "monkey" + s.version = "1.0.2" + s.author = { "Funky Monkey" => "funky@monkey.local" } + s.summary = "🙈🙉🙊" + s.description = "See no evil! Hear no evil! Speak no evil!" 
+ s.homepage = "http://httpbin.org/html" + s.source = { :git => "http://monkey.local/monkey.git", :tag => s.version.to_s } + s.license = 'MIT' + s.vendored_library = 'monkey.a' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb new file mode 100644 index 0000000..d10a130 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper.rb @@ -0,0 +1,85 @@ +require 'pathname' +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$:.unshift((ROOT + 'lib').to_s) +$:.unshift((ROOT + 'spec').to_s) + +require 'bundler/setup' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'cocoapods' + +require 'cocoapods_plugin' + +require 'spec_helper/command' # Allows to run Pod commands and returns their output. +require 'spec_helper/fixture' # Provides access to the fixtures and unpacks them if needed. +require 'spec_helper/temporary_repos' # Allows to create and modify temporary spec repositories. +require 'spec_helper/user_interface' # Redirects UI to UI.output & UI.warnings. +require 'spec_helper/pre_flight' # Cleans the temporary directory, the config & the UI.output before every test. + +module Bacon + class Context + include Pod::Config::Mixin + include SpecHelper::Fixture + include SpecHelper::Command + + def skip_xcodebuild? + ENV['SKIP_XCODEBUILD'] + end + + def temporary_directory + SpecHelper.temporary_directory + end + end +end + +#Mocha::Configuration.prevent(:stubbing_non_existent_method) + +module SpecHelper + def self.temporary_directory + ROOT + 'tmp' + end +end + +def temporary_sandbox + Pod::Sandbox.new(temporary_directory + 'Pods') +end + +def fixture_spec(name) + file = SpecHelper::Fixture.fixture(name) + Pod::Specification.from_file(file) +end + +def fixture_file_accessor(spec_or_name, platform = Pod::Platform.ios) + spec = spec_or_name.is_a?(Pod::Specification) ? spec_or_name : fixture_spec(spec_or_name) + path_list = Pod::Sandbox::PathList.new(spec.defined_in_file.dirname) + Pod::Sandbox::FileAccessor.new(path_list, spec.consumer(platform)) +end + +def fixture_target_definition(name = 'Pods', platform = Pod::Platform.ios) + Pod::Podfile::TargetDefinition.new(name, Pod::Podfile.new, 'name' => name, 'platform' => platform) +end + +def fixture_pod_target(spec_or_name, target_definition = nil) + spec = spec_or_name.is_a?(Pod::Specification) ? 
spec_or_name : fixture_spec(spec_or_name) + target_definition ||= fixture_target_definition + target_definition.store_pod(spec.name) + Pod::PodTarget.new([spec], [target_definition], config.sandbox).tap do |pod_target| + pod_target.file_accessors << fixture_file_accessor(spec, pod_target.platform) + consumer = spec.consumer(pod_target.platform) + pod_target.spec_consumers << consumer + end +end + +def fixture_aggregate_target(pod_targets = [], target_definition = nil) + target_definition ||= pod_targets.flat_map(&:target_definitions).first || fixture_target_definition + target = Pod::AggregateTarget.new(target_definition, config.sandbox) + target.client_root = config.sandbox.root.dirname + target.pod_targets = pod_targets + target +end + +#-----------------------------------------------------------------------------# + +SpecHelper::Fixture.fixture('banana-lib') # ensure it exists +SpecHelper::Fixture.fixture('orange-framework') diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb new file mode 100644 index 0000000..6662114 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/command.rb @@ -0,0 +1,27 @@ +module SpecHelper + module Command + def argv(*argv) + CLAide::ARGV.new(argv) + end + + def command(*argv) + argv += ['--no-ansi', '--no-pager'] + Pod::Command.parse(argv) + end + + def run_command(*args) + Dir.chdir(SpecHelper.temporary_directory) do + Pod::UI.output = '' + # @todo Remove this once all cocoapods has + # been converted to use the UI.puts + config_silent = config.silent? + config.silent = false + cmd = command(*args) + cmd.validate! + cmd.run + config.silent = config_silent + Pod::UI.output + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb new file mode 100644 index 0000000..39704e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/fixture.rb @@ -0,0 +1,32 @@ +module SpecHelper + def self.fixture(name) + Fixture.fixture(name) + end + + def self.create_sample_app_copy_from_fixture(fixture_name) + fixture_copy_path = temporary_directory + fixture_name + FileUtils.cp_r(fixture(fixture_name), temporary_directory) + fixture_copy_path + "#{fixture_name}.xcodeproj" + end + + def self.test_repo_url + 'https://github.com/CocoaPods/test_repo.git' + + end + + module Fixture + ROOT = ::ROOT + 'spec/fixtures' + + def fixture(name) + file = ROOT + name + unless file.exist? + archive = Pathname.new(file.to_s + '.tar.gz') + if archive.exist? 
+ system "cd '#{archive.dirname}' && tar -zxvf '#{archive}' > /dev/null 2>&1" + end + end + file + end + module_function :fixture + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb new file mode 100644 index 0000000..ee52b35 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/pre_flight.rb @@ -0,0 +1,36 @@ +# Restores the config to the default state before each requirement + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + + + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::Config.instance.tap do |c| + c.verbose = false + c.silent = true + c.repos_dir = fixture('spec-repos') + c.installation_root = SpecHelper.temporary_directory + c.cache_root = SpecHelper.temporary_directory + 'Cache' + end + + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.next_input = '' + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + SpecHelper.temporary_directory.rmtree if SpecHelper.temporary_directory.exist? + SpecHelper.temporary_directory.mkpath + + # TODO + #::Pod::SourcesManager.stubs(:search_index_path).returns(temporary_directory + 'search_index.yaml') + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb new file mode 100644 index 0000000..2c6f5da --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/temporary_repos.rb @@ -0,0 +1,90 @@ +module SpecHelper + def self.tmp_repos_path + TemporaryRepos.tmp_repos_path + end + + module TemporaryRepos + extend Pod::Executable + executable :git + + # @return [Pathname] The path for the repo with the given name. + # + def repo_path(name) + tmp_repos_path + name + end + + # Makes a repo with the given name. + # + def repo_make(name) + path = repo_path(name) + path.mkpath + Dir.chdir(path) do + `git init` + repo_make_readme_change(name, 'Added') + `git add .` + `git commit -m "Initialized."` + end + path + end + + # Clones a repo to the given name. + # + def repo_clone(from_name, to_name) + Dir.chdir(tmp_repos_path) { `git clone #{from_name} #{to_name} 2>&1 > /dev/null` } + repo_path(to_name) + end + + def repo_make_readme_change(name, string) + file = repo_path(name) + 'README' + file.open('w') { |f| f << "#{string}" } + end + + #--------------------------------------# + + def test_repo_path + repo_path('master') + end + + # Sets up a lightweight master repo in `tmp/cocoapods/repos/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`. + # + def set_up_test_repo + require 'fileutils' + test_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + 'master' + FileUtils.cp_r(origin, destination) + repo_make('master') + end + + def test_old_repo_path + repo_path('../master') + end + + # Sets up a lightweight master repo in `tmp/cocoapods/master` with the + # contents of `spec/fixtures/spec-repos/test_repo`.
+ # + def set_up_old_test_repo + require 'fileutils' + test_old_repo_path.mkpath + origin = ROOT + 'spec/fixtures/spec-repos/test_repo/.' + destination = tmp_repos_path + '../master' + FileUtils.cp_r(origin, destination) + repo_make('../master') + end + + #--------------------------------------# + + def tmp_repos_path + SpecHelper.temporary_directory + 'cocoapods/repos' + end + + module_function :tmp_repos_path + + def self.extended(base) + base.before do + TemporaryRepos.tmp_repos_path.mkpath + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb new file mode 100644 index 0000000..d603a45 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-search-1.0.1/spec/spec_helper/user_interface.rb @@ -0,0 +1,36 @@ +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + @next_input = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :next_input + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + + alias_method :gets, :next_input + + def print_warnings + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml new file mode 100644 index 0000000..f73b8e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.github/workflows/ci.yml @@ -0,0 +1,64 @@ +name: Specs + +jobs: + specs: + strategy: + fail-fast: false + matrix: + task: [SPECS] + ruby: [2.6, 2.7] + os: [ubuntu-16.04] + include: + - task: SPECS + os: macos-10.15 + ruby: system + + name: ${{ matrix.task }} / ${{ matrix.os }} / Ruby ${{ matrix.ruby }} + runs-on: ${{ matrix.os }} + + steps: + - name: Set build image var + run: echo "ImageVersion=$ImageVersion" >> $GITHUB_ENV + + - name: Checkout git + uses: actions/checkout@v1 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + if: ${{ matrix.ruby != 'system' }} + with: + ruby-version: ${{ matrix.ruby }} + + - name: Update git submodules + run: git submodule update --init + + - uses: actions/cache@v2 + with: + path: vendor/bundle + key: gems@v1-${{ matrix.os }}-${{ env.ImageVersion }}-Ruby${{ matrix.ruby }}-${{ hashFiles('Gemfile.lock') }} + restore-keys: | + gems@v1-${{ matrix.os }}-${{ env.ImageVersion }}-Ruby${{ matrix.ruby }}- + - name: Run bundle install + run: | + gem install bundler -v "~> 1.17" + bundle config path vendor/bundle + bundle install --jobs 4 --retry 3 --without debugging documentation + - name: Set up git identity + run: | + git config --global user.email "tests@cocoapods.org" + git config --global user.name "CocoaPods Tests" + + - name: Run Tests + run: bundle exec rake spec + env: + COCOAPODS_CI_TASKS: ${{ matrix.task }} + +on: + push: + branches: + - "master" + - "*-stable" + pull_request: + branches: + - master + - "*-stable" diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.gitignore b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle 
+.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.kick b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.kick new file mode 100644 index 0000000..612ecc6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.kick @@ -0,0 +1,29 @@ +recipe :ruby + +Kicker::Recipes::Ruby.runner_bin = 'bacon --quiet' + +process do |files| + specs = files.take_and_map do |file| + if file =~ %r{lib/(.+?)\.rb$} + s = Dir.glob("spec/**/#{File.basename(file, '.rb')}_spec.rb") + s.uniq unless s.empty? + end + end + Kicker::Recipes::Ruby.run_tests(specs) +end + +# Have written this so many times, probably should make a recipe out of it. +process do |files| + files.each do |file| + case file + when 'Gemfile' + files.delete(file) + execute 'bundle install' + end + end +end + +recipe :ignore +ignore(/.*\/?tags/) +ignore(/.*\/?\.git/) + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml new file mode 100644 index 0000000..227d50d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..ebc2123 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_cocoapods.yml @@ -0,0 +1,138 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +AssignmentInCondition: + Enabled: false + +# Allow backticks +AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +IfUnlessModifier: + Enabled: false + +# No enforced convention here. +SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. +Style/TrailingCommaInArguments: + EnforcedStyleForMultiline: comma + +Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Style/MultilineOperationIndentation: + EnforcedStyle: indented + +# Clashes with CLAide Command#validate! +GuardClause: + Enabled: false + +# Not always desirable: lib/claide/command/plugins_helper.rb:12:15 +Next: + Enabled: false + +# Autocorrect makes this cop much more useful, taking away needless guessing +Lint/EndAlignment: + AutoCorrect: true + + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. 
+Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/PerceivedComplexity: + Enabled: false + +#- CocoaPods support for Ruby 1.8.7 ------------------------------------------# + +HashSyntax: + EnforcedStyle: hash_rockets + +Lambda: + Enabled: false + +DotPosition: + EnforcedStyle: trailing + +EachWithObject: + Enabled: false + +Style/SpecialGlobalVars: + Enabled: false + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +Performance/RedundantMatch: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Void: + Exclude: + - spec/**/* + +ClassAndModuleChildren: + Exclude: + - spec/**/* + +UselessComparison: + Exclude: + - spec/**/* diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml new file mode 100644 index 0000000..e8af53d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/.rubocop_todo.yml @@ -0,0 +1,33 @@ +# This configuration was generated by `rubocop --auto-gen-config` +# on 2014-08-20 17:00:42 +0200 using RuboCop version 0.25.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +Metrics/CyclomaticComplexity: + Max: 8 + +# Offense count: 9 +# Configuration parameters: AllowURI. +Metrics/LineLength: + Max: 105 + +# Offense count: 7 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 42 + +# Offense count: 1 +Metrics/PerceivedComplexity: + Max: 9 + +# Offense count: 1 +Style/ClassVars: + Enabled: false + +# Offense count: 1 +# Configuration parameters: Keywords. +Style/CommentAnnotation: + Enabled: false diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md new file mode 100644 index 0000000..3a382bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/CHANGELOG.md @@ -0,0 +1,378 @@ +## 1.6.0 (2021-09-01) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.5.0 (2020-05-01) + +##### Enhancements + +* Add --synchronous option to `pod trunk push`. + [Paul Beusterien](https://github.com/paulb777) + [#147](https://github.com/CocoaPods/cocoapods-trunk/pull/147) + [CocoaPods#9497](https://github.com/CocoaPods/CocoaPods/issues/9497) + +##### Bug Fixes + +* None. + + +## 1.4.1 (2019-09-26) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Use a more robust `Trunk` init when pushing. 
+ [Igor Makarov](https://github.com/igor-makarov) + [#135](https://github.com/CocoaPods/cocoapods-trunk/pull/135) + + +## 1.4.0 (2019-08-21) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Update to get the master spec repo from `Source::Manager` for validation - effectively + use the new CDN `TrunkSource` for podspec validation and not a hard-coded URL + [Igor Makarov](https://github.com/igor-makarov) + [#132](https://github.com/CocoaPods/cocoapods-trunk/pull/132) + [CocoaPods#9112](https://github.com/CocoaPods/CocoaPods/issues/9112) + +## 1.3.1 (2018-08-16) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.3.0 (2017-10-02) + +##### Enhancements + +* Add skip test option to trunk push + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/93) + +* Loosen netrc requirement + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +* Update development dependencies to support MRI 2.3+ + [jasl](https://github.com/jasl) + [#93](https://github.com/CocoaPods/cocoapods-trunk/pull/95) + +##### Bug Fixes + +* None. + + +## 1.2.0 (2017-04-11) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Properly display `pod trunk deprecate` command line options + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#6486](https://github.com/CocoaPods/CocoaPods/issues/6486) + +* Add `--skip-import-validation` to skip linking a pod during lint. + [Dimitris Koutsogiorgas](https://github.com/dnkoutso) + [#86](https://github.com/CocoaPods/cocoapods-trunk/pull/86) + + +## 1.1.2 (2016-12-17) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Checks that `Pod::Validator` has `swift_version=` for CocoaPods <= 1.1.0 support. + [Danielle Tomlinson](https://github.com/dantoml) + [#6209](https://github.com/CocoaPods/CocoaPods/issues/6209) + + +## 1.1.1 (2016-10-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Support submitting from multiple versions of CocoaPods. + [Samuel Giddins](https://github.com/segiddins) + +## 1.1.0 (2016-10-19) + +##### Enhancements + +* Passes the pod's version of Swift used for deployment to the CocoaPods Specs repo + [Orta](https://github.com/orta) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +* Prettier success message when successfully pushed a new version + [Marin](https://github.com/icanzilb) + [#76](https://github.com/CocoaPods/cocoapods-trunk/pull/76) + +##### Bug Fixes + +* None. + + +## 1.1.0.beta.1 (2016-10-10) + +##### Enhancements + +* Pass --swift-version to the Validator during `pod lib lint` + [Danielle Tomlinson](https://github.com/dantoml) + [#92](https://github.com/CocoaPods/cocoapods-trunk/pull/72) + +##### Bug Fixes + +* None. + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Don't print the invocation of `/bin/date`. + [Samuel Giddins](https://github.com/segiddins) + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* Make the error loading a specification during `pod trunk push` more + informative. + [Samuel Giddins](https://github.com/segiddins) + [#63](https://github.com/CocoaPods/cocoapods-trunk/issues/63) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.4 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Compatibility With CocoaPods 1.0.0.beta.8. 
+ [Samuel Giddins](https://github.com/segiddins) + [#61](https://github.com/CocoaPods/cocoapods-trunk/issues/61) + + +## 1.0.0.beta.3 (2016-04-14) + +##### Enhancements + +* The failure reason is printed when validation fails during `pod trunk push`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#5073](https://github.com/CocoaPods/CocoaPods/issues/5073) + +##### Bug Fixes + +* None. + + +## 1.0.0.beta.2 (2016-02-03) + +##### Bug Fixes + +* Send a body with the `PATCH` request to deprecate a pod. + [Samuel Giddins](https://github.com/segiddins) + [#52](https://github.com/CocoaPods/cocoapods-trunk/issues/52) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Enhancements + +* The `pod deprecate PODNAME` command has been added to deprecate all versions + of a pod. + [Samuel Giddins](https://github.com/segiddins) + [#31](https://github.com/CocoaPods/cocoapods-trunk/issues/31) + +* The `pod delete PODNAME VERSION` command has been added to delete a single + version of a pod. + [Samuel Giddins](https://github.com/segiddins) + +##### Bug Fixes + +* If the master repo has not been set up when pushing a spec, run `pod setup` + instead of failing. + [Samuel Giddins](https://github.com/segiddins) + [#48](https://github.com/CocoaPods/cocoapods-trunk/issues/48) + + +## 0.6.4 (2015-08-28) + +##### Bug Fixes + +* This release fixes installation compatibility issues when using the RubyGem + due to an incompatible dependency on `nap`. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.3 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.6.2 (2015-08-26) + +##### Enhancements + +* The `--allow-warnings` flag to `pod trunk push` is now propagated to the trunk + server. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3855](https://github.com/CocoaPods/CocoaPods/issues/3855) + + +## 0.6.1 (2015-05-27) + +##### Enhancements + +* The `master` specs repo is updated before and after pushing a new spec to + trunk. + [Samuel Giddins](https://github.com/segiddins) + [#43](https://github.com/CocoaPods/cocoapods-trunk/issues/43) + + +## 0.6.0 (2015-03-11) + +##### Enhancements + +* Allow specifying a Trunk token via the `COCOAPODS_TRUNK_TOKEN` environment + variable. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#3224](https://github.com/CocoaPods/CocoaPods/issues/3224) + + +## 0.5.1 (2015-02-25) + +##### Enhancements + +* Lint as a framework automatically. If needed, the `--use-libraries` + option allows linting as a static library. + [Boris Bügling](https://github.com/neonichu) + [#2912](https://github.com/CocoaPods/CocoaPods/issues/2912) + +##### Bug Fixes + +* Fix the detection of spec validation errors, and present the proper error + (and messages) to the user. + [Orta Therox](https://github.com/orta) + [#39](https://github.com/CocoaPods/cocoapods-trunk/issues/39) + + +## 0.5.0 (2014-12-25) + +##### Enhancements + +* Added `pod trunk remove-owner` command to remove an owner from a pod. + [Samuel Giddins](https://github.com/segiddins) + [#35](https://github.com/CocoaPods/cocoapods-trunk/issues/35) + +* Added `pod trunk info` command to get information for a pod, including the + owners. + [Kyle Fuller](https://github.com/kylef) + [#15](https://github.com/CocoaPods/cocoapods-trunk/issues/15) + + +## 0.4.1 (2014-11-19) + +##### Enhancements + +* Improved code readability and structure by splitting subcommands + into individual files.
+ [Olivier Halligon](https://github.com/alisoftware) + [#21](https://github.com/CocoaPods/CocoaPods/issues/21) + +##### Bug Fixes + +* Updates for changes in CocoaPods regarding `--allow-warnings`. + [Kyle Fuller](https://github.com/kylef) + [Cocoapods#2831](https://github.com/CocoaPods/CocoaPods/pull/2831) + + +## 0.4.0 (2014-11-06) + +##### Bug Fixes + +* Fixes installation issues with the JSON dependency. + [Eloy Durán](https://github.com/alloy) + [CocoaPods#2773](https://github.com/CocoaPods/CocoaPods/issues/2773) + +## 0.3.1 (2014-10-15) + +##### Bug Fixes + +* Fixes an issue introduced with the release of `netrc 0.7.8`. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#2674](https://github.com/CocoaPods/CocoaPods/issues/2674) + + +## 0.3.0 (2014-10-07) + +##### Enhancements + +* When linting, only allow dependencies from the 'master' specs repository. + [Samuel Giddins](https://github.com/segiddins) + [#28](https://github.com/CocoaPods/cocoapods-trunk/issues/28) + +##### Bug Fixes + +* Fixes an issue where `pod trunk push` doesn't show which validation errors + occurred and just states that it failed. + [Kyle Fuller](https://github.com/kylef) + [#26](https://github.com/CocoaPods/cocoapods-trunk/issues/26) + + +## 0.2.0 (2014-09-11) + +##### Enhancements + +* Network errors are now gracefully handled. + [Samuel E. Giddins](https://github.com/segiddins) + +* Adopted new argument format of CLAide. + [Olivier Halligon](https://github.com/AliSoftware) + + +## 0.1.0 (2014-05-19) + +* Initial release. diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile new file mode 100644 index 0000000..0244104 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile @@ -0,0 +1,25 @@ +source 'https://rubygems.org' + +gemspec + +# This is the version that ships with OS X 10.10, so be sure we test against it. +# At the same time, the 1.7.7 version won't install cleanly on Ruby > 2.2, +# so we use a fork that makes a trivial change to a macro invocation.
+gem 'json', :git => 'https://github.com/segiddins/json.git', :branch => 'seg-1.7.7-ruby-2.2' + +group :development do + gem 'cocoapods', :git => "https://github.com/CocoaPods/CocoaPods.git", :branch => 'master' + gem 'cocoapods-core', :git => "https://github.com/CocoaPods/Core.git", :branch => 'master' + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + + gem 'bacon' + gem 'kicker' + gem 'mocha' + gem 'mocha-on-bacon' + gem 'prettybacon' + gem 'webmock' + + gem 'codeclimate-test-reporter', :require => nil + gem 'rubocop' +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock new file mode 100644 index 0000000..45210fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Gemfile.lock @@ -0,0 +1,190 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: a5d1a29b08ca88f90f47104805bc4fad2efc93c9 + branch: master + specs: + claide (1.0.3) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: 035518e56945778e9916d8118ea5e61ecb96beb0 + branch: master + specs: + cocoapods (1.11.0) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.11.0) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.4.0, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (= 1.6.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 1.0, < 3.0) + xcodeproj (>= 1.21.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: a8e38de9907968d6e627b1465f053c55fc778118 + branch: master + specs: + cocoapods-core (1.11.0) + activesupport (>= 5.0, < 7) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + +GIT + remote: https://github.com/segiddins/json.git + revision: a9588bc4334c2f5bf985f255b61c05eafdcd8907 + branch: seg-1.7.7-ruby-2.2 + specs: + json (1.7.7) + +PATH + remote: . 
+ specs: + cocoapods-trunk (1.6.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.3) + activesupport (6.1.4.1) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + ast (2.2.0) + atomos (0.1.3) + bacon (1.2.0) + cocoapods-deintegrate (1.0.5) + cocoapods-downloader (1.5.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.1) + cocoapods-try (1.2.0) + codeclimate-test-reporter (0.4.7) + simplecov (>= 0.7.1, < 1.0.0) + colored2 (3.1.2) + concurrent-ruby (1.1.9) + crack (0.4.3) + safe_yaml (~> 1.0.0) + docile (1.1.5) + escape (0.0.4) + ethon (0.14.0) + ffi (>= 1.15.0) + ffi (1.15.3) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + hashdiff (0.3.4) + httpclient (2.8.3) + i18n (1.8.10) + concurrent-ruby (~> 1.0) + kicker (3.0.0) + listen (~> 1.3.0) + notify (~> 0.5.2) + listen (1.3.1) + rb-fsevent (>= 0.9.3) + rb-inotify (>= 0.9) + rb-kqueue (>= 0.2) + metaclass (0.0.4) + minitest (5.14.4) + mocha (1.1.0) + metaclass (~> 0.0.1) + mocha-on-bacon (0.2.2) + mocha (>= 0.13.0) + molinillo (0.8.0) + multi_json (1.11.2) + nanaimo (0.3.0) + nap (1.1.0) + netrc (0.11.0) + notify (0.5.2) + parser (2.3.0.7) + ast (~> 2.2) + powerpack (0.1.1) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (4.0.6) + rainbow (2.1.0) + rake (10.4.2) + rb-fsevent (0.9.5) + rb-inotify (0.9.5) + ffi (>= 0.5.0) + rb-kqueue (0.2.4) + ffi (>= 0.5.0) + rexml (3.2.5) + rubocop (0.39.0) + parser (>= 2.3.0.7, < 3.0) + powerpack (~> 0.1) + rainbow (>= 1.99.1, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-macho (2.5.1) + ruby-progressbar (1.7.5) + safe_yaml (1.0.4) + simplecov (0.9.2) + docile (~> 1.1.0) + multi_json (~> 1.0) + simplecov-html (~> 0.9.0) + simplecov-html (0.9.0) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + unicode-display_width (1.0.3) + webmock (3.5.1) + addressable (>= 2.3.6) + crack (>= 0.3.2) + hashdiff + xcodeproj (1.21.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.3.0) + rexml (~> 3.2.4) + zeitwerk (2.4.2) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-trunk! + codeclimate-test-reporter + json! + kicker + mocha + mocha-on-bacon + prettybacon + rake (~> 10.0) + rubocop + webmock + +BUNDLED WITH + 1.17.3 diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt new file mode 100644 index 0000000..d011c24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2013 Eloy Durán + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/README.md
new file mode 100644
index 0000000..a600242
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/README.md
@@ -0,0 +1,35 @@
+# CocoaPods::Trunk
+
+[![Build Status](https://img.shields.io/github/workflow/status/CocoaPods/cocoapods-trunk/Specs)](https://github.com/CocoaPods/cocoapods-trunk/actions)
+[![Maintainability](https://api.codeclimate.com/v1/badges/157b8b7f7b73976f3edf/maintainability)](https://codeclimate.com/github/CocoaPods/cocoapods-trunk/maintainability)
+
+CocoaPods plugin for trunk.
+
+## Installation
+
+Add this line to your application's Gemfile:
+
+    gem 'cocoapods-trunk'
+
+And then execute:
+
+    $ bundle
+
+Or install it yourself as:
+
+    $ gem install cocoapods-trunk
+
+## Usage
+
+With a local install of `trunk.cocoapods.org` up and running:
+
+    $ env TRUNK_SCHEME_AND_HOST=http://localhost:4567 bundle exec pod trunk --help
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create a new Pull Request
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Rakefile
new file mode 100644
index 0000000..2a8b679
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/Rakefile
@@ -0,0 +1,68 @@
+# encoding: utf-8
+
+#-- Bootstrap --------------------------------------------------------------#
+
+desc 'Initializes your working copy to run the specs'
+task :bootstrap do
+  if system('which bundle')
+    title 'Installing gems'
+    sh 'bundle install'
+  else
+    $stderr.puts "\033[0;31m" \
+      "[!] Please install the bundler gem manually:\n" \
+      '    $ [sudo] gem install bundler' + "\e[0m"
+    exit 1
+  end
+end
+
+begin
+  require "bundler/gem_tasks"
+  task :default => :spec
+
+  #-- Specs ------------------------------------------------------------------#
+
+  desc 'Runs all the specs'
+  task :spec do
+    title 'Running Unit Tests'
+    files = FileList['spec/**/*_spec.rb'].shuffle.join(' ')
+    sh "bundle exec bacon #{files}"
+
+    title 'Checking code style...'
+    Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3'
+  end
+
+  #-- Kick -------------------------------------------------------------------#
+
+  desc 'Automatically run specs for updated files'
+  task :kick do
+    exec 'bundle exec kicker -c'
+  end
+
+  #-- RuboCop ----------------------------------------------------------------#
+
+  if RUBY_VERSION >= '1.9.3'
+    require 'rubocop/rake_task'
+    RuboCop::RakeTask.new
+  end
+
+rescue LoadError => e
+  $stderr.puts "\033[0;31m" \
+    '[!] Some Rake tasks have been disabled because the environment' \
+    ' couldn’t be loaded. Be sure to run `rake bootstrap` first.' \
\ + "\e[0m" + $stderr.puts e.message + $stderr.puts e.backtrace + $stderr.puts +end + +#-- Helpers ------------------------------------------------------------------# + +def title(title) + cyan_title = "\033[0;36m#{title}\033[0m" + puts + puts '-' * 80 + puts cyan_title + puts '-' * 80 + puts +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec new file mode 100644 index 0000000..e4f525f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/cocoapods-trunk.gemspec @@ -0,0 +1,26 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_trunk' + +Gem::Specification.new do |spec| + spec.name = "cocoapods-trunk" + spec.version = CocoaPodsTrunk::VERSION + spec.authors = ["Eloy DurÃĄn"] + spec.email = ["eloy.de.enige@gmail.com"] + spec.summary = "Interact with trunk.cocoapods.org" + spec.homepage = "https://github.com/CocoaPods/cocoapods-trunk" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_dependency 'nap', '>= 0.8', '< 2.0' + spec.add_dependency 'netrc', '~> 0.11' + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake", '~> 10.0' + + spec.required_ruby_version = '>= 2.0.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..60a2dea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/trunk' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb new file mode 100644 index 0000000..1896acf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/cocoapods_trunk.rb @@ -0,0 +1,3 @@ +module CocoaPodsTrunk + VERSION = '1.6.0'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb new file mode 100644 index 0000000..7101e5d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk.rb @@ -0,0 +1,146 @@ +# encoding: UTF-8 + +require 'json' +require 'rest' +require 'netrc' + +module Pod + class Command + class Trunk < Command + self.abstract_command = true + self.summary = 'Interact with the CocoaPods API (e.g. 
publishing new specs)' + + SCHEME_AND_HOST = ENV['TRUNK_SCHEME_AND_HOST'] || 'https://trunk.cocoapods.org' + BASE_URL = "#{SCHEME_AND_HOST}/api/v1".freeze + + require 'pod/command/trunk/add_owner' + require 'pod/command/trunk/delete' + require 'pod/command/trunk/deprecate' + require 'pod/command/trunk/info' + require 'pod/command/trunk/me' + require 'pod/command/trunk/push' + require 'pod/command/trunk/register' + require 'pod/command/trunk/remove_owner' + + private + + def request_url(action, url, *args) + response = create_request(action, url, *args) + if (400...600).cover?(response.status_code) + print_error(response.body) + end + response + end + + def request_path(action, path, *args) + request_url(action, "#{BASE_URL}/#{path}", *args) + end + + def create_request(*args) + if verbose? + REST.send(*args) do |request| + request.set_debug_output($stdout) + end + else + REST.send(*args) + end + end + + def print_error(body) + begin + json = JSON.parse(body) + rescue JSON::ParserError + json = {} + end + error = json['error'] || "An unexpected error occurred: #{body}" + + case data = json['data'] + when Hash + lines = data.sort_by(&:first).map do |attr, messages| + attr = attr[0, 1].upcase << attr[1..-1] + messages.sort.map do |message| + "- #{attr}: #{message}" + end + end.flatten + count = lines.size + lines.unshift "The following #{'validation'.pluralize(count)} failed:" + error += "\n" << lines.join("\n") + end + + raise Informative, error + end + + def print_messages(data_url, messages, spec = nil, action = nil) + if verbose? || spec.nil? + # Using UI.labeled here is dangerous, as it wraps the URL and indents + # it, which breaks the URL when you try to copy-paste it. + UI.puts " - Data URL: #{data_url}" + + server_logs = messages.map do |entry| + at, message = entry.to_a.flatten + "#{formatted_time(at)}: #{message}" + end + UI.labeled 'Log messages', server_logs + else + separator = '-' * 80 + UI.puts + UI.puts separator + UI.puts " 🎉 Congrats" + UI.puts + UI.puts " 🚀 #{spec.name} (#{spec.version}) successfully #{action}" + unless messages.empty? + at = messages.first.to_a.flatten.first + UI.puts " 📅 #{formatted_time(at)}" + end + UI.puts " 🌎 https://cocoapods.org/pods/#{spec.name}" + UI.puts " 👍 Tell your friends!" + UI.puts separator + end + end + + def json(response) + JSON.parse(response.body) + end + + def netrc + @@netrc ||= Netrc.read + end + + def token + ENV['COCOAPODS_TRUNK_TOKEN'] || + (netrc['trunk.cocoapods.org'] && netrc['trunk.cocoapods.org'].password) + end + + def default_headers + { + 'Content-Type' => 'application/json; charset=utf-8', + 'Accept' => 'application/json; charset=utf-8', + 'User-Agent' => "CocoaPods/#{Pod::VERSION}", + } + end + + def auth_headers + default_headers.merge('Authorization' => "Token #{token}") + end + + def formatted_time(time_string) + require 'active_support/time' + @tz_offset ||= Time.zone_offset(time_zone) + @current_year ||= Date.today.year + + time = Time.parse(time_string) + @tz_offset + formatted = time.to_formatted_s(:long_ordinal) + # No need to show the current year, the user will probably know. 
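+        # (Illustrative, based on the expected output in this gem's specs:
+        # assuming the current year is 2015, a timestamp rendered by
+        # `to_formatted_s(:long_ordinal)` as "December 5th, 2015 02:00"
+        # becomes "December 5th, 02:00" after the year is stripped below.)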
+        if time.year == @current_year
+          formatted.sub!(" #{@current_year}", '')
+        end
+        formatted
+      end
+
+      def time_zone
+        out, = Executable.capture_command('/bin/date', %w(+%Z), :capture => :out)
+        out.strip
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb
new file mode 100644
index 0000000..529a6f5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/add_owner.rb
@@ -0,0 +1,47 @@
+module Pod
+  class Command
+    class Trunk
+      # @CocoaPods 0.33.0
+      #
+      class AddOwner < Trunk
+        self.summary = 'Add an owner to a pod'
+        self.description = <<-DESC
+              Adds the registered user with the specified `OWNER-EMAIL` as an owner
+              of the given `POD`.
+              An ‘owner’ is a registered user who is allowed to make changes to a
+              pod, such as pushing new versions and adding and removing other ‘owners’.
+        DESC
+
+        self.arguments = [
+          CLAide::Argument.new('POD', true),
+          CLAide::Argument.new('OWNER-EMAIL', true),
+        ]
+
+        def initialize(argv)
+          @pod = argv.shift_argument
+          @email = argv.shift_argument
+          super
+        end
+
+        def validate!
+          super
+          unless token
+            help! 'You need to register a session first.'
+          end
+          unless @pod && @email
+            help! 'Specify the pod name and the new owner’s email address.'
+          end
+        end
+
+        def run
+          body = { 'email' => @email }.to_json
+          json = json(request_path(:patch, "pods/#{@pod}/owners", body, auth_headers))
+          UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" }
+        rescue REST::Error => e
+          raise Informative, "There was an error adding #{@email} to " \
+                             "#{@pod} on trunk: #{e.message}"
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb
new file mode 100644
index 0000000..e10203b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/delete.rb
@@ -0,0 +1,70 @@
+module Pod
+  class Command
+    class Trunk
+      # @CocoaPods 1.0.0.beta.1
+      #
+      class Delete < Trunk
+        self.summary = 'Deletes a version of a pod.'
+        self.description = <<-DESC
+              WARNING: It is generally considered bad behavior to remove
+              versions of a Pod that others are depending on! Please
+              consider using the deprecate command instead.
+
+              Deletes the specified pod version from trunk and the master specs
+              repo. Once deleted, this version can never be pushed again.
+        DESC
+
+        self.arguments = [
+          CLAide::Argument.new('NAME', true),
+          CLAide::Argument.new('VERSION', true),
+        ]
+
+        def initialize(argv)
+          @name = argv.shift_argument
+          @version = argv.shift_argument
+          super
+        end
+
+        def validate!
+          super
+          help! 'Please specify a pod name.' unless @name
+          help! 'Please specify a version.' unless @version
+        end
+
+        def run
+          return unless confirm_deletion?
+          json = delete
+          print_messages(json['data_url'], json['messages'], nil, nil)
+        end
+
+        private
+
+        WARNING_MESSAGE = 'WARNING: It is generally considered bad behavior ' \
+          "to remove versions of a Pod that others are depending on!\n" \
+          'Please consider using the `deprecate` command instead.'.freeze
+
+        def confirm_deletion?
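+          # Keep prompting until the user gives an explicit affirmative or
+          # negative answer; any other input (e.g. "garbage") re-asks the
+          # question, as exercised by this gem's delete_spec.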
+ UI.puts(WARNING_MESSAGE.yellow) + loop do + UI.print("Are you sure you want to delete this Pod version?\n> ") + answer = UI.gets.strip.downcase + UI.puts # ensures a newline is printed after the user input + affirmatives = %w(y yes true 1) + negatives = %w(n no false 0) + return true if affirmatives.include?(answer) + return false if negatives.include?(answer) + end + end + + def delete + response = request_path(:delete, "pods/#{@name}/#{@version}", auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deleting the pod version ' \ + "from trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb new file mode 100644 index 0000000..4e3b78d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/deprecate.rb @@ -0,0 +1,48 @@ +module Pod + class Command + class Trunk + # @CocoaPods 1.0.0.beta.1 + # + class Deprecate < Trunk + self.summary = 'Deprecates a pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def self.options + [ + ['--in-favor-of=OTHER_NAME', 'The pod to deprecate this pod in favor of.'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @in_favor_of = argv.option('in-favor-of') + super + end + + def validate! + super + help! 'Please specify a pod name.' unless @name + end + + def run + json = deprecate + print_messages(json['data_url'], json['messages'], nil, nil) + end + + def deprecate + body = { + :in_favor_of => @in_favor_of, + }.to_json + response = request_path(:patch, "pods/#{@name}/deprecated", body, auth_headers) + url = response.headers['location'].first + json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error deprecating the pod ' \ + "via trunk: #{e.message}" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb new file mode 100644 index 0000000..2941890 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/info.rb @@ -0,0 +1,35 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Info < Trunk + self.summary = 'Returns information about a Pod.' + self.arguments = [ + CLAide::Argument.new('NAME', true), + ] + + def initialize(argv) + @name = argv.shift_argument + super + end + + def validate! + super + help! 'Please specify a pod name.' 
unless @name + end + + def run + response = json(request_path(:get, "pods/#{@name}", auth_headers)) + versions = response['versions'] || [] + owners = response['owners'] || [] + + UI.title(@name) do + UI.labeled 'Versions', versions.map { |v| "#{v['name']} (#{v['created_at']})" } + UI.labeled 'Owners', owners.map { |o| "#{o['name']} <#{o['email']}>" } + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb new file mode 100644 index 0000000..b8aab38 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/me.rb @@ -0,0 +1,119 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Me < Trunk + self.summary = 'Display information about your sessions' + self.description = <<-DESC + Includes information about your registration, followed by all your + sessions. + + These are your current session, other valid sessions, unverified + sessions, and expired sessions. + DESC + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + me = json(request_path(:get, 'sessions', auth_headers)) + owner = json(request_path(:get, "owners/#{me['email']}")) + UI.labeled 'Name', owner['name'] + UI.labeled 'Email', owner['email'] + UI.labeled 'Since', formatted_time(owner['created_at']) + + pods = owner['pods'] || [] + pods = pods.map { |pod| pod['name'] } + pods = 'None' unless pods.any? + UI.labeled 'Pods', pods + + sessions = me['sessions'].map do |session| + hash = { + :created_at => formatted_time(session['created_at']), + :valid_until => formatted_time(session['valid_until']), + :created_from_ip => session['created_from_ip'], + :description => session['description'], + } + if Time.parse(session['valid_until']) <= Time.now.utc + hash[:color] = :red + elsif session['verified'] + hash[:color] = session['current'] ? :cyan : :green + else + hash[:color] = :yellow + hash[:valid_until] = 'Unverified' + end + hash + end + + columns = [:created_at, :valid_until, :created_from_ip, :description].map do |key| + find_max_size(sessions, key) + end + + sessions = sessions.map do |session| + created_at = session[:created_at].ljust(columns[0]) + valid_until = session[:valid_until].rjust(columns[1]) + created_from_ip = session[:created_from_ip].ljust(columns[2]) + description = session[:description] + msg = "#{created_at} - #{valid_until}. IP: #{created_from_ip}" + msg << " Description: #{description}" if description + msg.send(session[:color]) + end + + UI.labeled 'Sessions', sessions + + rescue REST::Error => e + raise Informative, 'There was an error fetching your info ' \ + "from trunk: #{e.message}" + end + + private + + def find_max_size(sessions, key) + sessions.map { |s| (s[key] || '').size }.max + end + + class CleanSessions < Me + self.summary = 'Remove sessions' + self.description = <<-DESC + By default this will clean-up your sessions by removing expired and + unverified sessions. + + To remove all your sessions, except for the one you are currently + using, specify the `--all` flag. + DESC + + def self.options + [ + ['--all', 'Removes all your sessions, except for the current one'], + ].concat(super) + end + + def initialize(argv) + @remove_all = argv.flag?('all', false) + super + end + + def validate! + super + unless token + help! 'You need to register a session first.' + end + end + + def run + path = @remove_all ? 
'sessions/all' : 'sessions' + request_path(:delete, path, auth_headers) + rescue REST::Error => e + raise Informative, 'There was an error cleaning up your ' \ + "sessions from trunk: #{e.message}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb new file mode 100644 index 0000000..209018b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/push.rb @@ -0,0 +1,169 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Push < Trunk + self.summary = 'Publish a podspec' + self.description = <<-DESC + Publish the podspec at `PATH` to make it available to all users of + the ‘trunk’ spec-repo. If `PATH` is not provided, defaults to the + current directory. + + Before pushing the podspec to cocoapods.org, this will perform a local + lint of the podspec, including a build of the library. However, it + remains *your* responsibility to ensure that the published podspec + will actually work for your users. Thus it is recommended that you + *first* try to use the podspec to integrate the library into your demo + and/or real application. + + If this is the first time you publish a spec for this pod, you will + automatically be registered as the ‘owner’ of this pod. (Note that + ‘owner’ in this case implies a person that is allowed to publish new + versions and add other ‘owners’, not necessarily the library author.) + DESC + + self.arguments = [ + CLAide::Argument.new('PATH', false), + ] + + def self.options + [ + ['--allow-warnings', 'Allows push even if there are lint warnings'], + ['--use-libraries', 'Linter uses static libraries to install the spec'], + ['--use-modular-headers', 'Lint uses modular headers during installation'], + ['--swift-version=VERSION', 'The SWIFT_VERSION that should be used to lint the spec. ' \ + 'This takes precedence over a .swift-version file.'], + ['--skip-import-validation', 'Lint skips validating that the pod can be imported'], + ['--skip-tests', 'Lint skips building and running tests during validation'], + ['--synchronous', 'If validation depends on other recently pushed pods, synchronize'], + ].concat(super) + end + + def initialize(argv) + @allow_warnings = argv.flag?('allow-warnings', false) + @use_frameworks = !argv.flag?('use-libraries') + @use_modular_headers = argv.flag?('use-modular-headers') + @swift_version = argv.option('swift-version', nil) + @skip_import_validation = argv.flag?('skip-import-validation', false) + @skip_tests = argv.flag?('skip-tests', false) + @path = argv.shift_argument || '.' + @synchronous = argv.flag?('synchronous', false) + find_podspec_file if File.directory?(@path) + super + end + + def validate! + super + unless token + help! 'You need to run `pod trunk register` to register a session first.' + end + unless @path + help! 'Please specify the path to the podspec file.' + end + unless File.exist?(@path) && !File.directory?(@path) + help! "The specified path `#{@path}` does not point to " \ + 'an existing podspec file.' 
+ end + end + + def run + update_master_repo + validate_podspec + status, json = push_to_trunk + update_master_repo + + if (400...600).cover?(status) + print_messages(json['data_url'], json['messages'], nil) + else + print_messages(json['data_url'], json['messages'], spec, 'published') + end + end + + private + + MASTER_GIT_REPO_URL = 'https://github.com/CocoaPods/Specs.git'.freeze + + def push_to_trunk + spec.attributes_hash[:pushed_with_swift_version] = @swift_version if @swift_version + response = request_path(:post, "pods?allow_warnings=#{@allow_warnings}", + spec.to_json, auth_headers) + url = response.headers['location'].first + return response.status_code, json(request_url(:get, url, default_headers)) + rescue REST::Error => e + raise Informative, 'There was an error pushing a new version ' \ + "to trunk: #{e.message}" + end + + def find_podspec_file + podspecs = Dir[Pathname(@path) + '*.podspec{.json,}'] + case podspecs.count + when 0 + UI.notice "No podspec found in directory `#{@path}`" + when 1 + UI.notice "Found podspec `#{podspecs[0]}`" + else + UI.notice "Multiple podspec files in directory `#{@path}`. " \ + 'You need to explicitly specify which one to use.' + end + @path = (podspecs.count == 1) ? podspecs[0] : nil + end + + def spec + @spec ||= Pod::Specification.from_file(@path) + rescue Informative => e # TODO: this should be a more specific error + raise Informative, 'Unable to interpret the specified path ' \ + "#{UI.path(@path)} as a podspec (#{e})." + end + + # Performs a full lint against the podspecs. + # + # TODO: Currently copied verbatim from `pod push`. + def validate_podspec + UI.puts 'Validating podspec'.yellow + + validator = Validator.new(spec, [repo_url]) + validator.allow_warnings = @allow_warnings + validator.use_frameworks = @use_frameworks + if validator.respond_to?(:use_modular_headers=) + validator.use_modular_headers = @use_modular_headers + end + if validator.respond_to?(:swift_version=) + validator.swift_version = @swift_version + end + validator.skip_import_validation = @skip_import_validation + validator.skip_tests = @skip_tests + validator.validate + unless validator.validated? + raise Informative, "The spec did not pass validation, due to #{validator.failure_reason}." + end + + # Let the validator's logic for the swift version + # set the value for the trunk JSON uploader + @swift_version = validator.respond_to?(:used_swift_version) && validator.used_swift_version + end + + def repo_url + @synchronous ? MASTER_GIT_REPO_URL : Pod::TrunkSource::TRUNK_REPO_URL + end + + def update_master_repo + # more robust Trunk setup logic: + # - if Trunk exists, updates it + # - if Trunk doesn't exist, add it and update it + # + repo = sources_manager.find_or_create_source_with_url(repo_url) + sources_manager.update(repo.name) + end + + def sources_manager + if defined?(Pod::SourcesManager) + Pod::SourcesManager + else + config.sources_manager + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb new file mode 100644 index 0000000..19f1a09 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/register.rb @@ -0,0 +1,78 @@ +module Pod + class Command + class Trunk + # @CocoaPods 0.33.0 + # + class Register < Trunk + self.summary = 'Manage sessions' + self.description = <<-DESC + Register a new account, or create a new session. 
+
+            If this is your first registration, both an `EMAIL` address and
+            `YOUR_NAME` are required. If you’ve already registered with trunk, you may
+            omit the `YOUR_NAME` (unless you would like to change it).
+
+            It is recommended that you provide a description of the session, so
+            that it will be easier to identify later on. For instance, when you
+            would like to clean-up your sessions. A common example is to specify
+            where the machine that uses the session is physically located.
+
+            Examples:
+
+                $ pod trunk register eloy@example.com 'Eloy Durán' --description='Personal Laptop'
+                $ pod trunk register eloy@example.com --description='Work Laptop'
+                $ pod trunk register eloy@example.com
+        DESC
+
+        self.arguments = [
+          CLAide::Argument.new('EMAIL', true),
+          CLAide::Argument.new('YOUR_NAME', false),
+        ]
+
+        def self.options
+          [
+            ['--description=DESCRIPTION', 'An arbitrary description to ' \
+                                          'easily identify your session ' \
+                                          'later on.'],
+          ].concat(super)
+        end
+
+        def initialize(argv)
+          @session_description = argv.option('description')
+          @email = argv.shift_argument
+          @name = argv.shift_argument
+          super
+        end
+
+        def validate!
+          super
+          unless @email
+            help! 'Specify at least your email address.'
+          end
+        end
+
+        def run
+          body = {
+            'email' => @email,
+            'name' => @name,
+            'description' => @session_description,
+          }.to_json
+          json = json(request_path(:post, 'sessions', body, default_headers))
+          save_token(json['token'])
+          # TODO UI.notice inserts an empty line :/
+          UI.puts '[!] Please verify the session by clicking the link in the ' \
+                  "verification email that has been sent to #{@email}".yellow
+        rescue REST::Error => e
+          raise Informative, 'There was an error registering with trunk: ' \
+                             "#{e.message}"
+        end
+
+        def save_token(token)
+          netrc['trunk.cocoapods.org'] = @email, token
+          netrc.save
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb
new file mode 100644
index 0000000..b0d170e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/lib/pod/command/trunk/remove_owner.rb
@@ -0,0 +1,46 @@
+module Pod
+  class Command
+    class Trunk
+      # @CocoaPods 0.33.0
+      #
+      class RemoveOwner < Trunk
+        self.summary = 'Remove an owner from a pod'
+        self.description = <<-DESC
+              Removes the user with the specified `OWNER-EMAIL` from being an owner
+              of the given `POD`.
+              An ‘owner’ is a registered user who is allowed to make changes to a
+              pod, such as pushing new versions and adding and removing other ‘owners’.
+        DESC
+
+        self.arguments = [
+          CLAide::Argument.new('POD', true),
+          CLAide::Argument.new('OWNER-EMAIL', true),
+        ]
+
+        def initialize(argv)
+          @pod = argv.shift_argument
+          @email = argv.shift_argument
+          super
+        end
+
+        def validate!
+          super
+          unless token
+            help! 'You need to register a session first.'
+          end
+          unless @pod && @email
+            help! 'Specify the pod name and the owner’s email address.'
+          end
+        end
+
+        def run
+          json = json(request_path(:delete, "pods/#{@pod}/owners/#{@email}", auth_headers))
+          UI.labeled 'Owners', json.map { |o| "#{o['name']} <#{o['email']}>" }
+        rescue REST::Error => e
+          raise Informative, "There was an error removing #{@email} from " \
+                             "#{@pod} on trunk: #{e.message}"
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb
new file mode 100644
index 0000000..2600ada
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/addowner_spec.rb
@@ -0,0 +1,52 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+
+module Pod
+  describe Command::Trunk::AddOwner do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk add-owner )).should.be.instance_of Command::Trunk::AddOwner
+      end
+    end
+
+    describe 'validation' do
+      it "should error if we don't have a token" do
+        Netrc.any_instance.stubs(:[]).returns(nil)
+        command = Command.parse(%w( trunk push ))
+        exception = lambda { command.validate! }.should.raise CLAide::Help
+        exception.message.should.include 'register a session'
+      end
+
+      it 'should error if pod name is not supplied' do
+        command = Command.parse(%w( trunk add-owner ))
+        command.stubs(:token).returns('token')
+        exception = lambda { command.validate! }.should.raise CLAide::Help
+        exception.message.should.include 'pod name'
+      end
+
+      it 'should error if new owners email is not supplied' do
+        command = Command.parse(%w( trunk add-owner QueryKit ))
+        command.stubs(:token).returns('token')
+        exception = lambda { command.validate! }.should.raise CLAide::Help
+        exception.message.should.include 'email'
+      end
+
+      it 'should validate with valid pod and email' do
+        command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org ))
+        command.stubs(:token).returns('token')
+        lambda { command.validate! }.should.not.raise CLAide::Help
+      end
+    end
+
+    it 'should successfully add an owner' do
+      url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners'
+      stub_request(:patch, url).
+        with(:body => '{"email":"kyle@cocoapods.org"}',
+             :headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }).
+        to_return(:status => 200, :body => '[]', :headers => {})
+
+      command = Command.parse(%w( trunk add-owner QueryKit kyle@cocoapods.org ))
+      command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80')
+      lambda { command.run }.should.not.raise
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb
new file mode 100644
index 0000000..7538773
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/delete_spec.rb
@@ -0,0 +1,69 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+require 'tmpdir'
+
+module Pod
+  describe Command::Trunk::Delete do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk delete )).should.be.instance_of Command::Trunk::Delete
+      end
+    end
+
+    it 'should error without a pod name' do
+      command = Command.parse(%w( trunk delete ))
+      lambda { command.validate! }.should.raise CLAide::Help
+    end
+
+    it 'should error without a version' do
+      command = Command.parse(%w( trunk delete Stencil ))
+      lambda { command.validate! }.should.raise CLAide::Help
+    end
+
+    it 'confirms deletion' do
+      Colored2.disable!
+      UI.inputs += %w(garbage true false)
+      command = Command.parse(%w( trunk delete Stencil 1.0.0 ))
+      command.send(:confirm_deletion?).should.be.true
+      command.send(:confirm_deletion?).should.be.false
+
+      UI.output.should == <<-OUTPUT.gsub(/^>$/, '> ')
+WARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on!
+Please consider using the `deprecate` command instead.
+Are you sure you want to delete this Pod version?
+>
+Are you sure you want to delete this Pod version?
+>
+WARNING: It is generally considered bad behavior to remove versions of a Pod that others are depending on!
+Please consider using the `deprecate` command instead.
+Are you sure you want to delete this Pod version?
+>
+      OUTPUT
+    end
+
+    it 'does not delete if the user does not confirm' do
+      Command::Trunk::Delete.any_instance.expects(:confirm_deletion?).returns(false)
+      Command::Trunk::Delete.any_instance.expects(:delete).never
+      Command::Trunk::Delete.invoke(%w(Stencil 1.0.0))
+    end
+
+    it 'should show information for a pod' do
+      response = {
+        'messages' => [
+          {
+            '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 1.0.0` initiated.',
+          },
+          {
+            '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 1.0.0` has been pushed (1.02409270 s).',
+          },
+        ],
+        'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/1.0.0/Stencil.podspec.json',
+      }
+      Command::Trunk::Delete.any_instance.expects(:delete).returns(response)
+      UI.inputs << 'TRUE '
+      Command::Trunk::Delete.invoke(%w(Stencil 1.0.0))
+
+      UI.output.should.include 'Data URL: https://raw.githubusercontent'
+      UI.output.should.include 'Push for `Stencil 1.0.0` initiated'
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb
new file mode 100644
index 0000000..7f43498
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/deprecate_spec.rb
@@ -0,0 +1,58 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+require 'tmpdir'
+
+module Pod
+  describe Command::Trunk::Deprecate do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk deprecate )).should.be.instance_of Command::Trunk::Deprecate
+      end
+    end
+
+    it 'should error without a pod name' do
+      command = Command.parse(%w( trunk deprecate ))
+      lambda { command.validate! }.should.raise CLAide::Help
+    end
+
+    before do
+      @push_response = {
+        'messages' => [
+          {
+            '2015-12-05 02:00:25 UTC' => 'Push for `Stencil 0.96.3` initiated.',
+          },
+          {
+            '2015-12-05 02:00:26 UTC' => 'Push for `Stencil 0.96.3` has been pushed (1.02409270 s).',
+          },
+        ],
+        'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json',
+      }
+    end
+
+    it 'should show information for a pod' do
+      Command::Trunk::Deprecate.any_instance.expects(:deprecate).returns(@push_response)
+      Command::Trunk::Deprecate.invoke(%w(Stencil))
+
+      UI.output.should.include 'Data URL: https://raw.githubusercontent'
+      UI.output.should.include 'Push for `Stencil 0.96.3` initiated'
+    end
+
+    it 'should send the proper network request' do
+      redirect = 'http://redirected.com'
+      stub_request(:patch, 'https://trunk.cocoapods.org/api/v1/pods/Stencil/deprecated').
+        with(:body => hash_including('in_favor_of' => 'Stamp')).
+        to_return(:status => 201, :headers => { :location => redirect })
+
+      stub_request(:get, redirect).
+        to_return(:status => 200, :body => @push_response.to_json)
+
+      Command::Trunk::Deprecate.invoke(%w(Stencil --in-favor-of=Stamp))
+
+      UI.output.should == <<-EOS
+  - Data URL: https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/Stencil/0.96.3/Stencil.podspec.json
+- Log messages:
+  - December 5th, 2015 02:00: Push for `Stencil 0.96.3` initiated.
+  - December 5th, 2015 02:00: Push for `Stencil 0.96.3` has been pushed (1.02409270 s).
+      EOS
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb
new file mode 100644
index 0000000..1ff1a38
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/info_spec.rb
@@ -0,0 +1,36 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+require 'tmpdir'
+
+module Pod
+  describe Command::Trunk::Info do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk info )).should.be.instance_of Command::Trunk::Info
+      end
+    end
+
+    it 'should error without a pod name' do
+      command = Command.parse(%w( trunk info ))
+      lambda { command.validate! }.should.raise CLAide::Help
+    end
+
+    it 'should show information for a pod' do
+      url = 'https://trunk.cocoapods.org/api/v1/pods/Stencil'
+      stub_request(:get, url).to_return(:body => {
+        'owners' => [
+          {
+            'name' => 'Kyle Fuller',
+            'email' => 'kyle@example.com',
+          },
+        ],
+      }.to_json)
+
+      command = Command.parse(%w( trunk info Stencil ))
+      lambda { command.validate! }.should.not.raise CLAide::Help
+      command.run
+
+      UI.output.should.include 'Owners'
+      UI.output.should.include 'Kyle Fuller <kyle@example.com>'
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb
new file mode 100644
index 0000000..585bb38
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/me_spec.rb
@@ -0,0 +1,17 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+
+module Pod
+  describe Command::Trunk::Me do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk me )).should.be.instance_of Command::Trunk::Me
+      end
+    end
+
+    it "should error if we don't have a token" do
+      Netrc.any_instance.stubs(:[]).returns(nil)
+      command = Command.parse(%w( trunk me ))
+      lambda { command.validate! }.should.raise CLAide::Help
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb
new file mode 100644
index 0000000..c6c62bc
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/push_spec.rb
@@ -0,0 +1,332 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+require 'tmpdir'
+
+module Pod
+  describe Command::Trunk::Push do
+    def success_json
+      {
+        'messages' => [
+          {
+            '2015-12-05 02:00:25 UTC' => "Push for `BananaLib 0.96.3' initiated.",
+          },
+          {
+            '2015-12-05 02:00:26 UTC' => "Push for `BananaLib 0.96.3' has been pushed (1.02409270 s).",
+          },
+        ],
+        'data_url' => 'https://raw.githubusercontent.com/CocoaPods/Specs/ce4efe9f986d297008e8c61010a4b0d5881c50d0/Specs/BananaLib/0.96.3/BananaLib.podspec.json',
+      }
+    end
+
+    before do
+      Command::Trunk::Push.any_instance.stubs(:update_master_repo)
+    end
+
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk push )).should.be.instance_of Command::Trunk::Push
+      end
+    end
+
+    it "should error if we don't have a token" do
+      Netrc.any_instance.stubs(:[]).returns(nil)
+      command = Command.parse(%w( trunk push ))
+      exception = lambda { command.validate! }.should.raise CLAide::Help
+      exception.message.should.include 'register a session'
+    end
+
+    it 'should error when the trunk service returns an error' do
+      url = 'https://trunk.cocoapods.org/api/v1/pods?allow_warnings=false'
+      stub_request(:post, url).to_return(:status => 422, :body => {
+        'error' => 'The Pod Specification did not pass validation.',
+        'data' => {
+          'warnings' => [
+            'A value for `requires_arc` should be specified until the migration to a `true` default.',
+          ],
+        },
+      }.to_json)
+      command = Command.parse(%w(trunk push))
+      command.stubs(:validate_podspec)
+      command.stubs(:spec).returns(Pod::Specification.new)
+      exception = lambda { command.run }.should.raise Informative
+      exception.message.should.include 'following validation failed'
+      exception.message.should.include 'should be specified'
+      exception.message.should.include 'The Pod Specification did not pass validation'
+    end
+
+    describe 'PATH' do
+      before do
+        UI.output = ''
+      end
+      it 'defaults to the current directory' do
+        # Disable the podspec finding algorithm so we can check the raw path
+        Command::Trunk::Push.any_instance.stubs(:find_podspec_file) { |path| path }
+        command = Command.parse(%w( trunk push ))
+        command.instance_eval { @path }.should == '.'
+      end
+
+      def found_podspec_among_files(files)
+        # Create a temp directory with the dummy `files` in it
+        Dir.mktmpdir do |dir|
+          files.each do |filename|
+            path = Pathname(dir) + filename
+            File.open(path, 'w') {}
+          end
+          # Execute `pod trunk push` with this dir as parameter
+          command = Command.parse(%w( trunk push ) + [dir])
+          path = command.instance_eval { @path }
+          return File.basename(path) if path
+        end
+      end
+
+      it 'should find the only JSON podspec in a given directory' do
+        files = %w(foo bar.podspec.json baz)
+        found_podspec_among_files(files).should == files[1]
+      end
+
+      it 'should find the only Ruby podspec in a given directory' do
+        files = %w(foo bar.podspec baz)
+        found_podspec_among_files(files).should == files[1]
+      end
+
+      it 'should warn when no podspec found in a given directory' do
+        files = %w(foo bar baz)
+        found_podspec_among_files(files).should.nil?
+ UI.output.should.match /No podspec found in directory/ + end + + it 'should warn when multiple podspecs found in a given directory' do + files = %w(foo bar.podspec bar.podspec.json baz) + found_podspec_among_files(files).should.nil? + UI.output.should.match /Multiple podspec files in directory/ + end + end + + describe 'validation' do + before do + Installer.any_instance.stubs(:aggregate_targets).returns([]) + Installer.any_instance.stubs(:pod_targets).returns([]) + + Validator.any_instance.stubs(:check_file_patterns) + Validator.any_instance.stubs(:validate_url) + Validator.any_instance.stubs(:validate_screenshots) + Validator.any_instance.stubs(:xcodebuild).returns('') + Validator.any_instance.stubs(:install_pod) + Validator.any_instance.stubs(:build_pod) + Validator.any_instance.stubs(:add_app_project_import) + Validator.any_instance.stubs(:used_swift_version).returns(nil) + %i(prepare resolve_dependencies download_dependencies write_lockfiles).each do |m| + Installer.any_instance.stubs(m) + end + Command::Trunk::Push.any_instance.stubs(:master_repo_url). + returns(Pod::TrunkSource::TRUNK_REPO_URL) + end + + it 'passes the SWIFT_VERSION to the Validator' do + Validator.any_instance.expects(:swift_version=).with('3.0') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=3.0)) + cmd.send(:validate_podspec) + end + + it 'passes a swift version back to command, to handle .swift-version files' do + Validator.any_instance.stubs(:dot_swift_version).returns('1.2.3') + Validator.any_instance.stubs(:used_swift_version).returns('1.2.3') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --allow-warnings)) + cmd.send(:validate_podspec) + cmd.instance_variable_get(:@swift_version).should == '1.2.3' + end + + it 'validates specs as frameworks by default' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, '8.0', true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, true, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, true, [], nil, nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + cmd.send(:validate_podspec) + end + + it 'validates specs as libraries if requested' do + Validator.any_instance.expects(:podfile_from_spec). + with(:ios, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:osx, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:tvos, nil, false, [], nil, nil).once.returns(Podfile.new) + Validator.any_instance.expects(:podfile_from_spec). + with(:watchos, nil, false, [], nil, nil).once.returns(Podfile.new) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + cmd.send(:validate_podspec) + end + + it 'prints the failure reason' do + Validator.any_instance.expects(:validated?).returns(false) + Validator.any_instance.expects(:validate) + Validator.any_instance.expects(:failure_reason).returns('failure_reason') + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-libraries)) + e = should.raise(Informative) { cmd.send(:validate_podspec) } + e.message.should.include 'The spec did not pass validation, due to failure_reason.' 
+ end + + it 'passes skip import validation' do + Validator.any_instance.expects(:skip_import_validation=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-import-validation)) + cmd.send(:validate_podspec) + end + + it 'passes skip test' do + Validator.any_instance.expects(:skip_tests=).with(true) + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --skip-tests)) + cmd.send(:validate_podspec) + end + + it 'passes use modular headers' do + Validator.any_instance.expects(:use_modular_headers=) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --use-modular-headers)) + cmd.send(:validate_podspec) + end + end + + describe 'sending the swift version up to trunk' do + before do + # This won't get called + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + # For faking the networking when sending + Pod::Command::Trunk.any_instance.expects(:json).returns({}) + Pod::Command::Trunk.any_instance.expects(:auth_headers).returns({}) + end + + it 'passes the value to trunk' do + # Fakes for the network response + response = mock + response.expects(:headers).returns('location' => ['http://theinternet.com']) + response.expects(:status_code).returns(200) + + cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --swift-version=1.1.2)) + + # Using a blank podspec - JSON should include `"pushed_with_swift_version":"1.1.2"` + cmd.stubs(:spec).returns(Pod::Specification.new) + + json = <<-JSON +{"name":null,"pushed_with_swift_version":"1.1.2","platforms":{"osx":null,"ios":null,"tvos":null,"watchos":null}} + JSON + + cmd.stubs(:validate_podspec) + cmd.stubs(:request_url) + + api_route = 'pods?allow_warnings=false' + cmd.expects(:request_path).with(:post, api_route, json, {}).returns(response) + cmd.send(:push_to_trunk) + end + end + + describe 'updating the master repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name). + returns(Pod::TrunkSource::TRUNK_REPO_NAME) + end + + it 'updates the master repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run).never + + @cmd.run + end + + it 'sets up the master repo when it does not exist' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(3). + returns(nil). + returns(Pod::TrunkSource.new(Pod::TrunkSource::TRUNK_REPO_NAME)) + Config.instance.sources_manager.expects(:update).with(Pod::TrunkSource::TRUNK_REPO_NAME).twice + Command::Repo::AddCDN.any_instance.expects(:run) + + @cmd.run + end + end + + describe 'synchronous updating the git repo' do + before do + @cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --synchronous)) + @cmd.stubs(:validate_podspec) + @cmd.stubs(:push_to_trunk).returns([200, success_json]) + Command::Trunk::Push.any_instance.unstub(:update_master_repo) + Command::Trunk::Push.any_instance.stubs(:master_repo_name).returns('master') + end + + it 'updates the git repo when it exists' do + Config.instance.sources_manager.stubs(:source_with_url). + at_most(2). 
+          returns(Pod::TrunkSource.new('master'))
+
+        Config.instance.sources_manager.expects(:update).with('master').twice
+        Command::Repo::AddCDN.any_instance.expects(:run).never
+
+        @cmd.run
+      end
+
+      it 'sets up the git repo when it does not exist' do
+        Config.instance.sources_manager.stubs(:source_with_url).
+          at_most(3).
+          returns(nil).
+          returns(Pod::TrunkSource.new('master'))
+        Config.instance.sources_manager.stubs(:cdn_url?).returns(false)
+        Config.instance.sources_manager.stubs(:create_source_with_url).once.
+          returns(Pod::TrunkSource.new('master'))
+        Config.instance.sources_manager.expects(:update).with('master').twice
+
+        @cmd.run
+      end
+    end
+
+    describe 'Presenting Responses to the user' do
+      before do
+        Command::Trunk::Push.any_instance.stubs(:update_master_repo)
+        Config.instance.sources_manager.stubs(:master_repo_functional?).returns(true)
+      end
+
+      it 'shows full logs when verbose' do
+        cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose))
+        cmd.stubs(:validate_podspec)
+        cmd.stubs(:push_to_trunk).returns([200, success_json])
+
+        cmd.run
+        UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs}
+      end
+
+      it 'shows full logs when errored' do
+        cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec --verbose))
+        cmd.stubs(:validate_podspec)
+        cmd.stubs(:push_to_trunk).returns([400, success_json])
+
+        cmd.run
+        UI.output.should.match %r{- Data URL: https://raw.githubusercontent.com/CocoaPods/Specs}
+      end
+
+      it 'shows thanks emojis when success' do
+        cmd = Command.parse(%w(trunk push spec/fixtures/BananaLib.podspec))
+        cmd.stubs(:validate_podspec)
+        cmd.stubs(:push_to_trunk).returns([200, success_json])
+        cmd.run
+
+        UI.output.should.match %r{https://cocoapods.org/pods/BananaLib}
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb
new file mode 100644
index 0000000..7bc316a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/register_spec.rb
@@ -0,0 +1,31 @@
+require File.expand_path('../../../spec_helper', __FILE__)
+
+module Pod
+  describe Command::Trunk::Register do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk register )).should.be.instance_of Command::Trunk::Register
+      end
+    end
+
+    it 'should error if email is not supplied' do
+      Netrc.any_instance.stubs(:[]).returns(nil)
+      command = Command.parse(%w( trunk register ))
+      exception = lambda { command.validate! }.should.raise CLAide::Help
+      exception.message.should.include 'email address'
+    end
+
+    it 'should register user' do
+      url = 'https://trunk.cocoapods.org/api/v1/sessions'
+      stub_request(:post, url).
+        with(:body => hash_including('email' => 'kyle@cocoapods.org')).
+ to_return(:status => 200, :body => '{"token": "acct"}') + Netrc.any_instance.stubs(:[]).returns(nil) + Netrc.any_instance.expects(:[]=).with('trunk.cocoapods.org', ['kyle@cocoapods.org', 'acct']) + Netrc.any_instance.expects(:save) + + command = Command.parse(%w( trunk register kyle@cocoapods.org )) + lambda { command.run }.should.not.raise + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb new file mode 100644 index 0000000..bed408f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk/remove_owner_spec.rb @@ -0,0 +1,51 @@ +require File.expand_path('../../../spec_helper', __FILE__) + +module Pod + describe Command::Trunk::RemoveOwner do + describe 'CLAide' do + it 'registers itself' do + Command.parse(%w( trunk remove-owner )).should.be.instance_of Command::Trunk::RemoveOwner + end + end + + describe 'validation' do + it "should error if we don't have a token" do + Netrc.any_instance.stubs(:[]).returns(nil) + command = Command.parse(%w( trunk remove-owner )) + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'register a session' + end + + it 'should error if pod name is not supplied' do + command = Command.parse(%w( trunk remove-owner )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'pod name' + end + + it 'should error if new owners email is not supplied' do + command = Command.parse(%w( trunk remove-owner QueryKit )) + command.stubs(:token).returns('token') + exception = lambda { command.validate! }.should.raise CLAide::Help + exception.message.should.include 'email' + end + + it 'should should validate with valid pod and email' do + command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org )) + command.stubs(:token).returns('token') + lambda { command.validate! }.should.not.raise CLAide::Help + end + end + + it 'should successfully remove an owner' do + url = 'https://trunk.cocoapods.org/api/v1/pods/QueryKit/owners/kyle@cocoapods.org' + stub_request(:delete, url). + with(:headers => { 'Authorization' => 'Token 527d11fe429f3426cb8dbeba183a0d80' }). 
+        to_return(:status => 200, :body => '[]', :headers => {})
+
+      command = Command.parse(%w( trunk remove-owner QueryKit kyle@cocoapods.org ))
+      command.stubs(:token).returns('527d11fe429f3426cb8dbeba183a0d80')
+      lambda { command.run }.should.not.raise
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb
new file mode 100644
index 0000000..e062461
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/command/trunk_spec.rb
@@ -0,0 +1,23 @@
+require File.expand_path('../../spec_helper', __FILE__)
+
+module Pod
+  describe Command::Trunk do
+    describe 'CLAide' do
+      it 'registers itself' do
+        Command.parse(%w( trunk )).should.be.instance_of Command::Trunk
+      end
+    end
+
+    before do
+      @command = Command.parse(%w(trunk))
+    end
+
+    describe 'authorization' do
+      it 'will use the trunk token from ENV if present' do
+        ENV.stubs(:[]).with('COCOAPODS_TRUNK_TOKEN').returns('token')
+
+        @command.send(:token).should == 'token'
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec
new file mode 100644
index 0000000..b9f98d4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/fixtures/BananaLib.podspec
@@ -0,0 +1,25 @@
+Pod::Spec.new do |s|
+  s.name         = 'BananaLib'
+  s.version      = '1.0'
+  s.authors      = 'Banana Corp', { 'Monkey Boy' => 'monkey@banana-corp.local' }
+  s.homepage     = 'http://banana-corp.local/banana-lib.html'
+  s.summary      = 'Chunky bananas!'
+  s.description  = 'Full of chunky bananas.'
+  s.source       = { :git => 'http://banana-corp.local/banana-lib.git', :tag => 'v1.0' }
+  s.license      = {
+    :type => 'MIT',
+    :file => 'LICENSE',
+    :text => 'Permission is hereby granted ...'
+  }
+  s.source_files = 'Classes/*.{h,m,d}', 'Vendor'
+  s.resources    = "Resources/*"
+  s.vendored_framework = 'Bananalib.framework'
+  s.vendored_library = 'libBananalib.a'
+  s.preserve_paths = 'preserve_me.txt'
+  s.public_header_files = 'Classes/Banana.h'
+
+  s.prefix_header_file = 'Classes/BananaLib.pch'
+  s.xcconfig = { 'OTHER_LDFLAGS' => '-framework SystemConfiguration' }
+  s.dependency 'monkey', '~> 1.0.1', '< 1.0.9'
+
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb
new file mode 100644
index 0000000..3ca19a5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-trunk-1.6.0/spec/spec_helper.rb
@@ -0,0 +1,97 @@
+# Set up coverage analysis
+#-----------------------------------------------------------------------------#
+
+if RUBY_VERSION >= '1.9.3'
+  require 'codeclimate-test-reporter'
+  CodeClimate::TestReporter.configure do |config|
+    config.logger.level = Logger::WARN
+  end
+  CodeClimate::TestReporter.start
+end
+
+# Set up
+#-----------------------------------------------------------------------------#
+
+require 'pathname'
+ROOT = Pathname.new(File.expand_path('../../', __FILE__))
+$:.unshift((ROOT + 'lib').to_s)
+$:.unshift((ROOT + 'spec').to_s)
+
+require 'bundler/setup'
+require 'bacon'
+require 'mocha-on-bacon'
+require 'pretty_bacon'
+require 'webmock'
+
+include WebMock::API
+WebMock.enable!
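+# Block live HTTP during the specs; the allowlist on the next line keeps
+# the CodeClimate coverage upload and the CocoaPods CDN reachable.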
+WebMock.disable_net_connect!(:allow => ['codeclimate.com', 'cdn.cocoapods.org']) + +require 'cocoapods' + +require 'cocoapods_plugin' + +# Helpers +#-----------------------------------------------------------------------------# + +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + attr_accessor :inputs + + def gets + inputs.shift + end + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end + + class Command::Trunk + def time_zone + 'UTC' + end + end +end + +module Bacon + class Context + old_run_requirement = instance_method(:run_requirement) + define_method(:run_requirement) do |description, spec| + ::Pod::Config.instance = nil + ::Pod::UI.output = '' + ::Pod::UI.warnings = '' + ::Pod::UI.inputs = [] + # The following prevents a nasty behaviour where the increments are not + # balanced when testing informative which might lead to sections not + # being printed to the output as they are too nested. + ::Pod::UI.indentation_level = 0 + ::Pod::UI.title_level = 0 + + WebMock.reset! + + old_run_requirement.bind(self).call(description, spec) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.gitignore b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.gitignore new file mode 100644 index 0000000..001d28f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp +.idea diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop.yml new file mode 100644 index 0000000..e3dc640 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop.yml @@ -0,0 +1,3 @@ +inherit_from: + - .rubocop_todo.yml + - .rubocop_cocoapods.yml diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml new file mode 100644 index 0000000..93f03f5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_cocoapods.yml @@ -0,0 +1,129 @@ +AllCops: + Include: + - ./Rakefile + - ./Gemfile + - ./*.gemspec + Exclude: + - ./spec/fixtures/**/* + - ./vendor/bundle/**/* + +# At the moment not ready to be used +# https://github.com/bbatsov/rubocop/issues/947 +Style/Documentation: + Enabled: false + +#- CocoaPods -----------------------------------------------------------------# + +# We adopted raise instead of fail. +Style/SignalException: + EnforcedStyle: only_raise + +# They are idiomatic +Lint/AssignmentInCondition: + Enabled: false + +# Allow backticks +Style/AsciiComments: + Enabled: false + +# Indentation clarifies logic branches in implementations +Style/IfUnlessModifier: + Enabled: false + +# No enforced convention here. +Style/SingleLineBlockParams: + Enabled: false + +# We only add the comment when needed. +Style/Encoding: + Enabled: false + +# Having these make it easier to *not* forget to add one when adding a new +# value and you can simply copy the previous line. 
+Style/TrailingCommaInLiteral: + EnforcedStyleForMultiline: comma + +Layout/MultilineOperationIndentation: + EnforcedStyle: indented + +Style/PercentLiteralDelimiters: + PreferredDelimiters: + default: '()' + '%w': '()' + +# Clashes with CLAide Command#validate! +Style/GuardClause: + Enabled: false + +# Not always desirable +Style/Next: + Enabled: false + +# Arbitrary max lengths for classes simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ClassLength: + Enabled: false + +# Arbitrary max lengths for modules simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/ModuleLength: + Enabled: false + +# Arbitrary max lengths for methods simply do not work and enabling this will +# lead to a never ending stream of annoyance and changes. +Metrics/MethodLength: + Enabled: false + +# No enforced convention here. +Metrics/BlockNesting: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/AbcSize: + Enabled: false + +# It will be obvious which code is complex, Rubocop should only lint simple +# rules for us. +Metrics/CyclomaticComplexity: + Enabled: false + +Style/HashSyntax: + EnforcedStyle: hash_rockets + +Style/Lambda: + Enabled: true + +Layout/DotPosition: + EnforcedStyle: trailing + +Style/EachWithObject: + Enabled: true + +Performance/HashEachMethods: + Enabled: true + +#- CocoaPods specs -----------------------------------------------------------# + +# Allow for `should.match /regexp/`. +Lint/AmbiguousRegexpLiteral: + Exclude: + - spec/**/* + +# Allow `object.should == object` syntax. +Lint/Void: + Exclude: + - spec/**/* + +Style/ClassAndModuleChildren: + Exclude: + - spec/**/* + +Lint/UselessComparison: + Exclude: + - spec/**/* + +Metrics/BlockLength: + Exclude: + - spec/**/* + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml new file mode 100644 index 0000000..a5288bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.rubocop_todo.yml @@ -0,0 +1,72 @@ +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2020-04-16 22:06:47 -0700 using RuboCop version 0.50.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/EmptyLineAfterMagicComment: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 2 +# Configuration parameters: ContextCreatingMethods, MethodCreatingMethods. +Lint/UselessAccessModifier: + Exclude: + - 'lib/pod/command/try.rb' + +# Offense count: 37 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 108 + +# Offense count: 3 +# Configuration parameters: ExpectMatchingDefinition, Regex, IgnoreExecutableScripts, AllowedAcronyms. 
+# AllowedAcronyms: CLI, DSL, ACL, API, ASCII, CPU, CSS, DNS, EOF, GUID, HTML, HTTP, HTTPS, ID, IP, JSON, LHS, QPS, RAM, RHS, RPC, SLA, SMTP, SQL, SSH, TCP, TLS, TTL, UDP, UI, UID, UUID, URI, URL, UTF8, VM, XML, XMPP, XSRF, XSS +Naming/FileName: + Exclude: + - 'Gemfile' + - 'Rakefile' + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, EnforcedStyle, SupportedStyles. +# SupportedStyles: predicate, comparison +Style/NumericPredicate: + Exclude: + - 'spec/**/*' + - 'lib/pod/command/try.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: PreferredDelimiters. +Style/PercentLiteralDelimiters: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: use_perl_names, use_english_names +Style/SpecialGlobalVars: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, ConsistentQuotesInMultiline. +# SupportedStyles: single_quotes, double_quotes +Style/StringLiterals: + Exclude: + - 'cocoapods-try.gemspec' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/UnneededPercentQ: + Exclude: + - 'cocoapods-try.gemspec' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.travis.yml new file mode 100644 index 0000000..49cb2c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/.travis.yml @@ -0,0 +1,34 @@ +# Sets Travis to run the Ruby specs on OS X machines to be as close as possible +# to the user environment. +# +language: ruby + +dist: trusty + +branches: + only: + - master + - /.+-stable$/ + +matrix: + include: + - rvm: 2.4.1 + +rvm: + - 2.0.0-p647 + - 2.3.4 + - 2.4.1 + - 2.6.2 + +before_install: + # There is a bug in travis. When using system ruby, bundler is not + # installed and causes the default install action to fail. + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem install "bundler:~> 1.17"; else gem install "bundler:~> 1.17"; fi + # RubyGems 2.0.14 isn't a fun time on 2.0.0p648 + - if [ "$TRAVIS_RUBY_VERSION" = "system" ]; then sudo gem update --system; fi + +install: + - bundle install --path .bundle + - bundle exec pod repo add-cdn trunk 'https://cdn.cocoapods.org' + +script: bundle exec rake spec diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/CHANGELOG.md new file mode 100644 index 0000000..7068a50 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/CHANGELOG.md @@ -0,0 +1,204 @@ +# CocoaPods::Try CHANGELOG + +## 1.2.0 (2020-04-20) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Fix a crash when using `pod try` with CocoaPods 1.8.0 or higher. + [@arielpollack](https://github.com/arielpollack) + [#63](https://github.com/CocoaPods/cocoapods-try/issues/63) + [#65](https://github.com/CocoaPods/cocoapods-try/pull/65) + + +## 1.1.0 (2016-07-10) + +##### Enhancements + +* Added a command line option for specifying the podspec file from Git URL + [@rockwotj](https://github.com/rockwotj) + [59](https://github.com/CocoaPods/CocoaPods-try/issues/59) + +##### Bug Fixes + +* None. + + +## 1.0.0 (2016-05-10) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. + + +## 1.0.0.rc.1 (2016-04-30) + +##### Enhancements + +* None. + +##### Bug Fixes + +* None. 
+ + +## 1.0.0.beta.4 (2016-04-15) + +##### Enhancements + +* None. + +##### Bug Fixes + +* Compatibility With CocoaPods 1.0.0.beta.8. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods#5159](https://github.com/CocoaPods/CocoaPods/issues/5159) + + +## 1.0.0.beta.3 (2016-03-15) + +##### Bug Fixes + +* Compatibility with CocoaPods 1.0.0.beta.6. + [Marius Rackwitz](https://github.com/mrackwitz) + + +## 1.0.0.beta.2 (2016-01-05) + +##### Bug Fixes + +* Ensure that the pod's source is re-downloaded, instead of pulling from the + cache, which only holds cleaned sources. + [Samuel Giddins](https://github.com/segiddins) + [#43](https://github.com/CocoaPods/cocoapods-try/issues/43) + + +## 1.0.0.beta.1 (2015-12-30) + +##### Bug Fixes + +* Ensure commands in the `.cocoapods` file are strings, and uses the pods folder when executing commands. + [Samuel Giddins](https://github.com/segiddins) + [CocoaPods-Try#40](https://github.com/CocoaPods/cocoapods-try/issues/40) + + +## 0.5.1 (2015-08-28) + +##### Bug Fixes + +* This release fixes a file permissions error when using the RubyGem. + [Samuel Giddins](https://github.com/segiddins) + + +## 0.5.0 (2015-08-26) + +##### Enhancements + +* Any CocoaPod / GitHub repo can now declare their own pre-install commands, and prefer a + project. To use this, add a `.cocoapods.yml` file to the root of your repo. The yaml file + should have a structure like: + + ``` yaml + try: + install: + pre: + - pod install + - git submodule init + project: 'ORStackView.xcworkspace' + ``` + + [Orta Therox](https://github.com/orta) + [#33](https://github.com/CocoaPods/cocoapods-try/issues/33) + + +## 0.4.5 (2015-05-27) + +##### Bug Fixes + +* Use `Dir.tmpdir` instead of explicit `/tmp`. + [Boris BÃŧgling](https://github.com/neonichu) + [#34](https://github.com/CocoaPods/cocoapods-try/pull/34) + +* Automatically detect JSON podspecs. + [Samuel Giddins](https://github.com/segiddins) + [#35](https://github.com/CocoaPods/cocoapods-try/issues/35) + + +## 0.4.4 (2015-05-06) + +##### Bug Fixes + +* Fix working with the CocoaPods download cache introduced in 0.37. + [Samuel Giddins](https://github.com/) + [#30](https://github.com/CocoaPods/cocoapods-try/issues/30) + + +## 0.4.3 (2014-12-25) + +##### Bug Fixes + +* Ensure that the master repo is setup on try. + [Daniel Tomlinson](https://github.com/DanielTomlinson) + [CocoaPods/CocoaPods#2563](https://github.com/CocoaPods/CocoaPods/pull/2563) + +## 0.4.2 (2014-10-29) + +* Prefer projects or workspaces with the name including Sample over others. + [Kyle Fuller](https://github.com/kylef) + +## 0.4.1 (2014-09-26) + +* Add `--no-repo-update` option. + [Eloy DurÃĄn](https://github.com/alloy) + +## 0.4.0 (2014-09-11) + +### Enhancements + +* Adopted new argument format of CLAide. + [Olivier Halligon](https://github.com/AliSoftware) + +## 0.3.0 (2014-05-19) + +### Enhancements + +* Adopted new CLAide release. + [Fabio Pelosin](https://github.com/irrationalfab) + +## 0.2.0 (2014-03-28) + +### Enhancements + +* Added support for the specification of an URL instead of the name of a Pod. + [David Grandinetti](https://github.com/dbgrandi) + [Fabio Pelosin](https://github.com/irrationalfab) + +## 0.1.2 + +### Enhancements + +* Prefer workspaces over projects. + [Kyle Fuller](https://github.com/kylef) + +* Open workspaces if available. + [Kyle Fuller](https://github.com/kylef) + +### Fixes + +* Don't consider workspaces in bundles. + [Eloy DurÃĄn](https://github.com/alloy) + +* Typo fixes. 
+ [Mark Townsend](https://github.com/markltownsend) + +## 0.1.0 (2013-12-02) + +* Initial implementation. + [Fabio Pelosin](https://github.com/fabiopelosin) diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile new file mode 100644 index 0000000..8a73433 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile @@ -0,0 +1,16 @@ +source 'https://rubygems.org' + +gemspec + +group :development do + gem 'claide', :git => 'https://github.com/CocoaPods/CLAide.git', :branch => 'master' + gem 'cocoapods', :git => 'https://github.com/CocoaPods/CocoaPods.git', :branch => 'master' + gem 'cocoapods-core', :git => 'https://github.com/CocoaPods/Core.git', :branch => 'master' + + gem 'bacon' + gem 'mocha' + gem 'mocha-on-bacon' + gem 'prettybacon' + + gem 'rubocop', '0.50.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile.lock b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile.lock new file mode 100644 index 0000000..0cc12aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Gemfile.lock @@ -0,0 +1,148 @@ +GIT + remote: https://github.com/CocoaPods/CLAide.git + revision: b5ced9cc141df732e8027078543eb92fc6447567 + branch: master + specs: + claide (1.0.3) + +GIT + remote: https://github.com/CocoaPods/CocoaPods.git + revision: c75c4a6dd226c45e0ad876caa926bff51a3f00d9 + branch: master + specs: + cocoapods (1.9.1) + activesupport (>= 4.0.2, < 5) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.9.1) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 1.2.2, < 2.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (>= 1.4.0, < 2.0) + cocoapods-try (= 1.2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher (>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.6.6) + nap (~> 1.0) + ruby-macho (~> 1.4) + xcodeproj (>= 1.14.0, < 2.0) + +GIT + remote: https://github.com/CocoaPods/Core.git + revision: 8923c0cdca68d4bc7126cd64106a5fc1e9217ced + branch: master + specs: + cocoapods-core (1.9.1) + activesupport (>= 4.0.2, < 6) + addressable (~> 2.6) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 2.0) + typhoeus (~> 1.0) + +PATH + remote: . 
+ specs: + cocoapods-try (1.2.0) + +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.2) + activesupport (4.2.11.1) + i18n (~> 0.7) + minitest (~> 5.1) + thread_safe (~> 0.3, >= 0.3.4) + tzinfo (~> 1.1) + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) + algoliasearch (1.27.1) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + ast (2.4.0) + atomos (0.1.3) + bacon (1.2.0) + cocoapods-deintegrate (1.0.4) + cocoapods-downloader (1.3.0) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.0) + cocoapods-trunk (1.4.1) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + colored2 (3.1.2) + concurrent-ruby (1.1.6) + escape (0.0.4) + ethon (0.12.0) + ffi (>= 1.3.0) + ffi (1.12.2) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + httpclient (2.8.3) + i18n (0.9.5) + concurrent-ruby (~> 1.0) + json (2.3.0) + minitest (5.8.4) + mocha (1.11.2) + mocha-on-bacon (0.2.3) + mocha (>= 0.13.0) + molinillo (0.6.6) + nanaimo (0.2.6) + nap (1.1.0) + netrc (0.11.0) + parallel (1.10.0) + parser (2.7.1.1) + ast (~> 2.4.0) + powerpack (0.1.2) + prettybacon (0.0.2) + bacon (~> 1.2) + public_suffix (2.0.5) + rainbow (2.2.2) + rake + rake (10.5.0) + rubocop (0.50.0) + parallel (~> 1.10) + parser (>= 2.3.3.1, < 3.0) + powerpack (~> 0.1) + rainbow (>= 2.2.2, < 3.0) + ruby-progressbar (~> 1.7) + unicode-display_width (~> 1.0, >= 1.0.1) + ruby-macho (1.4.0) + ruby-progressbar (1.10.1) + thread_safe (0.3.6) + typhoeus (1.3.1) + ethon (>= 0.9.0) + tzinfo (1.2.7) + thread_safe (~> 0.1) + unicode-display_width (1.7.0) + xcodeproj (1.16.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.2.6) + +PLATFORMS + ruby + +DEPENDENCIES + bacon + bundler (~> 1.3) + claide! + cocoapods! + cocoapods-core! + cocoapods-try! + mocha + mocha-on-bacon + prettybacon + rake (~> 10.0) + rubocop (= 0.50.0) + +BUNDLED WITH + 1.17.2 diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/LICENSE b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/LICENSE new file mode 100644 index 0000000..c000bf0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Fabio Pelosin + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
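One detail of the Gemfile.lock above worth noting: dependencies listed with a trailing `!` (`claide!`, `cocoapods!`, `cocoapods-core!`, `cocoapods-try!`) are resolved from the GIT and PATH sources pinned at the top of the lock file rather than from rubygems.org. A minimal Gemfile sketch of the pattern (the gem name and URL here are hypothetical):

    source 'https://rubygems.org'

    # Resolved from rubygems.org: recorded in Gemfile.lock without a `!`.
    gem 'rake', '~> 10.0'

    # Resolved from a git source pinned to a revision: recorded as `example-gem!`.
    gem 'example-gem', :git => 'https://github.com/example/example-gem.git', :branch => 'master'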
diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/README.md b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/README.md new file mode 100644 index 0000000..b61acde --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/README.md @@ -0,0 +1,17 @@ +# CocoaPods try + +[![Build Status](https://img.shields.io/travis/CocoaPods/cocoapods-try/master.svg?style=flat)](https://travis-ci.org/CocoaPods/cocoapods-try) + +CocoaPods plugin which lets you quickly try the demo project of a Pod. + +![](http://i.imgur.com/xxWNUrg.gif) + +## Usage + + $ pod try POD_NAME + +## Installation + + $ gem install cocoapods-try + + diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Rakefile new file mode 100644 index 0000000..9f8fa94 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/Rakefile @@ -0,0 +1,54 @@ +# Bootstrap +#-----------------------------------------------------------------------------# + +task :bootstrap, :use_bundle_dir? do |_t, args| + if system('which bundle') + if args[:use_bundle_dir?] + sh 'bundle install --path ./travis_bundle_dir' + else + sh 'bundle install' + end + else + $stderr.puts "\033[0;31m" \ "[!] Please install the bundler gem manually:\n" \ ' $ [sudo] gem install bundler' \ "\e[0m" + exit 1 + end +end + +begin + require 'bundler/gem_tasks' + + task :default => 'spec' + + # Spec + #-----------------------------------------------------------------------------# + + desc 'Runs all the specs' + task :spec do + puts "\033[0;32mUsing #{`ruby --version`}\033[0m" + start_time = Time.now + sh "bundle exec bacon #{specs('**')}" + duration = Time.now - start_time + puts "Tests completed in #{duration}s" + + Rake::Task['rubocop'].invoke if RUBY_VERSION >= '1.9.3' + end + + def specs(dir) + FileList["spec/#{dir}/*_spec.rb"].shuffle.join(' ') + end + + #-- Rubocop ----------------------------------------------------------------# + + if RUBY_VERSION >= '1.9.3' + require 'rubocop/rake_task' + RuboCop::RakeTask.new + end +rescue LoadError + $stderr.puts "\033[0;31m" \ '[!] Some Rake tasks have been disabled because the environment' \ ' couldn’t be loaded. Be sure to run `rake bootstrap` first.'
\ "\e[0m" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec new file mode 100644 index 0000000..220b951 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/cocoapods-try.gemspec @@ -0,0 +1,21 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'cocoapods_try.rb' + +Gem::Specification.new do |spec| + spec.name = "cocoapods-try" + spec.version = CocoapodsTry::VERSION + spec.authors = ["Fabio Pelosin"] + spec.summary = %q{CocoaPods plugin which lets you quickly try the demo project of a Pod.} + spec.homepage = "https://github.com/cocoapods/cocoapods-try" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake", '~> 10.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb new file mode 100644 index 0000000..360daf4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_plugin.rb @@ -0,0 +1 @@ +require 'pod/command/try' diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb new file mode 100644 index 0000000..44b3fa5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/cocoapods_try.rb @@ -0,0 +1,5 @@ +# The namespace of the CocoaPods try plugin. +# +module CocoapodsTry + VERSION = '1.2.0'.freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb new file mode 100644 index 0000000..445945a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/command/try.rb @@ -0,0 +1,279 @@ +require 'pod/try_settings' + +# The CocoaPods namespace +# +module Pod + class Command + # The pod try command. + # @CocoaPods 0.29.0 + # + class Try < Command + include RepoUpdate + + self.summary = 'Try a Pod!' + + self.description = <<-DESC + Downloads the Pod with the given `NAME` (or Git `URL`), installs its + dependencies if needed and opens its demo project. If a Git URL is + provided the head of the repo is used. + + If a Git URL is specified, then a --podspec_name can be provided, + in case the podspec name differs from the name of the git repo. + DESC + + self.arguments = [CLAide::Argument.new(%w(NAME URL), true)] + + def self.options + [ + ['--podspec_name=[name]', 'The name of the podspec file within the Git Repository'], + ].concat(super) + end + + def initialize(argv) + @name = argv.shift_argument + @podspec_name = argv.option('podspec_name') + super + end + + def validate! + super + help! 'A Pod name or URL is required.' unless @name + help! 'Podspec name can only be used with a Git URL' if @podspec_name && !git_url?(@name) + end + + def run + ensure_master_spec_repo_exists!
+ sandbox = Sandbox.new(TRY_TMP_DIR) + spec = setup_spec_in_sandbox(sandbox) + + UI.title "Trying #{spec.name}" do + pod_dir = install_pod(spec, sandbox) + settings = TrySettings.settings_from_folder(pod_dir) + Dir.chdir(pod_dir) { settings.run_pre_install_commands(true) } + proj = settings.project_path || pick_demo_project(pod_dir) + file = install_podfile(proj) + if file + open_project(file) + else + UI.puts "Unable to locate a project for #{spec.name}" + end + end + end + + public + + # Helpers + #-----------------------------------------------------------------------# + + # @return [Pathname] + # + TRY_TMP_DIR = Pathname.new(Dir.tmpdir) + 'CocoaPods/Try' + + # Puts the spec's data in the sandbox + # + def setup_spec_in_sandbox(sandbox) + if git_url?(@name) + spec = spec_with_url(@name, @podspec_name) + sandbox.store_pre_downloaded_pod(spec.name) + else + update_specs_repos + spec = spec_with_name(@name) + end + spec + end + + # Returns the specification of the last version of the Pod with the given + # name. + # + # @param [String] name + # The name of the pod. + # + # @return [Specification] The specification. + # + def spec_with_name(name) + set = config.sources_manager.search(Dependency.new(name)) + if set + set.specification.root + else + raise Informative, "Unable to find a specification for `#{name}`" + end + end + + # Returns the specification found in the given Git repository URL by + # downloading the repository. + # + # @param [String] url + # The URL for the pod Git repository. + # + # @param [String] spec_name + # The name of the podspec file within the Git repository. + # + # @return [Specification] The specification. + # + def spec_with_url(url, spec_name = nil) + name = url.split('/').last + name = name.chomp('.git') if name.end_with?('.git') + name = spec_name unless spec_name.nil? + + target_dir = TRY_TMP_DIR + name + target_dir.rmtree if target_dir.exist? + + downloader = Pod::Downloader.for_target(target_dir, :git => url) + downloader.download + + spec_file = Pathname.glob(target_dir + "#{name}.podspec{,.json}").first + Pod::Specification.from_file(spec_file) + end + + # Installs the specification in the given directory. + # + # @param [Specification] spec The specification of the Pod. + # @param [Sandbox] sandbox The sandbox where the Pod should be + # installed. + # + # @return [Pathname] The path where the Pod was installed + # + def install_pod(spec, sandbox) + specs = { :ios => spec, :osx => spec } + if cocoapods_version >= Pod::Version.new('1.8.0') + dummy_podfile = Podfile.new + installer = Installer::PodSourceInstaller.new(sandbox, dummy_podfile, specs, :can_cache => false) + else + installer = Installer::PodSourceInstaller.new(sandbox, specs, :can_cache => false) + end + installer.install! + sandbox.root + spec.name + end + + # Picks a project or workspace suitable for demo purposes in the + # given directory. + # + # To decide on the project, simple heuristics based on its name are used; + # if no project is found this method raises an `Informative`, otherwise + # if more than one project is found the choice is presented to the user. + # + # @param [#to_s] dir + # The path where to look for projects. + # + # @return [String] The path of the project.
+ # + def pick_demo_project(dir) + dir = Pathname.new(dir) + projs = projects_in_dir(dir) + if projs.count == 0 + raise Informative, 'Unable to find any project in the source files' \ " of the Pod: `#{dir}`" + elsif projs.count == 1 + projs.first + elsif (workspaces = projs.grep(/(demo|example|sample).*\.xcworkspace$/i)).count == 1 + workspaces.first + elsif (projects = projs.grep(/demo|example|sample/i)).count == 1 + projects.first + else + message = 'Which project would you like to open' + selection_array = projs.map do |p| + Pathname.new(p).relative_path_from(dir).to_s + end + index = UI.choose_from_array(selection_array, message) + projs[index] + end + end + + # Performs a CocoaPods installation for the given project if a Podfile is + # found. Shells out to avoid issues with the config of the process + # running the try command. + # + # @param [String] proj + # The path of the project. + # + # @return [String] The path of the file to open, in other words the + # workspace of the installation or the given project. + # + def install_podfile(proj) + return unless proj + dirname = Pathname.new(proj).dirname + podfile_path = dirname + 'Podfile' + if podfile_path.exist? + Dir.chdir(dirname) do + perform_cocoapods_installation + + podfile = Pod::Podfile.from_file(podfile_path) + + if podfile.workspace_path + File.expand_path(podfile.workspace_path) + else + proj.to_s.chomp(File.extname(proj.to_s)) + '.xcworkspace' + end + end + else + proj + end + end + + public + + # Private Helpers + #-----------------------------------------------------------------------# + + # @return [void] Updates the specs repo unless disabled by the config. + # + def update_specs_repos + return unless repo_update?(:default => true) + UI.section 'Updating spec repositories' do + config.sources_manager.update + end + end + + # Opens the project at the given path. + # + # @param [String] path + # The path of the project. + # + # @return [void] + # + def open_project(path) + UI.puts "Opening '#{path}'" + `open "#{path}"` + end + + # @return [void] Performs a CocoaPods installation in the working + # directory. + # + def perform_cocoapods_installation + UI.titled_section 'Performing CocoaPods Installation' do + Command::Install.invoke + end + end + + # @return [Bool] Whether the given string is the name of a Pod or a URL + # for a Git repo. + # + def git_url?(name) + prefixes = ['https://', 'http://'] + prefixes.any? { |prefix| name.start_with?(prefix) } + end + + # @return [Array] The list of the workspaces and projects in a + # given directory excluding the Pods project and the projects + # that have a sister workspace.
+ # + def projects_in_dir(dir) + glob_match = Dir.glob("#{dir}/**/*.xc{odeproj,workspace}") + glob_match = glob_match.reject do |p| + next true if p.include?('Pods.xcodeproj') + next true if p.end_with?('.xcodeproj/project.xcworkspace') + sister_workspace = p.chomp(File.extname(p.to_s)) + '.xcworkspace' + p.end_with?('.xcodeproj') && glob_match.include?(sister_workspace) + end + end + + # @return [Pod::Version] the version of CocoaPods currently running + # + def cocoapods_version + Pod::Version.new(Pod::VERSION) + end + + #-------------------------------------------------------------------# + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb new file mode 100644 index 0000000..473c299 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/lib/pod/try_settings.rb @@ -0,0 +1,53 @@ +module Pod + class TrySettings + attr_accessor :pre_install_commands, :project_path + + # Creates a TrySettings instance based on a folder path + # + def self.settings_from_folder(path) + settings_path = Pathname.new(path) + '.cocoapods.yml' + return TrySettings.new unless File.exist? settings_path + + settings = YAMLHelper.load_file(settings_path) + try_settings = TrySettings.new + return try_settings unless settings['try'] + + if settings['try']['install'] + try_settings.pre_install_commands = Array(settings['try']['install']['pre']) + end + + if settings['try']['project'] + try_settings.project_path = Pathname.new(path) + settings['try']['project'] + end + + try_settings + end + + # If we need to run commands from pod-try we should let the users know + # what is going to be run on their machine. + # + def prompt_for_permission + UI.titled_section 'Running Pre-Install Commands' do + commands = pre_install_commands.length > 1 ? 'commands' : 'command' + UI.puts "In order to try this pod, CocoaPods-Try needs to run the following #{commands}:" + pre_install_commands.each { |command| UI.puts " - #{command}" } + UI.puts "\nPress return to run these #{commands}, or press `ctrl + c` to stop trying this pod." + end + + # Give an elegant exit point. + UI.gets.chomp + end + + # Runs the pre_install_commands from the `.cocoapods.yml` settings. + # + # @param [Bool] prompt + # Whether CocoaPods-Try should show a prompt with the commands to the user. + # + def run_pre_install_commands(prompt) + if pre_install_commands + prompt_for_permission if prompt + pre_install_commands.each { |command| Executable.execute_command('bash', ['-ec', command], true) } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb new file mode 100644 index 0000000..d206186 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/command/try_settings_spec.rb @@ -0,0 +1,105 @@ +require 'tmpdir' +require File.expand_path('../../spec_helper', __FILE__) + +# The CocoaPods namespace +# +module Pod + describe TrySettings do + it 'returns an instance with empty defaults when there are no yml settings files' do + Dir.mktmpdir do |dir| + settings = TrySettings.settings_from_folder dir + settings.should.be.instance_of TrySettings + settings.pre_install_commands.should.be.nil? + settings.project_path.should.be.nil?
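# A sketch (illustrative, not part of the vendored spec) of the mapping that
# TrySettings.settings_from_folder above implements. Given a folder whose
# `.cocoapods.yml` contains:
#
#   try:
#     install:
#       pre:
#         - pod install
#     project: 'Demo.xcworkspace'
#
# the returned instance would satisfy:
#
#   settings.pre_install_commands  #=> ['pod install']
#   settings.project_path          #=> Pathname.new(dir) + 'Demo.xcworkspace'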
+ end + end + + it 'returns an instance with the right defaults when there are no yml settings files' do + Dir.mktmpdir do |dir| + yaml = < 'ARAnalytics') + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + + command = Pod::Command.parse(['try', 'https://github.com/orta/ARAnalytics.git']) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.expects(:update_specs_repos).never + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + + describe 'updates of the spec repos' do + it 'updates the spec repos by default' do + command = Pod::Command.parse(%w(try ARAnalytics)) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.config.sources_manager.expects(:update) + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + + it "doesn't update the spec repos if that option was given" do + command = Pod::Command.parse(%w(try ARAnalytics --no-repo-update)) + Installer::PodSourceInstaller.any_instance.expects(:install!) + command.config.sources_manager.expects(:update).never + command.expects(:pick_demo_project).returns(XCODE_PROJECT) + command.expects(:open_project).with(XCODE_PROJECT) + command.run + end + end + end + + describe 'Helpers' do + before do + @sut = Pod::Command.parse(['try']) + end + + it 'returns the spec with the given name' do + spec = @sut.spec_with_name('ARAnalytics') + spec.name.should == 'ARAnalytics' + end + + describe '#spec_at_url' do + it 'returns a spec for an https git repo' do + require 'cocoapods-downloader/git' + Pod::Downloader::Git.any_instance.expects(:download) + spec_file = Pod::Command::Try::TRY_TMP_DIR + 'ARAnalytics/ARAnalytics.podspec' + Pathname.stubs(:glob).once.returns([spec_file]) + stub_spec = stub + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + spec = @sut.spec_with_url('https://github.com/orta/ARAnalytics.git') + spec.should == stub_spec + end + + it 'returns a spec for an https git repo with podspec_name option' do + require 'cocoapods-downloader/git' + Pod::Downloader::Git.any_instance.expects(:download) + spec_file = Pod::Command::Try::TRY_TMP_DIR + 'ARAnalytics/Analytics.podspec' + Pathname.stubs(:glob).once.returns([spec_file]) + stub_spec = stub + Pod::Specification.stubs(:from_file).with(spec_file).returns(stub_spec) + spec = @sut.spec_with_url('https://github.com/orta/ARAnalytics.git', 'Analytics') + spec.should == stub_spec + end + end + + it 'installs the pod' do + Installer::PodSourceInstaller.any_instance.expects(:install!) + spec = stub(:name => 'ARAnalytics') + sandbox_root = Pathname.new(Pod::Command::Try::TRY_TMP_DIR) + sandbox = Sandbox.new(sandbox_root) + path = @sut.install_pod(spec, sandbox) + path.should == sandbox.root + 'ARAnalytics' + end + + it 'installs the pod on older versions of CocoaPods' do + @sut.stubs(:cocoapods_version).returns(Pod::Version.new('1.7.0')) + spec = stub(:name => 'ARAnalytics') + sandbox_root = Pathname.new(Pod::Command::Try::TRY_TMP_DIR) + sandbox = Sandbox.new(sandbox_root) + installer = stub('Installer') + installer.stubs(:install!) 
+ Pod::Installer::PodSourceInstaller.expects(:new).with(any_parameters) do |*args| + args.size == 3 + end.returns(installer).once + @sut.install_pod(spec, sandbox) + + @sut.stubs(:cocoapods_version).returns(Pod::Version.new('1.8.0')) + Pod::Installer::PodSourceInstaller.expects(:new).with(any_parameters) do |*args| + args.size == 4 + end.returns(installer) + @sut.install_pod(spec, sandbox) + end + + describe '#pick_demo_project' do + it 'raises if no demo project could be found' do + @sut.stubs(:projects_in_dir).returns([]) + should.raise Informative do + @sut.pick_demo_project('.') + end.message.should.match(/Unable to find any project/) + end + + it 'picks a demo project' do + projects = ['Demo.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'Demo.xcodeproj' + end + + it 'is not case sensitive' do + projects = ['demo.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'demo.xcodeproj' + end + + it 'considers also projects named example' do + projects = ['Example.xcodeproj'] + Dir.stubs(:glob).returns(projects) + path = @sut.pick_demo_project('.') + path.should == 'Example.xcodeproj' + end + + it 'returns the project if only one is found' do + projects = [Pathname.new('Lib.xcodeproj')] + @sut.stubs(:projects_in_dir).returns(projects) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib.xcodeproj' + end + + it 'asks the user which project would like to open if not a single suitable one is found' do + projects = ['Lib_1.xcodeproj', 'Lib_2.xcodeproj'] + @sut.stubs(:projects_in_dir).returns(projects) + UI.stubs(:choose_from_array).returns(0) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib_1.xcodeproj' + + UI.stubs(:choose_from_array).returns(1) + path = @sut.pick_demo_project('.') + path.to_s.should == 'Lib_2.xcodeproj' + end + + it 'should prefer demo or example workspaces' do + @sut.stubs(:projects_in_dir).returns(['Project Demo.xcodeproj', 'Project Demo.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcworkspace' + end + + it 'should not show workspaces inside a project' do + Dir.stubs(:glob).returns(['Project Demo.xcodeproj', 'Project Demo.xcodeproj/project.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcodeproj' + end + + it 'should prefer workspaces over projects with the same name' do + @sut.stubs(:projects_in_dir).returns(['Project Demo.xcodeproj', 'Project Demo.xcworkspace']) + path = @sut.pick_demo_project('.') + path.should == 'Project Demo.xcworkspace' + end + end + + describe '#install_podfile' do + it 'returns the original project if no Podfile could be found' do + Pathname.any_instance.stubs(:exist?).returns(false) + proj = XCODE_PROJECT + path = @sut.install_podfile(proj) + path.should == proj + end + + it 'performs an installation and returns the path of the workspace' do + Pathname.any_instance.stubs(:exist?).returns(true) + proj = XCODE_PROJECT + @sut.expects(:perform_cocoapods_installation) + Podfile.stubs(:from_file).returns(stub('Workspace', :workspace_path => XCODE_WORKSPACE)) + path = @sut.install_podfile(proj) + path.to_s.should == XCODE_WORKSPACE.to_s + end + + it 'returns the default workspace if one is not set' do + Pathname.any_instance.stubs(:exist?).returns(true) + proj = XCODE_PROJECT + Podfile.stubs(:from_file).returns(stub('Workspace', :workspace_path => nil)) + @sut.expects(:perform_cocoapods_installation).once + path = @sut.install_podfile(proj) + 
path.to_s.should == XCODE_WORKSPACE.to_s + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb new file mode 100644 index 0000000..313584d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/cocoapods-try-1.2.0/spec/spec_helper.rb @@ -0,0 +1,49 @@ + +# Set up +#-----------------------------------------------------------------------------# + +require 'bundler/setup' +require 'pathname' +require 'bacon' +require 'mocha-on-bacon' +require 'pretty_bacon' +require 'cocoapods' + +ROOT = Pathname.new(File.expand_path('../../', __FILE__)) +$LOAD_PATH.unshift((ROOT + 'lib').to_s) +$LOAD_PATH.unshift((ROOT + 'spec').to_s) +require 'cocoapods_plugin' + +#-----------------------------------------------------------------------------# + +module Pod + # Disable the wrapping so the output is deterministic in the tests. + # + UI.disable_wrap = true + + # Redirects the messages to an internal store. + # + module UI + @output = '' + @warnings = '' + + class << self + attr_accessor :output + attr_accessor :warnings + + def puts(message = '') + @output << "#{message}\n" + end + + def warn(message = '', _actions = []) + @warnings << "#{message}\n" + end + + def print(message) + @output << message + end + end + end +end + +#-----------------------------------------------------------------------------# diff --git a/vendor/bundle/ruby/2.7.0/gems/colored-1.2/LICENSE b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/LICENSE new file mode 100644 index 0000000..04e0c0f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2010 Chris Wanstrath + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +Software), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/colored-1.2/README b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/README new file mode 100644 index 0000000..822b766 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/README @@ -0,0 +1,23 @@ +cute. + + >> puts "this is red".red + + >> puts "this is red with a blue background (read: ugly)".red_on_blue + + >> puts "this is red with an underline".red.underline + + >> puts "this is really bold and really blue".bold.blue + + >> logger.debug "hey this is broken!".red_on_yellow # in rails + + >> puts Color.red "This is red" # but this part is mostly untested + +Windows users: + You will need the Win32 Console Ansi gem. Get it: + + $ sudo gem install win32console-1.0.0 --source require.errtheblog.com + + (We're trying to make it official. 
Hang in there.) + +>> chris[at]ozmm[dot]org +=> http://github.com/defunkt diff --git a/vendor/bundle/ruby/2.7.0/gems/colored-1.2/Rakefile b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/Rakefile new file mode 100644 index 0000000..29baff8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/Rakefile @@ -0,0 +1,16 @@ +require 'rake/testtask' + +task :default => :test + +Rake::TestTask.new do |t| + t.libs << 'lib' + t.pattern = 'test/**/*_test.rb' + t.verbose = false +end + +begin + require 'mg' + MG.new("colored.gemspec") +rescue LoadError + abort "Please `gem install mg`" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored-1.2/lib/colored.rb b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/lib/colored.rb new file mode 100644 index 0000000..c107d73 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/lib/colored.rb @@ -0,0 +1,91 @@ +require 'Win32/Console/ANSI' if RUBY_PLATFORM =~ /win32/ + +## +# cute. +# +# >> "this is red".red +# +# >> "this is red with a blue background (read: ugly)".red_on_blue +# +# >> "this is red with an underline".red.underline +# +# >> "this is really bold and really blue".bold.blue +# +# >> Colored.red "This is red" # but this part is mostly untested +module Colored + extend self + + COLORS = { + 'black' => 30, + 'red' => 31, + 'green' => 32, + 'yellow' => 33, + 'blue' => 34, + 'magenta' => 35, + 'cyan' => 36, + 'white' => 37 + } + + EXTRAS = { + 'clear' => 0, + 'bold' => 1, + 'underline' => 4, + 'reversed' => 7 + } + + COLORS.each do |color, value| + define_method(color) do + colorize(self, :foreground => color) + end + + define_method("on_#{color}") do + colorize(self, :background => color) + end + + COLORS.each do |highlight, value| + next if color == highlight + define_method("#{color}_on_#{highlight}") do + colorize(self, :foreground => color, :background => highlight) + end + end + end + + EXTRAS.each do |extra, value| + next if extra == 'clear' + define_method(extra) do + colorize(self, :extra => extra) + end + end + + define_method(:to_eol) do + tmp = sub(/^(\e\[[\[\e0-9;m]+m)/, "\\1\e[2K") + if tmp == self + return "\e[2K" << self + end + tmp + end + + def colorize(string, options = {}) + colored = [color(options[:foreground]), color("on_#{options[:background]}"), extra(options[:extra])].compact * '' + colored << string + colored << extra(:clear) + end + + def colors + @@colors ||= COLORS.keys.sort + end + + def extra(extra_name) + extra_name = extra_name.to_s + "\e[#{EXTRAS[extra_name]}m" if EXTRAS[extra_name] + end + + def color(color_name) + background = color_name.to_s =~ /on_/ + color_name = color_name.to_s.sub('on_', '') + return unless color_name && COLORS[color_name] + "\e[#{COLORS[color_name] + (background ? 10 : 0)}m" + end +end unless Object.const_defined? 
:Colored + +String.send(:include, Colored) diff --git a/vendor/bundle/ruby/2.7.0/gems/colored-1.2/test/colored_test.rb b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/test/colored_test.rb new file mode 100644 index 0000000..3b77990 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored-1.2/test/colored_test.rb @@ -0,0 +1,44 @@ +require 'test/unit' +require File.dirname(__FILE__) + '/../lib/colored' + +class TestColor < Test::Unit::TestCase + def test_one_color + assert_equal "\e[31mred\e[0m", "red".red + end + + def test_two_colors + assert_equal "\e[34m\e[31mblue\e[0m\e[0m", "blue".red.blue + end + + def test_background_color + assert_equal "\e[43mon yellow\e[0m", "on yellow".on_yellow + end + + def test_hot_color_on_color_action + assert_equal "\e[31m\e[44mred on blue\e[0m", "red on blue".red_on_blue + end + + def test_modifier + assert_equal "\e[1mway bold\e[0m", "way bold".bold + end + + def test_modifiers_stack + assert_equal "\e[4m\e[1munderlined bold\e[0m\e[0m", "underlined bold".bold.underline + end + + def test_modifiers_stack_with_colors + assert_equal "\e[36m\e[4m\e[1mcyan underlined bold\e[0m\e[0m\e[0m", "cyan underlined bold".bold.underline.cyan + end + + def test_eol + assert_equal "\e[2Knothing to see here really.", "nothing to see here really.".to_eol + end + + def test_eol_with_with_two_colors + assert_equal "\e[34m\e[31m\e[2Kblue\e[0m\e[0m", "blue".red.blue.to_eol + end + + def test_eol_with_modifiers_stack_with_colors + assert_equal "\e[36m\e[4m\e[1m\e[2Kcyan underlined bold\e[0m\e[0m\e[0m", "cyan underlined bold".bold.underline.cyan.to_eol + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/LICENSE b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/LICENSE new file mode 100644 index 0000000..e543acb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2010 Chris Wanstrath +Copyright (c) 2016 Konstantin Gredeskoul + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
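The original colored gem above defines every color method dynamically from its COLORS and EXTRAS tables, so chained calls simply nest escape sequences, each closed by its own reset. A short sketch, matching the expectations in colored_test.rb:

    require 'colored'

    'red'.red              # => "\e[31mred\e[0m"
    'on yellow'.on_yellow  # => "\e[43mon yellow\e[0m"   (background = color code + 10)
    'way bold'.bold        # => "\e[1mway bold\e[0m"
    'blue'.red.blue        # => "\e[34m\e[31mblue\e[0m\e[0m"   (chains nest, resets stack)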
diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/README.md b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/README.md new file mode 100644 index 0000000..f2da87b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/README.md @@ -0,0 +1,93 @@ +[![Gem Version](https://badge.fury.io/rb/colored2.svg)](https://badge.fury.io/rb/colored2) +[![Downloads](http://ruby-gem-downloads-badge.herokuapp.com/colored2?type=total)](https://rubygems.org/gems/colored2) +[![Gitter](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/colored2) + +[![Build Status](https://travis-ci.org/kigster/colored2.svg?branch=master)](https://travis-ci.org/kigster/colored2) +[![Test Coverage](https://codeclimate.com/github/kigster/colored2/badges/coverage.svg)](https://codeclimate.com/github/kigster/colored2/coverage) +[![Code Climate](https://codeclimate.com/github/kigster/colored2/badges/gpa.svg)](https://codeclimate.com/github/kigster/colored2) +[![Issue Count](https://codeclimate.com/github/kigster/colored2/badges/issue_count.svg)](https://codeclimate.com/github/kigster/colored2) + +## Colored2 + +This is an actively maintained fork of Chris (defunkt) Wanstrath's gem [colored](https://github.com/defunkt/colored), which appears to be no longer supported. + +This fork comes with a slightly spruced-up syntax, some additional features, and a test suite written in [RSpec](http://rspec.info/). + +## Usage + +In addition to the simple syntax of the original gem, which affected only the string to the left of the method call, the new "bang" syntax affects a string to the right. If a block or a method argument is provided, the contents are wrapped in the color, and the color is then reset. + +If no block or argument is provided, the color is left open-ended and must be explicitly reset when using the 'bang' notation. + +![](doc/colored2-session1.png) + +### Complete Set of Colors + + * black + * red + * green + * yellow + * blue + * magenta + * cyan + * white + +### Complete Set of Effects + +> Note: previous versions used the method name `clear` instead of `no_color`, which clashed with many 3rd party frameworks that defined a similarly named method in the global namespace. +> This highlights the dangers of introducing so many words into the `String` namespace. + + * no_color + * bold + * dark + * italic + * underlined + * reversed + * plain + * normal + +## Usage in Other Classes + +With this gem you can add color to not just strings, but to any other class. The `String` class is automatically decorated as soon as `require 'colored2'` is parsed by the ruby interpreter. Note that future versions may refrain from auto-requiring `colored2/strings`, and rely on users explicitly requiring the components they need colorized, e.g. `require 'colored2/numbers'`. + +To color numbers, require the following file, which automatically decorates `Integer` and `Float`. You can also add color methods to `Object`. Finally, you can add the methods to any custom class by including the `Colored2` module. + +Below is an `IRB` session that shows slightly more advanced usage. + +![](doc/colored2-session2.png) + +## Additional Helpers + +There are several additional helpers tucked onto the `String` class. + + * `#to_bol` (to beginning of the line) will rewind the cursor back to the beginning of the current line.
+ * `#to_eol` (to end of line) + +## Installation + +Add this line to your application's Gemfile: + + + gem 'colored2' + + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install colored2 + + +## Development + +To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org). + +## Contributing + +Bug reports and pull requests are welcome on GitHub at [https://github.com/kigster/colored2](https://github.com/kigster/colored2). + +## License + +The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT). diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/Rakefile b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/Rakefile new file mode 100644 index 0000000..47ca61b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/Rakefile @@ -0,0 +1,13 @@ +require 'bundler' +require 'bundler/gem_tasks' +require 'rake/clean' + +CLEAN.include %w(pkg coverage *.gem) + +begin + require 'rspec/core/rake_task' + RSpec::Core::RakeTask.new(:spec) +rescue LoadError +end + +task :default => [:spec] diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2.rb new file mode 100644 index 0000000..c4305dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2.rb @@ -0,0 +1,85 @@ +require 'colored2/codes' +require 'colored2/ascii_decorator' + +module Colored2 + def self.decorate(a_class) + a_class.send(:include, Colored2) + end + + def self.included(from_class) + from_class.class_eval do + + def surround_with_color(color: nil, effect: nil, color_self: nil, string: nil, &block) + color_type = if Colored2.background_next? && effect.nil? + Colored2.foreground_next! + :background + else + :foreground + end + + opts = {} + opts.merge!(color_type => color) if color + opts.merge!(effect: effect) if effect + + if color_self then + opts.merge!( beginning: :on, end: :off) + colored = Colored2::AsciiDecorator.new(self).decorate(opts) + if string || block + arg = "#{string}#{block.call if block}" + colored << Colored2::AsciiDecorator.new(arg).decorate(opts) if arg.length > 0 + end + else + opts.merge!( end: :on ) + colored = Colored2::AsciiDecorator.new(self).decorate(opts) + if string || block + arg = "#{string}#{block.call if block}" + colored << Colored2::AsciiDecorator.new(arg).decorate(opts.merge(end: :off)) if arg.length > 0 + end + end + colored + end + + def on + Colored2.background_next! 
+ self + end + end + + from_class.instance_eval do + COLORS.keys.each do |color| + define_method(color) do |string = nil, &block| + surround_with_color(color: color, color_self: true, string: string, &block) + end + + define_method("#{color}!".to_sym) do |string = nil, &block| + surround_with_color(color: color, color_self: false, string: string, &block) + end + end + + EFFECTS.keys.each do |effect| + next if effect == 'no_color' + define_method(effect) do |string = nil, &block| + surround_with_color(effect: effect, color_self: true, string: string, &block) + end + + define_method("#{effect}!".to_sym) do |string = nil, &block| + surround_with_color(effect: effect, color_self: false, string: string, &block) + end + end + + define_method(:to_eol) do + tmp = sub(/^(\e\[[\[\e0-9;m]+m)/, "\\1\e[2K") + if tmp == self + return "\e[2K" << self + end + tmp + end + + define_method(:to_bol) do + "#{self}\033[#{length}D\033[0D" + end + end + end +end + +require 'colored2/strings' diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb new file mode 100644 index 0000000..c491e61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/ascii_decorator.rb @@ -0,0 +1,86 @@ +require 'colored2/codes' +require 'forwardable' + +module Colored2 + def self.enable! + Colored2::AsciiDecorator.enable! + end + def self.disable! + Colored2::AsciiDecorator.disable! + end + def self.background_next! + Colored2::AsciiDecorator.background_next! + end + def self.foreground_next! + Colored2::AsciiDecorator.foreground_next! + end + def self.background_next? + Colored2::AsciiDecorator.background_next? + end + + class AsciiDecorator + @__background_next = false + @__colors_disabled = false + class << self + attr_accessor :__background_next, :__colors_disabled + def enable! + self.__colors_disabled = false + end + def enabled? + !self.__colors_disabled + end + def disable! + self.__colors_disabled = true + end + def background_next! + self.__background_next = true + end + def foreground_next! + self.__background_next = false + end + def background_next? + self.__background_next + end + end + + extend Forwardable + def_delegators :@my_class, :enable!, :disable! + + attr_accessor :string, :my_class + + def initialize(a_string) + self.string = a_string.instance_of?(Object) ? '' : a_string.to_s + self.my_class = self.class + end + + # options[:start] = :color + # options[:end] = :color | :no_color + def decorate(options = {}) + return string if !self.class.enabled? 
|| string.length == 0 + escape_sequence = [ + Colored2::TextColor.new(options[:foreground]), + Colored2::BackgroundColor.new(options[:background]), + Colored2::Effect.new(options[:effect]) + ].compact.join + + colored = '' + colored << escape_sequence if options[:beginning] == :on + colored << string + if options[:end] + colored << no_color if options[:end] == :off && !colored.end_with?(no_color) + colored << escape_sequence if options[:end] == :on + end + colored + end + + def un_decorate + string.gsub(%r{\e\[\d+(;\d+)*m}, '') + end + + private + + def no_color + @no_color ||= Colored2::Effect.new(:no_color).to_s + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/codes.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/codes.rb new file mode 100644 index 0000000..8f46f43 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/codes.rb @@ -0,0 +1,63 @@ +module Colored2 + + COLORS = { + black: 30, + red: 31, + green: 32, + yellow: 33, + blue: 34, + magenta: 35, + cyan: 36, + white: 37 + } + + EFFECTS = { + no_color: 0, + bold: 1, + dark: 2, + italic: 3, + underlined: 4, + reversed: 7, + plain: 21, # non-bold + normal: 22 + } + + class Code + attr_accessor :name, :escape + def initialize(name) + @name = name + return if name.nil? + @escape = codes[name.to_sym] + raise ArgumentError.new("No color or effect named #{name} exists for #{self.class}.") if @escape.nil? + end + + def value(shift = nil) + escape_code = escape + escape_code += shift if shift && escape_code + name && escape ? "\e[#{escape_code}m" : '' + end + + def to_s + value + end + end + + class Effect < Code + def codes + EFFECTS + end + end + + class TextColor < Code + def codes + COLORS + end + end + + class BackgroundColor < TextColor + def value + super 10 + end + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/numbers.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/numbers.rb new file mode 100644 index 0000000..64161b0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/numbers.rb @@ -0,0 +1,11 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) + +module Colored2 + def self.integer_class + major, minor = RUBY_VERSION.split(/\./).map(&:to_i) + major >= 2 && minor >= 4 ? 
Integer : Kernel.const_get(:Fixnum) + end +end + +Colored2.decorate(Colored2.integer_class) +Colored2.decorate(Float) diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/object.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/object.rb new file mode 100644 index 0000000..439b0f9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/object.rb @@ -0,0 +1,2 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) +Colored2.decorate(Object) diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/strings.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/strings.rb new file mode 100644 index 0000000..8e099e8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/strings.rb @@ -0,0 +1,2 @@ +require 'colored2' unless defined?(Colored2) && Colored2.respond_to?(:decorate) +Colored2.decorate(String) diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/version.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/version.rb new file mode 100644 index 0000000..ab0f8a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/lib/colored2/version.rb @@ -0,0 +1,3 @@ +module Colored2 + VERSION = '3.1.2' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb new file mode 100644 index 0000000..f686bfa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/numbers_spec.rb @@ -0,0 +1,26 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/numbers' +require 'colored2/strings' + +RSpec.describe Colored2.integer_class do + describe 'with foreground and background colors' do + it 'should work with one color' do + expect(32.red).to eql('32'.red) + end + it 'should insert escape sequences' do + expect(32.red).to eql("\e[31m32\e[0m") + end + end +end + +RSpec.describe Float do + describe 'with foreground and background colors' do + it 'should add two colors chained' do + expect((32.5).blue.on.red).to eql('32.5'.blue.on.red) + end + + it 'should insert escape sequences' do + expect((32.5).blue.on.red).to eql("\e[41m\e[34m32.5\e[0m") + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb new file mode 100644 index 0000000..6256254 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/object_spec.rb @@ -0,0 +1,24 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/object' + +subject1 = red('hello') +subject2 = red('blue').on.blue +subject3 = on.yellow('on yellow') + +RSpec.describe Object do + + describe 'with foreground and background colors' do + it 'should work with one color' do + expect(subject1).to eql('hello'.red) + end + + it 'should work with color on color' do + expect(subject2).to eql('blue'.red.on.blue) + end + + it 'should add background color using on_' do + expect(subject3).to eql('on yellow'.on.yellow) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb new file mode 100644 index 0000000..10da2a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2/strings_spec.rb @@ -0,0 +1,77 @@ +require File.expand_path('spec/spec_helper') +require 'colored2/strings' + 
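+# A brief orientation for the specs below (comment sketch, derived from
+# lib/colored2/codes.rb above): foreground codes are 30-37, a background
+# color is the foreground code + 10, and "\e[0m" is the :no_color reset.
+#
+#   'red'.red       # => "\e[31mred\e[0m"
+#   'x'.red.on.blue # => "\e[44m\e[31mx\e[0m"
+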
+RSpec.describe String do + before do + Colored2.decorate(String) + end + + describe 'with foreground and background colors' do + it 'should work with one color' do + expect('red'.red).to eql("\e[31mred\e[0m") + end + + it 'should add two colors chained' do + expect('blue'.red.blue).to eql("\e[34m\e[31mblue\e[0m") + end + + it 'should add background color using on_' do + expect('on yellow'.on.yellow).to eql("\e[43mon yellow\e[0m") + end + + it 'should work with _on_ syntax' do + expect('red on blue'.red.on.blue).to eql("\e[44m\e[31mred on blue\e[0m") + end + end + + describe 'with effects' do + it 'should add a bold modifier' do + expect('way bold'.bold).to eql("\e[1mway bold\e[0m") + end + + it 'should let modifiers stack' do + expect('underlinedd bold'.bold.underlined).to eql("\e[4m\e[1munderlinedd bold\e[0m") + end + + it 'should let modifiers stack with colors' do + expect('cyan underlinedd bold'.bold.underlined.cyan).to eql("\e[36m\e[4m\e[1mcyan underlinedd bold\e[0m") + end + end + + describe 'new block syntax' do + it 'should defined block syntax nested colors' do + expect('No Color, then'.blue!('blue inside')).to eql('No Color, then' + 'blue inside'.blue) + end + + it 'should defined block syntax nested colors two levels deep' do + expect('regular here'.blue! + 'blue here'.no_color!).to eql('regular here' << 'blue here'.blue) + end + + it 'should defined block syntax nested colors two levels deep' do + expect('regular here'.blue! { 'something else'.red!('red riding hood') }).to eql('regular here'.blue! << 'something else'.red! << 'red riding hood'.no_color!) + end + + it 'should defined block syntax nested colors two levels deep' do + expectation = 'this is regular, but '.red! do + 'this is red '.yellow! do + ' and yellow'.no_color! + end + end + expect(expectation).to eql('this is regular, but '.red! << 'this is red '.yellow! << ' and yellow'.no_color!) + end + end + + describe 'end of line' do + it 'should work with eol' do + expect('nothing to see here really.'.to_eol).to eql("\e[2Knothing to see here really.") + end + + it 'should work with eol_with_with_two_colors' do + expect('blue'.red.blue.to_eol).to eql("\e[34m\e[31m\e[2Kblue\e[0m") + end + + it 'should work with eol_with_modifiers_stack_with_colors' do + expect('cyan underlinedd bold'.bold.underlined.cyan.to_eol).to eql("\e[36m\e[4m\e[1m\e[2Kcyan underlinedd bold\e[0m") + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2_spec.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2_spec.rb new file mode 100644 index 0000000..3b55f07 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/colored2_spec.rb @@ -0,0 +1,23 @@ +require 'spec_helper' +require 'colored2/strings' + +RSpec.describe Colored2 do + describe 'global enable and disable' do + before do + Colored2.disable! + end + after do + Colored2.enable! 
+ end + let(:sample) { 'sample string' } + + describe 'colors' do + subject { sample.red.on.blue } + it { should eql(sample) } + end + describe 'effects' do + subject { sample.bold.on.red } + it { should eql(sample) } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/spec_helper.rb new file mode 100644 index 0000000..3f2faa0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/colored2-3.1.2/spec/spec_helper.rb @@ -0,0 +1,5 @@ +require 'simplecov' +SimpleCov.start + +require 'rspec/core' + diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.gitignore b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.gitignore new file mode 100644 index 0000000..1f8e013 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.gitignore @@ -0,0 +1,7 @@ +*.gem +.bundle +Gemfile.lock +pkg/* +.rvmrc +.ruby-version +coverage/* diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rspec b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rspec new file mode 100644 index 0000000..83e16f8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rspec @@ -0,0 +1,2 @@ +--color +--require spec_helper diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop.yml b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop.yml new file mode 100644 index 0000000..18a1efc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop.yml @@ -0,0 +1,64 @@ +inherit_from: .rubocop_todo.yml + +AllCops: + TargetRubyVersion: 2.4 + NewCops: enable + SuggestExtensions: false + +# not Ruby 1.9.3 compatible +Style/PercentLiteralDelimiters: + Enabled: false + +# Enforce trailing comma after last item of multiline arrays. +Style/TrailingCommaInArrayLiteral: + EnforcedStyleForMultiline: comma + +# Enforce trailing comma after last item of multiline hashes. +Style/TrailingCommaInHashLiteral: + EnforcedStyleForMultiline: comma + +Metrics/BlockLength: + Enabled: false + +Style/SignalException: + EnforcedStyle: only_fail + +Style/ParallelAssignment: + Enabled: false + +Style/NumericLiteralPrefix: + EnforcedOctalStyle: zero_only + +Style/MissingRespondToMissing: + Enabled: false + +Layout/SpaceInsideStringInterpolation: + Enabled: false + +Layout/MultilineOperationIndentation: + Enabled: false + +Style/EmptyMethod: + EnforcedStyle: expanded + +Metrics/ModuleLength: + CountComments: false # count full line comments? + Max: 150 + +Lint/EmptyBlock: + Enabled: false + +Style/FormatStringToken: + Enabled: false + +Style/MixinUsage: + Enabled: false + +Lint/ErbNewArguments: + Enabled: false + +Style/DocumentDynamicEvalDefinition: + Enabled: false + +Naming/HeredocDelimiterNaming: + Enabled: false diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop_todo.yml b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop_todo.yml new file mode 100644 index 0000000..936c5f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.rubocop_todo.yml @@ -0,0 +1,77 @@ +# This configuration was generated by `rubocop --auto-gen-config` +# on 2015-02-16 16:08:54 -0800 using RuboCop version 0.29.0. +# The point is for the user to remove these configuration records +# one by one as the offenses are removed from the code base. +# Note that changes in the inspected code, or installation of new +# versions of RuboCop, may require this file to be generated again. 
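+# (To regenerate this file after fixing offenses, run
+# `rubocop --auto-gen-config` again, as noted above.)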
+ +# Offense count: 2 +Security/Eval: + Enabled: false + +# Offense count: 2 +Lint/SuppressedException: + Enabled: false + +# Offense count: 5 +Metrics/AbcSize: + Max: 30 + +# Offense count: 1 +# Configuration parameters: CountComments. +Metrics/ClassLength: + Enabled: false + +# Offense count: 4 +Metrics/CyclomaticComplexity: + Max: 13 + +# Offense count: 89 +# Configuration parameters: AllowURI, URISchemes. +Layout/LineLength: + Max: 242 + +# Offense count: 7 +# Configuration parameters: CountComments. +Metrics/MethodLength: + Max: 36 + +# Offense count: 4 +Metrics/PerceivedComplexity: + Max: 14 + +# Offense count: 1 +Naming/AccessorMethodName: + Enabled: false + +# Offense count: 18 +Style/Documentation: + Enabled: false + +# Offense count: 12 +# Configuration parameters: AllowedVariables. +Style/GlobalVars: + Enabled: false + +# Offense count: 1 +# Configuration parameters: MaxLineLength. +Style/IfUnlessModifier: + Enabled: false + +# Offense count: 1 +Style/MultilineBlockChain: + Enabled: false + +# Offense count: 1 +Style/MultilineTernaryOperator: + Enabled: false + +# Offense count: 5 +Style/RescueModifier: + Enabled: false + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: ExactNameMatch, AllowPredicates, AllowDSLWriters, Whitelist. +Style/TrivialAccessors: + Enabled: false diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.travis.yml new file mode 100644 index 0000000..0dbcc1a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/.travis.yml @@ -0,0 +1,13 @@ +language: ruby +cache: bundler +before_install: + - gem update --system + - gem update bundler +rvm: + - 2.4 + - 2.5 + - 2.6 + - 2.7 + - 3.0 + - jruby + - ruby-head diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/DEVELOPMENT b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/DEVELOPMENT new file mode 100644 index 0000000..ef7af1b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/DEVELOPMENT @@ -0,0 +1,15 @@ +To run the development rake tasks, you need bundler installed. + +Before you push any changes, run the RSpec suite: + + $ rake spec + +To build a new version of the gem: + + $ rake build + +To push the new version to Rubygems: + + $ rake release + +(http://rubygems.org/gems/commander) \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Gemfile new file mode 100644 index 0000000..7f4f5e9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Gemfile @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +gemspec diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/History.rdoc b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/History.rdoc new file mode 100644 index 0000000..4e659d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/History.rdoc @@ -0,0 +1,465 @@ +=== 4.6.0 / 2021-04-09 + +* Fix error with SortedSet on Ruby 3.0 (#98). +* Remove `#reset_io` as it didn't do anything. +* Drop support for Ruby < 2.4. + +=== 4.5.2 / 2020-03-12 + +* Fix bug handling global options provided in option=value form (#47). (@orien) +* Fix ERB warnings under Ruby 2.7. (@esotericpig) +* Fix bug handling global options placed before command name (#32). (@orien) + +=== 4.5.1 / 2020-03-08 + +* Fix bug causing global options to be ignored when arguments are present (#86). (@orien) + +=== 4.5.0 / 2020-01-21 + +* Drop support for Ruby < 2.3. 
+* Fix bug parsing double dash (#75). + +=== 4.4.7 / 2018-10-22 + +* Update HighLine dependency to 2.0.0. (@rohitpaulk) + +=== 4.4.6 / 2018-07-31 + +* Fix unexpected internal behavior change introduced in 4.4.5. + +=== 4.4.5 / 2018-05-22 + +* Make internal command state less mutable. (@doriantaylor) + +=== 4.4.4 / 2018-01-18 + +* Fix deprecated constant warnings on Ruby 2.5. (@mattbrictson) + +=== 4.4.3 / 2016-12-28 + +* Fix deprecated constant warnings on Ruby 2.4. + +=== 4.4.2 / 2016-12-20 + +* Add `help_paging` program flag so that help paging may be disabled. (@gogiel) + +=== 4.4.1 / 2016-12-02 + +* Fix #36 - Warning about MissingSourceFile (@fallwith) +* Fix build on Ruby 1.9 + +=== 4.4.0 / 2016-02-19 + +* Add modular style template initialization. (@lebogan) +* Allow option names that start with a global option name. +* Fix handling of negatable global flags. (@mfurtak) + +=== 4.3.8 / 2016-02-09 + +* Fix regression for deprecation warnings. + +=== 4.3.7 / 2016-01-26 + +* Fix regression in help formatter introduced in 4.3.6. + +=== 4.3.6 / 2016-01-26 [YANKED] + +* Fix deprecation warnings on Ruby 2.3.0. +* Indent option descriptions the same way as program and command descriptions. (@badeball) + +=== 4.3.5 / 2015-08-09 + +* Fixed a bug with small terminal widths, changed minimum screen width for wrapping to 40 columns. (@toolmantim) + +=== 4.3.4 / 2015-05-03 + +* Fixed a regression with the compact help formatter. + +=== 4.3.3 / 2015-04-21 + +* Updated to highline 1.7.2 to fix a regression with terminal size (https://github.com/JEG2/highline/pull/139). +* Help formatting updated to look better for commands with long names. (@jszwedko) + +=== 4.3.2 / 2015-03-31 + +* Version bump to publish new location of Commander to Rubygems. Moved to https://github.com/commander-rb/commander + +=== 4.3.1 / 2015-03-27 + +* Fix various Ruby warnings for circular requires, etc. +* Update to use highline 1.7 (@SkyTrix) + +=== 4.3.0 / 2015-02-07 + +* Drops support for Ruby 1.8. Please use 4.2.1 if you still need Ruby 1.8 compatibility. +* Fixed a bug with `always_trace` (#91) (@KrauseFx) +* Renamed `commands` to `defined_commands` for better compatibility with Pry + +=== 4.2.1 / 2014-09-28 + +* Improve `choose` compatibility with HighLine's version (#79) + +=== 4.2.0 / 2014-04-29 + +* Add ability to use commander without importing into the global namespace (#61) (@krissi) + +=== 4.1.6 / 2014-02-11 + +* Respect environment setting for $LESS (@ellemenno) +* Add ability to hide trace flags and globally enable tracing (#16, #17) (@jamesrwhite) + +=== 4.1.5 / 2013-08-11 + +* Prevent deprecation warning when loaded in a Rails 4 environment (#58) + +=== 4.1.4 / 2013-07-21 + +* Improve help formatting for multiline program and command descriptions +* Add ability to set speaking rate (@kethomassen) +* Clean up examples in docs (@parkr) + +=== 4.1.3 / 2012-12-15 + +* Remove use of eval in Array.parse +* Make generated template executable and set better defaults (@suan) +* Fixed valid_command_names_from to match exact commands only (@fgalassi) + +=== 4.1.2 / 2012-02-17 + +* Improvement to `ask_editor` to be more portable across *nix variants. (thanks to Federico Galassi) + +=== 4.1.1 / 2012-02-16 + +* Update `ask_editor` to work with any *nix editor - emacs, vim, etc. (thanks to Federico Galassi) + +=== 4.1.0 / 2012-02-12 + +* Update highline dependency. +* Make optional arguments true when present (issue #2). 
+ +=== 4.0.7 / 2012-01-23 + +* Improved support for JRuby and Windows (and any other platforms that don't support Kernel.fork). +* Fixed bug #33 - support `--help` after commands. +* Reorganized help output to display synopsis before description (issue #12). + +=== 4.0.6 / 2011-09-15 + +* Improved load time on Ruby 1.9. (thanks to Jonathon M. Abbott) +* Updated documentation. + +=== 4.0.5 / 2011-08-09 + +* Updated documentation to fix inaccuracies and unclear information. +* Improved rake tasks for gem development. +* Added say_ok, say_warning and say_error methods to print messages in green, yellow or red. (thanks to Simon Courtois) +* Fixed; Allow global options to be passed in any order, even mixed with command options. (thanks to Rich Grundy) +* Fixed; Global options can be passed before or after the command, they can even be mixed with command options. Closes #8. (thanks to Rich Grundy) +* Fixed; Platform test should now correctly identify JRuby. (thanks to Justin Lynn) +* Fixed; Add to_s to exceptions as option parser no longer does implicit conversion. (thanks to Justin Lynn) + +=== 4.0.4 / 2011-04-04 + +* Fixed program(:key) behavior for non-Array keys like name, version, description under Ruby 1.9 +* All specs should pass under Ruby 1.9 now + +=== 4.0.3 / 2010-04-06 + +* Fixed global_option which was consuming arguments when not expected. Closes #22 + +=== 4.0.2 / 2010-01-19 + +* Added program(:int_block) to allow a block to be run on interrupt. +* Fixed; ProgressBar immediately shows, and doesn't die on empty lists. + +=== 4.0.1 / 2010-01-14 + +* Fixed commander when using JRuby +* Fixed template; should require "commander/import" + +=== 4.0.0 / 2009-10-10 + +* Changed; Aliased #when_called as #action [#9] +* Changed; Sort commands and aliases alphabetically. +* Changed; Sort aliases alphabetically +* Changed; Sort commands alphabetically. +* Changed; require user to require 'commander/import' for dsl +* Fixed broken regexp; Changed :int_message, only traps INT when available +* Fixed Ruby 1.9 warning caused by removing object_id from Object +* Removed #eval const hack +* Moving to Gemcutter (GRRRR Github) + +=== 3.3.0 / 2009-05-12 + +* Added #choose +* Added aliasing for help formatters [#24] +* Added UI.replace_tokens +* Added #converse +* Added #speak +* Added #applescript +* Added #reset_io +* Added #io +* Removed String#tokenize +* Removed #delete_switches + +=== 3.2.9 / 2009-04-09 + +* Fixed bug in removal of global options + +=== 3.2.8 / 2009-04-09 + +* Added auto-loading support of visionmedia-growl convenience methods when the gem is available +* Changed; growl related methods are now part of visionmedia-growl +* Fixed doc typos + +=== 3.2.7 / 2009-04-08 + +* Added #notify, #notify_ok, #notify_warning, #notify_info and #notify_error + These methods all utilize the MacOS growlnotify binary, very cool stuff check it out! + +=== 3.2.6 / 2009-04-08 + +* Added Commander::UI::AskForClass +* Added support to #remove_global_options for options with arguments +* Removed .re directory used for visionmedia-release +* Fixed bug preventing --trace from working + +=== 3.2.5 / 2009-04-02 + +* Added #ask_editor + +=== 3.2.4 / 2009-04-02 + +* Added #enable_paging [#9] + +=== 3.2.3 / 2009-04-01 + +* Added new pager using less [#9] + +=== 3.2.2 / 2009-04-01 + +* Github's gem builder timed out ? ... 
+ +=== 3.2.1 / 2009-04-01 + +* Added Commander::HelpFormatter::TerminalCompact +* Fix; updated gemspec / manifest removing test/* causing issue [#26] + +=== 3.2.0 / 2009-03-26 + +* Added implied small switches so they appear in help (-h, -v, etc) +* Added #inspect back to Commander::Command::Options [#1] +* Added inheritance of global options for commands [#7] +* Added #require_valid_command +* Renamed #call_active_command to #run_active_command +* Changed; using same option format as command options for globals [#18] +* Changed; program :name is now optional, and is auto-defined when not specified [#21] +* Moved #switch_to_sym from Command to Commander::Runner +* Moved #separate_switches_from_description into Commander::Runner [#22] +* Removed program :name from commander init template since its not required anymore + +=== 3.1.8 / 2009-03-25 + +* Utilizing abort and $stderr instead of using #say [#16] +* Fixed INT trapping issue (purely cosmetic) [#14] +* Removed todo, use lighthouse now for commander at: + http://visionmedia.lighthouseapp.com/projects/27643-commander/overview + +=== 3.1.7 / 2009-03-24 + +* Added global --trace option + +=== 3.1.6 / 2009-03-22 + +* Changed Options struct to always use symbols + +=== 3.1.5 / 2009-03-22 + +* Added Options#__hash__ to access the internal hash table +* Refactored Options struct to function much like an OpenStruct +* Updated commander exec init help description + +=== 3.1.4 / 2009-03-20 + +* Gemspec typo + +=== 3.1.3 / 2009-03-20 + +* Added #remove_global_options +* Added core global option descriptions +* Added display of global options +* Removed small core switches (-v, -h) because they are implicitly set +* Fixed issue with option switchs failing when named as common methods like --send, or --password +* Fixed bug causing InvalidOption error when global options do not abort the process. + This was due to these options remaining in the arguments array, in turn being parsed + by the command's option parser, issuing the error. This is fixed by #remove_global_options. 
+ +=== 3.1.2 / 2009-03-16 + +* Added support for global options via #global_option +* Added #call_active_command and #say to clean things up internally +* Misc refactoring + +=== 3.1.1 / 2009-03-13 + +* Fixed some terminal formatter spacing issues + +=== 3.1.0 / 2009-03-13 + +* Added Command#inspect +* Added displaying of aliases in global help +* Added support for alias args +* Added #default_command + +=== 3.0.3 / 2009-03-12 + +* Added #alias_command + +=== 3.0.2 / 2009-03-12 + +* Added terminal formatter specs +* Fixed bug preventing command names passed to help from working + +=== 3.0.1 / 2009-03-12 + +* Fixed bug in #command_name_from_args preventing the left-most match for a command name to be used +* Fixed bug in Command#example preventing the formatter from outputting them correctly + +=== 3.0.0 / 2009-03-12 + +* Added sub command help support when using the --help switch +* #delete_switches is now mutative +* Added #valid_command_names_from +* #when_called now raises argument error when neither a block, object, or class is passed +* Added options#default method and specs +* Added support for multi-word list parsing in Array#parse +* Added summary to commander template +* Added optional block param for #new_command_runner spec helper +* Added .re (metadata for visionmedia-release gem) +* Moved delgation of Kernel methods to Object +* Refactored multi-word command name specs +* Refactored Object#method_missing; using call chain to implement +* Refactored all specs +* Change #color calls in formatters to $terminal#color for now +* Using highlines $terminal.output for IO stream with the progressbar +* Replaced #ensure_program_key_set with #require_program, taking an array of keys +* Renamed #args_without_command to #args_without_command_name +* Renamed #populate_options_to_when_called to #option_proc +* Renamed #sym_from_switch to #switch_to_sym +* Removed #get_command, use #command without supplying a block +* Removed Command#inspect +* Removed command description aliases of short_description, long_description. Use description / summary +* Removed VerboseFileUtils +* Removed String#camelcase as its not really needed (was only used in commander's executable for creating templates) +* Removed commander/import for now +* Removed LOG_FORMAT +* Removed Kernel ext which only had #color + +=== 2.5.7 / 2009-03-11 + +* Fixed paren warnings in import spec +* Fixed Kernel#method_missing to preserve its original behavior (Raise errors for missing methods or variables) + +=== 2.5.6 / 2009-03-06 + +* Replaced Commander::VERSION::STRING with Commander::VERSION (for release gem) +* Refactored progress bar +* Fixed String#tokenize + +=== 2.4.5 / 2009-03-03 + +* Fixed bug which was not allowing switch arguments which are strings + such as --eval 'something here' + +=== 2.4.4 / 2009-01-15 + +* Fixed bug which was not allowing multi-word commands to view help + +=== 2.4.3 / 2009-01-15 + +* Fixed bug in command_name_from_args, preventing multi-word commands + +=== 2.4.2 / 2009-01-12 + +* Github! build me! 
+ +=== 2.4.1 / 2009-01-11 + +* Added ask_for_CLASS, which may be any class name such as ask_for_date, ask_for_array, etc +* Changed doc to *.rdoc for pretty markup on github + +=== 2.3.0 / 2008-12-16 + +* Removed input, output in favour of HighLines $terminal +* Autoloading terminal formatter since it may not always be used +* Added wrapping of text with 10 padding, defaults to 80 +* Added friendly interruption message by default, overridable with trap or int_message +* Fixed description now showing as summary when no summary is available + +=== 2.2.1 / 2008-12-09 + +* Fixed typo in String#tokenize, preventing progress bar from working +* Fixed some small formatting issues with help output + +=== 2.2.0 / 2008-12-09 + +* Additional documentation +* Added support for arbitrary help 'blocks' using program :help, 'Your block', 'Your blocks contents' +* Added support for description, summary, short_description, and large_description, which are displaying conditionally within help docs +* Allowing uncaught exceptions to reach stderr, for debugging purposes + +=== 2.1.1 / 2008-12-01 + +* Changed #password, now requires that the user must enter a non-empty? value + +=== 2.1.0 / 2008-12-01 + +* Added progress bar +* Added auto-inclusion of FileUtils module +* Added stdout logging method +* Fixed issue with options placed before command + +=== 2.0.1 / 2008-11-24 + +* Fixed new line after command options +* Fixed padding for command list + +=== 2.0.0 / 2008-11-24 + +* Rewrite of entire gem +* Added auto-parsing and struct population of options +* Added better documentation + +=== 1.2.2 / 2008-11-06 + +* Forgot to add array.rb + +=== 1.2.0 / 2008-11-06 + +* Added paging ability (similar to 'less') +* Added coloring to default help generator + +=== 1.1.0 / 2008-11-06 + +* Added dependency for Highline gem, which replaces Commander's user interaction lib + +=== 1.0.4 / 2008-11-04 + +* Added support for --help and --version flags + +=== 1.0.3 / 2008-11-01 + +* Typo causing the gem to fail build on github + +=== 1.0.2 / 2008-11-01 + +* Added gemspec for github + +=== 1.0.1 / 2008-10-31 + +* Added shebang line to commander init +* Added require 'rubygems' + +=== 1.0.0 / 2008-10-31 + +* Initial release diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/LICENSE b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/LICENSE new file mode 100644 index 0000000..02caa4d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2008-2013 TJ Holowaychuk + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Manifest b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Manifest new file mode 100644 index 0000000..6702e7a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Manifest @@ -0,0 +1,38 @@ +DEVELOPMENT +History.rdoc +Manifest +README.rdoc +Rakefile +bin/commander +commander.gemspec +lib/commander.rb +lib/commander/blank.rb +lib/commander/command.rb +lib/commander/core_ext.rb +lib/commander/core_ext/array.rb +lib/commander/core_ext/object.rb +lib/commander/delegates.rb +lib/commander/help_formatters.rb +lib/commander/help_formatters/base.rb +lib/commander/help_formatters/terminal.rb +lib/commander/help_formatters/terminal/command_help.erb +lib/commander/help_formatters/terminal/help.erb +lib/commander/help_formatters/terminal_compact.rb +lib/commander/help_formatters/terminal_compact/command_help.erb +lib/commander/help_formatters/terminal_compact/help.erb +lib/commander/import.rb +lib/commander/platform.rb +lib/commander/runner.rb +lib/commander/user_interaction.rb +lib/commander/version.rb +spec/command_spec.rb +spec/core_ext/array_spec.rb +spec/core_ext/object_spec.rb +spec/help_formatters/terminal_spec.rb +spec/runner_spec.rb +spec/spec.opts +spec/spec_helper.rb +spec/ui_spec.rb +tasks/dev_setup.rake +tasks/docs.rake +tasks/gemspec.rake diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/README.md b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/README.md new file mode 100644 index 0000000..a48f166 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/README.md @@ -0,0 +1,475 @@ +[Build Status](https://travis-ci.org/commander-rb/commander) +[![Inline docs](https://inch-ci.org/github/commander-rb/commander.svg)](https://inch-ci.org/github/commander-rb/commander) + +# Commander + +The complete solution for Ruby command-line executables. +Commander bridges the gap between other terminal related libraries +you know and love (OptionParser, HighLine), while providing many new +features, and an elegant API. 
+ +## Features + +* Easier than baking cookies +* Parses options using OptionParser +* Auto-populates struct with options ( no more `{ |v| options[:recursive] = v }` ) +* Auto-generates help documentation via pluggable help formatters +* Optional default command when none is present +* Global / Command level options +* Packaged with two help formatters (Terminal, TerminalCompact) +* Imports the highline gem for interacting with the terminal +* Adds additional user interaction functionality +* Highly customizable progress bar with intuitive, simple usage +* Multi-word command name support such as `drupal module install MOD`, rather than `drupal module_install MOD` +* Sexy paging for long bodies of text +* Support for MacOS text-to-speech +* Command aliasing (very powerful, as both switches and arguments can be used) +* Growl notification support for MacOS +* Use the `commander` executable to initialize a commander driven program + +## Installation + + $ gem install commander + +## Quick Start + +To generate a quick template for a commander app, run: + + $ commander init yourfile.rb + +To generate a quick modular style template for a commander app, run: + + $ commander init --modular yourfile.rb + +## Example + +For more option examples view the `Commander::Command#option` method. Also +an important feature to note is that action may be a class to instantiate, +as well as an object, specifying a method to call, so view the RDoc for more information. + +### Classic style + +```ruby +require 'rubygems' +require 'commander/import' + +# :name is optional, otherwise uses the basename of this executable +program :name, 'Foo Bar' +program :version, '1.0.0' +program :description, 'Stupid command that prints foo or bar.' + +command :foo do |c| + c.syntax = 'foobar foo' + c.description = 'Displays foo' + c.action do |args, options| + say 'foo' + end +end + +command :bar do |c| + c.syntax = 'foobar bar [options]' + c.description = 'Display bar with optional prefix and suffix' + c.option '--prefix STRING', String, 'Adds a prefix to bar' + c.option '--suffix STRING', String, 'Adds a suffix to bar' + c.action do |args, options| + options.default :prefix => '(', :suffix => ')' + say "#{options.prefix}bar#{options.suffix}" + end +end +``` + +Example output: + +``` +$ foobar bar +# => (bar) + +$ foobar bar --suffix '}' --prefix '{' +# => {bar} +``` + +### Modular style + +**NOTE:** Make sure to use `require 'commander'` rather than `require 'commander/import'`, otherwise Commander methods will still be imported into the global namespace. + +```ruby +require 'rubygems' +require 'commander' + +class MyApplication + include Commander::Methods + + def run + program :name, 'Foo Bar' + program :version, '1.0.0' + program :description, 'Stupid command that prints foo or bar.' + + command :foo do |c| + c.syntax = 'foobar foo' + c.description = 'Displays foo' + c.action do |args, options| + say 'foo' + end + end + + run! + end +end + +MyApplication.new.run if $0 == __FILE__ +``` + +### Block style + +```ruby +require 'rubygems' +require 'commander' + +Commander.configure do + program :name, 'Foo Bar' + program :version, '1.0.0' + program :description, 'Stupid command that prints foo or bar.' + + # see classic style example for options +end +``` + +## HighLine + +As mentioned above, the highline gem is imported into the global scope. 
Here +are some quick examples for how to utilize highline in your commands: + +```ruby +# Ask for password masked with '*' character +ask("Password: ") { |q| q.echo = "*" } + +# Ask for password +ask("Password: ") { |q| q.echo = false } + +# Ask if the user agrees (yes or no) +agree("Do something?") + +# Asks on a single line (note the space after ':') +ask("Name: ") + +# Asks with new line after "Description:" +ask("Description:") + +# Calls Date#parse to parse the date string passed +ask("Birthday? ", Date) + +# Ensures Integer is within the range specified +ask("Age? ", Integer) { |q| q.in = 0..105 } + +# Asks for a list of strings, converts to array +ask("Fav colors?", Array) +``` + +## HighLine & Interaction Additions + +In addition to highline's fantastic choice of methods, commander adds the +following methods to simplify common tasks: + +```ruby +# Ask for password +password + +# Ask for password with specific message and mask character +password "Enter your password please:", '-' + +# Ask for CLASS, which may be any valid class responding to #parse. Date, Time, Array, etc +names = ask_for_array 'Names: ' +bday = ask_for_date 'Birthday?: ' + +# Simple progress bar (Commander::UI::ProgressBar) +uris = %w[ + http://vision-media.ca + http://google.com + http://yahoo.com +] +progress uris do |uri| + res = open uri + # Do something with response +end + +# 'Log' action to stdout +log "create", "path/to/file.rb" + +# Enable paging of output after this point +enable_paging + +# Ask editor for input (EDITOR environment variable or whichever is available: TextMate, vim, vi, emacs, nano, pico) +ask_editor + +# Ask editor, supplying initial text +ask_editor 'previous data to update' + +# Ask editor, preferring a specific editor +ask_editor 'previous data', 'vim' + +# Choose from an array of elements +choice = choose("Favorite language?", :ruby, :perl, :js) + +# Alter IO for the duration of the block +io new_input, new_output do + new_input_contents = $stdin.read + puts new_input_contents # outputs to new_output stream +end +# $stdin / $stdout reset back to original streams + +# Speech synthesis +speak 'What is your favorite food? ' +food = ask 'favorite food?: ' +speak "Wow, I like #{food} too. We have so much in common." +speak "I like #{food} as well!", "Victoria", 190 + +# Execute arbitrary applescript +applescript 'foo' + +# Converse with speech recognition server +case converse 'What is the best food?', :cookies => 'Cookies', :unknown => 'Nothing' +when :cookies + speak 'o.m.g. you are awesome!' +else + case converse 'That is lame, shall I convince you cookies are the best?', :yes => 'Ok', :no => 'No', :maybe => 'Maybe another time' + when :yes + speak 'Well you see, cookies are just fantastic, they melt in your mouth.' + else + speak 'Ok then, bye.' + end +end +``` + +## Growl Notifications + +Commander provides methods for displaying Growl notifications. To use these +methods you need to install https://github.com/tj/growl which utilizes +the [growlnotify](https://growl.info/extras.php#growlnotify) executable. Note that +growl is auto-imported by Commander when available, no need to require. 
+ +```ruby +# Display a generic Growl notification +notify 'Something happened' + +# Display an 'info' status notification +notify_info 'You have #{emails.length} new email(s)' + +# Display an 'ok' status notification +notify_ok 'Gems updated' + +# Display a 'warning' status notification +notify_warning '1 gem failed installation' + +# Display an 'error' status notification +notify_error "Gem #{name} failed" +``` + +## Commander Goodies + +### Option Defaults + +The options struct passed to `#action` provides a `#default` method, allowing you +to set defaults in a clean manner for options which have not been set. + +```ruby +command :foo do |c| + c.option '--interval SECONDS', Integer, 'Interval in seconds' + c.option '--timeout SECONDS', Integer, 'Timeout in seconds' + c.action do |args, options| + options.default \ + :interval => 2, + :timeout => 60 + end +end +``` + +### Command Aliasing + +Aliases can be created using the `#alias_command` method like below: + +```ruby +command :'install gem' do |c| + c.action { puts 'foo' } +end +alias_command :'gem install', :'install gem' +``` + +Or more complicated aliases can be made, passing any arguments +as if it was invoked via the command line: + +```ruby +command :'install gem' do |c| + c.syntax = 'install gem [options]' + c.option '--dest DIR', String, 'Destination directory' + c.action { |args, options| puts "installing #{args.first} to #{options.dest}" } +end +alias_command :update, :'install gem', 'rubygems', '--dest', 'some_path' +``` + +``` +$ foo update +# => installing rubygems to some_path +``` + +### Command Defaults + +Although working with a command executable framework provides many +benefits over a single command implementation, sometimes you still +want the ability to create a terse syntax for your command. With that +in mind we may use `#default_command` to help with this. Considering +our previous `:'install gem'` example: + +```ruby +default_command :update +``` + +``` +$ foo +# => installing rubygems to some_path +``` + +Keeping in mind that commander searches for the longest possible match +when considering a command, so if you were to pass arguments to foo +like below, expecting them to be passed to `:update`, this would be incorrect, +and would end up calling `:'install gem'`, so be careful that the users do +not need to use command names within the arguments. + +``` +$ foo install gem +# => installing to +``` + +### Long descriptions + +If you need to have a long command description, keep your short description under `summary`, and consider multi-line strings for `description`: + +```ruby + program :summary, 'Stupid command that prints foo or bar.' + program :description, %q( +#{c.summary} + +More information about that stupid command that prints foo or bar. + +And more + ) +``` + +### Additional Global Help + +Arbitrary help can be added using the following `#program` symbol: + +```ruby +program :help, 'Author', 'TJ Holowaychuk ' +``` + +Which will output the rest of the help doc, along with: + + AUTHOR: + + TJ Holowaychuk + +### Global Options + +Although most switches will be at the command level, several are available by +default at the global level, such as `--version`, and `--help`. Using +`#global_option` you can add additional global options: + +```ruby +global_option('-c', '--config FILE', 'Load config data for your commands to use') { |file| ... } +``` + +This method accepts the same syntax as `Commander::Command#option` so check it out for documentation. 
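+
+For instance, here is a sketch (illustrative only; `$config` is a made-up
+global, not part of Commander) of a `--config` option whose block loads a
+YAML file as soon as the flag is parsed:
+
+```ruby
+require 'yaml'
+
+global_option('-c', '--config FILE', 'Load config data for your commands to use') do |file|
+  $config = YAML.load_file(file) # stash it wherever suits your application
+end
+```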
+
+All global options regardless of providing a block are accessible at the command level. This
+means that instead of the following:
+
+```ruby
+global_option('--verbose') { $verbose = true }
+...
+c.action do |args, options|
+  say 'foo' if $verbose
+...
+```
+
+You may:
+
+```ruby
+global_option '--verbose'
+...
+c.action do |args, options|
+  say 'foo' if options.verbose
+...
+```
+
+### Formatters
+
+Two core formatters are currently available, the default `Terminal` formatter
+as well as `TerminalCompact`. To utilize a different formatter simply use
+`:help_formatter` like below:
+
+```ruby
+program :help_formatter, Commander::HelpFormatter::TerminalCompact
+```
+
+Or utilize the help formatter aliases:
+
+```ruby
+program :help_formatter, :compact
+```
+
+This abstraction could be utilized to generate HTML documentation for your executable.
+
+### Tracing
+
+By default the `-t` and `--trace` global options are provided to allow users to get a backtrace to aid debugging.
+
+You can disable these options:
+
+```ruby
+never_trace!
+```
+
+Or make it always on:
+
+```ruby
+always_trace!
+```
+
+## Tips
+
+When adding a global or command option, OptionParser implicitly adds a small
+switch even when not explicitly created, for example `-c` will be the same as
+`--config` in both examples, however `-c` will only appear in the documentation
+when explicitly assigning it.
+
+```ruby
+global_option '-c', '--config FILE'
+global_option '--config FILE'
+```
+
+## ASCII Tables
+
+For feature rich ASCII tables for your terminal app check out the terminal-table gem at https://github.com/tj/terminal-table
+
+    +----------+-------+----+--------+-----------------------+
+    | Terminal | Table | Is | Wicked | Awesome               |
+    +----------+-------+----+--------+-----------------------+
+    |          |       |    |        | get it while its hot! |
+    +----------+-------+----+--------+-----------------------+
+
+## Running Specifications
+
+    $ rake spec
+
+OR
+
+    $ spec --color spec
+
+## Contrib
+
+Feel free to fork and request a pull, or submit a ticket
+https://github.com/commander-rb/commander/issues
+
+## License
+
+This project is available under the MIT license. See LICENSE for details.
diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Rakefile
new file mode 100644
index 0000000..28ec6d0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/Rakefile
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require 'bundler/gem_tasks'
+require 'rspec/core/rake_task'
+require 'rubocop/rake_task'
+
+desc 'Run specs'
+RSpec::Core::RakeTask.new do |t|
+  t.verbose = false
+  t.rspec_opts = '--color --order random'
+end
+
+RuboCop::RakeTask.new
+
+task default: %i[spec rubocop]
diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/bin/commander b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/bin/commander
new file mode 100644
index 0000000..8d75356
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/bin/commander
@@ -0,0 +1,105 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'rubygems'
+require 'commander/import'
+
+program :name, 'commander'
+program :version, Commander::VERSION
+program :description, 'Commander utility program.'
+
+command :init do |c|
+  c.syntax = 'commander init [option] <file>'
+  c.summary = 'Initialize a commander template'
+  c.description = 'Initialize an empty <file> with a commander template,
+  allowing very quick creation of commander executables.'
+ c.example 'Create a new classic style template file.', 'commander init bin/my_executable' + c.example 'Create a new modular style template file.', 'commander init --modular bin/my_executable' + c.option '-m', '--modular', 'Initialize a modular style template' + c.action do |args, options| + file = args.shift || abort('file argument required.') + name = ask 'Machine name of program: ' + description = ask 'Describe your program: ' + commands = ask_for_array 'List the commands you wish to create: ' + begin + if options.modular + File.open(file, 'w') do |f| + f.write <<-"...".gsub!(/^ {10}/, '') + #!/usr/bin/env ruby + + require 'rubygems' + require 'commander' + + class MyApplication + include Commander::Methods + # include whatever modules you need + + def run + program :name, '#{name}' + program :version, '0.0.1' + program :description, '#{description}' + + ... + commands.each do |command| + f.write <<-"...".gsub!(/^ {12}/, '') + command :#{command} do |c| + c.syntax = '#{name} #{command} [options]' + c.summary = '' + c.description = '' + c.example 'description', 'command example' + c.option '--some-switch', 'Some switch that does something' + c.action do |args, options| + # Do something or c.when_called #{name.capitalize}::Commands::#{command.capitalize} + end + end + + ... + end + f.write <<-"...".gsub!(/^ {12}/, '') + run! + end + end + + MyApplication.new.run if $0 == __FILE__ + ... + end + + File.chmod(0755, file) + say "Initialized modular template in #{file}" + else + File.open(file, 'w') do |f| + f.write <<-"...".gsub!(/^ {10}/, '') + #!/usr/bin/env ruby + + require 'rubygems' + require 'commander/import' + + program :name, '#{name}' + program :version, '0.0.1' + program :description, '#{description}' + + ... + commands.each do |command| + f.write <<-"...".gsub!(/^ {12}/, '') + command :#{command} do |c| + c.syntax = '#{name} #{command} [options]' + c.summary = '' + c.description = '' + c.example 'description', 'command example' + c.option '--some-switch', 'Some switch that does something' + c.action do |args, options| + # Do something or c.when_called #{name.capitalize}::Commands::#{command.capitalize} + end + end + + ... + end + end + File.chmod 0755, file + say "Initialized template in #{file}" + end + rescue StandardError => e + abort e + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/commander.gemspec b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/commander.gemspec new file mode 100644 index 0000000..b56d9d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/commander.gemspec @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +$LOAD_PATH.push File.expand_path('lib', __dir__) +require 'commander/version' + +Gem::Specification.new do |s| + s.name = 'commander' + s.version = Commander::VERSION + s.authors = ['TJ Holowaychuk', 'Gabriel Gilder'] + s.email = ['gabriel@gabrielgilder.com'] + s.license = 'MIT' + s.homepage = 'https://github.com/commander-rb/commander' + s.summary = 'The complete solution for Ruby command-line executables' + s.description = 'The complete solution for Ruby command-line executables. Commander bridges the gap between other terminal related libraries you know and love (OptionParser, HighLine), while providing many new features, and an elegant API.' 
+ s.metadata = { + 'bug_tracker_uri' => "#{s.homepage}/issues", + 'changelog_uri' => "#{s.homepage}/blob/master/History.rdoc", + 'documentation_uri' => "https://www.rubydoc.info/gems/commander/#{s.version}", + 'homepage_uri' => s.homepage, + 'source_code_uri' => "#{s.homepage}/tree/v#{s.version}", + } + s.required_ruby_version = '>= 2.4' + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") + s.executables = `git ls-files -- bin/*`.split("\n").map { |f| File.basename(f) } + s.require_paths = ['lib'] + + s.add_runtime_dependency('highline', '~> 2.0.0') + + s.add_development_dependency('rake') + s.add_development_dependency('rspec', '~> 3.2') + s.add_development_dependency('rubocop', '~> 1.12.1') + s.add_development_dependency('simplecov') +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander.rb new file mode 100644 index 0000000..ce2645a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +#-- +# Copyright (c) 2008-2009 TJ Holowaychuk +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+#++ + +require 'highline/import' +require 'commander/version' +require 'commander/blank' +require 'commander/user_interaction' +require 'commander/core_ext' +require 'commander/runner' +require 'commander/command' +require 'commander/help_formatters' +require 'commander/platform' +require 'commander/delegates' +require 'commander/methods' +require 'commander/configure' diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/blank.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/blank.rb new file mode 100644 index 0000000..923ac69 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/blank.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +module Blank + def self.included(base) + base.class_eval do + instance_methods.each { |m| undef_method m unless m =~ /^__|object_id/ } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/command.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/command.rb new file mode 100644 index 0000000..beca205 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/command.rb @@ -0,0 +1,219 @@ +# frozen_string_literal: true + +require 'optparse' + +module Commander + class Command + attr_accessor :name, :examples, :syntax, :description, :summary, :proxy_options, :options + attr_reader :global_options + + ## + # Options struct. + + class Options + include Blank + + def initialize + @table = {} + end + + def __hash__ + @table + end + + def method_missing(meth, *args) + meth.to_s =~ /=$/ ? @table[meth.to_s.chop.to_sym] = args.first : @table[meth] + end + + def default(defaults = {}) + @table = defaults.merge! @table + end + + def inspect + "" + end + end + + ## + # Initialize new command with specified _name_. + + def initialize(name) + @name, @examples, @when_called = name.to_s, [], [] + @options, @proxy_options = [], [] + @global_options = [] + end + + ## + # Add a usage example for this command. + # + # Usage examples are later displayed in help documentation + # created by the help formatters. + # + # === Examples + # + # command :something do |c| + # c.example "Should do something", "my_command something" + # end + # + + def example(description, command) + @examples << [description, command] + end + + ## + # Add an option. + # + # Options are parsed via OptionParser so view it + # for additional usage documentation. A block may optionally be + # passed to handle the option, otherwise the _options_ struct seen below + # contains the results of this option. 
This handles common formats such as: + # + # -h, --help options.help # => bool + # --[no-]feature options.feature # => bool + # --large-switch options.large_switch # => bool + # --file FILE options.file # => file passed + # --list WORDS options.list # => array + # --date [DATE] options.date # => date or nil when optional argument not set + # + # === Examples + # + # command :something do |c| + # c.option '--recursive', 'Do something recursively' + # c.option '--file FILE', 'Specify a file' + # c.option('--info', 'Display info') { puts "handle with block" } + # c.option '--[no-]feature', 'With or without feature' + # c.option '--list FILES', Array, 'List the files specified' + # + # c.when_called do |args, options| + # do_something_recursively if options.recursive + # do_something_with_file options.file if options.file + # end + # end + # + # === Help Formatters + # + # This method also parses the arguments passed in order to determine + # which were switches, and which were descriptions for the + # option which can later be used within help formatters + # using option[:switches] and option[:description]. + # + # === Input Parsing + # + # Since Commander utilizes OptionParser you can pre-parse and evaluate + # option arguments. Simply require 'optparse/time', or 'optparse/date', as these + # objects must respond to #parse. + # + # c.option '--time TIME', Time + # c.option '--date [DATE]', Date + # + + def option(*args, &block) + switches, description = Runner.separate_switches_from_description(*args) + proc = block || option_proc(switches) + @options << { + args: args, + proc: proc, + switches: switches, + description: description, + } + end + + ## + # Handle execution of command. The handler may be a class, + # object, or block (see examples below). + # + # === Examples + # + # # Simple block handling + # c.when_called do |args, options| + # # do something + # end + # + # # Create inst of Something and pass args / options + # c.when_called MyLib::Command::Something + # + # # Create inst of Something and use arbitrary method + # c.when_called MyLib::Command::Something, :some_method + # + # # Pass an object to handle callback (requires method symbol) + # c.when_called SomeObject, :some_method + # + + def when_called(*args, &block) + fail ArgumentError, 'must pass an object, class, or block.' if args.empty? && !block + + @when_called = block ? [block] : args + end + alias action when_called + + ## + # Run the command with _args_. + # + # * parses options, call option blocks + # * invokes when_called proc + # + + def run(*args) + call parse_options_and_call_procs(*args) + end + + #:stopdoc: + + ## + # Parses options and calls associated procs, + # returning the arguments remaining. + + def parse_options_and_call_procs(*args) + return args if args.empty? + + # empty proxy_options before populating via OptionParser + # prevents duplication of options if the command is run twice + proxy_options.clear + @options.each_with_object(OptionParser.new) do |option, opts| + opts.on(*option[:args], &option[:proc]) + opts + end.parse! args + end + + ## + # Call the commands when_called block with _args_. + + def call(args = []) + object, meth = @when_called[0, 2] + meth ||= :call + options = proxy_option_struct + + case object + when Proc then object.call(args, options) + when Class then meth == :call ? 
object.new(args, options) : object.new.send(meth, args, options) + else object&.send(meth, args, options) + end + end + + ## + # Creates an Options instance populated with the option values + # collected by the #option_proc. + + def proxy_option_struct + (global_options + proxy_options).each_with_object(Options.new) do |(option, value), options| + # options that are present will evaluate to true + value = true if value.nil? + options.__send__ :"#{option}=", value + options + end + end + + ## + # Option proxy proc used when a block is not explicitly passed + # via the #option method. This allows commander to auto-populate + # and work with option values. + + def option_proc(switches) + ->(value) { proxy_options << [Runner.switch_to_sym(switches.last), value] } + end + + def inspect + "" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/configure.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/configure.rb new file mode 100644 index 0000000..13a79a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/configure.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module Commander + def configure(*configuration_opts, &configuration_block) + configuration_module = Module.new + configuration_module.extend Commander::Methods + + configuration_module.class_exec(*configuration_opts, &configuration_block) + + configuration_module.class_exec do + run! + end + end + + module_function :configure +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext.rb new file mode 100644 index 0000000..5cd476c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true + +require 'commander/core_ext/array' +require 'commander/core_ext/object' diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/array.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/array.rb new file mode 100644 index 0000000..39b9b89 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/array.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +class Array + ## + # Split _string_ into an array. Used in + # conjunction with HighLine's #ask, or #ask_for_array + # methods, which must respond to #parse. + # + # This method allows escaping of whitespace. For example + # the arguments foo bar\ baz will become ['foo', 'bar baz'] + # + # === Example + # + # # ask invokes Array#parse + # list = ask 'Favorite cookies:', Array + # + # # or use ask_for_CLASS + # list = ask_for_array 'Favorite cookies: ' + # + + def self.parse(string) + # Using reverse + lookahead to work around Ruby 1.8's lack of lookbehind + # TODO: simplify now that we don't support Ruby 1.8 + string.reverse.split(/\s(?!\\)/).reverse.map { |s| s.reverse.gsub('\\ ', ' ') } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/object.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/object.rb new file mode 100644 index 0000000..644dec0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/core_ext/object.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +class Object + ## + # Return the current binding. 
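+  #
+  # === Example
+  #
+  #   ->{}.get_binding # => a Binding for the lambda's own context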
+ + def get_binding + binding + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/delegates.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/delegates.rb new file mode 100644 index 0000000..5b551ee --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/delegates.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Commander + module Delegates + %w( + add_command + command + program + run! + global_option + alias_command + default_command + always_trace! + never_trace! + ).each do |meth| + eval <<-END, binding, __FILE__, __LINE__ + 1 + def #{meth}(*args, &block) + ::Commander::Runner.instance.#{meth}(*args, &block) + end + END + end + + def defined_commands(*args, &block) + ::Commander::Runner.instance.commands(*args, &block) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters.rb new file mode 100644 index 0000000..12cac3a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module Commander + module HelpFormatter + autoload :Base, 'commander/help_formatters/base' + autoload :Terminal, 'commander/help_formatters/terminal' + autoload :TerminalCompact, 'commander/help_formatters/terminal_compact' + + class Context + def initialize(target) + @target = target + end + + def get_binding + @target.instance_eval { binding }.tap do |bind| + decorate_binding(bind) + end + end + + # No-op, override in subclasses. + def decorate_binding(_bind) + end + end + + class ProgramContext < Context + def decorate_binding(bind) + bind.eval("max_command_length = #{max_command_length(bind)}") + bind.eval("max_aliases_length = #{max_aliases_length(bind)}") + end + + def max_command_length(bind) + max_key_length(bind.eval('@commands')) + end + + def max_aliases_length(bind) + max_key_length(bind.eval('@aliases')) + end + + def max_key_length(hash, default = 20) + longest = hash.keys.max_by(&:size) + longest ? longest.size : default + end + end + + module_function + + def indent(amount, text) + text.to_s.gsub("\n", "\n#{' ' * amount}") + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/base.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/base.rb new file mode 100644 index 0000000..d6b3126 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/base.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module Commander + ## + # = Help Formatter + # + # Commander's help formatters control the output when + # either the help command, or --help switch are called. + # The default formatter is Commander::HelpFormatter::Terminal. 
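+  #
+  # A different formatter can be selected when defining the program, e.g.:
+  #
+  #   program :help_formatter, :compact
+  #   program :help_formatter, Commander::HelpFormatter::TerminalCompact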
+ + module HelpFormatter + class Base + def initialize(runner) + @runner = runner + end + + def render + 'Implement global help here' + end + + def render_command(command) + "Implement help for #{command.name} here" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal.rb new file mode 100644 index 0000000..1a42619 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +require 'erb' + +module Commander + module HelpFormatter + class Terminal < Base + def render + template(:help).result(ProgramContext.new(@runner).get_binding) + end + + def render_command(command) + template(:command_help).result(Context.new(command).get_binding) + end + + def template(name) + if RUBY_VERSION < '2.6' + ERB.new(File.read(File.join(File.dirname(__FILE__), 'terminal', "#{name}.erb")), nil, '-') + else + ERB.new(File.read(File.join(File.dirname(__FILE__), 'terminal', "#{name}.erb")), trim_mode: '-') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/command_help.erb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/command_help.erb new file mode 100644 index 0000000..23b15fc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/command_help.erb @@ -0,0 +1,35 @@ + + <%= HighLine.default_instance.color "NAME", :bold %>: + + <%= @name %> +<% if @syntax -%> + + <%= HighLine.default_instance.color "SYNOPSIS", :bold %>: + + <%= @syntax -%> + +<% end -%> + + <%= HighLine.default_instance.color "DESCRIPTION", :bold %>: + + <%= Commander::HelpFormatter.indent 4, (@description || @summary || 'No description.') -%> + +<% unless @examples.empty? -%> + + <%= HighLine.default_instance.color "EXAMPLES", :bold %>: + <% for description, command in @examples -%> + + # <%= description %> + <%= command %> + <% end -%> +<% end -%> +<% unless @options.empty? -%> + + <%= HighLine.default_instance.color "OPTIONS", :bold %>: + <% for option in @options -%> + + <%= option[:switches].join ', ' %> + <%= Commander::HelpFormatter.indent 8, option[:description] %> + <% end -%> +<% end -%> + diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/help.erb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/help.erb new file mode 100644 index 0000000..7758b55 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal/help.erb @@ -0,0 +1,36 @@ + <%= HighLine.default_instance.color "NAME", :bold %>: + + <%= program :name %> + + <%= HighLine.default_instance.color "DESCRIPTION", :bold %>: + + <%= Commander::HelpFormatter.indent 4, program(:description) %> + + <%= HighLine.default_instance.color "COMMANDS", :bold %>: +<% for name, command in @commands.sort -%> + <% unless alias? name %> + <%= "%-#{max_command_length}s %s" % [command.name, command.summary || command.description] -%> + <% end -%> +<% end %> +<% unless @aliases.empty? %> + <%= HighLine.default_instance.color "ALIASES", :bold %>: + <% for alias_name, args in @aliases.sort %> + <%= "%-#{max_aliases_length}s %s %s" % [alias_name, command(alias_name).name, args.join(' ')] -%> + <% end %> +<% end %> +<% unless @options.empty? 
-%> + <%= HighLine.default_instance.color "GLOBAL OPTIONS", :bold %>: + <% for option in @options -%> + + <%= option[:switches].join ', ' %> + <%= option[:description] %> + <% end -%> +<% end -%> +<% if program :help -%> + <% for title, body in program(:help) %> + <%= HighLine.default_instance.color title.to_s.upcase, :bold %>: + + <%= body %> + <% end -%> +<% end -%> + diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact.rb new file mode 100644 index 0000000..6bced60 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require 'erb' + +module Commander + module HelpFormatter + class TerminalCompact < Terminal + def template(name) + if RUBY_VERSION < '2.6' + ERB.new(File.read(File.join(File.dirname(__FILE__), 'terminal_compact', "#{name}.erb")), nil, '-') + else + ERB.new(File.read(File.join(File.dirname(__FILE__), 'terminal_compact', "#{name}.erb")), trim_mode: '-') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/command_help.erb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/command_help.erb new file mode 100644 index 0000000..070935c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/command_help.erb @@ -0,0 +1,27 @@ + + <%= @name %> +<% if @syntax -%> + + Usage: <%= @syntax %> +<% end -%> +<% if @description || @summary -%> + + <%= @description || @summary %> +<% end -%> +<% unless @examples.empty? -%> + + Examples: + <% for description, command in @examples -%> + + # <%= description %> + <%= command %> + <% end -%> +<% end -%> +<% unless @options.empty? -%> + + Options: +<% for option in @options -%> + <%= "%-20s %s" % [option[:switches].join(', '), option[:description]] %> +<% end -%> +<% end -%> + diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/help.erb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/help.erb new file mode 100644 index 0000000..a01a432 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/help_formatters/terminal_compact/help.erb @@ -0,0 +1,29 @@ + <%= program :name %> + + <%= program :description %> + + Commands: +<% for name, command in @commands.sort -%> +<% unless alias? name -%> + <%= "%-#{max_command_length}s %s" % [command.name, command.summary || command.description] %> +<% end -%> +<% end -%> +<% unless @aliases.empty? %> + Aliases: +<% for alias_name, args in @aliases.sort -%> + <%= "%-#{max_aliases_length}s %s %s" % [alias_name, command(alias_name).name, args.join(' ')] %> +<% end -%> +<% end %> +<% unless @options.empty? 
-%> + Global Options: +<% for option in @options -%> + <%= "%-20s %s" % [option[:switches].join(', '), option[:description]] -%> +<% end -%> +<% end -%> +<% if program :help -%> + <% for title, body in program(:help) %> + <%= title %>: + <%= body %> + <% end %> +<% end -%> + diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/import.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/import.rb new file mode 100644 index 0000000..ec24e15 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/import.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +require 'commander' + +include Commander::Methods + +at_exit { run! } diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/methods.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/methods.rb new file mode 100644 index 0000000..c719260 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/methods.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +module Commander + module Methods + include Commander::UI + include Commander::UI::AskForClass + include Commander::Delegates + + if $stdin.tty? && (cols = HighLine.default_instance.output_cols) >= 40 + HighLine.default_instance.wrap_at = cols - 5 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/platform.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/platform.rb new file mode 100644 index 0000000..35b6f43 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/platform.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +module Commander + module Platform + def self.jruby? + defined?(RUBY_ENGINE) && (RUBY_ENGINE == 'jruby') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/runner.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/runner.rb new file mode 100644 index 0000000..0d02bd9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/runner.rb @@ -0,0 +1,462 @@ +# frozen_string_literal: true + +require 'optparse' + +module Commander + class Runner + #-- + # Exceptions + #++ + + class CommandError < StandardError; end + + class InvalidCommandError < CommandError; end + + attr_reader :commands, :options, :help_formatter_aliases + + ## + # Initialize a new command runner. Optionally + # supplying _args_ for mocking, or arbitrary usage. + + def initialize(args = ARGV) + @args, @commands, @aliases, @options = args, {}, {}, [] + @help_formatter_aliases = help_formatter_alias_defaults + @program = program_defaults + @always_trace = false + @never_trace = false + create_default_commands + end + + ## + # Return singleton Runner instance. + + def self.instance + @instance ||= new + end + + ## + # Run command parsing and execution process. + + def run! 
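+      # Registers the built-in -h/--help and -v/--version switches (plus
+      # -t/--trace unless disabled), strips global options out of the args,
+      # then dispatches the active command, converting parse failures into
+      # friendly aborts.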
+ trace = @always_trace || false + require_program :version, :description + trap('INT') { abort program(:int_message) } if program(:int_message) + trap('INT') { program(:int_block).call } if program(:int_block) + global_option('-h', '--help', 'Display help documentation') do + args = @args - %w(-h --help) + command(:help).run(*args) + return + end + global_option('-v', '--version', 'Display version information') do + say version + return + end + global_option('-t', '--trace', 'Display backtrace when an error occurs') { trace = true } unless @never_trace || @always_trace + parse_global_options + remove_global_options options, @args + if trace + run_active_command + else + begin + run_active_command + rescue InvalidCommandError => e + abort "#{e}. Use --help for more information" + rescue \ + OptionParser::InvalidOption, + OptionParser::InvalidArgument, + OptionParser::MissingArgument => e + abort e.to_s + rescue StandardError => e + if @never_trace + abort "error: #{e}." + else + abort "error: #{e}. Use --trace to view backtrace" + end + end + end + end + + ## + # Return program version. + + def version + format('%s %s', program(:name), program(:version)) + end + + ## + # Enable tracing on all executions (bypasses --trace) + + def always_trace! + @always_trace = true + @never_trace = false + end + + ## + # Hide the trace option from the help menus and don't add it as a global option + + def never_trace! + @never_trace = true + @always_trace = false + end + + ## + # Assign program information. + # + # === Examples + # + # # Set data + # program :name, 'Commander' + # program :version, Commander::VERSION + # program :description, 'Commander utility program.' + # program :help, 'Copyright', '2008 TJ Holowaychuk' + # program :help, 'Anything', 'You want' + # program :int_message 'Bye bye!' + # program :help_formatter, :compact + # program :help_formatter, Commander::HelpFormatter::TerminalCompact + # + # # Get data + # program :name # => 'Commander' + # + # === Keys + # + # :version (required) Program version triple, ex: '0.0.1' + # :description (required) Program description + # :name Program name, defaults to basename of executable + # :help_formatter Defaults to Commander::HelpFormatter::Terminal + # :help Allows addition of arbitrary global help blocks + # :help_paging Flag for toggling help paging + # :int_message Message to display when interrupted (CTRL + C) + # + + def program(key, *args, &block) + if key == :help && !args.empty? + @program[:help] ||= {} + @program[:help][args.first] = args.at(1) + elsif key == :help_formatter && !args.empty? + @program[key] = (@help_formatter_aliases[args.first] || args.first) + elsif block + @program[key] = block + else + unless args.empty? + @program[key] = args.count == 1 ? args[0] : args + end + @program[key] + end + end + + ## + # Creates and yields a command instance when a block is passed. + # Otherwise attempts to return the command, raising InvalidCommandError when + # it does not exist. + # + # === Examples + # + # command :my_command do |c| + # c.when_called do |args| + # # Code + # end + # end + # + + def command(name, &block) + yield add_command(Commander::Command.new(name)) if block + @commands[name.to_s] + end + + ## + # Add a global option; follows the same syntax as Command#option + # This would be used for switches such as --version, --trace, etc. 
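+  #
+  # === Examples
+  #
+  #   global_option '--quiet', 'Suppress output'
+  #   global_option('--config FILE') { |file| @config = file }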
+ + def global_option(*args, &block) + switches, description = Runner.separate_switches_from_description(*args) + @options << { + args: args, + proc: block, + switches: switches, + description: description, + } + end + + ## + # Alias command _name_ with _alias_name_. Optionally _args_ may be passed + # as if they were being passed straight to the original command via the command-line. + + def alias_command(alias_name, name, *args) + @commands[alias_name.to_s] = command name + @aliases[alias_name.to_s] = args + end + + ## + # Default command _name_ to be used when no other + # command is found in the arguments. + + def default_command(name) + @default_command = name + end + + ## + # Add a command object to this runner. + + def add_command(command) + @commands[command.name] = command + end + + ## + # Check if command _name_ is an alias. + + def alias?(name) + @aliases.include? name.to_s + end + + ## + # Check if a command _name_ exists. + + def command_exists?(name) + @commands[name.to_s] + end + + #:stopdoc: + + ## + # Get active command within arguments passed to this runner. + + def active_command + @active_command ||= command(command_name_from_args) + end + + ## + # Attempts to locate a command name from within the arguments. + # Supports multi-word commands, using the largest possible match. + # Returns the default command, if no valid commands found in the args. + + def command_name_from_args + @command_name_from_args ||= (longest_valid_command_name_from(@args) || @default_command) + end + + ## + # Returns array of valid command names found within _args_. + + def valid_command_names_from(*args) + remove_global_options options, args + arg_string = args.delete_if { |value| value =~ /^-/ }.join ' ' + commands.keys.find_all { |name| name if arg_string =~ /^#{name}\b/ } + end + + ## + # Help formatter instance. + + def help_formatter + @help_formatter ||= program(:help_formatter).new self + end + + ## + # Return arguments without the command name. + + def args_without_command_name + removed = [] + parts = command_name_from_args.split rescue [] + @args.dup.delete_if do |arg| + removed << arg if parts.include?(arg) && !removed.include?(arg) + end + end + + ## + # Returns hash of help formatter alias defaults. + + def help_formatter_alias_defaults + { + compact: HelpFormatter::TerminalCompact, + } + end + + ## + # Returns hash of program defaults. + + def program_defaults + { + help_formatter: HelpFormatter::Terminal, + name: File.basename($PROGRAM_NAME), + help_paging: true, + } + end + + ## + # Creates default commands such as 'help' which is + # essentially the same as using the --help switch. + + def create_default_commands + command :help do |c| + c.syntax = 'commander help [command]' + c.description = 'Display global or [command] help documentation' + c.example 'Display global help', 'command help' + c.example "Display help for 'foo'", 'command help foo' + c.when_called do |args, _options| + UI.enable_paging if program(:help_paging) + if args.empty? + say help_formatter.render + else + command = command(longest_valid_command_name_from(args)) + begin + require_valid_command command + rescue InvalidCommandError => e + abort "#{e}. Use --help for more information" + end + say help_formatter.render_command(command) + end + end + end + end + + ## + # Raises InvalidCommandError when a _command_ is not found. + + def require_valid_command(command = active_command) + fail InvalidCommandError, 'invalid command', caller if command.nil? + end + + ## + # Removes global _options_ from _args_. 
This prevents an invalid + # option error from occurring when options are parsed + # again for the command. + + def remove_global_options(options, args) + options.each do |option| + switches = option[:switches] + next if switches.empty? + + option_takes_argument = switches.any? { |s| s =~ /[ =]/ } + switches = expand_optionally_negative_switches(switches) + + option_argument_needs_removal = false + args.delete_if do |token| + break if token == '--' + + # Use just the portion of the token before the = when + # comparing switches. + index_of_equals = token.index('=') if option_takes_argument + token = token[0, index_of_equals] if index_of_equals + token_contains_option_argument = !index_of_equals.nil? + + if switches.any? { |s| s[0, token.length] == token } + option_argument_needs_removal = + option_takes_argument && !token_contains_option_argument + true + elsif option_argument_needs_removal && token !~ /^-/ + option_argument_needs_removal = false + true + else + option_argument_needs_removal = false + false + end + end + end + end + + # expand switches of the style '--[no-]blah' into both their + # '--blah' and '--no-blah' variants, so that they can be + # properly detected and removed + def expand_optionally_negative_switches(switches) + switches.reduce([]) do |memo, val| + if val =~ /\[no-\]/ + memo << val.gsub(/\[no-\]/, '') + memo << val.gsub(/\[no-\]/, 'no-') + else + memo << val + end + end + end + + ## + # Parse global command options. + + def parse_global_options + parser = options.inject(OptionParser.new) do |options, option| + options.on(*option[:args], &global_option_proc(option[:switches], &option[:proc])) + end + + options = @args.dup + begin + parser.parse!(options) + rescue OptionParser::InvalidOption => e + # Remove the offending args and retry. + options = options.reject { |o| e.args.include?(o) } + retry + end + end + + ## + # Returns a proc allowing for commands to inherit global options. + # This functionality works whether a block is present for the global + # option or not, so simple switches such as --verbose can be used + # without a block, and used throughout all commands. + + def global_option_proc(switches, &block) + lambda do |value| + unless active_command.nil? + active_command.global_options << [Runner.switch_to_sym(switches.last), value] + end + yield value if block && !value.nil? + end + end + + ## + # Raises a CommandError when the program any of the _keys_ are not present, or empty. + + def require_program(*keys) + keys.each do |key| + fail CommandError, "program #{key} required" if program(key).nil? || program(key).empty? + end + end + + ## + # Return switches and description separated from the _args_ passed. + + def self.separate_switches_from_description(*args) + switches = args.find_all { |arg| arg.to_s =~ /^-/ } + description = args.last if args.last.is_a?(String) && !args.last.match(/^-/) + [switches, description] + end + + ## + # Attempts to generate a method name symbol from +switch+. + # For example: + # + # -h # => :h + # --trace # => :trace + # --some-switch # => :some_switch + # --[no-]feature # => :feature + # --file FILE # => :file + # --list of,things # => :list + # + + def self.switch_to_sym(switch) + switch.scan(/[\-\]](\w+)/).join('_').to_sym rescue nil + end + + ## + # Run the active command. + + def run_active_command + require_valid_command + if alias? 
command_name_from_args + active_command.run(*(@aliases[command_name_from_args.to_s] + args_without_command_name)) + else + active_command.run(*args_without_command_name) + end + end + + def say(*args) #:nodoc: + HighLine.default_instance.say(*args) + end + + private + + ## + # Attempts to locate a command name from within the provided arguments. + # Supports multi-word commands, using the largest possible match. + + def longest_valid_command_name_from(args) + valid_command_names_from(*args.dup).max + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/user_interaction.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/user_interaction.rb new file mode 100644 index 0000000..9d4a896 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/user_interaction.rb @@ -0,0 +1,557 @@ +# frozen_string_literal: true + +require 'tempfile' +require 'shellwords' + +module Commander + ## + # = User Interaction + # + # Commander's user interaction module mixes in common + # methods which extend HighLine's functionality such + # as a #password method rather than calling #ask directly. + + module UI + module_function + + #-- + # Auto include growl when available. + #++ + + begin + require 'growl' + rescue LoadError + # Do nothing + else + include Growl + end + + ## + # Ask the user for a password. Specify a custom + # _message_ other than 'Password: ' or override the + # default _mask_ of '*'. + + def password(message = 'Password: ', mask = '*') + pass = ask(message) { |q| q.echo = mask } + pass = password message, mask if pass.nil? || pass.empty? + pass + end + + ## + # Choose from a set array of _choices_. + + def choose(message = nil, *choices, &block) + say message if message + super(*choices, &block) + end + + ## + # 'Log' an _action_ to the terminal. This is typically used + # for verbose output regarding actions performed. For example: + # + # create path/to/file.rb + # remove path/to/old_file.rb + # remove path/to/old_file2.rb + # + + def log(action, *args) + say format('%15s %s', action, args.join(' ')) + end + + ## + # 'Say' something using the OK color (green). + # + # === Examples + # say_ok 'Everything is fine' + # say_ok 'It is ok', 'This is ok too' + # + + def say_ok(*args) + args.each do |arg| + say HighLine.default_instance.color(arg, :green) + end + end + + ## + # 'Say' something using the WARNING color (yellow). + # + # === Examples + # say_warning 'This is a warning' + # say_warning 'Be careful', 'Think about it' + # + + def say_warning(*args) + args.each do |arg| + say HighLine.default_instance.color(arg, :yellow) + end + end + + ## + # 'Say' something using the ERROR color (red). + # + # === Examples + # say_error 'Everything is not fine' + # say_error 'It is not ok', 'This is not ok too' + # + + def say_error(*args) + args.each do |arg| + say HighLine.default_instance.color(arg, :red) + end + end + + ## + # 'Say' something using the specified color + # + # === Examples + # color 'I am blue', :blue + # color 'I am bold', :bold + # color 'White on Red', :white, :on_red + # + # === Notes + # You may use: + # * color: black blue cyan green magenta red white yellow + # * style: blink bold clear underline + # * highligh: on_ + + def color(*args) + say HighLine.default_instance.color(*args) + end + + ## + # Speak _message_ using _voice_ at a speaking rate of _rate_ + # + # Voice defaults to 'Alex', which is one of the better voices. 
+ # Speaking rate defaults to 175 words per minute + # + # === Examples + # + # speak 'What is your favorite food? ' + # food = ask 'favorite food?: ' + # speak "Wow, I like #{food} too. We have so much in common." + # speak "I like #{food} as well!", "Victoria", 190 + # + # === Notes + # + # * MacOS only + # + + def speak(message, voice = :Alex, rate = 175) + Thread.new { applescript "say #{message.inspect} using #{voice.to_s.inspect} speaking rate #{rate}" } + end + + ## + # Converse with speech recognition. + # + # Currently a "poorman's" DSL to utilize applescript and + # the MacOS speech recognition server. + # + # === Examples + # + # case converse 'What is the best food?', :cookies => 'Cookies', :unknown => 'Nothing' + # when :cookies + # speak 'o.m.g. you are awesome!' + # else + # case converse 'That is lame, shall I convince you cookies are the best?', :yes => 'Ok', :no => 'No', :maybe => 'Maybe another time' + # when :yes + # speak 'Well you see, cookies are just fantastic.' + # else + # speak 'Ok then, bye.' + # end + # end + # + # === Notes + # + # * MacOS only + # + + def converse(prompt, responses = {}) + i, commands = 0, responses.map { |_key, value| value.inspect }.join(',') + statement = responses.inject '' do |inner_statement, (key, value)| + inner_statement << + ( + (i += 1) == 1 ? + %(if response is "#{value}" then\n) : + %(else if response is "#{value}" then\n) + ) << + %(do shell script "echo '#{key}'"\n) + end + applescript( + %( + tell application "SpeechRecognitionServer" + set response to listen for {#{commands}} with prompt "#{prompt}" + #{statement} + end if + end tell + ) + ).strip.to_sym + end + + ## + # Execute apple _script_. + + def applescript(script) + `osascript -e "#{ script.gsub('"', '\"') }"` + end + + ## + # Normalize IO streams, allowing for redirection of + # +input+ and/or +output+, for example: + # + # $ foo # => read from terminal I/O + # $ foo in # => read from 'in' file, output to terminal output stream + # $ foo in out # => read from 'in' file, output to 'out' file + # $ foo < in > out # => equivalent to above (essentially) + # + # Optionally a +block+ may be supplied, in which case + # IO will be reset once the block has executed. + # + # === Examples + # + # command :foo do |c| + # c.syntax = 'foo [input] [output]' + # c.when_called do |args, options| + # # or io(args.shift, args.shift) + # io *args + # str = $stdin.gets + # puts 'input was: ' + str.inspect + # end + # end + # + + def io(input = nil, output = nil, &block) + orig_stdin, orig_stdout = $stdin, $stdout + $stdin = File.new(input) if input + $stdout = File.new(output, 'r+') if output + return unless block + + yield + $stdin, $stdout = orig_stdin, orig_stdout + reset_io + end + + ## + # Find an editor available in path. Optionally supply the _preferred_ + # editor. Returns the name as a string, nil if none is available. + + def available_editor(preferred = nil) + [preferred, ENV['EDITOR'], 'mate -w', 'vim', 'vi', 'emacs', 'nano', 'pico'] + .compact + .find { |name| system("hash #{name.split.first} 2>&-") } + end + + ## + # Prompt an editor for input. Optionally supply initial + # _input_ which is written to the editor. + # + # _preferred_editor_ can be hinted. 
+ # + # === Examples + # + # ask_editor # => prompts EDITOR with no input + # ask_editor('foo') # => prompts EDITOR with default text of 'foo' + # ask_editor('foo', 'mate -w') # => prompts TextMate with default text of 'foo' + # + + def ask_editor(input = nil, preferred_editor = nil) + editor = available_editor preferred_editor + program = Commander::Runner.instance.program(:name).downcase rescue 'commander' + tmpfile = Tempfile.new program + begin + tmpfile.write input if input + tmpfile.close + system("#{editor} #{tmpfile.path.shellescape}") ? IO.read(tmpfile.path) : nil + ensure + tmpfile.unlink + end + end + + ## + # Enable paging of output after called. + + def enable_paging + return unless $stdout.tty? + return unless Process.respond_to? :fork + + read, write = IO.pipe + + # Kernel.fork is not supported on all platforms and configurations. + # As of Ruby 1.9, `Process.respond_to? :fork` should return false on + # configurations that don't support it, but versions before 1.9 don't + # seem to do this reliably and instead raise a NotImplementedError + # (which is rescued below). + + if Kernel.fork + $stdin.reopen read + write.close + read.close + Kernel.select [$stdin] + ENV['LESS'] = 'FSRX' unless ENV.key? 'LESS' + pager = ENV['PAGER'] || 'less' + exec pager rescue exec '/bin/sh', '-c', pager + else + # subprocess + $stdout.reopen write + $stderr.reopen write if $stderr.tty? + write.close + read.close + end + rescue NotImplementedError + ensure + write.close if write && !write.closed? + read.close if read && !read.closed? + end + + ## + # Output progress while iterating _arr_. + # + # === Examples + # + # uris = %w( http://vision-media.ca http://google.com ) + # progress uris, :format => "Remaining: :time_remaining" do |uri| + # res = open uri + # end + # + + def progress(arr, options = {}) + bar = ProgressBar.new arr.length, options + bar.show + arr.each { |v| bar.increment yield(v) } + end + + ## + # Implements ask_for_CLASS methods. + + module AskForClass + DEPRECATED_CONSTANTS = %i[Config TimeoutError MissingSourceFile NIL TRUE FALSE Fixnum Bignum Data].freeze + + # define methods for common classes + [Float, Integer, String, Symbol, Regexp, Array, File, Pathname].each do |klass| + define_method "ask_for_#{klass.to_s.downcase}" do |prompt| + HighLine.default_instance.ask(prompt, klass) + end + end + + def method_missing(method_name, *arguments, &block) + if method_name.to_s =~ /^ask_for_(.*)/ + if arguments.count != 1 + fail ArgumentError, "wrong number of arguments (given #{arguments.count}, expected 1)" + end + + prompt = arguments.first + requested_class = Regexp.last_match[1] + + # All Classes that respond to #parse + # Ignore constants that trigger deprecation warnings + available_classes = (Object.constants - DEPRECATED_CONSTANTS).map do |const| + begin + Object.const_get(const) + rescue RuntimeError + # Rescue errors in Ruby 3 for SortedSet: + # The `SortedSet` class has been extracted from the `set` library. + end + end.compact.select do |const| + const.instance_of?(Class) && const.respond_to?(:parse) + end + + klass = available_classes.find { |k| k.to_s.downcase == requested_class } + if klass + HighLine.default_instance.ask(prompt, klass) + else + super + end + else + super + end + end + + def respond_to_missing?(method_name, include_private = false) + method_name.to_s.start_with?('ask_for_') || super + end + end + + ## + # Substitute _hash_'s keys with their associated values in _str_. 
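+    # e.g. replace_tokens(':title :percent_complete%', title: 'Progress', percent_complete: 50)
+    #      # => 'Progress 50%'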
+ + def replace_tokens(str, hash) #:nodoc: + hash.inject(str) do |string, (key, value)| + string.gsub ":#{key}", value.to_s + end + end + + ## + # = Progress Bar + # + # Terminal progress bar utility. In its most basic form + # requires that the developer specifies when the bar should + # be incremented. Note that a hash of tokens may be passed to + # #increment, (or returned when using Object#progress). + # + # uris = %w( + # http://vision-media.ca + # http://yahoo.com + # http://google.com + # ) + # + # bar = Commander::UI::ProgressBar.new uris.length, options + # threads = [] + # uris.each do |uri| + # threads << Thread.new do + # begin + # res = open uri + # bar.increment :uri => uri + # rescue Exception => e + # bar.increment :uri => "#{uri} failed" + # end + # end + # end + # threads.each { |t| t.join } + # + # The Object method #progress is also available: + # + # progress uris, :width => 10 do |uri| + # res = open uri + # { :uri => uri } # Can now use :uri within :format option + # end + # + + class ProgressBar + ## + # Creates a new progress bar. + # + # === Options + # + # :title Title, defaults to "Progress" + # :width Width of :progress_bar + # :progress_str Progress string, defaults to "=" + # :incomplete_str Incomplete bar string, defaults to '.' + # :format Defaults to ":title |:progress_bar| :percent_complete% complete " + # :tokens Additional tokens replaced within the format string + # :complete_message Defaults to "Process complete" + # + # === Tokens + # + # :title + # :percent_complete + # :progress_bar + # :step + # :steps_remaining + # :total_steps + # :time_elapsed + # :time_remaining + # + + def initialize(total, options = {}) + @total_steps, @step, @start_time = total, 0, Time.now + @title = options.fetch :title, 'Progress' + @width = options.fetch :width, 25 + @progress_str = options.fetch :progress_str, '=' + @incomplete_str = options.fetch :incomplete_str, '.' + @complete_message = options.fetch :complete_message, 'Process complete' + @format = options.fetch :format, ':title |:progress_bar| :percent_complete% complete ' + @tokens = options.fetch :tokens, {} + end + + ## + # Completion percentage. + + def percent_complete + if @total_steps.zero? + 100 + else + @step * 100 / @total_steps + end + end + + ## + # Time that has elapsed since the operation started. + + def time_elapsed + Time.now - @start_time + end + + ## + # Estimated time remaining. + + def time_remaining + (time_elapsed / @step) * steps_remaining + end + + ## + # Number of steps left. + + def steps_remaining + @total_steps - @step + end + + ## + # Formatted progress bar. + + def progress_bar + (@progress_str * (@width * percent_complete / 100)).ljust @width, @incomplete_str + end + + ## + # Generates tokens for this step. + + def generate_tokens + { + title: @title, + percent_complete: percent_complete, + progress_bar: progress_bar, + step: @step, + steps_remaining: steps_remaining, + total_steps: @total_steps, + time_elapsed: format('%0.2fs', time_elapsed), + time_remaining: @step.positive? ? format('%0.2fs', time_remaining) : '', + }.merge! @tokens + end + + ## + # Output the progress bar. + + def show + return if finished? + + erase_line + if completed? + HighLine.default_instance.say UI.replace_tokens(@complete_message, generate_tokens) if @complete_message.is_a? String + else + HighLine.default_instance.say UI.replace_tokens(@format, generate_tokens) << ' ' + end + end + + ## + # Whether or not the operation is complete, and we have finished. + + def finished? 
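+        # One step past the final step: #show has already printed the
+        # completion message, so the next call returns early.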
+ @step == @total_steps + 1 + end + + ## + # Whether or not the operation has completed. + + def completed? + @step == @total_steps + end + + ## + # Increment progress. Optionally pass _tokens_ which + # can be displayed in the output format. + + def increment(tokens = {}) + @step += 1 + @tokens.merge! tokens if tokens.is_a? Hash + show + end + + ## + # Erase previous terminal line. + + def erase_line + # highline does not expose the output stream + HighLine.default_instance.instance_variable_get('@output').print "\r\e[K" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/version.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/version.rb new file mode 100644 index 0000000..b11a309 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/lib/commander/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Commander + VERSION = '4.6.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/command_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/command_spec.rb new file mode 100644 index 0000000..8ccd057 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/command_spec.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Commander::Command do + include Commander::Methods + + before :each do + mock_terminal + create_test_command + end + + describe 'Options' do + before :each do + @options = Commander::Command::Options.new + end + + it 'should act like an open struct' do + @options.send = 'mail' + @options.call = true + expect(@options.send).to eq('mail') + expect(@options.call).to eq(true) + end + + it 'should allow __send__ to function as always' do + @options.send = 'foo' + expect(@options.__send__(:send)).to eq('foo') + end + end + + describe '#option' do + it 'should add options' do + expect { @command.option '--recursive' }.to change(@command.options, :length).from(1).to(2) + end + + it 'should allow procs as option handlers' do + @command.option('--recursive') { |recursive| expect(recursive).to be true } + @command.run '--recursive' + end + + it 'should allow usage of common method names' do + @command.option '--open file' + @command.when_called { |_, options| expect(options.open).to eq('foo') } + @command.run '--open', 'foo' + end + end + + describe '#run' do + describe 'should invoke #when_called' do + it 'with arguments seperated from options' do + @command.when_called { |args, _options| expect(args.join(' ')).to eq('just some args') } + @command.run '--verbose', 'just', 'some', 'args' + end + + it 'calling the #call method by default when an object is called' do + object = double 'Object' + expect(object).to receive(:call).once + @command.when_called object + @command.run 'foo' + end + + it 'should allow #action as an alias to #when_called' do + object = double 'Object' + expect(object).to receive(:call).once + @command.action object + @command.run 'foo' + end + + it 'calling an arbitrary method when an object is called' do + object = double 'Object' + expect(object).to receive(:foo).once + @command.when_called object, :foo + @command.run 'foo' + end + + it 'should raise an error when no handler is present' do + expect { @command.when_called }.to raise_error(ArgumentError) + end + + it 'should be able to be run more than once' do + expect(@command.run('once')).to eql('test once') + expect(@command.run('twice')).to eql('test twice') + end + + it 'should not accumulate entries in @proxy_options when run twice' do + 
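+        # parse_options_and_call_procs clears proxy_options before each run,
+        # so the second invocation must not see entries from the first.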
expect(@command.run('--verbose')).to eql('test ') + expect(@command.proxy_options).to eq([[:verbose, true]]) + expect(@command.run('foo')).to eql('test foo') + expect(@command.proxy_options).to eq([]) + end + end + + describe 'should populate options with' do + it 'boolean values' do + @command.option '--[no-]toggle' + @command.when_called { |_, options| expect(options.toggle).to be true } + @command.run '--toggle' + @command.when_called { |_, options| expect(options.toggle).to be false } + @command.run '--no-toggle' + end + + it 'mandatory arguments' do + @command.option '--file FILE' + @command.when_called { |_, options| expect(options.file).to eq('foo') } + @command.run '--file', 'foo' + expect { @command.run '--file' }.to raise_error(OptionParser::MissingArgument) + end + + describe 'optional arguments' do + before do + @command.option '--use-config [file] ' + end + + it 'should return the argument when provided' do + @command.when_called { |_, options| expect(options.use_config).to eq('foo') } + @command.run '--use-config', 'foo' + end + + it 'should return true when present without an argument' do + @command.when_called { |_, options| expect(options.use_config).to be true } + @command.run '--use-config' + end + + it 'should return nil when not present' do + @command.when_called { |_, options| expect(options.use_config).to be_nil } + @command.run + end + end + + describe 'typed arguments' do + before do + @command.option '--interval N', Integer + end + + it 'should parse valid values' do + @command.when_called { |_, options| expect(options.interval).to eq(5) } + @command.run '--interval', '5' + end + + it 'should reject invalid values' do + expect { @command.run '--interval', 'invalid' }.to raise_error(OptionParser::InvalidArgument) + end + end + + it 'lists' do + @command.option '--fav COLORS', Array + @command.when_called { |_, options| expect(options.fav).to eq(%w(red green blue)) } + @command.run '--fav', 'red,green,blue' + end + + it 'lists with multi-word items' do + @command.option '--fav MOVIES', Array + @command.when_called { |_, options| expect(options.fav).to eq(['super\ bad', 'nightmare']) } + @command.run '--fav', 'super\ bad,nightmare' + end + + it 'defaults' do + @command.option '--files LIST', Array + @command.option '--interval N', Integer + @command.when_called do |_, options| + options.default \ + files: %w(foo bar), + interval: 5 + expect(options.files).to eq(%w(foo bar)) + expect(options.interval).to eq(15) + end + @command.run '--interval', '15' + end + + describe 'given a global option' do + before do + @command.global_options << [:global_option, 'gvalue'] + end + + describe 'and no command specific arguments' do + it 'provides the global option to the command action' do + @command.when_called { |_, options| expect(options.global_option).to eq('gvalue') } + @command.run + end + end + + describe 'and a command specific option' do + it 'provides the global option to the command action' do + @command.when_called { |_, options| expect(options.global_option).to eq('gvalue') } + @command.run '--verbose' + end + end + + describe 'and a command specific argument' do + it 'provides the global option to the command action' do + @command.when_called { |_, options| expect(options.global_option).to eq('gvalue') } + @command.run 'argument' + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/configure_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/configure_spec.rb new file mode 100644 index 0000000..7b75cf0 --- 
/dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/configure_spec.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'commander/configure' + +describe Commander do + describe '.configure' do + it 'calls the given block' do + expect { Commander.configure { throw :block_called } }.to throw_symbol(:block_called) + end + + describe 'called block' do + before(:each) do + allow(Commander::Runner.instance).to receive(:run!) + end + + it 'provides Commander configuration methods' do + Commander.configure do + program :name, 'test' + end + + expect(Commander::Runner.instance.program(:name)).to eq('test') + end + + it 'passes all arguments to the block' do + Commander.configure('foo') do |first_arg| + program :name, first_arg + end + + expect(Commander::Runner.instance.program(:name)).to eq('foo') + end + end + + it 'calls Runner#run! after calling the configuration block' do + expect(Commander::Runner.instance).to receive(:run!) + Commander.configure {} + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/array_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/array_spec.rb new file mode 100644 index 0000000..417d262 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/array_spec.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Array do + describe '#parse' do + it 'should seperate a list of words into an array' do + expect(Array.parse('just a test')).to eq(%w(just a test)) + end + + it 'should preserve escaped whitespace' do + expect(Array.parse('just a\ test')).to eq(['just', 'a test']) + end + + it 'should match %w behavior with multiple backslashes' do + str = 'just a\\ test' + expect(Array.parse(str)).to eq(['just', 'a test']) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/object_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/object_spec.rb new file mode 100644 index 0000000..a079ca0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/core_ext/object_spec.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Object do + describe '#get_binding' do + it 'should return the objects binding' do + expect(-> {}.get_binding).to be_instance_of(Binding) + end + end + + describe '#method_missing' do + it 'should preserve its original behavior for missing methods' do + expect { send(:i_am_a_missing_method) }.to raise_error(NoMethodError) + end + + it 'should preserve its original behavior for missing variables' do + expect { i_am_a_missing_variable }.to raise_error(NameError) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_compact_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_compact_spec.rb new file mode 100644 index 0000000..8882c07 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_compact_spec.rb @@ -0,0 +1,71 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Commander::HelpFormatter::TerminalCompact do + include Commander::Methods + + before :each do + mock_terminal + end + + describe 'global help' do + before :each do + new_command_runner 'help' do + program :help_formatter, :compact + command :'install gem' do |c| + c.syntax = 'foo install gem [options]' + c.summary = 'Install some gem' + end + end.run! 
+ @global_help = @output.string + end + + describe 'should display' do + it 'the command name' do + expect(@global_help).to include('install gem') + end + + it 'the summary' do + expect(@global_help).to include('Install some gem') + end + end + end + + describe 'command help' do + before :each do + new_command_runner 'help', 'install', 'gem' do + program :help_formatter, :compact + command :'install gem' do |c| + c.syntax = 'foo install gem [options]' + c.summary = 'Install some gem' + c.description = 'Install some gem, blah blah blah' + c.example 'one', 'two' + c.example 'three', 'four' + end + end.run! + @command_help = @output.string + end + + describe 'should display' do + it 'the command name' do + expect(@command_help).to include('install gem') + end + + it 'the description' do + expect(@command_help).to include('Install some gem, blah blah blah') + end + + it 'all examples' do + expect(@command_help).to include('# one') + expect(@command_help).to include('two') + expect(@command_help).to include('# three') + expect(@command_help).to include('four') + end + + it 'the syntax' do + expect(@command_help).to include('foo install gem [options]') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_spec.rb new file mode 100644 index 0000000..9c733a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/help_formatters/terminal_spec.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Commander::HelpFormatter::Terminal do + include Commander::Methods + + before :each do + mock_terminal + end + + describe 'global help' do + before :each do + new_command_runner 'help' do + command :'install gem' do |c| + c.syntax = 'foo install gem [options]' + c.summary = 'Install some gem' + end + end.run! + @global_help = @output.string + end + + describe 'should display' do + it 'the command name' do + expect(@global_help).to include('install gem') + end + + it 'the summary' do + expect(@global_help).to include('Install some gem') + end + end + end + + describe 'command help' do + before :each do + new_command_runner 'help', 'install', 'gem' do + command :'install gem' do |c| + c.syntax = 'foo install gem [options]' + c.summary = 'Install some gem' + c.description = 'Install some gem, blah blah blah' + c.example 'one', 'two' + c.example 'three', 'four' + end + end.run! 
+ @command_help = @output.string + end + + describe 'should display' do + it 'the command name' do + expect(@command_help).to include('install gem') + end + + it 'the description' do + expect(@command_help).to include('Install some gem, blah blah blah') + end + + it 'all examples' do + expect(@command_help).to include('# one') + expect(@command_help).to include('two') + expect(@command_help).to include('# three') + expect(@command_help).to include('four') + end + + it 'the syntax' do + expect(@command_help).to include('foo install gem [options]') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/methods_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/methods_spec.rb new file mode 100644 index 0000000..d729299 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/methods_spec.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'commander/methods' + +describe Commander::Methods do + it 'includes Commander::UI' do + expect(subject.ancestors).to include(Commander::UI) + end + + describe 'AskForClass' do + it 'includes Commander::UI::AskForClass' do + expect(subject.ancestors).to include(Commander::UI::AskForClass) + end + + describe 'defining methods' do + let(:terminal) { double } + + before do + allow(terminal).to receive(:ask) + @old_highline = HighLine.default_instance + HighLine.default_instance = terminal + end + + after do + HighLine.default_instance = @old_highline + end + + subject do + Class.new do + include Commander::UI::AskForClass + end.new + end + + it 'defines common "ask_for_*" methods' do + expect(subject.respond_to?(:ask_for_float)).to be_truthy + end + + it 'responds to "ask_for_*" methods for classes that implement #parse' do + expect(subject.respond_to?(:ask_for_datetime)).to be_truthy + end + + it 'fails "ask_for_*" method invocations without a prompt' do + expect do + subject.ask_for_datetime + end.to raise_error(ArgumentError) + end + + it 'implements "ask_for_*"' do + expect(terminal).to receive(:ask) + subject.ask_for_datetime('hi') + end + end + end + + it 'includes Commander::Delegates' do + expect(subject.ancestors).to include(Commander::Delegates) + end + + it 'does not change the Object ancestors' do + expect(Object.ancestors).not_to include(Commander::UI) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/runner_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/runner_spec.rb new file mode 100644 index 0000000..b64bc32 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/runner_spec.rb @@ -0,0 +1,761 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Commander do + include Commander::Methods + + before :each do + $stderr = StringIO.new + mock_terminal + create_test_command + end + + describe '#program' do + it 'should set / get program information' do + program :name, 'test' + expect(program(:name)).to eq('test') + end + + it 'should allow arbitrary blocks of global help documentation' do + program :help, 'Copyright', 'TJ Holowaychuk' + expect(program(:help)['Copyright']).to eq('TJ Holowaychuk') + end + + it 'should raise an error when required info has not been set' do + new_command_runner '--help' + program :version, '' + expect { run! 
}.to raise_error(Commander::Runner::CommandError) + end + + it 'should allow aliases of help formatters' do + program :help_formatter, :compact + expect(program(:help_formatter)).to eq(Commander::HelpFormatter::TerminalCompact) + end + end + + describe '#command' do + it 'should return a command instance when only the name is passed' do + expect(command(:test)).to be_instance_of(Commander::Command) + end + + it 'should return nil when the command does not exist' do + expect(command(:im_not_real)).to be_nil + end + end + + describe '#separate_switches_from_description' do + it 'should seperate switches and description returning both' do + switches, description = *Commander::Runner.separate_switches_from_description('-h', '--help', 'display help') + expect(switches).to eq(['-h', '--help']) + expect(description).to eq('display help') + end + end + + describe '#switch_to_sym' do + it 'should return a symbol based on the switch name' do + expect(Commander::Runner.switch_to_sym('--trace')).to eq(:trace) + expect(Commander::Runner.switch_to_sym('--foo-bar')).to eq(:foo_bar) + expect(Commander::Runner.switch_to_sym('--[no-]feature"')).to eq(:feature) + expect(Commander::Runner.switch_to_sym('--[no-]feature ARG')).to eq(:feature) + expect(Commander::Runner.switch_to_sym('--file [ARG]')).to eq(:file) + expect(Commander::Runner.switch_to_sym('--colors colors')).to eq(:colors) + end + end + + describe '#alias_command' do + it 'should alias a command' do + alias_command :foo, :test + expect(command(:foo)).to eq(command(:test)) + end + + it 'should pass arguments passed to the alias when called' do + gem_name = '' + new_command_runner 'install', 'gem', 'commander' do + command :install do |c| + c.option '--gem-name NAME', 'Install a gem' + c.when_called { |_, options| gem_name = options.gem_name } + end + alias_command :'install gem', :install, '--gem-name' + end.run! + expect(gem_name).to eq('commander') + end + end + + describe '#global_option' do + it 'should be invoked when used in the args list' do + file = '' + new_command_runner 'test', '--config', 'foo' do + global_option('--config FILE') { |f| file = f } + end.run! + expect(file).to eq('foo') + end + + it 'should be inherited by commands' do + quiet = nil + new_command_runner 'foo', '--quiet' do + global_option('--quiet', 'Suppress output') + command :foo do |c| + c.when_called { |_, options| quiet = options.quiet } + end + end.run! + expect(quiet).to be true + end + + it 'should be inherited by commands when provided before the command name' do + option = nil + new_command_runner '--global-option', 'option-value', 'command_name' do + global_option('--global-option=GLOBAL', 'A global option') + command :command_name do |c| + c.when_called { |_, options| option = options.global_option } + end + end.run! + expect(option).to eq('option-value') + end + + it 'should be inherited by commands even when a block is present' do + quiet = nil + new_command_runner 'foo', '--quiet' do + global_option('--quiet', 'Suppress output') {} + command :foo do |c| + c.when_called { |_, options| quiet = options.quiet } + end + end.run! + expect(quiet).to be true + end + + it 'should be inherited by commands when the positive form of a [no-] option' do + quiet = nil + new_command_runner 'foo', '--quiet' do + global_option('--[no-]quiet', 'Suppress output') {} + command :foo do |c| + c.when_called { |_, options| quiet = options.quiet } + end + end.run! 
+ expect(quiet).to be true + end + + it 'should be inherited by commands when the negative form of a [no-] option' do + quiet = nil + new_command_runner 'foo', '--no-quiet' do + global_option('--[no-]quiet', 'Suppress output') {} + command :foo do |c| + c.when_called { |_, options| quiet = options.quiet } + end + end.run! + expect(quiet).to be false + end + + it 'should allow command arguments before the global option' do + config = nil + args = nil + new_command_runner 'foo', '--config', 'config-value', 'arg1', 'arg2' do + global_option('-c', '--config CONFIG', String) + command :foo do |c| + c.when_called do |arguments, options| + options.default(config: 'default') + args = arguments + config = options.config + end + end + end.run! + expect(config).to eq('config-value') + expect(args).to eq(%w(arg1 arg2)) + end + + it 'should allow command arguments after the global option' do + config = nil + args = nil + new_command_runner 'foo', 'arg1', 'arg2', '--config', 'config-value' do + global_option('-c', '--config CONFIG', String) + command :foo do |c| + c.when_called do |arguments, options| + options.default(config: 'default') + args = arguments + config = options.config + end + end + end.run! + expect(config).to eq('config-value') + expect(args).to eq(%w(arg1 arg2)) + end + + it 'allows global options in the form option=value' do + config = nil + args = nil + new_command_runner 'test', 'arg1', '--config=config-value', 'arg2' do + global_option('-c', '--config CONFIG', String) + command :test do |c| + c.when_called do |arguments, options| + options.default(config: 'default') + args = arguments + config = options.config + end + end + end.run! + expect(config).to eq('config-value') + expect(args).to eq(%w[arg1 arg2]) + end + end + + describe '#parse_global_options' do + it 'should parse global options before command' do + global_option = nil + new_command_runner('--testing-global', 'foo') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.when_called {} + end + end.run! + expect(global_option).to eq('MAGIC') + end + + it 'should parse global options after command' do + global_option = nil + new_command_runner('foo', '--testing-global') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.when_called {} + end + end.run! + expect(global_option).to eq('MAGIC') + end + + it 'should parse global options placed before command options' do + global_option = nil + new_command_runner('foo', '--testing-global', '--testing-command') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.option('--testing-command') {} + c.when_called {} + end + end.run! + + expect(global_option).to eq('MAGIC') + end + + it 'should parse global options placed after command options' do + global_option = nil + new_command_runner('foo', '--testing-command', '--testing-global') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.option('--testing-command') {} + c.when_called {} + end + end.run! + + expect(global_option).to eq('MAGIC') + end + + it 'should parse global options surrounded by command options' do + global_option = nil + new_command_runner('foo', '--testing-command', '--testing-global', '--other-command') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.option('--testing-command') {} + c.option('--other-command') {} + c.when_called {} + end + end.run! 
+ + expect(global_option).to eq('MAGIC') + end + + it 'should not parse command options' do + global_option = nil + command_option = nil + new_command_runner('foo', '--testing-command', '--testing-global') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.option('--testing-command') { command_option = 'NO!' } + c.when_called {} + end + end.parse_global_options + + expect(command_option).to be_nil + expect(global_option).to eq('MAGIC') + end + + it 'should not affect command arguments with values' do + global_option = nil + command_option = nil + new_command_runner('foo', '--testing-command', 'bar', '--testing-global') do + global_option('--testing-global') { global_option = 'MAGIC' } + + command :foo do |c| + c.option('--testing-command VALUE') { |v| command_option = v } + c.when_called {} + end + end.run! + + expect(command_option).to eq('bar') + expect(global_option).to eq('MAGIC') + end + + it 'should not affect global arguments with values' do + global_option = nil + new_command_runner('foo', '--testing-command', '--testing-global', 'bar') do + global_option('--testing-global VALUE') { |v| global_option = v } + + command :foo do |c| + c.option('--testing-command') {} + c.when_called {} + end + end.run! + + expect(global_option).to eq('bar') + end + + it 'should allow global arguments with values before command arguments (github issue #8)' do + global_option = nil + command_option = nil + new_command_runner('foo', '--config', 'path', 'bar') do + global_option('--config VALUE') { |v| global_option = v } + + command :foo do |c| + c.option('bar') { command_option = 'bar' } + c.when_called {} + end + end.run! + + expect(global_option).to eq('path') + expect(command_option).to eq('bar') + end + end + + describe '#remove_global_options' do + it 'should remove only specified switches' do + options, args = [], [] + options << { switches: ['-t', '--trace'] } + options << { switches: ['--help'] } + options << { switches: ['--paths PATHS'] } + args << '-t' + args << '--help' + args << '--command' + args << '--command-with-arg' << 'rawr' + args << '--paths' << '"lib/**/*.js","spec/**/*.js"' + command_runner.remove_global_options options, args + expect(args).to eq(['--command', '--command-with-arg', 'rawr']) + end + + it 'should not swallow an argument unless it expects an argument' do + options, args = [], [] + options << { switches: ['-n', '--no-arg'] } + options << { switches: ['-y', '--yes ARG'] } + options << { switches: ['-a', '--alternative=ARG'] } + args << '-n' << 'alpha' + args << '--yes' << 'deleted' + args << '-a' << 'deleted' + args << 'beta' + command_runner.remove_global_options options, args + expect(args).to eq(%w(alpha beta)) + end + + it 'should remove a switch that is the positive form of the [no-] option' do + options, args = [], [] + options << { switches: ['-g', '--[no-]good'] } + options << { switches: ['-y', '--yes ARG'] } + options << { switches: ['-a', '--alternative=ARG'] } + args << '--good' << 'alpha' + args << '--yes' << 'deleted' + args << '-a' << 'deleted' + args << 'beta' + command_runner.remove_global_options options, args + expect(args).to eq(%w(alpha beta)) + end + + it 'should remove a switch that is the negative form of the [no-] option' do + options, args = [], [] + options << { switches: ['-g', '--[no-]good'] } + options << { switches: ['-y', '--yes ARG'] } + options << { switches: ['-a', '--alternative=ARG'] } + args << '--no-good' << 'alpha' + args << '--yes' << 'deleted' + args << '-a' << 'deleted' + args << 
'beta' + command_runner.remove_global_options options, args + expect(args).to eq(%w(alpha beta)) + end + + it 'should not remove options that start with a global option name' do + options, args = [], [] + options << { switches: ['-v', '--version'] } + args << '--versionCode' << 'something' + command_runner.remove_global_options options, args + expect(args).to eq(%w(--versionCode something)) + end + + it 'should remove specified switches value provided via equals' do + options = [{ switches: ['--global GLOBAL'] }] + args = ['--command', '--global=option-value', 'arg'] + command_runner.remove_global_options options, args + expect(args).to eq(['--command', 'arg']) + end + + it 'should not remove extra values after switches' do + options = [{ switches: ['--global GLOBAL'] }] + args = ['--global', '--command', 'arg'] + command_runner.remove_global_options options, args + expect(args).to eq(['--command', 'arg']) + end + end + + describe '--trace' do + it 'should display pretty errors by default' do + expect do + new_command_runner 'foo' do + command(:foo) { |c| c.when_called { fail 'cookies!' } } + end.run! + end.to raise_error(TestSystemExit, /error: cookies!. Use --trace/) + end + + it 'should display callstack when using this switch' do + expect do + new_command_runner 'foo', '--trace' do + command(:foo) { |c| c.when_called { fail 'cookies!' } } + end.run! + end.to raise_error(RuntimeError) + end + end + + describe '#always_trace!' do + it 'should enable tracing globally, regardless of whether --trace was passed or not' do + expect do + new_command_runner 'foo' do + always_trace! + command(:foo) { |c| c.when_called { fail 'cookies!' } } + end.run! + end.to raise_error(RuntimeError) + end + end + + describe '#never_trace!' do + it 'should disable tracing globally, regardless of whether --trace was passed or not' do + expect do + new_command_runner 'help', '--trace' do + never_trace! + end.run! + end.to raise_error(TestSystemExit, /invalid option: --trace/) + end + + it 'should not prompt to use --trace switch on errors' do + msg = nil + begin + new_command_runner 'foo' do + never_trace! + command(:foo) { |c| c.when_called { fail 'cookies!' } } + end.run! + rescue TestSystemExit => e + msg = e.message + end + expect(msg).to match(/error: cookies!/) + expect(msg).not_to match(/--trace/) + end + end + + context 'conflict between #always_trace! and #never_trace!' do + it 'respects the last used command' do + expect do + new_command_runner 'foo' do + never_trace! + always_trace! + command(:foo) { |c| c.when_called { fail 'cookies!' } } + end.run! + end.to raise_error(RuntimeError) + end + end + + describe '--version' do + it 'should output program version' do + expect(run('--version')).to eq("test 1.2.3\n") + end + end + + describe '--help' do + it 'should not output an invalid command message' do + expect(run('--help')).not_to eq("invalid command. 
Use --help for more information\n") + end + + it 'can be used before or after the command and options' do + expect(run('test', '--help')).to eq("Implement help for test here\n") + end + + it 'can be used after the command and command arguments' do + expect(run('test', 'command-arg', '--help')).to eq("Implement help for test here\n") + end + + it 'can be used before a single-word command with command arguments' do + expect(run('help', 'test', 'command-arg')).to eq("Implement help for test here\n") + end + + it 'can be used before a multi-word command with command arguments' do + expect( + run('help', 'module', 'install', 'command-arg') do + command('module install') { |c| c.when_called { say 'whee!' } } + end + ).to eq("Implement help for module install here\n") + end + + describe 'help_paging program information' do + it 'enables paging when enabled' do + run('--help') { program :help_paging, true } + expect(Commander::UI).to have_received(:enable_paging) + end + + it 'is enabled by default' do + run('--help') + expect(Commander::UI).to have_received(:enable_paging) + end + + it 'does not enable paging when disabled' do + run('--help') { program :help_paging, false } + expect(Commander::UI).not_to have_received(:enable_paging) + end + end + end + + describe 'with invalid options' do + it 'should output an invalid option message' do + expect do + run('test', '--invalid-option') + end.to raise_error(TestSystemExit, /invalid option: --invalid-option/) + end + end + + describe 'with invalid command passed' do + it 'should output an invalid command message' do + expect do + run('foo') + end.to raise_error(TestSystemExit, /invalid command. Use --help for more information/) + end + end + + describe 'with invalid command passed to help' do + it 'should output an invalid command message' do + expect do + run('help', 'does_not_exist') + end.to raise_error(TestSystemExit, /invalid command. Use --help for more information/) + end + end + + describe 'with invalid command passed to --help' do + it 'should output an invalid command message' do + expect do + run('--help', 'does_not_exist') + end.to raise_error(TestSystemExit, /invalid command. 
Use --help for more information/) + end + end + + describe 'with invalid option passed to --help' do + it 'should output an invalid option message' do + expect do + run('--help', 'test', '--invalid-option') + end.to raise_error(TestSystemExit, /invalid option: --invalid-option/) + end + end + + describe '#valid_command_names_from' do + it 'should return array of valid command names' do + new_command_runner do + command('foo bar') {} + command('foo bar foo') {} + expect(command_runner.valid_command_names_from('foo', 'bar', 'foo').sort).to eq(['foo bar', 'foo bar foo']) + end + end + + it 'should return empty array when no possible command names exist' do + new_command_runner do + expect(command_runner.valid_command_names_from('fake', 'command', 'name')).to eq([]) + end + end + + it 'should match exact commands only' do + new_command_runner do + command('foo') {} + expect(command_runner.valid_command_names_from('foobar')).to eq([]) + end + end + end + + describe '#command_name_from_args' do + it 'should locate command within arbitrary arguments passed' do + new_command_runner '--help', '--arbitrary', 'test' + expect(command_runner.command_name_from_args).to eq('test') + end + + it 'should locate command when provided after a global argument with value' do + new_command_runner '--global-option', 'option-value', 'test' do + global_option('--global-option=GLOBAL', 'A global option') + end + expect(command_runner.command_name_from_args).to eq('test') + end + + it 'should support multi-word commands' do + new_command_runner '--help', '--arbitrary', 'some', 'long', 'command', 'foo' + command('some long command') {} + expect(command_runner.command_name_from_args).to eq('some long command') + end + + it 'should match the longest possible command' do + new_command_runner '--help', '--arbitrary', 'foo', 'bar', 'foo' + command('foo bar') {} + command('foo bar foo') {} + expect(command_runner.command_name_from_args).to eq('foo bar foo') + end + + it 'should use the left-most command name when multiple are present' do + new_command_runner 'help', 'test' + expect(command_runner.command_name_from_args).to eq('help') + end + end + + describe '#active_command' do + it 'should resolve the active command' do + new_command_runner '--help', 'test' + expect(command_runner.active_command).to be_instance_of(Commander::Command) + end + + it 'should resolve active command when invalid options are passed' do + new_command_runner '--help', 'test', '--arbitrary' + expect(command_runner.active_command).to be_instance_of(Commander::Command) + end + + it 'should return nil when the command is not found' do + new_command_runner 'foo' + expect(command_runner.active_command).to be_nil + end + end + + describe '#default_command' do + it 'should allow you to default any command when one is not explicitly passed' do + new_command_runner '--trace' do + default_command :test + expect(command(:test)).to receive(:run).once + expect(command_runner.active_command).to eq(command(:test)) + end.run! + end + + it 'should not prevent other commands from being called' do + new_command_runner 'foo', 'bar', '--trace' do + default_command :test + command(:'foo bar') {} + expect(command(:'foo bar')).to receive(:run).once + expect(command_runner.active_command).to eq(command(:'foo bar')) + end.run! 
+ end + + it 'should not prevent longer commands from using the same words as the default' do + new_command_runner 'foo', 'bar', 'something' + default_command :'foo bar' + command(:'foo bar') {} + command(:'foo bar something') {} + expect(command_runner.active_command).to eq(command(:'foo bar something')) + end + + it 'should allow defaulting of command aliases' do + new_command_runner '--trace' do + default_command :foobar + alias_command :foobar, :test + expect(command(:test)).to receive(:run).once + end.run! + end + end + + describe 'should function correctly' do + it 'when options are passed before the command name' do + new_command_runner '--verbose', 'test', 'foo', 'bar' do + @command.when_called do |args, options| + expect(args).to eq(%w(foo bar)) + expect(options.verbose).to be true + end + end.run! + end + + it 'when options are passed after the command name' do + new_command_runner 'test', '--verbose', 'foo', 'bar' do + @command.when_called do |args, options| + expect(args).to eq(%w(foo bar)) + expect(options.verbose).to be true + end + end.run! + end + + it 'when an argument passed is the same name as the command' do + new_command_runner 'test', '--verbose', 'foo', 'test', 'bar' do + @command.when_called do |args, options| + expect(args).to eq(%w(foo test bar)) + expect(options.verbose).to be true + end + end.run! + end + + it 'when using multi-word commands' do + new_command_runner '--verbose', 'my', 'command', 'something', 'foo', 'bar' do + command('my command') { |c| c.option('--verbose') } + expect(command_runner.command_name_from_args).to eq('my command') + expect(command_runner.args_without_command_name).to eq(['--verbose', 'something', 'foo', 'bar']) + end.run! + end + + it 'when using multi-word commands with parts of the command name as arguments' do + new_command_runner '--verbose', 'my', 'command', 'something', 'my', 'command' do + command('my command') { |c| c.option('--verbose') } + expect(command_runner.command_name_from_args).to eq('my command') + expect(command_runner.args_without_command_name).to eq(['--verbose', 'something', 'my', 'command']) + end.run! + end + + it 'when using multi-word commands with other commands using the same words' do + new_command_runner '--verbose', 'my', 'command', 'something', 'my', 'command' do + command('my command') {} + command('my command something') { |c| c.option('--verbose') } + expect(command_runner.command_name_from_args).to eq('my command something') + expect(command_runner.args_without_command_name).to eq(['--verbose', 'my', 'command']) + end.run! + end + end + + describe 'options with optional arguments' do + it 'should return the argument when it is specified' do + new_command_runner 'foo', '--optional', 'arg1' do + command('foo') do |c| + c.option('--optional [argument]') + c.when_called do |_, options| + expect(options.optional).to eq('arg1') + end + end + end.run! + end + + it 'should return true when no argument is specified for the option' do + new_command_runner 'foo', '--optional' do + command('foo') do |c| + c.option('--optional [argument]') + c.when_called do |_, options| + expect(options.optional).to be true + end + end + end.run! + end + end + + describe 'with double dash' do + it 'should interpret the remainder as arguments' do + new_command_runner 'foo', '--', '-x' do + command('foo') do |c| + c.option '-x', 'Switch' + c.when_called do |args, options| + expect(args).to eq(%w(-x)) + expect(options.x).to be_nil + end + end + end.run!
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/spec_helper.rb new file mode 100644 index 0000000..97ee4f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/spec_helper.rb @@ -0,0 +1,89 @@ +# frozen_string_literal: true + +require 'rubygems' +require 'stringio' +require 'simplecov' +SimpleCov.start do + add_filter '/spec/' +end + +# Unshift so that local files load instead of something in gems +$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../lib" + +# This basically replicates the behavior of `require 'commander/import'` +# but without adding an `at_exit` hook, which interferes with exit codes +require 'commander' +require 'commander/methods' + +# Mock terminal IO streams so we can spec against them +def mock_terminal + @input = StringIO.new + @output = StringIO.new + HighLine.default_instance = HighLine.new(@input, @output) +end + +# Stub Kernel.abort +TestSystemExit = Class.new(RuntimeError) +module Commander + class Runner + def abort(message) + fail TestSystemExit, message + end + end +end + +# Create test command for usage within several specs + +def create_test_command + command :test do |c| + c.syntax = 'test [options] <file>' + c.description = 'test description' + c.example 'description', 'command' + c.example 'description 2', 'command 2' + c.option '-v', '--verbose', 'verbose description' + c.when_called do |args, _options| + format('test %s', args.join) + end + end + @command = command :test +end + +# Create a new command runner + +def new_command_runner(*args, &block) + Commander::Runner.instance_variable_set :@instance, Commander::Runner.new(args) + program :name, 'test' + program :version, '1.2.3' + program :description, 'something' + create_test_command + yield if block + Commander::Runner.instance +end + +# Comply with how specs were previously written + +def command_runner + Commander::Runner.instance +end + +def run(*args) + new_command_runner(*args) do + program :help_formatter, Commander::HelpFormatter::Base + yield if block_given? + end.run!
+ @output.string +end + +RSpec.configure do |c| + c.expect_with(:rspec) do |e| + e.syntax = :expect + end + + c.mock_with(:rspec) do |m| + m.syntax = :expect + end + + c.before(:each) do + allow(Commander::UI).to receive(:enable_paging) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/ui_spec.rb b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/ui_spec.rb new file mode 100644 index 0000000..6ad2cac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/commander-4.6.0/spec/ui_spec.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe Commander::UI do + include Commander::Methods + + describe '.replace_tokens' do + it 'should replace tokens within a string, with hash values' do + result = Commander::UI.replace_tokens 'Welcome :name, enjoy your :object', name: 'TJ', object: 'cookie' + expect(result).to eq('Welcome TJ, enjoy your cookie') + end + end + + describe 'progress' do + it 'should not die on an empty list' do + exception = false + begin + progress([]) {} + rescue StandardError + exception = true + end + expect(exception).not_to be true + end + end + + describe '.available_editor' do + it 'should not fail on available editors with shell arguments' do + expect(Commander::UI.available_editor('sh -c')).to eq('sh -c') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/CHANGELOG.md new file mode 100644 index 0000000..17a2a64 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/CHANGELOG.md @@ -0,0 +1,561 @@ +## Current + +## Release v1.2.2 (24 Feb 2023) + +* (#993) Fix arguments passed to `Concurrent::Map`'s `default_proc`. + +## Release v1.2.1 (24 Feb 2023) + +* (#990) Add missing `require 'fiber'` for `FiberLocalVar`. +* (#989) Optimize `Concurrent::Map#[]` on CRuby by letting the backing Hash handle the `default_proc`. + +## Release v1.2.0 (23 Jan 2023) + +* (#962) Fix ReentrantReadWriteLock to use the same granularity for locals as for the Mutex it uses.
+* (#983) Add FiberLocalVar +* (#934) concurrent-ruby now supports requiring individual classes (public classes listed in the docs), e.g., `require 'concurrent/map'` +* (#976) Let `Promises.any_fulfilled_future` take an `Event` +* Improve documentation of various classes +* (#975) Set the Ruby compatibility version at 2.3 +* (#972) Remove Rubinius-related code + +## Release v1.1.10 (22 Mar 2022) + +concurrent-ruby: + +* (#951) Set the Ruby compatibility version at 2.2 +* (#939, #933) The `caller_runs` fallback policy no longer blocks reads from the job queue by worker threads +* (#938, #761, #652) You can now explicitly `prune_pool` a thread pool (Sylvain Joyeux) +* (#937, #757, #670) We switched the Yahoo stock API for demos to Alpha Vantage (Gustavo Caso) +* (#932, #931) We changed how `SafeTaskExecutor` handles local jump errors (Aaron Jensen) +* (#927) You can use keyword arguments in your initialize when using `Async` (Matt Larraz) +* (#926, #639) We removed timeout from `TimerTask` because it wasn't sound, and now it's a no-op with a warning (Jacob Atzen) +* (#919) If you double-lock a re-entrant read-write lock, we promote to locked for writing (zp yuan) +* (#915) `monotonic_time` now accepts an optional unit parameter, as Ruby's `clock_gettime` (Jean Boussier) + +## Release v1.1.9 (5 Jun 2021) + +concurrent-ruby: + +* (#866) Child promise state not set to :pending immediately after #execute when parent has completed +* (#905, #872) Fix RubyNonConcurrentPriorityQueue#delete method +* (2df0337d) Make sure locks are not shared when objects are dup/cloned +* (#900, #906, #796, #847, #911) Fix Concurrent::Set thread-safety issues on CRuby +* (#907) Add new ConcurrentMap backend for TruffleRuby + +## Release v1.1.8 (20 January 2021) + +concurrent-ruby: + +* (#885) Fix race condition in TVar for stale reads +* (#884) RubyThreadLocalVar: Do not iterate over hash which might conflict with new pair addition + +## Release v1.1.7 (6 August 2020) + +concurrent-ruby: + +* (#879) Consider falsy value on `Concurrent::Map#compute_if_absent` for fast non-blocking path +* (#876) Reset Async queue on forking, makes Async fork-safe +* (#856) Avoid running problematic code in RubyThreadLocalVar on MRI that occasionally results in segfault +* (#853) Introduce ThreadPoolExecutor without a Queue + +## Release v1.1.6, edge v0.6.0 (10 Feb 2020) + +concurrent-ruby: + +* (#841) Concurrent.disable_at_exit_handlers! is no longer needed and was deprecated. +* (#841) AbstractExecutorService#auto_terminate= was deprecated and has no effect. + Set :auto_terminate option instead when executor is initialized.
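+
+A minimal sketch of the `:auto_terminate` change noted above, assuming a standard
+`Concurrent::FixedThreadPool`: the option is now chosen when the executor is
+constructed rather than set afterwards.
+
+```ruby
+require 'concurrent'
+
+# Set :auto_terminate at construction time; calling auto_terminate=
+# later is deprecated and has no effect.
+pool = Concurrent::FixedThreadPool.new(2, auto_terminate: false)
+
+pool.post { puts 'work' }
+pool.shutdown
+pool.wait_for_termination
+```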
+ +## Release v1.1.6.pre1, edge v0.6.0.pre1 (26 Jan 2020) + +concurrent-ruby: + +* (#828) Allow to name executors, the name is also used to name their threads +* (#838) Implement #dup and #clone for structs +* (#821) Safer finalizers for thread local variables +* Documentation fixes +* (#814) Use Ruby's Etc.nprocessors if available +* (#812) Fix directory structure not to mess with packaging tools +* (#840) Fix termination of pools on JRuby + +concurrent-ruby-edge: + +* Add WrappingExecutor (#830) + +## Release v1.1.5, edge v0.5.0 (10 Mar 2019) + +concurrent-ruby: + +* fix potential leak of context on JRuby and Java 7 + +concurrent-ruby-edge: + +* Add finalized Concurrent::Cancellation +* Add finalized Concurrent::Throttle +* Add finalized Concurrent::Promises::Channel +* Add new Concurrent::ErlangActor + +## Release v1.1.4 (14 Dec 2018) + +* (#780) Remove java_alias of 'submit' method of Runnable to let executor service work on java 11 +* (#776) Fix NameError on defining a struct with a name which is already taken in an ancestor + +## Release v1.1.3 (7 Nov 2018) + +* (#775) fix partial require of the gem (although not officially supported) + +## Release v1.1.2 (6 Nov 2018) + +* (#773) more defensive 1.9.3 support + +## Release v1.1.1, edge v0.4.1 (1 Nov 2018) + +* (#768) add support for 1.9.3 back + +## Release v1.1.0, edge v0.4.0 (31 Oct 2018) (yanked) + +* (#768) yanked because of issues with removed 1.9.3 support + +## Release v1.1.0.pre2, edge v0.4.0.pre2 (18 Sep 2018) + +concurrent-ruby: + +* fixed documentation and README links +* fix Set for TruffleRuby and Rubinius +* use properly supported TruffleRuby APIs + +concurrent-ruby-edge: + +* add Promises.zip_futures_over_on + +## Release v1.1.0.pre1, edge v0.4.0.pre1 (15 Aug 2018) + +concurrent-ruby: + +* requires at least Ruby 2.0 +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/1.1.0/Concurrent/Promises.html) + are moved from `concurrent-ruby-edge` to `concurrent-ruby` +* Add support for TruffleRuby + * (#734) Fix Array/Hash/Set construction broken on TruffleRuby + * AtomicReference fixed +* CI stabilization +* remove sharp dependency edge -> core +* remove warnings +* documentation updates +* Exchanger is no longer documented as edge since it was already available in + `concurrent-ruby` +* (#644) Fix Map#each and #each_pair not returning enumerator outside of MRI +* (#659) Edge promises fail during error handling +* (#741) Raise on recursive Delay#value call +* (#727) #717 fix global IO executor on JRuby +* (#740) Drop support for CRuby 1.9, JRuby 1.7, Rubinius. +* (#737) Move AtomicMarkableReference out of Edge +* (#708) Prefer platform specific memory barriers +* (#735) Fix wrong expected exception in channel spec assertion +* (#729) Allow executor option in `Promise#then` +* (#725) fix timeout check to use timeout_interval +* (#719) update engine detection +* (#660) Add specs for Promise#zip/Promise.zip ordering +* (#654) Promise.zip execution changes +* (#666) Add thread safe set implementation +* (#651) #699 #to_s, #inspect should not output negative object IDs.
+* (#685) Avoid RSpec warnings about raise_error +* (#680) Avoid RSpec monkey patching, persist spec results locally, use RSpec + v3.7.0 +* (#665) Initialize the monitor for new subarrays on Rubinius +* (#661) Fix error handling in edge promises + +concurrent-ruby-edge: + +* (#659) Edge promises fail during error handling +* Edge files clearly separated in `lib-edge` +* added ReInclude + +## Release v1.0.5, edge v0.3.1 (26 Feb 2017) + +concurrent-ruby: + +* Documentation for Event and Semaphore +* Use Unsafe#fullFence and #loadFence directly since the shortcuts were removed in JRuby +* Do not depend on org.jruby.util.unsafe.UnsafeHolder + +concurrent-ruby-edge: + +* (#620) Actors on Pool raise an error +* (#624) Delayed promises did not interact correctly with flatting + * Fix arguments yielded by callback methods +* Overridable default executor in promises factory methods +* Asking actor to terminate will always resolve to `true` + +## Release v1.0.4, edge v0.3.0 (27 Dec 2016) + +concurrent-ruby: + +* Nothing + +concurrent-ruby-edge: + +* New promises' API renamed, lots of improvements, edge bumped to 0.3.0 + * **Incompatible** with previous 0.2.3 version + * see https://github.com/ruby-concurrency/concurrent-ruby/pull/522 + +## Release v1.0.3 (17 Dec 2016) + +* Trigger execution of flattened delayed futures +* Avoid forking for processor_count if possible +* Semaphore Mutex and JRuby parity +* Adds Map#each as alias to Map#each_pair +* Fix uninitialized instance variables +* Make Fixnum, Bignum merger ready +* Allows Promise#then to receive an executor +* TimerSet now survives a fork +* Reject promise on any exception +* Allow ThreadLocalVar to be initialized with a block +* Support Alpha with `Concurrent::processor_count` +* Fixes format-security error when compiling ruby_193_compatible.h +* Concurrent::Atom#swap fixed: reraise the exceptions from block + +## Release v1.0.2 (2 May 2016) + +* Fix bug with `Concurrent::Map` MRI backend `#inspect` method +* Fix bug with `Concurrent::Map` MRI backend using `Hash#value?` +* Improved documentation and examples +* Minor updates to Edge + +## Release v1.0.1 (27 February 2016) + +* Fix "uninitialized constant Concurrent::ReentrantReadWriteLock" error. +* Better handling of `autoload` vs. `require`. +* Improved API for Edge `Future` zipping. +* Fix reference leak in Edge `Future` constructor. +* Fix bug which prevented thread pools from surviving a `fork`. +* Fix bug in which `TimerTask` did not correctly specify all its dependencies. +* Improved support for JRuby+Truffle +* Improved error messages. +* Improved documentation. +* Updated README and CONTRIBUTING. + +## Release v1.0.0 (13 November 2015) + +* Rename `attr_volatile_with_cas` to `attr_atomic` +* Add `clear_each` to `LockFreeStack` +* Update `AtomicReference` documentation +* Further updates and improvements to the synchronization layer. +* Performance and memory usage improvements with `Actor` logging. +* Fixed `ThreadPoolExecutor` task count methods. +* Improved `Async` performance for both short and long-lived objects. +* Fixed bug in `LockFreeLinkedSet`. +* Fixed bug in which `Agent#await` triggered a validation failure. +* Further `Channel` updates. +* Adopted a project Code of Conduct +* Cleared interpreter warnings +* Improved Java extension loading +* Handle Exception children in Edge::Future +* Continued improvements to channel
+* Shared constants now in `lib/concurrent/constants.rb` +* Refactored many tests. +* Improved synchronization layer/memory model documentation. +* Bug fix in Edge `Future#flat` +* Brand new `Channel` implementation in Edge gem. +* Simplification of `RubySingleThreadExecutor` +* `Async` improvements + - Each object uses its own `SingleThreadExecutor` instead of the global thread pool. + - No longer supports executor injection + - Much better documentation +* `Atom` updates + - No longer `Dereferenceable` + - Now `Observable` + - Added a `#reset` method +* Brand new `Agent` API and implementation. Now functionally equivalent to Clojure. +* Continued improvements to the synchronization layer +* Merged in the `thread_safe` gem + - `Concurrent::Array` + - `Concurrent::Hash` + - `Concurrent::Map` (formerly ThreadSafe::Cache) + - `Concurrent::Tuple` +* Minor improvements to Concurrent::Map +* Complete rewrite of `Exchanger` +* Removed all deprecated code (classes, methods, constants, etc.) +* Updated Agent, MutexAtomic, and BufferedChannel to inherit from Synchronization::Object. +* Many improved tests +* Some internal reorganization + +## Release v0.9.1 (09 August 2015) + +* Fixed a Rubinius bug in synchronization object +* Fixed all interpreter warnings (except circular references) +* Fixed require statements when requiring `Atom` alone +* Significantly improved `ThreadLocalVar` on non-JRuby platforms +* Fixed error handling in Edge `Concurrent.zip` +* `AtomicFixnum` methods `#increment` and `#decrement` now support optional delta +* New `AtomicFixnum#update` method +* Minor optimizations in `ReadWriteLock` +* New `ReentrantReadWriteLock` class +* `ThreadLocalVar#bind` method is now public +* Refactored many tests + +## Release v0.9.0 (10 July 2015) + +* Updated `AtomicReference` + - `AtomicReference#try_update` now simply returns instead of raising an exception + - `AtomicReference#try_update!` was added to raise exceptions if an update + fails. Note: this is the same behavior as the old `try_update` +* Pure Java implementations of + - `AtomicBoolean` + - `AtomicFixnum` + - `Semaphore` +* Fixed bug when pruning Ruby thread pools +* Fixed bug in time calculations within `ScheduledTask` +* Default `count` in `CountDownLatch` to 1 +* Use monotonic clock for all timers via `Concurrent.monotonic_time` + - Use `Process.clock_gettime(Process::CLOCK_MONOTONIC)` when available + - Fallback to `java.lang.System.nanoTime()` on unsupported JRuby versions + - Pure Ruby implementation for everything else + - Affects `Concurrent.timer`, `Concurrent.timeout`, `TimerSet`, `TimerTask`, and `ScheduledTask` +* Deprecated all clock-time based timer scheduling + - Only support scheduling by delay + - Affects `Concurrent.timer`, `TimerSet`, and `ScheduledTask` +* Added new `ReadWriteLock` class +* Consistent `at_exit` behavior for Java and Ruby thread pools.
+* Added `at_exit` handler to Ruby thread pools (already in Java thread pools) + - Ruby handler stores the object id and retrieves from `ObjectSpace` + - JRuby disables `ObjectSpace` by default so that handler stores the object reference +* Added a `:stop_on_exit` option to thread pools to enable/disable `at_exit` handler +* Updated thread pool docs to better explain shutting down thread pools +* Simpler `:executor` option syntax for all abstractions which support this option +* Added `Executor#auto_terminate?` predicate method (for thread pools) +* Added `at_exit` handler to `TimerSet` +* Simplified auto-termination of the global executors + - Can now disable auto-termination of global executors + - Added shutdown/kill/wait_for_termination variants for global executors +* Can now disable auto-termination for *all* executors (the nuclear option) +* Deprecated terms "task pool" and "operation pool" + - New terms are "io executor" and "fast executor" + - New functions added with new names + - Deprecation warnings added to functions referencing old names +* Moved all thread pool related functions from `Concurrent::Configuration` to `Concurrent` + - Old functions still exist with deprecation warnings + - New functions have updated names as appropriate +* All high-level abstractions default to the "io executor" +* Fixed bug in `Actor` causing it to prematurely warm global thread pools on gem load + - This also fixed a `RejectedExecutionError` bug when running with minitest/autorun via JRuby +* Moved global logger up to the `Concurrent` namespace and refactored the code +* Optimized the performance of `Delay` + - Fixed a bug in which no executor option on construction caused block execution on a global thread pool +* Numerous improvements and bug fixes to `TimerSet` +* Fixed deadlock of `Future` when the handler raises Exception +* Added shared specs for more classes +* New concurrency abstractions including: + - `Atom` + - `Maybe` + - `ImmutableStruct` + - `MutableStruct` + - `SettableStruct` +* Created an Edge gem for unstable abstractions including + - `Actor` + - `Agent` + - `Channel` + - `Exchanger` + - `LazyRegister` + - **new Future Framework** - unified + implementation of Futures and Promises which combines features of the previous `Future`, + `Promise`, `IVar`, `Event`, `Probe`, `dataflow`, `Delay`, `TimerTask` into a single framework. It extensively uses the + new synchronization layer to make all the paths **lock-free** with the exception of blocking threads on `#wait`. + It offers better performance and does not block threads when not required. +* Actor framework changes: + - fixed reset loop in Pool + - Pool can use any actor as a worker, abstract worker class is no longer needed. + - Actor events now have format `[:event_name, *payload]` instead of just the Symbol. + - Actor now uses new Future/Promise Framework instead of `IVar` for better interoperability + - Behaviour definition array was simplified to `[BehaviourClass1, [BehaviourClass2, *initialization_args]]` + - Linking behavior responds to :linked message by returning array of linked actors + - Supervised behavior is removed in favour of just Linking + - RestartingContext is supervised by default now, `supervise: true` is not required any more + - Events can be private and public; so far the only difference is that Linking will + pass only public messages to linked actors.
Adding private :restarting and + :resetting events which are sent before the actor restarts or resets, allowing + callbacks to clean up current child actors. + - Also print object_id in Reference#to_s + - Add AbstractContext#default_executor to be able to override executor class-wide + - Add basic IO example + - Documentation somewhat improved + - All messages should have the same priority. It's now possible to send `actor << job1 << job2 << :terminate!` and + be sure that both jobs are processed first. +* Refactored `Channel` to use newer synchronization objects +* Added `#reset` and `#cancel` methods to `TimerSet` +* Added `#cancel` method to `Future` and `ScheduledTask` +* Refactored `TimerSet` to use `ScheduledTask` +* Updated `Async` with a factory that initializes the object +* Deprecated `Concurrent.timer` and `Concurrent.timeout` +* Reduced max threads on pure-Ruby thread pools (abends around 14751 threads) +* Moved many private/internal classes/modules into "namespace" modules +* Removed brute-force killing of threads in tests +* Fixed a thread pool bug when the operating system cannot allocate more threads + +## Release v0.8.0 (25 January 2015) + +* C extensions for MRI have been extracted into the `concurrent-ruby-ext` companion gem. + Please see the README for more detail. +* Better variable isolation in `Promise` and `Future` via an `:args` option +* Continued to update intermittently failing tests + +## Release v0.7.2 (24 January 2015) + +* New `Semaphore` class based on [java.util.concurrent.Semaphore](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Semaphore.html) +* New `Promise.all?` and `Promise.any?` class methods +* Renamed `:overflow_policy` on thread pools to `:fallback_policy` +* Thread pools still accept the `:overflow_policy` option but display a warning +* Thread pools now implement `fallback_policy` behavior when not running (rather than universally rejecting tasks) +* Fixed minor `set_deref_options` constructor bug in `Promise` class +* Fixed minor `require` bug in `ThreadLocalVar` class +* Fixed race condition bug in `TimerSet` class +* Fixed signal bug in `TimerSet#post` method +* Numerous non-functional updates to clear warnings when running in debug mode +* Fixed more intermittently failing tests +* Tests now run on new Travis build environment +* Multiple documentation updates + +## Release v0.7.1 (4 December 2014) + +Please see the [roadmap](https://github.com/ruby-concurrency/concurrent-ruby/issues/142) for more information on the next planned release.
+ +* Added `flat_map` method to `Promise` +* Added `zip` method to `Promise` +* Fixed bug with logging in `Actor` +* Improvements to `Promise` tests +* Removed actor-experimental warning +* Added an `IndirectImmediateExecutor` class +* Allow disabling auto termination of global executors +* Fix thread leaking in `ThreadLocalVar` (uses `Ref` gem on non-JRuby systems) +* Fix thread leaking when pruning pure-Ruby thread pools +* Prevent `Actor` from using an `ImmediateExecutor` (causes deadlock) +* Added missing synchronizations to `TimerSet` +* Fixed bug with return value of `Concurrent::Actor::Utils::Pool#ask` +* Fixed timing bug in `TimerTask` +* Fixed bug when creating a `JavaThreadPoolExecutor` with minimum pool size of zero +* Removed confusing warning when not using native extensions +* Improved documentation + +## Release v0.7.0 (13 August 2014) + +* Merge the [atomic](https://github.com/ruby-concurrency/atomic) gem + - Pure Ruby `MutexAtomic` atomic reference class + - Platform native atomic reference classes `CAtomic`, `JavaAtomic`, and `RbxAtomic` + - Automated [build process](https://github.com/ruby-concurrency/rake-compiler-dev-box) + - Fat binary releases for [multiple platforms](https://rubygems.org/gems/concurrent-ruby/versions) including Windows (32/64), Linux (32/64), OS X (64-bit), Solaris (64-bit), and JRuby +* C native `CAtomicBoolean` +* C native `CAtomicFixnum` +* Refactored intermittently failing tests +* Added `dataflow!` and `dataflow_with!` methods to match `Future#value!` method +* Better handling of timeout in `Agent` +* Actor Improvements + - Fine-grained implementation using a chain of behaviors. Each behavior is responsible for a single aspect like: `Termination`, `Pausing`, `Linking`, `Supervising`, etc. Users can create custom Actors easily based on their needs. + - Supervision was added. `RestartingContext` will pause on error waiting on its supervisor to decide what to do next (options are `:terminate!`, `:resume!`, `:reset!`, `:restart!`). Supervising behavior also supports strategies `:one_for_one` and `:one_for_all`. + - Linking was added to be able to monitor an actor's events like: `:terminated`, `:paused`, `:restarted`, etc. + - Dead letter routing added. Rejected envelopes are collected in a configurable actor (default: `Concurrent::Actor.root.ask!(:dead_letter_routing)`) + - Old `Actor` class removed and replaced by new implementation previously called `Actress`. `Actress` was kept as an alias for `Actor` to keep compatibility. + - `Utils::Broadcast` actor which allows the publish–subscribe pattern.
+* More executors for managing serialized operations + - `SerializedExecution` mixin module + - `SerializedExecutionDelegator` for serializing *any* executor +* Updated `Async` with serialized execution +* Updated `ImmediateExecutor` and `PerThreadExecutor` with full executor service lifecycle +* Added a `Delay` to root `Actress` initialization +* Minor bug fixes to thread pools +* Refactored many intermittently failing specs +* Removed Java interop warning `executor.rb:148 warning: ambiguous Java methods found, using submit(java.lang.Runnable)` +* Fixed minor bug in `RubyCachedThreadPool` overflow policy +* Updated tests to use [RSpec 3.0](http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3) +* Removed deprecated `Actor` class +* Better support for Rubinius + +## Release v0.6.1 (14 June 2014) + +* Many improvements to `Concurrent::Actress` +* Bug fixes to `Concurrent::RubyThreadPoolExecutor` +* Fixed several brittle tests +* Moved documentation to http://ruby-concurrency.github.io/concurrent-ruby/frames.html + +## Release v0.6.0 (25 May 2014) + +* Added `Concurrent::Observable` to encapsulate our thread safe observer sets +* Improvements to new `Channel` +* Major improvements to `CachedThreadPool` and `FixedThreadPool` +* Added `SingleThreadExecutor` +* Added `Concurrent::timer` function +* Added `TimerSet` executor +* Added `AtomicBoolean` +* `ScheduledTask` refactoring +* Pure Ruby and JRuby-optimized `PriorityQueue` classes +* Updated `Agent` behavior to more closely match Clojure +* Observer sets support block callbacks to the `add_observer` method +* New algorithm for thread creation in `RubyThreadPoolExecutor` +* Minor API updates to `Event` +* Rewritten `TimerTask` now an `Executor` instead of a `Runnable` +* Fixed many brittle specs +* Renamed `FixedThreadPool` and `CachedThreadPool` to `RubyFixedThreadPool` and `RubyCachedThreadPool` +* Created JRuby optimized `JavaFixedThreadPool` and `JavaCachedThreadPool` +* Consolidated fixed thread pool tests into `spec/concurrent/fixed_thread_pool_shared.rb` and `spec/concurrent/cached_thread_pool_shared.rb` +* `FixedThreadPool` now subclasses `RubyFixedThreadPool` or `JavaFixedThreadPool` as appropriate +* `CachedThreadPool` now subclasses `RubyCachedThreadPool` or `JavaCachedThreadPool` as appropriate +* New `Delay` class +* `Concurrent::processor_count` helper function +* New `Async` module +* Renamed `NullThreadPool` to `PerThreadExecutor` +* Deprecated `Channel` (we are planning a new implementation based on [Go](http://golangtutorials.blogspot.com/2011/06/channels-in-go.html)) +* Added gem-level [configuration](http://robots.thoughtbot.com/mygem-configure-block) +* Deprecated `$GLOBAL_THREAD_POOL` in lieu of gem-level configuration +* Removed support for Ruby [1.9.2](https://www.ruby-lang.org/en/news/2013/12/17/maintenance-of-1-8-7-and-1-9-2/) +* New `RubyThreadPoolExecutor` and `JavaThreadPoolExecutor` classes +* All thread pools now extend the appropriate thread pool executor classes +* All thread pools now support `:overflow_policy` (based on Java's [reject policies](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html)) +* Deprecated `UsesGlobalThreadPool` in lieu of explicit `:executor` option (dependency injection) on `Future`, `Promise`, and `Agent` +* Added `Concurrent::dataflow_with(executor, *inputs)` method to support executor dependency injection for dataflow +* Software transactional memory with `TVar` and `Concurrent::atomically` +* First implementation of [new,
high-performance](https://github.com/ruby-concurrency/concurrent-ruby/pull/49) `Channel` +* `Actor` is deprecated in favor of a new experimental actor implementation [#73](https://github.com/ruby-concurrency/concurrent-ruby/pull/73). To avoid a namespace collision it lives in the `Actress` namespace until `Actor` is removed in the next release. + +## Release v0.5.0 + +This is the most significant release of this gem since its inception. This release includes many improvements and optimizations. It also includes several bug fixes. The major areas of focus for this release were: + +* Stability improvements on Ruby versions with thread-level parallelism ([JRuby](http://jruby.org/) and [Rubinius](http://rubini.us/)) +* Creation of new low-level concurrency abstractions +* Internal refactoring to use the new low-level abstractions + +Most of these updates had no effect on the gem API. There are a few notable exceptions which were unavoidable. Please read the [release notes](API-Updates-in-v0.5.0) for more information. + +Specific changes include: + +* New class `IVar` +* New class `MVar` +* New class `ThreadLocalVar` +* New class `AtomicFixnum` +* New class method `dataflow` +* New class `Condition` +* New class `CountDownLatch` +* New class `DependencyCounter` +* New class `SafeTaskExecutor` +* New class `CopyOnNotifyObserverSet` +* New class `CopyOnWriteObserverSet` +* `Future` updated with `execute` API +* `ScheduledTask` updated with `execute` API +* New `Promise` API +* `Future` now extends `IVar` +* `Postable#post?` now returns an `IVar` +* Thread safety fixes to `Dereferenceable` +* Thread safety fixes to `Obligation` +* Thread safety fixes to `Supervisor` +* Thread safety fixes to `Event` +* Various other thread safety (race condition) fixes +* Refactored brittle tests +* Implemented pending tests +* Added JRuby and Rubinius as Travis CI build targets +* Added [CodeClimate](https://codeclimate.com/) code review +* Improved YARD documentation diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Gemfile b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Gemfile new file mode 100644 index 0000000..b336031 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Gemfile @@ -0,0 +1,36 @@ +source 'https://rubygems.org' + +require File.join(File.dirname(__FILE__), 'lib/concurrent-ruby/concurrent/version') +require File.join(File.dirname(__FILE__), 'lib/concurrent-ruby-edge/concurrent/edge/version') + +no_path = ENV['NO_PATH'] +options = no_path ? {} : { path: '.'
} + +gem 'concurrent-ruby', Concurrent::VERSION, options +gem 'concurrent-ruby-edge', Concurrent::EDGE_VERSION, options +gem 'concurrent-ruby-ext', Concurrent::VERSION, options.merge(platform: :mri) + +group :development do + gem 'rake', '~> 13.0' + gem 'rake-compiler', '~> 1.0', '>= 1.0.7' + gem 'rake-compiler-dock', '~> 1.0' + gem 'pry', '~> 0.11', platforms: :mri +end + +group :documentation, optional: true do + gem 'yard', '~> 0.9.0', require: false + gem 'redcarpet', '~> 3.0', platforms: :mri # understands github markdown + gem 'md-ruby-eval', '~> 0.6' +end + +group :testing do + gem 'rspec', '~> 3.7' + gem 'timecop', '~> 0.9' + gem 'sigdump', require: false +end + +# made opt-in since it will not install on jruby 1.7 +group :coverage, optional: !ENV['COVERAGE'] do + gem 'simplecov', '~> 0.16.0', require: false + gem 'coveralls', '~> 0.8.2', require: false +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/LICENSE.txt new file mode 100644 index 0000000..1026f28 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) Jerry D'Antonio -- released under the MIT license. + +http://www.opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/README.md b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/README.md new file mode 100644 index 0000000..15f011b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/README.md @@ -0,0 +1,405 @@ +# Concurrent Ruby + +[![Gem Version](https://badge.fury.io/rb/concurrent-ruby.svg)](http://badge.fury.io/rb/concurrent-ruby) +[![License](https://img.shields.io/badge/license-MIT-green.svg)](http://opensource.org/licenses/MIT) +[![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-devs%20%26%20users-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) + +Modern concurrency tools for Ruby. 
Inspired by +[Erlang](http://www.erlang.org/doc/reference_manual/processes.html), +[Clojure](http://clojure.org/concurrent_programming), +[Scala](http://akka.io/), +[Haskell](http://www.haskell.org/haskellwiki/Applications_and_libraries/Concurrency_and_parallelism#Concurrent_Haskell), +[F#](http://blogs.msdn.com/b/dsyme/archive/2010/02/15/async-and-parallel-design-patterns-in-f-part-3-agents.aspx), +[C#](http://msdn.microsoft.com/en-us/library/vstudio/hh191443.aspx), +[Java](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/package-summary.html), +and classic concurrency patterns. + + + +The design goals of this gem are: + +* Be an 'unopinionated' toolbox that provides useful utilities without debating which is better + or why +* Remain free of external gem dependencies +* Stay true to the spirit of the languages providing inspiration +* But implement in a way that makes sense for Ruby +* Keep the semantics as idiomatic Ruby as possible +* Support features that make sense in Ruby +* Exclude features that don't make sense in Ruby +* Be small, lean, and loosely coupled +* Thread-safety +* Backward compatibility + +## Contributing + +**This gem depends on +[contributions](https://github.com/ruby-concurrency/concurrent-ruby/graphs/contributors) and we +appreciate your help. Would you like to contribute? Great! Have a look at +[issues with `looking-for-contributor` label](https://github.com/ruby-concurrency/concurrent-ruby/issues?q=is%3Aissue+is%3Aopen+label%3Alooking-for-contributor).** And if you pick something up let us know on the issue. + +You can also get started by triaging issues which may include reproducing bug reports or asking for vital information, such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to concurrent-ruby on CodeTriage](https://www.codetriage.com/ruby-concurrency/concurrent-ruby). [![Open Source Helpers](https://www.codetriage.com/ruby-concurrency/concurrent-ruby/badges/users.svg)](https://www.codetriage.com/ruby-concurrency/concurrent-ruby) + +## Thread Safety + +*Concurrent Ruby makes one of the strongest thread safety guarantees of any Ruby concurrency +library, providing consistent behavior and guarantees on all three main Ruby interpreters +(MRI/CRuby, JRuby, TruffleRuby).* + +Every abstraction in this library is thread safe. Specific thread safety guarantees are documented +with each abstraction. + +It is critical to remember, however, that Ruby is a language of mutable references. *No* +concurrency library for Ruby can ever prevent the user from making thread safety mistakes (such as +sharing a mutable object between threads and modifying it on both threads) or from creating +deadlocks through incorrect use of locks. All the library can do is provide safe abstractions which +encourage safe practices. Concurrent Ruby provides more safe concurrency abstractions than any +other Ruby library, many of which support the mantra of +["Do not communicate by sharing memory; instead, share memory by communicating"](https://blog.golang.org/share-memory-by-communicating). +Concurrent Ruby is also the only Ruby library which provides a full suite of thread safe and +immutable variable types and data structures. + +We've also initiated discussion to document the [memory model](docs-source/synchronization.md) of Ruby which +would provide consistent behaviour and guarantees on all three main Ruby interpreters +(MRI/CRuby, JRuby, TruffleRuby). 
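+
+A minimal sketch of that guarantee, assuming nothing beyond the gem itself: a
+`Concurrent::Array` can be shared freely between threads, where a bare `Array`
+could not be relied upon across interpreters.
+
+```ruby
+require 'concurrent'
+
+safe = Concurrent::Array.new # thread-safe subclass of ::Array
+
+threads = 8.times.map do
+  Thread.new { 1_000.times { safe << :item } }
+end
+threads.each(&:join)
+
+puts safe.size # => 8000 on MRI, JRuby, and TruffleRuby alike
+```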
+ +## Features & Documentation + +**The primary site for documentation is the automatically generated +[API documentation](http://ruby-concurrency.github.io/concurrent-ruby/index.html) which is up to +date with the latest release.** This readme matches master, so it may describe features that are not yet +released. + +We also have an [IRC (gitter)](https://gitter.im/ruby-concurrency/concurrent-ruby) channel. + +### Versioning + +* `concurrent-ruby` uses [Semantic Versioning](http://semver.org/) +* `concurrent-ruby-ext` always has the same version as `concurrent-ruby` +* `concurrent-ruby-edge` will always be 0.y.z, therefore + [point 4](http://semver.org/#spec-item-4) applies *"Major version zero + (0.y.z) is for initial development. Anything may change at any time. The + public API should not be considered stable."* However, we additionally use the + following rules: + * Minor version increment means incompatible changes were made + * Patch version increment means only compatible changes were made + + +#### General-purpose Concurrency Abstractions + +* [Async](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Async.html): + A mixin module that provides simple asynchronous behavior to a class. Loosely based on Erlang's + [gen_server](http://www.erlang.org/doc/man/gen_server.html). +* [ScheduledTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ScheduledTask.html): + Like a Future scheduled for a specific future time. +* [TimerTask](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TimerTask.html): + A Thread that periodically wakes up to perform work at regular intervals. +* [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html): + Unified implementation of futures and promises which combines features of previous `Future`, + `Promise`, `IVar`, `Event`, `dataflow`, `Delay`, and (partially) `TimerTask` into a single + framework. It extensively uses the new synchronization layer to make all the features + **non-blocking** and **lock-free**, with the exception of obviously blocking operations like + `#wait`, `#value`. It also offers better performance. + +#### Thread-safe Value Objects, Structures, and Collections + +Collection classes that were originally part of the (deprecated) `thread_safe` gem: + +* [Array](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Array.html) A thread-safe + subclass of Ruby's standard [Array](http://ruby-doc.org/core/Array.html). +* [Hash](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Hash.html) A thread-safe + subclass of Ruby's standard [Hash](http://ruby-doc.org/core/Hash.html). +* [Set](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Set.html) A thread-safe + subclass of Ruby's standard [Set](http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html). +* [Map](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Map.html) A hash-like object + that should have much better performance characteristics, especially under high concurrency, + than `Concurrent::Hash`. +* [Tuple](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Tuple.html) A fixed size + array with volatile (synchronized, thread safe) getters/setters.
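+
+For example, a minimal sketch combining two of the gem's classes:
+`Concurrent::Map#compute_if_absent` performs an atomic check-then-insert, which
+pairs naturally with `AtomicFixnum` for shared counters.
+
+```ruby
+require 'concurrent'
+
+counts = Concurrent::Map.new
+
+threads = 4.times.map do
+  Thread.new do
+    1_000.times do
+      # compute_if_absent is atomic, so every thread shares one counter
+      counts.compute_if_absent(:hits) { Concurrent::AtomicFixnum.new(0) }.increment
+    end
+  end
+end
+threads.each(&:join)
+
+puts counts[:hits].value # => 4000
+```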
+ +Value objects inspired by other languages: + +* [Maybe](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Maybe.html) A thread-safe, + immutable object representing an optional value, based on + [Haskell Data.Maybe](https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html). + +Structure classes derived from Ruby's [Struct](http://ruby-doc.org/core/Struct.html): + +* [ImmutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ImmutableStruct.html) + Immutable struct where values are set at construction and cannot be changed later. +* [MutableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MutableStruct.html) + Synchronized, mutable struct where values can be safely changed at any time. +* [SettableStruct](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/SettableStruct.html) + Synchronized, write-once struct where values can be set at most once, either at construction + or any time thereafter. + +Thread-safe variables: + +* [Agent](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Agent.html): A way to + manage shared, mutable, *asynchronous*, independent state. Based on Clojure's + [Agent](http://clojure.org/agents). +* [Atom](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Atom.html): A way to manage + shared, mutable, *synchronous*, independent state. Based on Clojure's + [Atom](http://clojure.org/atoms). +* [AtomicBoolean](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicBoolean.html) + A boolean value that can be updated atomically. +* [AtomicFixnum](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicFixnum.html) + A numeric value that can be updated atomically. +* [AtomicReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicReference.html) + An object reference that may be updated atomically. +* [Exchanger](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Exchanger.html) + A synchronization point at which threads can pair and swap elements within pairs. Based on + Java's [Exchanger](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html). +* [MVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/MVar.html) A synchronized + single element container. Based on Haskell's + [MVar](https://hackage.haskell.org/package/base-4.8.1.0/docs/Control-Concurrent-MVar.html) and + Scala's [MVar](http://docs.typelevel.org/api/scalaz/nightly/index.html#scalaz.concurrent.MVar$). +* [ThreadLocalVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ThreadLocalVar.html) + A variable where the value is different for each thread. +* [TVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/TVar.html) A transactional + variable implementing software transactional memory (STM). Based on Clojure's + [Ref](http://clojure.org/refs). + +#### Java-inspired ThreadPools and Other Executors + +* See the [thread pool](http://ruby-concurrency.github.io/concurrent-ruby/master/file.thread_pools.html) + overview, which also contains a list of other Executors available. + +#### Thread Synchronization Classes and Algorithms + +* [CountDownLatch](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CountDownLatch.html) + A synchronization object that allows one thread to wait on multiple other threads. 
+* [CyclicBarrier](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/CyclicBarrier.html)
+  A synchronization aid that allows a set of threads to all wait for each other to reach a common barrier point.
+* [Event](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Event.html) An old-school,
+  kernel-style event.
+* [ReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReadWriteLock.html)
+  A lock that supports multiple readers but only one writer.
+* [ReentrantReadWriteLock](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ReentrantReadWriteLock.html)
+  A read/write lock with reentrant and upgrade features.
+* [Semaphore](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Semaphore.html)
+  A counting-based locking mechanism that uses permits.
+* [AtomicMarkableReference](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/AtomicMarkableReference.html)
+
+#### Deprecated
+
+Deprecated features are still available and bugs are being fixed, but new features will not be added.
+
+* ~~[Future](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Future.html):
+  An asynchronous operation that produces a value.~~ Replaced by
+  [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html).
+  * ~~[.dataflow](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent.html#dataflow-class_method):
+    Built on Futures, Dataflow allows you to create a task that will be scheduled when all of
+    its data dependencies are available.~~ Replaced by
+    [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html).
+* ~~[Promise](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promise.html): Similar
+  to Futures, with more features.~~ Replaced by
+  [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html).
+* ~~[Delay](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Delay.html) Lazy evaluation
+  of a block yielding an immutable result. Based on Clojure's
+  [delay](https://clojuredocs.org/clojure.core/delay).~~ Replaced by
+  [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html).
+* ~~[IVar](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/IVar.html) Similar to a
+  "future" but can be manually assigned once, after which it becomes immutable.~~ Replaced by
+  [Promises](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises.html).
+
+### Edge Features
+
+These are available in the `concurrent-ruby-edge` companion gem.
+
+These features are under active development and may change frequently. Backward compatibility
+is not guaranteed (and they may also lack tests and documentation). Semantic versions will
+be obeyed, though. Features developed in `concurrent-ruby-edge` are expected to move to
+`concurrent-ruby` when final.
+
+* [Actor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Actor.html): Implements
+  the Actor Model, where concurrent actors exchange messages.
+  *Status: Partial documentation and tests; depends on new future/promise framework; stability is good.*
+* [Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Channel.html):
+  Communicating Sequential Processes ([CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes)).
+  Functionally equivalent to Go [channels](https://tour.golang.org/concurrency/2) with additional
+  inspiration from Clojure [core.async](https://clojure.github.io/core.async/).
+  *Status: Partial documentation and tests.*
+* [LazyRegister](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LazyRegister.html)
+* [LockFreeLinkedSet](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Edge/LockFreeLinkedSet.html)
+  *Status: will be moved to core soon.*
+* [LockFreeStack](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/LockFreeStack.html)
+  *Status: missing documentation and tests.*
+* [Promises::Channel](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Promises/Channel.html)
+  A first-in, first-out channel that accepts messages with the push family of methods and returns
+  messages with the pop family of methods.
+  Pop and push operations can be represented as futures; see `#pop_op` and `#push_op`.
+  The capacity of the channel can be limited to support back pressure; use the capacity option in `#initialize`.
+  The `#pop` method blocks and `#pop_op` returns a pending future if there is no message in the channel.
+  If the capacity is limited, the `#push` method blocks and `#push_op` returns a pending future.
+* [Cancellation](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Cancellation.html)
+  The Cancellation abstraction provides cooperative cancellation.
+
+  The standard methods `Thread#raise` and `Thread#kill` available in Ruby
+  are very dangerous (see the blog posts linked below).
+  Therefore concurrent-ruby provides an alternative.
+
+  *
+  *
+  *
+
+  It provides an object which represents a cancellable task; the task has to hold a reference
+  to the object and periodically, cooperatively check that it has not been cancelled.
+  Good practices for making tasks cancellable:
+  * check cancellation on every cycle of a loop which does significant work,
+  * do all blocking actions in a loop with a timeout, then on timeout check cancellation
+    and, if not cancelled, block again with the timeout
+* [Throttle](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Throttle.html)
+  A tool for managing the concurrency level of tasks.
+* [ErlangActor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ErlangActor.html)
+  An actor implementation which precisely matches Erlang actor behaviour.
+  Requires at least Ruby 2.1; otherwise it is not loaded.
+* [WrappingExecutor](http://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/WrappingExecutor.html)
+  A delegating executor which modifies each task before handing it to the target executor.
+
+## Supported Ruby versions
+
+* MRI 2.3 and above
+* Latest JRuby 9000
+* Latest TruffleRuby
+
+## Usage
+
+Everything within this gem can be loaded simply by requiring it:
+
+```ruby
+require 'concurrent'
+```
+
+Since concurrent-ruby 1.2.0, you can also require a specific abstraction that is
+[part of the public documentation](https://ruby-concurrency.github.io/concurrent-ruby/master/index.html), for example:
+
+```ruby
+require 'concurrent/map'
+require 'concurrent/atomic/atomic_reference'
+require 'concurrent/executor/fixed_thread_pool'
+```
+
+To use the tools in the Edge gem, it must be required separately:
+
+```ruby
+require 'concurrent-edge'
+```
+
+If the library does not behave as expected, `Concurrent.use_stdlib_logger(Logger::DEBUG)` can
+help to reveal the problem.
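+
+As a quick taste of the Promises framework described above, a minimal sketch (the numbers and
+the chained steps are arbitrary):
+
+```ruby
+require 'concurrent'
+
+# Promises.future evaluates the block on a background thread pool and
+# returns immediately with a pending future; #then chains another step.
+future = Concurrent::Promises.future(6, 7) { |a, b| a * b }.then { |v| v + 1 }
+
+puts future.value! # blocks until the chain resolves => 43
+```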
+
+## Installation
+
+```shell
+gem install concurrent-ruby
+```
+
+or add the following line to Gemfile:
+
+```ruby
+gem 'concurrent-ruby', require: 'concurrent'
+```
+
+and run `bundle install` from your shell.
+
+### Edge Gem Installation
+
+The Edge gem must be installed separately from the core gem:
+
+```shell
+gem install concurrent-ruby-edge
+```
+
+or add the following line to Gemfile:
+
+```ruby
+gem 'concurrent-ruby-edge', require: 'concurrent-edge'
+```
+
+and run `bundle install` from your shell.
+
+
+### C Extensions for MRI
+
+Potential performance improvements may be achieved under MRI by installing optional C extensions.
+To minimise installation errors, the C extensions are available in the separate `concurrent-ruby-ext`
+gem. `concurrent-ruby` and `concurrent-ruby-ext` are always released together with the same
+version. Simply install the extension gem too:
+
+```shell
+gem install concurrent-ruby-ext
+```
+
+or add the following line to Gemfile:
+
+```ruby
+gem 'concurrent-ruby-ext'
+```
+
+and run `bundle install` from your shell.
+
+In code it is only necessary to
+
+```ruby
+require 'concurrent'
+```
+
+The `concurrent-ruby` gem will automatically detect the presence of the `concurrent-ruby-ext` gem
+and load the appropriate C extensions.
+
+#### Note for gem developers
+
+No gems should depend on `concurrent-ruby-ext`. Doing so will force C extensions on your users. The
+best practice is to depend on `concurrent-ruby` and let users decide if they want C extensions.
+
+## Building the gem
+
+### Requirements
+
+* Recent CRuby
+* JRuby, e.g. `rbenv install jruby-9.2.17.0`
+* Set the env variable `CONCURRENT_JRUBY_HOME` to point to it, e.g. `/usr/local/opt/rbenv/versions/jruby-9.2.17.0`
+* Install Docker, which is required for Windows builds
+
+### Publishing the Gem
+
+* Update `version.rb`
+* Update the CHANGELOG
+* Add the new version to `docs-source/signpost.md`. This is needed only if there are visible changes in the documentation.
+* Commit (and push) the changes.
+* Use `bundle exec rake release` to release the gem.
+  It consists of the `['release:checks', 'release:build', 'release:test', 'release:publish']` steps.
+  It will ask for confirmation at the end before publishing anything. Steps can also be executed individually.
+
+## Maintainers
+
+* [Benoit Daloze](https://github.com/eregon)
+* [Matthew Draper](https://github.com/matthewd)
+* [Rafael França](https://github.com/rafaelfranca)
+* [Samuel Williams](https://github.com/ioquatix)
+
+### Special Thanks to
+
+* [Jerry D'Antonio](https://github.com/jdantonio) for creating the gem
+* [Brian Durand](https://github.com/bdurand) for the `ref` gem
+* [Charles Oliver Nutter](https://github.com/headius) for the `atomic` and `thread_safe` gems
+* [thedarkone](https://github.com/thedarkone) for the `thread_safe` gem
+
+to the past maintainers
+
+* [Chris Seaton](https://github.com/chrisseaton)
+* [Petr Chalupa](https://github.com/pitr-ch)
+* [Michele Della Torre](https://github.com/mighe)
+* [Paweł Obrok](https://github.com/obrok)
+* [Lucas Allan](https://github.com/lucasallan)
+
+and to [Ruby Association](https://www.ruby.or.jp/en/) for sponsoring a project
+["Enhancing Ruby’s concurrency tooling"](https://www.ruby.or.jp/en/news/20181106) in 2018.
+
+## License and Copyright
+
+*Concurrent Ruby* is free software released under the
+[MIT License](http://www.opensource.org/licenses/MIT).
+ +The *Concurrent Ruby* [logo](https://raw.githubusercontent.com/ruby-concurrency/concurrent-ruby/master/docs-source/logo/concurrent-ruby-logo-300x300.png) was +designed by [David Jones](https://twitter.com/zombyboy). It is Copyright © 2014 +[Jerry D'Antonio](https://twitter.com/jerrydantonio). All Rights Reserved. diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Rakefile b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Rakefile new file mode 100644 index 0000000..f167f46 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/Rakefile @@ -0,0 +1,319 @@ +require_relative 'lib/concurrent-ruby/concurrent/version' +require_relative 'lib/concurrent-ruby-edge/concurrent/edge/version' +require_relative 'lib/concurrent-ruby/concurrent/utility/engine' + +core_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby.gemspec') +ext_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-ext.gemspec') +edge_gemspec = Gem::Specification.load File.join(__dir__, 'concurrent-ruby-edge.gemspec') + +require 'rake/javaextensiontask' + +ENV['JRUBY_HOME'] = ENV['CONCURRENT_JRUBY_HOME'] if ENV['CONCURRENT_JRUBY_HOME'] && !Concurrent.on_jruby? + +Rake::JavaExtensionTask.new('concurrent_ruby', core_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' +end + +unless Concurrent.on_jruby? || Concurrent.on_truffleruby? + require 'rake/extensiontask' + + Rake::ExtensionTask.new('concurrent_ruby_ext', ext_gemspec) do |ext| + ext.ext_dir = 'ext/concurrent-ruby-ext' + ext.lib_dir = 'lib/concurrent-ruby/concurrent' + ext.source_pattern = '*.{c,h}' + + ext.cross_compile = true + ext.cross_platform = ['x86-mingw32', 'x64-mingw32'] + end +end + +require 'rake_compiler_dock' +namespace :repackage do + desc '* with Windows fat distributions' + task :all do + Dir.chdir(__dir__) do + # store gems in vendor cache for docker + Bundler.with_original_env do + sh 'bundle package' + end + + # build only the jar file not the whole gem for java platform, the jar is part the concurrent-ruby-x.y.z.gem + Rake::Task['lib/concurrent-ruby/concurrent/concurrent_ruby.jar'].invoke + + # build all gem files + %w[x86-mingw32 x64-mingw32].each do |plat| + RakeCompilerDock.sh( + "bundle install --local && bundle exec rake native:#{plat} gem --trace", + platform: plat, + options: ['--privileged'], # otherwise the directory in the image is empty + runas: false) + end + end + end +end + +require 'rubygems' +require 'rubygems/package_task' + +Gem::PackageTask.new(core_gemspec) {} if core_gemspec +Gem::PackageTask.new(ext_gemspec) {} if ext_gemspec && !Concurrent.on_jruby? +Gem::PackageTask.new(edge_gemspec) {} if edge_gemspec + +CLEAN.include( + 'lib/concurrent-ruby/concurrent/concurrent_ruby_ext.*', + 'lib/concurrent-ruby/concurrent/2.*', + 'lib/concurrent-ruby/concurrent/*.jar') + +begin + require 'rspec' + require 'rspec/core/rake_task' + + RSpec::Core::RakeTask.new(:spec) + + namespace :spec do + desc '* Configured for ci' + RSpec::Core::RakeTask.new(:ci) do |t| + options = %w[ --color + --backtrace + --order defined + --format documentation ] + t.rspec_opts = [*options].join(' ') + end + + desc '* test packaged and installed gems instead of local files' + task :installed do + Bundler.with_original_env do + Dir.chdir(__dir__) do + sh "gem install pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" + sh "gem install pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if Concurrent.on_cruby? 
+ sh "gem install pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" + ENV['NO_PATH'] = 'true' + sh 'bundle update' + sh 'bundle exec rake spec:ci' + end + end + end + end + + desc 'executed in CI' + task :ci => [:compile, 'spec:ci'] + + desc 'run each spec file in a separate process to help find missing requires' + task 'spec:isolated' do + glob = "#{ENV['DIR'] || 'spec'}/**/*_spec.rb" + from = ENV['FROM'] + env = { 'ISOLATED' => 'true' } + Dir[glob].each do |spec| + next if from and from != spec + from = nil if from == spec + + sh env, 'rspec', spec + end + end + + task :default => [:clobber, :compile, :spec] +rescue LoadError => e + puts 'RSpec is not installed, skipping test task definitions: ' + e.message +end + +current_yard_version_name = Concurrent::VERSION + +begin + require 'yard' + require 'md_ruby_eval' + require_relative 'support/yard_full_types' + + common_yard_options = ['--no-yardopts', + '--no-document', + '--no-private', + '--embed-mixins', + '--markup', 'markdown', + '--title', 'Concurrent Ruby', + '--template', 'default', + '--template-path', 'yard-template', + '--default-return', 'undocumented'] + + desc 'Generate YARD Documentation (signpost, master)' + task :yard => ['yard:signpost', 'yard:master'] + + namespace :yard do + + desc '* eval markdown files' + task :eval_md do + Dir.chdir File.join(__dir__, 'docs-source') do + sh 'bundle exec md-ruby-eval --auto' + end + end + + task :update_readme do + Dir.chdir __dir__ do + content = File.read(File.join('README.md')). + gsub(/\[([\w ]+)\]\(http:\/\/ruby-concurrency\.github\.io\/concurrent-ruby\/master\/.*\)/) do |_| + case $1 + when 'LockFreeLinkedSet' + "{Concurrent::Edge::#{$1} #{$1}}" + when '.dataflow' + '{Concurrent.dataflow Concurrent.dataflow}' + when 'thread pool' + '{file:thread_pools.md thread pool}' + else + "{Concurrent::#{$1} #{$1}}" + end + end + FileUtils.mkpath 'tmp' + File.write 'tmp/README.md', content + end + end + + define_yard_task = -> name do + output_dir = "docs/#{name}" + + removal_name = "remove.#{name}" + task removal_name do + Dir.chdir __dir__ do + FileUtils.rm_rf output_dir + end + end + + desc "* of #{name} into subdir #{name}" + YARD::Rake::YardocTask.new(name) do |yard| + yard.options.push( + '--output-dir', output_dir, + '--main', 'tmp/README.md', + *common_yard_options) + yard.files = ['./lib/concurrent-ruby/**/*.rb', + './lib/concurrent-ruby-edge/**/*.rb', + './ext/concurrent_ruby_ext/**/*.c', + '-', + 'docs-source/thread_pools.md', + 'docs-source/promises.out.md', + 'docs-source/medium-example.out.rb', + 'LICENSE.txt', + 'CHANGELOG.md'] + end + Rake::Task[name].prerequisites.push removal_name, + # 'yard:eval_md', + 'yard:update_readme' + end + + define_yard_task.call current_yard_version_name + define_yard_task.call 'master' + + desc "* signpost for versions" + YARD::Rake::YardocTask.new(:signpost) do |yard| + yard.options.push( + '--output-dir', 'docs', + '--main', 'docs-source/signpost.md', + *common_yard_options) + yard.files = ['no-lib'] + end + end + +rescue LoadError => e + puts 'YARD is not installed, skipping documentation task definitions: ' + e.message +end + +desc 'build, test, and publish the gem' +task :release => ['release:checks', 'release:build', 'release:test', 'release:publish'] + +namespace :release do + # Depends on environment of @pitr-ch + + task :checks do + Dir.chdir(__dir__) do + sh 'test -z "$(git status --porcelain)"' do |ok, res| + unless ok + begin + status = `git status --porcelain` + STDOUT.puts 'There are local changes that you might want to 
commit.', status, 'Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + sh 'git fetch' + sh 'test $(git show-ref --verify --hash refs/heads/master) = ' + + '$(git show-ref --verify --hash refs/remotes/origin/master)' do |ok, res| + unless ok + begin + STDOUT.puts 'Local master branch is not pushed to origin.', 'Continue? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + end + end + end + end + + desc '* build all *.gem files necessary for release' + task :build => [:clobber, 'repackage:all'] + + desc '* test actual installed gems instead of cloned repository on MRI and JRuby' + task :test do + Dir.chdir(__dir__) do + puts "Testing with the installed gem" + + Bundler.with_original_env do + sh 'ruby -v' + sh 'bundle exec rake spec:installed' + + env = { "PATH" => "#{ENV['CONCURRENT_JRUBY_HOME']}/bin:#{ENV['PATH']}" } + sh env, 'ruby -v' + sh env, 'bundle exec rake spec:installed' + end + + puts 'Windows build is untested' + end + end + + desc '* do all nested steps' + task :publish => ['publish:ask', 'publish:tag', 'publish:rubygems', 'publish:post_steps'] + + namespace :publish do + publish_base = true + publish_edge = false + + task :ask do + begin + STDOUT.puts 'Do you want to publish anything now? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + exit 1 if input == 'n' + begin + STDOUT.puts 'It will publish `concurrent-ruby`. Do you want to publish `concurrent-ruby-edge`? (y/n)' + input = STDIN.gets.strip.downcase + end until %w(y n).include?(input) + publish_edge = input == 'y' + end + + desc '** tag HEAD with current version and push to github' + task :tag => :ask do + Dir.chdir(__dir__) do + sh "git tag v#{Concurrent::VERSION}" if publish_base + sh "git push origin v#{Concurrent::VERSION}" if publish_base + sh "git tag edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + sh "git push origin edge-v#{Concurrent::EDGE_VERSION}" if publish_edge + end + end + + desc '** push all *.gem files to rubygems' + task :rubygems => :ask do + Dir.chdir(__dir__) do + sh "gem push pkg/concurrent-ruby-#{Concurrent::VERSION}.gem" if publish_base + sh "gem push pkg/concurrent-ruby-edge-#{Concurrent::EDGE_VERSION}.gem" if publish_edge + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}.gem" if publish_base + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x64-mingw32.gem" if publish_base + sh "gem push pkg/concurrent-ruby-ext-#{Concurrent::VERSION}-x86-mingw32.gem" if publish_base + end + end + + desc '** print post release steps' + task :post_steps do + # TODO: (petr 05-Jun-2021) automate and renew the process + puts 'Manually: create a release on GitHub with relevant changelog part' + puts 'Manually: send email same as release with relevant changelog part' + puts 'Manually: tweet' + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/ConcurrentRubyService.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/ConcurrentRubyService.java new file mode 100644 index 0000000..fb6be96 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/ConcurrentRubyService.java @@ -0,0 +1,17 @@ +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import java.io.IOException; + +public class ConcurrentRubyService implements BasicLibraryService { + + public boolean basicLoad(final Ruby runtime) throws 
IOException { + new com.concurrent_ruby.ext.AtomicReferenceLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicBooleanLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaAtomicFixnumLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JavaSemaphoreLibrary().load(runtime, false); + new com.concurrent_ruby.ext.SynchronizationLibrary().load(runtime, false); + new com.concurrent_ruby.ext.JRubyMapBackendLibrary().load(runtime, false); + return true; + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java new file mode 100644 index 0000000..dfa9e77 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java @@ -0,0 +1,175 @@ +package com.concurrent_ruby.ext; + +import java.lang.reflect.Field; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +/** + * This library adds an atomic reference type to JRuby for use in the atomic + * library. We do a native version to avoid the implicit value coercion that + * normally happens through JI. + * + * @author headius + */ +public class AtomicReferenceLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicReference", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + try { + sun.misc.Unsafe.class.getMethod("getAndSetObject", Object.class); + atomicCls.setAllocator(JRUBYREFERENCE8_ALLOCATOR); + } catch (Exception e) { + // leave it as Java 6/7 version + } + atomicCls.defineAnnotatedMethods(JRubyReference.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBYREFERENCE8_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyReference8(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyReference", parent="Object") + public static class JRubyReference extends RubyObject { + volatile IRubyObject reference; + + static final sun.misc.Unsafe UNSAFE; + static final long referenceOffset; + + static { + try { + UNSAFE = UnsafeHolder.U; + Class k = JRubyReference.class; + referenceOffset = UNSAFE.objectFieldOffset(k.getDeclaredField("reference")); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public JRubyReference(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + UNSAFE.putObject(this, referenceOffset, context.nil); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + UNSAFE.putObject(this, 
referenceOffset, value); + return context.nil; + } + + @JRubyMethod(name = {"get", "value"}) + public IRubyObject get() { + return reference; + } + + @JRubyMethod(name = {"set", "value="}) + public IRubyObject set(IRubyObject newValue) { + UNSAFE.putObjectVolatile(this, referenceOffset, newValue); + return newValue; + } + + @JRubyMethod(name = {"compare_and_set", "compare_and_swap"}) + public IRubyObject compare_and_set(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + if (expectedValue instanceof RubyNumeric) { + // numerics are not always idempotent in Ruby, so we need to do slower logic + return compareAndSetNumeric(context, expectedValue, newValue); + } + + return runtime.newBoolean(UNSAFE.compareAndSwapObject(this, referenceOffset, expectedValue, newValue)); + } + + @JRubyMethod(name = {"get_and_set", "swap"}) + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // less-efficient version for Java 6 and 7 + while (true) { + IRubyObject oldValue = get(); + if (UNSAFE.compareAndSwapObject(this, referenceOffset, oldValue, newValue)) { + return oldValue; + } + } + } + + private IRubyObject compareAndSetNumeric(ThreadContext context, IRubyObject expectedValue, IRubyObject newValue) { + Ruby runtime = context.runtime; + + // loop until: + // * reference CAS would succeed for same-valued objects + // * current and expected have different values as determined by #equals + while (true) { + IRubyObject current = reference; + + if (!(current instanceof RubyNumeric)) { + // old value is not numeric, CAS fails + return runtime.getFalse(); + } + + RubyNumeric currentNumber = (RubyNumeric)current; + if (!currentNumber.equals(expectedValue)) { + // current number does not equal expected, fail CAS + return runtime.getFalse(); + } + + // check that current has not changed, or else allow loop to repeat + boolean success = UNSAFE.compareAndSwapObject(this, referenceOffset, current, newValue); + if (success) { + // value is same and did not change in interim...success + return runtime.getTrue(); + } + } + } + } + + private static final class UnsafeHolder { + private UnsafeHolder(){} + + public static final sun.misc.Unsafe U = loadUnsafe(); + + private static sun.misc.Unsafe loadUnsafe() { + try { + Class unsafeClass = Class.forName("sun.misc.Unsafe"); + Field f = unsafeClass.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + } catch (Exception e) { + return null; + } + } + } + + public static class JRubyReference8 extends JRubyReference { + public JRubyReference8(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @Override + public IRubyObject get_and_set(ThreadContext context, IRubyObject newValue) { + // efficient version for Java 8 + return (IRubyObject)UNSAFE.getAndSetObject(this, referenceOffset, newValue); + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java new file mode 100644 index 0000000..a09f916 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java @@ -0,0 +1,248 @@ +package com.concurrent_ruby.ext; + +import org.jruby.*; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import 
com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8; +import com.concurrent_ruby.ext.jsr166e.nounsafe.*; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import java.util.Map; + +import static org.jruby.runtime.Visibility.PRIVATE; + +/** + * Native Java implementation to avoid the JI overhead. + * + * @author thedarkone + */ +public class JRubyMapBackendLibrary implements Library { + public void load(Ruby runtime, boolean wrap) throws IOException { + + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyModule thread_safeMod = concurrentMod.defineModuleUnder("Collection"); + RubyClass jrubyRefClass = thread_safeMod.defineClassUnder("JRubyMapBackend", runtime.getObject(), BACKEND_ALLOCATOR); + jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); + jrubyRefClass.defineAnnotatedMethods(JRubyMapBackend.class); + } + + private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyMapBackend(runtime, klazz); + } + }; + + @JRubyClass(name="JRubyMapBackend", parent="Object") + public static class JRubyMapBackend extends RubyObject { + // Defaults used by the CHM + static final int DEFAULT_INITIAL_CAPACITY = 16; + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); + + private ConcurrentHashMap map; + + private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { + if (CAN_USE_UNSAFE_CHM) { + return new ConcurrentHashMapV8(initialCapacity, loadFactor); + } else { + return new com.concurrent_ruby.ext.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); + } + } + + private static ConcurrentHashMap newCHM() { + return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + private static boolean canUseUnsafeCHM() { + try { + new com.concurrent_ruby.ext.jsr166e.ConcurrentHashMapV8(); // force class load and initialization + return true; + } catch (Throwable t) { // ensuring we really do catch everything + // Doug's Unsafe setup errors always have this "Could not ini.." message + if (isCausedBySecurityException(t)) { + return false; + } + throw (t instanceof RuntimeException ? 
(RuntimeException) t : new RuntimeException(t)); + } + } + + private static boolean isCausedBySecurityException(Throwable t) { + while (t != null) { + if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { + return true; + } + t = t.getCause(); + } + return false; + } + + public JRubyMapBackend(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + map = newCHM(); + return context.getRuntime().getNil(); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject options) { + map = toCHM(context, options); + return context.getRuntime().getNil(); + } + + private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { + Ruby runtime = context.getRuntime(); + if (!options.isNil() && options.respondsTo("[]")) { + IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); + IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); + int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; + float loadFactor = !rLoadFactor.isNil() ? (float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; + return newCHM(initialCapacity, loadFactor); + } else { + return newCHM(); + } + } + + @JRubyMethod(name = "[]", required = 1) + public IRubyObject op_aref(ThreadContext context, IRubyObject key) { + IRubyObject value; + return ((value = map.get(key)) == null) ? context.getRuntime().getNil() : value; + } + + @JRubyMethod(name = {"[]="}, required = 2) + public IRubyObject op_aset(IRubyObject key, IRubyObject value) { + map.put(key, value); + return value; + } + + @JRubyMethod + public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { + IRubyObject result = map.putIfAbsent(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { + return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { + @Override + public IRubyObject apply(IRubyObject key) { + return block.yieldSpecific(context); + } + }); + } + + @JRubyMethod + public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { + IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? 
context.getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { + IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { + @Override + public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { + IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); + return result.isNil() ? null : result; + } + }); + return result == null ? context.getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { + return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); + } + + @JRubyMethod(name = "key?", required = 1) + public RubyBoolean has_key_p(IRubyObject key) { + return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); + } + + @JRubyMethod + public IRubyObject key(IRubyObject value) { + final IRubyObject key = map.findKey(value); + return key == null ? getRuntime().getNil() : key; + } + + @JRubyMethod + public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { + IRubyObject result = map.replace(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { + IRubyObject result = map.put(key, value); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public IRubyObject delete(IRubyObject key) { + IRubyObject result = map.remove(key); + return result == null ? getRuntime().getNil() : result; + } + + @JRubyMethod + public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { + return getRuntime().newBoolean(map.remove(key, value)); + } + + @JRubyMethod + public IRubyObject clear() { + map.clear(); + return this; + } + + @JRubyMethod + public IRubyObject each_pair(ThreadContext context, Block block) { + for (Map.Entry entry : map.entrySet()) { + block.yieldSpecific(context, entry.getKey(), entry.getValue()); + } + return this; + } + + @JRubyMethod + public RubyFixnum size(ThreadContext context) { + return context.getRuntime().newFixnum(map.size()); + } + + @JRubyMethod + public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { + return map.getValueOrDefault(key, defaultValue); + } + + @JRubyMethod(visibility = PRIVATE) + public JRubyMapBackend initialize_copy(ThreadContext context, IRubyObject other) { + map = newCHM(); + return this; + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java new file mode 100644 index 0000000..b566076 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java @@ -0,0 +1,93 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBoolean; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyNil; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; + +import java.io.IOException; +import 
java.util.concurrent.atomic.AtomicBoolean; + +public class JavaAtomicBooleanLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicBoolean", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + atomicCls.defineAnnotatedMethods(JavaAtomicBoolean.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicBoolean(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicBoolean", parent = "Object") + public static class JavaAtomicBoolean extends RubyObject { + + private AtomicBoolean atomicBoolean; + + public JavaAtomicBoolean(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + atomicBoolean = new AtomicBoolean(convertRubyBooleanToJavaBoolean(value)); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + atomicBoolean = new AtomicBoolean(); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject value() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + + @JRubyMethod(name = "true?") + public IRubyObject isAtomicTrue() { + return getRuntime().newBoolean(atomicBoolean.get()); + } + + @JRubyMethod(name = "false?") + public IRubyObject isAtomicFalse() { + return getRuntime().newBoolean((atomicBoolean.get() == false)); + } + + @JRubyMethod(name = "value=") + public IRubyObject setAtomic(ThreadContext context, IRubyObject newValue) { + atomicBoolean.set(convertRubyBooleanToJavaBoolean(newValue)); + return context.nil; + } + + @JRubyMethod(name = "make_true") + public IRubyObject makeTrue() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(false, true)); + } + + @JRubyMethod(name = "make_false") + public IRubyObject makeFalse() { + return getRuntime().newBoolean(atomicBoolean.compareAndSet(true, false)); + } + + private boolean convertRubyBooleanToJavaBoolean(IRubyObject newValue) { + if (newValue instanceof RubyBoolean.False || newValue instanceof RubyNil) { + return false; + } else { + return true; + } + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java new file mode 100644 index 0000000..672bfc0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java @@ -0,0 +1,113 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import org.jruby.runtime.Block; + +public class JavaAtomicFixnumLibrary implements Library { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + 
RubyClass atomicCls = concurrentMod.defineClassUnder("JavaAtomicFixnum", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaAtomicFixnum.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaAtomicFixnum(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaAtomicFixnum", parent = "Object") + public static class JavaAtomicFixnum extends RubyObject { + + private AtomicLong atomicLong; + + public JavaAtomicFixnum(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context) { + this.atomicLong = new AtomicLong(0); + return context.nil; + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.atomicLong = new AtomicLong(rubyFixnumToLong(value)); + return context.nil; + } + + @JRubyMethod(name = "value") + public IRubyObject getValue() { + return getRuntime().newFixnum(atomicLong.get()); + } + + @JRubyMethod(name = "value=") + public IRubyObject setValue(ThreadContext context, IRubyObject newValue) { + atomicLong.set(rubyFixnumToLong(newValue)); + return context.nil; + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject increment() { + return getRuntime().newFixnum(atomicLong.incrementAndGet()); + } + + @JRubyMethod(name = {"increment", "up"}) + public IRubyObject increment(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(delta)); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement() { + return getRuntime().newFixnum(atomicLong.decrementAndGet()); + } + + @JRubyMethod(name = {"decrement", "down"}) + public IRubyObject decrement(IRubyObject value) { + long delta = rubyFixnumToLong(value); + return getRuntime().newFixnum(atomicLong.addAndGet(-delta)); + } + + @JRubyMethod(name = "compare_and_set") + public IRubyObject compareAndSet(ThreadContext context, IRubyObject expect, IRubyObject update) { + return getRuntime().newBoolean(atomicLong.compareAndSet(rubyFixnumToLong(expect), rubyFixnumToLong(update))); + } + + @JRubyMethod + public IRubyObject update(ThreadContext context, Block block) { + for (;;) { + long _oldValue = atomicLong.get(); + IRubyObject oldValue = getRuntime().newFixnum(_oldValue); + IRubyObject newValue = block.yield(context, oldValue); + if (atomicLong.compareAndSet(_oldValue, rubyFixnumToLong(newValue))) { + return newValue; + } + } + } + + private long rubyFixnumToLong(IRubyObject value) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError("value must be a Fixnum"); + } + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java new file mode 100644 index 0000000..d887f25 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java @@ -0,0 +1,189 @@ +package com.concurrent_ruby.ext; + +import java.io.IOException; +import java.util.concurrent.Semaphore; +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyFixnum; +import org.jruby.RubyModule; +import 
org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +public class JavaSemaphoreLibrary { + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule concurrentMod = runtime.defineModule("Concurrent"); + RubyClass atomicCls = concurrentMod.defineClassUnder("JavaSemaphore", runtime.getObject(), JRUBYREFERENCE_ALLOCATOR); + + atomicCls.defineAnnotatedMethods(JavaSemaphore.class); + } + + private static final ObjectAllocator JRUBYREFERENCE_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JavaSemaphore(runtime, klazz); + } + }; + + @JRubyClass(name = "JavaSemaphore", parent = "Object") + public static class JavaSemaphore extends RubyObject { + + private JRubySemaphore semaphore; + + public JavaSemaphore(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject value) { + this.semaphore = new JRubySemaphore(rubyFixnumInt(value, "count")); + return context.nil; + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context, final Block block) throws InterruptedException { + return this.acquire(context, 1, block); + } + + @JRubyMethod + public IRubyObject acquire(ThreadContext context, IRubyObject permits, final Block block) throws InterruptedException { + return this.acquire(context, rubyFixnumToPositiveInt(permits, "permits"), block); + } + + @JRubyMethod(name = "available_permits") + public IRubyObject availablePermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.availablePermits()); + } + + @JRubyMethod(name = "drain_permits") + public IRubyObject drainPermits(ThreadContext context) { + return getRuntime().newFixnum(this.semaphore.drainPermits()); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, final Block block) throws InterruptedException { + int permitsInt = 1; + boolean acquired = semaphore.tryAcquire(permitsInt); + + return triedAcquire(context, permitsInt, acquired, block); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits, final Block block) throws InterruptedException { + int permitsInt = rubyFixnumToPositiveInt(permits, "permits"); + boolean acquired = semaphore.tryAcquire(permitsInt); + + return triedAcquire(context, permitsInt, acquired, block); + } + + @JRubyMethod(name = "try_acquire") + public IRubyObject tryAcquire(ThreadContext context, IRubyObject permits, IRubyObject timeout, final Block block) throws InterruptedException { + int permitsInt = rubyFixnumToPositiveInt(permits, "permits"); + boolean acquired = semaphore.tryAcquire( + permitsInt, + rubyNumericToLong(timeout, "timeout"), + java.util.concurrent.TimeUnit.SECONDS + ); + + return triedAcquire(context, permitsInt, acquired, block); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context) { + this.semaphore.release(1); + return getRuntime().newBoolean(true); + } + + @JRubyMethod + public IRubyObject release(ThreadContext context, IRubyObject permits) { + this.semaphore.release(rubyFixnumToPositiveInt(permits, "permits")); + return getRuntime().newBoolean(true); + } + + @JRubyMethod(name = "reduce_permits") + public IRubyObject 
reducePermits(ThreadContext context, IRubyObject reduction) throws InterruptedException { + this.semaphore.publicReducePermits(rubyFixnumToNonNegativeInt(reduction, "reduction")); + return context.nil; + } + + private IRubyObject acquire(ThreadContext context, int permits, final Block block) throws InterruptedException { + this.semaphore.acquire(permits); + + if (!block.isGiven()) return context.nil; + + try { + return block.yieldSpecific(context); + } finally { + this.semaphore.release(permits); + } + } + + private IRubyObject triedAcquire(ThreadContext context, int permits, boolean acquired, final Block block) { + if (!block.isGiven()) return getRuntime().newBoolean(acquired); + if (!acquired) return context.nil; + + try { + return block.yieldSpecific(context); + } finally { + this.semaphore.release(permits); + } + } + + private int rubyFixnumInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be integer"); + } + } + + private int rubyFixnumToNonNegativeInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() >= 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a non-negative integer"); + } + } + + private int rubyFixnumToPositiveInt(IRubyObject value, String paramName) { + if (value instanceof RubyFixnum && ((RubyFixnum) value).getLongValue() > 0) { + RubyFixnum fixNum = (RubyFixnum) value; + return (int) fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be an integer greater than zero"); + } + } + + private long rubyNumericToLong(IRubyObject value, String paramName) { + if (value instanceof RubyNumeric && ((RubyNumeric) value).getDoubleValue() > 0) { + RubyNumeric fixNum = (RubyNumeric) value; + return fixNum.getLongValue(); + } else { + throw getRuntime().newArgumentError(paramName + " must be a float greater than zero"); + } + } + + class JRubySemaphore extends Semaphore { + + public JRubySemaphore(int permits) { + super(permits); + } + + public JRubySemaphore(int permits, boolean value) { + super(permits, value); + } + + public void publicReducePermits(int i) { + reducePermits(i); + } + + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java new file mode 100644 index 0000000..f0c75ee --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java @@ -0,0 +1,292 @@ +package com.concurrent_ruby.ext; + +import org.jruby.Ruby; +import org.jruby.RubyBasicObject; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.RubyThread; +import org.jruby.anno.JRubyClass; +import org.jruby.anno.JRubyMethod; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.Visibility; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.runtime.load.Library; +import sun.misc.Unsafe; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +public 
class SynchronizationLibrary implements Library { + + private static final Unsafe UNSAFE = loadUnsafe(); + private static final boolean FULL_FENCE = supportsFences(); + + private static Unsafe loadUnsafe() { + try { + Class ncdfe = Class.forName("sun.misc.Unsafe"); + Field f = ncdfe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get((java.lang.Object) null); + } catch (Exception var2) { + return null; + } catch (NoClassDefFoundError var3) { + return null; + } + } + + private static boolean supportsFences() { + if (UNSAFE == null) { + return false; + } else { + try { + Method m = UNSAFE.getClass().getDeclaredMethod("fullFence", new Class[0]); + if (m != null) { + return true; + } + } catch (Exception var1) { + // nothing + } + + return false; + } + } + + private static final ObjectAllocator OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new Object(runtime, klazz); + } + }; + + private static final ObjectAllocator ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new AbstractLockableObject(runtime, klazz); + } + }; + + private static final ObjectAllocator JRUBY_LOCKABLE_OBJECT_ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRubyLockableObject(runtime, klazz); + } + }; + + public void load(Ruby runtime, boolean wrap) throws IOException { + RubyModule synchronizationModule = runtime. + defineModule("Concurrent"). + defineModuleUnder("Synchronization"); + + RubyModule jrubyAttrVolatileModule = synchronizationModule.defineModuleUnder("JRubyAttrVolatile"); + jrubyAttrVolatileModule.defineAnnotatedMethods(JRubyAttrVolatile.class); + + defineClass(runtime, synchronizationModule, "AbstractObject", "Object", + Object.class, OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "AbstractLockableObject", + AbstractLockableObject.class, ABSTRACT_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "AbstractLockableObject", "JRubyLockableObject", + JRubyLockableObject.class, JRUBY_LOCKABLE_OBJECT_ALLOCATOR); + + defineClass(runtime, synchronizationModule, "Object", "JRuby", + JRuby.class, new ObjectAllocator() { + @Override + public IRubyObject allocate(Ruby runtime, RubyClass klazz) { + return new JRuby(runtime, klazz); + } + }); + } + + private RubyClass defineClass( + Ruby runtime, + RubyModule namespace, + String parentName, + String name, + Class javaImplementation, + ObjectAllocator allocator) { + final RubyClass parentClass = namespace.getClass(parentName); + + if (parentClass == null) { + System.out.println("not found " + parentName); + throw runtime.newRuntimeError(namespace.toString() + "::" + parentName + " is missing"); + } + + final RubyClass newClass = namespace.defineClassUnder(name, parentClass, allocator); + newClass.defineAnnotatedMethods(javaImplementation); + return newClass; + } + + // Facts: + // - all ivar reads are without any synchronisation of fences see + // https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/VariableAccessor.java#L110-110 + // - writes depend on UnsafeHolder.U, null -> SynchronizedVariableAccessor, !null -> StampedVariableAccessor + // SynchronizedVariableAccessor wraps with synchronized block, StampedVariableAccessor uses fullFence or + // volatilePut + // TODO (pitr 16-Sep-2015): what do we do in Java 9 ? 
+
+    // module JRubyAttrVolatile
+    public static class JRubyAttrVolatile {
+
+        // The volatile field is used as a memory barrier per the JVM memory
+        // model's happens-before semantics for volatile accesses. Any volatile
+        // field could have been used; a dedicated static field is an attempt
+        // to avoid code elimination.
+        private static volatile int volatileField;
+
+        @JRubyMethod(name = "full_memory_barrier", visibility = Visibility.PUBLIC, module = true)
+        public static IRubyObject fullMemoryBarrier(ThreadContext context, IRubyObject module) {
+            // Prevent reordering of ivar writes with publication of this instance
+            if (!FULL_FENCE) {
+                // Assuming the following volatile read and write are not
+                // eliminated, they simulate fullFence. If they are eliminated
+                // it will cause problems only on non-x86 platforms.
+                // http://shipilev.net/blog/2014/jmm-pragmatics/#_happens_before_test_your_understanding
+                final int volatileRead = volatileField;
+                volatileField = context.getLine();
+            } else {
+                UNSAFE.fullFence();
+            }
+            return context.nil;
+        }
+
+        @JRubyMethod(name = "instance_variable_get_volatile", visibility = Visibility.PUBLIC, module = true)
+        public static IRubyObject instanceVariableGetVolatile(
+                ThreadContext context,
+                IRubyObject module,
+                IRubyObject self,
+                IRubyObject name) {
+            // Ensure we see the latest value with loadFence
+            if (!FULL_FENCE) {
+                // piggybacking on volatile read, simulating loadFence
+                final int volatileRead = volatileField;
+                return ((RubyBasicObject) self).instance_variable_get(context, name);
+            } else {
+                UNSAFE.loadFence();
+                return ((RubyBasicObject) self).instance_variable_get(context, name);
+            }
+        }
+
+        @JRubyMethod(name = "instance_variable_set_volatile", visibility = Visibility.PUBLIC, module = true)
+        public static IRubyObject instanceVariableSetVolatile(
+                ThreadContext context,
+                IRubyObject module,
+                IRubyObject self,
+                IRubyObject name,
+                IRubyObject value) {
+            // Ensure we make the last update visible
+            if (!FULL_FENCE) {
+                // piggybacking on volatile write, simulating storeFence
+                final IRubyObject result = ((RubyBasicObject) self).instance_variable_set(name, value);
+                volatileField = context.getLine();
+                return result;
+            } else {
+                // JRuby uses StampedVariableAccessor, which calls fullFence,
+                // so no additional steps are needed.
+ // See https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/runtime/ivars/StampedVariableAccessor.java#L151-L159 + return ((RubyBasicObject) self).instance_variable_set(name, value); + } + } + } + + @JRubyClass(name = "Object", parent = "AbstractObject") + public static class Object extends RubyObject { + + public Object(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "AbstractLockableObject", parent = "Object") + public static class AbstractLockableObject extends Object { + + public AbstractLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + } + + @JRubyClass(name = "JRubyLockableObject", parent = "AbstractLockableObject") + public static class JRubyLockableObject extends AbstractLockableObject { + + public JRubyLockableObject(Ruby runtime, RubyClass metaClass) { + super(runtime, metaClass); + } + + @JRubyMethod(name = "synchronize", visibility = Visibility.PROTECTED) + public IRubyObject rubySynchronize(ThreadContext context, Block block) { + synchronized (this) { + return block.yield(context, null); + } + } + + @JRubyMethod(name = "ns_wait", optional = 1, visibility = Visibility.PROTECTED) + public IRubyObject nsWait(ThreadContext context, IRubyObject[] args) { + Ruby runtime = context.runtime; + if (args.length > 1) { + throw runtime.newArgumentError(args.length, 1); + } + Double timeout = null; + if (args.length > 0 && !args[0].isNil()) { + timeout = args[0].convertToFloat().getDoubleValue(); + if (timeout < 0) { + throw runtime.newArgumentError("time interval must be positive"); + } + } + if (Thread.interrupted()) { + throw runtime.newConcurrencyError("thread interrupted"); + } + boolean success = false; + try { + success = context.getThread().wait_timeout(this, timeout); + } catch (InterruptedException ie) { + throw runtime.newConcurrencyError(ie.getLocalizedMessage()); + } finally { + // An interrupt or timeout may have caused us to miss + // a notify that we consumed, so do another notify in + // case someone else is available to pick it up. 
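+                // A rough sketch (illustrative, not part of this gem) of the
+                // guarded-wait pattern these ns_* primitives exist to support,
+                // where `ready` is an assumed application-level condition:
+                //
+                //     synchronized (lock) {         // synchronize
+                //         while (!ready)
+                //             lock.wait(timeoutMs); // ns_wait
+                //     }
+                //     // elsewhere: lock.notify()   // ns_signal / ns_broadcast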
+                if (!success) {
+                    this.notify();
+                }
+            }
+            return this;
+        }
+
+        @JRubyMethod(name = "ns_signal", visibility = Visibility.PROTECTED)
+        public IRubyObject nsSignal(ThreadContext context) {
+            notify();
+            return this;
+        }
+
+        @JRubyMethod(name = "ns_broadcast", visibility = Visibility.PROTECTED)
+        public IRubyObject nsBroadcast(ThreadContext context) {
+            notifyAll();
+            return this;
+        }
+    }
+
+    @JRubyClass(name = "JRuby")
+    public static class JRuby extends RubyObject {
+        public JRuby(Ruby runtime, RubyClass metaClass) {
+            super(runtime, metaClass);
+        }
+
+        @JRubyMethod(name = "sleep_interruptibly", visibility = Visibility.PUBLIC, module = true)
+        public static IRubyObject sleepInterruptibly(final ThreadContext context, IRubyObject receiver, final Block block) {
+            try {
+                context.getThread().executeBlockingTask(new RubyThread.BlockingTask() {
+                    @Override
+                    public void run() throws InterruptedException {
+                        block.call(context);
+                    }
+
+                    @Override
+                    public void wakeup() {
+                        context.getThread().getNativeThread().interrupt();
+                    }
+                });
+            } catch (InterruptedException e) {
+                throw context.runtime.newThreadError("interrupted in Concurrent::Synchronization::JRuby.sleep_interruptibly");
+            }
+            return context.nil;
+        }
+    }
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java
new file mode 100644
index 0000000..e11e15a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java
@@ -0,0 +1,31 @@
+package com.concurrent_ruby.ext.jsr166e;
+
+import java.util.Map;
+import java.util.Set;
+
+public interface ConcurrentHashMap<K, V> {
+    /** Interface describing a function of one argument */
+    public interface Fun<A, T> { T apply(A a); }
+    /** Interface describing a function of two arguments */
+    public interface BiFun<A, B, T> { T apply(A a, B b); }
+
+    public V get(K key);
+    public V put(K key, V value);
+    public V putIfAbsent(K key, V value);
+    public V computeIfAbsent(K key, Fun<? super K, ? extends V> mf);
+    public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> mf);
+    public V compute(K key, BiFun<? super K, ? super V, ? extends V> mf);
+    public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> mf);
+    public boolean replace(K key, V oldVal, V newVal);
+    public V replace(K key, V value);
+    public boolean containsKey(K key);
+    public boolean remove(Object key, Object value);
+    public V remove(K key);
+    public void clear();
+    public Set<Map.Entry<K, V>> entrySet();
+    public int size();
+    public V getValueOrDefault(Object key, V defaultValue);
+
+    public boolean containsValue(V value);
+    public K findKey(V value);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java
new file mode 100644
index 0000000..86aa4eb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java
@@ -0,0 +1,3863 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+// This is based on the 1.79 version.
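+
+// An illustrative usage sketch (added for orientation, not part of the
+// original jsr166e source). The map below is a drop-in ConcurrentMap, and the
+// transitional Fun/BiFun interfaces from ConcurrentHashMap stand in for JDK8
+// lambdas:
+//
+//     ConcurrentHashMapV8<String, Integer> map =
+//         new ConcurrentHashMapV8<String, Integer>();
+//     map.put("a", 1);
+//     map.computeIfAbsent("b", new ConcurrentHashMap.Fun<String, Integer>() {
+//         public Integer apply(String k) { return 0; }     // value for new key
+//     });
+//     map.merge("a", 10, new ConcurrentHashMap.BiFun<Integer, Integer, Integer>() {
+//         public Integer apply(Integer oldV, Integer newV) { return oldV + newV; }
+//     });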
+ +package com.concurrent_ruby.ext.jsr166e; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *
+ * <p>
Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
+ * <p>
The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *
+ * <p>
A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
+ * <p>
A ConcurrentHashMapV8 can be used as a scalable frequency map (a
+ * form of histogram or multiset) by using {@link LongAdder} values
+ * and initializing via {@link #computeIfAbsent}. For example, to add
+ * a count to a {@code ConcurrentHashMapV8<String, LongAdder> freqs},
+ * you can use {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
+ *
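+ * <p>A minimal expansion of that example without lambda syntax, using the
+ * transitional {@code Fun} interface (illustrative; assumes {@code String}
+ * keys and an existing {@code word} variable):
+ *
+ * <pre> {@code
+ * ConcurrentHashMapV8<String, LongAdder> freqs =
+ *     new ConcurrentHashMapV8<String, LongAdder>();
+ * freqs.computeIfAbsent(word, new Fun<String, LongAdder>() {
+ *   public LongAdder apply(String k) { return new LongAdder(); }
+ * }).increment();
+ * }</pre>
+ *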
+ * <p>
This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *
+ * <p>
Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *
+ * <p>
ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *
+ * <ul>
+ * <li> forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action.</li>
+ *
+ * <li> search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.</li>
+ *
+ * <li> reduce: Accumulate each element.  The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative).  There are five variants:
+ *
+ * <ul>
+ *
+ * <li> Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)</li>
+ *
+ * <li> Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
+ *
+ * <li> Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
+ *
+ * </ul>
+ * </ul>
+ *
+ * <p>
The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
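+ * <p>For instance (an illustrative pairing only):
+ * <pre> {@code
+ * sum:     basis 0,              reduction (a, b) -> a + b
+ * minimum: basis Long.MAX_VALUE, reduction (a, b) -> Math.min(a, b)
+ * }</pre>
+ *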
+ * <p>
Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *
+ * <p>
Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
+ * <p>
Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *
+ * <p>
Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *
+ * <p>
All arguments to all task methods must be non-null. + * + *
+ * <p>
jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *
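+ * <p>Illustratively (JDK8 types shown for comparison only; they are not
+ * referenced by this class):
+ *
+ * <pre> {@code
+ * // transitional form used here      // expected JDK8 form
+ * Fun<K, V> mappingFunction;          // java.util.function.Function<K, V>
+ * BiFun<K, V, V> remappingFunction;   // java.util.function.BiFunction<K, V, V>
+ * }</pre>
+ *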
+ * <p>
+ * This class is a member of the
+ * <a href="{@docRoot}/../technotes/guides/collections/index.html">
+ * Java Collections Framework</a>.
+ *
+ * @since 1.5
+ * @author Doug Lea
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ */
+public class ConcurrentHashMapV8<K, V>
+    implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
+    private static final long serialVersionUID = 7249069246763182397L;
+
+    /**
+     * A partitionable iterator. A Spliterator can be traversed
+     * directly, but can also be partitioned (before traversal) by
+     * creating another Spliterator that covers a non-overlapping
+     * portion of the elements, and so may be amenable to parallel
+     * execution.
+     *
+     * <p>
This interface exports a subset of expected JDK8 + * functionality. + * + *
+     * <p>
Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *
+     * <pre>
+     * {@code ConcurrentHashMapV8<String, Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p) ? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int h = split >>> 1; h > 0; h >>>= 1)
+     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }</pre>
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+     */
+    static final int MOVED     = 0x80000000; // hash field for forwarding nodes
+    static final int LOCKED    = 0x40000000; // set/tested only as a bit
+    static final int WAITING   = 0xc0000000; // both bits set/tested together
+    static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash
+
+    /* ---------------- Fields -------------- */
+
+    /**
+     * The array of bins. Lazily initialized upon first insertion.
+     * Size is always a power of two. Accessed directly by iterators.
+     */
+    transient volatile Node[] table;
+
+    /**
+     * The counter maintaining number of elements.
+     */
+    private transient final LongAdder counter;
+
+    /**
+     * Table initialization and resizing control.  When negative, the
+     * table is being initialized or resized. Otherwise, when table is
+     * null, holds the initial table size to use upon creation, or 0
+     * for default. After initialization, holds the next element count
+     * value upon which to resize the table.
+     */
+    private transient volatile int sizeCtl;
+
+    // views
+    private transient KeySetView keySet;
+    private transient ValuesView values;
+    private transient EntrySetView entrySet;
+
+    /** For serialization compatibility. Null unless serialized; see below */
+    private Segment[] segments;
+
+    /* ---------------- Table element access -------------- */
+
+    /*
+     * Volatile access methods are used for table elements as well as
+     * elements of in-progress next table while resizing. Uses are
+     * null checked by callers, and implicitly bounds-checked, relying
+     * on the invariants that tab arrays have non-zero size, and all
+     * indices are masked with (tab.length - 1) which is never
+     * negative and always less than length. Note that, to be correct
+     * wrt arbitrary concurrency errors by users, bounds checks must
+     * operate on local variables, which accounts for some odd-looking
+     * inline assignments below.
+     */
+
+    static final Node tabAt(Node[] tab, int i) { // used by Iter
+        return (Node)UNSAFE.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
+    }
+
+    private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) {
+        return UNSAFE.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
+    }
+
+    private static final void setTabAt(Node[] tab, int i, Node v) {
+        UNSAFE.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
+    }
+
+    /* ---------------- Nodes -------------- */
+
+    /**
+     * Key-value entry. Nodes with a hash field of MOVED are special
+     * (forwarding or TreeBin nodes) and do not contain user keys or
+     * values. Otherwise keys are never null, and a null val field
+     * means the node is being deleted or created; readers must check
+     * val for non-null before using a key read from a node.
+     */
+    static class Node {
+        volatile int hash;
+        final Object key;
+        volatile Object val;
+        volatile Node next;
+
+        Node(int hash, Object key, Object val, Node next) {
+            this.hash = hash;
+            this.key = key;
+            this.val = val;
+            this.next = next;
+        }
+
+        /** CompareAndSet the hash field */
+        final boolean casHash(int cmp, int val) {
+            return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val);
+        }
+
+        /** The number of spins before blocking for a lock */
+        static final int MAX_SPINS =
+            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
+
+        /**
+         * Spins a while if LOCKED bit set and this node is the first
+         * of its bin, and then sets WAITING bits on hash field and
+         * blocks (once) if they are still set.  It is OK for this
+         * method to return even if lock is not available upon exit,
+         * which enables these simple single-wait mechanics.
+         *
+         * The corresponding signalling operation is performed within
+         * callers: Upon detecting that WAITING has been set when
+         * unlocking lock (via a failed CAS from non-waiting LOCKED
+         * state), unlockers acquire the sync lock and perform a
+         * notifyAll.
+         *
+         * The initial sanity check on tab and bounds is not currently
+         * necessary in the only usages of this method, but enables
+         * use in other future contexts.
+ */ + final void tryAwaitLock(Node[] tab, int i) { + if (tab != null && i >= 0 && i < tab.length) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + + // Unsafe mechanics for casHash + private static final sun.misc.Unsafe UNSAFE; + private static final long hashOffset; + + static { + try { + UNSAFE = getUnsafe(); + Class k = Node.class; + hashOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("hash")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) 
The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? 
p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? 
false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(Node[] tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (Node[] tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (Node[])ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
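+     *
+     * (Orientation note, inferred from the public callers rather than stated
+     * in the original comment: remove(k) maps to (k, null, null);
+     * remove(k, v) to (k, null, v); replace(k, v) to (k, v, null); and
+     * replace(k, oldV, newV) to (k, newV, oldV).)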
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
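+     *
+     * A compressed sketch of that shared skeleton (a paraphrase of the five
+     * steps above; the real methods below add TreeBin handling, locking and
+     * resize checks):
+     *
+     *   for (Node[] tab = table;;) {
+     *     if (tab == null)             tab = initTable();                      // 1
+     *     else if (bin i is empty)     { if (casTabAt(tab, i, null, n)) break; } // 2
+     *     else if (bin hash == MOVED)  tab = forwarded table;                  // 3, 4
+     *     else                         { lock bin; validate; scan; add or update; } // 5
+     *   }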
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + 
replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (Node[])fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } 
finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (Node[] tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + /** + * Returns a power of two table size for 
the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final Node[] initTable() { + Node[] tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + Node[] tab; int n, sc; + while ((tab = table) != null && + (n = tab.length) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + Node[] tab = table; int n; + if (tab == null || (n = tab.length) == 0) { + n = (sc > c) ? sc : c; + if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = new Node[n]; + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
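+     *
+     * The handoff is made visible to readers through forwarding nodes:
+     * once a bin has been transferred, its slot in the old table is set
+     * to a node with hash MOVED whose key is the new table, as in
+     *
+     *   Node fwd = new Node(MOVED, nextTab, null, null);
+     *   // ... transfer the nodes of bin i into nextTab ...
+     *   setTabAt(tab, i, fwd);  // readers follow fwd.key into nextTab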
+ * + * @return the new table + */ + private static final Node[] rebuild(Node[] tab) { + int n = tab.length; + Node[] nextTab = new Node[n << 1]; + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. + */ + private static void splitBin(Node[] nextTab, int i, Node e) { + int bit = nextTab.length >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. 
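+     * As in splitBin, for a table doubling from n to 2n bins each node
+     * either keeps its index i or moves to i + n, decided by one bit of
+     * its (spread) hash h:
+     *
+     *   int bit = nextTab.length >>> 1;   // the newly significant bit
+     *   int idx = ((h & bit) == 0) ? i : i + bit;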
+ */ + private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { + int bit = nextTab.length >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + Node[] tab = table; + while (tab != null && i < tab.length) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (Node[])fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. 
Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + Node[] tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; Node[] t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length; + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. + */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + Node[] t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length; + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length; + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (Node[])ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? 
i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. 
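+     *
+     * <p>For example (an illustrative sizing, following the arithmetic in
+     * the constructor below):
+     * <pre> {@code
+     * // (long)(1.0 + 16 / 0.75f) == 22, and tableSizeFor(22) == 32,
+     * // so this map starts with a 32-bin table:
+     * ConcurrentHashMapV8<String, String> m =
+     *     new ConcurrentHashMapV8<String, String>(16, 0.75f, 1);}</pre>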
+ * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
<p>
More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
<p>
The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *
<pre> {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     *   return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
<pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
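+     *
+     * <p>For instance, to memoize a lookup ({@code User}, {@code id} and
+     * {@code loadUser} are assumed application names, not part of this
+     * class):
+     * <pre> {@code
+     * ConcurrentHashMapV8<Long, User> cache =
+     *     new ConcurrentHashMapV8<Long, User>();
+     * User u = cache.computeIfAbsent(id, new Fun<Long, User>() {
+     *   public User apply(Long k) { return loadUser(k); }});}</pre>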
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
<pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
<pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
<pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *     return (v == null) ? msg : v + msg; }});}</pre>
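+     *
+     * <p>(The same append-or-create update can also be expressed with
+     * {@link #merge merge}, which supplies {@code msg} itself when the
+     * key is absent:)
+     * <pre> {@code
+     * map.merge(key, msg, new BiFun<String, String, String>() {
+     *   public String apply(String v, String m) { return v + m; }});}</pre>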
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
<pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *
<p>
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { + try { + if (table == null) { + init = true; + Node[] tab = new Node[n]; + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + Node[] tab = table; + for (int i = 0; i < tab.length; ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *
+     * <p>
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
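+     * <p>An illustrative sketch ({@code map} is assumed to be a
+     * {@code ConcurrentHashMapV8<String,Integer>}); the entries
+     * returned by this iterator support {@code setValue}, which
+     * writes back to the map with put semantics.
+     * <pre> {@code
+     * for (Map.Entry<String,Integer> e : map.entrySet()) {
+     *   if (e.getValue().intValue() > 0)
+     *     e.setValue(e.getValue() - 1);  // last write wins under races
+     * }}</pre>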
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long counterOffset; + private static final long sizeCtlOffset; + private static final long ABASE; + private static final int ASHIFT; + + static { + int ss; + try { + UNSAFE = getUnsafe(); + Class k = ConcurrentHashMapV8.class; + counterOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("counter")); + sizeCtlOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("sizeCtl")); + Class sc = Node[].class; + ABASE = UNSAFE.arrayBaseOffset(sc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java new file mode 100644 index 0000000..47a923c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java @@ -0,0 +1,203 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
+ * <p>
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
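+ * <p>An illustrative sketch (the class and member names are assumed
+ * for the example): a statistics counter bumped by many threads and
+ * read occasionally.
+ * <pre> {@code
+ * class HitCounter {
+ *   private final LongAdder hits = new LongAdder();
+ *   void record()      { hits.increment(); }  // cheap even when contended
+ *   long approxTotal() { return hits.sum(); } // not an atomic snapshot
+ * }}</pre>
+ *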
+ * <p>
This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *
+ * <p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java new file mode 100644 index 0000000..93a277f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java @@ -0,0 +1,342 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e; +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. 
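+     * <p>Callers use it in optimistic retry loops; a minimal sketch of
+     * an unconditional increment of {@code base}:
+     * <pre> {@code
+     * long b;
+     * do { b = base; } while (!casBase(b, b + 1)); // retry on contention
+     * }</pre>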
+ */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } + +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java new file mode 100644 index 0000000..b7fc5a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java @@ -0,0 +1,3800 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on the 1.79 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import org.jruby.RubyClass; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.exceptions.RaiseException; +import com.concurrent_ruby.ext.jsr166e.ConcurrentHashMap; +import com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.Collection; +import java.util.Hashtable; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Enumeration; +import java.util.ConcurrentModificationException; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; + +import java.io.Serializable; + +/** + * A hash table supporting full concurrency of retrievals and + * high expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * {@code Hashtable}. 
However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with {@code Hashtable} in programs that rely on its + * thread safety but not on its synchronization details. + * + *
+ * <p>
Retrieval operations (including {@code get}) generally do not + * block, so may overlap with update operations (including {@code put} + * and {@code remove}). Retrievals reflect the results of the most + * recently completed update operations holding upon their + * onset. (More formally, an update operation for a given key bears a + * happens-before relation with any (non-null) retrieval for + * that key reporting the updated value.) For aggregate operations + * such as {@code putAll} and {@code clear}, concurrent retrievals may + * reflect insertion or removal of only some entries. Similarly, + * Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw {@link + * ConcurrentModificationException}. However, iterators are designed + * to be used by only one thread at a time. Bear in mind that the + * results of aggregate status methods including {@code size}, {@code + * isEmpty}, and {@code containsValue} are typically useful only when + * a map is not undergoing concurrent updates in other threads. + * Otherwise the results of these methods reflect transient states + * that may be adequate for monitoring or estimation purposes, but not + * for program control. + * + *
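+ * <p>An illustrative sketch of the happens-before guarantee (field and
+ * method names are assumed): a reader that observes the published
+ * marker also observes the writer's earlier plain writes.
+ * <pre> {@code
+ * // writer thread:
+ * config = loadConfig();            // plain field write
+ * map.put("ready", Boolean.TRUE);   // publish
+ * // reader thread:
+ * if (map.get("ready") != null)
+ *   use(config);                    // sees the loadConfig() result
+ * }</pre>
+ *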
+ * <p>
The table is dynamically expanded when there are too many + * collisions (i.e., keys that have distinct hash codes but fall into + * the same slot modulo the table size), with the expected average + * effect of maintaining roughly two bins per mapping (corresponding + * to a 0.75 load factor threshold for resizing). There may be much + * variance around this average as mappings are added and removed, but + * overall, this maintains a commonly accepted time/space tradeoff for + * hash tables. However, resizing this or any other kind of hash + * table may be a relatively slow operation. When possible, it is a + * good idea to provide a size estimate as an optional {@code + * initialCapacity} constructor argument. An additional optional + * {@code loadFactor} constructor argument provides a further means of + * customizing initial table capacity by specifying the table density + * to be used in calculating the amount of space to allocate for the + * given number of elements. Also, for compatibility with previous + * versions of this class, constructors may optionally specify an + * expected {@code concurrencyLevel} as an additional hint for + * internal sizing. Note that using many keys with exactly the same + * {@code hashCode()} is a sure way to slow down performance of any + * hash table. + * + *
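+ * <p>For example (the expected size is assumed), pre-sizing avoids
+ * incremental resizing while bulk-loading:
+ * <pre> {@code
+ * // room for ~100000 mappings at 0.9 initial density;
+ * // the trailing concurrencyLevel argument is only a sizing hint
+ * ConcurrentHashMapV8<String,Long> m =
+ *   new ConcurrentHashMapV8<String,Long>(100000, 0.9f, 1);
+ * }</pre>
+ *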
+ * <p>
A {@link Set} projection of a ConcurrentHashMapV8 may be created + * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed + * (using {@link #keySet(Object)} when only keys are of interest, and the + * mapped values are (perhaps transiently) not used or all take the + * same mapping value. + * + *
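+ * <p>For example (identifier names are assumed), a concurrent
+ * set-of-strings whose backing map maps every key to the same value:
+ * <pre> {@code
+ * Set<String> seen = ConcurrentHashMapV8.<String>newKeySet();
+ * if (seen.add(id))      // true only for the first observer of id
+ *   process(id);
+ * }</pre>
+ *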
+ * <p>
A ConcurrentHashMapV8 can be used as scalable frequency map (a + * form of histogram or multiset) by using {@link LongAdder} values + * and initializing via {@link #computeIfAbsent}. For example, to add + * a count to a {@code ConcurrentHashMapV8 freqs}, you + * can use {@code freqs.computeIfAbsent(k -> new + * LongAdder()).increment();} + * + *
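+ * <p>Spelled out with the transitional nested interfaces declared by
+ * this class, and with the key argument made explicit (a sketch;
+ * {@code freqs} and {@code k} as above):
+ * <pre> {@code
+ * freqs.computeIfAbsent(k, new Fun<String,LongAdder>() {
+ *   public LongAdder apply(String key) { return new LongAdder(); }
+ * }).increment();
+ * }</pre>
+ *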
+ * <p>
This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *
+ * <p>
Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow {@code null} to be used as a key or value. + * + *
+ * <p>
ConcurrentHashMapV8s support parallel operations using the {@link + * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts + * are available in class {@link ForkJoinTasks}). These operations are + * designed to be safely, and often sensibly, applied even with maps + * that are being concurrently updated by other threads; for example, + * when computing a snapshot summary of the values in a shared + * registry. There are three kinds of operation, each with four + * forms, accepting functions with Keys, Values, Entries, and (Key, + * Value) arguments and/or return values. (The first three forms are + * also available via the {@link #keySet()}, {@link #values()} and + * {@link #entrySet()} views). Because the elements of a + * ConcurrentHashMapV8 are not ordered in any particular way, and may be + * processed in different orders in different parallel executions, the + * correctness of supplied functions should not depend on any + * ordering, or on any other objects or values that may transiently + * change while computation is in progress; and except for forEach + * actions, should ideally be side-effect-free. + * + *
+ * <p>
The concurrency properties of bulk operations follow + * from those of ConcurrentHashMapV8: Any non-null result returned + * from {@code get(key)} and related access methods bears a + * happens-before relation with the associated insertion or + * update. The result of any bulk operation reflects the + * composition of these per-element relations (but is not + * necessarily atomic with respect to the map as a whole unless it + * is somehow known to be quiescent). Conversely, because keys + * and values in the map are never null, null serves as a reliable + * atomic indicator of the current lack of any result. To + * maintain this property, null serves as an implicit basis for + * all non-scalar reduction operations. For the double, long, and + * int versions, the basis should be one that, when combined with + * any other value, returns that other value (more formally, it + * should be the identity element for the reduction). Most common + * reductions have these properties; for example, computing a sum + * with basis 0 or a minimum with basis MAX_VALUE. + * + *
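+ * <p>For instance (a sketch that assumes the jsr166e bulk method
+ * {@code reduceValuesToLong} is retained in this build), summing all
+ * values of a {@code ConcurrentHashMapV8<String,Long> m} with the
+ * addition identity {@code 0L} as basis:
+ * <pre> {@code
+ * long total = m.reduceValuesToLong(
+ *   new ObjectToLong<Long>() { public long apply(Long v) { return v; } },
+ *   0L,                                  // identity element for addition
+ *   new LongByLongToLong() {
+ *     public long apply(long a, long b) { return a + b; } });
+ * }</pre>
+ *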
+ * <p>
Search and transformation functions provided as arguments + * should similarly return null to indicate the lack of any result + * (in which case it is not used). In the case of mapped + * reductions, this also enables transformations to serve as + * filters, returning null (or, in the case of primitive + * specializations, the identity basis) if the element should not + * be combined. You can create compound transformations and + * filterings by composing them yourself under this "null means + * there is nothing there now" rule before using them in search or + * reduce operations. + * + *
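+ * <p>For example (a sketch that assumes the jsr166e {@code searchValues}
+ * method is retained in this build), a search function doubling as a
+ * filter by returning null for elements to skip:
+ * <pre> {@code
+ * Long negative = m.searchValues(new Fun<Long,Long>() {
+ *   public Long apply(Long v) {
+ *     return (v.longValue() < 0L) ? v : null; // null = nothing here
+ *   }
+ * });
+ * }</pre>
+ *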
+ * <p>
Methods accepting and/or returning Entry arguments maintain + * key-value associations. They may be useful for example when + * finding the key for the greatest value. Note that "plain" Entry + * arguments can be supplied using {@code new + * AbstractMap.SimpleEntry(k,v)}. + * + *
+ * <p>
Bulk operations may complete abruptly, throwing an + * exception encountered in the application of a supplied + * function. Bear in mind when handling such exceptions that other + * concurrently executing functions could also have thrown + * exceptions, or would have done so if the first exception had + * not occurred. + * + *
+ * <p>
Parallel speedups for bulk operations compared to sequential + * processing are common but not guaranteed. Operations involving + * brief functions on small maps may execute more slowly than + * sequential loops if the underlying work to parallelize the + * computation is more expensive than the computation itself. + * Similarly, parallelization may not lead to much actual parallelism + * if all processors are busy performing unrelated tasks. + * + *
+ * <p>
All arguments to all task methods must be non-null. + * + *
+ * <p>
jsr166e note: During transition, this class + * uses nested functional interfaces with different names but the + * same forms as those expected for JDK8. + * + *
+ * <p>
This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMapV8 + implements ConcurrentMap, Serializable, ConcurrentHashMap { + private static final long serialVersionUID = 7249069246763182397L; + + /** + * A partitionable iterator. A Spliterator can be traversed + * directly, but can also be partitioned (before traversal) by + * creating another Spliterator that covers a non-overlapping + * portion of the elements, and so may be amenable to parallel + * execution. + * + *
+     * <p>
This interface exports a subset of expected JDK8 + * functionality. + * + *
+     * <p>
Sample usage: Here is one (of the several) ways to compute + * the sum of the values held in a map using the ForkJoin + * framework. As illustrated here, Spliterators are well suited to + * designs in which a task repeatedly splits off half its work + * into forked subtasks until small enough to process directly, + * and then joins these subtasks. Variants of this style can also + * be used in completion-based designs. + * + *
+     * <pre>
+     * {@code ConcurrentHashMapV8<String,Long> m = ...
+     * // split as if have 8 * parallelism, for load balance
+     * int n = m.size();
+     * int p = aForkJoinPool.getParallelism() * 8;
+     * int split = (n < p)? n : p;
+     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
+     * // ...
+     * static class SumValues extends RecursiveTask<Long> {
+     *   final Spliterator<Long> s;
+     *   final int split;             // split while > 1
+     *   final SumValues nextJoin;    // records forked subtasks to join
+     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
+     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
+     *   }
+     *   public Long compute() {
+     *     long sum = 0;
+     *     SumValues subtasks = null; // fork subtasks
+     *     for (int h = split >>> 1; h > 0; h >>>= 1)  // avoid shadowing s
+     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
+     *     while (s.hasNext())        // directly process remaining elements
+     *       sum += s.next();
+     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
+     *       sum += t.join();         // collect subtask results
+     *     return sum;
+     *   }
+     * }
+     * }</pre>
+ */ + public static interface Spliterator extends Iterator { + /** + * Returns a Spliterator covering approximately half of the + * elements, guaranteed not to overlap with those subsequently + * returned by this Spliterator. After invoking this method, + * the current Spliterator will not produce any of + * the elements of the returned Spliterator, but the two + * Spliterators together will produce all of the elements that + * would have been produced by this Spliterator had this + * method not been called. The exact number of elements + * produced by the returned Spliterator is not guaranteed, and + * may be zero (i.e., with {@code hasNext()} reporting {@code + * false}) if this Spliterator cannot be further split. + * + * @return a Spliterator covering approximately half of the + * elements + * @throws IllegalStateException if this Spliterator has + * already commenced traversing elements + */ + Spliterator split(); + } + + + /* + * Overview: + * + * The primary design goal of this hash table is to maintain + * concurrent readability (typically method get(), but also + * iterators and related methods) while minimizing update + * contention. Secondary goals are to keep space consumption about + * the same or better than java.util.HashMap, and to support high + * initial insertion rates on an empty table by many threads. + * + * Each key-value mapping is held in a Node. Because Node fields + * can contain special values, they are defined using plain Object + * types. Similarly in turn, all internal methods that use them + * work off Object types. And similarly, so do the internal + * methods of auxiliary iterator and view classes. All public + * generic typed methods relay in/out of these internal methods, + * supplying null-checks and casts as needed. This also allows + * many of the public methods to be factored into a smaller number + * of internal methods (although sadly not so for the five + * variants of put-related operations). The validation-based + * approach explained below leads to a lot of code sprawl because + * retry-control precludes factoring into smaller methods. + * + * The table is lazily initialized to a power-of-two size upon the + * first insertion. Each bin in the table normally contains a + * list of Nodes (most often, the list has only zero or one Node). + * Table accesses require volatile/atomic reads, writes, and + * CASes. Because there is no other way to arrange this without + * adding further indirections, we use intrinsics + * (sun.misc.Unsafe) operations. The lists of nodes within bins + * are always accurately traversable under volatile reads, so long + * as lookups check hash code and non-nullness of value before + * checking key equality. + * + * We use the top two bits of Node hash fields for control + * purposes -- they are available anyway because of addressing + * constraints. As explained further below, these top bits are + * used as follows: + * 00 - Normal + * 01 - Locked + * 11 - Locked and may have a thread waiting for lock + * 10 - Node is a forwarding node + * + * The lower 30 bits of each Node's hash field contain a + * transformation of the key's hash code, except for forwarding + * nodes, for which the lower bits are zero (and so always have + * hash field == MOVED). + * + * Insertion (via put or its variants) of the first node in an + * empty bin is performed by just CASing it to the bin. This is + * by far the most common case for put operations under most + * key/hash distributions. 
Other update operations (insert, + * delete, and replace) require locks. We do not want to waste + * the space required to associate a distinct lock object with + * each bin, so instead use the first node of a bin list itself as + * a lock. Blocking support for these locks relies on the builtin + * "synchronized" monitors. However, we also need a tryLock + * construction, so we overlay these by using bits of the Node + * hash field for lock control (see above), and so normally use + * builtin monitors only for blocking and signalling using + * wait/notifyAll constructions. See Node.tryAwaitLock. + * + * Using the first node of a list as a lock does not by itself + * suffice though: When a node is locked, any update must first + * validate that it is still the first node after locking it, and + * retry if not. Because new nodes are always appended to lists, + * once a node is first in a bin, it remains first until deleted + * or the bin becomes invalidated (upon resizing). However, + * operations that only conditionally update may inspect nodes + * until the point of update. This is a converse of sorts to the + * lazy locking technique described by Herlihy & Shavit. + * + * The main disadvantage of per-bin locks is that other update + * operations on other nodes in a bin list protected by the same + * lock can stall, for example when user equals() or mapping + * functions take a long time. However, statistically, under + * random hash codes, this is not a common problem. Ideally, the + * frequency of nodes in bins follows a Poisson distribution + * (http://en.wikipedia.org/wiki/Poisson_distribution) with a + * parameter of about 0.5 on average, given the resizing threshold + * of 0.75, although with a large variance because of resizing + * granularity. Ignoring variance, the expected occurrences of + * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The + * first values are: + * + * 0: 0.60653066 + * 1: 0.30326533 + * 2: 0.07581633 + * 3: 0.01263606 + * 4: 0.00157952 + * 5: 0.00015795 + * 6: 0.00001316 + * 7: 0.00000094 + * 8: 0.00000006 + * more: less than 1 in ten million + * + * Lock contention probability for two threads accessing distinct + * elements is roughly 1 / (8 * #elements) under random hashes. + * + * Actual hash code distributions encountered in practice + * sometimes deviate significantly from uniform randomness. This + * includes the case when N > (1<<30), so some keys MUST collide. + * Similarly for dumb or hostile usages in which multiple keys are + * designed to have identical hash codes. Also, although we guard + * against the worst effects of this (see method spread), sets of + * hashes may differ only in bits that do not impact their bin + * index for a given power-of-two mask. So we use a secondary + * strategy that applies when the number of nodes in a bin exceeds + * a threshold, and at least one of the keys implements + * Comparable. These TreeBins use a balanced tree to hold nodes + * (a specialized form of red-black trees), bounding search time + * to O(log N). Each search step in a TreeBin is around twice as + * slow as in a regular list, but given that N cannot exceed + * (1<<64) (before running out of addresses) this bounds search + * steps, lock hold times, etc, to reasonable constants (roughly + * 100 nodes inspected per operation worst case) so long as keys + * are Comparable (which is very common -- String, Long, etc). 
+ * TreeBin nodes (TreeNodes) also maintain the same "next" + * traversal pointers as regular nodes, so can be traversed in + * iterators in the same way. + * + * The table is resized when occupancy exceeds a percentage + * threshold (nominally, 0.75, but see below). Only a single + * thread performs the resize (using field "sizeCtl", to arrange + * exclusion), but the table otherwise remains usable for reads + * and updates. Resizing proceeds by transferring bins, one by + * one, from the table to the next table. Because we are using + * power-of-two expansion, the elements from each bin must either + * stay at same index, or move with a power of two offset. We + * eliminate unnecessary node creation by catching cases where old + * nodes can be reused because their next fields won't change. On + * average, only about one-sixth of them need cloning when a table + * doubles. The nodes they replace will be garbage collectable as + * soon as they are no longer referenced by any reader thread that + * may be in the midst of concurrently traversing table. Upon + * transfer, the old table bin contains only a special forwarding + * node (with hash field "MOVED") that contains the next table as + * its key. On encountering a forwarding node, access and update + * operations restart, using the new table. + * + * Each bin transfer requires its bin lock. However, unlike other + * cases, a transfer can skip a bin if it fails to acquire its + * lock, and revisit it later (unless it is a TreeBin). Method + * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that + * have been skipped because of failure to acquire a lock, and + * blocks only if none are available (i.e., only very rarely). + * The transfer operation must also ensure that all accessible + * bins in both the old and new table are usable by any traversal. + * When there are no lock acquisition failures, this is arranged + * simply by proceeding from the last bin (table.length - 1) up + * towards the first. Upon seeing a forwarding node, traversals + * (see class Iter) arrange to move to the new table + * without revisiting nodes. However, when any node is skipped + * during a transfer, all earlier table bins may have become + * visible, so are initialized with a reverse-forwarding node back + * to the old table until the new ones are established. (This + * sometimes requires transiently locking a forwarding node, which + * is possible under the above encoding.) These more expensive + * mechanics trigger only when necessary. + * + * The traversal scheme also applies to partial traversals of + * ranges of bins (via an alternate Traverser constructor) + * to support partitioned aggregate operations. Also, read-only + * operations give up if ever forwarded to a null table, which + * provides support for shutdown-style clearing, which is also not + * currently implemented. + * + * Lazy table initialization minimizes footprint until first use, + * and also avoids resizings when the first operation is from a + * putAll, constructor with map argument, or deserialization. + * These cases attempt to override the initial capacity settings, + * but harmlessly fail to take effect in cases of races. + * + * The element count is maintained using a LongAdder, which avoids + * contention on updates but can encounter cache thrashing if read + * too frequently during concurrent access. 
To avoid reading so + * often, resizing is attempted either when a bin lock is + * contended, or upon adding to a bin already holding two or more + * nodes (checked before adding in the xIfAbsent methods, after + * adding in others). Under uniform hash distributions, the + * probability of this occurring at threshold is around 13%, + * meaning that only about 1 in 8 puts check threshold (and after + * resizing, many fewer do so). But this approximation has high + * variance for small table sizes, so we check on any collision + * for sizes <= 64. The bulk putAll operation further reduces + * contention by only committing count updates upon these size + * checks. + * + * Maintaining API and serialization compatibility with previous + * versions of this class introduces several oddities. Mainly: We + * leave untouched but unused constructor arguments refering to + * concurrencyLevel. We accept a loadFactor constructor argument, + * but apply it only to initial table capacity (which is the only + * time that we can guarantee to honor it.) We also declare an + * unused "Segment" class that is instantiated in minimal form + * only when serializing. + */ + + /* ---------------- Constants -------------- */ + + /** + * The largest possible table capacity. This value must be + * exactly 1<<30 to stay within Java array allocation and indexing + * bounds for power of two table sizes, and is further required + * because the top two bits of 32bit hash fields are used for + * control purposes. + */ + private static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The default initial table capacity. Must be a power of 2 + * (i.e., at least 1) and at most MAXIMUM_CAPACITY. + */ + private static final int DEFAULT_CAPACITY = 16; + + /** + * The largest possible (non-power of two) array size. + * Needed by toArray and related methods. + */ + static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + + /** + * The default concurrency level for this table. Unused but + * defined for compatibility with previous versions of this class. + */ + private static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The load factor for this table. Overrides of this value in + * constructors affect only the initial table capacity. The + * actual floating point value isn't normally used -- it is + * simpler to use expressions such as {@code n - (n >>> 2)} for + * the associated resizing threshold. + */ + private static final float LOAD_FACTOR = 0.75f; + + /** + * The buffer size for skipped bins during transfers. The + * value is arbitrary but should be large enough to avoid + * most locking stalls during resizes. + */ + private static final int TRANSFER_BUFFER_SIZE = 32; + + /** + * The bin count threshold for using a tree rather than list for a + * bin. The value reflects the approximate break-even point for + * using tree-based operations. + * Note that Doug's version defaults to 8, but when dealing with + * Ruby objects it is actually beneficial to avoid TreeNodes + * as long as possible as it usually means going into Ruby land. + */ + private static final int TREE_THRESHOLD = 16; + + /* + * Encodings for special uses of Node hash fields. See above for + * explanation. 
+ */ + static final int MOVED = 0x80000000; // hash field for forwarding nodes + static final int LOCKED = 0x40000000; // set/tested only as a bit + static final int WAITING = 0xc0000000; // both bits set/tested together + static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash + + /* ---------------- Fields -------------- */ + + /** + * The array of bins. Lazily initialized upon first insertion. + * Size is always a power of two. Accessed directly by iterators. + */ + transient volatile AtomicReferenceArray table; + + /** + * The counter maintaining number of elements. + */ + private transient LongAdder counter; + + /** + * Table initialization and resizing control. When negative, the + * table is being initialized or resized. Otherwise, when table is + * null, holds the initial table size to use upon creation, or 0 + * for default. After initialization, holds the next element count + * value upon which to resize the table. + */ + private transient volatile int sizeCtl; + + // views + private transient KeySetView keySet; + private transient ValuesView values; + private transient EntrySetView entrySet; + + /** For serialization compatibility. Null unless serialized; see below */ + private Segment[] segments; + + static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); + + /* ---------------- Table element access -------------- */ + + /* + * Volatile access methods are used for table elements as well as + * elements of in-progress next table while resizing. Uses are + * null checked by callers, and implicitly bounds-checked, relying + * on the invariants that tab arrays have non-zero size, and all + * indices are masked with (tab.length - 1) which is never + * negative and always less than length. Note that, to be correct + * wrt arbitrary concurrency errors by users, bounds checks must + * operate on local variables, which accounts for some odd-looking + * inline assignments below. + */ + + static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter + return tab.get(i); + } + + private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { + return tab.compareAndSet(i, c, v); + } + + private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { + tab.set(i, v); + } + + /* ---------------- Nodes -------------- */ + + /** + * Key-value entry. Note that this is never exported out as a + * user-visible Map.Entry (see MapEntry below). Nodes with a hash + * field of MOVED are special, and do not contain user keys or + * values. Otherwise, keys are never null, and null val fields + * indicate that a node is in the process of being deleted or + * created. For purposes of read-only access, a key may be read + * before a val, but can only be used after checking val to be + * non-null. + */ + static class Node { + volatile int hash; + final Object key; + volatile Object val; + volatile Node next; + + static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); + + Node(int hash, Object key, Object val, Node next) { + this.hash = hash; + this.key = key; + this.val = val; + this.next = next; + } + + /** CompareAndSet the hash field */ + final boolean casHash(int cmp, int val) { + return HASH_UPDATER.compareAndSet(this, cmp, val); + } + + /** The number of spins before blocking for a lock */ + static final int MAX_SPINS = + Runtime.getRuntime().availableProcessors() > 1 ? 
64 : 1; + + /** + * Spins a while if LOCKED bit set and this node is the first + * of its bin, and then sets WAITING bits on hash field and + * blocks (once) if they are still set. It is OK for this + * method to return even if lock is not available upon exit, + * which enables these simple single-wait mechanics. + * + * The corresponding signalling operation is performed within + * callers: Upon detecting that WAITING has been set when + * unlocking lock (via a failed CAS from non-waiting LOCKED + * state), unlockers acquire the sync lock and perform a + * notifyAll. + * + * The initial sanity check on tab and bounds is not currently + * necessary in the only usages of this method, but enables + * use in other future contexts. + */ + final void tryAwaitLock(AtomicReferenceArray tab, int i) { + if (tab != null && i >= 0 && i < tab.length()) { // sanity check + int r = ThreadLocalRandom.current().nextInt(); // randomize spins + int spins = MAX_SPINS, h; + while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { + if (spins >= 0) { + r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift + if (r >= 0 && --spins == 0) + Thread.yield(); // yield before block + } + else if (casHash(h, h | WAITING)) { + synchronized (this) { + if (tabAt(tab, i) == this && + (hash & WAITING) == WAITING) { + try { + wait(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + else + notifyAll(); // possibly won race vs signaller + } + break; + } + } + } + } + } + + /* ---------------- TreeBins -------------- */ + + /** + * Nodes for use in TreeBins + */ + static final class TreeNode extends Node { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + + TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { + super(hash, key, val, next); + this.parent = parent; + } + } + + /** + * A specialized form of red-black tree for use in bins + * whose size exceeds a threshold. + * + * TreeBins use a special form of comparison for search and + * related operations (which is the main reason we cannot use + * existing collections such as TreeMaps). TreeBins contain + * Comparable elements, but may contain others, as well as + * elements that are Comparable but not necessarily Comparable + * for the same T, so we cannot invoke compareTo among them. To + * handle this, the tree is ordered primarily by hash value, then + * by getClass().getName() order, and then by Comparator order + * among elements of the same class. On lookup at a node, if + * elements are not comparable or compare as 0, both left and + * right children may need to be searched in the case of tied hash + * values. (This corresponds to the full list search that would be + * necessary if all elements were non-Comparable and had tied + * hashes.) The red-black balancing code is updated from + * pre-jdk-collections + * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) + * based in turn on Cormen, Leiserson, and Rivest "Introduction to + * Algorithms" (CLR). + * + * TreeBins also maintain a separate locking discipline than + * regular bins. Because they are forwarded via special MOVED + * nodes at bin heads (which can never change once established), + * we cannot use those nodes as locks. Instead, TreeBin + * extends AbstractQueuedSynchronizer to support a simple form of + * read-write lock. 
For update operations and table validation, + * the exclusive form of lock behaves in the same way as bin-head + * locks. However, lookups use shared read-lock mechanics to allow + * multiple readers in the absence of writers. Additionally, + * these lookups do not ever block: While the lock is not + * available, they proceed along the slow traversal path (via + * next-pointers) until the lock becomes available or the list is + * exhausted, whichever comes first. (These cases are not fast, + * but maximize aggregate expected throughput.) The AQS mechanics + * for doing this are straightforward. The lock state is held as + * AQS getState(). Read counts are negative; the write count (1) + * is positive. There are no signalling preferences among readers + * and writers. Since we don't need to export full Lock API, we + * just override the minimal AQS methods and use them directly. + */ + static final class TreeBin extends AbstractQueuedSynchronizer { + private static final long serialVersionUID = 2249069246763182397L; + transient TreeNode root; // root of tree + transient TreeNode first; // head of next-pointer list + + /* AQS overrides */ + public final boolean isHeldExclusively() { return getState() > 0; } + public final boolean tryAcquire(int ignore) { + if (compareAndSetState(0, 1)) { + setExclusiveOwnerThread(Thread.currentThread()); + return true; + } + return false; + } + public final boolean tryRelease(int ignore) { + setExclusiveOwnerThread(null); + setState(0); + return true; + } + public final int tryAcquireShared(int ignore) { + for (int c;;) { + if ((c = getState()) > 0) + return -1; + if (compareAndSetState(c, c -1)) + return 1; + } + } + public final boolean tryReleaseShared(int ignore) { + int c; + do {} while (!compareAndSetState(c = getState(), c + 1)); + return c == -1; + } + + /** From CLR */ + private void rotateLeft(TreeNode p) { + if (p != null) { + TreeNode r = p.right, pp, rl; + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + root = r; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + } + + /** From CLR */ + private void rotateRight(TreeNode p) { + if (p != null) { + TreeNode l = p.left, pp, lr; + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + root = l; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + } + + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, Object k, TreeNode p) { + return getTreeNode(h, (RubyObject)k, p); + } + + /** + * Returns the TreeNode (or null if not found) for the given key + * starting at given root. + */ + @SuppressWarnings("unchecked") final TreeNode getTreeNode + (int h, RubyObject k, TreeNode p) { + RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); + while (p != null) { + int dir, ph; RubyObject pk; RubyClass pc; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = (RubyClass)pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 
0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pl, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + // try to continue iterating on the left side + else if ((pl = p.left) != null && h <= pl.hash) + dir = -1; + else // no matching node found + return null; + } + } + } + else + dir = (h < ph) ? -1 : 1; + p = (dir > 0) ? p.right : p.left; + } + return null; + } + + int rubyCompare(RubyObject l, RubyObject r) { + ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); + IRubyObject result; + try { + result = l.callMethod(context, "<=>", r); + } catch (RaiseException e) { + // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys + if (context.runtime.getNoMethodError().isInstance(e.getException())) { + return 0; + } + throw e; + } + + return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); + } + + /** + * Wrapper for getTreeNode used by CHM.get. Tries to obtain + * read-lock to call getTreeNode, but during failure to get + * lock, searches along next links. + */ + final Object getValue(int h, Object k) { + Node r = null; + int c = getState(); // Must read lock state first + for (Node e = first; e != null; e = e.next) { + if (c <= 0 && compareAndSetState(c, c - 1)) { + try { + r = getTreeNode(h, k, root); + } finally { + releaseShared(0); + } + break; + } + else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { + r = e; + break; + } + else + c = getState(); + } + return r == null ? null : r.val; + } + + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, Object k, Object v) { + return putTreeNode(h, (RubyObject)k, v); + } + + /** + * Finds or adds a node. + * @return null if added + */ + @SuppressWarnings("unchecked") final TreeNode putTreeNode + (int h, RubyObject k, Object v) { + RubyClass c = k.getMetaClass(); + boolean kNotComparable = !k.respondsTo("<=>"); + TreeNode pp = root, p = null; + int dir = 0; + while (pp != null) { // find existing node or leaf to insert at + int ph; RubyObject pk; RubyClass pc; + p = pp; + if ((ph = p.hash) == h) { + if ((pk = (RubyObject)p.key) == k || k.equals(pk)) + return p; + if (c != (pc = pk.getMetaClass()) || + kNotComparable || + (dir = rubyCompare(k, pk)) == 0) { + dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); + if (dir == 0) { // if still stuck, need to check both sides + TreeNode r = null, pr; + // try to recurse on the right + if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) + return r; + else // continue descending down the left subtree + dir = -1; + } + } + } + else + dir = (h < ph) ? -1 : 1; + pp = (dir > 0) ? p.right : p.left; + } + + TreeNode f = first; + TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); + if (p == null) + root = x; + else { // attach and rebalance; adapted from CLR + TreeNode xp, xpp; + if (f != null) + f.prev = x; + if (dir <= 0) + p.left = x; + else + p.right = x; + x.red = true; + while (x != null && (xp = x.parent) != null && xp.red && + (xpp = xp.parent) != null) { + TreeNode xppl = xpp.left; + if (xp == xppl) { + TreeNode y = xpp.right; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + rotateLeft(x = xp); + xpp = (xp = x.parent) == null ? 
null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateRight(xpp); + } + } + } + } + else { + TreeNode y = xppl; + if (y != null && y.red) { + y.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + rotateRight(x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + rotateLeft(xpp); + } + } + } + } + } + TreeNode r = root; + if (r != null && r.red) + r.red = false; + } + return null; + } + + /** + * Removes the given node, that must be present before this + * call. This is messier than typical red-black deletion code + * because we cannot swap the contents of an interior node + * with a leaf successor that is pinned by "next" pointers + * that are accessible independently of lock. So instead we + * swap the tree linkages. + */ + final void deleteTreeNode(TreeNode p) { + TreeNode next = (TreeNode)p.next; // unlink traversal pointers + TreeNode pred = p.prev; + if (pred == null) + first = next; + else + pred.next = next; + if (next != null) + next.prev = pred; + TreeNode replacement; + TreeNode pl = p.left; + TreeNode pr = p.right; + if (pl != null && pr != null) { + TreeNode s = pr, sl; + while ((sl = s.left) != null) // find successor + s = sl; + boolean c = s.red; s.red = p.red; p.red = c; // swap colors + TreeNode sr = s.right; + TreeNode pp = p.parent; + if (s == pr) { // p was s's direct parent + p.parent = s; + s.right = p; + } + else { + TreeNode sp = s.parent; + if ((p.parent = sp) != null) { + if (s == sp.left) + sp.left = p; + else + sp.right = p; + } + if ((s.right = pr) != null) + pr.parent = s; + } + p.left = null; + if ((p.right = sr) != null) + sr.parent = p; + if ((s.left = pl) != null) + pl.parent = s; + if ((s.parent = pp) == null) + root = s; + else if (p == pp.left) + pp.left = s; + else + pp.right = s; + replacement = sr; + } + else + replacement = (pl != null) ? pl : pr; + TreeNode pp = p.parent; + if (replacement == null) { + if (pp == null) { + root = null; + return; + } + replacement = p; + } + else { + replacement.parent = pp; + if (pp == null) + root = replacement; + else if (p == pp.left) + pp.left = replacement; + else + pp.right = replacement; + p.left = p.right = p.parent = null; + } + if (!p.red) { // rebalance, from CLR + TreeNode x = replacement; + while (x != null) { + TreeNode xp, xpl; + if (x.red || (xp = x.parent) == null) { + x.red = false; + break; + } + if (x == (xpl = xp.left)) { + TreeNode sib = xp.right; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateLeft(xp); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + sib.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + sib.red = true; + rotateRight(sib); + sib = (xp = x.parent) == null ? null : xp.right; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sr = sib.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + rotateLeft(xp); + } + x = root; + } + } + } + else { // symmetric + TreeNode sib = xpl; + if (sib != null && sib.red) { + sib.red = false; + xp.red = true; + rotateRight(xp); + sib = (xp = x.parent) == null ? 
null : xp.left; + } + if (sib == null) + x = xp; + else { + TreeNode sl = sib.left, sr = sib.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + sib.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + sib.red = true; + rotateLeft(sib); + sib = (xp = x.parent) == null ? null : xp.left; + } + if (sib != null) { + sib.red = (xp == null) ? false : xp.red; + if ((sl = sib.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + rotateRight(xp); + } + x = root; + } + } + } + } + } + if (p == replacement && (pp = p.parent) != null) { + if (p == pp.left) // detach pointers + pp.left = null; + else if (p == pp.right) + pp.right = null; + p.parent = null; + } + } + } + + /* ---------------- Collision reduction methods -------------- */ + + /** + * Spreads higher bits to lower, and also forces top 2 bits to 0. + * Because the table uses power-of-two masking, sets of hashes + * that vary only in bits above the current mask will always + * collide. (Among known examples are sets of Float keys holding + * consecutive whole numbers in small tables.) To counter this, + * we apply a transform that spreads the impact of higher bits + * downward. There is a tradeoff between speed, utility, and + * quality of bit-spreading. Because many common sets of hashes + * are already reasonably distributed across bits (so don't benefit + * from spreading), and because we use trees to handle large sets + * of collisions in bins, we don't need excessively high quality. + */ + private static final int spread(int h) { + h ^= (h >>> 18) ^ (h >>> 12); + return (h ^ (h >>> 10)) & HASH_BITS; + } + + /** + * Replaces a list bin with a tree bin. Call only when locked. + * Fails to replace if the given key is non-comparable or table + * is, or needs, resizing. + */ + private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { + if ((key instanceof Comparable) && + (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { + TreeBin t = new TreeBin(); + for (Node e = tabAt(tab, index); e != null; e = e.next) + t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); + setTabAt(tab, index, new Node(MOVED, t, null, null)); + } + } + + /* ---------------- Internal access and update methods -------------- */ + + /** Implementation for get and containsKey */ + private final Object internalGet(Object k) { + int h = spread(k.hashCode()); + retry: for (AtomicReferenceArray tab = table; tab != null;) { + Node e, p; Object ek, ev; int eh; // locals to read fields once + for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { + if ((eh = e.hash) == MOVED) { + if ((ek = e.key) instanceof TreeBin) // search TreeBin + return ((TreeBin)ek).getValue(h, k); + else { // restart with new table + tab = (AtomicReferenceArray)ek; + continue retry; + } + } + else if ((eh & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + } + break; + } + return null; + } + + /** + * Implementation for the four public remove/replace methods: + * Replaces node value with v, conditional upon match of cv if + * non-null. If resulting value is null, delete. 
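+     *
+     * The four public remove/replace methods map onto these arguments
+     * as follows (see the public operations section below):
+     *
+     * <pre> {@code
+     * remove(k)          -> internalReplace(k, null, null)
+     * remove(k, v)       -> internalReplace(k, null, v) != null
+     * replace(k, v)      -> internalReplace(k, v, null)
+     * replace(k, ov, nv) -> internalReplace(k, nv, ov) != null
+     * }</pre>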
+ */ + private final Object internalReplace(Object k, Object v, Object cv) { + int h = spread(k.hashCode()); + Object oldVal = null; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null || + (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) + break; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + boolean deleted = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) { + Object pv = p.val; + if (cv == null || cv == pv || cv.equals(pv)) { + oldVal = pv; + if ((p.val = v) == null) { + deleted = true; + t.deleteTreeNode(p); + } + } + } + } + } finally { + t.release(0); + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) != h && f.next == null) // precheck + break; // rules out possible existence + else if ((fh & LOCKED) != 0) { + checkForResize(); // try resizing if can't get lock + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + boolean validated = false; + boolean deleted = false; + try { + if (tabAt(tab, i) == f) { + validated = true; + for (Node e = f, pred = null;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + ((ev = e.val) != null) && + ((ek = e.key) == k || k.equals(ek))) { + if (cv == null || cv == ev || cv.equals(ev)) { + oldVal = ev; + if ((e.val = v) == null) { + deleted = true; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + } + break; + } + pred = e; + if ((e = e.next) == null) + break; + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (validated) { + if (deleted) + counter.add(-1L); + break; + } + } + } + return oldVal; + } + + /* + * Internal versions of the six insertion methods, each a + * little more complicated than the last. All have + * the same basic structure as the first (internalPut): + * 1. If table uninitialized, create + * 2. If bin empty, try to CAS new node + * 3. If bin stale, use new table + * 4. if bin converted to TreeBin, validate and relay to TreeBin methods + * 5. Lock and validate; if valid, scan and add or update + * + * The others interweave other checks and/or alternative actions: + * * Plain put checks for and performs resize after insertion. + * * putIfAbsent prescans for mapping without lock (and fails to add + * if present), which also makes pre-emptive resize checks worthwhile. + * * computeIfAbsent extends form used in putIfAbsent with additional + * mechanics to deal with, calls, potential exceptions and null + * returns from function call. + * * compute uses the same function-call mechanics, but without + * the prescans + * * merge acts as putIfAbsent in the absent case, but invokes the + * update function if present + * * putAll attempts to pre-allocate enough table space + * and more lazily performs count updates and checks. + * + * Someday when details settle down a bit more, it might be worth + * some factoring to reduce sprawl. 
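+     *
+     * A compressed sketch of that shared structure (abstracted from
+     * internalPut below; counting and signalling elided):
+     *
+     *   for (AtomicReferenceArray tab = table;;) {
+     *     if (tab == null)
+     *       tab = initTable();                          // 1. create
+     *     else if ((f = tabAt(tab, i)) == null) {
+     *       if (casTabAt(tab, i, null, new Node(h, k, v, null)))
+     *         break;                                    // 2. CAS into empty bin
+     *     }
+     *     else if (f.hash == MOVED)
+     *       ...                                         // 3/4. stale table or TreeBin
+     *     else if (f.casHash(fh, fh | LOCKED)) {
+     *       try { ... scan, add or update ... }
+     *       finally { ... unlock, signal waiters ... }  // 5. lock and validate
+     *     }
+     *   }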
+ */ + + /** Implementation for put */ + private final Object internalPut(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; // no lock when adding to empty bin + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) { + oldVal = p.val; + p.val = v; + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { // needed in case equals() throws + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { // unlock and signal if needed + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for putIfAbsent */ + private final Object internalPutIfAbsent(Object k, Object v) { + int h = spread(k.hashCode()); + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + Object oldVal = null; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 2; + TreeNode p = t.putTreeNode(h, k, v); + if (p != null) + oldVal = p.val; + } + } finally { + t.release(0); + } + if (count != 0) { + if (oldVal != null) + return oldVal; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { // at least 2 nodes -- search and maybe resize + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + Object oldVal = null; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + oldVal = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + last.next = new 
Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (oldVal != null) + return oldVal; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + counter.add(1L); + if (count > 1) + checkForResize(); + return null; + } + + /** Implementation for computeIfAbsent */ + private final Object internalComputeIfAbsent(K k, + Fun mf) { + int h = spread(k.hashCode()); + Object val = null; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + count = 1; + try { + if ((val = mf.apply(k)) != null) + node.val = val; + } finally { + if (val == null) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean added = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + val = p.val; + else if ((val = mf.apply(k)) != null) { + added = true; + count = 2; + t.putTreeNode(h, k, val); + } + } + } finally { + t.release(0); + } + if (count != 0) { + if (!added) + return val; + break; + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & HASH_BITS) == h && (fv = f.val) != null && + ((fk = f.key) == k || k.equals(fk))) + return fv; + else { + Node g = f.next; + if (g != null) { + for (Node e = g;;) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) + return ev; + if ((e = e.next) == null) { + checkForResize(); + break; + } + } + } + if (((fh = f.hash) & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { + boolean added = false; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = ev; + break; + } + Node last = e; + if ((e = e.next) == null) { + if ((val = mf.apply(k)) != null) { + added = true; + last.next = new Node(h, k, val, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (!added) + return val; + if (tab.length() <= 64) + count = 2; + break; + } + } + } + } + if (val != null) { + counter.add(1L); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for compute */ + @SuppressWarnings("unchecked") private final Object internalCompute + (K k, boolean onlyIfPresent, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + Node f; int i, fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (onlyIfPresent) + break; + Node node = new Node(fh = h | LOCKED, k, null, null); + if (casTabAt(tab, i, null, node)) { + 
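+                        // The node was published with hash h|LOCKED, so this
+                        // thread holds the bin lock until the casHash back to h
+                        // in the finally block below; a concurrent reader that
+                        // sees the still-null val treats the key as absent.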
try { + count = 1; + if ((val = mf.apply(k, null)) != null) { + node.val = val; + delta = 1; + } + } finally { + if (delta == 0) + setTabAt(tab, i, null); + if (!node.casHash(fh, h)) { + node.hash = h; + synchronized (node) { node.notifyAll(); }; + } + } + } + if (count != 0) + break; + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + Object pv; + if (p == null) { + if (onlyIfPresent) + break; + pv = null; + } else + pv = p.val; + if ((val = mf.apply(k, (V)pv)) != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply(k, (V)ev); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + } + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for merge */ + @SuppressWarnings("unchecked") private final Object internalMerge + (K k, V v, BiFun mf) { + int h = spread(k.hashCode()); + Object val = null; + int delta = 0; + int count = 0; + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk, fv; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + delta = 1; + val = v; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + count = 1; + TreeNode p = t.getTreeNode(h, k, t.root); + val = (p == null) ? 
v : mf.apply((V)p.val, v); + if (val != null) { + if (p != null) + p.val = val; + else { + count = 2; + delta = 1; + t.putTreeNode(h, k, val); + } + } + else if (p != null) { + delta = -1; + t.deleteTreeNode(p); + } + } + } finally { + t.release(0); + } + if (count != 0) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f, pred = null;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + val = mf.apply((V)ev, v); + if (val != null) + e.val = val; + else { + delta = -1; + Node en = e.next; + if (pred != null) + pred.next = en; + else + setTabAt(tab, i, en); + } + break; + } + pred = e; + if ((e = e.next) == null) { + val = v; + pred.next = new Node(h, k, val, null); + delta = 1; + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (tab.length() <= 64) + count = 2; + break; + } + } + } + if (delta != 0) { + counter.add((long)delta); + if (count > 1) + checkForResize(); + } + return val; + } + + /** Implementation for putAll */ + private final void internalPutAll(Map m) { + tryPresize(m.size()); + long delta = 0L; // number of uncommitted additions + boolean npe = false; // to throw exception on exit for nulls + try { // to clean up counts on other exceptions + for (Map.Entry entry : m.entrySet()) { + Object k, v; + if (entry == null || (k = entry.getKey()) == null || + (v = entry.getValue()) == null) { + npe = true; + break; + } + int h = spread(k.hashCode()); + for (AtomicReferenceArray tab = table;;) { + int i; Node f; int fh; Object fk; + if (tab == null) + tab = initTable(); + else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ + if (casTabAt(tab, i, null, new Node(h, k, v, null))) { + ++delta; + break; + } + } + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + TreeNode p = t.getTreeNode(h, k, t.root); + if (p != null) + p.val = v; + else { + t.putTreeNode(h, k, v); + ++delta; + } + } + } finally { + t.release(0); + } + if (validated) + break; + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); + delta = 0L; + checkForResize(); + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + int count = 0; + try { + if (tabAt(tab, i) == f) { + count = 1; + for (Node e = f;; ++count) { + Object ek, ev; + if ((e.hash & HASH_BITS) == h && + (ev = e.val) != null && + ((ek = e.key) == k || k.equals(ek))) { + e.val = v; + break; + } + Node last = e; + if ((e = e.next) == null) { + ++delta; + last.next = new Node(h, k, v, null); + if (count >= TREE_THRESHOLD) + replaceWithTreeBin(tab, i, k); + break; + } + } + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (count != 0) { + if (count > 1) { + counter.add(delta); + delta = 0L; + checkForResize(); + } + break; + } + } + } + } + } finally { + if (delta != 0) + counter.add(delta); + } + if (npe) + throw new NullPointerException(); + } + + /* ---------------- Table Initialization and Resizing -------------- */ + + 
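+    /*
+     * A worked example of the bit-smearing performed by tableSizeFor
+     * below, assuming a requested capacity c of 100:
+     *
+     *   n = 99             0110 0011
+     *   n |= n >>> 1       0111 0011
+     *   n |= n >>> 2       0111 1111
+     *   n |= n >>> 4,8,16  (no further change)
+     *   n + 1 = 128, the smallest power of two >= 100
+     */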
/** + * Returns a power of two table size for the given desired capacity. + * See Hackers Delight, sec 3.2 + */ + private static final int tableSizeFor(int c) { + int n = c - 1; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; + } + + /** + * Initializes table, using the size recorded in sizeCtl. + */ + private final AtomicReferenceArray initTable() { + AtomicReferenceArray tab; int sc; + while ((tab = table) == null) { + if ((sc = sizeCtl) < 0) + Thread.yield(); // lost initialization race; just spin + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if ((tab = table) == null) { + int n = (sc > 0) ? sc : DEFAULT_CAPACITY; + tab = table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + break; + } + } + return tab; + } + + /** + * If table is too small and not already resizing, creates next + * table and transfers bins. Rechecks occupancy after a transfer + * to see if another resize is already needed because resizings + * are lagging additions. + */ + private final void checkForResize() { + AtomicReferenceArray tab; int n, sc; + while ((tab = table) != null && + (n = tab.length()) < MAXIMUM_CAPACITY && + (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (tab == table) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + + /** + * Tries to presize table to accommodate the given number of elements. + * + * @param size number of elements (doesn't need to be perfectly accurate) + */ + private final void tryPresize(int size) { + int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : + tableSizeFor(size + (size >>> 1) + 1); + int sc; + while ((sc = sizeCtl) >= 0) { + AtomicReferenceArray tab = table; int n; + if (tab == null || (n = tab.length()) == 0) { + n = (sc > c) ? sc : c; + if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = new AtomicReferenceArray(n); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + } + } + else if (c <= sc || n >= MAXIMUM_CAPACITY) + break; + else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == tab) { + table = rebuild(tab); + sc = (n << 1) - (n >>> 1); + } + } finally { + sizeCtl = sc; + } + } + } + } + + /* + * Moves and/or copies the nodes in each bin to new table. See + * above for explanation. 
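+     *
+     * The forwarding handshake that readers rely on during a transfer
+     * is the MOVED case already visible in internalGet above: a
+     * transferred bin's head is replaced by a node with hash MOVED
+     * whose key is the next table, so a reader simply re-dispatches:
+     *
+     *   if ((eh = e.hash) == MOVED) {
+     *     if ((ek = e.key) instanceof TreeBin)
+     *       return ((TreeBin)ek).getValue(h, k); // search tree bin
+     *     else {
+     *       tab = (AtomicReferenceArray)ek;      // follow forwarder
+     *       continue retry;                      // restart in new table
+     *     }
+     *   }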
+ * + * @return the new table + */ + private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { + int n = tab.length(); + AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); + Node fwd = new Node(MOVED, nextTab, null, null); + int[] buffer = null; // holds bins to revisit; null until needed + Node rev = null; // reverse forwarder; null until needed + int nbuffered = 0; // the number of bins in buffer list + int bufferIndex = 0; // buffer index of current buffered bin + int bin = n - 1; // current non-buffered bin or -1 if none + + for (int i = bin;;) { // start upwards sweep + int fh; Node f; + if ((f = tabAt(tab, i)) == null) { + if (bin >= 0) { // Unbuffered; no lock needed (or available) + if (!casTabAt(tab, i, f, fwd)) + continue; + } + else { // transiently use a locked forwarding node + Node g = new Node(MOVED|LOCKED, nextTab, null, null); + if (!casTabAt(tab, i, f, g)) + continue; + setTabAt(nextTab, i, null); + setTabAt(nextTab, i + n, null); + setTabAt(tab, i, fwd); + if (!g.casHash(MOVED|LOCKED, MOVED)) { + g.hash = MOVED; + synchronized (g) { g.notifyAll(); } + } + } + } + else if ((fh = f.hash) == MOVED) { + Object fk = f.key; + if (fk instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + boolean validated = false; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + validated = true; + splitTreeBin(nextTab, i, t); + setTabAt(tab, i, fwd); + } + } finally { + t.release(0); + } + if (!validated) + continue; + } + } + else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { + boolean validated = false; + try { // split to lo and hi lists; copying as needed + if (tabAt(tab, i) == f) { + validated = true; + splitBin(nextTab, i, f); + setTabAt(tab, i, fwd); + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + if (!validated) + continue; + } + else { + if (buffer == null) // initialize buffer for revisits + buffer = new int[TRANSFER_BUFFER_SIZE]; + if (bin < 0 && bufferIndex > 0) { + int j = buffer[--bufferIndex]; + buffer[bufferIndex] = i; + i = j; // swap with another bin + continue; + } + if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { + f.tryAwaitLock(tab, i); + continue; // no other options -- block + } + if (rev == null) // initialize reverse-forwarder + rev = new Node(MOVED, tab, null, null); + if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) + continue; // recheck before adding to list + buffer[nbuffered++] = i; + setTabAt(nextTab, i, rev); // install place-holders + setTabAt(nextTab, i + n, rev); + } + + if (bin > 0) + i = --bin; + else if (buffer != null && nbuffered > 0) { + bin = -1; + i = buffer[bufferIndex = --nbuffered]; + } + else + return nextTab; + } + } + + /** + * Splits a normal bin with list headed by e into lo and hi parts; + * installs in given table. 
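+     *
+     * A small worked example: when the table doubles from 16 to 32
+     * bins, bit == 16, and a node currently in bin i = (h & 15)
+     * stays in bin i when (h & 16) == 0 (the "lo" list) or moves to
+     * bin i + 16 when (h & 16) != 0 (the "hi" list); the trailing
+     * run of nodes that all agree on that bit is reused, not copied.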
+ */ + private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { + int bit = nextTab.length() >>> 1; // bit to split on + int runBit = e.hash & bit; + Node lastRun = e, lo = null, hi = null; + for (Node p = e.next; p != null; p = p.next) { + int b = p.hash & bit; + if (b != runBit) { + runBit = b; + lastRun = p; + } + } + if (runBit == 0) + lo = lastRun; + else + hi = lastRun; + for (Node p = e; p != lastRun; p = p.next) { + int ph = p.hash & HASH_BITS; + Object pk = p.key, pv = p.val; + if ((ph & bit) == 0) + lo = new Node(ph, pk, pv, lo); + else + hi = new Node(ph, pk, pv, hi); + } + setTabAt(nextTab, i, lo); + setTabAt(nextTab, i + bit, hi); + } + + /** + * Splits a tree bin into lo and hi parts; installs in given table. + */ + private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { + int bit = nextTab.length() >>> 1; + TreeBin lt = new TreeBin(); + TreeBin ht = new TreeBin(); + int lc = 0, hc = 0; + for (Node e = t.first; e != null; e = e.next) { + int h = e.hash & HASH_BITS; + Object k = e.key, v = e.val; + if ((h & bit) == 0) { + ++lc; + lt.putTreeNode(h, k, v); + } + else { + ++hc; + ht.putTreeNode(h, k, v); + } + } + Node ln, hn; // throw away trees if too small + if (lc <= (TREE_THRESHOLD >>> 1)) { + ln = null; + for (Node p = lt.first; p != null; p = p.next) + ln = new Node(p.hash, p.key, p.val, ln); + } + else + ln = new Node(MOVED, lt, null, null); + setTabAt(nextTab, i, ln); + if (hc <= (TREE_THRESHOLD >>> 1)) { + hn = null; + for (Node p = ht.first; p != null; p = p.next) + hn = new Node(p.hash, p.key, p.val, hn); + } + else + hn = new Node(MOVED, ht, null, null); + setTabAt(nextTab, i + bit, hn); + } + + /** + * Implementation for clear. Steps through each bin, removing all + * nodes. + */ + private final void internalClear() { + long delta = 0L; // negative number of deletions + int i = 0; + AtomicReferenceArray tab = table; + while (tab != null && i < tab.length()) { + int fh; Object fk; + Node f = tabAt(tab, i); + if (f == null) + ++i; + else if ((fh = f.hash) == MOVED) { + if ((fk = f.key) instanceof TreeBin) { + TreeBin t = (TreeBin)fk; + t.acquire(0); + try { + if (tabAt(tab, i) == f) { + for (Node p = t.first; p != null; p = p.next) { + if (p.val != null) { // (currently always true) + p.val = null; + --delta; + } + } + t.first = null; + t.root = null; + ++i; + } + } finally { + t.release(0); + } + } + else + tab = (AtomicReferenceArray)fk; + } + else if ((fh & LOCKED) != 0) { + counter.add(delta); // opportunistically update count + delta = 0L; + f.tryAwaitLock(tab, i); + } + else if (f.casHash(fh, fh | LOCKED)) { + try { + if (tabAt(tab, i) == f) { + for (Node e = f; e != null; e = e.next) { + if (e.val != null) { // (currently always true) + e.val = null; + --delta; + } + } + setTabAt(tab, i, null); + ++i; + } + } finally { + if (!f.casHash(fh | LOCKED, fh)) { + f.hash = fh; + synchronized (f) { f.notifyAll(); }; + } + } + } + } + if (delta != 0) + counter.add(delta); + } + + /* ----------------Table Traversal -------------- */ + + /** + * Encapsulates traversal for methods such as containsValue; also + * serves as a base class for other iterators and bulk tasks. + * + * At each step, the iterator snapshots the key ("nextKey") and + * value ("nextVal") of a valid node (i.e., one that, at point of + * snapshot, has a non-null user value). 
Because val fields can + * change (including to null, indicating deletion), field nextVal + * might not be accurate at point of use, but still maintains the + * weak consistency property of holding a value that was once + * valid. To support iterator.remove, the nextKey field is not + * updated (nulled out) when the iterator cannot advance. + * + * Internal traversals directly access these fields, as in: + * {@code while (it.advance() != null) { process(it.nextKey); }} + * + * Exported iterators must track whether the iterator has advanced + * (in hasNext vs next) (by setting/checking/nulling field + * nextVal), and then extract key, value, or key-value pairs as + * return values of next(). + * + * The iterator visits once each still-valid node that was + * reachable upon iterator construction. It might miss some that + * were added to a bin after the bin was visited, which is OK wrt + * consistency guarantees. Maintaining this property in the face + * of possible ongoing resizes requires a fair amount of + * bookkeeping state that is difficult to optimize away amidst + * volatile accesses. Even so, traversal maintains reasonable + * throughput. + * + * Normally, iteration proceeds bin-by-bin traversing lists. + * However, if the table has been resized, then all future steps + * must traverse both the bin at the current index as well as at + * (index + baseSize); and so on for further resizings. To + * paranoically cope with potential sharing by users of iterators + * across threads, iteration terminates if a bounds checks fails + * for a table read. + * + * This class extends ForkJoinTask to streamline parallel + * iteration in bulk operations (see BulkTask). This adds only an + * int of space overhead, which is close enough to negligible in + * cases where it is not needed to not worry about it. Because + * ForkJoinTask is Serializable, but iterators need not be, we + * need to add warning suppressions. + */ + @SuppressWarnings("serial") static class Traverser { + final ConcurrentHashMapV8 map; + Node next; // the next entry to use + K nextKey; // cached key field of next + V nextVal; // cached val field of next + AtomicReferenceArray tab; // current table; updated if resized + int index; // index of bin to use next + int baseIndex; // current index of initial table + int baseLimit; // index bound for initial table + int baseSize; // initial table size + + /** Creates iterator for all entries in the table. */ + Traverser(ConcurrentHashMapV8 map) { + this.map = map; + } + + /** Creates iterator for split() methods */ + Traverser(Traverser it) { + ConcurrentHashMapV8 m; AtomicReferenceArray t; + if ((m = this.map = it.map) == null) + t = null; + else if ((t = it.tab) == null && // force parent tab initialization + (t = it.tab = m.table) != null) + it.baseLimit = it.baseSize = t.length(); + this.tab = t; + this.baseSize = it.baseSize; + it.baseLimit = this.index = this.baseIndex = + ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; + } + + /** + * Advances next; returns nextVal or null if terminated. + * See above for explanation. 
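+     *
+     * For example (a sketch): if the iterator was created when the
+     * table had 8 bins and the table has since grown to 32, then
+     * baseSize is 8 and baseIndex 2 visits bins 2, 10, 18 and 26
+     * before advancing to baseIndex 3.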
+ */ + final V advance() { + Node e = next; + V ev = null; + outer: do { + if (e != null) // advance past used/skipped node + e = e.next; + while (e == null) { // get to next non-null bin + ConcurrentHashMapV8 m; + AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals + if ((t = tab) != null) + n = t.length(); + else if ((m = map) != null && (t = tab = m.table) != null) + n = baseLimit = baseSize = t.length(); + else + break outer; + if ((b = baseIndex) >= baseLimit || + (i = index) < 0 || i >= n) + break outer; + if ((e = tabAt(t, i)) != null && e.hash == MOVED) { + if ((ek = e.key) instanceof TreeBin) + e = ((TreeBin)ek).first; + else { + tab = (AtomicReferenceArray)ek; + continue; // restarts due to null val + } + } // visit upper slots if present + index = (i += baseSize) < n ? i : (baseIndex = b + 1); + } + nextKey = (K) e.key; + } while ((ev = (V) e.val) == null); // skip deleted or special nodes + next = e; + return nextVal = ev; + } + + public final void remove() { + Object k = nextKey; + if (k == null && (advance() == null || (k = nextKey) == null)) + throw new IllegalStateException(); + map.internalReplace(k, null, null); + } + + public final boolean hasNext() { + return nextVal != null || advance() != null; + } + + public final boolean hasMoreElements() { return hasNext(); } + public final void setRawResult(Object x) { } + public R getRawResult() { return null; } + public boolean exec() { return true; } + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the default initial table size (16). + */ + public ConcurrentHashMapV8() { + this.counter = new LongAdder(); + } + + /** + * Creates a new, empty map with an initial table size + * accommodating the specified number of elements without the need + * to dynamically resize. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + */ + public ConcurrentHashMapV8(int initialCapacity) { + if (initialCapacity < 0) + throw new IllegalArgumentException(); + int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? + MAXIMUM_CAPACITY : + tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new map with the same mappings as the given map. + * + * @param m the map + */ + public ConcurrentHashMapV8(Map m) { + this.counter = new LongAdder(); + this.sizeCtl = DEFAULT_CAPACITY; + internalPutAll(m); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}) and + * initial table density ({@code loadFactor}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. 
+ * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, 1); + } + + /** + * Creates a new, empty map with an initial table size based on + * the given number of elements ({@code initialCapacity}), table + * density ({@code loadFactor}), and number of concurrently + * updating threads ({@code concurrencyLevel}). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements, + * given the specified load factor. + * @param loadFactor the load factor (table density) for + * establishing the initial table size + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation may use this value as + * a sizing hint. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive + */ + public ConcurrentHashMapV8(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (initialCapacity < concurrencyLevel) // Use at least as many bins + initialCapacity = concurrencyLevel; // as estimated threads + long size = (long)(1.0 + (long)initialCapacity / loadFactor); + int cap = (size >= (long)MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : tableSizeFor((int)size); + this.counter = new LongAdder(); + this.sizeCtl = cap; + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @return the new set + */ + public static KeySetView newKeySet() { + return new KeySetView(new ConcurrentHashMapV8(), + Boolean.TRUE); + } + + /** + * Creates a new {@link Set} backed by a ConcurrentHashMapV8 + * from the given type to {@code Boolean.TRUE}. + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative + * @return the new set + */ + public static KeySetView newKeySet(int initialCapacity) { + return new KeySetView(new ConcurrentHashMapV8(initialCapacity), + Boolean.TRUE); + } + + /** + * {@inheritDoc} + */ + public boolean isEmpty() { + return counter.sum() <= 0L; // ignore transient negative values + } + + /** + * {@inheritDoc} + */ + public int size() { + long n = counter.sum(); + return ((n < 0L) ? 0 : + (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : + (int)n); + } + + /** + * Returns the number of mappings. This method should be used + * instead of {@link #size} because a ConcurrentHashMapV8 may + * contain more mappings than can be represented as an int. The + * value returned is a snapshot; the actual count may differ if + * there are ongoing concurrent insertions or removals. + * + * @return the number of mappings + */ + public long mappingCount() { + long n = counter.sum(); + return (n < 0L) ? 0L : n; // ignore transient negative values + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *

More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V get(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalGet(key); + } + + /** + * Returns the value to which the specified key is mapped, + * or the given defaultValue if this map contains no mapping for the key. + * + * @param key the key + * @param defaultValue the value to return if this map contains + * no mapping for the given key + * @return the mapping for the key, if present; else the defaultValue + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { + if (key == null) + throw new NullPointerException(); + V v = (V) internalGet(key); + return v == null ? defaultValue : v; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return {@code true} if and only if the specified object + * is a key in this table, as determined by the + * {@code equals} method; {@code false} otherwise + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(Object key) { + if (key == null) + throw new NullPointerException(); + return internalGet(key) != null; + } + + /** + * Returns {@code true} if this map maps one or more keys to the + * specified value. Note: This method may require a full traversal + * of the map, and is much slower than method {@code containsKey}. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return true; + } + return false; + } + + public K findKey(Object value) { + if (value == null) + throw new NullPointerException(); + Object v; + Traverser it = new Traverser(this); + while ((v = it.advance()) != null) { + if (v == value || value.equals(v)) + return it.nextKey; + } + return null; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + * + * @param value a value to search for + * @return {@code true} if and only if some key maps to the + * {@code value} argument in this table as + * determined by the {@code equals} method; + * {@code false} otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *

The value can be retrieved by calling the {@code get} method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V put(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPut(key, value); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalPutIfAbsent(key, value); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + internalPutAll(m); + } + + /** + * If the specified key is not already associated with a value, + * computes its value using the given mappingFunction and enters + * it into the map unless null. This is equivalent to + *

 {@code
+     * if (map.containsKey(key))
+     *   return map.get(key);
+     * value = mappingFunction.apply(key);
+     * if (value != null)
+     *   map.put(key, value);
+     * return value;}</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null} no mapping is recorded. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and no mapping is recorded. Some + * attempted update operations on this map by other threads may be + * blocked while computation is in progress, so the computation + * should be short and simple, and must not attempt to update any + * other mappings of this Map. The most appropriate usage is to + * construct a new object serving as an initial mapped value, or + * memoized result, as in: + * + *
<pre> {@code
+     * map.computeIfAbsent(key, new Fun<K, V>() {
+     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
+ * + * @param key key with which the specified value is to be associated + * @param mappingFunction the function to compute a value + * @return the current (existing or computed) value associated with + * the specified key, or null if the computed value is null + * @throws NullPointerException if the specified key or mappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the mappingFunction does so, + * in which case the mapping is left unestablished + */ + @SuppressWarnings("unchecked") public V computeIfAbsent + (K key, Fun mappingFunction) { + if (key == null || mappingFunction == null) + throw new NullPointerException(); + return (V)internalComputeIfAbsent(key, mappingFunction); + } + + /** + * If the given key is present, computes a new mapping value given a key and + * its current mapped value. This is equivalent to + *
<pre> {@code
+     *   if (map.containsKey(key)) {
+     *     value = remappingFunction.apply(key, map.get(key));
+     *     if (value != null)
+     *       map.put(key, value);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V computeIfPresent + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, true, remappingFunction); + } + + /** + * Computes a new mapping value given a key and + * its current mapped value (or {@code null} if there is no current + * mapping). This is equivalent to + *
<pre> {@code
+     *   value = remappingFunction.apply(key, map.get(key));
+     *   if (value != null)
+     *     map.put(key, value);
+     *   else
+     *     map.remove(key);
+     * }</pre>
+ * + * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. For example, + * to either create or append new messages to a value mapping: + * + *
<pre> {@code
+     * Map<Key, String> map = ...;
+     * final String msg = ...;
+     * map.compute(key, new BiFun<Key, String, String>() {
+     *   public String apply(Key k, String v) {
+     *     return (v == null) ? msg : v + msg; }});}</pre>
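+     *
+     * A similar sketch for keeping a running count per key (assuming
+     * {@code Integer} values):
+     *
+     * <pre> {@code
+     * map.compute(word, new BiFun<String, Integer, Integer>() {
+     *   public Integer apply(String k, Integer v) {
+     *     return (v == null) ? 1 : v + 1; }});}</pre>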
+ * + * @param key key with which the specified value is to be associated + * @param remappingFunction the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified key or remappingFunction + * is null + * @throws IllegalStateException if the computation detectably + * attempts a recursive update to this map that would + * otherwise never complete + * @throws RuntimeException or Error if the remappingFunction does so, + * in which case the mapping is unchanged + */ + @SuppressWarnings("unchecked") public V compute + (K key, BiFun remappingFunction) { + if (key == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalCompute(key, false, remappingFunction); + } + + /** + * If the specified key is not already associated + * with a value, associate it with the given value. + * Otherwise, replace the value with the results of + * the given remapping function. This is equivalent to: + *
<pre> {@code
+     *   if (!map.containsKey(key))
+     *     map.put(key, value);
+     *   else {
+     *     newValue = remappingFunction.apply(map.get(key), value);
+     *     if (newValue != null)
+     *       map.put(key, newValue);
+     *     else
+     *       map.remove(key);
+     *   }
+     * }</pre>
+ * except that the action is performed atomically. If the + * function returns {@code null}, the mapping is removed. If the + * function itself throws an (unchecked) exception, the exception + * is rethrown to its caller, and the current mapping is left + * unchanged. Some attempted update operations on this map by + * other threads may be blocked while computation is in progress, + * so the computation should be short and simple, and must not + * attempt to update any other mappings of this Map. + */ + @SuppressWarnings("unchecked") public V merge + (K key, V value, BiFun remappingFunction) { + if (key == null || value == null || remappingFunction == null) + throw new NullPointerException(); + return (V)internalMerge(key, value, remappingFunction); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") public V remove(Object key) { + if (key == null) + throw new NullPointerException(); + return (V)internalReplace(key, null, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + if (key == null) + throw new NullPointerException(); + if (value == null) + return false; + return internalReplace(key, null, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + if (key == null || oldValue == null || newValue == null) + throw new NullPointerException(); + return internalReplace(key, newValue, oldValue) != null; + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or {@code null} if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") public V replace(K key, V value) { + if (key == null || value == null) + throw new NullPointerException(); + return (V)internalReplace(key, value, null); + } + + /** + * Removes all of the mappings from this map. + */ + public void clear() { + internalClear(); + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. + * + * @return the set view + */ + public KeySetView keySet() { + KeySetView ks = keySet; + return (ks != null) ? ks : (keySet = new KeySetView(this, null)); + } + + /** + * Returns a {@link Set} view of the keys in this map, using the + * given common mapped value for any additions (i.e., {@link + * Collection#add} and {@link Collection#addAll}). This is of + * course only appropriate if it is acceptable to use the same + * value for all additions from this view. + * + * @param mappedValue the mapped value to use for any + * additions. + * @return the set view + * @throws NullPointerException if the mappedValue is null + */ + public KeySetView keySet(V mappedValue) { + if (mappedValue == null) + throw new NullPointerException(); + return new KeySetView(this, mappedValue); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. 
+ * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. + */ + public ValuesView values() { + ValuesView vs = values; + return (vs != null) ? vs : (values = new ValuesView(this)); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the {@code Iterator.remove}, {@code Set.remove}, + * {@code removeAll}, {@code retainAll}, and {@code clear} + * operations. It does not support the {@code add} or + * {@code addAll} operations. + * + *
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + EntrySetView es = entrySet; + return (es != null) ? es : (entrySet = new EntrySetView(this)); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(this); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the keys in this map. + * + * @return a partitionable iterator of the keys in this map + */ + public Spliterator keySpliterator() { + return new KeyIterator(this); + } + + /** + * Returns a partitionable iterator of the values in this map. + * + * @return a partitionable iterator of the values in this map + */ + public Spliterator valueSpliterator() { + return new ValueIterator(this); + } + + /** + * Returns a partitionable iterator of the entries in this map. + * + * @return a partitionable iterator of the entries in this map + */ + public Spliterator> entrySpliterator() { + return new EntryIterator(this); + } + + /** + * Returns the hash code value for this {@link Map}, i.e., + * the sum of, for each key-value pair in the map, + * {@code key.hashCode() ^ value.hashCode()}. + * + * @return the hash code value for this map + */ + public int hashCode() { + int h = 0; + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + h += it.nextKey.hashCode() ^ v.hashCode(); + } + return h; + } + + /** + * Returns a string representation of this map. The string + * representation consists of a list of key-value mappings (in no + * particular order) enclosed in braces ("{@code {}}"). Adjacent + * mappings are separated by the characters {@code ", "} (comma + * and space). Each key-value mapping is rendered as the key + * followed by an equals sign ("{@code =}") followed by the + * associated value. + * + * @return a string representation of this map + */ + public String toString() { + Traverser it = new Traverser(this); + StringBuilder sb = new StringBuilder(); + sb.append('{'); + Object v; + if ((v = it.advance()) != null) { + for (;;) { + Object k = it.nextKey; + sb.append(k == this ? "(this Map)" : k); + sb.append('='); + sb.append(v == this ? "(this Map)" : v); + if ((v = it.advance()) == null) + break; + sb.append(',').append(' '); + } + } + return sb.append('}').toString(); + } + + /** + * Compares the specified object with this map for equality. + * Returns {@code true} if the given object is a map with the same + * mappings as this map. This operation may return misleading + * results if either map is concurrently modified during execution + * of this method. 
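Since every view iterator in this class is weakly consistent, plain traversal is safe while other threads mutate the map; a short sketch (java.util.Map assumed imported):

    ConcurrentHashMapV8<String, Long> map = new ConcurrentHashMapV8<String, Long>();
    map.put("a", 1L);
    map.put("b", 2L);
    // Never throws ConcurrentModificationException, even if entries are
    // inserted or removed concurrently; each element is visited at most once.
    for (Map.Entry<String, Long> e : map.entrySet()) {
        System.out.println(e.getKey() + "=" + e.getValue());
    }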
+ * + * @param o object to be compared for equality with this map + * @return {@code true} if the specified object is equal to this map + */ + public boolean equals(Object o) { + if (o != this) { + if (!(o instanceof Map)) + return false; + Map m = (Map) o; + Traverser it = new Traverser(this); + Object val; + while ((val = it.advance()) != null) { + Object v = m.get(it.nextKey); + if (v == null || (v != val && !v.equals(val))) + return false; + } + for (Map.Entry e : m.entrySet()) { + Object mk, mv, v; + if ((mk = e.getKey()) == null || + (mv = e.getValue()) == null || + (v = internalGet(mk)) == null || + (mv != v && !mv.equals(v))) + return false; + } + } + return true; + } + + /* ----------------Iterators -------------- */ + + @SuppressWarnings("serial") static final class KeyIterator extends Traverser + implements Spliterator, Enumeration { + KeyIterator(ConcurrentHashMapV8 map) { super(map); } + KeyIterator(Traverser it) { + super(it); + } + public KeyIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new KeyIterator(this); + } + @SuppressWarnings("unchecked") public final K next() { + if (nextVal == null && advance() == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return (K) k; + } + + public final K nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class ValueIterator extends Traverser + implements Spliterator, Enumeration { + ValueIterator(ConcurrentHashMapV8 map) { super(map); } + ValueIterator(Traverser it) { + super(it); + } + public ValueIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new ValueIterator(this); + } + + @SuppressWarnings("unchecked") public final V next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + nextVal = null; + return (V) v; + } + + public final V nextElement() { return next(); } + } + + @SuppressWarnings("serial") static final class EntryIterator extends Traverser + implements Spliterator> { + EntryIterator(ConcurrentHashMapV8 map) { super(map); } + EntryIterator(Traverser it) { + super(it); + } + public EntryIterator split() { + if (nextKey != null) + throw new IllegalStateException(); + return new EntryIterator(this); + } + + @SuppressWarnings("unchecked") public final Map.Entry next() { + Object v; + if ((v = nextVal) == null && (v = advance()) == null) + throw new NoSuchElementException(); + Object k = nextKey; + nextVal = null; + return new MapEntry((K)k, (V)v, map); + } + } + + /** + * Exported Entry for iterators + */ + static final class MapEntry implements Map.Entry { + final K key; // non-null + V val; // non-null + final ConcurrentHashMapV8 map; + MapEntry(K key, V val, ConcurrentHashMapV8 map) { + this.key = key; + this.val = val; + this.map = map; + } + public final K getKey() { return key; } + public final V getValue() { return val; } + public final int hashCode() { return key.hashCode() ^ val.hashCode(); } + public final String toString(){ return key + "=" + val; } + + public final boolean equals(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + (k == key || k.equals(key)) && + (v == val || v.equals(val))); + } + + /** + * Sets our entry's value and writes through to the map. The + * value to return is somewhat arbitrary here. 
Since we do not + * necessarily track asynchronous changes, the most recent + * "previous" value could be different from what we return (or + * could even have been removed in which case the put will + * re-establish). We do not and cannot guarantee more. + */ + public final V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = val; + val = value; + map.put(key, value); + return v; + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Stripped-down version of helper class used in previous version, + * declared for the sake of serialization compatibility + */ + static class Segment implements Serializable { + private static final long serialVersionUID = 2249069246763182397L; + final float loadFactor; + Segment(float lf) { this.loadFactor = lf; } + } + + /** + * Saves the state of the {@code ConcurrentHashMapV8} instance to a + * stream (i.e., serializes it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + if (segments == null) { // for serialization compatibility + segments = (Segment[]) + new Segment[DEFAULT_CONCURRENCY_LEVEL]; + for (int i = 0; i < segments.length; ++i) + segments[i] = new Segment(LOAD_FACTOR); + } + s.defaultWriteObject(); + Traverser it = new Traverser(this); + Object v; + while ((v = it.advance()) != null) { + s.writeObject(it.nextKey); + s.writeObject(v); + } + s.writeObject(null); + s.writeObject(null); + segments = null; // throw away + } + + /** + * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + */ + @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + this.segments = null; // unneeded + // initialize transient final field + this.counter = new LongAdder(); + + // Create all nodes, then place in table once size is known + long size = 0L; + Node p = null; + for (;;) { + K k = (K) s.readObject(); + V v = (V) s.readObject(); + if (k != null && v != null) { + int h = spread(k.hashCode()); + p = new Node(h, k, v, p); + ++size; + } + else + break; + } + if (p != null) { + boolean init = false; + int n; + if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) + n = MAXIMUM_CAPACITY; + else { + int sz = (int)size; + n = tableSizeFor(sz + (sz >>> 1) + 1); + } + int sc = sizeCtl; + boolean collide = false; + if (n > sc && + SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { + try { + if (table == null) { + init = true; + AtomicReferenceArray tab = new AtomicReferenceArray(n); + int mask = n - 1; + while (p != null) { + int j = p.hash & mask; + Node next = p.next; + Node q = p.next = tabAt(tab, j); + setTabAt(tab, j, p); + if (!collide && q != null && q.hash == p.hash) + collide = true; + p = next; + } + table = tab; + counter.add(size); + sc = n - (n >>> 2); + } + } finally { + sizeCtl = sc; + } + if (collide) { // rescan and convert to TreeBins + AtomicReferenceArray tab = table; + for (int i = 0; i < tab.length(); ++i) { + int c = 0; + for (Node e = tabAt(tab, i); e != null; e = e.next) { + if (++c > TREE_THRESHOLD && + (e.key instanceof Comparable)) { + replaceWithTreeBin(tab, i, e.key); + break; + } + } + } + } + } + if (!init) { // Can only happen if unsafely published. 
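+                // Reached only when this instance was unsafely published and
+                // another thread initialized the table first; fall back to
+                // inserting the remaining deserialized nodes one at a time.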
+ while (p != null) { + internalPut(p.key, p.val); + p = p.next; + } + } + } + } + + + // ------------------------------------------------------- + + // Sams + /** Interface describing a void action of one argument */ + public interface Action { void apply(A a); } + /** Interface describing a void action of two arguments */ + public interface BiAction { void apply(A a, B b); } + /** Interface describing a function of one argument */ + public interface Generator { T apply(); } + /** Interface describing a function mapping its argument to a double */ + public interface ObjectToDouble { double apply(A a); } + /** Interface describing a function mapping its argument to a long */ + public interface ObjectToLong { long apply(A a); } + /** Interface describing a function mapping its argument to an int */ + public interface ObjectToInt {int apply(A a); } + /** Interface describing a function mapping two arguments to a double */ + public interface ObjectByObjectToDouble { double apply(A a, B b); } + /** Interface describing a function mapping two arguments to a long */ + public interface ObjectByObjectToLong { long apply(A a, B b); } + /** Interface describing a function mapping two arguments to an int */ + public interface ObjectByObjectToInt {int apply(A a, B b); } + /** Interface describing a function mapping a double to a double */ + public interface DoubleToDouble { double apply(double a); } + /** Interface describing a function mapping a long to a long */ + public interface LongToLong { long apply(long a); } + /** Interface describing a function mapping an int to an int */ + public interface IntToInt { int apply(int a); } + /** Interface describing a function mapping two doubles to a double */ + public interface DoubleByDoubleToDouble { double apply(double a, double b); } + /** Interface describing a function mapping two longs to a long */ + public interface LongByLongToLong { long apply(long a, long b); } + /** Interface describing a function mapping two ints to an int */ + public interface IntByIntToInt { int apply(int a, int b); } + + + /* ----------------Views -------------- */ + + /** + * Base class for views. + */ + static abstract class CHMView { + final ConcurrentHashMapV8 map; + CHMView(ConcurrentHashMapV8 map) { this.map = map; } + + /** + * Returns the map backing this view. + * + * @return the map backing this view + */ + public ConcurrentHashMapV8 getMap() { return map; } + + public final int size() { return map.size(); } + public final boolean isEmpty() { return map.isEmpty(); } + public final void clear() { map.clear(); } + + // implementations below rely on concrete classes supplying these + abstract public Iterator iterator(); + abstract public boolean contains(Object o); + abstract public boolean remove(Object o); + + private static final String oomeMsg = "Required array size too large"; + + public final Object[] toArray() { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int n = (int)sz; + Object[] r = new Object[n]; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = it.next(); + } + return (i == n) ? 
r : Arrays.copyOf(r, i); + } + + @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { + long sz = map.mappingCount(); + if (sz > (long)(MAX_ARRAY_SIZE)) + throw new OutOfMemoryError(oomeMsg); + int m = (int)sz; + T[] r = (a.length >= m) ? a : + (T[])java.lang.reflect.Array + .newInstance(a.getClass().getComponentType(), m); + int n = r.length; + int i = 0; + Iterator it = iterator(); + while (it.hasNext()) { + if (i == n) { + if (n >= MAX_ARRAY_SIZE) + throw new OutOfMemoryError(oomeMsg); + if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) + n = MAX_ARRAY_SIZE; + else + n += (n >>> 1) + 1; + r = Arrays.copyOf(r, n); + } + r[i++] = (T)it.next(); + } + if (a == r && i < n) { + r[i] = null; // null-terminate + return r; + } + return (i == n) ? r : Arrays.copyOf(r, i); + } + + public final int hashCode() { + int h = 0; + for (Iterator it = iterator(); it.hasNext();) + h += it.next().hashCode(); + return h; + } + + public final String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('['); + Iterator it = iterator(); + if (it.hasNext()) { + for (;;) { + Object e = it.next(); + sb.append(e == this ? "(this Collection)" : e); + if (!it.hasNext()) + break; + sb.append(',').append(' '); + } + } + return sb.append(']').toString(); + } + + public final boolean containsAll(Collection c) { + if (c != this) { + for (Iterator it = c.iterator(); it.hasNext();) { + Object e = it.next(); + if (e == null || !contains(e)) + return false; + } + } + return true; + } + + public final boolean removeAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + public final boolean retainAll(Collection c) { + boolean modified = false; + for (Iterator it = iterator(); it.hasNext();) { + if (!c.contains(it.next())) { + it.remove(); + modified = true; + } + } + return modified; + } + + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in + * which additions may optionally be enabled by mapping to a + * common value. This class cannot be directly instantiated. See + * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, + * {@link #newKeySet(int)}. + */ + public static class KeySetView extends CHMView implements Set, java.io.Serializable { + private static final long serialVersionUID = 7249069246763182397L; + private final V value; + KeySetView(ConcurrentHashMapV8 map, V value) { // non-public + super(map); + this.value = value; + } + + /** + * Returns the default mapped value for additions, + * or {@code null} if additions are not supported. + * + * @return the default mapped value for additions, or {@code null} + * if not supported. + */ + public V getMappedValue() { return value; } + + // implement Set API + + public boolean contains(Object o) { return map.containsKey(o); } + public boolean remove(Object o) { return map.remove(o) != null; } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
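As a usage sketch of this add-enabled key view (the generic parameters, stripped in this listing, are KeySetView<K,V>):

    ConcurrentHashMapV8<String, Boolean> m = new ConcurrentHashMapV8<String, Boolean>();
    ConcurrentHashMapV8.KeySetView<String, Boolean> names = m.keySet(Boolean.TRUE);
    names.add("alice");                      // like m.putIfAbsent("alice", TRUE)
    boolean seen = names.contains("alice");  // true; the view is backed by m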
+ * + * @return an iterator over the keys of this map + */ + public Iterator iterator() { return new KeyIterator(map); } + public boolean add(K e) { + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + if (e == null) + throw new NullPointerException(); + return map.internalPutIfAbsent(e, v) == null; + } + public boolean addAll(Collection c) { + boolean added = false; + V v; + if ((v = value) == null) + throw new UnsupportedOperationException(); + for (K e : c) { + if (e == null) + throw new NullPointerException(); + if (map.internalPutIfAbsent(e, v) == null) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Collection} of + * values, in which additions are disabled. This class cannot be + * directly instantiated. See {@link #values}, + * + *
The view's {@code iterator} is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public static final class ValuesView extends CHMView + implements Collection { + ValuesView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { return map.containsValue(o); } + public final boolean remove(Object o) { + if (o != null) { + Iterator it = new ValueIterator(map); + while (it.hasNext()) { + if (o.equals(it.next())) { + it.remove(); + return true; + } + } + } + return false; + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. + * + * @return an iterator over the values of this map + */ + public final Iterator iterator() { + return new ValueIterator(map); + } + public final boolean add(V e) { + throw new UnsupportedOperationException(); + } + public final boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + } + + /** + * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) + * entries. This class cannot be directly instantiated. See + * {@link #entrySet}. + */ + public static final class EntrySetView extends CHMView + implements Set> { + EntrySetView(ConcurrentHashMapV8 map) { super(map); } + public final boolean contains(Object o) { + Object k, v, r; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (r = map.get(k)) != null && + (v = e.getValue()) != null && + (v == r || v.equals(r))); + } + public final boolean remove(Object o) { + Object k, v; Map.Entry e; + return ((o instanceof Map.Entry) && + (k = (e = (Map.Entry)o).getKey()) != null && + (v = e.getValue()) != null && + map.remove(k, v)); + } + + /** + * Returns a "weakly consistent" iterator that will never + * throw {@link ConcurrentModificationException}, and + * guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not + * guaranteed to) reflect any modifications subsequent to + * construction. 
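Entries produced by this iterator write through to the backing map via MapEntry.setValue, as documented above; reusing the hypothetical counts map from the earlier sketch:

    // Increment every value; the returned previous value may already be
    // stale if another thread updated the same key concurrently.
    for (Map.Entry<String, Integer> e : counts.entrySet()) {
        e.setValue(e.getValue() + 1);
    }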
+ * + * @return an iterator over the entries of this map + */ + public final Iterator> iterator() { + return new EntryIterator(map); + } + + public final boolean add(Entry e) { + K key = e.getKey(); + V value = e.getValue(); + if (key == null || value == null) + throw new NullPointerException(); + return map.internalPut(key, value) == null; + } + public final boolean addAll(Collection> c) { + boolean added = false; + for (Entry e : c) { + if (add(e)) + added = true; + } + return added; + } + public boolean equals(Object o) { + Set c; + return ((o instanceof Set) && + ((c = (Set)o) == this || + (containsAll(c) && c.containsAll(this)))); + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java new file mode 100644 index 0000000..ecf552a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java @@ -0,0 +1,204 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.9 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.concurrent.atomic.AtomicLong; +import java.io.IOException; +import java.io.Serializable; +import java.io.ObjectInputStream; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
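A minimal sketch of the statistics-counter pattern this class favors:

    final LongAdder hits = new LongAdder();
    hits.increment();        // hot path: stays cheap under heavy contention
    hits.add(5L);
    long total = hits.sum(); // moving snapshot, not an atomic one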
This class extends {@link Number}, but does not define + * methods such as {@code hashCode} and {@code compareTo} because + * instances are expected to be mutated, and so are not useful as + * collection keys. + * + *
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot: Invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java new file mode 100644 index 0000000..f521642 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java @@ -0,0 +1,291 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.5 version. + +package com.concurrent_ruby.ext.jsr166e.nounsafe; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock: When the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. 
If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + + static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); + + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return VALUE_UPDATER.compareAndSet(this, cmp, val); + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. 
+ */ + transient volatile int busy; + + AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); + AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return BASE_UPDATER.compareAndSet(this, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return BUSY_UPDATER.compareAndSet(this, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java new file mode 100644 index 0000000..3ea409f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java @@ -0,0 +1,199 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +// This is based on 1.16 version + +package com.concurrent_ruby.ext.jsr166y; + +import java.util.Random; + +/** + * A random number generator isolated to the current thread. Like the + * global {@link java.util.Random} generator used by the {@link + * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized + * with an internally generated seed that may not otherwise be + * modified. When applicable, use of {@code ThreadLocalRandom} rather + * than shared {@code Random} objects in concurrent programs will + * typically encounter much less overhead and contention. Use of + * {@code ThreadLocalRandom} is particularly appropriate when multiple + * tasks (for example, each a {@link ForkJoinTask}) use random numbers + * in parallel in thread pools. + * + *
Usages of this class should typically be of the form: + * {@code ThreadLocalRandom.current().nextX(...)} (where + * {@code X} is {@code Int}, {@code Long}, etc). + * When all usages are of this form, it is never possible to + * accidentally share a {@code ThreadLocalRandom} across multiple threads. + * + *
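For example, using the bounded generators defined below (package import of com.concurrent_ruby.ext.jsr166y.ThreadLocalRandom assumed):

    int die = ThreadLocalRandom.current().nextInt(1, 7);      // uniform in [1, 7)
    long id = ThreadLocalRandom.current().nextLong(1000000L); // uniform in [0, 1000000)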
This class also provides additional commonly used bounded random + * generation methods. + * + * @since 1.7 + * @author Doug Lea + */ +public class ThreadLocalRandom extends Random { + // same constants as Random, but must be redeclared because private + private static final long multiplier = 0x5DEECE66DL; + private static final long addend = 0xBL; + private static final long mask = (1L << 48) - 1; + + /** + * The random seed. We can't use super.seed. + */ + private long rnd; + + /** + * Initialization flag to permit calls to setSeed to succeed only + * while executing the Random constructor. We can't allow others + * since it would cause setting seed in one part of a program to + * unintentionally impact other usages by the thread. + */ + boolean initialized; + + // Padding to help avoid memory contention among seed updates in + // different TLRs in the common case that they are located near + // each other. + private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; + + /** + * The actual ThreadLocal + */ + private static final ThreadLocal localRandom = + new ThreadLocal() { + protected ThreadLocalRandom initialValue() { + return new ThreadLocalRandom(); + } + }; + + + /** + * Constructor called only by localRandom.initialValue. + */ + ThreadLocalRandom() { + super(); + initialized = true; + } + + /** + * Returns the current thread's {@code ThreadLocalRandom}. + * + * @return the current thread's {@code ThreadLocalRandom} + */ + public static ThreadLocalRandom current() { + return localRandom.get(); + } + + /** + * Throws {@code UnsupportedOperationException}. Setting seeds in + * this generator is not supported. + * + * @throws UnsupportedOperationException always + */ + public void setSeed(long seed) { + if (initialized) + throw new UnsupportedOperationException(); + rnd = (seed ^ multiplier) & mask; + } + + protected int next(int bits) { + rnd = (rnd * multiplier + addend) & mask; + return (int) (rnd >>> (48-bits)); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @throws IllegalArgumentException if least greater than or equal + * to bound + * @return the next value + */ + public int nextInt(int least, int bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextInt(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public long nextLong(long n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + // Divide n by two until small enough for nextInt. On each + // iteration (at most 31 of them but usually much less), + // randomly choose both whether to include high bit in result + // (offset) and whether to continue with the lower vs upper + // half (which makes a difference only if odd). + long offset = 0; + while (n >= Integer.MAX_VALUE) { + int bits = next(2); + long half = n >>> 1; + long nextn = ((bits & 2) == 0) ? half : n - half; + if ((bits & 1) == 0) + offset += n - nextn; + n = nextn; + } + return offset + nextInt((int) n); + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). 
+ * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public long nextLong(long least, long bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextLong(bound - least) + least; + } + + /** + * Returns a pseudorandom, uniformly distributed {@code double} value + * between 0 (inclusive) and the specified value (exclusive). + * + * @param n the bound on the random number to be returned. Must be + * positive. + * @return the next value + * @throws IllegalArgumentException if n is not positive + */ + public double nextDouble(double n) { + if (n <= 0) + throw new IllegalArgumentException("n must be positive"); + return nextDouble() * n; + } + + /** + * Returns a pseudorandom, uniformly distributed value between the + * given least value (inclusive) and bound (exclusive). + * + * @param least the least value returned + * @param bound the upper bound (exclusive) + * @return the next value + * @throws IllegalArgumentException if least greater than or equal + * to bound + */ + public double nextDouble(double least, double bound) { + if (least >= bound) + throw new IllegalArgumentException(); + return nextDouble() * (bound - least) + least; + } + + private static final long serialVersionUID = -5851777807851030925L; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb new file mode 100644 index 0000000..e9a3dea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb @@ -0,0 +1,5 @@ +# This file is here so that there is a file with the same name as the gem that +# can be required by Bundler.require. Applications should normally +# require 'concurrent'. 
+ +require_relative "concurrent" diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb new file mode 100644 index 0000000..87de46f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb @@ -0,0 +1,134 @@ +require 'concurrent/version' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' + +require 'concurrent/atomics' +require 'concurrent/executors' +require 'concurrent/synchronization' + +require 'concurrent/atomic/atomic_markable_reference' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/agent' +require 'concurrent/atom' +require 'concurrent/array' +require 'concurrent/hash' +require 'concurrent/set' +require 'concurrent/map' +require 'concurrent/tuple' +require 'concurrent/async' +require 'concurrent/dataflow' +require 'concurrent/delay' +require 'concurrent/exchanger' +require 'concurrent/future' +require 'concurrent/immutable_struct' +require 'concurrent/ivar' +require 'concurrent/maybe' +require 'concurrent/mutable_struct' +require 'concurrent/mvar' +require 'concurrent/promise' +require 'concurrent/scheduled_task' +require 'concurrent/settable_struct' +require 'concurrent/timer_task' +require 'concurrent/tvar' +require 'concurrent/promises' + +require 'concurrent/thread_safe/synchronized_delegator' +require 'concurrent/thread_safe/util' + +require 'concurrent/options' + +# @!macro internal_implementation_note +# +# @note **Private Implementation:** This abstraction is a private, internal +# implementation detail. It should never be used directly. + +# @!macro monotonic_clock_warning +# +# @note Time calculations on all platforms and languages are sensitive to +# changes to the system clock. To alleviate the potential problems +# associated with changing the system clock while an application is running, +# most modern operating systems provide a monotonic clock that operates +# independently of the system clock. A monotonic clock cannot be used to +# determine human-friendly clock times. A monotonic clock is used exclusively +# for calculating time intervals. Not all Ruby platforms provide access to an +# operating system monotonic clock. On these platforms a pure-Ruby monotonic +# clock will be used as a fallback. An operating system monotonic clock is both +# faster and more reliable than the pure-Ruby implementation. The pure-Ruby +# implementation should be fast and reliable enough for most non-realtime +# operations. At this time the common Ruby platforms that provide access to an +# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). +# +# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) + +# @!macro copy_options +# +# ## Copy Options +# +# Object references in Ruby are mutable. This can lead to serious +# problems when the {#value} of an object is a mutable reference. Which +# is always the case unless the value is a `Fixnum`, `Symbol`, or similar +# "primitive" data type. Each instance can be configured with a few +# options that can help protect the program from potentially dangerous +# operations. 
Each of these options can be optionally set when the object +# instance is created: +# +# * `:dup_on_deref` When true the object will call the `#dup` method on +# the `value` object every time the `#value` method is called +# (default: false) +# * `:freeze_on_deref` When true the object will call the `#freeze` +# method on the `value` object every time the `#value` method is called +# (default: false) +# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run +# every time the `#value` method is called. The `Proc` will be given +# the current `value` as its only argument and the result returned by +# the block will be the return value of the `#value` call. When `nil` +# this option will be ignored (default: nil) +# +# When multiple deref options are set the order of operations is strictly defined. +# The order of deref operations is: +# * `:copy_on_deref` +# * `:dup_on_deref` +# * `:freeze_on_deref` +# +# Because of this ordering there is no need to `#freeze` an object created by a +# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. +# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is +# as close to the behavior of a "pure" functional language (like Erlang, Clojure, +# or Haskell) as we are likely to get in Ruby. + +# @!macro deref_options +# +# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before +# returning the data from {#value} +# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before +# returning the data from {#value} +# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} +# method, call the given proc passing the internal value as the sole +# argument then return the new value returned from the proc. + +# @!macro executor_and_deref_options +# +# @param [Hash] opts the options used to define the behavior at update and deref +# and to specify the executor on which to perform actions +# @option opts [Executor] :executor when set use the given `Executor` instance. +# Three special values are also supported: `:io` returns the global pool for +# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast +# operations, and `:immediate` returns the global `ImmediateExecutor` object. +# @!macro deref_options + +# @!macro warn.edge +# @api Edge +# @note **Edge Features** are under active development and may change frequently. +# +# - Deprecations are not added before incompatible changes. +# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, +# _patch_ bump means compatible change. +# - Edge features may also lack tests and documentation. +# - Features developed in `concurrent-ruby-edge` are expected to move +# to `concurrent-ruby` when finalised. 
+ + +# {include:file:README.md} +module Concurrent +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb new file mode 100644 index 0000000..2d32926 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb @@ -0,0 +1,588 @@ +require 'concurrent/configuration' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/thread_local_var' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) + # function. An agent is a shared, mutable variable providing independent, + # uncoordinated, *asynchronous* change of individual values. Best used when + # the value will undergo frequent, complex updates. Suitable when the result + # of an update does not need to be known immediately. `Agent` is (mostly) + # functionally equivalent to Clojure's agent, except where the runtime + # prevents parity. + # + # Agents are reactive, not autonomous - there is no imperative message loop + # and no blocking receive. The state of an Agent should be itself immutable + # and the `#value` of an Agent is always immediately available for reading by + # any thread without any messages, i.e. observation does not require + # cooperation or coordination. + # + # Agent action dispatches are made using the various `#send` methods. These + # methods always return immediately. At some point later, in another thread, + # the following will happen: + # + # 1. The given `action` will be applied to the state of the Agent and the + # `args`, if any were supplied. + # 2. The return value of `action` will be passed to the validator lambda, + # if one has been set on the Agent. + # 3. If the validator succeeds or if no validator was given, the return value + # of the given `action` will become the new `#value` of the Agent. See + # `#initialize` for details. + # 4. If any observers were added to the Agent, they will be notified. See + # `#add_observer` for details. + # 5. If during the `action` execution any other dispatches are made (directly + # or indirectly), they will be held until after the `#value` of the Agent + # has been changed. + # + # If any exceptions are thrown by an action function, no nested dispatches + # will occur, and the exception will be cached in the Agent itself. When an + # Agent has errors cached, any subsequent interactions will immediately throw + # an exception, until the agent's errors are cleared. Agent errors can be + # examined with `#error` and the agent restarted with `#restart`. + # + # The actions of all Agents get interleaved amongst threads in a thread pool. + # At any point in time, at most one action for each Agent is being executed. + # Actions dispatched to an agent from another single agent or thread will + # occur in the order they were sent, potentially interleaved with actions + # dispatched to the same agent from other sources. The `#send` method should + # be used for actions that are CPU limited, while the `#send_off` method is + # appropriate for actions that may block on IO. + # + # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. 
+ # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an agent with an initial value + # agent = Concurrent::Agent.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # agent.send{|set| next_fibonacci(set) } + # end + # + # # wait for them to complete + # agent.await + # + # # get the current value + # agent.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Agents support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time an action dispatch returns and + # the new value is successfully validated. Observation will *not* occur if the + # action raises an exception, if validation fails, or when a {#restart} occurs. + # + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Agent when the action began + # processing. The `new_value` is the value to which the Agent was set when the + # action completed. Note that `old_value` and `new_value` may be the same. + # This is not an error. It simply means that the action returned the same + # value. + # + # ## Nested Actions + # + # It is possible for an Agent action to post further actions back to itself. + # The nested actions will be enqueued normally then processed *after* the + # outer action completes, in the order they were sent, possibly interleaved + # with action dispatches from other threads. Nested actions never deadlock + # with one another and a failure in a nested action will never affect the + # outer action. + # + # Nested actions can be called using the Agent reference from the enclosing + # scope or by passing the reference in as a "send" argument. Nested actions + # cannot be post using `self` from within the action block/proc/lambda; `self` + # in this context will not reference the Agent. The preferred method for + # dispatching nested actions is to pass the Agent as an argument. This allows + # Ruby to more effectively manage the closing scope. + # + # Prefer this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send(agent) do |value, this| + # this.send {|v| v + 42 } + # 3.14 + # end + # agent.value #=> 45.14 + # ``` + # + # Over this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send do |value| + # agent.send {|v| v + 42 } + # 3.14 + # end + # ``` + # + # @!macro agent_await_warning + # + # **NOTE** Never, *under any circumstances*, call any of the "await" methods + # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action + # block/proc/lambda. The call will block the Agent and will always fail. + # Calling either {#await} or {#wait} (with a timeout of `nil`) will + # hopelessly deadlock the Agent with no possibility of recovery. 
+  #
+  # @!macro thread_safe_variable_comparison
+  #
+  # @see http://clojure.org/Agents Clojure Agents
+  # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
+  class Agent < Synchronization::LockableObject
+    include Concern::Observable
+
+    ERROR_MODES = [:continue, :fail].freeze
+    private_constant :ERROR_MODES
+
+    AWAIT_FLAG = ::Object.new
+    private_constant :AWAIT_FLAG
+
+    AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG }
+    private_constant :AWAIT_ACTION
+
+    DEFAULT_ERROR_HANDLER = ->(agent, error) { nil }
+    private_constant :DEFAULT_ERROR_HANDLER
+
+    DEFAULT_VALIDATOR = ->(value) { true }
+    private_constant :DEFAULT_VALIDATOR
+
+    Job = Struct.new(:action, :args, :executor, :caller)
+    private_constant :Job
+
+    # Raised during action processing or any other time in an Agent's lifecycle.
+    class Error < StandardError
+      def initialize(message = nil)
+        message ||= 'agent must be restarted before jobs can post'
+        super(message)
+      end
+    end
+
+    # Raised when a new value obtained during action processing or at `#restart`
+    # fails validation.
+    class ValidationError < Error
+      def initialize(message = nil)
+        message ||= 'invalid value'
+        super(message)
+      end
+    end
+
+    # The error mode this Agent is operating in. See {#initialize} for details.
+    attr_reader :error_mode
+
+    # Create a new `Agent` with the given initial value and options.
+    #
+    # The `:validator` option must be `nil` or a side-effect free proc/lambda
+    # which takes one argument. On any intended value change the validator, if
+    # provided, will be called. If the new value is invalid the validator should
+    # return `false` or raise an error.
+    #
+    # The `:error_handler` option must be `nil` or a proc/lambda which takes two
+    # arguments. When an action raises an error or validation fails, either by
+    # returning false or raising an error, the error handler will be called. The
+    # arguments to the error handler will be a reference to the agent itself and
+    # the error object which was raised.
+    #
+    # The `:error_mode` may be either `:continue` (the default if an error
+    # handler is given) or `:fail` (the default if the error handler is nil or
+    # not given).
+    #
+    # If an action being run by the agent throws an error or doesn't pass
+    # validation, the error handler, if present, will be called. After the
+    # handler executes, if the error mode is `:continue`, the Agent will continue
+    # as if neither the action that caused the error nor the error itself ever
+    # happened.
+    #
+    # If the mode is `:fail` the Agent will become {#failed?} and will stop
+    # accepting new action dispatches. Any previously queued actions will be
+    # held until {#restart} is called. The {#value} method will still work,
+    # returning the value of the Agent before the error.
+    #
+    # @param [Object] initial the initial value
+    # @param [Hash] opts the configuration options
+    #
+    # @option opts [Symbol] :error_mode either `:continue` or `:fail`
+    # @option opts [nil, Proc] :error_handler the (optional) error handler
+    # @option opts [nil, Proc] :validator the (optional) validation procedure
+    def initialize(initial, opts = {})
+      super()
+      synchronize { ns_initialize(initial, opts) }
+    end
+
+    # The current value (state) of the Agent, irrespective of any pending or
+    # in-progress actions. The value is always available and is non-blocking.
+    #
+    # @return [Object] the current value
+    def value
+      @current.value # TODO (pitr 12-Sep-2015): broken unsafe read?
+    end
+
+    alias_method :deref, :value
+
+    # When {#failed?} and {#error_mode} is `:fail`, returns the error object
+    # which caused the failure, else `nil`. When {#error_mode} is `:continue`
+    # will *always* return `nil`.
+    #
+    # @return [nil, Error] the error which caused the failure when {#failed?}
+    def error
+      @error.value
+    end
+
+    alias_method :reason, :error
+
+    # @!macro agent_send
+    #
+    #   Dispatches an action to the Agent and returns immediately. Subsequently,
+    #   in a thread from a thread pool, the {#value} will be set to the return
+    #   value of the action. Action dispatches are only allowed when the Agent
+    #   is not {#failed?}.
+    #
+    #   The action must be a block/proc/lambda which takes 1 or more arguments.
+    #   The first argument is the current {#value} of the Agent. Any arguments
+    #   passed to the send method via the `args` parameter will be passed to the
+    #   action as the remaining arguments. The action must return the new value
+    #   of the Agent.
+    #
+    #   * {#send} and {#send!} should be used for actions that are CPU limited
+    #   * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that
+    #     may block on IO
+    #   * {#send_via} and {#send_via!} are used when a specific executor is to
+    #     be used for the action
+    #
+    #   @param [Array] args zero or more arguments to be passed to
+    #     the action
+    #   @param [Proc] action the action dispatch to be enqueued
+    #
+    #   @yield [value, *args] process the old value and return the new
+    #   @yieldparam [Object] value the current {#value} of the Agent
+    #   @yieldparam [Array] args zero or more arguments to pass to the
+    #     action
+    #   @yieldreturn [Object] the new value of the Agent
+    #
+    # @!macro send_return
+    #   @return [Boolean] true if the action is successfully enqueued, false if
+    #     the Agent is {#failed?}
+    def send(*args, &action)
+      enqueue_action_job(action, args, Concurrent.global_fast_executor)
+    end
+
+    # @!macro agent_send
+    #
+    # @!macro send_bang_return_and_raise
+    #   @return [Boolean] true if the action is successfully enqueued
+    #   @raise [Concurrent::Agent::Error] if the Agent is {#failed?}
+    def send!(*args, &action)
+      raise Error.new unless send(*args, &action)
+      true
+    end
+
+    # @!macro agent_send
+    # @!macro send_return
+    def send_off(*args, &action)
+      enqueue_action_job(action, args, Concurrent.global_io_executor)
+    end
+
+    alias_method :post, :send_off
+
+    # @!macro agent_send
+    # @!macro send_bang_return_and_raise
+    def send_off!(*args, &action)
+      raise Error.new unless send_off(*args, &action)
+      true
+    end
+
+    # @!macro agent_send
+    # @!macro send_return
+    # @param [Concurrent::ExecutorService] executor the executor on which the
+    #   action is to be dispatched
+    def send_via(executor, *args, &action)
+      enqueue_action_job(action, args, executor)
+    end
+
+    # @!macro agent_send
+    # @!macro send_bang_return_and_raise
+    # @param [Concurrent::ExecutorService] executor the executor on which the
+    #   action is to be dispatched
+    def send_via!(executor, *args, &action)
+      raise Error.new unless send_via(executor, *args, &action)
+      true
+    end
+
+    # Dispatches an action to the Agent and returns immediately. Subsequently,
+    # in a thread from a thread pool, the {#value} will be set to the return
+    # value of the action. Appropriate for actions that may block on IO.
+    #
+    # @param [Proc] action the action dispatch to be enqueued
+    # @return [Concurrent::Agent] self
+    # @see #send_off
+    def <<(action)
+      send_off(&action)
+      self
+    end
+
+    # Blocks the current thread (indefinitely!) until all actions dispatched
+    # thus far, from this thread or nested by the Agent, have occurred. Will
+    # block when {#failed?}. Will never return if a failed Agent is restarted
+    # via {#restart} with `:clear_actions` set to true.
+    #
+    # Returns a reference to `self` to support method chaining:
+    #
+    # ```
+    # current_value = agent.await.value
+    # ```
+    #
+    # @return [Concurrent::Agent] self
+    #
+    # @!macro agent_await_warning
+    def await
+      wait(nil)
+      self
+    end
+
+    # Blocks the current thread until all actions dispatched thus far, from this
+    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
+    # has elapsed.
+    #
+    # @param [Float] timeout the maximum number of seconds to wait
+    # @return [Boolean] true if all actions complete before timeout else false
+    #
+    # @!macro agent_await_warning
+    def await_for(timeout)
+      wait(timeout.to_f)
+    end
+
+    # Blocks the current thread until all actions dispatched thus far, from this
+    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
+    # has elapsed.
+    #
+    # @param [Float] timeout the maximum number of seconds to wait
+    # @return [Boolean] true if all actions complete before timeout
+    #
+    # @raise [Concurrent::TimeoutError] when the timeout is reached
+    #
+    # @!macro agent_await_warning
+    def await_for!(timeout)
+      raise Concurrent::TimeoutError unless wait(timeout.to_f)
+      true
+    end
+
+    # Blocks the current thread until all actions dispatched thus far, from this
+    # thread or nested by the Agent, have occurred, or the timeout (in seconds)
+    # has elapsed. Will block indefinitely when timeout is nil or not given.
+    #
+    # Provided mainly for consistency with other classes in this library. Prefer
+    # the various `await` methods instead.
+    #
+    # @param [Float] timeout the maximum number of seconds to wait
+    # @return [Boolean] true if all actions complete before timeout else false
+    #
+    # @!macro agent_await_warning
+    def wait(timeout = nil)
+      latch = Concurrent::CountDownLatch.new(1)
+      enqueue_await_job(latch)
+      latch.wait(timeout)
+    end
+
+    # Is the Agent in a failed state?
+    #
+    # @see #restart
+    def failed?
+      !@error.value.nil?
+    end
+
+    alias_method :stopped?, :failed?
+
+    # When an Agent is {#failed?}, changes the Agent {#value} to `new_value`
+    # then un-fails the Agent so that action dispatches are allowed again. If
+    # the `:clear_actions` option is given and true, any actions queued on the
+    # Agent that were being held while it was failed will be discarded,
+    # otherwise those held actions will proceed. The `new_value` must pass the
+    # validator if any, or `restart` will raise an exception and the Agent will
+    # remain failed with its old {#value} and {#error}. Observers, if any, will
+    # not be notified of the new state.
+    #
+    # @param [Object] new_value the new value for the Agent once restarted
+    # @param [Hash] opts the configuration options
+    # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed
+    #   actions should be discarded on restart, else false (default: false)
+    # @return [Boolean] true
+    #
+    # @raise [Concurrent::Agent::Error] when not failed
+    def restart(new_value, opts = {})
+      clear_actions = opts.fetch(:clear_actions, false)
+      synchronize do
+        raise Error.new('agent is not failed') unless failed?
+        raise ValidationError unless ns_validate(new_value)
+        @current.value = new_value
+        @error.value = nil
+        @queue.clear if clear_actions
+        ns_post_next_job unless @queue.empty?
+      end
+      true
+    end
+
+    class << self
+
+      # Blocks the current thread (indefinitely!) until all actions dispatched
+      # thus far to all the given Agents, from this thread or nested by the
+      # given Agents, have occurred. Will block when any of the agents are
+      # failed. Will never return if a failed Agent is restarted via {#restart}
+      # with `:clear_actions` true.
+      #
+      # @param [Array] agents the Agents on which to wait
+      # @return [Boolean] true
+      #
+      # @!macro agent_await_warning
+      def await(*agents)
+        agents.each { |agent| agent.await }
+        true
+      end
+
+      # Blocks the current thread until all actions dispatched thus far to all
+      # the given Agents, from this thread or nested by the given Agents, have
+      # occurred, or the timeout (in seconds) has elapsed.
+      #
+      # @param [Float] timeout the maximum number of seconds to wait
+      # @param [Array] agents the Agents on which to wait
+      # @return [Boolean] true if all actions complete before timeout else false
+      #
+      # @!macro agent_await_warning
+      def await_for(timeout, *agents)
+        end_at = Concurrent.monotonic_time + timeout.to_f
+        ok = agents.length.times do |i|
+          break false if (delay = end_at - Concurrent.monotonic_time) < 0
+          break false unless agents[i].await_for(delay)
+        end
+        !!ok
+      end
+
+      # Blocks the current thread until all actions dispatched thus far to all
+      # the given Agents, from this thread or nested by the given Agents, have
+      # occurred, or the timeout (in seconds) has elapsed.
+      #
+      # @param [Float] timeout the maximum number of seconds to wait
+      # @param [Array] agents the Agents on which to wait
+      # @return [Boolean] true if all actions complete before timeout
+      #
+      # @raise [Concurrent::TimeoutError] when the timeout is reached
+      # @!macro agent_await_warning
+      def await_for!(timeout, *agents)
+        raise Concurrent::TimeoutError unless await_for(timeout, *agents)
+        true
+      end
+    end
+
+    private
+
+    def ns_initialize(initial, opts)
+      @error_mode = opts[:error_mode]
+      @error_handler = opts[:error_handler]
+
+      if @error_mode && !ERROR_MODES.include?(@error_mode)
+        raise ArgumentError.new('unrecognized error mode')
+      elsif @error_mode.nil?
+        @error_mode = @error_handler ? :continue : :fail
+      end
+
+      @error_handler ||= DEFAULT_ERROR_HANDLER
+      @validator = opts.fetch(:validator, DEFAULT_VALIDATOR)
+      @current = Concurrent::AtomicReference.new(initial)
+      @error = Concurrent::AtomicReference.new(nil)
+      @caller = Concurrent::ThreadLocalVar.new(nil)
+      @queue = []
+
+      self.observers = Collection::CopyOnNotifyObserverSet.new
+    end
+
+    def enqueue_action_job(action, args, executor)
+      raise ArgumentError.new('no action given') unless action
+      job = Job.new(action, args, executor, @caller.value || Thread.current.object_id)
+      synchronize { ns_enqueue_job(job) }
+    end
+
+    def enqueue_await_job(latch)
+      synchronize do
+        if (index = ns_find_last_job_for_thread)
+          job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor,
+                        Thread.current.object_id)
+          ns_enqueue_job(job, index+1)
+        else
+          latch.count_down
+          true
+        end
+      end
+    end
+
+    def ns_enqueue_job(job, index = nil)
+      # a non-nil index means this is an await job
+      return false if index.nil? && failed?
+      index ||= @queue.length
+      @queue.insert(index, job)
+      # if this is the only job, post to executor
+      ns_post_next_job if @queue.length == 1
+      true
+    end
+
+    def ns_post_next_job
+      @queue.first.executor.post { execute_next_job }
+    end
+
+    def execute_next_job
+      job = synchronize { @queue.first }
+      old_value = @current.value
+
+      @caller.value = job.caller # for nested actions
+      new_value = job.action.call(old_value, *job.args)
+      @caller.value = nil
+
+      return if new_value == AWAIT_FLAG
+
+      if ns_validate(new_value)
+        @current.value = new_value
+        observers.notify_observers(Time.now, old_value, new_value)
+      else
+        handle_error(ValidationError.new)
+      end
+    rescue => error
+      handle_error(error)
+    ensure
+      synchronize do
+        @queue.shift
+        unless failed? || @queue.empty?
+          ns_post_next_job
+        end
+      end
+    end
+
+    def ns_validate(value)
+      @validator.call(value)
+    rescue
+      false
+    end
+
+    def handle_error(error)
+      # stop new jobs from posting
+      @error.value = error if @error_mode == :fail
+      @error_handler.call(self, error)
+    rescue
+      # do nothing
+    end
+
+    def ns_find_last_job_for_thread
+      @queue.rindex { |job| job.caller == Thread.current.object_id }
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb
new file mode 100644
index 0000000..96434a2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb
@@ -0,0 +1,56 @@
+require 'concurrent/utility/engine'
+require 'concurrent/thread_safe/util'
+
+module Concurrent
+
+  # @!macro concurrent_array
+  #
+  #   A thread-safe subclass of Array. This version locks against the object
+  #   itself for every method call, ensuring only one thread can be reading
+  #   or writing at a time. This includes iteration methods like `#each`.
+  #
+  #   @note `a += b` is **not** a **thread-safe** operation on
+  #     `Concurrent::Array`. It reads array `a`, then it creates a new `Concurrent::Array`
+  #     which is the concatenation of `a` and `b`, then it writes the concatenation to `a`.
+  #     The read and write are independent operations; they do not form a single atomic
+  #     operation. Therefore, when two `+=` operations are executed concurrently, updates
+  #     may be lost. Use `#concat` instead.
+  #
+  #   @see http://ruby-doc.org/core/Array.html Ruby standard library `Array`
+
+  # @!macro internal_implementation_note
+  ArrayImplementation = case
+                        when Concurrent.on_cruby?
+                          # Array is thread-safe in practice because CRuby runs
+                          # threads one at a time and does not do context
+                          # switching during the execution of C functions.
+                          ::Array
+
+                        when Concurrent.on_jruby?
+                          require 'jruby/synchronized'
+
+                          class JRubyArray < ::Array
+                            include JRuby::Synchronized
+                          end
+                          JRubyArray
+
+                        when Concurrent.on_truffleruby?
+ require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray + TruffleRubyArray + + else + warn 'Possibly unsupported Ruby implementation' + ::Array + end + private_constant :ArrayImplementation + + # @!macro concurrent_array + class Array < ArrayImplementation + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb new file mode 100644 index 0000000..f9f8adf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb @@ -0,0 +1,449 @@ +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A mixin module that provides simple asynchronous behavior to a class, + # turning it into a simple actor. Loosely based on Erlang's + # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without + # supervision or linking. + # + # A more feature-rich {Concurrent::Actor} is also available when the + # capabilities of `Async` are too limited. + # + # ```cucumber + # Feature: + # As a stateful, plain old Ruby class + # I want safe, asynchronous behavior + # So my long-running methods don't block the main thread + # ``` + # + # The `Async` module is a way to mix simple yet powerful asynchronous + # capabilities into any plain old Ruby object or class, turning each object + # into a simple Actor. Method calls are processed on a background thread. The + # caller is free to perform other actions while processing occurs in the + # background. + # + # Method calls to the asynchronous object are made via two proxy methods: + # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post + # the method call to the object's background thread and return a "future" + # which will eventually contain the result of the method call. + # + # This behavior is loosely patterned after Erlang's `gen_server` behavior. + # When an Erlang module implements the `gen_server` behavior it becomes + # inherently asynchronous. The `start` or `start_link` function spawns a + # process (similar to a thread but much more lightweight and efficient) and + # returns the ID of the process. Using the process ID, other processes can + # send messages to the `gen_server` via the `cast` and `call` methods. Unlike + # Erlang's `gen_server`, however, `Async` classes do not support linking or + # supervision trees. + # + # ## Basic Usage + # + # When this module is mixed into a class, objects of the class become inherently + # asynchronous. Each object gets its own background thread on which to post + # asynchronous method calls. Asynchronous method calls are executed in the + # background one at a time in the order they are received. + # + # To create an asynchronous class, simply mix in the `Concurrent::Async` module: + # + # ``` + # class Hello + # include Concurrent::Async + # + # def hello(name) + # "Hello, #{name}!" + # end + # end + # ``` + # + # Mixing this module into a class provides each object two proxy methods: + # `async` and `await`. These methods are thread safe with respect to the + # enclosing object. The former proxy allows methods to be called + # asynchronously by posting to the object's internal thread. 
+  # The latter proxy allows a method to be called synchronously but does so
+  # safely with respect to any pending asynchronous method calls and ensures
+  # proper ordering. Both methods return a {Concurrent::IVar} which can be
+  # inspected for the result of the proxied method call. Calling a method with
+  # `async` will return a `:pending` `IVar` whereas `await` will return a
+  # `:complete` `IVar`.
+  #
+  # ```
+  # class Echo
+  #   include Concurrent::Async
+  #
+  #   def echo(msg)
+  #     print "#{msg}\n"
+  #   end
+  # end
+  #
+  # horn = Echo.new
+  # horn.echo('zero')      # synchronous, not thread-safe
+  #                        # returns the actual return value of the method
+  #
+  # horn.async.echo('one') # asynchronous, non-blocking, thread-safe
+  #                        # returns an IVar in the :pending state
+  #
+  # horn.await.echo('two') # synchronous, blocking, thread-safe
+  #                        # returns an IVar in the :complete state
+  # ```
+  #
+  # ## Let It Fail
+  #
+  # The `async` and `await` proxy methods have built-in error protection based
+  # on Erlang's famous "let it fail" philosophy. Instance methods should not be
+  # programmed defensively. When an exception is raised by a delegated method
+  # the proxy will rescue the exception, expose it to the caller as the `reason`
+  # attribute of the returned future, then process the next method call.
+  #
+  # ## Calling Methods Internally
+  #
+  # External method calls should *always* use the `async` and `await` proxy
+  # methods. When one method calls another method, the `async` proxy should
+  # rarely be used and the `await` proxy should *never* be used.
+  #
+  # When an object calls one of its own methods using the `await` proxy the
+  # second call will be enqueued *behind* the currently running method call.
+  # Any attempt to wait on the result will fail as the second call will never
+  # run until after the current call completes.
+  #
+  # Calling a method using the `await` proxy from within a method that was
+  # itself called using `async` or `await` will irreversibly deadlock the
+  # object. Do *not* do this, ever.
+  #
+  # ## Instance Variables and Attribute Accessors
+  #
+  # Instance variables do not need to be thread-safe so long as they are private.
+  # Asynchronous method calls are processed in the order they are received and
+  # are processed one at a time. Therefore private instance variables can only
+  # be accessed by one thread at a time. This is inherently thread-safe.
+  #
+  # When using private instance variables within asynchronous methods, the best
+  # practice is to read the instance variable into a local variable at the start
+  # of the method then update the instance variable at the *end* of the method.
+  # This way, should an exception be raised during method execution the internal
+  # state of the object will not have been changed.
+  #
+  # ### Reader Attributes
+  #
+  # The use of `attr_reader` is discouraged. Internal state exposed externally,
+  # when necessary, should be done through accessor methods. The instance
+  # variables exposed by these methods *must* be thread-safe, or they must be
+  # called using the `async` and `await` proxy methods. These two approaches are
+  # subtly different.
+  #
+  # When internal state is accessed via the `async` and `await` proxy methods,
+  # the returned value represents the object's state *at the time the call is
+  # processed*, which may *not* be the state of the object at the time the call
+  # is made.
+  #
+  # To get the state *at the current* time, irrespective of any enqueued method
+  # calls, a reader method must be called directly. This is inherently unsafe
+  # unless the instance variable is itself thread-safe, preferably using one
+  # of the thread-safe classes within this library. Because the thread-safe
+  # classes within this library are internally-locking or non-locking, they can
+  # be safely used from within asynchronous methods without causing deadlocks.
+  #
+  # Generally speaking, the best practice is to *not* expose internal state via
+  # reader methods. The best practice is to simply use the method's return value.
+  #
+  # ### Writer Attributes
+  #
+  # Writer attributes should never be used with asynchronous classes. Changing
+  # the state externally, even when done in the thread-safe way, is not logically
+  # consistent. Changes to state need to be timed with respect to all asynchronous
+  # method calls which may be in-process or enqueued. The only safe practice is to
+  # pass all necessary data to each method as arguments and let the method update
+  # the internal state as necessary.
+  #
+  # ## Class Constants, Variables, and Methods
+  #
+  # ### Class Constants
+  #
+  # Class constants do not need to be thread-safe. Since they are read-only and
+  # immutable they may be safely read both externally and from within
+  # asynchronous methods.
+  #
+  # ### Class Variables
+  #
+  # Class variables should be avoided. Class variables represent shared state.
+  # Shared state is anathema to concurrency. Should there be a need to share
+  # state using class variables they *must* be thread-safe, preferably
+  # using the thread-safe classes within this library. When updating class
+  # variables, never assign a new value/object to the variable itself. Assignment
+  # is not thread-safe in Ruby. Instead, use the thread-safe update functions
+  # of the variable itself to change the value.
+  #
+  # The best practice is to *never* use class variables with `Async` classes.
+  #
+  # ### Class Methods
+  #
+  # Class methods which are pure functions are safe. Class methods which modify
+  # class variables should be avoided, for all the reasons listed above.
+  #
+  # ## An Important Note About Thread Safe Guarantees
+  #
+  # > Thread safe guarantees can only be made when asynchronous method calls
+  # > are not mixed with direct method calls. Use only direct method calls
+  # > when the object is used exclusively on a single thread. Use only
+  # > `async` and `await` when the object is shared between threads. Once you
+  # > call a method using `async` or `await`, you should no longer call methods
+  # > directly on the object. Use `async` and `await` exclusively from then on.
+  #
+  # @example
+  #
+  #   class Echo
+  #     include Concurrent::Async
+  #
+  #     def echo(msg)
+  #       print "#{msg}\n"
+  #     end
+  #   end
+  #
+  #   horn = Echo.new
+  #   horn.echo('zero')      # synchronous, not thread-safe
+  #                          # returns the actual return value of the method
+  #
+  #   horn.async.echo('one') # asynchronous, non-blocking, thread-safe
+  #                          # returns an IVar in the :pending state
+  #
+  #   horn.await.echo('two') # synchronous, blocking, thread-safe
+  #                          # returns an IVar in the :complete state
+  #
+  # @see Concurrent::Actor
+  # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia
+  # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server
+  # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/
+  module Async
+
+    # @!method self.new(*args, &block)
+    #
+    #   Instantiate a new object and ensure proper initialization of the
+    #   synchronization mechanisms.
+ # + # @param [Array] args Zero or more arguments to be passed to the + # object's initializer. + # @param [Proc] block Optional block to pass to the object's initializer. + # @return [Object] A properly initialized object of the asynchronous class. + + # Check for the presence of a method on an object and determine if a given + # set of arguments matches the required arity. + # + # @param [Object] obj the object to check against + # @param [Symbol] method the method to check the object for + # @param [Array] args zero or more arguments for the arity check + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + # + # @note This check is imperfect because of the way Ruby reports the arity of + # methods with a variable number of arguments. It is possible to determine + # if too few arguments are given but impossible to determine if too many + # arguments are given. This check may also fail to recognize dynamic behavior + # of the object, such as methods simulated with `method_missing`. + # + # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity + # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? + # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing + # + # @!visibility private + def self.validate_argc(obj, method, *args) + argc = args.length + arity = obj.method(method).arity + + if arity >= 0 && argc != arity + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") + elsif arity < 0 && (arity = (arity + 1).abs) > argc + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") + end + end + + # @!visibility private + def self.included(base) + base.singleton_class.send(:alias_method, :original_new, :new) + base.extend(ClassMethods) + super(base) + end + + # @!visibility private + module ClassMethods + def new(*args, &block) + obj = original_new(*args, &block) + obj.send(:init_synchronization) + obj + end + ruby2_keywords :new if respond_to?(:ruby2_keywords, true) + end + private_constant :ClassMethods + + # Delegates asynchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AsyncDelegator < Synchronization::LockableObject + safe_initialization! + + # Create a new delegator object wrapping the given delegate. + # + # @param [Object] delegate the object to wrap and delegate method calls to + def initialize(delegate) + super() + @delegate = delegate + @queue = [] + @executor = Concurrent.global_io_executor + @ruby_pid = $$ + end + + # Delegates method calls to the wrapped object. 
+      #
+      # @param [Symbol] method the method being called
+      # @param [Array] args zero or more arguments to the method
+      #
+      # @return [IVar] the result of the method call
+      #
+      # @raise [NameError] the object does not respond to `method` method
+      # @raise [ArgumentError] the given `args` do not match the arity of `method`
+      def method_missing(method, *args, &block)
+        super unless @delegate.respond_to?(method)
+        Async::validate_argc(@delegate, method, *args)
+
+        ivar = Concurrent::IVar.new
+        synchronize do
+          reset_if_forked
+          @queue.push [ivar, method, args, block]
+          @executor.post { perform } if @queue.length == 1
+        end
+
+        ivar
+      end
+
+      # Check whether the delegate responds to the method
+      #
+      # @param [Symbol] method the method being called
+      def respond_to_missing?(method, include_private = false)
+        @delegate.respond_to?(method) || super
+      end
+
+      # Perform all enqueued tasks.
+      #
+      # This method must be called from within the executor. It must not be
+      # called while already running. It will loop until the queue is empty.
+      def perform
+        loop do
+          ivar, method, args, block = synchronize { @queue.first }
+          break unless ivar # queue is empty
+
+          begin
+            ivar.set(@delegate.send(method, *args, &block))
+          rescue => error
+            ivar.fail(error)
+          end
+
+          synchronize do
+            @queue.shift
+            return if @queue.empty?
+          end
+        end
+      end
+
+      def reset_if_forked
+        if $$ != @ruby_pid
+          @queue.clear
+          @ruby_pid = $$
+        end
+      end
+    end
+    private_constant :AsyncDelegator
+
+    # Delegates synchronous, thread-safe method calls to the wrapped object.
+    #
+    # @!visibility private
+    class AwaitDelegator
+
+      # Create a new delegator object wrapping the given delegate.
+      #
+      # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to
+      def initialize(delegate)
+        @delegate = delegate
+      end
+
+      # Delegates method calls to the wrapped object.
+      #
+      # @param [Symbol] method the method being called
+      # @param [Array] args zero or more arguments to the method
+      #
+      # @return [IVar] the result of the method call
+      #
+      # @raise [NameError] the object does not respond to `method` method
+      # @raise [ArgumentError] the given `args` do not match the arity of `method`
+      def method_missing(method, *args, &block)
+        ivar = @delegate.send(method, *args, &block)
+        ivar.wait
+        ivar
+      end
+
+      # Check whether the delegate responds to the method
+      #
+      # @param [Symbol] method the method being called
+      def respond_to_missing?(method, include_private = false)
+        @delegate.respond_to?(method) || super
+      end
+    end
+    private_constant :AwaitDelegator
+
+    # Causes the chained method call to be performed asynchronously on the
+    # object's thread. The delegated method will return a future in the
+    # `:pending` state and the method call will have been scheduled on the
+    # object's thread. The final disposition of the method call can be obtained
+    # by inspecting the returned future.
+    #
+    # @!macro async_thread_safety_warning
+    #   @note The method call is guaranteed to be thread safe with respect to
+    #     all other method calls against the same object that are called with
+    #     either `async` or `await`. The mutable nature of Ruby references
+    #     (and object orientation in general) prevents any other thread safety
+    #     guarantees. Do NOT mix direct method calls with delegated method calls.
+    #     Use *only* delegated method calls when sharing the object between threads.
+    #
+    # @return [Concurrent::IVar] the pending result of the asynchronous operation
+    #
+    # @raise [NameError] the object does not respond to the requested method
+    # @raise [ArgumentError] the given `args` do not match the arity of
+    #   the requested method
+    def async
+      @__async_delegator__
+    end
+    alias_method :cast, :async
+
+    # Causes the chained method call to be performed synchronously on the
+    # current thread. The delegated method will return a future in either the
+    # `:fulfilled` or `:rejected` state and the delegated method will have
+    # completed. The final disposition of the delegated method can be obtained
+    # by inspecting the returned future.
+    #
+    # @!macro async_thread_safety_warning
+    #
+    # @return [Concurrent::IVar] the completed result of the synchronous operation
+    #
+    # @raise [NameError] the object does not respond to the requested method
+    # @raise [ArgumentError] the given `args` do not match the arity of the
+    #   requested method
+    def await
+      @__await_delegator__
+    end
+    alias_method :call, :await
+
+    # Initialize the internal serializer and other synchronization mechanisms.
+    #
+    # @note This method *must* be called immediately upon object construction.
+    #   This is the only way thread-safe initialization can be guaranteed.
+    #
+    # @!visibility private
+    def init_synchronization
+      return self if defined?(@__async_initialized__) && @__async_initialized__
+      @__async_initialized__ = true
+      @__async_delegator__ = AsyncDelegator.new(self)
+      @__await_delegator__ = AwaitDelegator.new(@__async_delegator__)
+      self
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb
new file mode 100644
index 0000000..1074006
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb
@@ -0,0 +1,222 @@
+require 'concurrent/atomic/atomic_reference'
+require 'concurrent/collection/copy_on_notify_observer_set'
+require 'concurrent/concern/observable'
+require 'concurrent/synchronization/object'
+
+# @!macro thread_safe_variable_comparison
+#
+#   ## Thread-safe Variable Classes
+#
+#   Each of the thread-safe variable classes is designed to solve a different
+#   problem. In general:
+#
+#   * *{Concurrent::Agent}:* Shared, mutable variable providing independent,
+#     uncoordinated, *asynchronous* change of individual values. Best used when
+#     the value will undergo frequent, complex updates. Suitable when the result
+#     of an update does not need to be known immediately.
+#   * *{Concurrent::Atom}:* Shared, mutable variable providing independent,
+#     uncoordinated, *synchronous* change of individual values. Best used when
+#     the value will undergo frequent reads but only occasional, though complex,
+#     updates. Suitable when the result of an update must be known immediately.
+#   * *{Concurrent::AtomicReference}:* A simple object reference that can be updated
+#     atomically. Updates are synchronous but fast. Best used when updates are
+#     simple set operations. Not suitable when updates are complex.
+#     {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
+#     but optimized for the given data type.
+#   * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used
+#     when two or more threads need to exchange data. The threads will pair then
+#     block on each other until the exchange is complete.
+#   * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread
+#     must give a value to another, which must take the value. The threads will
+#     block on each other until the exchange is complete.
+#   * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which
+#     holds a different value for each thread which has access. Often used as
+#     an instance variable in objects which must maintain different state
+#     for different threads.
+#   * *{Concurrent::TVar}:* Shared, mutable variables which provide
+#     *coordinated*, *synchronous*, change of *many* states. Used when multiple
+#     values must change together, in an all-or-nothing transaction.
+
+
+module Concurrent
+
+  # Atoms provide a way to manage shared, synchronous, independent state.
+  #
+  # An atom is initialized with an initial value and an optional validation
+  # proc. At any time the value of the atom can be synchronously and safely
+  # changed. If a validator is given at construction then any new value
+  # will be checked against the validator and will be rejected if the
+  # validator returns false or raises an exception.
+  #
+  # There are two ways to change the value of an atom: {#compare_and_set} and
+  # {#swap}. The former will set the new value if and only if it validates and
+  # the current value matches the expected old value. The latter will atomically
+  # set the new value to the result of running the given block if and only if
+  # that value validates.
+  #
+  # ## Example
+  #
+  # ```
+  # def next_fibonacci(set = nil)
+  #   return [0, 1] if set.nil?
+  #   set + [set[-2..-1].reduce{|sum,x| sum + x }]
+  # end
+  #
+  # # create an atom with an initial value
+  # atom = Concurrent::Atom.new(next_fibonacci)
+  #
+  # # send a few update requests
+  # 5.times do
+  #   atom.swap{|set| next_fibonacci(set) }
+  # end
+  #
+  # # get the current value
+  # atom.value #=> [0, 1, 1, 2, 3, 5, 8]
+  # ```
+  #
+  # ## Observation
+  #
+  # Atoms support observers through the {Concurrent::Observable} mixin module.
+  # Notification of observers occurs every time the value of the Atom changes.
+  # When notified the observer will receive three arguments: `time`, `old_value`,
+  # and `new_value`. The `time` argument is the time at which the value change
+  # occurred. The `old_value` is the value of the Atom when the change began.
+  # The `new_value` is the value to which the Atom was set when the change
+  # completed. Note that `old_value` and `new_value` may be the same. This is
+  # not an error. It simply means that the change operation returned the same
+  # value.
+  #
+  # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions.
+  #
+  # @!macro thread_safe_variable_comparison
+  #
+  # @see http://clojure.org/atoms Clojure Atoms
+  # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
+  class Atom < Synchronization::Object
+    include Concern::Observable
+
+    safe_initialization!
+    attr_atomic(:value)
+    private :value=, :swap_value, :compare_and_set_value, :update_value
+    public :value
+    alias_method :deref, :value
+
+    # @!method value
+    #   The current value of the atom.
+    #
+    #   @return [Object] The current value.
+
+    # Create a new atom with the given initial value.
+    #
+    # @param [Object] value The initial value
+    # @param [Hash] opts The options used to configure the atom
+    # @option opts [Proc] :validator (nil) Optional proc used to validate new
+    #   values. It must accept one and only one argument which will be the
+    #   intended new value. The validator should return true if the new value
+    #   is acceptable, else return false (preferably) or raise an exception.
+    #
+    # @!macro deref_options
+    #
+    # @raise [ArgumentError] if the validator is not a `Proc` (when given)
+    def initialize(value, opts = {})
+      super()
+      @Validator = opts.fetch(:validator, -> v { true })
+      self.observers = Collection::CopyOnNotifyObserverSet.new
+      self.value = value
+    end
+
+    # Atomically swaps the value of atom using the given block. The current
+    # value will be passed to the block, as will any arguments passed to
+    # the method. The new value will be validated against the (optional)
+    # validator proc given at construction. If validation fails the value
+    # will not be changed.
+    #
+    # Internally, {#swap} reads the current value, applies the block to it, and
+    # attempts to compare-and-set it in. Since another thread may have changed
+    # the value in the intervening time, it may have to retry, and does so in a
+    # spin loop. The net effect is that the value will always be the result of
+    # the application of the supplied block to a current value, atomically.
+    # However, because the block might be called multiple times, it must be free
+    # of side effects.
+    #
+    # @note The given block may be called multiple times, and thus should be free
+    #   of side effects.
+    #
+    # @param [Object] args Zero or more arguments passed to the block.
+    #
+    # @yield [value, args] Calculates a new value for the atom based on the
+    #   current value and any supplied arguments.
+    # @yieldparam value [Object] The current value of the atom.
+    # @yieldparam args [Object] All arguments passed to the function, in order.
+    # @yieldreturn [Object] The intended new value of the atom.
+    #
+    # @return [Object] The final value of the atom after all operations and
+    #   validations are complete.
+    #
+    # @raise [ArgumentError] When no block is given.
+    def swap(*args)
+      raise ArgumentError.new('no block given') unless block_given?
+
+      loop do
+        old_value = value
+        new_value = yield(old_value, *args)
+        begin
+          break old_value unless valid?(new_value)
+          break new_value if compare_and_set(old_value, new_value)
+        rescue
+          break old_value
+        end
+      end
+    end
+
+    # Atomically sets the value of atom to the new value if and only if the
+    # current value of the atom is identical to the old value and the new
+    # value successfully validates against the (optional) validator given
+    # at construction.
+    #
+    # @param [Object] old_value The expected current value.
+    # @param [Object] new_value The intended new value.
+    #
+    # @return [Boolean] True if the value is changed else false.
+    def compare_and_set(old_value, new_value)
+      if valid?(new_value) && compare_and_set_value(old_value, new_value)
+        observers.notify_observers(Time.now, old_value, new_value)
+        true
+      else
+        false
+      end
+    end
+
+    # Atomically sets the value of atom to the new value without regard for the
+    # current value so long as the new value successfully validates against the
+    # (optional) validator given at construction.
+    #
+    # @param [Object] new_value The intended new value.
+    #
+    # @return [Object] The final value of the atom after all operations and
+    #   validations are complete.
+    def reset(new_value)
+      old_value = value
+      if valid?(new_value)
+        self.value = new_value
+        observers.notify_observers(Time.now, old_value, new_value)
+        new_value
+      else
+        old_value
+      end
+    end
+
+    private
+
+    # Is the new value valid?
+    #
+    # @param [Object] new_value The intended new value.
+    # @return [Boolean] false if the validator function returns false or raises
+    #   an exception, else true
+    def valid?(new_value)
+      @Validator.call(new_value)
+    rescue
+      false
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb
new file mode 100644
index 0000000..f775691
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb
@@ -0,0 +1,127 @@
+require 'concurrent/utility/native_extension_loader' # load native parts first
+
+require 'concurrent/atomic/mutex_atomic_boolean'
+
+module Concurrent
+
+  ###################################################################
+
+  # @!macro atomic_boolean_method_initialize
+  #
+  #   Creates a new `AtomicBoolean` with the given initial value.
+  #
+  #   @param [Boolean] initial the initial value
+
+  # @!macro atomic_boolean_method_value_get
+  #
+  #   Retrieves the current `Boolean` value.
+  #
+  #   @return [Boolean] the current value
+
+  # @!macro atomic_boolean_method_value_set
+  #
+  #   Explicitly sets the value.
+  #
+  #   @param [Boolean] value the new value to be set
+  #
+  #   @return [Boolean] the current value
+
+  # @!macro atomic_boolean_method_true_question
+  #
+  #   Is the current value `true`?
+  #
+  #   @return [Boolean] true if the current value is `true`, else false
+
+  # @!macro atomic_boolean_method_false_question
+  #
+  #   Is the current value `false`?
+  #
+  #   @return [Boolean] true if the current value is `false`, else false
+
+  # @!macro atomic_boolean_method_make_true
+  #
+  #   Explicitly sets the value to true.
+  #
+  #   @return [Boolean] true if value has changed, otherwise false
+
+  # @!macro atomic_boolean_method_make_false
+  #
+  #   Explicitly sets the value to false.
+  #
+  #   @return [Boolean] true if value has changed, otherwise false
+
+  ###################################################################
+
+  # @!macro atomic_boolean_public_api
+  #
+  #   @!method initialize(initial = false)
+  #     @!macro atomic_boolean_method_initialize
+  #
+  #   @!method value
+  #     @!macro atomic_boolean_method_value_get
+  #
+  #   @!method value=(value)
+  #     @!macro atomic_boolean_method_value_set
+  #
+  #   @!method true?
+  #     @!macro atomic_boolean_method_true_question
+  #
+  #   @!method false?
+  #     @!macro atomic_boolean_method_false_question
+  #
+  #   @!method make_true
+  #     @!macro atomic_boolean_method_make_true
+  #
+  #   @!method make_false
+  #     @!macro atomic_boolean_method_make_false
+
+  ###################################################################
+
+  # @!visibility private
+  # @!macro internal_implementation_note
+  AtomicBooleanImplementation = case
+                                when Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
+                                  CAtomicBoolean
+                                when Concurrent.on_jruby?
+                                  JavaAtomicBoolean
+                                else
+                                  MutexAtomicBoolean
+                                end
+  private_constant :AtomicBooleanImplementation
+
+  # @!macro atomic_boolean
+  #
+  #   A boolean value that can be updated atomically. Reads and writes to an atomic
+  #   boolean are thread-safe and guaranteed to succeed. Reads and writes may block
+  #   briefly but no explicit locking is required.
+  #
+  #   @!macro thread_safe_variable_comparison
+  #
+  #   Performance:
+  #
+  #   ```
+  #   Testing with ruby 2.1.2
+  #   Testing with Concurrent::MutexAtomicBoolean...
+  #     2.790000   0.000000   2.790000 (  2.791454)
+  #   Testing with Concurrent::CAtomicBoolean...
+ # 0.740000 0.000000 0.740000 ( 0.740206) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicBoolean... + # 5.240000 2.520000 7.760000 ( 3.683000) + # Testing with Concurrent::JavaAtomicBoolean... + # 3.340000 0.010000 3.350000 ( 0.855000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean + # + # @!macro atomic_boolean_public_api + class AtomicBoolean < AtomicBooleanImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb new file mode 100644 index 0000000..26cd05d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb @@ -0,0 +1,144 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +require 'concurrent/atomic/mutex_atomic_fixnum' + +module Concurrent + + ################################################################### + + # @!macro atomic_fixnum_method_initialize + # + # Creates a new `AtomicFixnum` with the given initial value. + # + # @param [Fixnum] initial the initial value + # @raise [ArgumentError] if the initial value is not a `Fixnum` + + # @!macro atomic_fixnum_method_value_get + # + # Retrieves the current `Fixnum` value. + # + # @return [Fixnum] the current value + + # @!macro atomic_fixnum_method_value_set + # + # Explicitly sets the value. + # + # @param [Fixnum] value the new value to be set + # + # @return [Fixnum] the current value + # + # @raise [ArgumentError] if the new value is not a `Fixnum` + + # @!macro atomic_fixnum_method_increment + # + # Increases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to increase the current value + # + # @return [Fixnum] the current value after incrementation + + # @!macro atomic_fixnum_method_decrement + # + # Decreases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to decrease the current value + # + # @return [Fixnum] the current value after decrementation + + # @!macro atomic_fixnum_method_compare_and_set + # + # Atomically sets the value to the given updated value if the current + # value == the expected value. + # + # @param [Fixnum] expect the expected value + # @param [Fixnum] update the new value + # + # @return [Boolean] true if the value was updated else false + + # @!macro atomic_fixnum_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. 
+  #
+  #   @yield [Object] Calculate a new value for the atomic reference using
+  #     given (old) value
+  #   @yieldparam [Object] old_value the starting value of the atomic reference
+  #
+  #   @return [Object] the new value
+
+  ###################################################################
+
+  # @!macro atomic_fixnum_public_api
+  #
+  #   @!method initialize(initial = 0)
+  #     @!macro atomic_fixnum_method_initialize
+  #
+  #   @!method value
+  #     @!macro atomic_fixnum_method_value_get
+  #
+  #   @!method value=(value)
+  #     @!macro atomic_fixnum_method_value_set
+  #
+  #   @!method increment(delta = 1)
+  #     @!macro atomic_fixnum_method_increment
+  #
+  #   @!method decrement(delta = 1)
+  #     @!macro atomic_fixnum_method_decrement
+  #
+  #   @!method compare_and_set(expect, update)
+  #     @!macro atomic_fixnum_method_compare_and_set
+  #
+  #   @!method update
+  #     @!macro atomic_fixnum_method_update
+
+  ###################################################################
+
+  # @!visibility private
+  # @!macro internal_implementation_note
+  AtomicFixnumImplementation = case
+                               when Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
+                                 CAtomicFixnum
+                               when Concurrent.on_jruby?
+                                 JavaAtomicFixnum
+                               else
+                                 MutexAtomicFixnum
+                               end
+  private_constant :AtomicFixnumImplementation
+
+  # @!macro atomic_fixnum
+  #
+  #   A numeric value that can be updated atomically. Reads and writes to an atomic
+  #   fixnum are thread-safe and guaranteed to succeed. Reads and writes may block
+  #   briefly but no explicit locking is required.
+  #
+  #   @!macro thread_safe_variable_comparison
+  #
+  #   Performance:
+  #
+  #   ```
+  #   Testing with ruby 2.1.2
+  #   Testing with Concurrent::MutexAtomicFixnum...
+  #     3.130000   0.000000   3.130000 (  3.136505)
+  #   Testing with Concurrent::CAtomicFixnum...
+  #     0.790000   0.000000   0.790000 (  0.785550)
+  #
+  #   Testing with jruby 1.9.3
+  #   Testing with Concurrent::MutexAtomicFixnum...
+  #     5.460000   2.460000   7.920000 (  3.715000)
+  #   Testing with Concurrent::JavaAtomicFixnum...
+  #     4.520000   0.030000   4.550000 (  1.187000)
+  #   ```
+  #
+  #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong
+  #
+  # @!macro atomic_fixnum_public_api
+  class AtomicFixnum < AtomicFixnumImplementation
+    # @return [String] Short string representation.
+    def to_s
+      format '%s value:%s>', super[0..-2], value
+    end
+
+    alias_method :inspect, :to_s
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb
new file mode 100644
index 0000000..e16be65
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb
@@ -0,0 +1,167 @@
+require 'concurrent/errors'
+require 'concurrent/synchronization/object'
+
+module Concurrent
+  # An atomic reference which maintains an object reference along with a mark bit
+  # that can be updated atomically.
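+  #
+  # A minimal usage sketch (the values are illustrative; a common use for the
+  # mark bit is to flag a reference as logically deleted):
+  #
+  # ```
+  # node = Concurrent::AtomicMarkableReference.new(:payload, false)
+  # node.compare_and_set(:payload, :payload, false, true) #=> true
+  # node.marked? #=> true
+  # node.value   #=> :payload
+  # ```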
+  #
+  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html
+  #   java.util.concurrent.atomic.AtomicMarkableReference
+  class AtomicMarkableReference < ::Concurrent::Synchronization::Object
+
+    attr_atomic(:reference)
+    private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference
+
+    def initialize(value = nil, mark = false)
+      super()
+      self.reference = immutable_array(value, mark)
+    end
+
+    # Atomically sets the value and mark to the given updated value and
+    # mark given both:
+    #   - the current value == the expected value &&
+    #   - the current mark == the expected mark
+    #
+    # @param [Object] expected_val the expected value
+    # @param [Object] new_val the new value
+    # @param [Boolean] expected_mark the expected mark
+    # @param [Boolean] new_mark the new mark
+    #
+    # @return [Boolean] `true` if successful. A `false` return indicates
+    #   that the actual value was not equal to the expected value or the
+    #   actual mark was not equal to the expected mark
+    def compare_and_set(expected_val, new_val, expected_mark, new_mark)
+      # Memoize a valid reference to the current AtomicReference for
+      # later comparison.
+      current = reference
+      curr_val, curr_mark = current
+
+      # Ensure that the expected marks match.
+      return false unless expected_mark == curr_mark
+
+      if expected_val.is_a? Numeric
+        # If the object is a numeric, we need to ensure we are comparing
+        # the numerical values
+        return false unless expected_val == curr_val
+      else
+        # Otherwise, we need to ensure we are comparing the object identity.
+        # Theoretically, this could be incorrect if a user monkey-patched
+        # `Object#equal?`, but they should know that they are playing with
+        # fire at that point.
+        return false unless expected_val.equal? curr_val
+      end
+
+      prospect = immutable_array(new_val, new_mark)
+
+      compare_and_set_reference current, prospect
+    end
+
+    alias_method :compare_and_swap, :compare_and_set
+
+    # Gets the current reference and marked values.
+    #
+    # @return [Array] the current reference and marked values
+    def get
+      reference
+    end
+
+    # Gets the current value of the reference
+    #
+    # @return [Object] the current value of the reference
+    def value
+      reference[0]
+    end
+
+    # Gets the current marked value
+    #
+    # @return [Boolean] the current marked value
+    def mark
+      reference[1]
+    end
+
+    alias_method :marked?, :mark
+
+    # _Unconditionally_ sets both the reference and the mark to the given
+    # values.
+    #
+    # @param [Object] new_val the new value
+    # @param [Boolean] new_mark the new mark
+    #
+    # @return [Array] both the new value and the new mark
+    def set(new_val, new_mark)
+      self.reference = immutable_array(new_val, new_mark)
+    end
+
+    # Pass the current value and marked state to the given block, replacing it
+    # with the block's results. May retry if the value changes during the
+    # block's execution.
+ # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and new mark + def update + loop do + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + if compare_and_set old_val, new_val, old_mark, new_mark + return immutable_array(new_val, new_mark) + end + end + end + + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state + # + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + unless compare_and_set old_val, new_val, old_mark, new_mark + fail ::Concurrent::ConcurrentUpdateError, + 'AtomicMarkableReference: Update failed due to race condition.', + 'Note: If you would like to guarantee an update, please use ' + + 'the `AtomicMarkableReference#update` method.' + end + + immutable_array(new_val, new_mark) + end + + # Pass the current value to the given block, replacing it with the + # block's result. Simply return nil if update fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state, or nil if + # the update failed + def try_update + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + return unless compare_and_set old_val, new_val, old_mark, new_mark + + immutable_array(new_val, new_mark) + end + + private + + def immutable_array(*args) + args.freeze + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb new file mode 100644 index 0000000..bb5fb77 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb @@ -0,0 +1,135 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +require 'concurrent/atomic_reference/atomic_direct_update' +require 'concurrent/atomic_reference/numeric_cas_wrapper' +require 'concurrent/atomic_reference/mutex_atomic' + +# Shim for TruffleRuby::AtomicReference +if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) + # @!visibility private + module TruffleRuby + AtomicReference = Truffle::AtomicReference + end +end + +module Concurrent + + # @!macro internal_implementation_note + AtomicReferenceImplementation = case + when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? 
+ # @!visibility private + # @!macro internal_implementation_note + class CAtomicReference + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + end + CAtomicReference + when Concurrent.on_jruby? + # @!visibility private + # @!macro internal_implementation_note + class JavaAtomicReference + include AtomicDirectUpdate + end + JavaAtomicReference + when Concurrent.on_truffleruby? + class TruffleRubyAtomicReference < TruffleRuby::AtomicReference + include AtomicDirectUpdate + alias_method :value, :get + alias_method :value=, :set + alias_method :compare_and_swap, :compare_and_set + alias_method :swap, :get_and_set + end + TruffleRubyAtomicReference + else + MutexAtomicReference + end + private_constant :AtomicReferenceImplementation + + # An object reference that may be updated atomically. All read and write + # operations have java volatile semantic. + # + # @!macro thread_safe_variable_comparison + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html + # + # @!method initialize(value = nil) + # @!macro atomic_reference_method_initialize + # @param [Object] value The initial value. + # + # @!method get + # @!macro atomic_reference_method_get + # Gets the current value. + # @return [Object] the current value + # + # @!method set(new_value) + # @!macro atomic_reference_method_set + # Sets to the given value. + # @param [Object] new_value the new value + # @return [Object] the new value + # + # @!method get_and_set(new_value) + # @!macro atomic_reference_method_get_and_set + # Atomically sets to the given value and returns the old value. + # @param [Object] new_value the new value + # @return [Object] the old value + # + # @!method compare_and_set(old_value, new_value) + # @!macro atomic_reference_method_compare_and_set + # + # Atomically sets the value to the given updated value if + # the current value == the expected value. + # + # @param [Object] old_value the expected value + # @param [Object] new_value the new value + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value. + # + # @!method update + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @return [Object] the new value + # + # @!method try_update + # Pass the current value to the given block, replacing it + # with the block's result. Return nil if the update fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This method was altered to avoid raising an exception by default. + # Instead, this method now returns `nil` in case of failure. For more info, + # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value, or nil if update failed + # + # @!method try_update! + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. 
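+  #
+  #   A small usage sketch (illustrative; `Concurrent::AtomicReference.new`
+  #   picks the best implementation for the current Ruby):
+  #
+  #   @example
+  #     ref = Concurrent::AtomicReference.new(0)
+  #     ref.try_update! { |v| v + 1 } #=> 1
+  #     ref.get                       #=> 1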
+ # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This behavior mimics the behavior of the original + # `AtomicReference#try_update` API. The reason this was changed was to + # avoid raising exceptions (which are inherently slow) by default. For more + # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + class AtomicReference < AtomicReferenceImplementation + + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], get + end + + alias_method :inspect, :to_s + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb new file mode 100644 index 0000000..d883aed --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb @@ -0,0 +1,100 @@ +require 'concurrent/utility/engine' +require 'concurrent/atomic/mutex_count_down_latch' +require 'concurrent/atomic/java_count_down_latch' + +module Concurrent + + ################################################################### + + # @!macro count_down_latch_method_initialize + # + # Create a new `CountDownLatch` with the initial `count`. + # + # @param [new] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro count_down_latch_method_wait + # + # Block on the latch until the counter reaches zero or until `timeout` is reached. + # + # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` + # to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` + + # @!macro count_down_latch_method_count_down + # + # Signal the latch to decrement the counter. Will signal all blocked threads when + # the `count` reaches zero. + + # @!macro count_down_latch_method_count + # + # The current value of the counter. + # + # @return [Fixnum] the current value of the counter + + ################################################################### + + # @!macro count_down_latch_public_api + # + # @!method initialize(count = 1) + # @!macro count_down_latch_method_initialize + # + # @!method wait(timeout = nil) + # @!macro count_down_latch_method_wait + # + # @!method count_down + # @!macro count_down_latch_method_count_down + # + # @!method count + # @!macro count_down_latch_method_count + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + CountDownLatchImplementation = case + when Concurrent.on_jruby? + JavaCountDownLatch + else + MutexCountDownLatch + end + private_constant :CountDownLatchImplementation + + # @!macro count_down_latch + # + # A synchronization object that allows one thread to wait on multiple other threads. + # The thread that will wait creates a `CountDownLatch` and sets the initial value + # (normally equal to the number of other threads). The initiating thread passes the + # latch to the other threads then waits for the other threads by calling the `#wait` + # method. Each of the other threads calls `#count_down` when done with its work. 
+  # When the latch counter reaches zero the waiting thread is unblocked and continues
+  # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset.
+  #
+  # @!macro count_down_latch_public_api
+  # @example Waiter and Decrementer
+  #   latch = Concurrent::CountDownLatch.new(3)
+  #
+  #   waiter = Thread.new do
+  #     latch.wait
+  #     puts "Waiter released"
+  #   end
+  #
+  #   decrementer = Thread.new do
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #
+  #     sleep(1)
+  #     latch.count_down
+  #     puts latch.count
+  #   end
+  #
+  #   [waiter, decrementer].each(&:join)
+  class CountDownLatch < CountDownLatchImplementation
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb
new file mode 100644
index 0000000..9ebe29d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb
@@ -0,0 +1,128 @@
+require 'concurrent/synchronization/lockable_object'
+require 'concurrent/utility/native_integer'
+
+module Concurrent
+
+  # A synchronization aid that allows a set of threads to all wait for each
+  # other to reach a common barrier point.
+  # @example
+  #   barrier = Concurrent::CyclicBarrier.new(3)
+  #   jobs = Array.new(3) { |i| -> { sleep i; p done: i } }
+  #   process = -> (i) do
+  #     # waiting to start at the same time
+  #     barrier.wait
+  #     # execute job
+  #     jobs[i].call
+  #     # wait for others to finish
+  #     barrier.wait
+  #   end
+  #   threads = 2.times.map do |i|
+  #     Thread.new(i, &process)
+  #   end
+  #
+  #   # use main as well
+  #   process.call 2
+  #
+  #   # here we can be sure that all jobs are processed
+  class CyclicBarrier < Synchronization::LockableObject
+
+    # @!visibility private
+    Generation = Struct.new(:status)
+    private_constant :Generation
+
+    # Create a new `CyclicBarrier` that waits for `parties` threads
+    #
+    # @param [Fixnum] parties the number of parties
+    # @yield an optional block that will be executed after the last thread
+    #   arrives and before the others are released
+    #
+    # @raise [ArgumentError] if `parties` is not an integer or is less than one
+    def initialize(parties, &block)
+      Utility::NativeInteger.ensure_integer_and_bounds parties
+      Utility::NativeInteger.ensure_positive_and_no_zero parties
+
+      super(&nil)
+      synchronize { ns_initialize parties, &block }
+    end
+
+    # @return [Fixnum] the number of threads needed to pass the barrier
+    def parties
+      synchronize { @parties }
+    end
+
+    # @return [Fixnum] the number of threads currently waiting on the barrier
+    def number_waiting
+      synchronize { @number_waiting }
+    end
+
+    # Blocks on the barrier until the number of waiting threads is equal to
+    # `parties` or until `timeout` is reached or `reset` is called.
+    # If a block has been passed to the constructor, it will be executed once by
+    # the last arrived thread before releasing the others.
+    # @param [Fixnum] timeout the number of seconds to wait for the counter or
+    #   `nil` to block indefinitely
+    # @return [Boolean] `true` if the `count` reaches zero else false on
+    #   `timeout` or on `reset` or if the barrier is broken
+    def wait(timeout = nil)
+      synchronize do
+
+        return false unless @generation.status == :waiting
+
+        @number_waiting += 1
+
+        if @number_waiting == @parties
+          @action.call if @action
+          ns_generation_done @generation, :fulfilled
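+          # the last arriving thread fulfills the generation, wakes the other
+          # waiters, and itself returns true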
+          true
+        else
+          generation = @generation
+          if ns_wait_until(timeout) { generation.status != :waiting }
+            generation.status == :fulfilled
+          else
+            ns_generation_done generation, :broken, false
+            false
+          end
+        end
+      end
+    end
+
+    # Resets the barrier to its initial state.
+    # If there is at least one waiting thread, it will be woken up, the `wait`
+    # method will return false and the barrier will be broken.
+    # If the barrier is broken, this method restores it to the original state.
+    #
+    # @return [nil]
+    def reset
+      synchronize { ns_generation_done @generation, :reset }
+    end
+
+    # A barrier can be broken when:
+    # - a thread called the `reset` method while at least one other thread was waiting
+    # - at least one thread timed out on the `wait` method
+    #
+    # A broken barrier can be restored using `reset`, but it's safer to create a new one.
+    # @return [Boolean] true if the barrier is broken otherwise false
+    def broken?
+      synchronize { @generation.status != :waiting }
+    end
+
+    protected
+
+    def ns_generation_done(generation, status, continue = true)
+      generation.status = status
+      ns_next_generation if continue
+      ns_broadcast
+    end
+
+    def ns_next_generation
+      @generation = Generation.new(:waiting)
+      @number_waiting = 0
+    end
+
+    def ns_initialize(parties, &block)
+      @parties = parties
+      @action = block
+      ns_next_generation
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb
new file mode 100644
index 0000000..ccf84c9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb
@@ -0,0 +1,109 @@
+require 'thread'
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+
+  # Old school kernel-style event reminiscent of Win32 programming in C++.
+  #
+  # When an `Event` is created it is in the `unset` state. Threads can choose to
+  # `#wait` on the event, blocking until released by another thread. When one
+  # thread wants to alert all blocking threads it calls the `#set` method which
+  # will then wake up all listeners. Once an `Event` has been set it remains set.
+  # New threads calling `#wait` will return immediately. An `Event` may be
+  # `#reset` at any time once it has been set.
+  #
+  # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx
+  # @example
+  #   event = Concurrent::Event.new
+  #
+  #   t1 = Thread.new do
+  #     puts "t1 is waiting"
+  #     event.wait(1)
+  #     puts "event occurred"
+  #   end
+  #
+  #   t2 = Thread.new do
+  #     puts "t2 calling set"
+  #     event.set
+  #   end
+  #
+  #   [t1, t2].each(&:join)
+  #
+  #   # prints:
+  #   # t1 is waiting
+  #   # t2 calling set
+  #   # event occurred
+  class Event < Synchronization::LockableObject
+
+    # Creates a new `Event` in the unset state. Threads calling `#wait` on the
+    # `Event` will block.
+    def initialize
+      super
+      synchronize { ns_initialize }
+    end
+
+    # Is the object in the set state?
+    #
+    # @return [Boolean] indicating whether or not the `Event` has been set
+    def set?
+      synchronize { @set }
+    end
+
+    # Trigger the event, setting the state to `set` and releasing all threads
+    # waiting on the event. Has no effect if the `Event` has already been set.
+    #
+    # @return [Boolean] should always return `true`
+    def set
+      synchronize { ns_set }
+    end
+
+    # Atomically sets the event if it is currently unset.
+    #
+    # @return [Boolean] true if this call set the event, false if it was already set
+    def try?
+      synchronize { @set ? false : ns_set }
+    end
+
+    # Reset a previously set event back to the `unset` state.
+ # Has no effect if the `Event` has not yet been set. + # + # @return [Boolean] should always return `true` + def reset + synchronize do + if @set + @set = false + @iteration +=1 + end + true + end + end + + # Wait a given number of seconds for the `Event` to be set by another + # thread. Will wait forever when no `timeout` value is given. Returns + # immediately if the `Event` has already been set. + # + # @return [Boolean] true if the `Event` was set before timeout else false + def wait(timeout = nil) + synchronize do + unless @set + iteration = @iteration + ns_wait_until(timeout) { iteration < @iteration || @set } + else + true + end + end + end + + protected + + def ns_set + unless @set + @set = true + ns_broadcast + end + true + end + + def ns_initialize + @set = false + @iteration = 0 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb new file mode 100644 index 0000000..e90fc24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb @@ -0,0 +1,109 @@ +require 'concurrent/constants' +require_relative 'locals' + +module Concurrent + + # A `FiberLocalVar` is a variable where the value is different for each fiber. + # Each variable may have a default value, but when you modify the variable only + # the current fiber will ever see that change. + # + # This is similar to Ruby's built-in fiber-local variables (`Thread.current[:name]`), + # but with these major advantages: + # * `FiberLocalVar` has its own identity, it doesn't need a Symbol. + # * Each Ruby's built-in fiber-local variable leaks some memory forever (it's a Symbol held forever on the fiber), + # so it's only OK to create a small amount of them. + # `FiberLocalVar` has no such issue and it is fine to create many of them. + # * Ruby's built-in fiber-local variables leak forever the value set on each fiber (unless set to nil explicitly). + # `FiberLocalVar` automatically removes the mapping for each fiber once the `FiberLocalVar` instance is GC'd. + # + # @example + # v = FiberLocalVar.new(14) + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # + # @example + # v = FiberLocalVar.new(14) + # + # Fiber.new do + # v.value #=> 14 + # v.value = 1 + # v.value #=> 1 + # end.resume + # + # Fiber.new do + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # end.resume + # + # v.value #=> 14 + class FiberLocalVar + LOCALS = FiberLocals.new + + # Creates a fiber local variable. + # + # @param [Object] default the default value when otherwise unset + # @param [Proc] default_block Optional block that gets called to obtain the + # default value for each fiber + def initialize(default = nil, &default_block) + if default && block_given? + raise ArgumentError, "Cannot use both value and block as default value" + end + + if block_given? + @default_block = default_block + @default = nil + else + @default_block = nil + @default = default + end + + @index = LOCALS.next_index(self) + end + + # Returns the value in the current fiber's copy of this fiber-local variable. + # + # @return [Object] the current value + def value + LOCALS.fetch(@index) { default } + end + + # Sets the current fiber's copy of this fiber-local variable to the specified value. 
+ # + # @param [Object] value the value to set + # @return [Object] the new value + def value=(value) + LOCALS.set(@index, value) + end + + # Bind the given value to fiber local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value + def bind(value) + if block_given? + old_value = self.value + self.value = value + begin + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb new file mode 100644 index 0000000..3c119bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb @@ -0,0 +1,43 @@ +if Concurrent.on_jruby? + require 'concurrent/utility/native_extension_loader' + + module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class JavaCountDownLatch + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds(count) + Utility::NativeInteger.ensure_positive(count) + @latch = java.util.concurrent.CountDownLatch.new(count) + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly { @latch.await } + result = true + else + Synchronization::JRuby.sleep_interruptibly do + result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + end + + # @!macro count_down_latch_method_count_down + def count_down + @latch.countDown + end + + # @!macro count_down_latch_method_count + def count + @latch.getCount + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb new file mode 100644 index 0000000..0a276ae --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb @@ -0,0 +1,189 @@ +require 'fiber' +require 'concurrent/utility/engine' +require 'concurrent/constants' + +module Concurrent + # @!visibility private + # @!macro internal_implementation_note + # + # An abstract implementation of local storage, with sub-classes for + # per-thread and per-fiber locals. + # + # Each execution context (EC, thread or fiber) has a lazily initialized array + # of local variable values. Each time a new local variable is created, we + # allocate an "index" for it. + # + # For example, if the allocated index is 1, that means slot #1 in EVERY EC's + # locals array will be used for the value of that variable. + # + # The good thing about using a per-EC structure to hold values, rather than + # a global, is that no synchronization is needed when reading and writing + # those values (since the structure is only ever accessed by a single + # thread). 
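+  #
+  # A rough sketch of the layout (illustrative values only, not actual
+  # internals):
+  #
+  #   v1 = Concurrent::FiberLocalVar.new  # allocated index 1
+  #   v2 = Concurrent::FiberLocalVar.new  # allocated index 2
+  #   # fiber A's locals array: [nil, <v1's value in A>, <v2's value in A>]
+  #   # fiber B's locals array: [nil, <v1's value in B>, <v2's value in B>]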
+ # + # Of course, when a local variable is GC'd, 1) we need to recover its index + # for use by other new local variables (otherwise the locals arrays could + # get bigger and bigger with time), and 2) we need to null out all the + # references held in the now-unused slots (both to avoid blocking GC of those + # objects, and also to prevent "stale" values from being passed on to a new + # local when the index is reused). + # + # Because we need to null out freed slots, we need to keep references to + # ALL the locals arrays, so we can null out the appropriate slots in all of + # them. This is why we need to use a finalizer to clean up the locals array + # when the EC goes out of scope. + class AbstractLocals + def initialize + @free = [] + @lock = Mutex.new + @all_arrays = {} + @next = 0 + end + + def synchronize + @lock.synchronize { yield } + end + + if Concurrent.on_cruby? + def weak_synchronize + yield + end + else + alias_method :weak_synchronize, :synchronize + end + + def next_index(local) + index = synchronize do + if @free.empty? + @next += 1 + else + @free.pop + end + end + + # When the local goes out of scope, we should free the associated index + # and all values stored into it. + ObjectSpace.define_finalizer(local, local_finalizer(index)) + + index + end + + def free_index(index) + weak_synchronize do + # The cost of GC'ing a TLV is linear in the number of ECs using local + # variables. But that is natural! More ECs means more storage is used + # per local variable. So naturally more CPU time is required to free + # more storage. + # + # DO NOT use each_value which might conflict with new pair assignment + # into the hash in #set method. + @all_arrays.values.each do |locals| + locals[index] = nil + end + + # free index has to be published after the arrays are cleared: + @free << index + end + end + + def fetch(index) + locals = self.locals + value = locals ? locals[index] : nil + + if nil == value + yield + elsif NULL.equal?(value) + nil + else + value + end + end + + def set(index, value) + locals = self.locals! + locals[index] = (nil == value ? NULL : value) + + value + end + + private + + # When the local goes out of scope, clean up that slot across all locals currently assigned. + def local_finalizer(index) + proc do + free_index(index) + end + end + + # When a thread/fiber goes out of scope, remove the array from @all_arrays. + def thread_fiber_finalizer(array_object_id) + proc do + weak_synchronize do + @all_arrays.delete(array_object_id) + end + end + end + + # Returns the locals for the current scope, or nil if none exist. + def locals + raise NotImplementedError + end + + # Returns the locals for the current scope, creating them if necessary. + def locals! + raise NotImplementedError + end + end + + # @!visibility private + # @!macro internal_implementation_note + # An array-backed storage of indexed variables per thread. + class ThreadLocals < AbstractLocals + def locals + Thread.current.thread_variable_get(:concurrent_thread_locals) + end + + def locals! 
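+      # Fetch this thread's locals array, creating it on first access and
+      # registering it (plus a finalizer) so freed indices can be cleaned up.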
+ thread = Thread.current + locals = thread.thread_variable_get(:concurrent_thread_locals) + + unless locals + locals = thread.thread_variable_set(:concurrent_thread_locals, []) + weak_synchronize do + @all_arrays[locals.object_id] = locals + end + # When the thread goes out of scope, we should delete the associated locals: + ObjectSpace.define_finalizer(thread, thread_fiber_finalizer(locals.object_id)) + end + + locals + end + end + + # @!visibility private + # @!macro internal_implementation_note + # An array-backed storage of indexed variables per fiber. + class FiberLocals < AbstractLocals + def locals + Thread.current[:concurrent_fiber_locals] + end + + def locals! + thread = Thread.current + locals = thread[:concurrent_fiber_locals] + + unless locals + locals = thread[:concurrent_fiber_locals] = [] + weak_synchronize do + @all_arrays[locals.object_id] = locals + end + # When the fiber goes out of scope, we should delete the associated locals: + ObjectSpace.define_finalizer(Fiber.current, thread_fiber_finalizer(locals.object_id)) + end + + locals + end + end + + private_constant :AbstractLocals, :ThreadLocals, :FiberLocals +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb new file mode 100644 index 0000000..ebf23a2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb @@ -0,0 +1,28 @@ +require 'concurrent/utility/engine' +require_relative 'fiber_local_var' +require_relative 'thread_local_var' + +module Concurrent + # @!visibility private + def self.mutex_owned_per_thread? + return false if Concurrent.on_jruby? || Concurrent.on_truffleruby? + + mutex = Mutex.new + # Lock the mutex: + mutex.synchronize do + # Check if the mutex is still owned in a child fiber: + Fiber.new { mutex.owned? }.resume + end + end + + if mutex_owned_per_thread? + LockLocalVar = ThreadLocalVar + else + LockLocalVar = FiberLocalVar + end + + # Either {FiberLocalVar} or {ThreadLocalVar} depending on whether Mutex (and Monitor) + # are held, respectively, per Fiber or per Thread. + class LockLocalVar + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb new file mode 100644 index 0000000..015996b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb @@ -0,0 +1,68 @@ +require 'concurrent/synchronization/safe_initialization' + +module Concurrent + + # @!macro atomic_boolean + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicBoolean + extend Concurrent::Synchronization::SafeInitialization + + # @!macro atomic_boolean_method_initialize + def initialize(initial = false) + super() + @Lock = ::Mutex.new + @value = !!initial + end + + # @!macro atomic_boolean_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_boolean_method_value_set + def value=(value) + synchronize { @value = !!value } + end + + # @!macro atomic_boolean_method_true_question + def true? + synchronize { @value } + end + + # @!macro atomic_boolean_method_false_question + def false? 
+ synchronize { !@value } + end + + # @!macro atomic_boolean_method_make_true + def make_true + synchronize { ns_make_value(true) } + end + + # @!macro atomic_boolean_method_make_false + def make_false + synchronize { ns_make_value(false) } + end + + protected + + # @!visibility private + def synchronize + if @Lock.owned? + yield + else + @Lock.synchronize { yield } + end + end + + private + + # @!visibility private + def ns_make_value(value) + old = @value + @value = value + old != @value + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb new file mode 100644 index 0000000..0ca3955 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb @@ -0,0 +1,81 @@ +require 'concurrent/synchronization/safe_initialization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro atomic_fixnum + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicFixnum + extend Concurrent::Synchronization::SafeInitialization + + # @!macro atomic_fixnum_method_initialize + def initialize(initial = 0) + super() + @Lock = ::Mutex.new + ns_set(initial) + end + + # @!macro atomic_fixnum_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_fixnum_method_value_set + def value=(value) + synchronize { ns_set(value) } + end + + # @!macro atomic_fixnum_method_increment + def increment(delta = 1) + synchronize { ns_set(@value + delta.to_i) } + end + + alias_method :up, :increment + + # @!macro atomic_fixnum_method_decrement + def decrement(delta = 1) + synchronize { ns_set(@value - delta.to_i) } + end + + alias_method :down, :decrement + + # @!macro atomic_fixnum_method_compare_and_set + def compare_and_set(expect, update) + synchronize do + if @value == expect.to_i + @value = update.to_i + true + else + false + end + end + end + + # @!macro atomic_fixnum_method_update + def update + synchronize do + @value = yield @value + end + end + + protected + + # @!visibility private + def synchronize + if @Lock.owned? 
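+        # re-entrant call from the thread that already owns the lock:
+        # Mutex#synchronize would raise ThreadError here, so just yield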
+ yield + else + @Lock.synchronize { yield } + end + end + + private + + # @!visibility private + def ns_set(value) + Utility::NativeInteger.ensure_integer_and_bounds value + @value = value + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb new file mode 100644 index 0000000..29aa1ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb @@ -0,0 +1,44 @@ +require 'concurrent/synchronization/lockable_object' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class MutexCountDownLatch < Synchronization::LockableObject + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds count + Utility::NativeInteger.ensure_positive count + + super() + synchronize { ns_initialize count } + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + synchronize { ns_wait_until(timeout) { @count == 0 } } + end + + # @!macro count_down_latch_method_count_down + def count_down + synchronize do + @count -= 1 if @count > 0 + ns_broadcast if @count == 0 + end + end + + # @!macro count_down_latch_method_count + def count + synchronize { @count } + end + + protected + + def ns_initialize(count) + @count = count + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb new file mode 100644 index 0000000..4347289 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb @@ -0,0 +1,131 @@ +require 'concurrent/synchronization/lockable_object' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro semaphore + # @!visibility private + # @!macro internal_implementation_note + class MutexSemaphore < Synchronization::LockableObject + + # @!macro semaphore_method_initialize + def initialize(count) + Utility::NativeInteger.ensure_integer_and_bounds count + + super() + synchronize { ns_initialize count } + end + + # @!macro semaphore_method_acquire + def acquire(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + try_acquire_timed(permits, nil) + end + + return unless block_given? + + begin + yield + ensure + release(permits) + end + end + + # @!macro semaphore_method_available_permits + def available_permits + synchronize { @free } + end + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + def drain_permits + synchronize do + @free.tap { |_| @free = 0 } + end + end + + # @!macro semaphore_method_try_acquire + def try_acquire(permits = 1, timeout = nil) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + acquired = synchronize do + if timeout.nil? + try_acquire_now(permits) + else + try_acquire_timed(permits, timeout) + end + end + + return acquired unless block_given? 
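+      # block form: only run the block when the permits were actually
+      # acquired, and always hand them back afterwards; returns nil on failure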
+ return unless acquired + + begin + yield + ensure + release(permits) + end + end + + # @!macro semaphore_method_release + def release(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + @free += permits + permits.times { ns_signal } + end + nil + end + + # Shrinks the number of available permits by the indicated reduction. + # + # @param [Fixnum] reduction Number of permits to remove. + # + # @raise [ArgumentError] if `reduction` is not an integer or is negative + # + # @raise [ArgumentError] if `@free` - `@reduction` is less than zero + # + # @return [nil] + # + # @!visibility private + def reduce_permits(reduction) + Utility::NativeInteger.ensure_integer_and_bounds reduction + Utility::NativeInteger.ensure_positive reduction + + synchronize { @free -= reduction } + nil + end + + protected + + # @!visibility private + def ns_initialize(count) + @free = count + end + + private + + # @!visibility private + def try_acquire_now(permits) + if @free >= permits + @free -= permits + true + else + false + end + end + + # @!visibility private + def try_acquire_timed(permits, timeout) + ns_wait_until(timeout) { try_acquire_now(permits) } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb new file mode 100644 index 0000000..b26bd17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb @@ -0,0 +1,255 @@ +require 'thread' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/errors' +require 'concurrent/synchronization/object' +require 'concurrent/synchronization/lock' + +module Concurrent + + # Ruby read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And if the "write" lock is taken, any readers who come along will have to wait) + # + # If readers are already active when a writer comes along, the writer will wait for + # all the readers to finish before going ahead. + # Any additional readers that come when the writer is already waiting, will also + # wait (so writers are not starved). + # + # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @note Do **not** try to acquire the write lock while already holding a read lock + # **or** try to acquire the write lock while you already have it. + # This will lead to deadlock + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReadWriteLock < Synchronization::Object + + # @!visibility private + WAITING_WRITER = 1 << 15 + + # @!visibility private + RUNNING_WRITER = 1 << 29 + + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + safe_initialization! 
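+
+    # A few sample counter states under this encoding (illustrative values,
+    # derived from the constants above):
+    #   0               -> lock is free
+    #   3               -> three readers hold read locks
+    #   (1 << 15) + 2   -> two readers running, one writer waiting
+    #   1 << 29         -> a writer holds the write lock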
+
+    # Implementation notes:
+    # A goal is to make the uncontended path for both readers/writers lock-free
+    # Only if there is reader-writer or writer-writer contention, should locks be used
+    # Internal state is represented by a single integer ("counter"), and updated
+    #  using atomic compare-and-swap operations
+    # When the counter is 0, the lock is free
+    # Each reader increments the counter by 1 when acquiring a read lock
+    #  (and decrements by 1 when releasing the read lock)
+    # The counter is increased by (1 << 15) for each writer waiting to acquire the
+    #  write lock, and by (1 << 29) if the write lock is taken
+
+    # Create a new `ReadWriteLock` in the unlocked state.
+    def initialize
+      super()
+      @Counter = AtomicFixnum.new(0) # single integer which represents lock state
+      @ReadLock = Synchronization::Lock.new
+      @WriteLock = Synchronization::Lock.new
+    end
+
+    # Execute a block operation within a read lock.
+    #
+    # @yield the task to be performed within the lock.
+    #
+    # @return [Object] the result of the block operation.
+    #
+    # @raise [ArgumentError] when no block is given.
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
+    #   is exceeded.
+    def with_read_lock
+      raise ArgumentError.new('no block given') unless block_given?
+      acquire_read_lock
+      begin
+        yield
+      ensure
+        release_read_lock
+      end
+    end
+
+    # Execute a block operation within a write lock.
+    #
+    # @yield the task to be performed within the lock.
+    #
+    # @return [Object] the result of the block operation.
+    #
+    # @raise [ArgumentError] when no block is given.
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
+    #   is exceeded.
+    def with_write_lock
+      raise ArgumentError.new('no block given') unless block_given?
+      acquire_write_lock
+      begin
+        yield
+      ensure
+        release_write_lock
+      end
+    end
+
+    # Acquire a read lock. If a write lock has been acquired, will block until
+    # it is released. Will not block if other read locks have been acquired.
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    #
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
+    #   is exceeded.
+    def acquire_read_lock
+      while true
+        c = @Counter.value
+        raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)
+
+        # If a writer is waiting when we first queue up, we need to wait
+        if waiting_writer?(c)
+          @ReadLock.wait_until { !waiting_writer? }
+
+          # after a reader has waited once, they are allowed to "barge" ahead of waiting writers
+          # but if a writer is *running*, the reader still needs to wait (naturally)
+          while true
+            c = @Counter.value
+            if running_writer?(c)
+              @ReadLock.wait_until { !running_writer? }
+            else
+              return true if @Counter.compare_and_set(c, c+1)
+            end
+          end
+        else
+          break if @Counter.compare_and_set(c, c+1)
+        end
+      end
+      true
+    end
+
+    # Release a previously acquired read lock.
+    #
+    # @return [Boolean] true if the lock is successfully released
+    def release_read_lock
+      while true
+        c = @Counter.value
+        if @Counter.compare_and_set(c, c-1)
+          # If one or more writers were waiting, and we were the last reader, wake a writer up
+          if waiting_writer?(c) && running_readers(c) == 1
+            @WriteLock.signal
+          end
+          break
+        end
+      end
+      true
+    end
+
+    # Acquire a write lock. Will block and wait for all active readers and writers.
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    #
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
+    #   is exceeded.
+    def acquire_write_lock
+      while true
+        c = @Counter.value
+        raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
+
+        if c == 0 # no readers OR writers running
+          # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead
+          break if @Counter.compare_and_set(0, RUNNING_WRITER)
+        elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
+          while true
+            # Now we have successfully incremented, so no more readers will be able to increment
+            #   (they will wait instead)
+            # However, readers OR writers could decrement right here, OR another writer could increment
+            @WriteLock.wait_until do
+              # So we have to do another check inside the synchronized section
+              # If a writer OR reader is running, then go to sleep
+              c = @Counter.value
+              !running_writer?(c) && !running_readers?(c)
+            end
+
+            # We just came out of a wait
+            # If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
+            #   then we are OK to stop waiting and go ahead
+            # Otherwise go back and wait again
+            c = @Counter.value
+            break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
+          end
+          break
+        end
+      end
+      true
+    end
+
+    # Release a previously acquired write lock.
+    #
+    # @return [Boolean] true if the lock is successfully released
+    def release_write_lock
+      return true unless running_writer?
+      c = @Counter.update { |counter| counter - RUNNING_WRITER }
+      @ReadLock.broadcast
+      @WriteLock.signal if waiting_writers(c) > 0
+      true
+    end
+
+    # Queries if the write lock is held by any thread.
+    #
+    # @return [Boolean] true if the write lock is held else false
+    def write_locked?
+      @Counter.value >= RUNNING_WRITER
+    end
+
+    # Queries whether any threads are waiting to acquire the read or write lock.
+    #
+    # @return [Boolean] true if any threads are waiting for a lock else false
+    def has_waiters?
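+      # readers only ever wait when a writer is waiting or running, so
+      # checking the writer bits is sufficient here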
+      waiting_writer?(@Counter.value)
+    end
+
+    private
+
+    # @!visibility private
+    def running_readers(c = @Counter.value)
+      c & MAX_READERS
+    end
+
+    # @!visibility private
+    def running_readers?(c = @Counter.value)
+      (c & MAX_READERS) > 0
+    end
+
+    # @!visibility private
+    def running_writer?(c = @Counter.value)
+      c >= RUNNING_WRITER
+    end
+
+    # @!visibility private
+    def waiting_writers(c = @Counter.value)
+      (c & MAX_WRITERS) / WAITING_WRITER
+    end
+
+    # @!visibility private
+    def waiting_writer?(c = @Counter.value)
+      c >= WAITING_WRITER
+    end
+
+    # @!visibility private
+    def max_readers?(c = @Counter.value)
+      (c & MAX_READERS) == MAX_READERS
+    end
+
+    # @!visibility private
+    def max_writers?(c = @Counter.value)
+      (c & MAX_WRITERS) == MAX_WRITERS
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb
new file mode 100644
index 0000000..6d72a3a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb
@@ -0,0 +1,379 @@
+require 'thread'
+require 'concurrent/atomic/atomic_reference'
+require 'concurrent/atomic/atomic_fixnum'
+require 'concurrent/errors'
+require 'concurrent/synchronization/object'
+require 'concurrent/synchronization/lock'
+require 'concurrent/atomic/lock_local_var'
+
+module Concurrent
+
+  # Re-entrant read-write lock implementation
+  #
+  # Allows any number of concurrent readers, but only one concurrent writer
+  # (And while the "write" lock is taken, no read locks can be obtained either.
+  # Hence, the write lock can also be called an "exclusive" lock.)
+  #
+  # If another thread has taken a read lock, any thread which wants a write lock
+  # will block until all the readers release their locks. However, once a thread
+  # starts waiting to obtain a write lock, any additional readers that come along
+  # will also wait (so writers are not starved).
+  #
+  # A thread can acquire both a read and write lock at the same time. A thread can
+  # also acquire a read lock OR a write lock more than once. Only when the read (or
+  # write) lock is released as many times as it was acquired, will the thread
+  # actually let it go, allowing other threads which might have been waiting
+  # to proceed. Therefore the lock can be upgraded by first acquiring the
+  # read lock and then the write lock, and it can be downgraded by first holding
+  # both the read and write locks, then releasing just the write lock.
+  #
+  # If both read and write locks are acquired by the same thread, it is not strictly
+  # necessary to release them in the same order they were acquired. In other words,
+  # the following code is legal:
+  #
+  # @example
+  #   lock = Concurrent::ReentrantReadWriteLock.new
+  #   lock.acquire_write_lock
+  #   lock.acquire_read_lock
+  #   lock.release_write_lock
+  #   # At this point, the current thread is holding only a read lock, not a write
+  #   # lock. So other threads can take read locks, but not a write lock.
+  #   lock.release_read_lock
+  #   # Now the current thread is not holding either a read or write lock, so
+  #   # another thread could potentially acquire a write lock.
+  #
+  # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`.
+  #
+  # @example
+  #   lock = Concurrent::ReentrantReadWriteLock.new
+  #   lock.with_read_lock { data.retrieve }
+  #   lock.with_write_lock { data.modify! }
+  #
+  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
+  class ReentrantReadWriteLock < Synchronization::Object
+
+    # Implementation notes:
+    #
+    # A goal is to make the uncontended path for both readers/writers mutex-free
+    # Only if there is reader-writer or writer-writer contention, should mutexes be used
+    # Otherwise, a single CAS operation is all we need to acquire/release a lock
+    #
+    # Internal state is represented by a single integer ("counter"), and updated
+    #  using atomic compare-and-swap operations
+    # When the counter is 0, the lock is free
+    # Each thread which has one OR MORE read locks increments the counter by 1
+    #  (and decrements by 1 when releasing the read lock)
+    # The counter is increased by (1 << 15) for each writer waiting to acquire the
+    #  write lock, and by (1 << 29) if the write lock is taken
+    #
+    # Additionally, each thread uses a thread-local variable to count how many times
+    #  it has acquired a read lock, AND how many times it has acquired a write lock.
+    # It uses a similar trick; an increment of 1 means a read lock was taken, and
+    #  an increment of (1 << 15) means a write lock was taken
+    # This is what makes re-entrancy possible
+    #
+    # 2 rules are followed to ensure good liveness properties:
+    # 1) Once a writer has queued up and is waiting for a write lock, no other thread
+    #  can take a lock without waiting
+    # 2) When a write lock is released, readers are given the "first chance" to wake
+    #  up and acquire a read lock
+    # Following these rules means readers and writers tend to "take turns", so neither
+    #  can starve the other, even under heavy contention
+
+    # @!visibility private
+    READER_BITS = 15
+    # @!visibility private
+    WRITER_BITS = 14
+
+    # Used with @Counter:
+    # @!visibility private
+    WAITING_WRITER = 1 << READER_BITS
+    # @!visibility private
+    RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS)
+    # @!visibility private
+    MAX_READERS = WAITING_WRITER - 1
+    # @!visibility private
+    MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1
+
+    # Used with @HeldCount:
+    # @!visibility private
+    WRITE_LOCK_HELD = 1 << READER_BITS
+    # @!visibility private
+    READ_LOCK_MASK = WRITE_LOCK_HELD - 1
+    # @!visibility private
+    WRITE_LOCK_MASK = MAX_WRITERS
+
+    safe_initialization!
+
+    # Create a new `ReentrantReadWriteLock` in the unlocked state.
+    def initialize
+      super()
+      @Counter = AtomicFixnum.new(0) # single integer which represents lock state
+      @ReadQueue = Synchronization::Lock.new # used to queue waiting readers
+      @WriteQueue = Synchronization::Lock.new # used to queue waiting writers
+      @HeldCount = LockLocalVar.new(0) # indicates # of R & W locks held by this thread
+    end
+
+    # Execute a block operation within a read lock.
+    #
+    # @yield the task to be performed within the lock.
+    #
+    # @return [Object] the result of the block operation.
+    #
+    # @raise [ArgumentError] when no block is given.
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
+    #   is exceeded.
+    def with_read_lock
+      raise ArgumentError.new('no block given') unless block_given?
+      acquire_read_lock
+      begin
+        yield
+      ensure
+        release_read_lock
+      end
+    end
+
+    # Execute a block operation within a write lock.
+    #
+    # @yield the task to be performed within the lock.
+    #
+    # @return [Object] the result of the block operation.
+    #
+    # @raise [ArgumentError] when no block is given.
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
+    #   is exceeded.
+    def with_write_lock
+      raise ArgumentError.new('no block given') unless block_given?
+      acquire_write_lock
+      begin
+        yield
+      ensure
+        release_write_lock
+      end
+    end
+
+    # Acquire a read lock. If a write lock is held by another thread, will block
+    # until it is released.
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    #
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
+    #   is exceeded.
+    def acquire_read_lock
+      if (held = @HeldCount.value) > 0
+        # If we already have a lock, there's no need to wait
+        if held & READ_LOCK_MASK == 0
+          # But we do need to update the counter, if we were holding a write
+          #   lock but not a read lock
+          @Counter.update { |c| c + 1 }
+        end
+        @HeldCount.value = held + 1
+        return true
+      end
+
+      while true
+        c = @Counter.value
+        raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)
+
+        # If a writer is waiting OR running when we first queue up, we need to wait
+        if waiting_or_running_writer?(c)
+          # Before going to sleep, check again with the ReadQueue mutex held
+          @ReadQueue.synchronize do
+            @ReadQueue.ns_wait if waiting_or_running_writer?
+          end
+          # Note: the above 'synchronize' block could have used #wait_until,
+          #   but that waits repeatedly in a loop, checking the wait condition
+          #   each time it wakes up (to protect against spurious wakeups)
+          # But we are already in a loop, which is only broken when we successfully
+          #   acquire the lock! So we don't care about spurious wakeups, and would
+          #   rather not pay the extra overhead of using #wait_until
+
+          # After a reader has waited once, they are allowed to "barge" ahead of waiting writers
+          # But if a writer is *running*, the reader still needs to wait (naturally)
+          while true
+            c = @Counter.value
+            if running_writer?(c)
+              @ReadQueue.synchronize do
+                @ReadQueue.ns_wait if running_writer?
+              end
+            elsif @Counter.compare_and_set(c, c+1)
+              @HeldCount.value = held + 1
+              return true
+            end
+          end
+        elsif @Counter.compare_and_set(c, c+1)
+          @HeldCount.value = held + 1
+          return true
+        end
+      end
+    end
+
+    # Try to acquire a read lock and return true if we succeed. If it cannot be
+    # acquired immediately, return false.
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    def try_read_lock
+      if (held = @HeldCount.value) > 0
+        if held & READ_LOCK_MASK == 0
+          # If we hold a write lock, but not a read lock...
+          @Counter.update { |c| c + 1 }
+        end
+        @HeldCount.value = held + 1
+        return true
+      else
+        c = @Counter.value
+        if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1)
+          @HeldCount.value = held + 1
+          return true
+        end
+      end
+      false
+    end
+
+    # Release a previously acquired read lock.
+    #
+    # @return [Boolean] true if the lock is successfully released
+    def release_read_lock
+      held = @HeldCount.value = @HeldCount.value - 1
+      rlocks_held = held & READ_LOCK_MASK
+      if rlocks_held == 0
+        c = @Counter.update { |counter| counter - 1 }
+        # If one or more writers were waiting, and we were the last reader, wake a writer up
+        if waiting_or_running_writer?(c) && running_readers(c) == 0
+          @WriteQueue.signal
+        end
+      elsif rlocks_held == READ_LOCK_MASK
+        raise IllegalOperationError, "Cannot release a read lock which is not held"
+      end
+      true
+    end
+
+    # Acquire a write lock. Will block and wait for all active readers and writers.
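+    #
+    # Re-entrant acquisition does not block: a thread already holding the write
+    # lock re-acquires it immediately, and a thread whose read locks are the
+    # only ones outstanding can upgrade without waiting. A brief sketch
+    # (illustrative):
+    #
+    # @example Upgrade a read lock to a write lock, then release both
+    #   lock = Concurrent::ReentrantReadWriteLock.new
+    #   lock.acquire_read_lock
+    #   lock.acquire_write_lock   # waits only for *other* readers or writers
+    #   lock.release_write_lock
+    #   lock.release_read_lock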
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    #
+    # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
+    #   is exceeded.
+    def acquire_write_lock
+      if (held = @HeldCount.value) >= WRITE_LOCK_HELD
+        # if we already have a write (exclusive) lock, there's no need to wait
+        @HeldCount.value = held + WRITE_LOCK_HELD
+        return true
+      end
+
+      while true
+        c = @Counter.value
+        raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
+
+        # To go ahead and take the lock without waiting, there must be no writer
+        #   running right now, AND no writers who came before us still waiting to
+        #   acquire the lock
+        # Additionally, if any read locks have been taken, we must hold all of them
+        if c == held && @Counter.compare_and_set(c, c+RUNNING_WRITER)
+          # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead
+          @HeldCount.value = held + WRITE_LOCK_HELD
+          return true
+        elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
+          while true
+            # Now we have successfully incremented, so no more readers will be able to increment
+            #   (they will wait instead)
+            # However, readers OR writers could decrement right here
+            @WriteQueue.synchronize do
+              # So we have to do another check inside the synchronized section
+              # If a writer OR another reader is running, then go to sleep
+              c = @Counter.value
+              @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held
+            end
+            # Note: if you are thinking of replacing the above 'synchronize' block
+            #   with #wait_until, read the comment in #acquire_read_lock first!
+
+            # We just came out of a wait
+            # If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
+            #   then we are OK to stop waiting and go ahead
+            # Otherwise go back and wait again
+            c = @Counter.value
+            if !running_writer?(c) &&
+               running_readers(c) == held &&
+               @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
+              @HeldCount.value = held + WRITE_LOCK_HELD
+              return true
+            end
+          end
+        end
+      end
+    end
+
+    # Try to acquire a write lock and return true if we succeed. If it cannot be
+    # acquired immediately, return false.
+    #
+    # @return [Boolean] true if the lock is successfully acquired
+    def try_write_lock
+      if (held = @HeldCount.value) >= WRITE_LOCK_HELD
+        @HeldCount.value = held + WRITE_LOCK_HELD
+        return true
+      else
+        c = @Counter.value
+        if !waiting_or_running_writer?(c) &&
+           running_readers(c) == held &&
+           @Counter.compare_and_set(c, c+RUNNING_WRITER)
+          @HeldCount.value = held + WRITE_LOCK_HELD
+          return true
+        end
+      end
+      false
+    end
+
+    # Release a previously acquired write lock.
+ # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD + wlocks_held = held & WRITE_LOCK_MASK + if wlocks_held == 0 + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadQueue.broadcast + @WriteQueue.signal if waiting_writers(c) > 0 + elsif wlocks_held == WRITE_LOCK_MASK + raise IllegalOperationError, "Cannot release a write lock which is not held" + end + true + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) >> READER_BITS + end + + # @!visibility private + def waiting_or_running_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb new file mode 100644 index 0000000..f0799f0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb @@ -0,0 +1,163 @@ +require 'concurrent/atomic/mutex_semaphore' + +module Concurrent + + ################################################################### + + # @!macro semaphore_method_initialize + # + # Create a new `Semaphore` with the initial `count`. + # + # @param [Fixnum] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer + + # @!macro semaphore_method_acquire + # + # Acquires the given number of permits from this semaphore, + # blocking until all are available. If a block is given, + # yields to it and releases the permits afterwards. + # + # @param [Fixnum] permits Number of permits to acquire + # + # @raise [ArgumentError] if `permits` is not an integer or is less than zero + # + # @return [nil, BasicObject] Without a block, `nil` is returned. If a block + # is given, its return value is returned. + + # @!macro semaphore_method_available_permits + # + # Returns the current number of permits available in this semaphore. + # + # @return [Integer] + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + + # @!macro semaphore_method_try_acquire + # + # Acquires the given number of permits from this semaphore, + # only if all are available at the time of invocation or within + # `timeout` interval. If a block is given, yields to it if the permits + # were successfully acquired, and releases them afterward, returning the + # block's return value. + # + # @param [Fixnum] permits the number of permits to acquire + # + # @param [Fixnum] timeout the number of seconds to wait for the counter + # or `nil` to return immediately + # + # @raise [ArgumentError] if `permits` is not an integer or is less than zero + # + # @return [true, false, nil, BasicObject] `false` if no permits are + # available, `true` when acquired a permit. 
If a block is given, the + # block's return value is returned if the permits were acquired; if not, + # `nil` is returned. + + # @!macro semaphore_method_release + # + # Releases the given number of permits, returning them to the semaphore. + # + # @param [Fixnum] permits Number of permits to return to the semaphore. + # + # @raise [ArgumentError] if `permits` is not a number or is less than zero + # + # @return [nil] + + ################################################################### + + # @!macro semaphore_public_api + # + # @!method initialize(count) + # @!macro semaphore_method_initialize + # + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + # + # @!method available_permits + # @!macro semaphore_method_available_permits + # + # @!method drain_permits + # @!macro semaphore_method_drain_permits + # + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + # + # @!method release(permits = 1) + # @!macro semaphore_method_release + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + SemaphoreImplementation = if Concurrent.on_jruby? + require 'concurrent/utility/native_extension_loader' + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation + + # @!macro semaphore + # + # A counting semaphore. Conceptually, a semaphore maintains a set of + # permits. Each {#acquire} blocks if necessary until a permit is + # available, and then takes it. Each {#release} adds a permit, potentially + # releasing a blocking acquirer. + # However, no actual permit objects are used; the Semaphore just keeps a + # count of the number available and acts accordingly. + # Alternatively, permits may be acquired within a block, and automatically + # released after the block finishes executing. + # + # @!macro semaphore_public_api + # @example + # semaphore = Concurrent::Semaphore.new(2) + # + # t1 = Thread.new do + # semaphore.acquire + # puts "Thread 1 acquired semaphore" + # end + # + # t2 = Thread.new do + # semaphore.acquire + # puts "Thread 2 acquired semaphore" + # end + # + # t3 = Thread.new do + # semaphore.acquire + # puts "Thread 3 acquired semaphore" + # end + # + # t4 = Thread.new do + # sleep(2) + # puts "Thread 4 releasing semaphore" + # semaphore.release + # end + # + # [t1, t2, t3, t4].each(&:join) + # + # # prints: + # # Thread 3 acquired semaphore + # # Thread 2 acquired semaphore + # # Thread 4 releasing semaphore + # # Thread 1 acquired semaphore + # + # @example + # semaphore = Concurrent::Semaphore.new(1) + # + # puts semaphore.available_permits + # semaphore.acquire do + # puts semaphore.available_permits + # end + # puts semaphore.available_permits + # + # # prints: + # # 1 + # # 0 + # # 1 + class Semaphore < SemaphoreImplementation + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb new file mode 100644 index 0000000..3b7e12b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb @@ -0,0 +1,111 @@ +require 'concurrent/constants' +require_relative 'locals' + +module Concurrent + + # A `ThreadLocalVar` is a variable where the value is different for each thread. 
+  # Each variable may have a default value, but when you modify the variable only
+  # the current thread will ever see that change.
+  #
+  # This is similar to Ruby's built-in thread-local variables (`Thread#thread_variable_get`),
+  # but with these major advantages:
+  # * `ThreadLocalVar` has its own identity, it doesn't need a Symbol.
+  # * Each of Ruby's built-in thread-local variables leaks some memory forever (the
+  #   Symbol is held by the thread forever), so it is only OK to create a small
+  #   number of them.
+  #   `ThreadLocalVar` has no such issue and it is fine to create many of them.
+  # * Ruby's built-in thread-local variables leak the value set on each thread forever
+  #   (unless set to nil explicitly).
+  #   `ThreadLocalVar` automatically removes the mapping for each thread once the
+  #   `ThreadLocalVar` instance is GC'd.
+  #
+  # @!macro thread_safe_variable_comparison
+  #
+  # @example
+  #   v = ThreadLocalVar.new(14)
+  #   v.value #=> 14
+  #   v.value = 2
+  #   v.value #=> 2
+  #
+  # @example
+  #   v = ThreadLocalVar.new(14)
+  #
+  #   t1 = Thread.new do
+  #     v.value #=> 14
+  #     v.value = 1
+  #     v.value #=> 1
+  #   end
+  #
+  #   t2 = Thread.new do
+  #     v.value #=> 14
+  #     v.value = 2
+  #     v.value #=> 2
+  #   end
+  #
+  #   v.value #=> 14
+  class ThreadLocalVar
+    LOCALS = ThreadLocals.new
+
+    # Creates a thread local variable.
+    #
+    # @param [Object] default the default value when otherwise unset
+    # @param [Proc] default_block Optional block that gets called to obtain the
+    #   default value for each thread
+    def initialize(default = nil, &default_block)
+      if default && block_given?
+        raise ArgumentError, "Cannot use both value and block as default value"
+      end
+
+      if block_given?
+        @default_block = default_block
+        @default = nil
+      else
+        @default_block = nil
+        @default = default
+      end
+
+      @index = LOCALS.next_index(self)
+    end
+
+    # Returns the value in the current thread's copy of this thread-local variable.
+    #
+    # @return [Object] the current value
+    def value
+      LOCALS.fetch(@index) { default }
+    end
+
+    # Sets the current thread's copy of this thread-local variable to the specified value.
+    #
+    # @param [Object] value the value to set
+    # @return [Object] the new value
+    def value=(value)
+      LOCALS.set(@index, value)
+    end
+
+    # Bind the given value to thread local storage during
+    # execution of the given block.
+    #
+    # @param [Object] value the value to bind
+    # @yield the operation to be performed with the bound variable
+    # @return [Object] the value
+    def bind(value)
+      if block_given?
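+        # remember the current value so it can be restored after the block runs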
+ old_value = self.value + self.value = value + begin + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb new file mode 100644 index 0000000..5d2d7ed --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb @@ -0,0 +1,37 @@ +require 'concurrent/errors' + +module Concurrent + + # Define update methods that use direct paths + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicDirectUpdate + def update + true until compare_and_set(old_value = get, new_value = yield(old_value)) + new_value + end + + def try_update + old_value = get + new_value = yield old_value + + return unless compare_and_set old_value, new_value + + new_value + end + + def try_update! + old_value = get + new_value = yield old_value + unless compare_and_set(old_value, new_value) + if $VERBOSE + raise ConcurrentUpdateError, "Update failed" + else + raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE + end + end + new_value + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb new file mode 100644 index 0000000..e5e2a63 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb @@ -0,0 +1,67 @@ +require 'concurrent/atomic_reference/atomic_direct_update' +require 'concurrent/atomic_reference/numeric_cas_wrapper' +require 'concurrent/synchronization/safe_initialization' + +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicReference + extend Concurrent::Synchronization::SafeInitialization + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + + # @!macro atomic_reference_method_initialize + def initialize(value = nil) + super() + @Lock = ::Mutex.new + @value = value + end + + # @!macro atomic_reference_method_get + def get + synchronize { @value } + end + alias_method :value, :get + + # @!macro atomic_reference_method_set + def set(new_value) + synchronize { @value = new_value } + end + alias_method :value=, :set + + # @!macro atomic_reference_method_get_and_set + def get_and_set(new_value) + synchronize do + old_value = @value + @value = new_value + old_value + end + end + alias_method :swap, :get_and_set + + # @!macro atomic_reference_method_compare_and_set + def _compare_and_set(old_value, new_value) + synchronize do + if @value.equal? old_value + @value = new_value + true + else + false + end + end + end + + protected + + # @!visibility private + def synchronize + if @Lock.owned? 
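+        # this thread already holds the mutex; MRI's Mutex is not reentrant, so
+        # locking it again would raise ThreadError -- just run the block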
+ yield + else + @Lock.synchronize { yield } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb new file mode 100644 index 0000000..709a382 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb @@ -0,0 +1,28 @@ +module Concurrent + + # Special "compare and set" handling of numeric values. + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicNumericCompareAndSetWrapper + + # @!macro atomic_reference_method_compare_and_set + def compare_and_set(old_value, new_value) + if old_value.kind_of? Numeric + while true + old = get + + return false unless old.kind_of? Numeric + + return false unless old == old_value + + result = _compare_and_set(old, new_value) + return result if result + end + else + _compare_and_set(old_value, new_value) + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb new file mode 100644 index 0000000..16cbe66 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb @@ -0,0 +1,10 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/cyclic_barrier' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/event' +require 'concurrent/atomic/read_write_lock' +require 'concurrent/atomic/reentrant_read_write_lock' +require 'concurrent/atomic/semaphore' +require 'concurrent/atomic/thread_local_var' diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb new file mode 100644 index 0000000..7c700bd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb @@ -0,0 +1,107 @@ +require 'concurrent/synchronization/lockable_object' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-read approach: + # observers are added and removed from a thread safe collection; every time + # a notification is required the internal data structure is copied to + # prevent concurrency issues + # + # @api private + class CopyOnNotifyObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? 
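+          # neither an observer object nor a block was supplied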
+          raise ArgumentError, 'should pass observer as a first argument or block'
+        elsif observer && block
+          raise ArgumentError.new('cannot provide both an observer and a block')
+        end
+
+        if block
+          observer = block
+          func     = :call
+        end
+
+        synchronize do
+          @observers[observer] = func
+          observer
+        end
+      end
+
+      # @!macro observable_delete_observer
+      def delete_observer(observer)
+        synchronize do
+          @observers.delete(observer)
+          observer
+        end
+      end
+
+      # @!macro observable_delete_observers
+      def delete_observers
+        synchronize do
+          @observers.clear
+          self
+        end
+      end
+
+      # @!macro observable_count_observers
+      def count_observers
+        synchronize { @observers.count }
+      end
+
+      # Notifies all registered observers with optional args
+      # @param [Object] args arguments to be passed to each observer
+      # @return [CopyOnNotifyObserverSet] self
+      def notify_observers(*args, &block)
+        observers = duplicate_observers
+        notify_to(observers, *args, &block)
+        self
+      end
+
+      # Notifies all registered observers with optional args and deletes them.
+      #
+      # @param [Object] args arguments to be passed to each observer
+      # @return [CopyOnNotifyObserverSet] self
+      def notify_and_delete_observers(*args, &block)
+        observers = duplicate_and_clear_observers
+        notify_to(observers, *args, &block)
+        self
+      end
+
+      protected
+
+      def ns_initialize
+        @observers = {}
+      end
+
+      private
+
+      def duplicate_and_clear_observers
+        synchronize do
+          observers = @observers.dup
+          @observers.clear
+          observers
+        end
+      end
+
+      def duplicate_observers
+        synchronize { @observers.dup }
+      end
+
+      def notify_to(observers, *args)
+        raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty?
+        observers.each do |observer, function|
+          args = yield if block_given?
+          observer.send(function, *args)
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb
new file mode 100644
index 0000000..bcb6750
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb
@@ -0,0 +1,111 @@
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+  module Collection
+
+    # A thread safe observer set implemented using copy-on-write approach:
+    # every time an observer is added or removed the whole internal data structure is
+    # duplicated and replaced with a new one.
+    #
+    # @api private
+    class CopyOnWriteObserverSet < Synchronization::LockableObject
+
+      def initialize
+        super()
+        synchronize { ns_initialize }
+      end
+
+      # @!macro observable_add_observer
+      def add_observer(observer = nil, func = :update, &block)
+        if observer.nil? && block.nil?
+ raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + new_observers = @observers.dup + new_observers[observer] = func + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + new_observers = @observers.dup + new_observers.delete(observer) + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + self.observers = {} + self + end + + # @!macro observable_count_observers + def count_observers + observers.count + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + old = clear_observers_and_return_old + notify_to(old, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? + observer.send(function, *args) + end + end + + def observers + synchronize { @observers } + end + + def observers=(new_set) + synchronize { @observers = new_set } + end + + def clear_observers_and_return_old + synchronize do + old_observers = @observers + @observers = {} + old_observers + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..2be9e43 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb @@ -0,0 +1,84 @@ +if Concurrent.on_jruby? + + module Concurrent + module Collection + + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class JavaNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + if [:min, :low].include?(order) + @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity + else + @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) + end + end + + # @!macro priority_queue_method_clear + def clear + @queue.clear + true + end + + # @!macro priority_queue_method_delete + def delete(item) + found = false + while @queue.remove(item) do + found = true + end + found + end + + # @!macro priority_queue_method_empty + def empty? + @queue.size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.contains(item) + end + alias_method :has_priority?, :include? 
+ + # @!macro priority_queue_method_length + def length + @queue.size + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + @queue.peek + end + + # @!macro priority_queue_method_pop + def pop + @queue.poll + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @queue.add(item) + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb new file mode 100644 index 0000000..3704410 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb @@ -0,0 +1,160 @@ +require 'concurrent/synchronization/object' + +module Concurrent + + # @!macro warn.edge + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? + + # @return [Node] + attr_reader :next_node + + # @return [Object] + attr_reader :value + + # @!visibility private + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + # The singleton for empty node + EMPTY = Node[nil, nil] + def EMPTY.next_node + self + end + + attr_atomic(:head) + private :head, :head=, :swap_head, :compare_and_set_head, :update_head + + # @!visibility private + def self.of1(value) + new Node[value, EMPTY] + end + + # @!visibility private + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + # @param [Node] head + def initialize(head = EMPTY) + super() + self.head = head + end + + # @param [Node] head + # @return [true, false] + def empty?(head = head()) + head.equal? EMPTY + end + + # @param [Node] head + # @param [Object] value + # @return [true, false] + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + # @param [Object] value + # @return [self] + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + # @return [Node] + def peek + head + end + + # @param [Node] head + # @return [true, false] + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + # @return [Object] + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + # @param [Node] head + # @return [true, false] + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + # @param [Node] head + # @return [self] + def each(head = nil) + return to_enum(:each, head) unless block_given? 
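+        # walk the chain from the supplied node (or the current head) down to EMPTY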
+        it = head || peek
+        until it.equal?(EMPTY)
+          yield it.value
+          it = it.next_node
+        end
+        self
+      end
+
+      # @return [true, false]
+      def clear
+        while true
+          current_head = head
+          return false if current_head == EMPTY
+          return true if compare_and_set_head current_head, EMPTY
+        end
+      end
+
+      # @param [Node] head
+      # @return [true, false]
+      def clear_if(head)
+        compare_and_set_head head, EMPTY
+      end
+
+      # @param [Node] head
+      # @param [Node] new_head
+      # @return [true, false]
+      def replace_if(head, new_head)
+        compare_and_set_head head, new_head
+      end
+
+      # @return [self]
+      # @yield over the cleared stack
+      # @yieldparam [Object] value
+      def clear_each(&block)
+        while true
+          current_head = head
+          return self if current_head == EMPTY
+          if compare_and_set_head current_head, EMPTY
+            each current_head, &block
+            return self
+          end
+        end
+      end
+
+      # @return [String] Short string representation.
+      def to_s
+        format '%s %s>', super[0..-2], to_a.to_s
+      end
+
+      alias_method :inspect, :to_s
+    end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb
new file mode 100644
index 0000000..dc51893
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb
@@ -0,0 +1,927 @@
+require 'concurrent/constants'
+require 'concurrent/thread_safe/util'
+require 'concurrent/thread_safe/util/adder'
+require 'concurrent/thread_safe/util/cheap_lockable'
+require 'concurrent/thread_safe/util/power_of_two_tuple'
+require 'concurrent/thread_safe/util/volatile'
+require 'concurrent/thread_safe/util/xor_shift_random'
+
+module Concurrent
+
+  # @!visibility private
+  module Collection
+
+    # A Ruby port of Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59,
+    # available in the public domain.
+    #
+    # Original source code available here:
+    # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
+    #
+    # The Ruby port leaves out the +TreeBin+ (red-black trees for use in bins whose
+    # size exceeds a threshold).
+    #
+    # A hash table supporting full concurrency of retrievals and high expected
+    # concurrency for updates. However, even though all operations are
+    # thread-safe, retrieval operations do _not_ entail locking, and there is
+    # _not_ any support for locking the entire table in a way that prevents all
+    # access.
+    #
+    # Retrieval operations generally do not block, so may overlap with update
+    # operations. Retrievals reflect the results of the most recently _completed_
+    # update operations holding upon their onset. (More formally, an update
+    # operation for a given key bears a _happens-before_ relation with any (non
+    # +nil+) retrieval for that key reporting the updated value.) For aggregate
+    # operations such as +clear()+, concurrent retrievals may reflect insertion or
+    # removal of only some entries. Similarly, the +each_pair+ iterator yields
+    # elements reflecting the state of the hash table at some point at or since
+    # the start of the +each_pair+. Bear in mind that the results of aggregate
+    # status methods including +size()+ and +empty?+ are typically useful only
+    # when a map is not undergoing concurrent updates in other threads. Otherwise
+    # the results of these methods reflect transient states that may be adequate
+    # for monitoring or estimation purposes, but not for program control.
+    #
+    # The table is dynamically expanded when there are too many collisions (i.e.,
+    # keys that have distinct hash codes but fall into the same slot modulo the
+    # table size), with the expected average effect of maintaining roughly two
+    # bins per mapping (corresponding to a 0.75 load factor threshold for
+    # resizing). There may be much variance around this average as mappings are
+    # added and removed, but overall, this maintains a commonly accepted
+    # time/space tradeoff for hash tables. However, resizing this or any other
+    # kind of hash table may be a relatively slow operation. When possible, it is
+    # a good idea to provide a size estimate as an optional :initial_capacity
+    # initializer argument. An additional optional :load_factor constructor
+    # argument provides a further means of customizing initial table capacity by
+    # specifying the table density to be used in calculating the amount of space
+    # to allocate for the given number of elements. Note that using many keys with
+    # exactly the same +hash+ is a sure way to slow down performance of any hash
+    # table.
+    #
+    # ## Design overview
+    #
+    # The primary design goal of this hash table is to maintain concurrent
+    # readability (typically method +[]+, but also iteration and related methods)
+    # while minimizing update contention. Secondary goals are to keep space
+    # consumption about the same or better than plain +Hash+, and to support high
+    # initial insertion rates on an empty table by many threads.
+    #
+    # Each key-value mapping is held in a +Node+. The validation-based approach
+    # explained below leads to a lot of code sprawl because retry-control
+    # precludes factoring into smaller methods.
+    #
+    # The table is lazily initialized to a power-of-two size upon the first
+    # insertion. Each bin in the table normally contains a list of +Node+s (most
+    # often, the list has only zero or one +Node+). Table accesses require
+    # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
+    # always accurately traversable under volatile reads, so long as lookups check
+    # hash code and non-nullness of value before checking key equality.
+    #
+    # We use the top two bits of +Node+ hash fields for control purposes -- they
+    # are available anyway because of addressing constraints. As explained further
+    # below, these top bits are used as follows:
+    #
+    # - 00 - Normal
+    # - 01 - Locked
+    # - 11 - Locked and may have a thread waiting for lock
+    # - 10 - +Node+ is a forwarding node
+    #
+    # The remaining lower bits of each +Node+'s hash field contain the key's hash
+    # code, except for forwarding nodes, for which the lower bits are zero (and so
+    # always have hash field == +MOVED+).
+    #
+    # Insertion (via +[]=+ or its variants) of the first node in an empty bin is
+    # performed by just CASing it to the bin. This is by far the most common case
+    # for put operations under most key/hash distributions. Other update
+    # operations (insert, delete, and replace) require locks. We do not want to
+    # waste the space required to associate a distinct lock object with each bin,
+    # so instead use the first node of a bin list itself as a lock. Blocking
+    # support for these locks relies on +Concurrent::ThreadSafe::Util::CheapLockable+.
However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. + # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). 
The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. + # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. + # + # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + # + # @!visibility private + class AtomicReferenceMapBackend + + # @!visibility private + class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. 
+ # + # @!visibility private + class Node + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :hash, :value, :next + + include Concurrent::ThreadSafe::Util::CheapLockable + + bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? + spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_acquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_acquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. 
+ TRANSFER_BUFFER_SIZE = 32 + + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Concurrent::ThreadSafe::Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + + def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? 
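+            # key was absent (store the given value) or the block returned a
+            #   non-nil result (store that)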
+ value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
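+      # Shared slow path for the replace/delete family of operations: walks to
+      #   the bin for +key+ and hands off to +attempt_internal_replace+, which
+      #   does the actual replacement (or deletion) under the bin lock.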
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + 
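+        # a node was appended under the bin lock, so the table may need to grow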
check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Concurrent::ThreadSafe::Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. + def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= ::Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. + def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb new file mode 100644 index 0000000..e0cf999 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb @@ -0,0 +1,66 @@ +require 'thread' +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class 
MriMapBackend < NonConcurrentMapBackend + + def initialize(options = nil, &default_proc) + super(options, &default_proc) + @write_lock = Mutex.new + end + + def []=(key, value) + @write_lock.synchronize { super } + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case + stored_value + else + @write_lock.synchronize { super } + end + end + + def compute_if_present(key) + @write_lock.synchronize { super } + end + + def compute(key) + @write_lock.synchronize { super } + end + + def merge_pair(key, value) + @write_lock.synchronize { super } + end + + def replace_pair(key, old_value, new_value) + @write_lock.synchronize { super } + end + + def replace_if_exists(key, new_value) + @write_lock.synchronize { super } + end + + def get_and_set(key, value) + @write_lock.synchronize { super } + end + + def delete(key) + @write_lock.synchronize { super } + end + + def delete_pair(key, value) + @write_lock.synchronize { super } + end + + def clear + @write_lock.synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb new file mode 100644 index 0000000..ca5fd9b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb @@ -0,0 +1,148 @@ +require 'concurrent/constants' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class NonConcurrentMapBackend + + # WARNING: all public methods of the class must operate on the @backend + # directly without calling each other. This is important because of the + # SynchronizedMapBackend which uses a non-reentrant mutex for performance + # reasons. 
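+      # For example, +compute_if_absent+ below reads and writes +@backend+
+      # directly rather than delegating to +[]+ and +[]=+.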
+ def initialize(options = nil, &default_proc) + validate_options_hash!(options) if options.kind_of?(::Hash) + set_backend(default_proc) + @default_proc = default_proc + end + + def [](key) + @backend[key] + end + + def []=(key, value) + @backend[key] = value + end + + def compute_if_absent(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + stored_value + else + @backend[key] = yield + end + end + + def replace_pair(key, old_value, new_value) + if pair?(key, old_value) + @backend[key] = new_value + true + else + false + end + end + + def replace_if_exists(key, new_value) + if NULL != (stored_value = @backend.fetch(key, NULL)) + @backend[key] = new_value + stored_value + end + end + + def compute_if_present(key) + if NULL != (stored_value = @backend.fetch(key, NULL)) + store_computed_value(key, yield(stored_value)) + end + end + + def compute(key) + store_computed_value(key, yield(get_or_default(key, nil))) + end + + def merge_pair(key, value) + if NULL == (stored_value = @backend.fetch(key, NULL)) + @backend[key] = value + else + store_computed_value(key, yield(stored_value)) + end + end + + def get_and_set(key, value) + stored_value = get_or_default(key, nil) + @backend[key] = value + stored_value + end + + def key?(key) + @backend.key?(key) + end + + def delete(key) + @backend.delete(key) + end + + def delete_pair(key, value) + if pair?(key, value) + @backend.delete(key) + true + else + false + end + end + + def clear + @backend.clear + self + end + + def each_pair + dupped_backend.each_pair do |k, v| + yield k, v + end + self + end + + def size + @backend.size + end + + def get_or_default(key, default_value) + @backend.fetch(key, default_value) + end + + private + + def set_backend(default_proc) + if default_proc + @backend = ::Hash.new { |_h, key| default_proc.call(self, key) } + else + @backend = {} + end + end + + def initialize_copy(other) + super + set_backend(@default_proc) + self + end + + def dupped_backend + @backend.dup + end + + def pair?(key, expected_value) + NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) + end + + def store_computed_value(key, new_value) + if new_value.nil? + @backend.delete(key) + nil + else + @backend[key] = new_value + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb new file mode 100644 index 0000000..190c8d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb @@ -0,0 +1,82 @@ +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class SynchronizedMapBackend < NonConcurrentMapBackend + + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. 
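+ # A sketch of the failure mode this warning guards against (hypothetical code, not part of the class): if one synchronized public method called another, the second `synchronize` on the same non-reentrant Mutex_m lock would deadlock: + # + #   def compute_if_absent(key) + #     synchronize { key?(key) ? self[key] : self[key] = yield } + #     # `key?`, `[]`, and `[]=` below each synchronize again -> deadlock + #   end + # + # Hence every method below wraps `super` in exactly one `synchronize` and lets the non-locking superclass do the real work.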
+ + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb new file mode 100644 index 0000000..68a1b38 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb @@ -0,0 +1,14 @@ +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class TruffleRubyMapBackend < TruffleRuby::ConcurrentMap + def initialize(options = nil) + options ||= {} + super(initial_capacity: options[:initial_capacity], load_factor: options[:load_factor]) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb new file mode 100644 index 0000000..694cd7a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb @@ -0,0 +1,143 @@ +require 'concurrent/utility/engine' +require 'concurrent/collection/java_non_concurrent_priority_queue' +require 'concurrent/collection/ruby_non_concurrent_priority_queue' + +module Concurrent + module Collection + + # @!visibility private + # @!macro internal_implementation_note + NonConcurrentPriorityQueueImplementation = case + when Concurrent.on_jruby? + JavaNonConcurrentPriorityQueue + else + RubyNonConcurrentPriorityQueue + end + private_constant :NonConcurrentPriorityQueueImplementation + + # @!macro priority_queue + # + # A queue collection in which the elements are sorted based on their + # comparison (spaceship) operator `<=>`. Items are added to the queue + # at a position relative to their priority. On removal the element + # with the "highest" priority is removed. By default the sort order is + # from highest to lowest, but a lowest-to-highest sort order can be + # set on construction. + # + # The API is based on the `Queue` class from the Ruby standard library. + # + # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue`, uses a heap algorithm + # stored in an array. The algorithm is based on the work of Robert Sedgewick + # and Kevin Wayne. + # + # The JRuby native implementation is a thin wrapper around the standard + # library `java.util.PriorityQueue`.
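+ # + #   @example Basic usage (an illustrative sketch, not from the original docs) + #     queue = Concurrent::Collection::NonConcurrentPriorityQueue.new(order: :min) + #     queue.push(3) + #     queue.push(1) + #     queue.push(2) + #     queue.pop #=> 1 (with :min order the lowest item has the highest priority)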
+ # + # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. + # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. + # + # @note This implementation is *not* thread safe. + # + # @see http://en.wikipedia.org/wiki/Priority_queue + # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html + # + # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 + # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html + # + # @!visibility private + class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation + + alias_method :has_priority?, :include? + + alias_method :size, :length + + alias_method :deq, :pop + alias_method :shift, :pop + + alias_method :<<, :push + alias_method :enq, :push + + # @!method initialize(opts = {}) + # @!macro priority_queue_method_initialize + # + # Create a new priority queue with no items. + # + # @param [Hash] opts the options for creating the queue + # @option opts [Symbol] :order (:max) dictates the order in which items are + # stored: from highest to lowest when `:max` or `:high`; from lowest to + # highest when `:min` or `:low` + + # @!method clear + # @!macro priority_queue_method_clear + # + # Removes all of the elements from this priority queue. + + # @!method delete(item) + # @!macro priority_queue_method_delete + # + # Deletes all items from `self` that are equal to `item`. + # + # @param [Object] item the item to be removed from the queue + # @return [Boolean] true if the item is found else false + + # @!method empty? + # @!macro priority_queue_method_empty + # + # Returns `true` if `self` contains no elements. + # + # @return [Boolean] true if there are no items in the queue else false + + # @!method include?(item) + # @!macro priority_queue_method_include + # + # Returns `true` if the given item is present in `self` (that is, if any + # element == `item`), otherwise returns false. + # + # @param [Object] item the item to search for + # + # @return [Boolean] true if the item is found else false + + # @!method length + # @!macro priority_queue_method_length + # + # The current length of the queue. + # + # @return [Fixnum] the number of items in the queue + + # @!method peek + # @!macro priority_queue_method_peek + # + # Retrieves, but does not remove, the head of this queue, or returns `nil` + # if this queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method pop + # @!macro priority_queue_method_pop + # + # Retrieves and removes the head of this queue, or returns `nil` if this + # queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method push(item) + # @!macro priority_queue_method_push + # + # Inserts the specified element into this priority queue. + # + # @param [Object] item the item to insert onto the queue + + # @!method self.from_list(list, opts = {}) + # @!macro priority_queue_method_from_list + # + # Create a new priority queue from the given list.
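+ # + #     @example (illustrative) + #       queue = Concurrent::Collection::NonConcurrentPriorityQueue.from_list([3, 1, 2]) + #       queue.pop #=> 3 (the default :max order returns the highest item first)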
+ # + # @param [Enumerable] list the list to build the queue from + # @param [Hash] opts the options for creating the queue + # + # @return [NonConcurrentPriorityQueue] the newly created and populated queue + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb new file mode 100644 index 0000000..322b4ac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb @@ -0,0 +1,160 @@ +module Concurrent + module Collection + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class RubyNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + @comparator = [:min, :low].include?(order) ? -1 : 1 + clear + end + + # @!macro priority_queue_method_clear + def clear + @queue = [nil] + @length = 0 + true + end + + # @!macro priority_queue_method_delete + def delete(item) + return false if empty? + original_length = @length + k = 1 + while k <= @length + if @queue[k] == item + swap(k, @length) + @length -= 1 + sink(k) || swim(k) + @queue.pop + else + k += 1 + end + end + @length != original_length + end + + # @!macro priority_queue_method_empty + def empty? + size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.include?(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @length + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + empty? ? nil : @queue[1] + end + + # @!macro priority_queue_method_pop + def pop + return nil if empty? + max = @queue[1] + swap(1, @length) + @length -= 1 + sink(1) + @queue.pop + max + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @length += 1 + @queue << item + swim(@length) + true + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + + private + + # Exchange the values at the given indexes within the internal array. + # + # @param [Integer] x the first index to swap + # @param [Integer] y the second index to swap + # + # @!visibility private + def swap(x, y) + temp = @queue[x] + @queue[x] = @queue[y] + @queue[y] = temp + end + + # Are the items at the given indexes ordered based on the priority + # order specified at construction? + # + # @param [Integer] x the first index from which to retrieve a comparable value + # @param [Integer] y the second index from which to retrieve a comparable value + # + # @return [Boolean] true if the two elements are in the correct priority order + # else false + # + # @!visibility private + def ordered?(x, y) + (@queue[x] <=> @queue[y]) == @comparator + end + + # Percolate down to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def sink(k) + success = false + + while (j = (2 * k)) <= @length do + j += 1 if j < @length && ! 
ordered?(j, j+1) + break if ordered?(k, j) + swap(k, j) + success = true + k = j + end + + success + end + + # Percolate up to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def swim(k) + success = false + + while k > 1 && ! ordered?(k/2, k) do + swap(k, k/2) + k = k/2 + success = true + end + + success + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb new file mode 100644 index 0000000..35ae4b2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb @@ -0,0 +1,34 @@ +require 'concurrent/concern/logging' + +module Concurrent + module Concern + + # @!visibility private + # @!macro internal_implementation_note + module Deprecation + # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. + include Concern::Logging + + def deprecated(message, strip = 2) + caller_line = caller(strip).first if strip > 0 + klass = if Module === self + self + else + self.class + end + message = if strip > 0 + format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) + else + format('[DEPRECATED] %s', message) + end + log WARN, klass.to_s, message + end + + def deprecated_method(old_name, new_name) + deprecated "`#{old_name}` is deprecated and it'll be removed in the next release, use `#{new_name}` instead", 3 + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb new file mode 100644 index 0000000..dc172ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb @@ -0,0 +1,73 @@ +module Concurrent + module Concern + + # Object references in Ruby are mutable. This can lead to serious problems when + # the `#value` of a concurrent object is a mutable reference, which is always the + # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. + # Most classes in this library that expose a `#value` getter method do so using the + # `Dereferenceable` mixin module. + # + # @!macro copy_options + module Dereferenceable + # NOTE: This module is going away in 2.0. In the mean time we need it to + # play nicely with the synchronization layer. This means that the + # including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Return the value this object represents after applying the options specified + # by the `#set_deref_options` method. + # + # @return [Object] the current value of the object + def value + synchronize { apply_deref_options(@value) } + end + alias_method :deref, :value + + protected + + # Set the internal value of this object + # + # @param [Object] value the new value + def value=(value) + synchronize{ @value = value } + end + + # @!macro dereferenceable_set_deref_options + # Set the options which define the operations #value performs before + # returning data to the caller (dereferencing).
+ # + # @note Most classes that include this module will call `#set_deref_options` + # from within the constructor, thus allowing these options to be set at + # object creation. + # + # @param [Hash] opts the options defining dereference behavior. + # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def set_deref_options(opts = {}) + synchronize{ ns_set_deref_options(opts) } + end + + # @!macro dereferenceable_set_deref_options + # @!visibility private + def ns_set_deref_options(opts) + @dup_on_deref = opts[:dup_on_deref] || opts[:dup] + @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] + @copy_on_deref = opts[:copy_on_deref] || opts[:copy] + @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) + nil + end + + # @!visibility private + def apply_deref_options(value) + return nil if value.nil? + return value if @do_nothing_on_deref + value = @copy_on_deref.call(value) if @copy_on_deref + value = value.dup if @dup_on_deref + value = value.freeze if @freeze_on_deref + value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb new file mode 100644 index 0000000..568a539 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb @@ -0,0 +1,116 @@ +require 'logger' +require 'concurrent/atomic/atomic_reference' + +module Concurrent + module Concern + + # Include where logging is needed + # + # @!visibility private + module Logging + include Logger::Severity + + # Logs through {Concurrent.global_logger}; it can be overridden by setting @logger + # @param [Integer] level one of Logger::Severity constants + # @param [String] progname e.g. a path of an Actor + # @param [String, nil] message when nil block is used to generate the message + # @yieldreturn [String] a message + def log(level, progname, message = nil, &block) + logger = if defined?(@logger) && @logger + @logger + else + Concurrent.global_logger + end + logger.call level, progname, message, &block + rescue => error + $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + + "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" + end + end + end +end + +module Concurrent + extend Concern::Logging + + # @return [Logger] Logger with provided level and output. + def self.create_simple_logger(level = Logger::FATAL, output = $stderr) + # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking + lambda do |severity, progname, message = nil, &block| + return false if severity < level + + message = block ? block.call : message + formatted_message = case message + when String + message + when Exception + format "%s (%s)\n%s", + message.message, message.class, (message.backtrace || []).join("\n") + else + message.inspect + end + + output.print format "[%s] %5s -- %s: %s\n", + Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), + Logger::SEV_LABEL[severity], + progname, + formatted_message + true + end + end + + # Use logger created by #create_simple_logger to log concurrent-ruby messages.
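+ # + # @example (illustrative) + #   Concurrent.use_simple_logger(Logger::DEBUG) # route gem logging to $stderr at DEBUG level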
+ def self.use_simple_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_simple_logger level, output + end + + # @return [Logger] Logger with provided level and output. + # @deprecated + def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) + logger = Logger.new(output) + logger.level = level + logger.formatter = lambda do |severity, datetime, progname, msg| + formatted_message = case msg + when String + msg + when Exception + format "%s (%s)\n%s", + msg.message, msg.class, (msg.backtrace || []).join("\n") + else + msg.inspect + end + format "[%s] %5s -- %s: %s\n", + datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), + severity, + progname, + formatted_message + end + + lambda do |loglevel, progname, message = nil, &block| + logger.add loglevel, message, progname, &block + end + end + + # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. + # @deprecated + def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) + Concurrent.global_logger = create_stdlib_logger level, output + end + + # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods + + # Suppresses all output when used for logging. + NULL_LOGGER = lambda { |level, progname, message = nil, &block| } + + # @!visibility private + GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) + private_constant :GLOBAL_LOGGER + + def self.global_logger + GLOBAL_LOGGER.value + end + + def self.global_logger=(value) + GLOBAL_LOGGER.value = value + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb new file mode 100644 index 0000000..2c9ac12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb @@ -0,0 +1,220 @@ +require 'thread' +require 'timeout' + +require 'concurrent/atomic/event' +require 'concurrent/concern/dereferenceable' + +module Concurrent + module Concern + + module Obligation + include Concern::Dereferenceable + # NOTE: The Dereferenceable module is going away in 2.0. In the mean time + # we need it to play nicely with the synchronization layer. This means + # that the including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Has the obligation been fulfilled? + # + # @return [Boolean] + def fulfilled? + state == :fulfilled + end + alias_method :realized?, :fulfilled? + + # Has the obligation been rejected? + # + # @return [Boolean] + def rejected? + state == :rejected + end + + # Is obligation completion still pending? + # + # @return [Boolean] + def pending? + state == :pending + end + + # Is the obligation still unscheduled? + # + # @return [Boolean] + def unscheduled? + state == :unscheduled + end + + # Has the obligation completed processing? + # + # @return [Boolean] + def complete? + [:fulfilled, :rejected].include? state + end + + # Is the obligation still awaiting completion of processing? + # + # @return [Boolean] + def incomplete? + ! complete? + end + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. + # + # @param [Numeric] timeout the maximum time in seconds to wait.
+ # @return [Object] see Dereferenceable#deref + def value(timeout = nil) + wait timeout + deref + end + + # Wait until obligation is complete or the timeout has been reached. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + def wait(timeout = nil) + event.wait(timeout) if timeout != 0 && incomplete? + self + end + + # Wait until obligation is complete or the timeout is reached. Will re-raise + # any exceptions raised during processing (but will not raise an exception + # on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + # @raise [Exception] raises the reason when rejected + def wait!(timeout = nil) + wait(timeout).tap { raise self if rejected? } + end + alias_method :no_error!, :wait! + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. Will re-raise any exceptions + # raised during processing (but will not raise an exception on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + # @raise [Exception] raises the reason when rejected + def value!(timeout = nil) + wait(timeout) + if rejected? + raise self + else + deref + end + end + + # The current state of the obligation. + # + # @return [Symbol] the current state + def state + synchronize { @state } + end + + # If an exception was raised during processing this will return the + # exception object. Will return `nil` when the state is pending or if + # the obligation has been successfully fulfilled. + # + # @return [Exception] the exception raised during processing or `nil` + def reason + synchronize { @reason } + end + + # @example allows a rejected Obligation to be raised + # rejected_ivar = IVar.new.fail + # raise rejected_ivar + def exception(*args) + raise 'obligation is not rejected' unless rejected? + reason.exception(*args) + end + + protected + + # @!visibility private + def get_arguments_from(opts = {}) + [*opts.fetch(:args, [])] + end + + # @!visibility private + def init_obligation + @event = Event.new + @value = @reason = nil + end + + # @!visibility private + def event + @event + end + + # @!visibility private + def set_state(success, value, reason) + if success + @value = value + @state = :fulfilled + else + @reason = reason + @state = :rejected + end + end + + # @!visibility private + def state=(value) + synchronize { ns_set_state(value) } + end + + # Atomic compare and set operation. + # State is set to `next_state` only if `current state == expected_current`. + # + # @param [Symbol] next_state + # @param [Symbol] expected_current + # + # @return [Boolean] true if the state is changed, false otherwise + # + # @!visibility private + def compare_and_set_state(next_state, *expected_current) + synchronize do + if expected_current.include? @state + @state = next_state + true + else + false + end + end + end + + # Executes the block within the mutex if the current state is included in expected_states + # + # @return block value if executed, false otherwise + # + # @!visibility private + def if_state(*expected_states) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + + if expected_states.include? @state + yield + else + false + end + end + end + + protected + + # Am I in the expected state?
+ # + # @param [Symbol] expected The state to check against + # @return [Boolean] true if in the expected state else false + # + # @!visibility private + def ns_check_state?(expected) + @state == expected + end + + # @!visibility private + def ns_set_state(value) + @state = value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb new file mode 100644 index 0000000..b513271 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb @@ -0,0 +1,110 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/collection/copy_on_write_observer_set' + +module Concurrent + module Concern + + # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one + # of the most useful design patterns. + # + # The workflow is very simple: + # - an `observer` can register itself to a `subject` via a callback + # - many `observers` can be registered to the same `subject` + # - the `subject` notifies all registered observers when its status changes + # - an `observer` can deregister itself when it is no longer interested in + # receiving event notifications + # + # In a single-threaded environment the whole pattern is very easy: the + # `subject` can use a simple data structure to manage all its subscribed + # `observer`s and every `observer` can react directly to every event without + # caring about synchronization. + # + # In a multi-threaded environment things are more complex. The `subject` must + # synchronize the access to its data structure and to do so currently we're + # using two specialized ObserverSet implementations: {Concurrent::Collection::CopyOnWriteObserverSet} + # and {Concurrent::Collection::CopyOnNotifyObserverSet}. + # + # When implementing an `observer` there's a very important rule to remember: + # **there are no guarantees about the thread that will execute the callback** + # + # Let's take this example + # ``` + # class Observer + # def initialize + # @count = 0 + # end + # + # def update + # @count += 1 + # end + # end + # + # obs = Observer.new + # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } + # # execute [obj1, obj2, obj3, obj4] + # ``` + # + # `obs` is wrong because the variable `@count` can be accessed by different + # threads at the same time, so it should be synchronized (using either a Mutex + # or an AtomicFixnum) + module Observable + + # @!macro observable_add_observer + # + # Adds an observer to this set. If a block is passed, the observer will be + # created by this method and no other params should be passed. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # Default is :update + # @return [Object] the added observer + def add_observer(observer = nil, func = :update, &block) + observers.add_observer(observer, func, &block) + end + + # As `#add_observer` but can be used for chaining. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification.
+ # @return [Observable] self + def with_observer(observer = nil, func = :update, &block) + add_observer(observer, func, &block) + self + end + + # @!macro observable_delete_observer + # + # Remove `observer` as an observer on this object so that it will no + # longer receive notifications. + # + # @param [Object] observer the observer to remove + # @return [Object] the deleted observer + def delete_observer(observer) + observers.delete_observer(observer) + end + + # @!macro observable_delete_observers + # + # Remove all observers associated with this object. + # + # @return [Observable] self + def delete_observers + observers.delete_observers + self + end + + # @!macro observable_count_observers + # + # Return the number of observers associated with this object. + # + # @return [Integer] the observers count + def count_observers + observers.count_observers + end + + protected + + attr_accessor :observers + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar new file mode 100644 index 0000000..a4bda41 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar differ diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb new file mode 100644 index 0000000..5571d39 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb @@ -0,0 +1,105 @@ +require 'thread' +require 'concurrent/delay' +require 'concurrent/errors' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/fixed_thread_pool' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/utility/processor_counter' + +module Concurrent + extend Concern::Deprecation + + autoload :Options, 'concurrent/options' + autoload :TimerSet, 'concurrent/executor/timer_set' + autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' + + # @!visibility private + GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor } + private_constant :GLOBAL_FAST_EXECUTOR + + # @!visibility private + GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor } + private_constant :GLOBAL_IO_EXECUTOR + + # @!visibility private + GLOBAL_TIMER_SET = Delay.new { TimerSet.new } + private_constant :GLOBAL_TIMER_SET + + # @!visibility private + GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new + private_constant :GLOBAL_IMMEDIATE_EXECUTOR + + # Disables AtExit handlers including pool auto-termination handlers. + # When disabled it will be the application programmer's responsibility + # to ensure that the handlers are shut down properly prior to application + # exit by calling the `AtExit.run` method. + # + # @note this option should be needed only because of `at_exit` ordering + # issues which may arise when running some of the testing frameworks. + # E.g. Minitest's test-suite runs itself in `at_exit` callback which + # executes after the pools are already terminated. Then auto termination + # needs to be disabled and called manually after test-suite ends. + # @note This method should *never* be called + # from within a gem.
It should *only* be used from within the main + application and even then it should be used only when necessary. + # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841. + # + def self.disable_at_exit_handlers! + deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841." + end + + # Global thread pool optimized for short, fast *operations*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_fast_executor + GLOBAL_FAST_EXECUTOR.value! + end + + # Global thread pool optimized for long, blocking (IO) *tasks*. + # + # @return [ThreadPoolExecutor] the thread pool + def self.global_io_executor + GLOBAL_IO_EXECUTOR.value! + end + + def self.global_immediate_executor + GLOBAL_IMMEDIATE_EXECUTOR + end + + # Global thread pool used for global *timers*. + # + # @return [Concurrent::TimerSet] the thread pool + def self.global_timer_set + GLOBAL_TIMER_SET.value! + end + + # General access point to global executors. + # @param [Symbol, Executor] executor_identifier symbols: + # - :fast - {Concurrent.global_fast_executor} + # - :io - {Concurrent.global_io_executor} + # - :immediate - {Concurrent.global_immediate_executor} + # @return [Executor] + def self.executor(executor_identifier) + Options.executor(executor_identifier) + end + + def self.new_fast_executor(opts = {}) + FixedThreadPool.new( + [2, Concurrent.processor_count].max, + auto_terminate: opts.fetch(:auto_terminate, true), + idletime: 60, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "fast" + ) + end + + def self.new_io_executor(opts = {}) + CachedThreadPool.new( + auto_terminate: opts.fetch(:auto_terminate, true), + fallback_policy: :abort, # shouldn't matter -- 0 max queue + name: "io" + ) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb new file mode 100644 index 0000000..676c2af --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb @@ -0,0 +1,8 @@ +module Concurrent + + # Various classes within allow for +nil+ values to be stored, + # so a special +NULL+ token is required to indicate the "nil-ness". + # @!visibility private + NULL = ::Object.new + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb new file mode 100644 index 0000000..d55f19d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb @@ -0,0 +1,81 @@ +require 'concurrent/future' +require 'concurrent/atomic/atomic_fixnum' + +module Concurrent + + # @!visibility private + class DependencyCounter # :nodoc: + + def initialize(count, &block) + @counter = AtomicFixnum.new(count) + @block = block + end + + def update(time, value, reason) + if @counter.decrement == 0 + @block.call + end + end + end + + # Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available.
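+ # + # @example A minimal illustrative sketch (not from the original docs) + #   a = Concurrent::Future.execute { 1 } + #   b = Concurrent::Future.execute { 2 } + #   sum = Concurrent.dataflow(a, b) { |x, y| x + y } + #   sum.value #=> 3 (scheduled only once both inputs were fulfilled)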
+ # {include:file:docs-source/dataflow.md} + # + # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon + # + # @yield The operation to perform once all the dependencies are met + # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow + # @yieldreturn [Object] the result of the block operation + # + # @return [Object] the result of all the operations + # + # @raise [ArgumentError] if no block is given + # @raise [ArgumentError] if any of the inputs are not `IVar`s + def dataflow(*inputs, &block) + dataflow_with(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow + + def dataflow_with(executor, *inputs, &block) + call_dataflow(:value, executor, *inputs, &block) + end + module_function :dataflow_with + + def dataflow!(*inputs, &block) + dataflow_with!(Concurrent.global_io_executor, *inputs, &block) + end + module_function :dataflow! + + def dataflow_with!(executor, *inputs, &block) + call_dataflow(:value!, executor, *inputs, &block) + end + module_function :dataflow_with! + + private + + def call_dataflow(method, executor, *inputs, &block) + raise ArgumentError.new('an executor must be provided') if executor.nil? + raise ArgumentError.new('no block given') unless block_given? + unless inputs.all? { |input| input.is_a? IVar } + raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }") + end + + result = Future.new(executor: executor) do + values = inputs.map { |input| input.send(method) } + block.call(*values) + end + + if inputs.empty? + result.execute + else + counter = DependencyCounter.new(inputs.size) { result.execute } + + inputs.each do |input| + input.add_observer counter + end + end + + result + end + module_function :call_dataflow +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb new file mode 100644 index 0000000..923773c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb @@ -0,0 +1,199 @@ +require 'thread' +require 'concurrent/concern/obligation' +require 'concurrent/executor/immediate_executor' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # This file has circular require issues. It must be autoloaded here. + autoload :Options, 'concurrent/options' + + # Lazy evaluation of a block yielding an immutable result. Useful for + # expensive operations that may never be needed. It may be non-blocking, + # supports the `Concern::Obligation` interface, and accepts the injection of + # a custom executor upon which to execute the block. Processing of + # the block will be deferred until the first time `#value` is called. + # At that time the caller can choose to return immediately and let + # the block execute asynchronously, block indefinitely, or block + # with a timeout. + # + # When a `Delay` is created its state is set to `pending`. The value and + # reason are both `nil`. The first time the `#value` method is called the + # enclosed operation will be run and the calling thread will block. Other + # threads attempting to call `#value` will block as well. Once the operation + # is complete the *value* will be set to the result of the operation or the + # *reason* will be set to the raised exception, as appropriate. All threads + # blocked on `#value` will return.
Subsequent calls to `#value` will immediately + # return the cached value. The operation will only be run once. This means that + # any side effects created by the operation will only happen once as well. + # + # `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread + # safety of the reference returned by `#value`. + # + # @!macro copy_options + # + # @!macro delay_note_regarding_blocking + # @note The default behavior of `Delay` is to block indefinitely when + # calling either `value` or `wait`, executing the delayed operation on + # the current thread. This makes the `timeout` value completely + # irrelevant. To enable non-blocking behavior, use the `executor` + # constructor option. This will cause the delayed operation to be + # executed on the given executor, allowing the call to timeout. + # + # @see Concurrent::Concern::Dereferenceable + class Delay < Synchronization::LockableObject + include Concern::Obligation + + # NOTE: Because the global thread pools are lazy-loaded with these objects + # there is a performance hit every time we post a new task to one of these + # thread pools. Subsequently it is critical that `Delay` perform as fast + # as possible post-completion. This class has been highly optimized using + # the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to + # DRY-up this class or perform other refactoring without running the + # benchmarks and ensuring that performance is not negatively impacted. + + # Create a new `Delay` in the `:pending` state. + # + # @!macro executor_and_deref_options + # + # @yield the delayed operation to perform + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(&nil) + synchronize { ns_initialize(opts, &block) } + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception this method will return nil. The exception object + # can be accessed via the `#reason` method. + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # + # @!macro delay_note_regarding_blocking + def value(timeout = nil) + if @executor # TODO (pitr 12-Sep-2015): broken unsafe read? + super + else + # this function has been optimized for performance and + # should not be modified without running new benchmarks + synchronize do + execute = @evaluation_started = true unless @evaluation_started + if execute + begin + set_state(true, @task.call, nil) + rescue => ex + set_state(false, nil, ex) + end + elsif incomplete? + raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' + end + end + if @do_nothing_on_deref + @value + else + apply_deref_options(@value) + end + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. If the delayed operation + # raised an exception, this method will raise that exception (even when + # the operation has already been executed).
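+ # + # @example (illustrative; `expensive_computation` is a hypothetical stand-in) + #   delay = Concurrent::Delay.new { expensive_computation } # nothing runs yet + #   delay.value! # runs the block on first call; re-raises anything it raised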
+ # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # @raise [Exception] when `#rejected?` raises `#reason` + # + # @!macro delay_note_regarding_blocking + def value!(timeout = nil) + if @executor + super + else + result = value + raise @reason if @reason + result + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. + # + # @param [Integer] timeout (nil) the maximum number of seconds to wait for + # the value to be computed. When `nil` the caller will block indefinitely. + # + # @return [Object] self + # + # @!macro delay_note_regarding_blocking + def wait(timeout = nil) + if @executor + execute_task_once + super(timeout) + else + value + end + self + end + + # Reconfigures the block returning the value if still `#incomplete?` + # + # @yield the delayed operation to perform + # @return [true, false] if success + def reconfigure(&block) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + unless @evaluation_started + @task = block + true + else + false + end + end + end + + protected + + def ns_initialize(opts, &block) + init_obligation + set_deref_options(opts) + @executor = opts[:executor] + + @task = block + @state = :pending + @evaluation_started = false + end + + private + + # @!visibility private + def execute_task_once # :nodoc: + # this function has been optimized for performance and + # should not be modified without running new benchmarks + execute = task = nil + synchronize do + execute = @evaluation_started = true unless @evaluation_started + task = @task + end + + if execute + executor = Options.executor_from_options(executor: @executor) + executor.post do + begin + result = task.call + success = true + rescue => ex + reason = ex + end + synchronize do + set_state(success, result, reason) + event.set + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb new file mode 100644 index 0000000..74f1fc3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb @@ -0,0 +1,74 @@ +module Concurrent + + Error = Class.new(StandardError) + + # Raised when errors occur during configuration. + ConfigurationError = Class.new(Error) + + # Raised when an asynchronous operation is cancelled before execution. + CancelledOperationError = Class.new(Error) + + # Raised when a lifecycle method (such as `stop`) is called in an improper + # sequence or when the object is in an inappropriate state. + LifecycleError = Class.new(Error) + + # Raised when an attempt is made to violate an immutability guarantee. + ImmutabilityError = Class.new(Error) + + # Raised when an operation is attempted which is not legal given the + # receiver's current state + IllegalOperationError = Class.new(Error) + + # Raised when an object's methods are called when it has not been + # properly initialized. + InitializationError = Class.new(Error) + + # Raised when an object with a start/stop lifecycle has been started an + # excessive number of times. Often used in conjunction with a restart + # policy or strategy. + MaxRestartFrequencyError = Class.new(Error) + + # Raised when an attempt is made to modify an immutable object + # (such as an `IVar`) after its final state has been set. 
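+ # + # @example (illustrative) + #   ivar = Concurrent::IVar.new + #   ivar.set(1) + #   ivar.set(2) # raises Concurrent::MultipleAssignmentError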
+ class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end + + # Raised by an `Executor` when it is unable to process a given task, + # possibly because of a reject policy or other internal error. + RejectedExecutionError = Class.new(Error) + + # Raised when any finite resource, such as a lock counter, exceeds its + # maximum limit/threshold. + ResourceLimitError = Class.new(Error) + + # Raised when an operation times out. + TimeoutError = Class.new(Error) + + # Aggregates multiple exceptions. + class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + + # @!macro internal_implementation_note + class ConcurrentUpdateError < ThreadError + # frozen pre-allocated backtrace to speed ConcurrentUpdateError + CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb new file mode 100644 index 0000000..a5405d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb @@ -0,0 +1,353 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/maybe' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/utility/engine' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro exchanger + # + # A synchronization point at which threads can pair and swap elements within + # pairs. Each thread presents some object on entry to the exchange method, + # matches with a partner thread, and receives its partner's object on return. + # + # @!macro thread_safe_variable_comparison + # + # This implementation is very simple, using only a single slot for each + # exchanger (unlike more advanced implementations which use an "arena"). + # This approach will work perfectly fine when there are only a few threads + # accessing a single `Exchanger`. Beyond a handful of threads the performance + # will degrade rapidly due to contention on the single slot, but the algorithm + # will remain correct. + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # threads = [ + # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" + # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" + # ] + # threads.each {|t| t.join(2) } + + # @!visibility private + class AbstractExchanger < Synchronization::Object + + # @!visibility private + CANCEL = ::Object.new + private_constant :CANCEL + + def initialize + super + end + + # @!macro exchanger_method_do_exchange + # + # Waits for another thread to arrive at this exchange point (unless the + # current thread is interrupted), and then transfers the given object to + # it, receiving its object in return. 
The timeout value indicates the + # approximate number of seconds the method should block while waiting + # for the exchange. When the timeout value is `nil` the method will + # block indefinitely. + # + # @param [Object] value the value to exchange with another thread + # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely + # + # @!macro exchanger_method_exchange + # + # In some edge cases when a `timeout` is given a return value of `nil` may be + # ambiguous. Specifically, if `nil` is a valid value in the exchange it will + # be impossible to tell whether `nil` is the actual return value or if it + # signifies timeout. When `nil` is a valid value in the exchange consider + # using {#exchange!} or {#try_exchange} instead. + # + # @return [Object] the value exchanged by the other thread or `nil` on timeout + def exchange(value, timeout = nil) + (value = do_exchange(value, timeout)) == CANCEL ? nil : value + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + # + # On timeout a {Concurrent::TimeoutError} exception will be raised. + # + # @return [Object] the value exchanged by the other thread + # @raise [Concurrent::TimeoutError] on timeout + def exchange!(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + raise Concurrent::TimeoutError + else + value + end + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + # + # The return value will be a {Concurrent::Maybe} set to `Just` on success or + # `Nothing` on timeout. + # + # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with + # the item exchanged by the other thread as `#value`; on timeout a + # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` + # + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # result = exchanger.exchange(:foo, 0.5) + # + # if result.just? + # puts result.value #=> :bar + # else + # puts 'timeout' + # end + def try_exchange(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + Concurrent::Maybe.nothing(Concurrent::TimeoutError) + else + Concurrent::Maybe.just(value) + end + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + raise NotImplementedError + end + end + + # @!macro internal_implementation_note + # @!visibility private + class RubyExchanger < AbstractExchanger + # A simplified version of java.util.concurrent.Exchanger written by + # Doug Lea, Bill Scherer, and Michael Scott with assistance from members + # of JCP JSR-166 Expert Group and released to the public domain. It does + # not include the arena or the multi-processor spin loops. + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java + + safe_initialization! + + class Node < Concurrent::Synchronization::Object + attr_atomic :value + safe_initialization! 
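+ + # Each node carries an immutable item (the value offered by the occupying + # thread), an atomic +value+ slot (the "hole" a fulfilling thread CASes its + # own item into), and a latch the occupier sleeps on until it is fulfilled + # or times out. See the ALGORITHM notes in #do_exchange below.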
+ + def initialize(item) + super() + @Item = item + @Latch = Concurrent::CountDownLatch.new + self.value = nil + end + + def latch + @Latch + end + + def item + @Item + end + end + private_constant :Node + + def initialize + super + end + + private + + attr_atomic(:slot) + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + + # ALGORITHM + # + # From the original Java version: + # + # > The basic idea is to maintain a "slot", which is a reference to + # > a Node containing both an Item to offer and a "hole" waiting to + # > get filled in. If an incoming "occupying" thread sees that the + # > slot is null, it CAS'es (compareAndSets) a Node there and waits + # > for another to invoke exchange. That second "fulfilling" thread + # > sees that the slot is non-null, and so CASes it back to null, + # > also exchanging items by CASing the hole, plus waking up the + # > occupying thread if it is blocked. In each case CAS'es may + # > fail because a slot at first appears non-null but is null upon + # > CAS, or vice-versa. So threads may need to retry these + # > actions. + # + # This version: + # + # An exchange occurs between an "occupier" thread and a "fulfiller" thread. + # The "slot" is used to setup this interaction. The first thread in the + # exchange puts itself into the slot (occupies) and waits for a fulfiller. + # The second thread removes the occupier from the slot and attempts to + # perform the exchange. Removing the occupier also frees the slot for + # another occupier/fulfiller pair. + # + # Because the occupier and the fulfiller are operating independently and + # because there may be contention with other threads, any failed operation + # indicates contention. Both the occupier and the fulfiller operate within + # spin loops. Any failed actions along the happy path will cause the thread + # to repeat the loop and try again. + # + # When a timeout value is given the thread must be cognizant of time spent + # in the spin loop. The remaining time is checked every loop. When the time + # runs out the thread will exit. + # + # A "node" is the data structure used to perform the exchange. Only the + # occupier's node is necessary. It's the node used for the exchange. + # Each node has an "item," a "hole" (self), and a "latch." The item is the + # node's initial value. It never changes. It's what the fulfiller returns on + # success. The occupier's hole is where the fulfiller put its item. It's the + # item that the occupier returns on success. The latch is used for synchronization. + # Because a thread may act as either an occupier or fulfiller (or possibly + # both in periods of high contention) every thread creates a node when + # the exchange method is first called. + # + # The following steps occur within the spin loop. If any actions fail + # the thread will loop and try again, so long as there is time remaining. + # If time runs out the thread will return CANCEL. 
+ # + # Check the slot for an occupier: + # + # * If the slot is empty try to occupy + # * If the slot is full try to fulfill + # + # Attempt to occupy: + # + # * Attempt to CAS myself into the slot + # * Go to sleep and wait to be woken by a fulfiller + # * If the sleep is successful then the fulfiller completed its happy path + # - Return the value from my hole (the value given by the fulfiller) + # * When the sleep fails (time ran out) attempt to cancel the operation + # - Attempt to CAS myself out of the hole + # - If successful there is no contention + # - Return CANCEL + # - On failure, I am competing with a fulfiller + # - Attempt to CAS my hole to CANCEL + # - On success + # - Let the fulfiller deal with my cancel + # - Return CANCEL + # - On failure the fulfiller has completed its happy path + # - Return the value from my hole (the fulfiller's value) + # + # Attempt to fulfill: + # + # * Attempt to CAS the occupier out of the slot + # - On failure loop again + # * Attempt to CAS my item into the occupier's hole + # - On failure the occupier is trying to cancel + # - Loop again + # - On success we are on the happy path + # - Wake the sleeping occupier + # - Return the occupier's item + + value = NULL if value.nil? # The sentinel allows nil to be a valid value + me = Node.new(value) # create my node in case I need to occupy + end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up + + result = loop do + other = slot + if other && compare_and_set_slot(other, nil) + # try to fulfill + if other.compare_and_set_value(nil, value) + # happy path + other.latch.count_down + break other.item + end + elsif other.nil? && compare_and_set_slot(nil, me) + # try to occupy + timeout = end_at - Concurrent.monotonic_time if timeout + if me.latch.wait(timeout) + # happy path + break me.value + else + # attempt to remove myself from the slot + if compare_and_set_slot(me, nil) + break CANCEL + elsif !me.compare_and_set_value(nil, CANCEL) + # I've failed to block the fulfiller + break me.value + end + end + end + break CANCEL if timeout && Concurrent.monotonic_time >= end_at + end + + result == NULL ? nil : result + end + end + + if Concurrent.on_jruby? + require 'concurrent/utility/native_extension_loader' + + # @!macro internal_implementation_note + # @!visibility private + class JavaExchanger < AbstractExchanger + + def initialize + @exchanger = java.util.concurrent.Exchanger.new + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value) + end + else + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + rescue java.util.concurrent.TimeoutException + CANCEL + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + ExchangerImplementation = case + when Concurrent.on_jruby?
+ JavaExchanger + else + RubyExchanger + end + private_constant :ExchangerImplementation + + # @!macro exchanger + class Exchanger < ExchangerImplementation + + # @!method initialize + # Creates exchanger instance + + # @!method exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange + + # @!method exchange!(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + + # @!method try_exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb new file mode 100644 index 0000000..ac42953 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb @@ -0,0 +1,131 @@ +require 'concurrent/errors' +require 'concurrent/concern/deprecation' +require 'concurrent/executor/executor_service' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class AbstractExecutorService < Synchronization::LockableObject + include ExecutorService + include Concern::Deprecation + + # The set of possible fallback policies that may be set at thread pool creation. + FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze + + # @!macro executor_service_attr_reader_fallback_policy + attr_reader :fallback_policy + + attr_reader :name + + # Create a new thread pool. + def initialize(opts = {}, &block) + super(&nil) + synchronize do + @auto_terminate = opts.fetch(:auto_terminate, true) + @name = opts.fetch(:name) if opts.key?(:name) + ns_initialize(opts, &block) + end + end + + def to_s + name ? "#{super[0..-2]} name: #{name}>" : super + end + + # @!macro executor_service_method_shutdown + def shutdown + raise NotImplementedError + end + + # @!macro executor_service_method_kill + def kill + raise NotImplementedError + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + raise NotImplementedError + end + + # @!macro executor_service_method_running_question + def running? + synchronize { ns_running? } + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + synchronize { ns_shuttingdown? } + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + synchronize { ns_shutdown? } + end + + # @!macro executor_service_method_auto_terminate_question + def auto_terminate? + synchronize { @auto_terminate } + end + + # @!macro executor_service_method_auto_terminate_setter + def auto_terminate=(value) + deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized." + end + + private + + # Returns an action which executes the `fallback_policy` once the queue + # size reaches `max_queue`. The reason for the indirection of an action + # is so that the work can be deferred outside of synchronization. + # + # @param [Array] args the arguments to the task which is being handled. 
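+    #   (For `:abort` the returned action raises `RejectedExecutionError`,
+    #   for `:discard` it simply returns `false`, and for `:caller_runs` it
+    #   executes the task synchronously on the calling thread, as the
+    #   implementation below shows.)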
+    #
+    # @!visibility private
+    def fallback_action(*args)
+      case fallback_policy
+      when :abort
+        lambda { raise RejectedExecutionError }
+      when :discard
+        lambda { false }
+      when :caller_runs
+        lambda {
+          begin
+            yield(*args)
+          rescue => ex
+            # let it fail
+            log DEBUG, ex
+          end
+          true
+        }
+      else
+        lambda { fail "Unknown fallback policy #{fallback_policy}" }
+      end
+    end
+
+    def ns_execute(*args, &task)
+      raise NotImplementedError
+    end
+
+    # @!macro executor_service_method_ns_shutdown_execution
+    #
+    # Callback method called when an orderly shutdown has completed.
+    # The default behavior is to signal all waiting threads.
+    def ns_shutdown_execution
+      # do nothing
+    end
+
+    # @!macro executor_service_method_ns_kill_execution
+    #
+    # Callback method called when the executor has been killed.
+    # The default behavior is to do nothing.
+    def ns_kill_execution
+      # do nothing
+    end
+
+    def ns_auto_terminate?
+      @auto_terminate
+    end
+
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb
new file mode 100644
index 0000000..de50ed1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb
@@ -0,0 +1,62 @@
+require 'concurrent/utility/engine'
+require 'concurrent/executor/thread_pool_executor'
+
+module Concurrent
+
+  # A thread pool that dynamically grows and shrinks to fit the current workload.
+  # New threads are created as needed, existing threads are reused, and threads
+  # that remain idle for too long are killed and removed from the pool. These
+  # pools are particularly suited to applications that perform a high volume of
+  # short-lived tasks.
+  #
+  # On creation a `CachedThreadPool` has zero running threads. New threads are
+  # created on the pool as new operations are `#post`ed. The size of the pool
+  # will grow until `#max_length` threads are in the pool or until the number
+  # of threads exceeds the number of running and pending operations. When a new
+  # operation is posted to the pool, the first available idle thread will be
+  # tasked with the new operation.
+  #
+  # Should a thread crash for any reason the thread will immediately be removed
+  # from the pool. Similarly, threads which remain idle for an extended period
+  # of time will be killed and reclaimed. Thus these thread pools are very
+  # efficient at reclaiming unused resources.
+  #
+  # The API and behavior of this class are based on Java's `CachedThreadPool`
+  #
+  # @!macro thread_pool_options
+  class CachedThreadPool < ThreadPoolExecutor
+
+    # @!macro cached_thread_pool_method_initialize
+    #
+    #   Create a new thread pool.
+    #
+    #   @param [Hash] opts the options defining pool behavior.
+    #   @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
+    #
+    #   @raise [ArgumentError] if `fallback_policy` is not a known policy
+    #
+    #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
+    def initialize(opts = {})
+      defaults  = { idletime:    DEFAULT_THREAD_IDLETIMEOUT }
+      overrides = { min_threads: 0,
+                    max_threads: DEFAULT_MAX_POOL_SIZE,
+                    max_queue:   DEFAULT_MAX_QUEUE_SIZE }
+      super(defaults.merge(opts).merge(overrides))
+    end
+
+    private
+
+    # @!macro cached_thread_pool_method_initialize
+    # @!visibility private
+    def ns_initialize(opts)
+      super(opts)
+      if Concurrent.on_jruby?
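+        # On JRuby, swap in the JDK's native cached thread pool together with
+        # its rejection handler and keep-alive (idle timeout) settings.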
+        @max_queue = 0
+        @executor = java.util.concurrent.Executors.newCachedThreadPool(
+          DaemonThreadFactory.new(ns_auto_terminate?))
+        @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
+        @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb
new file mode 100644
index 0000000..7e34491
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb
@@ -0,0 +1,185 @@
+require 'concurrent/concern/logging'
+
+module Concurrent
+
+  ###################################################################
+
+  # @!macro executor_service_method_post
+  #
+  #   Submit a task to the executor for asynchronous processing.
+  #
+  #   @param [Array] args zero or more arguments to be passed to the task
+  #
+  #   @yield the asynchronous task to perform
+  #
+  #   @return [Boolean] `true` if the task is queued, `false` if the executor
+  #     is not running
+  #
+  #   @raise [ArgumentError] if no task is given
+
+  # @!macro executor_service_method_left_shift
+  #
+  #   Submit a task to the executor for asynchronous processing.
+  #
+  #   @param [Proc] task the asynchronous task to perform
+  #
+  #   @return [self] returns itself
+
+  # @!macro executor_service_method_can_overflow_question
+  #
+  #   Does the task queue have a maximum size?
+  #
+  #   @return [Boolean] True if the task queue has a maximum size else false.
+
+  # @!macro executor_service_method_serialized_question
+  #
+  #   Does this executor guarantee serialization of its operations?
+  #
+  #   @return [Boolean] True if the executor guarantees that all operations
+  #     will be performed in the order they are posted and no two operations may
+  #     occur simultaneously. Else false.
+
+  ###################################################################
+
+  # @!macro executor_service_public_api
+  #
+  #   @!method post(*args, &task)
+  #     @!macro executor_service_method_post
+  #
+  #   @!method <<(task)
+  #     @!macro executor_service_method_left_shift
+  #
+  #   @!method can_overflow?
+  #     @!macro executor_service_method_can_overflow_question
+  #
+  #   @!method serialized?
+  #     @!macro executor_service_method_serialized_question
+
+  ###################################################################
+
+  # @!macro executor_service_attr_reader_fallback_policy
+  #   @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`.
+
+  # @!macro executor_service_method_shutdown
+  #
+  #   Begin an orderly shutdown. Tasks already in the queue will be executed,
+  #   but no new tasks will be accepted. Has no additional effect if the
+  #   thread pool is not running.
+
+  # @!macro executor_service_method_kill
+  #
+  #   Begin an immediate shutdown. In-progress tasks will be allowed to
+  #   complete but enqueued tasks will be dismissed and no new tasks
+  #   will be accepted. Has no additional effect if the thread pool is
+  #   not running.
+
+  # @!macro executor_service_method_wait_for_termination
+  #
+  #   Block until executor shutdown is complete or until `timeout` seconds have
+  #   passed.
+  #
+  #   @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
+  #     must be called before this method (or on another thread).
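+  #
+  #   @example Orderly shutdown with a bounded wait (an illustrative sketch;
+  #     `executor` stands for any running executor service)
+  #     executor.shutdown
+  #     executor.kill unless executor.wait_for_termination(10)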
+  #
+  #   @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
+  #
+  #   @return [Boolean] `true` if shutdown completed, or `false` on timeout
+
+  # @!macro executor_service_method_running_question
+  #
+  #   Is the executor running?
+  #
+  #   @return [Boolean] `true` when running, `false` when shutting down or shutdown
+
+  # @!macro executor_service_method_shuttingdown_question
+  #
+  #   Is the executor shutting down?
+  #
+  #   @return [Boolean] `true` when not running and not shutdown, else `false`
+
+  # @!macro executor_service_method_shutdown_question
+  #
+  #   Is the executor shutdown?
+  #
+  #   @return [Boolean] `true` when shutdown, `false` when shutting down or running
+
+  # @!macro executor_service_method_auto_terminate_question
+  #
+  #   Does the executor auto-terminate when the application exits?
+  #
+  #   @return [Boolean] `true` when auto-termination is enabled else `false`.
+
+  # @!macro executor_service_method_auto_terminate_setter
+  #
+  #   Set the auto-terminate behavior for this executor.
+  #   @deprecated Has no effect
+  #   @param [Boolean] value The new auto-terminate value to set for this executor.
+  #   @return [Boolean] `true` when auto-termination is enabled else `false`.
+
+  ###################################################################
+
+  # @!macro abstract_executor_service_public_api
+  #
+  #   @!macro executor_service_public_api
+  #
+  #   @!attribute [r] fallback_policy
+  #     @!macro executor_service_attr_reader_fallback_policy
+  #
+  #   @!method shutdown
+  #     @!macro executor_service_method_shutdown
+  #
+  #   @!method kill
+  #     @!macro executor_service_method_kill
+  #
+  #   @!method wait_for_termination(timeout = nil)
+  #     @!macro executor_service_method_wait_for_termination
+  #
+  #   @!method running?
+  #     @!macro executor_service_method_running_question
+  #
+  #   @!method shuttingdown?
+  #     @!macro executor_service_method_shuttingdown_question
+  #
+  #   @!method shutdown?
+  #     @!macro executor_service_method_shutdown_question
+  #
+  #   @!method auto_terminate?
+  #     @!macro executor_service_method_auto_terminate_question
+  #
+  #   @!method auto_terminate=(value)
+  #     @!macro executor_service_method_auto_terminate_setter
+
+  ###################################################################
+
+  # @!macro executor_service_public_api
+  # @!visibility private
+  module ExecutorService
+    include Concern::Logging
+
+    # @!macro executor_service_method_post
+    def post(*args, &task)
+      raise NotImplementedError
+    end
+
+    # @!macro executor_service_method_left_shift
+    def <<(task)
+      post(&task)
+      self
+    end
+
+    # @!macro executor_service_method_can_overflow_question
+    #
+    # @note Always returns `false`
+    def can_overflow?
+      false
+    end
+
+    # @!macro executor_service_method_serialized_question
+    #
+    # @note Always returns `false`
+    def serialized?
+      false
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb
new file mode 100644
index 0000000..4de512a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb
@@ -0,0 +1,220 @@
+require 'concurrent/utility/engine'
+require 'concurrent/executor/thread_pool_executor'
+
+module Concurrent
+
+  # @!macro thread_pool_executor_constant_default_max_pool_size
+  #   Default maximum number of threads that will be created in the pool.
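+  #   (Both implementations below set this to 2_147_483_647, i.e.
+  #   java.lang.Integer::MAX_VALUE.)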
+
+  # @!macro thread_pool_executor_constant_default_min_pool_size
+  #   Default minimum number of threads that will be retained in the pool.
+
+  # @!macro thread_pool_executor_constant_default_max_queue_size
+  #   Default maximum number of tasks that may be added to the task queue.
+
+  # @!macro thread_pool_executor_constant_default_thread_timeout
+  #   Default maximum number of seconds a thread in the pool may remain idle
+  #   before being reclaimed.
+
+  # @!macro thread_pool_executor_constant_default_synchronous
+  #   Default value of the :synchronous option.
+
+  # @!macro thread_pool_executor_attr_reader_max_length
+  #   The maximum number of threads that may be created in the pool.
+  #   @return [Integer] The maximum number of threads that may be created in the pool.
+
+  # @!macro thread_pool_executor_attr_reader_min_length
+  #   The minimum number of threads that may be retained in the pool.
+  #   @return [Integer] The minimum number of threads that may be retained in the pool.
+
+  # @!macro thread_pool_executor_attr_reader_largest_length
+  #   The largest number of threads that have been created in the pool since construction.
+  #   @return [Integer] The largest number of threads that have been created in the pool since construction.
+
+  # @!macro thread_pool_executor_attr_reader_scheduled_task_count
+  #   The number of tasks that have been scheduled for execution on the pool since construction.
+  #   @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction.
+
+  # @!macro thread_pool_executor_attr_reader_completed_task_count
+  #   The number of tasks that have been completed by the pool since construction.
+  #   @return [Integer] The number of tasks that have been completed by the pool since construction.
+
+  # @!macro thread_pool_executor_attr_reader_idletime
+  #   The number of seconds that a thread may be idle before being reclaimed.
+  #   @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
+
+  # @!macro thread_pool_executor_attr_reader_synchronous
+  #   Whether or not a `max_queue` of 0 means the queue must perform direct
+  #   hand-off rather than act as an unbounded queue.
+  #   @return [true, false]
+
+  # @!macro thread_pool_executor_attr_reader_max_queue
+  #   The maximum number of tasks that may be waiting in the work queue at any one time.
+  #   When the queue size reaches `max_queue` subsequent tasks will be rejected in
+  #   accordance with the configured `fallback_policy`.
+  #
+  #   @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time.
+  #     When the queue size reaches `max_queue` subsequent tasks will be rejected in
+  #     accordance with the configured `fallback_policy`.
+
+  # @!macro thread_pool_executor_attr_reader_length
+  #   The number of threads currently in the pool.
+  #   @return [Integer] The number of threads currently in the pool.
+
+  # @!macro thread_pool_executor_attr_reader_queue_length
+  #   The number of tasks in the queue awaiting execution.
+  #   @return [Integer] The number of tasks in the queue awaiting execution.
+
+  # @!macro thread_pool_executor_attr_reader_remaining_capacity
+  #   Number of tasks that may be enqueued before reaching `max_queue` and rejecting
+  #   new tasks. A value of -1 indicates that the queue may grow without bound.
+  #
+  #   @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
+  #     new tasks. A value of -1 indicates that the queue may grow without bound.
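+  #
+  #   As a worked example: a pool created with `max_queue: 100` that currently
+  #   holds 40 queued tasks reports a remaining capacity of 60, while a pool
+  #   created with the default `max_queue: 0` always reports -1.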
+
+  # @!macro thread_pool_executor_method_prune_pool
+  #   Prune the thread pool of unneeded threads.
+  #
+  #   What is being pruned is controlled by the min_threads and idletime
+  #   parameters passed at pool creation time.
+  #
+  #   This is a no-op on some pool implementations (e.g. the Java one). The Ruby
+  #   pool will auto-prune each time a new job is posted. You will need to call
+  #   this method explicitly in case your application posts jobs in bursts (a
+  #   lot of jobs and then nothing for long periods).
+
+  # @!macro thread_pool_executor_public_api
+  #
+  #   @!macro abstract_executor_service_public_api
+  #
+  #   @!attribute [r] max_length
+  #     @!macro thread_pool_executor_attr_reader_max_length
+  #
+  #   @!attribute [r] min_length
+  #     @!macro thread_pool_executor_attr_reader_min_length
+  #
+  #   @!attribute [r] largest_length
+  #     @!macro thread_pool_executor_attr_reader_largest_length
+  #
+  #   @!attribute [r] scheduled_task_count
+  #     @!macro thread_pool_executor_attr_reader_scheduled_task_count
+  #
+  #   @!attribute [r] completed_task_count
+  #     @!macro thread_pool_executor_attr_reader_completed_task_count
+  #
+  #   @!attribute [r] idletime
+  #     @!macro thread_pool_executor_attr_reader_idletime
+  #
+  #   @!attribute [r] max_queue
+  #     @!macro thread_pool_executor_attr_reader_max_queue
+  #
+  #   @!attribute [r] length
+  #     @!macro thread_pool_executor_attr_reader_length
+  #
+  #   @!attribute [r] queue_length
+  #     @!macro thread_pool_executor_attr_reader_queue_length
+  #
+  #   @!attribute [r] remaining_capacity
+  #     @!macro thread_pool_executor_attr_reader_remaining_capacity
+  #
+  #   @!method can_overflow?
+  #     @!macro executor_service_method_can_overflow_question
+  #
+  #   @!method prune_pool
+  #     @!macro thread_pool_executor_method_prune_pool
+
+
+
+
+  # @!macro thread_pool_options
+  #
+  #   **Thread Pool Options**
+  #
+  #   Thread pools support several configuration options:
+  #
+  #   * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
+  #   * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and
+  #     a `<name>-worker-<id>` name is given to its threads if supported by the Ruby
+  #     implementation in use. `<id>` is unique for each thread.
+  #   * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
+  #     any one time. When the queue size reaches `max_queue` and no new threads can be created,
+  #     subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
+  #   * `auto_terminate`: When true (default), the threads started will be marked as daemon.
+  #   * `fallback_policy`: The policy defining how rejected tasks are handled.
+  #
+  #   Three fallback policies are supported:
+  #
+  #   * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
+  #   * `:discard`: Discard the task and return false.
+  #   * `:caller_runs`: Execute the task on the calling thread.
+  #
+  #   **Shutting Down Thread Pools**
+  #
+  #   Killing a thread pool while tasks are still being processed, either by calling
+  #   the `#kill` method or at application exit, will have unpredictable results. There
+  #   is no way for the thread pool to know what resources are being used by the
+  #   in-progress tasks. When those tasks are killed the impact on those resources
+  #   cannot be predicted. The *best* practice is to explicitly shut down all thread
+  #   pools using the provided methods:
+  #
+  #   * Call `#shutdown` to initiate an orderly termination of all in-progress tasks
+  #   * Call `#wait_for_termination` with an appropriate timeout interval and allow
+  #     the orderly shutdown to complete
+  #   * Call `#kill` *only when* the thread pool fails to shut down in the allotted time
+  #
+  #   On some runtime platforms (most notably the JVM) the application will not
+  #   exit until all thread pools have been shut down. To prevent applications from
+  #   "hanging" on exit, all threads can be marked as daemon according to the
+  #   `:auto_terminate` option.
+  #
+  #   ```ruby
+  #   pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon
+  #   pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon
+  #   ```
+  #
+  #   @note Failure to properly shut down a thread pool can lead to unpredictable results.
+  #     Please read *Shutting Down Thread Pools* for more information.
+  #
+  #   @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
+  #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
+  #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
+  #   @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean-
+
+
+
+
+
+  # @!macro fixed_thread_pool
+  #
+  #   A thread pool that reuses a fixed number of threads operating off an unbounded queue.
+  #   At any point, at most `num_threads` will be active processing tasks. When all threads are busy,
+  #   new tasks `#post`ed to the thread pool are enqueued until a thread becomes available.
+  #   Should a thread crash for any reason the thread will immediately be removed
+  #   from the pool and replaced.
+  #
+  #   The API and behavior of this class are based on Java's `FixedThreadPool`
+  #
+  # @!macro thread_pool_options
+  class FixedThreadPool < ThreadPoolExecutor
+
+    # @!macro fixed_thread_pool_method_initialize
+    #
+    #   Create a new thread pool.
+    #
+    #   @param [Integer] num_threads the number of threads to allocate
+    #   @param [Hash] opts the options defining pool behavior.
+    #   @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
+    #
+    #   @raise [ArgumentError] if `num_threads` is less than or equal to zero
+    #   @raise [ArgumentError] if `fallback_policy` is not a known policy
+    #
+    #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
+    def initialize(num_threads, opts = {})
+      raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
+      defaults  = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
+                    idletime:  DEFAULT_THREAD_IDLETIMEOUT }
+      overrides = { min_threads: num_threads,
+                    max_threads: num_threads }
+      super(defaults.merge(opts).merge(overrides))
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb
new file mode 100644
index 0000000..282df7a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb
@@ -0,0 +1,66 @@
+require 'concurrent/atomic/event'
+require 'concurrent/executor/abstract_executor_service'
+require 'concurrent/executor/serial_executor_service'
+
+module Concurrent
+
+  # An executor service which runs all operations on the current thread,
+  # blocking as necessary. Operations are performed in the order they are
+  # received and no two operations can be performed simultaneously.
+  #
+  # This executor service exists mainly for testing and debugging. When used
+  # it immediately runs every `#post` operation on the current thread, blocking
+  # that thread until the operation is complete. This can be very beneficial
+  # during testing because it makes all operations deterministic.
+  #
+  # @note Intended for use primarily in testing and debugging.
+  class ImmediateExecutor < AbstractExecutorService
+    include SerialExecutorService
+
+    # Creates a new executor
+    def initialize
+      @stopped = Concurrent::Event.new
+    end
+
+    # @!macro executor_service_method_post
+    def post(*args, &task)
+      raise ArgumentError.new('no block given') unless block_given?
+      return false unless running?
+      task.call(*args)
+      true
+    end
+
+    # @!macro executor_service_method_left_shift
+    def <<(task)
+      post(&task)
+      self
+    end
+
+    # @!macro executor_service_method_running_question
+    def running?
+      ! shutdown?
+    end
+
+    # @!macro executor_service_method_shuttingdown_question
+    def shuttingdown?
+      false
+    end
+
+    # @!macro executor_service_method_shutdown_question
+    def shutdown?
+      @stopped.set?
+    end
+
+    # @!macro executor_service_method_shutdown
+    def shutdown
+      @stopped.set
+      true
+    end
+    alias_method :kill, :shutdown
+
+    # @!macro executor_service_method_wait_for_termination
+    def wait_for_termination(timeout = nil)
+      @stopped.wait(timeout)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb
new file mode 100644
index 0000000..4f9769f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb
@@ -0,0 +1,44 @@
+require 'concurrent/executor/immediate_executor'
+require 'concurrent/executor/simple_executor_service'
+
+module Concurrent
+  # An executor service which runs all operations on a new thread, blocking
+  # until it completes. Operations are performed in the order they are received
+  # and no two operations can be performed simultaneously.
+  #
+  # This executor service exists mainly for testing and debugging. When used it
+  # immediately runs every `#post` operation on a new thread, blocking the
+  # current thread until the operation is complete. This is similar to how the
+  # ImmediateExecutor works, but the operation has the full stack of the new
+  # thread at its disposal. This can be helpful when the operations will spawn
+  # more operations on the same executor and so on - such a situation could
+  # overflow the single stack of an ImmediateExecutor, which is inconsistent
+  # with how a threaded executor would behave.
+  #
+  # @note Intended for use primarily in testing and debugging.
+  class IndirectImmediateExecutor < ImmediateExecutor
+    # Creates a new executor
+    def initialize
+      super
+      @internal_executor = SimpleExecutorService.new
+    end
+
+    # @!macro executor_service_method_post
+    def post(*args, &task)
+      raise ArgumentError.new("no block given") unless block_given?
+      return false unless running?
+
+      event = Concurrent::Event.new
+      @internal_executor.post do
+        begin
+          task.call(*args)
+        ensure
+          event.set
+        end
+      end
+      event.wait
+
+      true
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb
new file mode 100644
index 0000000..9a86385
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb
@@ -0,0 +1,103 @@
+require 'concurrent/utility/engine'
+
+if Concurrent.on_jruby?
+  require 'concurrent/errors'
+  require 'concurrent/executor/abstract_executor_service'
+
+  module Concurrent
+
+    # @!macro abstract_executor_service_public_api
+    # @!visibility private
+    class JavaExecutorService < AbstractExecutorService
+      java_import 'java.lang.Runnable'
+
+      FALLBACK_POLICY_CLASSES = {
+        abort:       java.util.concurrent.ThreadPoolExecutor::AbortPolicy,
+        discard:     java.util.concurrent.ThreadPoolExecutor::DiscardPolicy,
+        caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy
+      }.freeze
+      private_constant :FALLBACK_POLICY_CLASSES
+
+      def post(*args, &task)
+        raise ArgumentError.new('no block given') unless block_given?
+        return fallback_action(*args, &task).call unless running?
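+        # Wrap the Ruby block in the Runnable adapter (the private Job class
+        # defined below) before handing it to the wrapped JDK executor.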
+ @executor.submit Job.new(args, task) + true + rescue Java::JavaUtilConcurrent::RejectedExecutionException + raise RejectedExecutionError + end + + def wait_for_termination(timeout = nil) + if timeout.nil? + ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok + true + else + @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + + def shutdown + synchronize do + @executor.shutdown + nil + end + end + + def kill + synchronize do + @executor.shutdownNow + nil + end + end + + private + + def ns_running? + !(ns_shuttingdown? || ns_shutdown?) + end + + def ns_shuttingdown? + if @executor.respond_to? :isTerminating + @executor.isTerminating + else + false + end + end + + def ns_shutdown? + @executor.isShutdown || @executor.isTerminated + end + + class Job + include Runnable + def initialize(args, block) + @args = args + @block = block + end + + def run + @block.call(*@args) + end + end + private_constant :Job + end + + class DaemonThreadFactory + # hide include from YARD + send :include, java.util.concurrent.ThreadFactory + + def initialize(daemonize = true) + @daemonize = daemonize + end + + def newThread(runnable) + thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable) + thread.setDaemon(@daemonize) + return thread + end + end + + private_constant :DaemonThreadFactory + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb new file mode 100644 index 0000000..7aa24f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb @@ -0,0 +1,30 @@ +if Concurrent.on_jruby? + + require 'concurrent/executor/java_executor_service' + require 'concurrent/executor/serial_executor_service' + + module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaSingleThreadExecutor < JavaExecutorService + include SerialExecutorService + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + private + + def ns_initialize(opts) + @executor = java.util.concurrent.Executors.newSingleThreadExecutor( + DaemonThreadFactory.new(ns_auto_terminate?) + ) + @fallback_policy = opts.fetch(:fallback_policy, :discard) + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb new file mode 100644 index 0000000..1213a95 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb @@ -0,0 +1,140 @@ +if Concurrent.on_jruby? 
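+
+  # Defined only when running on JRuby; other runtimes fall back to the
+  # pure-Ruby RubyThreadPoolExecutor (see executor/thread_pool_executor.rb).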
+ + require 'concurrent/executor/java_executor_service' + + module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class JavaThreadPoolExecutor < JavaExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + @max_queue != 0 + end + + # @!macro thread_pool_executor_attr_reader_min_length + def min_length + @executor.getCorePoolSize + end + + # @!macro thread_pool_executor_attr_reader_max_length + def max_length + @executor.getMaximumPoolSize + end + + # @!macro thread_pool_executor_attr_reader_length + def length + @executor.getPoolSize + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + @executor.getLargestPoolSize + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + @executor.getTaskCount + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + @executor.getCompletedTaskCount + end + + # @!macro thread_pool_executor_attr_reader_idletime + def idletime + @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + @executor.getQueue.size + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity + end + + # @!macro executor_service_method_running_question + def running? 
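+        # the JDK executor must be neither shut down nor in the process of terminating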
+ super && !@executor.isTerminating + end + + # @!macro thread_pool_executor_method_prune_pool + def prune_pool + end + + private + + def ns_initialize(opts) + min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) + + if @max_queue == 0 + if @synchronous + queue = java.util.concurrent.SynchronousQueue.new + else + queue = java.util.concurrent.LinkedBlockingQueue.new + end + else + queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) + end + + @executor = java.util.concurrent.ThreadPoolExecutor.new( + min_length, + max_length, + idletime, + java.util.concurrent.TimeUnit::SECONDS, + queue, + DaemonThreadFactory.new(ns_auto_terminate?), + FALLBACK_POLICY_CLASSES[@fallback_policy].new) + + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb new file mode 100644 index 0000000..1f7301b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb @@ -0,0 +1,82 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/atomic/event' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubyExecutorService < AbstractExecutorService + safe_initialization! + + def initialize(*args, &block) + super + @StopEvent = Event.new + @StoppedEvent = Event.new + end + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + deferred_action = synchronize { + if running? + ns_execute(*args, &task) + else + fallback_action(*args, &task) + end + } + if deferred_action + deferred_action.call + else + true + end + end + + def shutdown + synchronize do + break unless running? + stop_event.set + ns_shutdown_execution + end + true + end + + def kill + synchronize do + break if shutdown? + stop_event.set + ns_kill_execution + stopped_event.set + end + true + end + + def wait_for_termination(timeout = nil) + stopped_event.wait(timeout) + end + + private + + def stop_event + @StopEvent + end + + def stopped_event + @StoppedEvent + end + + def ns_shutdown_execution + stopped_event.set + end + + def ns_running? + !stop_event.set? + end + + def ns_shuttingdown? + !(ns_running? || ns_shutdown?) + end + + def ns_shutdown? + stopped_event.set? 
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb new file mode 100644 index 0000000..916337d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb @@ -0,0 +1,21 @@ +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class RubySingleThreadExecutor < RubyThreadPoolExecutor + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super( + min_threads: 1, + max_threads: 1, + max_queue: 0, + idletime: DEFAULT_THREAD_IDLETIMEOUT, + fallback_policy: opts.fetch(:fallback_policy, :discard), + ) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb new file mode 100644 index 0000000..298dd7f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb @@ -0,0 +1,366 @@ +require 'thread' +require 'concurrent/atomic/event' +require 'concurrent/concern/logging' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class RubyThreadPoolExecutor < RubyExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_constant_default_synchronous + DEFAULT_SYNCHRONOUS = false + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_min_length + attr_reader :min_length + + # @!macro thread_pool_executor_attr_reader_idletime + attr_reader :idletime + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_attr_reader_synchronous + attr_reader :synchronous + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + synchronize { @largest_length } + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + synchronize { @scheduled_task_count } + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + synchronize { @completed_task_count } + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + synchronize { ns_limited_queue? 
} + end + + # @!macro thread_pool_executor_attr_reader_length + def length + synchronize { @pool.length } + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + synchronize { @queue.length } + end + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + synchronize do + if ns_limited_queue? + @max_queue - @queue.length + else + -1 + end + end + end + + # @!visibility private + def remove_busy_worker(worker) + synchronize { ns_remove_busy_worker worker } + end + + # @!visibility private + def ready_worker(worker, last_message) + synchronize { ns_ready_worker worker, last_message } + end + + # @!visibility private + def worker_died(worker) + synchronize { ns_worker_died worker } + end + + # @!visibility private + def worker_task_completed + synchronize { @completed_task_count += 1 } + end + + # @!macro thread_pool_executor_method_prune_pool + def prune_pool + synchronize { ns_prune_pool } + end + + private + + # @!visibility private + def ns_initialize(opts) + @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy) + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + + @pool = [] # all workers + @ready = [] # used as a stash (most idle worker is at the start) + @queue = [] # used as queue + # @ready or @queue is empty at all times + @scheduled_task_count = 0 + @completed_task_count = 0 + @largest_length = 0 + @workers_counter = 0 + @ruby_pid = $$ # detects if Ruby has forked + + @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented + @next_gc_time = Concurrent.monotonic_time + @gc_interval + end + + # @!visibility private + def ns_limited_queue? + @max_queue != 0 + end + + # @!visibility private + def ns_execute(*args, &task) + ns_reset_if_forked + + if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) + @scheduled_task_count += 1 + else + return fallback_action(*args, &task) + end + + ns_prune_pool if @next_gc_time < Concurrent.monotonic_time + nil + end + + # @!visibility private + def ns_shutdown_execution + ns_reset_if_forked + + if @pool.empty? + # nothing to do + stopped_event.set + end + + if @queue.empty? + # no more tasks will be accepted, just stop all workers + @pool.each(&:stop) + end + end + + # @!visibility private + def ns_kill_execution + # TODO log out unprocessed tasks in queue + # TODO try to shutdown first? 
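+      # Kill every worker thread outright and forget all of them; tasks still
+      # sitting in @queue are never executed.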
+      @pool.each(&:kill)
+      @pool.clear
+      @ready.clear
+    end
+
+    # tries to assign the task to a worker, getting one from @ready or creating a new one
+    # @return [true, false] whether the task was assigned to a worker
+    #
+    # @!visibility private
+    def ns_assign_worker(*args, &task)
+      # keep growing if the pool is not at the minimum yet
+      worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
+      if worker
+        worker << [task, args]
+        true
+      else
+        false
+      end
+    rescue ThreadError
+      # Raised when the operating system refuses to create the new thread
+      return false
+    end
+
+    # tries to enqueue the task
+    # @return [true, false] whether the task was enqueued
+    #
+    # @!visibility private
+    def ns_enqueue(*args, &task)
+      return false if @synchronous
+
+      if !ns_limited_queue? || @queue.size < @max_queue
+        @queue << [task, args]
+        true
+      else
+        false
+      end
+    end
+
+    # @!visibility private
+    def ns_worker_died(worker)
+      ns_remove_busy_worker worker
+      replacement_worker = ns_add_busy_worker
+      ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
+    end
+
+    # creates a new worker which has to receive work to do after it's added
+    # @return [nil, Worker] nil if max capacity is reached
+    #
+    # @!visibility private
+    def ns_add_busy_worker
+      return if @pool.size >= @max_length
+
+      @workers_counter += 1
+      @pool << (worker = Worker.new(self, @workers_counter))
+      @largest_length = @pool.length if @pool.length > @largest_length
+      worker
+    end
+
+    # handles a ready worker, giving it a new job or putting it back into @ready
+    #
+    # @!visibility private
+    def ns_ready_worker(worker, last_message, success = true)
+      task_and_args = @queue.shift
+      if task_and_args
+        worker << task_and_args
+      else
+        # stop workers when !running?, do not return them to @ready
+        if running?
+          raise unless last_message
+          @ready.push([worker, last_message])
+        else
+          worker.stop
+        end
+      end
+    end
+
+    # removes a worker which is not tracked in @ready
+    #
+    # @!visibility private
+    def ns_remove_busy_worker(worker)
+      @pool.delete(worker)
+      stopped_event.set if @pool.empty? && !running?
+      true
+    end
+
+    # stops the oldest workers (kept at the front of @ready) that have been
+    # idle for longer than the configured idletime
+    #
+    # @!visibility private
+    def ns_prune_pool
+      now = Concurrent.monotonic_time
+      stopped_workers = 0
+      while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
+        worker, last_message = @ready.first
+        if now - last_message > self.idletime
+          stopped_workers += 1
+          @ready.shift
+          worker << :stop
+        else break
+        end
+      end
+
+      @next_gc_time = Concurrent.monotonic_time + @gc_interval
+    end
+
+    def ns_reset_if_forked
+      if $$ != @ruby_pid
+        @queue.clear
+        @ready.clear
+        @pool.clear
+        @scheduled_task_count = 0
+        @completed_task_count = 0
+        @largest_length       = 0
+        @workers_counter      = 0
+        @ruby_pid             = $$
+      end
+    end
+
+    # @!visibility private
+    class Worker
+      include Concern::Logging
+
+      def initialize(pool, id)
+        # instance variables are accessed only under the pool's lock so no need to sync here again
+        @queue  = Queue.new
+        @pool   = pool
+        @thread = create_worker @queue, pool, pool.idletime
+
+        if @thread.respond_to?(:name=)
+          @thread.name = [pool.name, 'worker', id].compact.join('-')
+        end
+      end
+
+      def <<(message)
+        @queue << message
+      end
+
+      def stop
+        @queue << :stop
+      end
+
+      def kill
+        @thread.kill
+      end
+
+      private
+
+      def create_worker(queue, pool, idletime)
+        Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
+          catch(:stop) do
+            loop do
+
+              case message = my_queue.pop
+              when :stop
+                my_pool.remove_busy_worker(self)
+                throw :stop
+
+              else
+                task, args = message
+                run_task my_pool, task, args
+                my_pool.ready_worker(self, Concurrent.monotonic_time)
+              end
+            end
+          end
+        end
+      end
+
+      def run_task(pool, task, args)
+        task.call(*args)
+        pool.worker_task_completed
+      rescue => ex
+        # let it fail
+        log DEBUG, ex
+      rescue Exception => ex
+        log ERROR, ex
+        pool.worker_died(self)
+        throw :stop
+      end
+    end
+
+    private_constant :Worker
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb
new file mode 100644
index 0000000..f796b85
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb
@@ -0,0 +1,35 @@
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+
+  # A simple utility class that executes a callable and returns an array of three elements:
+  # success - indicating whether the callable was executed without errors
+  # value - filled by the callable's result if it was executed without errors, nil otherwise
+  # reason - the error raised by the callable if it was executed with errors, nil otherwise
+  class SafeTaskExecutor < Synchronization::LockableObject
+
+    def initialize(task, opts = {})
+      @task            = task
+      @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
+      super() # ensures visibility
+    end
+
+    # @return [Array]
+    def execute(*args)
+      success = true
+      value   = reason = nil
+
+      synchronize do
+        begin
+          value   = @task.call(*args)
+          success = true
+        rescue @exception_class => ex
+          reason  = ex
+          success = false
+        end
+      end
+
+      [success, value, reason]
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb
new file mode 100644
index 0000000..f1c38ec
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb
@@ -0,0 +1,34 @@
+require 'concurrent/executor/executor_service'
+
+module Concurrent
+
+  # Indicates that the including `ExecutorService` guarantees
+  # that all operations will occur in the order they are posted and that no
+  # two operations may occur simultaneously. This module provides no
+  # functionality and provides no guarantees. That is the responsibility
+  # of the including class. This module exists solely to allow the including
+  # object to be interrogated for its serialization status.
+  #
+  # @example
+  #   class Foo
+  #     include Concurrent::SerialExecutor
+  #   end
+  #
+  #   foo = Foo.new
+  #
+  #   foo.is_a? Concurrent::ExecutorService #=> true
+  #   foo.is_a? Concurrent::SerialExecutor  #=> true
+  #   foo.serialized?                       #=> true
+  #
+  # @!visibility private
+  module SerialExecutorService
+    include ExecutorService
+
+    # @!macro executor_service_method_serialized_question
+    #
+    # @note Always returns `true`
+    def serialized?
+      true
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb
new file mode 100644
index 0000000..4db7c7f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb
@@ -0,0 +1,107 @@
+require 'concurrent/errors'
+require 'concurrent/concern/logging'
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+
+  # Ensures that jobs are executed in a serialized order and are never run at the same time.
+  class SerializedExecution < Synchronization::LockableObject
+    include Concern::Logging
+
+    def initialize()
+      super()
+      synchronize { ns_initialize }
+    end
+
+    Job = Struct.new(:executor, :args, :block) do
+      def call
+        block.call(*args)
+      end
+    end
+
+    # Submit a task to the executor for asynchronous processing.
+    #
+    # @param [Executor] executor to be used for this job
+    #
+    # @param [Array] args zero or more arguments to be passed to the task
+    #
+    # @yield the asynchronous task to perform
+    #
+    # @return [Boolean] `true` if the task is queued, `false` if the executor
+    #   is not running
+    #
+    # @raise [ArgumentError] if no task is given
+    def post(executor, *args, &task)
+      posts [[executor, args, task]]
+      true
+    end
+
+    # As {#post}, but allows multiple tasks to be submitted at once; it is
+    # guaranteed that they will not be interleaved by other tasks.
+    #
+    # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
+    #   first is an {ExecutorService}, second is an array of args for the task, third is the task (Proc)
+    def posts(posts)
+      # if can_overflow?
+ # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow' + # end + + return nil if posts.empty? + + jobs = posts.map { |executor, args, task| Job.new executor, args, task } + + job_to_post = synchronize do + if @being_executed + @stash.push(*jobs) + nil + else + @being_executed = true + @stash.push(*jobs[1..-1]) + jobs.first + end + end + + call_job job_to_post if job_to_post + true + end + + private + + def ns_initialize + @being_executed = false + @stash = [] + end + + def call_job(job) + did_it_run = begin + job.executor.post { work(job) } + true + rescue RejectedExecutionError => ex + false + end + + # TODO not the best idea to run it myself + unless did_it_run + begin + work job + rescue => ex + # let it fail + log DEBUG, ex + end + end + end + + # ensures next job is executed if any is stashed + def work(job) + job.call + ensure + synchronize do + job = @stash.shift || (@being_executed = false) + end + + # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end + # of this block + call_job job if job + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb new file mode 100644 index 0000000..8197781 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb @@ -0,0 +1,28 @@ +require 'delegate' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' + +module Concurrent + + # A wrapper/delegator for any `ExecutorService` that + # guarantees serialized execution of tasks. + # + # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html) + # @see Concurrent::SerializedExecution + class SerializedExecutionDelegator < SimpleDelegator + include SerialExecutorService + + def initialize(executor) + @executor = executor + @serializer = SerializedExecution.new + super(executor) + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @serializer.post(@executor, *args, &task) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb new file mode 100644 index 0000000..0bc62af --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb @@ -0,0 +1,103 @@ +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/event' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' + +module Concurrent + + # An executor service in which every operation spawns a new, + # independently operating thread. + # + # This is perhaps the most inefficient executor service in this + # library. It exists mainly for testing an debugging. Thread creation + # and management is expensive in Ruby and this executor performs no + # resource pooling. 
This can be very beneficial during testing and + # debugging because it decouples the using code from the underlying + # executor implementation. In production this executor will likely + # lead to suboptimal performance. + # + # @note Intended for use primarily in testing and debugging. + class SimpleExecutorService < RubyExecutorService + + # @!macro executor_service_method_post + def self.post(*args) + raise ArgumentError.new('no block given') unless block_given? + Thread.new(*args) do + Thread.current.abort_on_exception = false + yield(*args) + end + true + end + + # @!macro executor_service_method_left_shift + def self.<<(task) + post(&task) + self + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + @count.increment + Thread.new(*args) do + Thread.current.abort_on_exception = false + begin + yield(*args) + ensure + @count.decrement + @stopped.set if @running.false? && @count.value == 0 + end + end + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + @running.true? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + @running.false? && ! @stopped.set? + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? + end + + # @!macro executor_service_method_shutdown + def shutdown + @running.make_false + @stopped.set if @count.value == 0 + true + end + + # @!macro executor_service_method_kill + def kill + @running.make_false + @stopped.set + true + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + + private + + def ns_initialize(*args) + @running = Concurrent::AtomicBoolean.new(true) + @stopped = Concurrent::Event.new + @count = Concurrent::AtomicFixnum.new(0) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb new file mode 100644 index 0000000..f1474ea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb @@ -0,0 +1,57 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_single_thread_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_single_thread_executor' + end + + SingleThreadExecutorImplementation = case + when Concurrent.on_jruby? + JavaSingleThreadExecutor + else + RubySingleThreadExecutor + end + private_constant :SingleThreadExecutorImplementation + + # @!macro single_thread_executor + # + # A thread pool with a single thread an unlimited queue. Should the thread + # die for any reason it will be removed and replaced, thus ensuring that + # the executor will always remain viable and available to process jobs. + # + # A common pattern for background processing is to create a single thread + # on which an infinite loop is run. The thread's loop blocks on an input + # source (perhaps blocking I/O or a queue) and processes each input as it + # is received. This pattern has several issues. The thread itself is highly + # susceptible to errors during processing. 
+  #   Also, the thread itself must be
+  #   constantly monitored and restarted should it die. `SingleThreadExecutor`
+  #   encapsulates all these behaviors. The task processor is highly resilient
+  #   to errors from within tasks. Also, should the thread die it will
+  #   automatically be restarted.
+  #
+  #   The API and behavior of this class are based on Java's `SingleThreadExecutor`.
+  #
+  # @!macro abstract_executor_service_public_api
+  class SingleThreadExecutor < SingleThreadExecutorImplementation
+
+    # @!macro single_thread_executor_method_initialize
+    #
+    #   Create a new thread pool.
+    #
+    #   @option opts [Symbol] :fallback_policy (:discard) the policy for handling new
+    #     tasks that are received when the queue size has reached
+    #     `max_queue` or the executor has shut down
+    #
+    #   @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
+    #     in `FALLBACK_POLICIES`
+    #
+    #   @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
+    #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
+    #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
+
+    # @!method initialize(opts = {})
+    #   @!macro single_thread_executor_method_initialize
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb
new file mode 100644
index 0000000..253d46a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb
@@ -0,0 +1,88 @@
+require 'concurrent/utility/engine'
+require 'concurrent/executor/ruby_thread_pool_executor'
+
+module Concurrent
+
+  if Concurrent.on_jruby?
+    require 'concurrent/executor/java_thread_pool_executor'
+  end
+
+  ThreadPoolExecutorImplementation = case
+                                     when Concurrent.on_jruby?
+                                       JavaThreadPoolExecutor
+                                     else
+                                       RubyThreadPoolExecutor
+                                     end
+  private_constant :ThreadPoolExecutorImplementation
+
+  # @!macro thread_pool_executor
+  #
+  #   An abstraction composed of one or more threads and a task queue. Tasks
+  #   (blocks or `proc` objects) are submitted to the pool and added to the queue.
+  #   The threads in the pool remove the tasks and execute them in the order
+  #   they were received.
+  #
+  #   A `ThreadPoolExecutor` will automatically adjust the pool size according
+  #   to the bounds set by `min-threads` and `max-threads`. When a new task is
+  #   submitted and fewer than `min-threads` threads are running, a new thread
+  #   is created to handle the request, even if other worker threads are idle.
+  #   If there are more than `min-threads` but fewer than `max-threads` threads
+  #   running, a new thread will be created only if the queue is full.
+  #
+  #   Threads that are idle for too long will be garbage collected, down to the
+  #   configured minimum. Should a thread crash, it, too, will be garbage collected.
+  #
+  #   `ThreadPoolExecutor` is based on the Java class of the same name. From
+  #   the official Java documentation:
+  #
+  #   > Thread pools address two different problems: they usually provide
+  #   > improved performance when executing large numbers of asynchronous tasks,
+  #   > due to reduced per-task invocation overhead, and they provide a means
+  #   > of bounding and managing the resources, including threads, consumed
+  #   > when executing a collection of tasks.
Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + # + # @!macro thread_pool_executor_public_api + class ThreadPoolExecutor < ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options which configure the thread pool. + # + # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum + # number of threads to be created + # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted + # and fewer than `min_threads` are running, a new thread is created + # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum + # number of seconds a thread may be idle before being reclaimed + # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum + # number of tasks allowed in the work queue at any one time; a value of + # zero means the queue may grow without bound + # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0 + # for :max_queue means the queue must perform direct hand-off rather than unbounded. + # @raise [ArgumentError] if `:max_threads` is less than one + # @raise [ArgumentError] if `:min_threads` is less than zero + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html + + # @!method initialize(opts = {}) + # @!macro thread_pool_executor_method_initialize + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb new file mode 100644 index 0000000..0dfaf12 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb @@ -0,0 +1,172 @@ +require 'concurrent/scheduled_task' +require 'concurrent/atomic/event' +require 'concurrent/collection/non_concurrent_priority_queue' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/single_thread_executor' + +require 'concurrent/options' + +module Concurrent + + # Executes a collection of tasks, each after a given delay. A master task + # monitors the set and schedules each task for execution at the appropriate + # time. Tasks are run on the global thread pool or on the supplied executor. + # Each task is represented as a `ScheduledTask`. + # + # @see Concurrent::ScheduledTask + # + # @!macro monotonic_clock_warning + class TimerSet < RubyExecutorService + + # Create a new set of timed tasks. 
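+    #
+    # A hedged usage sketch (the 0.1-second delay is arbitrary):
+    #
+    #   timers = Concurrent::TimerSet.new
+    #   task   = timers.post(0.1) { :done } # returns a Concurrent::ScheduledTask
+    #   task.value                          #=> :done (blocks until the task has run)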
+    #
+    # @!macro executor_options
+    #
+    #   @param [Hash] opts the options used to specify the executor on which to perform actions
+    #   @option opts [Executor] :executor when set use the given `Executor` instance.
+    #     Three special values are also supported: `:task` returns the global task pool,
+    #     `:operation` returns the global operation pool, and `:immediate` returns a new
+    #     `ImmediateExecutor` object.
+    def initialize(opts = {})
+      super(opts)
+    end
+
+    # Post a task to be executed after a given delay (in seconds). If the
+    # delay is less than 1/100th of a second the task will be immediately posted
+    # to the executor.
+    #
+    # @param [Float] delay the number of seconds to wait for before executing the task.
+    # @param [Array] args the arguments passed to the task on execution.
+    #
+    # @yield the task to be performed.
+    #
+    # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
+    #   is successful; false after shutdown.
+    #
+    # @raise [ArgumentError] if the intended execution time is not in the future.
+    # @raise [ArgumentError] if no block is given.
+    def post(delay, *args, &task)
+      raise ArgumentError.new('no block given') unless block_given?
+      return false unless running?
+      opts = { executor:  @task_executor,
+               args:      args,
+               timer_set: self }
+      task = ScheduledTask.execute(delay, opts, &task) # may raise exception
+      task.unscheduled? ? false : task
+    end
+
+    # Begin an immediate shutdown. In-progress tasks will be allowed to
+    # complete but enqueued tasks will be dismissed and no new tasks
+    # will be accepted. Has no additional effect if the thread pool is
+    # not running.
+    def kill
+      shutdown
+    end
+
+    private :<<
+
+    private
+
+    # Initialize the object.
+    #
+    # @param [Hash] opts the options to create the object with.
+    # @!visibility private
+    def ns_initialize(opts)
+      @queue          = Collection::NonConcurrentPriorityQueue.new(order: :min)
+      @task_executor  = Options.executor_from_options(opts) || Concurrent.global_io_executor
+      @timer_executor = SingleThreadExecutor.new
+      @condition      = Event.new
+      @ruby_pid       = $$ # detects if Ruby has forked
+    end
+
+    # Post the task to the internal queue.
+    #
+    # @note This is intended as a callback method from ScheduledTask
+    #   only. It is not intended to be used directly. Post a task
+    #   by using the `ScheduledTask#execute` method.
+    #
+    # @!visibility private
+    def post_task(task)
+      synchronize { ns_post_task(task) }
+    end
+
+    # @!visibility private
+    def ns_post_task(task)
+      return false unless ns_running?
+      ns_reset_if_forked
+      if (task.initial_delay) <= 0.01
+        task.executor.post { task.process_task }
+      else
+        @queue.push(task)
+        # only post the process method when the queue is empty
+        @timer_executor.post(&method(:process_tasks)) if @queue.length == 1
+        @condition.set
+      end
+      true
+    end
+
+    # Remove the given task from the queue.
+    #
+    # @note This is intended as a callback method from `ScheduledTask`
+    #   only. It is not intended to be used directly. Cancel a task
+    #   by using the `ScheduledTask#cancel` method.
+    #
+    # @!visibility private
+    def remove_task(task)
+      synchronize { @queue.delete(task) }
+    end
+
+    # `ExecutorService` callback called during shutdown.
+    #
+    # @!visibility private
+    def ns_shutdown_execution
+      ns_reset_if_forked
+      @queue.clear
+      @timer_executor.kill
+      stopped_event.set
+    end
+
+    def ns_reset_if_forked
+      if $$ != @ruby_pid
+        @queue.clear
+        @condition.reset
+        @ruby_pid = $$
+      end
+    end
+
+    # Run a loop and execute tasks in the scheduled order and at the approximate
+    # scheduled time.
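+    # (A hedged illustration: a task due in 0.25 seconds is waited on for
+    # roughly 0.25 seconds, while a task due in five minutes is waited on
+    # in 60-second slices, the queue being re-checked after each wake-up.)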
If no tasks remain the thread will exit gracefully so that + # garbage collection can occur. If there are no ready tasks it will sleep + # for up to 60 seconds waiting for the next scheduled task. + # + # @!visibility private + def process_tasks + loop do + task = synchronize { @condition.reset; @queue.peek } + break unless task + + now = Concurrent.monotonic_time + diff = task.schedule_time - now + + if diff <= 0 + # We need to remove the task from the queue before passing + # it to the executor, to avoid race conditions where we pass + # the peek'ed task to the executor and then pop a different + # one that's been added in the meantime. + # + # Note that there's no race condition between the peek and + # this pop - this pop could retrieve a different task from + # the peek, but that task would be due to fire now anyway + # (because @queue is a priority queue, and this thread is + # the only reader, so whatever timer is at the head of the + # queue now must have the same pop time, or a closer one, as + # when we peeked). + task = synchronize { @queue.pop } + task.executor.post { task.process_task } + else + @condition.wait([diff, 60].min) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb new file mode 100644 index 0000000..eb1972c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb @@ -0,0 +1,20 @@ +require 'concurrent/executor/abstract_executor_service' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/fixed_thread_pool' +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/indirect_immediate_executor' +require 'concurrent/executor/java_executor_service' +require 'concurrent/executor/java_single_thread_executor' +require 'concurrent/executor/java_thread_pool_executor' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/ruby_single_thread_executor' +require 'concurrent/executor/ruby_thread_pool_executor' +require 'concurrent/executor/cached_thread_pool' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/executor/serial_executor_service' +require 'concurrent/executor/serialized_execution' +require 'concurrent/executor/serialized_execution_delegator' +require 'concurrent/executor/single_thread_executor' +require 'concurrent/executor/thread_pool_executor' +require 'concurrent/executor/timer_set' diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb new file mode 100644 index 0000000..1af182e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb @@ -0,0 +1,141 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. 
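+
+# A hedged usage sketch for the class defined below (`expensive_call` is a
+# hypothetical stand-in for any slow operation):
+#
+#   future = Concurrent::Future.execute { expensive_call }
+#   future.state    #=> :pending
+#   future.value(1) # block for up to one second awaiting the result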
+
+
+module Concurrent
+
+  # {include:file:docs-source/future.md}
+  #
+  # @!macro copy_options
+  #
+  # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
+  # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
+  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future
+  class Future < IVar
+
+    # Create a new `Future` in the `:unscheduled` state.
+    #
+    # @yield the asynchronous operation to perform
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @option opts [object, Array] :args zero or more arguments to be passed to
+    #   the task block on execution
+    #
+    # @raise [ArgumentError] if no block is given
+    def initialize(opts = {}, &block)
+      raise ArgumentError.new('no block given') unless block_given?
+      super(NULL, opts.merge(__task_from_block__: block), &nil)
+    end
+
+    # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and
+    # passes the block to a new thread/thread pool for eventual execution.
+    # Does nothing if the `Future` is in any state other than `:unscheduled`.
+    #
+    # @return [Future] a reference to `self`
+    #
+    # @example Instantiate and execute in separate steps
+    #   future = Concurrent::Future.new{ sleep(1); 42 }
+    #   future.state #=> :unscheduled
+    #   future.execute
+    #   future.state #=> :pending
+    #
+    # @example Instantiate and execute in one line
+    #   future = Concurrent::Future.new{ sleep(1); 42 }.execute
+    #   future.state #=> :pending
+    def execute
+      if compare_and_set_state(:pending, :unscheduled)
+        @executor.post{ safe_execute(@task, @args) }
+        self
+      end
+    end
+
+    # Create a new `Future` object with the given block, execute it, and return the
+    # `:pending` object.
+    #
+    # @yield the asynchronous operation to perform
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @option opts [object, Array] :args zero or more arguments to be passed to
+    #   the task block on execution
+    #
+    # @raise [ArgumentError] if no block is given
+    #
+    # @return [Future] the newly created `Future` in the `:pending` state
+    #
+    # @example
+    #   future = Concurrent::Future.execute{ sleep(1); 42 }
+    #   future.state #=> :pending
+    def self.execute(opts = {}, &block)
+      Future.new(opts, &block).execute
+    end
+
+    # @!macro ivar_set_method
+    def set(value = NULL, &block)
+      check_for_block_or_value!(block_given?, value)
+      synchronize do
+        if @state != :unscheduled
+          raise MultipleAssignmentError
+        else
+          @task = block || Proc.new { value }
+        end
+      end
+      execute
+    end
+
+    # Attempt to cancel the operation if it has not already processed.
+    # The operation can only be cancelled while still `pending`. It cannot
+    # be cancelled once it has begun processing or has completed.
+    #
+    # @return [Boolean] was the operation successfully cancelled.
+    def cancel
+      if compare_and_set_state(:cancelled, :pending)
+        complete(false, nil, CancelledOperationError.new)
+        true
+      else
+        false
+      end
+    end
+
+    # Has the operation been successfully cancelled?
+    #
+    # @return [Boolean]
+    def cancelled?
+      state == :cancelled
+    end
+
+    # Wait the given number of seconds for the operation to complete.
+    # On timeout attempt to cancel the operation.
+    #
+    # @param [Numeric] timeout the maximum time in seconds to wait.
+    # @return [Boolean] true if the operation completed before the timeout
+    #   else false
+    def wait_or_cancel(timeout)
+      wait(timeout)
+      if complete?
+ true + else + cancel + false + end + end + + protected + + def ns_initialize(value, opts) + super + @state = :unscheduled + @task = opts[:__task_from_block__] + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb new file mode 100644 index 0000000..7902fe9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb @@ -0,0 +1,50 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_hash + # + # A thread-safe subclass of Hash. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`, + # which takes the lock repeatedly when reading an item. + # + # @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash` + + # @!macro internal_implementation_note + HashImplementation = case + when Concurrent.on_cruby? + # Hash is thread-safe in practice because CRuby runs + # threads one at a time and does not do context + # switching during the execution of C functions. + ::Hash + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyHash < ::Hash + include JRuby::Synchronized + end + JRubyHash + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyHash < ::Hash + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash + TruffleRubyHash + + else + warn 'Possibly unsupported Ruby implementation' + ::Hash + end + private_constant :HashImplementation + + # @!macro concurrent_hash + class Hash < HashImplementation + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb new file mode 100644 index 0000000..48462e8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb @@ -0,0 +1,101 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A thread-safe, immutable variation of Ruby's standard `Struct`. + # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + module ImmutableStruct + include Synchronization::AbstractStruct + + def self.included(base) + base.safe_initialization! + end + + # @!macro struct_values + def values + ns_values + end + + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + ns_values_at(indexes) + end + + # @!macro struct_inspect + def inspect + ns_inspect + end + + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + ns_merge(other, &block) + end + + # @!macro struct_to_h + def to_h + ns_to_h + end + + # @!macro struct_get + def [](member) + ns_get(member) + end + + # @!macro struct_equality + def ==(other) + ns_equality(other) + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + ns_each(&block) + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? 
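+      # (Hedged note: with no block, callers get a lazy Enumerator, e.g.
+      # struct.each_pair.to_a #=> [[member, value], ...])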
+ ns_each_pair(&block) + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + ns_select(&block) + end + + private + + # @!visibility private + def initialize_copy(original) + super(original) + ns_initialize_copy + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb new file mode 100644 index 0000000..4165038 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb @@ -0,0 +1,208 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/obligation' +require 'concurrent/concern/observable' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # An `IVar` is like a future that you can assign. As a future is a value that + # is being computed that you can wait on, an `IVar` is a value that is waiting + # to be assigned, that you can wait on. `IVars` are single assignment and + # deterministic. + # + # Then, express futures as an asynchronous computation that assigns an `IVar`. + # The `IVar` becomes the primitive on which [futures](Future) and + # [dataflow](Dataflow) are built. + # + # An `IVar` is a single-element container that is normally created empty, and + # can only be set once. The I in `IVar` stands for immutable. Reading an + # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` + # from different threads. + # + # If you want to have some parallel task set the value in an `IVar`, you want + # a `Future`. If you want to create a graph of parallel tasks all executed + # when the values they depend on are ready you want `dataflow`. `IVar` is + # generally a low-level primitive. + # + # ## Examples + # + # Create, set and get an `IVar` + # + # ```ruby + # ivar = Concurrent::IVar.new + # ivar.set 14 + # ivar.value #=> 14 + # ivar.set 2 # would now be an error + # ``` + # + # ## See Also + # + # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. + # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). + # In Proceedings of Workshop on Graph Reduction, 1986. + # 2. For recent application: + # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). + class IVar < Synchronization::LockableObject + include Concern::Obligation + include Concern::Observable + + # Create a new `IVar` in the `:pending` state with the (optional) initial value. 
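+    #
+    # A hedged example: one thread blocks on the `IVar` while another
+    # fulfills it.
+    #
+    #   ivar = Concurrent::IVar.new
+    #   reader = Thread.new { ivar.value } # blocks until the IVar is set
+    #   ivar.set(42)
+    #   reader.value #=> 42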
+    #
+    # @param [Object] value the initial value
+    # @param [Hash] opts the options to create the `IVar` with
+    # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning
+    #   the data
+    # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before
+    #   returning the data
+    # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing
+    #   the internal value and returning the value returned from the proc
+    def initialize(value = NULL, opts = {}, &block)
+      if value != NULL && block_given?
+        raise ArgumentError.new('provide only a value or a block')
+      end
+      super(&nil)
+      synchronize { ns_initialize(value, opts, &block) }
+    end
+
+    # Add an observer on this object that will receive notification on update.
+    #
+    # Upon completion the `IVar` will notify all observers in a thread-safe way.
+    # The `func` method of the observer will be called with three arguments: the
+    # `Time` at which the `IVar` completed the asynchronous operation, the
+    # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on
+    # fulfillment).
+    #
+    # @param [Object] observer the object that will be notified of changes
+    # @param [Symbol] func symbol naming the method to call when this
+    #   `Observable` has changes
+    def add_observer(observer = nil, func = :update, &block)
+      raise ArgumentError.new('cannot provide both an observer and a block') if observer && block
+      direct_notification = false
+
+      if block
+        observer = block
+        func     = :call
+      end
+
+      synchronize do
+        if event.set?
+          direct_notification = true
+        else
+          observers.add_observer(observer, func)
+        end
+      end
+
+      observer.send(func, Time.now, self.value, reason) if direct_notification
+      observer
+    end
+
+    # @!macro ivar_set_method
+    #   Set the `IVar` to a value and wake or notify all threads waiting on it.
+    #
+    #   @!macro ivar_set_parameters_and_exceptions
+    #     @param [Object] value the value to store in the `IVar`
+    #     @yield A block operation to use for setting the value
+    #     @raise [ArgumentError] if both a value and a block are given
+    #     @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
+    #       been set or otherwise completed
+    #
+    #   @return [IVar] self
+    def set(value = NULL)
+      check_for_block_or_value!(block_given?, value)
+      raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending)
+
+      begin
+        value = yield if block_given?
+        complete_without_notification(true, value, nil)
+      rescue => ex
+        complete_without_notification(false, nil, ex)
+      end
+
+      notify_observers(self.value, reason)
+      self
+    end
+
+    # @!macro ivar_fail_method
+    #   Set the `IVar` to failed due to some error and wake or notify all threads waiting on it.
+    #
+    #   @param [Object] reason for the failure
+    #   @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
+    #     been set or otherwise completed
+    #   @return [IVar] self
+    def fail(reason = StandardError.new)
+      complete(false, nil, reason)
+    end
+
+    # Attempt to set the `IVar` with the given value or block. Return a
+    # boolean indicating the success or failure of the set operation.
+    #
+    # @!macro ivar_set_parameters_and_exceptions
+    #
+    # @return [Boolean] true if the value was set else false
+    def try_set(value = NULL, &block)
+      set(value, &block)
+      true
+    rescue MultipleAssignmentError
+      false
+    end
+
+    protected
+
+    # @!visibility private
+    def ns_initialize(value, opts)
+      value = yield if block_given?
+ init_obligation + self.observers = Collection::CopyOnWriteObserverSet.new + set_deref_options(opts) + + @state = :pending + if value != NULL + ns_complete_without_notification(true, value, nil) + end + end + + # @!visibility private + def safe_execute(task, args = []) + if compare_and_set_state(:processing, :pending) + success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, val, reason) + yield(success, val, reason) if block_given? + end + end + + # @!visibility private + def complete(success, value, reason) + complete_without_notification(success, value, reason) + notify_observers(self.value, reason) + self + end + + # @!visibility private + def complete_without_notification(success, value, reason) + synchronize { ns_complete_without_notification(success, value, reason) } + self + end + + # @!visibility private + def notify_observers(value, reason) + observers.notify_and_delete_observers{ [Time.now, value, reason] } + end + + # @!visibility private + def ns_complete_without_notification(success, value, reason) + raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state + set_state(success, value, reason) + event.set + end + + # @!visibility private + def check_for_block_or_value!(block_given, value) # :nodoc: + if (block_given && value != NULL) || (! block_given && value == NULL) + raise ArgumentError.new('must set with either a value or a block') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb new file mode 100644 index 0000000..1b22241 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb @@ -0,0 +1,350 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/utility/engine' + +module Concurrent + # @!visibility private + module Collection + + # @!visibility private + MapImplementation = case + when Concurrent.on_jruby? + require 'concurrent/utility/native_extension_loader' + # noinspection RubyResolve + JRubyMapBackend + when Concurrent.on_cruby? + require 'concurrent/collection/map/mri_map_backend' + MriMapBackend + when Concurrent.on_truffleruby? + if defined?(::TruffleRuby::ConcurrentMap) + require 'concurrent/collection/map/truffleruby_map_backend' + TruffleRubyMapBackend + else + require 'concurrent/collection/map/atomic_reference_map_backend' + AtomicReferenceMapBackend + end + else + warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' + require 'concurrent/collection/map/synchronized_map_backend' + SynchronizedMapBackend + end + end + + # `Concurrent::Map` is a hash-like object and should have much better performance + # characteristics, especially under high concurrency, than `Concurrent::Hash`. + # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` + # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` + # does. For most uses it should do fine though, and we recommend you consider + # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. + class Map < Collection::MapImplementation + + # @!macro map.atomic_method + # This method is atomic. + + # @!macro map.atomic_method_with_block + # This method is atomic. + # @note Atomic methods taking a block do not allow the `self` instance + # to be used within the block. 
Doing so will cause a deadlock. + + # @!method []=(key, value) + # Set a value with key + # @param [Object] key + # @param [Object] value + # @return [Object] the new value + + # @!method compute_if_absent(key) + # Compute and store new value for key if the key is absent. + # @param [Object] key + # @yield new value + # @yieldreturn [Object] new value + # @return [Object] new value or current value + # @!macro map.atomic_method_with_block + + # @!method compute_if_present(key) + # Compute and store new value for key if the key is present. + # @param [Object] key + # @yield new value + # @yieldparam old_value [Object] + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method compute(key) + # Compute and store new value for key. + # @param [Object] key + # @yield compute new value from old one + # @yieldparam old_value [Object, nil] old_value, or nil when key is absent + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method merge_pair(key, value) + # If the key is absent, the value is stored, otherwise new value is + # computed with a block. + # @param [Object] key + # @param [Object] value + # @yield compute new value from old one + # @yieldparam old_value [Object] old value + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method replace_pair(key, old_value, new_value) + # Replaces old_value with new_value if key exists and current value + # matches old_value + # @param [Object] key + # @param [Object] old_value + # @param [Object] new_value + # @return [true, false] true if replaced + # @!macro map.atomic_method + + # @!method replace_if_exists(key, new_value) + # Replaces current value with new_value if key exists + # @param [Object] key + # @param [Object] new_value + # @return [Object, nil] old value or nil + # @!macro map.atomic_method + + # @!method get_and_set(key, value) + # Get the current value under key and set new value. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete(key) + # Delete key and its value. + # @param [Object] key + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete_pair(key, value) + # Delete pair and its value if current value equals the provided value. + # @param [Object] key + # @param [Object] value + # @return [true, false] true if deleted + # @!macro map.atomic_method + + # NonConcurrentMapBackend handles default_proc natively + unless defined?(Collection::NonConcurrentMapBackend) and self < Collection::NonConcurrentMapBackend + + # @param [Hash, nil] options options to set the :initial_capacity or :load_factor. Ignored on some Rubies. 
+      # @param [Proc] default_proc Optional block to compute the default value if the key is not set, like `Hash#default_proc`
+      def initialize(options = nil, &default_proc)
+        if options.kind_of?(::Hash)
+          validate_options_hash!(options)
+        else
+          options = nil
+        end
+
+        super(options)
+        @default_proc = default_proc
+      end
+
+      # Get a value with key
+      # @param [Object] key
+      # @return [Object] the value
+      def [](key)
+        if value = super # non-falsy value is an existing mapping, return it right away
+          value
+          # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby, by the time the current thread gets to the key?(key) call,
+          # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrect +nil+ value
+          # would be returned)
+          # note: nil == value check is not technically necessary
+        elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
+          @default_proc.call(self, key)
+        else
+          value
+        end
+      end
+    end
+
+    alias_method :get, :[]
+    alias_method :put, :[]=
+
+    # Get a value with key, or default_value when key is absent,
+    # or fail when no default_value is given.
+    # @param [Object] key
+    # @param [Object] default_value
+    # @yield default value for a key
+    # @yieldparam key [Object]
+    # @yieldreturn [Object] default value
+    # @return [Object] the value or default value
+    # @raise [KeyError] when key is missing and no default_value is provided
+    # @!macro map_method_not_atomic
+    #   @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended
+    #     to be used as a concurrency primitive with strong happens-before
+    #     guarantees. It is not intended to be used as a high-level abstraction
+    #     supporting complex operations. All read and write operations are
+    #     thread safe, but no guarantees are made regarding race conditions
+    #     between the fetch operation and yielding to the block. Additionally,
+    #     this method does not support recursion. This is due to internal
+    #     constraints that are very unlikely to change in the near future.
+    def fetch(key, default_value = NULL)
+      if NULL != (value = get_or_default(key, NULL))
+        value
+      elsif block_given?
+        yield key
+      elsif NULL != default_value
+        default_value
+      else
+        raise_fetch_no_key
+      end
+    end
+
+    # Fetch value with key, or store default value when key is absent,
+    # or fail when no default_value is given. This is a two step operation,
+    # therefore not atomic. The store can overwrite another concurrently
+    # stored value.
+    # @param [Object] key
+    # @param [Object] default_value
+    # @yield default value for a key
+    # @yieldparam key [Object]
+    # @yieldreturn [Object] default value
+    # @return [Object] the value or default value
+    def fetch_or_store(key, default_value = NULL)
+      fetch(key) do
+        put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value))
+      end
+    end
+
+    # Insert value into map with key if key is absent in one atomic step.
+    # @param [Object] key
+    # @param [Object] value
+    # @return [Object, nil] the previous value when key was present or nil when there was no key
+    def put_if_absent(key, value)
+      computed = false
+      result = compute_if_absent(key) do
+        computed = true
+        value
+      end
+      computed ? nil : result
+    end unless method_defined?(:put_if_absent)
+
+    # Is the value stored in the map? Iterates over all values.
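+    # (Hedged note, inferred from the implementation: the comparison uses
+    # +equal?+, i.e. object identity, so for example:
+    #
+    #   m = Concurrent::Map.new
+    #   m[:a] = 'x'
+    #   m.value?('x')   #=> false (a different String instance)
+    #   m.value?(m[:a]) #=> true
+    # )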
+ # @param [Object] value + # @return [true, false] + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end + + # All keys + # @return [::Array] keys + def keys + arr = [] + each_pair { |k, v| arr << k } + arr + end unless method_defined?(:keys) + + # All values + # @return [::Array] values + def values + arr = [] + each_pair { |k, v| arr << v } + arr + end unless method_defined?(:values) + + # Iterates over each key. + # @yield for each key in the map + # @yieldparam key [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_key + each_pair { |k, v| yield k } + end unless method_defined?(:each_key) + + # Iterates over each value. + # @yield for each value in the map + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_value + each_pair { |k, v| yield v } + end unless method_defined?(:each_value) + + # Iterates over each key value pair. + # @yield for each key value pair in the map + # @yieldparam key [Object] + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_pair + return enum_for :each_pair unless block_given? + super + end + + alias_method :each, :each_pair unless method_defined?(:each) + + # Find key of a value. + # @param [Object] value + # @return [Object, nil] key or nil when not found + def key(value) + each_pair { |k, v| return k if v == value } + nil + end unless method_defined?(:key) + + # Is map empty? + # @return [true, false] + def empty? + each_pair { |k, v| return false } + true + end unless method_defined?(:empty?) + + # The size of map. + # @return [Integer] size + def size + count = 0 + each_pair { |k, v| count += 1 } + count + end unless method_defined?(:size) + + # @!visibility private + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair { |k, v| h[k] = v } + h + end + + # @!visibility private + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + # @!visibility private + def inspect + format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect + end + + private + + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair { |k, v| self[k] = v } + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive Integer" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb new file mode 100644 index 0000000..317c82b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization/object' + +module Concurrent + + # A `Maybe` encapsulates an optional value. A `Maybe` either contains a value + # of (represented as `Just`), or it is empty (represented as `Nothing`). 
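+  # For instance (a hedged two-liner):
+  #
+  #   Concurrent::Maybe.just(42).just?           #=> true
+  #   Concurrent::Maybe.nothing('oops').nothing? #=> true
+  #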
Using + # `Maybe` is a good way to deal with errors or exceptional cases without + # resorting to drastic measures such as exceptions. + # + # `Maybe` is a replacement for the use of `nil` with better type checking. + # + # For compatibility with {Concurrent::Concern::Obligation} the predicate and + # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and + # `reason`. + # + # ## Motivation + # + # A common pattern in languages with pattern matching, such as Erlang and + # Haskell, is to return *either* a value *or* an error from a function + # Consider this Erlang code: + # + # ```erlang + # case file:consult("data.dat") of + # {ok, Terms} -> do_something_useful(Terms); + # {error, Reason} -> lager:error(Reason) + # end. + # ``` + # + # In this example the standard library function `file:consult` returns a + # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) + # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) + # (similar to a ruby symbol) and a variable containing ancillary data. On + # success it returns the atom `ok` and the data from the file. On failure it + # returns `error` and a string with an explanation of the problem. With this + # pattern there is no ambiguity regarding success or failure. If the file is + # empty the return value cannot be misinterpreted as an error. And when an + # error occurs the return value provides useful information. + # + # In Ruby we tend to return `nil` when an error occurs or else we raise an + # exception. Both of these idioms are problematic. Returning `nil` is + # ambiguous because `nil` may also be a valid value. It also lacks + # information pertaining to the nature of the error. Raising an exception + # is both expensive and usurps the normal flow of control. All of these + # problems can be solved with the use of a `Maybe`. + # + # A `Maybe` is unambiguous with regard to whether or not it contains a value. + # When `Just` it contains a value, when `Nothing` it does not. When `Just` + # the value it contains may be `nil`, which is perfectly valid. When + # `Nothing` the reason for the lack of a value is contained as well. The + # previous Erlang example can be duplicated in Ruby in a principled way by + # having functions return `Maybe` objects: + # + # ```ruby + # result = MyFileUtils.consult("data.dat") # returns a Maybe + # if result.just? + # do_something_useful(result.value) # or result.just + # else + # logger.error(result.reason) # or result.nothing + # end + # ``` + # + # @example Returning a Maybe from a Function + # module MyFileUtils + # def self.consult(path) + # file = File.open(path, 'r') + # Concurrent::Maybe.just(file.read) + # rescue => ex + # return Concurrent::Maybe.nothing(ex) + # ensure + # file.close if file + # end + # end + # + # maybe = MyFileUtils.consult('bogus.file') + # maybe.just? #=> false + # maybe.nothing? #=> true + # maybe.reason #=> # + # + # maybe = MyFileUtils.consult('README.md') + # maybe.just? #=> true + # maybe.nothing? #=> false + # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." + # + # @example Using Maybe with a Block + # result = Concurrent::Maybe.from do + # Client.find(10) # Client is an ActiveRecord model + # end + # + # # -- if the record was found + # result.just? #=> true + # result.value #=> # + # + # # -- if the record was not found + # result.just? #=> false + # result.reason #=> ActiveRecord::RecordNotFound + # + # @example Using Maybe with the Null Object Pattern + # # In a Rails controller... 
+ # result = ClientService.new(10).find # returns a Maybe + # render json: result.or(NullClient.new) + # + # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe + # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe + class Maybe < Synchronization::Object + include Comparable + safe_initialization! + + # Indicates that the given attribute has not been set. + # When `Just` the {#nothing} getter will return `NONE`. + # When `Nothing` the {#just} getter will return `NONE`. + NONE = ::Object.new.freeze + + # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. + attr_reader :just + + # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. + attr_reader :nothing + + private_class_method :new + + # Create a new `Maybe` using the given block. + # + # Runs the given block passing all function arguments to the block as block + # arguments. If the block runs to completion without raising an exception + # a new `Just` is created with the value set to the return value of the + # block. If the block raises an exception a new `Nothing` is created with + # the reason being set to the raised exception. + # + # @param [Array] args Zero or more arguments to pass to the block. + # @yield The block from which to create a new `Maybe`. + # @yieldparam [Array] args Zero or more block arguments passed as + # arguments to the function. + # + # @return [Maybe] The newly created object. + # + # @raise [ArgumentError] when no block given. + def self.from(*args) + raise ArgumentError.new('no block given') unless block_given? + begin + value = yield(*args) + return new(value, NONE) + rescue => ex + return new(NONE, ex) + end + end + + # Create a new `Just` with the given value. + # + # @param [Object] value The value to set for the new `Maybe` object. + # + # @return [Maybe] The newly created object. + def self.just(value) + return new(value, NONE) + end + + # Create a new `Nothing` with the given (optional) reason. + # + # @param [Exception] error The reason to set for the new `Maybe` object. + # When given a string a new `StandardError` will be created with the + # argument as the message. When no argument is given a new + # `StandardError` with an empty message will be created. + # + # @return [Maybe] The newly created object. + def self.nothing(error = '') + if error.is_a?(Exception) + nothing = error + else + nothing = StandardError.new(error.to_s) + end + return new(NONE, nothing) + end + + # Is this `Maybe` a `Just` (successfully fulfilled with a value)? + # + # @return [Boolean] True if `Just` or false if `Nothing`. + def just? + ! nothing? + end + alias :fulfilled? :just? + + # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? + # + # @return [Boolean] True if `Nothing` or false if `Just`. + def nothing? + @nothing != NONE + end + alias :rejected? :nothing? + + alias :value :just + + alias :reason :nothing + + # Comparison operator. + # + # @return [Integer] 0 if self and other are both `Nothing`; + # -1 if self is `Nothing` and other is `Just`; + # 1 if self is `Just` and other is nothing; + # `self.just <=> other.just` if both self and other are `Just`. + def <=>(other) + if nothing? + other.nothing? ? 0 : -1 + else + other.nothing? ? 1 : just <=> other.just + end + end + + # Return either the value of self or the given default value. + # + # @return [Object] The value of self when `Just`; else the given default. + def or(other) + just? ? 
just : other
+    end
+
+    private
+
+    # Create a new `Maybe` with the given attributes.
+    #
+    # @param [Object] just The value when `Just` else `NONE`.
+    # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`.
+    #
+    # @return [Maybe] The new `Maybe`.
+    #
+    # @!visibility private
+    def initialize(just, nothing)
+      @just    = just
+      @nothing = nothing
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb
new file mode 100644
index 0000000..5d0e9b9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb
@@ -0,0 +1,239 @@
+require 'concurrent/synchronization/abstract_struct'
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+
+  # A thread-safe variation of Ruby's standard `Struct`. Values can be set at
+  # construction or safely changed at any time during the object's lifecycle.
+  #
+  # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct`
+  module MutableStruct
+    include Synchronization::AbstractStruct
+
+    # @!macro struct_new
+    #
+    #   Factory for creating new struct classes.
+    #
+    #   ```
+    #   new([class_name] [, member_name]+) -> StructClass
+    #   new([class_name] [, member_name]+) {|StructClass| block } -> StructClass
+    #   new(value, ...) -> obj
+    #   StructClass[value, ...] -> obj
+    #   ```
+    #
+    #   The first two forms are used to create a new struct subclass `class_name`
+    #   that can contain a value for each member_name. This subclass can be
+    #   used to create instances of the structure like any other Class.
+    #
+    #   If the `class_name` is omitted an anonymous struct class will be created.
+    #   Otherwise, the name of this struct will appear as a constant in the struct class,
+    #   so it must be unique for all structs under this base class and must start with a
+    #   capital letter. Assigning a struct class to a constant also gives the class
+    #   the name of the constant.
+    #
+    #   If a block is given it will be evaluated in the context of `StructClass`, passing
+    #   the created class as a parameter. This is the recommended way to customize a struct.
+    #   Subclassing an anonymous struct creates an extra anonymous class that will never be used.
+    #
+    #   The last two forms create a new instance of a struct subclass. The number of value
+    #   parameters must be less than or equal to the number of attributes defined for the
+    #   struct. Unset parameters default to nil. Passing more parameters than the number of attributes
+    #   will raise an `ArgumentError`.
+    #
+    #   @see http://ruby-doc.org/core/Struct.html#method-c-new Ruby standard library `Struct#new`

+    # @!macro struct_values
+    #
+    #   Returns the values for this struct as an Array.
+    #
+    #   @return [Array] the values for this struct
+    #
+    def values
+      synchronize { ns_values }
+    end
+    alias_method :to_a, :values
+
+    # @!macro struct_values_at
+    #
+    #   Returns the struct member values for each selector as an Array.
+    #
+    #   A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`).
+    #
+    #   @param [Fixnum, Range] indexes the index(es) from which to obtain the values (in order)
+    def values_at(*indexes)
+      synchronize { ns_values_at(indexes) }
+    end
+
+    # @!macro struct_inspect
+    #
+    #   Describe the contents of this struct in a string.
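+    #
+    #   A hedged sketch (the exact output format may vary):
+    #
+    #     Point = Concurrent::MutableStruct.new(:x, :y)
+    #     point = Point.new(1, 2)
+    #     point.x = 10  # thread-safe assignment
+    #     point.inspect #=> e.g. "#<struct Point x=10, y=2>"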
+    #
+    #   @return [String] the contents of this struct in a string
+    def inspect
+      synchronize { ns_inspect }
+    end
+    alias_method :to_s, :inspect
+
+    # @!macro struct_merge
+    #
+    #   Returns a new struct containing the contents of `other` and the contents
+    #   of `self`. If no block is specified, the value for entries with duplicate
+    #   keys will be that of `other`. Otherwise the value for each duplicate key
+    #   is determined by calling the block with the key, its value in `self` and
+    #   its value in `other`.
+    #
+    #   @param [Hash] other the hash from which to set the new values
+    #   @yield an options block for resolving duplicate keys
+    #   @yieldparam [String, Symbol] member the name of the member which is duplicated
+    #   @yieldparam [Object] selfvalue the value of the member in `self`
+    #   @yieldparam [Object] othervalue the value of the member in `other`
+    #
+    #   @return [Synchronization::AbstractStruct] a new struct with the new values
+    #
+    #   @raise [ArgumentError] if given a member that is not defined in the struct
+    def merge(other, &block)
+      synchronize { ns_merge(other, &block) }
+    end
+
+    # @!macro struct_to_h
+    #
+    #   Returns a hash containing the names and values for the struct’s members.
+    #
+    #   @return [Hash] the names and values for the struct’s members
+    def to_h
+      synchronize { ns_to_h }
+    end
+
+    # @!macro struct_get
+    #
+    #   Attribute Reference
+    #
+    #   @param [Symbol, String, Integer] member the string or symbol name of the member
+    #     for which to obtain the value or the member's index
+    #
+    #   @return [Object] the value of the given struct member or the member at the given index.
+    #
+    #   @raise [NameError] if the member does not exist
+    #   @raise [IndexError] if the index is out of range.
+    def [](member)
+      synchronize { ns_get(member) }
+    end
+
+    # @!macro struct_equality
+    #
+    #   Equality
+    #
+    #   @return [Boolean] true if other has the same struct subclass and has
+    #     equal member values (according to `Object#==`)
+    def ==(other)
+      synchronize { ns_equality(other) }
+    end
+
+    # @!macro struct_each
+    #
+    #   Yields the value of each struct member in order. If no block is given
+    #   an enumerator is returned.
+    #
+    #   @yield the operation to be performed on each struct member
+    #   @yieldparam [Object] value each struct value (in order)
+    def each(&block)
+      return enum_for(:each) unless block_given?
+      synchronize { ns_each(&block) }
+    end
+
+    # @!macro struct_each_pair
+    #
+    #   Yields the name and value of each struct member in order. If no block is
+    #   given an enumerator is returned.
+    #
+    #   @yield the operation to be performed on each struct member/value pair
+    #   @yieldparam [Object] member each struct member (in order)
+    #   @yieldparam [Object] value each struct value (in order)
+    def each_pair(&block)
+      return enum_for(:each_pair) unless block_given?
+      synchronize { ns_each_pair(&block) }
+    end
+
+    # @!macro struct_select
+    #
+    #   Yields each member value from the struct to the block and returns an Array
+    #   containing the member values from the struct for which the given block
+    #   returns a true value (equivalent to `Enumerable#select`).
+    #
+    #   @yield the operation to be performed on each struct member
+    #   @yieldparam [Object] value each struct value (in order)
+    #
+    #   @return [Array] an array containing each value for which the block returns true
+    def select(&block)
+      return enum_for(:select) unless block_given?
+ synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # Attribute Assignment + # + # Sets the value of the given struct member or the member at the given index. + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the name does not exist + # @raise [IndexError] if the index is out of range. + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize { @values[member] = value } + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize { @values[index] = value } + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb new file mode 100644 index 0000000..dfc4195 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb @@ -0,0 +1,242 @@ +require 'concurrent/concern/dereferenceable' +require 'concurrent/synchronization/object' + +module Concurrent + + # An `MVar` is a synchronized single element container. They are empty or + # contain one item. Taking a value from an empty `MVar` blocks, as does + # putting a value into a full one. You can either think of them as blocking + # queue of length one, or a special kind of mutable variable. + # + # On top of the fundamental `#put` and `#take` operations, we also provide a + # `#mutate` that is atomic with respect to operations on the same instance. + # These operations all support timeouts. + # + # We also support non-blocking operations `#try_put!` and `#try_take!`, a + # `#set!` that ignores existing values, a `#value` that returns the value + # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields + # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. + # You shouldn't use these operations in the first instance. + # + # `MVar` is a [Dereferenceable](Dereferenceable). + # + # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. + # + # Note that unlike the original Haskell paper, our `#take` is blocking. 
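+  #
+  # A hedged sketch of the blocking hand-off:
+  #
+  #   mvar = Concurrent::MVar.new
+  #   consumer = Thread.new { mvar.take } # blocks until a value arrives
+  #   mvar.put(42)                        # would block if the MVar were already full
+  #   consumer.value #=> 42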
This is how
+  # Haskell and Scala do it today.
+  #
+  # @!macro copy_options
+  #
+  # ## See Also
+  #
+  # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non-strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th
+  #    ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991.
+  #
+  # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794).
+  #    In Proceedings of the 23rd Symposium on Principles of Programming Languages
+  #    (PoPL), 1996.
+  class MVar < Synchronization::Object
+    include Concern::Dereferenceable
+    safe_initialization!
+
+    # Unique value that represents that an `MVar` was empty
+    EMPTY = ::Object.new
+
+    # Unique value that represents that an `MVar` timed out before it was able
+    # to produce a value.
+    TIMEOUT = ::Object.new
+
+    # Create a new `MVar`, either empty or with an initial value.
+    #
+    # @param [Hash] opts the options controlling how the future will be processed
+    #
+    # @!macro deref_options
+    def initialize(value = EMPTY, opts = {})
+      @value = value
+      @mutex = Mutex.new
+      @empty_condition = ConditionVariable.new
+      @full_condition = ConditionVariable.new
+      set_deref_options(opts)
+    end
+
+    # Remove the value from an `MVar`, leaving it empty, and blocking if there
+    # isn't a value. A timeout can be set to limit the time spent blocked, in
+    # which case it returns `TIMEOUT` if the time is exceeded.
+    # @return [Object] the value that was taken, or `TIMEOUT`
+    def take(timeout = nil)
+      @mutex.synchronize do
+        wait_for_full(timeout)
+
+        # If we timed out we'll still be empty
+        if unlocked_full?
+          value = @value
+          @value = EMPTY
+          @empty_condition.signal
+          apply_deref_options(value)
+        else
+          TIMEOUT
+        end
+      end
+    end
+
+    # Acquires the lock on the `MVar`, yields its value to the provided block,
+    # and releases the lock. A timeout can be set to limit the time spent blocked,
+    # in which case it returns `TIMEOUT` if the time is exceeded.
+    # @return [Object] the value returned by the block, or `TIMEOUT`
+    def borrow(timeout = nil)
+      @mutex.synchronize do
+        wait_for_full(timeout)
+
+        # If we timed out we'll still be empty
+        if unlocked_full?
+          yield @value
+        else
+          TIMEOUT
+        end
+      end
+    end
+
+    # Put a value into an `MVar`, blocking if there is already a value until
+    # it is empty. A timeout can be set to limit the time spent blocked, in
+    # which case it returns `TIMEOUT` if the time is exceeded.
+    # @return [Object] the value that was put, or `TIMEOUT`
+    def put(value, timeout = nil)
+      @mutex.synchronize do
+        wait_for_empty(timeout)
+
+        # If we timed out we won't be empty
+        if unlocked_empty?
+          @value = value
+          @full_condition.signal
+          apply_deref_options(value)
+        else
+          TIMEOUT
+        end
+      end
+    end
+
+    # Atomically `take`, yield the value to a block for transformation, and then
+    # `put` the transformed value. Returns the transformed value. A timeout can
+    # be set to limit the time spent blocked, in which case it returns `TIMEOUT`
+    # if the time is exceeded.
+    # @return [Object] the transformed value, or `TIMEOUT`
+    def modify(timeout = nil)
+      raise ArgumentError.new('no block given') unless block_given?
+
+      @mutex.synchronize do
+        wait_for_full(timeout)
+
+        # If we timed out we'll still be empty
+        if unlocked_full?
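+          # Still holding the lock: swap in the block's result. The MVar stays
+          # full, so signal threads waiting on the full condition.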
+ value = @value + @value = yield value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. + def try_take! + @mutex.synchronize do + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + EMPTY + end + end + end + + # Non-blocking version of `put`, that returns whether or not it was successful. + def try_put!(value) + @mutex.synchronize do + if unlocked_empty? + @value = value + @full_condition.signal + true + else + false + end + end + end + + # Non-blocking version of `put` that will overwrite an existing value. + def set!(value) + @mutex.synchronize do + old_value = @value + @value = value + @full_condition.signal + apply_deref_options(old_value) + end + end + + # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. + def modify! + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + value = @value + @value = yield value + if unlocked_empty? + @empty_condition.signal + else + @full_condition.signal + end + apply_deref_options(value) + end + end + + # Returns if the `MVar` is currently empty. + def empty? + @mutex.synchronize { @value == EMPTY } + end + + # Returns if the `MVar` currently contains a value. + def full? + !empty? + end + + protected + + def synchronize(&block) + @mutex.synchronize(&block) + end + + private + + def unlocked_empty? + @value == EMPTY + end + + def unlocked_full? + ! unlocked_empty? + end + + def wait_for_full(timeout) + wait_while(@full_condition, timeout) { unlocked_empty? } + end + + def wait_for_empty(timeout) + wait_while(@empty_condition, timeout) { unlocked_full? } + end + + def wait_while(condition, timeout) + if timeout.nil? + while yield + condition.wait(@mutex) + end + else + stop = Concurrent.monotonic_time + timeout + while yield && timeout > 0.0 + condition.wait(@mutex, timeout) + timeout = stop - Concurrent.monotonic_time + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb new file mode 100644 index 0000000..bdd22a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb @@ -0,0 +1,42 @@ +require 'concurrent/configuration' + +module Concurrent + + # @!visibility private + module Options + + # Get the requested `Executor` based on the values set in the options hash. + # + # @param [Hash] opts the options defining the requested executor + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:fast` returns the global fast executor, + # `:io` returns the global io executor, and `:immediate` returns a new + # `ImmediateExecutor` object. 
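+    #   A minimal resolution sketch (illustrative, not part of the original docs):
+    #
+    #     Concurrent::Options.executor_from_options(executor: :io)  #=> Concurrent.global_io_executor
+    #     Concurrent::Options.executor_from_options({})             #=> nil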
+ # + # @return [Executor, nil] the requested thread pool, or nil when no option specified + # + # @!visibility private + def self.executor_from_options(opts = {}) # :nodoc: + if identifier = opts.fetch(:executor, nil) + executor(identifier) + else + nil + end + end + + def self.executor(executor_identifier) + case executor_identifier + when :fast + Concurrent.global_fast_executor + when :io + Concurrent.global_io_executor + when :immediate + Concurrent.global_immediate_executor + when Concurrent::ExecutorService + executor_identifier + else + raise ArgumentError, "executor not recognized by '#{executor_identifier}'" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb new file mode 100644 index 0000000..ccc47dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb @@ -0,0 +1,580 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +module Concurrent + + PromiseExecutionError = Class.new(StandardError) + + # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) + # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. + # + # > A promise represents the eventual value returned from the single + # > completion of an operation. + # + # Promises are similar to futures and share many of the same behaviours. + # Promises are far more robust, however. Promises can be chained in a tree + # structure where each promise may have zero or more children. Promises are + # chained using the `then` method. The result of a call to `then` is always + # another promise. Promises are resolved asynchronously (with respect to the + # main thread) but in a strict order: parents are guaranteed to be resolved + # before their children, children before their younger siblings. The `then` + # method takes two parameters: an optional block to be executed upon parent + # resolution and an optional callable to be executed upon parent failure. The + # result of each promise is passed to each of its children upon resolution. + # When a promise is rejected all its children will be summarily rejected and + # will receive the reason. + # + # Promises have several possible states: *:unscheduled*, *:pending*, + # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as + # `#incomplete?` and `#complete?`. When a Promise is created it is set to + # *:unscheduled*. Once the `#execute` method is called the state becomes + # *:pending*. Once a job is pulled from the thread pool's queue and is given + # to a thread for processing (often immediately upon `#post`) the state + # becomes *:processing*. The future will remain in this state until processing + # is complete. A future that is in the *:unscheduled*, *:pending*, or + # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either + # *:rejected*, indicating that an exception was thrown during processing, or + # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` + # will be updated to reflect the result of the operation. If *:rejected* the + # `reason` will be updated with a reference to the thrown exception. 
The
+  # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and
+  # `#fulfilled?` can be called at any time to obtain the state of the Promise,
+  # as can the `#state` method, which returns a symbol.
+  #
+  # Retrieving the value of a promise is done through the `value` (alias:
+  # `deref`) method. Obtaining the value of a promise is a potentially blocking
+  # operation. When a promise is *rejected* a call to `value` will return `nil`
+  # immediately. When a promise is *fulfilled* a call to `value` will
+  # immediately return the current value. When a promise is *pending* a call to
+  # `value` will block until the promise is either *rejected* or *fulfilled*. A
+  # *timeout* value can be passed to `value` to limit how long the call will
+  # block. If `nil` the call will block indefinitely. If `0` the call will not
+  # block. Any other integer or float value will indicate the maximum number of
+  # seconds to block.
+  #
+  # Promises run on the global thread pool.
+  #
+  # @!macro copy_options
+  #
+  # ### Examples
+  #
+  # Start by requiring promises
+  #
+  # ```ruby
+  # require 'concurrent/promise'
+  # ```
+  #
+  # Then create one
+  #
+  # ```ruby
+  # p = Concurrent::Promise.execute do
+  #   # do something
+  #   42
+  # end
+  # ```
+  #
+  # Promises can be chained using the `then` method. The `then` method accepts a
+  # block and an executor, to be executed on fulfillment, and a callable argument to be executed
+  # on rejection. The result of each promise is passed as the block argument
+  # to chained promises.
+  #
+  # ```ruby
+  # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute
+  # ```
+  #
+  # And so on, and so on, and so on...
+  #
+  # ```ruby
+  # p = Concurrent::Promise.fulfill(20).
+  #     then{|result| result - 10 }.
+  #     then{|result| result * 3 }.
+  #     then(executor: different_executor){|result| result % 5 }.execute
+  # ```
+  #
+  # The initial state of a newly created Promise depends on the state of its parent:
+  # - if parent is *unscheduled* the child will be *unscheduled*
+  # - if parent is *pending* the child will be *pending*
+  # - if parent is *fulfilled* the child will be *pending*
+  # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*)
+  #
+  # Promises are executed asynchronously from the main thread. By the time a
+  # child Promise finishes initialization it may be in a different state than its
+  # parent (by the time a child is created its parent may have completed
+  # execution and changed state). Despite being asynchronous, however, the order
+  # of execution of Promise objects in a chain (or tree) is strictly defined.
+  #
+  # There are multiple ways to create and execute a new `Promise`; all of them
+  # provide identical behavior:
+  #
+  # ```ruby
+  # # create, operate, then execute
+  # p1 = Concurrent::Promise.new{ "Hello World!" }
+  # p1.state #=> :unscheduled
+  # p1.execute
+  #
+  # # create and immediately execute
+  # p2 = Concurrent::Promise.new{ "Hello World!" }.execute
+  #
+  # # execute during creation
+  # p3 = Concurrent::Promise.execute{ "Hello World!" }
+  # ```
+  #
+  # Once the `execute` method is called a `Promise` becomes `pending`:
+  #
+  # ```ruby
+  # p = Concurrent::Promise.execute{ "Hello, world!" }
+  # p.state    #=> :pending
+  # p.pending? #=> true
+  # ```
+  #
+  # Wait a little bit, and the promise will resolve and provide a value:
+  #
+  # ```ruby
+  # p = Concurrent::Promise.execute{ "Hello, world!" }
+  # sleep(0.1)
+  #
+  # p.state      #=> :fulfilled
+  # p.fulfilled?
#=> true
+  # p.value      #=> "Hello, world!"
+  # ```
+  #
+  # If an exception occurs, the promise will be rejected and will provide
+  # a reason for the rejection:
+  #
+  # ```ruby
+  # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") }
+  # sleep(0.1)
+  #
+  # p.state     #=> :rejected
+  # p.rejected? #=> true
+  # p.reason    #=> "#<StandardError: Here comes the Boom!>"
+  # ```
+  #
+  # #### Rejection
+  #
+  # When a promise is rejected all its children will be rejected and will
+  # receive the rejection `reason` as the rejection callable parameter:
+  #
+  # ```ruby
+  # p = Concurrent::Promise.execute { Thread.pass; raise StandardError }
+  #
+  # c1 = p.then(-> reason { 42 })
+  # c2 = p.then(-> reason { raise 'Boom!' })
+  #
+  # c1.wait.state #=> :fulfilled
+  # c1.value      #=> 42
+  # c2.wait.state #=> :rejected
+  # c2.reason     #=> #<RuntimeError: Boom!>
+  # ```
+  #
+  # Once a promise is rejected it will continue to accept children that will
+  # receive immediate rejection (they will be executed asynchronously).
+  #
+  # #### Aliases
+  #
+  # The `then` method is the most generic alias: it accepts a block to be
+  # executed upon parent fulfillment and a callable to be executed upon parent
+  # rejection. At least one of them should be passed. The default block is `{
+  # |result| result }` that fulfills the child with the parent value. The
+  # default callable is `{ |reason| raise reason }` that rejects the child with
+  # the parent reason.
+  #
+  # - `on_success { |result| ... }` is the same as `then {|result| ... }`
+  # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )`
+  # - `rescue` is aliased by `catch` and `on_error`
+  class Promise < IVar
+
+    # Initialize a new Promise with the provided options.
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @!macro promise_init_options
+    #
+    #   @option opts [Promise] :parent the parent `Promise` when building a chain/tree
+    #   @option opts [Proc] :on_fulfill fulfillment handler
+    #   @option opts [Proc] :on_reject rejection handler
+    #   @option opts [object, Array] :args zero or more arguments to be passed
+    #     to the task block on execution
+    #
+    # @yield The block operation to be performed asynchronously.
+    #
+    # @raise [ArgumentError] if no block is given
+    #
+    # @see http://wiki.commonjs.org/wiki/Promises/A
+    # @see http://promises-aplus.github.io/promises-spec/
+    def initialize(opts = {}, &block)
+      opts.delete_if { |k, v| v.nil? }
+      super(NULL, opts.merge(__promise_body_from_block__: block), &nil)
+    end
+
+    # Create a new `Promise` and fulfill it immediately.
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @!macro promise_init_options
+    #
+    # @raise [ArgumentError] if no block is given
+    #
+    # @return [Promise] the newly created `Promise`
+    def self.fulfill(value, opts = {})
+      Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) }
+    end
+
+    # Create a new `Promise` and reject it immediately.
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @!macro promise_init_options
+    #
+    # @raise [ArgumentError] if no block is given
+    #
+    # @return [Promise] the newly created `Promise`
+    def self.reject(reason, opts = {})
+      Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) }
+    end
+
+    # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and
+    # passes the block to a new thread/thread pool for eventual execution.
+    # Does nothing if the `Promise` is in any state other than `:unscheduled`.
+    #
+    # @return [Promise] a reference to `self`
+    def execute
+      if root?
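+        # Only the root of a chain schedules work; the compare-and-set below
+        # succeeds at most once, so repeated #execute calls are harmless no-ops.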
+ if compare_and_set_state(:pending, :unscheduled) + set_pending + realize(@promise_body) + end + else + compare_and_set_state(:pending, :unscheduled) + @parent.execute + end + self + end + + # @!macro ivar_set_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def set(value = NULL, &block) + raise PromiseExecutionError.new('supported only on root promise') unless root? + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @promise_body = block || Proc.new { |result| value } + end + end + execute + end + + # @!macro ivar_fail_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def fail(reason = StandardError.new) + set { raise reason } + end + + # Create a new `Promise` object with the given block, execute it, and return the + # `:pending` object. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @return [Promise] the newly created `Promise` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + # + # @example + # promise = Concurrent::Promise.execute{ sleep(1); 42 } + # promise.state #=> :pending + def self.execute(opts = {}, &block) + new(opts, &block).execute + end + + # Chain a new promise off the current promise. + # + # @return [Promise] the new promise + # @yield The block operation to be performed asynchronously. + # @overload then(rescuer, executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + # @overload then(rescuer, executor: executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + def then(*args, &block) + if args.last.is_a?(::Hash) + executor = args.pop[:executor] + rescuer = args.first + else + rescuer, executor = args + end + + executor ||= @executor + + raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? + block = Proc.new { |result| result } unless block_given? + child = Promise.new( + parent: self, + executor: executor, + on_fulfill: block, + on_reject: rescuer + ) + + synchronize do + child.state = :pending if @state == :pending + child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled + child.on_reject(@reason) if @state == :rejected + @children << child + end + + child + end + + # Chain onto this promise an action to be undertaken on success + # (fulfillment). + # + # @yield The block to execute + # + # @return [Promise] self + def on_success(&block) + raise ArgumentError.new('no block given') unless block_given? + self.then(&block) + end + + # Chain onto this promise an action to be undertaken on failure + # (rejection). + # + # @yield The block to execute + # + # @return [Promise] self + def rescue(&block) + self.then(block) + end + + alias_method :catch, :rescue + alias_method :on_error, :rescue + + # Yield the successful result to the block that returns a promise. If that + # promise is also successful the result is the result of the yielded promise. + # If either part fails the whole also fails. + # + # @example + # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! 
#=> 3
+    #
+    # @return [Promise]
+    def flat_map(&block)
+      child = Promise.new(
+        parent: self,
+        executor: ImmediateExecutor.new,
+      )
+
+      on_error { |e| child.on_reject(e) }
+      on_success do |result1|
+        begin
+          inner = block.call(result1)
+          inner.execute
+          inner.on_success { |result2| child.on_fulfill(result2) }
+          inner.on_error { |e| child.on_reject(e) }
+        rescue => e
+          child.on_reject(e)
+        end
+      end
+
+      child
+    end
+
+    # Builds a promise that produces the result of promises in an Array
+    # and fails if any of them fails.
+    #
+    # @overload zip(*promises)
+    #   @param [Array<Promise>] promises
+    #
+    # @overload zip(*promises, opts)
+    #   @param [Array<Promise>] promises
+    #   @param [Hash] opts the configuration options
+    #   @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance.
+    #   @option opts [Boolean] :execute (true) execute promise before returning
+    #
+    # @return [Promise<Array>]
+    def self.zip(*promises)
+      opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {}
+      opts[:executor] ||= ImmediateExecutor.new
+      zero = if !opts.key?(:execute) || opts.delete(:execute)
+               fulfill([], opts)
+             else
+               Promise.new(opts) { [] }
+             end
+
+      promises.reduce(zero) do |p1, p2|
+        p1.flat_map do |results|
+          p2.then do |next_result|
+            results << next_result
+          end
+        end
+      end
+    end
+
+    # Builds a promise that produces the result of self and others in an Array
+    # and fails if any of them fails.
+    #
+    # @overload zip(*promises)
+    #   @param [Array<Promise>] others
+    #
+    # @overload zip(*promises, opts)
+    #   @param [Array<Promise>] others
+    #   @param [Hash] opts the configuration options
+    #   @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance.
+    #   @option opts [Boolean] :execute (true) execute promise before returning
+    #
+    # @return [Promise<Array>]
+    def zip(*others)
+      self.class.zip(self, *others)
+    end
+
+    # Aggregates a collection of promises and executes the `then` condition
+    # if all aggregated promises succeed. Executes the `rescue` handler with
+    # a `Concurrent::PromiseExecutionError` if any of the aggregated promises
+    # fail. Upon execution it will execute any of the aggregated promises that
+    # were not already executed.
+    #
+    # @!macro promise_self_aggregate
+    #
+    #   The returned promise will not yet have been executed. Additional `#then`
+    #   and `#rescue` handlers may still be provided. Once the returned promise
+    #   is executed the aggregate promises will also be executed (if they have
+    #   not been executed already). The results of the aggregate promises will
+    #   be checked upon completion. The necessary `#then` and `#rescue` blocks
+    #   on the aggregating promise will then be executed as appropriate. If the
+    #   `#rescue` handlers are executed the raised exception will be
+    #   `Concurrent::PromiseExecutionError`.
+    #
+    #   @param [Array] promises Zero or more promises to aggregate
+    #   @return [Promise] an unscheduled (not executed) promise that aggregates
+    #     the promises given as arguments
+    def self.all?(*promises)
+      aggregate(:all?, *promises)
+    end
+
+    # Aggregates a collection of promises and executes the `then` condition
+    # if any aggregated promises succeed. Executes the `rescue` handler with
+    # a `Concurrent::PromiseExecutionError` if any of the aggregated promises
+    # fail. Upon execution it will execute any of the aggregated promises that
+    # were not already executed.
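+    #
+    # A minimal aggregation sketch (illustrative, not part of the original docs):
+    #
+    #   good = Concurrent::Promise.execute { :ok }
+    #   bad  = Concurrent::Promise.execute { raise 'boom' }
+    #   Concurrent::Promise.any?(good, bad).execute.wait.fulfilled?  #=> true
+    #   Concurrent::Promise.all?(good, bad).execute.wait.rejected?   #=> true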
+    #
+    # @!macro promise_self_aggregate
+    def self.any?(*promises)
+      aggregate(:any?, *promises)
+    end
+
+    protected
+
+    def ns_initialize(value, opts)
+      super
+
+      @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
+      @args = get_arguments_from(opts)
+
+      @parent = opts.fetch(:parent) { nil }
+      @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } }
+      @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } }
+
+      @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result }
+      @state = :unscheduled
+      @children = []
+    end
+
+    # Aggregate a collection of zero or more promises under a composite promise,
+    # execute the aggregated promises and collect them into a standard Ruby array,
+    # call the given Ruby `Enumerable` predicate (such as `any?`, `all?`, `none?`,
+    # or `one?`) on the collection checking for the success or failure of each,
+    # then executing the composite's `#then` handlers if the predicate returns
+    # `true` or executing the composite's `#rescue` handlers if the predicate
+    # returns false.
+    #
+    # @!macro promise_self_aggregate
+    def self.aggregate(method, *promises)
+      composite = Promise.new do
+        completed = promises.collect do |promise|
+          promise.execute if promise.unscheduled?
+          promise.wait
+          promise
+        end
+        unless completed.empty? || completed.send(method) { |promise| promise.fulfilled? }
+          raise PromiseExecutionError
+        end
+      end
+      composite
+    end
+
+    # @!visibility private
+    def set_pending
+      synchronize do
+        @state = :pending
+        @children.each { |c| c.set_pending }
+      end
+    end
+
+    # @!visibility private
+    def root? # :nodoc:
+      @parent.nil?
+    end
+
+    # @!visibility private
+    def on_fulfill(result)
+      realize Proc.new { @on_fulfill.call(result) }
+      nil
+    end
+
+    # @!visibility private
+    def on_reject(reason)
+      realize Proc.new { @on_reject.call(reason) }
+      nil
+    end
+
+    # @!visibility private
+    def notify_child(child)
+      if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) }
+      if_state(:rejected) { child.on_reject(@reason) }
+    end
+
+    # @!visibility private
+    def complete(success, value, reason)
+      children_to_notify = synchronize do
+        set_state!(success, value, reason)
+        @children.dup
+      end
+
+      children_to_notify.each { |child| notify_child(child) }
+      observers.notify_and_delete_observers{ [Time.now, self.value, reason] }
+    end
+
+    # @!visibility private
+    def realize(task)
+      @executor.post do
+        success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args)
+        complete(success, value, reason)
+      end
+    end
+
+    # @!visibility private
+    def set_state!(success, value, reason)
+      set_state(success, value, reason)
+      event.set
+    end
+
+    # @!visibility private
+    def synchronized_set_state!(success, value, reason)
+      synchronize { set_state!(success, value, reason) }
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb
new file mode 100644
index 0000000..3cd1705
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb
@@ -0,0 +1,2168 @@
+require 'concurrent/synchronization/object'
+require 'concurrent/atomic/atomic_boolean'
+require 'concurrent/atomic/atomic_fixnum'
+require 'concurrent/collection/lock_free_stack'
+require 'concurrent/configuration'
+require 'concurrent/errors'
+require 'concurrent/re_include'
+
+module Concurrent
+
+  # 
{include:file:docs-source/promises-main.md}
+  module Promises
+
+    # @!macro promises.param.default_executor
+    #   @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the
+    #     global executor. Default executor propagates to chained futures unless overridden with
+    #     executor parameter or changed with {AbstractEventFuture#with_default_executor}.
+    #
+    # @!macro promises.param.executor
+    #   @param [Executor, :io, :fast] executor Instance of an executor or a name of the
+    #     global executor. The task is executed on it, default executor remains unchanged.
+    #
+    # @!macro promises.param.args
+    #   @param [Object] args arguments which are passed to the task when it's executed.
+    #     (It might be prepended with other arguments, see the @yield section.)
+    #
+    # @!macro promises.shortcut.on
+    #   Shortcut of {#$0_on} with default `:io` executor supplied.
+    #   @see #$0_on
+    #
+    # @!macro promises.shortcut.using
+    #   Shortcut of {#$0_using} with default `:io` executor supplied.
+    #   @see #$0_using
+    #
+    # @!macro promise.param.task-future
+    #   @yieldreturn will become result of the returned Future.
+    #     Its returned value becomes {Future#value} fulfilling it,
+    #     raised exception becomes {Future#reason} rejecting it.
+    #
+    # @!macro promise.param.callback
+    #   @yieldreturn is forgotten.
+
+    # Container of all {Future}, {Event} factory methods. They are never constructed directly with
+    # new.
+    module FactoryMethods
+      extend ReInclude
+      extend self
+
+      module Configuration
+        # @return [Executor, :io, :fast] the executor which is used when none is supplied
+        #   to a factory method. The method can be overridden in the receivers of
+        #   `include FactoryMethod`
+        def default_executor
+          :io
+        end
+      end
+
+      include Configuration
+
+      # @!macro promises.shortcut.on
+      # @return [ResolvableEvent]
+      def resolvable_event
+        resolvable_event_on default_executor
+      end
+
+      # Creates a resolvable event, user is responsible for resolving the event once by
+      # {Promises::ResolvableEvent#resolve}.
+      #
+      # @!macro promises.param.default_executor
+      # @return [ResolvableEvent]
+      def resolvable_event_on(default_executor = self.default_executor)
+        ResolvableEventPromise.new(default_executor).future
+      end
+
+      # @!macro promises.shortcut.on
+      # @return [ResolvableFuture]
+      def resolvable_future
+        resolvable_future_on default_executor
+      end
+
+      # Creates resolvable future, user is responsible for resolving the future once by
+      # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill},
+      # or {Promises::ResolvableFuture#reject}
+      #
+      # @!macro promises.param.default_executor
+      # @return [ResolvableFuture]
+      def resolvable_future_on(default_executor = self.default_executor)
+        ResolvableFuturePromise.new(default_executor).future
+      end
+
+      # @!macro promises.shortcut.on
+      # @return [Future]
+      def future(*args, &task)
+        future_on(default_executor, *args, &task)
+      end
+
+      # Constructs a new Future which will be resolved after block is evaluated on default executor.
+      # Evaluation begins immediately.
+      #
+      # @!macro promises.param.default_executor
+      # @!macro promises.param.args
+      # @yield [*args] to the task.
+      # @!macro promise.param.task-future
+      # @return [Future]
+      def future_on(default_executor, *args, &task)
+        ImmediateEventPromise.new(default_executor).future.then(*args, &task)
+      end
+
+      # Creates a resolved future which will be either fulfilled with the given value or rejected with
+      # the given reason.
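+      # A minimal usage sketch (illustrative, not part of the original docs):
+      #
+      #   Concurrent::Promises.resolved_future(true, 42, nil).value!  #=> 42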
+      #
+      # @param [true, false] fulfilled
+      # @param [Object] value
+      # @param [Object] reason
+      # @!macro promises.param.default_executor
+      # @return [Future]
+      def resolved_future(fulfilled, value, reason, default_executor = self.default_executor)
+        ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future
+      end
+
+      # Creates a resolved future which will be fulfilled with the given value.
+      #
+      # @!macro promises.param.default_executor
+      # @param [Object] value
+      # @return [Future]
+      def fulfilled_future(value, default_executor = self.default_executor)
+        resolved_future true, value, nil, default_executor
+      end
+
+      # Creates a resolved future which will be rejected with the given reason.
+      #
+      # @!macro promises.param.default_executor
+      # @param [Object] reason
+      # @return [Future]
+      def rejected_future(reason, default_executor = self.default_executor)
+        resolved_future false, nil, reason, default_executor
+      end
+
+      # Creates resolved event.
+      #
+      # @!macro promises.param.default_executor
+      # @return [Event]
+      def resolved_event(default_executor = self.default_executor)
+        ImmediateEventPromise.new(default_executor).event
+      end
+
+      # General constructor. Behaves differently based on the argument's type. It's provided for convenience
+      # but it's better to be explicit.
+      #
+      # @see rejected_future, resolved_event, fulfilled_future
+      # @!macro promises.param.default_executor
+      # @return [Event, Future]
+      #
+      # @overload make_future(nil, default_executor = self.default_executor)
+      #   @param [nil] nil
+      #   @return [Event] resolved event.
+      #
+      # @overload make_future(a_future, default_executor = self.default_executor)
+      #   @param [Future] a_future
+      #   @return [Future] a future which will be resolved when a_future is.
+      #
+      # @overload make_future(an_event, default_executor = self.default_executor)
+      #   @param [Event] an_event
+      #   @return [Event] an event which will be resolved when an_event is.
+      #
+      # @overload make_future(exception, default_executor = self.default_executor)
+      #   @param [Exception] exception
+      #   @return [Future] a rejected future with the exception as its reason.
+      #
+      # @overload make_future(value, default_executor = self.default_executor)
+      #   @param [Object] value when none of the above overloads fits
+      #   @return [Future] a fulfilled future with the value.
+      def make_future(argument = nil, default_executor = self.default_executor)
+        case argument
+        when AbstractEventFuture
+          # returning wrapper would change nothing
+          argument
+        when Exception
+          rejected_future argument, default_executor
+        when nil
+          resolved_event default_executor
+        else
+          fulfilled_future argument, default_executor
+        end
+      end
+
+      # @!macro promises.shortcut.on
+      # @return [Future, Event]
+      def delay(*args, &task)
+        delay_on default_executor, *args, &task
+      end
+
+      # Creates new event or future which is resolved only after it is touched,
+      # see {Concurrent::AbstractEventFuture#touch}.
+      #
+      # @!macro promises.param.default_executor
+      # @overload delay_on(default_executor, *args, &task)
+      #   If task is provided it returns a {Future} representing the result of the task.
+      #   @!macro promises.param.args
+      #   @yield [*args] to the task.
+      #   @!macro promise.param.task-future
+      #   @return [Future]
+      # @overload delay_on(default_executor)
+      #   If no task is provided, it returns an {Event}
+      #   @return [Event]
+      def delay_on(default_executor, *args, &task)
+        event = DelayPromise.new(default_executor).event
+        task ?
event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future, Event] + def schedule(intended_time, *args, &task) + schedule_on default_executor, intended_time, *args, &task + end + + # Creates new event or future which is resolved in intended_time. + # + # @!macro promises.param.default_executor + # @!macro promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + # @overload schedule_on(default_executor, intended_time, *args, &task) + # If task is provided it returns a {Future} representing the result of the task. + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + # @overload schedule_on(default_executor, intended_time) + # If no task is provided, it returns an {Event} + # @return [Event] + def schedule_on(default_executor, intended_time, *args, &task) + event = ScheduledPromise.new(default_executor, intended_time).event + task ? event.chain(*args, &task) : event + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. + # @!macro promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on default_executor, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on default_executor, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro promises.any-touch + # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. 
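+      #
+      #   A minimal usage sketch (illustrative, not part of the original docs):
+      #
+      #     fast = Concurrent::Promises.fulfilled_future(1)
+      #     slow = Concurrent::Promises.schedule(10) { 2 }
+      #     Concurrent::Promises.any(fast, slow).value!  #=> 1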
+ # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Event] + def any_event(*futures_and_or_events) + any_event_on default_executor, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + # TODO or rather a generic aggregator taking a function + end + + module InternalStates + # @!visibility private + class State + def resolved? + raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + # @!visibility private + class Pending < State + def resolved? + false + end + + def to_sym + :pending + end + end + + # @!visibility private + class Reserved < Pending + end + + # @!visibility private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + # @!visibility private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + # @!visibility private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + # @!visibility private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + # @!visibility private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? 
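+        # Carries both a value and a reason, but still reports as rejected.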
+ false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + # @!visibility private + PENDING = Pending.new + # @!visibility private + RESERVED = Reserved.new + # @!visibility private + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + end + + private_constant :InternalStates + + # @!macro promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + attr_atomic(:internal_state) + private :internal_state=, :swap_internal_state, :compare_and_set_internal_state, :update_internal_state + # @!method internal_state + # @!visibility private + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending? + !internal_state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved? + internal_state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. + # @return [self] + def touch + @Promise.touch + self + end + + # @!macro promises.touches + # Calls {Concurrent::AbstractEventFuture#touch}. + + # @!macro promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [self, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. 
+ # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! { resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Object] reason + def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback + end + + # @!macro promises.method.with_default_executor + # Crates new object with same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. + # @!macro promises.shortcut.event-future + # @abstract + # @return [AbstractEventFuture] + def with_default_executor(executor) + raise NotImplementedError + end + + # @!visibility private + def resolve_with(state, raise_on_reassign = true, reserved = false) + if compare_and_set_internal_state(reserved ? RESERVED : PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + return rejected_resolution(raise_on_reassign, state) + end + self + end + + # For inspection. + # @!visibility private + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |(method, args), promises| + promises.push(args[0]) if method == :callback_notify_blocked + end + end + + # For inspection. + # @!visibility private + def callbacks + @Callbacks.each.to_a + end + + # For inspection. + # @!visibility private + def promise + @Promise + end + + # For inspection. + # @!visibility private + def touched? + promise.touched? + end + + # For inspection. 
+ # @!visibility private + def waiting_threads + @Waiters.each.to_a + end + + # @!visibility private + def add_callback_notify_blocked(promise, index) + add_callback :callback_notify_blocked, promise, index + end + + # @!visibility private + def add_callback_clear_delayed_node(node) + add_callback(:callback_clear_delayed_node, node) + end + + # @!visibility private + def with_hidden_resolvable + # TODO (pitr-ch 10-Dec-2018): documentation, better name if in edge + self + end + + private + + def add_callback(method, *args) + state = internal_state + if state.resolved? + call_callback method, state, args + else + @Callbacks.push [method, args] + state = internal_state + # take back if it was resolved in the meanwhile + call_callbacks state if state.resolved? + end + self + end + + def callback_clear_delayed_node(state, node) + node.value = nil + end + + # @return [Boolean] + def wait_until_resolved(timeout) + return true if resolved? + + touch + + @Lock.synchronize do + @Waiters.increment + begin + unless resolved? + @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + resolved? + end + + def call_callback(method, state, args) + self.send method, state, *args + end + + def call_callbacks(state) + method, args = @Callbacks.pop + while method + call_callback method, state, args + method, args = @Callbacks.pop + end + end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_resolution(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_resolution st, ar, cb + end + end + + def callback_notify_blocked(state, promise, index) + promise.on_blocker_resolution self, index + end + end + + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro promises.method.zip + # Creates a new event or a future which will be resolved when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. + # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is_a?(Future) + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. + # + # @return [Event] + def any(event_or_future) + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event + end + + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end + + # @!macro promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. 
+    #   Intended time is interpreted from the moment the receiver is resolved, therefore it inserts
+    #   delay into the chain.
+    #
+    #   @!macro promises.param.intended_time
+    # @return [Event]
+    def schedule(intended_time)
+      chain do
+        event = ScheduledPromise.new(@DefaultExecutor, intended_time).event
+        ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event
+      end.flat_event
+    end
+
+    # Converts event to a future. The future is fulfilled when the event is resolved; the future may never fail.
+    #
+    # @return [Future]
+    def to_future
+      future = Promises.resolvable_future
+    ensure
+      chain_resolvable(future)
+    end
+
+    # Returns self, since this is an event
+    # @return [Event]
+    def to_event
+      self
+    end
+
+    # @!macro promises.method.with_default_executor
+    # @return [Event]
+    def with_default_executor(executor)
+      EventWrapperPromise.new_blocked_by1(self, executor).event
+    end
+
+    private
+
+    def rejected_resolution(raise_on_reassign, state)
+      raise Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign
+      return false
+    end
+
+    def callback_on_resolution(state, args, callback)
+      callback.call(*args)
+    end
+  end
+
+  # Represents a value which will become available in future. May reject with a reason instead,
+  # e.g. when the task raises an exception.
+  class Future < AbstractEventFuture
+
+    # Is it in fulfilled state?
+    # @return [Boolean]
+    def fulfilled?
+      state = internal_state
+      state.resolved? && state.fulfilled?
+    end
+
+    # Is it in rejected state?
+    # @return [Boolean]
+    def rejected?
+      state = internal_state
+      state.resolved? && !state.fulfilled?
+    end
+
+    # @!macro promises.warn.nil
+    #   @note Make sure returned `nil` is not confused with timeout, no value when rejected,
+    #     no reason when fulfilled, etc.
+    #     Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc.
+
+    # @!macro promises.method.value
+    #   Return value of the future.
+    #   @!macro promises.touches
+    #
+    #   @!macro promises.warn.blocks
+    #   @!macro promises.warn.nil
+    #   @!macro promises.param.timeout
+    #   @!macro promises.param.timeout_value
+    #     @param [Object] timeout_value a value returned by the method when it times out
+    # @return [Object, nil, timeout_value] the value of the Future when fulfilled,
+    #   timeout_value on timeout,
+    #   nil on rejection.
+    def value(timeout = nil, timeout_value = nil)
+      if wait_until_resolved timeout
+        internal_state.value
+      else
+        timeout_value
+      end
+    end
+
+    # Returns reason of future's rejection.
+    # @!macro promises.touches
+    #
+    # @!macro promises.warn.blocks
+    # @!macro promises.warn.nil
+    # @!macro promises.param.timeout
+    # @!macro promises.param.timeout_value
+    # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment.
+    def reason(timeout = nil, timeout_value = nil)
+      if wait_until_resolved timeout
+        internal_state.reason
+      else
+        timeout_value
+      end
+    end
+
+    # Returns triplet fulfilled?, value, reason.
+    # @!macro promises.touches
+    #
+    # @!macro promises.warn.blocks
+    # @!macro promises.param.timeout
+    # @return [Array(Boolean, Object, Object), nil] triplet of fulfilled?, value, reason, or nil
+    #   on timeout.
+    def result(timeout = nil)
+      internal_state.result if wait_until_resolved timeout
+    end
+
+    # @!macro promises.method.wait
+    # @raise [Exception] {#reason} on rejection
+    def wait!(timeout = nil)
+      result = wait_until_resolved!(timeout)
+      timeout ?
result : self
+    end
+
+    # @!macro promises.method.value
+    # @return [Object, nil, timeout_value] the value of the Future when fulfilled,
+    #   or nil on rejection,
+    #   or timeout_value on timeout.
+    # @raise [Exception] {#reason} on rejection
+    def value!(timeout = nil, timeout_value = nil)
+      if wait_until_resolved! timeout
+        internal_state.value
+      else
+        timeout_value
+      end
+    end
+
+    # Allows a rejected Future to be raised with the `raise` method.
+    # If the reason is not an exception, `RuntimeError.new(reason)` is returned.
+    #
+    # @example
+    #   raise Promises.rejected_future(StandardError.new("boom"))
+    #   raise Promises.rejected_future("or just boom")
+    # @raise [Concurrent::Error] when raising a future that is not rejected
+    # @return [Exception]
+    def exception(*args)
+      raise Concurrent::Error, 'it is not rejected' unless rejected?
+      raise ArgumentError unless args.size <= 1
+      reason = Array(internal_state.reason).flatten.compact
+      if reason.size > 1
+        ex = Concurrent::MultipleErrors.new reason
+        ex.set_backtrace(caller)
+        ex
+      else
+        ex = if reason[0].respond_to? :exception
+               reason[0].exception(*args)
+             else
+               RuntimeError.new(reason[0]).exception(*args)
+             end
+        ex.set_backtrace Array(ex.backtrace) + caller
+        ex
+      end
+    end
+
+    # @!macro promises.shortcut.on
+    # @return [Future]
+    def then(*args, &task)
+      then_on @DefaultExecutor, *args, &task
+    end
+
+    # Chains the task to be executed asynchronously on executor after it fulfills. Does not run
+    # the task if it rejects. It will resolve though, triggering any dependent futures.
+    #
+    # @!macro promises.param.executor
+    # @!macro promises.param.args
+    # @!macro promise.param.task-future
+    # @return [Future]
+    # @yield [value, *args] to the task.
+    def then_on(executor, *args, &task)
+      ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+    end
+
+    # @!macro promises.shortcut.on
+    # @return [Future]
+    def rescue(*args, &task)
+      rescue_on @DefaultExecutor, *args, &task
+    end
+
+    # Chains the task to be executed asynchronously on executor after it rejects. Does not run
+    # the task if it fulfills. It will resolve though, triggering any dependent futures.
+    #
+    # @!macro promises.param.executor
+    # @!macro promises.param.args
+    # @!macro promise.param.task-future
+    # @return [Future]
+    # @yield [reason, *args] to the task.
+    def rescue_on(executor, *args, &task)
+      RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future
+    end
+
+    # @!macro promises.method.zip
+    # @return [Future]
+    def zip(other)
+      if other.is_a?(Future)
+        ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future
+      else
+        ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future
+      end
+    end
+
+    alias_method :&, :zip
+
+    # Creates a new future which will be resolved when the first of receiver, `event_or_future`
+    # resolves. The returned future will have value nil if event_or_future is an event and resolves
+    # first.
+    #
+    # @return [Future]
+    def any(event_or_future)
+      AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future
+    end
+
+    alias_method :|, :any
+
+    # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}.
+    # In other words, it inserts delay into the chain of Futures making the rest of it lazily evaluated.
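+    #
+    # A minimal usage sketch (illustrative, not part of the original docs):
+    #
+    #   f = Concurrent::Promises.fulfilled_future(1).delay.then { |v| v + 1 }
+    #   f.resolved?  #=> false, nothing runs until touched
+    #   f.value!     #=> 2, value! touches the delayed part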
+ # + # @return [Future] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end + + # @!macro promise.method.schedule + # @return [Future] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end.flat + end + + # @!macro promises.method.with_default_executor + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new_blocked_by1(self, executor).future + end + + # Creates new future which will have result of the future returned by receiver. If receiver + # rejects it will have its rejection. + # + # @param [Integer] level how many levels of futures should flatten + # @return [Future] + def flat_future(level = 1) + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future + end + + alias_method :flat, :flat_future + + # Creates new event which will be resolved when the returned event by receiver is. + # Be careful if the receiver rejects it will just resolve since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # @!macro promises.shortcut.using + # @return [self] + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback + end + + # @!macro promises.shortcut.using + # @return [self] + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback + end + + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any encountered exception + # will become reason of the returned future. 
+ # + # @return [Future] + # @param [#call(value)] run_test + # an object which when called returns either Future to keep running with + # or nil, then the run completes with the value. + # The run_test can be used to extract the Future from deeper structure, + # or to distinguish Future which is a resulting value from a future + # which is suppose to continue running. + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? Promises.future(v, &body) : v + # end + # Promises.future(0, &body).run.value! # => 5 + def run(run_test = method(:run_test)) + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor, run_test).future + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + + # @return [String] Short string representation. + def to_s + if resolved? + format '%s with %s>', super[0..-2], (fulfilled? ? value : reason).inspect + else + super + end + end + + alias_method :inspect, :to_s + + private + + def run_test(v) + v if v.is_a?(Future) + end + + def rejected_resolution(raise_on_reassign, state) + if raise_on_reassign + if internal_state == RESERVED + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It is already reserved.") + else + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, + new_result: state.result) + end + end + return false + end + + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? + result + end + + def async_callback_on_fulfillment(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_fulfillment st, ar, cb + end + end + + def async_callback_on_rejection(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_rejection st, ar, cb + end + end + + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? + end + + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? + end + + def callback_on_resolution(state, args, callback) + callback.call(*state.result, *args) + end + + end + + # Marker module of Future, Event resolved manually. + module Resolvable + include InternalStates + end + + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable + + # @!macro raise_on_reassign + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. + + # @!macro promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already resolved + # @return [self, false] false is returned when raise_on_reassign is false and the receiver + # is already resolved. + # + + # Makes the event resolved, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + # @param [true, false] reserved + # Set to true if the resolvable is {#reserve}d by you, + # marks resolution of reserved resolvable events and futures explicitly. 
+ # Advanced feature, ignore unless you use {Resolvable#reserve} from edge. + def resolve(raise_on_reassign = true, reserved = false) + resolve_with RESOLVED, raise_on_reassign, reserved + end + + # Creates new event wrapping receiver, effectively hiding the resolve method. + # + # @return [Event] + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. + # + # @param [true, false] resolve_on_timeout + # If it times out and the argument is true it will also resolve the event. + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = false) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(false) + else + false + end + end + end + + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable + + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, + # which triggers all dependent futures. + # + # @param [true, false] fulfilled + # @param [Object] value + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true, reserved = false) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign, reserved) + end + + # Makes the future fulfilled with `value`, + # which triggers all dependent futures. + # + # @param [Object] value + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def fulfill(value, raise_on_reassign = true, reserved = false) + resolve_with Fulfilled.new(value), raise_on_reassign, reserved + end + + # Makes the future rejected with `reason`, + # which triggers all dependent futures. + # + # @param [Object] reason + # @!macro promise.param.raise_on_reassign + # @!macro promise.param.reserved + def reject(reason, raise_on_reassign = true, reserved = false) + resolve_with Rejected.new(reason), raise_on_reassign, reserved + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on rejection. + def evaluate_to!(*args, &block) + promise.evaluate_to(*args, block).wait! + end + + # @!macro promises.resolvable.resolve_on_timeout + # @param [::Array(true, Object, nil), ::Array(false, nil, Exception), nil] resolve_on_timeout + # If it times out and the argument is not nil it will also resolve the future + # to the provided resolution. + + # Behaves as {AbstractEventFuture#wait} but has one additional optional argument + # resolve_on_timeout. 
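+      # @example Rejecting when nothing resolves it in time (an illustrative sketch)
+      #   f = Promises.resolvable_future
+      #   # the triplet is (fulfilled?, value, reason):
+      #   f.wait(0.1, [false, nil, StandardError.new('timed out')])
+      #   f.rejected? #=> true, unless another thread resolved it first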
+ # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @see AbstractEventFuture#wait + def wait(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + !resolve(*resolve_on_timeout, false) + else + false + end + end + + # Behaves as {Future#wait!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [self, true, false] + # @raise [Exception] {#reason} on rejection + # @see Future#wait! + def wait!(timeout = nil, resolve_on_timeout = nil) + super(timeout) or if resolve_on_timeout + if resolve(*resolve_on_timeout, false) + false + else + # if it fails to resolve it was resolved in the meantime + # so return true as if there was no timeout + raise self if rejected? + true + end + else + false + end + end + + # Behaves as {Future#value} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @see Future#value + def value(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#value!} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Object, timeout_value, nil] + # @raise [Exception] {#reason} on rejection + # @see Future#value! + def value!(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved! timeout + internal_state.value + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + raise self if rejected? + return internal_state.value + end + end + timeout_value + end + end + + # Behaves as {Future#reason} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [Exception, timeout_value, nil] + # @see Future#reason + def reason(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.reason + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + return internal_state.reason + end + end + timeout_value + end + end + + # Behaves as {Future#result} but has one additional optional argument + # resolve_on_timeout. + # + # @!macro promises.resolvable.resolve_on_timeout + # @return [::Array(Boolean, Object, Exception), nil] + # @see Future#result + def result(timeout = nil, resolve_on_timeout = nil) + if wait_until_resolved timeout + internal_state.result + else + if resolve_on_timeout + unless resolve(*resolve_on_timeout, false) + # if it fails to resolve it was resolved in the meantime + # so return value as if there was no timeout + internal_state.result + end + end + # otherwise returns nil + end + end + + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
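+      # @example Handing out only the read side (an illustrative sketch)
+      #   resolvable = Promises.resolvable_future
+      #   readonly   = resolvable.with_hidden_resolvable
+      #   readonly.respond_to?(:fulfill) #=> false, consumers can only read and chain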
+ # + # @return [Future] + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future + end + end + + # @abstract + # @private + class AbstractPromise < Synchronization::Object + safe_initialization! + include InternalStates + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + format '%s %s>', super[0..-2], @Future + end + + alias_method :inspect, :to_s + + def delayed_because + nil + end + + private + + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + resolve_with Fulfilled.new(block.call(*args)) + rescue Exception => error + resolve_with Rejected.new(error) + raise error unless error.is_a?(StandardError) + end + end + + class ResolvableEventPromise < AbstractPromise + def initialize(default_executor) + super ResolvableEvent.new(self, default_executor) + end + end + + class ResolvableFuturePromise < AbstractPromise + def initialize(default_executor) + super ResolvableFuture.new(self, default_executor) + end + + public :evaluate_to + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + + private_class_method :new + + def self.new_blocked_by1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed_because + promise = new(blocker_delayed, 1, *args, &block) + blocker.add_callback_notify_blocked promise, 0 + promise + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed_because + blocker_delayed2 = blocker2.promise.delayed_because + delayed = if blocker_delayed1 && blocker_delayed2 + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + blocker_delayed1 || blocker_delayed2 + end + promise = new(delayed, 2, *args, &block) + blocker1.add_callback_notify_blocked promise, 0 + blocker2.add_callback_notify_blocked promise, 1 + promise + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } + promise = new(delayed, blockers.size, *args, &block) + blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } + promise + end + + def self.add_delayed(delayed1, delayed2) + if delayed1 && delayed2 + delayed1.push delayed2 + delayed1 + else + delayed1 || delayed2 + end + end + + def initialize(delayed, blockers_count, future) + super(future) + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed_because + @Delayed + end + + def touch + clear_and_propagate_touch + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_and_propagate_touch(stack_or_element = @Delayed) + return if stack_or_element.nil? + + if stack_or_element.is_a? 
LockFreeStack + stack_or_element.clear_each { |element| clear_and_propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? + end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to(*args, task) + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + def initialize(delayed_because, blockers_count, event_or_future) + delayed = LockFreeStack.of1(self) + super(delayed, blockers_count, event_or_future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @DelayedBecause = delayed_because || LockFreeStack.new + + event_or_future.add_callback_clear_delayed_node delayed.peek + end + + def touch + if @Touched.make_true + clear_and_propagate_touch @DelayedBecause + end + end + + private + + def touched? + @Touched.value + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + delayed = future.promise.delayed_because + if touched? 
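+          # the receiver was already touched, so propagate the touch straight
+          # into the newly discovered delayed futures instead of recording them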
+ clear_and_propagate_touch delayed + else + BlockedPromise.add_delayed @DelayedBecause, delayed + clear_and_propagate_touch @DelayedBecause if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when AbstractEventFuture + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor, run_test) + super delayed, 1, Future.new(self, default_executor) + @RunTest = run_test + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? 
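+          # a non-fulfilled (rejected) step ends the run; its state becomes
+          # the state of the result future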
+ resolve_with internal_state + return 0 + end + + value = internal_state.value + continuation_future = @RunTest.call value + + if continuation_future + add_delayed_of continuation_future + continuation_future.add_callback_notify_blocked self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count, nil) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
+ @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = ::Array.new(@Resolutions.size) + reasons = ::Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, event_or_future, index) + (event_or_future.is_a?(Event) ? event_or_future.resolved? : event_or_future.fulfilled?) || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + event = Event.new(self, default_executor) + @Delayed = LockFreeStack.of1(self) + super event + event.add_callback_clear_delayed_node @Delayed.peek + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed_because + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.resolve_with RESOLVED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, + :DelayPromise, + :ScheduledPromise + + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb new file mode 100644 index 0000000..600bc6a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb @@ -0,0 +1,60 @@ +module Concurrent + + # Methods form module A included to a module B, which is already included into class C, + # will not be visible in the C class. If this module is extended to B then A's methods + # are correctly made visible to C. + # + # @example + # module A + # def a + # :a + # end + # end + # + # module B1 + # end + # + # class C1 + # include B1 + # end + # + # module B2 + # extend Concurrent::ReInclude + # end + # + # class C2 + # include B2 + # end + # + # B1.send :include, A + # B2.send :include, A + # + # C1.new.respond_to? :a # => false + # C2.new.respond_to? :a # => true + # + # @!visibility private + module ReInclude + # @!visibility private + def included(base) + (@re_include_to_bases ||= []) << [:include, base] + super(base) + end + + # @!visibility private + def extended(base) + (@re_include_to_bases ||= []) << [:extend, base] + super(base) + end + + # @!visibility private + def include(*modules) + result = super(*modules) + modules.reverse.each do |module_being_included| + (@re_include_to_bases ||= []).each do |method, mod| + mod.send method, module_being_included + end + end + result + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb new file mode 100644 index 0000000..429fc06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb @@ -0,0 +1,331 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/utility/monotonic_time' + +require 'concurrent/options' + +module Concurrent + + # `ScheduledTask` is a close relative of `Concurrent::Future` but with one + # important difference: A `Future` is set to execute as soon as possible + # whereas a `ScheduledTask` is set to execute after a specified delay. This + # implementation is loosely based on Java's + # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). + # It is a more feature-rich variant of {Concurrent.timer}. 
+ # + # The *intended* schedule time of task execution is set on object construction + # with the `delay` argument. The delay is a numeric (floating point or integer) + # representing a number of seconds in the future. Any other value or a numeric + # equal to or less than zero will result in an exception. The *actual* schedule + # time of task execution is set when the `execute` method is called. + # + # The constructor can also be given zero or more processing options. Currently + # the only supported options are those recognized by the + # [Dereferenceable](Dereferenceable) module. + # + # The final constructor argument is a block representing the task to be performed. + # If no block is given an `ArgumentError` will be raised. + # + # **States** + # + # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it + # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` + # has one additional state, however. While the task (block) is being executed the + # state of the object will be `:processing`. This additional state is necessary + # because it has implications for task cancellation. + # + # **Cancellation** + # + # A `:pending` task can be cancelled using the `#cancel` method. A task in any + # other state, including `:processing`, cannot be cancelled. The `#cancel` + # method returns a boolean indicating the success of the cancellation attempt. + # A cancelled `ScheduledTask` cannot be restarted. It is immutable. + # + # **Obligation and Observation** + # + # The result of a `ScheduledTask` can be obtained either synchronously or + # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) + # module and the + # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) + # module from the Ruby standard library. With one exception `ScheduledTask` + # behaves identically to [Future](Observable) with regard to these modules. + # + # @!macro copy_options + # + # @example Basic usage + # + # require 'concurrent/scheduled_task' + # require 'csv' + # require 'open-uri' + # + # class Ticker + # def get_year_end_closing(symbol, year, api_key) + # uri = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=#{symbol}&apikey=#{api_key}&datatype=csv" + # data = [] + # csv = URI.parse(uri).read + # if csv.include?('call frequency') + # return :rate_limit_exceeded + # end + # CSV.parse(csv, headers: true) do |row| + # data << row['close'].to_f if row['timestamp'].include?(year.to_s) + # end + # year_end = data.first + # year_end + # rescue => e + # p e + # end + # end + # + # api_key = ENV['ALPHAVANTAGE_KEY'] + # abort(error_message) unless api_key + # + # # Future + # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013, api_key) } + # price.state #=> :pending + # price.pending? #=> true + # price.value(0) #=> nil (does not block) + # + # sleep(1) # do other stuff + # + # price.value #=> 63.65 (after blocking if necessary) + # price.state #=> :fulfilled + # price.fulfilled? #=> true + # price.value #=> 63.65 + # + # @example Successful task execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.state #=> :unscheduled + # task.execute + # task.state #=> pending + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> true + # task.rejected? #=> false + # task.value #=> 'What does the fox say?' 
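+  #
+  # @example Rescheduling a pending task (an illustrative sketch)
+  #
+  #   task = Concurrent::ScheduledTask.new(10){ 'later' }.execute
+  #   task.reschedule(60) #=> true while still :pending, now due in ~60 seconds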
+  #
+  # @example One line creation and execution
+  #
+  #   task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute
+  #   task.state #=> pending
+  #
+  #   task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' }
+  #   task.state #=> pending
+  #
+  # @example Failed task execution
+  #
+  #   task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') }
+  #   task.pending? #=> true
+  #
+  #   # wait for it...
+  #   sleep(3)
+  #
+  #   task.unscheduled? #=> false
+  #   task.pending? #=> false
+  #   task.fulfilled? #=> false
+  #   task.rejected? #=> true
+  #   task.value #=> nil
+  #   task.reason #=> #<StandardError: Call me maybe?>
+  #
+  # @example Task execution with observation
+  #
+  #   observer = Class.new{
+  #     def update(time, value, reason)
+  #       puts "The task completed at #{time} with value '#{value}'"
+  #     end
+  #   }.new
+  #
+  #   task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }
+  #   task.add_observer(observer)
+  #   task.execute
+  #   task.pending? #=> true
+  #
+  #   # wait for it...
+  #   sleep(3)
+  #
+  #   #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?'
+  #
+  # @!macro monotonic_clock_warning
+  #
+  # @see Concurrent.timer
+  class ScheduledTask < IVar
+    include Comparable
+
+    # The executor on which to execute the task.
+    # @!visibility private
+    attr_reader :executor
+
+    # Schedule a task for execution at a specified future time.
+    #
+    # @param [Float] delay the number of seconds to wait for before executing the task
+    #
+    # @yield the task to be performed
+    #
+    # @!macro executor_and_deref_options
+    #
+    # @option opts [object, Array] :args zero or more arguments to be passed to the task
+    #   block on execution
+    #
+    # @raise [ArgumentError] When no block is given
+    # @raise [ArgumentError] When given a time that is in the past
+    def initialize(delay, opts = {}, &task)
+      raise ArgumentError.new('no block given') unless block_given?
+      raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0
+
+      super(NULL, opts, &nil)
+
+      synchronize do
+        ns_set_state(:unscheduled)
+        @parent = opts.fetch(:timer_set, Concurrent.global_timer_set)
+        @args = get_arguments_from(opts)
+        @delay = delay.to_f
+        @task = task
+        @time = nil
+        @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
+        self.observers = Collection::CopyOnNotifyObserverSet.new
+      end
+    end
+
+    # The `delay` value given at instantiation.
+    #
+    # @return [Float] the initial delay.
+    def initial_delay
+      synchronize { @delay }
+    end
+
+    # The monotonic time at which the task is scheduled to be executed.
+    #
+    # @return [Float] the schedule time or nil if `unscheduled`
+    def schedule_time
+      synchronize { @time }
+    end
+
+    # Comparator which orders by schedule time.
+    #
+    # @!visibility private
+    def <=>(other)
+      schedule_time <=> other.schedule_time
+    end
+
+    # Has the task been cancelled?
+    #
+    # @return [Boolean] true if the task is in the given state else false
+    def cancelled?
+      synchronize { ns_check_state?(:cancelled) }
+    end
+
+    # Is the task execution in progress?
+    #
+    # @return [Boolean] true if the task is in the given state else false
+    def processing?
+      synchronize { ns_check_state?(:processing) }
+    end
+
+    # Cancel this task and prevent it from executing. A task can only be
+    # cancelled if it is pending or unscheduled.
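+    # @example (an illustrative sketch; `cleanup` stands in for any task)
+    #   task = Concurrent::ScheduledTask.execute(10){ cleanup }
+    #   task.cancel #=> true while the task is still :pending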
+ # + # @return [Boolean] true if successfully cancelled else false + def cancel + if compare_and_set_state(:cancelled, :pending, :unscheduled) + complete(false, nil, CancelledOperationError.new) + # To avoid deadlocks this call must occur outside of #synchronize + # Changing the state above should prevent redundant calls + @parent.send(:remove_task, self) + else + false + end + end + + # Reschedule the task using the original delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @return [Boolean] true if successfully rescheduled else false + def reset + synchronize{ ns_reschedule(@delay) } + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @raise [ArgumentError] When given a time that is in the past + def reschedule(delay) + delay = delay.to_f + raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 + synchronize{ ns_reschedule(delay) } + end + + # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` + # and starts counting down toward execution. Does nothing if the `ScheduledTask` is + # in any state other than `:unscheduled`. + # + # @return [ScheduledTask] a reference to `self` + def execute + if compare_and_set_state(:pending, :unscheduled) + synchronize{ ns_schedule(@delay) } + end + self + end + + # Create a new `ScheduledTask` object with the given block, execute it, and return the + # `:pending` object. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @!macro executor_and_deref_options + # + # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + def self.execute(delay, opts = {}, &task) + new(delay, opts, &task).execute + end + + # Execute the task. + # + # @!visibility private + def process_task + safe_execute(@task, @args) + end + + protected :set, :try_set, :fail, :complete + + protected + + # Schedule the task using the given delay and the current time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_schedule(delay) + @delay = delay + @time = Concurrent.monotonic_time + @delay + @parent.send(:post_task, self) + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. 
+ # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_reschedule(delay) + return false unless ns_check_state?(:pending) + @parent.send(:remove_task, self) && ns_schedule(delay) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb new file mode 100644 index 0000000..eee4eff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb @@ -0,0 +1,64 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' +require 'set' + +module Concurrent + + # @!macro concurrent_set + # + # A thread-safe subclass of Set. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` + # which is union of `a` and `b`, then it writes the union to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#merge` instead. + # + # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` + + # @!macro internal_implementation_note + SetImplementation = case + when Concurrent.on_cruby? + # The CRuby implementation of Set is written in Ruby itself and is + # not thread safe for certain methods. + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class CRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_cruby CRubySet + CRubySet + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubySet < ::Set + include JRuby::Synchronized + end + + JRubySet + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubySet + TruffleRubySet + + else + warn 'Possibly unsupported Ruby implementation' + ::Set + end + private_constant :SetImplementation + + # @!macro concurrent_set + class Set < SetImplementation + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb new file mode 100644 index 0000000..99b8561 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb @@ -0,0 +1,139 @@ +require 'concurrent/errors' +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # An thread-safe, write-once variation of Ruby's standard `Struct`. + # Each member can have its value set at most once, either at construction + # or any time thereafter. Attempting to assign a value to a member + # that has already been set will result in a `Concurrent::ImmutabilityError`. 
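+  # @example Write-once members (an illustrative sketch)
+  #   Point = Concurrent::SettableStruct.new(:x, :y)
+  #   p = Point.new
+  #   p.x = 1 # members may be set at most once...
+  #   p.x = 2 # ...so this raises Concurrent::ImmutabilityError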
+ # + # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` + # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword + module SettableStruct + include Synchronization::AbstractStruct + + # @!macro struct_values + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # @raise [Concurrent::ImmutabilityError] if the given member has already been set + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize do + unless @values[member].nil? + raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[member] = value + end + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + private + + # @!visibility private + def initialize_copy(original) + synchronize do + super(original) + ns_initialize_copy + end + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize do + unless @values[index].nil? 
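+                  # write-once check: a slot counts as set once it holds a non-nil value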
+ raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[index] = value + end + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb new file mode 100644 index 0000000..6d8cf4b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb @@ -0,0 +1,13 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +require 'concurrent/synchronization/object' +require 'concurrent/synchronization/lockable_object' +require 'concurrent/synchronization/condition' +require 'concurrent/synchronization/lock' + +module Concurrent + # @!visibility private + module Synchronization + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb new file mode 100644 index 0000000..d9050b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb @@ -0,0 +1,102 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first +require 'concurrent/utility/monotonic_time' +require 'concurrent/synchronization/object' + +module Concurrent + module Synchronization + + # @!visibility private + class AbstractLockableObject < Synchronization::Object + + protected + + # @!macro synchronization_object_method_synchronize + # + # @yield runs the block synchronized against this object, + # equivalent of java's `synchronize(this) {}` + # @note can by made public in descendants if required by `public :synchronize` + def synchronize + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_wait_until + # + # Wait until condition is met or timeout passes, + # protects against spurious wake-ups. + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @yield condition to be met + # @yieldreturn [true, false] + # @return [true, false] if condition met + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait_until(timeout = nil, &condition) + # synchronize { ns_wait_until(timeout, &condition) } + # end + # ``` + def ns_wait_until(timeout = nil, &condition) + if timeout + wait_until = Concurrent.monotonic_time + timeout + loop do + now = Concurrent.monotonic_time + condition_result = condition.call + return condition_result if now >= wait_until || condition_result + ns_wait wait_until - now + end + else + ns_wait timeout until condition.call + true + end + end + + # @!macro synchronization_object_method_ns_wait + # + # Wait until another thread calls #signal or #broadcast, + # spurious wake-ups can happen. 
+ # + # @param [Numeric, nil] timeout in seconds, `nil` means no timeout + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def wait(timeout = nil) + # synchronize { ns_wait(timeout) } + # end + # ``` + def ns_wait(timeout = nil) + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_signal + # + # Signal one waiting thread. + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def signal + # synchronize { ns_signal } + # end + # ``` + def ns_signal + raise NotImplementedError + end + + # @!macro synchronization_object_method_ns_broadcast + # + # Broadcast to all waiting threads. + # @return [self] + # @note only to be used inside synchronized block + # @note to provide direct access to this method in a descendant add method + # ``` + # def broadcast + # synchronize { ns_broadcast } + # end + # ``` + def ns_broadcast + raise NotImplementedError + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb new file mode 100644 index 0000000..7cd2dec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb @@ -0,0 +1,22 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + class AbstractObject + def initialize + # nothing to do + end + + # @!visibility private + # @abstract + def full_memory_barrier + raise NotImplementedError + end + + def self.attr_volatile(*names) + raise NotImplementedError + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb new file mode 100644 index 0000000..1fe90c1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb @@ -0,0 +1,171 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module AbstractStruct + + # @!visibility private + def initialize(*values) + super() + ns_initialize(*values) + end + + # @!macro struct_length + # + # Returns the number of struct members. + # + # @return [Fixnum] the number of struct members + def length + self.class::MEMBERS.length + end + alias_method :size, :length + + # @!macro struct_members + # + # Returns the struct members as an array of symbols. + # + # @return [Array] the struct members as an array of symbols + def members + self.class::MEMBERS.dup + end + + protected + + # @!macro struct_values + # + # @!visibility private + def ns_values + @values.dup + end + + # @!macro struct_values_at + # + # @!visibility private + def ns_values_at(indexes) + @values.values_at(*indexes) + end + + # @!macro struct_to_h + # + # @!visibility private + def ns_to_h + length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} + end + + # @!macro struct_get + # + # @!visibility private + def ns_get(member) + if member.is_a? 
Integer
+          if member >= @values.length
+            raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})")
+          end
+          @values[member]
+        else
+          send(member)
+        end
+      rescue NoMethodError
+        raise NameError.new("no member '#{member}' in struct")
+      end
+
+      # @!macro struct_equality
+      #
+      # @!visibility private
+      def ns_equality(other)
+        self.class == other.class && self.values == other.values
+      end
+
+      # @!macro struct_each
+      #
+      # @!visibility private
+      def ns_each
+        values.each{|value| yield value }
+      end
+
+      # @!macro struct_each_pair
+      #
+      # @!visibility private
+      def ns_each_pair
+        @values.length.times do |index|
+          yield self.class::MEMBERS[index], @values[index]
+        end
+      end
+
+      # @!macro struct_select
+      #
+      # @!visibility private
+      def ns_select
+        values.select{|value| yield value }
+      end
+
+      # @!macro struct_inspect
+      #
+      # @!visibility private
+      def ns_inspect
+        struct = pr_underscore(self.class.ancestors[1])
+        clazz = ((self.class.to_s =~ /^#<Class:/) == 0) ? '' : " #{self.class}"
+        "#<#{struct}#{clazz} #{ns_to_h}>"
+      end
+
+      # @!macro struct_merge
+      #
+      # @!visibility private
+      def ns_merge(other, &block)
+        self.class.new(*self.to_h.merge(other, &block).values)
+      end
+
+      # @!visibility private
+      def ns_initialize_copy
+        @values = @values.map do |val|
+          begin
+            val.clone
+          rescue TypeError
+            val
+          end
+        end
+      end
+
+      # @!visibility private
+      def pr_underscore(clazz)
+        word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229
+        word.gsub!(/::/, '/')
+        word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2')
+        word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
+        word.tr!("-", "_")
+        word.downcase!
+        word
+      end
+
+      # @!visibility private
+      def self.define_struct_class(parent, base, name, members, &block)
+        clazz = Class.new(base || Object) do
+          include parent
+          self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze)
+          def ns_initialize(*values)
+            raise ArgumentError.new('struct size differs') if values.length > length
+            @values = values.fill(nil, values.length..length-1)
+          end
+        end
+        unless name.nil?
+          begin
+            parent.send :remove_const, name if parent.const_defined?(name, false)
+            parent.const_set(name, clazz)
+            clazz
+          rescue NameError
+            raise NameError.new("identifier #{name} needs to be constant")
+          end
+        end
+        members.each_with_index do |member, index|
+          clazz.send :remove_method, member if clazz.instance_methods.include? member
+          clazz.send(:define_method, member) do
+            @values[index]
+          end
+        end
+        clazz.class_exec(&block) unless block.nil?
+        clazz.singleton_class.send :alias_method, :[], :new
+        clazz
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb
new file mode 100644
index 0000000..5daa68b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb
@@ -0,0 +1,62 @@
+require 'concurrent/synchronization/lockable_object'
+
+module Concurrent
+  module Synchronization
+
+    # @!visibility private
+    # TODO (pitr-ch 04-Dec-2016): should be in edge
+    class Condition < LockableObject
+      safe_initialization!
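+      # Implementation note: callers take the external @Lock first, and the ns_*
+      # methods then take this object's own lock, so every wait involves two
+      # locks (see the TODOs below).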
+ + # TODO (pitr 12-Sep-2015): locks two objects, improve + # TODO (pitr 26-Sep-2015): study + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/concurrent/locks/AbstractQueuedSynchronizer.java#AbstractQueuedSynchronizer.Node + + singleton_class.send :alias_method, :private_new, :new + private_class_method :new + + def initialize(lock) + super() + @Lock = lock + end + + def wait(timeout = nil) + @Lock.synchronize { ns_wait(timeout) } + end + + def ns_wait(timeout = nil) + synchronize { super(timeout) } + end + + def wait_until(timeout = nil, &condition) + @Lock.synchronize { ns_wait_until(timeout, &condition) } + end + + def ns_wait_until(timeout = nil, &condition) + synchronize { super(timeout, &condition) } + end + + def signal + @Lock.synchronize { ns_signal } + end + + def ns_signal + synchronize { super } + end + + def broadcast + @Lock.synchronize { ns_broadcast } + end + + def ns_broadcast + synchronize { super } + end + end + + class LockableObject < LockableObjectImplementation + def new_condition + Condition.private_new(self) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb new file mode 100644 index 0000000..139e08d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb @@ -0,0 +1,29 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +module Concurrent + module Synchronization + case + when Concurrent.on_cruby? + def self.full_memory_barrier + # relying on undocumented behavior of CRuby, GVL acquire has lock which ensures visibility of ivars + # https://github.com/ruby/ruby/blob/ruby_2_2/thread_pthread.c#L204-L211 + end + + when Concurrent.on_jruby? + require 'concurrent/utility/native_extension_loader' + def self.full_memory_barrier + JRubyAttrVolatile.full_memory_barrier + end + + when Concurrent.on_truffleruby? + def self.full_memory_barrier + TruffleRuby.full_memory_barrier + end + + else + warn 'Possibly unsupported Ruby implementation' + def self.full_memory_barrier + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb new file mode 100644 index 0000000..7693046 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb @@ -0,0 +1,15 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +module Concurrent + module Synchronization + + if Concurrent.on_jruby? 
+ + # @!visibility private + # @!macro internal_implementation_note + class JRubyLockableObject < AbstractLockableObject + + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb new file mode 100644 index 0000000..f90e0b5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb @@ -0,0 +1,38 @@ +require 'concurrent/synchronization/lockable_object' + +module Concurrent + module Synchronization + + # @!visibility private + # TODO (pitr-ch 04-Dec-2016): should be in edge + class Lock < LockableObject + # TODO use JavaReentrantLock on JRuby + + public :synchronize + + def wait(timeout = nil) + synchronize { ns_wait(timeout) } + end + + public :ns_wait + + def wait_until(timeout = nil, &condition) + synchronize { ns_wait_until(timeout, &condition) } + end + + public :ns_wait_until + + def signal + synchronize { ns_signal } + end + + public :ns_signal + + def broadcast + synchronize { ns_broadcast } + end + + public :ns_broadcast + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb new file mode 100644 index 0000000..08d2ff6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb @@ -0,0 +1,75 @@ +require 'concurrent/utility/engine' +require 'concurrent/synchronization/abstract_lockable_object' +require 'concurrent/synchronization/mutex_lockable_object' +require 'concurrent/synchronization/jruby_lockable_object' + +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + LockableObjectImplementation = case + when Concurrent.on_cruby? + MutexLockableObject + when Concurrent.on_jruby? + JRubyLockableObject + when Concurrent.on_truffleruby? + MutexLockableObject + else + warn 'Possibly unsupported Ruby implementation' + MonitorLockableObject + end + private_constant :LockableObjectImplementation + + # Safe synchronization under any Ruby implementation. + # It provides methods like {#synchronize}, {#wait}, {#signal} and {#broadcast}. + # Provides a single layer which can improve its implementation over time without changes needed to + # the classes using it. Use {Synchronization::Object} not this abstract class. + # + # @note this object does not support usage together with + # [`Thread#wakeup`](http://ruby-doc.org/core/Thread.html#method-i-wakeup) + # and [`Thread#raise`](http://ruby-doc.org/core/Thread.html#method-i-raise). + # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and + # `Thread#wakeup` will not work on all platforms. 
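As an editorial aside on the `Lock` class added just above: it simply makes `synchronize` and the `ns_*` primitives public, pairing each `wait`/`signal`/`broadcast` with the lock. A minimal usage sketch follows (ours, not part of the gem; the class is marked `@!visibility private`, so this is illustrative only):

```ruby
require 'concurrent/synchronization/lock'

lock  = Concurrent::Synchronization::Lock.new
ready = false

waiter = Thread.new do
  # wait_until re-evaluates the condition block each time the thread is woken
  lock.wait_until { ready }
  puts 'resource ready'
end

sleep 0.1                         # crude: give the waiter time to park
lock.synchronize { ready = true } # publish the state change under the lock
lock.broadcast                    # wake every thread blocked in wait/wait_until
waiter.join
```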
+ # + # @see Event implementation as an example of this class use + # + # @example simple + # class AnClass < Synchronization::Object + # def initialize + # super + # synchronize { @value = 'asd' } + # end + # + # def value + # synchronize { @value } + # end + # end + # + # @!visibility private + class LockableObject < LockableObjectImplementation + + # TODO (pitr 12-Sep-2015): make private for c-r, prohibit subclassing + # TODO (pitr 12-Sep-2015): we inherit too much ourselves :/ + + # @!method initialize(*args, &block) + # @!macro synchronization_object_method_initialize + + # @!method synchronize + # @!macro synchronization_object_method_synchronize + + # @!method wait_until(timeout = nil, &condition) + # @!macro synchronization_object_method_ns_wait_until + + # @!method wait(timeout = nil) + # @!macro synchronization_object_method_ns_wait + + # @!method signal + # @!macro synchronization_object_method_ns_signal + + # @!method broadcast + # @!macro synchronization_object_method_ns_broadcast + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb new file mode 100644 index 0000000..acc9745 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb @@ -0,0 +1,89 @@ +require 'concurrent/synchronization/abstract_lockable_object' + +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module ConditionSignalling + protected + + def ns_signal + @__Condition__.signal + self + end + + def ns_broadcast + @__Condition__.broadcast + self + end + end + + + # @!visibility private + # @!macro internal_implementation_note + class MutexLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! + + def initialize + super() + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + def initialize_copy(other) + super + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new + end + + protected + + def synchronize + if @__Lock__.owned? + yield + else + @__Lock__.synchronize { yield } + end + end + + def ns_wait(timeout = nil) + @__Condition__.wait @__Lock__, timeout + self + end + end + + # @!visibility private + # @!macro internal_implementation_note + class MonitorLockableObject < AbstractLockableObject + include ConditionSignalling + + safe_initialization! 
+ + def initialize + super() + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + def initialize_copy(other) + super + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond + end + + protected + + def synchronize # TODO may be a problem with lock.synchronize { lock.wait } + @__Lock__.synchronize { yield } + end + + def ns_wait(timeout = nil) + @__Condition__.wait timeout + self + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb new file mode 100644 index 0000000..e839c9f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb @@ -0,0 +1,151 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first + +require 'concurrent/synchronization/safe_initialization' +require 'concurrent/synchronization/volatile' +require 'concurrent/atomic/atomic_reference' + +module Concurrent + module Synchronization + + # Abstract object providing final, volatile, and CAS extensions to build other concurrent abstractions. + # - final instance variables see {Object.safe_initialization!} + # - volatile instance variables see {Object.attr_volatile} + # - atomic instance variables see {Object.attr_atomic} + # @!visibility private + class Object < AbstractObject + include Volatile + + # TODO make it a module if possible + + # @!method self.attr_volatile(*names) + # Creates methods for reading and writing (as `attr_accessor` does) to an instance variable with + # volatile (Java) semantic. The instance variable should be accessed only through generated methods. + # + # @param [::Array<Symbol>] names of the instance variables to be volatile + # @return [::Array<Symbol>] names of defined method names + + # Has to be called by children. + def initialize + super + __initialize_atomic_fields__ + end + + def self.safe_initialization! + extend SafeInitialization unless safe_initialization? + end + + def self.safe_initialization? + self.singleton_class < SafeInitialization + end + + # For testing purposes, quite slow. Injects assert code to new method which will raise if class instance contains + # any instance variables with CamelCase names and isn't {.safe_initialization?}. + # @raise when an offending instance is found + # @return [true] + def self.ensure_safe_initialization_when_final_fields_are_present + Object.class_eval do + def self.new(*args, &block) + object = super(*args, &block) + ensure + has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } + if has_final_field && !safe_initialization? + raise "there was an instance of #{object.class} with final field but not marked with safe_initialization!" + end + end + end + true + end + + # Creates methods for reading and writing to an instance variable with + # volatile (Java) semantic as {.attr_volatile} does. + # The instance variable should be accessed only through generated methods. + # This method generates the following methods: `value`, `value=(new_value) #=> new_value`, + # `swap_value(new_value) #=> old_value`, + # `compare_and_set_value(expected, value) #=> true || false`, `update_value(&block)`. + # @param [::Array<Symbol>] names of the instance variables to be volatile with CAS. + # @return [::Array<Symbol>] names of defined method names. + # @!macro attr_atomic + # @!method $1 + # @return [Object] The $1. + # @!method $1=(new_$1) + # Set the $1.
+ # @return [Object] new_$1. + # @!method swap_$1(new_$1) + # Set the $1 to new_$1 and return the old $1. + # @return [Object] old $1 + # @!method compare_and_set_$1(expected_$1, new_$1) + # Sets the $1 to new_$1 if the current $1 is expected_$1 + # @return [true, false] + # @!method update_$1(&block) + # Updates the $1 using the block. + # @yield [Object] Calculate a new $1 using given (old) $1 + # @yieldparam [Object] old $1 + # @return [Object] new $1 + def self.attr_atomic(*names) + @__atomic_fields__ ||= [] + @__atomic_fields__ += names + safe_initialization! + define_initialize_atomic_fields + + names.each do |name| + ivar = :"@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar}.get + end + + def #{name}=(value) + #{ivar}.set value + end + + def swap_#{name}(value) + #{ivar}.swap value + end + + def compare_and_set_#{name}(expected, value) + #{ivar}.compare_and_set expected, value + end + + def update_#{name}(&block) + #{ivar}.update(&block) + end + RUBY + end + names.flat_map { |n| [n, :"#{n}=", :"swap_#{n}", :"compare_and_set_#{n}", :"update_#{n}"] } + end + + # @param [true, false] inherited should inherited volatile-with-CAS fields also be returned? + # @return [::Array<Symbol>] the volatile-with-CAS fields defined on this class. + def self.atomic_attributes(inherited = true) + @__atomic_fields__ ||= [] + ((superclass.atomic_attributes if superclass.respond_to?(:atomic_attributes) && inherited) || []) + @__atomic_fields__ + end + + # @return [true, false] is the attribute with name atomic? + def self.atomic_attribute?(name) + atomic_attributes.include? name + end + + private + + def self.define_initialize_atomic_fields + assignments = @__atomic_fields__.map do |name| + "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" + end.join("\n") + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def __initialize_atomic_fields__ + super + #{assignments} + end + RUBY + end + + private_class_method :define_initialize_atomic_fields + + def __initialize_atomic_fields__ + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb new file mode 100644 index 0000000..f785e35 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb @@ -0,0 +1,36 @@ +require 'concurrent/synchronization/full_memory_barrier' + +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + # + # By extending this module, a class and all its children are marked to be constructed safely, meaning that + # all writes (ivar initializations) are made visible to all readers of the newly constructed object. It ensures + # the same behaviour as Java's final fields. + # + # Due to using Kernel#extend, the module is not included again if already present in the ancestors, + # which avoids extra overhead.
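To make the `attr_atomic` machinery above concrete, here is a sketch with a hypothetical `Counter` class (our example, not from the gem), showing the five methods generated per attribute, each backed by a `Concurrent::AtomicReference`:

```ruby
require 'concurrent/synchronization/object'

class Counter < Concurrent::Synchronization::Object
  attr_atomic :count # defines count, count=, swap_count, compare_and_set_count, update_count

  def initialize
    super            # runs __initialize_atomic_fields__, creating @AtomicCount
    self.count = 0
  end
end

c = Counter.new
c.update_count { |v| v + 1 }  #=> 1, retries internally on CAS failure
c.compare_and_set_count(1, 5) #=> true
c.swap_count(0)               #=> 5, returns the previous value
```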
+ # + # @example + # class AClass < Concurrent::Synchronization::Object + # extend Concurrent::Synchronization::SafeInitialization + # + # def initialize + # @AFinalValue = 'value' # published safely, #foo will never return nil + # end + # + # def foo + # @AFinalValue + # end + # end + module SafeInitialization + def new(*args, &block) + super(*args, &block) + ensure + Concurrent::Synchronization.full_memory_barrier + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb new file mode 100644 index 0000000..46e8ba6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb @@ -0,0 +1,101 @@ +require 'concurrent/utility/native_extension_loader' # load native parts first +require 'concurrent/utility/engine' +require 'concurrent/synchronization/full_memory_barrier' + +module Concurrent + module Synchronization + + # Volatile adds the attr_volatile class method when included. + # + # @example + # class Foo + # include Concurrent::Synchronization::Volatile + # + # attr_volatile :bar + # + # def initialize + # self.bar = 1 + # end + # end + # + # foo = Foo.new + # foo.bar + # => 1 + # foo.bar = 2 + # => 2 + # + # @!visibility private + module Volatile + def self.included(base) + base.extend(ClassMethods) + end + + def full_memory_barrier + Synchronization.full_memory_barrier + end + + module ClassMethods + if Concurrent.on_cruby? + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + end + RUBY + end + names.map { |n| [n, :"#{n}="] }.flatten + end + + elsif Concurrent.on_jruby? + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + ::Concurrent::Synchronization::JRubyAttrVolatile.instance_variable_get_volatile(self, :#{ivar}) + end + + def #{name}=(value) + ::Concurrent::Synchronization::JRubyAttrVolatile.instance_variable_set_volatile(self, :#{ivar}, value) + end + RUBY + + end + names.map { |n| [n, :"#{n}="] }.flatten + end + + else + warn 'Possibly unsupported Ruby implementation' unless Concurrent.on_truffleruby? 
+ + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + ::Concurrent::Synchronization.full_memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + ::Concurrent::Synchronization.full_memory_barrier + end + RUBY + end + + names.map { |n| [n, :"#{n}="] }.flatten + end + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000..019d843 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb @@ -0,0 +1,47 @@ +require 'delegate' +require 'monitor' + +module Concurrent + # This class provides a trivial way to synchronize all calls to a given object + # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls + # around the delegated `#send`. Example: + # + # array = [] # not thread-safe on many impls + # array = SynchronizedDelegator.new([]) # thread-safe + # + # A simple `Monitor` provides a very coarse-grained way to synchronize a given + # object, in that it will cause synchronization for methods that have no need + # for it, but this is a trivial way to get thread-safety where none may exist + # currently on some implementations. + # + # This class is currently being considered for inclusion into stdlib, via + # https://bugs.ruby-lang.org/issues/8556 + # + # @!visibility private + class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb new file mode 100644 index 0000000..c67084a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb @@ -0,0 +1,16 @@ +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter + CPU_COUNT = 16 # is there a way to determine this? 
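The bit arithmetic behind `FIXNUM_BIT_SIZE` and `MAX_INT` above is easy to check by hand; a quick sketch, assuming a 64-bit MRI build where `0.size` is 8:

```ruby
word_bytes  = 0.size                  # 8 on a 64-bit build, 4 on 32-bit
fixnum_bits = (word_bytes * 8) - 2    # 62: one bit for the Fixnum tag, one for the sign
max_int     = (2 ** fixnum_bits) - 1  # largest value still stored as an immediate Fixnum
raise 'unexpected platform' unless max_int == 4_611_686_018_427_387_903
```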
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb new file mode 100644 index 0000000..7a6e8d5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb @@ -0,0 +1,74 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/striped64' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of Doug Lea's jsr166e.LongAdder class version 1.8 + # available in the public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to a single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + # + # @!visibility private + class Adder < Striped64 + # Adds the given value. + def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000..a07678d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,81 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/utility/engine' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # Provides the cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in.
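Before the `CheapLockable` documentation continues below, a quick illustration of the `Adder` just defined (an internal `@!visibility private` API, so this is a sketch rather than an endorsement of direct use):

```ruby
require 'concurrent/thread_safe/util/adder'

adder = Concurrent::ThreadSafe::Util::Adder.new
threads = 8.times.map do
  Thread.new { 10_000.times { adder.increment } }
end
threads.each(&:join)
adder.sum #=> 80000; accurate here because all updates complete before the read
```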
+ # + # Usage: + # class A + # include CheapLockable + # + # def do_exclusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? + # do_something + # cheap_broadcast # wake up others + # end + # end + # end + # + # @!visibility private + module CheapLockable + private + if Concurrent.on_jruby? + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#synchronize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_synchronize+'s block. + def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_synchronize+'s block. + def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb new file mode 100644 index 0000000..01eb98f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb @@ -0,0 +1,52 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/utility/engine' + +# Shim for TruffleRuby.synchronized +if Concurrent.on_truffleruby? && !TruffleRuby.respond_to?(:synchronized) + module TruffleRuby + def self.synchronized(object, &block) + Truffle::System.synchronized(object, &block) + end + end +end + +module Concurrent + module ThreadSafe + module Util + def self.make_synchronized_on_cruby(klass) + klass.class_eval do + def initialize(*args, &block) + @_monitor = Monitor.new + super + end + + def initialize_copy(other) + # make sure a copy is not sharing a monitor with the original object! + @_monitor = Monitor.new + super + end + end + + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized.
Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + + def self.make_synchronized_on_truffleruby(klass) + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args, &block) + TruffleRuby.synchronized(self) { super(*args, &block) } + end + RUBY + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000..b54be39 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,38 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/tuple' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + class PowerOfTwoTuple < Concurrent::Tuple + + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb new file mode 100644 index 0000000..4169c3d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb @@ -0,0 +1,246 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. 
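Interrupting the Striped64 commentary briefly: the `PowerOfTwoTuple` defined just above is what makes the masked per-thread indexing cheap, since `(size - 1) & hash` reduces to keeping the low bits. A small sketch of the internal API (illustrative only):

```ruby
require 'concurrent/thread_safe/util/power_of_two_tuple'

tuple = Concurrent::ThreadSafe::Util::PowerOfTwoTuple.new(8)
tuple.hash_to_index(0b1011_0101) #=> 5, only the low three bits survive the mask
tuple.next_in_size_table.size    #=> 16, the doubled table used on contention

begin
  Concurrent::ThreadSafe::Util::PowerOfTwoTuple.new(6)
rescue ArgumentError => e
  e.message #=> "size must be a power of 2 (6 provided)"
end
```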
+ # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUs. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUs forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + # + # @!visibility private + class Striped64 + + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + # + # @!visibility private + class Cell < Concurrent::AtomicReference + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + + # @!visibility private + def self.padding + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created + # hide from yardoc in a method + attr_reader :padding_0, :padding_1, :padding_2, :padding_3, :padding_4, :padding_5, :padding_6, :padding_7, :padding_8, :padding_9, :padding_10, :padding_11 + end + padding + end + + extend Volatile + attr_volatile :cells, # Table of cells.
When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. + # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+was_uncontended+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? + collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. + def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free?
&& !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb new file mode 100644 index 0000000..cdac2a3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb @@ -0,0 +1,75 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + module Volatile + + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of +Concurrent::AtomicReference+. + # + # Usage: + # class Foo + # extend Concurrent::ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? 
+ include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000..bdde2dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,50 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # An xorshift random number (positive +Fixnum+s) generator, provides + # a reasonably cheap way to generate thread-local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift(x)).odd? # thread-locally generate the next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+.
+ def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "y^=y>>a; y^=y<<b; y^=y>>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "y^=y>>a; y^=y<<b; y^=y>>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb new file mode 100644 index 0000000..b69cfc8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb @@ -0,0 +1,311 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/dereferenceable' +require 'concurrent/concern/observable' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/scheduled_task' + +module Concurrent + + # A very common concurrency pattern is to run a thread that performs a task at + # regular intervals. The thread that performs the task sleeps for the given + # interval then wakes up and performs the task. Lather, rinse, repeat... This + # pattern causes two problems. First, it is difficult to test the business + # logic of the task because the task itself is tightly coupled with the + # concurrency logic. Second, an exception raised while performing the task can + # cause the entire thread to abend. In a long-running application where the + # task thread is intended to run for days/weeks/years a crashed task thread + # can pose a significant problem. `TimerTask` alleviates both problems. + # + # When a `TimerTask` is launched it starts a thread for monitoring the + # execution interval. The `TimerTask` thread does not perform the task, + # however. Instead, the TimerTask launches the task on a separate thread. + # Should the task experience an unrecoverable crash only the task thread will + # crash. This makes the `TimerTask` very fault tolerant. Additionally, the + # `TimerTask` thread can respond to the success or failure of the task, + # performing logging or ancillary operations. + # + # One other advantage of `TimerTask` is that it forces the business logic to + # be completely decoupled from the concurrency logic. The business logic can + # be tested separately then passed to the `TimerTask` for scheduling and + # running. + # + # In some cases it may be necessary for a `TimerTask` to affect its own + # execution cycle. To facilitate this, a reference to the TimerTask instance + # is passed as an argument to the provided block every time the task is + # executed. + # + # The `TimerTask` class includes the `Dereferenceable` mixin module so the + # result of the last execution is always available via the `#value` method. + # Dereferencing options can be passed to the `TimerTask` during construction or + # at any later time using the `#set_deref_options` method.
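Stepping back to the `XorShiftRandom` module defined above, driving it by hand looks like this (internal API; sketch only):

```ruby
require 'concurrent/thread_safe/util/xor_shift_random'

x = Concurrent::ThreadSafe::Util::XorShiftRandom.get # random non-zero seed
3.times do
  x = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(x)
  puts x # deterministic for a given seed; no contention on Kernel.rand
end
```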
+ # + # `TimerTask` supports notification through the Ruby standard library + # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # Observable} module. On execution the `TimerTask` will notify the observers + # with three arguments: time of execution, the result of the block (or nil on + # failure), and any raised exceptions (or nil on success). + # + # @!macro copy_options + # + # @example Basic usage + # task = Concurrent::TimerTask.new{ puts 'Boom!' } + # task.execute + # + # task.execution_interval #=> 60 (default) + # + # # wait 60 seconds... + # #=> 'Boom!' + # + # task.shutdown #=> true + # + # @example Configuring `:execution_interval` + # task = Concurrent::TimerTask.new(execution_interval: 5) do + # puts 'Boom!' + # end + # + # task.execution_interval #=> 5 + # + # @example Immediate execution with `:run_now` + # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } + # task.execute + # + # #=> 'Boom!' + # + # @example Last `#value` and `Dereferenceable` mixin + # task = Concurrent::TimerTask.new( + # dup_on_deref: true, + # execution_interval: 5 + # ){ Time.now } + # + # task.execute + # Time.now #=> 2013-11-07 18:06:50 -0500 + # sleep(10) + # task.value #=> 2013-11-07 18:06:55 -0500 + # + # @example Controlling execution from within the block + # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| + # task.execution_interval.times{ print 'Boom! ' } + # print "\n" + # task.execution_interval += 1 + # if task.execution_interval > 5 + # puts 'Stopping...' + # task.shutdown + # end + # end + # + # timer_task.execute # blocking call - this task will stop itself + # #=> Boom! + # #=> Boom! Boom! + # #=> Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! Boom! + # #=> Stopping... + # + # @example Observation + # class TaskObserver + # def update(time, result, ex) + # if result + # print "(#{time}) Execution successfully returned #{result}\n" + # else + # print "(#{time}) Execution failed with error #{ex}\n" + # end + # end + # end + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ 42 } + # task.add_observer(TaskObserver.new) + # task.execute + # sleep 4 + # + # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ sleep } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:07:25 -0400) Execution timed out + # #=> (2013-10-13 19:07:27 -0400) Execution timed out + # #=> (2013-10-13 19:07:29 -0400) Execution timed out + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError + # task.shutdown + # + # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html + class TimerTask < RubyExecutorService + include Concern::Dereferenceable + include Concern::Observable + + # Default `:execution_interval` in seconds. + EXECUTION_INTERVAL = 60 + + # Default `:timeout_interval` in seconds. 
+ TIMEOUT_INTERVAL = 30 + + # Create a new TimerTask with the given task and configuration. + # + # @!macro timer_task_initialize + # @param [Hash] opts the options defining task execution. + # @option opts [Integer] :execution_interval number of seconds between + # task executions (default: EXECUTION_INTERVAL) + # @option opts [Boolean] :run_now Whether to run the task immediately + # upon instantiation or to wait until the first execution_interval + # has passed (default: false) + # + # @!macro deref_options + # + # @raise ArgumentError when no block is given. + # + # @yield to the block after :execution_interval seconds have passed since + # the last yield + # @yieldparam task a reference to the `TimerTask` instance so that the + # block can control its own lifecycle. Necessary since `self` will + # refer to the execution context of the block rather than the running + # `TimerTask`. + # + # @return [TimerTask] the new `TimerTask` + def initialize(opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + super + set_deref_options opts + end + + # Is the executor running? + # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + def running? + @running.true? + end + + # Execute a previously created `TimerTask`. + # + # @return [TimerTask] a reference to `self` + # + # @example Instance and execute in separate steps + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> false + # task.execute + # task.running? #=> true + # + # @example Instance and execute in one line + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute + # task.running? #=> true + def execute + synchronize do + if @running.false? + @running.make_true + schedule_next_task(@run_now ? 0 : @execution_interval) + end + end + self + end + + # Create and execute a new `TimerTask`. + # + # @!macro timer_task_initialize + # + # @example + # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> true + def self.execute(opts = {}, &task) + TimerTask.new(opts, &task).execute + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval + synchronize { @execution_interval } + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @execution_interval = value } + end + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval + warn 'TimerTask timeouts are now ignored as these were not able to be implemented correctly' + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed.
+ def timeout_interval=(value) + warn 'TimerTask timeouts are now ignored as these were not able to be implemented correctly' + end + + private :post, :<< + + private + + def ns_initialize(opts, &task) + set_deref_options(opts) + + self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL + if opts[:timeout] || opts[:timeout_interval] + warn 'TimerTask timeouts are now ignored as these were not able to be implemented correctly' + end + @run_now = opts[:now] || opts[:run_now] + @executor = Concurrent::SafeTaskExecutor.new(task) + @running = Concurrent::AtomicBoolean.new(false) + @value = nil + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + # @!visibility private + def ns_shutdown_execution + @running.make_false + super + end + + # @!visibility private + def ns_kill_execution + @running.make_false + super + end + + # @!visibility private + def schedule_next_task(interval = execution_interval) + ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) + nil + end + + # @!visibility private + def execute_task(completion) + return nil unless @running.true? + _success, value, reason = @executor.execute(self) + if completion.try? + self.value = value + schedule_next_task + time = Time.now + observers.notify_observers do + [time, self.value, reason] + end + end + nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb new file mode 100644 index 0000000..56212cf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb @@ -0,0 +1,82 @@ +require 'concurrent/atomic/atomic_reference' + +module Concurrent + + # A fixed size array with volatile (synchronized, thread safe) getters/setters. + # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. + # + # @example + # tuple = Concurrent::Tuple.new(16) + # + # tuple.set(0, :foo) #=> :foo | volatile write + # tuple.get(0) #=> :foo | volatile read + # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS + # tuple.cas(0, :foo, :baz) #=> false | strong CAS + # tuple.get(0) #=> :bar | volatile read + # + # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia + # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple + # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable + class Tuple + include Enumerable + + # The (fixed) size of the tuple. + attr_reader :size + + # Create a new tuple of the given size. + # + # @param [Integer] size the number of elements in the tuple + def initialize(size) + @size = size + @tuple = tuple = ::Array.new(size) + i = 0 + while i < size + tuple[i] = Concurrent::AtomicReference.new + i += 1 + end + end + + # Get the value of the element at the given index.
+ # + # @param [Integer] i the index from which to retrieve the value + # @return [Object] the value at the given index or nil if the index is out of bounds + def get(i) + return nil if i >= @size || i < 0 + @tuple[i].get + end + alias_method :volatile_get, :get + + # Set the element at the given index to the given value + # + # @param [Integer] i the index for the element to set + # @param [Object] value the value to set at the given index + # + # @return [Object] the new value of the element at the given index or nil if the index is out of bounds + def set(i, value) + return nil if i >= @size || i < 0 + @tuple[i].set(value) + end + alias_method :volatile_set, :set + + # Set the value at the given index to the new value if and only if the current + # value matches the given old value. + # + # @param [Integer] i the index for the element to set + # @param [Object] old_value the value to compare against the current value + # @param [Object] new_value the value to set at the given index + # + # @return [Boolean] true if the value at the given element was set else false + def compare_and_set(i, old_value, new_value) + return false if i >= @size || i < 0 + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + # Calls the given block once for each element in self, passing that element as a parameter. + # + # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index + def each + @tuple.each {|ref| yield ref.get} + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb new file mode 100644 index 0000000..5d02ef0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb @@ -0,0 +1,222 @@ +require 'set' +require 'concurrent/synchronization/object' + +module Concurrent + + # A `TVar` is a transactional variable - a single-element container that + # is used as part of a transaction - see `Concurrent::atomically`. + # + # @!macro thread_safe_variable_comparison + # + # {include:file:docs-source/tvar.md} + class TVar < Synchronization::Object + safe_initialization! + + # Create a new `TVar` with an initial value. + def initialize(value) + @value = value + @lock = Mutex.new + end + + # Get the value of a `TVar`. + def value + Concurrent::atomically do + Transaction::current.read(self) + end + end + + # Set the value of a `TVar`. + def value=(value) + Concurrent::atomically do + Transaction::current.write(self, value) + end + end + + # @!visibility private + def unsafe_value # :nodoc: + @value + end + + # @!visibility private + def unsafe_value=(value) # :nodoc: + @value = value + end + + # @!visibility private + def unsafe_lock # :nodoc: + @lock + end + + end + + # Run a block that reads and writes `TVar`s as a single atomic transaction. + # With respect to the value of `TVar` objects, the transaction is atomic, in + # that it either happens or it does not, consistent, in that the `TVar` + # objects involved will never enter an illegal state, and isolated, in that + # transactions never interfere with each other. You may recognise these + # properties from database transactions. + # + # There are some very important and unusual semantics that you must be aware of: + # + # * Most importantly, the block that you pass to atomically may be executed + # more than once. 
In most cases your code should be free of
+  #   side-effects, except via TVar.
+  #
+  # * If an exception escapes an atomically block it will abort the transaction.
+  #
+  # * It is undefined behaviour to use callcc or Fiber with atomically.
+  #
+  # * If you create a new thread within an atomically, it will not be part of
+  #   the transaction. Creating a thread counts as a side-effect.
+  #
+  # Transactions within transactions are flattened to a single transaction.
+  #
+  # @example
+  #   a = TVar.new(100_000)
+  #   b = TVar.new(100)
+  #
+  #   Concurrent::atomically do
+  #     a.value -= 10
+  #     b.value += 10
+  #   end
+  def atomically
+    raise ArgumentError.new('no block given') unless block_given?
+
+    # Get the current transaction
+
+    transaction = Transaction::current
+
+    # Are we not already in a transaction (not nested)?
+
+    if transaction.nil?
+      # New transaction
+
+      begin
+        # Retry loop
+
+        loop do
+
+          # Create a new transaction
+
+          transaction = Transaction.new
+          Transaction::current = transaction
+
+          # Run the block, aborting on exceptions
+
+          begin
+            result = yield
+          rescue Transaction::AbortError => e
+            transaction.abort
+            result = Transaction::ABORTED
+          rescue Transaction::LeaveError => e
+            transaction.abort
+            break result
+          rescue => e
+            transaction.abort
+            raise e
+          end
+          # If we can commit, break out of the loop
+
+          if result != Transaction::ABORTED
+            if transaction.commit
+              break result
+            end
+          end
+        end
+      ensure
+        # Clear the current transaction
+
+        Transaction::current = nil
+      end
+    else
+      # Nested transaction - flatten it and just run the block
+
+      yield
+    end
+  end
+
+  # Abort a currently running transaction - see `Concurrent::atomically`.
+  def abort_transaction
+    raise Transaction::AbortError.new
+  end
+
+  # Leave a transaction without committing or aborting - see `Concurrent::atomically`.
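+  #
+  # A minimal usage sketch (assuming `a` is a TVar as in the `atomically`
+  # example above); writes made before leaving are rolled back, because the
+  # transaction is aborted rather than committed:
+  #
+  #   Concurrent::atomically do
+  #     a.value = 0
+  #     Concurrent::leave_transaction # exits the block; the write is discarded
+  #   end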
+ def leave_transaction + raise Transaction::LeaveError.new + end + + module_function :atomically, :abort_transaction, :leave_transaction + + private + + # @!visibility private + class Transaction + + ABORTED = ::Object.new + + OpenEntry = Struct.new(:value, :modified) + + AbortError = Class.new(StandardError) + LeaveError = Class.new(StandardError) + + def initialize + @open_tvars = {} + end + + def read(tvar) + entry = open(tvar) + entry.value + end + + def write(tvar, value) + entry = open(tvar) + entry.modified = true + entry.value = value + end + + def open(tvar) + entry = @open_tvars[tvar] + + unless entry + unless tvar.unsafe_lock.try_lock + Concurrent::abort_transaction + end + + entry = OpenEntry.new(tvar.unsafe_value, false) + @open_tvars[tvar] = entry + end + + entry + end + + def abort + unlock + end + + def commit + @open_tvars.each do |tvar, entry| + if entry.modified + tvar.unsafe_value = entry.value + end + end + + unlock + end + + def unlock + @open_tvars.each_key do |tvar| + tvar.unsafe_lock.unlock + end + end + + def self.current + Thread.current[:current_tvar_transaction] + end + + def self.current=(transaction) + Thread.current[:current_tvar_transaction] = transaction + end + + end + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb new file mode 100644 index 0000000..0c574b2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb @@ -0,0 +1,45 @@ +module Concurrent + # @!visibility private + module Utility + + # @!visibility private + module EngineDetector + def on_cruby? + RUBY_ENGINE == 'ruby' + end + + def on_jruby? + RUBY_ENGINE == 'jruby' + end + + def on_truffleruby? + RUBY_ENGINE == 'truffleruby' + end + + def on_windows? + !(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil? + end + + def on_osx? + !(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil? + end + + def on_linux? + !(RbConfig::CONFIG['host_os'] =~ /linux/).nil? + end + + def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) + result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) + comparisons = { :== => [0], + :>= => [1, 0], + :<= => [-1, 0], + :> => [1], + :< => [-1] } + comparisons.fetch(comparison).include? result + end + end + end + + # @!visibility private + extend Utility::EngineDetector +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb new file mode 100644 index 0000000..1c987d8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb @@ -0,0 +1,19 @@ +module Concurrent + + # @!macro monotonic_get_time + # + # Returns the current time as tracked by the application monotonic clock. + # + # @param [Symbol] unit the time unit to be returned, can be either + # :float_second, :float_millisecond, :float_microsecond, :second, + # :millisecond, :microsecond, or :nanosecond default to :float_second. 
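+  #
+  # An illustrative sketch (`do_work` is a placeholder): subtracting two
+  # readings gives an elapsed time that is immune to system clock adjustments.
+  #
+  #   started = Concurrent.monotonic_time
+  #   do_work
+  #   elapsed = Concurrent.monotonic_time - started # float seconds by default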
+ # + # @return [Float] The current monotonic time since some unspecified + # starting point + # + # @!macro monotonic_clock_warning + def monotonic_time(unit = :float_second) + Process.clock_gettime(Process::CLOCK_MONOTONIC, unit) + end + module_function :monotonic_time +end diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb new file mode 100644 index 0000000..bf7bab3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb @@ -0,0 +1,77 @@ +require 'concurrent/utility/engine' +# Synchronization::AbstractObject must be defined before loading the extension +require 'concurrent/synchronization/abstract_object' + +module Concurrent + # @!visibility private + module Utility + # @!visibility private + module NativeExtensionLoader + + def allow_c_extensions? + Concurrent.on_cruby? + end + + def c_extensions_loaded? + defined?(@c_extensions_loaded) && @c_extensions_loaded + end + + def load_native_extensions + if Concurrent.on_cruby? && !c_extensions_loaded? + ['concurrent/concurrent_ruby_ext', + "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" + ].each { |p| try_load_c_extension p } + end + + if Concurrent.on_jruby? && !java_extensions_loaded? + begin + require 'concurrent/concurrent_ruby.jar' + set_java_extensions_loaded + rescue LoadError => e + raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace + end + end + end + + private + + def load_error_path(error) + if error.respond_to? :path + error.path + else + error.message.split(' -- ').last + end + end + + def set_c_extensions_loaded + @c_extensions_loaded = true + end + + def java_extensions_loaded? + defined?(@java_extensions_loaded) && @java_extensions_loaded + end + + def set_java_extensions_loaded + @java_extensions_loaded = true + end + + def try_load_c_extension(path) + require path + set_c_extensions_loaded + rescue LoadError => e + if load_error_path(e) == path + # move on with pure-Ruby implementations + # TODO (pitr-ch 12-Jul-2018): warning on verbose? 
+        else
+          raise e
+        end
+      end
+
+    end
+  end
+
+  # @!visibility private
+  extend Utility::NativeExtensionLoader
+end
+
+Concurrent.load_native_extensions
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb
new file mode 100644
index 0000000..de1cdc3
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb
@@ -0,0 +1,54 @@
+module Concurrent
+  # @!visibility private
+  module Utility
+    # @private
+    module NativeInteger
+      # http://stackoverflow.com/questions/535721/ruby-max-integer
+      MIN_VALUE = -(2**(0.size * 8 - 2))
+      MAX_VALUE = (2**(0.size * 8 - 2) - 1)
+
+      def ensure_upper_bound(value)
+        if value > MAX_VALUE
+          raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
+        end
+        value
+      end
+
+      def ensure_lower_bound(value)
+        if value < MIN_VALUE
+          raise RangeError.new("#{value} is less than the minimum value of #{MIN_VALUE}")
+        end
+        value
+      end
+
+      def ensure_integer(value)
+        unless value.is_a?(Integer)
+          raise ArgumentError.new("#{value} is not an Integer")
+        end
+        value
+      end
+
+      def ensure_integer_and_bounds(value)
+        ensure_integer value
+        ensure_upper_bound value
+        ensure_lower_bound value
+      end
+
+      def ensure_positive(value)
+        if value < 0
+          raise ArgumentError.new("#{value} cannot be negative")
+        end
+        value
+      end
+
+      def ensure_positive_and_no_zero(value)
+        if value < 1
+          raise ArgumentError.new("#{value} cannot be negative or zero")
+        end
+        value
+      end
+
+      extend self
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb
new file mode 100644
index 0000000..986e2d5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb
@@ -0,0 +1,110 @@
+require 'etc'
+require 'rbconfig'
+require 'concurrent/delay'
+
+module Concurrent
+  # @!visibility private
+  module Utility
+
+    # @!visibility private
+    class ProcessorCounter
+      def initialize
+        @processor_count = Delay.new { compute_processor_count }
+        @physical_processor_count = Delay.new { compute_physical_processor_count }
+      end
+
+      def processor_count
+        @processor_count.value
+      end
+
+      def physical_processor_count
+        @physical_processor_count.value
+      end
+
+      private
+
+      def compute_processor_count
+        if Concurrent.on_jruby?
+          java.lang.Runtime.getRuntime.availableProcessors
+        else
+          Etc.nprocessors
+        end
+      end
+
+      def compute_physical_processor_count
+        ppc = case RbConfig::CONFIG["target_os"]
+              when /darwin\d\d/
+                IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i
+              when /linux/
+                cores = {} # unique physical ID / core ID combinations
+                phy = 0
+                IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln|
+                  if ln.start_with?("physical")
+                    phy = ln[/\d+/]
+                  elsif ln.start_with?("core")
+                    cid = phy + ":" + ln[/\d+/]
+                    cores[cid] = true if not cores[cid]
+                  end
+                end
+                cores.count
+              when /mswin|mingw/
+                require 'win32ole'
+                result_set = WIN32OLE.connect("winmgmts://").ExecQuery(
+                  "select NumberOfCores from Win32_Processor")
+                result_set.to_enum.collect(&:NumberOfCores).reduce(:+)
+              else
+                processor_count
+              end
+        # fall back to logical count if physical info is invalid
+        ppc > 0 ? ppc : processor_count
+      rescue
+        return 1
+      end
+    end
+  end
+
+  # create the default ProcessorCounter on load
+  @processor_counter = Utility::ProcessorCounter.new
+  singleton_class.send :attr_reader, :processor_counter
+
+  # Number of processors seen by the OS and used for process scheduling. For
+  # performance reasons the calculated value will be memoized on the first
+  # call.
+  #
+  # When running under JRuby the Java runtime call
+  # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According
+  # to the Java documentation this "value may change during a particular
+  # invocation of the virtual machine... [applications] should therefore
+  # occasionally poll this property." Subsequently the result will NOT be
+  # memoized under JRuby.
+  #
+  # Otherwise Ruby's Etc.nprocessors will be used.
+  #
+  # @return [Integer] number of processors seen by the OS or Java runtime
+  #
+  # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors()
+  def self.processor_count
+    processor_counter.processor_count
+  end
+
+  # Number of physical processor cores on the current system. For performance
+  # reasons the calculated value will be memoized on the first call.
+  #
+  # On Windows the Win32 API will be queried for the `NumberOfCores from
+  # Win32_Processor`. This will return the total number "of cores for the
+  # current instance of the processor." On Unix-like operating systems either
+  # the `hwprefs` or `sysctl` utility will be called in a subshell and the
+  # returned value will be used. In the rare case where none of these methods
+  # work or an exception is raised the function will simply return 1.
+  #
+  # @return [Integer] number of physical processor cores on the current system
+  #
+  # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb
+  #
+  # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx
+  # @see http://www.unix.com/man-page/osx/1/HWPREFS/
+  # @see http://linux.die.net/man/8/sysctl
+  def self.physical_processor_count
+    processor_counter.physical_processor_count
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb
new file mode 100644
index 0000000..d1c0989
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb
@@ -0,0 +1,3 @@
+module Concurrent
+  VERSION = '1.2.2'
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.gitignore b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.gitignore
new file mode 100644
index 0000000..ae3fdc2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.gitignore
@@ -0,0 +1,14 @@
+/.bundle/
+/.yardoc
+/Gemfile.lock
+/_yardoc/
+/coverage/
+/doc/
+/pkg/
+/spec/reports/
+/tmp/
+*.bundle
+*.so
+*.o
+*.a
+mkmf.log
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.travis.yml
new file mode 100644
index 0000000..67e55b6
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/.travis.yml
@@ -0,0 +1,12 @@
+language: ruby
+before_install: gem install bundler
+cache: bundler
+rvm:
+  - ruby-head
+  - 2.7
+  - 2.6
+  - 2.5
+  - 2.4
+jobs:
+  allow_failures:
+    - rvm: ruby-head
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/CHANGES.md b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/CHANGES.md
new file mode 100644
index 0000000..97d1b91
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/CHANGES.md
@@ -0,0 +1,49 @@
+# 0.0.10
+
+* `Defaults.merge!` will now deprecate non-wrapped `Array` values. The following code is now deprecated (it still works, but emits a warning).
+
+      defaults.merge!( list: [1,2] )
+
+  Instead, you need to wrap it in a command like `Variables::Append`.
+
+      defaults.merge!( list: Declarative::Variables::Append( [1,2] ) )
+
+  The reason for this change is to allow all kinds of operations with defaults variables, such as merges, overrides, append, prepend, and so on.
+
+* Introduce `Declarative::Variables.merge` to merge two sets of variables.
+* `Defaults` now uses `Variables` for merge/override operations.
+
+# 0.0.9
+
+* Removing `uber` dependency.
+
+# 0.0.8
+
+* When calling `Schema#defaults` (or `Defaults#merge!`) multiple times, same-named arrays will be joined instead of overridden. This fixes a common problem when merging different default settings.
+* Remove `Defaults#[]` and `Defaults#[]=`. This now happens via `#merge!`.
+
+# 0.0.7
+
+* Simplify `Defaults` and remove a warning in Ruby 2.2.3.
+
+# 0.0.6
+
+* `Heritage#call` now accepts a block that allows processing the arguments for every recorded statement before replaying them. This provides a hook to inject or change parameters, e.g. to mark a replay as an inheritance.
+
+# 0.0.5
+
+* Introduce `Schema::build_definition` as a central entry point for building `Definition` without any heritage involved.
+
+# 0.0.4
+
+* Restructured modules; there's always a public `DSL` module now, etc.
+
+# 0.0.3
+
+* Internals, only.
+
+# 0.0.2
+
+* First usable version with `Declarative::Schema` and friends.
+
+TODO: default_nested_class RM
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Gemfile b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Gemfile
new file mode 100644
index 0000000..39265b0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Gemfile
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+
+# Specify your gem's dependencies in declarative.gemspec
+gemspec
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/LICENSE.txt
new file mode 100644
index 0000000..08b3243
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2015-2020 Nick Sutterer
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/README.md b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/README.md new file mode 100644 index 0000000..e680226 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/README.md @@ -0,0 +1,109 @@ +# Declarative + +_DSL for nested schemas._ + +[![Gem Version](https://badge.fury.io/rb/declarative.svg)](http://badge.fury.io/rb/declarative) + +# Overview + +Declarative allows _declaring_ nested schemas. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'declarative' +``` + +## Declarative::Schema + +Include this into a class or module to allow defining nested schemas using the popular `::property` DSL. + +Normally, an abstract base class will define essential configuration. + +```ruby +class Model + extend Declarative::Schema + + def self.default_nested_class + Model + end +end +``` + +Concrete schema-users simply derive from the base class. + +```ruby +class Song < Model + property :id + + property :artist do + property :id + property :name + end +end +``` + +This won't do anything but populate the `::definitions` graph. + +```ruby +Song.definitions #=> + + + + + +``` + +The nested schema will be a subclass of `Model`. + +```ruby +Song.definitions.get(:artist) #=> +``` + +## Overriding Nested Building + +When declaring nested schemas, per default, Declarative will use its own `Schema::NestedBuilder` to create the nested schema composer. + +Override `::nested_builder` to define your own way of doing that. + +```ruby +class Model + extend Declarative::Schema + + def self.default_nested_class + Model + end + + def self.nested_builder + ->(options) do + Class.new(Model) do + class_eval &options[:_block] # executes `property :name` etc. on nested, fresh class. + end + end + end +end +``` + +## Features + +You can automatically include modules into all nested schemas by using `::feature`. 
+ +```ruby +class Model + extend Declarative::Schema + feature Bla +``` + +## Defaults + +```ruby +class Model + extend Declarative::Schema + defaults visible: true +``` + +## Copyright + +* Copyright (c) 2015 Nick Sutterer diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Rakefile b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Rakefile new file mode 100644 index 0000000..d6c09af --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/Rakefile @@ -0,0 +1,9 @@ +require "bundler/gem_tasks" +require "rake/testtask" + +task :default => [:test] +Rake::TestTask.new(:test) do |test| + test.libs << 'test' + test.test_files = FileList['test/*_test.rb'] + test.verbose = true +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/declarative.gemspec b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/declarative.gemspec new file mode 100644 index 0000000..9f0c0ee --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/declarative.gemspec @@ -0,0 +1,25 @@ +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'declarative/version' + +Gem::Specification.new do |spec| + spec.name = "declarative" + spec.version = Declarative::VERSION + spec.authors = ["Nick Sutterer"] + spec.email = ["apotonick@gmail.com"] + spec.summary = %q{DSL for nested schemas.} + spec.description = %q{DSL for nested generic schemas with inheritance and refining.} + spec.homepage = "https://github.com/apotonick/declarative" + spec.license = "MIT" + + spec.files = `git ls-files -z`.split("\x0").reject do |f| + f.match(%r{^(test)/}) + end + spec.test_files = spec.files.grep(%r{^(test)/}) + spec.require_paths = ["lib"] + spec.required_ruby_version = '>= 2.3.0' + + spec.add_development_dependency "rake" + spec.add_development_dependency "minitest" + spec.add_development_dependency "minitest-line" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative.rb new file mode 100644 index 0000000..7f95e19 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative.rb @@ -0,0 +1,10 @@ +require "declarative/version" +require "declarative/definitions" +require "declarative/heritage" +require "declarative/defaults" +require "declarative/schema" +require "declarative/deep_dup" +require "declarative/variables" + +module Declarative +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/deep_dup.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/deep_dup.rb new file mode 100644 index 0000000..85b0e6a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/deep_dup.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module Declarative + module DeepDup + def self.call(args) + case args + when Array + Array[*dup_items(args)] + when ::Hash + ::Hash[dup_items(args)] + else + args + + end + end + + def self.dup_items(arr) + arr.to_a.collect { |v| call(v) } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/defaults.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/defaults.rb new file mode 100644 index 0000000..5b2c076 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/defaults.rb @@ -0,0 +1,45 @@ +module Declarative + # {Defaults} is a mutable DSL object that collects default directives via #merge!. 
+ # Internally, it uses {Variables} to implement the merging of defaults. + class Defaults + def initialize + @static_options = {} + @dynamic_options = ->(*) { {} } + end + + # Set default values. Usually called in Schema::defaults. + # This can be called multiple times and will "deep-merge" arrays, e.g. `_features: []`. + def merge!(hash={}, &block) + @static_options = Variables.merge( @static_options, handle_array_and_deprecate(hash) ) + @dynamic_options = block if block_given? + + self + end + + # Evaluate defaults and merge given_options into them. + def call(name, given_options) + # TODO: allow to receive rest of options/block in dynamic block. or, rather, test it as it was already implemented. + evaluated_options = @dynamic_options.(name, given_options) + + options = Variables.merge( @static_options, handle_array_and_deprecate(evaluated_options) ) + Variables.merge( options, handle_array_and_deprecate(given_options) ) # FIXME: given_options is not tested! + end + + def handle_array_and_deprecate(variables) + wrapped = Defaults.wrap_arrays(variables) + + warn "[Declarative] Defaults#merge! and #call still accept arrays and automatically prepend those. This is now deprecated, you should replace `ary` with `Declarative::Variables::Append(ary)`." if wrapped.any? + + variables.merge(wrapped) + end + + # Wrap arrays in `variables` with Variables::Append so they get appended to existing + # same-named arrays. + def self.wrap_arrays(variables) + Hash[ variables. + find_all { |k,v| v.instance_of?(Array) }. + collect { |k,v| [k, Variables::Append(v)] } + ] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/definitions.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/definitions.rb new file mode 100644 index 0000000..7c2f396 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/definitions.rb @@ -0,0 +1,74 @@ +module Declarative + class Definitions < ::Hash + class Definition + def initialize(name, options={}) + @options = options.dup + @options[:name] = name.to_s + end + + def [](name) + @options[name] + end + + def merge!(hash) # TODO: this should return a new Definition instance. + @options.merge!(hash) + self + end + + def merge(hash) # TODO: should be called #copy. + DeepDup.(@options).merge(hash) + end + end + + + def initialize(definition_class) + @definition_class = definition_class + super() + end + + def each(&block) # TODO : test me! + values.each(&block) + end + + # #add is high-level behavior for Definitions#[]=. + # reserved options: + # :_features + # :_defaults + # :_base + # :_nested_builder + def add(name, options={}, &block) + options = options[:_defaults].(name, options) if options[:_defaults] # FIXME: pipeline? + base = options[:_base] + + if options.delete(:inherit) and (parent_property = get(name)) + base = parent_property[:nested] + options = parent_property.merge(options) # TODO: Definition#merge + end + + if options[:_nested_builder] + options[:nested] = build_nested( + options.merge( + _base: base, + _name: name, + _block: block, + ) + ) + end + + # clean up, we don't want that stored in the Definition instance. + [:_defaults, :_base, :_nested_builder, :_features].each { |key| options.delete(key) } + + self[name.to_s] = @definition_class.new(name, options) + end + + def get(name) + self[name.to_s] + end + + private + # Run builder to create nested schema (or twin, or representer, or whatever). 
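+    # (Illustrative note: the `:_nested_builder` option is expected to respond
+    # to #call with this options hash and return the nested schema; see the
+    # default `NestedBuilder` lambda in schema.rb for one such builder.)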
+    def build_nested(options)
+      options[:_nested_builder].(options)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/heritage.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/heritage.rb
new file mode 100644
index 0000000..29d1b1b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/heritage.rb
@@ -0,0 +1,45 @@
+require "declarative/deep_dup"
+
+module Declarative
+  class Heritage < Array
+    # Record inheritable assignments for replay in an inheriting class.
+    def record(method, *args, &block)
+      self << {method: method, args: DeepDup.(args), block: block} # DISCUSS: options.dup.
+    end
+
+    # Replay the recorded assignments on inheritor.
+    # Accepts a block that will allow processing the arguments for every recorded statement.
+    def call(inheritor, &block)
+      each { |cfg| call!(inheritor, cfg, &block) }
+    end
+
+    module DSL
+      def heritage
+        @heritage ||= Heritage.new
+      end
+    end
+
+    # To be extended into classes using Heritage. Inherits the heritage.
+    module Inherited
+      def inherited(subclass)
+        super
+        heritage.(subclass)
+      end
+    end
+
+    # To be included into modules using Heritage. When included, inherits the heritage.
+    module Included
+      def included(mod)
+        super
+        heritage.(mod)
+      end
+    end
+
+    private
+    def call!(inheritor, cfg)
+      yield cfg if block_given? # allow messing around with recorded arguments.
+
+      inheritor.send(cfg[:method], *cfg[:args], &cfg[:block])
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/schema.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/schema.rb
new file mode 100644
index 0000000..31a4359
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/schema.rb
@@ -0,0 +1,103 @@
+require "declarative/definitions"
+require "declarative/defaults"
+require "declarative/variables"
+require "declarative/heritage"
+
+module Declarative
+  # Include this to maintain inheritable, nested schemas with ::defaults and
+  # ::feature the way we have it in Representable, Reform, and Disposable.
+  #
+  # The schema with its definitions will be kept in ::definitions.
+  #
+  # Requirements to includer: ::default_nested_class, override building with ::nested_builder.
+  module Schema
+    def self.extended(extender)
+      extender.extend DSL                 # ::property
+      extender.extend Feature             # ::feature
+      extender.extend Heritage::DSL       # ::heritage
+      extender.extend Heritage::Inherited # ::included
+    end
+
+    module DSL
+      def property(name, options={}, &block)
+        heritage.record(:property, name, options, &block)
+
+        build_definition(name, options, &block)
+      end
+
+      def defaults(options={}, &block)
+        heritage.record(:defaults, options, &block)
+
+        # Always convert arrays to Variables::Append instructions.
+        options = options.merge( Defaults.wrap_arrays(options) )
+        block = wrap_arrays_from_block(block) if block_given?
+
+        _defaults.merge!(options, &block)
+      end
+
+      def definitions
+        @definitions ||= Definitions.new(definition_class)
+      end
+
+      def definition_class # TODO: test me.
+ Definitions::Definition + end + + private + def build_definition(name, options={}, &block) + default_options = { + _base: default_nested_class, + _defaults: _defaults + } + default_options[:_nested_builder] = nested_builder if block + + # options = options.merge( Defaults.wrap_arrays(options) ) + + definitions.add(name, default_options.merge(options), &block) + end + + def _defaults + @defaults ||= Declarative::Defaults.new + end + + def nested_builder + NestedBuilder # default implementation. + end + + NestedBuilder = ->(options) do + Class.new(options[:_base]) do # base + feature(*options[:_features]) + class_eval(&options[:_block]) + end + end + + # When called, executes `block` and wraps all array values in Variables::Append. + # This is the default behavior in older versions and allows to provide arrays for + # default values that will be prepended. + def wrap_arrays_from_block(block) + ->(*args) { + options = block.(*args) + options.merge( Defaults.wrap_arrays( options ) ) + } + end + end + + module Feature + # features are registered as defaults using _features, which in turn get translated to + # Class.new... { feature mod } which makes it recursive in nested schemas. + def feature(*mods) + mods.each do |mod| + include mod + register_feature(mod) + end + end + + private + def register_feature(mod) + heritage.record(:register_feature, mod) # this is only for inheritance between decorators and modules!!! ("horizontal and vertical") + + defaults.merge!( _features: Variables::Append([mod]) ) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/testing.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/testing.rb new file mode 100644 index 0000000..edc37cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/testing.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +module Declarative + def self.Inspect(obj) + string = obj.inspect + + if obj.is_a?(Proc) + elements = string.split('/') + string = "#{elements.first}#{elements.last}" + end + string.gsub(/0x\w+/, '') + end + + module Inspect + def inspect + string = super + if is_a?(Proc) + elements = string.split('/') + string = "#{elements.first}#{elements.last}" + end + string.gsub(/0x\w+/, '') + end + + module Schema + def inspect + definitions.extend(Definitions::Inspect) + "Schema: #{definitions.inspect}" + end + end + end + + module Definitions::Inspect + def inspect + each do |dfn| + dfn.extend(Declarative::Inspect) + + if dfn[:nested]&.is_a?(Declarative::Schema::DSL) + dfn[:nested].extend(Declarative::Inspect::Schema) + else + dfn[:nested]&.extend(Declarative::Definitions::Inspect) + end + end + super + end + + def get(*) + super.extend(Declarative::Inspect) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/variables.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/variables.rb new file mode 100644 index 0000000..675e469 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/variables.rb @@ -0,0 +1,38 @@ +module Declarative + # Implements the pattern of maintaining a hash of key/values (usually "defaults") + # that are mutated several times by user and library code (override defaults). + # + # The Variables instance then represents the configuration data to be processed by the + # using library (e.g. Representable or Trailblazer). 
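+  #
+  # An illustrative sketch (`Foo` and `Bar` stand in for feature modules):
+  #
+  #   Declarative::Variables.merge(
+  #     { _features: [Foo] },
+  #     { _features: Declarative::Variables::Append([Bar]) }
+  #   ) #=> { _features: [Foo, Bar] }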
+ class Variables + class Proc < ::Proc + end + + # @return Hash hash where `overrides` is merged onto `defaults` respecting Merge, Append etc. + def self.merge(defaults, overrides) + defaults = defaults.merge({}) # todo: use our DeepDup. # TODO: or how could we provide immutability? + + overrides.each do |k, v| + if v.is_a?(Variables::Proc) + defaults[k] = v.( defaults[k] ) + else + defaults[k] = v + end + end + + defaults + end + + def self.Merge(merged_hash) + Variables::Proc.new do |original| + (original || {}).merge( merged_hash ) + end + end + + def self.Append(appended_array) + Variables::Proc.new do |original| + (original || []) + appended_array + end + end + end # Variables +end diff --git a/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/version.rb b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/version.rb new file mode 100644 index 0000000..e67827a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/declarative-0.0.20/lib/declarative/version.rb @@ -0,0 +1,3 @@ +module Declarative + VERSION = "0.0.20" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/integration.yml b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/integration.yml new file mode 100644 index 0000000..1379fe4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/integration.yml @@ -0,0 +1,26 @@ +name: CI + +on: + push: + tags: [ 'v0.*' ] + pull_request: + paths: [ 'ext/**' ] + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + name: Integration Testing + steps: + - uses: actions/checkout@v2 + - name: Set up Docker + uses: docker-practice/actions-setup-docker@master + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: 2.7 + - name: Install dependencies + run: bundle install --jobs 4 --retry 3 + - name: Run integration tests + run: bundle exec rake spec:integration diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/ruby.yml b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/ruby.yml new file mode 100644 index 0000000..d998f21 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.github/workflows/ruby.yml @@ -0,0 +1,31 @@ +name: CI + +on: [ push, pull_request ] + +jobs: + tests: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + - ubuntu-latest + - macos-latest + ruby: + - 2.4 + - 2.5 + - 2.6 + - 2.7 + - 3.0 + - jruby + name: OS ${{ matrix.os }} / Ruby ${{ matrix.ruby }} + steps: + - uses: actions/checkout@v2 + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + - name: Install dependencies + run: bundle install --jobs 4 --retry 3 + - name: Run tests + run: bundle exec rake spec diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.gitignore b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.gitignore new file mode 100644 index 0000000..3aedb4e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.gitignore @@ -0,0 +1,11 @@ +Gemfile.lock +doc +pkg +.bundle +.yardoc +/ext/digest/crc*/Makefile +/ext/digest/crc*/mkmf.log +/ext/digest/crc*/extconf.h +/ext/digest/crc*/*.o +/ext/digest/crc*/*.so +/spec/integration/docker/digest-crc.gem diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.rspec b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.rspec new file mode 100644 index 0000000..660778b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.rspec @@ -0,0 +1 @@ +--colour --format documentation diff --git 
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.yardopts b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.yardopts
new file mode 100644
index 0000000..b1b0316
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/.yardopts
@@ -0,0 +1 @@
+--markup markdown --title 'Digest CRC Documentation' --protected --files ChangeLog.md,LICENSE.txt
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ChangeLog.md b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ChangeLog.md
new file mode 100644
index 0000000..46e2359
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ChangeLog.md
@@ -0,0 +1,201 @@
+### 0.6.4 / 2021-07-14
+
+* Silence potential method redefinition warnings when loading the C extensions.
+  (@ojab)
+
+### 0.6.3 / 2020-12-19
+
+* Broaden rake dependency to `>= 12.0.0, < 14.0.0` for ruby 2.7, which includes
+  rake 13.x.
+
+### 0.6.2 / 2020-12-03
+
+* Lower the rake dependency to `~> 12.0` for ruby 2.6.
+* Fixed a bug in `ext/digest/Rakefile` which prevented digest-crc from being
+  installed on systems where C extensions could not be successfully compiled.
+  * Rake's `ruby` method, which in turn calls rake's `sh` method, raises
+    a `RuntimeError` exception when the ruby command fails, causing rake to
+    exit with an error code. Instead, rescue any `RuntimeError` exceptions and
+    fail gracefully.
+
+### 0.6.1 / 2020-07-02
+
+* Fix installation issues under bundler by adding rake as an explicit dependency
+  (@rogerluan).
+
+### 0.6.0 / 2020-07-01
+
+* Implement _optional_ C extensions for all CRC algorithms, resulting in an
+  average performance improvement of ~40x. Note: if for whatever reason the
+  C extensions cannot be compiled, they will be skipped and the pure-Ruby
+  CRC algorithms will be used instead. If the C extensions were successfully
+  compiled, then they will be loaded and override the pure-Ruby CRC methods with
+  the C equivalents.
+* Alias {Digest::CRC16QT} to {Digest::CRC16X25}, since they are effectively the same (@dearblue).
+* Fix {Digest::CRC32::WIDTH} (@dearblue).
+
+#### pure-Ruby (ruby 2.7.1)
+
+    Loading Digest::CRC classes ...
+    Generating 1000 8Kb lengthed strings ...
+    Benchmarking Digest::CRC classes ...
+ user system total real + Digest::CRC1#update 0.412953 0.000000 0.412953 ( 0.414688) + Digest::CRC5#update 1.116375 0.000003 1.116378 ( 1.120741) + Digest::CRC8#update 0.994263 0.000013 0.994276 ( 1.001079) + Digest::CRC8_1Wire#update 0.974115 0.000004 0.974119 ( 0.978186) + Digest::CRC15#update 1.139402 0.000927 1.140329 ( 1.146608) + Digest::CRC16#update 0.967836 0.000000 0.967836 ( 0.971792) + Digest::CRC16CCITT#update 1.118851 0.000000 1.118851 ( 1.123217) + Digest::CRC16DNP#update 0.922211 0.000000 0.922211 ( 0.925739) + Digest::CRC16Genibus#update 1.120580 0.000000 1.120580 ( 1.124771) + Digest::CRC16Modbus#update 0.955612 0.000000 0.955612 ( 0.959463) + Digest::CRC16QT#update 8.153403 0.000012 8.153415 ( 8.189977) + Digest::CRC16USB#update 0.952557 0.000000 0.952557 ( 0.956145) + Digest::CRC16X25#update 0.962295 0.000000 0.962295 ( 0.970401) + Digest::CRC16XModem#update 1.120531 0.000000 1.120531 ( 1.124494) + Digest::CRC16ZModem#update 1.124226 0.000000 1.124226 ( 1.128632) + Digest::CRC24#update 1.126317 0.000000 1.126317 ( 1.130794) + Digest::CRC32#update 0.960015 0.000000 0.960015 ( 0.964803) + Digest::CRC32BZip2#update 1.128626 0.000000 1.128626 ( 1.133641) + Digest::CRC32c#update 0.964047 0.000000 0.964047 ( 0.967456) + Digest::CRC32Jam#update 0.959141 0.000972 0.960113 ( 0.967444) + Digest::CRC32MPEG#update 1.131119 0.000002 1.131121 ( 1.137440) + Digest::CRC32POSIX#update 1.126019 0.000000 1.126019 ( 1.130549) + Digest::CRC32XFER#update 1.116598 0.000000 1.116598 ( 1.120595) + Digest::CRC64#update 2.665880 0.000928 2.666808 ( 2.680942) + Digest::CRC64Jones#update 2.678003 0.000000 2.678003 ( 2.691390) + Digest::CRC64XZ#update 2.671395 0.000000 2.671395 ( 2.682684) + +#### pure-Ruby (jruby 9.2.11.1) + + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.700000 0.070000 0.770000 ( 0.436112) + Digest::CRC5#update 1.930000 0.050000 1.980000 ( 1.084749) + Digest::CRC8#update 1.510000 0.060000 1.570000 ( 0.979123) + Digest::CRC8_1Wire#update 0.730000 0.030000 0.760000 ( 0.761309) + Digest::CRC15#update 1.760000 0.080000 1.840000 ( 1.061413) + Digest::CRC16#update 1.560000 0.030000 1.590000 ( 0.951273) + Digest::CRC16CCITT#update 1.700000 0.010000 1.710000 ( 1.046854) + Digest::CRC16DNP#update 1.490000 0.000000 1.490000 ( 0.902434) + Digest::CRC16Genibus#update 1.820000 0.020000 1.840000 ( 1.030269) + Digest::CRC16Modbus#update 0.740000 0.010000 0.750000 ( 0.738604) + Digest::CRC16QT#update 7.280000 0.040000 7.320000 ( 6.399987) + Digest::CRC16USB#update 0.930000 0.000000 0.930000 ( 0.801541) + Digest::CRC16X25#update 0.870000 0.000000 0.870000 ( 0.805130) + Digest::CRC16XModem#update 1.320000 0.010000 1.330000 ( 0.968956) + Digest::CRC16ZModem#update 1.300000 0.010000 1.310000 ( 0.928303) + Digest::CRC24#update 1.550000 0.020000 1.570000 ( 1.024450) + Digest::CRC32#update 1.260000 0.000000 1.260000 ( 0.913814) + Digest::CRC32BZip2#update 1.210000 0.010000 1.220000 ( 0.919086) + Digest::CRC32c#update 0.770000 0.010000 0.780000 ( 0.761726) + Digest::CRC32Jam#update 0.930000 0.000000 0.930000 ( 0.800468) + Digest::CRC32MPEG#update 1.240000 0.010000 1.250000 ( 0.933962) + Digest::CRC32POSIX#update 1.290000 0.010000 1.300000 ( 0.925254) + Digest::CRC32XFER#update 1.270000 0.000000 1.270000 ( 0.920521) + Digest::CRC64#update 3.480000 0.020000 3.500000 ( 2.883794) + Digest::CRC64Jones#update 2.740000 0.000000 2.740000 ( 2.738251) + Digest::CRC64XZ#update 2.780000 0.010000 2.790000 ( 2.715833) + + +#### C extensions (ruby 2.7.1) + + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+                                   user     system      total        real
+    Digest::CRC1#update        0.407438   0.000000   0.407438 (  0.410495)
+    Digest::CRC5#update        0.022873   0.000000   0.022873 (  0.023796)
+    Digest::CRC8#update        0.020129   0.000000   0.020129 (  0.020887)
+    Digest::CRC8_1Wire#update  0.020106   0.000000   0.020106 (  0.020897)
+    Digest::CRC15#update       0.028765   0.000003   0.028768 (  0.029549)
+    Digest::CRC16#update       0.022176   0.000856   0.023032 (  0.023153)
+    Digest::CRC16CCITT#update  0.028570   0.000000   0.028570 (  0.028691)
+    Digest::CRC16DNP#update    0.023240   0.000001   0.023241 (  0.024008)
+    Digest::CRC16Genibus#update 0.028692  0.000000   0.028692 (  0.029575)
+    Digest::CRC16Modbus#update 0.023928   0.000000   0.023928 (  0.024859)
+    Digest::CRC16QT#update     7.965822   0.000968   7.966790 (  8.001781)
+    Digest::CRC16USB#update    0.023448   0.000001   0.023449 (  0.024420)
+    Digest::CRC16X25#update    0.023061   0.000000   0.023061 (  0.023861)
+    Digest::CRC16XModem#update 0.029407   0.000000   0.029407 (  0.030583)
+    Digest::CRC16ZModem#update 0.029522   0.000000   0.029522 (  0.030438)
+    Digest::CRC24#update       0.029528   0.000000   0.029528 (  0.030504)
+    Digest::CRC32#update       0.023306   0.000000   0.023306 (  0.024278)
+    Digest::CRC32BZip2#update  0.026346   0.000000   0.026346 (  0.027293)
+    Digest::CRC32c#update      0.023525   0.000000   0.023525 (  0.024489)
+    Digest::CRC32Jam#update    0.023348   0.000000   0.023348 (  0.023477)
+    Digest::CRC32MPEG#update   0.026287   0.000000   0.026287 (  0.027394)
+    Digest::CRC32POSIX#update  0.026063   0.000000   0.026063 (  0.026986)
+    Digest::CRC32XFER#update   0.026374   0.000000   0.026374 (  0.027314)
+    Digest::CRC64#update       0.023523   0.000000   0.023523 (  0.024484)
+    Digest::CRC64Jones#update  0.023479   0.000000   0.023479 (  0.024432)
+    Digest::CRC64XZ#update     0.024146   0.000000   0.024146 (  0.025129)
+
+### 0.5.1 / 2020-03-03
+
+* Fixed XOR logic in {Digest::CRC16Genibus}.
+* Freeze all `TABLE` constants.
+* Added missing documentation.
+
+### 0.5.0 / 2020-03-01
+
+* Added {Digest::CRC15}.
+* Added {Digest::CRC16Genibus}.
+* Added {Digest::CRC16Kermit}.
+* Added {Digest::CRC16X25}.
+* Added {Digest::CRC32BZip2}.
+* Added {Digest::CRC32Jam}.
+* Added {Digest::CRC32POSIX}.
+* Added {Digest::CRC32XFER}.
+* Added {Digest::CRC64Jones}.
+* Added {Digest::CRC64XZ}.
+* Renamed `Digest::CRC32Mpeg` to {Digest::CRC32MPEG}.
+* Renamed `Digest::CRC81Wire` to {Digest::CRC8_1Wire}.
+
+### 0.4.2 / 2020-03-01
+
+* Corrected the logic in {Digest::CRC32#update}.
+* Added missing {Digest::CRC5.pack} method.
+* Fixed a require in `digest/crc8_1wire.rb`.
+
+### 0.4.1 / 2014-04-16
+
+* Allow Digest CRC classes to be extended and their constants overridden.
+* Allow {Digest::CRC5::CRC_MASK} to be overridden by subclasses.
+* {Digest::CRC81Wire} now inherits from {Digest::CRC8}.
+
+### 0.4.0 / 2013-02-13
+
+* Added {Digest::CRC16QT}.
+
+### 0.3.0 / 2011-09-24
+
+* Added {Digest::CRC81Wire} (Henry Garner).
+
+### 0.2.0 / 2011-05-10
+
+* Added {Digest::CRC32c}.
+* Opted into [test.rubygems.org](http://test.rubygems.org/)
+* Switched from using Jeweler and Bundler, to using
+  [Ore::Tasks](http://github.com/ruby-ore/ore-tasks).
+
+### 0.1.0 / 2010-06-01
+
+* Initial release.
+ * CRC1 + * CRC5 + * CRC8 + * CRC16 + * CRC16 CCITT + * CRC16 DNP + * CRC16 Modbus + * CRC16 USB + * CRC16 XModem + * CRC16 ZModem + * CRC24 + * CRC32 + * CRC32 Mpeg + * CRC64 diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Gemfile b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Gemfile new file mode 100644 index 0000000..5d6782b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Gemfile @@ -0,0 +1,15 @@ +source 'https://rubygems.org' + +gemspec + +group :development do + gem 'rake' + gem 'rubygems-tasks', '~> 0.2' + + gem 'rspec', '~> 3.0' + + gem 'yard', '~> 0.9' + gem 'kramdown' + gem 'kramdown-parser-gfm' + gem 'github-markup', '~> 1.1' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/LICENSE.txt new file mode 100644 index 0000000..ec04ca3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2010-2021 Hal Brodigan + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/README.md b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/README.md new file mode 100644 index 0000000..3f326b2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/README.md @@ -0,0 +1,406 @@ +# Digest CRC + +[![CI](https://github.com/postmodern/digest-crc/actions/workflows/ruby.yml/badge.svg)](https://github.com/postmodern/digest-crc/actions/workflows/ruby.yml) + +* [Source](https://github.com/postmodern/digest-crc) +* [Issues](https://github.com/postmodern/digest-crc/issues) +* [Documentation](http://rubydoc.info/gems/digest-crc/frames) + +## Description + +Adds support for calculating Cyclic Redundancy Check (CRC) to the Digest +module. 
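+
+Each class plugs into the standard `Digest` API, so the familiar one-shot
+calls work as usual. A quick sketch (`"123456789"` is the conventional CRC
+check string; the result shown assumes the default CRC16/ARC parameters):
+
+```ruby
+require 'digest/crc16'
+
+Digest::CRC16.hexdigest('123456789') # => "bb3d"
+```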
+ +## Features + +* Provides support for the following CRC algorithms: + * [CRC1](https://rubydoc.info/gems/digest-crc/Digest/CRC1) + * [CRC5](https://rubydoc.info/gems/digest-crc/Digest/CRC5) + * [CRC8](https://rubydoc.info/gems/digest-crc/Digest/CRC8) + * [CRC8 1-Wire](https://rubydoc.info/gems/digest-crc/Digest/CRC8_1Wire) + * [CRC15](https://rubydoc.info/gems/digest-crc/Digest/CRC15) + * [CRC16](https://rubydoc.info/gems/digest-crc/Digest/CRC16) + * [CRC16 CCITT](https://rubydoc.info/gems/digest-crc/Digest/CRC16CCITT) + * [CRC16 DNP](https://rubydoc.info/gems/digest-crc/Digest/CRC16DNP) + * [CRC16 Genibus](https://rubydoc.info/gems/digest-crc/Digest/CRC16Genibus) + * [CRC16 Kermit](https://rubydoc.info/gems/digest-crc/Digest/CRC16Kermit) + * [CRC16 Modbus](https://rubydoc.info/gems/digest-crc/Digest/CRC16Modbus) + * [CRC16 USB](https://rubydoc.info/gems/digest-crc/Digest/CRC16USB) + * [CRC16 X25](https://rubydoc.info/gems/digest-crc/Digest/CRC16X25) + * [CRC16 XModem](https://rubydoc.info/gems/digest-crc/Digest/CRC16XModem) + * [CRC16 ZModem](https://rubydoc.info/gems/digest-crc/Digest/CRC16ZModem) + * [CRC16 QT](https://rubydoc.info/gems/digest-crc/Digest/CRC16QT) + * [CRC24](https://rubydoc.info/gems/digest-crc/Digest/CRC24) + * [CRC32](https://rubydoc.info/gems/digest-crc/Digest/CRC32) + * [CRC32 BZip2](https://rubydoc.info/gems/digest-crc/Digest/CRC32BZip2) + * [CRC32c](https://rubydoc.info/gems/digest-crc/Digest/CRC32c) + * [CRC32 Jam](https://rubydoc.info/gems/digest-crc/Digest/CRC32Jam) + * [CRC32 MPEG](https://rubydoc.info/gems/digest-crc/Digest/CRC32MPEG) + * [CRC32 POSIX](https://rubydoc.info/gems/digest-crc/Digest/CRC32POSIX) + * [CRC32 XFER](https://rubydoc.info/gems/digest-crc/Digest/CRC32XFER) + * [CRC64](https://rubydoc.info/gems/digest-crc/Digest/CRC64) + * [CRC64 Jones](https://rubydoc.info/gems/digest-crc/Digest/CRC64Jones) + * [CRC64 XZ](https://rubydoc.info/gems/digest-crc/Digest/CRC64XZ) +* Pure Ruby implementation. +* Provides CRC Tables for optimized calculations. +* Supports _optional_ C extensions which increases performance by ~40x. + * If the C extensions cannot be compiled for whatever reason, digest-crc + will automatically fallback to the pure-Ruby implementation. + +## Install + +``` +gem install digest-crc +``` + +**Note:** to enable the C extensions ensure that you are using CRuby and have +a C compiler (`gcc` or `clang`) and `make` installed, _before_ installing +digest-crc. + +* Debian / Ubuntu: + + $ sudo apt install gcc make + +* RedHat / Fedora: + + $ sudo dnf install gcc make + +* Alpine Linux: + + $ apk add build-base + +* macOS: install XCode + +## Examples + +Calculate a CRC32: + +```ruby +require 'digest/crc32' + +Digest::CRC32.hexdigest('hello') +# => "3610a686" +``` + +Calculate a CRC32 of a file: + +```ruby +Digest::CRC32.file('README.md') +# => # +``` + +Incrementally calculate a CRC32: + +```ruby +crc = Digest::CRC32.new +crc << 'one' +crc << 'two' +crc << 'three' +crc.hexdigest +# => "09e1c092" +``` + +Directly access the checksum: + +```ruby +crc.checksum +# => 165789842 +``` + +Defining your own CRC class: + +```ruby +require 'digest/crc32' + +module Digest + class CRC3000 < CRC32 + + WIDTH = 4 + + INIT_CRC = 0xffffffff + + XOR_MASK = 0xffffffff + + TABLE = [ + # .... 
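+      # (Illustrative placeholder rather than real entries: the 256-entry
+      # lookup table for this polynomial goes here; the pre-computed tables
+      # shipped in e.g. lib/digest/crc32.rb show the expected layout.)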
+ ].freeze + + def update(data) + data.each_byte do |b| + @crc = (((@crc >> 8) & 0x00ffffff) ^ @table[(@crc ^ b) & 0xff]) + end + + return self + end + end +end +``` + +## Benchmarks + +### Ruby 2.7.4 (pure Ruby) + + $ bundle exec rake clean + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... + user system total real + Digest::CRC1#update 0.423741 0.000000 0.423741 ( 0.425887) + Digest::CRC5#update 1.486578 0.000011 1.486589 ( 1.493215) + Digest::CRC8#update 1.261386 0.000000 1.261386 ( 1.266399) + Digest::CRC8_1Wire#update 1.250344 0.000000 1.250344 ( 1.255009) + Digest::CRC15#update 1.482515 0.000000 1.482515 ( 1.488131) + Digest::CRC16#update 1.216744 0.000811 1.217555 ( 1.222228) + Digest::CRC16CCITT#update 1.480490 0.000000 1.480490 ( 1.486745) + Digest::CRC16DNP#update 1.200067 0.000000 1.200067 ( 1.204835) + Digest::CRC16Genibus#update 1.492910 0.000000 1.492910 ( 1.498923) + Digest::CRC16Modbus#update 1.217449 0.000003 1.217452 ( 1.222348) + Digest::CRC16QT#update 1.223311 0.000000 1.223311 ( 1.229211) + Digest::CRC16USB#update 1.233744 0.000000 1.233744 ( 1.238615) + Digest::CRC16X25#update 1.223077 0.000000 1.223077 ( 1.227607) + Digest::CRC16XModem#update 1.487674 0.000000 1.487674 ( 1.493316) + Digest::CRC16ZModem#update 1.484288 0.000000 1.484288 ( 1.490096) + Digest::CRC24#update 1.490272 0.000000 1.490272 ( 1.496027) + Digest::CRC32#update 1.225311 0.000000 1.225311 ( 1.230572) + Digest::CRC32BZip2#update 1.503096 0.000000 1.503096 ( 1.509202) + Digest::CRC32c#update 1.220390 0.000000 1.220390 ( 1.225487) + Digest::CRC32Jam#update 1.216066 0.000000 1.216066 ( 1.220591) + Digest::CRC32MPEG#update 1.486808 0.000000 1.486808 ( 1.492611) + Digest::CRC32POSIX#update 1.494508 0.000957 1.495465 ( 1.503262) + Digest::CRC32XFER#update 1.504802 0.005830 1.510632 ( 1.522066) + Digest::CRC64#update 3.260784 0.015674 3.276458 ( 3.310506) + Digest::CRC64Jones#update 3.195204 0.000000 3.195204 ( 3.213054) + Digest::CRC64XZ#update 3.173597 0.000000 3.173597 ( 3.190438) + +### Ruby 2.7.4 (C extensions) + + $ bundle exec rake build:c_exts + ... + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.443619 0.000007 0.443626 ( 0.446545) + Digest::CRC5#update 0.025134 0.000806 0.025940 ( 0.026129) + Digest::CRC8#update 0.022564 0.000000 0.022564 ( 0.022775) + Digest::CRC8_1Wire#update 0.021427 0.000008 0.021435 ( 0.021551) + Digest::CRC15#update 0.030377 0.000833 0.031210 ( 0.031406) + Digest::CRC16#update 0.024004 0.000002 0.024006 ( 0.024418) + Digest::CRC16CCITT#update 0.026930 0.000001 0.026931 ( 0.027238) + Digest::CRC16DNP#update 0.024279 0.000000 0.024279 ( 0.024446) + Digest::CRC16Genibus#update 0.026477 0.000004 0.026481 ( 0.026656) + Digest::CRC16Modbus#update 0.023568 0.000000 0.023568 ( 0.023704) + Digest::CRC16QT#update 0.024161 0.000000 0.024161 ( 0.024316) + Digest::CRC16USB#update 0.023891 0.000000 0.023891 ( 0.024038) + Digest::CRC16X25#update 0.023849 0.000000 0.023849 ( 0.023991) + Digest::CRC16XModem#update 0.026254 0.000000 0.026254 ( 0.026523) + Digest::CRC16ZModem#update 0.026391 0.000000 0.026391 ( 0.026529) + Digest::CRC24#update 0.028805 0.000854 0.029659 ( 0.029830) + Digest::CRC32#update 0.024030 0.000000 0.024030 ( 0.024200) + Digest::CRC32BZip2#update 0.026942 0.000000 0.026942 ( 0.027244) + Digest::CRC32c#update 0.023989 0.000000 0.023989 ( 0.024159) + Digest::CRC32Jam#update 0.023940 0.000000 0.023940 ( 0.024066) + Digest::CRC32MPEG#update 0.027063 0.000000 0.027063 ( 0.027213) + Digest::CRC32POSIX#update 0.027137 0.000000 0.027137 ( 0.028160) + Digest::CRC32XFER#update 0.026956 0.000002 0.026958 ( 0.027103) + Digest::CRC64#update 0.024222 0.000005 0.024227 ( 0.024796) + Digest::CRC64Jones#update 0.025331 0.000000 0.025331 ( 0.025789) + Digest::CRC64XZ#update 0.024131 0.000001 0.024132 ( 0.024348) + +### Ruby 3.0.2 (pure Ruby) + + $ bundle exec rake clean + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.331405 0.000002 0.331407 ( 0.333588) + Digest::CRC5#update 1.206847 0.000020 1.206867 ( 1.224072) + Digest::CRC8#update 1.018571 0.000000 1.018571 ( 1.023002) + Digest::CRC8_1Wire#update 1.018802 0.000000 1.018802 ( 1.023292) + Digest::CRC15#update 1.207586 0.000000 1.207586 ( 1.212691) + Digest::CRC16#update 1.032505 0.000965 1.033470 ( 1.040862) + Digest::CRC16CCITT#update 1.198079 0.000000 1.198079 ( 1.203134) + Digest::CRC16DNP#update 0.994582 0.000000 0.994582 ( 1.006520) + Digest::CRC16Genibus#update 1.190596 0.000000 1.190596 ( 1.196087) + Digest::CRC16Modbus#update 1.007826 0.000000 1.007826 ( 1.012934) + Digest::CRC16QT#update 0.996298 0.000001 0.996299 ( 1.000255) + Digest::CRC16USB#update 0.995806 0.000000 0.995806 ( 0.999822) + Digest::CRC16X25#update 1.019589 0.000000 1.019589 ( 1.031010) + Digest::CRC16XModem#update 1.146947 0.000000 1.146947 ( 1.150817) + Digest::CRC16ZModem#update 1.145145 0.000000 1.145145 ( 1.149483) + Digest::CRC24#update 1.149009 0.000000 1.149009 ( 1.152854) + Digest::CRC32#update 0.970976 0.000000 0.970976 ( 0.974227) + Digest::CRC32BZip2#update 1.148596 0.000000 1.148596 ( 1.152381) + Digest::CRC32c#update 0.972566 0.000000 0.972566 ( 0.975790) + Digest::CRC32Jam#update 0.975854 0.000000 0.975854 ( 0.979217) + Digest::CRC32MPEG#update 1.148578 0.000000 1.148578 ( 1.153088) + Digest::CRC32POSIX#update 1.146218 0.000986 1.147204 ( 1.152460) + Digest::CRC32XFER#update 1.149823 0.000000 1.149823 ( 1.153692) + Digest::CRC64#update 2.869948 0.000016 2.869964 ( 2.884261) + Digest::CRC64Jones#update 2.867662 0.000000 2.867662 ( 2.886559) + Digest::CRC64XZ#update 2.858847 0.000000 2.858847 ( 2.874058) + +### Ruby 3.0.2 (C extensions) + + $ bundle exec rake build:c_exts + ... + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.349055 0.000000 0.349055 ( 0.350454) + Digest::CRC5#update 0.023144 0.000000 0.023144 ( 0.023248) + Digest::CRC8#update 0.021378 0.000000 0.021378 ( 0.021522) + Digest::CRC8_1Wire#update 0.021019 0.000000 0.021019 ( 0.021145) + Digest::CRC15#update 0.030063 0.000003 0.030066 ( 0.030245) + Digest::CRC16#update 0.024395 0.000000 0.024395 ( 0.024572) + Digest::CRC16CCITT#update 0.026979 0.000000 0.026979 ( 0.027138) + Digest::CRC16DNP#update 0.024665 0.000000 0.024665 ( 0.024844) + Digest::CRC16Genibus#update 0.027054 0.000000 0.027054 ( 0.027217) + Digest::CRC16Modbus#update 0.023963 0.000000 0.023963 ( 0.024257) + Digest::CRC16QT#update 0.024218 0.000000 0.024218 ( 0.024360) + Digest::CRC16USB#update 0.024393 0.000000 0.024393 ( 0.024561) + Digest::CRC16X25#update 0.025127 0.000000 0.025127 ( 0.025292) + Digest::CRC16XModem#update 0.028123 0.000000 0.028123 ( 0.028377) + Digest::CRC16ZModem#update 0.028205 0.000000 0.028205 ( 0.028571) + Digest::CRC24#update 0.031386 0.000000 0.031386 ( 0.031740) + Digest::CRC32#update 0.023832 0.000000 0.023832 ( 0.023948) + Digest::CRC32BZip2#update 0.027159 0.000000 0.027159 ( 0.027315) + Digest::CRC32c#update 0.024172 0.000000 0.024172 ( 0.024310) + Digest::CRC32Jam#update 0.024376 0.000000 0.024376 ( 0.024494) + Digest::CRC32MPEG#update 0.026035 0.000784 0.026819 ( 0.026940) + Digest::CRC32POSIX#update 0.026784 0.000000 0.026784 ( 0.026907) + Digest::CRC32XFER#update 0.026770 0.000000 0.026770 ( 0.026893) + Digest::CRC64#update 0.024400 0.000009 0.024409 ( 0.024531) + Digest::CRC64Jones#update 0.023477 0.000781 0.024258 ( 0.024390) + Digest::CRC64XZ#update 0.024611 0.000000 0.024611 ( 0.024779) + +### JRuby 9.2.18.0 (pure Ruby) + + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 1.080000 0.050000 1.130000 ( 0.676022) + Digest::CRC5#update 2.030000 0.040000 2.070000 ( 1.089240) + Digest::CRC8#update 1.590000 0.000000 1.590000 ( 0.999138) + Digest::CRC8_1Wire#update 0.920000 0.010000 0.930000 ( 0.873813) + Digest::CRC15#update 1.470000 0.030000 1.500000 ( 1.118886) + Digest::CRC16#update 1.780000 0.010000 1.790000 ( 1.067874) + Digest::CRC16CCITT#update 1.500000 0.070000 1.570000 ( 1.185564) + Digest::CRC16DNP#update 1.250000 0.000000 1.250000 ( 0.972322) + Digest::CRC16Genibus#update 1.700000 0.010000 1.710000 ( 1.092047) + Digest::CRC16Modbus#update 1.000000 0.010000 1.010000 ( 0.915328) + Digest::CRC16QT#update 1.250000 0.000000 1.250000 ( 0.968528) + Digest::CRC16USB#update 1.150000 0.010000 1.160000 ( 0.990387) + Digest::CRC16X25#update 0.940000 0.000000 0.940000 ( 0.926926) + Digest::CRC16XModem#update 1.390000 0.010000 1.400000 ( 1.100584) + Digest::CRC16ZModem#update 1.760000 0.020000 1.780000 ( 1.094003) + Digest::CRC24#update 1.690000 0.010000 1.700000 ( 1.106875) + Digest::CRC32#update 1.410000 0.020000 1.430000 ( 1.082506) + Digest::CRC32BZip2#update 1.510000 0.010000 1.520000 ( 1.104225) + Digest::CRC32c#update 1.270000 0.010000 1.280000 ( 1.023881) + Digest::CRC32Jam#update 1.190000 0.010000 1.200000 ( 0.998146) + Digest::CRC32MPEG#update 1.580000 0.010000 1.590000 ( 1.099086) + Digest::CRC32POSIX#update 1.550000 0.010000 1.560000 ( 1.142051) + Digest::CRC32XFER#update 1.360000 0.000000 1.360000 ( 1.071381) + Digest::CRC64#update 3.730000 0.020000 3.750000 ( 2.780390) + Digest::CRC64Jones#update 2.710000 0.020000 2.730000 ( 2.608007) + Digest::CRC64XZ#update 2.910000 0.020000 2.930000 ( 2.629401) + +### TruffleRuby 21.2.0 (pure Ruby) + + $ bundle exec rake clean + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.455340 0.000000 0.455340 ( 0.457710) + Digest::CRC5#update 1.406700 0.000000 1.406700 ( 1.412535) + Digest::CRC8#update 1.248323 0.000000 1.248323 ( 1.255452) + Digest::CRC8_1Wire#update 1.269434 0.000000 1.269434 ( 1.275315) + Digest::CRC15#update 1.428752 0.000000 1.428752 ( 1.434836) + Digest::CRC16#update 1.220394 0.000967 1.221361 ( 1.229684) + Digest::CRC16CCITT#update 1.434932 0.001000 1.435932 ( 1.452391) + Digest::CRC16DNP#update 1.191351 0.000000 1.191351 ( 1.202262) + Digest::CRC16Genibus#update 1.434067 0.000000 1.434067 ( 1.440300) + Digest::CRC16Modbus#update 1.200827 0.000000 1.200827 ( 1.205658) + Digest::CRC16QT#update 1.195077 0.000000 1.195077 ( 1.200328) + Digest::CRC16USB#update 1.196266 0.000000 1.196266 ( 1.201262) + Digest::CRC16X25#update 1.206690 0.000000 1.206690 ( 1.211781) + Digest::CRC16XModem#update 1.430468 0.000000 1.430468 ( 1.436801) + Digest::CRC16ZModem#update 1.442524 0.000000 1.442524 ( 1.448624) + Digest::CRC24#update 1.447611 0.000018 1.447629 ( 1.454534) + Digest::CRC32#update 1.214314 0.000000 1.214314 ( 1.219838) + Digest::CRC32BZip2#update 1.427408 0.000000 1.427408 ( 1.433626) + Digest::CRC32c#update 1.204985 0.000000 1.204985 ( 1.210273) + Digest::CRC32Jam#update 1.235039 0.000000 1.235039 ( 1.240686) + Digest::CRC32MPEG#update 1.429731 0.000000 1.429731 ( 1.435404) + Digest::CRC32POSIX#update 1.458886 0.000000 1.458886 ( 1.465914) + Digest::CRC32XFER#update 1.422109 0.000000 1.422109 ( 1.427635) + Digest::CRC64#update 3.283506 0.000000 3.283506 ( 3.303129) + Digest::CRC64Jones#update 3.297402 0.000000 3.297402 ( 3.317357) + Digest::CRC64XZ#update 3.278551 0.001875 3.280426 ( 3.315165) + + +### TruffleRuby 21.2.0 (C extensions) + + $ bundle exec rake build:c_exts + ... + $ bundle exec ./benchmarks.rb + Loading Digest::CRC classes ... + Generating 1000 8Kb lengthed strings ... + Benchmarking Digest::CRC classes ... 
+ user system total real + Digest::CRC1#update 0.480586 0.000014 0.480600 ( 0.482817) + Digest::CRC5#update 0.023795 0.000000 0.023795 ( 0.023941) + Digest::CRC8#update 0.020619 0.000000 0.020619 ( 0.020747) + Digest::CRC8_1Wire#update 0.020571 0.000000 0.020571 ( 0.020700) + Digest::CRC15#update 0.031224 0.000000 0.031224 ( 0.031412) + Digest::CRC16#update 0.024013 0.000000 0.024013 ( 0.024174) + Digest::CRC16CCITT#update 0.026790 0.000000 0.026790 ( 0.027079) + Digest::CRC16DNP#update 0.024253 0.000000 0.024253 ( 0.024427) + Digest::CRC16Genibus#update 0.027237 0.000000 0.027237 ( 0.027390) + Digest::CRC16Modbus#update 0.024376 0.000000 0.024376 ( 0.024548) + Digest::CRC16QT#update 0.024361 0.000000 0.024361 ( 0.024518) + Digest::CRC16USB#update 0.024142 0.000000 0.024142 ( 0.024311) + Digest::CRC16X25#update 0.024098 0.000000 0.024098 ( 0.024222) + Digest::CRC16XModem#update 0.026306 0.000000 0.026306 ( 0.026502) + Digest::CRC16ZModem#update 0.026536 0.000000 0.026536 ( 0.026688) + Digest::CRC24#update 0.029732 0.000000 0.029732 ( 0.029902) + Digest::CRC32#update 0.024219 0.000000 0.024219 ( 0.024391) + Digest::CRC32BZip2#update 0.026817 0.000000 0.026817 ( 0.027044) + Digest::CRC32c#update 0.023681 0.000000 0.023681 ( 0.023798) + Digest::CRC32Jam#update 0.024243 0.000000 0.024243 ( 0.024419) + Digest::CRC32MPEG#update 0.026865 0.000000 0.026865 ( 0.027020) + Digest::CRC32POSIX#update 0.026583 0.000000 0.026583 ( 0.026748) + Digest::CRC32XFER#update 0.027423 0.000000 0.027423 ( 0.027615) + Digest::CRC64#update 0.024150 0.000000 0.024150 ( 0.024310) + Digest::CRC64Jones#update 0.024218 0.000000 0.024218 ( 0.024363) + Digest::CRC64XZ#update 0.024124 0.000000 0.024124 ( 0.024255) + +## Crystal + +[crystal-crc] is a [Crystal][crystal-lang] port of this library. + +[crystal-crc]: https://github.com/postmodern/crystal-crc +[crystal-lang]: https://www.crystal-lang.org/ + +## Thanks + +Special thanks go out to the [pycrc](http://www.tty1.net/pycrc/) library +which is able to generate C source-code for all of the CRC algorithms, +including their CRC Tables. + +## License + +Copyright (c) 2010-2021 Hal Brodigan + +See {file:LICENSE.txt} for license information. 
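The pure-Ruby and C-extension rows above run the same table-driven algorithm; the roughly 40x gap on CRuby 3.0.2 (for example, `Digest::CRC16#update` drops from ~1.03s to ~0.024s for 1000 x 8Kb strings) comes from executing the per-byte loop in the interpreter rather than in native code. The following minimal, self-contained Ruby sketch shows that loop for the reflected CRC-16 variant implemented by `crc16.c` below; the `CRC16_TABLE` constant and `crc16` helper are illustrative names only, not the gem's internals:

    # Build the 256-entry lookup table for CRC-16 (poly 0x8005, reflected form 0xA001).
    CRC16_TABLE = Array.new(256) do |i|
      crc = i
      8.times do
        crc = (crc & 1).zero? ? (crc >> 1) : ((crc >> 1) ^ 0xA001)
      end
      crc
    end

    # The same per-byte update loop as crc16_update() in the C source, in pure Ruby:
    # index the table with the low byte of the running CRC, then shift and fold.
    def crc16(data, crc = 0)
      data.each_byte do |byte|
        crc = (CRC16_TABLE[(crc ^ byte) & 0xff] ^ (crc >> 8)) & 0xffff
      end
      crc
    end

    format('%04x', crc16('123456789')) #=> "bb3d" (the standard CRC-16/ARC check value)

Each C extension below swaps out exactly this per-byte `update` loop (via `rb_undef_method`/`rb_define_method`), using an equivalent static table compiled into the extension.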
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Rakefile b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Rakefile new file mode 100644 index 0000000..46df919 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/Rakefile @@ -0,0 +1,57 @@ +require 'rubygems' + +begin + require 'bundler/setup' +rescue LoadError => e + abort e.message +end + +require 'rake' +require 'rubygems/tasks' +Gem::Tasks.new + +namespace :build do + desc "Builds the C extensions" + task :c_exts do + Dir.chdir('ext/digest') { sh 'rake' } + end +end + +require 'rake/clean' +CLEAN.include('ext/digest/crc*/extconf.h') +CLEAN.include('ext/digest/crc*/Makefile') +CLEAN.include('ext/digest/crc*/*.o') +CLEAN.include('ext/digest/crc*/*.so') + +file 'spec/integration/docker/digest-crc.gem' do |t| + sh "gem build -o #{t.name} digest-crc.gemspec" +end + +require 'rspec/core/rake_task' +namespace :spec do + RSpec::Core::RakeTask.new(:pure) do |t| + t.exclude_pattern = 'spec/integration/*_spec.rb' + end + task :pure => :clean + + if RUBY_ENGINE == 'ruby' + RSpec::Core::RakeTask.new(:c_exts) do |t| + t.exclude_pattern = 'spec/integration/*_spec.rb' + end + task :c_exts => 'build:c_exts' + end + + RSpec::Core::RakeTask.new(:integration) do |t| + t.pattern = 'spec/integration/*_spec.rb' + end +end + +task :spec => 'spec:pure' +task :spec => 'spec:c_exts' if RUBY_ENGINE == 'ruby' + +task :test => :spec +task :default => :spec + +require 'yard' +YARD::Rake::YardocTask.new +task :doc => :yard diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/benchmarks.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/benchmarks.rb new file mode 100644 index 0000000..2ae878c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/benchmarks.rb @@ -0,0 +1,58 @@ +#!/usr/bin/env ruby +require 'benchmark' +$LOAD_PATH.unshift(File.expand_path('../ext',__FILE__)) +$LOAD_PATH.unshift(File.expand_path('../lib',__FILE__)) + +CRCs = { + 'crc1' => 'CRC1', + 'crc5' => 'CRC5', + 'crc8' => 'CRC8', + 'crc8_1wire' => 'CRC81Wire', + 'crc15' => 'CRC15', + 'crc16' => 'CRC16', + 'crc16_ccitt' => 'CRC16CCITT', + 'crc16_dnp' => 'CRC16DNP', + 'crc16_genibus' => 'CRC16Genibus', + 'crc16_modbus' => 'CRC16Modbus', + 'crc16_qt' => 'CRC16QT', + 'crc16_usb' => 'CRC16USB', + 'crc16_x_25' => 'CRC16X25', + 'crc16_xmodem' => 'CRC16XModem', + 'crc16_zmodem' => 'CRC16ZModem', + 'crc24' => 'CRC24', + 'crc32' => 'CRC32', + 'crc32_bzip2' => 'CRC32BZip2', + 'crc32c' => 'CRC32c', + 'crc32_jam' => 'CRC32Jam', + 'crc32_mpeg' => 'CRC32Mpeg', + 'crc32_posix' => 'CRC32POSIX', + 'crc32_xfer' => 'CRC32XFER', + 'crc64' => 'CRC64', + 'crc64_jones' => 'CRC64Jones', + 'crc64_xz' => 'CRC64XZ', +} + +puts "Loading Digest::CRC classes ..." +CRCs.each_key { |crc| require "digest/#{crc}" } + +N = 1000 +BLOCK_SIZE = 8 * 1024 + +puts "Generating #{N} #{BLOCK_SIZE / 1024}Kb lengthed strings ..." +SAMPLES = Array.new(N) do + Array.new(BLOCK_SIZE) { rand(256).chr }.join +end + +puts "Benchmarking Digest::CRC classes ..." 
+Benchmark.bm(27) do |b| + CRCs.each_value do |crc| + crc_class = Digest.const_get(crc) + crc = crc_class.new + + b.report("#{crc_class}#update") do + SAMPLES.each do |sample| + crc.update(sample) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/digest-crc.gemspec b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/digest-crc.gemspec new file mode 100644 index 0000000..dec089e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/digest-crc.gemspec @@ -0,0 +1,61 @@ +# encoding: utf-8 + +require 'yaml' + +Gem::Specification.new do |gem| + gemspec = YAML.load_file('gemspec.yml') + + gem.name = gemspec.fetch('name') + gem.version = gemspec.fetch('version') do + lib_dir = File.join(File.dirname(__FILE__),'lib') + $LOAD_PATH << lib_dir unless $LOAD_PATH.include?(lib_dir) + + require 'digest/crc/version' + Digest::CRC::VERSION + end + + gem.summary = gemspec['summary'] + gem.description = gemspec['description'] + gem.licenses = Array(gemspec['license']) + gem.authors = Array(gemspec['authors']) + gem.email = gemspec['email'] + gem.homepage = gemspec['homepage'] + gem.metadata = gemspec['metadata'] if gemspec['metadata'] + + glob = lambda { |patterns| gem.files & Dir[*patterns] } + + gem.files = `git ls-files`.split($/) + gem.files = glob[gemspec['files']] if gemspec['files'] + + gem.executables = gemspec.fetch('executables') do + glob['bin/*'].map { |path| File.basename(path) } + end + gem.default_executable = gem.executables.first if Gem::VERSION < '1.7.' + + gem.extensions = glob[gemspec['extensions'] || 'ext/**/extconf.rb'] + gem.test_files = glob[gemspec['test_files'] || '{test/{**/}*_test.rb'] + gem.extra_rdoc_files = glob[gemspec['extra_doc_files'] || '*.{txt,md}'] + + gem.require_paths = Array(gemspec.fetch('require_paths') { + %w[ext lib].select { |dir| File.directory?(dir) } + }) + + gem.requirements = gemspec['requirements'] + gem.required_ruby_version = gemspec['required_ruby_version'] + gem.required_rubygems_version = gemspec['required_rubygems_version'] + gem.post_install_message = gemspec['post_install_message'] + + split = lambda { |string| string.split(/,\s*/) } + + if gemspec['dependencies'] + gemspec['dependencies'].each do |name,versions| + gem.add_dependency(name,split[versions]) + end + end + + if gemspec['development_dependencies'] + gemspec['development_dependencies'].each do |name,versions| + gem.add_development_dependency(name,split[versions]) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/Rakefile b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/Rakefile new file mode 100644 index 0000000..0486c1f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/Rakefile @@ -0,0 +1,46 @@ +def fail_gracefully(message) + warn "#{message}. Failing gracefully ..." 
+  exit
+end
+
+unless RUBY_ENGINE == 'ruby'
+  fail_gracefully "C extensions for #{RUBY_ENGINE} currently not supported"
+end
+
+begin
+  require "mkmf"
+rescue LoadError
+  fail_gracefully "mkmf is not installed"
+end
+
+CRCS = Dir['crc*']
+DLEXT = MakeMakefile::CONFIG['DLEXT']
+
+CRCS.each do |crc|
+  crc_ext = "#{crc}_ext"
+
+  file "#{crc}/Makefile" => "#{crc}/extconf.rb" do
+    Dir.chdir(crc) do
+      begin
+        ruby '-S', 'extconf.rb'
+      rescue
+        fail_gracefully "extconf.rb failed"
+      end
+    end
+  end
+
+  crc_ext_lib = "#{crc}_ext.#{DLEXT}"
+
+  file "#{crc}/#{crc_ext_lib}" => "#{crc}/Makefile" do
+    Dir.chdir(crc) do
+      begin
+        sh 'make', 'clean'
+        sh 'make'
+      rescue
+        fail_gracefully "Unable to build C extensions"
+      end
+    end
+  end
+
+  task :default => "#{crc}/#{crc_ext_lib}"
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/compat/ruby.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/compat/ruby.h
new file mode 100644
index 0000000..8b82939
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/compat/ruby.h
@@ -0,0 +1,11 @@
+#ifndef _DIGEST_COMPAT_RUBY_H_
+#define _DIGEST_COMPAT_RUBY_H_
+
+#include <ruby.h>
+
+// HACK: define USHORT2NUM for Ruby < 2.6
+#ifndef USHORT2NUM
+#define USHORT2NUM(x) RB_INT2FIX(x)
+#endif
+
+#endif
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.c
new file mode 100644
index 0000000..983229d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.c
@@ -0,0 +1,68 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:30:42 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 12
+ *  - Poly = 0x80f
+ *  - XorIn = 0x000
+ *  - ReflectIn = False
+ *  - XorOut = 0x000
+ *  - ReflectOut = True
+ *  - Algorithm = table-driven
+ */
+
+#include "crc12_3gpp.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */
+static const crc12_t crc12_table[256] = {
+    0x000, 0x80f, 0x811, 0x01e, 0x82d, 0x022, 0x03c, 0x833, 0x855, 0x05a, 0x044, 0x84b, 0x078, 0x877, 0x869, 0x066,
+    0x8a5, 0x0aa, 0x0b4, 0x8bb, 0x088, 0x887, 0x899, 0x096, 0x0f0, 0x8ff, 0x8e1, 0x0ee, 0x8dd, 0x0d2, 0x0cc, 0x8c3,
+    0x945, 0x14a, 0x154, 0x95b, 0x168, 0x967, 0x979, 0x176, 0x110, 0x91f, 0x901, 0x10e, 0x93d, 0x132, 0x12c, 0x923,
+    0x1e0, 0x9ef, 0x9f1, 0x1fe, 0x9cd, 0x1c2, 0x1dc, 0x9d3, 0x9b5, 0x1ba, 0x1a4, 0x9ab, 0x198, 0x997, 0x989, 0x186,
+    0xa85, 0x28a, 0x294, 0xa9b, 0x2a8, 0xaa7, 0xab9, 0x2b6, 0x2d0, 0xadf, 0xac1, 0x2ce, 0xafd, 0x2f2, 0x2ec, 0xae3,
+    0x220, 0xa2f, 0xa31, 0x23e, 0xa0d, 0x202, 0x21c, 0xa13, 0xa75, 0x27a, 0x264, 0xa6b, 0x258, 0xa57, 0xa49, 0x246,
+    0x3c0, 0xbcf, 0xbd1, 0x3de, 0xbed, 0x3e2, 0x3fc, 0xbf3, 0xb95, 0x39a, 0x384, 0xb8b, 0x3b8, 0xbb7, 0xba9, 0x3a6,
+    0xb65, 0x36a, 0x374, 0xb7b, 0x348, 0xb47, 0xb59, 0x356, 0x330, 0xb3f, 0xb21, 0x32e, 0xb1d, 0x312, 0x30c, 0xb03,
+    0xd05, 0x50a, 0x514, 0xd1b, 0x528, 0xd27, 0xd39, 0x536, 0x550, 0xd5f, 0xd41, 0x54e, 0xd7d, 0x572, 0x56c, 0xd63,
+    0x5a0, 0xdaf, 0xdb1, 0x5be, 0xd8d, 0x582, 0x59c, 0xd93, 0xdf5, 0x5fa, 0x5e4, 0xdeb, 0x5d8, 0xdd7, 0xdc9, 0x5c6,
+    0x440, 0xc4f, 0xc51, 0x45e, 0xc6d, 0x462, 0x47c, 0xc73, 0xc15, 0x41a, 0x404, 0xc0b, 0x438, 0xc37, 0xc29, 0x426,
+    0xce5, 0x4ea, 0x4f4, 0xcfb, 0x4c8, 0xcc7, 0xcd9, 0x4d6, 0x4b0, 0xcbf, 0xca1, 0x4ae, 0xc9d, 0x492, 0x48c, 0xc83,
+    0x780, 0xf8f, 0xf91, 0x79e, 0xfad, 0x7a2, 0x7bc, 0xfb3, 0xfd5, 0x7da, 0x7c4, 0xfcb, 0x7f8, 0xff7, 0xfe9, 0x7e6,
+    0xf25, 0x72a, 0x734, 0xf3b, 0x708, 0xf07, 0xf19, 0x716, 0x770, 0xf7f, 0xf61, 0x76e, 0xf5d, 0x752, 0x74c, 0xf43,
+    0xec5, 0x6ca, 0x6d4, 0xedb, 0x6e8, 0xee7, 0xef9, 0x6f6, 0x690, 0xe9f, 0xe81, 0x68e, 0xebd, 0x6b2, 0x6ac, 0xea3,
+    0x660, 0xe6f, 0xe71, 0x67e, 0xe4d, 0x642, 0x65c, 0xe53, 0xe35, 0x63a, 0x624, 0xe2b, 0x618, 0xe17, 0xe09, 0x606
+};
+
+crc12_t crc_reflect(crc12_t data, size_t data_len)
+{
+    unsigned int i;
+    crc12_t ret = data & 0x01;
+
+    for (i = 1; i < data_len; i++)
+    {
+        data >>= 1;
+        ret = (ret << 1) | (data & 0x01);
+    }
+
+    return ret;
+}
+
+crc12_t crc12_3gpp_update(crc12_t crc, const void *data, size_t data_len)
+{
+    const unsigned char *d = (const unsigned char *)data;
+    unsigned int tbl_idx;
+
+    while (data_len--)
+    {
+        tbl_idx = ((crc >> 4) ^ *d) & 0xff;
+        crc = (crc12_table[tbl_idx] ^ (crc << 8)) & 0xfff;
+        d++;
+    }
+
+    return crc & 0xfff;
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.h
new file mode 100644
index 0000000..51ee585
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp.h
@@ -0,0 +1,11 @@
+#ifndef _CRC12_3GPP_H_
+#define _CRC12_3GPP_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef uint16_t crc12_t;
+
+crc12_t crc12_3gpp_update(crc12_t crc, const void *data, size_t data_len);
+
+#endif
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.bundle
new file mode 100644
index 0000000..e5b9667
Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.bundle differ
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.c
new file mode 100644
index 0000000..019f6e6
--- /dev/null
+++
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/crc12_3gpp_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc12_3gpp.h" + +VALUE Digest_CRC12_3GPP_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc12_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc12_3gpp_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc12_3gpp_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC12_3GPP = rb_const_get(mDigest, rb_intern("CRC12_3GPP")); + + rb_undef_method(cCRC12_3GPP, "update"); + rb_define_method(cCRC12_3GPP, "update", Digest_CRC12_3GPP_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/extconf.rb new file mode 100644 index 0000000..f940301 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc12_3gpp/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc12_3gpp_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.c new file mode 100644 index 0000000..1c0ea11 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.c @@ -0,0 +1,54 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:30:57 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 15 + * - Poly = 0x4599 + * - XorIn = 0x0000 + * - ReflectIn = False + * - XorOut = 0x0000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc15.h" + +/** + * Static table used for the table_driven implementation. 
+ */
+static const crc15_t crc15_table[256] = {
+    0x0000, 0x4599, 0x4eab, 0x0b32, 0x58cf, 0x1d56, 0x1664, 0x53fd, 0x7407, 0x319e, 0x3aac, 0x7f35, 0x2cc8, 0x6951, 0x6263, 0x27fa,
+    0x2d97, 0x680e, 0x633c, 0x26a5, 0x7558, 0x30c1, 0x3bf3, 0x7e6a, 0x5990, 0x1c09, 0x173b, 0x52a2, 0x015f, 0x44c6, 0x4ff4, 0x0a6d,
+    0x5b2e, 0x1eb7, 0x1585, 0x501c, 0x03e1, 0x4678, 0x4d4a, 0x08d3, 0x2f29, 0x6ab0, 0x6182, 0x241b, 0x77e6, 0x327f, 0x394d, 0x7cd4,
+    0x76b9, 0x3320, 0x3812, 0x7d8b, 0x2e76, 0x6bef, 0x60dd, 0x2544, 0x02be, 0x4727, 0x4c15, 0x098c, 0x5a71, 0x1fe8, 0x14da, 0x5143,
+    0x73c5, 0x365c, 0x3d6e, 0x78f7, 0x2b0a, 0x6e93, 0x65a1, 0x2038, 0x07c2, 0x425b, 0x4969, 0x0cf0, 0x5f0d, 0x1a94, 0x11a6, 0x543f,
+    0x5e52, 0x1bcb, 0x10f9, 0x5560, 0x069d, 0x4304, 0x4836, 0x0daf, 0x2a55, 0x6fcc, 0x64fe, 0x2167, 0x729a, 0x3703, 0x3c31, 0x79a8,
+    0x28eb, 0x6d72, 0x6640, 0x23d9, 0x7024, 0x35bd, 0x3e8f, 0x7b16, 0x5cec, 0x1975, 0x1247, 0x57de, 0x0423, 0x41ba, 0x4a88, 0x0f11,
+    0x057c, 0x40e5, 0x4bd7, 0x0e4e, 0x5db3, 0x182a, 0x1318, 0x5681, 0x717b, 0x34e2, 0x3fd0, 0x7a49, 0x29b4, 0x6c2d, 0x671f, 0x2286,
+    0x2213, 0x678a, 0x6cb8, 0x2921, 0x7adc, 0x3f45, 0x3477, 0x71ee, 0x5614, 0x138d, 0x18bf, 0x5d26, 0x0edb, 0x4b42, 0x4070, 0x05e9,
+    0x0f84, 0x4a1d, 0x412f, 0x04b6, 0x574b, 0x12d2, 0x19e0, 0x5c79, 0x7b83, 0x3e1a, 0x3528, 0x70b1, 0x234c, 0x66d5, 0x6de7, 0x287e,
+    0x793d, 0x3ca4, 0x3796, 0x720f, 0x21f2, 0x646b, 0x6f59, 0x2ac0, 0x0d3a, 0x48a3, 0x4391, 0x0608, 0x55f5, 0x106c, 0x1b5e, 0x5ec7,
+    0x54aa, 0x1133, 0x1a01, 0x5f98, 0x0c65, 0x49fc, 0x42ce, 0x0757, 0x20ad, 0x6534, 0x6e06, 0x2b9f, 0x7862, 0x3dfb, 0x36c9, 0x7350,
+    0x51d6, 0x144f, 0x1f7d, 0x5ae4, 0x0919, 0x4c80, 0x47b2, 0x022b, 0x25d1, 0x6048, 0x6b7a, 0x2ee3, 0x7d1e, 0x3887, 0x33b5, 0x762c,
+    0x7c41, 0x39d8, 0x32ea, 0x7773, 0x248e, 0x6117, 0x6a25, 0x2fbc, 0x0846, 0x4ddf, 0x46ed, 0x0374, 0x5089, 0x1510, 0x1e22, 0x5bbb,
+    0x0af8, 0x4f61, 0x4453, 0x01ca, 0x5237, 0x17ae, 0x1c9c, 0x5905, 0x7eff, 0x3b66, 0x3054, 0x75cd, 0x2630, 0x63a9, 0x689b, 0x2d02,
+    0x276f, 0x62f6, 0x69c4, 0x2c5d, 0x7fa0, 0x3a39, 0x310b, 0x7492, 0x5368, 0x16f1, 0x1dc3, 0x585a, 0x0ba7, 0x4e3e, 0x450c, 0x0095
+};
+
+crc15_t crc15_update(crc15_t crc, const void *data, size_t data_len)
+{
+    const unsigned char *d = (const unsigned char *)data;
+    unsigned int tbl_idx;
+
+    while (data_len--)
+    {
+        tbl_idx = ((crc >> 7) ^ *d) & 0xff;
+        crc = (crc15_table[tbl_idx] ^ (crc << 8)) & 0x7fff;
+        d++;
+    }
+
+    return crc & 0x7fff;
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.h
new file mode 100644
index 0000000..c0f9260
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15.h
@@ -0,0 +1,11 @@
+#ifndef _CRC15_H_
+#define _CRC15_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef uint16_t crc15_t;
+
+crc15_t crc15_update(crc15_t crc, const void *data, size_t data_len);
+
+#endif
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.bundle
new file mode 100644
index 0000000..8c4a6fb
Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.bundle differ
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.c
new file mode 100644
index 0000000..852c6a3
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/crc15_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc15.h"
+
+VALUE Digest_CRC15_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc15_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc15_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc15_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC15 = rb_const_get(mDigest, rb_intern("CRC15"));
+
+  rb_undef_method(cCRC15, "update");
+  rb_define_method(cCRC15, "update", Digest_CRC15_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/extconf.rb
new file mode 100644
index 0000000..3114fe0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc15/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc15_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.c
new file mode 100644
index 0000000..3378cca
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.c
@@ -0,0 +1,70 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:01:54 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 16
+ *  - Poly = 0x8005
+ *  - XorIn = 0x0000
+ *  - ReflectIn = True
+ *  - XorOut = 0x0000
+ *  - ReflectOut = True
+ *  - Algorithm = table-driven
+ */
+
+#include "crc16.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */
+static const crc16_t crc16_table[256] = {
+    0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
+    0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
+    0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
+    0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
+    0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
+    0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
+    0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
+    0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
+    0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
+    0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
+    0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
+    0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
+    0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
+    0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
+    0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
+    0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
+    0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
+    0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
+    0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
+    0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
+    0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
+    0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
+    0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
+    0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
+    0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
+    0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
+    0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
+    0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
+    0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
+    0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
+    0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
+    0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040
+};
+
+crc16_t crc16_update(crc16_t crc, const void *data, size_t data_len)
+{
+    const unsigned char *d = (const unsigned char *)data;
+    unsigned int tbl_idx;
+
+    while (data_len--)
+    {
+        tbl_idx = (crc ^ *d) & 0xff;
+        crc = (crc16_table[tbl_idx] ^ (crc >> 8)) & 0xffff;
+        d++;
+    }
+
+    return crc & 0xffff;
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.h
new file mode 100644
index 0000000..521e8ba
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16.h
@@ -0,0 +1,11 @@
+#ifndef _CRC16_H_
+#define _CRC16_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+typedef uint16_t crc16_t;
+
+crc16_t crc16_update(crc16_t crc, const void *data, size_t data_len);
+
+#endif
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.bundle
new file mode 100644
index 0000000..002671a
Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.bundle differ
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.c
new file mode 100644
index 0000000..db83afa
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/crc16_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc16.h"
+
+VALUE Digest_CRC16_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc16_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc16_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc16_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC16 = rb_const_get(mDigest, rb_intern("CRC16"));
+
+  rb_undef_method(cCRC16, "update");
+  rb_define_method(cCRC16, "update", Digest_CRC16_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/extconf.rb
new file mode 100644
index 0000000..602e58a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc16_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.c
new file mode 100644
index 0000000..3c245cc
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.c
@@ -0,0 +1,70 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:04:06 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 16
+ *  - Poly = 0x1021
+ *  - XorIn = 0x1d0f
+ *  - ReflectIn = False
+ *  - XorOut = 0x0000
+ *  - ReflectOut = False
+ *  - Algorithm = table-driven
+ */
+
+#include "crc16_ccitt.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc16_ccitt_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +crc16_t crc16_ccitt_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 8) ^ *d) & 0xff; + crc = (crc16_ccitt_table[tbl_idx] ^ (crc << 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.h new file mode 100644 index 0000000..c9d7950 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_CCITT_H_ +#define _CRC16_CCITT_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_ccitt_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.bundle new file mode 100644 index 0000000..4ac03ac Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.c
new file mode 100644
index 0000000..5260d7e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/crc16_ccitt_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc16_ccitt.h"
+
+VALUE Digest_CRC16CCITT_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc16_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc16_ccitt_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc16_ccitt_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC16CCITT = rb_const_get(mDigest, rb_intern("CRC16CCITT"));
+
+  rb_undef_method(cCRC16CCITT, "update");
+  rb_define_method(cCRC16CCITT, "update", Digest_CRC16CCITT_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/extconf.rb
new file mode 100644
index 0000000..7c21c2c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_ccitt/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc16_ccitt_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.c
new file mode 100644
index 0000000..240e62c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.c
@@ -0,0 +1,54 @@
+#include "crc16_dnp.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */
+static const crc16_t crc16_dnp_table[256] = {
+    0x0000, 0x365e, 0x6cbc, 0x5ae2, 0xd978, 0xef26, 0xb5c4, 0x839a,
+    0xff89, 0xc9d7, 0x9335, 0xa56b, 0x26f1, 0x10af, 0x4a4d, 0x7c13,
+    0xb26b, 0x8435, 0xded7, 0xe889, 0x6b13, 0x5d4d, 0x07af, 0x31f1,
+    0x4de2, 0x7bbc, 0x215e, 0x1700, 0x949a, 0xa2c4, 0xf826, 0xce78,
+    0x29af, 0x1ff1, 0x4513, 0x734d, 0xf0d7, 0xc689, 0x9c6b, 0xaa35,
+    0xd626, 0xe078, 0xba9a, 0x8cc4, 0x0f5e, 0x3900, 0x63e2, 0x55bc,
+    0x9bc4, 0xad9a, 0xf778, 0xc126, 0x42bc, 0x74e2, 0x2e00, 0x185e,
+    0x644d, 0x5213, 0x08f1, 0x3eaf, 0xbd35, 0x8b6b, 0xd189, 0xe7d7,
+    0x535e, 0x6500, 0x3fe2, 0x09bc, 0x8a26, 0xbc78, 0xe69a, 0xd0c4,
+    0xacd7, 0x9a89, 0xc06b, 0xf635, 0x75af, 0x43f1, 0x1913, 0x2f4d,
+    0xe135, 0xd76b, 0x8d89, 0xbbd7, 0x384d, 0x0e13, 0x54f1, 0x62af,
+    0x1ebc, 0x28e2, 0x7200, 0x445e, 0xc7c4, 0xf19a, 0xab78, 0x9d26,
+    0x7af1, 0x4caf, 0x164d, 0x2013, 0xa389, 0x95d7, 0xcf35, 0xf96b,
+    0x8578, 0xb326, 0xe9c4, 0xdf9a, 0x5c00, 0x6a5e, 0x30bc, 0x06e2,
+    0xc89a, 0xfec4, 0xa426, 0x9278, 0x11e2, 0x27bc, 0x7d5e, 0x4b00,
+    0x3713, 0x014d, 0x5baf, 0x6df1, 0xee6b, 0xd835, 0x82d7, 0xb489,
+    0xa6bc, 0x90e2, 0xca00, 0xfc5e, 0x7fc4, 0x499a, 0x1378, 0x2526,
+    0x5935, 0x6f6b, 0x3589, 0x03d7, 0x804d, 0xb613, 0xecf1, 0xdaaf,
+    0x14d7, 0x2289, 0x786b, 0x4e35, 0xcdaf, 0xfbf1, 0xa113, 0x974d,
+    0xeb5e, 0xdd00, 0x87e2, 0xb1bc, 0x3226, 0x0478, 0x5e9a, 0x68c4,
+    0x8f13, 0xb94d, 0xe3af, 0xd5f1, 0x566b, 0x6035, 0x3ad7, 0x0c89,
+    0x709a, 0x46c4, 0x1c26, 0x2a78, 0xa9e2, 0x9fbc, 0xc55e, 0xf300,
+    0x3d78, 0x0b26, 0x51c4, 0x679a, 0xe400, 0xd25e, 0x88bc, 0xbee2,
+    0xc2f1, 0xf4af, 0xae4d, 0x9813, 0x1b89, 0x2dd7, 0x7735, 0x416b,
+    0xf5e2, 0xc3bc, 0x995e, 0xaf00, 0x2c9a, 0x1ac4, 0x4026, 0x7678,
+    0x0a6b, 0x3c35, 0x66d7, 0x5089, 0xd313, 0xe54d, 0xbfaf, 0x89f1,
+    0x4789, 0x71d7, 0x2b35, 0x1d6b, 0x9ef1, 0xa8af, 0xf24d, 0xc413,
+    0xb800, 0x8e5e, 0xd4bc, 0xe2e2, 0x6178, 0x5726, 0x0dc4, 0x3b9a,
+    0xdc4d, 0xea13, 0xb0f1, 0x86af, 0x0535, 0x336b, 0x6989, 0x5fd7,
+    0x23c4, 0x159a, 0x4f78, 0x7926, 0xfabc, 0xcce2, 0x9600, 0xa05e,
+    0x6e26, 0x5878, 0x029a, 0x34c4, 0xb75e, 0x8100, 0xdbe2, 0xedbc,
+    0x91af, 0xa7f1, 0xfd13, 0xcb4d, 0x48d7, 0x7e89, 0x246b, 0x1235
+};
+
+crc16_t crc16_dnp_update(crc16_t crc, const void *data, size_t data_len)
+{
+    const unsigned char *d = (const unsigned char *)data;
+    unsigned int tbl_idx;
+
+    while (data_len--)
+    {
+        tbl_idx = (crc ^ *d) & 0xff;
+        crc = (crc << 8) ^ crc16_dnp_table[tbl_idx];
+        d++;
+    }
+
+    return crc & 0xffff;
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.h
new file mode 100644
index 0000000..7265113
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp.h
@@ -0,0 +1,8 @@
+#ifndef _CRC16_DNP_H_
+#define _CRC16_DNP_H_
+
+#include "../crc16/crc16.h"
+
+crc16_t crc16_dnp_update(crc16_t crc, const void *data, size_t data_len);
+
+#endif
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.bundle
new file mode 100644
index 0000000..403d43b
Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.bundle differ
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.c
new file mode 100644
index 0000000..9dc548a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/crc16_dnp_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc16_dnp.h"
+
+VALUE Digest_CRC16DNP_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc16_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc16_dnp_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc16_dnp_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC16DNP = rb_const_get(mDigest, rb_intern("CRC16DNP"));
+
+  rb_undef_method(cCRC16DNP, "update");
+  rb_define_method(cCRC16DNP, "update", Digest_CRC16DNP_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/extconf.rb
new file mode 100644
index 0000000..8df65cb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_dnp/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc16_dnp_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.c
new file mode 100644
index 0000000..866a311
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.c
@@ -0,0 +1,70 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:31:25 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 16
+ *  - Poly = 0x1021
+ *  - XorIn = 0xffff
+ *  - ReflectIn = False
+ *  - XorOut = 0xffff
+ *  - ReflectOut = False
+ *  - Algorithm = table-driven
+ */
+
+#include "crc16_genibus.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc16_genibus_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +crc16_t crc16_genibus_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 8) ^ *d) & 0xff; + crc = (crc16_genibus_table[tbl_idx] ^ (crc << 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.h new file mode 100644 index 0000000..74d5f54 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_GENIBUS_H_ +#define _CRC16_GENIBUS_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_genibus_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.bundle new file mode 100644 index 0000000..4216775 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.c
new file mode 100644
index 0000000..6a306b5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/crc16_genibus_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc16_genibus.h"
+
+VALUE Digest_CRC16Genibus_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc16_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc16_genibus_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc16_genibus_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC16Genibus = rb_const_get(mDigest, rb_intern("CRC16Genibus"));
+
+  rb_undef_method(cCRC16Genibus, "update");
+  rb_define_method(cCRC16Genibus, "update", Digest_CRC16Genibus_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/extconf.rb
new file mode 100644
index 0000000..627aaeb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_genibus/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc16_genibus_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.c
new file mode 100644
index 0000000..2828671
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.c
@@ -0,0 +1,70 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:31:50 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 16
+ *  - Poly = 0x1021
+ *  - XorIn = 0x0000
+ *  - ReflectIn = True
+ *  - XorOut = 0x0000
+ *  - ReflectOut = True
+ *  - Algorithm = table-driven
+ */
+
+#include "crc16_kermit.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +crc16_t crc16_kermit_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.h new file mode 100644 index 0000000..41cffb7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_KERMIT_H_ +#define _CRC16_KERMIT_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_kermit_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.bundle new file mode 100644 index 0000000..eb5e8b3 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.c
new file mode 100644
index 0000000..6d00cae
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/crc16_kermit_ext.c
@@ -0,0 +1,29 @@
+#include <ruby.h>
+#include "../compat/ruby.h"
+
+#include "extconf.h"
+#include "crc16_kermit.h"
+
+VALUE Digest_CRC16Kermit_update(VALUE self, VALUE data)
+{
+  VALUE crc_ivar_name = rb_intern("@crc");
+  VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name);
+  crc16_t crc = NUM2USHORT(crc_ivar);
+
+  const char *data_ptr = StringValuePtr(data);
+  size_t length = RSTRING_LEN(data);
+
+  crc = crc16_kermit_update(crc,data_ptr,length);
+
+  rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc));
+  return self;
+}
+
+void Init_crc16_kermit_ext()
+{
+  VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest"));
+  VALUE cCRC16Kermit = rb_const_get(mDigest, rb_intern("CRC16Kermit"));
+
+  rb_undef_method(cCRC16Kermit, "update");
+  rb_define_method(cCRC16Kermit, "update", Digest_CRC16Kermit_update, 1);
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/extconf.rb
new file mode 100644
index 0000000..6d0f907
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_kermit/extconf.rb
@@ -0,0 +1,7 @@
+require 'mkmf'
+
+have_header("stdint.h")
+have_header('stddef.h')
+
+create_header
+create_makefile "crc16_kermit_ext"
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.c
new file mode 100644
index 0000000..1830c59
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.c
@@ -0,0 +1,70 @@
+/**
+ * \file
+ * Functions and types for CRC checks.
+ *
+ * Generated on Sat Feb 29 02:04:54 2020
+ * by pycrc v0.9.2, https://pycrc.org
+ * using the configuration:
+ *  - Width = 16
+ *  - Poly = 0x8005
+ *  - XorIn = 0xffff
+ *  - ReflectIn = True
+ *  - XorOut = 0x0000
+ *  - ReflectOut = True
+ *  - Algorithm = table-driven
+ */
+
+#include "crc16_modbus.h"
+
+/**
+ * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc_table[256] = { + 0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241, + 0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440, + 0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40, + 0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841, + 0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40, + 0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41, + 0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641, + 0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040, + 0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240, + 0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441, + 0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41, + 0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840, + 0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41, + 0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40, + 0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640, + 0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041, + 0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240, + 0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441, + 0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41, + 0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840, + 0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41, + 0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40, + 0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640, + 0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041, + 0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241, + 0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440, + 0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40, + 0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841, + 0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40, + 0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41, + 0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641, + 0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040 +}; + +crc16_t crc16_modbus_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.h new file mode 100644 index 0000000..045058e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_MODBUS_H_ +#define _CRC16_MODBUS_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_modbus_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.bundle new file mode 100644 index 0000000..9cbfa12 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.c new file mode 100644 index 0000000..82165e0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/crc16_modbus_ext.c @@ -0,0 +1,29 @@ +#include <ruby.h> +#include "../compat/ruby.h" + +#include "extconf.h" +#include "crc16_modbus.h" + +VALUE Digest_CRC16Modbus_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc16_t crc = NUM2USHORT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc16_modbus_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc)); + return self; +} + +void Init_crc16_modbus_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC16Modbus = rb_const_get(mDigest, rb_intern("CRC16Modbus")); + + rb_undef_method(cCRC16Modbus, "update"); + rb_define_method(cCRC16Modbus, "update", Digest_CRC16Modbus_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/extconf.rb new file mode 100644 index 0000000..8373439 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_modbus/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc16_modbus_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.c new file mode 100644 index 0000000..b6561a8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:05:41 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 16 + * - Poly = 0x8005 + * - XorIn = 0xffff + * - ReflectIn = True + * - XorOut = 0xffff + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc16_usb.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc_table[256] = { + 0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241, + 0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440, + 0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40, + 0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841, + 0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40, + 0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41, + 0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641, + 0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040, + 0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240, + 0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441, + 0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41, + 0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840, + 0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41, + 0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40, + 0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640, + 0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041, + 0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240, + 0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441, + 0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41, + 0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840, + 0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41, + 0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40, + 0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640, + 0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041, + 0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241, + 0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440, + 0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40, + 0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841, + 0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40, + 0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41, + 0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641, + 0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040 +}; + +crc16_t crc16_usb_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.h new file mode 100644 index 0000000..f3c47e6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_USB_H_ +#define _CRC16_USB_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_usb_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.bundle new file mode 100644 index 0000000..3b9ebf1 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.c new file mode 100644 index 
0000000..002eaa9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/crc16_usb_ext.c @@ -0,0 +1,29 @@ +#include <ruby.h> +#include "../compat/ruby.h" + +#include "extconf.h" +#include "crc16_usb.h" + +VALUE Digest_CRC16USB_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc16_t crc = NUM2USHORT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc16_usb_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc)); + return self; +} + +void Init_crc16_usb_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC16USB = rb_const_get(mDigest, rb_intern("CRC16USB")); + + rb_undef_method(cCRC16USB, "update"); + rb_define_method(cCRC16USB, "update", Digest_CRC16USB_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/extconf.rb new file mode 100644 index 0000000..6aa74d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_usb/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc16_usb_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.c new file mode 100644 index 0000000..9c3b8bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:32:07 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 16 + * - Poly = 0x1021 + * - XorIn = 0xffff + * - ReflectIn = True + * - XorOut = 0xffff + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc16_x_25.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +crc16_t crc16_x_25_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.h new file mode 100644 index 0000000..73b6831 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_X_25_H_ +#define _CRC16_X_25_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_x_25_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.bundle new file mode 100644 index 0000000..e317214 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.c new file mode 100644 
index 0000000..2efe121 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/crc16_x_25_ext.c @@ -0,0 +1,29 @@ +#include <ruby.h> +#include "../compat/ruby.h" + +#include "extconf.h" +#include "crc16_x_25.h" + +VALUE Digest_CRC16X25_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc16_t crc = NUM2USHORT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc16_x_25_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc)); + return self; +} + +void Init_crc16_x_25_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC16X25 = rb_const_get(mDigest, rb_intern("CRC16X25")); + + rb_undef_method(cCRC16X25, "update"); + rb_define_method(cCRC16X25, "update", Digest_CRC16X25_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/extconf.rb new file mode 100644 index 0000000..4f775db --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_x_25/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc16_x_25_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.c new file mode 100644 index 0000000..f797a10 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:06:05 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 16 + * - Poly = 0x1021 + * - XorIn = 0x0000 + * - ReflectIn = False + * - XorOut = 0x0000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc16_xmodem.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc16_xmodem_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +crc16_t crc16_xmodem_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 8) ^ *d) & 0xff; + crc = (crc16_xmodem_table[tbl_idx] ^ (crc << 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.h new file mode 100644 index 0000000..2b05430 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_XMODEM_H_ +#define _CRC16_XMODEM_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_xmodem_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.bundle new file mode 100644 index 0000000..cede21f Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.c new file mode 100644 index 0000000..e2e4a3c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/crc16_xmodem_ext.c @@ -0,0 +1,29 @@ +#include <ruby.h> +#include "../compat/ruby.h" + +#include "extconf.h" +#include "crc16_xmodem.h" + +VALUE Digest_CRC16XModem_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc16_t crc = NUM2USHORT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc16_xmodem_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc)); + return self; +} + +void Init_crc16_xmodem_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC16XModem = rb_const_get(mDigest, rb_intern("CRC16XModem")); + + rb_undef_method(cCRC16XModem, "update"); + rb_define_method(cCRC16XModem, "update", Digest_CRC16XModem_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/extconf.rb new file mode 100644 index 0000000..c290c5b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_xmodem/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc16_xmodem_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.c new file mode 100644 index 0000000..45c364b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:06:11 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 16 + * - Poly = 0x1021 + * - XorIn = 0x0000 + * - ReflectIn = False + * - XorOut = 0x0000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc16_zmodem.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc16_t crc16_zmodem_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +crc16_t crc16_zmodem_update(crc16_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 8) ^ *d) & 0xff; + crc = (crc16_zmodem_table[tbl_idx] ^ (crc << 8)) & 0xffff; + d++; + } + + return crc & 0xffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.h new file mode 100644 index 0000000..255c207 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem.h @@ -0,0 +1,8 @@ +#ifndef _CRC16_ZMODEM_H_ +#define _CRC16_ZMODEM_H_ + +#include "../crc16/crc16.h" + +crc16_t crc16_zmodem_update(crc16_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.bundle new file mode 100644 index 0000000..3734e52 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.c 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.c new file mode 100644 index 0000000..1a8c47f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/crc16_zmodem_ext.c @@ -0,0 +1,29 @@ +#include <ruby.h> +#include "../compat/ruby.h" + +#include "extconf.h" +#include "crc16_zmodem.h" + +VALUE Digest_CRC16ZModem_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc16_t crc = NUM2USHORT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc16_zmodem_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, USHORT2NUM(crc)); + return self; +} + +void Init_crc16_zmodem_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC16ZModem = rb_const_get(mDigest, rb_intern("CRC16ZModem")); + + rb_undef_method(cCRC16ZModem, "update"); + rb_define_method(cCRC16ZModem, "update", Digest_CRC16ZModem_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/extconf.rb new file mode 100644 index 0000000..bc2e4fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc16_zmodem/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc16_zmodem_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.c new file mode 100644 index 0000000..6c1c44e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:02:04 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 24 + * - Poly = 0x864cfb + * - XorIn = 0xb704ce + * - ReflectIn = False + * - XorOut = 0x000000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc24.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc24_t crc24_table[256] = { + 0x000000, 0x864cfb, 0x8ad50d, 0x0c99f6, 0x93e6e1, 0x15aa1a, 0x1933ec, 0x9f7f17, + 0xa18139, 0x27cdc2, 0x2b5434, 0xad18cf, 0x3267d8, 0xb42b23, 0xb8b2d5, 0x3efe2e, + 0xc54e89, 0x430272, 0x4f9b84, 0xc9d77f, 0x56a868, 0xd0e493, 0xdc7d65, 0x5a319e, + 0x64cfb0, 0xe2834b, 0xee1abd, 0x685646, 0xf72951, 0x7165aa, 0x7dfc5c, 0xfbb0a7, + 0x0cd1e9, 0x8a9d12, 0x8604e4, 0x00481f, 0x9f3708, 0x197bf3, 0x15e205, 0x93aefe, + 0xad50d0, 0x2b1c2b, 0x2785dd, 0xa1c926, 0x3eb631, 0xb8faca, 0xb4633c, 0x322fc7, + 0xc99f60, 0x4fd39b, 0x434a6d, 0xc50696, 0x5a7981, 0xdc357a, 0xd0ac8c, 0x56e077, + 0x681e59, 0xee52a2, 0xe2cb54, 0x6487af, 0xfbf8b8, 0x7db443, 0x712db5, 0xf7614e, + 0x19a3d2, 0x9fef29, 0x9376df, 0x153a24, 0x8a4533, 0x0c09c8, 0x00903e, 0x86dcc5, + 0xb822eb, 0x3e6e10, 0x32f7e6, 0xb4bb1d, 0x2bc40a, 0xad88f1, 0xa11107, 0x275dfc, + 0xdced5b, 0x5aa1a0, 0x563856, 0xd074ad, 0x4f0bba, 0xc94741, 0xc5deb7, 0x43924c, + 0x7d6c62, 0xfb2099, 0xf7b96f, 0x71f594, 0xee8a83, 0x68c678, 0x645f8e, 0xe21375, + 0x15723b, 0x933ec0, 0x9fa736, 0x19ebcd, 0x8694da, 0x00d821, 0x0c41d7, 0x8a0d2c, + 0xb4f302, 0x32bff9, 0x3e260f, 0xb86af4, 0x2715e3, 0xa15918, 0xadc0ee, 0x2b8c15, + 0xd03cb2, 0x567049, 0x5ae9bf, 0xdca544, 0x43da53, 0xc596a8, 0xc90f5e, 0x4f43a5, + 0x71bd8b, 0xf7f170, 0xfb6886, 0x7d247d, 0xe25b6a, 0x641791, 0x688e67, 0xeec29c, + 0x3347a4, 0xb50b5f, 0xb992a9, 0x3fde52, 0xa0a145, 0x26edbe, 0x2a7448, 0xac38b3, + 0x92c69d, 0x148a66, 0x181390, 0x9e5f6b, 0x01207c, 0x876c87, 0x8bf571, 0x0db98a, + 0xf6092d, 0x7045d6, 0x7cdc20, 0xfa90db, 0x65efcc, 0xe3a337, 0xef3ac1, 0x69763a, + 0x578814, 0xd1c4ef, 0xdd5d19, 0x5b11e2, 0xc46ef5, 0x42220e, 0x4ebbf8, 0xc8f703, + 0x3f964d, 0xb9dab6, 0xb54340, 0x330fbb, 0xac70ac, 0x2a3c57, 0x26a5a1, 0xa0e95a, + 0x9e1774, 0x185b8f, 0x14c279, 0x928e82, 0x0df195, 0x8bbd6e, 0x872498, 0x016863, + 0xfad8c4, 0x7c943f, 0x700dc9, 0xf64132, 0x693e25, 0xef72de, 0xe3eb28, 0x65a7d3, + 0x5b59fd, 0xdd1506, 0xd18cf0, 0x57c00b, 0xc8bf1c, 0x4ef3e7, 0x426a11, 0xc426ea, + 0x2ae476, 0xaca88d, 0xa0317b, 0x267d80, 0xb90297, 0x3f4e6c, 0x33d79a, 0xb59b61, + 0x8b654f, 0x0d29b4, 0x01b042, 0x87fcb9, 0x1883ae, 0x9ecf55, 0x9256a3, 0x141a58, + 0xefaaff, 0x69e604, 0x657ff2, 0xe33309, 0x7c4c1e, 0xfa00e5, 0xf69913, 0x70d5e8, + 0x4e2bc6, 0xc8673d, 0xc4fecb, 0x42b230, 0xddcd27, 0x5b81dc, 0x57182a, 0xd154d1, + 0x26359f, 0xa07964, 0xace092, 0x2aac69, 0xb5d37e, 0x339f85, 0x3f0673, 0xb94a88, + 0x87b4a6, 0x01f85d, 0x0d61ab, 0x8b2d50, 0x145247, 0x921ebc, 0x9e874a, 0x18cbb1, + 0xe37b16, 0x6537ed, 0x69ae1b, 0xefe2e0, 0x709df7, 0xf6d10c, 0xfa48fa, 0x7c0401, + 0x42fa2f, 0xc4b6d4, 0xc82f22, 0x4e63d9, 0xd11cce, 0x575035, 0x5bc9c3, 0xdd8538 +}; + +crc24_t crc24_update(crc24_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 16) ^ *d) & 0xff; + crc = (crc24_table[tbl_idx] ^ (crc << 8)) & 0xffffff; + d++; + } + + return crc & 0xffffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.h new file mode 100644 index 0000000..f51df61 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24.h @@ -0,0 +1,11 @@ +#ifndef _CRC24_H_ +#define _CRC24_H_ + +#include <stdint.h> +#include <stddef.h> + +typedef uint32_t crc24_t; + +crc24_t crc24_update(crc24_t crc, const void *data, size_t data_len); + +#endif diff --git
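The CRC-24 width does not match a native integer type, so the code above keeps the register in a uint32_t (the crc24_t typedef) and masks every result to 24 bits. A minimal usage sketch, assuming only the crc24.h header above: the main() harness is illustrative and not part of the gem, the 0xb704ce seed is the XorIn value from the pycrc configuration, and XorOut is 0x000000, so no final XOR is applied.

/* Illustrative one-shot driver for crc24_update() (not part of the gem). */
#include <stdio.h>
#include <string.h>
#include "crc24.h"

int main(void)
{
    const char *msg = "123456789";
    crc24_t crc = 0xb704ce;                     /* XorIn from the pycrc config */
    crc = crc24_update(crc, msg, strlen(msg));  /* XorOut = 0x000000: no final XOR */
    /* For these OpenPGP parameters the catalogued check value is 21cf02. */
    printf("%06x\n", (unsigned int)(crc & 0xffffff));
    return 0;
}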
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.bundle new file mode 100644 index 0000000..31f1f57 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.c new file mode 100644 index 0000000..452b371 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/crc24_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc24.h" + +VALUE Digest_CRC24_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc24_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc24_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc24_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC24 = rb_const_get(mDigest, rb_intern("CRC24")); + + rb_undef_method(cCRC24, "update"); + rb_define_method(cCRC24, "update", Digest_CRC24_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/extconf.rb new file mode 100644 index 0000000..925d555 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc24/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc24_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.c new file mode 100644 index 0000000..be9a49d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.c @@ -0,0 +1,69 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 06:06:44 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x04c11db7 + * - XorIn = 0xffffffff + * - ReflectIn = True + * - XorOut = 0xffffffff + * - ReflectOut = True + * - Algorithm = table-driven + */ +#include "crc32.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc32_t crc32_table[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d +}; + +crc32_t crc32_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc32_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
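crc32_update above is the reflected (LSB-first) form: it indexes the table with the low byte of the register and shifts right. For reference, here is a bit-at-a-time sketch that computes the same result without a table, assuming only the crc32_t typedef from crc32.h; 0xedb88320 is the polynomial 0x04c11db7 with its bits reversed, and each crc32_table entry is simply eight of these steps precomputed for one byte value.

/* Bitwise equivalent of the table-driven reflected loop above (sketch only). */
#include "crc32.h"

crc32_t crc32_bitwise_update(crc32_t crc, const void *data, size_t data_len)
{
    const unsigned char *d = (const unsigned char *)data;
    int i;

    while (data_len--)
    {
        crc ^= *d++;                     /* fold the next byte into the low end */
        for (i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
    }

    return crc & 0xffffffff;
}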
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.h new file mode 100644 index 0000000..a20757f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32.h @@ -0,0 +1,11 @@ +#ifndef _CRC32_H_ +#define _CRC32_H_ + +#include <stdint.h> +#include <stddef.h> + +typedef uint32_t crc32_t; + +crc32_t crc32_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.bundle new file mode 100644 index 0000000..f7a6add Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.c new file mode 100644 index 0000000..5dd3e9d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/crc32_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32.h" + +VALUE Digest_CRC32_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32 = rb_const_get(mDigest, rb_intern("CRC32")); + + rb_undef_method(cCRC32, "update"); + rb_define_method(cCRC32, "update", Digest_CRC32_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/extconf.rb new file mode 100644 index 0000000..0b2138b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.c new file mode 100644 index 0000000..d8b09da --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:08:13 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x04c11db7 + * - XorIn = 0xffffffff + * - ReflectIn = False + * - XorOut = 0xffffffff + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc32_bzip2.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc32_t crc32_bzip2_table[256] = { + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 +}; + +crc32_t crc32_bzip2_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 24) ^ *d) & 0xff; + crc = (crc32_bzip2_table[tbl_idx] ^ (crc << 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
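crc32_bzip2_update is the unreflected (MSB-first) counterpart of the loop above: the table index comes from the top byte of the register and the register shifts left. A sketch of how one crc32_bzip2_table entry can be derived from the 0x04c11db7 polynomial given in the configuration comment; the helper name is hypothetical and not part of the gem.

/* Derivation of one MSB-first table entry (illustrative only). */
#include "crc32_bzip2.h"

crc32_t crc32_bzip2_table_entry(unsigned char index)
{
    crc32_t r = (crc32_t)index << 24;   /* byte enters at the top of the register */
    int i;

    for (i = 0; i < 8; i++)
        r = (r & 0x80000000) ? ((r << 1) ^ 0x04c11db7) : (r << 1);

    return r & 0xffffffff;
}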
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.h new file mode 100644 index 0000000..0b4d963 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2.h @@ -0,0 +1,8 @@ +#ifndef _CRC32_BZIP2_H_ +#define _CRC32_BZIP2_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32_bzip2_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.bundle new file mode 100644 index 0000000..4a74d0b Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.c new file mode 100644 index 0000000..ed2013c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/crc32_bzip2_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32_bzip2.h" + +VALUE Digest_CRC32Bzip2_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_bzip2_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_bzip2_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32Bzip2 = rb_const_get(mDigest, rb_intern("CRC32BZip2")); + + rb_undef_method(cCRC32Bzip2, "update"); + rb_define_method(cCRC32Bzip2, "update", Digest_CRC32Bzip2_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/extconf.rb new file mode 100644 index 0000000..c6c47f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_bzip2/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_bzip2_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.c new file mode 100644 index 0000000..f5cd76a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:33:07 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x04c11db7 + * - XorIn = 0xffffffff + * - ReflectIn = True + * - XorOut = 0x00000000 + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc32_jam.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc32_t crc32_table[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d +}; + +crc32_t crc32_jam_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc32_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
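crc32_jam.c shares the reflected table (here also named crc32_table) and update loop with crc32.c; per the two pycrc configurations the only difference is XorOut, 0x00000000 here versus 0xffffffff for standard CRC-32, so the two digests are bitwise complements of each other. A small illustrative helper, not part of the gem, that makes the relationship explicit:

/* JAMCRC and CRC-32 share the register state; only the final XOR differs. */
#include "crc32_jam.h"

crc32_t crc32_jam_from_crc32(crc32_t standard_crc32)
{
    /* Undo CRC-32's XorOut of 0xffffffff to recover the JAMCRC value. */
    return (standard_crc32 ^ 0xffffffff) & 0xffffffff;
}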
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.h new file mode 100644 index 0000000..71864b8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam.h @@ -0,0 +1,8 @@ +#ifndef _CRC32_JAM_H_ +#define _CRC32_JAM_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32_jam_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.bundle new file mode 100644 index 0000000..cdecd3d Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.c new file mode 100644 index 0000000..f790882 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/crc32_jam_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32_jam.h" + +VALUE Digest_CRC32Jam_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_jam_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_jam_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32Jam = rb_const_get(mDigest, rb_intern("CRC32Jam")); + + rb_undef_method(cCRC32Jam, "update"); + rb_define_method(cCRC32Jam, "update", Digest_CRC32Jam_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/extconf.rb new file mode 100644 index 0000000..2ad560d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_jam/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_jam_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.c new file mode 100644 index 0000000..77ec2ea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:06:59 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x04c11db7 + * - XorIn = 0xffffffff + * - ReflectIn = False + * - XorOut = 0x00000000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc32_mpeg.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc32_t crc32_mpeg_table[256] = { + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 +}; + +crc32_t crc32_mpeg_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 24) ^ *d) & 0xff; + crc = (crc32_mpeg_table[tbl_idx] ^ (crc << 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
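These *_update() functions are written to be resumable: the Ruby wrappers carry @crc between calls to Digest#update, so feeding the data in chunks must agree with a single pass over the same bytes. A self-check sketch, assuming crc32_mpeg.h above and seeding with the 0xffffffff XorIn from the configuration comment; the demo function itself is hypothetical.

/* Chunked and one-shot updates must produce the same state (sketch only). */
#include <assert.h>
#include "crc32_mpeg.h"

void crc32_mpeg_chunking_demo(void)
{
    const char *msg = "123456789";
    crc32_t one_shot = crc32_mpeg_update(0xffffffff, msg, 9);
    crc32_t chunked  = crc32_mpeg_update(0xffffffff, msg, 4);

    chunked = crc32_mpeg_update(chunked, msg + 4, 5);  /* resume mid-stream */
    assert(one_shot == chunked);
}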
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.h new file mode 100644 index 0000000..71d8862 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg.h @@ -0,0 +1,8 @@ +#ifndef _CRC32_MPEG_H_ +#define _CRC32_MPEG_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32_mpeg_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.bundle new file mode 100644 index 0000000..17fd1db Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.c new file mode 100644 index 0000000..2756058 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/crc32_mpeg_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32_mpeg.h" + +VALUE Digest_CRC32MPEG_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_mpeg_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_mpeg_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32MPEG = rb_const_get(mDigest, rb_intern("CRC32MPEG")); + + rb_undef_method(cCRC32MPEG, "update"); + rb_define_method(cCRC32MPEG, "update", Digest_CRC32MPEG_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/extconf.rb new file mode 100644 index 0000000..a65e0bb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_mpeg/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_mpeg_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.c new file mode 100644 index 0000000..1ede671 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:32:59 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x04c11db7 + * - XorIn = 0x00000000 + * - ReflectIn = False + * - XorOut = 0xffffffff + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc32_posix.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc32_t crc32_posix_table[256] = { + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 +}; + +crc32_t crc32_posix_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 24) ^ *d) & 0xff; + crc = (crc32_posix_table[tbl_idx] ^ (crc << 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
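
crc32_posix_update above is byte-for-byte the same table and loop as the MPEG-2 variant: per the generator headers, the two algorithms share the polynomial and differ only in XorIn/XorOut (POSIX/cksum: 0x00000000 in, 0xffffffff out). A hedged Ruby sketch of just that parameter swap, repeating the table construction so the snippet stands alone (illustrative, not the gem's code; the self-test value is the published cksum check):

POLY = 0x04c11db7
TABLE = (0..255).map do |i|
  crc = i << 24
  8.times { crc = ((crc & 0x80000000).zero? ? (crc << 1) : ((crc << 1) ^ POLY)) & 0xffffffff }
  crc
end

def crc32_posix(data)
  crc = 0x00000000                         # XorIn = 0x00000000 this time
  data.each_byte do |b|
    crc = (TABLE[((crc >> 24) ^ b) & 0xff] ^ (crc << 8)) & 0xffffffff
  end
  crc ^ 0xffffffff                         # XorOut = 0xffffffff
end

# Widely published CRC-32/POSIX (cksum) check value (assumption):
raise 'self-test failed' unless crc32_posix('123456789') == 0x765e7680
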
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.h new file mode 100644 index 0000000..bddb5bd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix.h @@ -0,0 +1,8 @@ +#ifndef _CRC32_POSIX_H_ +#define _CRC32_POSIX_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32_posix_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.bundle new file mode 100644 index 0000000..2ec0e8f Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.c new file mode 100644 index 0000000..0fa7bc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/crc32_posix_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32_posix.h" + +VALUE Digest_CRC32POSIX_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_posix_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_posix_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32POSIX = rb_const_get(mDigest, rb_intern("CRC32POSIX")); + + rb_undef_method(cCRC32POSIX, "update"); + rb_define_method(cCRC32POSIX, "update", Digest_CRC32POSIX_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/extconf.rb new file mode 100644 index 0000000..fb3c2a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_posix/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_posix_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.c new file mode 100644 index 0000000..141ee42 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:33:18 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x000000af + * - XorIn = 0x00000000 + * - ReflectIn = False + * - XorOut = 0x00000000 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc32_xfer.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc32_t crc32_xfer_table[256] = { + 0x00000000, 0x000000af, 0x0000015e, 0x000001f1, 0x000002bc, 0x00000213, 0x000003e2, 0x0000034d, + 0x00000578, 0x000005d7, 0x00000426, 0x00000489, 0x000007c4, 0x0000076b, 0x0000069a, 0x00000635, + 0x00000af0, 0x00000a5f, 0x00000bae, 0x00000b01, 0x0000084c, 0x000008e3, 0x00000912, 0x000009bd, + 0x00000f88, 0x00000f27, 0x00000ed6, 0x00000e79, 0x00000d34, 0x00000d9b, 0x00000c6a, 0x00000cc5, + 0x000015e0, 0x0000154f, 0x000014be, 0x00001411, 0x0000175c, 0x000017f3, 0x00001602, 0x000016ad, + 0x00001098, 0x00001037, 0x000011c6, 0x00001169, 0x00001224, 0x0000128b, 0x0000137a, 0x000013d5, + 0x00001f10, 0x00001fbf, 0x00001e4e, 0x00001ee1, 0x00001dac, 0x00001d03, 0x00001cf2, 0x00001c5d, + 0x00001a68, 0x00001ac7, 0x00001b36, 0x00001b99, 0x000018d4, 0x0000187b, 0x0000198a, 0x00001925, + 0x00002bc0, 0x00002b6f, 0x00002a9e, 0x00002a31, 0x0000297c, 0x000029d3, 0x00002822, 0x0000288d, + 0x00002eb8, 0x00002e17, 0x00002fe6, 0x00002f49, 0x00002c04, 0x00002cab, 0x00002d5a, 0x00002df5, + 0x00002130, 0x0000219f, 0x0000206e, 0x000020c1, 0x0000238c, 0x00002323, 0x000022d2, 0x0000227d, + 0x00002448, 0x000024e7, 0x00002516, 0x000025b9, 0x000026f4, 0x0000265b, 0x000027aa, 0x00002705, + 0x00003e20, 0x00003e8f, 0x00003f7e, 0x00003fd1, 0x00003c9c, 0x00003c33, 0x00003dc2, 0x00003d6d, + 0x00003b58, 0x00003bf7, 0x00003a06, 0x00003aa9, 0x000039e4, 0x0000394b, 0x000038ba, 0x00003815, + 0x000034d0, 0x0000347f, 0x0000358e, 0x00003521, 0x0000366c, 0x000036c3, 0x00003732, 0x0000379d, + 0x000031a8, 0x00003107, 0x000030f6, 0x00003059, 0x00003314, 0x000033bb, 0x0000324a, 0x000032e5, + 0x00005780, 0x0000572f, 0x000056de, 0x00005671, 0x0000553c, 0x00005593, 0x00005462, 0x000054cd, + 0x000052f8, 0x00005257, 0x000053a6, 0x00005309, 0x00005044, 0x000050eb, 0x0000511a, 0x000051b5, + 0x00005d70, 0x00005ddf, 0x00005c2e, 0x00005c81, 0x00005fcc, 0x00005f63, 0x00005e92, 0x00005e3d, + 0x00005808, 0x000058a7, 0x00005956, 0x000059f9, 0x00005ab4, 0x00005a1b, 0x00005bea, 0x00005b45, + 0x00004260, 0x000042cf, 0x0000433e, 0x00004391, 0x000040dc, 0x00004073, 0x00004182, 0x0000412d, + 0x00004718, 0x000047b7, 0x00004646, 0x000046e9, 0x000045a4, 0x0000450b, 0x000044fa, 0x00004455, + 0x00004890, 0x0000483f, 0x000049ce, 0x00004961, 0x00004a2c, 0x00004a83, 0x00004b72, 0x00004bdd, + 0x00004de8, 0x00004d47, 0x00004cb6, 0x00004c19, 0x00004f54, 0x00004ffb, 0x00004e0a, 0x00004ea5, + 0x00007c40, 0x00007cef, 0x00007d1e, 0x00007db1, 0x00007efc, 0x00007e53, 0x00007fa2, 0x00007f0d, + 0x00007938, 0x00007997, 0x00007866, 0x000078c9, 0x00007b84, 0x00007b2b, 0x00007ada, 0x00007a75, + 0x000076b0, 0x0000761f, 0x000077ee, 0x00007741, 0x0000740c, 0x000074a3, 0x00007552, 0x000075fd, + 0x000073c8, 0x00007367, 0x00007296, 0x00007239, 0x00007174, 0x000071db, 0x0000702a, 0x00007085, + 0x000069a0, 0x0000690f, 0x000068fe, 0x00006851, 0x00006b1c, 0x00006bb3, 0x00006a42, 0x00006aed, + 0x00006cd8, 0x00006c77, 0x00006d86, 0x00006d29, 0x00006e64, 0x00006ecb, 0x00006f3a, 0x00006f95, + 0x00006350, 0x000063ff, 0x0000620e, 0x000062a1, 0x000061ec, 0x00006143, 0x000060b2, 0x0000601d, + 0x00006628, 0x00006687, 0x00006776, 0x000067d9, 0x00006494, 0x0000643b, 0x000065ca, 0x00006565 +}; + +crc32_t crc32_xfer_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = ((crc >> 24) ^ *d) & 0xff; + crc = (crc32_xfer_table[tbl_idx] ^ (crc << 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
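
Every *_ext.c in this diff follows one binding pattern: read the @crc instance variable off the Ruby object, run the native update over the string's bytes, write @crc back, then in Init_* swap the class's pure-Ruby #update for the C one via rb_undef_method/rb_define_method. On the Ruby side that pairs naturally with a guarded require, so the gem keeps working when the extension was never compiled. A sketch of that idiom; the require path here is illustrative (only the target name "crc32_xfer_ext" comes from the extconf.rb below), so treat it as an assumption rather than the gem's exact loader:

begin
  # Load the compiled extension if the build produced one (path illustrative).
  require 'digest/crc32_xfer/crc32_xfer_ext'
rescue LoadError
  # Fall back to the pure-Ruby Digest::CRC32XFER#update.
end
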
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.h new file mode 100644 index 0000000..2223cfe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer.h @@ -0,0 +1,8 @@ +#ifndef _CRC32_XFER_H_ +#define _CRC32_XFER_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32_xfer_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.bundle new file mode 100644 index 0000000..fad7436 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.c new file mode 100644 index 0000000..6a93284 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/crc32_xfer_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32_xfer.h" + +VALUE Digest_CRC32XFER_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32_xfer_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32_xfer_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32XFER = rb_const_get(mDigest, rb_intern("CRC32XFER")); + + rb_undef_method(cCRC32XFER, "update"); + rb_define_method(cCRC32XFER, "update", Digest_CRC32XFER_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/extconf.rb new file mode 100644 index 0000000..978f559 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32_xfer/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32_xfer_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.c new file mode 100644 index 0000000..d0eaee7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.c @@ -0,0 +1,70 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:07:24 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 32 + * - Poly = 0x1edc6f41 + * - XorIn = 0xffffffff + * - ReflectIn = True + * - XorOut = 0xffffffff + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc32c.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc32_t crc32c_table[256] = { + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, + 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, + 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, + 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, + 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, + 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, + 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, + 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, + 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, + 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, + 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, + 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, + 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, + 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, + 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, + 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, + 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, + 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, + 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, + 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, + 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, + 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351 +}; + +crc32_t crc32c_update(crc32_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc32c_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff; + d++; + } + + return crc & 0xffffffff; +} diff --git 
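
CRC-32C is the first reflected variant in this batch (ReflectIn/ReflectOut = True in the generator header), and the loop shape changes accordingly: the low byte of the register selects the table entry and the register shifts right, the mirror image of the MSB-first loops above. The same reflected shape recurs in the CRC-64, CRC-64/Jones and CRC-64/XZ files later in this diff, just with 64-bit masks. A self-contained Ruby sketch, with the table rebuilt from the bit-reversed polynomial (the self-test value is the widely published CRC-32C/Castagnoli check, not from this gem):

POLY_REFL = 0x82f63b78   # 0x1edc6f41 with its 32 bits reversed

# Reflected table construction: seed with the byte, shift right.
TABLE = (0..255).map do |i|
  crc = i
  8.times { crc = (crc & 1).zero? ? (crc >> 1) : ((crc >> 1) ^ POLY_REFL) }
  crc
end

def crc32c(data)
  crc = 0xffffffff                         # XorIn = 0xffffffff
  data.each_byte do |b|
    crc = TABLE[(crc ^ b) & 0xff] ^ (crc >> 8)   # low byte selects the entry
  end
  crc ^ 0xffffffff                         # XorOut = 0xffffffff
end

# Widely published CRC-32C check value (assumption, not from this diff):
raise 'self-test failed' unless crc32c('123456789') == 0xe3069283
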
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.h new file mode 100644 index 0000000..a752465 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c.h @@ -0,0 +1,8 @@ +#ifndef _CRC32C_H_ +#define _CRC32C_H_ + +#include "../crc32/crc32.h" + +crc32_t crc32c_update(crc32_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.bundle new file mode 100644 index 0000000..2266d0b Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.c new file mode 100644 index 0000000..ca70bde --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/crc32c_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc32c.h" + +VALUE Digest_CRC32c_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc32_t crc = NUM2UINT(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc32c_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc32c_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC32c = rb_const_get(mDigest, rb_intern("CRC32c")); + + rb_undef_method(cCRC32c, "update"); + rb_define_method(cCRC32c, "update", Digest_CRC32c_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/extconf.rb new file mode 100644 index 0000000..5369e3f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc32c/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc32c_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.c new file mode 100644 index 0000000..66020f5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.c @@ -0,0 +1,54 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:29:44 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 5 + * - Poly = 0x05 + * - XorIn = 0x1f + * - ReflectIn = True + * - XorOut = 0x1f + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc5.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc5_t crc5_table[256] = { + 0x00, 0x0e, 0x1c, 0x12, 0x11, 0x1f, 0x0d, 0x03, 0x0b, 0x05, 0x17, 0x19, 0x1a, 0x14, 0x06, 0x08, + 0x16, 0x18, 0x0a, 0x04, 0x07, 0x09, 0x1b, 0x15, 0x1d, 0x13, 0x01, 0x0f, 0x0c, 0x02, 0x10, 0x1e, + 0x05, 0x0b, 0x19, 0x17, 0x14, 0x1a, 0x08, 0x06, 0x0e, 0x00, 0x12, 0x1c, 0x1f, 0x11, 0x03, 0x0d, + 0x13, 0x1d, 0x0f, 0x01, 0x02, 0x0c, 0x1e, 0x10, 0x18, 0x16, 0x04, 0x0a, 0x09, 0x07, 0x15, 0x1b, + 0x0a, 0x04, 0x16, 0x18, 0x1b, 0x15, 0x07, 0x09, 0x01, 0x0f, 0x1d, 0x13, 0x10, 0x1e, 0x0c, 0x02, + 0x1c, 0x12, 0x00, 0x0e, 0x0d, 0x03, 0x11, 0x1f, 0x17, 0x19, 0x0b, 0x05, 0x06, 0x08, 0x1a, 0x14, + 0x0f, 0x01, 0x13, 0x1d, 0x1e, 0x10, 0x02, 0x0c, 0x04, 0x0a, 0x18, 0x16, 0x15, 0x1b, 0x09, 0x07, + 0x19, 0x17, 0x05, 0x0b, 0x08, 0x06, 0x14, 0x1a, 0x12, 0x1c, 0x0e, 0x00, 0x03, 0x0d, 0x1f, 0x11, + 0x14, 0x1a, 0x08, 0x06, 0x05, 0x0b, 0x19, 0x17, 0x1f, 0x11, 0x03, 0x0d, 0x0e, 0x00, 0x12, 0x1c, + 0x02, 0x0c, 0x1e, 0x10, 0x13, 0x1d, 0x0f, 0x01, 0x09, 0x07, 0x15, 0x1b, 0x18, 0x16, 0x04, 0x0a, + 0x11, 0x1f, 0x0d, 0x03, 0x00, 0x0e, 0x1c, 0x12, 0x1a, 0x14, 0x06, 0x08, 0x0b, 0x05, 0x17, 0x19, + 0x07, 0x09, 0x1b, 0x15, 0x16, 0x18, 0x0a, 0x04, 0x0c, 0x02, 0x10, 0x1e, 0x1d, 0x13, 0x01, 0x0f, + 0x1e, 0x10, 0x02, 0x0c, 0x0f, 0x01, 0x13, 0x1d, 0x15, 0x1b, 0x09, 0x07, 0x04, 0x0a, 0x18, 0x16, + 0x08, 0x06, 0x14, 0x1a, 0x19, 0x17, 0x05, 0x0b, 0x03, 0x0d, 0x1f, 0x11, 0x12, 0x1c, 0x0e, 0x00, + 0x1b, 0x15, 0x07, 0x09, 0x0a, 0x04, 0x16, 0x18, 0x10, 0x1e, 0x0c, 0x02, 0x01, 0x0f, 0x1d, 0x13, + 0x0d, 0x03, 0x11, 0x1f, 0x1c, 0x12, 0x00, 0x0e, 0x06, 0x08, 0x1a, 0x14, 0x17, 0x19, 0x0b, 0x05 +}; + +crc5_t crc5_update(crc5_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = crc ^ *d; + crc = (crc5_table[tbl_idx] ^ (crc >> 8)) & 0x1f; + d++; + } + + return crc & 0x1f; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.h new file mode 100644 index 0000000..3de9d8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5.h @@ -0,0 +1,11 @@ +#ifndef _CRC5_H_ +#define _CRC5_H_ + +#include <stdint.h> +#include <stddef.h> + +typedef uint8_t crc5_t; + +crc5_t crc5_update(crc5_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.bundle new file mode 100644 index 0000000..048cce7 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.c new file mode 100644 index 0000000..f02f7e9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/crc5_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc5.h" + +VALUE Digest_CRC5_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc5_t crc = NUM2CHR(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc5_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc5_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject,
rb_intern("Digest")); + VALUE cCRC5 = rb_const_get(mDigest, rb_intern("CRC5")); + + rb_undef_method(cCRC5, "update"); + rb_define_method(cCRC5, "update", Digest_CRC5_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/extconf.rb new file mode 100644 index 0000000..84feb26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc5/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc5_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.c new file mode 100644 index 0000000..ea3a7e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.c @@ -0,0 +1,102 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:02:14 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 64 + * - Poly = 0x000000000000001b + * - XorIn = 0x0000000000000000 + * - ReflectIn = True + * - XorOut = 0x0000000000000000 + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc64.h" + +/** + * Static table used for the table_driven implementation. + */ +static const crc64_t crc64_table[256] = { + 0x0000000000000000, 0x01b0000000000000, 0x0360000000000000, 0x02d0000000000000, + 0x06c0000000000000, 0x0770000000000000, 0x05a0000000000000, 0x0410000000000000, + 0x0d80000000000000, 0x0c30000000000000, 0x0ee0000000000000, 0x0f50000000000000, + 0x0b40000000000000, 0x0af0000000000000, 0x0820000000000000, 0x0990000000000000, + 0x1b00000000000000, 0x1ab0000000000000, 0x1860000000000000, 0x19d0000000000000, + 0x1dc0000000000000, 0x1c70000000000000, 0x1ea0000000000000, 0x1f10000000000000, + 0x1680000000000000, 0x1730000000000000, 0x15e0000000000000, 0x1450000000000000, + 0x1040000000000000, 0x11f0000000000000, 0x1320000000000000, 0x1290000000000000, + 0x3600000000000000, 0x37b0000000000000, 0x3560000000000000, 0x34d0000000000000, + 0x30c0000000000000, 0x3170000000000000, 0x33a0000000000000, 0x3210000000000000, + 0x3b80000000000000, 0x3a30000000000000, 0x38e0000000000000, 0x3950000000000000, + 0x3d40000000000000, 0x3cf0000000000000, 0x3e20000000000000, 0x3f90000000000000, + 0x2d00000000000000, 0x2cb0000000000000, 0x2e60000000000000, 0x2fd0000000000000, + 0x2bc0000000000000, 0x2a70000000000000, 0x28a0000000000000, 0x2910000000000000, + 0x2080000000000000, 0x2130000000000000, 0x23e0000000000000, 0x2250000000000000, + 0x2640000000000000, 0x27f0000000000000, 0x2520000000000000, 0x2490000000000000, + 0x6c00000000000000, 0x6db0000000000000, 0x6f60000000000000, 0x6ed0000000000000, + 0x6ac0000000000000, 0x6b70000000000000, 0x69a0000000000000, 0x6810000000000000, + 0x6180000000000000, 0x6030000000000000, 0x62e0000000000000, 0x6350000000000000, + 0x6740000000000000, 0x66f0000000000000, 0x6420000000000000, 0x6590000000000000, + 0x7700000000000000, 0x76b0000000000000, 0x7460000000000000, 0x75d0000000000000, + 0x71c0000000000000, 0x7070000000000000, 0x72a0000000000000, 0x7310000000000000, + 0x7a80000000000000, 0x7b30000000000000, 0x79e0000000000000, 0x7850000000000000, + 0x7c40000000000000, 0x7df0000000000000, 0x7f20000000000000, 0x7e90000000000000, + 0x5a00000000000000, 0x5bb0000000000000, 0x5960000000000000, 0x58d0000000000000, + 0x5cc0000000000000, 0x5d70000000000000, 0x5fa0000000000000, 
0x5e10000000000000, + 0x5780000000000000, 0x5630000000000000, 0x54e0000000000000, 0x5550000000000000, + 0x5140000000000000, 0x50f0000000000000, 0x5220000000000000, 0x5390000000000000, + 0x4100000000000000, 0x40b0000000000000, 0x4260000000000000, 0x43d0000000000000, + 0x47c0000000000000, 0x4670000000000000, 0x44a0000000000000, 0x4510000000000000, + 0x4c80000000000000, 0x4d30000000000000, 0x4fe0000000000000, 0x4e50000000000000, + 0x4a40000000000000, 0x4bf0000000000000, 0x4920000000000000, 0x4890000000000000, + 0xd800000000000000, 0xd9b0000000000000, 0xdb60000000000000, 0xdad0000000000000, + 0xdec0000000000000, 0xdf70000000000000, 0xdda0000000000000, 0xdc10000000000000, + 0xd580000000000000, 0xd430000000000000, 0xd6e0000000000000, 0xd750000000000000, + 0xd340000000000000, 0xd2f0000000000000, 0xd020000000000000, 0xd190000000000000, + 0xc300000000000000, 0xc2b0000000000000, 0xc060000000000000, 0xc1d0000000000000, + 0xc5c0000000000000, 0xc470000000000000, 0xc6a0000000000000, 0xc710000000000000, + 0xce80000000000000, 0xcf30000000000000, 0xcde0000000000000, 0xcc50000000000000, + 0xc840000000000000, 0xc9f0000000000000, 0xcb20000000000000, 0xca90000000000000, + 0xee00000000000000, 0xefb0000000000000, 0xed60000000000000, 0xecd0000000000000, + 0xe8c0000000000000, 0xe970000000000000, 0xeba0000000000000, 0xea10000000000000, + 0xe380000000000000, 0xe230000000000000, 0xe0e0000000000000, 0xe150000000000000, + 0xe540000000000000, 0xe4f0000000000000, 0xe620000000000000, 0xe790000000000000, + 0xf500000000000000, 0xf4b0000000000000, 0xf660000000000000, 0xf7d0000000000000, + 0xf3c0000000000000, 0xf270000000000000, 0xf0a0000000000000, 0xf110000000000000, + 0xf880000000000000, 0xf930000000000000, 0xfbe0000000000000, 0xfa50000000000000, + 0xfe40000000000000, 0xfff0000000000000, 0xfd20000000000000, 0xfc90000000000000, + 0xb400000000000000, 0xb5b0000000000000, 0xb760000000000000, 0xb6d0000000000000, + 0xb2c0000000000000, 0xb370000000000000, 0xb1a0000000000000, 0xb010000000000000, + 0xb980000000000000, 0xb830000000000000, 0xbae0000000000000, 0xbb50000000000000, + 0xbf40000000000000, 0xbef0000000000000, 0xbc20000000000000, 0xbd90000000000000, + 0xaf00000000000000, 0xaeb0000000000000, 0xac60000000000000, 0xadd0000000000000, + 0xa9c0000000000000, 0xa870000000000000, 0xaaa0000000000000, 0xab10000000000000, + 0xa280000000000000, 0xa330000000000000, 0xa1e0000000000000, 0xa050000000000000, + 0xa440000000000000, 0xa5f0000000000000, 0xa720000000000000, 0xa690000000000000, + 0x8200000000000000, 0x83b0000000000000, 0x8160000000000000, 0x80d0000000000000, + 0x84c0000000000000, 0x8570000000000000, 0x87a0000000000000, 0x8610000000000000, + 0x8f80000000000000, 0x8e30000000000000, 0x8ce0000000000000, 0x8d50000000000000, + 0x8940000000000000, 0x88f0000000000000, 0x8a20000000000000, 0x8b90000000000000, + 0x9900000000000000, 0x98b0000000000000, 0x9a60000000000000, 0x9bd0000000000000, + 0x9fc0000000000000, 0x9e70000000000000, 0x9ca0000000000000, 0x9d10000000000000, + 0x9480000000000000, 0x9530000000000000, 0x97e0000000000000, 0x9650000000000000, + 0x9240000000000000, 0x93f0000000000000, 0x9120000000000000, 0x9090000000000000 +}; + +crc64_t crc64_update(crc64_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc64_table[tbl_idx] ^ (crc >> 8)) & 0xffffffffffffffff; + d++; + } + + return crc & 0xffffffffffffffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.h 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.h new file mode 100644 index 0000000..7a3f161 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64.h @@ -0,0 +1,11 @@ +#ifndef _CRC64_H_ +#define _CRC64_H_ + +#include <stdint.h> +#include <stddef.h> + +typedef uint64_t crc64_t; + +crc64_t crc64_update(crc64_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.bundle new file mode 100644 index 0000000..7b3392a Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.c new file mode 100644 index 0000000..c379595 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/crc64_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc64.h" + +VALUE Digest_CRC64_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc64_t crc = NUM2ULONG(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc64_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, ULONG2NUM(crc)); + return self; +} + +void Init_crc64_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC64 = rb_const_get(mDigest, rb_intern("CRC64")); + + rb_undef_method(cCRC64, "update"); + rb_define_method(cCRC64, "update", Digest_CRC64_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/extconf.rb new file mode 100644 index 0000000..d0f1f8a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc64_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.c new file mode 100644 index 0000000..7f0232c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.c @@ -0,0 +1,102 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:33:28 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 64 + * - Poly = 0xad93d23594c935a9 + * - XorIn = 0xffffffffffffffff + * - ReflectIn = True + * - XorOut = 0x0000000000000000 + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc64_jones.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc64_t crc64_table[256] = { + 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, + 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, + 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, + 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, + 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, + 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, + 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, + 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, + 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, + 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, + 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, + 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, + 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, + 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, + 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, + 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, + 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, + 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, + 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, + 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, + 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, + 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, + 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, + 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, + 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, + 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, + 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, + 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, + 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, + 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, + 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, + 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, + 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, + 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, + 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, + 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, + 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, + 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, + 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, + 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, + 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, + 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, + 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 
0x4bfd10b2857d7349, + 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, + 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, + 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, + 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, + 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, + 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, + 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, + 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, + 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, + 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, + 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, + 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, + 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, + 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, + 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, + 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, + 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, + 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, + 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, + 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, + 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728 +}; + +crc64_t crc64_jones_update(crc64_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc64_table[tbl_idx] ^ (crc >> 8)) & 0xffffffffffffffff; + d++; + } + + return crc & 0xffffffffffffffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.h new file mode 100644 index 0000000..9baf592 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones.h @@ -0,0 +1,8 @@ +#ifndef _CRC64_JONES_H_ +#define _CRC64_JONES_H_ + +#include "../crc64/crc64.h" + +crc64_t crc64_jones_update(crc64_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.bundle new file mode 100644 index 0000000..91dae63 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.c new file mode 100644 index 0000000..a630655 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/crc64_jones_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc64_jones.h" + +VALUE Digest_CRC64Jones_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, 
crc_ivar_name); + crc64_t crc = NUM2ULONG(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc64_jones_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, ULONG2NUM(crc)); + return self; +} + +void Init_crc64_jones_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC64Jones = rb_const_get(mDigest, rb_intern("CRC64Jones")); + + rb_undef_method(cCRC64Jones, "update"); + rb_define_method(cCRC64Jones, "update", Digest_CRC64Jones_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/extconf.rb new file mode 100644 index 0000000..70cff91 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_jones/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc64_jones_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.c new file mode 100644 index 0000000..c076512 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.c @@ -0,0 +1,102 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:33:36 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 64 + * - Poly = 0x42f0e1eba9ea3693 + * - XorIn = 0xffffffffffffffff + * - ReflectIn = True + * - XorOut = 0xffffffffffffffff + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc64_xz.h" + +/** + * Static table used for the table_driven implementation. 
+ */ +static const crc64_t crc64_table[256] = { + 0x0000000000000000, 0xb32e4cbe03a75f6f, 0xf4843657a840a05b, 0x47aa7ae9abe7ff34, + 0x7bd0c384ff8f5e33, 0xc8fe8f3afc28015c, 0x8f54f5d357cffe68, 0x3c7ab96d5468a107, + 0xf7a18709ff1ebc66, 0x448fcbb7fcb9e309, 0x0325b15e575e1c3d, 0xb00bfde054f94352, + 0x8c71448d0091e255, 0x3f5f08330336bd3a, 0x78f572daa8d1420e, 0xcbdb3e64ab761d61, + 0x7d9ba13851336649, 0xceb5ed8652943926, 0x891f976ff973c612, 0x3a31dbd1fad4997d, + 0x064b62bcaebc387a, 0xb5652e02ad1b6715, 0xf2cf54eb06fc9821, 0x41e11855055bc74e, + 0x8a3a2631ae2dda2f, 0x39146a8fad8a8540, 0x7ebe1066066d7a74, 0xcd905cd805ca251b, + 0xf1eae5b551a2841c, 0x42c4a90b5205db73, 0x056ed3e2f9e22447, 0xb6409f5cfa457b28, + 0xfb374270a266cc92, 0x48190ecea1c193fd, 0x0fb374270a266cc9, 0xbc9d3899098133a6, + 0x80e781f45de992a1, 0x33c9cd4a5e4ecdce, 0x7463b7a3f5a932fa, 0xc74dfb1df60e6d95, + 0x0c96c5795d7870f4, 0xbfb889c75edf2f9b, 0xf812f32ef538d0af, 0x4b3cbf90f69f8fc0, + 0x774606fda2f72ec7, 0xc4684a43a15071a8, 0x83c230aa0ab78e9c, 0x30ec7c140910d1f3, + 0x86ace348f355aadb, 0x3582aff6f0f2f5b4, 0x7228d51f5b150a80, 0xc10699a158b255ef, + 0xfd7c20cc0cdaf4e8, 0x4e526c720f7dab87, 0x09f8169ba49a54b3, 0xbad65a25a73d0bdc, + 0x710d64410c4b16bd, 0xc22328ff0fec49d2, 0x85895216a40bb6e6, 0x36a71ea8a7ace989, + 0x0adda7c5f3c4488e, 0xb9f3eb7bf06317e1, 0xfe5991925b84e8d5, 0x4d77dd2c5823b7ba, + 0x64b62bcaebc387a1, 0xd7986774e864d8ce, 0x90321d9d438327fa, 0x231c512340247895, + 0x1f66e84e144cd992, 0xac48a4f017eb86fd, 0xebe2de19bc0c79c9, 0x58cc92a7bfab26a6, + 0x9317acc314dd3bc7, 0x2039e07d177a64a8, 0x67939a94bc9d9b9c, 0xd4bdd62abf3ac4f3, + 0xe8c76f47eb5265f4, 0x5be923f9e8f53a9b, 0x1c4359104312c5af, 0xaf6d15ae40b59ac0, + 0x192d8af2baf0e1e8, 0xaa03c64cb957be87, 0xeda9bca512b041b3, 0x5e87f01b11171edc, + 0x62fd4976457fbfdb, 0xd1d305c846d8e0b4, 0x96797f21ed3f1f80, 0x2557339fee9840ef, + 0xee8c0dfb45ee5d8e, 0x5da24145464902e1, 0x1a083bacedaefdd5, 0xa9267712ee09a2ba, + 0x955cce7fba6103bd, 0x267282c1b9c65cd2, 0x61d8f8281221a3e6, 0xd2f6b4961186fc89, + 0x9f8169ba49a54b33, 0x2caf25044a02145c, 0x6b055fede1e5eb68, 0xd82b1353e242b407, + 0xe451aa3eb62a1500, 0x577fe680b58d4a6f, 0x10d59c691e6ab55b, 0xa3fbd0d71dcdea34, + 0x6820eeb3b6bbf755, 0xdb0ea20db51ca83a, 0x9ca4d8e41efb570e, 0x2f8a945a1d5c0861, + 0x13f02d374934a966, 0xa0de61894a93f609, 0xe7741b60e174093d, 0x545a57dee2d35652, + 0xe21ac88218962d7a, 0x5134843c1b317215, 0x169efed5b0d68d21, 0xa5b0b26bb371d24e, + 0x99ca0b06e7197349, 0x2ae447b8e4be2c26, 0x6d4e3d514f59d312, 0xde6071ef4cfe8c7d, + 0x15bb4f8be788911c, 0xa6950335e42fce73, 0xe13f79dc4fc83147, 0x521135624c6f6e28, + 0x6e6b8c0f1807cf2f, 0xdd45c0b11ba09040, 0x9aefba58b0476f74, 0x29c1f6e6b3e0301b, + 0xc96c5795d7870f42, 0x7a421b2bd420502d, 0x3de861c27fc7af19, 0x8ec62d7c7c60f076, + 0xb2bc941128085171, 0x0192d8af2baf0e1e, 0x4638a2468048f12a, 0xf516eef883efae45, + 0x3ecdd09c2899b324, 0x8de39c222b3eec4b, 0xca49e6cb80d9137f, 0x7967aa75837e4c10, + 0x451d1318d716ed17, 0xf6335fa6d4b1b278, 0xb199254f7f564d4c, 0x02b769f17cf11223, + 0xb4f7f6ad86b4690b, 0x07d9ba1385133664, 0x4073c0fa2ef4c950, 0xf35d8c442d53963f, + 0xcf273529793b3738, 0x7c0979977a9c6857, 0x3ba3037ed17b9763, 0x888d4fc0d2dcc80c, + 0x435671a479aad56d, 0xf0783d1a7a0d8a02, 0xb7d247f3d1ea7536, 0x04fc0b4dd24d2a59, + 0x3886b22086258b5e, 0x8ba8fe9e8582d431, 0xcc0284772e652b05, 0x7f2cc8c92dc2746a, + 0x325b15e575e1c3d0, 0x8175595b76469cbf, 0xc6df23b2dda1638b, 0x75f16f0cde063ce4, + 0x498bd6618a6e9de3, 0xfaa59adf89c9c28c, 0xbd0fe036222e3db8, 0x0e21ac88218962d7, + 0xc5fa92ec8aff7fb6, 0x76d4de52895820d9, 0x317ea4bb22bfdfed, 
0x8250e80521188082, + 0xbe2a516875702185, 0x0d041dd676d77eea, 0x4aae673fdd3081de, 0xf9802b81de97deb1, + 0x4fc0b4dd24d2a599, 0xfceef8632775faf6, 0xbb44828a8c9205c2, 0x086ace348f355aad, + 0x34107759db5dfbaa, 0x873e3be7d8faa4c5, 0xc094410e731d5bf1, 0x73ba0db070ba049e, + 0xb86133d4dbcc19ff, 0x0b4f7f6ad86b4690, 0x4ce50583738cb9a4, 0xffcb493d702be6cb, + 0xc3b1f050244347cc, 0x709fbcee27e418a3, 0x3735c6078c03e797, 0x841b8ab98fa4b8f8, + 0xadda7c5f3c4488e3, 0x1ef430e13fe3d78c, 0x595e4a08940428b8, 0xea7006b697a377d7, + 0xd60abfdbc3cbd6d0, 0x6524f365c06c89bf, 0x228e898c6b8b768b, 0x91a0c532682c29e4, + 0x5a7bfb56c35a3485, 0xe955b7e8c0fd6bea, 0xaeffcd016b1a94de, 0x1dd181bf68bdcbb1, + 0x21ab38d23cd56ab6, 0x9285746c3f7235d9, 0xd52f0e859495caed, 0x6601423b97329582, + 0xd041dd676d77eeaa, 0x636f91d96ed0b1c5, 0x24c5eb30c5374ef1, 0x97eba78ec690119e, + 0xab911ee392f8b099, 0x18bf525d915feff6, 0x5f1528b43ab810c2, 0xec3b640a391f4fad, + 0x27e05a6e926952cc, 0x94ce16d091ce0da3, 0xd3646c393a29f297, 0x604a2087398eadf8, + 0x5c3099ea6de60cff, 0xef1ed5546e415390, 0xa8b4afbdc5a6aca4, 0x1b9ae303c601f3cb, + 0x56ed3e2f9e224471, 0xe5c372919d851b1e, 0xa26908783662e42a, 0x114744c635c5bb45, + 0x2d3dfdab61ad1a42, 0x9e13b115620a452d, 0xd9b9cbfcc9edba19, 0x6a978742ca4ae576, + 0xa14cb926613cf817, 0x1262f598629ba778, 0x55c88f71c97c584c, 0xe6e6c3cfcadb0723, + 0xda9c7aa29eb3a624, 0x69b2361c9d14f94b, 0x2e184cf536f3067f, 0x9d36004b35545910, + 0x2b769f17cf112238, 0x9858d3a9ccb67d57, 0xdff2a94067518263, 0x6cdce5fe64f6dd0c, + 0x50a65c93309e7c0b, 0xe388102d33392364, 0xa4226ac498dedc50, 0x170c267a9b79833f, + 0xdcd7181e300f9e5e, 0x6ff954a033a8c131, 0x28532e49984f3e05, 0x9b7d62f79be8616a, + 0xa707db9acf80c06d, 0x14299724cc279f02, 0x5383edcd67c06036, 0xe0ada17364673f59 +}; + +crc64_t crc64_xz_update(crc64_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = (crc ^ *d) & 0xff; + crc = (crc64_table[tbl_idx] ^ (crc >> 8)) & 0xffffffffffffffff; + d++; + } + + return crc & 0xffffffffffffffff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.h new file mode 100644 index 0000000..2f5341f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz.h @@ -0,0 +1,8 @@ +#ifndef _CRC64_XZ_H_ +#define _CRC64_XZ_H_ + +#include "../crc64/crc64.h" + +crc64_t crc64_xz_update(crc64_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.bundle new file mode 100644 index 0000000..98aea68 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.c new file mode 100644 index 0000000..913215f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/crc64_xz_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc64_xz.h" + +VALUE Digest_CRC64XZ_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc64_t crc = NUM2ULONG(crc_ivar); + + const char *data_ptr = 
StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc64_xz_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, ULONG2NUM(crc)); + return self; +} + +void Init_crc64_xz_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC64XZ = rb_const_get(mDigest, rb_intern("CRC64XZ")); + + rb_undef_method(cCRC64XZ, "update"); + rb_define_method(cCRC64XZ, "update", Digest_CRC64XZ_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/extconf.rb new file mode 100644 index 0000000..621cd2e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc64_xz/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc64_xz_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.c new file mode 100644 index 0000000..7042bf8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.c @@ -0,0 +1,54 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:01:47 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 8 + * - Poly = 0x07 + * - XorIn = 0x00 + * - ReflectIn = False + * - XorOut = 0x00 + * - ReflectOut = False + * - Algorithm = table-driven + */ + +#include "crc8.h" + +/** + * Static table used for the table_driven implementation. + */ +static const crc8_t crc8_1wire_table[256] = { + 0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d, + 0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65, 0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d, + 0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5, 0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd, + 0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85, 0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd, + 0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2, 0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea, + 0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2, 0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a, + 0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32, 0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a, + 0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42, 0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a, + 0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c, 0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4, + 0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec, 0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4, + 0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c, 0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44, + 0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c, 0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34, + 0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b, 0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63, + 0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b, 0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13, + 0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb, 0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83, + 0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb, 0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3 +}; + +crc8_t crc8_update(crc8_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = crc ^ *d; + crc = crc8_1wire_table[tbl_idx] & 0xff; + d++; + } + + return crc & 0xff; +} diff --git 
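
At width 8 the scheme degenerates further: the register is a single byte, so the shifted-out term is empty and each step in crc8_update above is a bare table lookup. (Note the naming quirk visible here: crc8.c's table is called crc8_1wire_table, while crc8_1wire.c's, below, is called crc8_table; each is static, so nothing clashes at link time, but the names are swapped relative to their files.) A self-contained Ruby sketch of the plain CRC-8 above (Poly = 0x07, no reflection, zero XORs; the self-test value is the widely published check, not from this gem):

POLY = 0x07

TABLE = (0..255).map do |i|
  crc = i
  8.times { crc = ((crc & 0x80).zero? ? (crc << 1) : ((crc << 1) ^ POLY)) & 0xff }
  crc
end

def crc8(data)
  crc = 0x00                               # XorIn = 0x00
  data.each_byte { |b| crc = TABLE[crc ^ b] }  # no shift term at width 8
  crc                                      # XorOut = 0x00
end

# Widely published CRC-8 check value (assumption, not from this diff):
raise 'self-test failed' unless crc8('123456789') == 0xf4
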
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.h new file mode 100644 index 0000000..37a3454 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8.h @@ -0,0 +1,11 @@ +#ifndef _CRC8_H_ +#define _CRC8_H_ + +#include <stdint.h> +#include <stddef.h> + +typedef uint8_t crc8_t; + +crc8_t crc8_update(crc8_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.bundle new file mode 100644 index 0000000..fc7cb06 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.c new file mode 100644 index 0000000..4f82b6b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/crc8_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc8.h" + +VALUE Digest_CRC8_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc8_t crc = NUM2CHR(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc8_update(crc,data_ptr,length); + + rb_ivar_set(self, crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc8_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC8 = rb_const_get(mDigest, rb_intern("CRC8")); + + rb_undef_method(cCRC8, "update"); + rb_define_method(cCRC8, "update", Digest_CRC8_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/extconf.rb new file mode 100644 index 0000000..1be8492 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc8_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.c new file mode 100644 index 0000000..51ed75a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.c @@ -0,0 +1,54 @@ +/** + * \file + * Functions and types for CRC checks. + * + * Generated on Sat Feb 29 02:02:50 2020 + * by pycrc v0.9.2, https://pycrc.org + * using the configuration: + * - Width = 8 + * - Poly = 0x31 + * - XorIn = 0x00 + * - ReflectIn = True + * - XorOut = 0x00 + * - ReflectOut = True + * - Algorithm = table-driven + */ + +#include "crc8_1wire.h" + +/** + * Static table used for the table_driven implementation.
+ */ +static const crc8_t crc8_table[256] = { + 0x00, 0x5e, 0xbc, 0xe2, 0x61, 0x3f, 0xdd, 0x83, 0xc2, 0x9c, 0x7e, 0x20, 0xa3, 0xfd, 0x1f, 0x41, + 0x9d, 0xc3, 0x21, 0x7f, 0xfc, 0xa2, 0x40, 0x1e, 0x5f, 0x01, 0xe3, 0xbd, 0x3e, 0x60, 0x82, 0xdc, + 0x23, 0x7d, 0x9f, 0xc1, 0x42, 0x1c, 0xfe, 0xa0, 0xe1, 0xbf, 0x5d, 0x03, 0x80, 0xde, 0x3c, 0x62, + 0xbe, 0xe0, 0x02, 0x5c, 0xdf, 0x81, 0x63, 0x3d, 0x7c, 0x22, 0xc0, 0x9e, 0x1d, 0x43, 0xa1, 0xff, + 0x46, 0x18, 0xfa, 0xa4, 0x27, 0x79, 0x9b, 0xc5, 0x84, 0xda, 0x38, 0x66, 0xe5, 0xbb, 0x59, 0x07, + 0xdb, 0x85, 0x67, 0x39, 0xba, 0xe4, 0x06, 0x58, 0x19, 0x47, 0xa5, 0xfb, 0x78, 0x26, 0xc4, 0x9a, + 0x65, 0x3b, 0xd9, 0x87, 0x04, 0x5a, 0xb8, 0xe6, 0xa7, 0xf9, 0x1b, 0x45, 0xc6, 0x98, 0x7a, 0x24, + 0xf8, 0xa6, 0x44, 0x1a, 0x99, 0xc7, 0x25, 0x7b, 0x3a, 0x64, 0x86, 0xd8, 0x5b, 0x05, 0xe7, 0xb9, + 0x8c, 0xd2, 0x30, 0x6e, 0xed, 0xb3, 0x51, 0x0f, 0x4e, 0x10, 0xf2, 0xac, 0x2f, 0x71, 0x93, 0xcd, + 0x11, 0x4f, 0xad, 0xf3, 0x70, 0x2e, 0xcc, 0x92, 0xd3, 0x8d, 0x6f, 0x31, 0xb2, 0xec, 0x0e, 0x50, + 0xaf, 0xf1, 0x13, 0x4d, 0xce, 0x90, 0x72, 0x2c, 0x6d, 0x33, 0xd1, 0x8f, 0x0c, 0x52, 0xb0, 0xee, + 0x32, 0x6c, 0x8e, 0xd0, 0x53, 0x0d, 0xef, 0xb1, 0xf0, 0xae, 0x4c, 0x12, 0x91, 0xcf, 0x2d, 0x73, + 0xca, 0x94, 0x76, 0x28, 0xab, 0xf5, 0x17, 0x49, 0x08, 0x56, 0xb4, 0xea, 0x69, 0x37, 0xd5, 0x8b, + 0x57, 0x09, 0xeb, 0xb5, 0x36, 0x68, 0x8a, 0xd4, 0x95, 0xcb, 0x29, 0x77, 0xf4, 0xaa, 0x48, 0x16, + 0xe9, 0xb7, 0x55, 0x0b, 0x88, 0xd6, 0x34, 0x6a, 0x2b, 0x75, 0x97, 0xc9, 0x4a, 0x14, 0xf6, 0xa8, + 0x74, 0x2a, 0xc8, 0x96, 0x15, 0x4b, 0xa9, 0xf7, 0xb6, 0xe8, 0x0a, 0x54, 0xd7, 0x89, 0x6b, 0x35 +}; + +crc8_t crc8_1wire_update(crc8_t crc, const void *data, size_t data_len) +{ + const unsigned char *d = (const unsigned char *)data; + unsigned int tbl_idx; + + while (data_len--) + { + tbl_idx = crc ^ *d; + crc = (crc8_table[tbl_idx] ^ (crc >> 8)) & 0xff; + d++; + } + + return crc & 0xff; +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.h b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.h new file mode 100644 index 0000000..37bcfb4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire.h @@ -0,0 +1,8 @@ +#ifndef _CRC8_1WIRE_H_ +#define _CRC8_1WIRE_H_ + +#include "../crc8/crc8.h" + +crc8_t crc8_1wire_update(crc8_t crc, const void *data, size_t data_len); + +#endif diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.bundle b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.bundle new file mode 100644 index 0000000..a316d09 Binary files /dev/null and b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.bundle differ diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.c b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.c new file mode 100644 index 0000000..9c7f236 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/crc8_1wire_ext.c @@ -0,0 +1,28 @@ +#include "ruby.h" +#include "extconf.h" + +#include "crc8_1wire.h" + +VALUE Digest_CRC81Wire_update(VALUE self, VALUE data) +{ + VALUE crc_ivar_name = rb_intern("@crc"); + VALUE crc_ivar = rb_ivar_get(self, crc_ivar_name); + crc8_t crc = NUM2CHR(crc_ivar); + + const char *data_ptr = StringValuePtr(data); + size_t length = RSTRING_LEN(data); + + crc = crc8_1wire_update(crc,data_ptr,length); + + rb_ivar_set(self, 
crc_ivar_name, UINT2NUM(crc)); + return self; +} + +void Init_crc8_1wire_ext() +{ + VALUE mDigest = rb_const_get(rb_cObject, rb_intern("Digest")); + VALUE cCRC81Wire = rb_const_get(mDigest, rb_intern("CRC8_1Wire")); + + rb_undef_method(cCRC81Wire, "update"); + rb_define_method(cCRC81Wire, "update", Digest_CRC81Wire_update, 1); +} diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/extconf.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/extconf.rb new file mode 100644 index 0000000..8b45a79 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/ext/digest/crc8_1wire/extconf.rb @@ -0,0 +1,7 @@ +require 'mkmf' + +have_header("stdint.h") +have_header('stddef.h') + +create_header +create_makefile "crc8_1wire_ext" diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/gemspec.yml b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/gemspec.yml new file mode 100644 index 0000000..ea8ad88 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/gemspec.yml @@ -0,0 +1,28 @@ +name: digest-crc +version: 0.6.4 +summary: A Cyclic Redundancy Check (CRC) library for Ruby. +description: + Adds support for calculating Cyclic Redundancy Check (CRC) to the Digest + module. + +license: MIT +authors: Postmodern +email: postmodern.mod3@gmail.com +homepage: https://github.com/postmodern/digest-crc#readme + +metadata: + documentation_uri: https://rubydoc.info/gems/digest-crc + source_code_uri: https://github.com/postmodern/digest-crc + bug_tracker_uri: https://github.com/postmodern/digest-crc/issues + changelog_uri: https://github.com/postmodern/digest-crc/blob/master/ChangeLog.md + +extensions: + - ext/digest/Rakefile + +has_yard: true + +dependencies: + rake: ">= 12.0.0, < 14.0.0" + +development_dependencies: + bundler: ~> 2.0 diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc.rb new file mode 100644 index 0000000..cc3508d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc.rb @@ -0,0 +1,135 @@ +require 'digest' + +module Digest + # + # Base class for all CRC algorithms. + # + class CRC < Digest::Class + + include Digest::Instance + + # The initial value of the CRC checksum + INIT_CRC = 0x00 + + # The XOR mask to apply to the resulting CRC checksum + XOR_MASK = 0x00 + + # The bit width of the CRC checksum + WIDTH = 0 + + # Default place holder CRC table + TABLE = [].freeze + + # + # Calculates the CRC checksum. + # + # @param [String] data + # The given data. + # + # @return [Integer] + # The CRC checksum. + # + def self.checksum(data) + crc = self.new + crc << data + + return crc.checksum + end + + # + # Packs the given CRC checksum. + # + # @param [Integer] crc + # The raw CRC checksum. + # + # @return [String] + # The packed CRC checksum. + # + # @abstract + # + def self.pack(crc) + raise(NotImplementedError,"#{self.class}##{__method__} not implemented") + end + + # + # Initializes the CRC checksum. + # + def initialize + @init_crc = self.class.const_get(:INIT_CRC) + @xor_mask = self.class.const_get(:XOR_MASK) + @width = self.class.const_get(:WIDTH) + @table = self.class.const_get(:TABLE) + + reset + end + + # + # The input block length. + # + # @return [1] + # + def block_length + 1 + end + + # + # The length of the digest. + # + # @return [Integer] + # The length in bytes. + # + def digest_length + (@width / 8.0).ceil + end + + # + # Updates the CRC checksum with the given data. 
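+    # Subclasses are expected to override this method with a table-driven
+    # implementation; the base class only defines the interface.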
+ # + # @param [String] data + # The data to update the CRC checksum with. + # + # @abstract + # + def update(data) + raise(NotImplementedError,"#{self.class}##{__method__} not implemented") + end + + # + # @see #update + # + def <<(data) + update(data) + return self + end + + # + # Resets the CRC checksum. + # + # @return [Integer] + # The default value of the CRC checksum. + # + def reset + @crc = @init_crc + end + + # + # The resulting CRC checksum. + # + # @return [Integer] + # The resulting CRC checksum. + # + def checksum + @crc ^ @xor_mask + end + + # + # Finishes the CRC checksum calculation. + # + # @see pack + # + def finish + self.class.pack(checksum) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc1.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc1.rb new file mode 100644 index 0000000..43c6193 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc1.rb @@ -0,0 +1,35 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC1 algorithm. + # + class CRC1 < CRC + + # + # Packs the CRC1 checksum. + # + # @return [String] + # The CRC1 checksum. + # + def self.pack(crc) + [crc].pack('c*') + end + + # + # Updates the CRC1 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + accum = 0 + data.each_byte { |b| accum += b } + + @crc += (accum % 256) + + return self + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc15.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc15.rb new file mode 100644 index 0000000..0a3c1a6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc15.rb @@ -0,0 +1,70 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC15 algorithm. 
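+  #
+  # An illustrative usage sketch (assuming the Digest::Class front-end this
+  # gem builds on; `checksum` is defined on the Digest::CRC base class above):
+  #
+  #     Digest::CRC15.checksum("123456789")   # => Integer CRC15 value
+  #     Digest::CRC15.hexdigest("123456789")  # => packed checksum as a hex string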
+  #
+  # @since 0.5.0
+  #
+  class CRC15 < CRC
+
+    WIDTH = 15
+
+    # Generated by `./pycrc.py --algorithm=table-driven --model=crc-15 --generate=c`
+    TABLE = [
+      0x0000, 0x4599, 0x4eab, 0x0b32, 0x58cf, 0x1d56, 0x1664, 0x53fd, 0x7407, 0x319e, 0x3aac, 0x7f35, 0x2cc8, 0x6951, 0x6263, 0x27fa,
+      0x2d97, 0x680e, 0x633c, 0x26a5, 0x7558, 0x30c1, 0x3bf3, 0x7e6a, 0x5990, 0x1c09, 0x173b, 0x52a2, 0x015f, 0x44c6, 0x4ff4, 0x0a6d,
+      0x5b2e, 0x1eb7, 0x1585, 0x501c, 0x03e1, 0x4678, 0x4d4a, 0x08d3, 0x2f29, 0x6ab0, 0x6182, 0x241b, 0x77e6, 0x327f, 0x394d, 0x7cd4,
+      0x76b9, 0x3320, 0x3812, 0x7d8b, 0x2e76, 0x6bef, 0x60dd, 0x2544, 0x02be, 0x4727, 0x4c15, 0x098c, 0x5a71, 0x1fe8, 0x14da, 0x5143,
+      0x73c5, 0x365c, 0x3d6e, 0x78f7, 0x2b0a, 0x6e93, 0x65a1, 0x2038, 0x07c2, 0x425b, 0x4969, 0x0cf0, 0x5f0d, 0x1a94, 0x11a6, 0x543f,
+      0x5e52, 0x1bcb, 0x10f9, 0x5560, 0x069d, 0x4304, 0x4836, 0x0daf, 0x2a55, 0x6fcc, 0x64fe, 0x2167, 0x729a, 0x3703, 0x3c31, 0x79a8,
+      0x28eb, 0x6d72, 0x6640, 0x23d9, 0x7024, 0x35bd, 0x3e8f, 0x7b16, 0x5cec, 0x1975, 0x1247, 0x57de, 0x0423, 0x41ba, 0x4a88, 0x0f11,
+      0x057c, 0x40e5, 0x4bd7, 0x0e4e, 0x5db3, 0x182a, 0x1318, 0x5681, 0x717b, 0x34e2, 0x3fd0, 0x7a49, 0x29b4, 0x6c2d, 0x671f, 0x2286,
+      0x2213, 0x678a, 0x6cb8, 0x2921, 0x7adc, 0x3f45, 0x3477, 0x71ee, 0x5614, 0x138d, 0x18bf, 0x5d26, 0x0edb, 0x4b42, 0x4070, 0x05e9,
+      0x0f84, 0x4a1d, 0x412f, 0x04b6, 0x574b, 0x12d2, 0x19e0, 0x5c79, 0x7b83, 0x3e1a, 0x3528, 0x70b1, 0x234c, 0x66d5, 0x6de7, 0x287e,
+      0x793d, 0x3ca4, 0x3796, 0x720f, 0x21f2, 0x646b, 0x6f59, 0x2ac0, 0x0d3a, 0x48a3, 0x4391, 0x0608, 0x55f5, 0x106c, 0x1b5e, 0x5ec7,
+      0x54aa, 0x1133, 0x1a01, 0x5f98, 0x0c65, 0x49fc, 0x42ce, 0x0757, 0x20ad, 0x6534, 0x6e06, 0x2b9f, 0x7862, 0x3dfb, 0x36c9, 0x7350,
+      0x51d6, 0x144f, 0x1f7d, 0x5ae4, 0x0919, 0x4c80, 0x47b2, 0x022b, 0x25d1, 0x6048, 0x6b7a, 0x2ee3, 0x7d1e, 0x3887, 0x33b5, 0x762c,
+      0x7c41, 0x39d8, 0x32ea, 0x7773, 0x248e, 0x6117, 0x6a25, 0x2fbc, 0x0846, 0x4ddf, 0x46ed, 0x0374, 0x5089, 0x1510, 0x1e22, 0x5bbb,
+      0x0af8, 0x4f61, 0x4453, 0x01ca, 0x5237, 0x17ae, 0x1c9c, 0x5905, 0x7eff, 0x3b66, 0x3054, 0x75cd, 0x2630, 0x63a9, 0x689b, 0x2d02,
+      0x276f, 0x62f6, 0x69c4, 0x2c5d, 0x7fa0, 0x3a39, 0x310b, 0x7492, 0x5368, 0x16f1, 0x1dc3, 0x585a, 0x0ba7, 0x4e3e, 0x450c, 0x0095
+    ].freeze
+
+    #
+    # Packs the CRC15 checksum.
+    #
+    # @param [Integer] crc
+    #   The CRC15 checksum to pack.
+    #
+    # @return [String]
+    #   The packed CRC15 checksum.
+    #
+    def self.pack(crc)
+      buffer = ''
+
+      buffer << ((crc & 0x7f00) >> 8).chr
+      buffer << (crc & 0xff).chr
+
+      buffer
+    end
+
+    #
+    # Updates the CRC15 checksum.
+    #
+    # @param [String] data
+    #   The data to update the checksum with.
+    #
+    def update(data)
+      data.each_byte do |b|
+        @crc = (@table[((@crc >> 7) ^ b) & 0xff] ^ (@crc << 8)) & 0x7fff
+      end
+
+      return self
+    end
+
+  end
+end
+
+if RUBY_ENGINE == 'ruby'
+  begin; require 'digest/crc15/crc15_ext'; rescue LoadError; end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16.rb
new file mode 100644
index 0000000..e3bea6d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16.rb
@@ -0,0 +1,86 @@
+require 'digest/crc'
+
+module Digest
+  #
+  # Implements the CRC16 algorithm.
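+  #
+  # An illustrative streaming sketch (`<<` and `checksum` come from the
+  # Digest::CRC base class above):
+  #
+  #     crc = Digest::CRC16.new
+  #     crc << "1234"
+  #     crc << "56789"
+  #     crc.checksum  # => same Integer as Digest::CRC16.checksum("123456789")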
+ # + class CRC16 < CRC + + WIDTH = 16 + + INIT_CRC = 0x0000 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-16` + TABLE = [ + 0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241, + 0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440, + 0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40, + 0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841, + 0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40, + 0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41, + 0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641, + 0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040, + 0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240, + 0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441, + 0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41, + 0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840, + 0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41, + 0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40, + 0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640, + 0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041, + 0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240, + 0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441, + 0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41, + 0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840, + 0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41, + 0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40, + 0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640, + 0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041, + 0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241, + 0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440, + 0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40, + 0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841, + 0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40, + 0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41, + 0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641, + 0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040 + ].freeze + + # + # Packs the CRC16 checksum. + # + # @param [Integer] crc + # The CRC16 checksum to pack. + # + # @return [String] + # The packed CRC16 checksum. + # + def self.pack(crc) + buffer = '' + + buffer << ((crc & 0xff00) >> 8).chr + buffer << (crc & 0xff).chr + + buffer + end + + # + # Updates the CRC16 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[(@crc ^ b) & 0xff] ^ (@crc >> 8)) & 0xffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16/crc16_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_ccitt.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_ccitt.rb new file mode 100644 index 0000000..b432e46 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_ccitt.rb @@ -0,0 +1,66 @@ +require 'digest/crc16' + +module Digest + # + # Implements the CRC16 CCITT algorithm. 
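+  #
+  # An illustrative sketch; "123456789" should yield 0x29b1, the commonly
+  # published check value of CRC-16/CCITT-FALSE, which this variant implements:
+  #
+  #     Digest::CRC16CCITT.checksum("123456789")  # => 0x29b1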
+ # + class CRC16CCITT < CRC16 + + INIT_CRC = 0xffff + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-16-ccitt --generate=c` + TABLE = [ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 + ].freeze + + # + # Updates the CRC16 CCITT checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 8) ^ b) & 0xff] ^ (@crc << 8)) & 0xffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16_ccitt/crc16_ccitt_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_dnp.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_dnp.rb new file mode 100644 index 0000000..e5149a9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_dnp.rb @@ -0,0 +1,69 @@ +require 'digest/crc16' + +module Digest + # + # Implements the CRC16 DNP algorithm. 
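+  #
+  # Unlike the other CRC16 variants here, this class overrides #finish
+  # (below) to pack the bitwise complement of the register. A minimal
+  # one-shot sketch:
+  #
+  #     Digest::CRC16DNP.hexdigest("123456789")  # => hex of the complemented CRC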
+ # + class CRC16DNP < CRC16 + + INIT_CRC = 0 + + TABLE = [ + 0x0000, 0x365e, 0x6cbc, 0x5ae2, 0xd978, 0xef26, 0xb5c4, 0x839a, + 0xff89, 0xc9d7, 0x9335, 0xa56b, 0x26f1, 0x10af, 0x4a4d, 0x7c13, + 0xb26b, 0x8435, 0xded7, 0xe889, 0x6b13, 0x5d4d, 0x07af, 0x31f1, + 0x4de2, 0x7bbc, 0x215e, 0x1700, 0x949a, 0xa2c4, 0xf826, 0xce78, + 0x29af, 0x1ff1, 0x4513, 0x734d, 0xf0d7, 0xc689, 0x9c6b, 0xaa35, + 0xd626, 0xe078, 0xba9a, 0x8cc4, 0x0f5e, 0x3900, 0x63e2, 0x55bc, + 0x9bc4, 0xad9a, 0xf778, 0xc126, 0x42bc, 0x74e2, 0x2e00, 0x185e, + 0x644d, 0x5213, 0x08f1, 0x3eaf, 0xbd35, 0x8b6b, 0xd189, 0xe7d7, + 0x535e, 0x6500, 0x3fe2, 0x09bc, 0x8a26, 0xbc78, 0xe69a, 0xd0c4, + 0xacd7, 0x9a89, 0xc06b, 0xf635, 0x75af, 0x43f1, 0x1913, 0x2f4d, + 0xe135, 0xd76b, 0x8d89, 0xbbd7, 0x384d, 0x0e13, 0x54f1, 0x62af, + 0x1ebc, 0x28e2, 0x7200, 0x445e, 0xc7c4, 0xf19a, 0xab78, 0x9d26, + 0x7af1, 0x4caf, 0x164d, 0x2013, 0xa389, 0x95d7, 0xcf35, 0xf96b, + 0x8578, 0xb326, 0xe9c4, 0xdf9a, 0x5c00, 0x6a5e, 0x30bc, 0x06e2, + 0xc89a, 0xfec4, 0xa426, 0x9278, 0x11e2, 0x27bc, 0x7d5e, 0x4b00, + 0x3713, 0x014d, 0x5baf, 0x6df1, 0xee6b, 0xd835, 0x82d7, 0xb489, + 0xa6bc, 0x90e2, 0xca00, 0xfc5e, 0x7fc4, 0x499a, 0x1378, 0x2526, + 0x5935, 0x6f6b, 0x3589, 0x03d7, 0x804d, 0xb613, 0xecf1, 0xdaaf, + 0x14d7, 0x2289, 0x786b, 0x4e35, 0xcdaf, 0xfbf1, 0xa113, 0x974d, + 0xeb5e, 0xdd00, 0x87e2, 0xb1bc, 0x3226, 0x0478, 0x5e9a, 0x68c4, + 0x8f13, 0xb94d, 0xe3af, 0xd5f1, 0x566b, 0x6035, 0x3ad7, 0x0c89, + 0x709a, 0x46c4, 0x1c26, 0x2a78, 0xa9e2, 0x9fbc, 0xc55e, 0xf300, + 0x3d78, 0x0b26, 0x51c4, 0x679a, 0xe400, 0xd25e, 0x88bc, 0xbee2, + 0xc2f1, 0xf4af, 0xae4d, 0x9813, 0x1b89, 0x2dd7, 0x7735, 0x416b, + 0xf5e2, 0xc3bc, 0x995e, 0xaf00, 0x2c9a, 0x1ac4, 0x4026, 0x7678, + 0x0a6b, 0x3c35, 0x66d7, 0x5089, 0xd313, 0xe54d, 0xbfaf, 0x89f1, + 0x4789, 0x71d7, 0x2b35, 0x1d6b, 0x9ef1, 0xa8af, 0xf24d, 0xc413, + 0xb800, 0x8e5e, 0xd4bc, 0xe2e2, 0x6178, 0x5726, 0x0dc4, 0x3b9a, + 0xdc4d, 0xea13, 0xb0f1, 0x86af, 0x0535, 0x336b, 0x6989, 0x5fd7, + 0x23c4, 0x159a, 0x4f78, 0x7926, 0xfabc, 0xcce2, 0x9600, 0xa05e, + 0x6e26, 0x5878, 0x029a, 0x34c4, 0xb75e, 0x8100, 0xdbe2, 0xedbc, + 0x91af, 0xa7f1, 0xfd13, 0xcb4d, 0x48d7, 0x7e89, 0x246b, 0x1235 + ].freeze + + # + # Updates the CRC16 DNP checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@crc >> 8) ^ @table[(@crc ^ b) & 0xff]) + end + + return self + end + + def finish + self.class.pack(~@crc) + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16_dnp/crc16_dnp_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_genibus.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_genibus.rb new file mode 100644 index 0000000..a5e9d49 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_genibus.rb @@ -0,0 +1,72 @@ +require 'digest/crc16' + +module Digest + # + # Implements the CRC16 Genibus algorithm. 
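+  #
+  # The INIT_XOR/INIT_CRC/XOR_MASK constants below express the Genibus
+  # convention: the register starts as 0x0000 ^ 0xffff, and the final value
+  # is XORed with 0xffff again by Digest::CRC#checksum.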
+  #
+  # @since 0.5.0
+  #
+  class CRC16Genibus < CRC16
+
+    INIT_XOR = 0xffff
+
+    INIT_CRC = 0x0000 ^ INIT_XOR
+
+    XOR_MASK = 0xffff
+
+    # Generated by `./pycrc.py --algorithm=table-driven --model=crc-16-genibus --generate=c`
+    TABLE = [
+      0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+      0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+      0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+      0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+      0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+      0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+      0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+      0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+      0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+      0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+      0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+      0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+      0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+      0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+      0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+      0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+      0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+      0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+      0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+      0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+      0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+      0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+      0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+      0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+      0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+      0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+      0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+      0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+      0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+      0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+      0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+      0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+    ].freeze
+
+    #
+    # Updates the CRC16 Genibus checksum.
+    #
+    # @param [String] data
+    #   The data to update the checksum with.
+    #
+    def update(data)
+      data.each_byte do |b|
+        @crc = (@table[((@crc >> 8) ^ b) & 0xff] ^ (@crc << 8)) & 0xffff
+      end
+
+      return self
+    end
+
+  end
+end
+
+if RUBY_ENGINE == 'ruby'
+  begin; require 'digest/crc16_genibus/crc16_genibus_ext'; rescue LoadError; end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_kermit.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_kermit.rb
new file mode 100644
index 0000000..b683623
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_kermit.rb
@@ -0,0 +1,66 @@
+require "digest/crc16"
+
+module Digest
+  #
+  # Implements Kermit's CRC16 function.
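+  #
+  # An illustrative sketch; this is the reflected, init-zero variant, so it
+  # differs from CRC16CCITT above for the same input:
+  #
+  #     Digest::CRC16Kermit.hexdigest("123456789")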
+ # + # @since 0.5.0 + # + class CRC16Kermit < CRC16 + + # Generated by `./pycrc.py --algorithm=table-driven --model=kermit --generate=c` + TABLE = [ + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 + ].freeze + + # + # Updates the CRC16 Kermit checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = (@table[(@crc ^ b) & 0xff] ^ (@crc >> 8)) & 0xffff + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16_kermit/crc16_kermit_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_modbus.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_modbus.rb new file mode 100644 index 0000000..fe2abe4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_modbus.rb @@ -0,0 +1,52 @@ +require 'digest/crc16' + +module Digest + # + # Implements the CRC16 Modbus algorithm. 
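+  #
+  # An illustrative sketch; "123456789" should yield 0x4b37, the commonly
+  # published CRC-16/MODBUS check value:
+  #
+  #     Digest::CRC16Modbus.checksum("123456789")  # => 0x4b37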
+  #
+  class CRC16Modbus < CRC16
+
+    INIT_CRC = 0xffff
+
+    # Generated by `./pycrc.py --algorithm=table-driven --model=crc-16-modbus --generate=c`
+    TABLE = [
+      0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
+      0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
+      0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
+      0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
+      0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
+      0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
+      0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
+      0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
+      0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
+      0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
+      0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
+      0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
+      0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
+      0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
+      0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
+      0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
+      0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
+      0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
+      0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
+      0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
+      0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
+      0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
+      0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
+      0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
+      0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
+      0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
+      0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
+      0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
+      0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
+      0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
+      0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
+      0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040
+    ].freeze
+
+  end
+end
+
+if RUBY_ENGINE == 'ruby'
+  begin; require 'digest/crc16_modbus/crc16_modbus_ext'; rescue LoadError; end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_qt.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_qt.rb
new file mode 100644
index 0000000..9e6739a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_qt.rb
@@ -0,0 +1,11 @@
+require 'digest/crc16_x_25'
+
+module Digest
+  #
+  # Implements the CRC16-CCITT variant used by Qt.
+  #
+  # @note This is exactly the same as the CRC16 X-25 algorithm.
+  #
+  class CRC16QT < CRC16X25
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_usb.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_usb.rb
new file mode 100644
index 0000000..d04a2a2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_usb.rb
@@ -0,0 +1,18 @@
+require 'digest/crc16'
+
+module Digest
+  #
+  # Implements the CRC16 USB algorithm.
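+  #
+  # Only the two constants below differ from CRC16: the reflected table and
+  # #update are inherited, the register starts at 0xffff, and the result is
+  # XORed with 0xffff by Digest::CRC#checksum.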
+  #
+  class CRC16USB < CRC16
+
+    INIT_CRC = 0xffff
+
+    XOR_MASK = 0xffff
+
+  end
+end
+
+if RUBY_ENGINE == 'ruby'
+  begin; require 'digest/crc16_usb/crc16_usb_ext'; rescue LoadError; end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_x_25.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_x_25.rb
new file mode 100644
index 0000000..dbaf65d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_x_25.rb
@@ -0,0 +1,56 @@
+require 'digest/crc16'
+
+module Digest
+  #
+  # Implements the CRC16 X25 algorithm.
+  #
+  class CRC16X25 < CRC16
+
+    INIT_XOR = 0xffff
+
+    INIT_CRC = 0x0 ^ INIT_XOR
+
+    XOR_MASK = 0xffff
+
+    # Generated by `./pycrc.py --algorithm=table-driven --model=x-25 --generate=c`
+    TABLE = [
+      0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+      0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+      0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+      0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+      0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+      0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+      0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+      0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+      0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+      0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+      0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+      0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+      0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+      0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+      0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+      0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+      0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+      0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+      0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+      0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+      0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+      0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+      0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+      0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+      0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+      0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+      0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+      0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+      0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+      0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+      0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+      0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+    ].freeze
+
+  end
+end
+
+if RUBY_ENGINE == 'ruby'
+  begin; require 'digest/crc16_x_25/crc16_x_25_ext'; rescue LoadError; end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_xmodem.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_xmodem.rb
new file mode 100644
index 0000000..6bd6c7d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_xmodem.rb
@@ -0,0 +1,64 @@
+require 'digest/crc16'
+
+module Digest
+  #
+  # Implements the CRC16 XModem algorithm.
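+  #
+  # An illustrative sketch; "123456789" should yield 0x31c3, the commonly
+  # published CRC-16/XMODEM check value:
+  #
+  #     Digest::CRC16XModem.checksum("123456789")  # => 0x31c3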
+ # + class CRC16XModem < CRC16 + + # Generated by `./pycrc.py --algorithm=table-driven --model=xmodem --generate=c` + TABLE = [ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 + ].freeze + + # + # Updates the CRC16 XModem checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 8) ^ b) & 0xff] ^ (@crc << 8)) & 0xffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16_xmodem/crc16_xmodem_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_zmodem.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_zmodem.rb new file mode 100644 index 0000000..fc79b8e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc16_zmodem.rb @@ -0,0 +1,64 @@ +require 'digest/crc16' + +module Digest + # + # Implements the CRC16 ZModem algorithm. 
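+  #
+  # With the init value of 0x0000 inherited from CRC16 and the table below,
+  # this produces the same checksums as CRC16XModem above.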
+ # + class CRC16ZModem < CRC16 + + # Generated by `./pycrc.py --algorithm=table-driven --model=zmodem --generate=c` + TABLE = [ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 + ].freeze + + # + # Updates the CRC16 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 8) ^ b) & 0xff] ^ (@crc << 8)) & 0xffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc16_zmodem/crc16_zmodem_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc24.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc24.rb new file mode 100644 index 0000000..ab71200 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc24.rb @@ -0,0 +1,87 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC24 algorithm. 
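+  #
+  # The 0xb704ce initial value and 0x864cfb polynomial below match the
+  # CRC-24 used for OpenPGP ASCII armor (RFC 4880). A one-shot sketch:
+  #
+  #     Digest::CRC24.hexdigest("123456789")  # => six hex digits (24 bits)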
+  #
+  class CRC24 < CRC
+
+    WIDTH = 24
+
+    INIT_CRC = 0xb704ce
+
+    # Generated by `./pycrc.py --algorithm=table-driven --model=crc-24 --generate=c`
+    TABLE = [
+      0x000000, 0x864cfb, 0x8ad50d, 0x0c99f6, 0x93e6e1, 0x15aa1a, 0x1933ec, 0x9f7f17,
+      0xa18139, 0x27cdc2, 0x2b5434, 0xad18cf, 0x3267d8, 0xb42b23, 0xb8b2d5, 0x3efe2e,
+      0xc54e89, 0x430272, 0x4f9b84, 0xc9d77f, 0x56a868, 0xd0e493, 0xdc7d65, 0x5a319e,
+      0x64cfb0, 0xe2834b, 0xee1abd, 0x685646, 0xf72951, 0x7165aa, 0x7dfc5c, 0xfbb0a7,
+      0x0cd1e9, 0x8a9d12, 0x8604e4, 0x00481f, 0x9f3708, 0x197bf3, 0x15e205, 0x93aefe,
+      0xad50d0, 0x2b1c2b, 0x2785dd, 0xa1c926, 0x3eb631, 0xb8faca, 0xb4633c, 0x322fc7,
+      0xc99f60, 0x4fd39b, 0x434a6d, 0xc50696, 0x5a7981, 0xdc357a, 0xd0ac8c, 0x56e077,
+      0x681e59, 0xee52a2, 0xe2cb54, 0x6487af, 0xfbf8b8, 0x7db443, 0x712db5, 0xf7614e,
+      0x19a3d2, 0x9fef29, 0x9376df, 0x153a24, 0x8a4533, 0x0c09c8, 0x00903e, 0x86dcc5,
+      0xb822eb, 0x3e6e10, 0x32f7e6, 0xb4bb1d, 0x2bc40a, 0xad88f1, 0xa11107, 0x275dfc,
+      0xdced5b, 0x5aa1a0, 0x563856, 0xd074ad, 0x4f0bba, 0xc94741, 0xc5deb7, 0x43924c,
+      0x7d6c62, 0xfb2099, 0xf7b96f, 0x71f594, 0xee8a83, 0x68c678, 0x645f8e, 0xe21375,
+      0x15723b, 0x933ec0, 0x9fa736, 0x19ebcd, 0x8694da, 0x00d821, 0x0c41d7, 0x8a0d2c,
+      0xb4f302, 0x32bff9, 0x3e260f, 0xb86af4, 0x2715e3, 0xa15918, 0xadc0ee, 0x2b8c15,
+      0xd03cb2, 0x567049, 0x5ae9bf, 0xdca544, 0x43da53, 0xc596a8, 0xc90f5e, 0x4f43a5,
+      0x71bd8b, 0xf7f170, 0xfb6886, 0x7d247d, 0xe25b6a, 0x641791, 0x688e67, 0xeec29c,
+      0x3347a4, 0xb50b5f, 0xb992a9, 0x3fde52, 0xa0a145, 0x26edbe, 0x2a7448, 0xac38b3,
+      0x92c69d, 0x148a66, 0x181390, 0x9e5f6b, 0x01207c, 0x876c87, 0x8bf571, 0x0db98a,
+      0xf6092d, 0x7045d6, 0x7cdc20, 0xfa90db, 0x65efcc, 0xe3a337, 0xef3ac1, 0x69763a,
+      0x578814, 0xd1c4ef, 0xdd5d19, 0x5b11e2, 0xc46ef5, 0x42220e, 0x4ebbf8, 0xc8f703,
+      0x3f964d, 0xb9dab6, 0xb54340, 0x330fbb, 0xac70ac, 0x2a3c57, 0x26a5a1, 0xa0e95a,
+      0x9e1774, 0x185b8f, 0x14c279, 0x928e82, 0x0df195, 0x8bbd6e, 0x872498, 0x016863,
+      0xfad8c4, 0x7c943f, 0x700dc9, 0xf64132, 0x693e25, 0xef72de, 0xe3eb28, 0x65a7d3,
+      0x5b59fd, 0xdd1506, 0xd18cf0, 0x57c00b, 0xc8bf1c, 0x4ef3e7, 0x426a11, 0xc426ea,
+      0x2ae476, 0xaca88d, 0xa0317b, 0x267d80, 0xb90297, 0x3f4e6c, 0x33d79a, 0xb59b61,
+      0x8b654f, 0x0d29b4, 0x01b042, 0x87fcb9, 0x1883ae, 0x9ecf55, 0x9256a3, 0x141a58,
+      0xefaaff, 0x69e604, 0x657ff2, 0xe33309, 0x7c4c1e, 0xfa00e5, 0xf69913, 0x70d5e8,
+      0x4e2bc6, 0xc8673d, 0xc4fecb, 0x42b230, 0xddcd27, 0x5b81dc, 0x57182a, 0xd154d1,
+      0x26359f, 0xa07964, 0xace092, 0x2aac69, 0xb5d37e, 0x339f85, 0x3f0673, 0xb94a88,
+      0x87b4a6, 0x01f85d, 0x0d61ab, 0x8b2d50, 0x145247, 0x921ebc, 0x9e874a, 0x18cbb1,
+      0xe37b16, 0x6537ed, 0x69ae1b, 0xefe2e0, 0x709df7, 0xf6d10c, 0xfa48fa, 0x7c0401,
+      0x42fa2f, 0xc4b6d4, 0xc82f22, 0x4e63d9, 0xd11cce, 0x575035, 0x5bc9c3, 0xdd8538
+    ].freeze
+
+    #
+    # Packs the CRC24 checksum.
+    #
+    # @param [Integer] crc
+    #   The checksum to pack.
+    #
+    # @return [String]
+    #   The packed checksum.
+    #
+    def self.pack(crc)
+      buffer = ''
+
+      buffer << ((crc & 0xff0000) >> 16).chr
+      buffer << ((crc & 0x00ff00) >> 8).chr
+      buffer << (crc & 0x0000ff).chr
+
+      buffer
+    end
+
+    #
+    # Updates the CRC24 checksum.
+    #
+    # @param [String] data
+    #   The data to update the checksum with.
+ # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 16) ^ b) & 0xff] ^ (@crc << 8)) & 0xffffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc24/crc24_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32.rb new file mode 100644 index 0000000..dffd492 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32.rb @@ -0,0 +1,122 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC32 algorithm. + # + class CRC32 < CRC + + WIDTH = 32 + + INIT_CRC = 0xffffffff + + XOR_MASK = 0xffffffff + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-32 --generate=c` + TABLE = [ + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 
0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d + ].freeze + + # + # Packs the CRC32 checksum. + # + # @param [Integer] crc + # The checksum to pack. + # + # @return [String] + # The packed checksum. + # + def self.pack(crc) + buffer = '' + + buffer << ((crc & 0xff000000) >> 24).chr + buffer << ((crc & 0xff0000) >> 16).chr + buffer << ((crc & 0xff00) >> 8).chr + buffer << (crc & 0xff).chr + + buffer + end + + # + # Updates the CRC32 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = @table[(@crc ^ b) & 0xff] ^ ((@crc >> 8) & 0xffffffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32/crc32_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_bzip2.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_bzip2.rb new file mode 100644 index 0000000..2d4df58 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_bzip2.rb @@ -0,0 +1,64 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32 BZip2 algorithm + # + class CRC32BZip2 < CRC32 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-32-bzip2 --generate=c` + TABLE = [ + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 
0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 + ].freeze + + # + # Updates the CRC32 BZip2 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = (@table[((@crc >> 24) ^ b) & 0xff] ^ (@crc << 8)) & 0xffffffff + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32_bzip2/crc32_bzip2_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_jam.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_jam.rb new file mode 100644 index 0000000..5ebdf64 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_jam.rb @@ -0,0 +1,16 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32 Jam algorithm. + # + class CRC32Jam < CRC32 + + INIT_XOR = 0xffffffff + + INIT_CRC = 0x0 ^ INIT_XOR + + XOR_MASK = 0x00000000 + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_mpeg.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_mpeg.rb new file mode 100644 index 0000000..b02e463 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_mpeg.rb @@ -0,0 +1,101 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32 Mpeg algorithm. 
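+  #
+  # Unlike CRC32, the XOR_MASK below is zero, so the register is not
+  # complemented at the end; only the 0xffffffff initial value is kept.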
+ # + class CRC32MPEG < CRC32 + + XOR_MASK = 0x00000000 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-32-mpeg --generate=c` + TABLE = [ + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, + 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, + 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, + 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, + 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, + 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, + 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, + 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, + 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, + 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, + 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, + 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, + 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, + 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, + 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, + 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, + 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, + 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, + 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, + 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, + 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, + 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, + 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, + 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, + 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, + 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, + 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, + 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, + 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, + 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, + 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, + 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, + 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 + ].freeze + + # + # Updates the CRC32 Mpeg checksum. + # + # @param [String] data + # The data to update the checksum with. 
+ # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 24) ^ b) & 0xff] ^ (@crc << 8)) & 0xffffffff) + end + + return self + end + + end + + # @deprecated Please use {CRC32MPEG}. + CRC32Mpeg = CRC32MPEG +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32_mpeg/crc32_mpeg_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_posix.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_posix.rb new file mode 100644 index 0000000..bbaff8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_posix.rb @@ -0,0 +1,65 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32 POSIX algorithm. + # + class CRC32POSIX < CRC32 + + INIT_CRC = 0x00000000 + + TABLE = [ + 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, + 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, + 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, + 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, + 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, + 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, + 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, + 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, + 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, + 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, + 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, + 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, + 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, + 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, + 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, + 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, + 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, + 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, + 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, + 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, + 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, + 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, + 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, + 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, + 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, + 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, + 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, + 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, + 
0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, + 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, + 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, + 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 + ].freeze + + # + # Updates the CRC32 POSIX checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = (@table[((@crc >> 24) ^ b) & 0xff] ^ (@crc << 8)) & 0xffffffff + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32_posix/crc32_posix_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_xfer.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_xfer.rb new file mode 100644 index 0000000..942e582 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32_xfer.rb @@ -0,0 +1,68 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32 XFER algorithm. + # + class CRC32XFER < CRC32 + + INIT_CRC = 0x00000000 + + XOR_MASK = 0x00000000 + + # Generated by `./pycrc.py --algorithm=table-driven --model=xfer --generate=c` + TABLE = [ + 0x00000000, 0x000000af, 0x0000015e, 0x000001f1, 0x000002bc, 0x00000213, 0x000003e2, 0x0000034d, + 0x00000578, 0x000005d7, 0x00000426, 0x00000489, 0x000007c4, 0x0000076b, 0x0000069a, 0x00000635, + 0x00000af0, 0x00000a5f, 0x00000bae, 0x00000b01, 0x0000084c, 0x000008e3, 0x00000912, 0x000009bd, + 0x00000f88, 0x00000f27, 0x00000ed6, 0x00000e79, 0x00000d34, 0x00000d9b, 0x00000c6a, 0x00000cc5, + 0x000015e0, 0x0000154f, 0x000014be, 0x00001411, 0x0000175c, 0x000017f3, 0x00001602, 0x000016ad, + 0x00001098, 0x00001037, 0x000011c6, 0x00001169, 0x00001224, 0x0000128b, 0x0000137a, 0x000013d5, + 0x00001f10, 0x00001fbf, 0x00001e4e, 0x00001ee1, 0x00001dac, 0x00001d03, 0x00001cf2, 0x00001c5d, + 0x00001a68, 0x00001ac7, 0x00001b36, 0x00001b99, 0x000018d4, 0x0000187b, 0x0000198a, 0x00001925, + 0x00002bc0, 0x00002b6f, 0x00002a9e, 0x00002a31, 0x0000297c, 0x000029d3, 0x00002822, 0x0000288d, + 0x00002eb8, 0x00002e17, 0x00002fe6, 0x00002f49, 0x00002c04, 0x00002cab, 0x00002d5a, 0x00002df5, + 0x00002130, 0x0000219f, 0x0000206e, 0x000020c1, 0x0000238c, 0x00002323, 0x000022d2, 0x0000227d, + 0x00002448, 0x000024e7, 0x00002516, 0x000025b9, 0x000026f4, 0x0000265b, 0x000027aa, 0x00002705, + 0x00003e20, 0x00003e8f, 0x00003f7e, 0x00003fd1, 0x00003c9c, 0x00003c33, 0x00003dc2, 0x00003d6d, + 0x00003b58, 0x00003bf7, 0x00003a06, 0x00003aa9, 0x000039e4, 0x0000394b, 0x000038ba, 0x00003815, + 0x000034d0, 0x0000347f, 0x0000358e, 0x00003521, 0x0000366c, 0x000036c3, 0x00003732, 0x0000379d, + 0x000031a8, 0x00003107, 0x000030f6, 0x00003059, 0x00003314, 0x000033bb, 0x0000324a, 0x000032e5, + 0x00005780, 0x0000572f, 0x000056de, 0x00005671, 0x0000553c, 0x00005593, 0x00005462, 0x000054cd, + 0x000052f8, 0x00005257, 0x000053a6, 0x00005309, 0x00005044, 0x000050eb, 0x0000511a, 0x000051b5, + 0x00005d70, 0x00005ddf, 0x00005c2e, 0x00005c81, 0x00005fcc, 0x00005f63, 0x00005e92, 0x00005e3d, + 0x00005808, 0x000058a7, 0x00005956, 0x000059f9, 0x00005ab4, 0x00005a1b, 0x00005bea, 0x00005b45, + 0x00004260, 0x000042cf, 0x0000433e, 0x00004391, 0x000040dc, 0x00004073, 0x00004182, 0x0000412d, + 0x00004718, 0x000047b7, 0x00004646, 0x000046e9, 0x000045a4, 0x0000450b, 0x000044fa, 0x00004455, + 0x00004890, 0x0000483f, 0x000049ce, 
0x00004961, 0x00004a2c, 0x00004a83, 0x00004b72, 0x00004bdd, + 0x00004de8, 0x00004d47, 0x00004cb6, 0x00004c19, 0x00004f54, 0x00004ffb, 0x00004e0a, 0x00004ea5, + 0x00007c40, 0x00007cef, 0x00007d1e, 0x00007db1, 0x00007efc, 0x00007e53, 0x00007fa2, 0x00007f0d, + 0x00007938, 0x00007997, 0x00007866, 0x000078c9, 0x00007b84, 0x00007b2b, 0x00007ada, 0x00007a75, + 0x000076b0, 0x0000761f, 0x000077ee, 0x00007741, 0x0000740c, 0x000074a3, 0x00007552, 0x000075fd, + 0x000073c8, 0x00007367, 0x00007296, 0x00007239, 0x00007174, 0x000071db, 0x0000702a, 0x00007085, + 0x000069a0, 0x0000690f, 0x000068fe, 0x00006851, 0x00006b1c, 0x00006bb3, 0x00006a42, 0x00006aed, + 0x00006cd8, 0x00006c77, 0x00006d86, 0x00006d29, 0x00006e64, 0x00006ecb, 0x00006f3a, 0x00006f95, + 0x00006350, 0x000063ff, 0x0000620e, 0x000062a1, 0x000061ec, 0x00006143, 0x000060b2, 0x0000601d, + 0x00006628, 0x00006687, 0x00006776, 0x000067d9, 0x00006494, 0x0000643b, 0x000065ca, 0x00006565 + ].freeze + + # + # Updates the CRC32 XFER checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = (@table[((@crc >> 24) ^ b) & 0xff] ^ (@crc << 8)) & 0xffffffff + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32_xfer/crc32_xfer_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32c.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32c.rb new file mode 100644 index 0000000..203c991 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc32c.rb @@ -0,0 +1,82 @@ +require 'digest/crc32' + +module Digest + # + # Implements the CRC32c algorithm. + # + class CRC32c < CRC32 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-32c --generate=c` + TABLE = [ + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, + 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, + 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, + 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, + 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, + 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, + 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, + 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, + 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, + 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, + 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, + 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, + 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, + 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, + 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, + 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, + 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, + 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, + 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, + 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, + 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, + 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, + 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, + 0x082f63b7, 
0xfa44e0b4, 0xe9141340, 0x1b7f9043, + 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, + 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, + 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, + 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, + 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, + 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, + 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, + 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, + 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, + 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, + 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, + 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, + 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, + 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, + 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, + 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, + 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, + 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, + 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351 + ].freeze + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc32c/crc32c_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc5.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc5.rb new file mode 100644 index 0000000..2e57398 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc5.rb @@ -0,0 +1,78 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC5 algorithm. 
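+ #
+ # A brief usage sketch; the streaming interface follows the shared
+ # examples in spec/crc_examples.rb. No digest value is shown here
+ # because spec/crc5_spec.rb marks this implementation as pending (it
+ # does not match pycrc's crc-5 model):
+ #
+ #   require 'digest/crc5'
+ #
+ #   crc = Digest::CRC5.new
+ #   crc << '1234567890'
+ #   crc.hexdigest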
+ # + class CRC5 < CRC + + WIDTH = 5 + + INIT_CRC = 0x1f + + XOR_MASK = 0x1f + + CRC_MASK = (0x1f << 3) + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-5 --generate=c` + TABLE = [ + 0x00, 0x70, 0xe0, 0x90, 0x88, 0xf8, 0x68, 0x18, 0x58, 0x28, 0xb8, 0xc8, 0xd0, 0xa0, 0x30, 0x40, + 0xb0, 0xc0, 0x50, 0x20, 0x38, 0x48, 0xd8, 0xa8, 0xe8, 0x98, 0x08, 0x78, 0x60, 0x10, 0x80, 0xf0, + 0x28, 0x58, 0xc8, 0xb8, 0xa0, 0xd0, 0x40, 0x30, 0x70, 0x00, 0x90, 0xe0, 0xf8, 0x88, 0x18, 0x68, + 0x98, 0xe8, 0x78, 0x08, 0x10, 0x60, 0xf0, 0x80, 0xc0, 0xb0, 0x20, 0x50, 0x48, 0x38, 0xa8, 0xd8, + 0x50, 0x20, 0xb0, 0xc0, 0xd8, 0xa8, 0x38, 0x48, 0x08, 0x78, 0xe8, 0x98, 0x80, 0xf0, 0x60, 0x10, + 0xe0, 0x90, 0x00, 0x70, 0x68, 0x18, 0x88, 0xf8, 0xb8, 0xc8, 0x58, 0x28, 0x30, 0x40, 0xd0, 0xa0, + 0x78, 0x08, 0x98, 0xe8, 0xf0, 0x80, 0x10, 0x60, 0x20, 0x50, 0xc0, 0xb0, 0xa8, 0xd8, 0x48, 0x38, + 0xc8, 0xb8, 0x28, 0x58, 0x40, 0x30, 0xa0, 0xd0, 0x90, 0xe0, 0x70, 0x00, 0x18, 0x68, 0xf8, 0x88, + 0xa0, 0xd0, 0x40, 0x30, 0x28, 0x58, 0xc8, 0xb8, 0xf8, 0x88, 0x18, 0x68, 0x70, 0x00, 0x90, 0xe0, + 0x10, 0x60, 0xf0, 0x80, 0x98, 0xe8, 0x78, 0x08, 0x48, 0x38, 0xa8, 0xd8, 0xc0, 0xb0, 0x20, 0x50, + 0x88, 0xf8, 0x68, 0x18, 0x00, 0x70, 0xe0, 0x90, 0xd0, 0xa0, 0x30, 0x40, 0x58, 0x28, 0xb8, 0xc8, + 0x38, 0x48, 0xd8, 0xa8, 0xb0, 0xc0, 0x50, 0x20, 0x60, 0x10, 0x80, 0xf0, 0xe8, 0x98, 0x08, 0x78, + 0xf0, 0x80, 0x10, 0x60, 0x78, 0x08, 0x98, 0xe8, 0xa8, 0xd8, 0x48, 0x38, 0x20, 0x50, 0xc0, 0xb0, + 0x40, 0x30, 0xa0, 0xd0, 0xc8, 0xb8, 0x28, 0x58, 0x18, 0x68, 0xf8, 0x88, 0x90, 0xe0, 0x70, 0x00, + 0xd8, 0xa8, 0x38, 0x48, 0x50, 0x20, 0xb0, 0xc0, 0x80, 0xf0, 0x60, 0x10, 0x08, 0x78, 0xe8, 0x98, + 0x68, 0x18, 0x88, 0xf8, 0xe0, 0x90, 0x00, 0x70, 0x30, 0x40, 0xd0, 0xa0, 0xb8, 0xc8, 0x58, 0x28 + ].freeze + + # + # Initializes the CRC5 instance. + # + def initialize + @crc_mask = self.class.const_get(:CRC_MASK) + + super + end + + # + # Packs the CRC5 checksum. + # + # @param [Integer] crc + # The checksum to pack. + # + # @return [String] + # The packed checksum. + # + def self.pack(crc) + (crc & CRC_MASK).chr + end + + # + # Updates the CRC5 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[((@crc >> 3) ^ b) & 0xff] ^ (@crc >> 8)) & @crc_mask) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc5/crc5_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64.rb new file mode 100644 index 0000000..696056a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64.rb @@ -0,0 +1,126 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC64 algorithm.
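+ #
+ # A brief usage sketch; the one-shot interface and the expected digest
+ # below are taken from spec/crc_examples.rb and spec/crc64_spec.rb in
+ # this same gem:
+ #
+ #   require 'digest/crc64'
+ #
+ #   Digest::CRC64.hexdigest('1234567890')
+ #   # => "bc66a5a9388a5bef"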
+ # + class CRC64 < CRC + + WIDTH = 64 + + INIT_CRC = 0x0000000000000000 + + XOR_MASK = 0x0000000000000000 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-64 --generate=c` + TABLE = [ + 0x0000000000000000, 0x01b0000000000000, 0x0360000000000000, 0x02d0000000000000, + 0x06c0000000000000, 0x0770000000000000, 0x05a0000000000000, 0x0410000000000000, + 0x0d80000000000000, 0x0c30000000000000, 0x0ee0000000000000, 0x0f50000000000000, + 0x0b40000000000000, 0x0af0000000000000, 0x0820000000000000, 0x0990000000000000, + 0x1b00000000000000, 0x1ab0000000000000, 0x1860000000000000, 0x19d0000000000000, + 0x1dc0000000000000, 0x1c70000000000000, 0x1ea0000000000000, 0x1f10000000000000, + 0x1680000000000000, 0x1730000000000000, 0x15e0000000000000, 0x1450000000000000, + 0x1040000000000000, 0x11f0000000000000, 0x1320000000000000, 0x1290000000000000, + 0x3600000000000000, 0x37b0000000000000, 0x3560000000000000, 0x34d0000000000000, + 0x30c0000000000000, 0x3170000000000000, 0x33a0000000000000, 0x3210000000000000, + 0x3b80000000000000, 0x3a30000000000000, 0x38e0000000000000, 0x3950000000000000, + 0x3d40000000000000, 0x3cf0000000000000, 0x3e20000000000000, 0x3f90000000000000, + 0x2d00000000000000, 0x2cb0000000000000, 0x2e60000000000000, 0x2fd0000000000000, + 0x2bc0000000000000, 0x2a70000000000000, 0x28a0000000000000, 0x2910000000000000, + 0x2080000000000000, 0x2130000000000000, 0x23e0000000000000, 0x2250000000000000, + 0x2640000000000000, 0x27f0000000000000, 0x2520000000000000, 0x2490000000000000, + 0x6c00000000000000, 0x6db0000000000000, 0x6f60000000000000, 0x6ed0000000000000, + 0x6ac0000000000000, 0x6b70000000000000, 0x69a0000000000000, 0x6810000000000000, + 0x6180000000000000, 0x6030000000000000, 0x62e0000000000000, 0x6350000000000000, + 0x6740000000000000, 0x66f0000000000000, 0x6420000000000000, 0x6590000000000000, + 0x7700000000000000, 0x76b0000000000000, 0x7460000000000000, 0x75d0000000000000, + 0x71c0000000000000, 0x7070000000000000, 0x72a0000000000000, 0x7310000000000000, + 0x7a80000000000000, 0x7b30000000000000, 0x79e0000000000000, 0x7850000000000000, + 0x7c40000000000000, 0x7df0000000000000, 0x7f20000000000000, 0x7e90000000000000, + 0x5a00000000000000, 0x5bb0000000000000, 0x5960000000000000, 0x58d0000000000000, + 0x5cc0000000000000, 0x5d70000000000000, 0x5fa0000000000000, 0x5e10000000000000, + 0x5780000000000000, 0x5630000000000000, 0x54e0000000000000, 0x5550000000000000, + 0x5140000000000000, 0x50f0000000000000, 0x5220000000000000, 0x5390000000000000, + 0x4100000000000000, 0x40b0000000000000, 0x4260000000000000, 0x43d0000000000000, + 0x47c0000000000000, 0x4670000000000000, 0x44a0000000000000, 0x4510000000000000, + 0x4c80000000000000, 0x4d30000000000000, 0x4fe0000000000000, 0x4e50000000000000, + 0x4a40000000000000, 0x4bf0000000000000, 0x4920000000000000, 0x4890000000000000, + 0xd800000000000000, 0xd9b0000000000000, 0xdb60000000000000, 0xdad0000000000000, + 0xdec0000000000000, 0xdf70000000000000, 0xdda0000000000000, 0xdc10000000000000, + 0xd580000000000000, 0xd430000000000000, 0xd6e0000000000000, 0xd750000000000000, + 0xd340000000000000, 0xd2f0000000000000, 0xd020000000000000, 0xd190000000000000, + 0xc300000000000000, 0xc2b0000000000000, 0xc060000000000000, 0xc1d0000000000000, + 0xc5c0000000000000, 0xc470000000000000, 0xc6a0000000000000, 0xc710000000000000, + 0xce80000000000000, 0xcf30000000000000, 0xcde0000000000000, 0xcc50000000000000, + 0xc840000000000000, 0xc9f0000000000000, 0xcb20000000000000, 0xca90000000000000, + 0xee00000000000000, 0xefb0000000000000, 0xed60000000000000, 
0xecd0000000000000, + 0xe8c0000000000000, 0xe970000000000000, 0xeba0000000000000, 0xea10000000000000, + 0xe380000000000000, 0xe230000000000000, 0xe0e0000000000000, 0xe150000000000000, + 0xe540000000000000, 0xe4f0000000000000, 0xe620000000000000, 0xe790000000000000, + 0xf500000000000000, 0xf4b0000000000000, 0xf660000000000000, 0xf7d0000000000000, + 0xf3c0000000000000, 0xf270000000000000, 0xf0a0000000000000, 0xf110000000000000, + 0xf880000000000000, 0xf930000000000000, 0xfbe0000000000000, 0xfa50000000000000, + 0xfe40000000000000, 0xfff0000000000000, 0xfd20000000000000, 0xfc90000000000000, + 0xb400000000000000, 0xb5b0000000000000, 0xb760000000000000, 0xb6d0000000000000, + 0xb2c0000000000000, 0xb370000000000000, 0xb1a0000000000000, 0xb010000000000000, + 0xb980000000000000, 0xb830000000000000, 0xbae0000000000000, 0xbb50000000000000, + 0xbf40000000000000, 0xbef0000000000000, 0xbc20000000000000, 0xbd90000000000000, + 0xaf00000000000000, 0xaeb0000000000000, 0xac60000000000000, 0xadd0000000000000, + 0xa9c0000000000000, 0xa870000000000000, 0xaaa0000000000000, 0xab10000000000000, + 0xa280000000000000, 0xa330000000000000, 0xa1e0000000000000, 0xa050000000000000, + 0xa440000000000000, 0xa5f0000000000000, 0xa720000000000000, 0xa690000000000000, + 0x8200000000000000, 0x83b0000000000000, 0x8160000000000000, 0x80d0000000000000, + 0x84c0000000000000, 0x8570000000000000, 0x87a0000000000000, 0x8610000000000000, + 0x8f80000000000000, 0x8e30000000000000, 0x8ce0000000000000, 0x8d50000000000000, + 0x8940000000000000, 0x88f0000000000000, 0x8a20000000000000, 0x8b90000000000000, + 0x9900000000000000, 0x98b0000000000000, 0x9a60000000000000, 0x9bd0000000000000, + 0x9fc0000000000000, 0x9e70000000000000, 0x9ca0000000000000, 0x9d10000000000000, + 0x9480000000000000, 0x9530000000000000, 0x97e0000000000000, 0x9650000000000000, + 0x9240000000000000, 0x93f0000000000000, 0x9120000000000000, 0x9090000000000000 + ].freeze + + # + # Packs the CRC64 checksum. + # + # @param [Integer] crc + # The checksum to pack. + # + # @return [String] + # The packed checksum. + # + def self.pack(crc) + buffer = '' + + buffer << ((crc & 0xff00000000000000) >> 56).chr + buffer << ((crc & 0xff000000000000) >> 48).chr + buffer << ((crc & 0xff0000000000) >> 40).chr + buffer << ((crc & 0xff00000000) >> 32).chr + buffer << ((crc & 0xff000000) >> 24).chr + buffer << ((crc & 0xff0000) >> 16).chr + buffer << ((crc & 0xff00) >> 8).chr + buffer << (crc & 0xff).chr + + buffer + end + + # + # Updates the CRC64 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[(@crc ^ b) & 0xff] ^ (@crc >> 8)) & 0xffffffffffffffff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc64/crc64_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_jones.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_jones.rb new file mode 100644 index 0000000..5418ce0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_jones.rb @@ -0,0 +1,88 @@ +require 'digest/crc64' + +module Digest + # + # Implements the CRC64 Jones algorithm. 
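+ #
+ # A brief usage sketch; interface per spec/crc_examples.rb, expected
+ # digest per spec/crc64_jones_spec.rb:
+ #
+ #   require 'digest/crc64_jones'
+ #
+ #   Digest::CRC64Jones.hexdigest('1234567890')
+ #   # => "68a745ba133af9bd"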
+ # + # @since 0.5.0 + # + class CRC64Jones < CRC64 + + INIT_XOR = 0xffffffffffffffff + + INIT_CRC = 0x0 ^ INIT_XOR + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-64-jones --generate=c` + TABLE = [ + 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, + 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, + 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, + 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, + 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, + 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, + 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, + 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, + 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, + 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, + 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, + 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, + 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, + 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, + 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, + 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, + 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, + 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, + 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, + 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, + 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, + 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, + 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, + 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, + 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, + 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, + 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, + 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, + 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, + 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, + 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, + 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, + 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, + 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, + 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, + 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, + 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, + 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, + 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, + 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, + 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 
0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, + 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, + 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, + 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, + 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, + 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, + 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, + 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, + 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, + 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, + 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, + 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, + 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, + 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, + 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, + 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, + 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, + 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, + 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, + 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, + 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, + 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, + 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, + 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728 + ].freeze + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc64_jones/crc64_jones_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_xz.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_xz.rb new file mode 100644 index 0000000..d37272b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc64_xz.rb @@ -0,0 +1,90 @@ +require 'digest/crc64' + +module Digest + # + # Implements the CRC64 XZ algorithm. 
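+ #
+ # A brief usage sketch; interface per spec/crc_examples.rb, expected
+ # digest per spec/crc64_xz_spec.rb:
+ #
+ #   require 'digest/crc64_xz'
+ #
+ #   Digest::CRC64XZ.hexdigest('1234567890')
+ #   # => "b1cb31bbb4a2b2be"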
+ # + # @since 0.5.0 + # + class CRC64XZ < CRC64 + + INIT_XOR = 0xffffffffffffffff + + INIT_CRC = 0x0 ^ INIT_XOR + + XOR_MASK = 0xffffffffffffffff + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-64-xz --generate=c` + TABLE = [ + 0x0000000000000000, 0xb32e4cbe03a75f6f, 0xf4843657a840a05b, 0x47aa7ae9abe7ff34, + 0x7bd0c384ff8f5e33, 0xc8fe8f3afc28015c, 0x8f54f5d357cffe68, 0x3c7ab96d5468a107, + 0xf7a18709ff1ebc66, 0x448fcbb7fcb9e309, 0x0325b15e575e1c3d, 0xb00bfde054f94352, + 0x8c71448d0091e255, 0x3f5f08330336bd3a, 0x78f572daa8d1420e, 0xcbdb3e64ab761d61, + 0x7d9ba13851336649, 0xceb5ed8652943926, 0x891f976ff973c612, 0x3a31dbd1fad4997d, + 0x064b62bcaebc387a, 0xb5652e02ad1b6715, 0xf2cf54eb06fc9821, 0x41e11855055bc74e, + 0x8a3a2631ae2dda2f, 0x39146a8fad8a8540, 0x7ebe1066066d7a74, 0xcd905cd805ca251b, + 0xf1eae5b551a2841c, 0x42c4a90b5205db73, 0x056ed3e2f9e22447, 0xb6409f5cfa457b28, + 0xfb374270a266cc92, 0x48190ecea1c193fd, 0x0fb374270a266cc9, 0xbc9d3899098133a6, + 0x80e781f45de992a1, 0x33c9cd4a5e4ecdce, 0x7463b7a3f5a932fa, 0xc74dfb1df60e6d95, + 0x0c96c5795d7870f4, 0xbfb889c75edf2f9b, 0xf812f32ef538d0af, 0x4b3cbf90f69f8fc0, + 0x774606fda2f72ec7, 0xc4684a43a15071a8, 0x83c230aa0ab78e9c, 0x30ec7c140910d1f3, + 0x86ace348f355aadb, 0x3582aff6f0f2f5b4, 0x7228d51f5b150a80, 0xc10699a158b255ef, + 0xfd7c20cc0cdaf4e8, 0x4e526c720f7dab87, 0x09f8169ba49a54b3, 0xbad65a25a73d0bdc, + 0x710d64410c4b16bd, 0xc22328ff0fec49d2, 0x85895216a40bb6e6, 0x36a71ea8a7ace989, + 0x0adda7c5f3c4488e, 0xb9f3eb7bf06317e1, 0xfe5991925b84e8d5, 0x4d77dd2c5823b7ba, + 0x64b62bcaebc387a1, 0xd7986774e864d8ce, 0x90321d9d438327fa, 0x231c512340247895, + 0x1f66e84e144cd992, 0xac48a4f017eb86fd, 0xebe2de19bc0c79c9, 0x58cc92a7bfab26a6, + 0x9317acc314dd3bc7, 0x2039e07d177a64a8, 0x67939a94bc9d9b9c, 0xd4bdd62abf3ac4f3, + 0xe8c76f47eb5265f4, 0x5be923f9e8f53a9b, 0x1c4359104312c5af, 0xaf6d15ae40b59ac0, + 0x192d8af2baf0e1e8, 0xaa03c64cb957be87, 0xeda9bca512b041b3, 0x5e87f01b11171edc, + 0x62fd4976457fbfdb, 0xd1d305c846d8e0b4, 0x96797f21ed3f1f80, 0x2557339fee9840ef, + 0xee8c0dfb45ee5d8e, 0x5da24145464902e1, 0x1a083bacedaefdd5, 0xa9267712ee09a2ba, + 0x955cce7fba6103bd, 0x267282c1b9c65cd2, 0x61d8f8281221a3e6, 0xd2f6b4961186fc89, + 0x9f8169ba49a54b33, 0x2caf25044a02145c, 0x6b055fede1e5eb68, 0xd82b1353e242b407, + 0xe451aa3eb62a1500, 0x577fe680b58d4a6f, 0x10d59c691e6ab55b, 0xa3fbd0d71dcdea34, + 0x6820eeb3b6bbf755, 0xdb0ea20db51ca83a, 0x9ca4d8e41efb570e, 0x2f8a945a1d5c0861, + 0x13f02d374934a966, 0xa0de61894a93f609, 0xe7741b60e174093d, 0x545a57dee2d35652, + 0xe21ac88218962d7a, 0x5134843c1b317215, 0x169efed5b0d68d21, 0xa5b0b26bb371d24e, + 0x99ca0b06e7197349, 0x2ae447b8e4be2c26, 0x6d4e3d514f59d312, 0xde6071ef4cfe8c7d, + 0x15bb4f8be788911c, 0xa6950335e42fce73, 0xe13f79dc4fc83147, 0x521135624c6f6e28, + 0x6e6b8c0f1807cf2f, 0xdd45c0b11ba09040, 0x9aefba58b0476f74, 0x29c1f6e6b3e0301b, + 0xc96c5795d7870f42, 0x7a421b2bd420502d, 0x3de861c27fc7af19, 0x8ec62d7c7c60f076, + 0xb2bc941128085171, 0x0192d8af2baf0e1e, 0x4638a2468048f12a, 0xf516eef883efae45, + 0x3ecdd09c2899b324, 0x8de39c222b3eec4b, 0xca49e6cb80d9137f, 0x7967aa75837e4c10, + 0x451d1318d716ed17, 0xf6335fa6d4b1b278, 0xb199254f7f564d4c, 0x02b769f17cf11223, + 0xb4f7f6ad86b4690b, 0x07d9ba1385133664, 0x4073c0fa2ef4c950, 0xf35d8c442d53963f, + 0xcf273529793b3738, 0x7c0979977a9c6857, 0x3ba3037ed17b9763, 0x888d4fc0d2dcc80c, + 0x435671a479aad56d, 0xf0783d1a7a0d8a02, 0xb7d247f3d1ea7536, 0x04fc0b4dd24d2a59, + 0x3886b22086258b5e, 0x8ba8fe9e8582d431, 0xcc0284772e652b05, 0x7f2cc8c92dc2746a, + 0x325b15e575e1c3d0, 
0x8175595b76469cbf, 0xc6df23b2dda1638b, 0x75f16f0cde063ce4, + 0x498bd6618a6e9de3, 0xfaa59adf89c9c28c, 0xbd0fe036222e3db8, 0x0e21ac88218962d7, + 0xc5fa92ec8aff7fb6, 0x76d4de52895820d9, 0x317ea4bb22bfdfed, 0x8250e80521188082, + 0xbe2a516875702185, 0x0d041dd676d77eea, 0x4aae673fdd3081de, 0xf9802b81de97deb1, + 0x4fc0b4dd24d2a599, 0xfceef8632775faf6, 0xbb44828a8c9205c2, 0x086ace348f355aad, + 0x34107759db5dfbaa, 0x873e3be7d8faa4c5, 0xc094410e731d5bf1, 0x73ba0db070ba049e, + 0xb86133d4dbcc19ff, 0x0b4f7f6ad86b4690, 0x4ce50583738cb9a4, 0xffcb493d702be6cb, + 0xc3b1f050244347cc, 0x709fbcee27e418a3, 0x3735c6078c03e797, 0x841b8ab98fa4b8f8, + 0xadda7c5f3c4488e3, 0x1ef430e13fe3d78c, 0x595e4a08940428b8, 0xea7006b697a377d7, + 0xd60abfdbc3cbd6d0, 0x6524f365c06c89bf, 0x228e898c6b8b768b, 0x91a0c532682c29e4, + 0x5a7bfb56c35a3485, 0xe955b7e8c0fd6bea, 0xaeffcd016b1a94de, 0x1dd181bf68bdcbb1, + 0x21ab38d23cd56ab6, 0x9285746c3f7235d9, 0xd52f0e859495caed, 0x6601423b97329582, + 0xd041dd676d77eeaa, 0x636f91d96ed0b1c5, 0x24c5eb30c5374ef1, 0x97eba78ec690119e, + 0xab911ee392f8b099, 0x18bf525d915feff6, 0x5f1528b43ab810c2, 0xec3b640a391f4fad, + 0x27e05a6e926952cc, 0x94ce16d091ce0da3, 0xd3646c393a29f297, 0x604a2087398eadf8, + 0x5c3099ea6de60cff, 0xef1ed5546e415390, 0xa8b4afbdc5a6aca4, 0x1b9ae303c601f3cb, + 0x56ed3e2f9e224471, 0xe5c372919d851b1e, 0xa26908783662e42a, 0x114744c635c5bb45, + 0x2d3dfdab61ad1a42, 0x9e13b115620a452d, 0xd9b9cbfcc9edba19, 0x6a978742ca4ae576, + 0xa14cb926613cf817, 0x1262f598629ba778, 0x55c88f71c97c584c, 0xe6e6c3cfcadb0723, + 0xda9c7aa29eb3a624, 0x69b2361c9d14f94b, 0x2e184cf536f3067f, 0x9d36004b35545910, + 0x2b769f17cf112238, 0x9858d3a9ccb67d57, 0xdff2a94067518263, 0x6cdce5fe64f6dd0c, + 0x50a65c93309e7c0b, 0xe388102d33392364, 0xa4226ac498dedc50, 0x170c267a9b79833f, + 0xdcd7181e300f9e5e, 0x6ff954a033a8c131, 0x28532e49984f3e05, 0x9b7d62f79be8616a, + 0xa707db9acf80c06d, 0x14299724cc279f02, 0x5383edcd67c06036, 0xe0ada17364673f59 + ].freeze + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc64_xz/crc64_xz_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8.rb new file mode 100644 index 0000000..6bc5772 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8.rb @@ -0,0 +1,65 @@ +require 'digest/crc' + +module Digest + # + # Implements the CRC8 algorithm. 
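+ #
+ # A brief usage sketch showing both the streaming and the checksum
+ # accessors; interface per spec/crc_examples.rb, expected values per
+ # spec/crc8_spec.rb (feeding the two chunks below is equivalent to
+ # digesting '1234567890' at once):
+ #
+ #   require 'digest/crc8'
+ #
+ #   crc = Digest::CRC8.new
+ #   crc << '12345'
+ #   crc << '67890'
+ #   crc.hexdigest  # => "52"
+ #   crc.checksum   # => 82 (0x52)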
+ # + class CRC8 < CRC + + WIDTH = 8 + + INIT_CRC = 0x00 + + # Generated by `./pycrc.py --algorithm=table-driven --model=crc-8 --generate=c` + TABLE = [ + 0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d, + 0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65, 0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d, + 0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5, 0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd, + 0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85, 0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd, + 0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2, 0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea, + 0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2, 0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a, + 0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32, 0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a, + 0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42, 0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a, + 0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c, 0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4, + 0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec, 0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4, + 0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c, 0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44, + 0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c, 0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34, + 0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b, 0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63, + 0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b, 0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13, + 0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb, 0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83, + 0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb, 0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3 + ].freeze + + # + # Packs the CRC8 checksum. + # + # @param [Integer] crc + # The checksum to pack. + # + # @return [String] + # The packed checksum. + # + def self.pack(crc) + (crc & 0xff).chr + end + + # + # Updates the CRC8 checksum. + # + # @param [String] data + # The data to update the checksum with. + # + def update(data) + data.each_byte do |b| + @crc = ((@table[(@crc ^ b) & 0xff] ^ (@crc << 8)) & 0xff) + end + + return self + end + + end +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc8/crc8_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8_1wire.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8_1wire.rb new file mode 100644 index 0000000..bc0ede0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/lib/digest/crc8_1wire.rb @@ -0,0 +1,37 @@ +require 'digest/crc8' + +module Digest + # + # Implements the CRC8 1-Wire algorithm. 
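+ #
+ # A brief usage sketch; interface per spec/crc_examples.rb, expected
+ # digest per spec/crc8_1wire_spec.rb:
+ #
+ #   require 'digest/crc8_1wire'
+ #
+ #   Digest::CRC8_1Wire.hexdigest('1234567890')
+ #   # => "4f"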
+ # + class CRC8_1Wire < CRC8 + + # Generated by `./pycrc.py --algorithm=table-driven --model=dallas-1-wire --generate=c` + TABLE = [ + 0x00, 0x5e, 0xbc, 0xe2, 0x61, 0x3f, 0xdd, 0x83, 0xc2, 0x9c, 0x7e, 0x20, 0xa3, 0xfd, 0x1f, 0x41, + 0x9d, 0xc3, 0x21, 0x7f, 0xfc, 0xa2, 0x40, 0x1e, 0x5f, 0x01, 0xe3, 0xbd, 0x3e, 0x60, 0x82, 0xdc, + 0x23, 0x7d, 0x9f, 0xc1, 0x42, 0x1c, 0xfe, 0xa0, 0xe1, 0xbf, 0x5d, 0x03, 0x80, 0xde, 0x3c, 0x62, + 0xbe, 0xe0, 0x02, 0x5c, 0xdf, 0x81, 0x63, 0x3d, 0x7c, 0x22, 0xc0, 0x9e, 0x1d, 0x43, 0xa1, 0xff, + 0x46, 0x18, 0xfa, 0xa4, 0x27, 0x79, 0x9b, 0xc5, 0x84, 0xda, 0x38, 0x66, 0xe5, 0xbb, 0x59, 0x07, + 0xdb, 0x85, 0x67, 0x39, 0xba, 0xe4, 0x06, 0x58, 0x19, 0x47, 0xa5, 0xfb, 0x78, 0x26, 0xc4, 0x9a, + 0x65, 0x3b, 0xd9, 0x87, 0x04, 0x5a, 0xb8, 0xe6, 0xa7, 0xf9, 0x1b, 0x45, 0xc6, 0x98, 0x7a, 0x24, + 0xf8, 0xa6, 0x44, 0x1a, 0x99, 0xc7, 0x25, 0x7b, 0x3a, 0x64, 0x86, 0xd8, 0x5b, 0x05, 0xe7, 0xb9, + 0x8c, 0xd2, 0x30, 0x6e, 0xed, 0xb3, 0x51, 0x0f, 0x4e, 0x10, 0xf2, 0xac, 0x2f, 0x71, 0x93, 0xcd, + 0x11, 0x4f, 0xad, 0xf3, 0x70, 0x2e, 0xcc, 0x92, 0xd3, 0x8d, 0x6f, 0x31, 0xb2, 0xec, 0x0e, 0x50, + 0xaf, 0xf1, 0x13, 0x4d, 0xce, 0x90, 0x72, 0x2c, 0x6d, 0x33, 0xd1, 0x8f, 0x0c, 0x52, 0xb0, 0xee, + 0x32, 0x6c, 0x8e, 0xd0, 0x53, 0x0d, 0xef, 0xb1, 0xf0, 0xae, 0x4c, 0x12, 0x91, 0xcf, 0x2d, 0x73, + 0xca, 0x94, 0x76, 0x28, 0xab, 0xf5, 0x17, 0x49, 0x08, 0x56, 0xb4, 0xea, 0x69, 0x37, 0xd5, 0x8b, + 0x57, 0x09, 0xeb, 0xb5, 0x36, 0x68, 0x8a, 0xd4, 0x95, 0xcb, 0x29, 0x77, 0xf4, 0xaa, 0x48, 0x16, + 0xe9, 0xb7, 0x55, 0x0b, 0x88, 0xd6, 0x34, 0x6a, 0x2b, 0x75, 0x97, 0xc9, 0x4a, 0x14, 0xf6, 0xa8, + 0x74, 0x2a, 0xc8, 0x96, 0x15, 0x4b, 0xa9, 0xf7, 0xb6, 0xe8, 0x0a, 0x54, 0xd7, 0x89, 0x6b, 0x35 + ].freeze + + end + + # @deprecated Please use {CRC8_1Wire} instead. + CRC81Wire = CRC8_1Wire +end + +if RUBY_ENGINE == 'ruby' + begin; require 'digest/crc8_1wire/crc8_1wire_ext'; rescue LoadError; end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc15_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc15_spec.rb new file mode 100644 index 0000000..355fe80 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc15_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc15' + +describe Digest::CRC15 do + let(:string) { '1234567890' } + let(:expected) { '178c' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_ccitt_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_ccitt_spec.rb new file mode 100644 index 0000000..8338514 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_ccitt_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_ccitt' + +describe Digest::CRC16CCITT do + let(:string) { '1234567890' } + let(:expected) { '3218' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_genibus_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_genibus_spec.rb new file mode 100644 index 0000000..55206c2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_genibus_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_genibus' + +describe Digest::CRC16Genibus do + let(:string) { '1234567890' } + let(:expected) { 'cde7' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_kermit_spec.rb 
b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_kermit_spec.rb new file mode 100644 index 0000000..6e9e86e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_kermit_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_kermit' + +describe Digest::CRC16Kermit do + let(:string) { '1234567890' } + let(:expected) { '286b' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_modbus_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_modbus_spec.rb new file mode 100644 index 0000000..5ad16f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_modbus_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_modbus' + +describe Digest::CRC16Modbus do + let(:string) { '1234567890' } + let(:expected) { 'c20a' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_qt_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_qt_spec.rb new file mode 100644 index 0000000..f5b7a4e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_qt_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'digest/crc16_qt' + +describe "Digest::CRC16QT" do + subject { Digest::CRC16QT } + + it "should be an alias to Digest::CRC16X25" do + expect(subject).to be < Digest::CRC16X25 + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_spec.rb new file mode 100644 index 0000000..3e49775 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16' + +describe Digest::CRC16 do + let(:string) { '1234567890' } + let(:expected) { 'c57a' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_usb_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_usb_spec.rb new file mode 100644 index 0000000..5b99303 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_usb_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_usb' + +describe Digest::CRC16USB do + let(:string) { '1234567890' } + let(:expected) { '3df5' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_x_25_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_x_25_spec.rb new file mode 100644 index 0000000..1cdc688 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_x_25_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_x_25' + +describe Digest::CRC16X25 do + let(:string) { '1234567890' } + let(:expected) { '4b13' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_xmodem_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_xmodem_spec.rb new file mode 100644 index 0000000..dc36a67 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_xmodem_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_xmodem' + +describe Digest::CRC16XModem do + let(:string) { '1234567890' } + let(:expected) { 'd321' } + + it_should_behave_like "CRC" +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_zmodem_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_zmodem_spec.rb new file mode 100644 index 0000000..67ee526 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc16_zmodem_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc16_zmodem' + +describe Digest::CRC16ZModem do + let(:string) { '1234567890' } + let(:expected) { 'd321' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc1_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc1_spec.rb new file mode 100644 index 0000000..d79afd1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc1_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc1' + +describe Digest::CRC1 do + let(:string) { '1234567890' } + let(:expected) { '0d' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc24_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc24_spec.rb new file mode 100644 index 0000000..ccfcc91 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc24_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc24' + +describe Digest::CRC24 do + let(:string) { '1234567890' } + let(:expected) { '8c0072' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_bzip2_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_bzip2_spec.rb new file mode 100644 index 0000000..58f0df0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_bzip2_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32_bzip2' + +describe Digest::CRC32BZip2 do + let(:string) { '1234567890' } + let(:expected) { '506853b6' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_jam_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_jam_spec.rb new file mode 100644 index 0000000..a52da2c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_jam_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32_jam' + +describe Digest::CRC32Jam do + let(:string) { '1234567890' } + let(:expected) { 'd9e2511a' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_mpeg_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_mpeg_spec.rb new file mode 100644 index 0000000..0b8c6e4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_mpeg_spec.rb @@ -0,0 +1,16 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32_mpeg' + +describe Digest::CRC32MPEG do + let(:string) { '1234567890' } + let(:expected) { 'af97ac49' } + + it_should_behave_like "CRC" +end + +describe "Digest::CRC32Mpeg" do + subject { Digest::CRC32Mpeg } + + it { expect(subject).to eq(Digest::CRC32MPEG) } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_posix_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_posix_spec.rb new file mode 100644 index 0000000..df62e6e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_posix_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 
'digest/crc32_posix' + +describe Digest::CRC32POSIX do + let(:string) { '1234567890' } + let(:expected) { 'c181fd8e' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_spec.rb new file mode 100644 index 0000000..0e000ab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32' + +describe Digest::CRC32 do + let(:string) { '1234567890' } + let(:expected) { '261daee5' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_xfer_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_xfer_spec.rb new file mode 100644 index 0000000..c2a93e0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32_xfer_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32_xfer' + +describe Digest::CRC32XFER do + let(:string) { '1234567890' } + let(:expected) { '0be368eb' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32c_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32c_spec.rb new file mode 100644 index 0000000..580098b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc32c_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc32c' + +describe Digest::CRC32c do + let(:string) { '1234567890' } + let(:expected) { 'f3dbd4fe' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc5_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc5_spec.rb new file mode 100644 index 0000000..c70d8fd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc5_spec.rb @@ -0,0 +1,12 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc5' + +describe Digest::CRC5 do + let(:string) { '1234567890' } + let(:expected) { '1' } + + pending "Implementation of CRC5 does not match pycrc.py" do + it_should_behave_like "CRC" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_jones_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_jones_spec.rb new file mode 100644 index 0000000..03e7c1b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_jones_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc64_jones' + +describe Digest::CRC64Jones do + let(:string) { '1234567890' } + let(:expected) { '68a745ba133af9bd' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_spec.rb new file mode 100644 index 0000000..54c75a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc64' + +describe Digest::CRC64 do + let(:string) { '1234567890' } + let(:expected) { 'bc66a5a9388a5bef' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_xz_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_xz_spec.rb new file mode 100644 index 0000000..7a9a0c0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc64_xz_spec.rb @@ -0,0 
+1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc64_xz' + +describe Digest::CRC64XZ do + let(:string) { '1234567890' } + let(:expected) { 'b1cb31bbb4a2b2be' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_1wire_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_1wire_spec.rb new file mode 100644 index 0000000..17c1bf4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_1wire_spec.rb @@ -0,0 +1,16 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc8_1wire' + +describe Digest::CRC8_1Wire do + let(:string) { '1234567890' } + let(:expected) { '4f' } + + it_should_behave_like "CRC" +end + +describe "Digest::CRC81Wire" do + subject { Digest::CRC81Wire } + + it { expect(subject).to eq(Digest::CRC8_1Wire) } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_spec.rb new file mode 100644 index 0000000..de33f34 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc8_spec.rb @@ -0,0 +1,10 @@ +require 'spec_helper' +require 'crc_examples' +require 'digest/crc8' + +describe Digest::CRC8 do + let(:string) { '1234567890' } + let(:expected) { '52' } + + it_should_behave_like "CRC" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_examples.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_examples.rb new file mode 100644 index 0000000..870b115 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_examples.rb @@ -0,0 +1,27 @@ +require 'spec_helper' + +shared_examples_for "CRC" do + it "should calculate a checksum for text" do + expect(described_class.hexdigest(string)).to be == expected + end + + it "should calculate a checksum for multiple data" do + middle = (string.length / 2) + + chunk1 = string[0...middle] + chunk2 = string[middle..-1] + + crc = subject + crc << chunk1 + crc << chunk2 + + expect(crc.hexdigest).to be == expected + end + + it "should provide direct access to the checksum value" do + crc = subject + crc << string + + expect(crc.checksum).to be == expected.to_i(16) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_spec.rb new file mode 100644 index 0000000..f6eb5e7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/crc_spec.rb @@ -0,0 +1,72 @@ +require 'spec_helper' +require 'digest/crc' + +describe Digest::CRC do + describe "#block_length" do + it { expect(subject.block_length).to be 1 } + end + + describe ".pack" do + subject { described_class } + + it do + expect { subject.pack(0) }.to raise_error(NotImplementedError) + end + end + + describe "#update" do + it do + expect { subject.update('') }.to raise_error(NotImplementedError) + end + end + + context "when inherited" do + subject do + Class.new(described_class).tap do |klass| + klass::WIDTH = 16 + + klass::INIT_CRC = 0x01 + + klass::XOR_MASK = 0x02 + + klass::TABLE = [0x01, 0x02, 0x03, 0x04].freeze + end + end + + it "should override WIDTH" do + expect(subject::WIDTH).not_to be described_class::WIDTH + end + + it "should override INIT_CRC" do + expect(subject::INIT_CRC).not_to be described_class::INIT_CRC + end + + it "should override XOR_MASK" do + expect(subject::XOR_MASK).not_to be described_class::XOR_MASK + end + + it "should override TABLE" do + expect(subject::TABLE).not_to be described_class::TABLE + 
end + + describe "#initialize" do + let(:instance) { subject.new } + + it "should initialize @init_crc" do + expect(instance.instance_variable_get("@init_crc")).to be subject::INIT_CRC + end + + it "should initialize @xor_mask" do + expect(instance.instance_variable_get("@xor_mask")).to be subject::XOR_MASK + end + + it "should initialize @width" do + expect(instance.instance_variable_get("@width")).to be subject::WIDTH + end + + it "should initialize @table" do + expect(instance.instance_variable_get("@table")).to be subject::TABLE + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.base b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.base new file mode 100644 index 0000000..af40177 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.base @@ -0,0 +1,9 @@ +ARG RUBY_VERSION=2.7.0 +FROM ruby:${RUBY_VERSION}-slim + +RUN apt-get update -y -qq +RUN apt-get install -y -qq bash + +COPY ./digest-crc.gem . + +ENTRYPOINT gem install ./digest-crc.gem diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc new file mode 100644 index 0000000..0cdb139 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc @@ -0,0 +1,3 @@ +FROM test-digest-crc-base + +RUN apt-get install -y -qq gcc diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc-and-make b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc-and-make new file mode 100644 index 0000000..9d3f92e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/docker/Dockerfile.with-gcc-and-make @@ -0,0 +1,3 @@ +FROM test-digest-crc-with-gcc + +RUN apt-get install -y -qq make diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/install_spec.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/install_spec.rb new file mode 100644 index 0000000..acf264d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/integration/install_spec.rb @@ -0,0 +1,59 @@ +require 'rspec' + +describe "installing digest-crc" do + ROOT_DIR = File.expand_path('../../..',__FILE__) + DOCKER_DIR = File.expand_path('../docker', __FILE__) + + IMAGES = %w[ + test-digest-crc-base + test-digest-crc-with-gcc + test-digest-crc-with-gcc-and-make + ] + + before(:all) do + puts ">>> Building digest-crc gem ..." + Dir.chdir(ROOT_DIR) do + system 'gem', 'build', + '-o', File.join(DOCKER_DIR,'digest-crc.gem'), + 'digest-crc.gemspec' + end + + IMAGES.each do |image| + suffix = image.sub('test-digest-crc-','') + + puts ">>> Building #{image} docker image ..." + Dir.chdir(DOCKER_DIR) do + system "docker build -t #{image} --file Dockerfile.#{suffix} ." 
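+          # Each Dockerfile.<suffix> layers onto the previous image
+          # (base, then with-gcc, then with-gcc-and-make), so the install
+          # is exercised without a toolchain, with gcc only, and with gcc
+          # and make; presumably this covers both the pure-Ruby fallback
+          # and building the optional *_ext C extensions required above.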
+ end + end + end + + context "when installing into a slim environment" do + let(:image) { 'test-digest-crc-base' } + + it "should successfully install digest-crc" do + expect(system("docker run #{image}")).to be(true) + end + end + + context "when gcc is installed" do + let(:image) { 'test-digest-crc-with-gcc' } + + it "should successfully install digest-crc" do + expect(system("docker run #{image}")).to be(true) + end + end + + context "when gcc and make are installed" do + let(:image) { 'test-digest-crc-with-gcc-and-make' } + + it "should successfully install digest-crc" do + expect(system("docker run #{image}")).to be(true) + end + end + + after(:all) do + puts ">>> Removing test-digest-crc docker images ..." + system "docker image rm -f #{IMAGES.reverse.join(' ')}" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/spec_helper.rb new file mode 100644 index 0000000..9779454 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/digest-crc-0.6.4/spec/spec_helper.rb @@ -0,0 +1 @@ +require 'rspec' diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.document b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.document new file mode 100644 index 0000000..3d618dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.document @@ -0,0 +1,5 @@ +lib/**/*.rb +bin/* +- +features/**/*.feature +LICENSE.txt diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.gitignore b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.gitignore new file mode 100644 index 0000000..d87d4be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +Gemfile.lock +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.travis.yml b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.travis.yml new file mode 100644 index 0000000..e120c4c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/.travis.yml @@ -0,0 +1,21 @@ +language: ruby +rvm: + - 1.8.7 + - ree + - 1.9.3 + - 2.0.0 + - 2.1.9 + - 2.2.5 + - 2.3.1 + - ruby-head + - jruby-1.7.20 + - jruby-9.0.5.0 + - jruby-head + - rbx-2 +matrix: + allow_failures: + - rvm: ruby-head + - rvm: jruby-head + - rvm: rbx-2 +before_install: + - gem update bundler diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/CHANGELOG.md new file mode 100644 index 0000000..0c564b6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/CHANGELOG.md @@ -0,0 +1,219 @@ +# Change Log + +## [v0.5.20190701](https://github.com/knu/ruby-domain_name/tree/v0.5.20190701) (2019-07-05) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20180417...v0.5.20190701) + +- Update the eTLD database to 2019-07-01 18:45:50 UTC + +## [v0.5.20180417](https://github.com/knu/ruby-domain_name/tree/v0.5.20180417) (2018-04-17) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20170404...v0.5.20180417) + +- Update the eTLD database to 2018-04-17T23:50:25Z + +## [v0.5.20170404](https://github.com/knu/ruby-domain_name/tree/v0.5.20170404) (2017-04-04) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20170223...v0.5.20170404) + +- Update the eTLD database to 2017-04-04T20:20:25Z 
+ +## [v0.5.20170223](https://github.com/knu/ruby-domain_name/tree/v0.5.20170223) (2017-02-23) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20161129...v0.5.20170223) + +- Update the eTLD database to 2017-02-23T00:52:11Z + +## [v0.5.20161129](https://github.com/knu/ruby-domain_name/tree/v0.5.20161129) (2016-11-29) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160826...v0.5.20161129) + +- Update the eTLD database to 2016-11-29T01:22:03Z + +## [v0.5.20161021](https://github.com/knu/ruby-domain_name/tree/v0.5.20161021) (2016-10-27) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160826...v0.5.20161021) + +- Update the eTLD database to 2016-10-21T20:52:10Z + +## [v0.5.20160826](https://github.com/knu/ruby-domain_name/tree/v0.5.20160826) (2016-09-01) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160615...v0.5.20160826) + +- Update the license for the eTLD database +- Update the eTLD database to 2016-08-26T16:52:03Z + +## [v0.5.20160615](https://github.com/knu/ruby-domain_name/tree/v0.5.20160615) (2016-06-16) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160310...v0.5.20160615) + +- Always set `@domain` to avoid a warning when `$VERBOSE` is on +- Update the eTLD database to 2016-06-15T16:22:11Z + +## [v0.5.20160310](https://github.com/knu/ruby-domain_name/tree/v0.5.20160310) (2016-03-17) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160309...v0.5.20160310) + +- Update development dependencies for obsolete rubies +- Update the eTLD database to 2016-03-10T21:22:02Z + +## [v0.5.20160309](https://github.com/knu/ruby-domain_name/tree/v0.5.20160309) (2016-03-09) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160216...v0.5.20160309) + +- Fix support for Ruby 1.8 +- Update the eTLD database to 2016-03-09T09:52:02Z + +## [v0.5.20160216](https://github.com/knu/ruby-domain_name/tree/v0.5.20160216) (2016-02-24) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20160128...v0.5.20160216) + +- Update the eTLD database to 2016-02-16T19:22:02Z + +## [v0.5.20160128](https://github.com/knu/ruby-domain_name/tree/v0.5.20160128) (2016-01-29) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.25...v0.5.20160128) + +- Use the date as part of VERSION +- Update the eTLD database to 2016-01-28T23:22:02Z + +## [v0.5.25](https://github.com/knu/ruby-domain_name/tree/v0.5.25) (2015-10-06) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.24...v0.5.25) + +- Restrict i18n < 0.7.0 on ruby 1.8. 
+- Update the eTLD database to 2015-09-29T17:22:03Z + +## [v0.5.24](https://github.com/knu/ruby-domain_name/tree/v0.5.24) (2015-04-16) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.23...v0.5.24) + +- Update the eTLD database to 2015-04-07T20:26:05Z + +## [v0.5.23](https://github.com/knu/ruby-domain_name/tree/v0.5.23) (2014-12-19) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.22...v0.5.23) + +- Update the eTLD database to 2014-12-18T02:26:03Z + +## [v0.5.22](https://github.com/knu/ruby-domain_name/tree/v0.5.22) (2014-10-28) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.21...v0.5.22) + +- Update the eTLD database to 2014-10-27T15:26:07Z + +## [v0.5.21](https://github.com/knu/ruby-domain_name/tree/v0.5.21) (2014-09-09) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.20...v0.5.21) + +- Update the eTLD database to 2014-09-05T01:56:10Z + +## [v0.5.20](https://github.com/knu/ruby-domain_name/tree/v0.5.20) (2014-08-18) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.19...v0.5.20) + +- Update the eTLD database to 2014-08-14T00:56:09Z + +## [v0.5.19](https://github.com/knu/ruby-domain_name/tree/v0.5.19) (2014-06-12) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.18...v0.5.19) + +- Update the eTLD database to 2014-06-11T15:26:13Z + +## [v0.5.18](https://github.com/knu/ruby-domain_name/tree/v0.5.18) (2014-03-27) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.17...v0.5.18) + +- Update the eTLD database to 2014-03-27T03:00:59Z + +## [v0.5.17](https://github.com/knu/ruby-domain_name/tree/v0.5.17) (2014-03-21) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.16...v0.5.17) + +- Update the eTLD database to 2014-03-20T15:01:09Z + +## [v0.5.16](https://github.com/knu/ruby-domain_name/tree/v0.5.16) (2014-02-12) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.15...v0.5.16) + +- Update the eTLD database to 2014-02-11T16:01:07Z + +## [v0.5.15](https://github.com/knu/ruby-domain_name/tree/v0.5.15) (2013-11-15) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.14...v0.5.15) + +- Update the eTLD database to 2013-11-15T16:01:28Z +- Merge IDN tests from mozilla-central/netwerk/test/unit/data/test_psl.txt + +## [v0.5.14](https://github.com/knu/ruby-domain_name/tree/v0.5.14) (2013-10-16) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.13...v0.5.14) + +- Update the eTLD database to 2013-10-16T07:01:24Z + +## [v0.5.13](https://github.com/knu/ruby-domain_name/tree/v0.5.13) (2013-08-18) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.12...v0.5.13) + +- Update the eTLD database to 2013-08-15T11:01:26Z +- Adjust dependencies for Ruby 1.8 + +## [v0.5.12](https://github.com/knu/ruby-domain_name/tree/v0.5.12) (2013-06-07) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.11...v0.5.12) + +- Update the eTLD database to 2013-06-06T23:00:56Z +- Add *_idn methods that do ToUnicode conversion + +## [v0.5.11](https://github.com/knu/ruby-domain_name/tree/v0.5.11) (2013-04-12) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.10...v0.5.11) + +- Add DomainName#superdomain +- Update the database to 2013-04-05T23:00:49Z + +## [v0.5.10](https://github.com/knu/ruby-domain_name/tree/v0.5.10) (2013-04-01) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.9...v0.5.10) + +- Update the eTLD database to that of 
2013-03-31T03:02:39Z + +## [v0.5.9](https://github.com/knu/ruby-domain_name/tree/v0.5.9) (2013-03-17) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.8...v0.5.9) + +- Support unf 0.1.0 + +## [v0.5.8](https://github.com/knu/ruby-domain_name/tree/v0.5.8) (2013-03-14) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.7...v0.5.8) + +- Update the eTLD database to the version as of 2013-02-18T20:02:07Z + +## [v0.5.7](https://github.com/knu/ruby-domain_name/tree/v0.5.7) (2013-01-07) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.6...v0.5.7) + +- Update the eTLD list + +## [v0.5.6](https://github.com/knu/ruby-domain_name/tree/v0.5.6) (2012-12-06) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.5...v0.5.6) + +- Update the eTLD list + +## [v0.5.5](https://github.com/knu/ruby-domain_name/tree/v0.5.5) (2012-12-06) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.4...v0.5.5) + +- Add an optional host_only flag to DomainName#cookie_domain? +- Migrate from jeweler to bundle gem + +## [v0.5.4](https://github.com/knu/ruby-domain_name/tree/v0.5.4) (2012-09-18) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.3...v0.5.4) + +- Update the eTLD list +- Import updated test cases suggested by Mozilla developers + +## [v0.5.3](https://github.com/knu/ruby-domain_name/tree/v0.5.3) (2012-04-06) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.2...v0.5.3) + +- Implement Punycode decoder + +**Closed issues:** + +- Running DomainName multi-threaded leads to Stack Errors [\#2](https://github.com/knu/ruby-domain_name/issues/2) + +## [v0.5.2](https://github.com/knu/ruby-domain_name/tree/v0.5.2) (2012-01-18) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.1...v0.5.2) + +- Update the eTLD list + +## [v0.5.1](https://github.com/knu/ruby-domain_name/tree/v0.5.1) (2011-11-09) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.5.0...v0.5.1) + +- DomainName.new calls #to_str if a non-string object is given +- Fix support for IPv6 addresses enclosed in square brackets + +**Merged pull requests:** + +- Fixed DomainName\#\<=\> for use with Ruby 1.8.7 [\#1](https://github.com/knu/ruby-domain_name/pull/1) ([drbrain](https://github.com/drbrain)) + +## [v0.5.0](https://github.com/knu/ruby-domain_name/tree/v0.5.0) (2011-11-04) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v0.0.0...v0.5.0) + +- Implement DomainName comparison and fix cookie_domain?() +- Avoid warnings about uninitialized instance variables + +## [v0.0.0](https://github.com/knu/ruby-domain_name/tree/v0.0.0) (2011-10-29) + +- Initial release + +\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)* diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Gemfile b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Gemfile new file mode 100644 index 0000000..4beebeb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in domain_name.gemspec +gemspec diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/LICENSE.txt new file mode 100644 index 0000000..67cec17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/LICENSE.txt @@ -0,0 +1,78 @@ +Copyright 
(c) 2011-2017 Akinori MUSHA + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +* lib/domain_name/punycode.rb + +This file is derived from the implementation of punycode available at +here: + +https://www.verisign.com/en_US/channel-resources/domain-registry-products/idn-sdks/index.xhtml + +Copyright (C) 2000-2002 Verisign Inc., All rights reserved. + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + + 1) Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3) Neither the name of the VeriSign Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +This software is licensed under the BSD open source license. For more +information visit www.opensource.org. 
+
+Authors:
+  John Colosi (VeriSign)
+  Srikanth Veeramachaneni (VeriSign)
+  Nagesh Chigurupati (Verisign)
+  Praveen Srinivasan (Verisign)
+
+* lib/domain_name/etld_data.rb
+
+This file is generated from the Public Suffix List
+(https://publicsuffix.org/), which is licensed under MPL 2.0:
+
+https://mozilla.org/MPL/2.0/
diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/README.md b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/README.md
new file mode 100644
index 0000000..50aadcd
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/README.md
@@ -0,0 +1,67 @@
+domain_name
+===========
+
+Synopsis
+--------
+
+Domain Name manipulation library for Ruby
+
+Description
+-----------
+
+* Parses a domain name for extracting the registered domain and
+  TLD.
+
+        require "domain_name"
+
+        host = DomainName("a.b.example.co.uk")
+        host.domain #=> "example.co.uk"
+        host.tld #=> "uk"
+        host.cookie_domain?("example.co.uk") #=> true
+        host.cookie_domain?("co.uk") #=> false
+
+        host = DomainName("[::1]") # IP addresses like "192.168.1.1" and "::1" are also acceptable
+        host.ipaddr? #=> true
+        host.cookie_domain?("0:0:0:0:0:0:0:1") #=> true
+
+* Implements rudimentary IDNA support.
+
+To-do's
+-------
+
+* Implement IDNA 2008 (and/or 2003), including the domain label
+  validation and mapping defined in RFC 5891-5895 and UTS #46.
+  (work in progress)
+
+* Define a compact YAML serialization format.
+
+Installation
+------------
+
+    gem install domain_name
+
+References
+----------
+
+* [RFC 3492](http://tools.ietf.org/html/rfc3492) (Obsolete; just for test cases)
+
+* [RFC 5890](http://tools.ietf.org/html/rfc5890)
+
+* [RFC 5891](http://tools.ietf.org/html/rfc5891)
+
+* [RFC 5892](http://tools.ietf.org/html/rfc5892)
+
+* [RFC 5893](http://tools.ietf.org/html/rfc5893)
+
+* [Public Suffix List](https://publicsuffix.org/list/)
+
+License
+-------
+
+Copyright (c) 2011-2017 Akinori MUSHA
+
+Licensed under the 2-clause BSD license.
+
+Some portions of this library are copyrighted by third parties and
+licensed under the MPL 2.0 or the 3-clause BSD license.
+See `LICENSE.txt` for details.
diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Rakefile b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Rakefile
new file mode 100644
index 0000000..0c2d3c7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/Rakefile
@@ -0,0 +1,111 @@
+require 'bundler/gem_tasks'
+require 'uri'
+ETLD_DATA_URI = URI('https://publicsuffix.org/list/public_suffix_list.dat')
+ETLD_DATA_FILE = 'data/public_suffix_list.dat'
+ETLD_DATA_RB = 'lib/domain_name/etld_data.rb'
+VERSION_RB = 'lib/domain_name/version.rb'
+
+task :default => :test
+
+task :test => ETLD_DATA_RB
+
+task :import => :etld_data
+
+#
+# eTLD Database
+#
+
+task :etld_data do
+  require 'open-uri'
+  require 'time'
+
+  begin
+    begin
+      load File.join('.', ETLD_DATA_RB)
+      data = ETLD_DATA_URI.read(
+        'If-Modified-Since' => Time.parse(DomainName::ETLD_DATA_DATE).rfc2822
+      )
+    rescue LoadError, NameError
+      data = ETLD_DATA_URI.read
+    end
+    puts 'eTLD database is modified.'
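+    # NOTE: when the list is unchanged, the conditional GET above yields a
+    # 304 Not Modified, which makes URI#read raise OpenURI::HTTPError (see
+    # the rescue clause below); reaching this point therefore means a newer
+    # copy of the database was actually downloaded.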
+ date = data.last_modified + File.write(ETLD_DATA_FILE, data) + File.utime Time.now, date, ETLD_DATA_FILE + if new_version = DomainName::VERSION.dup.sub!(/\b\d{8}\b/, date.strftime('%Y%m%d')) + File.open(VERSION_RB, 'r+') { |rb| + content = rb.read + rb.rewind + rb.write(content.sub(/(?<=^ VERSION = ')#{Regexp.quote(DomainName::VERSION)}(?='$)/, new_version)) + rb.truncate(rb.tell) + } + end + Rake::Task[ETLD_DATA_RB].execute + rescue OpenURI::HTTPError => e + if e.io.status.first == '304' # Not Modified + puts 'eTLD database is up-to-date.' + else + raise + end + end +end + +namespace :etld_data do + task :commit do + if system(*%W[git diff --exit-code --quiet], ETLD_DATA_FILE) + warn "Nothing to commit." + exit + end + + prev = `ruby -e "$(git cat-file -p @:lib/domain_name/version.rb); puts DomainName::VERSION"`.chomp + curr = `ruby -e "load 'lib/domain_name/version.rb'; puts DomainName::VERSION"`.chomp + timestamp = File.mtime(ETLD_DATA_FILE).utc + + File.open('CHANGELOG.md', 'r+') do |f| + lines = f.readlines + lines.insert(2, <<~EOF) +## [v#{curr}](https://github.com/knu/ruby-domain_name/tree/v#{curr}) (#{Time.now.strftime('%F')}) +[Full Changelog](https://github.com/knu/ruby-domain_name/compare/v#{prev}...v#{curr}) + +- Update the eTLD database to #{timestamp} + + EOF + f.rewind + f.puts lines + end + + sh 'git', 'commit', + 'CHANGELOG.md', + ETLD_DATA_FILE, + ETLD_DATA_RB, + VERSION_RB, + '-m', 'Update the eTLD database to %s.' % timestamp + + sh 'git', 'tag', "v#{curr}" + end +end + +file ETLD_DATA_RB => [ + ETLD_DATA_FILE, + ETLD_DATA_RB + '.erb', + 'tool/gen_etld_data.rb' +] do + ruby 'tool/gen_etld_data.rb' +end + +require 'rake/testtask' +Rake::TestTask.new(:test) do |test| + test.libs << 'test' + test.pattern = 'test/**/test_*.rb' + test.verbose = true +end + +require 'rdoc/task' +Rake::RDocTask.new do |rdoc| + version = DomainName::VERSION + + rdoc.rdoc_dir = 'rdoc' + rdoc.title = "domain_name #{version}" + rdoc.rdoc_files.include('lib/**/*.rb') + rdoc.rdoc_files.include(Bundler::GemHelper.gemspec.extra_rdoc_files) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/data/public_suffix_list.dat b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/data/public_suffix_list.dat new file mode 100644 index 0000000..b162596 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/data/public_suffix_list.dat @@ -0,0 +1,12985 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Please pull this list from, and only from https://publicsuffix.org/list/public_suffix_list.dat, +// rather than any other VCS sites. Pulling from any other URL is not guaranteed to be supported. + +// Instructions on pulling and using this list can be found at https://publicsuffix.org/list/. 
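+
+// A note on rule syntax (see https://publicsuffix.org/list/ for the full
+// specification): a leading "*." is a wildcard matching exactly one label
+// (e.g. "*.ck"), and a leading "!" marks an exception that overrides a
+// wildcard (e.g. "!www.ck" makes www.ck registrable despite "*.ck").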
+ +// ===BEGIN ICANN DOMAINS=== + +// ac : https://en.wikipedia.org/wiki/.ac +ac +com.ac +edu.ac +gov.ac +net.ac +mil.ac +org.ac + +// ad : https://en.wikipedia.org/wiki/.ad +ad +nom.ad + +// ae : https://en.wikipedia.org/wiki/.ae +// see also: "Domain Name Eligibility Policy" at http://www.aeda.ae/eng/aepolicy.php +ae +co.ae +net.ae +org.ae +sch.ae +ac.ae +gov.ae +mil.ae + +// aero : see https://www.information.aero/index.php?id=66 +aero +accident-investigation.aero +accident-prevention.aero +aerobatic.aero +aeroclub.aero +aerodrome.aero +agents.aero +aircraft.aero +airline.aero +airport.aero +air-surveillance.aero +airtraffic.aero +air-traffic-control.aero +ambulance.aero +amusement.aero +association.aero +author.aero +ballooning.aero +broker.aero +caa.aero +cargo.aero +catering.aero +certification.aero +championship.aero +charter.aero +civilaviation.aero +club.aero +conference.aero +consultant.aero +consulting.aero +control.aero +council.aero +crew.aero +design.aero +dgca.aero +educator.aero +emergency.aero +engine.aero +engineer.aero +entertainment.aero +equipment.aero +exchange.aero +express.aero +federation.aero +flight.aero +freight.aero +fuel.aero +gliding.aero +government.aero +groundhandling.aero +group.aero +hanggliding.aero +homebuilt.aero +insurance.aero +journal.aero +journalist.aero +leasing.aero +logistics.aero +magazine.aero +maintenance.aero +media.aero +microlight.aero +modelling.aero +navigation.aero +parachuting.aero +paragliding.aero +passenger-association.aero +pilot.aero +press.aero +production.aero +recreation.aero +repbody.aero +res.aero +research.aero +rotorcraft.aero +safety.aero +scientist.aero +services.aero +show.aero +skydiving.aero +software.aero +student.aero +trader.aero +trading.aero +trainer.aero +union.aero +workinggroup.aero +works.aero + +// af : http://www.nic.af/help.jsp +af +gov.af +com.af +org.af +net.af +edu.af + +// ag : http://www.nic.ag/prices.htm +ag +com.ag +org.ag +net.ag +co.ag +nom.ag + +// ai : http://nic.com.ai/ +ai +off.ai +com.ai +net.ai +org.ai + +// al : http://www.ert.gov.al/ert_alb/faq_det.html?Id=31 +al +com.al +edu.al +gov.al +mil.al +net.al +org.al + +// am : https://www.amnic.net/policy/en/Policy_EN.pdf +am +co.am +com.am +commune.am +net.am +org.am + +// ao : https://en.wikipedia.org/wiki/.ao +// http://www.dns.ao/REGISTR.DOC +ao +ed.ao +gv.ao +og.ao +co.ao +pb.ao +it.ao + +// aq : https://en.wikipedia.org/wiki/.aq +aq + +// ar : https://nic.ar/nic-argentina/normativa-vigente +ar +com.ar +edu.ar +gob.ar +gov.ar +int.ar +mil.ar +musica.ar +net.ar +org.ar +tur.ar + +// arpa : https://en.wikipedia.org/wiki/.arpa +// Confirmed by registry 2008-06-18 +arpa +e164.arpa +in-addr.arpa +ip6.arpa +iris.arpa +uri.arpa +urn.arpa + +// as : https://en.wikipedia.org/wiki/.as +as +gov.as + +// asia : https://en.wikipedia.org/wiki/.asia +asia + +// at : https://en.wikipedia.org/wiki/.at +// Confirmed by registry 2008-06-17 +at +ac.at +co.at +gv.at +or.at + +// au : https://en.wikipedia.org/wiki/.au +// http://www.auda.org.au/ +au +// 2LDs +com.au +net.au +org.au +edu.au +gov.au +asn.au +id.au +// Historic 2LDs (closed to new registration, but sites still exist) +info.au +conf.au +oz.au +// CGDNs - http://www.cgdn.org.au/ +act.au +nsw.au +nt.au +qld.au +sa.au +tas.au +vic.au +wa.au +// 3LDs +act.edu.au +nsw.edu.au +nt.edu.au +qld.edu.au +sa.edu.au +tas.edu.au +vic.edu.au +wa.edu.au +// act.gov.au Bug 984824 - Removed at request of Greg Tankard +// nsw.gov.au Bug 547985 - Removed at request of +// nt.gov.au Bug 940478 - Removed at request of 
Greg Connors +qld.gov.au +sa.gov.au +tas.gov.au +vic.gov.au +wa.gov.au + +// aw : https://en.wikipedia.org/wiki/.aw +aw +com.aw + +// ax : https://en.wikipedia.org/wiki/.ax +ax + +// az : https://en.wikipedia.org/wiki/.az +az +com.az +net.az +int.az +gov.az +org.az +edu.az +info.az +pp.az +mil.az +name.az +pro.az +biz.az + +// ba : http://nic.ba/users_data/files/pravilnik_o_registraciji.pdf +ba +com.ba +edu.ba +gov.ba +mil.ba +net.ba +org.ba + +// bb : https://en.wikipedia.org/wiki/.bb +bb +biz.bb +co.bb +com.bb +edu.bb +gov.bb +info.bb +net.bb +org.bb +store.bb +tv.bb + +// bd : https://en.wikipedia.org/wiki/.bd +*.bd + +// be : https://en.wikipedia.org/wiki/.be +// Confirmed by registry 2008-06-08 +be +ac.be + +// bf : https://en.wikipedia.org/wiki/.bf +bf +gov.bf + +// bg : https://en.wikipedia.org/wiki/.bg +// https://www.register.bg/user/static/rules/en/index.html +bg +a.bg +b.bg +c.bg +d.bg +e.bg +f.bg +g.bg +h.bg +i.bg +j.bg +k.bg +l.bg +m.bg +n.bg +o.bg +p.bg +q.bg +r.bg +s.bg +t.bg +u.bg +v.bg +w.bg +x.bg +y.bg +z.bg +0.bg +1.bg +2.bg +3.bg +4.bg +5.bg +6.bg +7.bg +8.bg +9.bg + +// bh : https://en.wikipedia.org/wiki/.bh +bh +com.bh +edu.bh +net.bh +org.bh +gov.bh + +// bi : https://en.wikipedia.org/wiki/.bi +// http://whois.nic.bi/ +bi +co.bi +com.bi +edu.bi +or.bi +org.bi + +// biz : https://en.wikipedia.org/wiki/.biz +biz + +// bj : https://en.wikipedia.org/wiki/.bj +bj +asso.bj +barreau.bj +gouv.bj + +// bm : http://www.bermudanic.bm/dnr-text.txt +bm +com.bm +edu.bm +gov.bm +net.bm +org.bm + +// bn : http://www.bnnic.bn/faqs +bn +com.bn +edu.bn +gov.bn +net.bn +org.bn + +// bo : https://nic.bo/delegacion2015.php#h-1.10 +bo +com.bo +edu.bo +gob.bo +int.bo +org.bo +net.bo +mil.bo +tv.bo +web.bo +// Social Domains +academia.bo +agro.bo +arte.bo +blog.bo +bolivia.bo +ciencia.bo +cooperativa.bo +democracia.bo +deporte.bo +ecologia.bo +economia.bo +empresa.bo +indigena.bo +industria.bo +info.bo +medicina.bo +movimiento.bo +musica.bo +natural.bo +nombre.bo +noticias.bo +patria.bo +politica.bo +profesional.bo +plurinacional.bo +pueblo.bo +revista.bo +salud.bo +tecnologia.bo +tksat.bo +transporte.bo +wiki.bo + +// br : http://registro.br/dominio/categoria.html +// Submitted by registry +br +9guacu.br +abc.br +adm.br +adv.br +agr.br +aju.br +am.br +anani.br +aparecida.br +arq.br +art.br +ato.br +b.br +barueri.br +belem.br +bhz.br +bio.br +blog.br +bmd.br +boavista.br +bsb.br +campinagrande.br +campinas.br +caxias.br +cim.br +cng.br +cnt.br +com.br +contagem.br +coop.br +cri.br +cuiaba.br +curitiba.br +def.br +ecn.br +eco.br +edu.br +emp.br +eng.br +esp.br +etc.br +eti.br +far.br +feira.br +flog.br +floripa.br +fm.br +fnd.br +fortal.br +fot.br +foz.br +fst.br +g12.br +ggf.br +goiania.br +gov.br +// gov.br 26 states + df https://en.wikipedia.org/wiki/States_of_Brazil +ac.gov.br +al.gov.br +am.gov.br +ap.gov.br +ba.gov.br +ce.gov.br +df.gov.br +es.gov.br +go.gov.br +ma.gov.br +mg.gov.br +ms.gov.br +mt.gov.br +pa.gov.br +pb.gov.br +pe.gov.br +pi.gov.br +pr.gov.br +rj.gov.br +rn.gov.br +ro.gov.br +rr.gov.br +rs.gov.br +sc.gov.br +se.gov.br +sp.gov.br +to.gov.br +gru.br +imb.br +ind.br +inf.br +jab.br +jampa.br +jdf.br +joinville.br +jor.br +jus.br +leg.br +lel.br +londrina.br +macapa.br +maceio.br +manaus.br +maringa.br +mat.br +med.br +mil.br +morena.br +mp.br +mus.br +natal.br +net.br +niteroi.br +*.nom.br +not.br +ntr.br +odo.br +ong.br +org.br +osasco.br +palmas.br +poa.br +ppg.br +pro.br +psc.br +psi.br +pvh.br +qsl.br +radio.br +rec.br +recife.br +ribeirao.br +rio.br +riobranco.br 
+riopreto.br +salvador.br +sampa.br +santamaria.br +santoandre.br +saobernardo.br +saogonca.br +sjc.br +slg.br +slz.br +sorocaba.br +srv.br +taxi.br +tc.br +teo.br +the.br +tmp.br +trd.br +tur.br +tv.br +udi.br +vet.br +vix.br +vlog.br +wiki.br +zlg.br + +// bs : http://www.nic.bs/rules.html +bs +com.bs +net.bs +org.bs +edu.bs +gov.bs + +// bt : https://en.wikipedia.org/wiki/.bt +bt +com.bt +edu.bt +gov.bt +net.bt +org.bt + +// bv : No registrations at this time. +// Submitted by registry +bv + +// bw : https://en.wikipedia.org/wiki/.bw +// http://www.gobin.info/domainname/bw.doc +// list of other 2nd level tlds ? +bw +co.bw +org.bw + +// by : https://en.wikipedia.org/wiki/.by +// http://tld.by/rules_2006_en.html +// list of other 2nd level tlds ? +by +gov.by +mil.by +// Official information does not indicate that com.by is a reserved +// second-level domain, but it's being used as one (see www.google.com.by and +// www.yahoo.com.by, for example), so we list it here for safety's sake. +com.by + +// http://hoster.by/ +of.by + +// bz : https://en.wikipedia.org/wiki/.bz +// http://www.belizenic.bz/ +bz +com.bz +net.bz +org.bz +edu.bz +gov.bz + +// ca : https://en.wikipedia.org/wiki/.ca +ca +// ca geographical names +ab.ca +bc.ca +mb.ca +nb.ca +nf.ca +nl.ca +ns.ca +nt.ca +nu.ca +on.ca +pe.ca +qc.ca +sk.ca +yk.ca +// gc.ca: https://en.wikipedia.org/wiki/.gc.ca +// see also: http://registry.gc.ca/en/SubdomainFAQ +gc.ca + +// cat : https://en.wikipedia.org/wiki/.cat +cat + +// cc : https://en.wikipedia.org/wiki/.cc +cc + +// cd : https://en.wikipedia.org/wiki/.cd +// see also: https://www.nic.cd/domain/insertDomain_2.jsp?act=1 +cd +gov.cd + +// cf : https://en.wikipedia.org/wiki/.cf +cf + +// cg : https://en.wikipedia.org/wiki/.cg +cg + +// ch : https://en.wikipedia.org/wiki/.ch +ch + +// ci : https://en.wikipedia.org/wiki/.ci +// http://www.nic.ci/index.php?page=charte +ci +org.ci +or.ci +com.ci +co.ci +edu.ci +ed.ci +ac.ci +net.ci +go.ci +asso.ci +aÊroport.ci +int.ci +presse.ci +md.ci +gouv.ci + +// ck : https://en.wikipedia.org/wiki/.ck +*.ck +!www.ck + +// cl : https://en.wikipedia.org/wiki/.cl +cl +gov.cl +gob.cl +co.cl +mil.cl + +// cm : https://en.wikipedia.org/wiki/.cm plus bug 981927 +cm +co.cm +com.cm +gov.cm +net.cm + +// cn : https://en.wikipedia.org/wiki/.cn +// Submitted by registry +cn +ac.cn +com.cn +edu.cn +gov.cn +net.cn +org.cn +mil.cn +å…Ŧ司.cn +įŊ‘įģœ.cn +įļ˛įĩĄ.cn +// cn geographic names +ah.cn +bj.cn +cq.cn +fj.cn +gd.cn +gs.cn +gz.cn +gx.cn +ha.cn +hb.cn +he.cn +hi.cn +hl.cn +hn.cn +jl.cn +js.cn +jx.cn +ln.cn +nm.cn +nx.cn +qh.cn +sc.cn +sd.cn +sh.cn +sn.cn +sx.cn +tj.cn +xj.cn +xz.cn +yn.cn +zj.cn +hk.cn +mo.cn +tw.cn + +// co : https://en.wikipedia.org/wiki/.co +// Submitted by registry +co +arts.co +com.co +edu.co +firm.co +gov.co +info.co +int.co +mil.co +net.co +nom.co +org.co +rec.co +web.co + +// com : https://en.wikipedia.org/wiki/.com +com + +// coop : https://en.wikipedia.org/wiki/.coop +coop + +// cr : http://www.nic.cr/niccr_publico/showRegistroDominiosScreen.do +cr +ac.cr +co.cr +ed.cr +fi.cr +go.cr +or.cr +sa.cr + +// cu : https://en.wikipedia.org/wiki/.cu +cu +com.cu +edu.cu +org.cu +net.cu +gov.cu +inf.cu + +// cv : https://en.wikipedia.org/wiki/.cv +cv + +// cw : http://www.una.cw/cw_registry/ +// Confirmed by registry 2013-03-26 +cw +com.cw +edu.cw +net.cw +org.cw + +// cx : https://en.wikipedia.org/wiki/.cx +// list of other 2nd level tlds ? 
+cx +gov.cx + +// cy : http://www.nic.cy/ +// Submitted by registry Panayiotou Fotia +cy +ac.cy +biz.cy +com.cy +ekloges.cy +gov.cy +ltd.cy +name.cy +net.cy +org.cy +parliament.cy +press.cy +pro.cy +tm.cy + +// cz : https://en.wikipedia.org/wiki/.cz +cz + +// de : https://en.wikipedia.org/wiki/.de +// Confirmed by registry (with technical +// reservations) 2008-07-01 +de + +// dj : https://en.wikipedia.org/wiki/.dj +dj + +// dk : https://en.wikipedia.org/wiki/.dk +// Confirmed by registry 2008-06-17 +dk + +// dm : https://en.wikipedia.org/wiki/.dm +dm +com.dm +net.dm +org.dm +edu.dm +gov.dm + +// do : https://en.wikipedia.org/wiki/.do +do +art.do +com.do +edu.do +gob.do +gov.do +mil.do +net.do +org.do +sld.do +web.do + +// dz : https://en.wikipedia.org/wiki/.dz +dz +com.dz +org.dz +net.dz +gov.dz +edu.dz +asso.dz +pol.dz +art.dz + +// ec : http://www.nic.ec/reg/paso1.asp +// Submitted by registry +ec +com.ec +info.ec +net.ec +fin.ec +k12.ec +med.ec +pro.ec +org.ec +edu.ec +gov.ec +gob.ec +mil.ec + +// edu : https://en.wikipedia.org/wiki/.edu +edu + +// ee : http://www.eenet.ee/EENet/dom_reeglid.html#lisa_B +ee +edu.ee +gov.ee +riik.ee +lib.ee +med.ee +com.ee +pri.ee +aip.ee +org.ee +fie.ee + +// eg : https://en.wikipedia.org/wiki/.eg +eg +com.eg +edu.eg +eun.eg +gov.eg +mil.eg +name.eg +net.eg +org.eg +sci.eg + +// er : https://en.wikipedia.org/wiki/.er +*.er + +// es : https://www.nic.es/site_ingles/ingles/dominios/index.html +es +com.es +nom.es +org.es +gob.es +edu.es + +// et : https://en.wikipedia.org/wiki/.et +et +com.et +gov.et +org.et +edu.et +biz.et +name.et +info.et +net.et + +// eu : https://en.wikipedia.org/wiki/.eu +eu + +// fi : https://en.wikipedia.org/wiki/.fi +fi +// aland.fi : https://en.wikipedia.org/wiki/.ax +// This domain is being phased out in favor of .ax. As there are still many +// domains under aland.fi, we still keep it on the list until aland.fi is +// completely removed. +// TODO: Check for updates (expected to be phased out around Q1/2009) +aland.fi + +// fj : https://en.wikipedia.org/wiki/.fj +*.fj + +// fk : https://en.wikipedia.org/wiki/.fk +*.fk + +// fm : https://en.wikipedia.org/wiki/.fm +fm + +// fo : https://en.wikipedia.org/wiki/.fo +fo + +// fr : http://www.afnic.fr/ +// domaines descriptifs : https://www.afnic.fr/medias/documents/Cadre_legal/Afnic_Naming_Policy_12122016_VEN.pdf +fr +asso.fr +com.fr +gouv.fr +nom.fr +prd.fr +tm.fr +// domaines sectoriels : https://www.afnic.fr/en/products-and-services/the-fr-tld/sector-based-fr-domains-4.html +aeroport.fr +avocat.fr +avoues.fr +cci.fr +chambagri.fr +chirurgiens-dentistes.fr +experts-comptables.fr +geometre-expert.fr +greta.fr +huissier-justice.fr +medecin.fr +notaires.fr +pharmacien.fr +port.fr +veterinaire.fr + +// ga : https://en.wikipedia.org/wiki/.ga +ga + +// gb : This registry is effectively dormant +// Submitted by registry +gb + +// gd : https://en.wikipedia.org/wiki/.gd +gd + +// ge : http://www.nic.net.ge/policy_en.pdf +ge +com.ge +edu.ge +gov.ge +org.ge +mil.ge +net.ge +pvt.ge + +// gf : https://en.wikipedia.org/wiki/.gf +gf + +// gg : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +gg +co.gg +net.gg +org.gg + +// gh : https://en.wikipedia.org/wiki/.gh +// see also: http://www.nic.gh/reg_now.php +// Although domains directly at second level are not possible at the moment, +// they have been possible for some time and may come back. 
+gh +com.gh +edu.gh +gov.gh +org.gh +mil.gh + +// gi : http://www.nic.gi/rules.html +gi +com.gi +ltd.gi +gov.gi +mod.gi +edu.gi +org.gi + +// gl : https://en.wikipedia.org/wiki/.gl +// http://nic.gl +gl +co.gl +com.gl +edu.gl +net.gl +org.gl + +// gm : http://www.nic.gm/htmlpages%5Cgm-policy.htm +gm + +// gn : http://psg.com/dns/gn/gn.txt +// Submitted by registry +gn +ac.gn +com.gn +edu.gn +gov.gn +org.gn +net.gn + +// gov : https://en.wikipedia.org/wiki/.gov +gov + +// gp : http://www.nic.gp/index.php?lang=en +gp +com.gp +net.gp +mobi.gp +edu.gp +org.gp +asso.gp + +// gq : https://en.wikipedia.org/wiki/.gq +gq + +// gr : https://grweb.ics.forth.gr/english/1617-B-2005.html +// Submitted by registry +gr +com.gr +edu.gr +net.gr +org.gr +gov.gr + +// gs : https://en.wikipedia.org/wiki/.gs +gs + +// gt : http://www.gt/politicas_de_registro.html +gt +com.gt +edu.gt +gob.gt +ind.gt +mil.gt +net.gt +org.gt + +// gu : http://gadao.gov.gu/register.html +// University of Guam : https://www.uog.edu +// Submitted by uognoc@triton.uog.edu +gu +com.gu +edu.gu +gov.gu +guam.gu +info.gu +net.gu +org.gu +web.gu + +// gw : https://en.wikipedia.org/wiki/.gw +gw + +// gy : https://en.wikipedia.org/wiki/.gy +// http://registry.gy/ +gy +co.gy +com.gy +edu.gy +gov.gy +net.gy +org.gy + +// hk : https://www.hkirc.hk +// Submitted by registry +hk +com.hk +edu.hk +gov.hk +idv.hk +net.hk +org.hk +å…Ŧ司.hk +æ•™č‚˛.hk +æ•Žč‚˛.hk +æ”ŋåēœ.hk +個äēē.hk +ä¸Ēäēē.hk +įŽ‡äēē.hk +įļ˛įģœ.hk +įŊ‘įģœ.hk +įģ„įš”.hk +įļ˛įĩĄ.hk +įŊ‘įĩĄ.hk +įģ„įģ‡.hk +įĩ„įš”.hk +įĩ„įģ‡.hk + +// hm : https://en.wikipedia.org/wiki/.hm +hm + +// hn : http://www.nic.hn/politicas/ps02,,05.html +hn +com.hn +edu.hn +org.hn +net.hn +mil.hn +gob.hn + +// hr : http://www.dns.hr/documents/pdf/HRTLD-regulations.pdf +hr +iz.hr +from.hr +name.hr +com.hr + +// ht : http://www.nic.ht/info/charte.cfm +ht +com.ht +shop.ht +firm.ht +info.ht +adult.ht +net.ht +pro.ht +org.ht +med.ht +art.ht +coop.ht +pol.ht +asso.ht +edu.ht +rel.ht +gouv.ht +perso.ht + +// hu : http://www.domain.hu/domain/English/sld.html +// Confirmed by registry 2008-06-12 +hu +co.hu +info.hu +org.hu +priv.hu +sport.hu +tm.hu +2000.hu +agrar.hu +bolt.hu +casino.hu +city.hu +erotica.hu +erotika.hu +film.hu +forum.hu +games.hu +hotel.hu +ingatlan.hu +jogasz.hu +konyvelo.hu +lakas.hu +media.hu +news.hu +reklam.hu +sex.hu +shop.hu +suli.hu +szex.hu +tozsde.hu +utazas.hu +video.hu + +// id : https://pandi.id/en/domain/registration-requirements/ +id +ac.id +biz.id +co.id +desa.id +go.id +mil.id +my.id +net.id +or.id +ponpes.id +sch.id +web.id + +// ie : https://en.wikipedia.org/wiki/.ie +ie +gov.ie + +// il : http://www.isoc.org.il/domains/ +il +ac.il +co.il +gov.il +idf.il +k12.il +muni.il +net.il +org.il + +// im : https://www.nic.im/ +// Submitted by registry +im +ac.im +co.im +com.im +ltd.co.im +net.im +org.im +plc.co.im +tt.im +tv.im + +// in : https://en.wikipedia.org/wiki/.in +// see also: https://registry.in/Policies +// Please note, that nic.in is not an official eTLD, but used by most +// government institutions. +in +co.in +firm.in +net.in +org.in +gen.in +ind.in +nic.in +ac.in +edu.in +res.in +gov.in +mil.in + +// info : https://en.wikipedia.org/wiki/.info +info + +// int : https://en.wikipedia.org/wiki/.int +// Confirmed by registry 2008-06-18 +int +eu.int + +// io : http://www.nic.io/rules.html +// list of other 2nd level tlds ? 
+io +com.io + +// iq : http://www.cmc.iq/english/iq/iqregister1.htm +iq +gov.iq +edu.iq +mil.iq +com.iq +org.iq +net.iq + +// ir : http://www.nic.ir/Terms_and_Conditions_ir,_Appendix_1_Domain_Rules +// Also see http://www.nic.ir/Internationalized_Domain_Names +// Two .ir entries added at request of , 2010-04-16 +ir +ac.ir +co.ir +gov.ir +id.ir +net.ir +org.ir +sch.ir +// xn--mgba3a4f16a.ir (.ir, Persian YEH) +Ø§ÛŒØąØ§Ų†.ir +// xn--mgba3a4fra.ir (.ir, Arabic YEH) +اŲŠØąØ§Ų†.ir + +// is : http://www.isnic.is/domain/rules.php +// Confirmed by registry 2008-12-06 +is +net.is +com.is +edu.is +gov.is +org.is +int.is + +// it : https://en.wikipedia.org/wiki/.it +it +gov.it +edu.it +// Reserved geo-names (regions and provinces): +// http://www.nic.it/sites/default/files/docs/Regulation_assignation_v7.1.pdf +// Regions +abr.it +abruzzo.it +aosta-valley.it +aostavalley.it +bas.it +basilicata.it +cal.it +calabria.it +cam.it +campania.it +emilia-romagna.it +emiliaromagna.it +emr.it +friuli-v-giulia.it +friuli-ve-giulia.it +friuli-vegiulia.it +friuli-venezia-giulia.it +friuli-veneziagiulia.it +friuli-vgiulia.it +friuliv-giulia.it +friulive-giulia.it +friulivegiulia.it +friulivenezia-giulia.it +friuliveneziagiulia.it +friulivgiulia.it +fvg.it +laz.it +lazio.it +lig.it +liguria.it +lom.it +lombardia.it +lombardy.it +lucania.it +mar.it +marche.it +mol.it +molise.it +piedmont.it +piemonte.it +pmn.it +pug.it +puglia.it +sar.it +sardegna.it +sardinia.it +sic.it +sicilia.it +sicily.it +taa.it +tos.it +toscana.it +trentin-sud-tirol.it +trentin-sÃŧd-tirol.it +trentin-sudtirol.it +trentin-sÃŧdtirol.it +trentin-sued-tirol.it +trentin-suedtirol.it +trentino-a-adige.it +trentino-aadige.it +trentino-alto-adige.it +trentino-altoadige.it +trentino-s-tirol.it +trentino-stirol.it +trentino-sud-tirol.it +trentino-sÃŧd-tirol.it +trentino-sudtirol.it +trentino-sÃŧdtirol.it +trentino-sued-tirol.it +trentino-suedtirol.it +trentino.it +trentinoa-adige.it +trentinoaadige.it +trentinoalto-adige.it +trentinoaltoadige.it +trentinos-tirol.it +trentinostirol.it +trentinosud-tirol.it +trentinosÃŧd-tirol.it +trentinosudtirol.it +trentinosÃŧdtirol.it +trentinosued-tirol.it +trentinosuedtirol.it +trentinsud-tirol.it +trentinsÃŧd-tirol.it +trentinsudtirol.it +trentinsÃŧdtirol.it +trentinsued-tirol.it +trentinsuedtirol.it +tuscany.it +umb.it +umbria.it +val-d-aosta.it +val-daosta.it +vald-aosta.it +valdaosta.it +valle-aosta.it +valle-d-aosta.it +valle-daosta.it +valleaosta.it +valled-aosta.it +valledaosta.it +vallee-aoste.it +vallÊe-aoste.it +vallee-d-aoste.it +vallÊe-d-aoste.it +valleeaoste.it +vallÊeaoste.it +valleedaoste.it +vallÊedaoste.it +vao.it +vda.it +ven.it +veneto.it +// Provinces +ag.it +agrigento.it +al.it +alessandria.it +alto-adige.it +altoadige.it +an.it +ancona.it +andria-barletta-trani.it +andria-trani-barletta.it +andriabarlettatrani.it +andriatranibarletta.it +ao.it +aosta.it +aoste.it +ap.it +aq.it +aquila.it +ar.it +arezzo.it +ascoli-piceno.it +ascolipiceno.it +asti.it +at.it +av.it +avellino.it +ba.it +balsan-sudtirol.it +balsan-sÃŧdtirol.it +balsan-suedtirol.it +balsan.it +bari.it +barletta-trani-andria.it +barlettatraniandria.it +belluno.it +benevento.it +bergamo.it +bg.it +bi.it +biella.it +bl.it +bn.it +bo.it +bologna.it +bolzano-altoadige.it +bolzano.it +bozen-sudtirol.it +bozen-sÃŧdtirol.it +bozen-suedtirol.it +bozen.it +br.it +brescia.it +brindisi.it +bs.it +bt.it +bulsan-sudtirol.it +bulsan-sÃŧdtirol.it +bulsan-suedtirol.it +bulsan.it +bz.it +ca.it +cagliari.it +caltanissetta.it +campidano-medio.it 
+campidanomedio.it +campobasso.it +carbonia-iglesias.it +carboniaiglesias.it +carrara-massa.it +carraramassa.it +caserta.it +catania.it +catanzaro.it +cb.it +ce.it +cesena-forli.it +cesena-forlÃŦ.it +cesenaforli.it +cesenaforlÃŦ.it +ch.it +chieti.it +ci.it +cl.it +cn.it +co.it +como.it +cosenza.it +cr.it +cremona.it +crotone.it +cs.it +ct.it +cuneo.it +cz.it +dell-ogliastra.it +dellogliastra.it +en.it +enna.it +fc.it +fe.it +fermo.it +ferrara.it +fg.it +fi.it +firenze.it +florence.it +fm.it +foggia.it +forli-cesena.it +forlÃŦ-cesena.it +forlicesena.it +forlÃŦcesena.it +fr.it +frosinone.it +ge.it +genoa.it +genova.it +go.it +gorizia.it +gr.it +grosseto.it +iglesias-carbonia.it +iglesiascarbonia.it +im.it +imperia.it +is.it +isernia.it +kr.it +la-spezia.it +laquila.it +laspezia.it +latina.it +lc.it +le.it +lecce.it +lecco.it +li.it +livorno.it +lo.it +lodi.it +lt.it +lu.it +lucca.it +macerata.it +mantova.it +massa-carrara.it +massacarrara.it +matera.it +mb.it +mc.it +me.it +medio-campidano.it +mediocampidano.it +messina.it +mi.it +milan.it +milano.it +mn.it +mo.it +modena.it +monza-brianza.it +monza-e-della-brianza.it +monza.it +monzabrianza.it +monzaebrianza.it +monzaedellabrianza.it +ms.it +mt.it +na.it +naples.it +napoli.it +no.it +novara.it +nu.it +nuoro.it +og.it +ogliastra.it +olbia-tempio.it +olbiatempio.it +or.it +oristano.it +ot.it +pa.it +padova.it +padua.it +palermo.it +parma.it +pavia.it +pc.it +pd.it +pe.it +perugia.it +pesaro-urbino.it +pesarourbino.it +pescara.it +pg.it +pi.it +piacenza.it +pisa.it +pistoia.it +pn.it +po.it +pordenone.it +potenza.it +pr.it +prato.it +pt.it +pu.it +pv.it +pz.it +ra.it +ragusa.it +ravenna.it +rc.it +re.it +reggio-calabria.it +reggio-emilia.it +reggiocalabria.it +reggioemilia.it +rg.it +ri.it +rieti.it +rimini.it +rm.it +rn.it +ro.it +roma.it +rome.it +rovigo.it +sa.it +salerno.it +sassari.it +savona.it +si.it +siena.it +siracusa.it +so.it +sondrio.it +sp.it +sr.it +ss.it +suedtirol.it +sÃŧdtirol.it +sv.it +ta.it +taranto.it +te.it +tempio-olbia.it +tempioolbia.it +teramo.it +terni.it +tn.it +to.it +torino.it +tp.it +tr.it +trani-andria-barletta.it +trani-barletta-andria.it +traniandriabarletta.it +tranibarlettaandria.it +trapani.it +trento.it +treviso.it +trieste.it +ts.it +turin.it +tv.it +ud.it +udine.it +urbino-pesaro.it +urbinopesaro.it +va.it +varese.it +vb.it +vc.it +ve.it +venezia.it +venice.it +verbania.it +vercelli.it +verona.it +vi.it +vibo-valentia.it +vibovalentia.it +vicenza.it +viterbo.it +vr.it +vs.it +vt.it +vv.it + +// je : http://www.channelisles.net/register-domains/ +// Confirmed by registry 2013-11-28 +je +co.je +net.je +org.je + +// jm : http://www.com.jm/register.html +*.jm + +// jo : http://www.dns.jo/Registration_policy.aspx +jo +com.jo +org.jo +net.jo +edu.jo +sch.jo +gov.jo +mil.jo +name.jo + +// jobs : https://en.wikipedia.org/wiki/.jobs +jobs + +// jp : https://en.wikipedia.org/wiki/.jp +// http://jprs.co.jp/en/jpdomain.html +// Submitted by registry +jp +// jp organizational type names +ac.jp +ad.jp +co.jp +ed.jp +go.jp +gr.jp +lg.jp +ne.jp +or.jp +// jp prefecture type names +aichi.jp +akita.jp +aomori.jp +chiba.jp +ehime.jp +fukui.jp +fukuoka.jp +fukushima.jp +gifu.jp +gunma.jp +hiroshima.jp +hokkaido.jp +hyogo.jp +ibaraki.jp +ishikawa.jp +iwate.jp +kagawa.jp +kagoshima.jp +kanagawa.jp +kochi.jp +kumamoto.jp +kyoto.jp +mie.jp +miyagi.jp +miyazaki.jp +nagano.jp +nagasaki.jp +nara.jp +niigata.jp +oita.jp +okayama.jp +okinawa.jp +osaka.jp +saga.jp +saitama.jp +shiga.jp +shimane.jp +shizuoka.jp +tochigi.jp 
+tokushima.jp +tokyo.jp +tottori.jp +toyama.jp +wakayama.jp +yamagata.jp +yamaguchi.jp +yamanashi.jp +栃木.jp +愛įŸĨ.jp +愛åĒ›.jp +å…ĩåēĢ.jp +į†ŠæœŦ.jp +čŒ¨åŸŽ.jp +北æĩˇé“.jp +åƒč‘‰.jp +å’Œæ­Œåąą.jp +é•ˇå´Ž.jp +長野.jp +新æŊŸ.jp +青æŖŽ.jp +é™å˛Ą.jp +æąäēŦ.jp +įŸŗåˇ.jp +åŸŧįŽ‰.jp +三重.jp +äēŦéƒŊ.jp +äŊčŗ€.jp +大分.jp +大é˜Ē.jp +åĨˆč‰¯.jp +厎城.jp +厎崎.jp +å¯Œåąą.jp +åąąåŖ.jp +åąąåŊĸ.jp +åąąæĸ¨.jp +å˛Šæ‰‹.jp +å˛é˜œ.jp +å˛Ąåąą.jp +åŗļæ š.jp +åēƒåŗļ.jp +åžŗåŗļ.jp +æ˛–į¸„.jp +æģ‹čŗ€.jp +įĨžåĨˆåˇ.jp +įĻäē•.jp +įĻå˛Ą.jp +įĻåŗļ.jp +į§‹į”°.jp +įž¤éĻŦ.jp +éĻ™åˇ.jp +éĢ˜įŸĨ.jp +éŗĨ取.jp +éšŋ児åŗļ.jp +// jp geographic type names +// http://jprs.jp/doc/rule/saisoku-1.html +*.kawasaki.jp +*.kitakyushu.jp +*.kobe.jp +*.nagoya.jp +*.sapporo.jp +*.sendai.jp +*.yokohama.jp +!city.kawasaki.jp +!city.kitakyushu.jp +!city.kobe.jp +!city.nagoya.jp +!city.sapporo.jp +!city.sendai.jp +!city.yokohama.jp +// 4th level registration +aisai.aichi.jp +ama.aichi.jp +anjo.aichi.jp +asuke.aichi.jp +chiryu.aichi.jp +chita.aichi.jp +fuso.aichi.jp +gamagori.aichi.jp +handa.aichi.jp +hazu.aichi.jp +hekinan.aichi.jp +higashiura.aichi.jp +ichinomiya.aichi.jp +inazawa.aichi.jp +inuyama.aichi.jp +isshiki.aichi.jp +iwakura.aichi.jp +kanie.aichi.jp +kariya.aichi.jp +kasugai.aichi.jp +kira.aichi.jp +kiyosu.aichi.jp +komaki.aichi.jp +konan.aichi.jp +kota.aichi.jp +mihama.aichi.jp +miyoshi.aichi.jp +nishio.aichi.jp +nisshin.aichi.jp +obu.aichi.jp +oguchi.aichi.jp +oharu.aichi.jp +okazaki.aichi.jp +owariasahi.aichi.jp +seto.aichi.jp +shikatsu.aichi.jp +shinshiro.aichi.jp +shitara.aichi.jp +tahara.aichi.jp +takahama.aichi.jp +tobishima.aichi.jp +toei.aichi.jp +togo.aichi.jp +tokai.aichi.jp +tokoname.aichi.jp +toyoake.aichi.jp +toyohashi.aichi.jp +toyokawa.aichi.jp +toyone.aichi.jp +toyota.aichi.jp +tsushima.aichi.jp +yatomi.aichi.jp +akita.akita.jp +daisen.akita.jp +fujisato.akita.jp +gojome.akita.jp +hachirogata.akita.jp +happou.akita.jp +higashinaruse.akita.jp +honjo.akita.jp +honjyo.akita.jp +ikawa.akita.jp +kamikoani.akita.jp +kamioka.akita.jp +katagami.akita.jp +kazuno.akita.jp +kitaakita.akita.jp +kosaka.akita.jp +kyowa.akita.jp +misato.akita.jp +mitane.akita.jp +moriyoshi.akita.jp +nikaho.akita.jp +noshiro.akita.jp +odate.akita.jp +oga.akita.jp +ogata.akita.jp +semboku.akita.jp +yokote.akita.jp +yurihonjo.akita.jp +aomori.aomori.jp +gonohe.aomori.jp +hachinohe.aomori.jp +hashikami.aomori.jp +hiranai.aomori.jp +hirosaki.aomori.jp +itayanagi.aomori.jp +kuroishi.aomori.jp +misawa.aomori.jp +mutsu.aomori.jp +nakadomari.aomori.jp +noheji.aomori.jp +oirase.aomori.jp +owani.aomori.jp +rokunohe.aomori.jp +sannohe.aomori.jp +shichinohe.aomori.jp +shingo.aomori.jp +takko.aomori.jp +towada.aomori.jp +tsugaru.aomori.jp +tsuruta.aomori.jp +abiko.chiba.jp +asahi.chiba.jp +chonan.chiba.jp +chosei.chiba.jp +choshi.chiba.jp +chuo.chiba.jp +funabashi.chiba.jp +futtsu.chiba.jp +hanamigawa.chiba.jp +ichihara.chiba.jp +ichikawa.chiba.jp +ichinomiya.chiba.jp +inzai.chiba.jp +isumi.chiba.jp +kamagaya.chiba.jp +kamogawa.chiba.jp +kashiwa.chiba.jp +katori.chiba.jp +katsuura.chiba.jp +kimitsu.chiba.jp +kisarazu.chiba.jp +kozaki.chiba.jp +kujukuri.chiba.jp +kyonan.chiba.jp +matsudo.chiba.jp +midori.chiba.jp +mihama.chiba.jp +minamiboso.chiba.jp +mobara.chiba.jp +mutsuzawa.chiba.jp +nagara.chiba.jp +nagareyama.chiba.jp +narashino.chiba.jp +narita.chiba.jp +noda.chiba.jp +oamishirasato.chiba.jp +omigawa.chiba.jp +onjuku.chiba.jp +otaki.chiba.jp +sakae.chiba.jp +sakura.chiba.jp +shimofusa.chiba.jp +shirako.chiba.jp +shiroi.chiba.jp +shisui.chiba.jp 
+sodegaura.chiba.jp +sosa.chiba.jp +tako.chiba.jp +tateyama.chiba.jp +togane.chiba.jp +tohnosho.chiba.jp +tomisato.chiba.jp +urayasu.chiba.jp +yachimata.chiba.jp +yachiyo.chiba.jp +yokaichiba.chiba.jp +yokoshibahikari.chiba.jp +yotsukaido.chiba.jp +ainan.ehime.jp +honai.ehime.jp +ikata.ehime.jp +imabari.ehime.jp +iyo.ehime.jp +kamijima.ehime.jp +kihoku.ehime.jp +kumakogen.ehime.jp +masaki.ehime.jp +matsuno.ehime.jp +matsuyama.ehime.jp +namikata.ehime.jp +niihama.ehime.jp +ozu.ehime.jp +saijo.ehime.jp +seiyo.ehime.jp +shikokuchuo.ehime.jp +tobe.ehime.jp +toon.ehime.jp +uchiko.ehime.jp +uwajima.ehime.jp +yawatahama.ehime.jp +echizen.fukui.jp +eiheiji.fukui.jp +fukui.fukui.jp +ikeda.fukui.jp +katsuyama.fukui.jp +mihama.fukui.jp +minamiechizen.fukui.jp +obama.fukui.jp +ohi.fukui.jp +ono.fukui.jp +sabae.fukui.jp +sakai.fukui.jp +takahama.fukui.jp +tsuruga.fukui.jp +wakasa.fukui.jp +ashiya.fukuoka.jp +buzen.fukuoka.jp +chikugo.fukuoka.jp +chikuho.fukuoka.jp +chikujo.fukuoka.jp +chikushino.fukuoka.jp +chikuzen.fukuoka.jp +chuo.fukuoka.jp +dazaifu.fukuoka.jp +fukuchi.fukuoka.jp +hakata.fukuoka.jp +higashi.fukuoka.jp +hirokawa.fukuoka.jp +hisayama.fukuoka.jp +iizuka.fukuoka.jp +inatsuki.fukuoka.jp +kaho.fukuoka.jp +kasuga.fukuoka.jp +kasuya.fukuoka.jp +kawara.fukuoka.jp +keisen.fukuoka.jp +koga.fukuoka.jp +kurate.fukuoka.jp +kurogi.fukuoka.jp +kurume.fukuoka.jp +minami.fukuoka.jp +miyako.fukuoka.jp +miyama.fukuoka.jp +miyawaka.fukuoka.jp +mizumaki.fukuoka.jp +munakata.fukuoka.jp +nakagawa.fukuoka.jp +nakama.fukuoka.jp +nishi.fukuoka.jp +nogata.fukuoka.jp +ogori.fukuoka.jp +okagaki.fukuoka.jp +okawa.fukuoka.jp +oki.fukuoka.jp +omuta.fukuoka.jp +onga.fukuoka.jp +onojo.fukuoka.jp +oto.fukuoka.jp +saigawa.fukuoka.jp +sasaguri.fukuoka.jp +shingu.fukuoka.jp +shinyoshitomi.fukuoka.jp +shonai.fukuoka.jp +soeda.fukuoka.jp +sue.fukuoka.jp +tachiarai.fukuoka.jp +tagawa.fukuoka.jp +takata.fukuoka.jp +toho.fukuoka.jp +toyotsu.fukuoka.jp +tsuiki.fukuoka.jp +ukiha.fukuoka.jp +umi.fukuoka.jp +usui.fukuoka.jp +yamada.fukuoka.jp +yame.fukuoka.jp +yanagawa.fukuoka.jp +yukuhashi.fukuoka.jp +aizubange.fukushima.jp +aizumisato.fukushima.jp +aizuwakamatsu.fukushima.jp +asakawa.fukushima.jp +bandai.fukushima.jp +date.fukushima.jp +fukushima.fukushima.jp +furudono.fukushima.jp +futaba.fukushima.jp +hanawa.fukushima.jp +higashi.fukushima.jp +hirata.fukushima.jp +hirono.fukushima.jp +iitate.fukushima.jp +inawashiro.fukushima.jp +ishikawa.fukushima.jp +iwaki.fukushima.jp +izumizaki.fukushima.jp +kagamiishi.fukushima.jp +kaneyama.fukushima.jp +kawamata.fukushima.jp +kitakata.fukushima.jp +kitashiobara.fukushima.jp +koori.fukushima.jp +koriyama.fukushima.jp +kunimi.fukushima.jp +miharu.fukushima.jp +mishima.fukushima.jp +namie.fukushima.jp +nango.fukushima.jp +nishiaizu.fukushima.jp +nishigo.fukushima.jp +okuma.fukushima.jp +omotego.fukushima.jp +ono.fukushima.jp +otama.fukushima.jp +samegawa.fukushima.jp +shimogo.fukushima.jp +shirakawa.fukushima.jp +showa.fukushima.jp +soma.fukushima.jp +sukagawa.fukushima.jp +taishin.fukushima.jp +tamakawa.fukushima.jp +tanagura.fukushima.jp +tenei.fukushima.jp +yabuki.fukushima.jp +yamato.fukushima.jp +yamatsuri.fukushima.jp +yanaizu.fukushima.jp +yugawa.fukushima.jp +anpachi.gifu.jp +ena.gifu.jp +gifu.gifu.jp +ginan.gifu.jp +godo.gifu.jp +gujo.gifu.jp +hashima.gifu.jp +hichiso.gifu.jp +hida.gifu.jp +higashishirakawa.gifu.jp +ibigawa.gifu.jp +ikeda.gifu.jp +kakamigahara.gifu.jp +kani.gifu.jp +kasahara.gifu.jp +kasamatsu.gifu.jp +kawaue.gifu.jp +kitagata.gifu.jp +mino.gifu.jp 
+minokamo.gifu.jp +mitake.gifu.jp +mizunami.gifu.jp +motosu.gifu.jp +nakatsugawa.gifu.jp +ogaki.gifu.jp +sakahogi.gifu.jp +seki.gifu.jp +sekigahara.gifu.jp +shirakawa.gifu.jp +tajimi.gifu.jp +takayama.gifu.jp +tarui.gifu.jp +toki.gifu.jp +tomika.gifu.jp +wanouchi.gifu.jp +yamagata.gifu.jp +yaotsu.gifu.jp +yoro.gifu.jp +annaka.gunma.jp +chiyoda.gunma.jp +fujioka.gunma.jp +higashiagatsuma.gunma.jp +isesaki.gunma.jp +itakura.gunma.jp +kanna.gunma.jp +kanra.gunma.jp +katashina.gunma.jp +kawaba.gunma.jp +kiryu.gunma.jp +kusatsu.gunma.jp +maebashi.gunma.jp +meiwa.gunma.jp +midori.gunma.jp +minakami.gunma.jp +naganohara.gunma.jp +nakanojo.gunma.jp +nanmoku.gunma.jp +numata.gunma.jp +oizumi.gunma.jp +ora.gunma.jp +ota.gunma.jp +shibukawa.gunma.jp +shimonita.gunma.jp +shinto.gunma.jp +showa.gunma.jp +takasaki.gunma.jp +takayama.gunma.jp +tamamura.gunma.jp +tatebayashi.gunma.jp +tomioka.gunma.jp +tsukiyono.gunma.jp +tsumagoi.gunma.jp +ueno.gunma.jp +yoshioka.gunma.jp +asaminami.hiroshima.jp +daiwa.hiroshima.jp +etajima.hiroshima.jp +fuchu.hiroshima.jp +fukuyama.hiroshima.jp +hatsukaichi.hiroshima.jp +higashihiroshima.hiroshima.jp +hongo.hiroshima.jp +jinsekikogen.hiroshima.jp +kaita.hiroshima.jp +kui.hiroshima.jp +kumano.hiroshima.jp +kure.hiroshima.jp +mihara.hiroshima.jp +miyoshi.hiroshima.jp +naka.hiroshima.jp +onomichi.hiroshima.jp +osakikamijima.hiroshima.jp +otake.hiroshima.jp +saka.hiroshima.jp +sera.hiroshima.jp +seranishi.hiroshima.jp +shinichi.hiroshima.jp +shobara.hiroshima.jp +takehara.hiroshima.jp +abashiri.hokkaido.jp +abira.hokkaido.jp +aibetsu.hokkaido.jp +akabira.hokkaido.jp +akkeshi.hokkaido.jp +asahikawa.hokkaido.jp +ashibetsu.hokkaido.jp +ashoro.hokkaido.jp +assabu.hokkaido.jp +atsuma.hokkaido.jp +bibai.hokkaido.jp +biei.hokkaido.jp +bifuka.hokkaido.jp +bihoro.hokkaido.jp +biratori.hokkaido.jp +chippubetsu.hokkaido.jp +chitose.hokkaido.jp +date.hokkaido.jp +ebetsu.hokkaido.jp +embetsu.hokkaido.jp +eniwa.hokkaido.jp +erimo.hokkaido.jp +esan.hokkaido.jp +esashi.hokkaido.jp +fukagawa.hokkaido.jp +fukushima.hokkaido.jp +furano.hokkaido.jp +furubira.hokkaido.jp +haboro.hokkaido.jp +hakodate.hokkaido.jp +hamatonbetsu.hokkaido.jp +hidaka.hokkaido.jp +higashikagura.hokkaido.jp +higashikawa.hokkaido.jp +hiroo.hokkaido.jp +hokuryu.hokkaido.jp +hokuto.hokkaido.jp +honbetsu.hokkaido.jp +horokanai.hokkaido.jp +horonobe.hokkaido.jp +ikeda.hokkaido.jp +imakane.hokkaido.jp +ishikari.hokkaido.jp +iwamizawa.hokkaido.jp +iwanai.hokkaido.jp +kamifurano.hokkaido.jp +kamikawa.hokkaido.jp +kamishihoro.hokkaido.jp +kamisunagawa.hokkaido.jp +kamoenai.hokkaido.jp +kayabe.hokkaido.jp +kembuchi.hokkaido.jp +kikonai.hokkaido.jp +kimobetsu.hokkaido.jp +kitahiroshima.hokkaido.jp +kitami.hokkaido.jp +kiyosato.hokkaido.jp +koshimizu.hokkaido.jp +kunneppu.hokkaido.jp +kuriyama.hokkaido.jp +kuromatsunai.hokkaido.jp +kushiro.hokkaido.jp +kutchan.hokkaido.jp +kyowa.hokkaido.jp +mashike.hokkaido.jp +matsumae.hokkaido.jp +mikasa.hokkaido.jp +minamifurano.hokkaido.jp +mombetsu.hokkaido.jp +moseushi.hokkaido.jp +mukawa.hokkaido.jp +muroran.hokkaido.jp +naie.hokkaido.jp +nakagawa.hokkaido.jp +nakasatsunai.hokkaido.jp +nakatombetsu.hokkaido.jp +nanae.hokkaido.jp +nanporo.hokkaido.jp +nayoro.hokkaido.jp +nemuro.hokkaido.jp +niikappu.hokkaido.jp +niki.hokkaido.jp +nishiokoppe.hokkaido.jp +noboribetsu.hokkaido.jp +numata.hokkaido.jp +obihiro.hokkaido.jp +obira.hokkaido.jp +oketo.hokkaido.jp +okoppe.hokkaido.jp +otaru.hokkaido.jp +otobe.hokkaido.jp +otofuke.hokkaido.jp +otoineppu.hokkaido.jp +oumu.hokkaido.jp 
+ozora.hokkaido.jp +pippu.hokkaido.jp +rankoshi.hokkaido.jp +rebun.hokkaido.jp +rikubetsu.hokkaido.jp +rishiri.hokkaido.jp +rishirifuji.hokkaido.jp +saroma.hokkaido.jp +sarufutsu.hokkaido.jp +shakotan.hokkaido.jp +shari.hokkaido.jp +shibecha.hokkaido.jp +shibetsu.hokkaido.jp +shikabe.hokkaido.jp +shikaoi.hokkaido.jp +shimamaki.hokkaido.jp +shimizu.hokkaido.jp +shimokawa.hokkaido.jp +shinshinotsu.hokkaido.jp +shintoku.hokkaido.jp +shiranuka.hokkaido.jp +shiraoi.hokkaido.jp +shiriuchi.hokkaido.jp +sobetsu.hokkaido.jp +sunagawa.hokkaido.jp +taiki.hokkaido.jp +takasu.hokkaido.jp +takikawa.hokkaido.jp +takinoue.hokkaido.jp +teshikaga.hokkaido.jp +tobetsu.hokkaido.jp +tohma.hokkaido.jp +tomakomai.hokkaido.jp +tomari.hokkaido.jp +toya.hokkaido.jp +toyako.hokkaido.jp +toyotomi.hokkaido.jp +toyoura.hokkaido.jp +tsubetsu.hokkaido.jp +tsukigata.hokkaido.jp +urakawa.hokkaido.jp +urausu.hokkaido.jp +uryu.hokkaido.jp +utashinai.hokkaido.jp +wakkanai.hokkaido.jp +wassamu.hokkaido.jp +yakumo.hokkaido.jp +yoichi.hokkaido.jp +aioi.hyogo.jp +akashi.hyogo.jp +ako.hyogo.jp +amagasaki.hyogo.jp +aogaki.hyogo.jp +asago.hyogo.jp +ashiya.hyogo.jp +awaji.hyogo.jp +fukusaki.hyogo.jp +goshiki.hyogo.jp +harima.hyogo.jp +himeji.hyogo.jp +ichikawa.hyogo.jp +inagawa.hyogo.jp +itami.hyogo.jp +kakogawa.hyogo.jp +kamigori.hyogo.jp +kamikawa.hyogo.jp +kasai.hyogo.jp +kasuga.hyogo.jp +kawanishi.hyogo.jp +miki.hyogo.jp +minamiawaji.hyogo.jp +nishinomiya.hyogo.jp +nishiwaki.hyogo.jp +ono.hyogo.jp +sanda.hyogo.jp +sannan.hyogo.jp +sasayama.hyogo.jp +sayo.hyogo.jp +shingu.hyogo.jp +shinonsen.hyogo.jp +shiso.hyogo.jp +sumoto.hyogo.jp +taishi.hyogo.jp +taka.hyogo.jp +takarazuka.hyogo.jp +takasago.hyogo.jp +takino.hyogo.jp +tamba.hyogo.jp +tatsuno.hyogo.jp +toyooka.hyogo.jp +yabu.hyogo.jp +yashiro.hyogo.jp +yoka.hyogo.jp +yokawa.hyogo.jp +ami.ibaraki.jp +asahi.ibaraki.jp +bando.ibaraki.jp +chikusei.ibaraki.jp +daigo.ibaraki.jp +fujishiro.ibaraki.jp +hitachi.ibaraki.jp +hitachinaka.ibaraki.jp +hitachiomiya.ibaraki.jp +hitachiota.ibaraki.jp +ibaraki.ibaraki.jp +ina.ibaraki.jp +inashiki.ibaraki.jp +itako.ibaraki.jp +iwama.ibaraki.jp +joso.ibaraki.jp +kamisu.ibaraki.jp +kasama.ibaraki.jp +kashima.ibaraki.jp +kasumigaura.ibaraki.jp +koga.ibaraki.jp +miho.ibaraki.jp +mito.ibaraki.jp +moriya.ibaraki.jp +naka.ibaraki.jp +namegata.ibaraki.jp +oarai.ibaraki.jp +ogawa.ibaraki.jp +omitama.ibaraki.jp +ryugasaki.ibaraki.jp +sakai.ibaraki.jp +sakuragawa.ibaraki.jp +shimodate.ibaraki.jp +shimotsuma.ibaraki.jp +shirosato.ibaraki.jp +sowa.ibaraki.jp +suifu.ibaraki.jp +takahagi.ibaraki.jp +tamatsukuri.ibaraki.jp +tokai.ibaraki.jp +tomobe.ibaraki.jp +tone.ibaraki.jp +toride.ibaraki.jp +tsuchiura.ibaraki.jp +tsukuba.ibaraki.jp +uchihara.ibaraki.jp +ushiku.ibaraki.jp +yachiyo.ibaraki.jp +yamagata.ibaraki.jp +yawara.ibaraki.jp +yuki.ibaraki.jp +anamizu.ishikawa.jp +hakui.ishikawa.jp +hakusan.ishikawa.jp +kaga.ishikawa.jp +kahoku.ishikawa.jp +kanazawa.ishikawa.jp +kawakita.ishikawa.jp +komatsu.ishikawa.jp +nakanoto.ishikawa.jp +nanao.ishikawa.jp +nomi.ishikawa.jp +nonoichi.ishikawa.jp +noto.ishikawa.jp +shika.ishikawa.jp +suzu.ishikawa.jp +tsubata.ishikawa.jp +tsurugi.ishikawa.jp +uchinada.ishikawa.jp +wajima.ishikawa.jp +fudai.iwate.jp +fujisawa.iwate.jp +hanamaki.iwate.jp +hiraizumi.iwate.jp +hirono.iwate.jp +ichinohe.iwate.jp +ichinoseki.iwate.jp +iwaizumi.iwate.jp +iwate.iwate.jp +joboji.iwate.jp +kamaishi.iwate.jp +kanegasaki.iwate.jp +karumai.iwate.jp +kawai.iwate.jp +kitakami.iwate.jp +kuji.iwate.jp +kunohe.iwate.jp +kuzumaki.iwate.jp 
+miyako.iwate.jp +mizusawa.iwate.jp +morioka.iwate.jp +ninohe.iwate.jp +noda.iwate.jp +ofunato.iwate.jp +oshu.iwate.jp +otsuchi.iwate.jp +rikuzentakata.iwate.jp +shiwa.iwate.jp +shizukuishi.iwate.jp +sumita.iwate.jp +tanohata.iwate.jp +tono.iwate.jp +yahaba.iwate.jp +yamada.iwate.jp +ayagawa.kagawa.jp +higashikagawa.kagawa.jp +kanonji.kagawa.jp +kotohira.kagawa.jp +manno.kagawa.jp +marugame.kagawa.jp +mitoyo.kagawa.jp +naoshima.kagawa.jp +sanuki.kagawa.jp +tadotsu.kagawa.jp +takamatsu.kagawa.jp +tonosho.kagawa.jp +uchinomi.kagawa.jp +utazu.kagawa.jp +zentsuji.kagawa.jp +akune.kagoshima.jp +amami.kagoshima.jp +hioki.kagoshima.jp +isa.kagoshima.jp +isen.kagoshima.jp +izumi.kagoshima.jp +kagoshima.kagoshima.jp +kanoya.kagoshima.jp +kawanabe.kagoshima.jp +kinko.kagoshima.jp +kouyama.kagoshima.jp +makurazaki.kagoshima.jp +matsumoto.kagoshima.jp +minamitane.kagoshima.jp +nakatane.kagoshima.jp +nishinoomote.kagoshima.jp +satsumasendai.kagoshima.jp +soo.kagoshima.jp +tarumizu.kagoshima.jp +yusui.kagoshima.jp +aikawa.kanagawa.jp +atsugi.kanagawa.jp +ayase.kanagawa.jp +chigasaki.kanagawa.jp +ebina.kanagawa.jp +fujisawa.kanagawa.jp +hadano.kanagawa.jp +hakone.kanagawa.jp +hiratsuka.kanagawa.jp +isehara.kanagawa.jp +kaisei.kanagawa.jp +kamakura.kanagawa.jp +kiyokawa.kanagawa.jp +matsuda.kanagawa.jp +minamiashigara.kanagawa.jp +miura.kanagawa.jp +nakai.kanagawa.jp +ninomiya.kanagawa.jp +odawara.kanagawa.jp +oi.kanagawa.jp +oiso.kanagawa.jp +sagamihara.kanagawa.jp +samukawa.kanagawa.jp +tsukui.kanagawa.jp +yamakita.kanagawa.jp +yamato.kanagawa.jp +yokosuka.kanagawa.jp +yugawara.kanagawa.jp +zama.kanagawa.jp +zushi.kanagawa.jp +aki.kochi.jp +geisei.kochi.jp +hidaka.kochi.jp +higashitsuno.kochi.jp +ino.kochi.jp +kagami.kochi.jp +kami.kochi.jp +kitagawa.kochi.jp +kochi.kochi.jp +mihara.kochi.jp +motoyama.kochi.jp +muroto.kochi.jp +nahari.kochi.jp +nakamura.kochi.jp +nankoku.kochi.jp +nishitosa.kochi.jp +niyodogawa.kochi.jp +ochi.kochi.jp +okawa.kochi.jp +otoyo.kochi.jp +otsuki.kochi.jp +sakawa.kochi.jp +sukumo.kochi.jp +susaki.kochi.jp +tosa.kochi.jp +tosashimizu.kochi.jp +toyo.kochi.jp +tsuno.kochi.jp +umaji.kochi.jp +yasuda.kochi.jp +yusuhara.kochi.jp +amakusa.kumamoto.jp +arao.kumamoto.jp +aso.kumamoto.jp +choyo.kumamoto.jp +gyokuto.kumamoto.jp +kamiamakusa.kumamoto.jp +kikuchi.kumamoto.jp +kumamoto.kumamoto.jp +mashiki.kumamoto.jp +mifune.kumamoto.jp +minamata.kumamoto.jp +minamioguni.kumamoto.jp +nagasu.kumamoto.jp +nishihara.kumamoto.jp +oguni.kumamoto.jp +ozu.kumamoto.jp +sumoto.kumamoto.jp +takamori.kumamoto.jp +uki.kumamoto.jp +uto.kumamoto.jp +yamaga.kumamoto.jp +yamato.kumamoto.jp +yatsushiro.kumamoto.jp +ayabe.kyoto.jp +fukuchiyama.kyoto.jp +higashiyama.kyoto.jp +ide.kyoto.jp +ine.kyoto.jp +joyo.kyoto.jp +kameoka.kyoto.jp +kamo.kyoto.jp +kita.kyoto.jp +kizu.kyoto.jp +kumiyama.kyoto.jp +kyotamba.kyoto.jp +kyotanabe.kyoto.jp +kyotango.kyoto.jp +maizuru.kyoto.jp +minami.kyoto.jp +minamiyamashiro.kyoto.jp +miyazu.kyoto.jp +muko.kyoto.jp +nagaokakyo.kyoto.jp +nakagyo.kyoto.jp +nantan.kyoto.jp +oyamazaki.kyoto.jp +sakyo.kyoto.jp +seika.kyoto.jp +tanabe.kyoto.jp +uji.kyoto.jp +ujitawara.kyoto.jp +wazuka.kyoto.jp +yamashina.kyoto.jp +yawata.kyoto.jp +asahi.mie.jp +inabe.mie.jp +ise.mie.jp +kameyama.mie.jp +kawagoe.mie.jp +kiho.mie.jp +kisosaki.mie.jp +kiwa.mie.jp +komono.mie.jp +kumano.mie.jp +kuwana.mie.jp +matsusaka.mie.jp +meiwa.mie.jp +mihama.mie.jp +minamiise.mie.jp +misugi.mie.jp +miyama.mie.jp +nabari.mie.jp +shima.mie.jp +suzuka.mie.jp +tado.mie.jp +taiki.mie.jp +taki.mie.jp +tamaki.mie.jp 
+toba.mie.jp +tsu.mie.jp +udono.mie.jp +ureshino.mie.jp +watarai.mie.jp +yokkaichi.mie.jp +furukawa.miyagi.jp +higashimatsushima.miyagi.jp +ishinomaki.miyagi.jp +iwanuma.miyagi.jp +kakuda.miyagi.jp +kami.miyagi.jp +kawasaki.miyagi.jp +marumori.miyagi.jp +matsushima.miyagi.jp +minamisanriku.miyagi.jp +misato.miyagi.jp +murata.miyagi.jp +natori.miyagi.jp +ogawara.miyagi.jp +ohira.miyagi.jp +onagawa.miyagi.jp +osaki.miyagi.jp +rifu.miyagi.jp +semine.miyagi.jp +shibata.miyagi.jp +shichikashuku.miyagi.jp +shikama.miyagi.jp +shiogama.miyagi.jp +shiroishi.miyagi.jp +tagajo.miyagi.jp +taiwa.miyagi.jp +tome.miyagi.jp +tomiya.miyagi.jp +wakuya.miyagi.jp +watari.miyagi.jp +yamamoto.miyagi.jp +zao.miyagi.jp +aya.miyazaki.jp +ebino.miyazaki.jp +gokase.miyazaki.jp +hyuga.miyazaki.jp +kadogawa.miyazaki.jp +kawaminami.miyazaki.jp +kijo.miyazaki.jp +kitagawa.miyazaki.jp +kitakata.miyazaki.jp +kitaura.miyazaki.jp +kobayashi.miyazaki.jp +kunitomi.miyazaki.jp +kushima.miyazaki.jp +mimata.miyazaki.jp +miyakonojo.miyazaki.jp +miyazaki.miyazaki.jp +morotsuka.miyazaki.jp +nichinan.miyazaki.jp +nishimera.miyazaki.jp +nobeoka.miyazaki.jp +saito.miyazaki.jp +shiiba.miyazaki.jp +shintomi.miyazaki.jp +takaharu.miyazaki.jp +takanabe.miyazaki.jp +takazaki.miyazaki.jp +tsuno.miyazaki.jp +achi.nagano.jp +agematsu.nagano.jp +anan.nagano.jp +aoki.nagano.jp +asahi.nagano.jp +azumino.nagano.jp +chikuhoku.nagano.jp +chikuma.nagano.jp +chino.nagano.jp +fujimi.nagano.jp +hakuba.nagano.jp +hara.nagano.jp +hiraya.nagano.jp +iida.nagano.jp +iijima.nagano.jp +iiyama.nagano.jp +iizuna.nagano.jp +ikeda.nagano.jp +ikusaka.nagano.jp +ina.nagano.jp +karuizawa.nagano.jp +kawakami.nagano.jp +kiso.nagano.jp +kisofukushima.nagano.jp +kitaaiki.nagano.jp +komagane.nagano.jp +komoro.nagano.jp +matsukawa.nagano.jp +matsumoto.nagano.jp +miasa.nagano.jp +minamiaiki.nagano.jp +minamimaki.nagano.jp +minamiminowa.nagano.jp +minowa.nagano.jp +miyada.nagano.jp +miyota.nagano.jp +mochizuki.nagano.jp +nagano.nagano.jp +nagawa.nagano.jp +nagiso.nagano.jp +nakagawa.nagano.jp +nakano.nagano.jp +nozawaonsen.nagano.jp +obuse.nagano.jp +ogawa.nagano.jp +okaya.nagano.jp +omachi.nagano.jp +omi.nagano.jp +ookuwa.nagano.jp +ooshika.nagano.jp +otaki.nagano.jp +otari.nagano.jp +sakae.nagano.jp +sakaki.nagano.jp +saku.nagano.jp +sakuho.nagano.jp +shimosuwa.nagano.jp +shinanomachi.nagano.jp +shiojiri.nagano.jp +suwa.nagano.jp +suzaka.nagano.jp +takagi.nagano.jp +takamori.nagano.jp +takayama.nagano.jp +tateshina.nagano.jp +tatsuno.nagano.jp +togakushi.nagano.jp +togura.nagano.jp +tomi.nagano.jp +ueda.nagano.jp +wada.nagano.jp +yamagata.nagano.jp +yamanouchi.nagano.jp +yasaka.nagano.jp +yasuoka.nagano.jp +chijiwa.nagasaki.jp +futsu.nagasaki.jp +goto.nagasaki.jp +hasami.nagasaki.jp +hirado.nagasaki.jp +iki.nagasaki.jp +isahaya.nagasaki.jp +kawatana.nagasaki.jp +kuchinotsu.nagasaki.jp +matsuura.nagasaki.jp +nagasaki.nagasaki.jp +obama.nagasaki.jp +omura.nagasaki.jp +oseto.nagasaki.jp +saikai.nagasaki.jp +sasebo.nagasaki.jp +seihi.nagasaki.jp +shimabara.nagasaki.jp +shinkamigoto.nagasaki.jp +togitsu.nagasaki.jp +tsushima.nagasaki.jp +unzen.nagasaki.jp +ando.nara.jp +gose.nara.jp +heguri.nara.jp +higashiyoshino.nara.jp +ikaruga.nara.jp +ikoma.nara.jp +kamikitayama.nara.jp +kanmaki.nara.jp +kashiba.nara.jp +kashihara.nara.jp +katsuragi.nara.jp +kawai.nara.jp +kawakami.nara.jp +kawanishi.nara.jp +koryo.nara.jp +kurotaki.nara.jp +mitsue.nara.jp +miyake.nara.jp +nara.nara.jp +nosegawa.nara.jp +oji.nara.jp +ouda.nara.jp +oyodo.nara.jp +sakurai.nara.jp +sango.nara.jp 
+shimoichi.nara.jp +shimokitayama.nara.jp +shinjo.nara.jp +soni.nara.jp +takatori.nara.jp +tawaramoto.nara.jp +tenkawa.nara.jp +tenri.nara.jp +uda.nara.jp +yamatokoriyama.nara.jp +yamatotakada.nara.jp +yamazoe.nara.jp +yoshino.nara.jp +aga.niigata.jp +agano.niigata.jp +gosen.niigata.jp +itoigawa.niigata.jp +izumozaki.niigata.jp +joetsu.niigata.jp +kamo.niigata.jp +kariwa.niigata.jp +kashiwazaki.niigata.jp +minamiuonuma.niigata.jp +mitsuke.niigata.jp +muika.niigata.jp +murakami.niigata.jp +myoko.niigata.jp +nagaoka.niigata.jp +niigata.niigata.jp +ojiya.niigata.jp +omi.niigata.jp +sado.niigata.jp +sanjo.niigata.jp +seiro.niigata.jp +seirou.niigata.jp +sekikawa.niigata.jp +shibata.niigata.jp +tagami.niigata.jp +tainai.niigata.jp +tochio.niigata.jp +tokamachi.niigata.jp +tsubame.niigata.jp +tsunan.niigata.jp +uonuma.niigata.jp +yahiko.niigata.jp +yoita.niigata.jp +yuzawa.niigata.jp +beppu.oita.jp +bungoono.oita.jp +bungotakada.oita.jp +hasama.oita.jp +hiji.oita.jp +himeshima.oita.jp +hita.oita.jp +kamitsue.oita.jp +kokonoe.oita.jp +kuju.oita.jp +kunisaki.oita.jp +kusu.oita.jp +oita.oita.jp +saiki.oita.jp +taketa.oita.jp +tsukumi.oita.jp +usa.oita.jp +usuki.oita.jp +yufu.oita.jp +akaiwa.okayama.jp +asakuchi.okayama.jp +bizen.okayama.jp +hayashima.okayama.jp +ibara.okayama.jp +kagamino.okayama.jp +kasaoka.okayama.jp +kibichuo.okayama.jp +kumenan.okayama.jp +kurashiki.okayama.jp +maniwa.okayama.jp +misaki.okayama.jp +nagi.okayama.jp +niimi.okayama.jp +nishiawakura.okayama.jp +okayama.okayama.jp +satosho.okayama.jp +setouchi.okayama.jp +shinjo.okayama.jp +shoo.okayama.jp +soja.okayama.jp +takahashi.okayama.jp +tamano.okayama.jp +tsuyama.okayama.jp +wake.okayama.jp +yakage.okayama.jp +aguni.okinawa.jp +ginowan.okinawa.jp +ginoza.okinawa.jp +gushikami.okinawa.jp +haebaru.okinawa.jp +higashi.okinawa.jp +hirara.okinawa.jp +iheya.okinawa.jp +ishigaki.okinawa.jp +ishikawa.okinawa.jp +itoman.okinawa.jp +izena.okinawa.jp +kadena.okinawa.jp +kin.okinawa.jp +kitadaito.okinawa.jp +kitanakagusuku.okinawa.jp +kumejima.okinawa.jp +kunigami.okinawa.jp +minamidaito.okinawa.jp +motobu.okinawa.jp +nago.okinawa.jp +naha.okinawa.jp +nakagusuku.okinawa.jp +nakijin.okinawa.jp +nanjo.okinawa.jp +nishihara.okinawa.jp +ogimi.okinawa.jp +okinawa.okinawa.jp +onna.okinawa.jp +shimoji.okinawa.jp +taketomi.okinawa.jp +tarama.okinawa.jp +tokashiki.okinawa.jp +tomigusuku.okinawa.jp +tonaki.okinawa.jp +urasoe.okinawa.jp +uruma.okinawa.jp +yaese.okinawa.jp +yomitan.okinawa.jp +yonabaru.okinawa.jp +yonaguni.okinawa.jp +zamami.okinawa.jp +abeno.osaka.jp +chihayaakasaka.osaka.jp +chuo.osaka.jp +daito.osaka.jp +fujiidera.osaka.jp +habikino.osaka.jp +hannan.osaka.jp +higashiosaka.osaka.jp +higashisumiyoshi.osaka.jp +higashiyodogawa.osaka.jp +hirakata.osaka.jp +ibaraki.osaka.jp +ikeda.osaka.jp +izumi.osaka.jp +izumiotsu.osaka.jp +izumisano.osaka.jp +kadoma.osaka.jp +kaizuka.osaka.jp +kanan.osaka.jp +kashiwara.osaka.jp +katano.osaka.jp +kawachinagano.osaka.jp +kishiwada.osaka.jp +kita.osaka.jp +kumatori.osaka.jp +matsubara.osaka.jp +minato.osaka.jp +minoh.osaka.jp +misaki.osaka.jp +moriguchi.osaka.jp +neyagawa.osaka.jp +nishi.osaka.jp +nose.osaka.jp +osakasayama.osaka.jp +sakai.osaka.jp +sayama.osaka.jp +sennan.osaka.jp +settsu.osaka.jp +shijonawate.osaka.jp +shimamoto.osaka.jp +suita.osaka.jp +tadaoka.osaka.jp +taishi.osaka.jp +tajiri.osaka.jp +takaishi.osaka.jp +takatsuki.osaka.jp +tondabayashi.osaka.jp +toyonaka.osaka.jp +toyono.osaka.jp +yao.osaka.jp +ariake.saga.jp +arita.saga.jp +fukudomi.saga.jp +genkai.saga.jp +hamatama.saga.jp 
+hizen.saga.jp +imari.saga.jp +kamimine.saga.jp +kanzaki.saga.jp +karatsu.saga.jp +kashima.saga.jp +kitagata.saga.jp +kitahata.saga.jp +kiyama.saga.jp +kouhoku.saga.jp +kyuragi.saga.jp +nishiarita.saga.jp +ogi.saga.jp +omachi.saga.jp +ouchi.saga.jp +saga.saga.jp +shiroishi.saga.jp +taku.saga.jp +tara.saga.jp +tosu.saga.jp +yoshinogari.saga.jp +arakawa.saitama.jp +asaka.saitama.jp +chichibu.saitama.jp +fujimi.saitama.jp +fujimino.saitama.jp +fukaya.saitama.jp +hanno.saitama.jp +hanyu.saitama.jp +hasuda.saitama.jp +hatogaya.saitama.jp +hatoyama.saitama.jp +hidaka.saitama.jp +higashichichibu.saitama.jp +higashimatsuyama.saitama.jp +honjo.saitama.jp +ina.saitama.jp +iruma.saitama.jp +iwatsuki.saitama.jp +kamiizumi.saitama.jp +kamikawa.saitama.jp +kamisato.saitama.jp +kasukabe.saitama.jp +kawagoe.saitama.jp +kawaguchi.saitama.jp +kawajima.saitama.jp +kazo.saitama.jp +kitamoto.saitama.jp +koshigaya.saitama.jp +kounosu.saitama.jp +kuki.saitama.jp +kumagaya.saitama.jp +matsubushi.saitama.jp +minano.saitama.jp +misato.saitama.jp +miyashiro.saitama.jp +miyoshi.saitama.jp +moroyama.saitama.jp +nagatoro.saitama.jp +namegawa.saitama.jp +niiza.saitama.jp +ogano.saitama.jp +ogawa.saitama.jp +ogose.saitama.jp +okegawa.saitama.jp +omiya.saitama.jp +otaki.saitama.jp +ranzan.saitama.jp +ryokami.saitama.jp +saitama.saitama.jp +sakado.saitama.jp +satte.saitama.jp +sayama.saitama.jp +shiki.saitama.jp +shiraoka.saitama.jp +soka.saitama.jp +sugito.saitama.jp +toda.saitama.jp +tokigawa.saitama.jp +tokorozawa.saitama.jp +tsurugashima.saitama.jp +urawa.saitama.jp +warabi.saitama.jp +yashio.saitama.jp +yokoze.saitama.jp +yono.saitama.jp +yorii.saitama.jp +yoshida.saitama.jp +yoshikawa.saitama.jp +yoshimi.saitama.jp +aisho.shiga.jp +gamo.shiga.jp +higashiomi.shiga.jp +hikone.shiga.jp +koka.shiga.jp +konan.shiga.jp +kosei.shiga.jp +koto.shiga.jp +kusatsu.shiga.jp +maibara.shiga.jp +moriyama.shiga.jp +nagahama.shiga.jp +nishiazai.shiga.jp +notogawa.shiga.jp +omihachiman.shiga.jp +otsu.shiga.jp +ritto.shiga.jp +ryuoh.shiga.jp +takashima.shiga.jp +takatsuki.shiga.jp +torahime.shiga.jp +toyosato.shiga.jp +yasu.shiga.jp +akagi.shimane.jp +ama.shimane.jp +gotsu.shimane.jp +hamada.shimane.jp +higashiizumo.shimane.jp +hikawa.shimane.jp +hikimi.shimane.jp +izumo.shimane.jp +kakinoki.shimane.jp +masuda.shimane.jp +matsue.shimane.jp +misato.shimane.jp +nishinoshima.shimane.jp +ohda.shimane.jp +okinoshima.shimane.jp +okuizumo.shimane.jp +shimane.shimane.jp +tamayu.shimane.jp +tsuwano.shimane.jp +unnan.shimane.jp +yakumo.shimane.jp +yasugi.shimane.jp +yatsuka.shimane.jp +arai.shizuoka.jp +atami.shizuoka.jp +fuji.shizuoka.jp +fujieda.shizuoka.jp +fujikawa.shizuoka.jp +fujinomiya.shizuoka.jp +fukuroi.shizuoka.jp +gotemba.shizuoka.jp +haibara.shizuoka.jp +hamamatsu.shizuoka.jp +higashiizu.shizuoka.jp +ito.shizuoka.jp +iwata.shizuoka.jp +izu.shizuoka.jp +izunokuni.shizuoka.jp +kakegawa.shizuoka.jp +kannami.shizuoka.jp +kawanehon.shizuoka.jp +kawazu.shizuoka.jp +kikugawa.shizuoka.jp +kosai.shizuoka.jp +makinohara.shizuoka.jp +matsuzaki.shizuoka.jp +minamiizu.shizuoka.jp +mishima.shizuoka.jp +morimachi.shizuoka.jp +nishiizu.shizuoka.jp +numazu.shizuoka.jp +omaezaki.shizuoka.jp +shimada.shizuoka.jp +shimizu.shizuoka.jp +shimoda.shizuoka.jp +shizuoka.shizuoka.jp +susono.shizuoka.jp +yaizu.shizuoka.jp +yoshida.shizuoka.jp +ashikaga.tochigi.jp +bato.tochigi.jp +haga.tochigi.jp +ichikai.tochigi.jp +iwafune.tochigi.jp +kaminokawa.tochigi.jp +kanuma.tochigi.jp +karasuyama.tochigi.jp +kuroiso.tochigi.jp +mashiko.tochigi.jp +mibu.tochigi.jp 
+moka.tochigi.jp +motegi.tochigi.jp +nasu.tochigi.jp +nasushiobara.tochigi.jp +nikko.tochigi.jp +nishikata.tochigi.jp +nogi.tochigi.jp +ohira.tochigi.jp +ohtawara.tochigi.jp +oyama.tochigi.jp +sakura.tochigi.jp +sano.tochigi.jp +shimotsuke.tochigi.jp +shioya.tochigi.jp +takanezawa.tochigi.jp +tochigi.tochigi.jp +tsuga.tochigi.jp +ujiie.tochigi.jp +utsunomiya.tochigi.jp +yaita.tochigi.jp +aizumi.tokushima.jp +anan.tokushima.jp +ichiba.tokushima.jp +itano.tokushima.jp +kainan.tokushima.jp +komatsushima.tokushima.jp +matsushige.tokushima.jp +mima.tokushima.jp +minami.tokushima.jp +miyoshi.tokushima.jp +mugi.tokushima.jp +nakagawa.tokushima.jp +naruto.tokushima.jp +sanagochi.tokushima.jp +shishikui.tokushima.jp +tokushima.tokushima.jp +wajiki.tokushima.jp +adachi.tokyo.jp +akiruno.tokyo.jp +akishima.tokyo.jp +aogashima.tokyo.jp +arakawa.tokyo.jp +bunkyo.tokyo.jp +chiyoda.tokyo.jp +chofu.tokyo.jp +chuo.tokyo.jp +edogawa.tokyo.jp +fuchu.tokyo.jp +fussa.tokyo.jp +hachijo.tokyo.jp +hachioji.tokyo.jp +hamura.tokyo.jp +higashikurume.tokyo.jp +higashimurayama.tokyo.jp +higashiyamato.tokyo.jp +hino.tokyo.jp +hinode.tokyo.jp +hinohara.tokyo.jp +inagi.tokyo.jp +itabashi.tokyo.jp +katsushika.tokyo.jp +kita.tokyo.jp +kiyose.tokyo.jp +kodaira.tokyo.jp +koganei.tokyo.jp +kokubunji.tokyo.jp +komae.tokyo.jp +koto.tokyo.jp +kouzushima.tokyo.jp +kunitachi.tokyo.jp +machida.tokyo.jp +meguro.tokyo.jp +minato.tokyo.jp +mitaka.tokyo.jp +mizuho.tokyo.jp +musashimurayama.tokyo.jp +musashino.tokyo.jp +nakano.tokyo.jp +nerima.tokyo.jp +ogasawara.tokyo.jp +okutama.tokyo.jp +ome.tokyo.jp +oshima.tokyo.jp +ota.tokyo.jp +setagaya.tokyo.jp +shibuya.tokyo.jp +shinagawa.tokyo.jp +shinjuku.tokyo.jp +suginami.tokyo.jp +sumida.tokyo.jp +tachikawa.tokyo.jp +taito.tokyo.jp +tama.tokyo.jp +toshima.tokyo.jp +chizu.tottori.jp +hino.tottori.jp +kawahara.tottori.jp +koge.tottori.jp +kotoura.tottori.jp +misasa.tottori.jp +nanbu.tottori.jp +nichinan.tottori.jp +sakaiminato.tottori.jp +tottori.tottori.jp +wakasa.tottori.jp +yazu.tottori.jp +yonago.tottori.jp +asahi.toyama.jp +fuchu.toyama.jp +fukumitsu.toyama.jp +funahashi.toyama.jp +himi.toyama.jp +imizu.toyama.jp +inami.toyama.jp +johana.toyama.jp +kamiichi.toyama.jp +kurobe.toyama.jp +nakaniikawa.toyama.jp +namerikawa.toyama.jp +nanto.toyama.jp +nyuzen.toyama.jp +oyabe.toyama.jp +taira.toyama.jp +takaoka.toyama.jp +tateyama.toyama.jp +toga.toyama.jp +tonami.toyama.jp +toyama.toyama.jp +unazuki.toyama.jp +uozu.toyama.jp +yamada.toyama.jp +arida.wakayama.jp +aridagawa.wakayama.jp +gobo.wakayama.jp +hashimoto.wakayama.jp +hidaka.wakayama.jp +hirogawa.wakayama.jp +inami.wakayama.jp +iwade.wakayama.jp +kainan.wakayama.jp +kamitonda.wakayama.jp +katsuragi.wakayama.jp +kimino.wakayama.jp +kinokawa.wakayama.jp +kitayama.wakayama.jp +koya.wakayama.jp +koza.wakayama.jp +kozagawa.wakayama.jp +kudoyama.wakayama.jp +kushimoto.wakayama.jp +mihama.wakayama.jp +misato.wakayama.jp +nachikatsuura.wakayama.jp +shingu.wakayama.jp +shirahama.wakayama.jp +taiji.wakayama.jp +tanabe.wakayama.jp +wakayama.wakayama.jp +yuasa.wakayama.jp +yura.wakayama.jp +asahi.yamagata.jp +funagata.yamagata.jp +higashine.yamagata.jp +iide.yamagata.jp +kahoku.yamagata.jp +kaminoyama.yamagata.jp +kaneyama.yamagata.jp +kawanishi.yamagata.jp +mamurogawa.yamagata.jp +mikawa.yamagata.jp +murayama.yamagata.jp +nagai.yamagata.jp +nakayama.yamagata.jp +nanyo.yamagata.jp +nishikawa.yamagata.jp +obanazawa.yamagata.jp +oe.yamagata.jp +oguni.yamagata.jp +ohkura.yamagata.jp +oishida.yamagata.jp +sagae.yamagata.jp +sakata.yamagata.jp 
+sakegawa.yamagata.jp +shinjo.yamagata.jp +shirataka.yamagata.jp +shonai.yamagata.jp +takahata.yamagata.jp +tendo.yamagata.jp +tozawa.yamagata.jp +tsuruoka.yamagata.jp +yamagata.yamagata.jp +yamanobe.yamagata.jp +yonezawa.yamagata.jp +yuza.yamagata.jp +abu.yamaguchi.jp +hagi.yamaguchi.jp +hikari.yamaguchi.jp +hofu.yamaguchi.jp +iwakuni.yamaguchi.jp +kudamatsu.yamaguchi.jp +mitou.yamaguchi.jp +nagato.yamaguchi.jp +oshima.yamaguchi.jp +shimonoseki.yamaguchi.jp +shunan.yamaguchi.jp +tabuse.yamaguchi.jp +tokuyama.yamaguchi.jp +toyota.yamaguchi.jp +ube.yamaguchi.jp +yuu.yamaguchi.jp +chuo.yamanashi.jp +doshi.yamanashi.jp +fuefuki.yamanashi.jp +fujikawa.yamanashi.jp +fujikawaguchiko.yamanashi.jp +fujiyoshida.yamanashi.jp +hayakawa.yamanashi.jp +hokuto.yamanashi.jp +ichikawamisato.yamanashi.jp +kai.yamanashi.jp +kofu.yamanashi.jp +koshu.yamanashi.jp +kosuge.yamanashi.jp +minami-alps.yamanashi.jp +minobu.yamanashi.jp +nakamichi.yamanashi.jp +nanbu.yamanashi.jp +narusawa.yamanashi.jp +nirasaki.yamanashi.jp +nishikatsura.yamanashi.jp +oshino.yamanashi.jp +otsuki.yamanashi.jp +showa.yamanashi.jp +tabayama.yamanashi.jp +tsuru.yamanashi.jp +uenohara.yamanashi.jp +yamanakako.yamanashi.jp +yamanashi.yamanashi.jp + +// ke : http://www.kenic.or.ke/index.php/en/ke-domains/ke-domains +ke +ac.ke +co.ke +go.ke +info.ke +me.ke +mobi.ke +ne.ke +or.ke +sc.ke + +// kg : http://www.domain.kg/dmn_n.html +kg +org.kg +net.kg +com.kg +edu.kg +gov.kg +mil.kg + +// kh : http://www.mptc.gov.kh/dns_registration.htm +*.kh + +// ki : http://www.ki/dns/index.html +ki +edu.ki +biz.ki +net.ki +org.ki +gov.ki +info.ki +com.ki + +// km : https://en.wikipedia.org/wiki/.km +// http://www.domaine.km/documents/charte.doc +km +org.km +nom.km +gov.km +prd.km +tm.km +edu.km +mil.km +ass.km +com.km +// These are only mentioned as proposed suggestions at domaine.km, but +// https://en.wikipedia.org/wiki/.km says they're available for registration: +coop.km +asso.km +presse.km +medecin.km +notaires.km +pharmaciens.km +veterinaire.km +gouv.km + +// kn : https://en.wikipedia.org/wiki/.kn +// http://www.dot.kn/domainRules.html +kn +net.kn +org.kn +edu.kn +gov.kn + +// kp : http://www.kcce.kp/en_index.php +kp +com.kp +edu.kp +gov.kp +org.kp +rep.kp +tra.kp + +// kr : https://en.wikipedia.org/wiki/.kr +// see also: http://domain.nida.or.kr/eng/registration.jsp +kr +ac.kr +co.kr +es.kr +go.kr +hs.kr +kg.kr +mil.kr +ms.kr +ne.kr +or.kr +pe.kr +re.kr +sc.kr +// kr geographical names +busan.kr +chungbuk.kr +chungnam.kr +daegu.kr +daejeon.kr +gangwon.kr +gwangju.kr +gyeongbuk.kr +gyeonggi.kr +gyeongnam.kr +incheon.kr +jeju.kr +jeonbuk.kr +jeonnam.kr +seoul.kr +ulsan.kr + +// kw : https://www.nic.kw/policies/ +// Confirmed by registry +kw +com.kw +edu.kw +emb.kw +gov.kw +ind.kw +net.kw +org.kw + +// ky : http://www.icta.ky/da_ky_reg_dom.php +// Confirmed by registry 2008-06-17 +ky +edu.ky +gov.ky +com.ky +org.ky +net.ky + +// kz : https://en.wikipedia.org/wiki/.kz +// see also: http://www.nic.kz/rules/index.jsp +kz +org.kz +edu.kz +net.kz +gov.kz +mil.kz +com.kz + +// la : https://en.wikipedia.org/wiki/.la +// Submitted by registry +la +int.la +net.la +info.la +edu.la +gov.la +per.la +com.la +org.la + +// lb : https://en.wikipedia.org/wiki/.lb +// Submitted by registry +lb +com.lb +edu.lb +gov.lb +net.lb +org.lb + +// lc : https://en.wikipedia.org/wiki/.lc +// see also: http://www.nic.lc/rules.htm +lc +com.lc +net.lc +co.lc +org.lc +edu.lc +gov.lc + +// li : https://en.wikipedia.org/wiki/.li +li + +// lk : http://www.nic.lk/seclevpr.html +lk +gov.lk 
+sch.lk +net.lk +int.lk +com.lk +org.lk +edu.lk +ngo.lk +soc.lk +web.lk +ltd.lk +assn.lk +grp.lk +hotel.lk +ac.lk + +// lr : http://psg.com/dns/lr/lr.txt +// Submitted by registry +lr +com.lr +edu.lr +gov.lr +org.lr +net.lr + +// ls : http://www.nic.ls/ +// Confirmed by registry +ls +ac.ls +biz.ls +co.ls +edu.ls +gov.ls +info.ls +net.ls +org.ls +sc.ls + +// lt : https://en.wikipedia.org/wiki/.lt +lt +// gov.lt : http://www.gov.lt/index_en.php +gov.lt + +// lu : http://www.dns.lu/en/ +lu + +// lv : http://www.nic.lv/DNS/En/generic.php +lv +com.lv +edu.lv +gov.lv +org.lv +mil.lv +id.lv +net.lv +asn.lv +conf.lv + +// ly : http://www.nic.ly/regulations.php +ly +com.ly +net.ly +gov.ly +plc.ly +edu.ly +sch.ly +med.ly +org.ly +id.ly + +// ma : https://en.wikipedia.org/wiki/.ma +// http://www.anrt.ma/fr/admin/download/upload/file_fr782.pdf +ma +co.ma +net.ma +gov.ma +org.ma +ac.ma +press.ma + +// mc : http://www.nic.mc/ +mc +tm.mc +asso.mc + +// md : https://en.wikipedia.org/wiki/.md +md + +// me : https://en.wikipedia.org/wiki/.me +me +co.me +net.me +org.me +edu.me +ac.me +gov.me +its.me +priv.me + +// mg : http://nic.mg/nicmg/?page_id=39 +mg +org.mg +nom.mg +gov.mg +prd.mg +tm.mg +edu.mg +mil.mg +com.mg +co.mg + +// mh : https://en.wikipedia.org/wiki/.mh +mh + +// mil : https://en.wikipedia.org/wiki/.mil +mil + +// mk : https://en.wikipedia.org/wiki/.mk +// see also: http://dns.marnet.net.mk/postapka.php +mk +com.mk +org.mk +net.mk +edu.mk +gov.mk +inf.mk +name.mk + +// ml : http://www.gobin.info/domainname/ml-template.doc +// see also: https://en.wikipedia.org/wiki/.ml +ml +com.ml +edu.ml +gouv.ml +gov.ml +net.ml +org.ml +presse.ml + +// mm : https://en.wikipedia.org/wiki/.mm +*.mm + +// mn : https://en.wikipedia.org/wiki/.mn +mn +gov.mn +edu.mn +org.mn + +// mo : http://www.monic.net.mo/ +mo +com.mo +net.mo +org.mo +edu.mo +gov.mo + +// mobi : https://en.wikipedia.org/wiki/.mobi +mobi + +// mp : http://www.dot.mp/ +// Confirmed by registry 2008-06-17 +mp + +// mq : https://en.wikipedia.org/wiki/.mq +mq + +// mr : https://en.wikipedia.org/wiki/.mr +mr +gov.mr + +// ms : http://www.nic.ms/pdf/MS_Domain_Name_Rules.pdf +ms +com.ms +edu.ms +gov.ms +net.ms +org.ms + +// mt : https://www.nic.org.mt/go/policy +// Submitted by registry +mt +com.mt +edu.mt +net.mt +org.mt + +// mu : https://en.wikipedia.org/wiki/.mu +mu +com.mu +net.mu +org.mu +gov.mu +ac.mu +co.mu +or.mu + +// museum : http://about.museum/naming/ +// http://index.museum/ +museum +academy.museum +agriculture.museum +air.museum +airguard.museum +alabama.museum +alaska.museum +amber.museum +ambulance.museum +american.museum +americana.museum +americanantiques.museum +americanart.museum +amsterdam.museum +and.museum +annefrank.museum +anthro.museum +anthropology.museum +antiques.museum +aquarium.museum +arboretum.museum +archaeological.museum +archaeology.museum +architecture.museum +art.museum +artanddesign.museum +artcenter.museum +artdeco.museum +arteducation.museum +artgallery.museum +arts.museum +artsandcrafts.museum +asmatart.museum +assassination.museum +assisi.museum +association.museum +astronomy.museum +atlanta.museum +austin.museum +australia.museum +automotive.museum +aviation.museum +axis.museum +badajoz.museum +baghdad.museum +bahn.museum +bale.museum +baltimore.museum +barcelona.museum +baseball.museum +basel.museum +baths.museum +bauern.museum +beauxarts.museum +beeldengeluid.museum +bellevue.museum +bergbau.museum +berkeley.museum +berlin.museum +bern.museum +bible.museum +bilbao.museum +bill.museum +birdart.museum 
+birthplace.museum +bonn.museum +boston.museum +botanical.museum +botanicalgarden.museum +botanicgarden.museum +botany.museum +brandywinevalley.museum +brasil.museum +bristol.museum +british.museum +britishcolumbia.museum +broadcast.museum +brunel.museum +brussel.museum +brussels.museum +bruxelles.museum +building.museum +burghof.museum +bus.museum +bushey.museum +cadaques.museum +california.museum +cambridge.museum +can.museum +canada.museum +capebreton.museum +carrier.museum +cartoonart.museum +casadelamoneda.museum +castle.museum +castres.museum +celtic.museum +center.museum +chattanooga.museum +cheltenham.museum +chesapeakebay.museum +chicago.museum +children.museum +childrens.museum +childrensgarden.museum +chiropractic.museum +chocolate.museum +christiansburg.museum +cincinnati.museum +cinema.museum +circus.museum +civilisation.museum +civilization.museum +civilwar.museum +clinton.museum +clock.museum +coal.museum +coastaldefence.museum +cody.museum +coldwar.museum +collection.museum +colonialwilliamsburg.museum +coloradoplateau.museum +columbia.museum +columbus.museum +communication.museum +communications.museum +community.museum +computer.museum +computerhistory.museum +comunicações.museum +contemporary.museum +contemporaryart.museum +convent.museum +copenhagen.museum +corporation.museum +correios-e-telecomunicações.museum +corvette.museum +costume.museum +countryestate.museum +county.museum +crafts.museum +cranbrook.museum +creation.museum +cultural.museum +culturalcenter.museum +culture.museum +cyber.museum +cymru.museum +dali.museum +dallas.museum +database.museum +ddr.museum +decorativearts.museum +delaware.museum +delmenhorst.museum +denmark.museum +depot.museum +design.museum +detroit.museum +dinosaur.museum +discovery.museum +dolls.museum +donostia.museum +durham.museum +eastafrica.museum +eastcoast.museum +education.museum +educational.museum +egyptian.museum +eisenbahn.museum +elburg.museum +elvendrell.museum +embroidery.museum +encyclopedic.museum +england.museum +entomology.museum +environment.museum +environmentalconservation.museum +epilepsy.museum +essex.museum +estate.museum +ethnology.museum +exeter.museum +exhibition.museum +family.museum +farm.museum +farmequipment.museum +farmers.museum +farmstead.museum +field.museum +figueres.museum +filatelia.museum +film.museum +fineart.museum +finearts.museum +finland.museum +flanders.museum +florida.museum +force.museum +fortmissoula.museum +fortworth.museum +foundation.museum +francaise.museum +frankfurt.museum +franziskaner.museum +freemasonry.museum +freiburg.museum +fribourg.museum +frog.museum +fundacio.museum +furniture.museum +gallery.museum +garden.museum +gateway.museum +geelvinck.museum +gemological.museum +geology.museum +georgia.museum +giessen.museum +glas.museum +glass.museum +gorge.museum +grandrapids.museum +graz.museum +guernsey.museum +halloffame.museum +hamburg.museum +handson.museum +harvestcelebration.museum +hawaii.museum +health.museum +heimatunduhren.museum +hellas.museum +helsinki.museum +hembygdsforbund.museum +heritage.museum +histoire.museum +historical.museum +historicalsociety.museum +historichouses.museum +historisch.museum +historisches.museum +history.museum +historyofscience.museum +horology.museum +house.museum +humanities.museum +illustration.museum +imageandsound.museum +indian.museum +indiana.museum +indianapolis.museum +indianmarket.museum +intelligence.museum +interactive.museum +iraq.museum +iron.museum +isleofman.museum +jamison.museum +jefferson.museum +jerusalem.museum
+jewelry.museum +jewish.museum +jewishart.museum +jfk.museum +journalism.museum +judaica.museum +judygarland.museum +juedisches.museum +juif.museum +karate.museum +karikatur.museum +kids.museum +koebenhavn.museum +koeln.museum +kunst.museum +kunstsammlung.museum +kunstunddesign.museum +labor.museum +labour.museum +lajolla.museum +lancashire.museum +landes.museum +lans.museum +läns.museum +larsson.museum +lewismiller.museum +lincoln.museum +linz.museum +living.museum +livinghistory.museum +localhistory.museum +london.museum +losangeles.museum +louvre.museum +loyalist.museum +lucerne.museum +luxembourg.museum +luzern.museum +mad.museum +madrid.museum +mallorca.museum +manchester.museum +mansion.museum +mansions.museum +manx.museum +marburg.museum +maritime.museum +maritimo.museum +maryland.museum +marylhurst.museum +media.museum +medical.museum +medizinhistorisches.museum +meeres.museum +memorial.museum +mesaverde.museum +michigan.museum +midatlantic.museum +military.museum +mill.museum +miners.museum +mining.museum +minnesota.museum +missile.museum +missoula.museum +modern.museum +moma.museum +money.museum +monmouth.museum +monticello.museum +montreal.museum +moscow.museum +motorcycle.museum +muenchen.museum +muenster.museum +mulhouse.museum +muncie.museum +museet.museum +museumcenter.museum +museumvereniging.museum +music.museum +national.museum +nationalfirearms.museum +nationalheritage.museum +nativeamerican.museum +naturalhistory.museum +naturalhistorymuseum.museum +naturalsciences.museum +nature.museum +naturhistorisches.museum +natuurwetenschappen.museum +naumburg.museum +naval.museum +nebraska.museum +neues.museum +newhampshire.museum +newjersey.museum +newmexico.museum +newport.museum +newspaper.museum +newyork.museum +niepce.museum +norfolk.museum +north.museum +nrw.museum +nuernberg.museum +nuremberg.museum +nyc.museum +nyny.museum +oceanographic.museum +oceanographique.museum +omaha.museum +online.museum +ontario.museum +openair.museum +oregon.museum +oregontrail.museum +otago.museum +oxford.museum +pacific.museum +paderborn.museum +palace.museum +paleo.museum +palmsprings.museum +panama.museum +paris.museum +pasadena.museum +pharmacy.museum +philadelphia.museum +philadelphiaarea.museum +philately.museum +phoenix.museum +photography.museum +pilots.museum +pittsburgh.museum +planetarium.museum +plantation.museum +plants.museum +plaza.museum +portal.museum +portland.museum +portlligat.museum +posts-and-telecommunications.museum +preservation.museum +presidio.museum +press.museum +project.museum +public.museum +pubol.museum +quebec.museum +railroad.museum +railway.museum +research.museum +resistance.museum +riodejaneiro.museum +rochester.museum +rockart.museum +roma.museum +russia.museum +saintlouis.museum +salem.museum +salvadordali.museum +salzburg.museum +sandiego.museum +sanfrancisco.museum +santabarbara.museum +santacruz.museum +santafe.museum +saskatchewan.museum +satx.museum +savannahga.museum +schlesisches.museum +schoenbrunn.museum +schokoladen.museum +school.museum +schweiz.museum +science.museum +scienceandhistory.museum +scienceandindustry.museum +sciencecenter.museum +sciencecenters.museum +science-fiction.museum +sciencehistory.museum +sciences.museum +sciencesnaturelles.museum +scotland.museum +seaport.museum +settlement.museum +settlers.museum +shell.museum +sherbrooke.museum +sibenik.museum +silk.museum +ski.museum +skole.museum +society.museum +sologne.museum +soundandvision.museum +southcarolina.museum +southwest.museum +space.museum +spy.museum +square.museum 
+stadt.museum +stalbans.museum +starnberg.museum +state.museum +stateofdelaware.museum +station.museum +steam.museum +steiermark.museum +stjohn.museum +stockholm.museum +stpetersburg.museum +stuttgart.museum +suisse.museum +surgeonshall.museum +surrey.museum +svizzera.museum +sweden.museum +sydney.museum +tank.museum +tcm.museum +technology.museum +telekommunikation.museum +television.museum +texas.museum +textile.museum +theater.museum +time.museum +timekeeping.museum +topology.museum +torino.museum +touch.museum +town.museum +transport.museum +tree.museum +trolley.museum +trust.museum +trustee.museum +uhren.museum +ulm.museum +undersea.museum +university.museum +usa.museum +usantiques.museum +usarts.museum +uscountryestate.museum +usculture.museum +usdecorativearts.museum +usgarden.museum +ushistory.museum +ushuaia.museum +uslivinghistory.museum +utah.museum +uvic.museum +valley.museum +vantaa.museum +versailles.museum +viking.museum +village.museum +virginia.museum +virtual.museum +virtuel.museum +vlaanderen.museum +volkenkunde.museum +wales.museum +wallonie.museum +war.museum +washingtondc.museum +watchandclock.museum +watch-and-clock.museum +western.museum +westfalen.museum +whaling.museum +wildlife.museum +williamsburg.museum +windmill.museum +workshop.museum +york.museum +yorkshire.museum +yosemite.museum +youth.museum +zoological.museum +zoology.museum +ירושלים.museum +иком.museum + +// mv : https://en.wikipedia.org/wiki/.mv +// "mv" included because, contra Wikipedia, google.mv exists. +mv +aero.mv +biz.mv +com.mv +coop.mv +edu.mv +gov.mv +info.mv +int.mv +mil.mv +museum.mv +name.mv +net.mv +org.mv +pro.mv + +// mw : http://www.registrar.mw/ +mw +ac.mw +biz.mw +co.mw +com.mw +coop.mw +edu.mw +gov.mw +int.mw +museum.mw +net.mw +org.mw + +// mx : http://www.nic.mx/ +// Submitted by registry +mx +com.mx +org.mx +gob.mx +edu.mx +net.mx + +// my : http://www.mynic.net.my/ +my +com.my +net.my +org.my +gov.my +edu.my +mil.my +name.my + +// mz : http://www.uem.mz/ +// Submitted by registry +mz +ac.mz +adv.mz +co.mz +edu.mz +gov.mz +mil.mz +net.mz +org.mz + +// na : http://www.na-nic.com.na/ +// http://www.info.na/domain/ +na +info.na +pro.na +name.na +school.na +or.na +dr.na +us.na +mx.na +ca.na +in.na +cc.na +tv.na +ws.na +mobi.na +co.na +com.na +org.na + +// name : has 2nd-level tlds, but there's no list of them +name + +// nc : http://www.cctld.nc/ +nc +asso.nc +nom.nc + +// ne : https://en.wikipedia.org/wiki/.ne +ne + +// net : https://en.wikipedia.org/wiki/.net +net + +// nf : https://en.wikipedia.org/wiki/.nf +nf +com.nf +net.nf +per.nf +rec.nf +web.nf +arts.nf +firm.nf +info.nf +other.nf +store.nf + +// ng : http://www.nira.org.ng/index.php/join-us/register-ng-domain/189-nira-slds +ng +com.ng +edu.ng +gov.ng +i.ng +mil.ng +mobi.ng +name.ng +net.ng +org.ng +sch.ng + +// ni : http://www.nic.ni/ +ni +ac.ni +biz.ni +co.ni +com.ni +edu.ni +gob.ni +in.ni +info.ni +int.ni +mil.ni +net.ni +nom.ni +org.ni +web.ni + +// nl : https://en.wikipedia.org/wiki/.nl +// https://www.sidn.nl/ +// ccTLD for the Netherlands +nl + +// no : http://www.norid.no/regelverk/index.en.html +// The Norwegian registry has declined to notify us of updates. The web pages +// referenced below are the official source of the data.
There is also an +// announce mailing list: +// https://postlister.uninett.no/sympa/info/norid-diskusjon +no +// Norid generic domains : http://www.norid.no/regelverk/vedlegg-c.en.html +fhs.no +vgs.no +fylkesbibl.no +folkebibl.no +museum.no +idrett.no +priv.no +// Non-Norid generic domains : http://www.norid.no/regelverk/vedlegg-d.en.html +mil.no +stat.no +dep.no +kommune.no +herad.no +// no geographical names : http://www.norid.no/regelverk/vedlegg-b.en.html +// counties +aa.no +ah.no +bu.no +fm.no +hl.no +hm.no +jan-mayen.no +mr.no +nl.no +nt.no +of.no +ol.no +oslo.no +rl.no +sf.no +st.no +svalbard.no +tm.no +tr.no +va.no +vf.no +// primary and lower secondary schools per county +gs.aa.no +gs.ah.no +gs.bu.no +gs.fm.no +gs.hl.no +gs.hm.no +gs.jan-mayen.no +gs.mr.no +gs.nl.no +gs.nt.no +gs.of.no +gs.ol.no +gs.oslo.no +gs.rl.no +gs.sf.no +gs.st.no +gs.svalbard.no +gs.tm.no +gs.tr.no +gs.va.no +gs.vf.no +// cities +akrehamn.no +åkrehamn.no +algard.no +ålgård.no +arna.no +brumunddal.no +bryne.no +bronnoysund.no +brønnøysund.no +drobak.no +drøbak.no +egersund.no +fetsund.no +floro.no +florø.no +fredrikstad.no +hokksund.no +honefoss.no +hønefoss.no +jessheim.no +jorpeland.no +jørpeland.no +kirkenes.no +kopervik.no +krokstadelva.no +langevag.no +langevåg.no +leirvik.no +mjondalen.no +mjøndalen.no +mo-i-rana.no +mosjoen.no +mosjøen.no +nesoddtangen.no +orkanger.no +osoyro.no +osøyro.no +raholt.no +råholt.no +sandnessjoen.no +sandnessjøen.no +skedsmokorset.no +slattum.no +spjelkavik.no +stathelle.no +stavern.no +stjordalshalsen.no +stjørdalshalsen.no +tananger.no +tranby.no +vossevangen.no +// communities +afjord.no +åfjord.no +agdenes.no +al.no +ål.no +alesund.no +ålesund.no +alstahaug.no +alta.no +áltá.no +alaheadju.no +álaheadju.no +alvdal.no +amli.no +åmli.no +amot.no +åmot.no +andebu.no +andoy.no +andøy.no +andasuolo.no +ardal.no +årdal.no +aremark.no +arendal.no +ås.no +aseral.no +åseral.no +asker.no +askim.no +askvoll.no +askoy.no +askøy.no +asnes.no +åsnes.no +audnedaln.no +aukra.no +aure.no +aurland.no +aurskog-holand.no +aurskog-høland.no +austevoll.no +austrheim.no +averoy.no +averøy.no +balestrand.no +ballangen.no +balat.no +bálát.no +balsfjord.no +bahccavuotna.no +báhccavuotna.no +bamble.no +bardu.no +beardu.no +beiarn.no +bajddar.no +bájddar.no +baidar.no +báidár.no +berg.no +bergen.no +berlevag.no +berlevåg.no +bearalvahki.no +bearalváhki.no +bindal.no +birkenes.no +bjarkoy.no +bjarkøy.no +bjerkreim.no +bjugn.no +bodo.no +bodø.no +badaddja.no +bådåddjå.no +budejju.no +bokn.no +bremanger.no +bronnoy.no +brønnøy.no +bygland.no +bykle.no +barum.no +bærum.no +bo.telemark.no +bø.telemark.no +bo.nordland.no +bø.nordland.no +bievat.no +bievát.no +bomlo.no +bømlo.no +batsfjord.no +båtsfjord.no +bahcavuotna.no +báhcavuotna.no +dovre.no +drammen.no +drangedal.no +dyroy.no +dyrøy.no +donna.no +dønna.no +eid.no +eidfjord.no +eidsberg.no +eidskog.no +eidsvoll.no +eigersund.no +elverum.no +enebakk.no +engerdal.no +etne.no +etnedal.no +evenes.no +evenassi.no +evenášši.no +evje-og-hornnes.no +farsund.no +fauske.no +fuossko.no +fuoisku.no +fedje.no +fet.no +finnoy.no +finnøy.no +fitjar.no +fjaler.no +fjell.no +flakstad.no +flatanger.no +flekkefjord.no +flesberg.no +flora.no +fla.no +flå.no +folldal.no +forsand.no +fosnes.no +frei.no +frogn.no +froland.no +frosta.no +frana.no +fræna.no +froya.no +frøya.no +fusa.no +fyresdal.no +forde.no +førde.no +gamvik.no +gangaviika.no +gáŋgaviika.no +gaular.no +gausdal.no +gildeskal.no +gildeskål.no +giske.no +gjemnes.no
+gjerdrum.no +gjerstad.no +gjesdal.no +gjovik.no +gjøvik.no +gloppen.no +gol.no +gran.no +grane.no +granvin.no +gratangen.no +grimstad.no +grong.no +kraanghke.no +kråanghke.no +grue.no +gulen.no +hadsel.no +halden.no +halsa.no +hamar.no +hamaroy.no +habmer.no +hábmer.no +hapmir.no +hápmir.no +hammerfest.no +hammarfeasta.no +hámmárfeasta.no +haram.no +hareid.no +harstad.no +hasvik.no +aknoluokta.no +ákŋoluokta.no +hattfjelldal.no +aarborte.no +haugesund.no +hemne.no +hemnes.no +hemsedal.no +heroy.more-og-romsdal.no +herøy.møre-og-romsdal.no +heroy.nordland.no +herøy.nordland.no +hitra.no +hjartdal.no +hjelmeland.no +hobol.no +hobøl.no +hof.no +hol.no +hole.no +holmestrand.no +holtalen.no +holtålen.no +hornindal.no +horten.no +hurdal.no +hurum.no +hvaler.no +hyllestad.no +hagebostad.no +hægebostad.no +hoyanger.no +høyanger.no +hoylandet.no +høylandet.no +ha.no +hå.no +ibestad.no +inderoy.no +inderøy.no +iveland.no +jevnaker.no +jondal.no +jolster.no +jølster.no +karasjok.no +karasjohka.no +kárášjohka.no +karlsoy.no +galsa.no +gálsá.no +karmoy.no +karmøy.no +kautokeino.no +guovdageaidnu.no +klepp.no +klabu.no +klæbu.no +kongsberg.no +kongsvinger.no +kragero.no +kragerø.no +kristiansand.no +kristiansund.no +krodsherad.no +krødsherad.no +kvalsund.no +rahkkeravju.no +ráhkkerávju.no +kvam.no +kvinesdal.no +kvinnherad.no +kviteseid.no +kvitsoy.no +kvitsøy.no +kvafjord.no +kvæfjord.no +giehtavuoatna.no +kvanangen.no +kvænangen.no +navuotna.no +návuotna.no +kafjord.no +kåfjord.no +gaivuotna.no +gáivuotna.no +larvik.no +lavangen.no +lavagis.no +loabat.no +loabát.no +lebesby.no +davvesiida.no +leikanger.no +leirfjord.no +leka.no +leksvik.no +lenvik.no +leangaviika.no +leaŋgaviika.no +lesja.no +levanger.no +lier.no +lierne.no +lillehammer.no +lillesand.no +lindesnes.no +lindas.no +lindås.no +lom.no +loppa.no +lahppi.no +láhppi.no +lund.no +lunner.no +luroy.no +lurøy.no +luster.no +lyngdal.no +lyngen.no +ivgu.no +lardal.no +lerdal.no +lærdal.no +lodingen.no +lødingen.no +lorenskog.no +lørenskog.no +loten.no +løten.no +malvik.no +masoy.no +måsøy.no +muosat.no +muosát.no +mandal.no +marker.no +marnardal.no +masfjorden.no +meland.no +meldal.no +melhus.no +meloy.no +meløy.no +meraker.no +meråker.no +moareke.no +moåreke.no +midsund.no +midtre-gauldal.no +modalen.no +modum.no +molde.no +moskenes.no +moss.no +mosvik.no +malselv.no +målselv.no +malatvuopmi.no +málatvuopmi.no +namdalseid.no +aejrie.no +namsos.no +namsskogan.no +naamesjevuemie.no +nååmesjevuemie.no +laakesvuemie.no +nannestad.no +narvik.no +narviika.no +naustdal.no +nedre-eiker.no +nes.akershus.no +nes.buskerud.no +nesna.no +nesodden.no +nesseby.no +unjarga.no +unjárga.no +nesset.no +nissedal.no +nittedal.no +nord-aurdal.no +nord-fron.no +nord-odal.no +norddal.no +nordkapp.no +davvenjarga.no +davvenjárga.no +nordre-land.no +nordreisa.no +raisa.no +ráisa.no +nore-og-uvdal.no +notodden.no +naroy.no +nærøy.no +notteroy.no +nøtterøy.no +odda.no +oksnes.no +øksnes.no +oppdal.no +oppegard.no +oppegård.no +orkdal.no +orland.no +ørland.no +orskog.no +ørskog.no +orsta.no +ørsta.no +os.hedmark.no +os.hordaland.no +osen.no +osteroy.no +osterøy.no +ostre-toten.no +østre-toten.no +overhalla.no +ovre-eiker.no +øvre-eiker.no +oyer.no +øyer.no +oygarden.no +øygarden.no +oystre-slidre.no +øystre-slidre.no +porsanger.no +porsangu.no +porsáŋgu.no +porsgrunn.no +radoy.no +radøy.no +rakkestad.no +rana.no +ruovat.no +randaberg.no +rauma.no +rendalen.no +rennebu.no +rennesoy.no +rennesøy.no +rindal.no +ringebu.no +ringerike.no
+ringsaker.no +rissa.no +risor.no +risør.no +roan.no +rollag.no +rygge.no +ralingen.no +rælingen.no +rodoy.no +rødøy.no +romskog.no +rømskog.no +roros.no +røros.no +rost.no +røst.no +royken.no +røyken.no +royrvik.no +røyrvik.no +rade.no +råde.no +salangen.no +siellak.no +saltdal.no +salat.no +sálát.no +sálat.no +samnanger.no +sande.more-og-romsdal.no +sande.møre-og-romsdal.no +sande.vestfold.no +sandefjord.no +sandnes.no +sandoy.no +sandøy.no +sarpsborg.no +sauda.no +sauherad.no +sel.no +selbu.no +selje.no +seljord.no +sigdal.no +siljan.no +sirdal.no +skaun.no +skedsmo.no +ski.no +skien.no +skiptvet.no +skjervoy.no +skjervøy.no +skierva.no +skiervá.no +skjak.no +skjåk.no +skodje.no +skanland.no +skånland.no +skanit.no +skánit.no +smola.no +smøla.no +snillfjord.no +snasa.no +snåsa.no +snoasa.no +snaase.no +snåase.no +sogndal.no +sokndal.no +sola.no +solund.no +songdalen.no +sortland.no +spydeberg.no +stange.no +stavanger.no +steigen.no +steinkjer.no +stjordal.no +stjørdal.no +stokke.no +stor-elvdal.no +stord.no +stordal.no +storfjord.no +omasvuotna.no +strand.no +stranda.no +stryn.no +sula.no +suldal.no +sund.no +sunndal.no +surnadal.no +sveio.no +svelvik.no +sykkylven.no +sogne.no +søgne.no +somna.no +sømna.no +sondre-land.no +søndre-land.no +sor-aurdal.no +sør-aurdal.no +sor-fron.no +sør-fron.no +sor-odal.no +sør-odal.no +sor-varanger.no +sør-varanger.no +matta-varjjat.no +mátta-várjjat.no +sorfold.no +sørfold.no +sorreisa.no +sørreisa.no +sorum.no +sørum.no +tana.no +deatnu.no +time.no +tingvoll.no +tinn.no +tjeldsund.no +dielddanuorri.no +tjome.no +tjøme.no +tokke.no +tolga.no +torsken.no +tranoy.no +tranøy.no +tromso.no +tromsø.no +tromsa.no +romsa.no +trondheim.no +troandin.no +trysil.no +trana.no +træna.no +trogstad.no +trøgstad.no +tvedestrand.no +tydal.no +tynset.no +tysfjord.no +divtasvuodna.no +divttasvuotna.no +tysnes.no +tysvar.no +tysvær.no +tonsberg.no +tønsberg.no +ullensaker.no +ullensvang.no +ulvik.no +utsira.no +vadso.no +vadsø.no +cahcesuolo.no +čáhcesuolo.no +vaksdal.no +valle.no +vang.no +vanylven.no +vardo.no +vardø.no +varggat.no +várggát.no +vefsn.no +vaapste.no +vega.no +vegarshei.no +vegårshei.no +vennesla.no +verdal.no +verran.no +vestby.no +vestnes.no +vestre-slidre.no +vestre-toten.no +vestvagoy.no +vestvågøy.no +vevelstad.no +vik.no +vikna.no +vindafjord.no +volda.no +voss.no +varoy.no +værøy.no +vagan.no +vågan.no +voagat.no +vagsoy.no +vågsøy.no +vaga.no +vågå.no +valer.ostfold.no +våler.østfold.no +valer.hedmark.no +våler.hedmark.no + +// np : http://www.mos.com.np/register.html +*.np + +// nr : http://cenpac.net.nr/dns/index.html +// Submitted by registry +nr +biz.nr +info.nr +gov.nr +edu.nr +org.nr +net.nr +com.nr + +// nu : https://en.wikipedia.org/wiki/.nu +nu + +// nz : https://en.wikipedia.org/wiki/.nz +// Submitted by registry +nz +ac.nz +co.nz +cri.nz +geek.nz +gen.nz +govt.nz +health.nz +iwi.nz +kiwi.nz +maori.nz +mil.nz +māori.nz +net.nz +org.nz +parliament.nz +school.nz + +// om : https://en.wikipedia.org/wiki/.om +om +co.om +com.om +edu.om +gov.om +med.om +museum.om +net.om +org.om +pro.om + +// onion : https://tools.ietf.org/html/rfc7686 +onion + +// org : https://en.wikipedia.org/wiki/.org +org + +// pa : http://www.nic.pa/ +// Some additional second level "domains" resolve directly as hostnames, such as +// pannet.pa, so we add a rule for "pa".
+pa +ac.pa +gob.pa +com.pa +org.pa +sld.pa +edu.pa +net.pa +ing.pa +abo.pa +med.pa +nom.pa + +// pe : https://www.nic.pe/InformeFinalComision.pdf +pe +edu.pe +gob.pe +nom.pe +mil.pe +org.pe +com.pe +net.pe + +// pf : http://www.gobin.info/domainname/formulaire-pf.pdf +pf +com.pf +org.pf +edu.pf + +// pg : https://en.wikipedia.org/wiki/.pg +*.pg + +// ph : http://www.domains.ph/FAQ2.asp +// Submitted by registry +ph +com.ph +net.ph +org.ph +gov.ph +edu.ph +ngo.ph +mil.ph +i.ph + +// pk : http://pk5.pknic.net.pk/pk5/msgNamepk.PK +pk +com.pk +net.pk +edu.pk +org.pk +fam.pk +biz.pk +web.pk +gov.pk +gob.pk +gok.pk +gon.pk +gop.pk +gos.pk +info.pk + +// pl http://www.dns.pl/english/index.html +// Submitted by registry +pl +com.pl +net.pl +org.pl +// pl functional domains (http://www.dns.pl/english/index.html) +aid.pl +agro.pl +atm.pl +auto.pl +biz.pl +edu.pl +gmina.pl +gsm.pl +info.pl +mail.pl +miasta.pl +media.pl +mil.pl +nieruchomosci.pl +nom.pl +pc.pl +powiat.pl +priv.pl +realestate.pl +rel.pl +sex.pl +shop.pl +sklep.pl +sos.pl +szkola.pl +targi.pl +tm.pl +tourism.pl +travel.pl +turystyka.pl +// Government domains +gov.pl +ap.gov.pl +ic.gov.pl +is.gov.pl +us.gov.pl +kmpsp.gov.pl +kppsp.gov.pl +kwpsp.gov.pl +psp.gov.pl +wskr.gov.pl +kwp.gov.pl +mw.gov.pl +ug.gov.pl +um.gov.pl +umig.gov.pl +ugim.gov.pl +upow.gov.pl +uw.gov.pl +starostwo.gov.pl +pa.gov.pl +po.gov.pl +psse.gov.pl +pup.gov.pl +rzgw.gov.pl +sa.gov.pl +so.gov.pl +sr.gov.pl +wsa.gov.pl +sko.gov.pl +uzs.gov.pl +wiih.gov.pl +winb.gov.pl +pinb.gov.pl +wios.gov.pl +witd.gov.pl +wzmiuw.gov.pl +piw.gov.pl +wiw.gov.pl +griw.gov.pl +wif.gov.pl +oum.gov.pl +sdn.gov.pl +zp.gov.pl +uppo.gov.pl +mup.gov.pl +wuoz.gov.pl +konsulat.gov.pl +oirm.gov.pl +// pl regional domains (http://www.dns.pl/english/index.html) +augustow.pl +babia-gora.pl +bedzin.pl +beskidy.pl +bialowieza.pl +bialystok.pl +bielawa.pl +bieszczady.pl +boleslawiec.pl +bydgoszcz.pl +bytom.pl +cieszyn.pl +czeladz.pl +czest.pl +dlugoleka.pl +elblag.pl +elk.pl +glogow.pl +gniezno.pl +gorlice.pl +grajewo.pl +ilawa.pl +jaworzno.pl +jelenia-gora.pl +jgora.pl +kalisz.pl +kazimierz-dolny.pl +karpacz.pl +kartuzy.pl +kaszuby.pl +katowice.pl +kepno.pl +ketrzyn.pl +klodzko.pl +kobierzyce.pl +kolobrzeg.pl +konin.pl +konskowola.pl +kutno.pl +lapy.pl +lebork.pl +legnica.pl +lezajsk.pl +limanowa.pl +lomza.pl +lowicz.pl +lubin.pl +lukow.pl +malbork.pl +malopolska.pl +mazowsze.pl +mazury.pl +mielec.pl +mielno.pl +mragowo.pl +naklo.pl +nowaruda.pl +nysa.pl +olawa.pl +olecko.pl +olkusz.pl +olsztyn.pl +opoczno.pl +opole.pl +ostroda.pl +ostroleka.pl +ostrowiec.pl +ostrowwlkp.pl +pila.pl +pisz.pl +podhale.pl +podlasie.pl +polkowice.pl +pomorze.pl +pomorskie.pl +prochowice.pl +pruszkow.pl +przeworsk.pl +pulawy.pl +radom.pl +rawa-maz.pl +rybnik.pl +rzeszow.pl +sanok.pl +sejny.pl +slask.pl +slupsk.pl +sosnowiec.pl +stalowa-wola.pl +skoczow.pl +starachowice.pl +stargard.pl +suwalki.pl +swidnica.pl +swiebodzin.pl +swinoujscie.pl +szczecin.pl +szczytno.pl +tarnobrzeg.pl +tgory.pl +turek.pl +tychy.pl +ustka.pl +walbrzych.pl +warmia.pl +warszawa.pl +waw.pl +wegrow.pl +wielun.pl +wlocl.pl +wloclawek.pl +wodzislaw.pl +wolomin.pl +wroclaw.pl +zachpomor.pl +zagan.pl +zarow.pl +zgora.pl +zgorzelec.pl + +// pm : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +pm + +// pn : http://www.government.pn/PnRegistry/policies.htm +pn +gov.pn +co.pn +org.pn +edu.pn +net.pn + +// post : https://en.wikipedia.org/wiki/.post +post + +// pr : http://www.nic.pr/index.asp?f=1 +pr +com.pr +net.pr +org.pr +gov.pr 
+edu.pr +isla.pr +pro.pr +biz.pr +info.pr +name.pr +// these aren't mentioned on nic.pr, but on https://en.wikipedia.org/wiki/.pr +est.pr +prof.pr +ac.pr + +// pro : http://registry.pro/get-pro +pro +aaa.pro +aca.pro +acct.pro +avocat.pro +bar.pro +cpa.pro +eng.pro +jur.pro +law.pro +med.pro +recht.pro + +// ps : https://en.wikipedia.org/wiki/.ps +// http://www.nic.ps/registration/policy.html#reg +ps +edu.ps +gov.ps +sec.ps +plo.ps +com.ps +org.ps +net.ps + +// pt : http://online.dns.pt/dns/start_dns +pt +net.pt +gov.pt +org.pt +edu.pt +int.pt +publ.pt +com.pt +nome.pt + +// pw : https://en.wikipedia.org/wiki/.pw +pw +co.pw +ne.pw +or.pw +ed.pw +go.pw +belau.pw + +// py : http://www.nic.py/pautas.html#seccion_9 +// Submitted by registry +py +com.py +coop.py +edu.py +gov.py +mil.py +net.py +org.py + +// qa : http://domains.qa/en/ +qa +com.qa +edu.qa +gov.qa +mil.qa +name.qa +net.qa +org.qa +sch.qa + +// re : http://www.afnic.re/obtenir/chartes/nommage-re/annexe-descriptifs +re +asso.re +com.re +nom.re + +// ro : http://www.rotld.ro/ +ro +arts.ro +com.ro +firm.ro +info.ro +nom.ro +nt.ro +org.ro +rec.ro +store.ro +tm.ro +www.ro + +// rs : https://www.rnids.rs/en/domains/national-domains +rs +ac.rs +co.rs +edu.rs +gov.rs +in.rs +org.rs + +// ru : https://cctld.ru/en/domains/domens_ru/reserved/ +ru +ac.ru +edu.ru +gov.ru +int.ru +mil.ru +test.ru + +// rw : https://www.ricta.org.rw/sites/default/files/resources/registry_registrar_contract_0.pdf +rw +ac.rw +co.rw +coop.rw +gov.rw +mil.rw +net.rw +org.rw + +// sa : http://www.nic.net.sa/ +sa +com.sa +net.sa +org.sa +gov.sa +med.sa +pub.sa +edu.sa +sch.sa + +// sb : http://www.sbnic.net.sb/ +// Submitted by registry +sb +com.sb +edu.sb +gov.sb +net.sb +org.sb + +// sc : http://www.nic.sc/ +sc +com.sc +gov.sc +net.sc +org.sc +edu.sc + +// sd : http://www.isoc.sd/sudanic.isoc.sd/billing_pricing.htm +// Submitted by registry +sd +com.sd +net.sd +org.sd +edu.sd +med.sd +tv.sd +gov.sd +info.sd + +// se : https://en.wikipedia.org/wiki/.se +// Submitted by registry +se +a.se +ac.se +b.se +bd.se +brand.se +c.se +d.se +e.se +f.se +fh.se +fhsk.se +fhv.se +g.se +h.se +i.se +k.se +komforb.se +kommunalforbund.se +komvux.se +l.se +lanbib.se +m.se +n.se +naturbruksgymn.se +o.se +org.se +p.se +parti.se +pp.se +press.se +r.se +s.se +t.se +tm.se +u.se +w.se +x.se +y.se +z.se + +// sg : http://www.nic.net.sg/page/registration-policies-procedures-and-guidelines +sg +com.sg +net.sg +org.sg +gov.sg +edu.sg +per.sg + +// sh : http://www.nic.sh/registrar.html +sh +com.sh +net.sh +gov.sh +org.sh +mil.sh + +// si : https://en.wikipedia.org/wiki/.si +si + +// sj : No registrations at this time. +// Submitted by registry +sj + +// sk : https://en.wikipedia.org/wiki/.sk +// list of 2nd level domains ? 
+sk + +// sl : http://www.nic.sl +// Submitted by registry +sl +com.sl +net.sl +edu.sl +gov.sl +org.sl + +// sm : https://en.wikipedia.org/wiki/.sm +sm + +// sn : https://en.wikipedia.org/wiki/.sn +sn +art.sn +com.sn +edu.sn +gouv.sn +org.sn +perso.sn +univ.sn + +// so : http://www.soregistry.com/ +so +com.so +net.so +org.so + +// sr : https://en.wikipedia.org/wiki/.sr +sr + +// st : http://www.nic.st/html/policyrules/ +st +co.st +com.st +consulado.st +edu.st +embaixada.st +gov.st +mil.st +net.st +org.st +principe.st +saotome.st +store.st + +// su : https://en.wikipedia.org/wiki/.su +su + +// sv : http://www.svnet.org.sv/niveldos.pdf +sv +com.sv +edu.sv +gob.sv +org.sv +red.sv + +// sx : https://en.wikipedia.org/wiki/.sx +// Submitted by registry +sx +gov.sx + +// sy : https://en.wikipedia.org/wiki/.sy +// see also: http://www.gobin.info/domainname/sy.doc +sy +edu.sy +gov.sy +net.sy +mil.sy +com.sy +org.sy + +// sz : https://en.wikipedia.org/wiki/.sz +// http://www.sispa.org.sz/ +sz +co.sz +ac.sz +org.sz + +// tc : https://en.wikipedia.org/wiki/.tc +tc + +// td : https://en.wikipedia.org/wiki/.td +td + +// tel: https://en.wikipedia.org/wiki/.tel +// http://www.telnic.org/ +tel + +// tf : https://en.wikipedia.org/wiki/.tf +tf + +// tg : https://en.wikipedia.org/wiki/.tg +// http://www.nic.tg/ +tg + +// th : https://en.wikipedia.org/wiki/.th +// Submitted by registry +th +ac.th +co.th +go.th +in.th +mi.th +net.th +or.th + +// tj : http://www.nic.tj/policy.html +tj +ac.tj +biz.tj +co.tj +com.tj +edu.tj +go.tj +gov.tj +int.tj +mil.tj +name.tj +net.tj +nic.tj +org.tj +test.tj +web.tj + +// tk : https://en.wikipedia.org/wiki/.tk +tk + +// tl : https://en.wikipedia.org/wiki/.tl +tl +gov.tl + +// tm : http://www.nic.tm/local.html +tm +com.tm +co.tm +org.tm +net.tm +nom.tm +gov.tm +mil.tm +edu.tm + +// tn : https://en.wikipedia.org/wiki/.tn +// http://whois.ati.tn/ +tn +com.tn +ens.tn +fin.tn +gov.tn +ind.tn +intl.tn +nat.tn +net.tn +org.tn +info.tn +perso.tn +tourism.tn +edunet.tn +rnrt.tn +rns.tn +rnu.tn +mincom.tn +agrinet.tn +defense.tn +turen.tn + +// to : https://en.wikipedia.org/wiki/.to +// Submitted by registry +to +com.to +gov.to +net.to +org.to +edu.to +mil.to + +// tr : https://nic.tr/ +// https://nic.tr/forms/eng/policies.pdf +// https://nic.tr/index.php?USRACTN=PRICELST +tr +av.tr +bbs.tr +bel.tr +biz.tr +com.tr +dr.tr +edu.tr +gen.tr +gov.tr +info.tr +mil.tr +k12.tr +kep.tr +name.tr +net.tr +org.tr +pol.tr +tel.tr +tsk.tr +tv.tr +web.tr +// Used by Northern Cyprus +nc.tr +// Used by government agencies of Northern Cyprus +gov.nc.tr + +// tt : http://www.nic.tt/ +tt +co.tt +com.tt +org.tt +net.tt +biz.tt +info.tt +pro.tt +int.tt +coop.tt +jobs.tt +mobi.tt +travel.tt +museum.tt +aero.tt +name.tt +gov.tt +edu.tt + +// tv : https://en.wikipedia.org/wiki/.tv +// Not listing any 2LDs as reserved since none seem to exist in practice, +// Wikipedia notwithstanding. 
+tv + +// tw : https://en.wikipedia.org/wiki/.tw +tw +edu.tw +gov.tw +mil.tw +com.tw +net.tw +org.tw +idv.tw +game.tw +ebiz.tw +club.tw +網路.tw +組織.tw +商業.tw + +// tz : http://www.tznic.or.tz/index.php/domains +// Submitted by registry +tz +ac.tz +co.tz +go.tz +hotel.tz +info.tz +me.tz +mil.tz +mobi.tz +ne.tz +or.tz +sc.tz +tv.tz + +// ua : https://hostmaster.ua/policy/?ua +// Submitted by registry +ua +// ua 2LD +com.ua +edu.ua +gov.ua +in.ua +net.ua +org.ua +// ua geographic names +// https://hostmaster.ua/2ld/ +cherkassy.ua +cherkasy.ua +chernigov.ua +chernihiv.ua +chernivtsi.ua +chernovtsy.ua +ck.ua +cn.ua +cr.ua +crimea.ua +cv.ua +dn.ua +dnepropetrovsk.ua +dnipropetrovsk.ua +dominic.ua +donetsk.ua +dp.ua +if.ua +ivano-frankivsk.ua +kh.ua +kharkiv.ua +kharkov.ua +kherson.ua +khmelnitskiy.ua +khmelnytskyi.ua +kiev.ua +kirovograd.ua +km.ua +kr.ua +krym.ua +ks.ua +kv.ua +kyiv.ua +lg.ua +lt.ua +lugansk.ua +lutsk.ua +lv.ua +lviv.ua +mk.ua +mykolaiv.ua +nikolaev.ua +od.ua +odesa.ua +odessa.ua +pl.ua +poltava.ua +rivne.ua +rovno.ua +rv.ua +sb.ua +sebastopol.ua +sevastopol.ua +sm.ua +sumy.ua +te.ua +ternopil.ua +uz.ua +uzhgorod.ua +vinnica.ua +vinnytsia.ua +vn.ua +volyn.ua +yalta.ua +zaporizhzhe.ua +zaporizhzhia.ua +zhitomir.ua +zhytomyr.ua +zp.ua +zt.ua + +// ug : https://www.registry.co.ug/ +ug +co.ug +or.ug +ac.ug +sc.ug +go.ug +ne.ug +com.ug +org.ug + +// uk : https://en.wikipedia.org/wiki/.uk +// Submitted by registry +uk +ac.uk +co.uk +gov.uk +ltd.uk +me.uk +net.uk +nhs.uk +org.uk +plc.uk +police.uk +*.sch.uk + +// us : https://en.wikipedia.org/wiki/.us +us +dni.us +fed.us +isa.us +kids.us +nsn.us +// us geographic names +ak.us +al.us +ar.us +as.us +az.us +ca.us +co.us +ct.us +dc.us +de.us +fl.us +ga.us +gu.us +hi.us +ia.us +id.us +il.us +in.us +ks.us +ky.us +la.us +ma.us +md.us +me.us +mi.us +mn.us +mo.us +ms.us +mt.us +nc.us +nd.us +ne.us +nh.us +nj.us +nm.us +nv.us +ny.us +oh.us +ok.us +or.us +pa.us +pr.us +ri.us +sc.us +sd.us +tn.us +tx.us +ut.us +vi.us +vt.us +va.us +wa.us +wi.us +wv.us +wy.us +// The registrar notes several more specific domains available in each state, +// such as state.*.us, dst.*.us, etc., but resolution of these is somewhat +// haphazard; in some states these domains resolve as addresses, while in others +// only subdomains are available, or even nothing at all. We include the +// most common ones where it's clear that different sites are different +// entities.
+k12.ak.us +k12.al.us +k12.ar.us +k12.as.us +k12.az.us +k12.ca.us +k12.co.us +k12.ct.us +k12.dc.us +k12.de.us +k12.fl.us +k12.ga.us +k12.gu.us +// k12.hi.us Bug 614565 - Hawaii has a state-wide DOE login +k12.ia.us +k12.id.us +k12.il.us +k12.in.us +k12.ks.us +k12.ky.us +k12.la.us +k12.ma.us +k12.md.us +k12.me.us +k12.mi.us +k12.mn.us +k12.mo.us +k12.ms.us +k12.mt.us +k12.nc.us +// k12.nd.us Bug 1028347 - Removed at request of Travis Rosso +k12.ne.us +k12.nh.us +k12.nj.us +k12.nm.us +k12.nv.us +k12.ny.us +k12.oh.us +k12.ok.us +k12.or.us +k12.pa.us +k12.pr.us +k12.ri.us +k12.sc.us +// k12.sd.us Bug 934131 - Removed at request of James Booze +k12.tn.us +k12.tx.us +k12.ut.us +k12.vi.us +k12.vt.us +k12.va.us +k12.wa.us +k12.wi.us +// k12.wv.us Bug 947705 - Removed at request of Verne Britton +k12.wy.us +cc.ak.us +cc.al.us +cc.ar.us +cc.as.us +cc.az.us +cc.ca.us +cc.co.us +cc.ct.us +cc.dc.us +cc.de.us +cc.fl.us +cc.ga.us +cc.gu.us +cc.hi.us +cc.ia.us +cc.id.us +cc.il.us +cc.in.us +cc.ks.us +cc.ky.us +cc.la.us +cc.ma.us +cc.md.us +cc.me.us +cc.mi.us +cc.mn.us +cc.mo.us +cc.ms.us +cc.mt.us +cc.nc.us +cc.nd.us +cc.ne.us +cc.nh.us +cc.nj.us +cc.nm.us +cc.nv.us +cc.ny.us +cc.oh.us +cc.ok.us +cc.or.us +cc.pa.us +cc.pr.us +cc.ri.us +cc.sc.us +cc.sd.us +cc.tn.us +cc.tx.us +cc.ut.us +cc.vi.us +cc.vt.us +cc.va.us +cc.wa.us +cc.wi.us +cc.wv.us +cc.wy.us +lib.ak.us +lib.al.us +lib.ar.us +lib.as.us +lib.az.us +lib.ca.us +lib.co.us +lib.ct.us +lib.dc.us +// lib.de.us Issue #243 - Moved to Private section at request of Ed Moore +lib.fl.us +lib.ga.us +lib.gu.us +lib.hi.us +lib.ia.us +lib.id.us +lib.il.us +lib.in.us +lib.ks.us +lib.ky.us +lib.la.us +lib.ma.us +lib.md.us +lib.me.us +lib.mi.us +lib.mn.us +lib.mo.us +lib.ms.us +lib.mt.us +lib.nc.us +lib.nd.us +lib.ne.us +lib.nh.us +lib.nj.us +lib.nm.us +lib.nv.us +lib.ny.us +lib.oh.us +lib.ok.us +lib.or.us +lib.pa.us +lib.pr.us +lib.ri.us +lib.sc.us +lib.sd.us +lib.tn.us +lib.tx.us +lib.ut.us +lib.vi.us +lib.vt.us +lib.va.us +lib.wa.us +lib.wi.us +// lib.wv.us Bug 941670 - Removed at request of Larry W Arnold +lib.wy.us +// k12.ma.us contains school districts in Massachusetts. The 4LDs are +// managed independently except for private (PVT), charter (CHTR) and +// parochial (PAROCH) schools. Those are delegated directly to the +// 5LD operators. +pvt.k12.ma.us +chtr.k12.ma.us +paroch.k12.ma.us +// Merit Network, Inc. maintains the registry for =~ /(k12|cc|lib).mi.us/ and the following +// see also: http://domreg.merit.edu +// see also: whois -h whois.domreg.merit.edu help +ann-arbor.mi.us +cog.mi.us +dst.mi.us +eaton.mi.us +gen.mi.us +mus.mi.us +tec.mi.us +washtenaw.mi.us + +// uy : http://www.nic.org.uy/ +uy +com.uy +edu.uy +gub.uy +mil.uy +net.uy +org.uy + +// uz : http://www.reg.uz/ +uz +co.uz +com.uz +net.uz +org.uz + +// va : https://en.wikipedia.org/wiki/.va +va + +// vc : https://en.wikipedia.org/wiki/.vc +// Submitted by registry +vc +com.vc +net.vc +org.vc +gov.vc +mil.vc +edu.vc + +// ve : https://registro.nic.ve/ +// Submitted by registry +ve +arts.ve +co.ve +com.ve +e12.ve +edu.ve +firm.ve +gob.ve +gov.ve +info.ve +int.ve +mil.ve +net.ve +org.ve +rec.ve +store.ve +tec.ve +web.ve + +// vg : https://en.wikipedia.org/wiki/.vg +vg + +// vi : http://www.nic.vi/newdomainform.htm +// http://www.nic.vi/Domain_Rules/body_domain_rules.html indicates some other +// TLDs are "reserved", such as edu.vi and gov.vi, but doesn't actually say they +// are available for registration (which they do not seem to be). 
+vi +co.vi +com.vi +k12.vi +net.vi +org.vi + +// vn : https://www.dot.vn/vnnic/vnnic/domainregistration.jsp +vn +com.vn +net.vn +org.vn +edu.vn +gov.vn +int.vn +ac.vn +biz.vn +info.vn +name.vn +pro.vn +health.vn + +// vu : https://en.wikipedia.org/wiki/.vu +// http://www.vunic.vu/ +vu +com.vu +edu.vu +net.vu +org.vu + +// wf : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +wf + +// ws : https://en.wikipedia.org/wiki/.ws +// http://samoanic.ws/index.dhtml +ws +com.ws +net.ws +org.ws +gov.ws +edu.ws + +// yt : http://www.afnic.fr/medias/documents/AFNIC-naming-policy2012.pdf +yt + +// IDN ccTLDs +// When submitting patches, please maintain a sort by ISO 3166 ccTLD, then +// U-label, and follow this format: +// // A-Label ("", [, variant info]) : +// // [sponsoring org] +// U-Label + +// xn--mgbaam7a8h ("Emerat", Arabic) : AE +// http://nic.ae/english/arabicdomain/rules.jsp +امارات + +// xn--y9a3aq ("hye", Armenian) : AM +// ISOC AM (operated by .am Registry) +հայ + +// xn--54b7fta0cc ("Bangla", Bangla) : BD +বাংলা + +// xn--90ae ("bg", Bulgarian) : BG +бг + +// xn--90ais ("bel", Belarusian/Russian Cyrillic) : BY +// Operated by .by registry +бел + +// xn--fiqs8s ("Zhongguo/China", Chinese, Simplified) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中国 + +// xn--fiqz9s ("Zhongguo/China", Chinese, Traditional) : CN +// CNNIC +// http://cnnic.cn/html/Dir/2005/10/11/3218.htm +中國 + +// xn--lgbbat1ad8j ("Algeria/Al Jazair", Arabic) : DZ +الجزائر + +// xn--wgbh1c ("Egypt/Masr", Arabic) : EG +// http://www.dotmasr.eg/ +مصر + +// xn--e1a4c ("eu", Cyrillic) : EU +ею + +// xn--node ("ge", Georgian Mkhedruli) : GE +გე + +// xn--qxam ("el", Greek) : GR +// Hellenic Ministry of Infrastructure, Transport, and Networks +ελ + +// xn--j6w193g ("Hong Kong", Chinese) : HK +// https://www.hkirc.hk +// Submitted by registry +// https://www.hkirc.hk/content.jsp?id=30#!/34 +香港 +公司.香港 +教育.香港 +政府.香港 +個人.香港 +網絡.香港 +組織.香港 + +// xn--2scrj9c ("Bharat", Kannada) : IN +// India +ಭಾರತ + +// xn--3hcrj9c ("Bharat", Oriya) : IN +// India +ଭାରତ + +// xn--45br5cyl ("Bharatam", Assamese) : IN +// India +ভাৰত + +// xn--h2breg3eve ("Bharatam", Sanskrit) : IN +// India +भारतम् + +// xn--h2brj9c8c ("Bharot", Santali) : IN +// India +भारोत + +// xn--mgbgu82a ("Bharat", Sindhi) : IN +// India +ڀارت + +// xn--rvc1e0am3e ("Bharatam", Malayalam) : IN +// India +ഭാരതം + +// xn--h2brj9c ("Bharat", Devanagari) : IN +// India +भारत + +// xn--mgbbh1a ("Bharat", Kashmiri) : IN +// India +بارت + +// xn--mgbbh1a71e ("Bharat", Arabic) : IN +// India +بھارت + +// xn--fpcrj9c3d ("Bharat", Telugu) : IN +// India +భారత్ + +// xn--gecrj9c ("Bharat", Gujarati) : IN +// India +ભારત + +// xn--s9brj9c ("Bharat", Gurmukhi) : IN +// India +ਭਾਰਤ + +// xn--45brj9c ("Bharat", Bengali) : IN +// India +ভারত + +// xn--xkc2dl3a5ee0h ("India", Tamil) : IN +// India +இந்தியா + +// xn--mgba3a4f16a ("Iran", Persian) : IR +ایران + +// xn--mgba3a4fra ("Iran", Arabic) : IR +ايران + +// xn--mgbtx2b ("Iraq", Arabic) : IQ +// Communications and Media Commission +عراق + +// xn--mgbayh7gpa ("al-Ordon", Arabic) : JO +// National Information Technology Center (NITC) +// Royal Scientific Society, Al-Jubeiha +الاردن + +// xn--3e0b707e ("Republic of Korea", Hangul) : KR +한국 + +// xn--80ao21a ("Kaz", Kazakh) : KZ +қаз
+ +// xn--fzc2c9e2c ("Lanka", Sinhalese-Sinhala) : LK +// http://nic.lk +ලංකා + +// xn--xkc2al3hye2a ("Ilangai", Tamil) : LK +// http://nic.lk +இலங்கை + +// xn--mgbc0a9azcg ("Morocco/al-Maghrib", Arabic) : MA +المغرب + +// xn--d1alf ("mkd", Macedonian) : MK +// MARnet +мкд + +// xn--l1acc ("mon", Mongolian) : MN +мон + +// xn--mix891f ("Macao", Chinese, Traditional) : MO +// MONIC / HNET Asia (Registry Operator for .mo) +澳門 + +// xn--mix082f ("Macao", Chinese, Simplified) : MO +澳门 + +// xn--mgbx4cd0ab ("Malaysia", Malay) : MY +مليسيا + +// xn--mgb9awbf ("Oman", Arabic) : OM +عمان + +// xn--mgbai9azgqp6j ("Pakistan", Urdu/Arabic) : PK +پاکستان + +// xn--mgbai9a5eva00b ("Pakistan", Urdu/Arabic, variant) : PK +پاكستان + +// xn--ygbi2ammx ("Falasteen", Arabic) : PS +// The Palestinian National Internet Naming Authority (PNINA) +// http://www.pnina.ps +فلسطين + +// xn--90a3ac ("srb", Cyrillic) : RS +// https://www.rnids.rs/en/domains/national-domains +срб +пр.срб +орг.срб +обр.срб +од.срб +упр.срб +ак.срб + +// xn--p1ai ("rf", Russian-Cyrillic) : RU +// http://www.cctld.ru/en/docs/rulesrf.php +рф + +// xn--wgbl6a ("Qatar", Arabic) : QA +// http://www.ict.gov.qa/ +قطر + +// xn--mgberp4a5d4ar ("AlSaudiah", Arabic) : SA +// http://www.nic.net.sa/ +السعودية + +// xn--mgberp4a5d4a87g ("AlSaudiah", Arabic, variant) : SA +السعودیة + +// xn--mgbqly7c0a67fbc ("AlSaudiah", Arabic, variant) : SA +السعودیۃ + +// xn--mgbqly7cvafr ("AlSaudiah", Arabic, variant) : SA +السعوديه + +// xn--mgbpl2fh ("sudan", Arabic) : SD +// Operated by .sd registry +سودان + +// xn--yfro4i67o Singapore ("Singapore", Chinese) : SG +新加坡 + +// xn--clchc0ea0b2g2a9gcd ("Singapore", Tamil) : SG +சிங்கப்பூர் + +// xn--ogbpf8fl ("Syria", Arabic) : SY +سورية + +// xn--mgbtf8fl ("Syria", Arabic, variant) : SY +سوريا + +// xn--o3cw4h ("Thai", Thai) : TH +// http://www.thnic.co.th +ไทย +ศึกษา.ไทย +ธุรกิจ.ไทย +รัฐบาล.ไทย +ทหาร.ไทย +เน็ต.ไทย +องค์กร.ไทย + +// xn--pgbs0dh ("Tunisia", Arabic) : TN +// http://nic.tn +تونس + +// xn--kpry57d ("Taiwan", Chinese, Traditional) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台灣 + +// xn--kprw13d ("Taiwan", Chinese, Simplified) : TW +// http://www.twnic.net/english/dn/dn_07a.htm +台湾 + +// xn--nnx388a ("Taiwan", Chinese, variant) : TW +臺灣 + +// xn--j1amh ("ukr", Cyrillic) : UA +укр + +// xn--mgb2ddes ("AlYemen", Arabic) : YE +اليمن + +// xxx : http://icmregistry.com +xxx + +// ye : http://www.y.net.ye/services/domain_name.htm +*.ye + +// za : http://www.zadna.org.za/content/page/domain-information +ac.za +agric.za +alt.za +co.za +edu.za +gov.za +grondar.za +law.za +mil.za +net.za +ngo.za +nis.za +nom.za +org.za +school.za +tm.za +web.za + +// zm : https://zicta.zm/ +// Submitted by registry +zm +ac.zm +biz.zm +co.zm +com.zm +edu.zm +gov.zm +info.zm +mil.zm +net.zm +org.zm +sch.zm + +// zw : https://www.potraz.gov.zw/ +// Confirmed by registry 2017-01-25 +zw +ac.zw +co.zw +gov.zw +mil.zw +org.zw + + +// newGTLDs + +// List of new gTLDs imported from https://www.icann.org/resources/registries/gtlds/v2/gtlds.json on 2019-06-14T10:00:50-04:00 +// This list is auto-generated, don't edit it manually. +// aaa : 2015-02-26 American Automobile Association, Inc.
+aaa + +// aarp : 2015-05-21 AARP +aarp + +// abarth : 2015-07-30 Fiat Chrysler Automobiles N.V. +abarth + +// abb : 2014-10-24 ABB Ltd +abb + +// abbott : 2014-07-24 Abbott Laboratories, Inc. +abbott + +// abbvie : 2015-07-30 AbbVie Inc. +abbvie + +// abc : 2015-07-30 Disney Enterprises, Inc. +abc + +// able : 2015-06-25 Able Inc. +able + +// abogado : 2014-04-24 Minds + Machines Group Limited +abogado + +// abudhabi : 2015-07-30 Abu Dhabi Systems and Information Centre +abudhabi + +// academy : 2013-11-07 Binky Moon, LLC +academy + +// accenture : 2014-08-15 Accenture plc +accenture + +// accountant : 2014-11-20 dot Accountant Limited +accountant + +// accountants : 2014-03-20 Binky Moon, LLC +accountants + +// aco : 2015-01-08 ACO Severin Ahlmann GmbH & Co. KG +aco + +// actor : 2013-12-12 Dog Beach, LLC +actor + +// adac : 2015-07-16 Allgemeiner Deutscher Automobil-Club e.V. (ADAC) +adac + +// ads : 2014-12-04 Charleston Road Registry Inc. +ads + +// adult : 2014-10-16 ICM Registry AD LLC +adult + +// aeg : 2015-03-19 Aktiebolaget Electrolux +aeg + +// aetna : 2015-05-21 Aetna Life Insurance Company +aetna + +// afamilycompany : 2015-07-23 Johnson Shareholdings, Inc. +afamilycompany + +// afl : 2014-10-02 Australian Football League +afl + +// africa : 2014-03-24 ZA Central Registry NPC trading as Registry.Africa +africa + +// agakhan : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +agakhan + +// agency : 2013-11-14 Binky Moon, LLC +agency + +// aig : 2014-12-18 American International Group, Inc. +aig + +// aigo : 2015-08-06 aigo Digital Technology Co,Ltd. +aigo + +// airbus : 2015-07-30 Airbus S.A.S. +airbus + +// airforce : 2014-03-06 Dog Beach, LLC +airforce + +// airtel : 2014-10-24 Bharti Airtel Limited +airtel + +// akdn : 2015-04-23 Fondation Aga Khan (Aga Khan Foundation) +akdn + +// alfaromeo : 2015-07-31 Fiat Chrysler Automobiles N.V. +alfaromeo + +// alibaba : 2015-01-15 Alibaba Group Holding Limited +alibaba + +// alipay : 2015-01-15 Alibaba Group Holding Limited +alipay + +// allfinanz : 2014-07-03 Allfinanz Deutsche Vermögensberatung Aktiengesellschaft +allfinanz + +// allstate : 2015-07-31 Allstate Fire and Casualty Insurance Company +allstate + +// ally : 2015-06-18 Ally Financial Inc. +ally + +// alsace : 2014-07-02 Region Grand Est +alsace + +// alstom : 2015-07-30 ALSTOM +alstom + +// americanexpress : 2015-07-31 American Express Travel Related Services Company, Inc. +americanexpress + +// americanfamily : 2015-07-23 AmFam, Inc. +americanfamily + +// amex : 2015-07-31 American Express Travel Related Services Company, Inc. +amex + +// amfam : 2015-07-23 AmFam, Inc. +amfam + +// amica : 2015-05-28 Amica Mutual Insurance Company +amica + +// amsterdam : 2014-07-24 Gemeente Amsterdam +amsterdam + +// analytics : 2014-12-18 Campus IP LLC +analytics + +// android : 2014-08-07 Charleston Road Registry Inc. +android + +// anquan : 2015-01-08 QIHOO 360 TECHNOLOGY CO. LTD. +anquan + +// anz : 2015-07-31 Australia and New Zealand Banking Group Limited +anz + +// aol : 2015-09-17 Oath Inc. +aol + +// apartments : 2014-12-11 Binky Moon, LLC +apartments + +// app : 2015-05-14 Charleston Road Registry Inc. +app + +// apple : 2015-05-14 Apple Inc.
+apple + +// aquarelle : 2014-07-24 Aquarelle.com +aquarelle + +// arab : 2015-11-12 League of Arab States +arab + +// aramco : 2014-11-20 Aramco Services Company +aramco + +// archi : 2014-02-06 Afilias Limited +archi + +// army : 2014-03-06 Dog Beach, LLC +army + +// art : 2016-03-24 UK Creative Ideas Limited +art + +// arte : 2014-12-11 Association Relative à la Télévision Européenne G.E.I.E. +arte + +// asda : 2015-07-31 Wal-Mart Stores, Inc. +asda + +// associates : 2014-03-06 Binky Moon, LLC +associates + +// athleta : 2015-07-30 The Gap, Inc. +athleta + +// attorney : 2014-03-20 Dog Beach, LLC +attorney + +// auction : 2014-03-20 Dog Beach, LLC +auction + +// audi : 2015-05-21 AUDI Aktiengesellschaft +audi + +// audible : 2015-06-25 Amazon Registry Services, Inc. +audible + +// audio : 2014-03-20 Uniregistry, Corp. +audio + +// auspost : 2015-08-13 Australian Postal Corporation +auspost + +// author : 2014-12-18 Amazon Registry Services, Inc. +author + +// auto : 2014-11-13 Cars Registry Limited +auto + +// autos : 2014-01-09 DERAutos, LLC +autos + +// avianca : 2015-01-08 Aerovias del Continente Americano S.A. Avianca +avianca + +// aws : 2015-06-25 Amazon Registry Services, Inc. +aws + +// axa : 2013-12-19 AXA SA +axa + +// azure : 2014-12-18 Microsoft Corporation +azure + +// baby : 2015-04-09 XYZ.COM LLC +baby + +// baidu : 2015-01-08 Baidu, Inc. +baidu + +// banamex : 2015-07-30 Citigroup Inc. +banamex + +// bananarepublic : 2015-07-31 The Gap, Inc. +bananarepublic + +// band : 2014-06-12 Dog Beach, LLC +band + +// bank : 2014-09-25 fTLD Registry Services LLC +bank + +// bar : 2013-12-12 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +bar + +// barcelona : 2014-07-24 Municipi de Barcelona +barcelona + +// barclaycard : 2014-11-20 Barclays Bank PLC +barclaycard + +// barclays : 2014-11-20 Barclays Bank PLC +barclays + +// barefoot : 2015-06-11 Gallo Vineyards, Inc. +barefoot + +// bargains : 2013-11-14 Binky Moon, LLC +bargains + +// baseball : 2015-10-29 MLB Advanced Media DH, LLC +baseball + +// basketball : 2015-08-20 Fédération Internationale de Basketball (FIBA) +basketball + +// bauhaus : 2014-04-17 Werkhaus GmbH +bauhaus + +// bayern : 2014-01-23 Bayern Connect GmbH +bayern + +// bbc : 2014-12-18 British Broadcasting Corporation +bbc + +// bbt : 2015-07-23 BB&T Corporation +bbt + +// bbva : 2014-10-02 BANCO BILBAO VIZCAYA ARGENTARIA, S.A. +bbva + +// bcg : 2015-04-02 The Boston Consulting Group, Inc. +bcg + +// bcn : 2014-07-24 Municipi de Barcelona +bcn + +// beats : 2015-05-14 Beats Electronics, LLC +beats + +// beauty : 2015-12-03 L'Oréal +beauty + +// beer : 2014-01-09 Minds + Machines Group Limited +beer + +// bentley : 2014-12-18 Bentley Motors Limited +bentley + +// berlin : 2013-10-31 dotBERLIN GmbH & Co. KG +berlin + +// best : 2013-12-19 BestTLD Pty Ltd +best + +// bestbuy : 2015-07-31 BBY Solutions, Inc. +bestbuy + +// bet : 2015-05-07 Afilias Limited +bet + +// bharti : 2014-01-09 Bharti Enterprises (Holding) Private Limited +bharti + +// bible : 2014-06-19 American Bible Society +bible + +// bid : 2013-12-19 dot Bid Limited +bid + +// bike : 2013-08-27 Binky Moon, LLC +bike + +// bing : 2014-12-18 Microsoft Corporation +bing + +// bingo : 2014-12-04 Binky Moon, LLC +bingo + +// bio : 2014-03-06 Afilias Limited +bio + +// black : 2014-01-16 Afilias Limited +black + +// blackfriday : 2014-01-16 Uniregistry, Corp.
+blackfriday + +// blockbuster : 2015-07-30 Dish DBS Corporation +blockbuster + +// blog : 2015-05-14 Knock Knock WHOIS There, LLC +blog + +// bloomberg : 2014-07-17 Bloomberg IP Holdings LLC +bloomberg + +// blue : 2013-11-07 Afilias Limited +blue + +// bms : 2014-10-30 Bristol-Myers Squibb Company +bms + +// bmw : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +bmw + +// bnl : 2014-07-24 Banca Nazionale del Lavoro +bnl + +// bnpparibas : 2014-05-29 BNP Paribas +bnpparibas + +// boats : 2014-12-04 DERBoats, LLC +boats + +// boehringer : 2015-07-09 Boehringer Ingelheim International GmbH +boehringer + +// bofa : 2015-07-31 Bank of America Corporation +bofa + +// bom : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +bom + +// bond : 2014-06-05 ShortDot SA +bond + +// boo : 2014-01-30 Charleston Road Registry Inc. +boo + +// book : 2015-08-27 Amazon Registry Services, Inc. +book + +// booking : 2015-07-16 Booking.com B.V. +booking + +// bosch : 2015-06-18 Robert Bosch GMBH +bosch + +// bostik : 2015-05-28 Bostik SA +bostik + +// boston : 2015-12-10 Boston TLD Management, LLC +boston + +// bot : 2014-12-18 Amazon Registry Services, Inc. +bot + +// boutique : 2013-11-14 Binky Moon, LLC +boutique + +// box : 2015-11-12 .BOX INC. +box + +// bradesco : 2014-12-18 Banco Bradesco S.A. +bradesco + +// bridgestone : 2014-12-18 Bridgestone Corporation +bridgestone + +// broadway : 2014-12-22 Celebrate Broadway, Inc. +broadway + +// broker : 2014-12-11 Dotbroker Registry Limited +broker + +// brother : 2015-01-29 Brother Industries, Ltd. +brother + +// brussels : 2014-02-06 DNS.be vzw +brussels + +// budapest : 2013-11-21 Minds + Machines Group Limited +budapest + +// bugatti : 2015-07-23 Bugatti International SA +bugatti + +// build : 2013-11-07 Plan Bee LLC +build + +// builders : 2013-11-07 Binky Moon, LLC +builders + +// business : 2013-11-07 Binky Moon, LLC +business + +// buy : 2014-12-18 Amazon Registry Services, Inc. +buy + +// buzz : 2013-10-02 DOTSTRATEGY CO. +buzz + +// bzh : 2014-02-27 Association www.bzh +bzh + +// cab : 2013-10-24 Binky Moon, LLC +cab + +// cafe : 2015-02-11 Binky Moon, LLC +cafe + +// cal : 2014-07-24 Charleston Road Registry Inc. +cal + +// call : 2014-12-18 Amazon Registry Services, Inc. +call + +// calvinklein : 2015-07-30 PVH gTLD Holdings LLC +calvinklein + +// cam : 2016-04-21 AC Webconnecting Holding B.V. +cam + +// camera : 2013-08-27 Binky Moon, LLC +camera + +// camp : 2013-11-07 Binky Moon, LLC +camp + +// cancerresearch : 2014-05-15 Australian Cancer Research Foundation +cancerresearch + +// canon : 2014-09-12 Canon Inc. +canon + +// capetown : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +capetown + +// capital : 2014-03-06 Binky Moon, LLC +capital + +// capitalone : 2015-08-06 Capital One Financial Corporation +capitalone + +// car : 2015-01-22 Cars Registry Limited +car + +// caravan : 2013-12-12 Caravan International, Inc. +caravan + +// cards : 2013-12-05 Binky Moon, LLC +cards + +// care : 2014-03-06 Binky Moon, LLC +care + +// career : 2013-10-09 dotCareer LLC +career + +// careers : 2013-10-02 Binky Moon, LLC +careers + +// cars : 2014-11-13 Cars Registry Limited +cars + +// cartier : 2014-06-23 Richemont DNS Inc. +cartier + +// casa : 2013-11-21 Minds + Machines Group Limited +casa + +// case : 2015-09-03 CNH Industrial N.V. +case + +// caseih : 2015-09-03 CNH Industrial N.V.
+caseih + +// cash : 2014-03-06 Binky Moon, LLC +cash + +// casino : 2014-12-18 Binky Moon, LLC +casino + +// catering : 2013-12-05 Binky Moon, LLC +catering + +// catholic : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +catholic + +// cba : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +cba + +// cbn : 2014-08-22 The Christian Broadcasting Network, Inc. +cbn + +// cbre : 2015-07-02 CBRE, Inc. +cbre + +// cbs : 2015-08-06 CBS Domains Inc. +cbs + +// ceb : 2015-04-09 The Corporate Executive Board Company +ceb + +// center : 2013-11-07 Binky Moon, LLC +center + +// ceo : 2013-11-07 CEOTLD Pty Ltd +ceo + +// cern : 2014-06-05 European Organization for Nuclear Research ("CERN") +cern + +// cfa : 2014-08-28 CFA Institute +cfa + +// cfd : 2014-12-11 DotCFD Registry Limited +cfd + +// chanel : 2015-04-09 Chanel International B.V. +chanel + +// channel : 2014-05-08 Charleston Road Registry Inc. +channel + +// charity : 2018-04-11 Binky Moon, LLC +charity + +// chase : 2015-04-30 JPMorgan Chase Bank, National Association +chase + +// chat : 2014-12-04 Binky Moon, LLC +chat + +// cheap : 2013-11-14 Binky Moon, LLC +cheap + +// chintai : 2015-06-11 CHINTAI Corporation +chintai + +// christmas : 2013-11-21 Uniregistry, Corp. +christmas + +// chrome : 2014-07-24 Charleston Road Registry Inc. +chrome + +// chrysler : 2015-07-30 FCA US LLC. +chrysler + +// church : 2014-02-06 Binky Moon, LLC +church + +// cipriani : 2015-02-19 Hotel Cipriani Srl +cipriani + +// circle : 2014-12-18 Amazon Registry Services, Inc. +circle + +// cisco : 2014-12-22 Cisco Technology, Inc. +cisco + +// citadel : 2015-07-23 Citadel Domain LLC +citadel + +// citi : 2015-07-30 Citigroup Inc. +citi + +// citic : 2014-01-09 CITIC Group Corporation +citic + +// city : 2014-05-29 Binky Moon, LLC +city + +// cityeats : 2014-12-11 Lifestyle Domain Holdings, Inc. +cityeats + +// claims : 2014-03-20 Binky Moon, LLC +claims + +// cleaning : 2013-12-05 Binky Moon, LLC +cleaning + +// click : 2014-06-05 Uniregistry, Corp. +click + +// clinic : 2014-03-20 Binky Moon, LLC +clinic + +// clinique : 2015-10-01 The Estée Lauder Companies Inc. +clinique + +// clothing : 2013-08-27 Binky Moon, LLC +clothing + +// cloud : 2015-04-16 Aruba PEC S.p.A. +cloud + +// club : 2013-11-08 .CLUB DOMAINS, LLC +club + +// clubmed : 2015-06-25 Club Méditerranée S.A. +clubmed + +// coach : 2014-10-09 Binky Moon, LLC +coach + +// codes : 2013-10-31 Binky Moon, LLC +codes + +// coffee : 2013-10-17 Binky Moon, LLC +coffee + +// college : 2014-01-16 XYZ.COM LLC +college + +// cologne : 2014-02-05 dotKoeln GmbH +cologne + +// comcast : 2015-07-23 Comcast IP Holdings I, LLC +comcast + +// commbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +commbank + +// community : 2013-12-05 Binky Moon, LLC +community + +// company : 2013-11-07 Binky Moon, LLC +company + +// compare : 2015-10-08 iSelect Ltd +compare + +// computer : 2013-10-24 Binky Moon, LLC +computer + +// comsec : 2015-01-08 VeriSign, Inc. +comsec + +// condos : 2013-12-05 Binky Moon, LLC +condos + +// construction : 2013-09-16 Binky Moon, LLC +construction + +// consulting : 2013-12-05 Dog Beach, LLC +consulting + +// contact : 2015-01-08 Dog Beach, LLC +contact + +// contractors : 2013-09-10 Binky Moon, LLC +contractors + +// cooking : 2013-11-21 Minds + Machines Group Limited +cooking + +// cookingchannel : 2015-07-02 Lifestyle Domain Holdings, Inc.
+cookingchannel + +// cool : 2013-11-14 Binky Moon, LLC +cool + +// corsica : 2014-09-25 Collectivité de Corse +corsica + +// country : 2013-12-19 DotCountry LLC +country + +// coupon : 2015-02-26 Amazon Registry Services, Inc. +coupon + +// coupons : 2015-03-26 Binky Moon, LLC +coupons + +// courses : 2014-12-04 OPEN UNIVERSITIES AUSTRALIA PTY LTD +courses + +// cpa : 2019-06-10 American Institute of Certified Public Accountants +cpa + +// credit : 2014-03-20 Binky Moon, LLC +credit + +// creditcard : 2014-03-20 Binky Moon, LLC +creditcard + +// creditunion : 2015-01-22 CUNA Performance Resources, LLC +creditunion + +// cricket : 2014-10-09 dot Cricket Limited +cricket + +// crown : 2014-10-24 Crown Equipment Corporation +crown + +// crs : 2014-04-03 Federated Co-operatives Limited +crs + +// cruise : 2015-12-10 Viking River Cruises (Bermuda) Ltd. +cruise + +// cruises : 2013-12-05 Binky Moon, LLC +cruises + +// csc : 2014-09-25 Alliance-One Services, Inc. +csc + +// cuisinella : 2014-04-03 SCHMIDT GROUPE S.A.S. +cuisinella + +// cymru : 2014-05-08 Nominet UK +cymru + +// cyou : 2015-01-22 Beijing Gamease Age Digital Technology Co., Ltd. +cyou + +// dabur : 2014-02-06 Dabur India Limited +dabur + +// dad : 2014-01-23 Charleston Road Registry Inc. +dad + +// dance : 2013-10-24 Dog Beach, LLC +dance + +// data : 2016-06-02 Dish DBS Corporation +data + +// date : 2014-11-20 dot Date Limited +date + +// dating : 2013-12-05 Binky Moon, LLC +dating + +// datsun : 2014-03-27 NISSAN MOTOR CO., LTD. +datsun + +// day : 2014-01-30 Charleston Road Registry Inc. +day + +// dclk : 2014-11-20 Charleston Road Registry Inc. +dclk + +// dds : 2015-05-07 Minds + Machines Group Limited +dds + +// deal : 2015-06-25 Amazon Registry Services, Inc. +deal + +// dealer : 2014-12-22 Intercap Registry Inc. +dealer + +// deals : 2014-05-22 Binky Moon, LLC +deals + +// degree : 2014-03-06 Dog Beach, LLC +degree + +// delivery : 2014-09-11 Binky Moon, LLC +delivery + +// dell : 2014-10-24 Dell Inc. +dell + +// deloitte : 2015-07-31 Deloitte Touche Tohmatsu +deloitte + +// delta : 2015-02-19 Delta Air Lines, Inc. +delta + +// democrat : 2013-10-24 Dog Beach, LLC +democrat + +// dental : 2014-03-20 Binky Moon, LLC +dental + +// dentist : 2014-03-20 Dog Beach, LLC +dentist + +// desi : 2013-11-14 Desi Networks LLC +desi + +// design : 2014-11-07 Top Level Design, LLC +design + +// dev : 2014-10-16 Charleston Road Registry Inc. +dev + +// dhl : 2015-07-23 Deutsche Post AG +dhl + +// diamonds : 2013-09-22 Binky Moon, LLC +diamonds + +// diet : 2014-06-26 Uniregistry, Corp. +diet + +// digital : 2014-03-06 Binky Moon, LLC +digital + +// direct : 2014-04-10 Binky Moon, LLC +direct + +// directory : 2013-09-20 Binky Moon, LLC +directory + +// discount : 2014-03-06 Binky Moon, LLC +discount + +// discover : 2015-07-23 Discover Financial Services +discover + +// dish : 2015-07-30 Dish DBS Corporation +dish + +// diy : 2015-11-05 Lifestyle Domain Holdings, Inc. +diy + +// dnp : 2013-12-13 Dai Nippon Printing Co., Ltd. +dnp + +// docs : 2014-10-16 Charleston Road Registry Inc. +docs + +// doctor : 2016-06-02 Binky Moon, LLC +doctor + +// dodge : 2015-07-30 FCA US LLC. +dodge + +// dog : 2014-12-04 Binky Moon, LLC +dog + +// domains : 2013-10-17 Binky Moon, LLC +domains + +// dot : 2015-05-21 Dish DBS Corporation +dot + +// download : 2014-11-20 dot Support Limited +download + +// drive : 2015-03-05 Charleston Road Registry Inc.
+drive + +// dtv : 2015-06-04 Dish DBS Corporation +dtv + +// dubai : 2015-01-01 Dubai Smart Government Department +dubai + +// duck : 2015-07-23 Johnson Shareholdings, Inc. +duck + +// dunlop : 2015-07-02 The Goodyear Tire & Rubber Company +dunlop + +// duns : 2015-08-06 The Dun & Bradstreet Corporation +duns + +// dupont : 2015-06-25 E. I. du Pont de Nemours and Company +dupont + +// durban : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +durban + +// dvag : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +dvag + +// dvr : 2016-05-26 DISH Technologies L.L.C. +dvr + +// earth : 2014-12-04 Interlink Co., Ltd. +earth + +// eat : 2014-01-23 Charleston Road Registry Inc. +eat + +// eco : 2016-07-08 Big Room Inc. +eco + +// edeka : 2014-12-18 EDEKA Verband kaufmännischer Genossenschaften e.V. +edeka + +// education : 2013-11-07 Binky Moon, LLC +education + +// email : 2013-10-31 Binky Moon, LLC +email + +// emerck : 2014-04-03 Merck KGaA +emerck + +// energy : 2014-09-11 Binky Moon, LLC +energy + +// engineer : 2014-03-06 Dog Beach, LLC +engineer + +// engineering : 2014-03-06 Binky Moon, LLC +engineering + +// enterprises : 2013-09-20 Binky Moon, LLC +enterprises + +// epson : 2014-12-04 Seiko Epson Corporation +epson + +// equipment : 2013-08-27 Binky Moon, LLC +equipment + +// ericsson : 2015-07-09 Telefonaktiebolaget L M Ericsson +ericsson + +// erni : 2014-04-03 ERNI Group Holding AG +erni + +// esq : 2014-05-08 Charleston Road Registry Inc. +esq + +// estate : 2013-08-27 Binky Moon, LLC +estate + +// esurance : 2015-07-23 Esurance Insurance Company +esurance + +// etisalat : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +etisalat + +// eurovision : 2014-04-24 European Broadcasting Union (EBU) +eurovision + +// eus : 2013-12-12 Puntueus Fundazioa +eus + +// events : 2013-12-05 Binky Moon, LLC +events + +// everbank : 2014-05-15 EverBank +everbank + +// exchange : 2014-03-06 Binky Moon, LLC +exchange + +// expert : 2013-11-21 Binky Moon, LLC +expert + +// exposed : 2013-12-05 Binky Moon, LLC +exposed + +// express : 2015-02-11 Binky Moon, LLC +express + +// extraspace : 2015-05-14 Extra Space Storage LLC +extraspace + +// fage : 2014-12-18 Fage International S.A. +fage + +// fail : 2014-03-06 Binky Moon, LLC +fail + +// fairwinds : 2014-11-13 FairWinds Partners, LLC +fairwinds + +// faith : 2014-11-20 dot Faith Limited +faith + +// family : 2015-04-02 Dog Beach, LLC +family + +// fan : 2014-03-06 Dog Beach, LLC +fan + +// fans : 2014-11-07 Fans TLD Limited +fans + +// farm : 2013-11-07 Binky Moon, LLC +farm + +// farmers : 2015-07-09 Farmers Insurance Exchange +farmers + +// fashion : 2014-07-03 Minds + Machines Group Limited +fashion + +// fast : 2014-12-18 Amazon Registry Services, Inc. +fast + +// fedex : 2015-08-06 Federal Express Corporation +fedex + +// feedback : 2013-12-19 Top Level Spectrum, Inc. +feedback + +// ferrari : 2015-07-31 Fiat Chrysler Automobiles N.V. +ferrari + +// ferrero : 2014-12-18 Ferrero Trading Lux S.A. +ferrero + +// fiat : 2015-07-31 Fiat Chrysler Automobiles N.V. +fiat + +// fidelity : 2015-07-30 Fidelity Brokerage Services LLC +fidelity + +// fido : 2015-08-06 Rogers Communications Canada Inc.
+fido + +// film : 2015-01-08 Motion Picture Domain Registry Pty Ltd +film + +// final : 2014-10-16 Núcleo de Informação e Coordenação do Ponto BR - NIC.br +final + +// finance : 2014-03-20 Binky Moon, LLC +finance + +// financial : 2014-03-06 Binky Moon, LLC +financial + +// fire : 2015-06-25 Amazon Registry Services, Inc. +fire + +// firestone : 2014-12-18 Bridgestone Licensing Services, Inc +firestone + +// firmdale : 2014-03-27 Firmdale Holdings Limited +firmdale + +// fish : 2013-12-12 Binky Moon, LLC +fish + +// fishing : 2013-11-21 Minds + Machines Group Limited +fishing + +// fit : 2014-11-07 Minds + Machines Group Limited +fit + +// fitness : 2014-03-06 Binky Moon, LLC +fitness + +// flickr : 2015-04-02 Yahoo! Domain Services Inc. +flickr + +// flights : 2013-12-05 Binky Moon, LLC +flights + +// flir : 2015-07-23 FLIR Systems, Inc. +flir + +// florist : 2013-11-07 Binky Moon, LLC +florist + +// flowers : 2014-10-09 Uniregistry, Corp. +flowers + +// fly : 2014-05-08 Charleston Road Registry Inc. +fly + +// foo : 2014-01-23 Charleston Road Registry Inc. +foo + +// food : 2016-04-21 Lifestyle Domain Holdings, Inc. +food + +// foodnetwork : 2015-07-02 Lifestyle Domain Holdings, Inc. +foodnetwork + +// football : 2014-12-18 Binky Moon, LLC +football + +// ford : 2014-11-13 Ford Motor Company +ford + +// forex : 2014-12-11 Dotforex Registry Limited +forex + +// forsale : 2014-05-22 Dog Beach, LLC +forsale + +// forum : 2015-04-02 Fegistry, LLC +forum + +// foundation : 2013-12-05 Binky Moon, LLC +foundation + +// fox : 2015-09-11 FOX Registry, LLC +fox + +// free : 2015-12-10 Amazon Registry Services, Inc. +free + +// fresenius : 2015-07-30 Fresenius Immobilien-Verwaltungs-GmbH +fresenius + +// frl : 2014-05-15 FRLregistry B.V. +frl + +// frogans : 2013-12-19 OP3FT +frogans + +// frontdoor : 2015-07-02 Lifestyle Domain Holdings, Inc. +frontdoor + +// frontier : 2015-02-05 Frontier Communications Corporation +frontier + +// ftr : 2015-07-16 Frontier Communications Corporation +ftr + +// fujitsu : 2015-07-30 Fujitsu Limited +fujitsu + +// fujixerox : 2015-07-23 Xerox DNHC LLC +fujixerox + +// fun : 2016-01-14 DotSpace Inc. +fun + +// fund : 2014-03-20 Binky Moon, LLC +fund + +// furniture : 2014-03-20 Binky Moon, LLC +furniture + +// futbol : 2013-09-20 Dog Beach, LLC +futbol + +// fyi : 2015-04-02 Binky Moon, LLC +fyi + +// gal : 2013-11-07 Asociación puntoGAL +gal + +// gallery : 2013-09-13 Binky Moon, LLC +gallery + +// gallo : 2015-06-11 Gallo Vineyards, Inc. +gallo + +// gallup : 2015-02-19 Gallup, Inc. +gallup + +// game : 2015-05-28 Uniregistry, Corp. +game + +// games : 2015-05-28 Dog Beach, LLC +games + +// gap : 2015-07-31 The Gap, Inc. +gap + +// garden : 2014-06-26 Minds + Machines Group Limited +garden + +// gay : 2019-05-23 Top Level Design, LLC +gay + +// gbiz : 2014-07-17 Charleston Road Registry Inc. +gbiz + +// gdn : 2014-07-31 Joint Stock Company "Navigation-information systems" +gdn + +// gea : 2014-12-04 GEA Group Aktiengesellschaft +gea + +// gent : 2014-01-23 COMBELL NV +gent + +// genting : 2015-03-12 Resorts World Inc Pte. Ltd. +genting + +// george : 2015-07-31 Wal-Mart Stores, Inc. +george + +// ggee : 2014-01-09 GMO Internet, Inc. +ggee + +// gift : 2013-10-17 DotGift, LLC +gift + +// gifts : 2014-07-03 Binky Moon, LLC +gifts + +// gives : 2014-03-06 Dog Beach, LLC +gives + +// giving : 2014-11-13 Giving Limited +giving + +// glade : 2015-07-23 Johnson Shareholdings, Inc.
+glade + +// glass : 2013-11-07 Binky Moon, LLC +glass + +// gle : 2014-07-24 Charleston Road Registry Inc. +gle + +// global : 2014-04-17 Dot Global Domain Registry Limited +global + +// globo : 2013-12-19 Globo Comunicação e Participações S.A +globo + +// gmail : 2014-05-01 Charleston Road Registry Inc. +gmail + +// gmbh : 2016-01-29 Binky Moon, LLC +gmbh + +// gmo : 2014-01-09 GMO Internet Pte. Ltd. +gmo + +// gmx : 2014-04-24 1&1 Mail & Media GmbH +gmx + +// godaddy : 2015-07-23 Go Daddy East, LLC +godaddy + +// gold : 2015-01-22 Binky Moon, LLC +gold + +// goldpoint : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +goldpoint + +// golf : 2014-12-18 Binky Moon, LLC +golf + +// goo : 2014-12-18 NTT Resonant Inc. +goo + +// goodyear : 2015-07-02 The Goodyear Tire & Rubber Company +goodyear + +// goog : 2014-11-20 Charleston Road Registry Inc. +goog + +// google : 2014-07-24 Charleston Road Registry Inc. +google + +// gop : 2014-01-16 Republican State Leadership Committee, Inc. +gop + +// got : 2014-12-18 Amazon Registry Services, Inc. +got + +// grainger : 2015-05-07 Grainger Registry Services, LLC +grainger + +// graphics : 2013-09-13 Binky Moon, LLC +graphics + +// gratis : 2014-03-20 Binky Moon, LLC +gratis + +// green : 2014-05-08 Afilias Limited +green + +// gripe : 2014-03-06 Binky Moon, LLC +gripe + +// grocery : 2016-06-16 Wal-Mart Stores, Inc. +grocery + +// group : 2014-08-15 Binky Moon, LLC +group + +// guardian : 2015-07-30 The Guardian Life Insurance Company of America +guardian + +// gucci : 2014-11-13 Guccio Gucci S.p.a. +gucci + +// guge : 2014-08-28 Charleston Road Registry Inc. +guge + +// guide : 2013-09-13 Binky Moon, LLC +guide + +// guitars : 2013-11-14 Uniregistry, Corp. +guitars + +// guru : 2013-08-27 Binky Moon, LLC +guru + +// hair : 2015-12-03 L'Oréal +hair + +// hamburg : 2014-02-20 Hamburg Top-Level-Domain GmbH +hamburg + +// hangout : 2014-11-13 Charleston Road Registry Inc. +hangout + +// haus : 2013-12-05 Dog Beach, LLC +haus + +// hbo : 2015-07-30 HBO Registry Services, Inc. +hbo + +// hdfc : 2015-07-30 HOUSING DEVELOPMENT FINANCE CORPORATION LIMITED +hdfc + +// hdfcbank : 2015-02-12 HDFC Bank Limited +hdfcbank + +// health : 2015-02-11 DotHealth, LLC +health + +// healthcare : 2014-06-12 Binky Moon, LLC +healthcare + +// help : 2014-06-26 Uniregistry, Corp. +help + +// helsinki : 2015-02-05 City of Helsinki +helsinki + +// here : 2014-02-06 Charleston Road Registry Inc. +here + +// hermes : 2014-07-10 HERMES INTERNATIONAL +hermes + +// hgtv : 2015-07-02 Lifestyle Domain Holdings, Inc. +hgtv + +// hiphop : 2014-03-06 Uniregistry, Corp. +hiphop + +// hisamitsu : 2015-07-16 Hisamitsu Pharmaceutical Co.,Inc. +hisamitsu + +// hitachi : 2014-10-31 Hitachi, Ltd. +hitachi + +// hiv : 2014-03-13 Uniregistry, Corp. +hiv + +// hkt : 2015-05-14 PCCW-HKT DataCom Services Limited +hkt + +// hockey : 2015-03-19 Binky Moon, LLC +hockey + +// holdings : 2013-08-27 Binky Moon, LLC +holdings + +// holiday : 2013-11-07 Binky Moon, LLC +holiday + +// homedepot : 2015-04-02 Home Depot Product Authority, LLC +homedepot + +// homegoods : 2015-07-16 The TJX Companies, Inc. +homegoods + +// homes : 2014-01-09 DERHomes, LLC +homes + +// homesense : 2015-07-16 The TJX Companies, Inc. +homesense + +// honda : 2014-12-18 Honda Motor Co., Ltd. +honda + +// honeywell : 2015-07-23 Honeywell GTLD LLC +honeywell + +// horse : 2013-11-21 Minds + Machines Group Limited +horse + +// hospital : 2016-10-20 Binky Moon, LLC +hospital + +// host : 2014-04-17 DotHost Inc.
+host + +// hosting : 2014-05-29 Uniregistry, Corp. +hosting + +// hot : 2015-08-27 Amazon Registry Services, Inc. +hot + +// hoteles : 2015-03-05 Travel Reservations SRL +hoteles + +// hotels : 2016-04-07 Booking.com B.V. +hotels + +// hotmail : 2014-12-18 Microsoft Corporation +hotmail + +// house : 2013-11-07 Binky Moon, LLC +house + +// how : 2014-01-23 Charleston Road Registry Inc. +how + +// hsbc : 2014-10-24 HSBC Global Services (UK) Limited +hsbc + +// hughes : 2015-07-30 Hughes Satellite Systems Corporation +hughes + +// hyatt : 2015-07-30 Hyatt GTLD, L.L.C. +hyatt + +// hyundai : 2015-07-09 Hyundai Motor Company +hyundai + +// ibm : 2014-07-31 International Business Machines Corporation +ibm + +// icbc : 2015-02-19 Industrial and Commercial Bank of China Limited +icbc + +// ice : 2014-10-30 IntercontinentalExchange, Inc. +ice + +// icu : 2015-01-08 ShortDot SA +icu + +// ieee : 2015-07-23 IEEE Global LLC +ieee + +// ifm : 2014-01-30 ifm electronic gmbh +ifm + +// ikano : 2015-07-09 Ikano S.A. +ikano + +// imamat : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +imamat + +// imdb : 2015-06-25 Amazon Registry Services, Inc. +imdb + +// immo : 2014-07-10 Binky Moon, LLC +immo + +// immobilien : 2013-11-07 Dog Beach, LLC +immobilien + +// inc : 2018-03-10 Intercap Registry Inc. +inc + +// industries : 2013-12-05 Binky Moon, LLC +industries + +// infiniti : 2014-03-27 NISSAN MOTOR CO., LTD. +infiniti + +// ing : 2014-01-23 Charleston Road Registry Inc. +ing + +// ink : 2013-12-05 Top Level Design, LLC +ink + +// institute : 2013-11-07 Binky Moon, LLC +institute + +// insurance : 2015-02-19 fTLD Registry Services LLC +insurance + +// insure : 2014-03-20 Binky Moon, LLC +insure + +// intel : 2015-08-06 Intel Corporation +intel + +// international : 2013-11-07 Binky Moon, LLC +international + +// intuit : 2015-07-30 Intuit Administrative Services, Inc. +intuit + +// investments : 2014-03-20 Binky Moon, LLC +investments + +// ipiranga : 2014-08-28 Ipiranga Produtos de Petroleo S.A. +ipiranga + +// irish : 2014-08-07 Binky Moon, LLC +irish + +// iselect : 2015-02-11 iSelect Ltd +iselect + +// ismaili : 2015-08-06 Fondation Aga Khan (Aga Khan Foundation) +ismaili + +// ist : 2014-08-28 Istanbul Metropolitan Municipality +ist + +// istanbul : 2014-08-28 Istanbul Metropolitan Municipality +istanbul + +// itau : 2014-10-02 Itau Unibanco Holding S.A. +itau + +// itv : 2015-07-09 ITV Services Limited +itv + +// iveco : 2015-09-03 CNH Industrial N.V. +iveco + +// jaguar : 2014-11-13 Jaguar Land Rover Ltd +jaguar + +// java : 2014-06-19 Oracle Corporation +java + +// jcb : 2014-11-20 JCB Co., Ltd. +jcb + +// jcp : 2015-04-23 JCP Media, Inc. +jcp + +// jeep : 2015-07-30 FCA US LLC. +jeep + +// jetzt : 2014-01-09 Binky Moon, LLC +jetzt + +// jewelry : 2015-03-05 Binky Moon, LLC +jewelry + +// jio : 2015-04-02 Reliance Industries Limited +jio + +// jll : 2015-04-02 Jones Lang LaSalle Incorporated +jll + +// jmp : 2015-03-26 Matrix IP LLC +jmp + +// jnj : 2015-06-18 Johnson & Johnson Services, Inc. +jnj + +// joburg : 2014-03-24 ZA Central Registry NPC trading as ZA Central Registry +joburg + +// jot : 2014-12-18 Amazon Registry Services, Inc. +jot + +// joy : 2014-12-18 Amazon Registry Services, Inc. +joy + +// jpmorgan : 2015-04-30 JPMorgan Chase Bank, National Association +jpmorgan + +// jprs : 2014-09-18 Japan Registry Services Co., Ltd. +jprs + +// juegos : 2014-03-20 Uniregistry, Corp. +juegos + +// juniper : 2015-07-30 JUNIPER NETWORKS, INC. 
+juniper + +// kaufen : 2013-11-07 Dog Beach, LLC +kaufen + +// kddi : 2014-09-12 KDDI CORPORATION +kddi + +// kerryhotels : 2015-04-30 Kerry Trading Co. Limited +kerryhotels + +// kerrylogistics : 2015-04-09 Kerry Trading Co. Limited +kerrylogistics + +// kerryproperties : 2015-04-09 Kerry Trading Co. Limited +kerryproperties + +// kfh : 2014-12-04 Kuwait Finance House +kfh + +// kia : 2015-07-09 KIA MOTORS CORPORATION +kia + +// kim : 2013-09-23 Afilias Limited +kim + +// kinder : 2014-11-07 Ferrero Trading Lux S.A. +kinder + +// kindle : 2015-06-25 Amazon Registry Services, Inc. +kindle + +// kitchen : 2013-09-20 Binky Moon, LLC +kitchen + +// kiwi : 2013-09-20 DOT KIWI LIMITED +kiwi + +// koeln : 2014-01-09 dotKoeln GmbH +koeln + +// komatsu : 2015-01-08 Komatsu Ltd. +komatsu + +// kosher : 2015-08-20 Kosher Marketing Assets LLC +kosher + +// kpmg : 2015-04-23 KPMG International Cooperative (KPMG International Genossenschaft) +kpmg + +// kpn : 2015-01-08 Koninklijke KPN N.V. +kpn + +// krd : 2013-12-05 KRG Department of Information Technology +krd + +// kred : 2013-12-19 KredTLD Pty Ltd +kred + +// kuokgroup : 2015-04-09 Kerry Trading Co. Limited +kuokgroup + +// kyoto : 2014-11-07 Academic Institution: Kyoto Jyoho Gakuen +kyoto + +// lacaixa : 2014-01-09 Fundación Bancaria Caixa d’Estalvis i Pensions de Barcelona, “la Caixa” +lacaixa + +// ladbrokes : 2015-08-06 LADBROKES INTERNATIONAL PLC +ladbrokes + +// lamborghini : 2015-06-04 Automobili Lamborghini S.p.A. +lamborghini + +// lamer : 2015-10-01 The Estée Lauder Companies Inc. +lamer + +// lancaster : 2015-02-12 LANCASTER +lancaster + +// lancia : 2015-07-31 Fiat Chrysler Automobiles N.V. +lancia + +// lancome : 2015-07-23 L'Oréal +lancome + +// land : 2013-09-10 Binky Moon, LLC +land + +// landrover : 2014-11-13 Jaguar Land Rover Ltd +landrover + +// lanxess : 2015-07-30 LANXESS Corporation +lanxess + +// lasalle : 2015-04-02 Jones Lang LaSalle Incorporated +lasalle + +// lat : 2014-10-16 ECOM-LAC Federaciòn de Latinoamèrica y el Caribe para Internet y el Comercio Electrònico +lat + +// latino : 2015-07-30 Dish DBS Corporation +latino + +// latrobe : 2014-06-16 La Trobe University +latrobe + +// law : 2015-01-22 LW TLD Limited +law + +// lawyer : 2014-03-20 Dog Beach, LLC +lawyer + +// lds : 2014-03-20 IRI Domain Management, LLC ("Applicant") +lds + +// lease : 2014-03-06 Binky Moon, LLC +lease + +// leclerc : 2014-08-07 A.C.D. LEC Association des Centres Distributeurs Edouard Leclerc +leclerc + +// lefrak : 2015-07-16 LeFrak Organization, Inc. +lefrak + +// legal : 2014-10-16 Binky Moon, LLC +legal + +// lego : 2015-07-16 LEGO Juris A/S +lego + +// lexus : 2015-04-23 TOYOTA MOTOR CORPORATION +lexus + +// lgbt : 2014-05-08 Afilias Limited +lgbt + +// liaison : 2014-10-02 Liaison Technologies, Incorporated +liaison + +// lidl : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +lidl + +// life : 2014-02-06 Binky Moon, LLC +life + +// lifeinsurance : 2015-01-15 American Council of Life Insurers +lifeinsurance + +// lifestyle : 2014-12-11 Lifestyle Domain Holdings, Inc. +lifestyle + +// lighting : 2013-08-27 Binky Moon, LLC +lighting + +// like : 2014-12-18 Amazon Registry Services, Inc. +like + +// lilly : 2015-07-31 Eli Lilly and Company +lilly + +// limited : 2014-03-06 Binky Moon, LLC +limited + +// limo : 2013-10-17 Binky Moon, LLC +limo + +// lincoln : 2014-11-13 Ford Motor Company +lincoln + +// linde : 2014-12-04 Linde Aktiengesellschaft +linde + +// link : 2013-11-14 Uniregistry, Corp.
+link + +// lipsy : 2015-06-25 Lipsy Ltd +lipsy + +// live : 2014-12-04 Dog Beach, LLC +live + +// living : 2015-07-30 Lifestyle Domain Holdings, Inc. +living + +// lixil : 2015-03-19 LIXIL Group Corporation +lixil + +// llc : 2017-12-14 Afilias Limited +llc + +// loan : 2014-11-20 dot Loan Limited +loan + +// loans : 2014-03-20 Binky Moon, LLC +loans + +// locker : 2015-06-04 Dish DBS Corporation +locker + +// locus : 2015-06-25 Locus Analytics LLC +locus + +// loft : 2015-07-30 Annco, Inc. +loft + +// lol : 2015-01-30 Uniregistry, Corp. +lol + +// london : 2013-11-14 Dot London Domains Limited +london + +// lotte : 2014-11-07 Lotte Holdings Co., Ltd. +lotte + +// lotto : 2014-04-10 Afilias Limited +lotto + +// love : 2014-12-22 Merchant Law Group LLP +love + +// lpl : 2015-07-30 LPL Holdings, Inc. +lpl + +// lplfinancial : 2015-07-30 LPL Holdings, Inc. +lplfinancial + +// ltd : 2014-09-25 Binky Moon, LLC +ltd + +// ltda : 2014-04-17 InterNetX, Corp +ltda + +// lundbeck : 2015-08-06 H. Lundbeck A/S +lundbeck + +// lupin : 2014-11-07 LUPIN LIMITED +lupin + +// luxe : 2014-01-09 Minds + Machines Group Limited +luxe + +// luxury : 2013-10-17 Luxury Partners, LLC +luxury + +// macys : 2015-07-31 Macys, Inc. +macys + +// madrid : 2014-05-01 Comunidad de Madrid +madrid + +// maif : 2014-10-02 Mutuelle Assurance Instituteur France (MAIF) +maif + +// maison : 2013-12-05 Binky Moon, LLC +maison + +// makeup : 2015-01-15 L'Oréal +makeup + +// man : 2014-12-04 MAN SE +man + +// management : 2013-11-07 Binky Moon, LLC +management + +// mango : 2013-10-24 PUNTO FA S.L. +mango + +// map : 2016-06-09 Charleston Road Registry Inc. +map + +// market : 2014-03-06 Dog Beach, LLC +market + +// marketing : 2013-11-07 Binky Moon, LLC +marketing + +// markets : 2014-12-11 Dotmarkets Registry Limited +markets + +// marriott : 2014-10-09 Marriott Worldwide Corporation +marriott + +// marshalls : 2015-07-16 The TJX Companies, Inc. +marshalls + +// maserati : 2015-07-31 Fiat Chrysler Automobiles N.V. +maserati + +// mattel : 2015-08-06 Mattel Sites, Inc. +mattel + +// mba : 2015-04-02 Binky Moon, LLC +mba + +// mckinsey : 2015-07-31 McKinsey Holdings, Inc. +mckinsey + +// med : 2015-08-06 Medistry LLC +med + +// media : 2014-03-06 Binky Moon, LLC +media + +// meet : 2014-01-16 Charleston Road Registry Inc. +meet + +// melbourne : 2014-05-29 The Crown in right of the State of Victoria, represented by its Department of State Development, Business and Innovation +melbourne + +// meme : 2014-01-30 Charleston Road Registry Inc. +meme + +// memorial : 2014-10-16 Dog Beach, LLC +memorial + +// men : 2015-02-26 Exclusive Registry Limited +men + +// menu : 2013-09-11 Dot Menu Registry, LLC +menu + +// merckmsd : 2016-07-14 MSD Registry Holdings, Inc. +merckmsd + +// metlife : 2015-05-07 MetLife Services and Solutions, LLC +metlife + +// miami : 2013-12-19 Minds + Machines Group Limited +miami + +// microsoft : 2014-12-18 Microsoft Corporation +microsoft + +// mini : 2014-01-09 Bayerische Motoren Werke Aktiengesellschaft +mini + +// mint : 2015-07-30 Intuit Administrative Services, Inc. +mint + +// mit : 2015-07-02 Massachusetts Institute of Technology +mit + +// mitsubishi : 2015-07-23 Mitsubishi Corporation +mitsubishi + +// mlb : 2015-05-21 MLB Advanced Media DH, LLC +mlb + +// mls : 2015-04-23 The Canadian Real Estate Association +mls + +// mma : 2014-11-07 MMA IARD +mma + +// mobile : 2016-06-02 Dish DBS Corporation +mobile + +// mobily : 2014-12-18 GreenTech Consultancy Company W.L.L.
+mobily + +// moda : 2013-11-07 Dog Beach, LLC +moda + +// moe : 2013-11-13 Interlink Co., Ltd. +moe + +// moi : 2014-12-18 Amazon Registry Services, Inc. +moi + +// mom : 2015-04-16 Uniregistry, Corp. +mom + +// monash : 2013-09-30 Monash University +monash + +// money : 2014-10-16 Binky Moon, LLC +money + +// monster : 2015-09-11 XYZ.COM LLC +monster + +// mopar : 2015-07-30 FCA US LLC. +mopar + +// mormon : 2013-12-05 IRI Domain Management, LLC ("Applicant") +mormon + +// mortgage : 2014-03-20 Dog Beach, LLC +mortgage + +// moscow : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +moscow + +// moto : 2015-06-04 Motorola Trademark Holdings, LLC +moto + +// motorcycles : 2014-01-09 DERMotorcycles, LLC +motorcycles + +// mov : 2014-01-30 Charleston Road Registry Inc. +mov + +// movie : 2015-02-05 Binky Moon, LLC +movie + +// movistar : 2014-10-16 Telefónica S.A. +movistar + +// msd : 2015-07-23 MSD Registry Holdings, Inc. +msd + +// mtn : 2014-12-04 MTN Dubai Limited +mtn + +// mtr : 2015-03-12 MTR Corporation Limited +mtr + +// mutual : 2015-04-02 Northwestern Mutual MU TLD Registry, LLC +mutual + +// nab : 2015-08-20 National Australia Bank Limited +nab + +// nadex : 2014-12-11 Nadex Domains, Inc. +nadex + +// nagoya : 2013-10-24 GMO Registry, Inc. +nagoya + +// nationwide : 2015-07-23 Nationwide Mutual Insurance Company +nationwide + +// natura : 2015-03-12 NATURA COSMÉTICOS S.A. +natura + +// navy : 2014-03-06 Dog Beach, LLC +navy + +// nba : 2015-07-31 NBA REGISTRY, LLC +nba + +// nec : 2015-01-08 NEC Corporation +nec + +// netbank : 2014-06-26 COMMONWEALTH BANK OF AUSTRALIA +netbank + +// netflix : 2015-06-18 Netflix, Inc. +netflix + +// network : 2013-11-14 Binky Moon, LLC +network + +// neustar : 2013-12-05 Registry Services, LLC +neustar + +// new : 2014-01-30 Charleston Road Registry Inc. +new + +// newholland : 2015-09-03 CNH Industrial N.V. +newholland + +// news : 2014-12-18 Dog Beach, LLC +news + +// next : 2015-06-18 Next plc +next + +// nextdirect : 2015-06-18 Next plc +nextdirect + +// nexus : 2014-07-24 Charleston Road Registry Inc. +nexus + +// nfl : 2015-07-23 NFL Reg Ops LLC +nfl + +// ngo : 2014-03-06 Public Interest Registry +ngo + +// nhk : 2014-02-13 Japan Broadcasting Corporation (NHK) +nhk + +// nico : 2014-12-04 DWANGO Co., Ltd. +nico + +// nike : 2015-07-23 NIKE, Inc. +nike + +// nikon : 2015-05-21 NIKON CORPORATION +nikon + +// ninja : 2013-11-07 Dog Beach, LLC +ninja + +// nissan : 2014-03-27 NISSAN MOTOR CO., LTD. +nissan + +// nissay : 2015-10-29 Nippon Life Insurance Company +nissay + +// nokia : 2015-01-08 Nokia Corporation +nokia + +// northwesternmutual : 2015-06-18 Northwestern Mutual Registry, LLC +northwesternmutual + +// norton : 2014-12-04 Symantec Corporation +norton + +// now : 2015-06-25 Amazon Registry Services, Inc. +now + +// nowruz : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +nowruz + +// nowtv : 2015-05-14 Starbucks (HK) Limited +nowtv + +// nra : 2014-05-22 NRA Holdings Company, INC. +nra + +// nrw : 2013-11-21 Minds + Machines GmbH +nrw + +// ntt : 2014-10-31 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +ntt + +// nyc : 2014-01-23 The City of New York by and through the New York City Department of Information Technology & Telecommunications +nyc + +// obi : 2014-09-25 OBI Group Holding SE & Co. KGaA +obi + +// observer : 2015-04-30 Top Level Spectrum, Inc. +observer + +// off : 2015-07-23 Johnson Shareholdings, Inc.
+off + +// office : 2015-03-12 Microsoft Corporation +office + +// okinawa : 2013-12-05 BRregistry, Inc. +okinawa + +// olayan : 2015-05-14 Crescent Holding GmbH +olayan + +// olayangroup : 2015-05-14 Crescent Holding GmbH +olayangroup + +// oldnavy : 2015-07-31 The Gap, Inc. +oldnavy + +// ollo : 2015-06-04 Dish DBS Corporation +ollo + +// omega : 2015-01-08 The Swatch Group Ltd +omega + +// one : 2014-11-07 One.com A/S +one + +// ong : 2014-03-06 Public Interest Registry +ong + +// onl : 2013-09-16 I-Registry Ltd. +onl + +// online : 2015-01-15 DotOnline Inc. +online + +// onyourside : 2015-07-23 Nationwide Mutual Insurance Company +onyourside + +// ooo : 2014-01-09 INFIBEAM AVENUES LIMITED +ooo + +// open : 2015-07-31 American Express Travel Related Services Company, Inc. +open + +// oracle : 2014-06-19 Oracle Corporation +oracle + +// orange : 2015-03-12 Orange Brand Services Limited +orange + +// organic : 2014-03-27 Afilias Limited +organic + +// origins : 2015-10-01 The Estée Lauder Companies Inc. +origins + +// osaka : 2014-09-04 Osaka Registry Co., Ltd. +osaka + +// otsuka : 2013-10-11 Otsuka Holdings Co., Ltd. +otsuka + +// ott : 2015-06-04 Dish DBS Corporation +ott + +// ovh : 2014-01-16 MédiaBC +ovh + +// page : 2014-12-04 Charleston Road Registry Inc. +page + +// panasonic : 2015-07-30 Panasonic Corporation +panasonic + +// paris : 2014-01-30 City of Paris +paris + +// pars : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +pars + +// partners : 2013-12-05 Binky Moon, LLC +partners + +// parts : 2013-12-05 Binky Moon, LLC +parts + +// party : 2014-09-11 Blue Sky Registry Limited +party + +// passagens : 2015-03-05 Travel Reservations SRL +passagens + +// pay : 2015-08-27 Amazon Registry Services, Inc. +pay + +// pccw : 2015-05-14 PCCW Enterprises Limited +pccw + +// pet : 2015-05-07 Afilias Limited +pet + +// pfizer : 2015-09-11 Pfizer Inc. +pfizer + +// pharmacy : 2014-06-19 National Association of Boards of Pharmacy +pharmacy + +// phd : 2016-07-28 Charleston Road Registry Inc. +phd + +// philips : 2014-11-07 Koninklijke Philips N.V. +philips + +// phone : 2016-06-02 Dish DBS Corporation +phone + +// photo : 2013-11-14 Uniregistry, Corp. +photo + +// photography : 2013-09-20 Binky Moon, LLC +photography + +// photos : 2013-10-17 Binky Moon, LLC +photos + +// physio : 2014-05-01 PhysBiz Pty Ltd +physio + +// piaget : 2014-10-16 Richemont DNS Inc. +piaget + +// pics : 2013-11-14 Uniregistry, Corp. +pics + +// pictet : 2014-06-26 Pictet Europe S.A. +pictet + +// pictures : 2014-03-06 Binky Moon, LLC +pictures + +// pid : 2015-01-08 Top Level Spectrum, Inc. +pid + +// pin : 2014-12-18 Amazon Registry Services, Inc. +pin + +// ping : 2015-06-11 Ping Registry Provider, Inc. +ping + +// pink : 2013-10-01 Afilias Limited +pink + +// pioneer : 2015-07-16 Pioneer Corporation +pioneer + +// pizza : 2014-06-26 Binky Moon, LLC +pizza + +// place : 2014-04-24 Binky Moon, LLC +place + +// play : 2015-03-05 Charleston Road Registry Inc. +play + +// playstation : 2015-07-02 Sony Interactive Entertainment Inc. +playstation + +// plumbing : 2013-09-10 Binky Moon, LLC +plumbing + +// plus : 2015-02-05 Binky Moon, LLC +plus + +// pnc : 2015-07-02 PNC Domain Co., LLC +pnc + +// pohl : 2014-06-23 Deutsche Vermögensberatung Aktiengesellschaft DVAG +pohl + +// poker : 2014-07-03 Afilias Limited +poker + +// politie : 2015-08-20 Politie Nederland +politie + +// porn : 2014-10-16 ICM Registry PN LLC +porn + +// pramerica : 2015-07-30 Prudential Financial, Inc.
+pramerica + +// praxi : 2013-12-05 Praxi S.p.A. +praxi + +// press : 2014-04-03 DotPress Inc. +press + +// prime : 2015-06-25 Amazon Registry Services, Inc. +prime + +// prod : 2014-01-23 Charleston Road Registry Inc. +prod + +// productions : 2013-12-05 Binky Moon, LLC +productions + +// prof : 2014-07-24 Charleston Road Registry Inc. +prof + +// progressive : 2015-07-23 Progressive Casualty Insurance Company +progressive + +// promo : 2014-12-18 Afilias Limited +promo + +// properties : 2013-12-05 Binky Moon, LLC +properties + +// property : 2014-05-22 Uniregistry, Corp. +property + +// protection : 2015-04-23 XYZ.COM LLC +protection + +// pru : 2015-07-30 Prudential Financial, Inc. +pru + +// prudential : 2015-07-30 Prudential Financial, Inc. +prudential + +// pub : 2013-12-12 Dog Beach, LLC +pub + +// pwc : 2015-10-29 PricewaterhouseCoopers LLP +pwc + +// qpon : 2013-11-14 dotCOOL, Inc. +qpon + +// quebec : 2013-12-19 PointQuébec Inc +quebec + +// quest : 2015-03-26 Quest ION Limited +quest + +// qvc : 2015-07-30 QVC, Inc. +qvc + +// racing : 2014-12-04 Premier Registry Limited +racing + +// radio : 2016-07-21 European Broadcasting Union (EBU) +radio + +// raid : 2015-07-23 Johnson Shareholdings, Inc. +raid + +// read : 2014-12-18 Amazon Registry Services, Inc. +read + +// realestate : 2015-09-11 dotRealEstate LLC +realestate + +// realtor : 2014-05-29 Real Estate Domains LLC +realtor + +// realty : 2015-03-19 Fegistry, LLC +realty + +// recipes : 2013-10-17 Binky Moon, LLC +recipes + +// red : 2013-11-07 Afilias Limited +red + +// redstone : 2014-10-31 Redstone Haute Couture Co., Ltd. +redstone + +// redumbrella : 2015-03-26 Travelers TLD, LLC +redumbrella + +// rehab : 2014-03-06 Dog Beach, LLC +rehab + +// reise : 2014-03-13 Binky Moon, LLC +reise + +// reisen : 2014-03-06 Binky Moon, LLC +reisen + +// reit : 2014-09-04 National Association of Real Estate Investment Trusts, Inc. +reit + +// reliance : 2015-04-02 Reliance Industries Limited +reliance + +// ren : 2013-12-12 Beijing Qianxiang Wangjing Technology Development Co., Ltd. +ren + +// rent : 2014-12-04 XYZ.COM LLC +rent + +// rentals : 2013-12-05 Binky Moon, LLC +rentals + +// repair : 2013-11-07 Binky Moon, LLC +repair + +// report : 2013-12-05 Binky Moon, LLC +report + +// republican : 2014-03-20 Dog Beach, LLC +republican + +// rest : 2013-12-19 Punto 2012 Sociedad Anonima Promotora de Inversion de Capital Variable +rest + +// restaurant : 2014-07-03 Binky Moon, LLC +restaurant + +// review : 2014-11-20 dot Review Limited +review + +// reviews : 2013-09-13 Dog Beach, LLC +reviews + +// rexroth : 2015-06-18 Robert Bosch GMBH +rexroth + +// rich : 2013-11-21 I-Registry Ltd. +rich + +// richardli : 2015-05-14 Pacific Century Asset Management (HK) Limited +richardli + +// ricoh : 2014-11-20 Ricoh Company, Ltd. +ricoh + +// rightathome : 2015-07-23 Johnson Shareholdings, Inc. +rightathome + +// ril : 2015-04-02 Reliance Industries Limited +ril + +// rio : 2014-02-27 Empresa Municipal de Informática SA - IPLANRIO +rio + +// rip : 2014-07-10 Dog Beach, LLC +rip + +// rmit : 2015-11-19 Royal Melbourne Institute of Technology +rmit + +// rocher : 2014-12-18 Ferrero Trading Lux S.A. +rocher + +// rocks : 2013-11-14 Dog Beach, LLC +rocks + +// rodeo : 2013-12-19 Minds + Machines Group Limited +rodeo + +// rogers : 2015-08-06 Rogers Communications Canada Inc. +rogers + +// room : 2014-12-18 Amazon Registry Services, Inc. +room + +// rsvp : 2014-05-08 Charleston Road Registry Inc.
+rsvp + +// rugby : 2016-12-15 World Rugby Strategic Developments Limited +rugby + +// ruhr : 2013-10-02 regiodot GmbH & Co. KG +ruhr + +// run : 2015-03-19 Binky Moon, LLC +run + +// rwe : 2015-04-02 RWE AG +rwe + +// ryukyu : 2014-01-09 BRregistry, Inc. +ryukyu + +// saarland : 2013-12-12 dotSaarland GmbH +saarland + +// safe : 2014-12-18 Amazon Registry Services, Inc. +safe + +// safety : 2015-01-08 Safety Registry Services, LLC. +safety + +// sakura : 2014-12-18 SAKURA Internet Inc. +sakura + +// sale : 2014-10-16 Dog Beach, LLC +sale + +// salon : 2014-12-11 Binky Moon, LLC +salon + +// samsclub : 2015-07-31 Wal-Mart Stores, Inc. +samsclub + +// samsung : 2014-04-03 SAMSUNG SDS CO., LTD +samsung + +// sandvik : 2014-11-13 Sandvik AB +sandvik + +// sandvikcoromant : 2014-11-07 Sandvik AB +sandvikcoromant + +// sanofi : 2014-10-09 Sanofi +sanofi + +// sap : 2014-03-27 SAP AG +sap + +// sarl : 2014-07-03 Binky Moon, LLC +sarl + +// sas : 2015-04-02 Research IP LLC +sas + +// save : 2015-06-25 Amazon Registry Services, Inc. +save + +// saxo : 2014-10-31 Saxo Bank A/S +saxo + +// sbi : 2015-03-12 STATE BANK OF INDIA +sbi + +// sbs : 2014-11-07 SPECIAL BROADCASTING SERVICE CORPORATION +sbs + +// sca : 2014-03-13 SVENSKA CELLULOSA AKTIEBOLAGET SCA (publ) +sca + +// scb : 2014-02-20 The Siam Commercial Bank Public Company Limited ("SCB") +scb + +// schaeffler : 2015-08-06 Schaeffler Technologies AG & Co. KG +schaeffler + +// schmidt : 2014-04-03 SCHMIDT GROUPE S.A.S. +schmidt + +// scholarships : 2014-04-24 Scholarships.com, LLC +scholarships + +// school : 2014-12-18 Binky Moon, LLC +school + +// schule : 2014-03-06 Binky Moon, LLC +schule + +// schwarz : 2014-09-18 Schwarz Domains und Services GmbH & Co. KG +schwarz + +// science : 2014-09-11 dot Science Limited +science + +// scjohnson : 2015-07-23 Johnson Shareholdings, Inc. +scjohnson + +// scor : 2014-10-31 SCOR SE +scor + +// scot : 2014-01-23 Dot Scot Registry Limited +scot + +// search : 2016-06-09 Charleston Road Registry Inc. +search + +// seat : 2014-05-22 SEAT, S.A. (Sociedad Unipersonal) +seat + +// secure : 2015-08-27 Amazon Registry Services, Inc. +secure + +// security : 2015-05-14 XYZ.COM LLC +security + +// seek : 2014-12-04 Seek Limited +seek + +// select : 2015-10-08 iSelect Ltd +select + +// sener : 2014-10-24 Sener Ingeniería y Sistemas, S.A. +sener + +// services : 2014-02-27 Binky Moon, LLC +services + +// ses : 2015-07-23 SES +ses + +// seven : 2015-08-06 Seven West Media Ltd +seven + +// sew : 2014-07-17 SEW-EURODRIVE GmbH & Co KG +sew + +// sex : 2014-11-13 ICM Registry SX LLC +sex + +// sexy : 2013-09-11 Uniregistry, Corp. +sexy + +// sfr : 2015-08-13 Societe Francaise du Radiotelephone - SFR +sfr + +// shangrila : 2015-09-03 Shangri‐La International Hotel Management Limited +shangrila + +// sharp : 2014-05-01 Sharp Corporation +sharp + +// shaw : 2015-04-23 Shaw Cablesystems G.P. +shaw + +// shell : 2015-07-30 Shell Information Technology International Inc +shell + +// shia : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +shia + +// shiksha : 2013-11-14 Afilias Limited +shiksha + +// shoes : 2013-10-02 Binky Moon, LLC +shoes + +// shop : 2016-04-08 GMO Registry, Inc. +shop + +// shopping : 2016-03-31 Binky Moon, LLC +shopping + +// shouji : 2015-01-08 QIHOO 360 TECHNOLOGY CO. LTD. +shouji + +// show : 2015-03-05 Binky Moon, LLC +show + +// showtime : 2015-08-06 CBS Domains Inc. +showtime + +// shriram : 2014-01-23 Shriram Capital Ltd. 
+shriram + +// silk : 2015-06-25 Amazon Registry Services, Inc. +silk + +// sina : 2015-03-12 Sina Corporation +sina + +// singles : 2013-08-27 Binky Moon, LLC +singles + +// site : 2015-01-15 DotSite Inc. +site + +// ski : 2015-04-09 Afilias Limited +ski + +// skin : 2015-01-15 L'OrÊal +skin + +// sky : 2014-06-19 Sky International AG +sky + +// skype : 2014-12-18 Microsoft Corporation +skype + +// sling : 2015-07-30 DISH Technologies L.L.C. +sling + +// smart : 2015-07-09 Smart Communications, Inc. (SMART) +smart + +// smile : 2014-12-18 Amazon Registry Services, Inc. +smile + +// sncf : 2015-02-19 SociÊtÊ Nationale des Chemins de fer Francais S N C F +sncf + +// soccer : 2015-03-26 Binky Moon, LLC +soccer + +// social : 2013-11-07 Dog Beach, LLC +social + +// softbank : 2015-07-02 SoftBank Group Corp. +softbank + +// software : 2014-03-20 Dog Beach, LLC +software + +// sohu : 2013-12-19 Sohu.com Limited +sohu + +// solar : 2013-11-07 Binky Moon, LLC +solar + +// solutions : 2013-11-07 Binky Moon, LLC +solutions + +// song : 2015-02-26 Amazon Registry Services, Inc. +song + +// sony : 2015-01-08 Sony Corporation +sony + +// soy : 2014-01-23 Charleston Road Registry Inc. +soy + +// space : 2014-04-03 DotSpace Inc. +space + +// sport : 2017-11-16 Global Association of International Sports Federations (GAISF) +sport + +// spot : 2015-02-26 Amazon Registry Services, Inc. +spot + +// spreadbetting : 2014-12-11 Dotspreadbetting Registry Limited +spreadbetting + +// srl : 2015-05-07 InterNetX, Corp +srl + +// srt : 2015-07-30 FCA US LLC. +srt + +// stada : 2014-11-13 STADA Arzneimittel AG +stada + +// staples : 2015-07-30 Staples, Inc. +staples + +// star : 2015-01-08 Star India Private Limited +star + +// starhub : 2015-02-05 StarHub Ltd +starhub + +// statebank : 2015-03-12 STATE BANK OF INDIA +statebank + +// statefarm : 2015-07-30 State Farm Mutual Automobile Insurance Company +statefarm + +// stc : 2014-10-09 Saudi Telecom Company +stc + +// stcgroup : 2014-10-09 Saudi Telecom Company +stcgroup + +// stockholm : 2014-12-18 Stockholms kommun +stockholm + +// storage : 2014-12-22 XYZ.COM LLC +storage + +// store : 2015-04-09 DotStore Inc. +store + +// stream : 2016-01-08 dot Stream Limited +stream + +// studio : 2015-02-11 Dog Beach, LLC +studio + +// study : 2014-12-11 OPEN UNIVERSITIES AUSTRALIA PTY LTD +study + +// style : 2014-12-04 Binky Moon, LLC +style + +// sucks : 2014-12-22 Vox Populi Registry Ltd. +sucks + +// supplies : 2013-12-19 Binky Moon, LLC +supplies + +// supply : 2013-12-19 Binky Moon, LLC +supply + +// support : 2013-10-24 Binky Moon, LLC +support + +// surf : 2014-01-09 Minds + Machines Group Limited +surf + +// surgery : 2014-03-20 Binky Moon, LLC +surgery + +// suzuki : 2014-02-20 SUZUKI MOTOR CORPORATION +suzuki + +// swatch : 2015-01-08 The Swatch Group Ltd +swatch + +// swiftcover : 2015-07-23 Swiftcover Insurance Services Limited +swiftcover + +// swiss : 2014-10-16 Swiss Confederation +swiss + +// sydney : 2014-09-18 State of New South Wales, Department of Premier and Cabinet +sydney + +// symantec : 2014-12-04 Symantec Corporation +symantec + +// systems : 2013-11-07 Binky Moon, LLC +systems + +// tab : 2014-12-04 Tabcorp Holdings Limited +tab + +// taipei : 2014-07-10 Taipei City Government +taipei + +// talk : 2015-04-09 Amazon Registry Services, Inc. 
+talk + +// taobao : 2015-01-15 Alibaba Group Holding Limited +taobao + +// target : 2015-07-31 Target Domain Holdings, LLC +target + +// tatamotors : 2015-03-12 Tata Motors Ltd +tatamotors + +// tatar : 2014-04-24 Limited Liability Company "Coordination Center of Regional Domain of Tatarstan Republic" +tatar + +// tattoo : 2013-08-30 Uniregistry, Corp. +tattoo + +// tax : 2014-03-20 Binky Moon, LLC +tax + +// taxi : 2015-03-19 Binky Moon, LLC +taxi + +// tci : 2014-09-12 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +tci + +// tdk : 2015-06-11 TDK Corporation +tdk + +// team : 2015-03-05 Binky Moon, LLC +team + +// tech : 2015-01-30 Personals TLD Inc. +tech + +// technology : 2013-09-13 Binky Moon, LLC +technology + +// telefonica : 2014-10-16 TelefÃŗnica S.A. +telefonica + +// temasek : 2014-08-07 Temasek Holdings (Private) Limited +temasek + +// tennis : 2014-12-04 Binky Moon, LLC +tennis + +// teva : 2015-07-02 Teva Pharmaceutical Industries Limited +teva + +// thd : 2015-04-02 Home Depot Product Authority, LLC +thd + +// theater : 2015-03-19 Binky Moon, LLC +theater + +// theatre : 2015-05-07 XYZ.COM LLC +theatre + +// tiaa : 2015-07-23 Teachers Insurance and Annuity Association of America +tiaa + +// tickets : 2015-02-05 Accent Media Limited +tickets + +// tienda : 2013-11-14 Binky Moon, LLC +tienda + +// tiffany : 2015-01-30 Tiffany and Company +tiffany + +// tips : 2013-09-20 Binky Moon, LLC +tips + +// tires : 2014-11-07 Binky Moon, LLC +tires + +// tirol : 2014-04-24 punkt Tirol GmbH +tirol + +// tjmaxx : 2015-07-16 The TJX Companies, Inc. +tjmaxx + +// tjx : 2015-07-16 The TJX Companies, Inc. +tjx + +// tkmaxx : 2015-07-16 The TJX Companies, Inc. +tkmaxx + +// tmall : 2015-01-15 Alibaba Group Holding Limited +tmall + +// today : 2013-09-20 Binky Moon, LLC +today + +// tokyo : 2013-11-13 GMO Registry, Inc. +tokyo + +// tools : 2013-11-21 Binky Moon, LLC +tools + +// top : 2014-03-20 .TOP Registry +top + +// toray : 2014-12-18 Toray Industries, Inc. +toray + +// toshiba : 2014-04-10 TOSHIBA Corporation +toshiba + +// total : 2015-08-06 Total SA +total + +// tours : 2015-01-22 Binky Moon, LLC +tours + +// town : 2014-03-06 Binky Moon, LLC +town + +// toyota : 2015-04-23 TOYOTA MOTOR CORPORATION +toyota + +// toys : 2014-03-06 Binky Moon, LLC +toys + +// trade : 2014-01-23 Elite Registry Limited +trade + +// trading : 2014-12-11 Dottrading Registry Limited +trading + +// training : 2013-11-07 Binky Moon, LLC +training + +// travel : Dog Beach, LLC +travel + +// travelchannel : 2015-07-02 Lifestyle Domain Holdings, Inc. +travelchannel + +// travelers : 2015-03-26 Travelers TLD, LLC +travelers + +// travelersinsurance : 2015-03-26 Travelers TLD, LLC +travelersinsurance + +// trust : 2014-10-16 NCC Group Inc. +trust + +// trv : 2015-03-26 Travelers TLD, LLC +trv + +// tube : 2015-06-11 Latin American Telecom LLC +tube + +// tui : 2014-07-03 TUI AG +tui + +// tunes : 2015-02-26 Amazon Registry Services, Inc. +tunes + +// tushu : 2014-12-18 Amazon Registry Services, Inc. +tushu + +// tvs : 2015-02-19 T V SUNDRAM IYENGAR & SONS LIMITED +tvs + +// ubank : 2015-08-20 National Australia Bank Limited +ubank + +// ubs : 2014-12-11 UBS AG +ubs + +// uconnect : 2015-07-30 FCA US LLC. +uconnect + +// unicom : 2015-10-15 China United Network Communications Corporation Limited +unicom + +// university : 2014-03-06 Binky Moon, LLC +university + +// uno : 2013-09-11 Dot Latin LLC +uno + +// uol : 2014-05-01 UBN INTERNET LTDA. +uol + +// ups : 2015-06-25 UPS Market Driver, Inc. 
+ups + +// vacations : 2013-12-05 Binky Moon, LLC +vacations + +// vana : 2014-12-11 Lifestyle Domain Holdings, Inc. +vana + +// vanguard : 2015-09-03 The Vanguard Group, Inc. +vanguard + +// vegas : 2014-01-16 Dot Vegas, Inc. +vegas + +// ventures : 2013-08-27 Binky Moon, LLC +ventures + +// verisign : 2015-08-13 VeriSign, Inc. +verisign + +// versicherung : 2014-03-20 tldbox GmbH +versicherung + +// vet : 2014-03-06 Dog Beach, LLC +vet + +// viajes : 2013-10-17 Binky Moon, LLC +viajes + +// video : 2014-10-16 Dog Beach, LLC +video + +// vig : 2015-05-14 VIENNA INSURANCE GROUP AG Wiener Versicherung Gruppe +vig + +// viking : 2015-04-02 Viking River Cruises (Bermuda) Ltd. +viking + +// villas : 2013-12-05 Binky Moon, LLC +villas + +// vin : 2015-06-18 Binky Moon, LLC +vin + +// vip : 2015-01-22 Minds + Machines Group Limited +vip + +// virgin : 2014-09-25 Virgin Enterprises Limited +virgin + +// visa : 2015-07-30 Visa Worldwide Pte. Limited +visa + +// vision : 2013-12-05 Binky Moon, LLC +vision + +// vistaprint : 2014-09-18 Vistaprint Limited +vistaprint + +// viva : 2014-11-07 Saudi Telecom Company +viva + +// vivo : 2015-07-31 Telefonica Brasil S.A. +vivo + +// vlaanderen : 2014-02-06 DNS.be vzw +vlaanderen + +// vodka : 2013-12-19 Minds + Machines Group Limited +vodka + +// volkswagen : 2015-05-14 Volkswagen Group of America Inc. +volkswagen + +// volvo : 2015-11-12 Volvo Holding Sverige Aktiebolag +volvo + +// vote : 2013-11-21 Monolith Registry LLC +vote + +// voting : 2013-11-13 Valuetainment Corp. +voting + +// voto : 2013-11-21 Monolith Registry LLC +voto + +// voyage : 2013-08-27 Binky Moon, LLC +voyage + +// vuelos : 2015-03-05 Travel Reservations SRL +vuelos + +// wales : 2014-05-08 Nominet UK +wales + +// walmart : 2015-07-31 Wal-Mart Stores, Inc. +walmart + +// walter : 2014-11-13 Sandvik AB +walter + +// wang : 2013-10-24 Zodiac Wang Limited +wang + +// wanggou : 2014-12-18 Amazon Registry Services, Inc. +wanggou + +// warman : 2015-06-18 Weir Group IP Limited +warman + +// watch : 2013-11-14 Binky Moon, LLC +watch + +// watches : 2014-12-22 Richemont DNS Inc. +watches + +// weather : 2015-01-08 International Business Machines Corporation +weather + +// weatherchannel : 2015-03-12 International Business Machines Corporation +weatherchannel + +// webcam : 2014-01-23 dot Webcam Limited +webcam + +// weber : 2015-06-04 Saint-Gobain Weber SA +weber + +// website : 2014-04-03 DotWebsite Inc. +website + +// wed : 2013-10-01 Atgron, Inc. +wed + +// wedding : 2014-04-24 Minds + Machines Group Limited +wedding + +// weibo : 2015-03-05 Sina Corporation +weibo + +// weir : 2015-01-29 Weir Group IP Limited +weir + +// whoswho : 2014-02-20 Who's Who Registry +whoswho + +// wien : 2013-10-28 punkt.wien GmbH +wien + +// wiki : 2013-11-07 Top Level Design, LLC +wiki + +// williamhill : 2014-03-13 William Hill Organization Limited +williamhill + +// win : 2014-11-20 First Registry Limited +win + +// windows : 2014-12-18 Microsoft Corporation +windows + +// wine : 2015-06-18 Binky Moon, LLC +wine + +// winners : 2015-07-16 The TJX Companies, Inc. +winners + +// wme : 2014-02-13 William Morris Endeavor Entertainment, LLC +wme + +// wolterskluwer : 2015-08-06 Wolters Kluwer N.V. +wolterskluwer + +// woodside : 2015-07-09 Woodside Petroleum Limited +woodside + +// work : 2013-12-19 Minds + Machines Group Limited +work + +// works : 2013-11-14 Binky Moon, LLC +works + +// world : 2014-06-12 Binky Moon, LLC +world + +// wow : 2015-10-08 Amazon Registry Services, Inc. 
+wow + +// wtc : 2013-12-19 World Trade Centers Association, Inc. +wtc + +// wtf : 2014-03-06 Binky Moon, LLC +wtf + +// xbox : 2014-12-18 Microsoft Corporation +xbox + +// xerox : 2014-10-24 Xerox DNHC LLC +xerox + +// xfinity : 2015-07-09 Comcast IP Holdings I, LLC +xfinity + +// xihuan : 2015-01-08 QIHOO 360 TECHNOLOGY CO. LTD. +xihuan + +// xin : 2014-12-11 Elegant Leader Limited +xin + +// xn--11b4c3d : 2015-01-15 VeriSign Sarl +ā¤•āĨ‰ā¤Ž + +// xn--1ck2e1b : 2015-02-26 Amazon Registry Services, Inc. +ã‚ģãƒŧãƒĢ + +// xn--1qqw23a : 2014-01-09 Guangzhou YU Wei Information Technology Co., Ltd. +äŊ›åąą + +// xn--30rr7y : 2014-06-12 Excellent First Limited +慈善 + +// xn--3bst00m : 2013-09-13 Eagle Horizon Limited +集å›ĸ + +// xn--3ds443g : 2013-09-08 TLD REGISTRY LIMITED +在įēŋ + +// xn--3oq18vl8pn36a : 2015-07-02 Volkswagen (China) Investment Co., Ltd. +大äŧ—æąŊčŊĻ + +// xn--3pxu8k : 2015-01-15 VeriSign Sarl +į‚šįœ‹ + +// xn--42c2d9a : 2015-01-15 VeriSign Sarl +ā¸„ā¸­ā¸Ą + +// xn--45q11c : 2013-11-21 Zodiac Gemini Ltd +å…ĢåĻ + +// xn--4gbrim : 2013-10-04 Suhub Electronic Establishment +Ų…ŲˆŲ‚Øš + +// xn--55qw42g : 2013-11-08 China Organizational Name Administration Center +å…Ŧį›Š + +// xn--55qx5d : 2013-11-14 China Internet Network Information Center (CNNIC) +å…Ŧ司 + +// xn--5su34j936bgsg : 2015-09-03 Shangri‐La International Hotel Management Limited +éĻ™æ ŧ里拉 + +// xn--5tzm5g : 2014-12-22 Global Website TLD Asia Limited +įŊ‘įĢ™ + +// xn--6frz82g : 2013-09-23 Afilias Limited +į§ģ动 + +// xn--6qq986b3xl : 2013-09-13 Tycoon Treasure Limited +我įˆąäŊ  + +// xn--80adxhks : 2013-12-19 Foundation for Assistance for Internet Technologies and Infrastructure Development (FAITID) +ĐŧĐžŅĐēва + +// xn--80aqecdr1a : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +ĐēĐ°Ņ‚ĐžĐģиĐē + +// xn--80asehdb : 2013-07-14 CORE Association +ĐžĐŊĐģĐ°ĐšĐŊ + +// xn--80aswg : 2013-07-14 CORE Association +ŅĐ°ĐšŅ‚ + +// xn--8y0a063a : 2015-03-26 China United Network Communications Corporation Limited +联通 + +// xn--9dbq2a : 2015-01-15 VeriSign Sarl +קום + +// xn--9et52u : 2014-06-12 RISE VICTORY LIMITED +æ—ļ尚 + +// xn--9krt00a : 2015-03-12 Sina Corporation +垎博 + +// xn--b4w605ferd : 2014-08-07 Temasek Holdings (Private) Limited +æˇĄéŠŦ锡 + +// xn--bck1b9a5dre4c : 2015-02-26 Amazon Registry Services, Inc. +ãƒ•ã‚Ąãƒƒã‚ˇãƒ§ãƒŗ + +// xn--c1avg : 2013-11-14 Public Interest Registry +ĐžŅ€Đŗ + +// xn--c2br7g : 2015-01-15 VeriSign Sarl +ā¤¨āĨ‡ā¤Ÿ + +// xn--cck2b3b : 2015-02-26 Amazon Registry Services, Inc. +゚トã‚ĸ + +// xn--cg4bki : 2013-09-27 SAMSUNG SDS CO., LTD +ė‚ŧė„ą + +// xn--czr694b : 2014-01-16 Dot Trademark TLD Holding Company Limited +商标 + +// xn--czrs0t : 2013-12-19 Binky Moon, LLC +商åē— + +// xn--czru2d : 2013-11-21 Zodiac Aquarius Limited +商城 + +// xn--d1acj3b : 2013-11-20 The Foundation for Network Initiatives “The Smart Internet” +Đ´ĐĩŅ‚и + +// xn--eckvdtc9d : 2014-12-18 Amazon Registry Services, Inc. +ポイãƒŗト + +// xn--efvy88h : 2014-08-22 Guangzhou YU Wei Information Technology Co., Ltd. +新é—ģ + +// xn--estv75g : 2015-02-19 Industrial and Commercial Bank of China Limited +åˇĨ行 + +// xn--fct429k : 2015-04-09 Amazon Registry Services, Inc. +åŽļé›ģ + +// xn--fhbei : 2015-01-15 VeriSign Sarl +ŲƒŲˆŲ… + +// xn--fiq228c5hs : 2013-09-08 TLD REGISTRY LIMITED +中文įŊ‘ + +// xn--fiq64b : 2013-10-14 CITIC Group Corporation +中äŋĄ + +// xn--fjq720a : 2014-05-22 Binky Moon, LLC +å¨ąäš + +// xn--flw351e : 2014-07-31 Charleston Road Registry Inc. 
+č°ˇæ­Œ + +// xn--fzys8d69uvgm : 2015-05-14 PCCW Enterprises Limited +é›ģ訊į›ˆį§‘ + +// xn--g2xx48c : 2015-01-30 Minds + Machines Group Limited +č´­į‰Š + +// xn--gckr3f0f : 2015-02-26 Amazon Registry Services, Inc. +クナã‚Ļド + +// xn--gk3at1e : 2015-10-08 Amazon Registry Services, Inc. +通販 + +// xn--hxt814e : 2014-05-15 Zodiac Taurus Limited +įŊ‘åē— + +// xn--i1b6b1a6a2e : 2013-11-14 Public Interest Registry +ā¤¸ā¤‚ā¤—ā¤ ā¤¨ + +// xn--imr513n : 2014-12-11 Dot Trademark TLD Holding Company Limited +餐厅 + +// xn--io0a7i : 2013-11-14 China Internet Network Information Center (CNNIC) +įŊ‘įģœ + +// xn--j1aef : 2015-01-15 VeriSign Sarl +ĐēĐžĐŧ + +// xn--jlq61u9w7b : 2015-01-08 Nokia Corporation +č¯ēåŸēäēš + +// xn--jvr189m : 2015-02-26 Amazon Registry Services, Inc. +éŖŸå“ + +// xn--kcrx77d1x4a : 2014-11-07 Koninklijke Philips N.V. +éŖžåˆŠæĩĻ + +// xn--kpu716f : 2014-12-22 Richemont DNS Inc. +æ‰‹čĄ¨ + +// xn--kput3i : 2014-02-13 Beijing RITT-Net Technology Development Co., Ltd +手æœē + +// xn--mgba3a3ejt : 2014-11-20 Aramco Services Company +Ø§ØąØ§Ų…ŲƒŲˆ + +// xn--mgba7c0bbn0a : 2015-05-14 Crescent Holding GmbH +اŲ„ØšŲ„ŲŠØ§Ų† + +// xn--mgbaakc7dvf : 2015-09-03 Emirates Telecommunications Corporation (trading as Etisalat) +اØĒØĩاŲ„اØĒ + +// xn--mgbab2bd : 2013-10-31 CORE Association +Ø¨Ø§Ø˛Ø§Øą + +// xn--mgbb9fbpob : 2014-12-18 GreenTech Consultancy Company W.L.L. +Ų…ŲˆØ¨Ø§ŲŠŲ„ŲŠ + +// xn--mgbca7dzdo : 2015-07-30 Abu Dhabi Systems and Information Centre +ابŲˆØ¸Ø¨ŲŠ + +// xn--mgbi4ecexp : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +ŲƒØ§ØĢŲˆŲ„ŲŠŲƒ + +// xn--mgbt3dhd : 2014-09-04 Asia Green IT System Bilgisayar San. ve Tic. Ltd. Sti. +Ų‡Ų…ØąØ§Ų‡ + +// xn--mk1bu44c : 2015-01-15 VeriSign Sarl +닷ėģ´ + +// xn--mxtq1m : 2014-03-06 Net-Chinese Co., Ltd. +æ”ŋåēœ + +// xn--ngbc5azd : 2013-07-13 International Domain Registry Pty. Ltd. +شبŲƒØŠ + +// xn--ngbe9e0a : 2014-12-04 Kuwait Finance House +بŲŠØĒŲƒ + +// xn--ngbrx : 2015-11-12 League of Arab States +ØšØąØ¨ + +// xn--nqv7f : 2013-11-14 Public Interest Registry +æœē构 + +// xn--nqv7fs00ema : 2013-11-14 Public Interest Registry +įģ„įģ‡æœē构 + +// xn--nyqy26a : 2014-11-07 Stable Tone Limited +åĨåēˇ + +// xn--otu796d : 2017-08-06 Dot Trademark TLD Holding Company Limited +æ‹›č˜ + +// xn--p1acf : 2013-12-12 Rusnames Limited +Ņ€ŅƒŅ + +// xn--pbt977c : 2014-12-22 Richemont DNS Inc. +į åŽ + +// xn--pssy2u : 2015-01-15 VeriSign Sarl +大æ‹ŋ + +// xn--q9jyb4c : 2013-09-17 Charleston Road Registry Inc. +ãŋんãĒ + +// xn--qcka1pmc : 2014-07-31 Charleston Road Registry Inc. +グãƒŧグãƒĢ + +// xn--rhqv96g : 2013-09-11 Stable Tone Limited +世į•Œ + +// xn--rovu88b : 2015-02-26 Amazon Registry Services, Inc. +書įą + +// xn--ses554g : 2014-01-16 KNET Co., Ltd. +įŊ‘址 + +// xn--t60b56a : 2015-01-15 VeriSign Sarl +닷넷 + +// xn--tckwe : 2015-01-15 VeriSign Sarl +ã‚ŗム + +// xn--tiq49xqyj : 2015-10-21 Pontificium Consilium de Comunicationibus Socialibus (PCCS) (Pontifical Council for Social Communication) +夊ä¸ģ教 + +// xn--unup4y : 2013-07-14 Binky Moon, LLC +游戏 + +// xn--vermgensberater-ctb : 2014-06-23 Deutsche VermÃļgensberatung Aktiengesellschaft DVAG +vermÃļgensberater + +// xn--vermgensberatung-pwb : 2014-06-23 Deutsche VermÃļgensberatung Aktiengesellschaft DVAG +vermÃļgensberatung + +// xn--vhquv : 2013-08-27 Binky Moon, LLC +äŧä¸š + +// xn--vuq861b : 2014-10-16 Beijing Tele-info Network Technology Co., Ltd. +äŋĄæ¯ + +// xn--w4r85el8fhu5dnra : 2015-04-30 Kerry Trading Co. 
Limited +嘉里大酒åē— + +// xn--w4rs40l : 2015-07-30 Kerry Trading Co. Limited +嘉里 + +// xn--xhq521b : 2013-11-14 Guangzhou YU Wei Information Technology Co., Ltd. +åšŋ东 + +// xn--zfr164b : 2013-11-08 China Organizational Name Administration Center +æ”ŋåŠĄ + +// xyz : 2013-12-05 XYZ.COM LLC +xyz + +// yachts : 2014-01-09 DERYachts, LLC +yachts + +// yahoo : 2015-04-02 Yahoo! Domain Services Inc. +yahoo + +// yamaxun : 2014-12-18 Amazon Registry Services, Inc. +yamaxun + +// yandex : 2014-04-10 YANDEX, LLC +yandex + +// yodobashi : 2014-11-20 YODOBASHI CAMERA CO.,LTD. +yodobashi + +// yoga : 2014-05-29 Minds + Machines Group Limited +yoga + +// yokohama : 2013-12-12 GMO Registry, Inc. +yokohama + +// you : 2015-04-09 Amazon Registry Services, Inc. +you + +// youtube : 2014-05-01 Charleston Road Registry Inc. +youtube + +// yun : 2015-01-08 QIHOO 360 TECHNOLOGY CO. LTD. +yun + +// zappos : 2015-06-25 Amazon Registry Services, Inc. +zappos + +// zara : 2014-11-07 Industria de DiseÃąo Textil, S.A. (INDITEX, S.A.) +zara + +// zero : 2014-12-18 Amazon Registry Services, Inc. +zero + +// zip : 2014-05-08 Charleston Road Registry Inc. +zip + +// zone : 2013-11-14 Binky Moon, LLC +zone + +// zuerich : 2014-11-07 Kanton ZÃŧrich (Canton of Zurich) +zuerich + + +// ===END ICANN DOMAINS=== +// ===BEGIN PRIVATE DOMAINS=== +// (Note: these are in alphabetical order by company name) + +// 1GB LLC : https://www.1gb.ua/ +// Submitted by 1GB LLC +cc.ua +inf.ua +ltd.ua + +// Agnat sp. z o.o. : https://domena.pl +// Submitted by Przemyslaw Plewa +beep.pl + +// alboto.ca : http://alboto.ca +// Submitted by Anton Avramov +barsy.ca + +// Alces Software Ltd : http://alces-software.com +// Submitted by Mark J. Titorenko +*.compute.estate +*.alces.network + +// alwaysdata : https://www.alwaysdata.com +// Submitted by Cyril +alwaysdata.net + +// Amazon CloudFront : https://aws.amazon.com/cloudfront/ +// Submitted by Donavan Miller +cloudfront.net + +// Amazon Elastic Compute Cloud : https://aws.amazon.com/ec2/ +// Submitted by Luke Wells +*.compute.amazonaws.com +*.compute-1.amazonaws.com +*.compute.amazonaws.com.cn +us-east-1.amazonaws.com + +// Amazon Elastic Beanstalk : https://aws.amazon.com/elasticbeanstalk/ +// Submitted by Luke Wells +cn-north-1.eb.amazonaws.com.cn +cn-northwest-1.eb.amazonaws.com.cn +elasticbeanstalk.com +ap-northeast-1.elasticbeanstalk.com +ap-northeast-2.elasticbeanstalk.com +ap-northeast-3.elasticbeanstalk.com +ap-south-1.elasticbeanstalk.com +ap-southeast-1.elasticbeanstalk.com +ap-southeast-2.elasticbeanstalk.com +ca-central-1.elasticbeanstalk.com +eu-central-1.elasticbeanstalk.com +eu-west-1.elasticbeanstalk.com +eu-west-2.elasticbeanstalk.com +eu-west-3.elasticbeanstalk.com +sa-east-1.elasticbeanstalk.com +us-east-1.elasticbeanstalk.com +us-east-2.elasticbeanstalk.com +us-gov-west-1.elasticbeanstalk.com +us-west-1.elasticbeanstalk.com +us-west-2.elasticbeanstalk.com + +// Amazon Elastic Load Balancing : https://aws.amazon.com/elasticloadbalancing/ +// Submitted by Luke Wells +*.elb.amazonaws.com +*.elb.amazonaws.com.cn + +// Amazon S3 : https://aws.amazon.com/s3/ +// Submitted by Luke Wells +s3.amazonaws.com +s3-ap-northeast-1.amazonaws.com +s3-ap-northeast-2.amazonaws.com +s3-ap-south-1.amazonaws.com +s3-ap-southeast-1.amazonaws.com +s3-ap-southeast-2.amazonaws.com +s3-ca-central-1.amazonaws.com +s3-eu-central-1.amazonaws.com +s3-eu-west-1.amazonaws.com +s3-eu-west-2.amazonaws.com +s3-eu-west-3.amazonaws.com +s3-external-1.amazonaws.com +s3-fips-us-gov-west-1.amazonaws.com 
+s3-sa-east-1.amazonaws.com +s3-us-gov-west-1.amazonaws.com +s3-us-east-2.amazonaws.com +s3-us-west-1.amazonaws.com +s3-us-west-2.amazonaws.com +s3.ap-northeast-2.amazonaws.com +s3.ap-south-1.amazonaws.com +s3.cn-north-1.amazonaws.com.cn +s3.ca-central-1.amazonaws.com +s3.eu-central-1.amazonaws.com +s3.eu-west-2.amazonaws.com +s3.eu-west-3.amazonaws.com +s3.us-east-2.amazonaws.com +s3.dualstack.ap-northeast-1.amazonaws.com +s3.dualstack.ap-northeast-2.amazonaws.com +s3.dualstack.ap-south-1.amazonaws.com +s3.dualstack.ap-southeast-1.amazonaws.com +s3.dualstack.ap-southeast-2.amazonaws.com +s3.dualstack.ca-central-1.amazonaws.com +s3.dualstack.eu-central-1.amazonaws.com +s3.dualstack.eu-west-1.amazonaws.com +s3.dualstack.eu-west-2.amazonaws.com +s3.dualstack.eu-west-3.amazonaws.com +s3.dualstack.sa-east-1.amazonaws.com +s3.dualstack.us-east-1.amazonaws.com +s3.dualstack.us-east-2.amazonaws.com +s3-website-us-east-1.amazonaws.com +s3-website-us-west-1.amazonaws.com +s3-website-us-west-2.amazonaws.com +s3-website-ap-northeast-1.amazonaws.com +s3-website-ap-southeast-1.amazonaws.com +s3-website-ap-southeast-2.amazonaws.com +s3-website-eu-west-1.amazonaws.com +s3-website-sa-east-1.amazonaws.com +s3-website.ap-northeast-2.amazonaws.com +s3-website.ap-south-1.amazonaws.com +s3-website.ca-central-1.amazonaws.com +s3-website.eu-central-1.amazonaws.com +s3-website.eu-west-2.amazonaws.com +s3-website.eu-west-3.amazonaws.com +s3-website.us-east-2.amazonaws.com + +// Amune : https://amune.org/ +// Submitted by Team Amune +t3l3p0rt.net +tele.amune.org + +// Apigee : https://apigee.com/ +// Submitted by Apigee Security Team +apigee.io + +// Aptible : https://www.aptible.com/ +// Submitted by Thomas Orozco +on-aptible.com + +// AsociaciÃŗn Amigos de la InformÃĄtica "Euskalamiga" : http://encounter.eus/ +// Submitted by Hector Martin +user.party.eus + +// Association potager.org : https://potager.org/ +// Submitted by Lunar +pimienta.org +poivron.org +potager.org +sweetpepper.org + +// ASUSTOR Inc. : http://www.asustor.com +// Submitted by Vincent Tseng +myasustor.com + +// Automattic Inc. : https://automattic.com/ +// Submitted by Alex Concha +go-vip.co +go-vip.net +wpcomstaging.com + +// AVM : https://avm.de +// Submitted by Andreas Weise +myfritz.net + +// AW AdvisorWebsites.com Software Inc : https://advisorwebsites.com +// Submitted by James Kennedy +*.awdev.ca +*.advisor.ws + +// b-data GmbH : https://www.b-data.io +// Submitted by Olivier Benz +b-data.io + +// backplane : https://www.backplane.io +// Submitted by Anthony Voutas +backplaneapp.io + +// Balena : https://www.balena.io +// Submitted by Petros Angelatos +balena-devices.com + +// Banzai Cloud +// Submitted by Gabor Kozma +app.banzaicloud.io + +// BetaInABox +// Submitted by Adrian +betainabox.com + +// BinaryLane : http://www.binarylane.com +// Submitted by Nathan O'Sullivan +bnr.la + +// Blackbaud, Inc. 
: https://www.blackbaud.com +// Submitted by Paul Crowder +blackbaudcdn.net + +// Boomla : https://boomla.com +// Submitted by Tibor Halter +boomla.net + +// Boxfuse : https://boxfuse.com +// Submitted by Axel Fontaine +boxfuse.io + +// bplaced : https://www.bplaced.net/ +// Submitted by Miroslav Bozic +square7.ch +bplaced.com +bplaced.de +square7.de +bplaced.net +square7.net + +// BrowserSafetyMark +// Submitted by Dave Tharp +browsersafetymark.io + +// Bytemark Hosting : https://www.bytemark.co.uk +// Submitted by Paul Cammish +uk0.bigv.io +dh.bytemark.co.uk +vm.bytemark.co.uk + +// callidomus : https://www.callidomus.com/ +// Submitted by Marcus Popp +mycd.eu + +// Carrd : https://carrd.co +// Submitted by AJ +carrd.co +crd.co +uwu.ai + +// CentralNic : http://www.centralnic.com/names/domains +// Submitted by registry +ae.org +ar.com +br.com +cn.com +com.de +com.se +de.com +eu.com +gb.com +gb.net +hu.com +hu.net +jp.net +jpn.com +kr.com +mex.com +no.com +qc.com +ru.com +sa.com +se.net +uk.com +uk.net +us.com +uy.com +za.bz +za.com + +// Africa.com Web Solutions Ltd : https://registry.africa.com +// Submitted by Gavin Brown +africa.com + +// iDOT Services Limited : http://www.domain.gr.com +// Submitted by Gavin Brown +gr.com + +// Radix FZC : http://domains.in.net +// Submitted by Gavin Brown +in.net + +// US REGISTRY LLC : http://us.org +// Submitted by Gavin Brown +us.org + +// co.com Registry, LLC : https://registry.co.com +// Submitted by Gavin Brown +co.com + +// c.la : http://www.c.la/ +c.la + +// certmgr.org : https://certmgr.org +// Submitted by B. Blechschmidt +certmgr.org + +// Citrix : https://citrix.com +// Submitted by Alex Stoddard +xenapponazure.com + +// Civilized Discourse Construction Kit, Inc. : https://www.discourse.org/ +// Submitted by Rishabh Nambiar +discourse.group + +// ClearVox : http://www.clearvox.nl/ +// Submitted by Leon Rowland +virtueeldomein.nl + +// Clever Cloud : https://www.clever-cloud.com/ +// Submitted by Quentin Adam +cleverapps.io + +// Clerk : https://www.clerk.dev +// Submitted by Colin Sidoti +*.lcl.dev +*.stg.dev + +// Cloud66 : https://www.cloud66.com/ +// Submitted by Khash Sajadi +c66.me +cloud66.ws +cloud66.zone + +// CloudAccess.net : https://www.cloudaccess.net/ +// Submitted by Pawel Panek +jdevcloud.com +wpdevcloud.com +cloudaccess.host +freesite.host +cloudaccess.net + +// cloudControl : https://www.cloudcontrol.com/ +// Submitted by Tobias Wilken +cloudcontrolled.com +cloudcontrolapp.com + +// Cloudera, Inc. : https://www.cloudera.com/ +// Submitted by Philip Langdale +cloudera.site + +// Cloudflare, Inc. : https://www.cloudflare.com/ +// Submitted by Jake Riesterer +trycloudflare.com +workers.dev + +// Clovyr : https://clovyr.io +// Submitted by Patrick Nielsen +wnext.app + +// co.ca : http://registry.co.ca/ +co.ca + +// Co & Co : https://co-co.nl/ +// Submitted by Govert Versluis +*.otap.co + +// i-registry s.r.o. : http://www.i-registry.cz/ +// Submitted by Martin Semrad +co.cz + +// CDN77.com : http://www.cdn77.com +// Submitted by Jan Krpes +c.cdn77.org +cdn77-ssl.net +r.cdn77.net +rsc.cdn77.org +ssl.origin.cdn77-secure.org + +// Cloud DNS Ltd : http://www.cloudns.net +// Submitted by Aleksander Hristov +cloudns.asia +cloudns.biz +cloudns.club +cloudns.cc +cloudns.eu +cloudns.in +cloudns.info +cloudns.org +cloudns.pro +cloudns.pw +cloudns.us + +// Cloudeity Inc : https://cloudeity.com +// Submitted by Stefan Dimitrov +cloudeity.net + +// CNPY : https://cnpy.gdn +// Submitted by Angelo Gladding +cnpy.gdn + +// CoDNS B.V. 
+co.nl +co.no + +// Combell.com : https://www.combell.com +// Submitted by Thomas Wouters +webhosting.be +hosting-cluster.nl + +// COSIMO GmbH : http://www.cosimo.de +// Submitted by Rene Marticke +dyn.cosidns.de +dynamisches-dns.de +dnsupdater.de +internet-dns.de +l-o-g-i-n.de +dynamic-dns.info +feste-ip.net +knx-server.net +static-access.net + +// Craynic, s.r.o. : http://www.craynic.com/ +// Submitted by Ales Krajnik +realm.cz + +// Cryptonomic : https://cryptonomic.net/ +// Submitted by Andrew Cady +*.cryptonomic.net + +// Cupcake : https://cupcake.io/ +// Submitted by Jonathan Rudenberg +cupcake.is + +// cyon GmbH : https://www.cyon.ch/ +// Submitted by Dominic Luechinger +cyon.link +cyon.site + +// Daplie, Inc : https://daplie.com +// Submitted by AJ ONeal +daplie.me +localhost.daplie.me + +// Datto, Inc. : https://www.datto.com/ +// Submitted by Philipp Heckel +dattolocal.com +dattorelay.com +dattoweb.com +mydatto.com +dattolocal.net +mydatto.net + +// Dansk.net : http://www.dansk.net/ +// Submitted by Anani Voule +biz.dk +co.dk +firm.dk +reg.dk +store.dk + +// dapps.earth : https://dapps.earth/ +// Submitted by Daniil Burdakov +*.dapps.earth +*.bzz.dapps.earth + +// Debian : https://www.debian.org/ +// Submitted by Peter Palfrader / Debian Sysadmin Team +debian.net + +// deSEC : https://desec.io/ +// Submitted by Peter Thomassen +dedyn.io + +// DNShome : https://www.dnshome.de/ +// Submitted by Norbert Auler +dnshome.de + +// DotArai : https://www.dotarai.com/ +// Submitted by Atsadawat Netcharadsang +online.th +shop.th + +// DrayTek Corp. : https://www.draytek.com/ +// Submitted by Paul Fang +drayddns.com + +// DreamHost : http://www.dreamhost.com/ +// Submitted by Andrew Farmer +dreamhosters.com + +// Drobo : http://www.drobo.com/ +// Submitted by Ricardo Padilha +mydrobo.com + +// Drud Holdings, LLC. 
: https://www.drud.com/ +// Submitted by Kevin Bridges +drud.io +drud.us + +// DuckDNS : http://www.duckdns.org/ +// Submitted by Richard Harper +duckdns.org + +// dy.fi : http://dy.fi/ +// Submitted by Heikki Hannikainen +dy.fi +tunk.org + +// DynDNS.com : http://www.dyndns.com/services/dns/dyndns/ +dyndns-at-home.com +dyndns-at-work.com +dyndns-blog.com +dyndns-free.com +dyndns-home.com +dyndns-ip.com +dyndns-mail.com +dyndns-office.com +dyndns-pics.com +dyndns-remote.com +dyndns-server.com +dyndns-web.com +dyndns-wiki.com +dyndns-work.com +dyndns.biz +dyndns.info +dyndns.org +dyndns.tv +at-band-camp.net +ath.cx +barrel-of-knowledge.info +barrell-of-knowledge.info +better-than.tv +blogdns.com +blogdns.net +blogdns.org +blogsite.org +boldlygoingnowhere.org +broke-it.net +buyshouses.net +cechire.com +dnsalias.com +dnsalias.net +dnsalias.org +dnsdojo.com +dnsdojo.net +dnsdojo.org +does-it.net +doesntexist.com +doesntexist.org +dontexist.com +dontexist.net +dontexist.org +doomdns.com +doomdns.org +dvrdns.org +dyn-o-saur.com +dynalias.com +dynalias.net +dynalias.org +dynathome.net +dyndns.ws +endofinternet.net +endofinternet.org +endoftheinternet.org +est-a-la-maison.com +est-a-la-masion.com +est-le-patron.com +est-mon-blogueur.com +for-better.biz +for-more.biz +for-our.info +for-some.biz +for-the.biz +forgot.her.name +forgot.his.name +from-ak.com +from-al.com +from-ar.com +from-az.net +from-ca.com +from-co.net +from-ct.com +from-dc.com +from-de.com +from-fl.com +from-ga.com +from-hi.com +from-ia.com +from-id.com +from-il.com +from-in.com +from-ks.com +from-ky.com +from-la.net +from-ma.com +from-md.com +from-me.org +from-mi.com +from-mn.com +from-mo.com +from-ms.com +from-mt.com +from-nc.com +from-nd.com +from-ne.com +from-nh.com +from-nj.com +from-nm.com +from-nv.com +from-ny.net +from-oh.com +from-ok.com +from-or.com +from-pa.com +from-pr.com +from-ri.com +from-sc.com +from-sd.com +from-tn.com +from-tx.com +from-ut.com +from-va.com +from-vt.com +from-wa.com +from-wi.com +from-wv.com +from-wy.com +ftpaccess.cc +fuettertdasnetz.de +game-host.org +game-server.cc +getmyip.com +gets-it.net +go.dyndns.org +gotdns.com +gotdns.org +groks-the.info +groks-this.info +ham-radio-op.net +here-for-more.info +hobby-site.com +hobby-site.org +home.dyndns.org +homedns.org +homeftp.net +homeftp.org +homeip.net +homelinux.com +homelinux.net +homelinux.org +homeunix.com +homeunix.net +homeunix.org +iamallama.com +in-the-band.net +is-a-anarchist.com +is-a-blogger.com +is-a-bookkeeper.com +is-a-bruinsfan.org +is-a-bulls-fan.com +is-a-candidate.org +is-a-caterer.com +is-a-celticsfan.org +is-a-chef.com +is-a-chef.net +is-a-chef.org +is-a-conservative.com +is-a-cpa.com +is-a-cubicle-slave.com +is-a-democrat.com +is-a-designer.com +is-a-doctor.com +is-a-financialadvisor.com +is-a-geek.com +is-a-geek.net +is-a-geek.org +is-a-green.com +is-a-guru.com +is-a-hard-worker.com +is-a-hunter.com +is-a-knight.org +is-a-landscaper.com +is-a-lawyer.com +is-a-liberal.com +is-a-libertarian.com +is-a-linux-user.org +is-a-llama.com +is-a-musician.com +is-a-nascarfan.com +is-a-nurse.com +is-a-painter.com +is-a-patsfan.org +is-a-personaltrainer.com +is-a-photographer.com +is-a-player.com +is-a-republican.com +is-a-rockstar.com +is-a-socialist.com +is-a-soxfan.org +is-a-student.com +is-a-teacher.com +is-a-techie.com +is-a-therapist.com +is-an-accountant.com +is-an-actor.com +is-an-actress.com +is-an-anarchist.com +is-an-artist.com +is-an-engineer.com +is-an-entertainer.com +is-by.us +is-certified.com +is-found.org +is-gone.com 
+is-into-anime.com +is-into-cars.com +is-into-cartoons.com +is-into-games.com +is-leet.com +is-lost.org +is-not-certified.com +is-saved.org +is-slick.com +is-uberleet.com +is-very-bad.org +is-very-evil.org +is-very-good.org +is-very-nice.org +is-very-sweet.org +is-with-theband.com +isa-geek.com +isa-geek.net +isa-geek.org +isa-hockeynut.com +issmarterthanyou.com +isteingeek.de +istmein.de +kicks-ass.net +kicks-ass.org +knowsitall.info +land-4-sale.us +lebtimnetz.de +leitungsen.de +likes-pie.com +likescandy.com +merseine.nu +mine.nu +misconfused.org +mypets.ws +myphotos.cc +neat-url.com +office-on-the.net +on-the-web.tv +podzone.net +podzone.org +readmyblog.org +saves-the-whales.com +scrapper-site.net +scrapping.cc +selfip.biz +selfip.com +selfip.info +selfip.net +selfip.org +sells-for-less.com +sells-for-u.com +sells-it.net +sellsyourhome.org +servebbs.com +servebbs.net +servebbs.org +serveftp.net +serveftp.org +servegame.org +shacknet.nu +simple-url.com +space-to-rent.com +stuff-4-sale.org +stuff-4-sale.us +teaches-yoga.com +thruhere.net +traeumtgerade.de +webhop.biz +webhop.info +webhop.net +webhop.org +worse-than.tv +writesthisblog.com + +// ddnss.de : https://www.ddnss.de/ +// Submitted by Robert Niedziela +ddnss.de +dyn.ddnss.de +dyndns.ddnss.de +dyndns1.de +dyn-ip24.de +home-webserver.de +dyn.home-webserver.de +myhome-server.de +ddnss.org + +// Definima : http://www.definima.com/ +// Submitted by Maxence Bitterli +definima.net +definima.io + +// dnstrace.pro : https://dnstrace.pro/ +// Submitted by Chris Partridge +bci.dnstrace.pro + +// Dynu.com : https://www.dynu.com/ +// Submitted by Sue Ye +ddnsfree.com +ddnsgeek.com +giize.com +gleeze.com +kozow.com +loseyourip.com +ooguy.com +theworkpc.com +casacam.net +dynu.net +accesscam.org +camdvr.org +freeddns.org +mywire.org +webredirect.org +myddns.rocks +blogsite.xyz + +// dynv6 : https://dynv6.com +// Submitted by Dominik Menke +dynv6.net + +// E4YOU spol. s.r.o. : https://e4you.cz/ +// Submitted by Vladimir Dudr +e4.cz + +// Enalean SAS: https://www.enalean.com +// Submitted by Thomas Cottier +mytuleap.com + +// ECG Robotics, Inc: https://ecgrobotics.org +// Submitted by +onred.one +staging.onred.one + +// Enonic : http://enonic.com/ +// Submitted by Erik Kaareng-Sunde +enonic.io +customer.enonic.io + +// EU.org https://eu.org/ +// Submitted by Pierre Beyssac +eu.org +al.eu.org +asso.eu.org +at.eu.org +au.eu.org +be.eu.org +bg.eu.org +ca.eu.org +cd.eu.org +ch.eu.org +cn.eu.org +cy.eu.org +cz.eu.org +de.eu.org +dk.eu.org +edu.eu.org +ee.eu.org +es.eu.org +fi.eu.org +fr.eu.org +gr.eu.org +hr.eu.org +hu.eu.org +ie.eu.org +il.eu.org +in.eu.org +int.eu.org +is.eu.org +it.eu.org +jp.eu.org +kr.eu.org +lt.eu.org +lu.eu.org +lv.eu.org +mc.eu.org +me.eu.org +mk.eu.org +mt.eu.org +my.eu.org +net.eu.org +ng.eu.org +nl.eu.org +no.eu.org +nz.eu.org +paris.eu.org +pl.eu.org +pt.eu.org +q-a.eu.org +ro.eu.org +ru.eu.org +se.eu.org +si.eu.org +sk.eu.org +tr.eu.org +uk.eu.org +us.eu.org + +// Evennode : http://www.evennode.com/ +// Submitted by Michal Kralik +eu-1.evennode.com +eu-2.evennode.com +eu-3.evennode.com +eu-4.evennode.com +us-1.evennode.com +us-2.evennode.com +us-3.evennode.com +us-4.evennode.com + +// eDirect Corp. : https://hosting.url.com.tw/ +// Submitted by C.S. chang +twmail.cc +twmail.net +twmail.org +mymailer.com.tw +url.tw + +// Facebook, Inc. 
+// Submitted by Peter Ruibal +apps.fbsbx.com + +// FAITID : https://faitid.org/ +// Submitted by Maxim Alzoba +// https://www.flexireg.net/stat_info +ru.net +adygeya.ru +bashkiria.ru +bir.ru +cbg.ru +com.ru +dagestan.ru +grozny.ru +kalmykia.ru +kustanai.ru +marine.ru +mordovia.ru +msk.ru +mytis.ru +nalchik.ru +nov.ru +pyatigorsk.ru +spb.ru +vladikavkaz.ru +vladimir.ru +abkhazia.su +adygeya.su +aktyubinsk.su +arkhangelsk.su +armenia.su +ashgabad.su +azerbaijan.su +balashov.su +bashkiria.su +bryansk.su +bukhara.su +chimkent.su +dagestan.su +east-kazakhstan.su +exnet.su +georgia.su +grozny.su +ivanovo.su +jambyl.su +kalmykia.su +kaluga.su +karacol.su +karaganda.su +karelia.su +khakassia.su +krasnodar.su +kurgan.su +kustanai.su +lenug.su +mangyshlak.su +mordovia.su +msk.su +murmansk.su +nalchik.su +navoi.su +north-kazakhstan.su +nov.su +obninsk.su +penza.su +pokrovsk.su +sochi.su +spb.su +tashkent.su +termez.su +togliatti.su +troitsk.su +tselinograd.su +tula.su +tuva.su +vladikavkaz.su +vladimir.su +vologda.su + +// Fancy Bits, LLC : http://getchannels.com +// Submitted by Aman Gupta +channelsdvr.net + +// Fastly Inc. : http://www.fastly.com/ +// Submitted by Fastly Security +fastly-terrarium.com +fastlylb.net +map.fastlylb.net +freetls.fastly.net +map.fastly.net +a.prod.fastly.net +global.prod.fastly.net +a.ssl.fastly.net +b.ssl.fastly.net +global.ssl.fastly.net + +// FASTVPS EESTI OU : https://fastvps.ru/ +// Submitted by Likhachev Vasiliy +fastpanel.direct +fastvps-server.com + +// Featherhead : https://featherhead.xyz/ +// Submitted by Simon Menke +fhapp.xyz + +// Fedora : https://fedoraproject.org/ +// submitted by Patrick Uiterwijk +fedorainfracloud.org +fedorapeople.org +cloud.fedoraproject.org +app.os.fedoraproject.org +app.os.stg.fedoraproject.org + +// Fermax : https://fermax.com/ +// submitted by Koen Van Isterdael +mydobiss.com + +// Filegear Inc. : https://www.filegear.com +// Submitted by Jason Zhu +filegear.me +filegear-au.me +filegear-de.me +filegear-gb.me +filegear-ie.me +filegear-jp.me +filegear-sg.me + +// Firebase, Inc. +// Submitted by Chris Raynor +firebaseapp.com + +// Flynn : https://flynn.io +// Submitted by Jonathan Rudenberg +flynnhub.com +flynnhosting.net + +// Freebox : http://www.freebox.fr +// Submitted by Romain Fliedel +freebox-os.com +freeboxos.com +fbx-os.fr +fbxos.fr +freebox-os.fr +freeboxos.fr + +// freedesktop.org : https://www.freedesktop.org +// Submitted by Daniel Stone +freedesktop.org + +// Futureweb OG : http://www.futureweb.at +// Submitted by Andreas Schnederle-Wagner +*.futurecms.at +*.ex.futurecms.at +*.in.futurecms.at +futurehosting.at +futuremailing.at +*.ex.ortsinfo.at +*.kunden.ortsinfo.at +*.statics.cloud + +// GDS : https://www.gov.uk/service-manual/operations/operating-servicegovuk-subdomains +// Submitted by David Illsley +service.gov.uk + +// Gehirn Inc. : https://www.gehirn.co.jp/ +// Submitted by Kohei YOSHIDA +gehirn.ne.jp +usercontent.jp + +// Gentlent, Limited : https://www.gentlent.com +// Submitted by Tom Klein +lab.ms + +// GitHub, Inc. +// Submitted by Patrick Toomey +github.io +githubusercontent.com + +// GitLab, Inc. 
+// Submitted by Alex Hanselka +gitlab.io + +// Glitch, Inc : https://glitch.com +// Submitted by Mads Hartmann +glitch.me + +// GOV.UK Platform as a Service : https://www.cloud.service.gov.uk/ +// Submitted by Tom Whitwell +cloudapps.digital +london.cloudapps.digital + +// UKHomeOffice : https://www.gov.uk/government/organisations/home-office +// Submitted by Jon Shanks +homeoffice.gov.uk + +// GlobeHosting, Inc. +// Submitted by Zoltan Egresi +ro.im +shop.ro + +// GoIP DNS Services : http://www.goip.de +// Submitted by Christian Poulter +goip.de + +// Google, Inc. +// Submitted by Eduardo Vela +run.app +a.run.app +web.app +*.0emm.com +appspot.com +blogspot.ae +blogspot.al +blogspot.am +blogspot.ba +blogspot.be +blogspot.bg +blogspot.bj +blogspot.ca +blogspot.cf +blogspot.ch +blogspot.cl +blogspot.co.at +blogspot.co.id +blogspot.co.il +blogspot.co.ke +blogspot.co.nz +blogspot.co.uk +blogspot.co.za +blogspot.com +blogspot.com.ar +blogspot.com.au +blogspot.com.br +blogspot.com.by +blogspot.com.co +blogspot.com.cy +blogspot.com.ee +blogspot.com.eg +blogspot.com.es +blogspot.com.mt +blogspot.com.ng +blogspot.com.tr +blogspot.com.uy +blogspot.cv +blogspot.cz +blogspot.de +blogspot.dk +blogspot.fi +blogspot.fr +blogspot.gr +blogspot.hk +blogspot.hr +blogspot.hu +blogspot.ie +blogspot.in +blogspot.is +blogspot.it +blogspot.jp +blogspot.kr +blogspot.li +blogspot.lt +blogspot.lu +blogspot.md +blogspot.mk +blogspot.mr +blogspot.mx +blogspot.my +blogspot.nl +blogspot.no +blogspot.pe +blogspot.pt +blogspot.qa +blogspot.re +blogspot.ro +blogspot.rs +blogspot.ru +blogspot.se +blogspot.sg +blogspot.si +blogspot.sk +blogspot.sn +blogspot.td +blogspot.tw +blogspot.ug +blogspot.vn +cloudfunctions.net +cloud.goog +codespot.com +googleapis.com +googlecode.com +pagespeedmobilizer.com +publishproxy.com +withgoogle.com +withyoutube.com + +// Hakaran group: http://hakaran.cz +// Submitted by Arseniy Sokolov +fin.ci +free.hr +caa.li +ua.rs +conf.se + +// Handshake : https://handshake.org +// Submitted by Mike Damm +hs.zone +hs.run + +// Hashbang : https://hashbang.sh +hashbang.sh + +// Hasura : https://hasura.io +// Submitted by Shahidh K Muhammed +hasura.app +hasura-app.io + +// Hepforge : https://www.hepforge.org +// Submitted by David Grellscheid +hepforge.org + +// Heroku : https://www.heroku.com/ +// Submitted by Tom Maher +herokuapp.com +herokussl.com + +// Hibernating Rhinos +// Submitted by Oren Eini +myravendb.com +ravendb.community +ravendb.me +development.run +ravendb.run + +// HOSTBIP REGISTRY : https://www.hostbip.com/ +// Submitted by Atanunu Igbunuroghene +bpl.biz +orx.biz +ng.city +ng.ink +biz.gl +col.ng +gen.ng +ltd.ng +sch.so + +// Häkkinen.fi +// Submitted by Eero Häkkinen +häkkinen.fi + +// Ici la Lune : http://www.icilalune.com/ +// Submitted by Simon Morvan +*.moonscale.io +moonscale.net + +// iki.fi +// Submitted by Hannu Aronsson +iki.fi + +// Individual Network Berlin e.V. 
: https://www.in-berlin.de/ +// Submitted by Christian Seitz +dyn-berlin.de +in-berlin.de +in-brb.de +in-butter.de +in-dsl.de +in-dsl.net +in-dsl.org +in-vpn.de +in-vpn.net +in-vpn.org + +// info.at : http://www.info.at/ +biz.at +info.at + +// info.cx : http://info.cx +// Submitted by Jacob Slater +info.cx + +// Interlegis : http://www.interlegis.leg.br +// Submitted by Gabriel Ferreira +ac.leg.br +al.leg.br +am.leg.br +ap.leg.br +ba.leg.br +ce.leg.br +df.leg.br +es.leg.br +go.leg.br +ma.leg.br +mg.leg.br +ms.leg.br +mt.leg.br +pa.leg.br +pb.leg.br +pe.leg.br +pi.leg.br +pr.leg.br +rj.leg.br +rn.leg.br +ro.leg.br +rr.leg.br +rs.leg.br +sc.leg.br +se.leg.br +sp.leg.br +to.leg.br + +// intermetrics GmbH : https://pixolino.com/ +// Submitted by Wolfgang Schwarz +pixolino.com + +// IPiFony Systems, Inc. : https://www.ipifony.com/ +// Submitted by Matthew Hardeman +ipifony.net + +// IServ GmbH : https://iserv.eu +// Submitted by Kim-Alexander Brodowski +mein-iserv.de +test-iserv.de +iserv.dev + +// I-O DATA DEVICE, INC. : http://www.iodata.com/ +// Submitted by Yuji Minagawa +iobb.net + +// Jino : https://www.jino.ru +// Submitted by Sergey Ulyashin +myjino.ru +*.hosting.myjino.ru +*.landing.myjino.ru +*.spectrum.myjino.ru +*.vps.myjino.ru + +// Joyent : https://www.joyent.com/ +// Submitted by Brian Bennett +*.triton.zone +*.cns.joyent.com + +// JS.ORG : http://dns.js.org +// Submitted by Stefan Keim +js.org + +// KaasHosting : http://www.kaashosting.nl/ +// Submitted by Wouter Bakker +kaas.gg +khplay.nl + +// Keyweb AG : https://www.keyweb.de +// Submitted by Martin Dannehl +keymachine.de + +// KingHost : https://king.host +// Submitted by Felipe Keller Braz +kinghost.net +uni5.net + +// KnightPoint Systems, LLC : http://www.knightpoint.com/ +// Submitted by Roy Keene +knightpoint.systems + +// .KRD : http://nic.krd/data/krd/Registration%20Policy.pdf +co.krd +edu.krd + +// LCube - Professional hosting e.K. : https://www.lcube-webhosting.de +// Submitted by Lars Laehn +git-repos.de +lcube-server.de +svn-repos.de + +// Leadpages : https://www.leadpages.net +// Submitted by Greg Dallavalle +leadpages.co +lpages.co +lpusercontent.com + +// Lifetime Hosting : https://Lifetime.Hosting/ +// Submitted by Mike Fillator +co.business +co.education +co.events +co.financial +co.network +co.place +co.technology + +// Lightmaker Property Manager, Inc. : https://app.lmpm.com/ +// Submitted by Greg Holland +app.lmpm.com + +// Linki Tools UG : https://linki.tools +// Submitted by Paulo Matos +linkitools.space + +// linkyard ldt: https://www.linkyard.ch/ +// Submitted by Mario Siegenthaler +linkyard.cloud +linkyard-cloud.ch + +// Linode : https://linode.com +// Submitted by +members.linode.com +nodebalancer.linode.com + +// LiquidNet Ltd : http://www.liquidnetlimited.com/ +// Submitted by Victor Velchev +we.bs + +// Log'in Line : https://www.loginline.com/ +// Submitted by RÊmi Mach +loginline.app +loginline.dev +loginline.io +loginline.services +loginline.site + +// LubMAN UMCS Sp. 
z o.o : https://lubman.pl/ +// Submitted by Ireneusz Maliszewski +krasnik.pl +leczna.pl +lubartow.pl +lublin.pl +poniatowa.pl +swidnik.pl + +// Lug.org.uk : https://lug.org.uk +// Submitted by Jon Spriggs +uklugs.org +glug.org.uk +lug.org.uk +lugs.org.uk + +// Lukanet Ltd : https://lukanet.com +// Submitted by Anton Avramov +barsy.bg +barsy.co.uk +barsyonline.co.uk +barsycenter.com +barsyonline.com +barsy.club +barsy.de +barsy.eu +barsy.in +barsy.info +barsy.io +barsy.me +barsy.menu +barsy.mobi +barsy.net +barsy.online +barsy.org +barsy.pro +barsy.pub +barsy.shop +barsy.site +barsy.support +barsy.uk + +// Magento Commerce +// Submitted by Damien Tournoud +*.magentosite.cloud + +// May First - People Link : https://mayfirst.org/ +// Submitted by Jamie McClelland +mayfirst.info +mayfirst.org + +// Mail.Ru Group : https://hb.cldmail.ru +// Submitted by Ilya Zaretskiy +hb.cldmail.ru + +// Memset hosting : https://www.memset.com +// Submitted by Tom Whitwell +miniserver.com +memset.net + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Zdeněk Å ustr +cloud.metacentrum.cz +custom.metacentrum.cz + +// MetaCentrum, CESNET z.s.p.o. : https://www.metacentrum.cz/en/ +// Submitted by Radim Janča +flt.cloud.muni.cz +usr.cloud.muni.cz + +// Meteor Development Group : https://www.meteor.com/hosting +// Submitted by Pierre Carrier +meteorapp.com +eu.meteorapp.com + +// Michau Enterprises Limited : http://www.co.pl/ +co.pl + +// Microsoft Corporation : http://microsoft.com +// Submitted by Justin Luk +azurecontainer.io +azurewebsites.net +azure-mobile.net +cloudapp.net + +// Mozilla Corporation : https://mozilla.com +// Submitted by Ben Francis +mozilla-iot.org + +// Mozilla Foundation : https://mozilla.org/ +// Submitted by glob +bmoattachments.org + +// MSK-IX : https://www.msk-ix.ru/ +// Submitted by Khannanov Roman +net.ru +org.ru +pp.ru + +// Nabu Casa : https://www.nabucasa.com +// Submitted by Paulus Schoutsen +ui.nabu.casa + +// Names.of.London : https://names.of.london/ +// Submitted by James Stevens +pony.club +of.fashion +on.fashion +of.football +in.london +of.london +for.men +and.mom +for.mom +for.one +for.sale +of.work +to.work + +// NCTU.ME : https://nctu.me/ +// Submitted by Tocknicsu +nctu.me + +// Netlify : https://www.netlify.com +// Submitted by Jessica Parsons +bitballoon.com +netlify.com + +// Neustar Inc. +// Submitted by Trung Tran +4u.com + +// ngrok : https://ngrok.com/ +// Submitted by Alan Shreve +ngrok.io + +// Nimbus Hosting Ltd. : https://www.nimbushosting.co.uk/ +// Submitted by Nicholas Ford +nh-serv.co.uk + +// NFSN, Inc. 
: https://www.NearlyFreeSpeech.NET/ +// Submitted by Jeff Wheelhouse +nfshost.com + +// Now-DNS : https://now-dns.com +// Submitted by Steve Russell +dnsking.ch +mypi.co +n4t.co +001www.com +ddnslive.com +myiphost.com +forumz.info +16-b.it +32-b.it +64-b.it +soundcast.me +tcp4.me +dnsup.net +hicam.net +now-dns.net +ownip.net +vpndns.net +dynserv.org +now-dns.org +x443.pw +now-dns.top +ntdll.top +freeddns.us +crafting.xyz +zapto.xyz + +// nsupdate.info : https://www.nsupdate.info/ +// Submitted by Thomas Waldmann +nsupdate.info +nerdpol.ovh + +// No-IP.com : https://noip.com/ +// Submitted by Deven Reza +blogsyte.com +brasilia.me +cable-modem.org +ciscofreak.com +collegefan.org +couchpotatofries.org +damnserver.com +ddns.me +ditchyourip.com +dnsfor.me +dnsiskinky.com +dvrcam.info +dynns.com +eating-organic.net +fantasyleague.cc +geekgalaxy.com +golffan.us +health-carereform.com +homesecuritymac.com +homesecuritypc.com +hopto.me +ilovecollege.info +loginto.me +mlbfan.org +mmafan.biz +myactivedirectory.com +mydissent.net +myeffect.net +mymediapc.net +mypsx.net +mysecuritycamera.com +mysecuritycamera.net +mysecuritycamera.org +net-freaks.com +nflfan.org +nhlfan.net +no-ip.ca +no-ip.co.uk +no-ip.net +noip.us +onthewifi.com +pgafan.net +point2this.com +pointto.us +privatizehealthinsurance.net +quicksytes.com +read-books.org +securitytactics.com +serveexchange.com +servehumour.com +servep2p.com +servesarcasm.com +stufftoread.com +ufcfan.org +unusualperson.com +workisboring.com +3utilities.com +bounceme.net +ddns.net +ddnsking.com +gotdns.ch +hopto.org +myftp.biz +myftp.org +myvnc.com +no-ip.biz +no-ip.info +no-ip.org +noip.me +redirectme.net +servebeer.com +serveblog.net +servecounterstrike.com +serveftp.com +servegame.com +servehalflife.com +servehttp.com +serveirc.com +serveminecraft.net +servemp3.com +servepics.com +servequake.com +sytes.net +webhop.me +zapto.org + +// NodeArt : https://nodeart.io +// Submitted by Konstantin Nosov +stage.nodeart.io + +// Nodum B.V. : https://nodum.io/ +// Submitted by Wietse Wind +nodum.co +nodum.io + +// Nucleos Inc. : https://nucleos.com +// Submitted by Piotr Zduniak +pcloud.host + +// NYC.mn : http://www.information.nyc.mn +// Submitted by Matthew Brown +nyc.mn + +// NymNom : https://nymnom.com/ +// Submitted by Dave McCormack +nom.ae +nom.af +nom.ai +nom.al +nym.by +nym.bz +nom.cl +nom.gd +nom.ge +nom.gl +nym.gr +nom.gt +nym.gy +nom.hn +nym.ie +nom.im +nom.ke +nym.kz +nym.la +nym.lc +nom.li +nym.li +nym.lt +nym.lu +nym.me +nom.mk +nym.mn +nym.mx +nom.nu +nym.nz +nym.pe +nym.pt +nom.pw +nom.qa +nym.ro +nom.rs +nom.si +nym.sk +nom.st +nym.su +nym.sx +nom.tj +nym.tw +nom.ug +nom.uy +nom.vc +nom.vg + +// Octopodal Solutions, LLC. : https://ulterius.io/ +// Submitted by Andrew Sampson +cya.gg + +// Omnibond Systems, LLC. : https://www.omnibond.com +// Submitted by Cole Estep +cloudycluster.net + +// One Fold Media : http://www.onefoldmedia.com/ +// Submitted by Eddie Jones +nid.io + +// OpenCraft GmbH : http://opencraft.com/ +// Submitted by Sven Marnach +opencraft.hosting + +// Opera Software, A.S.A. 
+// Submitted by Yngve Pettersen +operaunite.com + +// OutSystems +// Submitted by Duarte Santos +outsystemscloud.com + +// OwnProvider GmbH: http://www.ownprovider.com +// Submitted by Jan Moennich +ownprovider.com +own.pm + +// OX : http://www.ox.rs +// Submitted by Adam Grand +ox.rs + +// oy.lc +// Submitted by Charly Coste +oy.lc + +// Pagefog : https://pagefog.com/ +// Submitted by Derek Myers +pgfog.com + +// Pagefront : https://www.pagefronthq.com/ +// Submitted by Jason Kriss +pagefrontapp.com + +// .pl domains (grandfathered) +art.pl +gliwice.pl +krakow.pl +poznan.pl +wroc.pl +zakopane.pl + +// Pantheon Systems, Inc. : https://pantheon.io/ +// Submitted by Gary Dylina +pantheonsite.io +gotpantheon.com + +// Peplink | Pepwave : http://peplink.com/ +// Submitted by Steve Leung +mypep.link + +// Planet-Work : https://www.planet-work.com/ +// Submitted by FrÊdÊric VANNIÈRE +on-web.fr + +// Platform.sh : https://platform.sh +// Submitted by Nikola Kotur +*.platform.sh +*.platformsh.site + +// Port53 : https://port53.io/ +// Submitted by Maximilian Schieder +dyn53.io + +// Positive Codes Technology Company : http://co.bn/faq.html +// Submitted by Zulfais +co.bn + +// prgmr.com : https://prgmr.com/ +// Submitted by Sarah Newman +xen.prgmr.com + +// priv.at : http://www.nic.priv.at/ +// Submitted by registry +priv.at + +// privacytools.io : https://www.privacytools.io/ +// Submitted by Jonah Aragon +prvcy.page + +// Protocol Labs : https://protocol.ai/ +// Submitted by Michael Burns +*.dweb.link + +// Protonet GmbH : http://protonet.io +// Submitted by Martin Meier +protonet.io + +// Publication Presse Communication SARL : https://ppcom.fr +// Submitted by Yaacov Akiba Slama +chirurgiens-dentistes-en-france.fr +byen.site + +// pubtls.org: https://www.pubtls.org +// Submitted by Kor Nielsen +pubtls.org + +// Qualifio : https://qualifio.com/ +// Submitted by Xavier De Cock +qualifioapp.com + +// Redstar Consultants : https://www.redstarconsultants.com/ +// Submitted by Jons Slemmer +instantcloud.cn + +// Russian Academy of Sciences +// Submitted by Tech Support +ras.ru + +// QA2 +// Submitted by Daniel Dent (https://www.danieldent.com/) +qa2.com + +// QNAP System Inc : https://www.qnap.com +// Submitted by Nick Chang +dev-myqnapcloud.com +alpha-myqnapcloud.com +myqnapcloud.com + +// Quip : https://quip.com +// Submitted by Patrick Linehan +*.quipelements.com + +// Qutheory LLC : http://qutheory.io +// Submitted by Jonas Schwartz +vapor.cloud +vaporcloud.io + +// Rackmaze LLC : https://www.rackmaze.com +// Submitted by Kirill Pertsev +rackmaze.com +rackmaze.net + +// Rancher Labs, Inc : https://rancher.com +// Submitted by Vincent Fiduccia +*.on-rancher.cloud +*.on-rio.io + +// Read The Docs, Inc : https://www.readthedocs.org +// Submitted by David Fischer +readthedocs.io + +// Red Hat, Inc. 
OpenShift : https://openshift.redhat.com/
+// Submitted by Tim Kramer
+rhcloud.com
+
+// Render : https://render.com
+// Submitted by Anurag Goel
+app.render.com
+onrender.com
+
+// Repl.it : https://repl.it
+// Submitted by Mason Clayton
+repl.co
+repl.run
+
+// Resin.io : https://resin.io
+// Submitted by Tim Perry
+resindevice.io
+devices.resinstaging.io
+
+// RethinkDB : https://www.rethinkdb.com/
+// Submitted by Chris Kastorff
+hzc.io
+
+// Revitalised Limited : http://www.revitalised.co.uk
+// Submitted by Jack Price
+wellbeingzone.eu
+ptplus.fit
+wellbeingzone.co.uk
+
+// Rochester Institute of Technology : http://www.rit.edu/
+// Submitted by Jennifer Herting
+git-pages.rit.edu
+
+// Sandstorm Development Group, Inc. : https://sandcats.io/
+// Submitted by Asheesh Laroia
+sandcats.io
+
+// SBE network solutions GmbH : https://www.sbe.de/
+// Submitted by Norman Meilick
+logoip.de
+logoip.com
+
+// schokokeks.org GbR : https://schokokeks.org/
+// Submitted by Hanno Böck
+schokokeks.net
+
+// Scry Security : http://www.scrysec.com
+// Submitted by Shante Adam
+scrysec.com
+
+// Securepoint GmbH : https://www.securepoint.de
+// Submitted by Erik Anders
+firewall-gateway.com
+firewall-gateway.de
+my-gateway.de
+my-router.de
+spdns.de
+spdns.eu
+firewall-gateway.net
+my-firewall.org
+myfirewall.org
+spdns.org
+
+// SensioLabs, SAS : https://sensiolabs.com/
+// Submitted by Fabien Potencier
+*.s5y.io
+*.sensiosite.cloud
+
+// Service Online LLC : http://drs.ua/
+// Submitted by Serhii Bulakh
+biz.ua
+co.ua
+pp.ua
+
+// ShiftEdit : https://shiftedit.net/
+// Submitted by Adam Jimenez
+shiftedit.io
+
+// Shopblocks : http://www.shopblocks.com/
+// Submitted by Alex Bowers
+myshopblocks.com
+
+// Shopit : https://www.shopitcommerce.com/
+// Submitted by Craig McMahon
+shopitsite.com
+
+// Siemens Mobility GmbH
+// Submitted by Oliver Graebner
+mo-siemens.io
+
+// SinaAppEngine : http://sae.sina.com.cn/
+// Submitted by SinaAppEngine
+1kapp.com
+appchizi.com
+applinzi.com
+sinaapp.com
+vipsinaapp.com
+
+// Siteleaf : https://www.siteleaf.com/
+// Submitted by Skylar Challand
+siteleaf.net
+
+// Skyhat : http://www.skyhat.io
+// Submitted by Shante Adam
+bounty-full.com
+alpha.bounty-full.com
+beta.bounty-full.com
+
+// Stackhero : https://www.stackhero.io
+// Submitted by Adrien Gillon
+stackhero-network.com
+
+// staticland : https://static.land
+// Submitted by Seth Vincent
+static.land
+dev.static.land
+sites.static.land
+
+// SourceLair PC : https://www.sourcelair.com
+// Submitted by Antonis Kalipetis
+apps.lair.io
+*.stolos.io
+
+// SpaceKit : https://www.spacekit.io/
+// Submitted by Reza Akhavan
+spacekit.io
+
+// SpeedPartner GmbH: https://www.speedpartner.de/
+// Submitted by Stefan Neufeind
+customer.speedpartner.de
+
+// Standard Library : https://stdlib.com
+// Submitted by Jacob Lee
+api.stdlib.com
+
+// Storj Labs Inc. : https://storj.io/
+// Submitted by Philip Hutchins
+storj.farm
+
+// Studenten Net Twente : http://www.snt.utwente.nl/
+// Submitted by Silke Hofstra
+utwente.io
+
+// Student-Run Computing Facility : https://www.srcf.net/
+// Submitted by Edwin Balani
+soc.srcf.net
+user.srcf.net
+
+// Sub 6 Limited: http://www.sub6.com
+// Submitted by Dan Miller
+temp-dns.com
+
+// Swisscom Application Cloud: https://developer.swisscom.com
+// Submitted by Matthias.Winzeler
+applicationcloud.io
+scapp.io
+
+// Syncloud : https://syncloud.org
+// Submitted by Boris Rybalkin
+syncloud.it
+
+// Synology, Inc.
: https://www.synology.com/
+// Submitted by Rony Weng
+diskstation.me
+dscloud.biz
+dscloud.me
+dscloud.mobi
+dsmynas.com
+dsmynas.net
+dsmynas.org
+familyds.com
+familyds.net
+familyds.org
+i234.me
+myds.me
+synology.me
+vpnplus.to
+
+// TAIFUN Software AG : http://taifun-software.de
+// Submitted by Bjoern Henke
+taifun-dns.de
+
+// TASK geographical domains (www.task.gda.pl/uslugi/dns)
+gda.pl
+gdansk.pl
+gdynia.pl
+med.pl
+sopot.pl
+
+// Teckids e.V. : https://www.teckids.org
+// Submitted by Dominik George
+edugit.org
+
+// Telebit : https://telebit.cloud
+// Submitted by AJ ONeal
+telebit.app
+telebit.io
+*.telebit.xyz
+
+// The Gwiddle Foundation : https://gwiddlefoundation.org.uk
+// Submitted by Joshua Bayfield
+gwiddle.co.uk
+
+// Thingdust AG : https://thingdust.com/
+// Submitted by Adrian Imboden
+thingdustdata.com
+cust.dev.thingdust.io
+cust.disrec.thingdust.io
+cust.prod.thingdust.io
+cust.testing.thingdust.io
+
+// Tlon.io : https://tlon.io
+// Submitted by Mark Staarink
+arvo.network
+azimuth.network
+
+// TownNews.com : http://www.townnews.com
+// Submitted by Dustin Ward
+bloxcms.com
+townnews-staging.com
+
+// TrafficPlex GmbH : https://www.trafficplex.de/
+// Submitted by Phillipp Röll
+12hp.at
+2ix.at
+4lima.at
+lima-city.at
+12hp.ch
+2ix.ch
+4lima.ch
+lima-city.ch
+trafficplex.cloud
+de.cool
+12hp.de
+2ix.de
+4lima.de
+lima-city.de
+1337.pictures
+clan.rip
+lima-city.rocks
+webspace.rocks
+lima.zone
+
+// TransIP : https://www.transip.nl
+// Submitted by Rory Breuk
+*.transurl.be
+*.transurl.eu
+*.transurl.nl
+
+// TuxFamily : http://tuxfamily.org
+// Submitted by TuxFamily administrators
+tuxfamily.org
+
+// TwoDNS : https://www.twodns.de/
+// Submitted by TwoDNS-Support
+dd-dns.de
+diskstation.eu
+diskstation.org
+dray-dns.de
+draydns.de
+dyn-vpn.de
+dynvpn.de
+mein-vigor.de
+my-vigor.de
+my-wan.de
+syno-ds.de
+synology-diskstation.de
+synology-ds.de
+
+// Uberspace : https://uberspace.de
+// Submitted by Moritz Werner
+uber.space
+*.uberspace.de
+
+// UDR Limited : http://www.udr.hk.com
+// Submitted by registry
+hk.com
+hk.org
+ltd.hk
+inc.hk
+
+// United Gameserver GmbH : https://united-gameserver.de
+// Submitted by Stefan Schwarz
+virtualuser.de
+virtual-user.de
+
+// .US
+// Submitted by Ed Moore
+lib.de.us
+
+// VeryPositive SIA : http://very.lv
+// Submitted by Danko Aleksejevs
+2038.io
+
+// Viprinet Europe GmbH : http://www.viprinet.com
+// Submitted by Simon Kissel
+router.management
+
+// Virtual-Info : https://www.virtual-info.info/
+// Submitted by Adnan RIHAN
+v-info.info
+
+// Voorloper.com: https://voorloper.com
+// Submitted by Nathan van Bakel
+voorloper.cloud
+
+// Waffle Computer Inc., Ltd. : https://docs.waffleinfo.com
+// Submitted by Masayuki Note
+wafflecell.com
+
+// WeDeploy by Liferay, Inc.
: https://www.wedeploy.com +// Submitted by Henrique Vicente +wedeploy.io +wedeploy.me +wedeploy.sh + +// Western Digital Technologies, Inc : https://www.wdc.com +// Submitted by Jung Jin +remotewd.com + +// Wikimedia Labs : https://wikitech.wikimedia.org +// Submitted by Yuvi Panda +wmflabs.org + +// XenonCloud GbR: https://xenoncloud.net +// Submitted by Julian Uphoff +half.host + +// XnBay Technology : http://www.xnbay.com/ +// Submitted by XnBay Developer +xnbay.com +u2.xnbay.com +u2-local.xnbay.com + +// XS4ALL Internet bv : https://www.xs4all.nl/ +// Submitted by Daniel Mostertman +cistron.nl +demon.nl +xs4all.space + +// YesCourse Pty Ltd : https://yescourse.com +// Submitted by Atul Bhouraskar +official.academy + +// Yola : https://www.yola.com/ +// Submitted by Stefano Rivera +yolasite.com + +// Yombo : https://yombo.net +// Submitted by Mitch Schwenk +ybo.faith +yombo.me +homelink.one +ybo.party +ybo.review +ybo.science +ybo.trade + +// Yunohost : https://yunohost.org +// Submitted by Valentin Grimaud +nohost.me +noho.st + +// ZaNiC : http://www.za.net/ +// Submitted by registry +za.net +za.org + +// Zeit, Inc. : https://zeit.domains/ +// Submitted by Olli Vanhoja +now.sh + +// Zine EOOD : https://zine.bg/ +// Submitted by Martin Angelov +bss.design + +// Zitcom A/S : https://www.zitcom.dk +// Submitted by Emil Stahl +basicserver.io +virtualserver.io +site.builder.nu +enterprisecloud.nu + +// Zone.id : https://zone.id/ +// Submitted by Su Hendro +zone.id + +// ===END PRIVATE DOMAINS=== diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/domain_name.gemspec b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/domain_name.gemspec new file mode 100644 index 0000000..7ac4c82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/domain_name.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'domain_name/version' + +Gem::Specification.new do |gem| + gem.name = "domain_name" + gem.version = DomainName::VERSION + gem.authors = ["Akinori MUSHA"] + gem.email = ["knu@idaemons.org"] + gem.description = <<-'EOS' +This is a Domain Name manipulation library for Ruby. + +It can also be used for cookie domain validation based on the Public +Suffix List. 
+  EOS
+  gem.summary       = %q{Domain Name manipulation library for Ruby}
+  gem.homepage      = "https://github.com/knu/ruby-domain_name"
+  gem.licenses      = ["BSD-2-Clause", "BSD-3-Clause", "MPL-2.0"]
+
+  gem.files         = `git ls-files`.split($/)
+  gem.executables   = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
+  gem.test_files    = gem.files.grep(%r{^(test|spec|features)/})
+  gem.require_paths = ["lib"]
+
+  gem.extra_rdoc_files = [
+    "LICENSE.txt",
+    "README.md"
+  ]
+
+  gem.add_runtime_dependency("unf", ["< 1.0.0", ">= 0.0.5"])
+  gem.add_development_dependency("test-unit", "~> 2.5.5")
+  gem.add_development_dependency("bundler", [">= 1.2.0"])
+  gem.add_development_dependency("rake", [">= 0.9.2.2", *("< 11" if RUBY_VERSION < "1.9")])
+  gem.add_development_dependency("rdoc", [">= 2.4.2"])
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name.rb
new file mode 100644
index 0000000..739570b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name.rb
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+#
+# domain_name.rb - Domain Name manipulation library for Ruby
+#
+# Copyright (C) 2011-2017 Akinori MUSHA, All rights reserved.
+#
+
+require 'domain_name/version'
+require 'domain_name/punycode'
+require 'domain_name/etld_data'
+require 'unf'
+require 'ipaddr'
+
+# Represents a domain name ready for extracting its registered domain
+# and TLD.
+class DomainName
+  # The full host name normalized, ASCII-ized and downcased using the
+  # Unicode NFC rules and the Punycode algorithm. If initialized with
+  # an IP address, the string representation of the IP address
+  # suitable for opening a connection to.
+  attr_reader :hostname
+
+  # The Unicode representation of the #hostname property.
+  #
+  # :attr_reader: hostname_idn
+
+  # The least "universally original" domain part of this domain name.
+  # For example, "example.co.uk" for "www.sub.example.co.uk". This
+  # may be nil if the hostname does not have one, like when it is an
+  # IP address, an effective TLD or higher itself, or of a
+  # non-canonical domain.
+  attr_reader :domain
+
+  # The Unicode representation of the #domain property.
+  #
+  # :attr_reader: domain_idn
+
+  # The TLD part of this domain name. For example, if the hostname is
+  # "www.sub.example.co.uk", the TLD part is "uk". This property is
+  # nil only if +ipaddr?+ is true.
+  attr_reader :tld
+
+  # The Unicode representation of the #tld property.
+  #
+  # :attr_reader: tld_idn
+
+  # Returns an IPAddr object if this is an IP address.
+  attr_reader :ipaddr
+
+  # Returns true if this is an IP address, such as "192.168.0.1" and
+  # "[::1]".
+  def ipaddr?
+    @ipaddr ? true : false
+  end
+
+  # Returns a host name representation suitable for use in the host
+  # name part of a URI. A host name, an IPv4 address, or an IPv6
+  # address enclosed in square brackets.
+  attr_reader :uri_host
+
+  # Returns true if this domain name has a canonical TLD.
+  def canonical_tld?
+    @canonical_tld_p
+  end
+
+  # Returns true if this domain name has a canonical registered
+  # domain.
+  def canonical?
+    @canonical_tld_p && (@domain ? true : false)
+  end
+
+  DOT = '.'.freeze # :nodoc:
+
+  # Parses _hostname_ into a DomainName object. An IP address is also
+  # accepted. An IPv6 address may be enclosed in square brackets.
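+  #
+  # A minimal usage sketch (editor's illustration, not part of the
+  # vendored file). Results assume the standard Public Suffix List
+  # data in ETLD_DATA, where a value of 0 marks a normal rule, -1 a
+  # wildcard rule such as "*.bd", and 1 an exception rule such as
+  # "!www.ck":
+  #
+  #   DomainName("www.sub.example.co.uk").domain  #=> "example.co.uk"
+  #   DomainName("www.sub.example.co.uk").tld     #=> "uk"
+  #   DomainName("192.168.0.1").ipaddr?           #=> true
+  #   DomainName("[::1]").uri_host                #=> "[::1]"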
+ def initialize(hostname) + hostname.is_a?(String) or + (hostname.respond_to?(:to_str) && (hostname = hostname.to_str).is_a?(String)) or + raise TypeError, "#{hostname.class} is not a String" + if hostname.start_with?(DOT) + raise ArgumentError, "domain name must not start with a dot: #{hostname}" + end + case hostname + when /\A([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\z/ + @ipaddr = IPAddr.new($1) + @uri_host = @hostname = @ipaddr.to_s + @domain = @tld = nil + return + when /\A([0-9A-Fa-f:]*:[0-9A-Fa-f:]*:[0-9A-Fa-f:]*)\z/, + /\A\[([0-9A-Fa-f:]*:[0-9A-Fa-f:]*:[0-9A-Fa-f:]*)\]\z/ + @ipaddr = IPAddr.new($1) + @hostname = @ipaddr.to_s + @uri_host = "[#{@hostname}]" + @domain = @tld = nil + return + end + @ipaddr = nil + @hostname = DomainName.normalize(hostname) + @uri_host = @hostname + if last_dot = @hostname.rindex(DOT) + @tld = @hostname[(last_dot + 1)..-1] + else + @tld = @hostname + end + etld_data = DomainName.etld_data + if @canonical_tld_p = etld_data.key?(@tld) + subdomain = domain = nil + parent = @hostname + loop { + case etld_data[parent] + when 0 + @domain = domain + return + when -1 + @domain = subdomain + return + when 1 + @domain = parent + return + end + subdomain = domain + domain = parent + pos = @hostname.index(DOT, -domain.length) or break + parent = @hostname[(pos + 1)..-1] + } + else + # unknown/local TLD + if last_dot + # fallback - accept cookies down to second level + # cf. http://www.dkim-reputation.org/regdom-libs/ + if penultimate_dot = @hostname.rindex(DOT, last_dot - 1) + @domain = @hostname[(penultimate_dot + 1)..-1] + else + @domain = @hostname + end + else + # no domain part - must be a local hostname + @domain = @tld + end + end + end + + # Checks if the server represented by this domain is qualified to + # send and receive cookies with a domain attribute value of + # _domain_. A true value given as the second argument represents + # cookies without a domain attribute value, in which case only + # hostname equality is checked. + def cookie_domain?(domain, host_only = false) + # RFC 6265 #5.3 + # When the user agent "receives a cookie": + return self == domain if host_only + + domain = DomainName.new(domain) unless DomainName === domain + if ipaddr? + # RFC 6265 #5.1.3 + # Do not perform subdomain matching against IP addresses. + @hostname == domain.hostname + else + # RFC 6265 #4.1.1 + # Domain-value must be a subdomain. + @domain && self <= domain && domain <= @domain ? true : false + end + end + + # Returns the superdomain of this domain name. + def superdomain + return nil if ipaddr? 
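+    # Editor's note (illustration, not part of the vendored file): the
+    # lines below strip the leftmost label, e.g.
+    #   DomainName("www.example.co.uk").superdomain.hostname  #=> "example.co.uk"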
+ pos = @hostname.index(DOT) or return nil + self.class.new(@hostname[(pos + 1)..-1]) + end + + def ==(other) + other = DomainName.new(other) unless DomainName === other + other.hostname == @hostname + end + + def <=>(other) + other = DomainName.new(other) unless DomainName === other + othername = other.hostname + if othername == @hostname + 0 + elsif @hostname.end_with?(othername) && @hostname[-othername.size - 1, 1] == DOT + # The other is higher + -1 + elsif othername.end_with?(@hostname) && othername[-@hostname.size - 1, 1] == DOT + # The other is lower + 1 + else + nil + end + end + + def <(other) + case self <=> other + when -1 + true + when nil + nil + else + false + end + end + + def >(other) + case self <=> other + when 1 + true + when nil + nil + else + false + end + end + + def <=(other) + case self <=> other + when -1, 0 + true + when nil + nil + else + false + end + end + + def >=(other) + case self <=> other + when 1, 0 + true + when nil + nil + else + false + end + end + + def to_s + @hostname + end + + alias to_str to_s + + def hostname_idn + @hostname_idn ||= + if @ipaddr + @hostname + else + DomainName::Punycode.decode_hostname(@hostname) + end + end + + alias idn hostname_idn + + def domain_idn + @domain_idn ||= + if @ipaddr + @domain + else + DomainName::Punycode.decode_hostname(@domain) + end + end + + def tld_idn + @tld_idn ||= + if @ipaddr + @tld + else + DomainName::Punycode.decode_hostname(@tld) + end + end + + def inspect + str = '#<%s:%s' % [self.class.name, @hostname] + if @ipaddr + str << ' (ipaddr)' + else + str << ' domain=' << @domain if @domain + str << ' tld=' << @tld if @tld + end + str << '>' + end + + class << self + # Normalizes a _domain_ using the Punycode algorithm as necessary. + # The result will be a downcased, ASCII-only string. + def normalize(domain) + DomainName::Punycode.encode_hostname(domain.chomp(DOT).to_nfc).downcase + end + end +end + +# Short hand for DomainName.new(). 
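+#
+# An illustrative sketch (editor's addition, not part of the vendored
+# file): with this shorthand, the comparison operators and the RFC 6265
+# check above compose as follows, assuming "com" is a normal rule in
+# ETLD_DATA:
+#
+#   host = DomainName("www.example.com")
+#   host.domain                          #=> "example.com"
+#   host < DomainName("example.com")     #=> true (a subdomain sorts lower)
+#   host.cookie_domain?("example.com")   #=> true
+#   host.cookie_domain?("com")           #=> false (above the registered domain)
+#   DomainName.normalize("Example.COM")  #=> "example.com"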
+def DomainName(hostname) + DomainName.new(hostname) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb new file mode 100644 index 0000000..9525d84 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb @@ -0,0 +1,8787 @@ +class DomainName + ETLD_DATA_DATE = '2019-07-01T18:45:50Z' + + ETLD_DATA = { + "ac" => 0, + "com.ac" => 0, + "edu.ac" => 0, + "gov.ac" => 0, + "net.ac" => 0, + "mil.ac" => 0, + "org.ac" => 0, + "ad" => 0, + "nom.ad" => 0, + "ae" => 0, + "co.ae" => 0, + "net.ae" => 0, + "org.ae" => 0, + "sch.ae" => 0, + "ac.ae" => 0, + "gov.ae" => 0, + "mil.ae" => 0, + "aero" => 0, + "accident-investigation.aero" => 0, + "accident-prevention.aero" => 0, + "aerobatic.aero" => 0, + "aeroclub.aero" => 0, + "aerodrome.aero" => 0, + "agents.aero" => 0, + "aircraft.aero" => 0, + "airline.aero" => 0, + "airport.aero" => 0, + "air-surveillance.aero" => 0, + "airtraffic.aero" => 0, + "air-traffic-control.aero" => 0, + "ambulance.aero" => 0, + "amusement.aero" => 0, + "association.aero" => 0, + "author.aero" => 0, + "ballooning.aero" => 0, + "broker.aero" => 0, + "caa.aero" => 0, + "cargo.aero" => 0, + "catering.aero" => 0, + "certification.aero" => 0, + "championship.aero" => 0, + "charter.aero" => 0, + "civilaviation.aero" => 0, + "club.aero" => 0, + "conference.aero" => 0, + "consultant.aero" => 0, + "consulting.aero" => 0, + "control.aero" => 0, + "council.aero" => 0, + "crew.aero" => 0, + "design.aero" => 0, + "dgca.aero" => 0, + "educator.aero" => 0, + "emergency.aero" => 0, + "engine.aero" => 0, + "engineer.aero" => 0, + "entertainment.aero" => 0, + "equipment.aero" => 0, + "exchange.aero" => 0, + "express.aero" => 0, + "federation.aero" => 0, + "flight.aero" => 0, + "freight.aero" => 0, + "fuel.aero" => 0, + "gliding.aero" => 0, + "government.aero" => 0, + "groundhandling.aero" => 0, + "group.aero" => 0, + "hanggliding.aero" => 0, + "homebuilt.aero" => 0, + "insurance.aero" => 0, + "journal.aero" => 0, + "journalist.aero" => 0, + "leasing.aero" => 0, + "logistics.aero" => 0, + "magazine.aero" => 0, + "maintenance.aero" => 0, + "media.aero" => 0, + "microlight.aero" => 0, + "modelling.aero" => 0, + "navigation.aero" => 0, + "parachuting.aero" => 0, + "paragliding.aero" => 0, + "passenger-association.aero" => 0, + "pilot.aero" => 0, + "press.aero" => 0, + "production.aero" => 0, + "recreation.aero" => 0, + "repbody.aero" => 0, + "res.aero" => 0, + "research.aero" => 0, + "rotorcraft.aero" => 0, + "safety.aero" => 0, + "scientist.aero" => 0, + "services.aero" => 0, + "show.aero" => 0, + "skydiving.aero" => 0, + "software.aero" => 0, + "student.aero" => 0, + "trader.aero" => 0, + "trading.aero" => 0, + "trainer.aero" => 0, + "union.aero" => 0, + "workinggroup.aero" => 0, + "works.aero" => 0, + "af" => 0, + "gov.af" => 0, + "com.af" => 0, + "org.af" => 0, + "net.af" => 0, + "edu.af" => 0, + "ag" => 0, + "com.ag" => 0, + "org.ag" => 0, + "net.ag" => 0, + "co.ag" => 0, + "nom.ag" => 0, + "ai" => 0, + "off.ai" => 0, + "com.ai" => 0, + "net.ai" => 0, + "org.ai" => 0, + "al" => 0, + "com.al" => 0, + "edu.al" => 0, + "gov.al" => 0, + "mil.al" => 0, + "net.al" => 0, + "org.al" => 0, + "am" => 0, + "co.am" => 0, + "com.am" => 0, + "commune.am" => 0, + "net.am" => 0, + "org.am" => 0, + "ao" => 0, + "ed.ao" => 0, + "gv.ao" => 0, + "og.ao" => 0, + "co.ao" => 0, + "pb.ao" => 0, + "it.ao" => 0, + "aq" => 0, + "ar" => 0, + 
"com.ar" => 0, + "edu.ar" => 0, + "gob.ar" => 0, + "gov.ar" => 0, + "int.ar" => 0, + "mil.ar" => 0, + "musica.ar" => 0, + "net.ar" => 0, + "org.ar" => 0, + "tur.ar" => 0, + "arpa" => 0, + "e164.arpa" => 0, + "in-addr.arpa" => 0, + "ip6.arpa" => 0, + "iris.arpa" => 0, + "uri.arpa" => 0, + "urn.arpa" => 0, + "as" => 0, + "gov.as" => 0, + "asia" => 0, + "at" => 0, + "ac.at" => 0, + "co.at" => 0, + "gv.at" => 0, + "or.at" => 0, + "au" => 0, + "com.au" => 0, + "net.au" => 0, + "org.au" => 0, + "edu.au" => 0, + "gov.au" => 0, + "asn.au" => 0, + "id.au" => 0, + "info.au" => 0, + "conf.au" => 0, + "oz.au" => 0, + "act.au" => 0, + "nsw.au" => 0, + "nt.au" => 0, + "qld.au" => 0, + "sa.au" => 0, + "tas.au" => 0, + "vic.au" => 0, + "wa.au" => 0, + "act.edu.au" => 0, + "nsw.edu.au" => 0, + "nt.edu.au" => 0, + "qld.edu.au" => 0, + "sa.edu.au" => 0, + "tas.edu.au" => 0, + "vic.edu.au" => 0, + "wa.edu.au" => 0, + "qld.gov.au" => 0, + "sa.gov.au" => 0, + "tas.gov.au" => 0, + "vic.gov.au" => 0, + "wa.gov.au" => 0, + "aw" => 0, + "com.aw" => 0, + "ax" => 0, + "az" => 0, + "com.az" => 0, + "net.az" => 0, + "int.az" => 0, + "gov.az" => 0, + "org.az" => 0, + "edu.az" => 0, + "info.az" => 0, + "pp.az" => 0, + "mil.az" => 0, + "name.az" => 0, + "pro.az" => 0, + "biz.az" => 0, + "ba" => 0, + "com.ba" => 0, + "edu.ba" => 0, + "gov.ba" => 0, + "mil.ba" => 0, + "net.ba" => 0, + "org.ba" => 0, + "bb" => 0, + "biz.bb" => 0, + "co.bb" => 0, + "com.bb" => 0, + "edu.bb" => 0, + "gov.bb" => 0, + "info.bb" => 0, + "net.bb" => 0, + "org.bb" => 0, + "store.bb" => 0, + "tv.bb" => 0, + "bd" => -1, + "be" => 0, + "ac.be" => 0, + "bf" => 0, + "gov.bf" => 0, + "bg" => 0, + "a.bg" => 0, + "b.bg" => 0, + "c.bg" => 0, + "d.bg" => 0, + "e.bg" => 0, + "f.bg" => 0, + "g.bg" => 0, + "h.bg" => 0, + "i.bg" => 0, + "j.bg" => 0, + "k.bg" => 0, + "l.bg" => 0, + "m.bg" => 0, + "n.bg" => 0, + "o.bg" => 0, + "p.bg" => 0, + "q.bg" => 0, + "r.bg" => 0, + "s.bg" => 0, + "t.bg" => 0, + "u.bg" => 0, + "v.bg" => 0, + "w.bg" => 0, + "x.bg" => 0, + "y.bg" => 0, + "z.bg" => 0, + "0.bg" => 0, + "1.bg" => 0, + "2.bg" => 0, + "3.bg" => 0, + "4.bg" => 0, + "5.bg" => 0, + "6.bg" => 0, + "7.bg" => 0, + "8.bg" => 0, + "9.bg" => 0, + "bh" => 0, + "com.bh" => 0, + "edu.bh" => 0, + "net.bh" => 0, + "org.bh" => 0, + "gov.bh" => 0, + "bi" => 0, + "co.bi" => 0, + "com.bi" => 0, + "edu.bi" => 0, + "or.bi" => 0, + "org.bi" => 0, + "biz" => 0, + "bj" => 0, + "asso.bj" => 0, + "barreau.bj" => 0, + "gouv.bj" => 0, + "bm" => 0, + "com.bm" => 0, + "edu.bm" => 0, + "gov.bm" => 0, + "net.bm" => 0, + "org.bm" => 0, + "bn" => 0, + "com.bn" => 0, + "edu.bn" => 0, + "gov.bn" => 0, + "net.bn" => 0, + "org.bn" => 0, + "bo" => 0, + "com.bo" => 0, + "edu.bo" => 0, + "gob.bo" => 0, + "int.bo" => 0, + "org.bo" => 0, + "net.bo" => 0, + "mil.bo" => 0, + "tv.bo" => 0, + "web.bo" => 0, + "academia.bo" => 0, + "agro.bo" => 0, + "arte.bo" => 0, + "blog.bo" => 0, + "bolivia.bo" => 0, + "ciencia.bo" => 0, + "cooperativa.bo" => 0, + "democracia.bo" => 0, + "deporte.bo" => 0, + "ecologia.bo" => 0, + "economia.bo" => 0, + "empresa.bo" => 0, + "indigena.bo" => 0, + "industria.bo" => 0, + "info.bo" => 0, + "medicina.bo" => 0, + "movimiento.bo" => 0, + "musica.bo" => 0, + "natural.bo" => 0, + "nombre.bo" => 0, + "noticias.bo" => 0, + "patria.bo" => 0, + "politica.bo" => 0, + "profesional.bo" => 0, + "plurinacional.bo" => 0, + "pueblo.bo" => 0, + "revista.bo" => 0, + "salud.bo" => 0, + "tecnologia.bo" => 0, + "tksat.bo" => 0, + "transporte.bo" => 0, + "wiki.bo" => 0, + "br" => 0, + "9guacu.br" => 0, 
+ "abc.br" => 0, + "adm.br" => 0, + "adv.br" => 0, + "agr.br" => 0, + "aju.br" => 0, + "am.br" => 0, + "anani.br" => 0, + "aparecida.br" => 0, + "arq.br" => 0, + "art.br" => 0, + "ato.br" => 0, + "b.br" => 0, + "barueri.br" => 0, + "belem.br" => 0, + "bhz.br" => 0, + "bio.br" => 0, + "blog.br" => 0, + "bmd.br" => 0, + "boavista.br" => 0, + "bsb.br" => 0, + "campinagrande.br" => 0, + "campinas.br" => 0, + "caxias.br" => 0, + "cim.br" => 0, + "cng.br" => 0, + "cnt.br" => 0, + "com.br" => 0, + "contagem.br" => 0, + "coop.br" => 0, + "cri.br" => 0, + "cuiaba.br" => 0, + "curitiba.br" => 0, + "def.br" => 0, + "ecn.br" => 0, + "eco.br" => 0, + "edu.br" => 0, + "emp.br" => 0, + "eng.br" => 0, + "esp.br" => 0, + "etc.br" => 0, + "eti.br" => 0, + "far.br" => 0, + "feira.br" => 0, + "flog.br" => 0, + "floripa.br" => 0, + "fm.br" => 0, + "fnd.br" => 0, + "fortal.br" => 0, + "fot.br" => 0, + "foz.br" => 0, + "fst.br" => 0, + "g12.br" => 0, + "ggf.br" => 0, + "goiania.br" => 0, + "gov.br" => 0, + "ac.gov.br" => 0, + "al.gov.br" => 0, + "am.gov.br" => 0, + "ap.gov.br" => 0, + "ba.gov.br" => 0, + "ce.gov.br" => 0, + "df.gov.br" => 0, + "es.gov.br" => 0, + "go.gov.br" => 0, + "ma.gov.br" => 0, + "mg.gov.br" => 0, + "ms.gov.br" => 0, + "mt.gov.br" => 0, + "pa.gov.br" => 0, + "pb.gov.br" => 0, + "pe.gov.br" => 0, + "pi.gov.br" => 0, + "pr.gov.br" => 0, + "rj.gov.br" => 0, + "rn.gov.br" => 0, + "ro.gov.br" => 0, + "rr.gov.br" => 0, + "rs.gov.br" => 0, + "sc.gov.br" => 0, + "se.gov.br" => 0, + "sp.gov.br" => 0, + "to.gov.br" => 0, + "gru.br" => 0, + "imb.br" => 0, + "ind.br" => 0, + "inf.br" => 0, + "jab.br" => 0, + "jampa.br" => 0, + "jdf.br" => 0, + "joinville.br" => 0, + "jor.br" => 0, + "jus.br" => 0, + "leg.br" => 0, + "lel.br" => 0, + "londrina.br" => 0, + "macapa.br" => 0, + "maceio.br" => 0, + "manaus.br" => 0, + "maringa.br" => 0, + "mat.br" => 0, + "med.br" => 0, + "mil.br" => 0, + "morena.br" => 0, + "mp.br" => 0, + "mus.br" => 0, + "natal.br" => 0, + "net.br" => 0, + "niteroi.br" => 0, + "nom.br" => -1, + "not.br" => 0, + "ntr.br" => 0, + "odo.br" => 0, + "ong.br" => 0, + "org.br" => 0, + "osasco.br" => 0, + "palmas.br" => 0, + "poa.br" => 0, + "ppg.br" => 0, + "pro.br" => 0, + "psc.br" => 0, + "psi.br" => 0, + "pvh.br" => 0, + "qsl.br" => 0, + "radio.br" => 0, + "rec.br" => 0, + "recife.br" => 0, + "ribeirao.br" => 0, + "rio.br" => 0, + "riobranco.br" => 0, + "riopreto.br" => 0, + "salvador.br" => 0, + "sampa.br" => 0, + "santamaria.br" => 0, + "santoandre.br" => 0, + "saobernardo.br" => 0, + "saogonca.br" => 0, + "sjc.br" => 0, + "slg.br" => 0, + "slz.br" => 0, + "sorocaba.br" => 0, + "srv.br" => 0, + "taxi.br" => 0, + "tc.br" => 0, + "teo.br" => 0, + "the.br" => 0, + "tmp.br" => 0, + "trd.br" => 0, + "tur.br" => 0, + "tv.br" => 0, + "udi.br" => 0, + "vet.br" => 0, + "vix.br" => 0, + "vlog.br" => 0, + "wiki.br" => 0, + "zlg.br" => 0, + "bs" => 0, + "com.bs" => 0, + "net.bs" => 0, + "org.bs" => 0, + "edu.bs" => 0, + "gov.bs" => 0, + "bt" => 0, + "com.bt" => 0, + "edu.bt" => 0, + "gov.bt" => 0, + "net.bt" => 0, + "org.bt" => 0, + "bv" => 0, + "bw" => 0, + "co.bw" => 0, + "org.bw" => 0, + "by" => 0, + "gov.by" => 0, + "mil.by" => 0, + "com.by" => 0, + "of.by" => 0, + "bz" => 0, + "com.bz" => 0, + "net.bz" => 0, + "org.bz" => 0, + "edu.bz" => 0, + "gov.bz" => 0, + "ca" => 0, + "ab.ca" => 0, + "bc.ca" => 0, + "mb.ca" => 0, + "nb.ca" => 0, + "nf.ca" => 0, + "nl.ca" => 0, + "ns.ca" => 0, + "nt.ca" => 0, + "nu.ca" => 0, + "on.ca" => 0, + "pe.ca" => 0, + "qc.ca" => 0, + "sk.ca" => 0, + "yk.ca" => 0, + 
"gc.ca" => 0, + "cat" => 0, + "cc" => 0, + "cd" => 0, + "gov.cd" => 0, + "cf" => 0, + "cg" => 0, + "ch" => 0, + "ci" => 0, + "org.ci" => 0, + "or.ci" => 0, + "com.ci" => 0, + "co.ci" => 0, + "edu.ci" => 0, + "ed.ci" => 0, + "ac.ci" => 0, + "net.ci" => 0, + "go.ci" => 0, + "asso.ci" => 0, + "xn--aroport-bya.ci" => 0, + "int.ci" => 0, + "presse.ci" => 0, + "md.ci" => 0, + "gouv.ci" => 0, + "ck" => -1, + "www.ck" => 1, + "cl" => 0, + "gov.cl" => 0, + "gob.cl" => 0, + "co.cl" => 0, + "mil.cl" => 0, + "cm" => 0, + "co.cm" => 0, + "com.cm" => 0, + "gov.cm" => 0, + "net.cm" => 0, + "cn" => 0, + "ac.cn" => 0, + "com.cn" => 0, + "edu.cn" => 0, + "gov.cn" => 0, + "net.cn" => 0, + "org.cn" => 0, + "mil.cn" => 0, + "xn--55qx5d.cn" => 0, + "xn--io0a7i.cn" => 0, + "xn--od0alg.cn" => 0, + "ah.cn" => 0, + "bj.cn" => 0, + "cq.cn" => 0, + "fj.cn" => 0, + "gd.cn" => 0, + "gs.cn" => 0, + "gz.cn" => 0, + "gx.cn" => 0, + "ha.cn" => 0, + "hb.cn" => 0, + "he.cn" => 0, + "hi.cn" => 0, + "hl.cn" => 0, + "hn.cn" => 0, + "jl.cn" => 0, + "js.cn" => 0, + "jx.cn" => 0, + "ln.cn" => 0, + "nm.cn" => 0, + "nx.cn" => 0, + "qh.cn" => 0, + "sc.cn" => 0, + "sd.cn" => 0, + "sh.cn" => 0, + "sn.cn" => 0, + "sx.cn" => 0, + "tj.cn" => 0, + "xj.cn" => 0, + "xz.cn" => 0, + "yn.cn" => 0, + "zj.cn" => 0, + "hk.cn" => 0, + "mo.cn" => 0, + "tw.cn" => 0, + "co" => 0, + "arts.co" => 0, + "com.co" => 0, + "edu.co" => 0, + "firm.co" => 0, + "gov.co" => 0, + "info.co" => 0, + "int.co" => 0, + "mil.co" => 0, + "net.co" => 0, + "nom.co" => 0, + "org.co" => 0, + "rec.co" => 0, + "web.co" => 0, + "com" => 0, + "coop" => 0, + "cr" => 0, + "ac.cr" => 0, + "co.cr" => 0, + "ed.cr" => 0, + "fi.cr" => 0, + "go.cr" => 0, + "or.cr" => 0, + "sa.cr" => 0, + "cu" => 0, + "com.cu" => 0, + "edu.cu" => 0, + "org.cu" => 0, + "net.cu" => 0, + "gov.cu" => 0, + "inf.cu" => 0, + "cv" => 0, + "cw" => 0, + "com.cw" => 0, + "edu.cw" => 0, + "net.cw" => 0, + "org.cw" => 0, + "cx" => 0, + "gov.cx" => 0, + "cy" => 0, + "ac.cy" => 0, + "biz.cy" => 0, + "com.cy" => 0, + "ekloges.cy" => 0, + "gov.cy" => 0, + "ltd.cy" => 0, + "name.cy" => 0, + "net.cy" => 0, + "org.cy" => 0, + "parliament.cy" => 0, + "press.cy" => 0, + "pro.cy" => 0, + "tm.cy" => 0, + "cz" => 0, + "de" => 0, + "dj" => 0, + "dk" => 0, + "dm" => 0, + "com.dm" => 0, + "net.dm" => 0, + "org.dm" => 0, + "edu.dm" => 0, + "gov.dm" => 0, + "do" => 0, + "art.do" => 0, + "com.do" => 0, + "edu.do" => 0, + "gob.do" => 0, + "gov.do" => 0, + "mil.do" => 0, + "net.do" => 0, + "org.do" => 0, + "sld.do" => 0, + "web.do" => 0, + "dz" => 0, + "com.dz" => 0, + "org.dz" => 0, + "net.dz" => 0, + "gov.dz" => 0, + "edu.dz" => 0, + "asso.dz" => 0, + "pol.dz" => 0, + "art.dz" => 0, + "ec" => 0, + "com.ec" => 0, + "info.ec" => 0, + "net.ec" => 0, + "fin.ec" => 0, + "k12.ec" => 0, + "med.ec" => 0, + "pro.ec" => 0, + "org.ec" => 0, + "edu.ec" => 0, + "gov.ec" => 0, + "gob.ec" => 0, + "mil.ec" => 0, + "edu" => 0, + "ee" => 0, + "edu.ee" => 0, + "gov.ee" => 0, + "riik.ee" => 0, + "lib.ee" => 0, + "med.ee" => 0, + "com.ee" => 0, + "pri.ee" => 0, + "aip.ee" => 0, + "org.ee" => 0, + "fie.ee" => 0, + "eg" => 0, + "com.eg" => 0, + "edu.eg" => 0, + "eun.eg" => 0, + "gov.eg" => 0, + "mil.eg" => 0, + "name.eg" => 0, + "net.eg" => 0, + "org.eg" => 0, + "sci.eg" => 0, + "er" => -1, + "es" => 0, + "com.es" => 0, + "nom.es" => 0, + "org.es" => 0, + "gob.es" => 0, + "edu.es" => 0, + "et" => 0, + "com.et" => 0, + "gov.et" => 0, + "org.et" => 0, + "edu.et" => 0, + "biz.et" => 0, + "name.et" => 0, + "info.et" => 0, + "net.et" => 0, + "eu" => 0, + "fi" => 
0, + "aland.fi" => 0, + "fj" => -1, + "fk" => -1, + "fm" => 0, + "fo" => 0, + "fr" => 0, + "asso.fr" => 0, + "com.fr" => 0, + "gouv.fr" => 0, + "nom.fr" => 0, + "prd.fr" => 0, + "tm.fr" => 0, + "aeroport.fr" => 0, + "avocat.fr" => 0, + "avoues.fr" => 0, + "cci.fr" => 0, + "chambagri.fr" => 0, + "chirurgiens-dentistes.fr" => 0, + "experts-comptables.fr" => 0, + "geometre-expert.fr" => 0, + "greta.fr" => 0, + "huissier-justice.fr" => 0, + "medecin.fr" => 0, + "notaires.fr" => 0, + "pharmacien.fr" => 0, + "port.fr" => 0, + "veterinaire.fr" => 0, + "ga" => 0, + "gb" => 0, + "gd" => 0, + "ge" => 0, + "com.ge" => 0, + "edu.ge" => 0, + "gov.ge" => 0, + "org.ge" => 0, + "mil.ge" => 0, + "net.ge" => 0, + "pvt.ge" => 0, + "gf" => 0, + "gg" => 0, + "co.gg" => 0, + "net.gg" => 0, + "org.gg" => 0, + "gh" => 0, + "com.gh" => 0, + "edu.gh" => 0, + "gov.gh" => 0, + "org.gh" => 0, + "mil.gh" => 0, + "gi" => 0, + "com.gi" => 0, + "ltd.gi" => 0, + "gov.gi" => 0, + "mod.gi" => 0, + "edu.gi" => 0, + "org.gi" => 0, + "gl" => 0, + "co.gl" => 0, + "com.gl" => 0, + "edu.gl" => 0, + "net.gl" => 0, + "org.gl" => 0, + "gm" => 0, + "gn" => 0, + "ac.gn" => 0, + "com.gn" => 0, + "edu.gn" => 0, + "gov.gn" => 0, + "org.gn" => 0, + "net.gn" => 0, + "gov" => 0, + "gp" => 0, + "com.gp" => 0, + "net.gp" => 0, + "mobi.gp" => 0, + "edu.gp" => 0, + "org.gp" => 0, + "asso.gp" => 0, + "gq" => 0, + "gr" => 0, + "com.gr" => 0, + "edu.gr" => 0, + "net.gr" => 0, + "org.gr" => 0, + "gov.gr" => 0, + "gs" => 0, + "gt" => 0, + "com.gt" => 0, + "edu.gt" => 0, + "gob.gt" => 0, + "ind.gt" => 0, + "mil.gt" => 0, + "net.gt" => 0, + "org.gt" => 0, + "gu" => 0, + "com.gu" => 0, + "edu.gu" => 0, + "gov.gu" => 0, + "guam.gu" => 0, + "info.gu" => 0, + "net.gu" => 0, + "org.gu" => 0, + "web.gu" => 0, + "gw" => 0, + "gy" => 0, + "co.gy" => 0, + "com.gy" => 0, + "edu.gy" => 0, + "gov.gy" => 0, + "net.gy" => 0, + "org.gy" => 0, + "hk" => 0, + "com.hk" => 0, + "edu.hk" => 0, + "gov.hk" => 0, + "idv.hk" => 0, + "net.hk" => 0, + "org.hk" => 0, + "xn--55qx5d.hk" => 0, + "xn--wcvs22d.hk" => 0, + "xn--lcvr32d.hk" => 0, + "xn--mxtq1m.hk" => 0, + "xn--gmqw5a.hk" => 0, + "xn--ciqpn.hk" => 0, + "xn--gmq050i.hk" => 0, + "xn--zf0avx.hk" => 0, + "xn--io0a7i.hk" => 0, + "xn--mk0axi.hk" => 0, + "xn--od0alg.hk" => 0, + "xn--od0aq3b.hk" => 0, + "xn--tn0ag.hk" => 0, + "xn--uc0atv.hk" => 0, + "xn--uc0ay4a.hk" => 0, + "hm" => 0, + "hn" => 0, + "com.hn" => 0, + "edu.hn" => 0, + "org.hn" => 0, + "net.hn" => 0, + "mil.hn" => 0, + "gob.hn" => 0, + "hr" => 0, + "iz.hr" => 0, + "from.hr" => 0, + "name.hr" => 0, + "com.hr" => 0, + "ht" => 0, + "com.ht" => 0, + "shop.ht" => 0, + "firm.ht" => 0, + "info.ht" => 0, + "adult.ht" => 0, + "net.ht" => 0, + "pro.ht" => 0, + "org.ht" => 0, + "med.ht" => 0, + "art.ht" => 0, + "coop.ht" => 0, + "pol.ht" => 0, + "asso.ht" => 0, + "edu.ht" => 0, + "rel.ht" => 0, + "gouv.ht" => 0, + "perso.ht" => 0, + "hu" => 0, + "co.hu" => 0, + "info.hu" => 0, + "org.hu" => 0, + "priv.hu" => 0, + "sport.hu" => 0, + "tm.hu" => 0, + "2000.hu" => 0, + "agrar.hu" => 0, + "bolt.hu" => 0, + "casino.hu" => 0, + "city.hu" => 0, + "erotica.hu" => 0, + "erotika.hu" => 0, + "film.hu" => 0, + "forum.hu" => 0, + "games.hu" => 0, + "hotel.hu" => 0, + "ingatlan.hu" => 0, + "jogasz.hu" => 0, + "konyvelo.hu" => 0, + "lakas.hu" => 0, + "media.hu" => 0, + "news.hu" => 0, + "reklam.hu" => 0, + "sex.hu" => 0, + "shop.hu" => 0, + "suli.hu" => 0, + "szex.hu" => 0, + "tozsde.hu" => 0, + "utazas.hu" => 0, + "video.hu" => 0, + "id" => 0, + "ac.id" => 0, + "biz.id" => 0, + "co.id" => 
0, + "desa.id" => 0, + "go.id" => 0, + "mil.id" => 0, + "my.id" => 0, + "net.id" => 0, + "or.id" => 0, + "ponpes.id" => 0, + "sch.id" => 0, + "web.id" => 0, + "ie" => 0, + "gov.ie" => 0, + "il" => 0, + "ac.il" => 0, + "co.il" => 0, + "gov.il" => 0, + "idf.il" => 0, + "k12.il" => 0, + "muni.il" => 0, + "net.il" => 0, + "org.il" => 0, + "im" => 0, + "ac.im" => 0, + "co.im" => 0, + "com.im" => 0, + "ltd.co.im" => 0, + "net.im" => 0, + "org.im" => 0, + "plc.co.im" => 0, + "tt.im" => 0, + "tv.im" => 0, + "in" => 0, + "co.in" => 0, + "firm.in" => 0, + "net.in" => 0, + "org.in" => 0, + "gen.in" => 0, + "ind.in" => 0, + "nic.in" => 0, + "ac.in" => 0, + "edu.in" => 0, + "res.in" => 0, + "gov.in" => 0, + "mil.in" => 0, + "info" => 0, + "int" => 0, + "eu.int" => 0, + "io" => 0, + "com.io" => 0, + "iq" => 0, + "gov.iq" => 0, + "edu.iq" => 0, + "mil.iq" => 0, + "com.iq" => 0, + "org.iq" => 0, + "net.iq" => 0, + "ir" => 0, + "ac.ir" => 0, + "co.ir" => 0, + "gov.ir" => 0, + "id.ir" => 0, + "net.ir" => 0, + "org.ir" => 0, + "sch.ir" => 0, + "xn--mgba3a4f16a.ir" => 0, + "xn--mgba3a4fra.ir" => 0, + "is" => 0, + "net.is" => 0, + "com.is" => 0, + "edu.is" => 0, + "gov.is" => 0, + "org.is" => 0, + "int.is" => 0, + "it" => 0, + "gov.it" => 0, + "edu.it" => 0, + "abr.it" => 0, + "abruzzo.it" => 0, + "aosta-valley.it" => 0, + "aostavalley.it" => 0, + "bas.it" => 0, + "basilicata.it" => 0, + "cal.it" => 0, + "calabria.it" => 0, + "cam.it" => 0, + "campania.it" => 0, + "emilia-romagna.it" => 0, + "emiliaromagna.it" => 0, + "emr.it" => 0, + "friuli-v-giulia.it" => 0, + "friuli-ve-giulia.it" => 0, + "friuli-vegiulia.it" => 0, + "friuli-venezia-giulia.it" => 0, + "friuli-veneziagiulia.it" => 0, + "friuli-vgiulia.it" => 0, + "friuliv-giulia.it" => 0, + "friulive-giulia.it" => 0, + "friulivegiulia.it" => 0, + "friulivenezia-giulia.it" => 0, + "friuliveneziagiulia.it" => 0, + "friulivgiulia.it" => 0, + "fvg.it" => 0, + "laz.it" => 0, + "lazio.it" => 0, + "lig.it" => 0, + "liguria.it" => 0, + "lom.it" => 0, + "lombardia.it" => 0, + "lombardy.it" => 0, + "lucania.it" => 0, + "mar.it" => 0, + "marche.it" => 0, + "mol.it" => 0, + "molise.it" => 0, + "piedmont.it" => 0, + "piemonte.it" => 0, + "pmn.it" => 0, + "pug.it" => 0, + "puglia.it" => 0, + "sar.it" => 0, + "sardegna.it" => 0, + "sardinia.it" => 0, + "sic.it" => 0, + "sicilia.it" => 0, + "sicily.it" => 0, + "taa.it" => 0, + "tos.it" => 0, + "toscana.it" => 0, + "trentin-sud-tirol.it" => 0, + "xn--trentin-sd-tirol-rzb.it" => 0, + "trentin-sudtirol.it" => 0, + "xn--trentin-sdtirol-7vb.it" => 0, + "trentin-sued-tirol.it" => 0, + "trentin-suedtirol.it" => 0, + "trentino-a-adige.it" => 0, + "trentino-aadige.it" => 0, + "trentino-alto-adige.it" => 0, + "trentino-altoadige.it" => 0, + "trentino-s-tirol.it" => 0, + "trentino-stirol.it" => 0, + "trentino-sud-tirol.it" => 0, + "xn--trentino-sd-tirol-c3b.it" => 0, + "trentino-sudtirol.it" => 0, + "xn--trentino-sdtirol-szb.it" => 0, + "trentino-sued-tirol.it" => 0, + "trentino-suedtirol.it" => 0, + "trentino.it" => 0, + "trentinoa-adige.it" => 0, + "trentinoaadige.it" => 0, + "trentinoalto-adige.it" => 0, + "trentinoaltoadige.it" => 0, + "trentinos-tirol.it" => 0, + "trentinostirol.it" => 0, + "trentinosud-tirol.it" => 0, + "xn--trentinosd-tirol-rzb.it" => 0, + "trentinosudtirol.it" => 0, + "xn--trentinosdtirol-7vb.it" => 0, + "trentinosued-tirol.it" => 0, + "trentinosuedtirol.it" => 0, + "trentinsud-tirol.it" => 0, + "xn--trentinsd-tirol-6vb.it" => 0, + "trentinsudtirol.it" => 0, + "xn--trentinsdtirol-nsb.it" => 0, + 
"trentinsued-tirol.it" => 0, + "trentinsuedtirol.it" => 0, + "tuscany.it" => 0, + "umb.it" => 0, + "umbria.it" => 0, + "val-d-aosta.it" => 0, + "val-daosta.it" => 0, + "vald-aosta.it" => 0, + "valdaosta.it" => 0, + "valle-aosta.it" => 0, + "valle-d-aosta.it" => 0, + "valle-daosta.it" => 0, + "valleaosta.it" => 0, + "valled-aosta.it" => 0, + "valledaosta.it" => 0, + "vallee-aoste.it" => 0, + "xn--valle-aoste-ebb.it" => 0, + "vallee-d-aoste.it" => 0, + "xn--valle-d-aoste-ehb.it" => 0, + "valleeaoste.it" => 0, + "xn--valleaoste-e7a.it" => 0, + "valleedaoste.it" => 0, + "xn--valledaoste-ebb.it" => 0, + "vao.it" => 0, + "vda.it" => 0, + "ven.it" => 0, + "veneto.it" => 0, + "ag.it" => 0, + "agrigento.it" => 0, + "al.it" => 0, + "alessandria.it" => 0, + "alto-adige.it" => 0, + "altoadige.it" => 0, + "an.it" => 0, + "ancona.it" => 0, + "andria-barletta-trani.it" => 0, + "andria-trani-barletta.it" => 0, + "andriabarlettatrani.it" => 0, + "andriatranibarletta.it" => 0, + "ao.it" => 0, + "aosta.it" => 0, + "aoste.it" => 0, + "ap.it" => 0, + "aq.it" => 0, + "aquila.it" => 0, + "ar.it" => 0, + "arezzo.it" => 0, + "ascoli-piceno.it" => 0, + "ascolipiceno.it" => 0, + "asti.it" => 0, + "at.it" => 0, + "av.it" => 0, + "avellino.it" => 0, + "ba.it" => 0, + "balsan-sudtirol.it" => 0, + "xn--balsan-sdtirol-nsb.it" => 0, + "balsan-suedtirol.it" => 0, + "balsan.it" => 0, + "bari.it" => 0, + "barletta-trani-andria.it" => 0, + "barlettatraniandria.it" => 0, + "belluno.it" => 0, + "benevento.it" => 0, + "bergamo.it" => 0, + "bg.it" => 0, + "bi.it" => 0, + "biella.it" => 0, + "bl.it" => 0, + "bn.it" => 0, + "bo.it" => 0, + "bologna.it" => 0, + "bolzano-altoadige.it" => 0, + "bolzano.it" => 0, + "bozen-sudtirol.it" => 0, + "xn--bozen-sdtirol-2ob.it" => 0, + "bozen-suedtirol.it" => 0, + "bozen.it" => 0, + "br.it" => 0, + "brescia.it" => 0, + "brindisi.it" => 0, + "bs.it" => 0, + "bt.it" => 0, + "bulsan-sudtirol.it" => 0, + "xn--bulsan-sdtirol-nsb.it" => 0, + "bulsan-suedtirol.it" => 0, + "bulsan.it" => 0, + "bz.it" => 0, + "ca.it" => 0, + "cagliari.it" => 0, + "caltanissetta.it" => 0, + "campidano-medio.it" => 0, + "campidanomedio.it" => 0, + "campobasso.it" => 0, + "carbonia-iglesias.it" => 0, + "carboniaiglesias.it" => 0, + "carrara-massa.it" => 0, + "carraramassa.it" => 0, + "caserta.it" => 0, + "catania.it" => 0, + "catanzaro.it" => 0, + "cb.it" => 0, + "ce.it" => 0, + "cesena-forli.it" => 0, + "xn--cesena-forl-mcb.it" => 0, + "cesenaforli.it" => 0, + "xn--cesenaforl-i8a.it" => 0, + "ch.it" => 0, + "chieti.it" => 0, + "ci.it" => 0, + "cl.it" => 0, + "cn.it" => 0, + "co.it" => 0, + "como.it" => 0, + "cosenza.it" => 0, + "cr.it" => 0, + "cremona.it" => 0, + "crotone.it" => 0, + "cs.it" => 0, + "ct.it" => 0, + "cuneo.it" => 0, + "cz.it" => 0, + "dell-ogliastra.it" => 0, + "dellogliastra.it" => 0, + "en.it" => 0, + "enna.it" => 0, + "fc.it" => 0, + "fe.it" => 0, + "fermo.it" => 0, + "ferrara.it" => 0, + "fg.it" => 0, + "fi.it" => 0, + "firenze.it" => 0, + "florence.it" => 0, + "fm.it" => 0, + "foggia.it" => 0, + "forli-cesena.it" => 0, + "xn--forl-cesena-fcb.it" => 0, + "forlicesena.it" => 0, + "xn--forlcesena-c8a.it" => 0, + "fr.it" => 0, + "frosinone.it" => 0, + "ge.it" => 0, + "genoa.it" => 0, + "genova.it" => 0, + "go.it" => 0, + "gorizia.it" => 0, + "gr.it" => 0, + "grosseto.it" => 0, + "iglesias-carbonia.it" => 0, + "iglesiascarbonia.it" => 0, + "im.it" => 0, + "imperia.it" => 0, + "is.it" => 0, + "isernia.it" => 0, + "kr.it" => 0, + "la-spezia.it" => 0, + "laquila.it" => 0, + "laspezia.it" => 0, + "latina.it" 
=> 0, + "lc.it" => 0, + "le.it" => 0, + "lecce.it" => 0, + "lecco.it" => 0, + "li.it" => 0, + "livorno.it" => 0, + "lo.it" => 0, + "lodi.it" => 0, + "lt.it" => 0, + "lu.it" => 0, + "lucca.it" => 0, + "macerata.it" => 0, + "mantova.it" => 0, + "massa-carrara.it" => 0, + "massacarrara.it" => 0, + "matera.it" => 0, + "mb.it" => 0, + "mc.it" => 0, + "me.it" => 0, + "medio-campidano.it" => 0, + "mediocampidano.it" => 0, + "messina.it" => 0, + "mi.it" => 0, + "milan.it" => 0, + "milano.it" => 0, + "mn.it" => 0, + "mo.it" => 0, + "modena.it" => 0, + "monza-brianza.it" => 0, + "monza-e-della-brianza.it" => 0, + "monza.it" => 0, + "monzabrianza.it" => 0, + "monzaebrianza.it" => 0, + "monzaedellabrianza.it" => 0, + "ms.it" => 0, + "mt.it" => 0, + "na.it" => 0, + "naples.it" => 0, + "napoli.it" => 0, + "no.it" => 0, + "novara.it" => 0, + "nu.it" => 0, + "nuoro.it" => 0, + "og.it" => 0, + "ogliastra.it" => 0, + "olbia-tempio.it" => 0, + "olbiatempio.it" => 0, + "or.it" => 0, + "oristano.it" => 0, + "ot.it" => 0, + "pa.it" => 0, + "padova.it" => 0, + "padua.it" => 0, + "palermo.it" => 0, + "parma.it" => 0, + "pavia.it" => 0, + "pc.it" => 0, + "pd.it" => 0, + "pe.it" => 0, + "perugia.it" => 0, + "pesaro-urbino.it" => 0, + "pesarourbino.it" => 0, + "pescara.it" => 0, + "pg.it" => 0, + "pi.it" => 0, + "piacenza.it" => 0, + "pisa.it" => 0, + "pistoia.it" => 0, + "pn.it" => 0, + "po.it" => 0, + "pordenone.it" => 0, + "potenza.it" => 0, + "pr.it" => 0, + "prato.it" => 0, + "pt.it" => 0, + "pu.it" => 0, + "pv.it" => 0, + "pz.it" => 0, + "ra.it" => 0, + "ragusa.it" => 0, + "ravenna.it" => 0, + "rc.it" => 0, + "re.it" => 0, + "reggio-calabria.it" => 0, + "reggio-emilia.it" => 0, + "reggiocalabria.it" => 0, + "reggioemilia.it" => 0, + "rg.it" => 0, + "ri.it" => 0, + "rieti.it" => 0, + "rimini.it" => 0, + "rm.it" => 0, + "rn.it" => 0, + "ro.it" => 0, + "roma.it" => 0, + "rome.it" => 0, + "rovigo.it" => 0, + "sa.it" => 0, + "salerno.it" => 0, + "sassari.it" => 0, + "savona.it" => 0, + "si.it" => 0, + "siena.it" => 0, + "siracusa.it" => 0, + "so.it" => 0, + "sondrio.it" => 0, + "sp.it" => 0, + "sr.it" => 0, + "ss.it" => 0, + "suedtirol.it" => 0, + "xn--sdtirol-n2a.it" => 0, + "sv.it" => 0, + "ta.it" => 0, + "taranto.it" => 0, + "te.it" => 0, + "tempio-olbia.it" => 0, + "tempioolbia.it" => 0, + "teramo.it" => 0, + "terni.it" => 0, + "tn.it" => 0, + "to.it" => 0, + "torino.it" => 0, + "tp.it" => 0, + "tr.it" => 0, + "trani-andria-barletta.it" => 0, + "trani-barletta-andria.it" => 0, + "traniandriabarletta.it" => 0, + "tranibarlettaandria.it" => 0, + "trapani.it" => 0, + "trento.it" => 0, + "treviso.it" => 0, + "trieste.it" => 0, + "ts.it" => 0, + "turin.it" => 0, + "tv.it" => 0, + "ud.it" => 0, + "udine.it" => 0, + "urbino-pesaro.it" => 0, + "urbinopesaro.it" => 0, + "va.it" => 0, + "varese.it" => 0, + "vb.it" => 0, + "vc.it" => 0, + "ve.it" => 0, + "venezia.it" => 0, + "venice.it" => 0, + "verbania.it" => 0, + "vercelli.it" => 0, + "verona.it" => 0, + "vi.it" => 0, + "vibo-valentia.it" => 0, + "vibovalentia.it" => 0, + "vicenza.it" => 0, + "viterbo.it" => 0, + "vr.it" => 0, + "vs.it" => 0, + "vt.it" => 0, + "vv.it" => 0, + "je" => 0, + "co.je" => 0, + "net.je" => 0, + "org.je" => 0, + "jm" => -1, + "jo" => 0, + "com.jo" => 0, + "org.jo" => 0, + "net.jo" => 0, + "edu.jo" => 0, + "sch.jo" => 0, + "gov.jo" => 0, + "mil.jo" => 0, + "name.jo" => 0, + "jobs" => 0, + "jp" => 0, + "ac.jp" => 0, + "ad.jp" => 0, + "co.jp" => 0, + "ed.jp" => 0, + "go.jp" => 0, + "gr.jp" => 0, + "lg.jp" => 0, + "ne.jp" => 0, + "or.jp" => 0, + 
"aichi.jp" => 0, + "akita.jp" => 0, + "aomori.jp" => 0, + "chiba.jp" => 0, + "ehime.jp" => 0, + "fukui.jp" => 0, + "fukuoka.jp" => 0, + "fukushima.jp" => 0, + "gifu.jp" => 0, + "gunma.jp" => 0, + "hiroshima.jp" => 0, + "hokkaido.jp" => 0, + "hyogo.jp" => 0, + "ibaraki.jp" => 0, + "ishikawa.jp" => 0, + "iwate.jp" => 0, + "kagawa.jp" => 0, + "kagoshima.jp" => 0, + "kanagawa.jp" => 0, + "kochi.jp" => 0, + "kumamoto.jp" => 0, + "kyoto.jp" => 0, + "mie.jp" => 0, + "miyagi.jp" => 0, + "miyazaki.jp" => 0, + "nagano.jp" => 0, + "nagasaki.jp" => 0, + "nara.jp" => 0, + "niigata.jp" => 0, + "oita.jp" => 0, + "okayama.jp" => 0, + "okinawa.jp" => 0, + "osaka.jp" => 0, + "saga.jp" => 0, + "saitama.jp" => 0, + "shiga.jp" => 0, + "shimane.jp" => 0, + "shizuoka.jp" => 0, + "tochigi.jp" => 0, + "tokushima.jp" => 0, + "tokyo.jp" => 0, + "tottori.jp" => 0, + "toyama.jp" => 0, + "wakayama.jp" => 0, + "yamagata.jp" => 0, + "yamaguchi.jp" => 0, + "yamanashi.jp" => 0, + "xn--4pvxs.jp" => 0, + "xn--vgu402c.jp" => 0, + "xn--c3s14m.jp" => 0, + "xn--f6qx53a.jp" => 0, + "xn--8pvr4u.jp" => 0, + "xn--uist22h.jp" => 0, + "xn--djrs72d6uy.jp" => 0, + "xn--mkru45i.jp" => 0, + "xn--0trq7p7nn.jp" => 0, + "xn--8ltr62k.jp" => 0, + "xn--2m4a15e.jp" => 0, + "xn--efvn9s.jp" => 0, + "xn--32vp30h.jp" => 0, + "xn--4it797k.jp" => 0, + "xn--1lqs71d.jp" => 0, + "xn--5rtp49c.jp" => 0, + "xn--5js045d.jp" => 0, + "xn--ehqz56n.jp" => 0, + "xn--1lqs03n.jp" => 0, + "xn--qqqt11m.jp" => 0, + "xn--kbrq7o.jp" => 0, + "xn--pssu33l.jp" => 0, + "xn--ntsq17g.jp" => 0, + "xn--uisz3g.jp" => 0, + "xn--6btw5a.jp" => 0, + "xn--1ctwo.jp" => 0, + "xn--6orx2r.jp" => 0, + "xn--rht61e.jp" => 0, + "xn--rht27z.jp" => 0, + "xn--djty4k.jp" => 0, + "xn--nit225k.jp" => 0, + "xn--rht3d.jp" => 0, + "xn--klty5x.jp" => 0, + "xn--kltx9a.jp" => 0, + "xn--kltp7d.jp" => 0, + "xn--uuwu58a.jp" => 0, + "xn--zbx025d.jp" => 0, + "xn--ntso0iqx3a.jp" => 0, + "xn--elqq16h.jp" => 0, + "xn--4it168d.jp" => 0, + "xn--klt787d.jp" => 0, + "xn--rny31h.jp" => 0, + "xn--7t0a264c.jp" => 0, + "xn--5rtq34k.jp" => 0, + "xn--k7yn95e.jp" => 0, + "xn--tor131o.jp" => 0, + "xn--d5qv7z876c.jp" => 0, + "kawasaki.jp" => -1, + "kitakyushu.jp" => -1, + "kobe.jp" => -1, + "nagoya.jp" => -1, + "sapporo.jp" => -1, + "sendai.jp" => -1, + "yokohama.jp" => -1, + "city.kawasaki.jp" => 1, + "city.kitakyushu.jp" => 1, + "city.kobe.jp" => 1, + "city.nagoya.jp" => 1, + "city.sapporo.jp" => 1, + "city.sendai.jp" => 1, + "city.yokohama.jp" => 1, + "aisai.aichi.jp" => 0, + "ama.aichi.jp" => 0, + "anjo.aichi.jp" => 0, + "asuke.aichi.jp" => 0, + "chiryu.aichi.jp" => 0, + "chita.aichi.jp" => 0, + "fuso.aichi.jp" => 0, + "gamagori.aichi.jp" => 0, + "handa.aichi.jp" => 0, + "hazu.aichi.jp" => 0, + "hekinan.aichi.jp" => 0, + "higashiura.aichi.jp" => 0, + "ichinomiya.aichi.jp" => 0, + "inazawa.aichi.jp" => 0, + "inuyama.aichi.jp" => 0, + "isshiki.aichi.jp" => 0, + "iwakura.aichi.jp" => 0, + "kanie.aichi.jp" => 0, + "kariya.aichi.jp" => 0, + "kasugai.aichi.jp" => 0, + "kira.aichi.jp" => 0, + "kiyosu.aichi.jp" => 0, + "komaki.aichi.jp" => 0, + "konan.aichi.jp" => 0, + "kota.aichi.jp" => 0, + "mihama.aichi.jp" => 0, + "miyoshi.aichi.jp" => 0, + "nishio.aichi.jp" => 0, + "nisshin.aichi.jp" => 0, + "obu.aichi.jp" => 0, + "oguchi.aichi.jp" => 0, + "oharu.aichi.jp" => 0, + "okazaki.aichi.jp" => 0, + "owariasahi.aichi.jp" => 0, + "seto.aichi.jp" => 0, + "shikatsu.aichi.jp" => 0, + "shinshiro.aichi.jp" => 0, + "shitara.aichi.jp" => 0, + "tahara.aichi.jp" => 0, + "takahama.aichi.jp" => 0, + "tobishima.aichi.jp" => 0, + "toei.aichi.jp" 
=> 0, + "togo.aichi.jp" => 0, + "tokai.aichi.jp" => 0, + "tokoname.aichi.jp" => 0, + "toyoake.aichi.jp" => 0, + "toyohashi.aichi.jp" => 0, + "toyokawa.aichi.jp" => 0, + "toyone.aichi.jp" => 0, + "toyota.aichi.jp" => 0, + "tsushima.aichi.jp" => 0, + "yatomi.aichi.jp" => 0, + "akita.akita.jp" => 0, + "daisen.akita.jp" => 0, + "fujisato.akita.jp" => 0, + "gojome.akita.jp" => 0, + "hachirogata.akita.jp" => 0, + "happou.akita.jp" => 0, + "higashinaruse.akita.jp" => 0, + "honjo.akita.jp" => 0, + "honjyo.akita.jp" => 0, + "ikawa.akita.jp" => 0, + "kamikoani.akita.jp" => 0, + "kamioka.akita.jp" => 0, + "katagami.akita.jp" => 0, + "kazuno.akita.jp" => 0, + "kitaakita.akita.jp" => 0, + "kosaka.akita.jp" => 0, + "kyowa.akita.jp" => 0, + "misato.akita.jp" => 0, + "mitane.akita.jp" => 0, + "moriyoshi.akita.jp" => 0, + "nikaho.akita.jp" => 0, + "noshiro.akita.jp" => 0, + "odate.akita.jp" => 0, + "oga.akita.jp" => 0, + "ogata.akita.jp" => 0, + "semboku.akita.jp" => 0, + "yokote.akita.jp" => 0, + "yurihonjo.akita.jp" => 0, + "aomori.aomori.jp" => 0, + "gonohe.aomori.jp" => 0, + "hachinohe.aomori.jp" => 0, + "hashikami.aomori.jp" => 0, + "hiranai.aomori.jp" => 0, + "hirosaki.aomori.jp" => 0, + "itayanagi.aomori.jp" => 0, + "kuroishi.aomori.jp" => 0, + "misawa.aomori.jp" => 0, + "mutsu.aomori.jp" => 0, + "nakadomari.aomori.jp" => 0, + "noheji.aomori.jp" => 0, + "oirase.aomori.jp" => 0, + "owani.aomori.jp" => 0, + "rokunohe.aomori.jp" => 0, + "sannohe.aomori.jp" => 0, + "shichinohe.aomori.jp" => 0, + "shingo.aomori.jp" => 0, + "takko.aomori.jp" => 0, + "towada.aomori.jp" => 0, + "tsugaru.aomori.jp" => 0, + "tsuruta.aomori.jp" => 0, + "abiko.chiba.jp" => 0, + "asahi.chiba.jp" => 0, + "chonan.chiba.jp" => 0, + "chosei.chiba.jp" => 0, + "choshi.chiba.jp" => 0, + "chuo.chiba.jp" => 0, + "funabashi.chiba.jp" => 0, + "futtsu.chiba.jp" => 0, + "hanamigawa.chiba.jp" => 0, + "ichihara.chiba.jp" => 0, + "ichikawa.chiba.jp" => 0, + "ichinomiya.chiba.jp" => 0, + "inzai.chiba.jp" => 0, + "isumi.chiba.jp" => 0, + "kamagaya.chiba.jp" => 0, + "kamogawa.chiba.jp" => 0, + "kashiwa.chiba.jp" => 0, + "katori.chiba.jp" => 0, + "katsuura.chiba.jp" => 0, + "kimitsu.chiba.jp" => 0, + "kisarazu.chiba.jp" => 0, + "kozaki.chiba.jp" => 0, + "kujukuri.chiba.jp" => 0, + "kyonan.chiba.jp" => 0, + "matsudo.chiba.jp" => 0, + "midori.chiba.jp" => 0, + "mihama.chiba.jp" => 0, + "minamiboso.chiba.jp" => 0, + "mobara.chiba.jp" => 0, + "mutsuzawa.chiba.jp" => 0, + "nagara.chiba.jp" => 0, + "nagareyama.chiba.jp" => 0, + "narashino.chiba.jp" => 0, + "narita.chiba.jp" => 0, + "noda.chiba.jp" => 0, + "oamishirasato.chiba.jp" => 0, + "omigawa.chiba.jp" => 0, + "onjuku.chiba.jp" => 0, + "otaki.chiba.jp" => 0, + "sakae.chiba.jp" => 0, + "sakura.chiba.jp" => 0, + "shimofusa.chiba.jp" => 0, + "shirako.chiba.jp" => 0, + "shiroi.chiba.jp" => 0, + "shisui.chiba.jp" => 0, + "sodegaura.chiba.jp" => 0, + "sosa.chiba.jp" => 0, + "tako.chiba.jp" => 0, + "tateyama.chiba.jp" => 0, + "togane.chiba.jp" => 0, + "tohnosho.chiba.jp" => 0, + "tomisato.chiba.jp" => 0, + "urayasu.chiba.jp" => 0, + "yachimata.chiba.jp" => 0, + "yachiyo.chiba.jp" => 0, + "yokaichiba.chiba.jp" => 0, + "yokoshibahikari.chiba.jp" => 0, + "yotsukaido.chiba.jp" => 0, + "ainan.ehime.jp" => 0, + "honai.ehime.jp" => 0, + "ikata.ehime.jp" => 0, + "imabari.ehime.jp" => 0, + "iyo.ehime.jp" => 0, + "kamijima.ehime.jp" => 0, + "kihoku.ehime.jp" => 0, + "kumakogen.ehime.jp" => 0, + "masaki.ehime.jp" => 0, + "matsuno.ehime.jp" => 0, + "matsuyama.ehime.jp" => 0, + "namikata.ehime.jp" => 0, + 
"niihama.ehime.jp" => 0, + "ozu.ehime.jp" => 0, + "saijo.ehime.jp" => 0, + "seiyo.ehime.jp" => 0, + "shikokuchuo.ehime.jp" => 0, + "tobe.ehime.jp" => 0, + "toon.ehime.jp" => 0, + "uchiko.ehime.jp" => 0, + "uwajima.ehime.jp" => 0, + "yawatahama.ehime.jp" => 0, + "echizen.fukui.jp" => 0, + "eiheiji.fukui.jp" => 0, + "fukui.fukui.jp" => 0, + "ikeda.fukui.jp" => 0, + "katsuyama.fukui.jp" => 0, + "mihama.fukui.jp" => 0, + "minamiechizen.fukui.jp" => 0, + "obama.fukui.jp" => 0, + "ohi.fukui.jp" => 0, + "ono.fukui.jp" => 0, + "sabae.fukui.jp" => 0, + "sakai.fukui.jp" => 0, + "takahama.fukui.jp" => 0, + "tsuruga.fukui.jp" => 0, + "wakasa.fukui.jp" => 0, + "ashiya.fukuoka.jp" => 0, + "buzen.fukuoka.jp" => 0, + "chikugo.fukuoka.jp" => 0, + "chikuho.fukuoka.jp" => 0, + "chikujo.fukuoka.jp" => 0, + "chikushino.fukuoka.jp" => 0, + "chikuzen.fukuoka.jp" => 0, + "chuo.fukuoka.jp" => 0, + "dazaifu.fukuoka.jp" => 0, + "fukuchi.fukuoka.jp" => 0, + "hakata.fukuoka.jp" => 0, + "higashi.fukuoka.jp" => 0, + "hirokawa.fukuoka.jp" => 0, + "hisayama.fukuoka.jp" => 0, + "iizuka.fukuoka.jp" => 0, + "inatsuki.fukuoka.jp" => 0, + "kaho.fukuoka.jp" => 0, + "kasuga.fukuoka.jp" => 0, + "kasuya.fukuoka.jp" => 0, + "kawara.fukuoka.jp" => 0, + "keisen.fukuoka.jp" => 0, + "koga.fukuoka.jp" => 0, + "kurate.fukuoka.jp" => 0, + "kurogi.fukuoka.jp" => 0, + "kurume.fukuoka.jp" => 0, + "minami.fukuoka.jp" => 0, + "miyako.fukuoka.jp" => 0, + "miyama.fukuoka.jp" => 0, + "miyawaka.fukuoka.jp" => 0, + "mizumaki.fukuoka.jp" => 0, + "munakata.fukuoka.jp" => 0, + "nakagawa.fukuoka.jp" => 0, + "nakama.fukuoka.jp" => 0, + "nishi.fukuoka.jp" => 0, + "nogata.fukuoka.jp" => 0, + "ogori.fukuoka.jp" => 0, + "okagaki.fukuoka.jp" => 0, + "okawa.fukuoka.jp" => 0, + "oki.fukuoka.jp" => 0, + "omuta.fukuoka.jp" => 0, + "onga.fukuoka.jp" => 0, + "onojo.fukuoka.jp" => 0, + "oto.fukuoka.jp" => 0, + "saigawa.fukuoka.jp" => 0, + "sasaguri.fukuoka.jp" => 0, + "shingu.fukuoka.jp" => 0, + "shinyoshitomi.fukuoka.jp" => 0, + "shonai.fukuoka.jp" => 0, + "soeda.fukuoka.jp" => 0, + "sue.fukuoka.jp" => 0, + "tachiarai.fukuoka.jp" => 0, + "tagawa.fukuoka.jp" => 0, + "takata.fukuoka.jp" => 0, + "toho.fukuoka.jp" => 0, + "toyotsu.fukuoka.jp" => 0, + "tsuiki.fukuoka.jp" => 0, + "ukiha.fukuoka.jp" => 0, + "umi.fukuoka.jp" => 0, + "usui.fukuoka.jp" => 0, + "yamada.fukuoka.jp" => 0, + "yame.fukuoka.jp" => 0, + "yanagawa.fukuoka.jp" => 0, + "yukuhashi.fukuoka.jp" => 0, + "aizubange.fukushima.jp" => 0, + "aizumisato.fukushima.jp" => 0, + "aizuwakamatsu.fukushima.jp" => 0, + "asakawa.fukushima.jp" => 0, + "bandai.fukushima.jp" => 0, + "date.fukushima.jp" => 0, + "fukushima.fukushima.jp" => 0, + "furudono.fukushima.jp" => 0, + "futaba.fukushima.jp" => 0, + "hanawa.fukushima.jp" => 0, + "higashi.fukushima.jp" => 0, + "hirata.fukushima.jp" => 0, + "hirono.fukushima.jp" => 0, + "iitate.fukushima.jp" => 0, + "inawashiro.fukushima.jp" => 0, + "ishikawa.fukushima.jp" => 0, + "iwaki.fukushima.jp" => 0, + "izumizaki.fukushima.jp" => 0, + "kagamiishi.fukushima.jp" => 0, + "kaneyama.fukushima.jp" => 0, + "kawamata.fukushima.jp" => 0, + "kitakata.fukushima.jp" => 0, + "kitashiobara.fukushima.jp" => 0, + "koori.fukushima.jp" => 0, + "koriyama.fukushima.jp" => 0, + "kunimi.fukushima.jp" => 0, + "miharu.fukushima.jp" => 0, + "mishima.fukushima.jp" => 0, + "namie.fukushima.jp" => 0, + "nango.fukushima.jp" => 0, + "nishiaizu.fukushima.jp" => 0, + "nishigo.fukushima.jp" => 0, + "okuma.fukushima.jp" => 0, + "omotego.fukushima.jp" => 0, + "ono.fukushima.jp" => 0, + "otama.fukushima.jp" => 0, + 
"samegawa.fukushima.jp" => 0, + "shimogo.fukushima.jp" => 0, + "shirakawa.fukushima.jp" => 0, + "showa.fukushima.jp" => 0, + "soma.fukushima.jp" => 0, + "sukagawa.fukushima.jp" => 0, + "taishin.fukushima.jp" => 0, + "tamakawa.fukushima.jp" => 0, + "tanagura.fukushima.jp" => 0, + "tenei.fukushima.jp" => 0, + "yabuki.fukushima.jp" => 0, + "yamato.fukushima.jp" => 0, + "yamatsuri.fukushima.jp" => 0, + "yanaizu.fukushima.jp" => 0, + "yugawa.fukushima.jp" => 0, + "anpachi.gifu.jp" => 0, + "ena.gifu.jp" => 0, + "gifu.gifu.jp" => 0, + "ginan.gifu.jp" => 0, + "godo.gifu.jp" => 0, + "gujo.gifu.jp" => 0, + "hashima.gifu.jp" => 0, + "hichiso.gifu.jp" => 0, + "hida.gifu.jp" => 0, + "higashishirakawa.gifu.jp" => 0, + "ibigawa.gifu.jp" => 0, + "ikeda.gifu.jp" => 0, + "kakamigahara.gifu.jp" => 0, + "kani.gifu.jp" => 0, + "kasahara.gifu.jp" => 0, + "kasamatsu.gifu.jp" => 0, + "kawaue.gifu.jp" => 0, + "kitagata.gifu.jp" => 0, + "mino.gifu.jp" => 0, + "minokamo.gifu.jp" => 0, + "mitake.gifu.jp" => 0, + "mizunami.gifu.jp" => 0, + "motosu.gifu.jp" => 0, + "nakatsugawa.gifu.jp" => 0, + "ogaki.gifu.jp" => 0, + "sakahogi.gifu.jp" => 0, + "seki.gifu.jp" => 0, + "sekigahara.gifu.jp" => 0, + "shirakawa.gifu.jp" => 0, + "tajimi.gifu.jp" => 0, + "takayama.gifu.jp" => 0, + "tarui.gifu.jp" => 0, + "toki.gifu.jp" => 0, + "tomika.gifu.jp" => 0, + "wanouchi.gifu.jp" => 0, + "yamagata.gifu.jp" => 0, + "yaotsu.gifu.jp" => 0, + "yoro.gifu.jp" => 0, + "annaka.gunma.jp" => 0, + "chiyoda.gunma.jp" => 0, + "fujioka.gunma.jp" => 0, + "higashiagatsuma.gunma.jp" => 0, + "isesaki.gunma.jp" => 0, + "itakura.gunma.jp" => 0, + "kanna.gunma.jp" => 0, + "kanra.gunma.jp" => 0, + "katashina.gunma.jp" => 0, + "kawaba.gunma.jp" => 0, + "kiryu.gunma.jp" => 0, + "kusatsu.gunma.jp" => 0, + "maebashi.gunma.jp" => 0, + "meiwa.gunma.jp" => 0, + "midori.gunma.jp" => 0, + "minakami.gunma.jp" => 0, + "naganohara.gunma.jp" => 0, + "nakanojo.gunma.jp" => 0, + "nanmoku.gunma.jp" => 0, + "numata.gunma.jp" => 0, + "oizumi.gunma.jp" => 0, + "ora.gunma.jp" => 0, + "ota.gunma.jp" => 0, + "shibukawa.gunma.jp" => 0, + "shimonita.gunma.jp" => 0, + "shinto.gunma.jp" => 0, + "showa.gunma.jp" => 0, + "takasaki.gunma.jp" => 0, + "takayama.gunma.jp" => 0, + "tamamura.gunma.jp" => 0, + "tatebayashi.gunma.jp" => 0, + "tomioka.gunma.jp" => 0, + "tsukiyono.gunma.jp" => 0, + "tsumagoi.gunma.jp" => 0, + "ueno.gunma.jp" => 0, + "yoshioka.gunma.jp" => 0, + "asaminami.hiroshima.jp" => 0, + "daiwa.hiroshima.jp" => 0, + "etajima.hiroshima.jp" => 0, + "fuchu.hiroshima.jp" => 0, + "fukuyama.hiroshima.jp" => 0, + "hatsukaichi.hiroshima.jp" => 0, + "higashihiroshima.hiroshima.jp" => 0, + "hongo.hiroshima.jp" => 0, + "jinsekikogen.hiroshima.jp" => 0, + "kaita.hiroshima.jp" => 0, + "kui.hiroshima.jp" => 0, + "kumano.hiroshima.jp" => 0, + "kure.hiroshima.jp" => 0, + "mihara.hiroshima.jp" => 0, + "miyoshi.hiroshima.jp" => 0, + "naka.hiroshima.jp" => 0, + "onomichi.hiroshima.jp" => 0, + "osakikamijima.hiroshima.jp" => 0, + "otake.hiroshima.jp" => 0, + "saka.hiroshima.jp" => 0, + "sera.hiroshima.jp" => 0, + "seranishi.hiroshima.jp" => 0, + "shinichi.hiroshima.jp" => 0, + "shobara.hiroshima.jp" => 0, + "takehara.hiroshima.jp" => 0, + "abashiri.hokkaido.jp" => 0, + "abira.hokkaido.jp" => 0, + "aibetsu.hokkaido.jp" => 0, + "akabira.hokkaido.jp" => 0, + "akkeshi.hokkaido.jp" => 0, + "asahikawa.hokkaido.jp" => 0, + "ashibetsu.hokkaido.jp" => 0, + "ashoro.hokkaido.jp" => 0, + "assabu.hokkaido.jp" => 0, + "atsuma.hokkaido.jp" => 0, + "bibai.hokkaido.jp" => 0, + "biei.hokkaido.jp" => 0, + 
"bifuka.hokkaido.jp" => 0, + "bihoro.hokkaido.jp" => 0, + "biratori.hokkaido.jp" => 0, + "chippubetsu.hokkaido.jp" => 0, + "chitose.hokkaido.jp" => 0, + "date.hokkaido.jp" => 0, + "ebetsu.hokkaido.jp" => 0, + "embetsu.hokkaido.jp" => 0, + "eniwa.hokkaido.jp" => 0, + "erimo.hokkaido.jp" => 0, + "esan.hokkaido.jp" => 0, + "esashi.hokkaido.jp" => 0, + "fukagawa.hokkaido.jp" => 0, + "fukushima.hokkaido.jp" => 0, + "furano.hokkaido.jp" => 0, + "furubira.hokkaido.jp" => 0, + "haboro.hokkaido.jp" => 0, + "hakodate.hokkaido.jp" => 0, + "hamatonbetsu.hokkaido.jp" => 0, + "hidaka.hokkaido.jp" => 0, + "higashikagura.hokkaido.jp" => 0, + "higashikawa.hokkaido.jp" => 0, + "hiroo.hokkaido.jp" => 0, + "hokuryu.hokkaido.jp" => 0, + "hokuto.hokkaido.jp" => 0, + "honbetsu.hokkaido.jp" => 0, + "horokanai.hokkaido.jp" => 0, + "horonobe.hokkaido.jp" => 0, + "ikeda.hokkaido.jp" => 0, + "imakane.hokkaido.jp" => 0, + "ishikari.hokkaido.jp" => 0, + "iwamizawa.hokkaido.jp" => 0, + "iwanai.hokkaido.jp" => 0, + "kamifurano.hokkaido.jp" => 0, + "kamikawa.hokkaido.jp" => 0, + "kamishihoro.hokkaido.jp" => 0, + "kamisunagawa.hokkaido.jp" => 0, + "kamoenai.hokkaido.jp" => 0, + "kayabe.hokkaido.jp" => 0, + "kembuchi.hokkaido.jp" => 0, + "kikonai.hokkaido.jp" => 0, + "kimobetsu.hokkaido.jp" => 0, + "kitahiroshima.hokkaido.jp" => 0, + "kitami.hokkaido.jp" => 0, + "kiyosato.hokkaido.jp" => 0, + "koshimizu.hokkaido.jp" => 0, + "kunneppu.hokkaido.jp" => 0, + "kuriyama.hokkaido.jp" => 0, + "kuromatsunai.hokkaido.jp" => 0, + "kushiro.hokkaido.jp" => 0, + "kutchan.hokkaido.jp" => 0, + "kyowa.hokkaido.jp" => 0, + "mashike.hokkaido.jp" => 0, + "matsumae.hokkaido.jp" => 0, + "mikasa.hokkaido.jp" => 0, + "minamifurano.hokkaido.jp" => 0, + "mombetsu.hokkaido.jp" => 0, + "moseushi.hokkaido.jp" => 0, + "mukawa.hokkaido.jp" => 0, + "muroran.hokkaido.jp" => 0, + "naie.hokkaido.jp" => 0, + "nakagawa.hokkaido.jp" => 0, + "nakasatsunai.hokkaido.jp" => 0, + "nakatombetsu.hokkaido.jp" => 0, + "nanae.hokkaido.jp" => 0, + "nanporo.hokkaido.jp" => 0, + "nayoro.hokkaido.jp" => 0, + "nemuro.hokkaido.jp" => 0, + "niikappu.hokkaido.jp" => 0, + "niki.hokkaido.jp" => 0, + "nishiokoppe.hokkaido.jp" => 0, + "noboribetsu.hokkaido.jp" => 0, + "numata.hokkaido.jp" => 0, + "obihiro.hokkaido.jp" => 0, + "obira.hokkaido.jp" => 0, + "oketo.hokkaido.jp" => 0, + "okoppe.hokkaido.jp" => 0, + "otaru.hokkaido.jp" => 0, + "otobe.hokkaido.jp" => 0, + "otofuke.hokkaido.jp" => 0, + "otoineppu.hokkaido.jp" => 0, + "oumu.hokkaido.jp" => 0, + "ozora.hokkaido.jp" => 0, + "pippu.hokkaido.jp" => 0, + "rankoshi.hokkaido.jp" => 0, + "rebun.hokkaido.jp" => 0, + "rikubetsu.hokkaido.jp" => 0, + "rishiri.hokkaido.jp" => 0, + "rishirifuji.hokkaido.jp" => 0, + "saroma.hokkaido.jp" => 0, + "sarufutsu.hokkaido.jp" => 0, + "shakotan.hokkaido.jp" => 0, + "shari.hokkaido.jp" => 0, + "shibecha.hokkaido.jp" => 0, + "shibetsu.hokkaido.jp" => 0, + "shikabe.hokkaido.jp" => 0, + "shikaoi.hokkaido.jp" => 0, + "shimamaki.hokkaido.jp" => 0, + "shimizu.hokkaido.jp" => 0, + "shimokawa.hokkaido.jp" => 0, + "shinshinotsu.hokkaido.jp" => 0, + "shintoku.hokkaido.jp" => 0, + "shiranuka.hokkaido.jp" => 0, + "shiraoi.hokkaido.jp" => 0, + "shiriuchi.hokkaido.jp" => 0, + "sobetsu.hokkaido.jp" => 0, + "sunagawa.hokkaido.jp" => 0, + "taiki.hokkaido.jp" => 0, + "takasu.hokkaido.jp" => 0, + "takikawa.hokkaido.jp" => 0, + "takinoue.hokkaido.jp" => 0, + "teshikaga.hokkaido.jp" => 0, + "tobetsu.hokkaido.jp" => 0, + "tohma.hokkaido.jp" => 0, + "tomakomai.hokkaido.jp" => 0, + "tomari.hokkaido.jp" => 0, + 
"toya.hokkaido.jp" => 0, + "toyako.hokkaido.jp" => 0, + "toyotomi.hokkaido.jp" => 0, + "toyoura.hokkaido.jp" => 0, + "tsubetsu.hokkaido.jp" => 0, + "tsukigata.hokkaido.jp" => 0, + "urakawa.hokkaido.jp" => 0, + "urausu.hokkaido.jp" => 0, + "uryu.hokkaido.jp" => 0, + "utashinai.hokkaido.jp" => 0, + "wakkanai.hokkaido.jp" => 0, + "wassamu.hokkaido.jp" => 0, + "yakumo.hokkaido.jp" => 0, + "yoichi.hokkaido.jp" => 0, + "aioi.hyogo.jp" => 0, + "akashi.hyogo.jp" => 0, + "ako.hyogo.jp" => 0, + "amagasaki.hyogo.jp" => 0, + "aogaki.hyogo.jp" => 0, + "asago.hyogo.jp" => 0, + "ashiya.hyogo.jp" => 0, + "awaji.hyogo.jp" => 0, + "fukusaki.hyogo.jp" => 0, + "goshiki.hyogo.jp" => 0, + "harima.hyogo.jp" => 0, + "himeji.hyogo.jp" => 0, + "ichikawa.hyogo.jp" => 0, + "inagawa.hyogo.jp" => 0, + "itami.hyogo.jp" => 0, + "kakogawa.hyogo.jp" => 0, + "kamigori.hyogo.jp" => 0, + "kamikawa.hyogo.jp" => 0, + "kasai.hyogo.jp" => 0, + "kasuga.hyogo.jp" => 0, + "kawanishi.hyogo.jp" => 0, + "miki.hyogo.jp" => 0, + "minamiawaji.hyogo.jp" => 0, + "nishinomiya.hyogo.jp" => 0, + "nishiwaki.hyogo.jp" => 0, + "ono.hyogo.jp" => 0, + "sanda.hyogo.jp" => 0, + "sannan.hyogo.jp" => 0, + "sasayama.hyogo.jp" => 0, + "sayo.hyogo.jp" => 0, + "shingu.hyogo.jp" => 0, + "shinonsen.hyogo.jp" => 0, + "shiso.hyogo.jp" => 0, + "sumoto.hyogo.jp" => 0, + "taishi.hyogo.jp" => 0, + "taka.hyogo.jp" => 0, + "takarazuka.hyogo.jp" => 0, + "takasago.hyogo.jp" => 0, + "takino.hyogo.jp" => 0, + "tamba.hyogo.jp" => 0, + "tatsuno.hyogo.jp" => 0, + "toyooka.hyogo.jp" => 0, + "yabu.hyogo.jp" => 0, + "yashiro.hyogo.jp" => 0, + "yoka.hyogo.jp" => 0, + "yokawa.hyogo.jp" => 0, + "ami.ibaraki.jp" => 0, + "asahi.ibaraki.jp" => 0, + "bando.ibaraki.jp" => 0, + "chikusei.ibaraki.jp" => 0, + "daigo.ibaraki.jp" => 0, + "fujishiro.ibaraki.jp" => 0, + "hitachi.ibaraki.jp" => 0, + "hitachinaka.ibaraki.jp" => 0, + "hitachiomiya.ibaraki.jp" => 0, + "hitachiota.ibaraki.jp" => 0, + "ibaraki.ibaraki.jp" => 0, + "ina.ibaraki.jp" => 0, + "inashiki.ibaraki.jp" => 0, + "itako.ibaraki.jp" => 0, + "iwama.ibaraki.jp" => 0, + "joso.ibaraki.jp" => 0, + "kamisu.ibaraki.jp" => 0, + "kasama.ibaraki.jp" => 0, + "kashima.ibaraki.jp" => 0, + "kasumigaura.ibaraki.jp" => 0, + "koga.ibaraki.jp" => 0, + "miho.ibaraki.jp" => 0, + "mito.ibaraki.jp" => 0, + "moriya.ibaraki.jp" => 0, + "naka.ibaraki.jp" => 0, + "namegata.ibaraki.jp" => 0, + "oarai.ibaraki.jp" => 0, + "ogawa.ibaraki.jp" => 0, + "omitama.ibaraki.jp" => 0, + "ryugasaki.ibaraki.jp" => 0, + "sakai.ibaraki.jp" => 0, + "sakuragawa.ibaraki.jp" => 0, + "shimodate.ibaraki.jp" => 0, + "shimotsuma.ibaraki.jp" => 0, + "shirosato.ibaraki.jp" => 0, + "sowa.ibaraki.jp" => 0, + "suifu.ibaraki.jp" => 0, + "takahagi.ibaraki.jp" => 0, + "tamatsukuri.ibaraki.jp" => 0, + "tokai.ibaraki.jp" => 0, + "tomobe.ibaraki.jp" => 0, + "tone.ibaraki.jp" => 0, + "toride.ibaraki.jp" => 0, + "tsuchiura.ibaraki.jp" => 0, + "tsukuba.ibaraki.jp" => 0, + "uchihara.ibaraki.jp" => 0, + "ushiku.ibaraki.jp" => 0, + "yachiyo.ibaraki.jp" => 0, + "yamagata.ibaraki.jp" => 0, + "yawara.ibaraki.jp" => 0, + "yuki.ibaraki.jp" => 0, + "anamizu.ishikawa.jp" => 0, + "hakui.ishikawa.jp" => 0, + "hakusan.ishikawa.jp" => 0, + "kaga.ishikawa.jp" => 0, + "kahoku.ishikawa.jp" => 0, + "kanazawa.ishikawa.jp" => 0, + "kawakita.ishikawa.jp" => 0, + "komatsu.ishikawa.jp" => 0, + "nakanoto.ishikawa.jp" => 0, + "nanao.ishikawa.jp" => 0, + "nomi.ishikawa.jp" => 0, + "nonoichi.ishikawa.jp" => 0, + "noto.ishikawa.jp" => 0, + "shika.ishikawa.jp" => 0, + "suzu.ishikawa.jp" => 0, + "tsubata.ishikawa.jp" => 
0, + "tsurugi.ishikawa.jp" => 0, + "uchinada.ishikawa.jp" => 0, + "wajima.ishikawa.jp" => 0, + "fudai.iwate.jp" => 0, + "fujisawa.iwate.jp" => 0, + "hanamaki.iwate.jp" => 0, + "hiraizumi.iwate.jp" => 0, + "hirono.iwate.jp" => 0, + "ichinohe.iwate.jp" => 0, + "ichinoseki.iwate.jp" => 0, + "iwaizumi.iwate.jp" => 0, + "iwate.iwate.jp" => 0, + "joboji.iwate.jp" => 0, + "kamaishi.iwate.jp" => 0, + "kanegasaki.iwate.jp" => 0, + "karumai.iwate.jp" => 0, + "kawai.iwate.jp" => 0, + "kitakami.iwate.jp" => 0, + "kuji.iwate.jp" => 0, + "kunohe.iwate.jp" => 0, + "kuzumaki.iwate.jp" => 0, + "miyako.iwate.jp" => 0, + "mizusawa.iwate.jp" => 0, + "morioka.iwate.jp" => 0, + "ninohe.iwate.jp" => 0, + "noda.iwate.jp" => 0, + "ofunato.iwate.jp" => 0, + "oshu.iwate.jp" => 0, + "otsuchi.iwate.jp" => 0, + "rikuzentakata.iwate.jp" => 0, + "shiwa.iwate.jp" => 0, + "shizukuishi.iwate.jp" => 0, + "sumita.iwate.jp" => 0, + "tanohata.iwate.jp" => 0, + "tono.iwate.jp" => 0, + "yahaba.iwate.jp" => 0, + "yamada.iwate.jp" => 0, + "ayagawa.kagawa.jp" => 0, + "higashikagawa.kagawa.jp" => 0, + "kanonji.kagawa.jp" => 0, + "kotohira.kagawa.jp" => 0, + "manno.kagawa.jp" => 0, + "marugame.kagawa.jp" => 0, + "mitoyo.kagawa.jp" => 0, + "naoshima.kagawa.jp" => 0, + "sanuki.kagawa.jp" => 0, + "tadotsu.kagawa.jp" => 0, + "takamatsu.kagawa.jp" => 0, + "tonosho.kagawa.jp" => 0, + "uchinomi.kagawa.jp" => 0, + "utazu.kagawa.jp" => 0, + "zentsuji.kagawa.jp" => 0, + "akune.kagoshima.jp" => 0, + "amami.kagoshima.jp" => 0, + "hioki.kagoshima.jp" => 0, + "isa.kagoshima.jp" => 0, + "isen.kagoshima.jp" => 0, + "izumi.kagoshima.jp" => 0, + "kagoshima.kagoshima.jp" => 0, + "kanoya.kagoshima.jp" => 0, + "kawanabe.kagoshima.jp" => 0, + "kinko.kagoshima.jp" => 0, + "kouyama.kagoshima.jp" => 0, + "makurazaki.kagoshima.jp" => 0, + "matsumoto.kagoshima.jp" => 0, + "minamitane.kagoshima.jp" => 0, + "nakatane.kagoshima.jp" => 0, + "nishinoomote.kagoshima.jp" => 0, + "satsumasendai.kagoshima.jp" => 0, + "soo.kagoshima.jp" => 0, + "tarumizu.kagoshima.jp" => 0, + "yusui.kagoshima.jp" => 0, + "aikawa.kanagawa.jp" => 0, + "atsugi.kanagawa.jp" => 0, + "ayase.kanagawa.jp" => 0, + "chigasaki.kanagawa.jp" => 0, + "ebina.kanagawa.jp" => 0, + "fujisawa.kanagawa.jp" => 0, + "hadano.kanagawa.jp" => 0, + "hakone.kanagawa.jp" => 0, + "hiratsuka.kanagawa.jp" => 0, + "isehara.kanagawa.jp" => 0, + "kaisei.kanagawa.jp" => 0, + "kamakura.kanagawa.jp" => 0, + "kiyokawa.kanagawa.jp" => 0, + "matsuda.kanagawa.jp" => 0, + "minamiashigara.kanagawa.jp" => 0, + "miura.kanagawa.jp" => 0, + "nakai.kanagawa.jp" => 0, + "ninomiya.kanagawa.jp" => 0, + "odawara.kanagawa.jp" => 0, + "oi.kanagawa.jp" => 0, + "oiso.kanagawa.jp" => 0, + "sagamihara.kanagawa.jp" => 0, + "samukawa.kanagawa.jp" => 0, + "tsukui.kanagawa.jp" => 0, + "yamakita.kanagawa.jp" => 0, + "yamato.kanagawa.jp" => 0, + "yokosuka.kanagawa.jp" => 0, + "yugawara.kanagawa.jp" => 0, + "zama.kanagawa.jp" => 0, + "zushi.kanagawa.jp" => 0, + "aki.kochi.jp" => 0, + "geisei.kochi.jp" => 0, + "hidaka.kochi.jp" => 0, + "higashitsuno.kochi.jp" => 0, + "ino.kochi.jp" => 0, + "kagami.kochi.jp" => 0, + "kami.kochi.jp" => 0, + "kitagawa.kochi.jp" => 0, + "kochi.kochi.jp" => 0, + "mihara.kochi.jp" => 0, + "motoyama.kochi.jp" => 0, + "muroto.kochi.jp" => 0, + "nahari.kochi.jp" => 0, + "nakamura.kochi.jp" => 0, + "nankoku.kochi.jp" => 0, + "nishitosa.kochi.jp" => 0, + "niyodogawa.kochi.jp" => 0, + "ochi.kochi.jp" => 0, + "okawa.kochi.jp" => 0, + "otoyo.kochi.jp" => 0, + "otsuki.kochi.jp" => 0, + "sakawa.kochi.jp" => 0, + "sukumo.kochi.jp" => 
0, + "susaki.kochi.jp" => 0, + "tosa.kochi.jp" => 0, + "tosashimizu.kochi.jp" => 0, + "toyo.kochi.jp" => 0, + "tsuno.kochi.jp" => 0, + "umaji.kochi.jp" => 0, + "yasuda.kochi.jp" => 0, + "yusuhara.kochi.jp" => 0, + "amakusa.kumamoto.jp" => 0, + "arao.kumamoto.jp" => 0, + "aso.kumamoto.jp" => 0, + "choyo.kumamoto.jp" => 0, + "gyokuto.kumamoto.jp" => 0, + "kamiamakusa.kumamoto.jp" => 0, + "kikuchi.kumamoto.jp" => 0, + "kumamoto.kumamoto.jp" => 0, + "mashiki.kumamoto.jp" => 0, + "mifune.kumamoto.jp" => 0, + "minamata.kumamoto.jp" => 0, + "minamioguni.kumamoto.jp" => 0, + "nagasu.kumamoto.jp" => 0, + "nishihara.kumamoto.jp" => 0, + "oguni.kumamoto.jp" => 0, + "ozu.kumamoto.jp" => 0, + "sumoto.kumamoto.jp" => 0, + "takamori.kumamoto.jp" => 0, + "uki.kumamoto.jp" => 0, + "uto.kumamoto.jp" => 0, + "yamaga.kumamoto.jp" => 0, + "yamato.kumamoto.jp" => 0, + "yatsushiro.kumamoto.jp" => 0, + "ayabe.kyoto.jp" => 0, + "fukuchiyama.kyoto.jp" => 0, + "higashiyama.kyoto.jp" => 0, + "ide.kyoto.jp" => 0, + "ine.kyoto.jp" => 0, + "joyo.kyoto.jp" => 0, + "kameoka.kyoto.jp" => 0, + "kamo.kyoto.jp" => 0, + "kita.kyoto.jp" => 0, + "kizu.kyoto.jp" => 0, + "kumiyama.kyoto.jp" => 0, + "kyotamba.kyoto.jp" => 0, + "kyotanabe.kyoto.jp" => 0, + "kyotango.kyoto.jp" => 0, + "maizuru.kyoto.jp" => 0, + "minami.kyoto.jp" => 0, + "minamiyamashiro.kyoto.jp" => 0, + "miyazu.kyoto.jp" => 0, + "muko.kyoto.jp" => 0, + "nagaokakyo.kyoto.jp" => 0, + "nakagyo.kyoto.jp" => 0, + "nantan.kyoto.jp" => 0, + "oyamazaki.kyoto.jp" => 0, + "sakyo.kyoto.jp" => 0, + "seika.kyoto.jp" => 0, + "tanabe.kyoto.jp" => 0, + "uji.kyoto.jp" => 0, + "ujitawara.kyoto.jp" => 0, + "wazuka.kyoto.jp" => 0, + "yamashina.kyoto.jp" => 0, + "yawata.kyoto.jp" => 0, + "asahi.mie.jp" => 0, + "inabe.mie.jp" => 0, + "ise.mie.jp" => 0, + "kameyama.mie.jp" => 0, + "kawagoe.mie.jp" => 0, + "kiho.mie.jp" => 0, + "kisosaki.mie.jp" => 0, + "kiwa.mie.jp" => 0, + "komono.mie.jp" => 0, + "kumano.mie.jp" => 0, + "kuwana.mie.jp" => 0, + "matsusaka.mie.jp" => 0, + "meiwa.mie.jp" => 0, + "mihama.mie.jp" => 0, + "minamiise.mie.jp" => 0, + "misugi.mie.jp" => 0, + "miyama.mie.jp" => 0, + "nabari.mie.jp" => 0, + "shima.mie.jp" => 0, + "suzuka.mie.jp" => 0, + "tado.mie.jp" => 0, + "taiki.mie.jp" => 0, + "taki.mie.jp" => 0, + "tamaki.mie.jp" => 0, + "toba.mie.jp" => 0, + "tsu.mie.jp" => 0, + "udono.mie.jp" => 0, + "ureshino.mie.jp" => 0, + "watarai.mie.jp" => 0, + "yokkaichi.mie.jp" => 0, + "furukawa.miyagi.jp" => 0, + "higashimatsushima.miyagi.jp" => 0, + "ishinomaki.miyagi.jp" => 0, + "iwanuma.miyagi.jp" => 0, + "kakuda.miyagi.jp" => 0, + "kami.miyagi.jp" => 0, + "kawasaki.miyagi.jp" => 0, + "marumori.miyagi.jp" => 0, + "matsushima.miyagi.jp" => 0, + "minamisanriku.miyagi.jp" => 0, + "misato.miyagi.jp" => 0, + "murata.miyagi.jp" => 0, + "natori.miyagi.jp" => 0, + "ogawara.miyagi.jp" => 0, + "ohira.miyagi.jp" => 0, + "onagawa.miyagi.jp" => 0, + "osaki.miyagi.jp" => 0, + "rifu.miyagi.jp" => 0, + "semine.miyagi.jp" => 0, + "shibata.miyagi.jp" => 0, + "shichikashuku.miyagi.jp" => 0, + "shikama.miyagi.jp" => 0, + "shiogama.miyagi.jp" => 0, + "shiroishi.miyagi.jp" => 0, + "tagajo.miyagi.jp" => 0, + "taiwa.miyagi.jp" => 0, + "tome.miyagi.jp" => 0, + "tomiya.miyagi.jp" => 0, + "wakuya.miyagi.jp" => 0, + "watari.miyagi.jp" => 0, + "yamamoto.miyagi.jp" => 0, + "zao.miyagi.jp" => 0, + "aya.miyazaki.jp" => 0, + "ebino.miyazaki.jp" => 0, + "gokase.miyazaki.jp" => 0, + "hyuga.miyazaki.jp" => 0, + "kadogawa.miyazaki.jp" => 0, + "kawaminami.miyazaki.jp" => 0, + "kijo.miyazaki.jp" => 0, + 
"kitagawa.miyazaki.jp" => 0, + "kitakata.miyazaki.jp" => 0, + "kitaura.miyazaki.jp" => 0, + "kobayashi.miyazaki.jp" => 0, + "kunitomi.miyazaki.jp" => 0, + "kushima.miyazaki.jp" => 0, + "mimata.miyazaki.jp" => 0, + "miyakonojo.miyazaki.jp" => 0, + "miyazaki.miyazaki.jp" => 0, + "morotsuka.miyazaki.jp" => 0, + "nichinan.miyazaki.jp" => 0, + "nishimera.miyazaki.jp" => 0, + "nobeoka.miyazaki.jp" => 0, + "saito.miyazaki.jp" => 0, + "shiiba.miyazaki.jp" => 0, + "shintomi.miyazaki.jp" => 0, + "takaharu.miyazaki.jp" => 0, + "takanabe.miyazaki.jp" => 0, + "takazaki.miyazaki.jp" => 0, + "tsuno.miyazaki.jp" => 0, + "achi.nagano.jp" => 0, + "agematsu.nagano.jp" => 0, + "anan.nagano.jp" => 0, + "aoki.nagano.jp" => 0, + "asahi.nagano.jp" => 0, + "azumino.nagano.jp" => 0, + "chikuhoku.nagano.jp" => 0, + "chikuma.nagano.jp" => 0, + "chino.nagano.jp" => 0, + "fujimi.nagano.jp" => 0, + "hakuba.nagano.jp" => 0, + "hara.nagano.jp" => 0, + "hiraya.nagano.jp" => 0, + "iida.nagano.jp" => 0, + "iijima.nagano.jp" => 0, + "iiyama.nagano.jp" => 0, + "iizuna.nagano.jp" => 0, + "ikeda.nagano.jp" => 0, + "ikusaka.nagano.jp" => 0, + "ina.nagano.jp" => 0, + "karuizawa.nagano.jp" => 0, + "kawakami.nagano.jp" => 0, + "kiso.nagano.jp" => 0, + "kisofukushima.nagano.jp" => 0, + "kitaaiki.nagano.jp" => 0, + "komagane.nagano.jp" => 0, + "komoro.nagano.jp" => 0, + "matsukawa.nagano.jp" => 0, + "matsumoto.nagano.jp" => 0, + "miasa.nagano.jp" => 0, + "minamiaiki.nagano.jp" => 0, + "minamimaki.nagano.jp" => 0, + "minamiminowa.nagano.jp" => 0, + "minowa.nagano.jp" => 0, + "miyada.nagano.jp" => 0, + "miyota.nagano.jp" => 0, + "mochizuki.nagano.jp" => 0, + "nagano.nagano.jp" => 0, + "nagawa.nagano.jp" => 0, + "nagiso.nagano.jp" => 0, + "nakagawa.nagano.jp" => 0, + "nakano.nagano.jp" => 0, + "nozawaonsen.nagano.jp" => 0, + "obuse.nagano.jp" => 0, + "ogawa.nagano.jp" => 0, + "okaya.nagano.jp" => 0, + "omachi.nagano.jp" => 0, + "omi.nagano.jp" => 0, + "ookuwa.nagano.jp" => 0, + "ooshika.nagano.jp" => 0, + "otaki.nagano.jp" => 0, + "otari.nagano.jp" => 0, + "sakae.nagano.jp" => 0, + "sakaki.nagano.jp" => 0, + "saku.nagano.jp" => 0, + "sakuho.nagano.jp" => 0, + "shimosuwa.nagano.jp" => 0, + "shinanomachi.nagano.jp" => 0, + "shiojiri.nagano.jp" => 0, + "suwa.nagano.jp" => 0, + "suzaka.nagano.jp" => 0, + "takagi.nagano.jp" => 0, + "takamori.nagano.jp" => 0, + "takayama.nagano.jp" => 0, + "tateshina.nagano.jp" => 0, + "tatsuno.nagano.jp" => 0, + "togakushi.nagano.jp" => 0, + "togura.nagano.jp" => 0, + "tomi.nagano.jp" => 0, + "ueda.nagano.jp" => 0, + "wada.nagano.jp" => 0, + "yamagata.nagano.jp" => 0, + "yamanouchi.nagano.jp" => 0, + "yasaka.nagano.jp" => 0, + "yasuoka.nagano.jp" => 0, + "chijiwa.nagasaki.jp" => 0, + "futsu.nagasaki.jp" => 0, + "goto.nagasaki.jp" => 0, + "hasami.nagasaki.jp" => 0, + "hirado.nagasaki.jp" => 0, + "iki.nagasaki.jp" => 0, + "isahaya.nagasaki.jp" => 0, + "kawatana.nagasaki.jp" => 0, + "kuchinotsu.nagasaki.jp" => 0, + "matsuura.nagasaki.jp" => 0, + "nagasaki.nagasaki.jp" => 0, + "obama.nagasaki.jp" => 0, + "omura.nagasaki.jp" => 0, + "oseto.nagasaki.jp" => 0, + "saikai.nagasaki.jp" => 0, + "sasebo.nagasaki.jp" => 0, + "seihi.nagasaki.jp" => 0, + "shimabara.nagasaki.jp" => 0, + "shinkamigoto.nagasaki.jp" => 0, + "togitsu.nagasaki.jp" => 0, + "tsushima.nagasaki.jp" => 0, + "unzen.nagasaki.jp" => 0, + "ando.nara.jp" => 0, + "gose.nara.jp" => 0, + "heguri.nara.jp" => 0, + "higashiyoshino.nara.jp" => 0, + "ikaruga.nara.jp" => 0, + "ikoma.nara.jp" => 0, + "kamikitayama.nara.jp" => 0, + "kanmaki.nara.jp" => 0, + 
"kashiba.nara.jp" => 0, + "kashihara.nara.jp" => 0, + "katsuragi.nara.jp" => 0, + "kawai.nara.jp" => 0, + "kawakami.nara.jp" => 0, + "kawanishi.nara.jp" => 0, + "koryo.nara.jp" => 0, + "kurotaki.nara.jp" => 0, + "mitsue.nara.jp" => 0, + "miyake.nara.jp" => 0, + "nara.nara.jp" => 0, + "nosegawa.nara.jp" => 0, + "oji.nara.jp" => 0, + "ouda.nara.jp" => 0, + "oyodo.nara.jp" => 0, + "sakurai.nara.jp" => 0, + "sango.nara.jp" => 0, + "shimoichi.nara.jp" => 0, + "shimokitayama.nara.jp" => 0, + "shinjo.nara.jp" => 0, + "soni.nara.jp" => 0, + "takatori.nara.jp" => 0, + "tawaramoto.nara.jp" => 0, + "tenkawa.nara.jp" => 0, + "tenri.nara.jp" => 0, + "uda.nara.jp" => 0, + "yamatokoriyama.nara.jp" => 0, + "yamatotakada.nara.jp" => 0, + "yamazoe.nara.jp" => 0, + "yoshino.nara.jp" => 0, + "aga.niigata.jp" => 0, + "agano.niigata.jp" => 0, + "gosen.niigata.jp" => 0, + "itoigawa.niigata.jp" => 0, + "izumozaki.niigata.jp" => 0, + "joetsu.niigata.jp" => 0, + "kamo.niigata.jp" => 0, + "kariwa.niigata.jp" => 0, + "kashiwazaki.niigata.jp" => 0, + "minamiuonuma.niigata.jp" => 0, + "mitsuke.niigata.jp" => 0, + "muika.niigata.jp" => 0, + "murakami.niigata.jp" => 0, + "myoko.niigata.jp" => 0, + "nagaoka.niigata.jp" => 0, + "niigata.niigata.jp" => 0, + "ojiya.niigata.jp" => 0, + "omi.niigata.jp" => 0, + "sado.niigata.jp" => 0, + "sanjo.niigata.jp" => 0, + "seiro.niigata.jp" => 0, + "seirou.niigata.jp" => 0, + "sekikawa.niigata.jp" => 0, + "shibata.niigata.jp" => 0, + "tagami.niigata.jp" => 0, + "tainai.niigata.jp" => 0, + "tochio.niigata.jp" => 0, + "tokamachi.niigata.jp" => 0, + "tsubame.niigata.jp" => 0, + "tsunan.niigata.jp" => 0, + "uonuma.niigata.jp" => 0, + "yahiko.niigata.jp" => 0, + "yoita.niigata.jp" => 0, + "yuzawa.niigata.jp" => 0, + "beppu.oita.jp" => 0, + "bungoono.oita.jp" => 0, + "bungotakada.oita.jp" => 0, + "hasama.oita.jp" => 0, + "hiji.oita.jp" => 0, + "himeshima.oita.jp" => 0, + "hita.oita.jp" => 0, + "kamitsue.oita.jp" => 0, + "kokonoe.oita.jp" => 0, + "kuju.oita.jp" => 0, + "kunisaki.oita.jp" => 0, + "kusu.oita.jp" => 0, + "oita.oita.jp" => 0, + "saiki.oita.jp" => 0, + "taketa.oita.jp" => 0, + "tsukumi.oita.jp" => 0, + "usa.oita.jp" => 0, + "usuki.oita.jp" => 0, + "yufu.oita.jp" => 0, + "akaiwa.okayama.jp" => 0, + "asakuchi.okayama.jp" => 0, + "bizen.okayama.jp" => 0, + "hayashima.okayama.jp" => 0, + "ibara.okayama.jp" => 0, + "kagamino.okayama.jp" => 0, + "kasaoka.okayama.jp" => 0, + "kibichuo.okayama.jp" => 0, + "kumenan.okayama.jp" => 0, + "kurashiki.okayama.jp" => 0, + "maniwa.okayama.jp" => 0, + "misaki.okayama.jp" => 0, + "nagi.okayama.jp" => 0, + "niimi.okayama.jp" => 0, + "nishiawakura.okayama.jp" => 0, + "okayama.okayama.jp" => 0, + "satosho.okayama.jp" => 0, + "setouchi.okayama.jp" => 0, + "shinjo.okayama.jp" => 0, + "shoo.okayama.jp" => 0, + "soja.okayama.jp" => 0, + "takahashi.okayama.jp" => 0, + "tamano.okayama.jp" => 0, + "tsuyama.okayama.jp" => 0, + "wake.okayama.jp" => 0, + "yakage.okayama.jp" => 0, + "aguni.okinawa.jp" => 0, + "ginowan.okinawa.jp" => 0, + "ginoza.okinawa.jp" => 0, + "gushikami.okinawa.jp" => 0, + "haebaru.okinawa.jp" => 0, + "higashi.okinawa.jp" => 0, + "hirara.okinawa.jp" => 0, + "iheya.okinawa.jp" => 0, + "ishigaki.okinawa.jp" => 0, + "ishikawa.okinawa.jp" => 0, + "itoman.okinawa.jp" => 0, + "izena.okinawa.jp" => 0, + "kadena.okinawa.jp" => 0, + "kin.okinawa.jp" => 0, + "kitadaito.okinawa.jp" => 0, + "kitanakagusuku.okinawa.jp" => 0, + "kumejima.okinawa.jp" => 0, + "kunigami.okinawa.jp" => 0, + "minamidaito.okinawa.jp" => 0, + "motobu.okinawa.jp" => 0, + 
"nago.okinawa.jp" => 0, + "naha.okinawa.jp" => 0, + "nakagusuku.okinawa.jp" => 0, + "nakijin.okinawa.jp" => 0, + "nanjo.okinawa.jp" => 0, + "nishihara.okinawa.jp" => 0, + "ogimi.okinawa.jp" => 0, + "okinawa.okinawa.jp" => 0, + "onna.okinawa.jp" => 0, + "shimoji.okinawa.jp" => 0, + "taketomi.okinawa.jp" => 0, + "tarama.okinawa.jp" => 0, + "tokashiki.okinawa.jp" => 0, + "tomigusuku.okinawa.jp" => 0, + "tonaki.okinawa.jp" => 0, + "urasoe.okinawa.jp" => 0, + "uruma.okinawa.jp" => 0, + "yaese.okinawa.jp" => 0, + "yomitan.okinawa.jp" => 0, + "yonabaru.okinawa.jp" => 0, + "yonaguni.okinawa.jp" => 0, + "zamami.okinawa.jp" => 0, + "abeno.osaka.jp" => 0, + "chihayaakasaka.osaka.jp" => 0, + "chuo.osaka.jp" => 0, + "daito.osaka.jp" => 0, + "fujiidera.osaka.jp" => 0, + "habikino.osaka.jp" => 0, + "hannan.osaka.jp" => 0, + "higashiosaka.osaka.jp" => 0, + "higashisumiyoshi.osaka.jp" => 0, + "higashiyodogawa.osaka.jp" => 0, + "hirakata.osaka.jp" => 0, + "ibaraki.osaka.jp" => 0, + "ikeda.osaka.jp" => 0, + "izumi.osaka.jp" => 0, + "izumiotsu.osaka.jp" => 0, + "izumisano.osaka.jp" => 0, + "kadoma.osaka.jp" => 0, + "kaizuka.osaka.jp" => 0, + "kanan.osaka.jp" => 0, + "kashiwara.osaka.jp" => 0, + "katano.osaka.jp" => 0, + "kawachinagano.osaka.jp" => 0, + "kishiwada.osaka.jp" => 0, + "kita.osaka.jp" => 0, + "kumatori.osaka.jp" => 0, + "matsubara.osaka.jp" => 0, + "minato.osaka.jp" => 0, + "minoh.osaka.jp" => 0, + "misaki.osaka.jp" => 0, + "moriguchi.osaka.jp" => 0, + "neyagawa.osaka.jp" => 0, + "nishi.osaka.jp" => 0, + "nose.osaka.jp" => 0, + "osakasayama.osaka.jp" => 0, + "sakai.osaka.jp" => 0, + "sayama.osaka.jp" => 0, + "sennan.osaka.jp" => 0, + "settsu.osaka.jp" => 0, + "shijonawate.osaka.jp" => 0, + "shimamoto.osaka.jp" => 0, + "suita.osaka.jp" => 0, + "tadaoka.osaka.jp" => 0, + "taishi.osaka.jp" => 0, + "tajiri.osaka.jp" => 0, + "takaishi.osaka.jp" => 0, + "takatsuki.osaka.jp" => 0, + "tondabayashi.osaka.jp" => 0, + "toyonaka.osaka.jp" => 0, + "toyono.osaka.jp" => 0, + "yao.osaka.jp" => 0, + "ariake.saga.jp" => 0, + "arita.saga.jp" => 0, + "fukudomi.saga.jp" => 0, + "genkai.saga.jp" => 0, + "hamatama.saga.jp" => 0, + "hizen.saga.jp" => 0, + "imari.saga.jp" => 0, + "kamimine.saga.jp" => 0, + "kanzaki.saga.jp" => 0, + "karatsu.saga.jp" => 0, + "kashima.saga.jp" => 0, + "kitagata.saga.jp" => 0, + "kitahata.saga.jp" => 0, + "kiyama.saga.jp" => 0, + "kouhoku.saga.jp" => 0, + "kyuragi.saga.jp" => 0, + "nishiarita.saga.jp" => 0, + "ogi.saga.jp" => 0, + "omachi.saga.jp" => 0, + "ouchi.saga.jp" => 0, + "saga.saga.jp" => 0, + "shiroishi.saga.jp" => 0, + "taku.saga.jp" => 0, + "tara.saga.jp" => 0, + "tosu.saga.jp" => 0, + "yoshinogari.saga.jp" => 0, + "arakawa.saitama.jp" => 0, + "asaka.saitama.jp" => 0, + "chichibu.saitama.jp" => 0, + "fujimi.saitama.jp" => 0, + "fujimino.saitama.jp" => 0, + "fukaya.saitama.jp" => 0, + "hanno.saitama.jp" => 0, + "hanyu.saitama.jp" => 0, + "hasuda.saitama.jp" => 0, + "hatogaya.saitama.jp" => 0, + "hatoyama.saitama.jp" => 0, + "hidaka.saitama.jp" => 0, + "higashichichibu.saitama.jp" => 0, + "higashimatsuyama.saitama.jp" => 0, + "honjo.saitama.jp" => 0, + "ina.saitama.jp" => 0, + "iruma.saitama.jp" => 0, + "iwatsuki.saitama.jp" => 0, + "kamiizumi.saitama.jp" => 0, + "kamikawa.saitama.jp" => 0, + "kamisato.saitama.jp" => 0, + "kasukabe.saitama.jp" => 0, + "kawagoe.saitama.jp" => 0, + "kawaguchi.saitama.jp" => 0, + "kawajima.saitama.jp" => 0, + "kazo.saitama.jp" => 0, + "kitamoto.saitama.jp" => 0, + "koshigaya.saitama.jp" => 0, + "kounosu.saitama.jp" => 0, + "kuki.saitama.jp" => 0, + 
"kumagaya.saitama.jp" => 0, + "matsubushi.saitama.jp" => 0, + "minano.saitama.jp" => 0, + "misato.saitama.jp" => 0, + "miyashiro.saitama.jp" => 0, + "miyoshi.saitama.jp" => 0, + "moroyama.saitama.jp" => 0, + "nagatoro.saitama.jp" => 0, + "namegawa.saitama.jp" => 0, + "niiza.saitama.jp" => 0, + "ogano.saitama.jp" => 0, + "ogawa.saitama.jp" => 0, + "ogose.saitama.jp" => 0, + "okegawa.saitama.jp" => 0, + "omiya.saitama.jp" => 0, + "otaki.saitama.jp" => 0, + "ranzan.saitama.jp" => 0, + "ryokami.saitama.jp" => 0, + "saitama.saitama.jp" => 0, + "sakado.saitama.jp" => 0, + "satte.saitama.jp" => 0, + "sayama.saitama.jp" => 0, + "shiki.saitama.jp" => 0, + "shiraoka.saitama.jp" => 0, + "soka.saitama.jp" => 0, + "sugito.saitama.jp" => 0, + "toda.saitama.jp" => 0, + "tokigawa.saitama.jp" => 0, + "tokorozawa.saitama.jp" => 0, + "tsurugashima.saitama.jp" => 0, + "urawa.saitama.jp" => 0, + "warabi.saitama.jp" => 0, + "yashio.saitama.jp" => 0, + "yokoze.saitama.jp" => 0, + "yono.saitama.jp" => 0, + "yorii.saitama.jp" => 0, + "yoshida.saitama.jp" => 0, + "yoshikawa.saitama.jp" => 0, + "yoshimi.saitama.jp" => 0, + "aisho.shiga.jp" => 0, + "gamo.shiga.jp" => 0, + "higashiomi.shiga.jp" => 0, + "hikone.shiga.jp" => 0, + "koka.shiga.jp" => 0, + "konan.shiga.jp" => 0, + "kosei.shiga.jp" => 0, + "koto.shiga.jp" => 0, + "kusatsu.shiga.jp" => 0, + "maibara.shiga.jp" => 0, + "moriyama.shiga.jp" => 0, + "nagahama.shiga.jp" => 0, + "nishiazai.shiga.jp" => 0, + "notogawa.shiga.jp" => 0, + "omihachiman.shiga.jp" => 0, + "otsu.shiga.jp" => 0, + "ritto.shiga.jp" => 0, + "ryuoh.shiga.jp" => 0, + "takashima.shiga.jp" => 0, + "takatsuki.shiga.jp" => 0, + "torahime.shiga.jp" => 0, + "toyosato.shiga.jp" => 0, + "yasu.shiga.jp" => 0, + "akagi.shimane.jp" => 0, + "ama.shimane.jp" => 0, + "gotsu.shimane.jp" => 0, + "hamada.shimane.jp" => 0, + "higashiizumo.shimane.jp" => 0, + "hikawa.shimane.jp" => 0, + "hikimi.shimane.jp" => 0, + "izumo.shimane.jp" => 0, + "kakinoki.shimane.jp" => 0, + "masuda.shimane.jp" => 0, + "matsue.shimane.jp" => 0, + "misato.shimane.jp" => 0, + "nishinoshima.shimane.jp" => 0, + "ohda.shimane.jp" => 0, + "okinoshima.shimane.jp" => 0, + "okuizumo.shimane.jp" => 0, + "shimane.shimane.jp" => 0, + "tamayu.shimane.jp" => 0, + "tsuwano.shimane.jp" => 0, + "unnan.shimane.jp" => 0, + "yakumo.shimane.jp" => 0, + "yasugi.shimane.jp" => 0, + "yatsuka.shimane.jp" => 0, + "arai.shizuoka.jp" => 0, + "atami.shizuoka.jp" => 0, + "fuji.shizuoka.jp" => 0, + "fujieda.shizuoka.jp" => 0, + "fujikawa.shizuoka.jp" => 0, + "fujinomiya.shizuoka.jp" => 0, + "fukuroi.shizuoka.jp" => 0, + "gotemba.shizuoka.jp" => 0, + "haibara.shizuoka.jp" => 0, + "hamamatsu.shizuoka.jp" => 0, + "higashiizu.shizuoka.jp" => 0, + "ito.shizuoka.jp" => 0, + "iwata.shizuoka.jp" => 0, + "izu.shizuoka.jp" => 0, + "izunokuni.shizuoka.jp" => 0, + "kakegawa.shizuoka.jp" => 0, + "kannami.shizuoka.jp" => 0, + "kawanehon.shizuoka.jp" => 0, + "kawazu.shizuoka.jp" => 0, + "kikugawa.shizuoka.jp" => 0, + "kosai.shizuoka.jp" => 0, + "makinohara.shizuoka.jp" => 0, + "matsuzaki.shizuoka.jp" => 0, + "minamiizu.shizuoka.jp" => 0, + "mishima.shizuoka.jp" => 0, + "morimachi.shizuoka.jp" => 0, + "nishiizu.shizuoka.jp" => 0, + "numazu.shizuoka.jp" => 0, + "omaezaki.shizuoka.jp" => 0, + "shimada.shizuoka.jp" => 0, + "shimizu.shizuoka.jp" => 0, + "shimoda.shizuoka.jp" => 0, + "shizuoka.shizuoka.jp" => 0, + "susono.shizuoka.jp" => 0, + "yaizu.shizuoka.jp" => 0, + "yoshida.shizuoka.jp" => 0, + "ashikaga.tochigi.jp" => 0, + "bato.tochigi.jp" => 0, + "haga.tochigi.jp" => 0, + 
"ichikai.tochigi.jp" => 0, + "iwafune.tochigi.jp" => 0, + "kaminokawa.tochigi.jp" => 0, + "kanuma.tochigi.jp" => 0, + "karasuyama.tochigi.jp" => 0, + "kuroiso.tochigi.jp" => 0, + "mashiko.tochigi.jp" => 0, + "mibu.tochigi.jp" => 0, + "moka.tochigi.jp" => 0, + "motegi.tochigi.jp" => 0, + "nasu.tochigi.jp" => 0, + "nasushiobara.tochigi.jp" => 0, + "nikko.tochigi.jp" => 0, + "nishikata.tochigi.jp" => 0, + "nogi.tochigi.jp" => 0, + "ohira.tochigi.jp" => 0, + "ohtawara.tochigi.jp" => 0, + "oyama.tochigi.jp" => 0, + "sakura.tochigi.jp" => 0, + "sano.tochigi.jp" => 0, + "shimotsuke.tochigi.jp" => 0, + "shioya.tochigi.jp" => 0, + "takanezawa.tochigi.jp" => 0, + "tochigi.tochigi.jp" => 0, + "tsuga.tochigi.jp" => 0, + "ujiie.tochigi.jp" => 0, + "utsunomiya.tochigi.jp" => 0, + "yaita.tochigi.jp" => 0, + "aizumi.tokushima.jp" => 0, + "anan.tokushima.jp" => 0, + "ichiba.tokushima.jp" => 0, + "itano.tokushima.jp" => 0, + "kainan.tokushima.jp" => 0, + "komatsushima.tokushima.jp" => 0, + "matsushige.tokushima.jp" => 0, + "mima.tokushima.jp" => 0, + "minami.tokushima.jp" => 0, + "miyoshi.tokushima.jp" => 0, + "mugi.tokushima.jp" => 0, + "nakagawa.tokushima.jp" => 0, + "naruto.tokushima.jp" => 0, + "sanagochi.tokushima.jp" => 0, + "shishikui.tokushima.jp" => 0, + "tokushima.tokushima.jp" => 0, + "wajiki.tokushima.jp" => 0, + "adachi.tokyo.jp" => 0, + "akiruno.tokyo.jp" => 0, + "akishima.tokyo.jp" => 0, + "aogashima.tokyo.jp" => 0, + "arakawa.tokyo.jp" => 0, + "bunkyo.tokyo.jp" => 0, + "chiyoda.tokyo.jp" => 0, + "chofu.tokyo.jp" => 0, + "chuo.tokyo.jp" => 0, + "edogawa.tokyo.jp" => 0, + "fuchu.tokyo.jp" => 0, + "fussa.tokyo.jp" => 0, + "hachijo.tokyo.jp" => 0, + "hachioji.tokyo.jp" => 0, + "hamura.tokyo.jp" => 0, + "higashikurume.tokyo.jp" => 0, + "higashimurayama.tokyo.jp" => 0, + "higashiyamato.tokyo.jp" => 0, + "hino.tokyo.jp" => 0, + "hinode.tokyo.jp" => 0, + "hinohara.tokyo.jp" => 0, + "inagi.tokyo.jp" => 0, + "itabashi.tokyo.jp" => 0, + "katsushika.tokyo.jp" => 0, + "kita.tokyo.jp" => 0, + "kiyose.tokyo.jp" => 0, + "kodaira.tokyo.jp" => 0, + "koganei.tokyo.jp" => 0, + "kokubunji.tokyo.jp" => 0, + "komae.tokyo.jp" => 0, + "koto.tokyo.jp" => 0, + "kouzushima.tokyo.jp" => 0, + "kunitachi.tokyo.jp" => 0, + "machida.tokyo.jp" => 0, + "meguro.tokyo.jp" => 0, + "minato.tokyo.jp" => 0, + "mitaka.tokyo.jp" => 0, + "mizuho.tokyo.jp" => 0, + "musashimurayama.tokyo.jp" => 0, + "musashino.tokyo.jp" => 0, + "nakano.tokyo.jp" => 0, + "nerima.tokyo.jp" => 0, + "ogasawara.tokyo.jp" => 0, + "okutama.tokyo.jp" => 0, + "ome.tokyo.jp" => 0, + "oshima.tokyo.jp" => 0, + "ota.tokyo.jp" => 0, + "setagaya.tokyo.jp" => 0, + "shibuya.tokyo.jp" => 0, + "shinagawa.tokyo.jp" => 0, + "shinjuku.tokyo.jp" => 0, + "suginami.tokyo.jp" => 0, + "sumida.tokyo.jp" => 0, + "tachikawa.tokyo.jp" => 0, + "taito.tokyo.jp" => 0, + "tama.tokyo.jp" => 0, + "toshima.tokyo.jp" => 0, + "chizu.tottori.jp" => 0, + "hino.tottori.jp" => 0, + "kawahara.tottori.jp" => 0, + "koge.tottori.jp" => 0, + "kotoura.tottori.jp" => 0, + "misasa.tottori.jp" => 0, + "nanbu.tottori.jp" => 0, + "nichinan.tottori.jp" => 0, + "sakaiminato.tottori.jp" => 0, + "tottori.tottori.jp" => 0, + "wakasa.tottori.jp" => 0, + "yazu.tottori.jp" => 0, + "yonago.tottori.jp" => 0, + "asahi.toyama.jp" => 0, + "fuchu.toyama.jp" => 0, + "fukumitsu.toyama.jp" => 0, + "funahashi.toyama.jp" => 0, + "himi.toyama.jp" => 0, + "imizu.toyama.jp" => 0, + "inami.toyama.jp" => 0, + "johana.toyama.jp" => 0, + "kamiichi.toyama.jp" => 0, + "kurobe.toyama.jp" => 0, + "nakaniikawa.toyama.jp" => 0, + 
"namerikawa.toyama.jp" => 0, + "nanto.toyama.jp" => 0, + "nyuzen.toyama.jp" => 0, + "oyabe.toyama.jp" => 0, + "taira.toyama.jp" => 0, + "takaoka.toyama.jp" => 0, + "tateyama.toyama.jp" => 0, + "toga.toyama.jp" => 0, + "tonami.toyama.jp" => 0, + "toyama.toyama.jp" => 0, + "unazuki.toyama.jp" => 0, + "uozu.toyama.jp" => 0, + "yamada.toyama.jp" => 0, + "arida.wakayama.jp" => 0, + "aridagawa.wakayama.jp" => 0, + "gobo.wakayama.jp" => 0, + "hashimoto.wakayama.jp" => 0, + "hidaka.wakayama.jp" => 0, + "hirogawa.wakayama.jp" => 0, + "inami.wakayama.jp" => 0, + "iwade.wakayama.jp" => 0, + "kainan.wakayama.jp" => 0, + "kamitonda.wakayama.jp" => 0, + "katsuragi.wakayama.jp" => 0, + "kimino.wakayama.jp" => 0, + "kinokawa.wakayama.jp" => 0, + "kitayama.wakayama.jp" => 0, + "koya.wakayama.jp" => 0, + "koza.wakayama.jp" => 0, + "kozagawa.wakayama.jp" => 0, + "kudoyama.wakayama.jp" => 0, + "kushimoto.wakayama.jp" => 0, + "mihama.wakayama.jp" => 0, + "misato.wakayama.jp" => 0, + "nachikatsuura.wakayama.jp" => 0, + "shingu.wakayama.jp" => 0, + "shirahama.wakayama.jp" => 0, + "taiji.wakayama.jp" => 0, + "tanabe.wakayama.jp" => 0, + "wakayama.wakayama.jp" => 0, + "yuasa.wakayama.jp" => 0, + "yura.wakayama.jp" => 0, + "asahi.yamagata.jp" => 0, + "funagata.yamagata.jp" => 0, + "higashine.yamagata.jp" => 0, + "iide.yamagata.jp" => 0, + "kahoku.yamagata.jp" => 0, + "kaminoyama.yamagata.jp" => 0, + "kaneyama.yamagata.jp" => 0, + "kawanishi.yamagata.jp" => 0, + "mamurogawa.yamagata.jp" => 0, + "mikawa.yamagata.jp" => 0, + "murayama.yamagata.jp" => 0, + "nagai.yamagata.jp" => 0, + "nakayama.yamagata.jp" => 0, + "nanyo.yamagata.jp" => 0, + "nishikawa.yamagata.jp" => 0, + "obanazawa.yamagata.jp" => 0, + "oe.yamagata.jp" => 0, + "oguni.yamagata.jp" => 0, + "ohkura.yamagata.jp" => 0, + "oishida.yamagata.jp" => 0, + "sagae.yamagata.jp" => 0, + "sakata.yamagata.jp" => 0, + "sakegawa.yamagata.jp" => 0, + "shinjo.yamagata.jp" => 0, + "shirataka.yamagata.jp" => 0, + "shonai.yamagata.jp" => 0, + "takahata.yamagata.jp" => 0, + "tendo.yamagata.jp" => 0, + "tozawa.yamagata.jp" => 0, + "tsuruoka.yamagata.jp" => 0, + "yamagata.yamagata.jp" => 0, + "yamanobe.yamagata.jp" => 0, + "yonezawa.yamagata.jp" => 0, + "yuza.yamagata.jp" => 0, + "abu.yamaguchi.jp" => 0, + "hagi.yamaguchi.jp" => 0, + "hikari.yamaguchi.jp" => 0, + "hofu.yamaguchi.jp" => 0, + "iwakuni.yamaguchi.jp" => 0, + "kudamatsu.yamaguchi.jp" => 0, + "mitou.yamaguchi.jp" => 0, + "nagato.yamaguchi.jp" => 0, + "oshima.yamaguchi.jp" => 0, + "shimonoseki.yamaguchi.jp" => 0, + "shunan.yamaguchi.jp" => 0, + "tabuse.yamaguchi.jp" => 0, + "tokuyama.yamaguchi.jp" => 0, + "toyota.yamaguchi.jp" => 0, + "ube.yamaguchi.jp" => 0, + "yuu.yamaguchi.jp" => 0, + "chuo.yamanashi.jp" => 0, + "doshi.yamanashi.jp" => 0, + "fuefuki.yamanashi.jp" => 0, + "fujikawa.yamanashi.jp" => 0, + "fujikawaguchiko.yamanashi.jp" => 0, + "fujiyoshida.yamanashi.jp" => 0, + "hayakawa.yamanashi.jp" => 0, + "hokuto.yamanashi.jp" => 0, + "ichikawamisato.yamanashi.jp" => 0, + "kai.yamanashi.jp" => 0, + "kofu.yamanashi.jp" => 0, + "koshu.yamanashi.jp" => 0, + "kosuge.yamanashi.jp" => 0, + "minami-alps.yamanashi.jp" => 0, + "minobu.yamanashi.jp" => 0, + "nakamichi.yamanashi.jp" => 0, + "nanbu.yamanashi.jp" => 0, + "narusawa.yamanashi.jp" => 0, + "nirasaki.yamanashi.jp" => 0, + "nishikatsura.yamanashi.jp" => 0, + "oshino.yamanashi.jp" => 0, + "otsuki.yamanashi.jp" => 0, + "showa.yamanashi.jp" => 0, + "tabayama.yamanashi.jp" => 0, + "tsuru.yamanashi.jp" => 0, + "uenohara.yamanashi.jp" => 0, + "yamanakako.yamanashi.jp" 
=> 0, + "yamanashi.yamanashi.jp" => 0, + "ke" => 0, + "ac.ke" => 0, + "co.ke" => 0, + "go.ke" => 0, + "info.ke" => 0, + "me.ke" => 0, + "mobi.ke" => 0, + "ne.ke" => 0, + "or.ke" => 0, + "sc.ke" => 0, + "kg" => 0, + "org.kg" => 0, + "net.kg" => 0, + "com.kg" => 0, + "edu.kg" => 0, + "gov.kg" => 0, + "mil.kg" => 0, + "kh" => -1, + "ki" => 0, + "edu.ki" => 0, + "biz.ki" => 0, + "net.ki" => 0, + "org.ki" => 0, + "gov.ki" => 0, + "info.ki" => 0, + "com.ki" => 0, + "km" => 0, + "org.km" => 0, + "nom.km" => 0, + "gov.km" => 0, + "prd.km" => 0, + "tm.km" => 0, + "edu.km" => 0, + "mil.km" => 0, + "ass.km" => 0, + "com.km" => 0, + "coop.km" => 0, + "asso.km" => 0, + "presse.km" => 0, + "medecin.km" => 0, + "notaires.km" => 0, + "pharmaciens.km" => 0, + "veterinaire.km" => 0, + "gouv.km" => 0, + "kn" => 0, + "net.kn" => 0, + "org.kn" => 0, + "edu.kn" => 0, + "gov.kn" => 0, + "kp" => 0, + "com.kp" => 0, + "edu.kp" => 0, + "gov.kp" => 0, + "org.kp" => 0, + "rep.kp" => 0, + "tra.kp" => 0, + "kr" => 0, + "ac.kr" => 0, + "co.kr" => 0, + "es.kr" => 0, + "go.kr" => 0, + "hs.kr" => 0, + "kg.kr" => 0, + "mil.kr" => 0, + "ms.kr" => 0, + "ne.kr" => 0, + "or.kr" => 0, + "pe.kr" => 0, + "re.kr" => 0, + "sc.kr" => 0, + "busan.kr" => 0, + "chungbuk.kr" => 0, + "chungnam.kr" => 0, + "daegu.kr" => 0, + "daejeon.kr" => 0, + "gangwon.kr" => 0, + "gwangju.kr" => 0, + "gyeongbuk.kr" => 0, + "gyeonggi.kr" => 0, + "gyeongnam.kr" => 0, + "incheon.kr" => 0, + "jeju.kr" => 0, + "jeonbuk.kr" => 0, + "jeonnam.kr" => 0, + "seoul.kr" => 0, + "ulsan.kr" => 0, + "kw" => 0, + "com.kw" => 0, + "edu.kw" => 0, + "emb.kw" => 0, + "gov.kw" => 0, + "ind.kw" => 0, + "net.kw" => 0, + "org.kw" => 0, + "ky" => 0, + "edu.ky" => 0, + "gov.ky" => 0, + "com.ky" => 0, + "org.ky" => 0, + "net.ky" => 0, + "kz" => 0, + "org.kz" => 0, + "edu.kz" => 0, + "net.kz" => 0, + "gov.kz" => 0, + "mil.kz" => 0, + "com.kz" => 0, + "la" => 0, + "int.la" => 0, + "net.la" => 0, + "info.la" => 0, + "edu.la" => 0, + "gov.la" => 0, + "per.la" => 0, + "com.la" => 0, + "org.la" => 0, + "lb" => 0, + "com.lb" => 0, + "edu.lb" => 0, + "gov.lb" => 0, + "net.lb" => 0, + "org.lb" => 0, + "lc" => 0, + "com.lc" => 0, + "net.lc" => 0, + "co.lc" => 0, + "org.lc" => 0, + "edu.lc" => 0, + "gov.lc" => 0, + "li" => 0, + "lk" => 0, + "gov.lk" => 0, + "sch.lk" => 0, + "net.lk" => 0, + "int.lk" => 0, + "com.lk" => 0, + "org.lk" => 0, + "edu.lk" => 0, + "ngo.lk" => 0, + "soc.lk" => 0, + "web.lk" => 0, + "ltd.lk" => 0, + "assn.lk" => 0, + "grp.lk" => 0, + "hotel.lk" => 0, + "ac.lk" => 0, + "lr" => 0, + "com.lr" => 0, + "edu.lr" => 0, + "gov.lr" => 0, + "org.lr" => 0, + "net.lr" => 0, + "ls" => 0, + "ac.ls" => 0, + "biz.ls" => 0, + "co.ls" => 0, + "edu.ls" => 0, + "gov.ls" => 0, + "info.ls" => 0, + "net.ls" => 0, + "org.ls" => 0, + "sc.ls" => 0, + "lt" => 0, + "gov.lt" => 0, + "lu" => 0, + "lv" => 0, + "com.lv" => 0, + "edu.lv" => 0, + "gov.lv" => 0, + "org.lv" => 0, + "mil.lv" => 0, + "id.lv" => 0, + "net.lv" => 0, + "asn.lv" => 0, + "conf.lv" => 0, + "ly" => 0, + "com.ly" => 0, + "net.ly" => 0, + "gov.ly" => 0, + "plc.ly" => 0, + "edu.ly" => 0, + "sch.ly" => 0, + "med.ly" => 0, + "org.ly" => 0, + "id.ly" => 0, + "ma" => 0, + "co.ma" => 0, + "net.ma" => 0, + "gov.ma" => 0, + "org.ma" => 0, + "ac.ma" => 0, + "press.ma" => 0, + "mc" => 0, + "tm.mc" => 0, + "asso.mc" => 0, + "md" => 0, + "me" => 0, + "co.me" => 0, + "net.me" => 0, + "org.me" => 0, + "edu.me" => 0, + "ac.me" => 0, + "gov.me" => 0, + "its.me" => 0, + "priv.me" => 0, + "mg" => 0, + "org.mg" => 0, + "nom.mg" => 0, + "gov.mg" => 
0, + "prd.mg" => 0, + "tm.mg" => 0, + "edu.mg" => 0, + "mil.mg" => 0, + "com.mg" => 0, + "co.mg" => 0, + "mh" => 0, + "mil" => 0, + "mk" => 0, + "com.mk" => 0, + "org.mk" => 0, + "net.mk" => 0, + "edu.mk" => 0, + "gov.mk" => 0, + "inf.mk" => 0, + "name.mk" => 0, + "ml" => 0, + "com.ml" => 0, + "edu.ml" => 0, + "gouv.ml" => 0, + "gov.ml" => 0, + "net.ml" => 0, + "org.ml" => 0, + "presse.ml" => 0, + "mm" => -1, + "mn" => 0, + "gov.mn" => 0, + "edu.mn" => 0, + "org.mn" => 0, + "mo" => 0, + "com.mo" => 0, + "net.mo" => 0, + "org.mo" => 0, + "edu.mo" => 0, + "gov.mo" => 0, + "mobi" => 0, + "mp" => 0, + "mq" => 0, + "mr" => 0, + "gov.mr" => 0, + "ms" => 0, + "com.ms" => 0, + "edu.ms" => 0, + "gov.ms" => 0, + "net.ms" => 0, + "org.ms" => 0, + "mt" => 0, + "com.mt" => 0, + "edu.mt" => 0, + "net.mt" => 0, + "org.mt" => 0, + "mu" => 0, + "com.mu" => 0, + "net.mu" => 0, + "org.mu" => 0, + "gov.mu" => 0, + "ac.mu" => 0, + "co.mu" => 0, + "or.mu" => 0, + "museum" => 0, + "academy.museum" => 0, + "agriculture.museum" => 0, + "air.museum" => 0, + "airguard.museum" => 0, + "alabama.museum" => 0, + "alaska.museum" => 0, + "amber.museum" => 0, + "ambulance.museum" => 0, + "american.museum" => 0, + "americana.museum" => 0, + "americanantiques.museum" => 0, + "americanart.museum" => 0, + "amsterdam.museum" => 0, + "and.museum" => 0, + "annefrank.museum" => 0, + "anthro.museum" => 0, + "anthropology.museum" => 0, + "antiques.museum" => 0, + "aquarium.museum" => 0, + "arboretum.museum" => 0, + "archaeological.museum" => 0, + "archaeology.museum" => 0, + "architecture.museum" => 0, + "art.museum" => 0, + "artanddesign.museum" => 0, + "artcenter.museum" => 0, + "artdeco.museum" => 0, + "arteducation.museum" => 0, + "artgallery.museum" => 0, + "arts.museum" => 0, + "artsandcrafts.museum" => 0, + "asmatart.museum" => 0, + "assassination.museum" => 0, + "assisi.museum" => 0, + "association.museum" => 0, + "astronomy.museum" => 0, + "atlanta.museum" => 0, + "austin.museum" => 0, + "australia.museum" => 0, + "automotive.museum" => 0, + "aviation.museum" => 0, + "axis.museum" => 0, + "badajoz.museum" => 0, + "baghdad.museum" => 0, + "bahn.museum" => 0, + "bale.museum" => 0, + "baltimore.museum" => 0, + "barcelona.museum" => 0, + "baseball.museum" => 0, + "basel.museum" => 0, + "baths.museum" => 0, + "bauern.museum" => 0, + "beauxarts.museum" => 0, + "beeldengeluid.museum" => 0, + "bellevue.museum" => 0, + "bergbau.museum" => 0, + "berkeley.museum" => 0, + "berlin.museum" => 0, + "bern.museum" => 0, + "bible.museum" => 0, + "bilbao.museum" => 0, + "bill.museum" => 0, + "birdart.museum" => 0, + "birthplace.museum" => 0, + "bonn.museum" => 0, + "boston.museum" => 0, + "botanical.museum" => 0, + "botanicalgarden.museum" => 0, + "botanicgarden.museum" => 0, + "botany.museum" => 0, + "brandywinevalley.museum" => 0, + "brasil.museum" => 0, + "bristol.museum" => 0, + "british.museum" => 0, + "britishcolumbia.museum" => 0, + "broadcast.museum" => 0, + "brunel.museum" => 0, + "brussel.museum" => 0, + "brussels.museum" => 0, + "bruxelles.museum" => 0, + "building.museum" => 0, + "burghof.museum" => 0, + "bus.museum" => 0, + "bushey.museum" => 0, + "cadaques.museum" => 0, + "california.museum" => 0, + "cambridge.museum" => 0, + "can.museum" => 0, + "canada.museum" => 0, + "capebreton.museum" => 0, + "carrier.museum" => 0, + "cartoonart.museum" => 0, + "casadelamoneda.museum" => 0, + "castle.museum" => 0, + "castres.museum" => 0, + "celtic.museum" => 0, + "center.museum" => 0, + "chattanooga.museum" => 0, + "cheltenham.museum" => 
0, + "chesapeakebay.museum" => 0, + "chicago.museum" => 0, + "children.museum" => 0, + "childrens.museum" => 0, + "childrensgarden.museum" => 0, + "chiropractic.museum" => 0, + "chocolate.museum" => 0, + "christiansburg.museum" => 0, + "cincinnati.museum" => 0, + "cinema.museum" => 0, + "circus.museum" => 0, + "civilisation.museum" => 0, + "civilization.museum" => 0, + "civilwar.museum" => 0, + "clinton.museum" => 0, + "clock.museum" => 0, + "coal.museum" => 0, + "coastaldefence.museum" => 0, + "cody.museum" => 0, + "coldwar.museum" => 0, + "collection.museum" => 0, + "colonialwilliamsburg.museum" => 0, + "coloradoplateau.museum" => 0, + "columbia.museum" => 0, + "columbus.museum" => 0, + "communication.museum" => 0, + "communications.museum" => 0, + "community.museum" => 0, + "computer.museum" => 0, + "computerhistory.museum" => 0, + "xn--comunicaes-v6a2o.museum" => 0, + "contemporary.museum" => 0, + "contemporaryart.museum" => 0, + "convent.museum" => 0, + "copenhagen.museum" => 0, + "corporation.museum" => 0, + "xn--correios-e-telecomunicaes-ghc29a.museum" => 0, + "corvette.museum" => 0, + "costume.museum" => 0, + "countryestate.museum" => 0, + "county.museum" => 0, + "crafts.museum" => 0, + "cranbrook.museum" => 0, + "creation.museum" => 0, + "cultural.museum" => 0, + "culturalcenter.museum" => 0, + "culture.museum" => 0, + "cyber.museum" => 0, + "cymru.museum" => 0, + "dali.museum" => 0, + "dallas.museum" => 0, + "database.museum" => 0, + "ddr.museum" => 0, + "decorativearts.museum" => 0, + "delaware.museum" => 0, + "delmenhorst.museum" => 0, + "denmark.museum" => 0, + "depot.museum" => 0, + "design.museum" => 0, + "detroit.museum" => 0, + "dinosaur.museum" => 0, + "discovery.museum" => 0, + "dolls.museum" => 0, + "donostia.museum" => 0, + "durham.museum" => 0, + "eastafrica.museum" => 0, + "eastcoast.museum" => 0, + "education.museum" => 0, + "educational.museum" => 0, + "egyptian.museum" => 0, + "eisenbahn.museum" => 0, + "elburg.museum" => 0, + "elvendrell.museum" => 0, + "embroidery.museum" => 0, + "encyclopedic.museum" => 0, + "england.museum" => 0, + "entomology.museum" => 0, + "environment.museum" => 0, + "environmentalconservation.museum" => 0, + "epilepsy.museum" => 0, + "essex.museum" => 0, + "estate.museum" => 0, + "ethnology.museum" => 0, + "exeter.museum" => 0, + "exhibition.museum" => 0, + "family.museum" => 0, + "farm.museum" => 0, + "farmequipment.museum" => 0, + "farmers.museum" => 0, + "farmstead.museum" => 0, + "field.museum" => 0, + "figueres.museum" => 0, + "filatelia.museum" => 0, + "film.museum" => 0, + "fineart.museum" => 0, + "finearts.museum" => 0, + "finland.museum" => 0, + "flanders.museum" => 0, + "florida.museum" => 0, + "force.museum" => 0, + "fortmissoula.museum" => 0, + "fortworth.museum" => 0, + "foundation.museum" => 0, + "francaise.museum" => 0, + "frankfurt.museum" => 0, + "franziskaner.museum" => 0, + "freemasonry.museum" => 0, + "freiburg.museum" => 0, + "fribourg.museum" => 0, + "frog.museum" => 0, + "fundacio.museum" => 0, + "furniture.museum" => 0, + "gallery.museum" => 0, + "garden.museum" => 0, + "gateway.museum" => 0, + "geelvinck.museum" => 0, + "gemological.museum" => 0, + "geology.museum" => 0, + "georgia.museum" => 0, + "giessen.museum" => 0, + "glas.museum" => 0, + "glass.museum" => 0, + "gorge.museum" => 0, + "grandrapids.museum" => 0, + "graz.museum" => 0, + "guernsey.museum" => 0, + "halloffame.museum" => 0, + "hamburg.museum" => 0, + "handson.museum" => 0, + "harvestcelebration.museum" => 0, + "hawaii.museum" => 0, + 
"health.museum" => 0, + "heimatunduhren.museum" => 0, + "hellas.museum" => 0, + "helsinki.museum" => 0, + "hembygdsforbund.museum" => 0, + "heritage.museum" => 0, + "histoire.museum" => 0, + "historical.museum" => 0, + "historicalsociety.museum" => 0, + "historichouses.museum" => 0, + "historisch.museum" => 0, + "historisches.museum" => 0, + "history.museum" => 0, + "historyofscience.museum" => 0, + "horology.museum" => 0, + "house.museum" => 0, + "humanities.museum" => 0, + "illustration.museum" => 0, + "imageandsound.museum" => 0, + "indian.museum" => 0, + "indiana.museum" => 0, + "indianapolis.museum" => 0, + "indianmarket.museum" => 0, + "intelligence.museum" => 0, + "interactive.museum" => 0, + "iraq.museum" => 0, + "iron.museum" => 0, + "isleofman.museum" => 0, + "jamison.museum" => 0, + "jefferson.museum" => 0, + "jerusalem.museum" => 0, + "jewelry.museum" => 0, + "jewish.museum" => 0, + "jewishart.museum" => 0, + "jfk.museum" => 0, + "journalism.museum" => 0, + "judaica.museum" => 0, + "judygarland.museum" => 0, + "juedisches.museum" => 0, + "juif.museum" => 0, + "karate.museum" => 0, + "karikatur.museum" => 0, + "kids.museum" => 0, + "koebenhavn.museum" => 0, + "koeln.museum" => 0, + "kunst.museum" => 0, + "kunstsammlung.museum" => 0, + "kunstunddesign.museum" => 0, + "labor.museum" => 0, + "labour.museum" => 0, + "lajolla.museum" => 0, + "lancashire.museum" => 0, + "landes.museum" => 0, + "lans.museum" => 0, + "xn--lns-qla.museum" => 0, + "larsson.museum" => 0, + "lewismiller.museum" => 0, + "lincoln.museum" => 0, + "linz.museum" => 0, + "living.museum" => 0, + "livinghistory.museum" => 0, + "localhistory.museum" => 0, + "london.museum" => 0, + "losangeles.museum" => 0, + "louvre.museum" => 0, + "loyalist.museum" => 0, + "lucerne.museum" => 0, + "luxembourg.museum" => 0, + "luzern.museum" => 0, + "mad.museum" => 0, + "madrid.museum" => 0, + "mallorca.museum" => 0, + "manchester.museum" => 0, + "mansion.museum" => 0, + "mansions.museum" => 0, + "manx.museum" => 0, + "marburg.museum" => 0, + "maritime.museum" => 0, + "maritimo.museum" => 0, + "maryland.museum" => 0, + "marylhurst.museum" => 0, + "media.museum" => 0, + "medical.museum" => 0, + "medizinhistorisches.museum" => 0, + "meeres.museum" => 0, + "memorial.museum" => 0, + "mesaverde.museum" => 0, + "michigan.museum" => 0, + "midatlantic.museum" => 0, + "military.museum" => 0, + "mill.museum" => 0, + "miners.museum" => 0, + "mining.museum" => 0, + "minnesota.museum" => 0, + "missile.museum" => 0, + "missoula.museum" => 0, + "modern.museum" => 0, + "moma.museum" => 0, + "money.museum" => 0, + "monmouth.museum" => 0, + "monticello.museum" => 0, + "montreal.museum" => 0, + "moscow.museum" => 0, + "motorcycle.museum" => 0, + "muenchen.museum" => 0, + "muenster.museum" => 0, + "mulhouse.museum" => 0, + "muncie.museum" => 0, + "museet.museum" => 0, + "museumcenter.museum" => 0, + "museumvereniging.museum" => 0, + "music.museum" => 0, + "national.museum" => 0, + "nationalfirearms.museum" => 0, + "nationalheritage.museum" => 0, + "nativeamerican.museum" => 0, + "naturalhistory.museum" => 0, + "naturalhistorymuseum.museum" => 0, + "naturalsciences.museum" => 0, + "nature.museum" => 0, + "naturhistorisches.museum" => 0, + "natuurwetenschappen.museum" => 0, + "naumburg.museum" => 0, + "naval.museum" => 0, + "nebraska.museum" => 0, + "neues.museum" => 0, + "newhampshire.museum" => 0, + "newjersey.museum" => 0, + "newmexico.museum" => 0, + "newport.museum" => 0, + "newspaper.museum" => 0, + "newyork.museum" => 0, + "niepce.museum" => 0, + 
"norfolk.museum" => 0, + "north.museum" => 0, + "nrw.museum" => 0, + "nuernberg.museum" => 0, + "nuremberg.museum" => 0, + "nyc.museum" => 0, + "nyny.museum" => 0, + "oceanographic.museum" => 0, + "oceanographique.museum" => 0, + "omaha.museum" => 0, + "online.museum" => 0, + "ontario.museum" => 0, + "openair.museum" => 0, + "oregon.museum" => 0, + "oregontrail.museum" => 0, + "otago.museum" => 0, + "oxford.museum" => 0, + "pacific.museum" => 0, + "paderborn.museum" => 0, + "palace.museum" => 0, + "paleo.museum" => 0, + "palmsprings.museum" => 0, + "panama.museum" => 0, + "paris.museum" => 0, + "pasadena.museum" => 0, + "pharmacy.museum" => 0, + "philadelphia.museum" => 0, + "philadelphiaarea.museum" => 0, + "philately.museum" => 0, + "phoenix.museum" => 0, + "photography.museum" => 0, + "pilots.museum" => 0, + "pittsburgh.museum" => 0, + "planetarium.museum" => 0, + "plantation.museum" => 0, + "plants.museum" => 0, + "plaza.museum" => 0, + "portal.museum" => 0, + "portland.museum" => 0, + "portlligat.museum" => 0, + "posts-and-telecommunications.museum" => 0, + "preservation.museum" => 0, + "presidio.museum" => 0, + "press.museum" => 0, + "project.museum" => 0, + "public.museum" => 0, + "pubol.museum" => 0, + "quebec.museum" => 0, + "railroad.museum" => 0, + "railway.museum" => 0, + "research.museum" => 0, + "resistance.museum" => 0, + "riodejaneiro.museum" => 0, + "rochester.museum" => 0, + "rockart.museum" => 0, + "roma.museum" => 0, + "russia.museum" => 0, + "saintlouis.museum" => 0, + "salem.museum" => 0, + "salvadordali.museum" => 0, + "salzburg.museum" => 0, + "sandiego.museum" => 0, + "sanfrancisco.museum" => 0, + "santabarbara.museum" => 0, + "santacruz.museum" => 0, + "santafe.museum" => 0, + "saskatchewan.museum" => 0, + "satx.museum" => 0, + "savannahga.museum" => 0, + "schlesisches.museum" => 0, + "schoenbrunn.museum" => 0, + "schokoladen.museum" => 0, + "school.museum" => 0, + "schweiz.museum" => 0, + "science.museum" => 0, + "scienceandhistory.museum" => 0, + "scienceandindustry.museum" => 0, + "sciencecenter.museum" => 0, + "sciencecenters.museum" => 0, + "science-fiction.museum" => 0, + "sciencehistory.museum" => 0, + "sciences.museum" => 0, + "sciencesnaturelles.museum" => 0, + "scotland.museum" => 0, + "seaport.museum" => 0, + "settlement.museum" => 0, + "settlers.museum" => 0, + "shell.museum" => 0, + "sherbrooke.museum" => 0, + "sibenik.museum" => 0, + "silk.museum" => 0, + "ski.museum" => 0, + "skole.museum" => 0, + "society.museum" => 0, + "sologne.museum" => 0, + "soundandvision.museum" => 0, + "southcarolina.museum" => 0, + "southwest.museum" => 0, + "space.museum" => 0, + "spy.museum" => 0, + "square.museum" => 0, + "stadt.museum" => 0, + "stalbans.museum" => 0, + "starnberg.museum" => 0, + "state.museum" => 0, + "stateofdelaware.museum" => 0, + "station.museum" => 0, + "steam.museum" => 0, + "steiermark.museum" => 0, + "stjohn.museum" => 0, + "stockholm.museum" => 0, + "stpetersburg.museum" => 0, + "stuttgart.museum" => 0, + "suisse.museum" => 0, + "surgeonshall.museum" => 0, + "surrey.museum" => 0, + "svizzera.museum" => 0, + "sweden.museum" => 0, + "sydney.museum" => 0, + "tank.museum" => 0, + "tcm.museum" => 0, + "technology.museum" => 0, + "telekommunikation.museum" => 0, + "television.museum" => 0, + "texas.museum" => 0, + "textile.museum" => 0, + "theater.museum" => 0, + "time.museum" => 0, + "timekeeping.museum" => 0, + "topology.museum" => 0, + "torino.museum" => 0, + "touch.museum" => 0, + "town.museum" => 0, + "transport.museum" => 0, + "tree.museum" => 
0, + "trolley.museum" => 0, + "trust.museum" => 0, + "trustee.museum" => 0, + "uhren.museum" => 0, + "ulm.museum" => 0, + "undersea.museum" => 0, + "university.museum" => 0, + "usa.museum" => 0, + "usantiques.museum" => 0, + "usarts.museum" => 0, + "uscountryestate.museum" => 0, + "usculture.museum" => 0, + "usdecorativearts.museum" => 0, + "usgarden.museum" => 0, + "ushistory.museum" => 0, + "ushuaia.museum" => 0, + "uslivinghistory.museum" => 0, + "utah.museum" => 0, + "uvic.museum" => 0, + "valley.museum" => 0, + "vantaa.museum" => 0, + "versailles.museum" => 0, + "viking.museum" => 0, + "village.museum" => 0, + "virginia.museum" => 0, + "virtual.museum" => 0, + "virtuel.museum" => 0, + "vlaanderen.museum" => 0, + "volkenkunde.museum" => 0, + "wales.museum" => 0, + "wallonie.museum" => 0, + "war.museum" => 0, + "washingtondc.museum" => 0, + "watchandclock.museum" => 0, + "watch-and-clock.museum" => 0, + "western.museum" => 0, + "westfalen.museum" => 0, + "whaling.museum" => 0, + "wildlife.museum" => 0, + "williamsburg.museum" => 0, + "windmill.museum" => 0, + "workshop.museum" => 0, + "york.museum" => 0, + "yorkshire.museum" => 0, + "yosemite.museum" => 0, + "youth.museum" => 0, + "zoological.museum" => 0, + "zoology.museum" => 0, + "xn--9dbhblg6di.museum" => 0, + "xn--h1aegh.museum" => 0, + "mv" => 0, + "aero.mv" => 0, + "biz.mv" => 0, + "com.mv" => 0, + "coop.mv" => 0, + "edu.mv" => 0, + "gov.mv" => 0, + "info.mv" => 0, + "int.mv" => 0, + "mil.mv" => 0, + "museum.mv" => 0, + "name.mv" => 0, + "net.mv" => 0, + "org.mv" => 0, + "pro.mv" => 0, + "mw" => 0, + "ac.mw" => 0, + "biz.mw" => 0, + "co.mw" => 0, + "com.mw" => 0, + "coop.mw" => 0, + "edu.mw" => 0, + "gov.mw" => 0, + "int.mw" => 0, + "museum.mw" => 0, + "net.mw" => 0, + "org.mw" => 0, + "mx" => 0, + "com.mx" => 0, + "org.mx" => 0, + "gob.mx" => 0, + "edu.mx" => 0, + "net.mx" => 0, + "my" => 0, + "com.my" => 0, + "net.my" => 0, + "org.my" => 0, + "gov.my" => 0, + "edu.my" => 0, + "mil.my" => 0, + "name.my" => 0, + "mz" => 0, + "ac.mz" => 0, + "adv.mz" => 0, + "co.mz" => 0, + "edu.mz" => 0, + "gov.mz" => 0, + "mil.mz" => 0, + "net.mz" => 0, + "org.mz" => 0, + "na" => 0, + "info.na" => 0, + "pro.na" => 0, + "name.na" => 0, + "school.na" => 0, + "or.na" => 0, + "dr.na" => 0, + "us.na" => 0, + "mx.na" => 0, + "ca.na" => 0, + "in.na" => 0, + "cc.na" => 0, + "tv.na" => 0, + "ws.na" => 0, + "mobi.na" => 0, + "co.na" => 0, + "com.na" => 0, + "org.na" => 0, + "name" => 0, + "nc" => 0, + "asso.nc" => 0, + "nom.nc" => 0, + "ne" => 0, + "net" => 0, + "nf" => 0, + "com.nf" => 0, + "net.nf" => 0, + "per.nf" => 0, + "rec.nf" => 0, + "web.nf" => 0, + "arts.nf" => 0, + "firm.nf" => 0, + "info.nf" => 0, + "other.nf" => 0, + "store.nf" => 0, + "ng" => 0, + "com.ng" => 0, + "edu.ng" => 0, + "gov.ng" => 0, + "i.ng" => 0, + "mil.ng" => 0, + "mobi.ng" => 0, + "name.ng" => 0, + "net.ng" => 0, + "org.ng" => 0, + "sch.ng" => 0, + "ni" => 0, + "ac.ni" => 0, + "biz.ni" => 0, + "co.ni" => 0, + "com.ni" => 0, + "edu.ni" => 0, + "gob.ni" => 0, + "in.ni" => 0, + "info.ni" => 0, + "int.ni" => 0, + "mil.ni" => 0, + "net.ni" => 0, + "nom.ni" => 0, + "org.ni" => 0, + "web.ni" => 0, + "nl" => 0, + "no" => 0, + "fhs.no" => 0, + "vgs.no" => 0, + "fylkesbibl.no" => 0, + "folkebibl.no" => 0, + "museum.no" => 0, + "idrett.no" => 0, + "priv.no" => 0, + "mil.no" => 0, + "stat.no" => 0, + "dep.no" => 0, + "kommune.no" => 0, + "herad.no" => 0, + "aa.no" => 0, + "ah.no" => 0, + "bu.no" => 0, + "fm.no" => 0, + "hl.no" => 0, + "hm.no" => 0, + "jan-mayen.no" => 0, + "mr.no" => 0, 
+ "nl.no" => 0, + "nt.no" => 0, + "of.no" => 0, + "ol.no" => 0, + "oslo.no" => 0, + "rl.no" => 0, + "sf.no" => 0, + "st.no" => 0, + "svalbard.no" => 0, + "tm.no" => 0, + "tr.no" => 0, + "va.no" => 0, + "vf.no" => 0, + "gs.aa.no" => 0, + "gs.ah.no" => 0, + "gs.bu.no" => 0, + "gs.fm.no" => 0, + "gs.hl.no" => 0, + "gs.hm.no" => 0, + "gs.jan-mayen.no" => 0, + "gs.mr.no" => 0, + "gs.nl.no" => 0, + "gs.nt.no" => 0, + "gs.of.no" => 0, + "gs.ol.no" => 0, + "gs.oslo.no" => 0, + "gs.rl.no" => 0, + "gs.sf.no" => 0, + "gs.st.no" => 0, + "gs.svalbard.no" => 0, + "gs.tm.no" => 0, + "gs.tr.no" => 0, + "gs.va.no" => 0, + "gs.vf.no" => 0, + "akrehamn.no" => 0, + "xn--krehamn-dxa.no" => 0, + "algard.no" => 0, + "xn--lgrd-poac.no" => 0, + "arna.no" => 0, + "brumunddal.no" => 0, + "bryne.no" => 0, + "bronnoysund.no" => 0, + "xn--brnnysund-m8ac.no" => 0, + "drobak.no" => 0, + "xn--drbak-wua.no" => 0, + "egersund.no" => 0, + "fetsund.no" => 0, + "floro.no" => 0, + "xn--flor-jra.no" => 0, + "fredrikstad.no" => 0, + "hokksund.no" => 0, + "honefoss.no" => 0, + "xn--hnefoss-q1a.no" => 0, + "jessheim.no" => 0, + "jorpeland.no" => 0, + "xn--jrpeland-54a.no" => 0, + "kirkenes.no" => 0, + "kopervik.no" => 0, + "krokstadelva.no" => 0, + "langevag.no" => 0, + "xn--langevg-jxa.no" => 0, + "leirvik.no" => 0, + "mjondalen.no" => 0, + "xn--mjndalen-64a.no" => 0, + "mo-i-rana.no" => 0, + "mosjoen.no" => 0, + "xn--mosjen-eya.no" => 0, + "nesoddtangen.no" => 0, + "orkanger.no" => 0, + "osoyro.no" => 0, + "xn--osyro-wua.no" => 0, + "raholt.no" => 0, + "xn--rholt-mra.no" => 0, + "sandnessjoen.no" => 0, + "xn--sandnessjen-ogb.no" => 0, + "skedsmokorset.no" => 0, + "slattum.no" => 0, + "spjelkavik.no" => 0, + "stathelle.no" => 0, + "stavern.no" => 0, + "stjordalshalsen.no" => 0, + "xn--stjrdalshalsen-sqb.no" => 0, + "tananger.no" => 0, + "tranby.no" => 0, + "vossevangen.no" => 0, + "afjord.no" => 0, + "xn--fjord-lra.no" => 0, + "agdenes.no" => 0, + "al.no" => 0, + "xn--l-1fa.no" => 0, + "alesund.no" => 0, + "xn--lesund-hua.no" => 0, + "alstahaug.no" => 0, + "alta.no" => 0, + "xn--lt-liac.no" => 0, + "alaheadju.no" => 0, + "xn--laheadju-7ya.no" => 0, + "alvdal.no" => 0, + "amli.no" => 0, + "xn--mli-tla.no" => 0, + "amot.no" => 0, + "xn--mot-tla.no" => 0, + "andebu.no" => 0, + "andoy.no" => 0, + "xn--andy-ira.no" => 0, + "andasuolo.no" => 0, + "ardal.no" => 0, + "xn--rdal-poa.no" => 0, + "aremark.no" => 0, + "arendal.no" => 0, + "xn--s-1fa.no" => 0, + "aseral.no" => 0, + "xn--seral-lra.no" => 0, + "asker.no" => 0, + "askim.no" => 0, + "askvoll.no" => 0, + "askoy.no" => 0, + "xn--asky-ira.no" => 0, + "asnes.no" => 0, + "xn--snes-poa.no" => 0, + "audnedaln.no" => 0, + "aukra.no" => 0, + "aure.no" => 0, + "aurland.no" => 0, + "aurskog-holand.no" => 0, + "xn--aurskog-hland-jnb.no" => 0, + "austevoll.no" => 0, + "austrheim.no" => 0, + "averoy.no" => 0, + "xn--avery-yua.no" => 0, + "balestrand.no" => 0, + "ballangen.no" => 0, + "balat.no" => 0, + "xn--blt-elab.no" => 0, + "balsfjord.no" => 0, + "bahccavuotna.no" => 0, + "xn--bhccavuotna-k7a.no" => 0, + "bamble.no" => 0, + "bardu.no" => 0, + "beardu.no" => 0, + "beiarn.no" => 0, + "bajddar.no" => 0, + "xn--bjddar-pta.no" => 0, + "baidar.no" => 0, + "xn--bidr-5nac.no" => 0, + "berg.no" => 0, + "bergen.no" => 0, + "berlevag.no" => 0, + "xn--berlevg-jxa.no" => 0, + "bearalvahki.no" => 0, + "xn--bearalvhki-y4a.no" => 0, + "bindal.no" => 0, + "birkenes.no" => 0, + "bjarkoy.no" => 0, + "xn--bjarky-fya.no" => 0, + "bjerkreim.no" => 0, + "bjugn.no" => 0, + "bodo.no" => 0, + "xn--bod-2na.no" => 0, + 
"badaddja.no" => 0, + "xn--bdddj-mrabd.no" => 0, + "budejju.no" => 0, + "bokn.no" => 0, + "bremanger.no" => 0, + "bronnoy.no" => 0, + "xn--brnny-wuac.no" => 0, + "bygland.no" => 0, + "bykle.no" => 0, + "barum.no" => 0, + "xn--brum-voa.no" => 0, + "bo.telemark.no" => 0, + "xn--b-5ga.telemark.no" => 0, + "bo.nordland.no" => 0, + "xn--b-5ga.nordland.no" => 0, + "bievat.no" => 0, + "xn--bievt-0qa.no" => 0, + "bomlo.no" => 0, + "xn--bmlo-gra.no" => 0, + "batsfjord.no" => 0, + "xn--btsfjord-9za.no" => 0, + "bahcavuotna.no" => 0, + "xn--bhcavuotna-s4a.no" => 0, + "dovre.no" => 0, + "drammen.no" => 0, + "drangedal.no" => 0, + "dyroy.no" => 0, + "xn--dyry-ira.no" => 0, + "donna.no" => 0, + "xn--dnna-gra.no" => 0, + "eid.no" => 0, + "eidfjord.no" => 0, + "eidsberg.no" => 0, + "eidskog.no" => 0, + "eidsvoll.no" => 0, + "eigersund.no" => 0, + "elverum.no" => 0, + "enebakk.no" => 0, + "engerdal.no" => 0, + "etne.no" => 0, + "etnedal.no" => 0, + "evenes.no" => 0, + "evenassi.no" => 0, + "xn--eveni-0qa01ga.no" => 0, + "evje-og-hornnes.no" => 0, + "farsund.no" => 0, + "fauske.no" => 0, + "fuossko.no" => 0, + "fuoisku.no" => 0, + "fedje.no" => 0, + "fet.no" => 0, + "finnoy.no" => 0, + "xn--finny-yua.no" => 0, + "fitjar.no" => 0, + "fjaler.no" => 0, + "fjell.no" => 0, + "flakstad.no" => 0, + "flatanger.no" => 0, + "flekkefjord.no" => 0, + "flesberg.no" => 0, + "flora.no" => 0, + "fla.no" => 0, + "xn--fl-zia.no" => 0, + "folldal.no" => 0, + "forsand.no" => 0, + "fosnes.no" => 0, + "frei.no" => 0, + "frogn.no" => 0, + "froland.no" => 0, + "frosta.no" => 0, + "frana.no" => 0, + "xn--frna-woa.no" => 0, + "froya.no" => 0, + "xn--frya-hra.no" => 0, + "fusa.no" => 0, + "fyresdal.no" => 0, + "forde.no" => 0, + "xn--frde-gra.no" => 0, + "gamvik.no" => 0, + "gangaviika.no" => 0, + "xn--ggaviika-8ya47h.no" => 0, + "gaular.no" => 0, + "gausdal.no" => 0, + "gildeskal.no" => 0, + "xn--gildeskl-g0a.no" => 0, + "giske.no" => 0, + "gjemnes.no" => 0, + "gjerdrum.no" => 0, + "gjerstad.no" => 0, + "gjesdal.no" => 0, + "gjovik.no" => 0, + "xn--gjvik-wua.no" => 0, + "gloppen.no" => 0, + "gol.no" => 0, + "gran.no" => 0, + "grane.no" => 0, + "granvin.no" => 0, + "gratangen.no" => 0, + "grimstad.no" => 0, + "grong.no" => 0, + "kraanghke.no" => 0, + "xn--kranghke-b0a.no" => 0, + "grue.no" => 0, + "gulen.no" => 0, + "hadsel.no" => 0, + "halden.no" => 0, + "halsa.no" => 0, + "hamar.no" => 0, + "hamaroy.no" => 0, + "habmer.no" => 0, + "xn--hbmer-xqa.no" => 0, + "hapmir.no" => 0, + "xn--hpmir-xqa.no" => 0, + "hammerfest.no" => 0, + "hammarfeasta.no" => 0, + "xn--hmmrfeasta-s4ac.no" => 0, + "haram.no" => 0, + "hareid.no" => 0, + "harstad.no" => 0, + "hasvik.no" => 0, + "aknoluokta.no" => 0, + "xn--koluokta-7ya57h.no" => 0, + "hattfjelldal.no" => 0, + "aarborte.no" => 0, + "haugesund.no" => 0, + "hemne.no" => 0, + "hemnes.no" => 0, + "hemsedal.no" => 0, + "heroy.more-og-romsdal.no" => 0, + "xn--hery-ira.xn--mre-og-romsdal-qqb.no" => 0, + "heroy.nordland.no" => 0, + "xn--hery-ira.nordland.no" => 0, + "hitra.no" => 0, + "hjartdal.no" => 0, + "hjelmeland.no" => 0, + "hobol.no" => 0, + "xn--hobl-ira.no" => 0, + "hof.no" => 0, + "hol.no" => 0, + "hole.no" => 0, + "holmestrand.no" => 0, + "holtalen.no" => 0, + "xn--holtlen-hxa.no" => 0, + "hornindal.no" => 0, + "horten.no" => 0, + "hurdal.no" => 0, + "hurum.no" => 0, + "hvaler.no" => 0, + "hyllestad.no" => 0, + "hagebostad.no" => 0, + "xn--hgebostad-g3a.no" => 0, + "hoyanger.no" => 0, + "xn--hyanger-q1a.no" => 0, + "hoylandet.no" => 0, + "xn--hylandet-54a.no" => 0, + "ha.no" => 0, + 
"xn--h-2fa.no" => 0, + "ibestad.no" => 0, + "inderoy.no" => 0, + "xn--indery-fya.no" => 0, + "iveland.no" => 0, + "jevnaker.no" => 0, + "jondal.no" => 0, + "jolster.no" => 0, + "xn--jlster-bya.no" => 0, + "karasjok.no" => 0, + "karasjohka.no" => 0, + "xn--krjohka-hwab49j.no" => 0, + "karlsoy.no" => 0, + "galsa.no" => 0, + "xn--gls-elac.no" => 0, + "karmoy.no" => 0, + "xn--karmy-yua.no" => 0, + "kautokeino.no" => 0, + "guovdageaidnu.no" => 0, + "klepp.no" => 0, + "klabu.no" => 0, + "xn--klbu-woa.no" => 0, + "kongsberg.no" => 0, + "kongsvinger.no" => 0, + "kragero.no" => 0, + "xn--krager-gya.no" => 0, + "kristiansand.no" => 0, + "kristiansund.no" => 0, + "krodsherad.no" => 0, + "xn--krdsherad-m8a.no" => 0, + "kvalsund.no" => 0, + "rahkkeravju.no" => 0, + "xn--rhkkervju-01af.no" => 0, + "kvam.no" => 0, + "kvinesdal.no" => 0, + "kvinnherad.no" => 0, + "kviteseid.no" => 0, + "kvitsoy.no" => 0, + "xn--kvitsy-fya.no" => 0, + "kvafjord.no" => 0, + "xn--kvfjord-nxa.no" => 0, + "giehtavuoatna.no" => 0, + "kvanangen.no" => 0, + "xn--kvnangen-k0a.no" => 0, + "navuotna.no" => 0, + "xn--nvuotna-hwa.no" => 0, + "kafjord.no" => 0, + "xn--kfjord-iua.no" => 0, + "gaivuotna.no" => 0, + "xn--givuotna-8ya.no" => 0, + "larvik.no" => 0, + "lavangen.no" => 0, + "lavagis.no" => 0, + "loabat.no" => 0, + "xn--loabt-0qa.no" => 0, + "lebesby.no" => 0, + "davvesiida.no" => 0, + "leikanger.no" => 0, + "leirfjord.no" => 0, + "leka.no" => 0, + "leksvik.no" => 0, + "lenvik.no" => 0, + "leangaviika.no" => 0, + "xn--leagaviika-52b.no" => 0, + "lesja.no" => 0, + "levanger.no" => 0, + "lier.no" => 0, + "lierne.no" => 0, + "lillehammer.no" => 0, + "lillesand.no" => 0, + "lindesnes.no" => 0, + "lindas.no" => 0, + "xn--linds-pra.no" => 0, + "lom.no" => 0, + "loppa.no" => 0, + "lahppi.no" => 0, + "xn--lhppi-xqa.no" => 0, + "lund.no" => 0, + "lunner.no" => 0, + "luroy.no" => 0, + "xn--lury-ira.no" => 0, + "luster.no" => 0, + "lyngdal.no" => 0, + "lyngen.no" => 0, + "ivgu.no" => 0, + "lardal.no" => 0, + "lerdal.no" => 0, + "xn--lrdal-sra.no" => 0, + "lodingen.no" => 0, + "xn--ldingen-q1a.no" => 0, + "lorenskog.no" => 0, + "xn--lrenskog-54a.no" => 0, + "loten.no" => 0, + "xn--lten-gra.no" => 0, + "malvik.no" => 0, + "masoy.no" => 0, + "xn--msy-ula0h.no" => 0, + "muosat.no" => 0, + "xn--muost-0qa.no" => 0, + "mandal.no" => 0, + "marker.no" => 0, + "marnardal.no" => 0, + "masfjorden.no" => 0, + "meland.no" => 0, + "meldal.no" => 0, + "melhus.no" => 0, + "meloy.no" => 0, + "xn--mely-ira.no" => 0, + "meraker.no" => 0, + "xn--merker-kua.no" => 0, + "moareke.no" => 0, + "xn--moreke-jua.no" => 0, + "midsund.no" => 0, + "midtre-gauldal.no" => 0, + "modalen.no" => 0, + "modum.no" => 0, + "molde.no" => 0, + "moskenes.no" => 0, + "moss.no" => 0, + "mosvik.no" => 0, + "malselv.no" => 0, + "xn--mlselv-iua.no" => 0, + "malatvuopmi.no" => 0, + "xn--mlatvuopmi-s4a.no" => 0, + "namdalseid.no" => 0, + "aejrie.no" => 0, + "namsos.no" => 0, + "namsskogan.no" => 0, + "naamesjevuemie.no" => 0, + "xn--nmesjevuemie-tcba.no" => 0, + "laakesvuemie.no" => 0, + "nannestad.no" => 0, + "narvik.no" => 0, + "narviika.no" => 0, + "naustdal.no" => 0, + "nedre-eiker.no" => 0, + "nes.akershus.no" => 0, + "nes.buskerud.no" => 0, + "nesna.no" => 0, + "nesodden.no" => 0, + "nesseby.no" => 0, + "unjarga.no" => 0, + "xn--unjrga-rta.no" => 0, + "nesset.no" => 0, + "nissedal.no" => 0, + "nittedal.no" => 0, + "nord-aurdal.no" => 0, + "nord-fron.no" => 0, + "nord-odal.no" => 0, + "norddal.no" => 0, + "nordkapp.no" => 0, + "davvenjarga.no" => 0, + "xn--davvenjrga-y4a.no" => 0, + 
"nordre-land.no" => 0, + "nordreisa.no" => 0, + "raisa.no" => 0, + "xn--risa-5na.no" => 0, + "nore-og-uvdal.no" => 0, + "notodden.no" => 0, + "naroy.no" => 0, + "xn--nry-yla5g.no" => 0, + "notteroy.no" => 0, + "xn--nttery-byae.no" => 0, + "odda.no" => 0, + "oksnes.no" => 0, + "xn--ksnes-uua.no" => 0, + "oppdal.no" => 0, + "oppegard.no" => 0, + "xn--oppegrd-ixa.no" => 0, + "orkdal.no" => 0, + "orland.no" => 0, + "xn--rland-uua.no" => 0, + "orskog.no" => 0, + "xn--rskog-uua.no" => 0, + "orsta.no" => 0, + "xn--rsta-fra.no" => 0, + "os.hedmark.no" => 0, + "os.hordaland.no" => 0, + "osen.no" => 0, + "osteroy.no" => 0, + "xn--ostery-fya.no" => 0, + "ostre-toten.no" => 0, + "xn--stre-toten-zcb.no" => 0, + "overhalla.no" => 0, + "ovre-eiker.no" => 0, + "xn--vre-eiker-k8a.no" => 0, + "oyer.no" => 0, + "xn--yer-zna.no" => 0, + "oygarden.no" => 0, + "xn--ygarden-p1a.no" => 0, + "oystre-slidre.no" => 0, + "xn--ystre-slidre-ujb.no" => 0, + "porsanger.no" => 0, + "porsangu.no" => 0, + "xn--porsgu-sta26f.no" => 0, + "porsgrunn.no" => 0, + "radoy.no" => 0, + "xn--rady-ira.no" => 0, + "rakkestad.no" => 0, + "rana.no" => 0, + "ruovat.no" => 0, + "randaberg.no" => 0, + "rauma.no" => 0, + "rendalen.no" => 0, + "rennebu.no" => 0, + "rennesoy.no" => 0, + "xn--rennesy-v1a.no" => 0, + "rindal.no" => 0, + "ringebu.no" => 0, + "ringerike.no" => 0, + "ringsaker.no" => 0, + "rissa.no" => 0, + "risor.no" => 0, + "xn--risr-ira.no" => 0, + "roan.no" => 0, + "rollag.no" => 0, + "rygge.no" => 0, + "ralingen.no" => 0, + "xn--rlingen-mxa.no" => 0, + "rodoy.no" => 0, + "xn--rdy-0nab.no" => 0, + "romskog.no" => 0, + "xn--rmskog-bya.no" => 0, + "roros.no" => 0, + "xn--rros-gra.no" => 0, + "rost.no" => 0, + "xn--rst-0na.no" => 0, + "royken.no" => 0, + "xn--ryken-vua.no" => 0, + "royrvik.no" => 0, + "xn--ryrvik-bya.no" => 0, + "rade.no" => 0, + "xn--rde-ula.no" => 0, + "salangen.no" => 0, + "siellak.no" => 0, + "saltdal.no" => 0, + "salat.no" => 0, + "xn--slt-elab.no" => 0, + "xn--slat-5na.no" => 0, + "samnanger.no" => 0, + "sande.more-og-romsdal.no" => 0, + "sande.xn--mre-og-romsdal-qqb.no" => 0, + "sande.vestfold.no" => 0, + "sandefjord.no" => 0, + "sandnes.no" => 0, + "sandoy.no" => 0, + "xn--sandy-yua.no" => 0, + "sarpsborg.no" => 0, + "sauda.no" => 0, + "sauherad.no" => 0, + "sel.no" => 0, + "selbu.no" => 0, + "selje.no" => 0, + "seljord.no" => 0, + "sigdal.no" => 0, + "siljan.no" => 0, + "sirdal.no" => 0, + "skaun.no" => 0, + "skedsmo.no" => 0, + "ski.no" => 0, + "skien.no" => 0, + "skiptvet.no" => 0, + "skjervoy.no" => 0, + "xn--skjervy-v1a.no" => 0, + "skierva.no" => 0, + "xn--skierv-uta.no" => 0, + "skjak.no" => 0, + "xn--skjk-soa.no" => 0, + "skodje.no" => 0, + "skanland.no" => 0, + "xn--sknland-fxa.no" => 0, + "skanit.no" => 0, + "xn--sknit-yqa.no" => 0, + "smola.no" => 0, + "xn--smla-hra.no" => 0, + "snillfjord.no" => 0, + "snasa.no" => 0, + "xn--snsa-roa.no" => 0, + "snoasa.no" => 0, + "snaase.no" => 0, + "xn--snase-nra.no" => 0, + "sogndal.no" => 0, + "sokndal.no" => 0, + "sola.no" => 0, + "solund.no" => 0, + "songdalen.no" => 0, + "sortland.no" => 0, + "spydeberg.no" => 0, + "stange.no" => 0, + "stavanger.no" => 0, + "steigen.no" => 0, + "steinkjer.no" => 0, + "stjordal.no" => 0, + "xn--stjrdal-s1a.no" => 0, + "stokke.no" => 0, + "stor-elvdal.no" => 0, + "stord.no" => 0, + "stordal.no" => 0, + "storfjord.no" => 0, + "omasvuotna.no" => 0, + "strand.no" => 0, + "stranda.no" => 0, + "stryn.no" => 0, + "sula.no" => 0, + "suldal.no" => 0, + "sund.no" => 0, + "sunndal.no" => 0, + "surnadal.no" => 0, + "sveio.no" => 0, + 
"svelvik.no" => 0, + "sykkylven.no" => 0, + "sogne.no" => 0, + "xn--sgne-gra.no" => 0, + "somna.no" => 0, + "xn--smna-gra.no" => 0, + "sondre-land.no" => 0, + "xn--sndre-land-0cb.no" => 0, + "sor-aurdal.no" => 0, + "xn--sr-aurdal-l8a.no" => 0, + "sor-fron.no" => 0, + "xn--sr-fron-q1a.no" => 0, + "sor-odal.no" => 0, + "xn--sr-odal-q1a.no" => 0, + "sor-varanger.no" => 0, + "xn--sr-varanger-ggb.no" => 0, + "matta-varjjat.no" => 0, + "xn--mtta-vrjjat-k7af.no" => 0, + "sorfold.no" => 0, + "xn--srfold-bya.no" => 0, + "sorreisa.no" => 0, + "xn--srreisa-q1a.no" => 0, + "sorum.no" => 0, + "xn--srum-gra.no" => 0, + "tana.no" => 0, + "deatnu.no" => 0, + "time.no" => 0, + "tingvoll.no" => 0, + "tinn.no" => 0, + "tjeldsund.no" => 0, + "dielddanuorri.no" => 0, + "tjome.no" => 0, + "xn--tjme-hra.no" => 0, + "tokke.no" => 0, + "tolga.no" => 0, + "torsken.no" => 0, + "tranoy.no" => 0, + "xn--trany-yua.no" => 0, + "tromso.no" => 0, + "xn--troms-zua.no" => 0, + "tromsa.no" => 0, + "romsa.no" => 0, + "trondheim.no" => 0, + "troandin.no" => 0, + "trysil.no" => 0, + "trana.no" => 0, + "xn--trna-woa.no" => 0, + "trogstad.no" => 0, + "xn--trgstad-r1a.no" => 0, + "tvedestrand.no" => 0, + "tydal.no" => 0, + "tynset.no" => 0, + "tysfjord.no" => 0, + "divtasvuodna.no" => 0, + "divttasvuotna.no" => 0, + "tysnes.no" => 0, + "tysvar.no" => 0, + "xn--tysvr-vra.no" => 0, + "tonsberg.no" => 0, + "xn--tnsberg-q1a.no" => 0, + "ullensaker.no" => 0, + "ullensvang.no" => 0, + "ulvik.no" => 0, + "utsira.no" => 0, + "vadso.no" => 0, + "xn--vads-jra.no" => 0, + "cahcesuolo.no" => 0, + "xn--hcesuolo-7ya35b.no" => 0, + "vaksdal.no" => 0, + "valle.no" => 0, + "vang.no" => 0, + "vanylven.no" => 0, + "vardo.no" => 0, + "xn--vard-jra.no" => 0, + "varggat.no" => 0, + "xn--vrggt-xqad.no" => 0, + "vefsn.no" => 0, + "vaapste.no" => 0, + "vega.no" => 0, + "vegarshei.no" => 0, + "xn--vegrshei-c0a.no" => 0, + "vennesla.no" => 0, + "verdal.no" => 0, + "verran.no" => 0, + "vestby.no" => 0, + "vestnes.no" => 0, + "vestre-slidre.no" => 0, + "vestre-toten.no" => 0, + "vestvagoy.no" => 0, + "xn--vestvgy-ixa6o.no" => 0, + "vevelstad.no" => 0, + "vik.no" => 0, + "vikna.no" => 0, + "vindafjord.no" => 0, + "volda.no" => 0, + "voss.no" => 0, + "varoy.no" => 0, + "xn--vry-yla5g.no" => 0, + "vagan.no" => 0, + "xn--vgan-qoa.no" => 0, + "voagat.no" => 0, + "vagsoy.no" => 0, + "xn--vgsy-qoa0j.no" => 0, + "vaga.no" => 0, + "xn--vg-yiab.no" => 0, + "valer.ostfold.no" => 0, + "xn--vler-qoa.xn--stfold-9xa.no" => 0, + "valer.hedmark.no" => 0, + "xn--vler-qoa.hedmark.no" => 0, + "np" => -1, + "nr" => 0, + "biz.nr" => 0, + "info.nr" => 0, + "gov.nr" => 0, + "edu.nr" => 0, + "org.nr" => 0, + "net.nr" => 0, + "com.nr" => 0, + "nu" => 0, + "nz" => 0, + "ac.nz" => 0, + "co.nz" => 0, + "cri.nz" => 0, + "geek.nz" => 0, + "gen.nz" => 0, + "govt.nz" => 0, + "health.nz" => 0, + "iwi.nz" => 0, + "kiwi.nz" => 0, + "maori.nz" => 0, + "mil.nz" => 0, + "xn--mori-qsa.nz" => 0, + "net.nz" => 0, + "org.nz" => 0, + "parliament.nz" => 0, + "school.nz" => 0, + "om" => 0, + "co.om" => 0, + "com.om" => 0, + "edu.om" => 0, + "gov.om" => 0, + "med.om" => 0, + "museum.om" => 0, + "net.om" => 0, + "org.om" => 0, + "pro.om" => 0, + "onion" => 0, + "org" => 0, + "pa" => 0, + "ac.pa" => 0, + "gob.pa" => 0, + "com.pa" => 0, + "org.pa" => 0, + "sld.pa" => 0, + "edu.pa" => 0, + "net.pa" => 0, + "ing.pa" => 0, + "abo.pa" => 0, + "med.pa" => 0, + "nom.pa" => 0, + "pe" => 0, + "edu.pe" => 0, + "gob.pe" => 0, + "nom.pe" => 0, + "mil.pe" => 0, + "org.pe" => 0, + "com.pe" => 0, + "net.pe" => 0, + "pf" 
=> 0, + "com.pf" => 0, + "org.pf" => 0, + "edu.pf" => 0, + "pg" => -1, + "ph" => 0, + "com.ph" => 0, + "net.ph" => 0, + "org.ph" => 0, + "gov.ph" => 0, + "edu.ph" => 0, + "ngo.ph" => 0, + "mil.ph" => 0, + "i.ph" => 0, + "pk" => 0, + "com.pk" => 0, + "net.pk" => 0, + "edu.pk" => 0, + "org.pk" => 0, + "fam.pk" => 0, + "biz.pk" => 0, + "web.pk" => 0, + "gov.pk" => 0, + "gob.pk" => 0, + "gok.pk" => 0, + "gon.pk" => 0, + "gop.pk" => 0, + "gos.pk" => 0, + "info.pk" => 0, + "pl" => 0, + "com.pl" => 0, + "net.pl" => 0, + "org.pl" => 0, + "aid.pl" => 0, + "agro.pl" => 0, + "atm.pl" => 0, + "auto.pl" => 0, + "biz.pl" => 0, + "edu.pl" => 0, + "gmina.pl" => 0, + "gsm.pl" => 0, + "info.pl" => 0, + "mail.pl" => 0, + "miasta.pl" => 0, + "media.pl" => 0, + "mil.pl" => 0, + "nieruchomosci.pl" => 0, + "nom.pl" => 0, + "pc.pl" => 0, + "powiat.pl" => 0, + "priv.pl" => 0, + "realestate.pl" => 0, + "rel.pl" => 0, + "sex.pl" => 0, + "shop.pl" => 0, + "sklep.pl" => 0, + "sos.pl" => 0, + "szkola.pl" => 0, + "targi.pl" => 0, + "tm.pl" => 0, + "tourism.pl" => 0, + "travel.pl" => 0, + "turystyka.pl" => 0, + "gov.pl" => 0, + "ap.gov.pl" => 0, + "ic.gov.pl" => 0, + "is.gov.pl" => 0, + "us.gov.pl" => 0, + "kmpsp.gov.pl" => 0, + "kppsp.gov.pl" => 0, + "kwpsp.gov.pl" => 0, + "psp.gov.pl" => 0, + "wskr.gov.pl" => 0, + "kwp.gov.pl" => 0, + "mw.gov.pl" => 0, + "ug.gov.pl" => 0, + "um.gov.pl" => 0, + "umig.gov.pl" => 0, + "ugim.gov.pl" => 0, + "upow.gov.pl" => 0, + "uw.gov.pl" => 0, + "starostwo.gov.pl" => 0, + "pa.gov.pl" => 0, + "po.gov.pl" => 0, + "psse.gov.pl" => 0, + "pup.gov.pl" => 0, + "rzgw.gov.pl" => 0, + "sa.gov.pl" => 0, + "so.gov.pl" => 0, + "sr.gov.pl" => 0, + "wsa.gov.pl" => 0, + "sko.gov.pl" => 0, + "uzs.gov.pl" => 0, + "wiih.gov.pl" => 0, + "winb.gov.pl" => 0, + "pinb.gov.pl" => 0, + "wios.gov.pl" => 0, + "witd.gov.pl" => 0, + "wzmiuw.gov.pl" => 0, + "piw.gov.pl" => 0, + "wiw.gov.pl" => 0, + "griw.gov.pl" => 0, + "wif.gov.pl" => 0, + "oum.gov.pl" => 0, + "sdn.gov.pl" => 0, + "zp.gov.pl" => 0, + "uppo.gov.pl" => 0, + "mup.gov.pl" => 0, + "wuoz.gov.pl" => 0, + "konsulat.gov.pl" => 0, + "oirm.gov.pl" => 0, + "augustow.pl" => 0, + "babia-gora.pl" => 0, + "bedzin.pl" => 0, + "beskidy.pl" => 0, + "bialowieza.pl" => 0, + "bialystok.pl" => 0, + "bielawa.pl" => 0, + "bieszczady.pl" => 0, + "boleslawiec.pl" => 0, + "bydgoszcz.pl" => 0, + "bytom.pl" => 0, + "cieszyn.pl" => 0, + "czeladz.pl" => 0, + "czest.pl" => 0, + "dlugoleka.pl" => 0, + "elblag.pl" => 0, + "elk.pl" => 0, + "glogow.pl" => 0, + "gniezno.pl" => 0, + "gorlice.pl" => 0, + "grajewo.pl" => 0, + "ilawa.pl" => 0, + "jaworzno.pl" => 0, + "jelenia-gora.pl" => 0, + "jgora.pl" => 0, + "kalisz.pl" => 0, + "kazimierz-dolny.pl" => 0, + "karpacz.pl" => 0, + "kartuzy.pl" => 0, + "kaszuby.pl" => 0, + "katowice.pl" => 0, + "kepno.pl" => 0, + "ketrzyn.pl" => 0, + "klodzko.pl" => 0, + "kobierzyce.pl" => 0, + "kolobrzeg.pl" => 0, + "konin.pl" => 0, + "konskowola.pl" => 0, + "kutno.pl" => 0, + "lapy.pl" => 0, + "lebork.pl" => 0, + "legnica.pl" => 0, + "lezajsk.pl" => 0, + "limanowa.pl" => 0, + "lomza.pl" => 0, + "lowicz.pl" => 0, + "lubin.pl" => 0, + "lukow.pl" => 0, + "malbork.pl" => 0, + "malopolska.pl" => 0, + "mazowsze.pl" => 0, + "mazury.pl" => 0, + "mielec.pl" => 0, + "mielno.pl" => 0, + "mragowo.pl" => 0, + "naklo.pl" => 0, + "nowaruda.pl" => 0, + "nysa.pl" => 0, + "olawa.pl" => 0, + "olecko.pl" => 0, + "olkusz.pl" => 0, + "olsztyn.pl" => 0, + "opoczno.pl" => 0, + "opole.pl" => 0, + "ostroda.pl" => 0, + "ostroleka.pl" => 0, + "ostrowiec.pl" => 0, + "ostrowwlkp.pl" => 
0, + "pila.pl" => 0, + "pisz.pl" => 0, + "podhale.pl" => 0, + "podlasie.pl" => 0, + "polkowice.pl" => 0, + "pomorze.pl" => 0, + "pomorskie.pl" => 0, + "prochowice.pl" => 0, + "pruszkow.pl" => 0, + "przeworsk.pl" => 0, + "pulawy.pl" => 0, + "radom.pl" => 0, + "rawa-maz.pl" => 0, + "rybnik.pl" => 0, + "rzeszow.pl" => 0, + "sanok.pl" => 0, + "sejny.pl" => 0, + "slask.pl" => 0, + "slupsk.pl" => 0, + "sosnowiec.pl" => 0, + "stalowa-wola.pl" => 0, + "skoczow.pl" => 0, + "starachowice.pl" => 0, + "stargard.pl" => 0, + "suwalki.pl" => 0, + "swidnica.pl" => 0, + "swiebodzin.pl" => 0, + "swinoujscie.pl" => 0, + "szczecin.pl" => 0, + "szczytno.pl" => 0, + "tarnobrzeg.pl" => 0, + "tgory.pl" => 0, + "turek.pl" => 0, + "tychy.pl" => 0, + "ustka.pl" => 0, + "walbrzych.pl" => 0, + "warmia.pl" => 0, + "warszawa.pl" => 0, + "waw.pl" => 0, + "wegrow.pl" => 0, + "wielun.pl" => 0, + "wlocl.pl" => 0, + "wloclawek.pl" => 0, + "wodzislaw.pl" => 0, + "wolomin.pl" => 0, + "wroclaw.pl" => 0, + "zachpomor.pl" => 0, + "zagan.pl" => 0, + "zarow.pl" => 0, + "zgora.pl" => 0, + "zgorzelec.pl" => 0, + "pm" => 0, + "pn" => 0, + "gov.pn" => 0, + "co.pn" => 0, + "org.pn" => 0, + "edu.pn" => 0, + "net.pn" => 0, + "post" => 0, + "pr" => 0, + "com.pr" => 0, + "net.pr" => 0, + "org.pr" => 0, + "gov.pr" => 0, + "edu.pr" => 0, + "isla.pr" => 0, + "pro.pr" => 0, + "biz.pr" => 0, + "info.pr" => 0, + "name.pr" => 0, + "est.pr" => 0, + "prof.pr" => 0, + "ac.pr" => 0, + "pro" => 0, + "aaa.pro" => 0, + "aca.pro" => 0, + "acct.pro" => 0, + "avocat.pro" => 0, + "bar.pro" => 0, + "cpa.pro" => 0, + "eng.pro" => 0, + "jur.pro" => 0, + "law.pro" => 0, + "med.pro" => 0, + "recht.pro" => 0, + "ps" => 0, + "edu.ps" => 0, + "gov.ps" => 0, + "sec.ps" => 0, + "plo.ps" => 0, + "com.ps" => 0, + "org.ps" => 0, + "net.ps" => 0, + "pt" => 0, + "net.pt" => 0, + "gov.pt" => 0, + "org.pt" => 0, + "edu.pt" => 0, + "int.pt" => 0, + "publ.pt" => 0, + "com.pt" => 0, + "nome.pt" => 0, + "pw" => 0, + "co.pw" => 0, + "ne.pw" => 0, + "or.pw" => 0, + "ed.pw" => 0, + "go.pw" => 0, + "belau.pw" => 0, + "py" => 0, + "com.py" => 0, + "coop.py" => 0, + "edu.py" => 0, + "gov.py" => 0, + "mil.py" => 0, + "net.py" => 0, + "org.py" => 0, + "qa" => 0, + "com.qa" => 0, + "edu.qa" => 0, + "gov.qa" => 0, + "mil.qa" => 0, + "name.qa" => 0, + "net.qa" => 0, + "org.qa" => 0, + "sch.qa" => 0, + "re" => 0, + "asso.re" => 0, + "com.re" => 0, + "nom.re" => 0, + "ro" => 0, + "arts.ro" => 0, + "com.ro" => 0, + "firm.ro" => 0, + "info.ro" => 0, + "nom.ro" => 0, + "nt.ro" => 0, + "org.ro" => 0, + "rec.ro" => 0, + "store.ro" => 0, + "tm.ro" => 0, + "www.ro" => 0, + "rs" => 0, + "ac.rs" => 0, + "co.rs" => 0, + "edu.rs" => 0, + "gov.rs" => 0, + "in.rs" => 0, + "org.rs" => 0, + "ru" => 0, + "ac.ru" => 0, + "edu.ru" => 0, + "gov.ru" => 0, + "int.ru" => 0, + "mil.ru" => 0, + "test.ru" => 0, + "rw" => 0, + "ac.rw" => 0, + "co.rw" => 0, + "coop.rw" => 0, + "gov.rw" => 0, + "mil.rw" => 0, + "net.rw" => 0, + "org.rw" => 0, + "sa" => 0, + "com.sa" => 0, + "net.sa" => 0, + "org.sa" => 0, + "gov.sa" => 0, + "med.sa" => 0, + "pub.sa" => 0, + "edu.sa" => 0, + "sch.sa" => 0, + "sb" => 0, + "com.sb" => 0, + "edu.sb" => 0, + "gov.sb" => 0, + "net.sb" => 0, + "org.sb" => 0, + "sc" => 0, + "com.sc" => 0, + "gov.sc" => 0, + "net.sc" => 0, + "org.sc" => 0, + "edu.sc" => 0, + "sd" => 0, + "com.sd" => 0, + "net.sd" => 0, + "org.sd" => 0, + "edu.sd" => 0, + "med.sd" => 0, + "tv.sd" => 0, + "gov.sd" => 0, + "info.sd" => 0, + "se" => 0, + "a.se" => 0, + "ac.se" => 0, + "b.se" => 0, + "bd.se" => 0, + "brand.se" => 0, 
+ "c.se" => 0, + "d.se" => 0, + "e.se" => 0, + "f.se" => 0, + "fh.se" => 0, + "fhsk.se" => 0, + "fhv.se" => 0, + "g.se" => 0, + "h.se" => 0, + "i.se" => 0, + "k.se" => 0, + "komforb.se" => 0, + "kommunalforbund.se" => 0, + "komvux.se" => 0, + "l.se" => 0, + "lanbib.se" => 0, + "m.se" => 0, + "n.se" => 0, + "naturbruksgymn.se" => 0, + "o.se" => 0, + "org.se" => 0, + "p.se" => 0, + "parti.se" => 0, + "pp.se" => 0, + "press.se" => 0, + "r.se" => 0, + "s.se" => 0, + "t.se" => 0, + "tm.se" => 0, + "u.se" => 0, + "w.se" => 0, + "x.se" => 0, + "y.se" => 0, + "z.se" => 0, + "sg" => 0, + "com.sg" => 0, + "net.sg" => 0, + "org.sg" => 0, + "gov.sg" => 0, + "edu.sg" => 0, + "per.sg" => 0, + "sh" => 0, + "com.sh" => 0, + "net.sh" => 0, + "gov.sh" => 0, + "org.sh" => 0, + "mil.sh" => 0, + "si" => 0, + "sj" => 0, + "sk" => 0, + "sl" => 0, + "com.sl" => 0, + "net.sl" => 0, + "edu.sl" => 0, + "gov.sl" => 0, + "org.sl" => 0, + "sm" => 0, + "sn" => 0, + "art.sn" => 0, + "com.sn" => 0, + "edu.sn" => 0, + "gouv.sn" => 0, + "org.sn" => 0, + "perso.sn" => 0, + "univ.sn" => 0, + "so" => 0, + "com.so" => 0, + "net.so" => 0, + "org.so" => 0, + "sr" => 0, + "st" => 0, + "co.st" => 0, + "com.st" => 0, + "consulado.st" => 0, + "edu.st" => 0, + "embaixada.st" => 0, + "gov.st" => 0, + "mil.st" => 0, + "net.st" => 0, + "org.st" => 0, + "principe.st" => 0, + "saotome.st" => 0, + "store.st" => 0, + "su" => 0, + "sv" => 0, + "com.sv" => 0, + "edu.sv" => 0, + "gob.sv" => 0, + "org.sv" => 0, + "red.sv" => 0, + "sx" => 0, + "gov.sx" => 0, + "sy" => 0, + "edu.sy" => 0, + "gov.sy" => 0, + "net.sy" => 0, + "mil.sy" => 0, + "com.sy" => 0, + "org.sy" => 0, + "sz" => 0, + "co.sz" => 0, + "ac.sz" => 0, + "org.sz" => 0, + "tc" => 0, + "td" => 0, + "tel" => 0, + "tf" => 0, + "tg" => 0, + "th" => 0, + "ac.th" => 0, + "co.th" => 0, + "go.th" => 0, + "in.th" => 0, + "mi.th" => 0, + "net.th" => 0, + "or.th" => 0, + "tj" => 0, + "ac.tj" => 0, + "biz.tj" => 0, + "co.tj" => 0, + "com.tj" => 0, + "edu.tj" => 0, + "go.tj" => 0, + "gov.tj" => 0, + "int.tj" => 0, + "mil.tj" => 0, + "name.tj" => 0, + "net.tj" => 0, + "nic.tj" => 0, + "org.tj" => 0, + "test.tj" => 0, + "web.tj" => 0, + "tk" => 0, + "tl" => 0, + "gov.tl" => 0, + "tm" => 0, + "com.tm" => 0, + "co.tm" => 0, + "org.tm" => 0, + "net.tm" => 0, + "nom.tm" => 0, + "gov.tm" => 0, + "mil.tm" => 0, + "edu.tm" => 0, + "tn" => 0, + "com.tn" => 0, + "ens.tn" => 0, + "fin.tn" => 0, + "gov.tn" => 0, + "ind.tn" => 0, + "intl.tn" => 0, + "nat.tn" => 0, + "net.tn" => 0, + "org.tn" => 0, + "info.tn" => 0, + "perso.tn" => 0, + "tourism.tn" => 0, + "edunet.tn" => 0, + "rnrt.tn" => 0, + "rns.tn" => 0, + "rnu.tn" => 0, + "mincom.tn" => 0, + "agrinet.tn" => 0, + "defense.tn" => 0, + "turen.tn" => 0, + "to" => 0, + "com.to" => 0, + "gov.to" => 0, + "net.to" => 0, + "org.to" => 0, + "edu.to" => 0, + "mil.to" => 0, + "tr" => 0, + "av.tr" => 0, + "bbs.tr" => 0, + "bel.tr" => 0, + "biz.tr" => 0, + "com.tr" => 0, + "dr.tr" => 0, + "edu.tr" => 0, + "gen.tr" => 0, + "gov.tr" => 0, + "info.tr" => 0, + "mil.tr" => 0, + "k12.tr" => 0, + "kep.tr" => 0, + "name.tr" => 0, + "net.tr" => 0, + "org.tr" => 0, + "pol.tr" => 0, + "tel.tr" => 0, + "tsk.tr" => 0, + "tv.tr" => 0, + "web.tr" => 0, + "nc.tr" => 0, + "gov.nc.tr" => 0, + "tt" => 0, + "co.tt" => 0, + "com.tt" => 0, + "org.tt" => 0, + "net.tt" => 0, + "biz.tt" => 0, + "info.tt" => 0, + "pro.tt" => 0, + "int.tt" => 0, + "coop.tt" => 0, + "jobs.tt" => 0, + "mobi.tt" => 0, + "travel.tt" => 0, + "museum.tt" => 0, + "aero.tt" => 0, + "name.tt" => 0, + "gov.tt" => 0, + 
"edu.tt" => 0, + "tv" => 0, + "tw" => 0, + "edu.tw" => 0, + "gov.tw" => 0, + "mil.tw" => 0, + "com.tw" => 0, + "net.tw" => 0, + "org.tw" => 0, + "idv.tw" => 0, + "game.tw" => 0, + "ebiz.tw" => 0, + "club.tw" => 0, + "xn--zf0ao64a.tw" => 0, + "xn--uc0atv.tw" => 0, + "xn--czrw28b.tw" => 0, + "tz" => 0, + "ac.tz" => 0, + "co.tz" => 0, + "go.tz" => 0, + "hotel.tz" => 0, + "info.tz" => 0, + "me.tz" => 0, + "mil.tz" => 0, + "mobi.tz" => 0, + "ne.tz" => 0, + "or.tz" => 0, + "sc.tz" => 0, + "tv.tz" => 0, + "ua" => 0, + "com.ua" => 0, + "edu.ua" => 0, + "gov.ua" => 0, + "in.ua" => 0, + "net.ua" => 0, + "org.ua" => 0, + "cherkassy.ua" => 0, + "cherkasy.ua" => 0, + "chernigov.ua" => 0, + "chernihiv.ua" => 0, + "chernivtsi.ua" => 0, + "chernovtsy.ua" => 0, + "ck.ua" => 0, + "cn.ua" => 0, + "cr.ua" => 0, + "crimea.ua" => 0, + "cv.ua" => 0, + "dn.ua" => 0, + "dnepropetrovsk.ua" => 0, + "dnipropetrovsk.ua" => 0, + "dominic.ua" => 0, + "donetsk.ua" => 0, + "dp.ua" => 0, + "if.ua" => 0, + "ivano-frankivsk.ua" => 0, + "kh.ua" => 0, + "kharkiv.ua" => 0, + "kharkov.ua" => 0, + "kherson.ua" => 0, + "khmelnitskiy.ua" => 0, + "khmelnytskyi.ua" => 0, + "kiev.ua" => 0, + "kirovograd.ua" => 0, + "km.ua" => 0, + "kr.ua" => 0, + "krym.ua" => 0, + "ks.ua" => 0, + "kv.ua" => 0, + "kyiv.ua" => 0, + "lg.ua" => 0, + "lt.ua" => 0, + "lugansk.ua" => 0, + "lutsk.ua" => 0, + "lv.ua" => 0, + "lviv.ua" => 0, + "mk.ua" => 0, + "mykolaiv.ua" => 0, + "nikolaev.ua" => 0, + "od.ua" => 0, + "odesa.ua" => 0, + "odessa.ua" => 0, + "pl.ua" => 0, + "poltava.ua" => 0, + "rivne.ua" => 0, + "rovno.ua" => 0, + "rv.ua" => 0, + "sb.ua" => 0, + "sebastopol.ua" => 0, + "sevastopol.ua" => 0, + "sm.ua" => 0, + "sumy.ua" => 0, + "te.ua" => 0, + "ternopil.ua" => 0, + "uz.ua" => 0, + "uzhgorod.ua" => 0, + "vinnica.ua" => 0, + "vinnytsia.ua" => 0, + "vn.ua" => 0, + "volyn.ua" => 0, + "yalta.ua" => 0, + "zaporizhzhe.ua" => 0, + "zaporizhzhia.ua" => 0, + "zhitomir.ua" => 0, + "zhytomyr.ua" => 0, + "zp.ua" => 0, + "zt.ua" => 0, + "ug" => 0, + "co.ug" => 0, + "or.ug" => 0, + "ac.ug" => 0, + "sc.ug" => 0, + "go.ug" => 0, + "ne.ug" => 0, + "com.ug" => 0, + "org.ug" => 0, + "uk" => 0, + "ac.uk" => 0, + "co.uk" => 0, + "gov.uk" => 0, + "ltd.uk" => 0, + "me.uk" => 0, + "net.uk" => 0, + "nhs.uk" => 0, + "org.uk" => 0, + "plc.uk" => 0, + "police.uk" => 0, + "sch.uk" => -1, + "us" => 0, + "dni.us" => 0, + "fed.us" => 0, + "isa.us" => 0, + "kids.us" => 0, + "nsn.us" => 0, + "ak.us" => 0, + "al.us" => 0, + "ar.us" => 0, + "as.us" => 0, + "az.us" => 0, + "ca.us" => 0, + "co.us" => 0, + "ct.us" => 0, + "dc.us" => 0, + "de.us" => 0, + "fl.us" => 0, + "ga.us" => 0, + "gu.us" => 0, + "hi.us" => 0, + "ia.us" => 0, + "id.us" => 0, + "il.us" => 0, + "in.us" => 0, + "ks.us" => 0, + "ky.us" => 0, + "la.us" => 0, + "ma.us" => 0, + "md.us" => 0, + "me.us" => 0, + "mi.us" => 0, + "mn.us" => 0, + "mo.us" => 0, + "ms.us" => 0, + "mt.us" => 0, + "nc.us" => 0, + "nd.us" => 0, + "ne.us" => 0, + "nh.us" => 0, + "nj.us" => 0, + "nm.us" => 0, + "nv.us" => 0, + "ny.us" => 0, + "oh.us" => 0, + "ok.us" => 0, + "or.us" => 0, + "pa.us" => 0, + "pr.us" => 0, + "ri.us" => 0, + "sc.us" => 0, + "sd.us" => 0, + "tn.us" => 0, + "tx.us" => 0, + "ut.us" => 0, + "vi.us" => 0, + "vt.us" => 0, + "va.us" => 0, + "wa.us" => 0, + "wi.us" => 0, + "wv.us" => 0, + "wy.us" => 0, + "k12.ak.us" => 0, + "k12.al.us" => 0, + "k12.ar.us" => 0, + "k12.as.us" => 0, + "k12.az.us" => 0, + "k12.ca.us" => 0, + "k12.co.us" => 0, + "k12.ct.us" => 0, + "k12.dc.us" => 0, + "k12.de.us" => 0, + "k12.fl.us" => 0, + "k12.ga.us" 
=> 0, + "k12.gu.us" => 0, + "k12.ia.us" => 0, + "k12.id.us" => 0, + "k12.il.us" => 0, + "k12.in.us" => 0, + "k12.ks.us" => 0, + "k12.ky.us" => 0, + "k12.la.us" => 0, + "k12.ma.us" => 0, + "k12.md.us" => 0, + "k12.me.us" => 0, + "k12.mi.us" => 0, + "k12.mn.us" => 0, + "k12.mo.us" => 0, + "k12.ms.us" => 0, + "k12.mt.us" => 0, + "k12.nc.us" => 0, + "k12.ne.us" => 0, + "k12.nh.us" => 0, + "k12.nj.us" => 0, + "k12.nm.us" => 0, + "k12.nv.us" => 0, + "k12.ny.us" => 0, + "k12.oh.us" => 0, + "k12.ok.us" => 0, + "k12.or.us" => 0, + "k12.pa.us" => 0, + "k12.pr.us" => 0, + "k12.ri.us" => 0, + "k12.sc.us" => 0, + "k12.tn.us" => 0, + "k12.tx.us" => 0, + "k12.ut.us" => 0, + "k12.vi.us" => 0, + "k12.vt.us" => 0, + "k12.va.us" => 0, + "k12.wa.us" => 0, + "k12.wi.us" => 0, + "k12.wy.us" => 0, + "cc.ak.us" => 0, + "cc.al.us" => 0, + "cc.ar.us" => 0, + "cc.as.us" => 0, + "cc.az.us" => 0, + "cc.ca.us" => 0, + "cc.co.us" => 0, + "cc.ct.us" => 0, + "cc.dc.us" => 0, + "cc.de.us" => 0, + "cc.fl.us" => 0, + "cc.ga.us" => 0, + "cc.gu.us" => 0, + "cc.hi.us" => 0, + "cc.ia.us" => 0, + "cc.id.us" => 0, + "cc.il.us" => 0, + "cc.in.us" => 0, + "cc.ks.us" => 0, + "cc.ky.us" => 0, + "cc.la.us" => 0, + "cc.ma.us" => 0, + "cc.md.us" => 0, + "cc.me.us" => 0, + "cc.mi.us" => 0, + "cc.mn.us" => 0, + "cc.mo.us" => 0, + "cc.ms.us" => 0, + "cc.mt.us" => 0, + "cc.nc.us" => 0, + "cc.nd.us" => 0, + "cc.ne.us" => 0, + "cc.nh.us" => 0, + "cc.nj.us" => 0, + "cc.nm.us" => 0, + "cc.nv.us" => 0, + "cc.ny.us" => 0, + "cc.oh.us" => 0, + "cc.ok.us" => 0, + "cc.or.us" => 0, + "cc.pa.us" => 0, + "cc.pr.us" => 0, + "cc.ri.us" => 0, + "cc.sc.us" => 0, + "cc.sd.us" => 0, + "cc.tn.us" => 0, + "cc.tx.us" => 0, + "cc.ut.us" => 0, + "cc.vi.us" => 0, + "cc.vt.us" => 0, + "cc.va.us" => 0, + "cc.wa.us" => 0, + "cc.wi.us" => 0, + "cc.wv.us" => 0, + "cc.wy.us" => 0, + "lib.ak.us" => 0, + "lib.al.us" => 0, + "lib.ar.us" => 0, + "lib.as.us" => 0, + "lib.az.us" => 0, + "lib.ca.us" => 0, + "lib.co.us" => 0, + "lib.ct.us" => 0, + "lib.dc.us" => 0, + "lib.fl.us" => 0, + "lib.ga.us" => 0, + "lib.gu.us" => 0, + "lib.hi.us" => 0, + "lib.ia.us" => 0, + "lib.id.us" => 0, + "lib.il.us" => 0, + "lib.in.us" => 0, + "lib.ks.us" => 0, + "lib.ky.us" => 0, + "lib.la.us" => 0, + "lib.ma.us" => 0, + "lib.md.us" => 0, + "lib.me.us" => 0, + "lib.mi.us" => 0, + "lib.mn.us" => 0, + "lib.mo.us" => 0, + "lib.ms.us" => 0, + "lib.mt.us" => 0, + "lib.nc.us" => 0, + "lib.nd.us" => 0, + "lib.ne.us" => 0, + "lib.nh.us" => 0, + "lib.nj.us" => 0, + "lib.nm.us" => 0, + "lib.nv.us" => 0, + "lib.ny.us" => 0, + "lib.oh.us" => 0, + "lib.ok.us" => 0, + "lib.or.us" => 0, + "lib.pa.us" => 0, + "lib.pr.us" => 0, + "lib.ri.us" => 0, + "lib.sc.us" => 0, + "lib.sd.us" => 0, + "lib.tn.us" => 0, + "lib.tx.us" => 0, + "lib.ut.us" => 0, + "lib.vi.us" => 0, + "lib.vt.us" => 0, + "lib.va.us" => 0, + "lib.wa.us" => 0, + "lib.wi.us" => 0, + "lib.wy.us" => 0, + "pvt.k12.ma.us" => 0, + "chtr.k12.ma.us" => 0, + "paroch.k12.ma.us" => 0, + "ann-arbor.mi.us" => 0, + "cog.mi.us" => 0, + "dst.mi.us" => 0, + "eaton.mi.us" => 0, + "gen.mi.us" => 0, + "mus.mi.us" => 0, + "tec.mi.us" => 0, + "washtenaw.mi.us" => 0, + "uy" => 0, + "com.uy" => 0, + "edu.uy" => 0, + "gub.uy" => 0, + "mil.uy" => 0, + "net.uy" => 0, + "org.uy" => 0, + "uz" => 0, + "co.uz" => 0, + "com.uz" => 0, + "net.uz" => 0, + "org.uz" => 0, + "va" => 0, + "vc" => 0, + "com.vc" => 0, + "net.vc" => 0, + "org.vc" => 0, + "gov.vc" => 0, + "mil.vc" => 0, + "edu.vc" => 0, + "ve" => 0, + "arts.ve" => 0, + "co.ve" => 0, + "com.ve" => 0, + "e12.ve" => 0, + 
"edu.ve" => 0, + "firm.ve" => 0, + "gob.ve" => 0, + "gov.ve" => 0, + "info.ve" => 0, + "int.ve" => 0, + "mil.ve" => 0, + "net.ve" => 0, + "org.ve" => 0, + "rec.ve" => 0, + "store.ve" => 0, + "tec.ve" => 0, + "web.ve" => 0, + "vg" => 0, + "vi" => 0, + "co.vi" => 0, + "com.vi" => 0, + "k12.vi" => 0, + "net.vi" => 0, + "org.vi" => 0, + "vn" => 0, + "com.vn" => 0, + "net.vn" => 0, + "org.vn" => 0, + "edu.vn" => 0, + "gov.vn" => 0, + "int.vn" => 0, + "ac.vn" => 0, + "biz.vn" => 0, + "info.vn" => 0, + "name.vn" => 0, + "pro.vn" => 0, + "health.vn" => 0, + "vu" => 0, + "com.vu" => 0, + "edu.vu" => 0, + "net.vu" => 0, + "org.vu" => 0, + "wf" => 0, + "ws" => 0, + "com.ws" => 0, + "net.ws" => 0, + "org.ws" => 0, + "gov.ws" => 0, + "edu.ws" => 0, + "yt" => 0, + "xn--mgbaam7a8h" => 0, + "xn--y9a3aq" => 0, + "xn--54b7fta0cc" => 0, + "xn--90ae" => 0, + "xn--90ais" => 0, + "xn--fiqs8s" => 0, + "xn--fiqz9s" => 0, + "xn--lgbbat1ad8j" => 0, + "xn--wgbh1c" => 0, + "xn--e1a4c" => 0, + "xn--node" => 0, + "xn--qxam" => 0, + "xn--j6w193g" => 0, + "xn--55qx5d.xn--j6w193g" => 0, + "xn--wcvs22d.xn--j6w193g" => 0, + "xn--mxtq1m.xn--j6w193g" => 0, + "xn--gmqw5a.xn--j6w193g" => 0, + "xn--od0alg.xn--j6w193g" => 0, + "xn--uc0atv.xn--j6w193g" => 0, + "xn--2scrj9c" => 0, + "xn--3hcrj9c" => 0, + "xn--45br5cyl" => 0, + "xn--h2breg3eve" => 0, + "xn--h2brj9c8c" => 0, + "xn--mgbgu82a" => 0, + "xn--rvc1e0am3e" => 0, + "xn--h2brj9c" => 0, + "xn--mgbbh1a" => 0, + "xn--mgbbh1a71e" => 0, + "xn--fpcrj9c3d" => 0, + "xn--gecrj9c" => 0, + "xn--s9brj9c" => 0, + "xn--45brj9c" => 0, + "xn--xkc2dl3a5ee0h" => 0, + "xn--mgba3a4f16a" => 0, + "xn--mgba3a4fra" => 0, + "xn--mgbtx2b" => 0, + "xn--mgbayh7gpa" => 0, + "xn--3e0b707e" => 0, + "xn--80ao21a" => 0, + "xn--fzc2c9e2c" => 0, + "xn--xkc2al3hye2a" => 0, + "xn--mgbc0a9azcg" => 0, + "xn--d1alf" => 0, + "xn--l1acc" => 0, + "xn--mix891f" => 0, + "xn--mix082f" => 0, + "xn--mgbx4cd0ab" => 0, + "xn--mgb9awbf" => 0, + "xn--mgbai9azgqp6j" => 0, + "xn--mgbai9a5eva00b" => 0, + "xn--ygbi2ammx" => 0, + "xn--90a3ac" => 0, + "xn--o1ac.xn--90a3ac" => 0, + "xn--c1avg.xn--90a3ac" => 0, + "xn--90azh.xn--90a3ac" => 0, + "xn--d1at.xn--90a3ac" => 0, + "xn--o1ach.xn--90a3ac" => 0, + "xn--80au.xn--90a3ac" => 0, + "xn--p1ai" => 0, + "xn--wgbl6a" => 0, + "xn--mgberp4a5d4ar" => 0, + "xn--mgberp4a5d4a87g" => 0, + "xn--mgbqly7c0a67fbc" => 0, + "xn--mgbqly7cvafr" => 0, + "xn--mgbpl2fh" => 0, + "xn--yfro4i67o" => 0, + "xn--clchc0ea0b2g2a9gcd" => 0, + "xn--ogbpf8fl" => 0, + "xn--mgbtf8fl" => 0, + "xn--o3cw4h" => 0, + "xn--12c1fe0br.xn--o3cw4h" => 0, + "xn--12co0c3b4eva.xn--o3cw4h" => 0, + "xn--h3cuzk1di.xn--o3cw4h" => 0, + "xn--o3cyx2a.xn--o3cw4h" => 0, + "xn--m3ch0j3a.xn--o3cw4h" => 0, + "xn--12cfi8ixb8l.xn--o3cw4h" => 0, + "xn--pgbs0dh" => 0, + "xn--kpry57d" => 0, + "xn--kprw13d" => 0, + "xn--nnx388a" => 0, + "xn--j1amh" => 0, + "xn--mgb2ddes" => 0, + "xxx" => 0, + "ye" => -1, + "za" => 1, + "ac.za" => 0, + "agric.za" => 0, + "alt.za" => 0, + "co.za" => 0, + "edu.za" => 0, + "gov.za" => 0, + "grondar.za" => 0, + "law.za" => 0, + "mil.za" => 0, + "net.za" => 0, + "ngo.za" => 0, + "nis.za" => 0, + "nom.za" => 0, + "org.za" => 0, + "school.za" => 0, + "tm.za" => 0, + "web.za" => 0, + "zm" => 0, + "ac.zm" => 0, + "biz.zm" => 0, + "co.zm" => 0, + "com.zm" => 0, + "edu.zm" => 0, + "gov.zm" => 0, + "info.zm" => 0, + "mil.zm" => 0, + "net.zm" => 0, + "org.zm" => 0, + "sch.zm" => 0, + "zw" => 0, + "ac.zw" => 0, + "co.zw" => 0, + "gov.zw" => 0, + "mil.zw" => 0, + "org.zw" => 0, + "aaa" => 0, + "aarp" => 0, + "abarth" => 0, + "abb" 
=> 0, + "abbott" => 0, + "abbvie" => 0, + "abc" => 0, + "able" => 0, + "abogado" => 0, + "abudhabi" => 0, + "academy" => 0, + "accenture" => 0, + "accountant" => 0, + "accountants" => 0, + "aco" => 0, + "actor" => 0, + "adac" => 0, + "ads" => 0, + "adult" => 0, + "aeg" => 0, + "aetna" => 0, + "afamilycompany" => 0, + "afl" => 0, + "africa" => 0, + "agakhan" => 0, + "agency" => 0, + "aig" => 0, + "aigo" => 0, + "airbus" => 0, + "airforce" => 0, + "airtel" => 0, + "akdn" => 0, + "alfaromeo" => 0, + "alibaba" => 0, + "alipay" => 0, + "allfinanz" => 0, + "allstate" => 0, + "ally" => 0, + "alsace" => 0, + "alstom" => 0, + "americanexpress" => 0, + "americanfamily" => 0, + "amex" => 0, + "amfam" => 0, + "amica" => 0, + "amsterdam" => 0, + "analytics" => 0, + "android" => 0, + "anquan" => 0, + "anz" => 0, + "aol" => 0, + "apartments" => 0, + "app" => 0, + "apple" => 0, + "aquarelle" => 0, + "arab" => 0, + "aramco" => 0, + "archi" => 0, + "army" => 0, + "art" => 0, + "arte" => 0, + "asda" => 0, + "associates" => 0, + "athleta" => 0, + "attorney" => 0, + "auction" => 0, + "audi" => 0, + "audible" => 0, + "audio" => 0, + "auspost" => 0, + "author" => 0, + "auto" => 0, + "autos" => 0, + "avianca" => 0, + "aws" => 0, + "axa" => 0, + "azure" => 0, + "baby" => 0, + "baidu" => 0, + "banamex" => 0, + "bananarepublic" => 0, + "band" => 0, + "bank" => 0, + "bar" => 0, + "barcelona" => 0, + "barclaycard" => 0, + "barclays" => 0, + "barefoot" => 0, + "bargains" => 0, + "baseball" => 0, + "basketball" => 0, + "bauhaus" => 0, + "bayern" => 0, + "bbc" => 0, + "bbt" => 0, + "bbva" => 0, + "bcg" => 0, + "bcn" => 0, + "beats" => 0, + "beauty" => 0, + "beer" => 0, + "bentley" => 0, + "berlin" => 0, + "best" => 0, + "bestbuy" => 0, + "bet" => 0, + "bharti" => 0, + "bible" => 0, + "bid" => 0, + "bike" => 0, + "bing" => 0, + "bingo" => 0, + "bio" => 0, + "black" => 0, + "blackfriday" => 0, + "blockbuster" => 0, + "blog" => 0, + "bloomberg" => 0, + "blue" => 0, + "bms" => 0, + "bmw" => 0, + "bnl" => 0, + "bnpparibas" => 0, + "boats" => 0, + "boehringer" => 0, + "bofa" => 0, + "bom" => 0, + "bond" => 0, + "boo" => 0, + "book" => 0, + "booking" => 0, + "bosch" => 0, + "bostik" => 0, + "boston" => 0, + "bot" => 0, + "boutique" => 0, + "box" => 0, + "bradesco" => 0, + "bridgestone" => 0, + "broadway" => 0, + "broker" => 0, + "brother" => 0, + "brussels" => 0, + "budapest" => 0, + "bugatti" => 0, + "build" => 0, + "builders" => 0, + "business" => 0, + "buy" => 0, + "buzz" => 0, + "bzh" => 0, + "cab" => 0, + "cafe" => 0, + "cal" => 0, + "call" => 0, + "calvinklein" => 0, + "cam" => 0, + "camera" => 0, + "camp" => 0, + "cancerresearch" => 0, + "canon" => 0, + "capetown" => 0, + "capital" => 0, + "capitalone" => 0, + "car" => 0, + "caravan" => 0, + "cards" => 0, + "care" => 0, + "career" => 0, + "careers" => 0, + "cars" => 0, + "cartier" => 0, + "casa" => 0, + "case" => 0, + "caseih" => 0, + "cash" => 0, + "casino" => 0, + "catering" => 0, + "catholic" => 0, + "cba" => 0, + "cbn" => 0, + "cbre" => 0, + "cbs" => 0, + "ceb" => 0, + "center" => 0, + "ceo" => 0, + "cern" => 0, + "cfa" => 0, + "cfd" => 0, + "chanel" => 0, + "channel" => 0, + "charity" => 0, + "chase" => 0, + "chat" => 0, + "cheap" => 0, + "chintai" => 0, + "christmas" => 0, + "chrome" => 0, + "chrysler" => 0, + "church" => 0, + "cipriani" => 0, + "circle" => 0, + "cisco" => 0, + "citadel" => 0, + "citi" => 0, + "citic" => 0, + "city" => 0, + "cityeats" => 0, + "claims" => 0, + "cleaning" => 0, + "click" => 0, + "clinic" => 0, + "clinique" => 0, + "clothing" => 0, + 
"cloud" => 0, + "club" => 0, + "clubmed" => 0, + "coach" => 0, + "codes" => 0, + "coffee" => 0, + "college" => 0, + "cologne" => 0, + "comcast" => 0, + "commbank" => 0, + "community" => 0, + "company" => 0, + "compare" => 0, + "computer" => 0, + "comsec" => 0, + "condos" => 0, + "construction" => 0, + "consulting" => 0, + "contact" => 0, + "contractors" => 0, + "cooking" => 0, + "cookingchannel" => 0, + "cool" => 0, + "corsica" => 0, + "country" => 0, + "coupon" => 0, + "coupons" => 0, + "courses" => 0, + "cpa" => 0, + "credit" => 0, + "creditcard" => 0, + "creditunion" => 0, + "cricket" => 0, + "crown" => 0, + "crs" => 0, + "cruise" => 0, + "cruises" => 0, + "csc" => 0, + "cuisinella" => 0, + "cymru" => 0, + "cyou" => 0, + "dabur" => 0, + "dad" => 0, + "dance" => 0, + "data" => 0, + "date" => 0, + "dating" => 0, + "datsun" => 0, + "day" => 0, + "dclk" => 0, + "dds" => 0, + "deal" => 0, + "dealer" => 0, + "deals" => 0, + "degree" => 0, + "delivery" => 0, + "dell" => 0, + "deloitte" => 0, + "delta" => 0, + "democrat" => 0, + "dental" => 0, + "dentist" => 0, + "desi" => 0, + "design" => 0, + "dev" => 0, + "dhl" => 0, + "diamonds" => 0, + "diet" => 0, + "digital" => 0, + "direct" => 0, + "directory" => 0, + "discount" => 0, + "discover" => 0, + "dish" => 0, + "diy" => 0, + "dnp" => 0, + "docs" => 0, + "doctor" => 0, + "dodge" => 0, + "dog" => 0, + "domains" => 0, + "dot" => 0, + "download" => 0, + "drive" => 0, + "dtv" => 0, + "dubai" => 0, + "duck" => 0, + "dunlop" => 0, + "duns" => 0, + "dupont" => 0, + "durban" => 0, + "dvag" => 0, + "dvr" => 0, + "earth" => 0, + "eat" => 0, + "eco" => 0, + "edeka" => 0, + "education" => 0, + "email" => 0, + "emerck" => 0, + "energy" => 0, + "engineer" => 0, + "engineering" => 0, + "enterprises" => 0, + "epson" => 0, + "equipment" => 0, + "ericsson" => 0, + "erni" => 0, + "esq" => 0, + "estate" => 0, + "esurance" => 0, + "etisalat" => 0, + "eurovision" => 0, + "eus" => 0, + "events" => 0, + "everbank" => 0, + "exchange" => 0, + "expert" => 0, + "exposed" => 0, + "express" => 0, + "extraspace" => 0, + "fage" => 0, + "fail" => 0, + "fairwinds" => 0, + "faith" => 0, + "family" => 0, + "fan" => 0, + "fans" => 0, + "farm" => 0, + "farmers" => 0, + "fashion" => 0, + "fast" => 0, + "fedex" => 0, + "feedback" => 0, + "ferrari" => 0, + "ferrero" => 0, + "fiat" => 0, + "fidelity" => 0, + "fido" => 0, + "film" => 0, + "final" => 0, + "finance" => 0, + "financial" => 0, + "fire" => 0, + "firestone" => 0, + "firmdale" => 0, + "fish" => 0, + "fishing" => 0, + "fit" => 0, + "fitness" => 0, + "flickr" => 0, + "flights" => 0, + "flir" => 0, + "florist" => 0, + "flowers" => 0, + "fly" => 0, + "foo" => 0, + "food" => 0, + "foodnetwork" => 0, + "football" => 0, + "ford" => 0, + "forex" => 0, + "forsale" => 0, + "forum" => 0, + "foundation" => 0, + "fox" => 0, + "free" => 0, + "fresenius" => 0, + "frl" => 0, + "frogans" => 0, + "frontdoor" => 0, + "frontier" => 0, + "ftr" => 0, + "fujitsu" => 0, + "fujixerox" => 0, + "fun" => 0, + "fund" => 0, + "furniture" => 0, + "futbol" => 0, + "fyi" => 0, + "gal" => 0, + "gallery" => 0, + "gallo" => 0, + "gallup" => 0, + "game" => 0, + "games" => 0, + "gap" => 0, + "garden" => 0, + "gay" => 0, + "gbiz" => 0, + "gdn" => 0, + "gea" => 0, + "gent" => 0, + "genting" => 0, + "george" => 0, + "ggee" => 0, + "gift" => 0, + "gifts" => 0, + "gives" => 0, + "giving" => 0, + "glade" => 0, + "glass" => 0, + "gle" => 0, + "global" => 0, + "globo" => 0, + "gmail" => 0, + "gmbh" => 0, + "gmo" => 0, + "gmx" => 0, + "godaddy" => 0, + "gold" => 0, + 
"goldpoint" => 0, + "golf" => 0, + "goo" => 0, + "goodyear" => 0, + "goog" => 0, + "google" => 0, + "gop" => 0, + "got" => 0, + "grainger" => 0, + "graphics" => 0, + "gratis" => 0, + "green" => 0, + "gripe" => 0, + "grocery" => 0, + "group" => 0, + "guardian" => 0, + "gucci" => 0, + "guge" => 0, + "guide" => 0, + "guitars" => 0, + "guru" => 0, + "hair" => 0, + "hamburg" => 0, + "hangout" => 0, + "haus" => 0, + "hbo" => 0, + "hdfc" => 0, + "hdfcbank" => 0, + "health" => 0, + "healthcare" => 0, + "help" => 0, + "helsinki" => 0, + "here" => 0, + "hermes" => 0, + "hgtv" => 0, + "hiphop" => 0, + "hisamitsu" => 0, + "hitachi" => 0, + "hiv" => 0, + "hkt" => 0, + "hockey" => 0, + "holdings" => 0, + "holiday" => 0, + "homedepot" => 0, + "homegoods" => 0, + "homes" => 0, + "homesense" => 0, + "honda" => 0, + "honeywell" => 0, + "horse" => 0, + "hospital" => 0, + "host" => 0, + "hosting" => 0, + "hot" => 0, + "hoteles" => 0, + "hotels" => 0, + "hotmail" => 0, + "house" => 0, + "how" => 0, + "hsbc" => 0, + "hughes" => 0, + "hyatt" => 0, + "hyundai" => 0, + "ibm" => 0, + "icbc" => 0, + "ice" => 0, + "icu" => 0, + "ieee" => 0, + "ifm" => 0, + "ikano" => 0, + "imamat" => 0, + "imdb" => 0, + "immo" => 0, + "immobilien" => 0, + "inc" => 0, + "industries" => 0, + "infiniti" => 0, + "ing" => 0, + "ink" => 0, + "institute" => 0, + "insurance" => 0, + "insure" => 0, + "intel" => 0, + "international" => 0, + "intuit" => 0, + "investments" => 0, + "ipiranga" => 0, + "irish" => 0, + "iselect" => 0, + "ismaili" => 0, + "ist" => 0, + "istanbul" => 0, + "itau" => 0, + "itv" => 0, + "iveco" => 0, + "jaguar" => 0, + "java" => 0, + "jcb" => 0, + "jcp" => 0, + "jeep" => 0, + "jetzt" => 0, + "jewelry" => 0, + "jio" => 0, + "jll" => 0, + "jmp" => 0, + "jnj" => 0, + "joburg" => 0, + "jot" => 0, + "joy" => 0, + "jpmorgan" => 0, + "jprs" => 0, + "juegos" => 0, + "juniper" => 0, + "kaufen" => 0, + "kddi" => 0, + "kerryhotels" => 0, + "kerrylogistics" => 0, + "kerryproperties" => 0, + "kfh" => 0, + "kia" => 0, + "kim" => 0, + "kinder" => 0, + "kindle" => 0, + "kitchen" => 0, + "kiwi" => 0, + "koeln" => 0, + "komatsu" => 0, + "kosher" => 0, + "kpmg" => 0, + "kpn" => 0, + "krd" => 0, + "kred" => 0, + "kuokgroup" => 0, + "kyoto" => 0, + "lacaixa" => 0, + "ladbrokes" => 0, + "lamborghini" => 0, + "lamer" => 0, + "lancaster" => 0, + "lancia" => 0, + "lancome" => 0, + "land" => 0, + "landrover" => 0, + "lanxess" => 0, + "lasalle" => 0, + "lat" => 0, + "latino" => 0, + "latrobe" => 0, + "law" => 0, + "lawyer" => 0, + "lds" => 0, + "lease" => 0, + "leclerc" => 0, + "lefrak" => 0, + "legal" => 0, + "lego" => 0, + "lexus" => 0, + "lgbt" => 0, + "liaison" => 0, + "lidl" => 0, + "life" => 0, + "lifeinsurance" => 0, + "lifestyle" => 0, + "lighting" => 0, + "like" => 0, + "lilly" => 0, + "limited" => 0, + "limo" => 0, + "lincoln" => 0, + "linde" => 0, + "link" => 0, + "lipsy" => 0, + "live" => 0, + "living" => 0, + "lixil" => 0, + "llc" => 0, + "loan" => 0, + "loans" => 0, + "locker" => 0, + "locus" => 0, + "loft" => 0, + "lol" => 0, + "london" => 0, + "lotte" => 0, + "lotto" => 0, + "love" => 0, + "lpl" => 0, + "lplfinancial" => 0, + "ltd" => 0, + "ltda" => 0, + "lundbeck" => 0, + "lupin" => 0, + "luxe" => 0, + "luxury" => 0, + "macys" => 0, + "madrid" => 0, + "maif" => 0, + "maison" => 0, + "makeup" => 0, + "man" => 0, + "management" => 0, + "mango" => 0, + "map" => 0, + "market" => 0, + "marketing" => 0, + "markets" => 0, + "marriott" => 0, + "marshalls" => 0, + "maserati" => 0, + "mattel" => 0, + "mba" => 0, + "mckinsey" => 0, + "med" 
=> 0, + "media" => 0, + "meet" => 0, + "melbourne" => 0, + "meme" => 0, + "memorial" => 0, + "men" => 0, + "menu" => 0, + "merckmsd" => 0, + "metlife" => 0, + "miami" => 0, + "microsoft" => 0, + "mini" => 0, + "mint" => 0, + "mit" => 0, + "mitsubishi" => 0, + "mlb" => 0, + "mls" => 0, + "mma" => 0, + "mobile" => 0, + "mobily" => 0, + "moda" => 0, + "moe" => 0, + "moi" => 0, + "mom" => 0, + "monash" => 0, + "money" => 0, + "monster" => 0, + "mopar" => 0, + "mormon" => 0, + "mortgage" => 0, + "moscow" => 0, + "moto" => 0, + "motorcycles" => 0, + "mov" => 0, + "movie" => 0, + "movistar" => 0, + "msd" => 0, + "mtn" => 0, + "mtr" => 0, + "mutual" => 0, + "nab" => 0, + "nadex" => 0, + "nagoya" => 0, + "nationwide" => 0, + "natura" => 0, + "navy" => 0, + "nba" => 0, + "nec" => 0, + "netbank" => 0, + "netflix" => 0, + "network" => 0, + "neustar" => 0, + "new" => 0, + "newholland" => 0, + "news" => 0, + "next" => 0, + "nextdirect" => 0, + "nexus" => 0, + "nfl" => 0, + "ngo" => 0, + "nhk" => 0, + "nico" => 0, + "nike" => 0, + "nikon" => 0, + "ninja" => 0, + "nissan" => 0, + "nissay" => 0, + "nokia" => 0, + "northwesternmutual" => 0, + "norton" => 0, + "now" => 0, + "nowruz" => 0, + "nowtv" => 0, + "nra" => 0, + "nrw" => 0, + "ntt" => 0, + "nyc" => 0, + "obi" => 0, + "observer" => 0, + "off" => 0, + "office" => 0, + "okinawa" => 0, + "olayan" => 0, + "olayangroup" => 0, + "oldnavy" => 0, + "ollo" => 0, + "omega" => 0, + "one" => 0, + "ong" => 0, + "onl" => 0, + "online" => 0, + "onyourside" => 0, + "ooo" => 0, + "open" => 0, + "oracle" => 0, + "orange" => 0, + "organic" => 0, + "origins" => 0, + "osaka" => 0, + "otsuka" => 0, + "ott" => 0, + "ovh" => 0, + "page" => 0, + "panasonic" => 0, + "paris" => 0, + "pars" => 0, + "partners" => 0, + "parts" => 0, + "party" => 0, + "passagens" => 0, + "pay" => 0, + "pccw" => 0, + "pet" => 0, + "pfizer" => 0, + "pharmacy" => 0, + "phd" => 0, + "philips" => 0, + "phone" => 0, + "photo" => 0, + "photography" => 0, + "photos" => 0, + "physio" => 0, + "piaget" => 0, + "pics" => 0, + "pictet" => 0, + "pictures" => 0, + "pid" => 0, + "pin" => 0, + "ping" => 0, + "pink" => 0, + "pioneer" => 0, + "pizza" => 0, + "place" => 0, + "play" => 0, + "playstation" => 0, + "plumbing" => 0, + "plus" => 0, + "pnc" => 0, + "pohl" => 0, + "poker" => 0, + "politie" => 0, + "porn" => 0, + "pramerica" => 0, + "praxi" => 0, + "press" => 0, + "prime" => 0, + "prod" => 0, + "productions" => 0, + "prof" => 0, + "progressive" => 0, + "promo" => 0, + "properties" => 0, + "property" => 0, + "protection" => 0, + "pru" => 0, + "prudential" => 0, + "pub" => 0, + "pwc" => 0, + "qpon" => 0, + "quebec" => 0, + "quest" => 0, + "qvc" => 0, + "racing" => 0, + "radio" => 0, + "raid" => 0, + "read" => 0, + "realestate" => 0, + "realtor" => 0, + "realty" => 0, + "recipes" => 0, + "red" => 0, + "redstone" => 0, + "redumbrella" => 0, + "rehab" => 0, + "reise" => 0, + "reisen" => 0, + "reit" => 0, + "reliance" => 0, + "ren" => 0, + "rent" => 0, + "rentals" => 0, + "repair" => 0, + "report" => 0, + "republican" => 0, + "rest" => 0, + "restaurant" => 0, + "review" => 0, + "reviews" => 0, + "rexroth" => 0, + "rich" => 0, + "richardli" => 0, + "ricoh" => 0, + "rightathome" => 0, + "ril" => 0, + "rio" => 0, + "rip" => 0, + "rmit" => 0, + "rocher" => 0, + "rocks" => 0, + "rodeo" => 0, + "rogers" => 0, + "room" => 0, + "rsvp" => 0, + "rugby" => 0, + "ruhr" => 0, + "run" => 0, + "rwe" => 0, + "ryukyu" => 0, + "saarland" => 0, + "safe" => 0, + "safety" => 0, + "sakura" => 0, + "sale" => 0, + "salon" => 0, + 
"samsclub" => 0, + "samsung" => 0, + "sandvik" => 0, + "sandvikcoromant" => 0, + "sanofi" => 0, + "sap" => 0, + "sarl" => 0, + "sas" => 0, + "save" => 0, + "saxo" => 0, + "sbi" => 0, + "sbs" => 0, + "sca" => 0, + "scb" => 0, + "schaeffler" => 0, + "schmidt" => 0, + "scholarships" => 0, + "school" => 0, + "schule" => 0, + "schwarz" => 0, + "science" => 0, + "scjohnson" => 0, + "scor" => 0, + "scot" => 0, + "search" => 0, + "seat" => 0, + "secure" => 0, + "security" => 0, + "seek" => 0, + "select" => 0, + "sener" => 0, + "services" => 0, + "ses" => 0, + "seven" => 0, + "sew" => 0, + "sex" => 0, + "sexy" => 0, + "sfr" => 0, + "shangrila" => 0, + "sharp" => 0, + "shaw" => 0, + "shell" => 0, + "shia" => 0, + "shiksha" => 0, + "shoes" => 0, + "shop" => 0, + "shopping" => 0, + "shouji" => 0, + "show" => 0, + "showtime" => 0, + "shriram" => 0, + "silk" => 0, + "sina" => 0, + "singles" => 0, + "site" => 0, + "ski" => 0, + "skin" => 0, + "sky" => 0, + "skype" => 0, + "sling" => 0, + "smart" => 0, + "smile" => 0, + "sncf" => 0, + "soccer" => 0, + "social" => 0, + "softbank" => 0, + "software" => 0, + "sohu" => 0, + "solar" => 0, + "solutions" => 0, + "song" => 0, + "sony" => 0, + "soy" => 0, + "space" => 0, + "sport" => 0, + "spot" => 0, + "spreadbetting" => 0, + "srl" => 0, + "srt" => 0, + "stada" => 0, + "staples" => 0, + "star" => 0, + "starhub" => 0, + "statebank" => 0, + "statefarm" => 0, + "stc" => 0, + "stcgroup" => 0, + "stockholm" => 0, + "storage" => 0, + "store" => 0, + "stream" => 0, + "studio" => 0, + "study" => 0, + "style" => 0, + "sucks" => 0, + "supplies" => 0, + "supply" => 0, + "support" => 0, + "surf" => 0, + "surgery" => 0, + "suzuki" => 0, + "swatch" => 0, + "swiftcover" => 0, + "swiss" => 0, + "sydney" => 0, + "symantec" => 0, + "systems" => 0, + "tab" => 0, + "taipei" => 0, + "talk" => 0, + "taobao" => 0, + "target" => 0, + "tatamotors" => 0, + "tatar" => 0, + "tattoo" => 0, + "tax" => 0, + "taxi" => 0, + "tci" => 0, + "tdk" => 0, + "team" => 0, + "tech" => 0, + "technology" => 0, + "telefonica" => 0, + "temasek" => 0, + "tennis" => 0, + "teva" => 0, + "thd" => 0, + "theater" => 0, + "theatre" => 0, + "tiaa" => 0, + "tickets" => 0, + "tienda" => 0, + "tiffany" => 0, + "tips" => 0, + "tires" => 0, + "tirol" => 0, + "tjmaxx" => 0, + "tjx" => 0, + "tkmaxx" => 0, + "tmall" => 0, + "today" => 0, + "tokyo" => 0, + "tools" => 0, + "top" => 0, + "toray" => 0, + "toshiba" => 0, + "total" => 0, + "tours" => 0, + "town" => 0, + "toyota" => 0, + "toys" => 0, + "trade" => 0, + "trading" => 0, + "training" => 0, + "travel" => 0, + "travelchannel" => 0, + "travelers" => 0, + "travelersinsurance" => 0, + "trust" => 0, + "trv" => 0, + "tube" => 0, + "tui" => 0, + "tunes" => 0, + "tushu" => 0, + "tvs" => 0, + "ubank" => 0, + "ubs" => 0, + "uconnect" => 0, + "unicom" => 0, + "university" => 0, + "uno" => 0, + "uol" => 0, + "ups" => 0, + "vacations" => 0, + "vana" => 0, + "vanguard" => 0, + "vegas" => 0, + "ventures" => 0, + "verisign" => 0, + "versicherung" => 0, + "vet" => 0, + "viajes" => 0, + "video" => 0, + "vig" => 0, + "viking" => 0, + "villas" => 0, + "vin" => 0, + "vip" => 0, + "virgin" => 0, + "visa" => 0, + "vision" => 0, + "vistaprint" => 0, + "viva" => 0, + "vivo" => 0, + "vlaanderen" => 0, + "vodka" => 0, + "volkswagen" => 0, + "volvo" => 0, + "vote" => 0, + "voting" => 0, + "voto" => 0, + "voyage" => 0, + "vuelos" => 0, + "wales" => 0, + "walmart" => 0, + "walter" => 0, + "wang" => 0, + "wanggou" => 0, + "warman" => 0, + "watch" => 0, + "watches" => 0, + "weather" => 0, + 
"weatherchannel" => 0, + "webcam" => 0, + "weber" => 0, + "website" => 0, + "wed" => 0, + "wedding" => 0, + "weibo" => 0, + "weir" => 0, + "whoswho" => 0, + "wien" => 0, + "wiki" => 0, + "williamhill" => 0, + "win" => 0, + "windows" => 0, + "wine" => 0, + "winners" => 0, + "wme" => 0, + "wolterskluwer" => 0, + "woodside" => 0, + "work" => 0, + "works" => 0, + "world" => 0, + "wow" => 0, + "wtc" => 0, + "wtf" => 0, + "xbox" => 0, + "xerox" => 0, + "xfinity" => 0, + "xihuan" => 0, + "xin" => 0, + "xn--11b4c3d" => 0, + "xn--1ck2e1b" => 0, + "xn--1qqw23a" => 0, + "xn--30rr7y" => 0, + "xn--3bst00m" => 0, + "xn--3ds443g" => 0, + "xn--3oq18vl8pn36a" => 0, + "xn--3pxu8k" => 0, + "xn--42c2d9a" => 0, + "xn--45q11c" => 0, + "xn--4gbrim" => 0, + "xn--55qw42g" => 0, + "xn--55qx5d" => 0, + "xn--5su34j936bgsg" => 0, + "xn--5tzm5g" => 0, + "xn--6frz82g" => 0, + "xn--6qq986b3xl" => 0, + "xn--80adxhks" => 0, + "xn--80aqecdr1a" => 0, + "xn--80asehdb" => 0, + "xn--80aswg" => 0, + "xn--8y0a063a" => 0, + "xn--9dbq2a" => 0, + "xn--9et52u" => 0, + "xn--9krt00a" => 0, + "xn--b4w605ferd" => 0, + "xn--bck1b9a5dre4c" => 0, + "xn--c1avg" => 0, + "xn--c2br7g" => 0, + "xn--cck2b3b" => 0, + "xn--cg4bki" => 0, + "xn--czr694b" => 0, + "xn--czrs0t" => 0, + "xn--czru2d" => 0, + "xn--d1acj3b" => 0, + "xn--eckvdtc9d" => 0, + "xn--efvy88h" => 0, + "xn--estv75g" => 0, + "xn--fct429k" => 0, + "xn--fhbei" => 0, + "xn--fiq228c5hs" => 0, + "xn--fiq64b" => 0, + "xn--fjq720a" => 0, + "xn--flw351e" => 0, + "xn--fzys8d69uvgm" => 0, + "xn--g2xx48c" => 0, + "xn--gckr3f0f" => 0, + "xn--gk3at1e" => 0, + "xn--hxt814e" => 0, + "xn--i1b6b1a6a2e" => 0, + "xn--imr513n" => 0, + "xn--io0a7i" => 0, + "xn--j1aef" => 0, + "xn--jlq61u9w7b" => 0, + "xn--jvr189m" => 0, + "xn--kcrx77d1x4a" => 0, + "xn--kpu716f" => 0, + "xn--kput3i" => 0, + "xn--mgba3a3ejt" => 0, + "xn--mgba7c0bbn0a" => 0, + "xn--mgbaakc7dvf" => 0, + "xn--mgbab2bd" => 0, + "xn--mgbb9fbpob" => 0, + "xn--mgbca7dzdo" => 0, + "xn--mgbi4ecexp" => 0, + "xn--mgbt3dhd" => 0, + "xn--mk1bu44c" => 0, + "xn--mxtq1m" => 0, + "xn--ngbc5azd" => 0, + "xn--ngbe9e0a" => 0, + "xn--ngbrx" => 0, + "xn--nqv7f" => 0, + "xn--nqv7fs00ema" => 0, + "xn--nyqy26a" => 0, + "xn--otu796d" => 0, + "xn--p1acf" => 0, + "xn--pbt977c" => 0, + "xn--pssy2u" => 0, + "xn--q9jyb4c" => 0, + "xn--qcka1pmc" => 0, + "xn--rhqv96g" => 0, + "xn--rovu88b" => 0, + "xn--ses554g" => 0, + "xn--t60b56a" => 0, + "xn--tckwe" => 0, + "xn--tiq49xqyj" => 0, + "xn--unup4y" => 0, + "xn--vermgensberater-ctb" => 0, + "xn--vermgensberatung-pwb" => 0, + "xn--vhquv" => 0, + "xn--vuq861b" => 0, + "xn--w4r85el8fhu5dnra" => 0, + "xn--w4rs40l" => 0, + "xn--xhq521b" => 0, + "xn--zfr164b" => 0, + "xyz" => 0, + "yachts" => 0, + "yahoo" => 0, + "yamaxun" => 0, + "yandex" => 0, + "yodobashi" => 0, + "yoga" => 0, + "yokohama" => 0, + "you" => 0, + "youtube" => 0, + "yun" => 0, + "zappos" => 0, + "zara" => 0, + "zero" => 0, + "zip" => 0, + "zone" => 0, + "zuerich" => 0, + "cc.ua" => 0, + "inf.ua" => 0, + "ltd.ua" => 0, + "beep.pl" => 0, + "barsy.ca" => 0, + "compute.estate" => -1, + "alces.network" => -1, + "alwaysdata.net" => 0, + "cloudfront.net" => 0, + "compute.amazonaws.com" => -1, + "compute-1.amazonaws.com" => -1, + "compute.amazonaws.com.cn" => -1, + "us-east-1.amazonaws.com" => 0, + "cn-north-1.eb.amazonaws.com.cn" => 0, + "cn-northwest-1.eb.amazonaws.com.cn" => 0, + "elasticbeanstalk.com" => 0, + "ap-northeast-1.elasticbeanstalk.com" => 0, + "ap-northeast-2.elasticbeanstalk.com" => 0, + "ap-northeast-3.elasticbeanstalk.com" => 0, + 
"ap-south-1.elasticbeanstalk.com" => 0, + "ap-southeast-1.elasticbeanstalk.com" => 0, + "ap-southeast-2.elasticbeanstalk.com" => 0, + "ca-central-1.elasticbeanstalk.com" => 0, + "eu-central-1.elasticbeanstalk.com" => 0, + "eu-west-1.elasticbeanstalk.com" => 0, + "eu-west-2.elasticbeanstalk.com" => 0, + "eu-west-3.elasticbeanstalk.com" => 0, + "sa-east-1.elasticbeanstalk.com" => 0, + "us-east-1.elasticbeanstalk.com" => 0, + "us-east-2.elasticbeanstalk.com" => 0, + "us-gov-west-1.elasticbeanstalk.com" => 0, + "us-west-1.elasticbeanstalk.com" => 0, + "us-west-2.elasticbeanstalk.com" => 0, + "elb.amazonaws.com" => -1, + "elb.amazonaws.com.cn" => -1, + "s3.amazonaws.com" => 0, + "s3-ap-northeast-1.amazonaws.com" => 0, + "s3-ap-northeast-2.amazonaws.com" => 0, + "s3-ap-south-1.amazonaws.com" => 0, + "s3-ap-southeast-1.amazonaws.com" => 0, + "s3-ap-southeast-2.amazonaws.com" => 0, + "s3-ca-central-1.amazonaws.com" => 0, + "s3-eu-central-1.amazonaws.com" => 0, + "s3-eu-west-1.amazonaws.com" => 0, + "s3-eu-west-2.amazonaws.com" => 0, + "s3-eu-west-3.amazonaws.com" => 0, + "s3-external-1.amazonaws.com" => 0, + "s3-fips-us-gov-west-1.amazonaws.com" => 0, + "s3-sa-east-1.amazonaws.com" => 0, + "s3-us-gov-west-1.amazonaws.com" => 0, + "s3-us-east-2.amazonaws.com" => 0, + "s3-us-west-1.amazonaws.com" => 0, + "s3-us-west-2.amazonaws.com" => 0, + "s3.ap-northeast-2.amazonaws.com" => 0, + "s3.ap-south-1.amazonaws.com" => 0, + "s3.cn-north-1.amazonaws.com.cn" => 0, + "s3.ca-central-1.amazonaws.com" => 0, + "s3.eu-central-1.amazonaws.com" => 0, + "s3.eu-west-2.amazonaws.com" => 0, + "s3.eu-west-3.amazonaws.com" => 0, + "s3.us-east-2.amazonaws.com" => 0, + "s3.dualstack.ap-northeast-1.amazonaws.com" => 0, + "s3.dualstack.ap-northeast-2.amazonaws.com" => 0, + "s3.dualstack.ap-south-1.amazonaws.com" => 0, + "s3.dualstack.ap-southeast-1.amazonaws.com" => 0, + "s3.dualstack.ap-southeast-2.amazonaws.com" => 0, + "s3.dualstack.ca-central-1.amazonaws.com" => 0, + "s3.dualstack.eu-central-1.amazonaws.com" => 0, + "s3.dualstack.eu-west-1.amazonaws.com" => 0, + "s3.dualstack.eu-west-2.amazonaws.com" => 0, + "s3.dualstack.eu-west-3.amazonaws.com" => 0, + "s3.dualstack.sa-east-1.amazonaws.com" => 0, + "s3.dualstack.us-east-1.amazonaws.com" => 0, + "s3.dualstack.us-east-2.amazonaws.com" => 0, + "s3-website-us-east-1.amazonaws.com" => 0, + "s3-website-us-west-1.amazonaws.com" => 0, + "s3-website-us-west-2.amazonaws.com" => 0, + "s3-website-ap-northeast-1.amazonaws.com" => 0, + "s3-website-ap-southeast-1.amazonaws.com" => 0, + "s3-website-ap-southeast-2.amazonaws.com" => 0, + "s3-website-eu-west-1.amazonaws.com" => 0, + "s3-website-sa-east-1.amazonaws.com" => 0, + "s3-website.ap-northeast-2.amazonaws.com" => 0, + "s3-website.ap-south-1.amazonaws.com" => 0, + "s3-website.ca-central-1.amazonaws.com" => 0, + "s3-website.eu-central-1.amazonaws.com" => 0, + "s3-website.eu-west-2.amazonaws.com" => 0, + "s3-website.eu-west-3.amazonaws.com" => 0, + "s3-website.us-east-2.amazonaws.com" => 0, + "t3l3p0rt.net" => 0, + "tele.amune.org" => 0, + "apigee.io" => 0, + "on-aptible.com" => 0, + "user.party.eus" => 0, + "pimienta.org" => 0, + "poivron.org" => 0, + "potager.org" => 0, + "sweetpepper.org" => 0, + "myasustor.com" => 0, + "go-vip.co" => 0, + "go-vip.net" => 0, + "wpcomstaging.com" => 0, + "myfritz.net" => 0, + "awdev.ca" => -1, + "advisor.ws" => -1, + "b-data.io" => 0, + "backplaneapp.io" => 0, + "balena-devices.com" => 0, + "app.banzaicloud.io" => 0, + "betainabox.com" => 0, + "bnr.la" => 0, + "blackbaudcdn.net" => 0, + 
"boomla.net" => 0, + "boxfuse.io" => 0, + "square7.ch" => 0, + "bplaced.com" => 0, + "bplaced.de" => 0, + "square7.de" => 0, + "bplaced.net" => 0, + "square7.net" => 0, + "browsersafetymark.io" => 0, + "uk0.bigv.io" => 0, + "dh.bytemark.co.uk" => 0, + "vm.bytemark.co.uk" => 0, + "mycd.eu" => 0, + "carrd.co" => 0, + "crd.co" => 0, + "uwu.ai" => 0, + "ae.org" => 0, + "ar.com" => 0, + "br.com" => 0, + "cn.com" => 0, + "com.de" => 0, + "com.se" => 0, + "de.com" => 0, + "eu.com" => 0, + "gb.com" => 0, + "gb.net" => 0, + "hu.com" => 0, + "hu.net" => 0, + "jp.net" => 0, + "jpn.com" => 0, + "kr.com" => 0, + "mex.com" => 0, + "no.com" => 0, + "qc.com" => 0, + "ru.com" => 0, + "sa.com" => 0, + "se.net" => 0, + "uk.com" => 0, + "uk.net" => 0, + "us.com" => 0, + "uy.com" => 0, + "za.bz" => 0, + "za.com" => 0, + "africa.com" => 0, + "gr.com" => 0, + "in.net" => 0, + "us.org" => 0, + "co.com" => 0, + "c.la" => 0, + "certmgr.org" => 0, + "xenapponazure.com" => 0, + "discourse.group" => 0, + "virtueeldomein.nl" => 0, + "cleverapps.io" => 0, + "lcl.dev" => -1, + "stg.dev" => -1, + "c66.me" => 0, + "cloud66.ws" => 0, + "cloud66.zone" => 0, + "jdevcloud.com" => 0, + "wpdevcloud.com" => 0, + "cloudaccess.host" => 0, + "freesite.host" => 0, + "cloudaccess.net" => 0, + "cloudcontrolled.com" => 0, + "cloudcontrolapp.com" => 0, + "cloudera.site" => 0, + "trycloudflare.com" => 0, + "workers.dev" => 0, + "wnext.app" => 0, + "co.ca" => 0, + "otap.co" => -1, + "co.cz" => 0, + "c.cdn77.org" => 0, + "cdn77-ssl.net" => 0, + "r.cdn77.net" => 0, + "rsc.cdn77.org" => 0, + "ssl.origin.cdn77-secure.org" => 0, + "cloudns.asia" => 0, + "cloudns.biz" => 0, + "cloudns.club" => 0, + "cloudns.cc" => 0, + "cloudns.eu" => 0, + "cloudns.in" => 0, + "cloudns.info" => 0, + "cloudns.org" => 0, + "cloudns.pro" => 0, + "cloudns.pw" => 0, + "cloudns.us" => 0, + "cloudeity.net" => 0, + "cnpy.gdn" => 0, + "co.nl" => 0, + "co.no" => 0, + "webhosting.be" => 0, + "hosting-cluster.nl" => 0, + "dyn.cosidns.de" => 0, + "dynamisches-dns.de" => 0, + "dnsupdater.de" => 0, + "internet-dns.de" => 0, + "l-o-g-i-n.de" => 0, + "dynamic-dns.info" => 0, + "feste-ip.net" => 0, + "knx-server.net" => 0, + "static-access.net" => 0, + "realm.cz" => 0, + "cryptonomic.net" => -1, + "cupcake.is" => 0, + "cyon.link" => 0, + "cyon.site" => 0, + "daplie.me" => 0, + "localhost.daplie.me" => 0, + "dattolocal.com" => 0, + "dattorelay.com" => 0, + "dattoweb.com" => 0, + "mydatto.com" => 0, + "dattolocal.net" => 0, + "mydatto.net" => 0, + "biz.dk" => 0, + "co.dk" => 0, + "firm.dk" => 0, + "reg.dk" => 0, + "store.dk" => 0, + "dapps.earth" => -1, + "bzz.dapps.earth" => -1, + "debian.net" => 0, + "dedyn.io" => 0, + "dnshome.de" => 0, + "online.th" => 0, + "shop.th" => 0, + "drayddns.com" => 0, + "dreamhosters.com" => 0, + "mydrobo.com" => 0, + "drud.io" => 0, + "drud.us" => 0, + "duckdns.org" => 0, + "dy.fi" => 0, + "tunk.org" => 0, + "dyndns-at-home.com" => 0, + "dyndns-at-work.com" => 0, + "dyndns-blog.com" => 0, + "dyndns-free.com" => 0, + "dyndns-home.com" => 0, + "dyndns-ip.com" => 0, + "dyndns-mail.com" => 0, + "dyndns-office.com" => 0, + "dyndns-pics.com" => 0, + "dyndns-remote.com" => 0, + "dyndns-server.com" => 0, + "dyndns-web.com" => 0, + "dyndns-wiki.com" => 0, + "dyndns-work.com" => 0, + "dyndns.biz" => 0, + "dyndns.info" => 0, + "dyndns.org" => 0, + "dyndns.tv" => 0, + "at-band-camp.net" => 0, + "ath.cx" => 0, + "barrel-of-knowledge.info" => 0, + "barrell-of-knowledge.info" => 0, + "better-than.tv" => 0, + "blogdns.com" => 0, + "blogdns.net" => 0, + 
"blogdns.org" => 0, + "blogsite.org" => 0, + "boldlygoingnowhere.org" => 0, + "broke-it.net" => 0, + "buyshouses.net" => 0, + "cechire.com" => 0, + "dnsalias.com" => 0, + "dnsalias.net" => 0, + "dnsalias.org" => 0, + "dnsdojo.com" => 0, + "dnsdojo.net" => 0, + "dnsdojo.org" => 0, + "does-it.net" => 0, + "doesntexist.com" => 0, + "doesntexist.org" => 0, + "dontexist.com" => 0, + "dontexist.net" => 0, + "dontexist.org" => 0, + "doomdns.com" => 0, + "doomdns.org" => 0, + "dvrdns.org" => 0, + "dyn-o-saur.com" => 0, + "dynalias.com" => 0, + "dynalias.net" => 0, + "dynalias.org" => 0, + "dynathome.net" => 0, + "dyndns.ws" => 0, + "endofinternet.net" => 0, + "endofinternet.org" => 0, + "endoftheinternet.org" => 0, + "est-a-la-maison.com" => 0, + "est-a-la-masion.com" => 0, + "est-le-patron.com" => 0, + "est-mon-blogueur.com" => 0, + "for-better.biz" => 0, + "for-more.biz" => 0, + "for-our.info" => 0, + "for-some.biz" => 0, + "for-the.biz" => 0, + "forgot.her.name" => 0, + "forgot.his.name" => 0, + "from-ak.com" => 0, + "from-al.com" => 0, + "from-ar.com" => 0, + "from-az.net" => 0, + "from-ca.com" => 0, + "from-co.net" => 0, + "from-ct.com" => 0, + "from-dc.com" => 0, + "from-de.com" => 0, + "from-fl.com" => 0, + "from-ga.com" => 0, + "from-hi.com" => 0, + "from-ia.com" => 0, + "from-id.com" => 0, + "from-il.com" => 0, + "from-in.com" => 0, + "from-ks.com" => 0, + "from-ky.com" => 0, + "from-la.net" => 0, + "from-ma.com" => 0, + "from-md.com" => 0, + "from-me.org" => 0, + "from-mi.com" => 0, + "from-mn.com" => 0, + "from-mo.com" => 0, + "from-ms.com" => 0, + "from-mt.com" => 0, + "from-nc.com" => 0, + "from-nd.com" => 0, + "from-ne.com" => 0, + "from-nh.com" => 0, + "from-nj.com" => 0, + "from-nm.com" => 0, + "from-nv.com" => 0, + "from-ny.net" => 0, + "from-oh.com" => 0, + "from-ok.com" => 0, + "from-or.com" => 0, + "from-pa.com" => 0, + "from-pr.com" => 0, + "from-ri.com" => 0, + "from-sc.com" => 0, + "from-sd.com" => 0, + "from-tn.com" => 0, + "from-tx.com" => 0, + "from-ut.com" => 0, + "from-va.com" => 0, + "from-vt.com" => 0, + "from-wa.com" => 0, + "from-wi.com" => 0, + "from-wv.com" => 0, + "from-wy.com" => 0, + "ftpaccess.cc" => 0, + "fuettertdasnetz.de" => 0, + "game-host.org" => 0, + "game-server.cc" => 0, + "getmyip.com" => 0, + "gets-it.net" => 0, + "go.dyndns.org" => 0, + "gotdns.com" => 0, + "gotdns.org" => 0, + "groks-the.info" => 0, + "groks-this.info" => 0, + "ham-radio-op.net" => 0, + "here-for-more.info" => 0, + "hobby-site.com" => 0, + "hobby-site.org" => 0, + "home.dyndns.org" => 0, + "homedns.org" => 0, + "homeftp.net" => 0, + "homeftp.org" => 0, + "homeip.net" => 0, + "homelinux.com" => 0, + "homelinux.net" => 0, + "homelinux.org" => 0, + "homeunix.com" => 0, + "homeunix.net" => 0, + "homeunix.org" => 0, + "iamallama.com" => 0, + "in-the-band.net" => 0, + "is-a-anarchist.com" => 0, + "is-a-blogger.com" => 0, + "is-a-bookkeeper.com" => 0, + "is-a-bruinsfan.org" => 0, + "is-a-bulls-fan.com" => 0, + "is-a-candidate.org" => 0, + "is-a-caterer.com" => 0, + "is-a-celticsfan.org" => 0, + "is-a-chef.com" => 0, + "is-a-chef.net" => 0, + "is-a-chef.org" => 0, + "is-a-conservative.com" => 0, + "is-a-cpa.com" => 0, + "is-a-cubicle-slave.com" => 0, + "is-a-democrat.com" => 0, + "is-a-designer.com" => 0, + "is-a-doctor.com" => 0, + "is-a-financialadvisor.com" => 0, + "is-a-geek.com" => 0, + "is-a-geek.net" => 0, + "is-a-geek.org" => 0, + "is-a-green.com" => 0, + "is-a-guru.com" => 0, + "is-a-hard-worker.com" => 0, + "is-a-hunter.com" => 0, + "is-a-knight.org" => 0, + 
"is-a-landscaper.com" => 0, + "is-a-lawyer.com" => 0, + "is-a-liberal.com" => 0, + "is-a-libertarian.com" => 0, + "is-a-linux-user.org" => 0, + "is-a-llama.com" => 0, + "is-a-musician.com" => 0, + "is-a-nascarfan.com" => 0, + "is-a-nurse.com" => 0, + "is-a-painter.com" => 0, + "is-a-patsfan.org" => 0, + "is-a-personaltrainer.com" => 0, + "is-a-photographer.com" => 0, + "is-a-player.com" => 0, + "is-a-republican.com" => 0, + "is-a-rockstar.com" => 0, + "is-a-socialist.com" => 0, + "is-a-soxfan.org" => 0, + "is-a-student.com" => 0, + "is-a-teacher.com" => 0, + "is-a-techie.com" => 0, + "is-a-therapist.com" => 0, + "is-an-accountant.com" => 0, + "is-an-actor.com" => 0, + "is-an-actress.com" => 0, + "is-an-anarchist.com" => 0, + "is-an-artist.com" => 0, + "is-an-engineer.com" => 0, + "is-an-entertainer.com" => 0, + "is-by.us" => 0, + "is-certified.com" => 0, + "is-found.org" => 0, + "is-gone.com" => 0, + "is-into-anime.com" => 0, + "is-into-cars.com" => 0, + "is-into-cartoons.com" => 0, + "is-into-games.com" => 0, + "is-leet.com" => 0, + "is-lost.org" => 0, + "is-not-certified.com" => 0, + "is-saved.org" => 0, + "is-slick.com" => 0, + "is-uberleet.com" => 0, + "is-very-bad.org" => 0, + "is-very-evil.org" => 0, + "is-very-good.org" => 0, + "is-very-nice.org" => 0, + "is-very-sweet.org" => 0, + "is-with-theband.com" => 0, + "isa-geek.com" => 0, + "isa-geek.net" => 0, + "isa-geek.org" => 0, + "isa-hockeynut.com" => 0, + "issmarterthanyou.com" => 0, + "isteingeek.de" => 0, + "istmein.de" => 0, + "kicks-ass.net" => 0, + "kicks-ass.org" => 0, + "knowsitall.info" => 0, + "land-4-sale.us" => 0, + "lebtimnetz.de" => 0, + "leitungsen.de" => 0, + "likes-pie.com" => 0, + "likescandy.com" => 0, + "merseine.nu" => 0, + "mine.nu" => 0, + "misconfused.org" => 0, + "mypets.ws" => 0, + "myphotos.cc" => 0, + "neat-url.com" => 0, + "office-on-the.net" => 0, + "on-the-web.tv" => 0, + "podzone.net" => 0, + "podzone.org" => 0, + "readmyblog.org" => 0, + "saves-the-whales.com" => 0, + "scrapper-site.net" => 0, + "scrapping.cc" => 0, + "selfip.biz" => 0, + "selfip.com" => 0, + "selfip.info" => 0, + "selfip.net" => 0, + "selfip.org" => 0, + "sells-for-less.com" => 0, + "sells-for-u.com" => 0, + "sells-it.net" => 0, + "sellsyourhome.org" => 0, + "servebbs.com" => 0, + "servebbs.net" => 0, + "servebbs.org" => 0, + "serveftp.net" => 0, + "serveftp.org" => 0, + "servegame.org" => 0, + "shacknet.nu" => 0, + "simple-url.com" => 0, + "space-to-rent.com" => 0, + "stuff-4-sale.org" => 0, + "stuff-4-sale.us" => 0, + "teaches-yoga.com" => 0, + "thruhere.net" => 0, + "traeumtgerade.de" => 0, + "webhop.biz" => 0, + "webhop.info" => 0, + "webhop.net" => 0, + "webhop.org" => 0, + "worse-than.tv" => 0, + "writesthisblog.com" => 0, + "ddnss.de" => 0, + "dyn.ddnss.de" => 0, + "dyndns.ddnss.de" => 0, + "dyndns1.de" => 0, + "dyn-ip24.de" => 0, + "home-webserver.de" => 0, + "dyn.home-webserver.de" => 0, + "myhome-server.de" => 0, + "ddnss.org" => 0, + "definima.net" => 0, + "definima.io" => 0, + "bci.dnstrace.pro" => 0, + "ddnsfree.com" => 0, + "ddnsgeek.com" => 0, + "giize.com" => 0, + "gleeze.com" => 0, + "kozow.com" => 0, + "loseyourip.com" => 0, + "ooguy.com" => 0, + "theworkpc.com" => 0, + "casacam.net" => 0, + "dynu.net" => 0, + "accesscam.org" => 0, + "camdvr.org" => 0, + "freeddns.org" => 0, + "mywire.org" => 0, + "webredirect.org" => 0, + "myddns.rocks" => 0, + "blogsite.xyz" => 0, + "dynv6.net" => 0, + "e4.cz" => 0, + "mytuleap.com" => 0, + "onred.one" => 0, + "staging.onred.one" => 0, + "enonic.io" => 0, + "customer.enonic.io" 
=> 0, + "eu.org" => 0, + "al.eu.org" => 0, + "asso.eu.org" => 0, + "at.eu.org" => 0, + "au.eu.org" => 0, + "be.eu.org" => 0, + "bg.eu.org" => 0, + "ca.eu.org" => 0, + "cd.eu.org" => 0, + "ch.eu.org" => 0, + "cn.eu.org" => 0, + "cy.eu.org" => 0, + "cz.eu.org" => 0, + "de.eu.org" => 0, + "dk.eu.org" => 0, + "edu.eu.org" => 0, + "ee.eu.org" => 0, + "es.eu.org" => 0, + "fi.eu.org" => 0, + "fr.eu.org" => 0, + "gr.eu.org" => 0, + "hr.eu.org" => 0, + "hu.eu.org" => 0, + "ie.eu.org" => 0, + "il.eu.org" => 0, + "in.eu.org" => 0, + "int.eu.org" => 0, + "is.eu.org" => 0, + "it.eu.org" => 0, + "jp.eu.org" => 0, + "kr.eu.org" => 0, + "lt.eu.org" => 0, + "lu.eu.org" => 0, + "lv.eu.org" => 0, + "mc.eu.org" => 0, + "me.eu.org" => 0, + "mk.eu.org" => 0, + "mt.eu.org" => 0, + "my.eu.org" => 0, + "net.eu.org" => 0, + "ng.eu.org" => 0, + "nl.eu.org" => 0, + "no.eu.org" => 0, + "nz.eu.org" => 0, + "paris.eu.org" => 0, + "pl.eu.org" => 0, + "pt.eu.org" => 0, + "q-a.eu.org" => 0, + "ro.eu.org" => 0, + "ru.eu.org" => 0, + "se.eu.org" => 0, + "si.eu.org" => 0, + "sk.eu.org" => 0, + "tr.eu.org" => 0, + "uk.eu.org" => 0, + "us.eu.org" => 0, + "eu-1.evennode.com" => 0, + "eu-2.evennode.com" => 0, + "eu-3.evennode.com" => 0, + "eu-4.evennode.com" => 0, + "us-1.evennode.com" => 0, + "us-2.evennode.com" => 0, + "us-3.evennode.com" => 0, + "us-4.evennode.com" => 0, + "twmail.cc" => 0, + "twmail.net" => 0, + "twmail.org" => 0, + "mymailer.com.tw" => 0, + "url.tw" => 0, + "apps.fbsbx.com" => 0, + "ru.net" => 0, + "adygeya.ru" => 0, + "bashkiria.ru" => 0, + "bir.ru" => 0, + "cbg.ru" => 0, + "com.ru" => 0, + "dagestan.ru" => 0, + "grozny.ru" => 0, + "kalmykia.ru" => 0, + "kustanai.ru" => 0, + "marine.ru" => 0, + "mordovia.ru" => 0, + "msk.ru" => 0, + "mytis.ru" => 0, + "nalchik.ru" => 0, + "nov.ru" => 0, + "pyatigorsk.ru" => 0, + "spb.ru" => 0, + "vladikavkaz.ru" => 0, + "vladimir.ru" => 0, + "abkhazia.su" => 0, + "adygeya.su" => 0, + "aktyubinsk.su" => 0, + "arkhangelsk.su" => 0, + "armenia.su" => 0, + "ashgabad.su" => 0, + "azerbaijan.su" => 0, + "balashov.su" => 0, + "bashkiria.su" => 0, + "bryansk.su" => 0, + "bukhara.su" => 0, + "chimkent.su" => 0, + "dagestan.su" => 0, + "east-kazakhstan.su" => 0, + "exnet.su" => 0, + "georgia.su" => 0, + "grozny.su" => 0, + "ivanovo.su" => 0, + "jambyl.su" => 0, + "kalmykia.su" => 0, + "kaluga.su" => 0, + "karacol.su" => 0, + "karaganda.su" => 0, + "karelia.su" => 0, + "khakassia.su" => 0, + "krasnodar.su" => 0, + "kurgan.su" => 0, + "kustanai.su" => 0, + "lenug.su" => 0, + "mangyshlak.su" => 0, + "mordovia.su" => 0, + "msk.su" => 0, + "murmansk.su" => 0, + "nalchik.su" => 0, + "navoi.su" => 0, + "north-kazakhstan.su" => 0, + "nov.su" => 0, + "obninsk.su" => 0, + "penza.su" => 0, + "pokrovsk.su" => 0, + "sochi.su" => 0, + "spb.su" => 0, + "tashkent.su" => 0, + "termez.su" => 0, + "togliatti.su" => 0, + "troitsk.su" => 0, + "tselinograd.su" => 0, + "tula.su" => 0, + "tuva.su" => 0, + "vladikavkaz.su" => 0, + "vladimir.su" => 0, + "vologda.su" => 0, + "channelsdvr.net" => 0, + "fastly-terrarium.com" => 0, + "fastlylb.net" => 0, + "map.fastlylb.net" => 0, + "freetls.fastly.net" => 0, + "map.fastly.net" => 0, + "a.prod.fastly.net" => 0, + "global.prod.fastly.net" => 0, + "a.ssl.fastly.net" => 0, + "b.ssl.fastly.net" => 0, + "global.ssl.fastly.net" => 0, + "fastpanel.direct" => 0, + "fastvps-server.com" => 0, + "fhapp.xyz" => 0, + "fedorainfracloud.org" => 0, + "fedorapeople.org" => 0, + "cloud.fedoraproject.org" => 0, + "app.os.fedoraproject.org" => 0, + "app.os.stg.fedoraproject.org" => 
0, + "mydobiss.com" => 0, + "filegear.me" => 0, + "filegear-au.me" => 0, + "filegear-de.me" => 0, + "filegear-gb.me" => 0, + "filegear-ie.me" => 0, + "filegear-jp.me" => 0, + "filegear-sg.me" => 0, + "firebaseapp.com" => 0, + "flynnhub.com" => 0, + "flynnhosting.net" => 0, + "freebox-os.com" => 0, + "freeboxos.com" => 0, + "fbx-os.fr" => 0, + "fbxos.fr" => 0, + "freebox-os.fr" => 0, + "freeboxos.fr" => 0, + "freedesktop.org" => 0, + "futurecms.at" => -1, + "ex.futurecms.at" => -1, + "in.futurecms.at" => -1, + "futurehosting.at" => 0, + "futuremailing.at" => 0, + "ex.ortsinfo.at" => -1, + "kunden.ortsinfo.at" => -1, + "statics.cloud" => -1, + "service.gov.uk" => 0, + "gehirn.ne.jp" => 0, + "usercontent.jp" => 0, + "lab.ms" => 0, + "github.io" => 0, + "githubusercontent.com" => 0, + "gitlab.io" => 0, + "glitch.me" => 0, + "cloudapps.digital" => 0, + "london.cloudapps.digital" => 0, + "homeoffice.gov.uk" => 0, + "ro.im" => 0, + "shop.ro" => 0, + "goip.de" => 0, + "run.app" => 0, + "a.run.app" => 0, + "web.app" => 0, + "0emm.com" => -1, + "appspot.com" => 0, + "blogspot.ae" => 0, + "blogspot.al" => 0, + "blogspot.am" => 0, + "blogspot.ba" => 0, + "blogspot.be" => 0, + "blogspot.bg" => 0, + "blogspot.bj" => 0, + "blogspot.ca" => 0, + "blogspot.cf" => 0, + "blogspot.ch" => 0, + "blogspot.cl" => 0, + "blogspot.co.at" => 0, + "blogspot.co.id" => 0, + "blogspot.co.il" => 0, + "blogspot.co.ke" => 0, + "blogspot.co.nz" => 0, + "blogspot.co.uk" => 0, + "blogspot.co.za" => 0, + "blogspot.com" => 0, + "blogspot.com.ar" => 0, + "blogspot.com.au" => 0, + "blogspot.com.br" => 0, + "blogspot.com.by" => 0, + "blogspot.com.co" => 0, + "blogspot.com.cy" => 0, + "blogspot.com.ee" => 0, + "blogspot.com.eg" => 0, + "blogspot.com.es" => 0, + "blogspot.com.mt" => 0, + "blogspot.com.ng" => 0, + "blogspot.com.tr" => 0, + "blogspot.com.uy" => 0, + "blogspot.cv" => 0, + "blogspot.cz" => 0, + "blogspot.de" => 0, + "blogspot.dk" => 0, + "blogspot.fi" => 0, + "blogspot.fr" => 0, + "blogspot.gr" => 0, + "blogspot.hk" => 0, + "blogspot.hr" => 0, + "blogspot.hu" => 0, + "blogspot.ie" => 0, + "blogspot.in" => 0, + "blogspot.is" => 0, + "blogspot.it" => 0, + "blogspot.jp" => 0, + "blogspot.kr" => 0, + "blogspot.li" => 0, + "blogspot.lt" => 0, + "blogspot.lu" => 0, + "blogspot.md" => 0, + "blogspot.mk" => 0, + "blogspot.mr" => 0, + "blogspot.mx" => 0, + "blogspot.my" => 0, + "blogspot.nl" => 0, + "blogspot.no" => 0, + "blogspot.pe" => 0, + "blogspot.pt" => 0, + "blogspot.qa" => 0, + "blogspot.re" => 0, + "blogspot.ro" => 0, + "blogspot.rs" => 0, + "blogspot.ru" => 0, + "blogspot.se" => 0, + "blogspot.sg" => 0, + "blogspot.si" => 0, + "blogspot.sk" => 0, + "blogspot.sn" => 0, + "blogspot.td" => 0, + "blogspot.tw" => 0, + "blogspot.ug" => 0, + "blogspot.vn" => 0, + "cloudfunctions.net" => 0, + "cloud.goog" => 0, + "codespot.com" => 0, + "googleapis.com" => 0, + "googlecode.com" => 0, + "pagespeedmobilizer.com" => 0, + "publishproxy.com" => 0, + "withgoogle.com" => 0, + "withyoutube.com" => 0, + "fin.ci" => 0, + "free.hr" => 0, + "caa.li" => 0, + "ua.rs" => 0, + "conf.se" => 0, + "hs.zone" => 0, + "hs.run" => 0, + "hashbang.sh" => 0, + "hasura.app" => 0, + "hasura-app.io" => 0, + "hepforge.org" => 0, + "herokuapp.com" => 0, + "herokussl.com" => 0, + "myravendb.com" => 0, + "ravendb.community" => 0, + "ravendb.me" => 0, + "development.run" => 0, + "ravendb.run" => 0, + "bpl.biz" => 0, + "orx.biz" => 0, + "ng.city" => 0, + "ng.ink" => 0, + "biz.gl" => 0, + "col.ng" => 0, + "gen.ng" => 0, + "ltd.ng" => 0, + "sch.so" => 0, + 
"xn--hkkinen-5wa.fi" => 0, + "moonscale.io" => -1, + "moonscale.net" => 0, + "iki.fi" => 0, + "dyn-berlin.de" => 0, + "in-berlin.de" => 0, + "in-brb.de" => 0, + "in-butter.de" => 0, + "in-dsl.de" => 0, + "in-dsl.net" => 0, + "in-dsl.org" => 0, + "in-vpn.de" => 0, + "in-vpn.net" => 0, + "in-vpn.org" => 0, + "biz.at" => 0, + "info.at" => 0, + "info.cx" => 0, + "ac.leg.br" => 0, + "al.leg.br" => 0, + "am.leg.br" => 0, + "ap.leg.br" => 0, + "ba.leg.br" => 0, + "ce.leg.br" => 0, + "df.leg.br" => 0, + "es.leg.br" => 0, + "go.leg.br" => 0, + "ma.leg.br" => 0, + "mg.leg.br" => 0, + "ms.leg.br" => 0, + "mt.leg.br" => 0, + "pa.leg.br" => 0, + "pb.leg.br" => 0, + "pe.leg.br" => 0, + "pi.leg.br" => 0, + "pr.leg.br" => 0, + "rj.leg.br" => 0, + "rn.leg.br" => 0, + "ro.leg.br" => 0, + "rr.leg.br" => 0, + "rs.leg.br" => 0, + "sc.leg.br" => 0, + "se.leg.br" => 0, + "sp.leg.br" => 0, + "to.leg.br" => 0, + "pixolino.com" => 0, + "ipifony.net" => 0, + "mein-iserv.de" => 0, + "test-iserv.de" => 0, + "iserv.dev" => 0, + "iobb.net" => 0, + "myjino.ru" => 0, + "hosting.myjino.ru" => -1, + "landing.myjino.ru" => -1, + "spectrum.myjino.ru" => -1, + "vps.myjino.ru" => -1, + "triton.zone" => -1, + "cns.joyent.com" => -1, + "js.org" => 0, + "kaas.gg" => 0, + "khplay.nl" => 0, + "keymachine.de" => 0, + "kinghost.net" => 0, + "uni5.net" => 0, + "knightpoint.systems" => 0, + "co.krd" => 0, + "edu.krd" => 0, + "git-repos.de" => 0, + "lcube-server.de" => 0, + "svn-repos.de" => 0, + "leadpages.co" => 0, + "lpages.co" => 0, + "lpusercontent.com" => 0, + "co.business" => 0, + "co.education" => 0, + "co.events" => 0, + "co.financial" => 0, + "co.network" => 0, + "co.place" => 0, + "co.technology" => 0, + "app.lmpm.com" => 0, + "linkitools.space" => 0, + "linkyard.cloud" => 0, + "linkyard-cloud.ch" => 0, + "members.linode.com" => 0, + "nodebalancer.linode.com" => 0, + "we.bs" => 0, + "loginline.app" => 0, + "loginline.dev" => 0, + "loginline.io" => 0, + "loginline.services" => 0, + "loginline.site" => 0, + "krasnik.pl" => 0, + "leczna.pl" => 0, + "lubartow.pl" => 0, + "lublin.pl" => 0, + "poniatowa.pl" => 0, + "swidnik.pl" => 0, + "uklugs.org" => 0, + "glug.org.uk" => 0, + "lug.org.uk" => 0, + "lugs.org.uk" => 0, + "barsy.bg" => 0, + "barsy.co.uk" => 0, + "barsyonline.co.uk" => 0, + "barsycenter.com" => 0, + "barsyonline.com" => 0, + "barsy.club" => 0, + "barsy.de" => 0, + "barsy.eu" => 0, + "barsy.in" => 0, + "barsy.info" => 0, + "barsy.io" => 0, + "barsy.me" => 0, + "barsy.menu" => 0, + "barsy.mobi" => 0, + "barsy.net" => 0, + "barsy.online" => 0, + "barsy.org" => 0, + "barsy.pro" => 0, + "barsy.pub" => 0, + "barsy.shop" => 0, + "barsy.site" => 0, + "barsy.support" => 0, + "barsy.uk" => 0, + "magentosite.cloud" => -1, + "mayfirst.info" => 0, + "mayfirst.org" => 0, + "hb.cldmail.ru" => 0, + "miniserver.com" => 0, + "memset.net" => 0, + "cloud.metacentrum.cz" => 0, + "custom.metacentrum.cz" => 0, + "flt.cloud.muni.cz" => 0, + "usr.cloud.muni.cz" => 0, + "meteorapp.com" => 0, + "eu.meteorapp.com" => 0, + "co.pl" => 0, + "azurecontainer.io" => 0, + "azurewebsites.net" => 0, + "azure-mobile.net" => 0, + "cloudapp.net" => 0, + "mozilla-iot.org" => 0, + "bmoattachments.org" => 0, + "net.ru" => 0, + "org.ru" => 0, + "pp.ru" => 0, + "ui.nabu.casa" => 0, + "pony.club" => 0, + "of.fashion" => 0, + "on.fashion" => 0, + "of.football" => 0, + "in.london" => 0, + "of.london" => 0, + "for.men" => 0, + "and.mom" => 0, + "for.mom" => 0, + "for.one" => 0, + "for.sale" => 0, + "of.work" => 0, + "to.work" => 0, + "nctu.me" => 0, + 
"bitballoon.com" => 0, + "netlify.com" => 0, + "4u.com" => 0, + "ngrok.io" => 0, + "nh-serv.co.uk" => 0, + "nfshost.com" => 0, + "dnsking.ch" => 0, + "mypi.co" => 0, + "n4t.co" => 0, + "001www.com" => 0, + "ddnslive.com" => 0, + "myiphost.com" => 0, + "forumz.info" => 0, + "16-b.it" => 0, + "32-b.it" => 0, + "64-b.it" => 0, + "soundcast.me" => 0, + "tcp4.me" => 0, + "dnsup.net" => 0, + "hicam.net" => 0, + "now-dns.net" => 0, + "ownip.net" => 0, + "vpndns.net" => 0, + "dynserv.org" => 0, + "now-dns.org" => 0, + "x443.pw" => 0, + "now-dns.top" => 0, + "ntdll.top" => 0, + "freeddns.us" => 0, + "crafting.xyz" => 0, + "zapto.xyz" => 0, + "nsupdate.info" => 0, + "nerdpol.ovh" => 0, + "blogsyte.com" => 0, + "brasilia.me" => 0, + "cable-modem.org" => 0, + "ciscofreak.com" => 0, + "collegefan.org" => 0, + "couchpotatofries.org" => 0, + "damnserver.com" => 0, + "ddns.me" => 0, + "ditchyourip.com" => 0, + "dnsfor.me" => 0, + "dnsiskinky.com" => 0, + "dvrcam.info" => 0, + "dynns.com" => 0, + "eating-organic.net" => 0, + "fantasyleague.cc" => 0, + "geekgalaxy.com" => 0, + "golffan.us" => 0, + "health-carereform.com" => 0, + "homesecuritymac.com" => 0, + "homesecuritypc.com" => 0, + "hopto.me" => 0, + "ilovecollege.info" => 0, + "loginto.me" => 0, + "mlbfan.org" => 0, + "mmafan.biz" => 0, + "myactivedirectory.com" => 0, + "mydissent.net" => 0, + "myeffect.net" => 0, + "mymediapc.net" => 0, + "mypsx.net" => 0, + "mysecuritycamera.com" => 0, + "mysecuritycamera.net" => 0, + "mysecuritycamera.org" => 0, + "net-freaks.com" => 0, + "nflfan.org" => 0, + "nhlfan.net" => 0, + "no-ip.ca" => 0, + "no-ip.co.uk" => 0, + "no-ip.net" => 0, + "noip.us" => 0, + "onthewifi.com" => 0, + "pgafan.net" => 0, + "point2this.com" => 0, + "pointto.us" => 0, + "privatizehealthinsurance.net" => 0, + "quicksytes.com" => 0, + "read-books.org" => 0, + "securitytactics.com" => 0, + "serveexchange.com" => 0, + "servehumour.com" => 0, + "servep2p.com" => 0, + "servesarcasm.com" => 0, + "stufftoread.com" => 0, + "ufcfan.org" => 0, + "unusualperson.com" => 0, + "workisboring.com" => 0, + "3utilities.com" => 0, + "bounceme.net" => 0, + "ddns.net" => 0, + "ddnsking.com" => 0, + "gotdns.ch" => 0, + "hopto.org" => 0, + "myftp.biz" => 0, + "myftp.org" => 0, + "myvnc.com" => 0, + "no-ip.biz" => 0, + "no-ip.info" => 0, + "no-ip.org" => 0, + "noip.me" => 0, + "redirectme.net" => 0, + "servebeer.com" => 0, + "serveblog.net" => 0, + "servecounterstrike.com" => 0, + "serveftp.com" => 0, + "servegame.com" => 0, + "servehalflife.com" => 0, + "servehttp.com" => 0, + "serveirc.com" => 0, + "serveminecraft.net" => 0, + "servemp3.com" => 0, + "servepics.com" => 0, + "servequake.com" => 0, + "sytes.net" => 0, + "webhop.me" => 0, + "zapto.org" => 0, + "stage.nodeart.io" => 0, + "nodum.co" => 0, + "nodum.io" => 0, + "pcloud.host" => 0, + "nyc.mn" => 0, + "nom.ae" => 0, + "nom.af" => 0, + "nom.ai" => 0, + "nom.al" => 0, + "nym.by" => 0, + "nym.bz" => 0, + "nom.cl" => 0, + "nom.gd" => 0, + "nom.ge" => 0, + "nom.gl" => 0, + "nym.gr" => 0, + "nom.gt" => 0, + "nym.gy" => 0, + "nom.hn" => 0, + "nym.ie" => 0, + "nom.im" => 0, + "nom.ke" => 0, + "nym.kz" => 0, + "nym.la" => 0, + "nym.lc" => 0, + "nom.li" => 0, + "nym.li" => 0, + "nym.lt" => 0, + "nym.lu" => 0, + "nym.me" => 0, + "nom.mk" => 0, + "nym.mn" => 0, + "nym.mx" => 0, + "nom.nu" => 0, + "nym.nz" => 0, + "nym.pe" => 0, + "nym.pt" => 0, + "nom.pw" => 0, + "nom.qa" => 0, + "nym.ro" => 0, + "nom.rs" => 0, + "nom.si" => 0, + "nym.sk" => 0, + "nom.st" => 0, + "nym.su" => 0, + "nym.sx" => 0, + "nom.tj" => 0, + 
"nym.tw" => 0, + "nom.ug" => 0, + "nom.uy" => 0, + "nom.vc" => 0, + "nom.vg" => 0, + "cya.gg" => 0, + "cloudycluster.net" => 0, + "nid.io" => 0, + "opencraft.hosting" => 0, + "operaunite.com" => 0, + "outsystemscloud.com" => 0, + "ownprovider.com" => 0, + "own.pm" => 0, + "ox.rs" => 0, + "oy.lc" => 0, + "pgfog.com" => 0, + "pagefrontapp.com" => 0, + "art.pl" => 0, + "gliwice.pl" => 0, + "krakow.pl" => 0, + "poznan.pl" => 0, + "wroc.pl" => 0, + "zakopane.pl" => 0, + "pantheonsite.io" => 0, + "gotpantheon.com" => 0, + "mypep.link" => 0, + "on-web.fr" => 0, + "platform.sh" => -1, + "platformsh.site" => -1, + "dyn53.io" => 0, + "co.bn" => 0, + "xen.prgmr.com" => 0, + "priv.at" => 0, + "prvcy.page" => 0, + "dweb.link" => -1, + "protonet.io" => 0, + "chirurgiens-dentistes-en-france.fr" => 0, + "byen.site" => 0, + "pubtls.org" => 0, + "qualifioapp.com" => 0, + "instantcloud.cn" => 0, + "ras.ru" => 0, + "qa2.com" => 0, + "dev-myqnapcloud.com" => 0, + "alpha-myqnapcloud.com" => 0, + "myqnapcloud.com" => 0, + "quipelements.com" => -1, + "vapor.cloud" => 0, + "vaporcloud.io" => 0, + "rackmaze.com" => 0, + "rackmaze.net" => 0, + "on-rancher.cloud" => -1, + "on-rio.io" => -1, + "readthedocs.io" => 0, + "rhcloud.com" => 0, + "app.render.com" => 0, + "onrender.com" => 0, + "repl.co" => 0, + "repl.run" => 0, + "resindevice.io" => 0, + "devices.resinstaging.io" => 0, + "hzc.io" => 0, + "wellbeingzone.eu" => 0, + "ptplus.fit" => 0, + "wellbeingzone.co.uk" => 0, + "git-pages.rit.edu" => 0, + "sandcats.io" => 0, + "logoip.de" => 0, + "logoip.com" => 0, + "schokokeks.net" => 0, + "scrysec.com" => 0, + "firewall-gateway.com" => 0, + "firewall-gateway.de" => 0, + "my-gateway.de" => 0, + "my-router.de" => 0, + "spdns.de" => 0, + "spdns.eu" => 0, + "firewall-gateway.net" => 0, + "my-firewall.org" => 0, + "myfirewall.org" => 0, + "spdns.org" => 0, + "s5y.io" => -1, + "sensiosite.cloud" => -1, + "biz.ua" => 0, + "co.ua" => 0, + "pp.ua" => 0, + "shiftedit.io" => 0, + "myshopblocks.com" => 0, + "shopitsite.com" => 0, + "mo-siemens.io" => 0, + "1kapp.com" => 0, + "appchizi.com" => 0, + "applinzi.com" => 0, + "sinaapp.com" => 0, + "vipsinaapp.com" => 0, + "siteleaf.net" => 0, + "bounty-full.com" => 0, + "alpha.bounty-full.com" => 0, + "beta.bounty-full.com" => 0, + "stackhero-network.com" => 0, + "static.land" => 0, + "dev.static.land" => 0, + "sites.static.land" => 0, + "apps.lair.io" => 0, + "stolos.io" => -1, + "spacekit.io" => 0, + "customer.speedpartner.de" => 0, + "api.stdlib.com" => 0, + "storj.farm" => 0, + "utwente.io" => 0, + "soc.srcf.net" => 0, + "user.srcf.net" => 0, + "temp-dns.com" => 0, + "applicationcloud.io" => 0, + "scapp.io" => 0, + "syncloud.it" => 0, + "diskstation.me" => 0, + "dscloud.biz" => 0, + "dscloud.me" => 0, + "dscloud.mobi" => 0, + "dsmynas.com" => 0, + "dsmynas.net" => 0, + "dsmynas.org" => 0, + "familyds.com" => 0, + "familyds.net" => 0, + "familyds.org" => 0, + "i234.me" => 0, + "myds.me" => 0, + "synology.me" => 0, + "vpnplus.to" => 0, + "taifun-dns.de" => 0, + "gda.pl" => 0, + "gdansk.pl" => 0, + "gdynia.pl" => 0, + "med.pl" => 0, + "sopot.pl" => 0, + "edugit.org" => 0, + "telebit.app" => 0, + "telebit.io" => 0, + "telebit.xyz" => -1, + "gwiddle.co.uk" => 0, + "thingdustdata.com" => 0, + "cust.dev.thingdust.io" => 0, + "cust.disrec.thingdust.io" => 0, + "cust.prod.thingdust.io" => 0, + "cust.testing.thingdust.io" => 0, + "arvo.network" => 0, + "azimuth.network" => 0, + "bloxcms.com" => 0, + "townnews-staging.com" => 0, + "12hp.at" => 0, + "2ix.at" => 0, + "4lima.at" => 0, + 
"lima-city.at" => 0, + "12hp.ch" => 0, + "2ix.ch" => 0, + "4lima.ch" => 0, + "lima-city.ch" => 0, + "trafficplex.cloud" => 0, + "de.cool" => 0, + "12hp.de" => 0, + "2ix.de" => 0, + "4lima.de" => 0, + "lima-city.de" => 0, + "1337.pictures" => 0, + "clan.rip" => 0, + "lima-city.rocks" => 0, + "webspace.rocks" => 0, + "lima.zone" => 0, + "transurl.be" => -1, + "transurl.eu" => -1, + "transurl.nl" => -1, + "tuxfamily.org" => 0, + "dd-dns.de" => 0, + "diskstation.eu" => 0, + "diskstation.org" => 0, + "dray-dns.de" => 0, + "draydns.de" => 0, + "dyn-vpn.de" => 0, + "dynvpn.de" => 0, + "mein-vigor.de" => 0, + "my-vigor.de" => 0, + "my-wan.de" => 0, + "syno-ds.de" => 0, + "synology-diskstation.de" => 0, + "synology-ds.de" => 0, + "uber.space" => 0, + "uberspace.de" => -1, + "hk.com" => 0, + "hk.org" => 0, + "ltd.hk" => 0, + "inc.hk" => 0, + "virtualuser.de" => 0, + "virtual-user.de" => 0, + "lib.de.us" => 0, + "2038.io" => 0, + "router.management" => 0, + "v-info.info" => 0, + "voorloper.cloud" => 0, + "wafflecell.com" => 0, + "wedeploy.io" => 0, + "wedeploy.me" => 0, + "wedeploy.sh" => 0, + "remotewd.com" => 0, + "wmflabs.org" => 0, + "half.host" => 0, + "xnbay.com" => 0, + "u2.xnbay.com" => 0, + "u2-local.xnbay.com" => 0, + "cistron.nl" => 0, + "demon.nl" => 0, + "xs4all.space" => 0, + "official.academy" => 0, + "yolasite.com" => 0, + "ybo.faith" => 0, + "yombo.me" => 0, + "homelink.one" => 0, + "ybo.party" => 0, + "ybo.review" => 0, + "ybo.science" => 0, + "ybo.trade" => 0, + "nohost.me" => 0, + "noho.st" => 0, + "za.net" => 0, + "za.org" => 0, + "now.sh" => 0, + "bss.design" => 0, + "basicserver.io" => 0, + "virtualserver.io" => 0, + "site.builder.nu" => 0, + "enterprisecloud.nu" => 0, + "zone.id" => 0, + } + + def self.etld_data + ETLD_DATA + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb.erb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb.erb new file mode 100644 index 0000000..c45feb7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/etld_data.rb.erb @@ -0,0 +1,11 @@ +class DomainName + ETLD_DATA_DATE = '<%= etld_data_date.utc.strftime('%Y-%m-%dT%H:%M:%SZ') %>' + + ETLD_DATA = { +<% etld_data.each_pair { |key, value| %> <%= key.inspect %> => <%= value.inspect %>, +<% } %> } + + def self.etld_data + ETLD_DATA + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/punycode.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/punycode.rb new file mode 100644 index 0000000..b945a2e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/punycode.rb @@ -0,0 +1,283 @@ +# -*- coding: utf-8 -*- +#-- +# punycode.rb - PunyCode encoder for the Domain Name library +# +# Copyright (C) 2011-2017 Akinori MUSHA, All rights reserved. +# +# Ported from puny.c, a part of VeriSign XCode (encode/decode) IDN +# Library. +# +# Copyright (C) 2000-2002 Verisign Inc., All rights reserved. +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the following +# conditions are met: +# +# 1) Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# +# 2) Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# 3) Neither the name of the VeriSign Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# This software is licensed under the BSD open source license. For more +# information visit www.opensource.org. +# +# Authors: +# John Colosi (VeriSign) +# Srikanth Veeramachaneni (VeriSign) +# Nagesh Chigurupati (Verisign) +# Praveen Srinivasan(Verisign) +#++ + +class DomainName + module Punycode + BASE = 36 + TMIN = 1 + TMAX = 26 + SKEW = 38 + DAMP = 700 + INITIAL_BIAS = 72 + INITIAL_N = 0x80 + DELIMITER = '-'.freeze + + MAXINT = (1 << 32) - 1 + + LOBASE = BASE - TMIN + CUTOFF = LOBASE * TMAX / 2 + + RE_NONBASIC = /[^\x00-\x7f]/ + + # Returns the numeric value of a basic code point (for use in + # representing integers) in the range 0 to base-1, or nil if cp + # does not represent a value. + DECODE_DIGIT = {}.tap { |map| + # ASCII A..Z map to 0..25 + # ASCII a..z map to 0..25 + (0..25).each { |i| map[65 + i] = map[97 + i] = i } + # ASCII 0..9 map to 26..35 + (26..35).each { |i| map[22 + i] = i } + } + + # Returns the basic code point whose value (when used for + # representing integers) is d, which must be in the range 0 to + # BASE-1. The lowercase form is used unless flag is true, in + # which case the uppercase form is used. The behavior is + # undefined if flag is nonzero and digit d has no uppercase + # form. + ENCODE_DIGIT = proc { |d, flag| + (d + 22 + (d < 26 ? 75 : 0) - (flag ? (1 << 5) : 0)).chr + # 0..25 map to ASCII a..z or A..Z + # 26..35 map to ASCII 0..9 + } + + DOT = '.'.freeze + PREFIX = 'xn--'.freeze + + # Most errors we raise are basically a kind of ArgumentError. + class ArgumentError < ::ArgumentError; end + class BufferOverflowError < ArgumentError; end + + class << self + # Encode a +string+ in Punycode + def encode(string) + input = string.unpack('U*') + output = '' + + # Initialize the state + n = INITIAL_N + delta = 0 + bias = INITIAL_BIAS + + # Handle the basic code points + input.each { |cp| output << cp.chr if cp < 0x80 } + + h = b = output.length + + # h is the number of code points that have been handled, b is the + # number of basic code points, and out is the number of characters + # that have been output. + + output << DELIMITER if b > 0 + + # Main encoding loop + + while h < input.length + # All non-basic code points < n have been handled already.
Find + # the next larger one + + m = MAXINT + input.each { |cp| + m = cp if (n...m) === cp + } + + # Increase delta enough to advance the decoder's <n,i> state to + # <m,0>, but guard against overflow + + delta += (m - n) * (h + 1) + raise BufferOverflowError if delta > MAXINT + n = m + + input.each { |cp| + # AMC-ACE-Z can use this simplified version instead + if cp < n + delta += 1 + raise BufferOverflowError if delta > MAXINT + elsif cp == n + # Represent delta as a generalized variable-length integer + q = delta + k = BASE + loop { + t = k <= bias ? TMIN : k - bias >= TMAX ? TMAX : k - bias + break if q < t + q, r = (q - t).divmod(BASE - t) + output << ENCODE_DIGIT[t + r, false] + k += BASE + } + + output << ENCODE_DIGIT[q, false] + + # Adapt the bias + delta = h == b ? delta / DAMP : delta >> 1 + delta += delta / (h + 1) + bias = 0 + while delta > CUTOFF + delta /= LOBASE + bias += BASE + end + bias += (LOBASE + 1) * delta / (delta + SKEW) + + delta = 0 + h += 1 + end + } + + delta += 1 + n += 1 + end + + output + end + + # Encode a hostname using IDN/Punycode algorithms + def encode_hostname(hostname) + hostname.match(RE_NONBASIC) or return hostname + + hostname.split(DOT).map { |name| + if name.match(RE_NONBASIC) + PREFIX + encode(name) + else + name + end + }.join(DOT) + end + + # Decode a +string+ encoded in Punycode + def decode(string) + # Initialize the state + n = INITIAL_N + i = 0 + bias = INITIAL_BIAS + + if j = string.rindex(DELIMITER) + b = string[0...j] + + b.match(RE_NONBASIC) and + raise ArgumentError, "Illegal character is found in basic part: #{string.inspect}" + + # Handle the basic code points + + output = b.unpack('U*') + u = string[(j + 1)..-1] + else + output = [] + u = string + end + + # Main decoding loop: Start just after the last delimiter if any + # basic code points were copied; start at the beginning + # otherwise. + + input = u.unpack('C*') + input_length = input.length + h = 0 + out = output.length + + while h < input_length + # Decode a generalized variable-length integer into delta, + # which gets added to i. The overflow checking is easier + # if we increase i as we go, then subtract off its starting + # value at the end to obtain delta. + + oldi = i + w = 1 + k = BASE + + loop { + digit = DECODE_DIGIT[input[h]] or + raise ArgumentError, "Illegal character is found in non-basic part: #{string.inspect}" + h += 1 + i += digit * w + raise BufferOverflowError if i > MAXINT + t = k <= bias ? TMIN : k - bias >= TMAX ? TMAX : k - bias + break if digit < t + w *= BASE - t + raise BufferOverflowError if w > MAXINT + k += BASE + h < input_length or raise ArgumentError, "Malformed input given: #{string.inspect}" + } + + # Adapt the bias + delta = oldi == 0 ? 
i / DAMP : (i - oldi) >> 1 + delta += delta / (out + 1) + bias = 0 + while delta > CUTOFF + delta /= LOBASE + bias += BASE + end + bias += (LOBASE + 1) * delta / (delta + SKEW) + + # i was supposed to wrap around from out+1 to 0, incrementing + # n each time, so we'll fix that now: + + q, i = i.divmod(out + 1) + n += q + raise BufferOverflowError if n > MAXINT + + # Insert n at position i of the output: + + output[i, 0] = n + + out += 1 + i += 1 + end + output.pack('U*') + end + + # Decode a hostname using IDN/Punycode algorithms + def decode_hostname(hostname) + hostname.gsub(/(\A|#{Regexp.quote(DOT)})#{Regexp.quote(PREFIX)}([^#{Regexp.quote(DOT)}]*)/o) { + $1 << decode($2) + } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/version.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/version.rb new file mode 100644 index 0000000..c37552f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/lib/domain_name/version.rb @@ -0,0 +1,3 @@ +class DomainName + VERSION = '0.5.20190701' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/helper.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/helper.rb new file mode 100644 index 0000000..beb8025 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/helper.rb @@ -0,0 +1,17 @@ +require 'rubygems' +require 'bundler' +begin + Bundler.setup(:default, :development) +rescue Bundler::BundlerError => e + $stderr.puts e.message + $stderr.puts "Run `bundle install` to install missing gems" + exit e.status_code +end +require 'test/unit' + +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib')) +$LOAD_PATH.unshift(File.dirname(__FILE__)) +require 'domain_name' + +class Test::Unit::TestCase +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name-punycode.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name-punycode.rb new file mode 100644 index 0000000..5e601f2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name-punycode.rb @@ -0,0 +1,97 @@ +require 'helper' + +class TestDomainName < Test::Unit::TestCase + test "encode labels just as listed in RFC 3492 #7.1 (slightly modified)" do + [ + ['(A) Arabic (Egyptian)', + [0x0644, 0x064A, 0x0647, 0x0645, 0x0627, 0x0628, 0x062A, 0x0643, 0x0644, + 0x0645, 0x0648, 0x0634, 0x0639, 0x0631, 0x0628, 0x064A, 0x061F], + 'egbpdaj6bu4bxfgehfvwxn'], + ['(B) Chinese (simplified)', + [0x4ED6, 0x4EEC, 0x4E3A, 0x4EC0, 0x4E48, 0x4E0D, 0x8BF4, 0x4E2D, 0x6587], + 'ihqwcrb4cv8a8dqg056pqjye'], + ['(C) Chinese (traditional)', + [0x4ED6, 0x5011, 0x7232, 0x4EC0, 0x9EBD, 0x4E0D, 0x8AAA, 0x4E2D, 0x6587], + 'ihqwctvzc91f659drss3x8bo0yb'], + ['(D) Czech: Pročprostěnemluvíčesky', + [0x0050, 0x0072, 0x006F, 0x010D, 0x0070, 0x0072, 0x006F, 0x0073, 0x0074, + 0x011B, 0x006E, 0x0065, 0x006D, 0x006C, 0x0075, 0x0076, 0x00ED, 0x010D, + 0x0065, 0x0073, 0x006B, 0x0079], + 'Proprostnemluvesky-uyb24dma41a'], + ['(E) Hebrew', + [0x05DC, 0x05DE, 0x05D4, 0x05D4, 0x05DD, 0x05E4, 0x05E9, 0x05D5, 0x05D8, + 0x05DC, 0x05D0, 0x05DE, 0x05D3, 0x05D1, 0x05E8, 0x05D9, 0x05DD, 0x05E2, + 0x05D1, 0x05E8, 0x05D9, 0x05EA], + '4dbcagdahymbxekheh6e0a7fei0b'], + ['(F) Hindi (Devanagari)', + [0x092F, 0x0939, 0x0932, 0x094B, 0x0917, 0x0939, 0x093F, 0x0928, 0x094D, + 0x0926, 0x0940, 0x0915, 0x094D, 0x092F, 0x094B, 0x0902, 0x0928, 0x0939, + 0x0940, 0x0902, 0x092C, 0x094B, 0x0932, 0x0938, 0x0915, 
0x0924, 0x0947, + 0x0939, 0x0948, 0x0902], + 'i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd'], + ['(G) Japanese (kanji and hiragana)', + [0x306A, 0x305C, 0x307F, 0x3093, 0x306A, 0x65E5, 0x672C, 0x8A9E, 0x3092, + 0x8A71, 0x3057, 0x3066, 0x304F, 0x308C, 0x306A, 0x3044, 0x306E, 0x304B], + 'n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa'], + ['(H) Korean (Hangul syllables)', + [0xC138, 0xACC4, 0xC758, 0xBAA8, 0xB4E0, 0xC0AC, 0xB78C, 0xB4E4, 0xC774, + 0xD55C, 0xAD6D, 0xC5B4, 0xB97C, 0xC774, 0xD574, 0xD55C, 0xB2E4, 0xBA74, + 0xC5BC, 0xB9C8, 0xB098, 0xC88B, 0xC744, 0xAE4C], + '989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j' << + 'psd879ccm6fea98c'], + ['(I) Russian (Cyrillic)', + [0x043F, 0x043E, 0x0447, 0x0435, 0x043C, 0x0443, 0x0436, 0x0435, 0x043E, + 0x043D, 0x0438, 0x043D, 0x0435, 0x0433, 0x043E, 0x0432, 0x043E, 0x0440, + 0x044F, 0x0442, 0x043F, 0x043E, 0x0440, 0x0443, 0x0441, 0x0441, 0x043A, + 0x0438], + 'b1abfaaepdrnnbgefbadotcwatmq2g4l'], + ['(J) Spanish: PorquénopuedensimplementehablarenEspañol', + [0x0050, 0x006F, 0x0072, 0x0071, 0x0075, 0x00E9, 0x006E, 0x006F, 0x0070, + 0x0075, 0x0065, 0x0064, 0x0065, 0x006E, 0x0073, 0x0069, 0x006D, 0x0070, + 0x006C, 0x0065, 0x006D, 0x0065, 0x006E, 0x0074, 0x0065, 0x0068, 0x0061, + 0x0062, 0x006C, 0x0061, 0x0072, 0x0065, 0x006E, 0x0045, 0x0073, 0x0070, + 0x0061, 0x00F1, 0x006F, 0x006C], + 'PorqunopuedensimplementehablarenEspaol-fmd56a'], + ['(K) Vietnamese: Tạisaohọkhôngthểch' << + 'ỉnóitiếngViệt', + [0x0054, 0x1EA1, 0x0069, 0x0073, 0x0061, 0x006F, 0x0068, 0x1ECD, 0x006B, + 0x0068, 0x00F4, 0x006E, 0x0067, 0x0074, 0x0068, 0x1EC3, 0x0063, 0x0068, + 0x1EC9, 0x006E, 0x00F3, 0x0069, 0x0074, 0x0069, 0x1EBF, 0x006E, 0x0067, + 0x0056, 0x0069, 0x1EC7, 0x0074], + 'TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g'], + ['(L) 3年B組金八先生', + [0x0033, 0x5E74, 0x0042, 0x7D44, 0x91D1, 0x516B, 0x5148, 0x751F], + '3B-ww4c5e180e575a65lsy2b'], + ['(M) 安室奈美恵-with-SUPER-MONKEYS', + [0x5B89, 0x5BA4, 0x5948, 0x7F8E, 0x6075, 0x002D, 0x0077, 0x0069, 0x0074, + 0x0068, 0x002D, 0x0053, 0x0055, 0x0050, 0x0045, 0x0052, 0x002D, 0x004D, + 0x004F, 0x004E, 0x004B, 0x0045, 0x0059, 0x0053], + '-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n'], + ['(N) Hello-Another-Way-それぞれの場所', + [0x0048, 0x0065, 0x006C, 0x006C, 0x006F, 0x002D, 0x0041, 0x006E, 0x006F, + 0x0074, 0x0068, 0x0065, 0x0072, 0x002D, 0x0057, 0x0061, 0x0079, 0x002D, + 0x305D, 0x308C, 0x305E, 0x308C, 0x306E, 0x5834, 0x6240], + 'Hello-Another-Way--fc4qua05auwb3674vfr0b'], + ['(O) ひとつ屋根の下2', + [0x3072, 0x3068, 0x3064, 0x5C4B, 0x6839, 0x306E, 0x4E0B, 0x0032], + '2-u9tlzr9756bt3uc0v'], + ['(P) MajiでKoiする5秒前', + [0x004D, 0x0061, 0x006A, 0x0069, 0x3067, 0x004B, 0x006F, 0x0069, 0x3059, + 0x308B, 0x0035, 0x79D2, 0x524D], + 'MajiKoi5-783gue6qz075azm5e'], + ['(Q) パフィーdeルンバ', + [0x30D1, 0x30D5, 0x30A3, 0x30FC, 0x0064, 0x0065, 0x30EB, 0x30F3, 0x30D0], + 'de-jg4avhby1noc0d'], + ['(R) そのスピードで', + [0x305D, 0x306E, 0x30B9, 0x30D4, 0x30FC, 0x30C9, 0x3067], + 'd9juau41awczczp'], + ['(S) -> $1.00 <-', + [0x002D, 0x003E, 0x0020, 0x0024, 0x0031, 0x002E, 0x0030, 0x0030, 0x0020, + 0x003C, 0x002D], + '-> $1.00 <--'] + ].each { |title, cps, punycode| + assert_equal punycode, DomainName::Punycode.encode(cps.pack('U*')), title + assert_equal cps.pack('U*').to_nfc, DomainName::Punycode.decode(punycode), title + } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name.rb new file mode 100644 index 0000000..86696cc --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/test/test_domain_name.rb @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +require 'helper' +require 'ipaddr' + +class TestDomainName < Test::Unit::TestCase + test "raise ArgumentError if hostname starts with a dot" do + [ + # Leading dot. + '.com', + '.example', + '.example.com', + '.example.example', + ].each { |hostname| + assert_raises(ArgumentError) { DomainName.new(hostname) } + } + end + + test "accept a String-alike for initialization" do + Object.new.tap { |obj| + def obj.to_str + "Example.org" + end + assert_equal "example.org", DomainName.new(obj).hostname + } + + Object.new.tap { |obj| + def obj.to_str + 123 + end + assert_raises(TypeError) { DomainName.new(obj) } + } + + Object.new.tap { |obj| + assert_raises(TypeError) { DomainName.new(obj) } + } + end + + test "parse canonical domain names correctly" do + [ + # Mixed case. + ['COM', nil, false, 'com', true], + ['example.COM', 'example.com', true, 'com', true], + ['WwW.example.COM', 'example.com', true, 'com', true], + # Unlisted TLD. + ['example', 'example', false, 'example', false], + ['example.example', 'example.example', false, 'example', false], + ['b.example.example', 'example.example', false, 'example', false], + ['a.b.example.example', 'example.example', false, 'example', false], + # Listed, but non-Internet, TLD. + ['local', 'local', false, 'local', false], + ['example.local', 'example.local', false, 'local', false], + ['b.example.local', 'example.local', false, 'local', false], + ['a.b.example.local', 'example.local', false, 'local', false], + # TLD with only 1 rule. + ['biz', nil, false, 'biz', true], + ['domain.biz', 'domain.biz', true, 'biz', true], + ['b.domain.biz', 'domain.biz', true, 'biz', true], + ['a.b.domain.biz', 'domain.biz', true, 'biz', true], + # TLD with some 2-level rules. + ['com', nil, false, 'com', true], + ['example.com', 'example.com', true, 'com', true], + ['b.example.com', 'example.com', true, 'com', true], + ['a.b.example.com', 'example.com', true, 'com', true], + ['uk.com', nil, false, 'com', true], + ['example.uk.com', 'example.uk.com', true, 'com', true], + ['b.example.uk.com', 'example.uk.com', true, 'com', true], + ['a.b.example.uk.com', 'example.uk.com', true, 'com', true], + ['test.ac', 'test.ac', true, 'ac', true], + # TLD with only 1 (wildcard) rule. + ['bd', nil, false, 'bd', true], + ['c.bd', nil, false, 'bd', true], + ['b.c.bd', 'b.c.bd', true, 'bd', true], + ['a.b.c.bd', 'b.c.bd', true, 'bd', true], + # More complex TLD. + ['jp', nil, false, 'jp', true], + ['test.jp', 'test.jp', true, 'jp', true], + ['www.test.jp', 'test.jp', true, 'jp', true], + ['ac.jp', nil, false, 'jp', true], + ['test.ac.jp', 'test.ac.jp', true, 'jp', true], + ['www.test.ac.jp', 'test.ac.jp', true, 'jp', true], + ['kyoto.jp', nil, false, 'jp', true], + ['test.kyoto.jp', 'test.kyoto.jp', true, 'jp', true], + ['ide.kyoto.jp', nil, false, 'jp', true], + ['b.ide.kyoto.jp', 'b.ide.kyoto.jp', true, 'jp', true], + ['a.b.ide.kyoto.jp', 'b.ide.kyoto.jp', true, 'jp', true], + ['c.kobe.jp', nil, false, 'jp', true], + ['b.c.kobe.jp', 'b.c.kobe.jp', true, 'jp', true], + ['a.b.c.kobe.jp', 'b.c.kobe.jp', true, 'jp', true], + ['city.kobe.jp', 'city.kobe.jp', true, 'jp', true], + ['www.city.kobe.jp', 'city.kobe.jp', true, 'jp', true], + # TLD with a wildcard rule and exceptions. 
+ ['ck', nil, false, 'ck', true], + ['test.ck', nil, false, 'ck', true], + ['b.test.ck', 'b.test.ck', true, 'ck', true], + ['a.b.test.ck', 'b.test.ck', true, 'ck', true], + ['www.ck', 'www.ck', true, 'ck', true], + ['www.www.ck', 'www.ck', true, 'ck', true], + # US K12. + ['us', nil, false, 'us', true], + ['test.us', 'test.us', true, 'us', true], + ['www.test.us', 'test.us', true, 'us', true], + ['ak.us', nil, false, 'us', true], + ['test.ak.us', 'test.ak.us', true, 'us', true], + ['www.test.ak.us', 'test.ak.us', true, 'us', true], + ['k12.ak.us', nil, false, 'us', true], + ['test.k12.ak.us', 'test.k12.ak.us', true, 'us', true], + ['www.test.k12.ak.us', 'test.k12.ak.us', true, 'us', true], + # IDN labels. (modified; currently DomainName always converts U-labels to A-labels) + ['食狮.com.cn', 'xn--85x722f.com.cn', true, 'cn', true], + ['食狮.公司.cn', 'xn--85x722f.xn--55qx5d.cn', true, 'cn', true], + ['www.食狮.公司.cn', 'xn--85x722f.xn--55qx5d.cn', true, 'cn', true], + ['shishi.公司.cn', 'shishi.xn--55qx5d.cn', true, 'cn', true], + ['公司.cn', nil, false, 'cn', true], + ['食狮.中国', 'xn--85x722f.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['www.食狮.中国', 'xn--85x722f.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['shishi.中国', 'shishi.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['中国', nil, false, 'xn--fiqs8s', true], + # Same as above, but punycoded. + ['xn--85x722f.com.cn', 'xn--85x722f.com.cn', true, 'cn', true], + ['xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn', true, 'cn', true], + ['www.xn--85x722f.xn--55qx5d.cn', 'xn--85x722f.xn--55qx5d.cn', true, 'cn', true], + ['shishi.xn--55qx5d.cn', 'shishi.xn--55qx5d.cn', true, 'cn', true], + ['xn--55qx5d.cn', nil, false, 'cn', true], + ['xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['www.xn--85x722f.xn--fiqs8s', 'xn--85x722f.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['shishi.xn--fiqs8s', 'shishi.xn--fiqs8s', true, 'xn--fiqs8s', true], + ['xn--fiqs8s', nil, false, 'xn--fiqs8s', true], + ].each { |hostname, domain, canonical, tld, canonical_tld| + dn = DomainName.new(hostname) + assert_equal(domain, dn.domain, hostname + ':domain') + assert_equal(canonical, dn.canonical?, hostname + ':canonical?') + assert_equal(tld, dn.tld, hostname + ':tld') + assert_equal(canonical_tld, dn.canonical_tld?, hostname + ':canonical_tld?') + } + end + + test "compare hostnames correctly" do + [ + ["foo.com", "abc.foo.com", 1], + ["COM", "abc.foo.com", 1], + ["abc.def.foo.com", "foo.com", -1], + ["abc.def.foo.com", "ABC.def.FOO.com", 0], + ["abc.def.foo.com", "bar.com", nil], + ].each { |x, y, v| + dx, dy = DomainName(x), DomainName(y) + [ + [dx, y, v], + [dx, dy, v], + [dy, x, v ? -v : v], + [dy, dx, v ? 
-v : v], + ].each { |a, b, expected| + assert_equal expected, a <=> b + case expected + when 1 + assert_equal(true, a > b) + assert_equal(true, a >= b) + assert_equal(false, a == b) + assert_equal(false, a <= b) + assert_equal(false, a < b) + when -1 + assert_equal(true, a < b) + assert_equal(true, a <= b) + assert_equal(false, a == b) + assert_equal(false, a >= b) + assert_equal(false, a > b) + when 0 + assert_equal(false, a < b) + assert_equal(true, a <= b) + assert_equal(true, a == b) + assert_equal(true, a >= b) + assert_equal(false, a > b) + when nil + assert_equal(nil, a < b) + assert_equal(nil, a <= b) + assert_equal(false, a == b) + assert_equal(nil, a >= b) + assert_equal(nil, a > b) + end + } + } + end + + test "check cookie domain correctly" do + { + 'com' => [ + ['com', false], + ['example.com', false], + ['foo.example.com', false], + ['bar.foo.example.com', false], + ], + + 'example.com' => [ + ['com', false], + ['example.com', true], + ['foo.example.com', false], + ['bar.foo.example.com', false], + ], + + 'foo.example.com' => [ + ['com', false], + ['example.com', true], + ['foo.example.com', true], + ['foo.Example.com', true], + ['bar.foo.example.com', false], + ['bar.Foo.Example.com', false], + ], + + 'b.sapporo.jp' => [ + ['jp', false], + ['sapporo.jp', false], + ['b.sapporo.jp', false], + ['a.b.sapporo.jp', false], + ], + + 'b.c.sapporo.jp' => [ + ['jp', false], + ['sapporo.jp', false], + ['c.sapporo.jp', false], + ['b.c.sapporo.jp', true], + ['a.b.c.sapporo.jp', false], + ], + + 'b.c.d.sapporo.jp' => [ + ['jp', false], + ['sapporo.jp', false], + ['d.sapporo.jp', false], + ['c.d.sapporo.jp', true], + ['b.c.d.sapporo.jp', true], + ['a.b.c.d.sapporo.jp', false], + ], + + 'city.sapporo.jp' => [ + ['jp', false], + ['sapporo.jp', false], + ['city.sapporo.jp', true], + ['a.city.sapporo.jp', false], + ], + + 'b.city.sapporo.jp' => [ + ['jp', false], + ['sapporo.jp', false], + ['city.sapporo.jp', true], + ['b.city.sapporo.jp', true], + ['a.b.city.sapporo.jp', false], + ], + }.each_pair { |host, pairs| + dn = DomainName(host) + assert_equal(true, dn.cookie_domain?(host.upcase, true), dn.to_s) + assert_equal(true, dn.cookie_domain?(host.downcase, true), dn.to_s) + assert_equal(false, dn.cookie_domain?("www." << host, true), dn.to_s) + pairs.each { |domain, expected| + assert_equal(expected, dn.cookie_domain?(domain), "%s - %s" % [dn.to_s, domain]) + assert_equal(expected, dn.cookie_domain?(DomainName(domain)), "%s - %s" % [dn.to_s, domain]) + } + } + end + + test "parse IPv4 addresses" do + a = '192.168.10.20' + dn = DomainName(a) + assert_equal(a, dn.hostname) + assert_equal(true, dn.ipaddr?) + assert_equal(IPAddr.new(a), dn.ipaddr) + assert_equal(true, dn.cookie_domain?(a)) + assert_equal(true, dn.cookie_domain?(a, true)) + assert_equal(true, dn.cookie_domain?(dn)) + assert_equal(true, dn.cookie_domain?(dn, true)) + assert_equal(false, dn.cookie_domain?('168.10.20')) + assert_equal(false, dn.cookie_domain?('20')) + assert_equal(nil, dn.superdomain) + end + + test "parse IPv6 addresses" do + a = '2001:200:dff:fff1:216:3eff:feb1:44d7' + b = '2001:0200:0dff:fff1:0216:3eff:feb1:44d7' + [b, b.upcase, "[#{b}]", "[#{b.upcase}]"].each { |host| + dn = DomainName(host) + assert_equal("[#{a}]", dn.uri_host) + assert_equal(a, dn.hostname) + assert_equal(true, dn.ipaddr?) 
+ assert_equal(IPAddr.new(a), dn.ipaddr) + assert_equal(true, dn.cookie_domain?(host)) + assert_equal(true, dn.cookie_domain?(host, true)) + assert_equal(true, dn.cookie_domain?(dn)) + assert_equal(true, dn.cookie_domain?(dn, true)) + assert_equal(true, dn.cookie_domain?(a)) + assert_equal(true, dn.cookie_domain?(a, true)) + assert_equal(nil, dn.superdomain) + } + end + + test "get superdomain" do + [ + %w[www.sub.example.local sub.example.local example.local local], + %w[www.sub.example.com sub.example.com example.com com], + ].each { |domain, *superdomains| + dn = DomainName(domain) + superdomains.each { |superdomain| + sdn = DomainName(superdomain) + assert_equal sdn, dn.superdomain + dn = sdn + } + assert_equal nil, dn.superdomain + } + end + + test "have idn methods" do + dn = DomainName("金八先生.B組.3年.日本語ドメイン名Example.日本") + + assert_equal "xn--44q1cv48kq8x.xn--b-gf6c.xn--3-pj3b.xn--example-6q4fyliikhk162btq3b2zd4y2o.xn--wgv71a", dn.hostname + assert_equal "金八先生.b組.3年.日本語ドメイン名example.日本", dn.hostname_idn + assert_equal "xn--example-6q4fyliikhk162btq3b2zd4y2o.xn--wgv71a", dn.domain + assert_equal "日本語ドメイン名example.日本", dn.domain_idn + assert_equal "xn--wgv71a", dn.tld + assert_equal "日本", dn.tld_idn + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/tool/gen_etld_data.rb b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/tool/gen_etld_data.rb new file mode 100644 index 0000000..4340c8e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/domain_name-0.5.20190701/tool/gen_etld_data.rb @@ -0,0 +1,63 @@ +#!/usr/bin/env ruby + +require 'rubygems' +require 'pathname' +$basedir = Pathname.new(__FILE__).dirname.parent +$LOAD_PATH.unshift $basedir + 'lib' +require 'domain_name' +require 'set' +require 'erb' + +def main + dat_file = $basedir + 'data' + 'public_suffix_list.dat' + dir = $basedir + 'lib' + 'domain_name' + erb_file = dir + 'etld_data.rb.erb' + rb_file = dir + 'etld_data.rb' + + etld_data_date = File.mtime(dat_file) + + File.open(dat_file, 'r:utf-8') { |dat| + etld_data = parse(dat) + File.open(rb_file, 'w:utf-8') { |rb| + File.open(erb_file, 'r:utf-8') { |erb| + rb.print ERB.new(erb.read).result(binding) + } + } + } +end + +def normalize_hostname(domain) + DomainName.normalize(domain) +end + +def parse(f) + {}.tap { |table| + tlds = Set[] + f.each_line { |line| + line.sub!(%r{//.*}, '') + line.strip! + next if line.empty? 
+ case line + when /^local$/ + # ignore .local + next + when /^([^!*]+)$/ + domain = normalize_hostname($1) + value = 0 + when /^\*\.([^!*]+)$/ + domain = normalize_hostname($1) + value = -1 + when /^\!([^!*]+)$/ + domain = normalize_hostname($1) + value = 1 + else + raise "syntax error: #{line}" + end + tld = domain.match(/(?:^|\.)([^.]+)$/)[1] + table[tld] ||= 1 + table[domain] = value + } + } +end + +main() diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/LICENSE b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/LICENSE new file mode 100644 index 0000000..0b67483 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 Brandon Keepers + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/README.md b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/README.md new file mode 100644 index 0000000..2691a02 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/README.md @@ -0,0 +1,266 @@ +# dotenv [![Gem Version](https://badge.fury.io/rb/dotenv.svg)](https://badge.fury.io/rb/dotenv) [![Join the chat at https://gitter.im/bkeepers/dotenv](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/bkeepers/dotenv?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +Shim to load environment variables from `.env` into `ENV` in *development*. + +Storing [configuration in the environment](http://12factor.net/config) is one of the tenets of a [twelve-factor app](http://12factor.net). Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. + +But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. dotenv loads variables from a `.env` file into `ENV` when the environment is bootstrapped. + +## Installation + +### Rails + +Add this line to the top of your application's Gemfile: + +```ruby +gem 'dotenv-rails', groups: [:development, :test] +``` + +And then execute: + +```shell +$ bundle +``` + +#### Note on load order + +dotenv is initialized in your Rails app during the `before_configuration` callback, which is fired when the `Application` constant is defined in `config/application.rb` with `class Application < Rails::Application`. If you need it to be initialized sooner, you can manually call `Dotenv::Railtie.load`. 
+
+```ruby
+# config/application.rb
+Bundler.require(*Rails.groups)
+
+# Load dotenv only in development or test environment
+if ['development', 'test'].include? ENV['RAILS_ENV']
+  Dotenv::Railtie.load
+end
+
+HOSTNAME = ENV['HOSTNAME']
+```
+
+If you use gems that require environment variables to be set before they are loaded, then list `dotenv-rails` in the `Gemfile` before those other gems and require `dotenv/rails-now`.
+
+```ruby
+gem 'dotenv-rails', require: 'dotenv/rails-now'
+gem 'gem-that-requires-env-variables'
+```
+
+### Sinatra or Plain ol' Ruby
+
+Install the gem:
+
+```shell
+$ gem install dotenv
+```
+
+As early as possible in your application bootstrap process, load `.env`:
+
+```ruby
+require 'dotenv/load'
+
+# or
+require 'dotenv'
+Dotenv.load
+```
+
+By default, `load` will look for a file called `.env` in the current working directory. Pass in multiple files and they will be loaded in order. The first value set for a variable will win.
+
+```ruby
+require 'dotenv'
+Dotenv.load('file1.env', 'file2.env')
+```
+
+Alternatively, you can use the `dotenv` executable to launch your application:
+
+```shell
+$ dotenv ./script.rb
+```
+
+The `dotenv` executable also accepts a single flag, `-f`. Its value should be a comma-separated list of configuration files, in the order of most important to least. All of the files must exist. There _must_ be a space between the flag and its value.
+
+```
+$ dotenv -f ".env.local,.env" ./script.rb
+```
+
+To ensure `.env` is loaded in rake, load the tasks:
+
+```ruby
+require 'dotenv/tasks'
+
+task mytask: :dotenv do
+  # things that require .env
+end
+```
+
+## Usage
+
+Add your application configuration to your `.env` file in the root of your project:
+
+```shell
+S3_BUCKET=YOURS3BUCKET
+SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+Whenever your application loads, these variables will be available in `ENV`:
+
+```ruby
+config.fog_directory = ENV['S3_BUCKET']
+```
+
+You may also add `export` in front of each line so you can `source` the file in bash:
+
+```shell
+export S3_BUCKET=YOURS3BUCKET
+export SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+### Multi-line values
+
+If you need multiline variables, for example private keys, you can double quote strings and use the `\n` character for newlines:
+
+```shell
+PRIVATE_KEY="-----BEGIN RSA PRIVATE KEY-----\nHkVN9...\n-----END RSA PRIVATE KEY-----\n"
+```
+
+Alternatively, multi-line values with line breaks are now supported for quoted values.
+
+```shell
+PRIVATE_KEY="-----BEGIN RSA PRIVATE KEY-----
+...
+HkVN9...
+...
+-----END RSA PRIVATE KEY-----"
+```
+
+This is particularly helpful when using the Heroku command line plugin [`heroku-config`](https://github.com/xavdid/heroku-config) to pull down configuration variables that may have line breaks.
+
+### Command Substitution
+
+If you need the output of a command in one of your variables, simply add it with `$(your_command)`:
+
+```shell
+DATABASE_URL="postgres://$(whoami)@localhost/my_database"
+```
+
+### Variable Substitution
+
+If you need the value of another variable in one of your variables, you can reference it with `${VAR}` or often just `$VAR` in unquoted or double-quoted values.
+
+```shell
+DATABASE_URL="postgres://${USER}@localhost/my_database"
+```
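+
+The bare `$VAR` form works the same way in unquoted values. For instance (a hypothetical `.env` entry; `HOME` is resolved from the existing environment):
+
+```shell
+CACHE_DIR=$HOME/cache/my_app
+```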
+
+If a value contains a `$` and it is not intended to be a variable, wrap it in single quotes.
+
+```shell
+PASSWORD='pas$word'
+```
+
+### Comments
+
+Comments may be added to your file as such:
+
+```shell
+# This is a comment
+SECRET_KEY=YOURSECRETKEYGOESHERE # comment
+SECRET_HASH="something-with-a-#-hash"
+```
+
+### Required Keys
+
+If a particular configuration value is required but not set, it's appropriate to raise an error.
+
+To require configuration keys:
+
+```ruby
+# config/initializers/dotenv.rb
+
+Dotenv.require_keys("SERVICE_APP_ID", "SERVICE_KEY", "SERVICE_SECRET")
+```
+
+If any of the configuration keys above are not set, your application will raise an error during initialization. This method is preferred because it prevents runtime errors in a production application due to improper configuration.
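+
+For example, with `SERVICE_KEY` missing from both `.env` and the environment, initialization aborts with a `Dotenv::MissingKeys` error (a minimal sketch; the message format comes from `lib/dotenv/missing_keys.rb` below):
+
+```ruby
+Dotenv.require_keys("SERVICE_KEY")
+# raises Dotenv::MissingKeys:
+#   Missing required configuration key: ["SERVICE_KEY"]
+```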
+
+### Parsing
+
+To parse a list of env files for programmatic inspection without modifying the ENV:
+
+```ruby
+Dotenv.parse(".env.local", ".env")
+# => {'S3_BUCKET' => 'YOURS3BUCKET', 'SECRET_KEY' => 'YOURSECRETKEYGOESHERE', ...}
+```
+
+This method returns a hash of the ENV var name/value pairs.
+
+## Frequently Answered Questions
+
+### Can I use dotenv in production?
+
+dotenv was originally created to load configuration variables into `ENV` in *development*. There are typically better ways to manage configuration in production environments - such as `/etc/environment` managed by [Puppet](https://github.com/puppetlabs/puppet) or [Chef](https://github.com/chef/chef), `heroku config`, etc.
+
+However, some find dotenv to be a convenient way to configure Rails applications in staging and production environments, and you can do that by defining environment-specific files like `.env.production` or `.env.test`.
+
+If you use this gem to handle env vars for multiple Rails environments (development, test, production, etc.), please note that env vars that are general to all environments should be stored in `.env`. Then, environment-specific env vars should be stored in `.env.<environment>` (for example, `.env.production`).
+
+### What other .env* files can I use?
+
+`dotenv-rails` will override in the following order (highest defined variable overrides lower):
+
+| Hierarchy Priority | Filename                 | Environment          | Should I `.gitignore` it?                           | Notes                                                        |
+| ------------------ | ------------------------ | -------------------- | --------------------------------------------------- | ------------------------------------------------------------ |
+| 1st (highest)      | `.env.development.local` | Development          | Yes!                                                 | Local overrides of environment-specific settings.            |
+| 1st                | `.env.test.local`        | Test                 | Yes!                                                 | Local overrides of environment-specific settings.            |
+| 1st                | `.env.production.local`  | Production           | Yes!                                                 | Local overrides of environment-specific settings.            |
+| 2nd                | `.env.local`             | Wherever the file is | Definitely.                                          | Local overrides. This file is loaded for all environments _except_ `test`. |
+| 3rd                | `.env.development`       | Development          | No.                                                  | Shared environment-specific settings                         |
+| 3rd                | `.env.test`              | Test                 | No.                                                  | Shared environment-specific settings                         |
+| 3rd                | `.env.production`        | Production           | No.                                                  | Shared environment-specific settings                         |
+| Last               | `.env`                   | All Environments     | Depends (See [below](#should-i-commit-my-env-file))  | The Original®                                                |
+
+### Should I commit my .env file?
+
+Credentials should only be accessible on the machines that need access to them. Never commit sensitive information to a repository that is not needed by every development machine and server.
+
+You can use the `-t` or `--template` flag on the dotenv cli to create a template of your `.env` file.
+
+```shell
+$ dotenv -t .env
+```
+
+A template will be created in your working directory named `{FILENAME}.template`. So in the above example, it would create a `.env.template` file.
+
+The template will contain all the environment variables in your `.env` file but with their values set to the variable names.
+
+```shell
+# .env
+S3_BUCKET=YOURS3BUCKET
+SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+Would become
+
+```shell
+# .env.template
+S3_BUCKET=S3_BUCKET
+SECRET_KEY=SECRET_KEY
+```
+
+Personally, I prefer to commit the `.env` file with development-only settings. This makes it easy for other developers to get started on the project without compromising credentials for other environments. If you follow this advice, make sure that all the credentials for your development environment are different from your other deployments and that the development credentials do not have access to any confidential data.
+
+### Why is it not overriding existing `ENV` variables?
+
+By default, it **won't** overwrite existing environment variables, as dotenv assumes the deployment environment has more knowledge about configuration than the application does. To overwrite existing environment variables you can use `Dotenv.overload`.
+
+You can also use the `-o` or `--overload` flag on the dotenv cli to override existing `ENV` variables.
+
+```shell
+$ dotenv -o -f ".env.local,.env"
+```
+
+## Contributing
+
+If you want a better idea of how dotenv works, check out the [Ruby Rogues Code Reading of dotenv](https://www.youtube.com/watch?v=lKmY_0uY86s).
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Added some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/bin/dotenv b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/bin/dotenv
new file mode 100644
index 0000000..74db937
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/bin/dotenv
@@ -0,0 +1,4 @@
+#!/usr/bin/env ruby
+
+require "dotenv/cli"
+Dotenv::CLI.new(ARGV).run
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv.rb
new file mode 100644
index 0000000..18ddeeb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv.rb
@@ -0,0 +1,86 @@
+require "dotenv/parser"
+require "dotenv/environment"
+require "dotenv/missing_keys"
+
+# The top level Dotenv module. The entrypoint for the application logic.
+module Dotenv
+  class << self
+    attr_accessor :instrumenter
+  end
+
+  module_function
+
+  def load(*filenames)
+    with(*filenames) do |f|
+      ignoring_nonexistent_files do
+        env = Environment.new(f, true)
+        instrument("dotenv.load", env: env) { env.apply }
+      end
+    end
+  end
+
+  # same as `load`, but raises Errno::ENOENT if any files don't exist
+  def load!(*filenames)
+    with(*filenames) do |f|
+      env = Environment.new(f, true)
+      instrument("dotenv.load", env: env) { env.apply }
+    end
+  end
+
+  # same as `load`, but will override existing values in `ENV`
+  def overload(*filenames)
+    with(*filenames) do |f|
+      ignoring_nonexistent_files do
+        env = Environment.new(f, false)
+        instrument("dotenv.overload", env: env) { env.apply! }
+      end
+    end
+  end
+
+  # same as `overload`, but raises Errno::ENOENT if any files don't exist
+  def overload!(*filenames)
+    with(*filenames) do |f|
+      env = Environment.new(f, false)
+      instrument("dotenv.overload", env: env) { env.apply!
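+        # apply! replaces values that already exist in ENV (overload semantics)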
} + end + end + + # returns a hash of parsed key/value pairs but does not modify ENV + def parse(*filenames) + with(*filenames) do |f| + ignoring_nonexistent_files do + Environment.new(f, false) + end + end + end + + # Internal: Helper to expand list of filenames. + # + # Returns a hash of all the loaded environment variables. + def with(*filenames) + filenames << ".env" if filenames.empty? + + filenames.reduce({}) do |hash, filename| + hash.merge!(yield(File.expand_path(filename)) || {}) + end + end + + def instrument(name, payload = {}, &block) + if instrumenter + instrumenter.instrument(name, payload, &block) + else + yield + end + end + + def require_keys(*keys) + missing_keys = keys.flatten - ::ENV.keys + return if missing_keys.empty? + raise MissingKeys, missing_keys + end + + def ignoring_nonexistent_files + yield + rescue Errno::ENOENT + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/cli.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/cli.rb new file mode 100644 index 0000000..1a586a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/cli.rb @@ -0,0 +1,57 @@ +require "dotenv" +require "dotenv/version" +require "dotenv/template" +require "optparse" + +module Dotenv + # The command line interface + class CLI < OptionParser + attr_reader :argv, :filenames, :overload + + def initialize(argv = []) + @argv = argv.dup + @filenames = [] + @overload = false + + super "Usage: dotenv [options]" + separator "" + + on("-f FILES", Array, "List of env files to parse") do |list| + @filenames = list + end + + on("-o", "--overload", "override existing ENV variables") do + @overload = true + end + + on("-h", "--help", "Display help") do + puts self + exit + end + + on("-v", "--version", "Show version") do + puts "dotenv #{Dotenv::VERSION}" + exit + end + + on("-t", "--template=FILE", "Create a template env file") do |file| + template = Dotenv::EnvTemplate.new(file) + template.create_template + end + + order!(@argv) + end + + def run + if @overload + Dotenv.overload!(*@filenames) + else + Dotenv.load!(*@filenames) + end + rescue Errno::ENOENT => e + abort e.message + else + exec(*@argv) unless @argv.empty? + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/environment.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/environment.rb new file mode 100644 index 0000000..c7d21d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/environment.rb @@ -0,0 +1,28 @@ +module Dotenv + # This class inherits from Hash and represents the environment into which + # Dotenv will load key value pairs from a file. + class Environment < Hash + attr_reader :filename + + def initialize(filename, is_load = false) + @filename = filename + load(is_load) + end + + def load(is_load = false) + update Parser.call(read, is_load) + end + + def read + File.open(@filename, "rb:bom|utf-8", &:read) + end + + def apply + each { |k, v| ENV[k] ||= v } + end + + def apply! 
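+      # unlike #apply above, overwrite keys that are already set in ENV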
+ each { |k, v| ENV[k] = v } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/load.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/load.rb new file mode 100644 index 0000000..7ad8fef --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/load.rb @@ -0,0 +1,2 @@ +require "dotenv" +Dotenv.load diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/missing_keys.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/missing_keys.rb new file mode 100644 index 0000000..ecedcbf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/missing_keys.rb @@ -0,0 +1,10 @@ +module Dotenv + class Error < StandardError; end + + class MissingKeys < Error # :nodoc: + def initialize(keys) + key_word = "key#{keys.size > 1 ? "s" : ""}" + super("Missing required configuration #{key_word}: #{keys.inspect}") + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/parser.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/parser.rb new file mode 100644 index 0000000..d2c8028 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/parser.rb @@ -0,0 +1,109 @@ +require "dotenv/substitutions/variable" +require "dotenv/substitutions/command" if RUBY_VERSION > "1.8.7" + +module Dotenv + class FormatError < SyntaxError; end + + # This class enables parsing of a string for key value pairs to be returned + # and stored in the Environment. It allows for variable substitutions and + # exporting of variables. + class Parser + @substitutions = + [Dotenv::Substitutions::Variable, Dotenv::Substitutions::Command] + + LINE = / + (?:^|\A) # beginning of line + \s* # leading whitespace + (?:export\s+)? # optional export + ([\w.]+) # key + (?:\s*=\s*?|:\s+?) # separator + ( # optional value begin + \s*'(?:\\'|[^'])*' # single quoted value + | # or + \s*"(?:\\"|[^"])*" # double quoted value + | # or + [^\#\r\n]+ # unquoted value + )? # value end + \s* # trailing whitespace + (?:\#.*)? # optional comment + (?:$|\z) # end of line + /x + + class << self + attr_reader :substitutions + + def call(string, is_load = false) + new(string, is_load).call + end + end + + def initialize(string, is_load = false) + @string = string + @hash = {} + @is_load = is_load + end + + def call + # Convert line breaks to same format + lines = @string.gsub(/\r\n?/, "\n") + # Process matches + lines.scan(LINE).each do |key, value| + @hash[key] = parse_value(value || "") + end + # Process non-matches + lines.gsub(LINE, "").split(/[\n\r]+/).each do |line| + parse_line(line) + end + @hash + end + + private + + def parse_line(line) + if line.split.first == "export" + if variable_not_set?(line) + raise FormatError, "Line #{line.inspect} has an unset variable" + end + end + end + + def parse_value(value) + # Remove surrounding quotes + value = value.strip.sub(/\A(['"])(.*)\1\z/m, '\2') + maybe_quote = Regexp.last_match(1) + value = unescape_value(value, maybe_quote) + perform_substitutions(value, maybe_quote) + end + + def unescape_characters(value) + value.gsub(/\\([^$])/, '\1') + end + + def expand_newlines(value) + value.gsub('\n', "\n").gsub('\r', "\r") + end + + def variable_not_set?(line) + !line.split[1..-1].all? { |var| @hash.member?(var) } + end + + def unescape_value(value, maybe_quote) + if maybe_quote == '"' + unescape_characters(expand_newlines(value)) + elsif maybe_quote.nil? 
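+        # unquoted: remove backslash escapes, but (unlike double-quoted
+        # values) do not expand \n or \r into real newlines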
+        unescape_characters(value)
+      else
+        value
+      end
+    end
+
+    def perform_substitutions(value, maybe_quote)
+      if maybe_quote != "'"
+        self.class.substitutions.each do |proc|
+          value = proc.call(value, @hash, @is_load)
+        end
+      end
+      value
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/command.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/command.rb
new file mode 100644
index 0000000..724f87d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/command.rb
@@ -0,0 +1,41 @@
+require "English"
+
+module Dotenv
+  module Substitutions
+    # Substitute shell commands in a value.
+    #
+    #   SHA=$(git rev-parse HEAD)
+    #
+    module Command
+      class << self
+        INTERPOLATED_SHELL_COMMAND = /
+          (?<backslash>\\)?  # is it escaped with a backslash?
+          \$                 # literal $
+          (?<cmd>            # collect command content for eval
+            \(               # require opening paren
+            (?:[^()]|\g<cmd>)+ # allow any number of non-parens, or balanced
+                             # parens (by nesting the <cmd> expression
+                             # recursively)
+            \)               # require closing paren
+          )
+        /x
+
+        def call(value, _env, _is_load)
+          # Process interpolated shell commands
+          value.gsub(INTERPOLATED_SHELL_COMMAND) do |*|
+            # Eliminate opening and closing parentheses
+            command = $LAST_MATCH_INFO[:cmd][1..-2]
+
+            if $LAST_MATCH_INFO[:backslash]
+              # Command is escaped, don't replace it.
+              $LAST_MATCH_INFO[0][1..-1]
+            else
+              # Execute the command and return the value
+              `#{command}`.chomp
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/variable.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/variable.rb
new file mode 100644
index 0000000..4dba441
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/substitutions/variable.rb
@@ -0,0 +1,43 @@
+require "English"
+
+module Dotenv
+  module Substitutions
+    # Substitute variables in a value.
+    #
+    #   HOST=example.com
+    #   URL="https://$HOST"
+    #
+    module Variable
+      class << self
+        VARIABLE = /
+          (\\)?         # is it escaped with a backslash?
+          (\$)          # literal $
+          (?!\()        # shouldn't be followed by a parenthesis
+          \{?           # allow brace wrapping
+          ([A-Z0-9_]+)? # optional alphanumerics and underscores
+          \}?           # closing brace
+        /xi
+
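+        # During a plain load (is_load true), values already present in ENV
+        # take precedence over values from the file when substituting; during
+        # an overload, the file's values win.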
+        def call(value, env, is_load)
+          combined_env = is_load ? env.merge(ENV) : ENV.to_h.merge(env)
+          value.gsub(VARIABLE) do |variable|
+            match = $LAST_MATCH_INFO
+            substitute(match, variable, combined_env)
+          end
+        end
+
+        private
+
+        def substitute(match, variable, env)
+          if match[1] == "\\"
+            variable[1..-1]
+          elsif match[3]
+            env.fetch(match[3], "")
+          else
+            variable
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/tasks.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/tasks.rb
new file mode 100644
index 0000000..a0b1288
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/tasks.rb
@@ -0,0 +1,7 @@
+desc "Load environment settings from .env"
+task :dotenv do
+  require "dotenv"
+  Dotenv.load
+end
+
+task environment: :dotenv
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/template.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/template.rb
new file mode 100644
index 0000000..1aa37a4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/template.rb
@@ -0,0 +1,26 @@
+module Dotenv
+  EXPORT_COMMAND = "export ".freeze
+  # Class for creating a template from a env file
+  class EnvTemplate
+    def initialize(env_file)
+      @env_file = env_file
+    end
+
+    def create_template
+      File.open(@env_file, "r") do |env_file|
+        File.open("#{@env_file}.template", "w") do |env_template|
+          env_file.each do |line|
+            env_template.puts template_line(line)
+          end
+        end
+      end
+    end
+
+    def template_line(line)
+      var, value = line.split("=")
+      template = var.gsub(EXPORT_COMMAND, "")
+      is_a_comment = var.strip[0].eql?("#")
+      value.nil? || is_a_comment ? line : "#{var}=#{template}"
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/version.rb b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/version.rb
new file mode 100644
index 0000000..dbb95d0
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/dotenv-2.8.1/lib/dotenv/version.rb
@@ -0,0 +1,3 @@
+module Dotenv
+  VERSION = "2.8.1".freeze
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/LICENSE.md
new file mode 100644
index 0000000..79066bb
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/LICENSE.md
@@ -0,0 +1,9 @@
+# The MIT License (MIT)
+
+**Copyright © 2020 Jessica Stokes**
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/README.md b/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/README.md
new file mode 100644
index 0000000..30fab98
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/emoji_regex-3.2.3/README.md
@@ -0,0 +1,180 @@
+# Ruby Emoji Regex 💎
+
+[![Gem Version](https://badge.fury.io/rb/emoji_regex.svg)](https://rubygems.org/gems/emoji_regex) [![Node & Ruby CI](https://github.com/ticky/ruby-emoji-regex/workflows/Node%20&%20Ruby%20CI/badge.svg)](https://github.com/ticky/ruby-emoji-regex/actions?query=workflow%3A%22Node+%26+Ruby+CI%22)
+
+A set of Ruby regular expressions for matching Unicode Emoji symbols.
+
+## Background
+
+This is based upon the fantastic work from [Mathias Bynens'](https://mathiasbynens.be/) [`emoji-regex`](https://github.com/mathiasbynens/emoji-regex) JavaScript package. `emoji-regex` is cleverly assembled based upon data from the Unicode Consortium.
+
+The regular expressions provided herein are derived from that package.
+
+## Installation
+
+```shell
+gem install emoji_regex
+```
+
+## Usage
+
+`emoji_regex` provides these regular expressions:
+
+* `EmojiRegex::RGIEmoji` is the regex you most likely want. It matches all emoji recommended for general interchange, as defined by [the Unicode standard's `RGI_Emoji` property](https://unicode.org/reports/tr51/#def_rgi_set). In a future version, this regular expression will be renamed to `EmojiRegex::Regex` and all other regexes removed.
+
+* `EmojiRegex::Regex` is deprecated, and will be replaced with `RGIEmoji` in a future major version. It matches emoji which present as emoji by default, and those which present as emoji when combined with `U+FE0F VARIATION SELECTOR-16`.
+
+* `EmojiRegex::Text` is deprecated, and will be removed in a future major version. It matches emoji which present as text by default (regardless of variation selector), as well as those which present as emoji by default.
+
+### RGI vs Emoji vs Text Presentation
+
+`RGI_Emoji` is a property of emoji symbols, defined in [Unicode Technical Report #51](https://unicode.org/reports/tr51/#def_rgi_set), which marks emoji as being supported by major vendors and therefore expected to be usable generally. In most cases, this is the property you will want when seeking emoji characters.
+
+`Emoji_Presentation` is another such property, [defined in UTR#51](http://unicode.org/reports/tr51/#Emoji_Properties_and_Data_Files), which controls whether symbols are intended to be rendered as emoji by default.
+
+Generally, for emoji which re-use Unicode code points which existed before Emoji itself was introduced to Unicode, `Emoji_Presentation` is `false`. `Emoji_Presentation` may be `true` but `RGI_Emoji` `false` for characters with non-standard emoji-like representations in certain conditions. Notable cases are the Emoji Keycap Sequences (#️⃣, 1️⃣, 9️⃣, *️⃣, etc.), which are sequences composed of three characters: the base character, a `U+FE0F VARIATION SELECTOR-16`, and finally the `U+20E3 COMBINING ENCLOSING KEYCAP`.
+
+These characters, therefore, are matched to varying degrees of precision by each of the regular expressions included in this package:
+
+- `#` is matched only by `EmojiRegex::Text`, as it is considered to be a text part of a possible emoji.
+- `#️` is matched by `EmojiRegex::Regex` as well as `EmojiRegex::Text`, as it has `Emoji_Presentation` despite not being a generally accepted Emoji or recommended for general interchange.
+- `#️⃣` is matched by all three regular expressions, as it is recommended for general interchange.
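+
+A quick way to see the difference is to test those three forms against each regex (a minimal sketch; `match?` simply returns a boolean):
+
+```ruby
+require 'emoji_regex'
+
+["#", "#\u{FE0F}", "#\u{FE0F}\u{20E3}"].each do |s|
+  puts "#{s.inspect}: RGIEmoji=#{s.match?(EmojiRegex::RGIEmoji)} " \
+       "Regex=#{s.match?(EmojiRegex::Regex)} Text=#{s.match?(EmojiRegex::Text)}"
+end
+```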
+
+It's most likely that the regular expression you want is `EmojiRegex::RGIEmoji`! ☺️
+
+### Example
+
+```ruby
+require 'emoji_regex'
+
+text = <<~TEXT
+  \u{231A}: ⌚ default emoji presentation character (Emoji_Presentation)
+  \u{2194}\u{FE0F}: ↔️ default text presentation character rendered as emoji
+  \u{1F469}: 👩 emoji modifier base (Emoji_Modifier_Base)
+  \u{1F469}\u{1F3FF}: 👩🏿 emoji modifier base followed by a modifier
+TEXT
+
+text.scan(EmojiRegex::RGIEmoji) do |emoji|
+  puts "Matched sequence #{emoji} - code points: #{emoji.length}"
+end
+```
diff --git a/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb b/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb
new file mode 100644
index 0000000..0f53b9b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/doc_include/template/qualitysmith.rb
@@ -0,0 +1,631 @@
+module RDoc
+module Page
+
+FONTS = "\"Bitstream Vera Sans\", Verdana, Arial, Helvetica, sans-serif"
+
+# [The remaining ~620 lines of this file define the RDoc page templates
+#  (STYLE, XHTML_PREAMBLE, HEADER, FILE_PAGE, CLASS_PAGE, METHOD_LIST, FOOTER,
+#  BODY, SRC_PAGE, FR_INDEX_BODY, FILE_INDEX, CLASS_INDEX, METHOD_INDEX and
+#  INDEX) as HTML/CSS heredocs; the markup is not recoverable from this diff.]
+
+end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/lib/escape.rb b/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/lib/escape.rb
new file mode 100644
index 0000000..8999669
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/escape-0.0.4/lib/escape.rb
@@ -0,0 +1,247 @@
+# escape.rb - escape/unescape library for several formats
+#
+# Copyright (C) 2006,2007 Tanaka Akira
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+# OF SUCH DAMAGE.
+
+# Escape module provides several escape functions.
+# * URI
+# * HTML
+# * shell command
+module Escape
+  module_function
+
+  # Escape.shell_command composes
+  # a sequence of words into
+  # a single shell command line.
+  # All shell meta characters are quoted and
+  # the words are concatenated with interleaving spaces.
+  #
+  #  Escape.shell_command(["ls", "/"]) #=> "ls /"
+  #  Escape.shell_command(["echo", "*"]) #=> "echo '*'"
+  #
+  # Note that system(*command) and
+  # system(Escape.shell_command(command)) are roughly the same.
+  # There are two exceptions, as follows.
+  # * The first is that the latter may invoke /bin/sh.
+  # * The second is the interpretation of an array with only one element:
+  #   the element is parsed by the shell with the former but
+  #   it is recognized as a single word with the latter.
+  #   For example, system(*["echo foo"]) invokes the echo command with an argument "foo".
+  #   But system(Escape.shell_command(["echo foo"])) invokes the "echo foo" command without arguments (and it probably fails).
+  def shell_command(command)
+    command.map {|word| shell_single_word(word) }.join(' ')
+  end
+
+  # Escape.shell_single_word quotes shell meta characters.
+  #
+  # The result string is always a single shell word, even if
+  # the argument is "": Escape.shell_single_word("") returns "''".
+  #
+  #  Escape.shell_single_word("") #=> "''"
+  #  Escape.shell_single_word("foo") #=> "foo"
+  #  Escape.shell_single_word("*") #=> "'*'"
+  def shell_single_word(str)
+    if str.empty?
+      "''"
+    elsif %r{\A[0-9A-Za-z+,./:=@_-]+\z} =~ str
+      str
+    else
+      result = ''
+      str.scan(/('+)|[^']+/) {
+        if $1
+          result << %q{\'} * $1.length
+        else
+          result << "'#{$&}'"
+        end
+      }
+      result
+    end
+  end
+
+  # Escape.uri_segment escapes a URI segment using percent-encoding.
+  #
+  #  Escape.uri_segment("a/b") #=> "a%2Fb"
+  #
+  # The segment is a "/"-separated element after the authority and before the query in a URI, as follows.
+  #
+  #   scheme://authority/segment1/segment2/.../segmentN?query#fragment
+  #
+  # See RFC 3986 for details of URI.
+  def uri_segment(str)
+    # pchar - pct-encoded = unreserved / sub-delims / ":" / "@"
+    # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+    # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+    str.gsub(%r{[^A-Za-z0-9\-._~!$&'()*+,;=:@]}n) {
+      '%' + $&.unpack("H2")[0].upcase
+    }
+  end
+
+  # Escape.uri_path escapes a URI path using percent-encoding.
+  # The given path should be a sequence of (non-escaped) segments separated by "/".
+  # The segments cannot contain "/".
+  #
+  #  Escape.uri_path("a/b/c") #=> "a/b/c"
+  #  Escape.uri_path("a?b/c?d/e?f") #=> "a%3Fb/c%3Fd/e%3Ff"
+  #
+  # The path is the part after the authority and before the query in a URI, as follows.
+  #
+  #   scheme://authority/path#fragment
+  #
+  # See RFC 3986 for details of URI.
+  #
+  # Note that this function is not appropriate for converting an OS path to a URI.
+  def uri_path(str)
+    str.gsub(%r{[^/]+}n) { uri_segment($&) }
+  end
+
+  # :stopdoc:
+  def html_form_fast(pairs, sep=';')
+    pairs.map {|k, v|
+      # query-chars - pct-encoded - x-www-form-urlencoded-delimiters =
+      #   unreserved / "!" / "$" / "'" / "(" / ")" / "*" / "," / ":" / "@" / "/" / "?"
+      # query-char - pct-encoded = unreserved / sub-delims / ":" / "@" / "/" / "?"
+      # query-char = pchar / "/" / "?" = unreserved / pct-encoded / sub-delims / ":" / "@" / "/" / "?"
+      # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+      # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+      # x-www-form-urlencoded-delimiters = "&" / "+" / ";" / "="
+      k = k.gsub(%r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n) {
+        '%' + $&.unpack("H2")[0].upcase
+      }
+      v = v.gsub(%r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n) {
+        '%' + $&.unpack("H2")[0].upcase
+      }
+      "#{k}=#{v}"
+    }.join(sep)
+  end
+  # :startdoc:
+
+  # Escape.html_form composes HTML form key-value pairs as an application/x-www-form-urlencoded string.
+  #
+  # Escape.html_form takes an array of pairs of strings or
+  # a hash from string to string.
+  #
+  #  Escape.html_form([["a","b"], ["c","d"]]) #=> "a=b&c=d"
+  #  Escape.html_form({"a"=>"b", "c"=>"d"}) #=> "a=b&c=d"
+  #
+  # In the array form, it is possible to use the same key more than once.
+  # (It is required for an HTML form which contains
+  # checkboxes and select elements with the multiple attribute.)
+  #
+  #  Escape.html_form([["k","1"], ["k","2"]]) #=> "k=1&k=2"
+  #
+  # If the strings contain characters which must be escaped in x-www-form-urlencoded,
+  # they are escaped using %-encoding.
+  #
+  #  Escape.html_form([["k=","&;="]]) #=> "k%3D=%26%3B%3D"
+  #
+  # The separator can be specified by the optional second argument.
+  #
+  #  Escape.html_form([["a","b"], ["c","d"]], ";") #=> "a=b;c=d"
+  #
+  # See HTML 4.01 for details.
+  def html_form(pairs, sep='&')
+    r = ''
+    first = true
+    pairs.each {|k, v|
+      # query-chars - pct-encoded - x-www-form-urlencoded-delimiters =
+      #   unreserved / "!" / "$" / "'" / "(" / ")" / "*" / "," / ":" / "@" / "/" / "?"
+      # query-char - pct-encoded = unreserved / sub-delims / ":" / "@" / "/" / "?"
+      # query-char = pchar / "/" / "?" = unreserved / pct-encoded / sub-delims / ":" / "@" / "/" / "?"
+      # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+      # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+      # x-www-form-urlencoded-delimiters = "&" / "+" / ";" / "="
+      r << sep if !first
+      first = false
+      k.each_byte {|byte|
+        ch = byte.chr
+        if %r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n =~ ch
+          r << "%" << ch.unpack("H2")[0].upcase
+        else
+          r << ch
+        end
+      }
+      r << '='
+      v.each_byte {|byte|
+        ch = byte.chr
+        if %r{[^0-9A-Za-z\-\._~:/?@!\$'()*,]}n =~ ch
+          r << "%" << ch.unpack("H2")[0].upcase
+        else
+          r << ch
+        end
+      }
+    }
+    r
+  end
+
+  # :stopdoc:
+  HTML_TEXT_ESCAPE_HASH = {
+    '&' => '&amp;',
+    '<' => '&lt;',
+    '>' => '&gt;',
+  }
+  # :startdoc:
+
+  # Escape.html_text escapes a string appropriate for HTML text using character references.
+  #
+  # It escapes 3 characters:
+  # * '&' to '&amp;'
+  # * '<' to '&lt;'
+  # * '>' to '&gt;'
+  #
+  #  Escape.html_text("abc") #=> "abc"
+  #  Escape.html_text("a & b < c > d") #=> "a &amp; b &lt; c &gt; d"
+  #
+  # This function is not appropriate for escaping an HTML element attribute
+  # because quotes are not escaped.
+  def html_text(str)
+    str.gsub(/[&<>]/) {|ch| HTML_TEXT_ESCAPE_HASH[ch] }
+  end
+
+  # :stopdoc:
+  HTML_ATTR_ESCAPE_HASH = {
+    '&' => '&amp;',
+    '<' => '&lt;',
+    '>' => '&gt;',
+    '"' => '&quot;',
+  }
+  # :startdoc:
+
+  # Escape.html_attr encodes a string as a double-quoted HTML attribute using character references.
+  #
+  #  Escape.html_attr("abc") #=> "\"abc\""
+  #  Escape.html_attr("a&b") #=> "\"a&amp;b\""
+  #  Escape.html_attr("ab&<>\"c") #=> "\"ab&amp;&lt;&gt;&quot;c\""
+  #  Escape.html_attr("a'c") #=> "\"a'c\""
+  #
+  # It escapes 4 characters:
+  # * '&' to '&amp;'
+  # * '<' to '&lt;'
+  # * '>' to '&gt;'
+  # * '"' to '&quot;'
+  #
+  def html_attr(str)
+    '"' + str.gsub(/[&<>"]/) {|ch| HTML_ATTR_ESCAPE_HASH[ch] } + '"'
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.github/workflows/ruby.yml b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.github/workflows/ruby.yml
new file mode 100644
index 0000000..d35c188
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.github/workflows/ruby.yml
@@ -0,0 +1,41 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
+# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
+
+name: Ruby
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  test:
+    runs-on: ${{ matrix.os }}-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu, macos]
+        ruby-version: [2.5, 2.6, 2.7, 3.0, head, debug, truffleruby]
+    continue-on-error: ${{ endsWith(matrix.ruby, 'head') || matrix.ruby == 'debug' }}
+    steps:
+    - uses: actions/checkout@v2
+    - name: Install libcurl header
+      run: |
+        if ${{ matrix.os == 'macos' }}
+        then
+          brew install curl
+        else
+          sudo apt update && sudo apt install -y --no-install-recommends libcurl4-openssl-dev
+        fi
+    - name: Set up Ruby
+      uses: ruby/setup-ruby@v1
+      with:
+        ruby-version: ${{ matrix.ruby-version }}
+        bundler-cache: true # runs 'bundle install' and caches installed gems automatically
+    - name: Run tests
+      run: bundle exec rake
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.gitignore b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.gitignore
new file mode 100644
index 0000000..75a08dc
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.gitignore
@@ -0,0 +1,8 @@
+*.gem
+.bundle
+Gemfile.lock
+.DS_Store
+.yardoc
+doc
+coverage
+.idea
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.rspec b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.rspec
new file mode 100644
index 0000000..7d8b8b7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/.rspec
@@ -0,0 +1,3 @@
+--tty
+--color
+--format documentation
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/CHANGELOG.md
new file mode 100644
index 0000000..8059799
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/CHANGELOG.md
@@ -0,0 +1,375 @@
+# Changelog
+
+## Master
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.15.0...master)
+
+* Added `redirect_url` value to available information and `Easy::Mirror`.
+  ([Adrien Rey-Jarthon](https://github.com/jarthod))
+
+## 0.15.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.14.0...v0.15.0)
+
+## 0.12.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.11.0...v0.12.0)
+
+- Removed deprecated `CURLE_SSL_CACERT` pinned in curl v7.62.0 ([@arku](https://github.com/arku) in [#158](https://github.com/typhoeus/ethon/pull/158))
+
+
+## 0.11.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.10.1...v0.11.0)
+
+## 0.10.1
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.10.0...v0.10.1)
+
+## 0.10.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.9.1...v0.10.0)
+
+## 0.9.1
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.9.0...v0.9.1)
+
+## 0.9.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.8.1...v0.9.0)
+
+## 0.8.1
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.8.0...v0.8.1)
+
+* Support optional escaping of params.
+  ([Tasos Laskos](https://github.com/zapotek))
+* `Easy::Mirror`: Reduced object allocations and method calls during info handling.
+  ([Tasos Laskos](https://github.com/zapotek))
+
+## 0.8.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.4...v0.8.0)
+
+* `Easy::Mirror`: Reduced object allocations and method calls during info handling.
+  ([Tasos Laskos](https://github.com/zapotek))
+
+## 0.7.4
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.3...v0.7.4)
+
+* Support different array encodings for params.
+  ([Marcello Barnaba](https://github.com/ifad), [\#104](https://github.com/typhoeus/ethon/pull/104))
+* Programmatic access to version info.
+  ([Jonas Wagner](https://github.com/jwagner), [\#90](https://github.com/typhoeus/ethon/pull/90))
+
+
+## 0.7.3
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.2...v0.7.3)
+
+* `Ethon::Curl::FDSet`
+  * Set `:fd_array` size to the current MS Windows `FD_SETSIZE` (2048).
+  ([Tasos Laskos](https://github.com/zapotek))
+
+* Added `redirect_time` value to available information and `Easy::Mirror`.
+  ([Adrien Jarthod](https://github.com/jarthod))
+
+## 0.7.2
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.7.1...v0.7.2)
+
+* FFI data-types updated to be more correct.
+
+## 0.7.1
+
+* MS Windows determination delegated to `Gem.windows?` for better accuracy.
+* FFI data-types updated to work on MS Windows.
+
+## 0.7.0
+
+Backwards-incompatible changes:
+
+* `mime-types` is no longer a dependency. The gem will still be used, if available, to determine the mime type of a file which is uploaded. That means you have to take care of the gem installation yourself.
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.3...v0.7.0)
+
+## 0.6.3
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.2...v0.6.3)
+
+## 0.6.2
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.1...v0.6.2)
+
+## 0.6.1
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.6.0...v0.6.1)
+
+The changelog entries are coming soon!
+
+## 0.6.0
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.12...v0.6.0)
+
+The changelog entries are coming soon!
+
+Bugfixes:
+
+  * URL-encode nullbytes in parameters instead of escaping them to `\\0`.
+    ([Tasos Laskos](https://github.com/zapotek))
+
+## 0.5.12
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.11...v0.5.12)
+
+Enhancements:
+
+* Performance optimizations.
+  ([Kyle Oppenheim](https://github.com/koppenheim) and [Richie Vos](https://github.com/richievos), [\#48](https://github.com/typhoeus/ethon/pull/48))
+* Reuse memory pointer.
+  ([Richie Vos](https://github.com/richievos), [\#49](https://github.com/typhoeus/ethon/pull/49))
+
+Bugfixes:
+
+* Fix windows install.
+  ([Derik Olsson](https://github.com/derikolsson), [\#47](https://github.com/typhoeus/ethon/pull/47))
+* Handle urls that already contain query params.
+  ([Turner King](https://github.com/turnerking), [\#45](https://github.com/typhoeus/ethon/pull/45))
+
+## 0.5.11
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.10...v0.5.11)
+
+Enhancements:
+
+* Add support for postredirs, unrestricted_auth.
+* Add support for cookie, cookiejar, cookiefile.
+  ([erwanlr](https://github.com/erwanlr), [\#46](https://github.com/typhoeus/ethon/pull/46))
+* Relax ffi requirements.
+  ([voxik](https://github.com/voxik), [\#40](https://github.com/typhoeus/ethon/pull/40))
+* Various documentation improvements.
+  ([Craig Little](https://github.com/craiglittle))
+
+Bugfixes:
+
+* Fix the memory leaks.
+  ([Richie Vos](https://github.com/richievos), [\#45](https://github.com/typhoeus/ethon/pull/45))
+
+## 0.5.10
+
+[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.9...v0.5.10)
+
+Enhancements:
+
+* Allow custom requests.
+ ([Nathan Sutton](https://github.com/nate), [\#36](https://github.com/typhoeus/ethon/pull/36)) +* Use updated version of FFI. + +Bugfixes: + +* Fix windows install issue. + ([brainsucker](https://github.com/brainsucker), [\#38](https://github.com/typhoeus/ethon/pull/38)) + +## 0.5.9 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.8...v0.5.9) + +Enhancements: + +* Allow to set multiple protocols. + +## 0.5.8 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.7...v0.5.8) + +Enhancements: + +* Add support for protocols and redir_protocols( + [libcurl SASL buffer overflow vulnerability](http://curl.haxx.se/docs/adv_20130206.html)). +* Add max_send_speed_large and max_recv_speed_large([Paul Schuegraf](https://github.com/pschuegr), [\#33](https://github.com/typhoeus/ethon/pull/33)) + +## 0.5.7 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.6...v0.5.7) + +Enhancements: + +* Use new version of ffi. + +## 0.5.6 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.4...v0.5.6) + +Bugfixes: + +* Easy#reset resets on_complete callbacks. + +## 0.5.4 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.3...v0.5.4) + +Enhancements: + +* Use Libc#getdtablesize to get the FDSet size. +* New libcurl option accept_encoding. +* Documentation updates. + +## 0.5.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.2...v0.5.3) + +Enhancements: + +* Deprecate Easy#prepare. It is no longer necessary. +* Unroll metaprogramming for easy and multi options. +* More specs. + +Bugfixes: + +* Correct size for FDSets +* Add proxytypes to enums. + +## 0.5.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.1...v0.5.2) + +Enhancements: + +* New libcurl option keypasswd. + +Bugfixes: + +* Correct request logging when using multi interface. +* Remove invalid libcurl option sslcertpasswd. + +## 0.5.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.5.0...v0.5.1) + +Bugfixes: + +* Mark Curl.select and Curl.easy_perform as blocking so that the GIL is + released by ffi. + +## 0.5.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.4...v0.5.0) + +Enhancements: + +* New libcurl option proxyuserpwd +* Rename response_header to response_headers + +Bugfixes: + +* Mark Curl.select and Curl.easy_perform as blocking so that the GIL is + released by ffi. 
+ +## 0.4.4 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.3...v0.4.4) + +Enhancements: + +* Prepare multi explicit like easy + +## 0.4.3 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.2...v0.4.3) + +Enhancements: + +* Remove deprecated libcurl option put +* More documentation +* New libcurl option connecttimeout_ms and timeout_ms +* Support multi options + +Bugfixes: + +* Handle nil values in query params + +## 0.4.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.1...v0.4.2) + +Enhancements: + +* New libcurl option forbid_reuse +* Use libcurls escape instead of CGI::escape + +## 0.4.1 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.4.0...v0.4.1) + +Bugfixes: + +* Handle nested hash in an array in params correct + ( [\#201](https://github.com/typhoeus/typhoeus/issues/201) ) + +## 0.4.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.3.0...v0.4.0) + +Enhancements: + +* ruby 1.8.7 compatible +* Ethon.logger +* Deal with string param/body +* More documentation + +Bugfixes: + +* Add multi_cleanup to curl + +## 0.3.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.2.0...v0.3.0) + +Enhancements: + +* New libcurl option proxyport +* Raise invalid value error when providing a wrong key for sslversion or httpauth + +Bugfixes: + +* Libcurl option sslversion is handled correct + +## 0.2.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.1.0...v0.2.0) + +Enhancements: + +* GET requests are using custom requests only when there is a request body +* Easy#on_complete takes multiple callbacks +* raise Errors::GlobalInit when libcurls global_init failed instead of + runtime error +* raise Errors::InvalidOption if option is invalid + +## 0.1.0 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.0.2...v0.1.0) + +Enhancements: + +* Documentation + ( [Alex P](https://github.com/ifesdjeen), [\#13](https://github.com/typhoeus/ethon/issues/13) ) +* New libcurl option dns_cache_timeout + ( [Chris Heald](https://github.com/cheald), [\#192](https://github.com/typhoeus/typhoeus/pull/192) ) + +Bugfixes: + +* Libcurl option ssl_verifyhost takes an integer. +* Add space between header key and value. + +## 0.0.2 + +[Full Changelog](https://github.com/typhoeus/ethon/compare/v0.0.1...v0.0.2) + +Bugfixes: + +* Add libcurl.so.4 to ffi_lib in order to load correct lib on Debian. +* Escape zero bytes. 
+ +## 0.0.1 Initial version diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Gemfile b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Gemfile new file mode 100644 index 0000000..a2f41e1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Gemfile @@ -0,0 +1,43 @@ +# frozen_string_literal: true +source "https://rubygems.org" +gemspec + +if Gem.ruby_version < Gem::Version.new("1.9.3") + gem "rake", "< 11" +else + gem "rake" +end + +group :development, :test do + gem "rspec", "~> 3.4" + + gem "sinatra" + + if Gem.ruby_version < Gem::Version.new("2.0.0") + gem "json", "< 2" + else + gem "json" + end + + if Gem.ruby_version >= Gem::Version.new("2.0.0") + gem "mime-types", "~> 1.18" + end + + if Gem.ruby_version >= Gem::Version.new("2.2.0") + gem "mustermann" + elsif Gem.ruby_version >= Gem::Version.new("2.1.0") + gem "mustermann", "0.4.0" + elsif Gem.ruby_version >= Gem::Version.new("2.0.0") + gem "mustermann", "0.3.1" + end + + if Gem.ruby_version >= Gem::Version.new("3.0.0") + gem "webrick" + end +end + +group :perf do + gem "benchmark-ips" + gem "patron" + gem "curb" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Guardfile b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Guardfile new file mode 100644 index 0000000..37d79f0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Guardfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true +# vim:set filetype=ruby: +guard( + "rspec", + :all_after_pass => false, + :cli => "--fail-fast --tty --format documentation --colour") do + + watch(%r{^spec/.+_spec\.rb$}) + watch(%r{^lib/(.+)\.rb$}) { |match| "spec/#{match[1]}_spec.rb" } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/LICENSE b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/LICENSE new file mode 100644 index 0000000..f064c17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 Hans Hasselberg + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/README.md b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/README.md new file mode 100644 index 0000000..9f9f567 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/README.md @@ -0,0 +1,118 @@ +[![Gem Version](https://badge.fury.io/rb/ethon.svg)](https://badge.fury.io/rb/ethon) +[![Build Status](https://github.com/typhoeus/ethon/workflows/Ruby/badge.svg)](https://github.com/typhoeus/ethon/actions/workflows/ruby.yml) + +# Ethon + +In Greek mythology, Ethon, the son of Typhoeus and Echidna, is a gigantic eagle. 
So much for the history.
+In the modern world, Ethon is a very basic libcurl wrapper using ffi.
+
+* [Documentation](http://rubydoc.info/github/typhoeus/ethon/frames/Ethon)
+* [Website](http://typhoeus.github.com/)
+* [Mailing list](http://groups.google.com/group/typhoeus)
+
+## Installation
+
+With bundler:
+
+    gem "ethon"
+
+With rubygems:
+
+    gem install ethon
+
+## Usage
+
+Making the first request is simple:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com")
+easy.perform
+#=> :ok
+```
+
+You have access to various options, such as following redirects:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com", followlocation: true)
+easy.perform
+#=> :ok
+```
+
+Once you're done you can inspect the response code and body:
+
+```ruby
+easy = Ethon::Easy.new(url: "www.example.com", followlocation: true)
+easy.perform
+easy.response_code
+#=> 200
+easy.response_body
+#=> "<!doctype html>..."
+```
+
+```ruby
+easy = Ethon::Easy.new
+easy.http_request("www.example.com", :get, { params: { a: 1 } })
+easy.perform
+#=> :ok
+```
+
+```ruby
+easy = Ethon::Easy.new
+easy.http_request("www.example.com", :post, { params: { a: 1 }, body: { b: 2 } })
+easy.perform
+#=> :ok
+```
+
+This is really handy when making requests since you don't have to care about setting
+everything up correctly.
+
+## Http2
+Standard http2 servers require the client to connect once and create a session (multi) and then add simple requests to the multi handler.
+The `perform` method then takes all the requests in the multi handler and sends them to the server.
+
+See the following example:
+```ruby
+multi = Ethon::Multi.new
+easy = Ethon::Easy.new
+
+easy.http_request("www.example.com/get", :get, { http_version: :httpv2_0 })
+
+# Sending a request with http version 2 will send an Upgrade header to the server, which many older servers will not support
+# See below for more info: https://everything.curl.dev/http/http2
+# If this is a problem, send the below:
+easy.http_request("www.example.com/get", :get, { http_version: :httpv2_prior_knowledge })
+
+# To set the server to use http2 with https and http1 with http, send the following:
+easy.http_request("www.example.com/get", :get, { http_version: :httpv2_tls })
+
+multi.add(easy)
+multi.perform
+```
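+
+Once `multi.perform` returns, each easy handle carries its own result, so the response accessors shown earlier apply unchanged (a minimal sketch, reusing the handles from above):
+
+```ruby
+# after multi.perform has returned:
+puts easy.response_code
+puts easy.response_body
+```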
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Rakefile b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Rakefile new file mode 100644 index 0000000..a5bc61b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/Rakefile @@ -0,0 +1,40 @@ +# frozen_string_literal: true +require "bundler" +Bundler.setup + +require "rake" +require "rspec/core/rake_task" +$LOAD_PATH.unshift File.expand_path("../lib", __FILE__) +require "ethon/version" + +task :gem => :build +task :build do + system "gem build ethon.gemspec" +end + +task :install => :build do + system "gem install ethon-#{Ethon::VERSION}.gem" +end + +task :release => :build do + system "git tag -a v#{Ethon::VERSION} -m 'Tagging #{Ethon::VERSION}'" + system "git push --tags" + system "gem push ethon-#{Ethon::VERSION}.gem" +end + +RSpec::Core::RakeTask.new(:spec) do |t| + t.verbose = false + t.ruby_opts = "-W -I./spec -rspec_helper" +end + +desc "Start up the test servers" +task :start do + require_relative 'spec/support/boot' + begin + Boot.start_servers(:rake) + rescue Exception + end +end + +task :default => :spec + diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/ethon.gemspec b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/ethon.gemspec new file mode 100644 index 0000000..561330b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/ethon.gemspec @@ -0,0 +1,26 @@ +# encoding: utf-8 +# frozen_string_literal: true +lib = File.expand_path('../lib/', __FILE__) +$:.unshift lib unless $:.include?(lib) + +require 'ethon/version' + +Gem::Specification.new do |s| + s.name = "ethon" + s.version = Ethon::VERSION + s.platform = Gem::Platform::RUBY + s.authors = ["Hans Hasselberg"] + s.email = ["me@hans.io"] + s.homepage = "https://github.com/typhoeus/ethon" + s.summary = "Libcurl wrapper." + s.description = "Very lightweight libcurl wrapper." + + s.required_rubygems_version = ">= 1.3.6" + s.license = 'MIT' + + s.add_dependency('ffi', ['>= 1.15.0']) + + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- spec/*`.split("\n") + s.require_path = 'lib' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon.rb new file mode 100644 index 0000000..461b4c3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true +require 'logger' +require 'ffi' +require 'thread' +begin + require 'mime/types/columnar' +rescue LoadError + begin + require 'mime/types' + rescue LoadError + end +end +require 'tempfile' + +require 'ethon/libc' +require 'ethon/curl' +require 'ethon/easy' +require 'ethon/errors' +require 'ethon/loggable' +require 'ethon/multi' +require 'ethon/version' + +# Ethon is a very simple libcurl. +# It provides direct access to libcurl functionality +# as well as some helpers for doing http requests. +# +# Ethon was extracted from Typhoeus. If you want to +# see how others use Ethon look at the Typhoeus code. +# +# @see https://www.github.com/typhoeus/typhoeus Typhoeus +# +# @note Please update to the latest libcurl version in order +# to benefit from all features and bugfixes. 
+# http://curl.haxx.se/download.html +module Ethon +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curl.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curl.rb new file mode 100644 index 0000000..f9194c7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curl.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true +require 'ethon/curls/codes' +require 'ethon/curls/options' +require 'ethon/curls/infos' +require 'ethon/curls/form_options' +require 'ethon/curls/messages' +require 'ethon/curls/functions' + +module Ethon + + # FFI Wrapper module for Curl. Holds constants and required initializers. + # + # @api private + module Curl + extend ::FFI::Library + extend Ethon::Curls::Codes + extend Ethon::Curls::Options + extend Ethon::Curls::Infos + extend Ethon::Curls::FormOptions + extend Ethon::Curls::Messages + + # :nodoc: + def self.windows? + Libc.windows? + end + + require 'ethon/curls/constants' + require 'ethon/curls/settings' + require 'ethon/curls/classes' + extend Ethon::Curls::Functions + + @blocking = true + + @@initialized = false + @@curl_mutex = Mutex.new + + class << self + # This function sets up the program environment that libcurl needs. + # Think of it as an extension of the library loader. + # + # This function must be called at least once within a program (a program is all the + # code that shares a memory space) before the program calls any other function in libcurl. + # The environment it sets up is constant for the life of the program and is the same for + # every program, so multiple calls have the same effect as one call. + # + # The flags option is a bit pattern that tells libcurl exactly what features to init, + # as described below. Set the desired bits by ORing the values together. In normal + # operation, you must specify CURL_GLOBAL_ALL. Don't use any other value unless + # you are familiar with it and mean to control internal operations of libcurl. + # + # This function is not thread safe. You must not call it when any other thread in + # the program (i.e. a thread sharing the same memory) is running. This doesn't just + # mean no other thread that is using libcurl. Because curl_global_init() calls + # functions of other libraries that are similarly thread unsafe, it could conflict with + # any other thread that uses these other libraries. + # + # @raise [ Ethon::Errors::GlobalInit ] If Curl.global_init fails. + def init + @@curl_mutex.synchronize { + if not @@initialized + raise Errors::GlobalInit.new if Curl.global_init(GLOBAL_ALL) != 0 + @@initialized = true + Ethon.logger.debug("ETHON: Libcurl initialized") if Ethon.logger + end + } + end + + # This function releases resources acquired by curl_global_init. + # You should call curl_global_cleanup once for each call you make to + # curl_global_init, after you are done using libcurl. + # This function is not thread safe. You must not call it when any other thread in the + # program (i.e. a thread sharing the same memory) is running. This doesn't just + # mean no other thread that is using libcurl. Because curl_global_cleanup calls functions of other + # libraries that are similarly thread unsafe, it could conflict with + # any other thread that uses these other libraries. + # See the description in libcurl of global environment requirements + # for details of how to use this function. 
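+      # @example (sketch) Balanced init and cleanup around a program's use of
+      #   libcurl; Easy.new calls init for you, so explicit calls are rarely
+      #   needed:
+      #   Ethon::Curl.init
+      #   # ... perform requests ...
+      #   Ethon::Curl.cleanup
+      #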
+      def cleanup
+        @@curl_mutex.synchronize {
+          if @@initialized
+            Curl.global_cleanup()
+            @@initialized = false
+            Ethon.logger.debug("ETHON: Libcurl cleanup") if Ethon.logger
+          end
+        }
+      end
+
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/classes.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/classes.rb
new file mode 100644
index 0000000..e663ac8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/classes.rb
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+module Ethon
+  module Curl
+    # :nodoc:
+    class MsgData < ::FFI::Union
+      layout :whatever, :pointer, :code, :easy_code
+    end
+
+    # :nodoc:
+    class Msg < ::FFI::Struct
+      layout :code, :msg_code, :easy_handle, :pointer, :data, MsgData
+    end
+
+    class VersionInfoData < ::FFI::Struct
+      layout :curl_version, :uint8,
+        :version, :string,
+        :version_num, :int,
+        :host, :string,
+        :features, :int,
+        :ssl_version, :string,
+        :ssl_version_num, :long,
+        :libz_version, :string,
+        :protocols, :pointer
+    end
+
+    # :nodoc:
+    class FDSet < ::FFI::Struct
+      if Curl.windows?
+        layout :fd_count, :uint,
+          # TODO: Make it future proof by dynamically grabbing FD_SETSIZE.
+          :fd_array, [:uint, 2048]
+
+        def clear; self[:fd_count] = 0; end
+      else
+        # https://github.com/typhoeus/ethon/issues/182
+        FD_SETSIZE = begin
+          # Allow to override the (new) default cap (to_i: the layout below
+          # does integer math with this value)
+          if ENV['ETHON_FD_SIZE']
+            ENV['ETHON_FD_SIZE'].to_i
+
+          # auto-detect ulimit, but cap at 2^16
+          else
+            [::Ethon::Libc.getdtablesize, 65_536].min
+          end
+        end
+
+        layout :fds_bits, [:long, FD_SETSIZE / ::FFI::Type::LONG.size]
+
+        # :nodoc:
+        def clear; super; end
+      end
+    end
+
+    # :nodoc:
+    class Timeval < ::FFI::Struct
+      if Curl.windows?
+        layout :sec, :long,
+          :usec, :long
+      else
+        layout :sec, :time_t,
+          :usec, :suseconds_t
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/codes.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/codes.rb
new file mode 100644
index 0000000..4b53096
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/codes.rb
@@ -0,0 +1,122 @@
+# frozen_string_literal: true
+module Ethon
+  module Curls # :nodoc:
+
+    # This module contains all easy and
+    # multi return codes.
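+    # @example (sketch) These symbols back the :easy_code FFI enum defined in
+    #   constants.rb, so numeric libcurl returns map to symbols and back:
+    #   Ethon::Curl::EasyCode[:ok]              #=> 0
+    #   Ethon::Curl::EasyCode[:couldnt_connect] #=> 7
+    #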
+ module Codes + + # Libcurl error codes, refer + # https://github.com/bagder/curl/blob/master/include/curl/curl.h for details + def easy_codes + [ + :ok, + :unsupported_protocol, + :failed_init, + :url_malformat, + :not_built_in, + :couldnt_resolve_proxy, + :couldnt_resolve_host, + :couldnt_connect, + :ftp_weird_server_reply, + :remote_access_denied, + :ftp_accept_failed, + :ftp_weird_pass_reply, + :ftp_accept_timeout, + :ftp_weird_pasv_reply, + :ftp_weird_227_format, + :ftp_cant_get_host, + :obsolete16, + :ftp_couldnt_set_type, + :partial_file, + :ftp_couldnt_retr_file, + :obsolete20, + :quote_error, + :http_returned_error, + :write_error, + :obsolete24, + :upload_failed, + :read_error, + :out_of_memory, + :operation_timedout, + :obsolete29, + :ftp_port_failed, + :ftp_couldnt_use_rest, + :obsolete32, + :range_error, + :http_post_error, + :ssl_connect_error, + :bad_download_resume, + :file_couldnt_read_file, + :ldap_cannot_bind, + :ldap_search_failed, + :obsolete40, + :function_not_found, + :aborted_by_callback, + :bad_function_argument, + :obsolete44, + :interface_failed, + :obsolete46, + :too_many_redirects , + :unknown_option, + :telnet_option_syntax , + :obsolete50, + :peer_failed_verification, + :got_nothing, + :ssl_engine_notfound, + :ssl_engine_setfailed, + :send_error, + :recv_error, + :obsolete57, + :ssl_certproblem, + :ssl_cipher, + :bad_content_encoding, + :ldap_invalid_url, + :filesize_exceeded, + :use_ssl_failed, + :send_fail_rewind, + :ssl_engine_initfailed, + :login_denied, + :tftp_notfound, + :tftp_perm, + :remote_disk_full, + :tftp_illegal, + :tftp_unknownid, + :remote_file_exists, + :tftp_nosuchuser, + :conv_failed, + :conv_reqd, + :ssl_cacert_badfile, + :remote_file_not_found, + :ssh, + :ssl_shutdown_failed, + :again, + :ssl_crl_badfile, + :ssl_issuer_error, + :ftp_pret_failed, + :rtsp_cseq_error, + :rtsp_session_error, + :ftp_bad_file_list, + :chunk_failed, + :last + ] + end + + # Curl-Multi socket error codes, refer + # https://github.com/bagder/curl/blob/master/include/curl/multi.h for details + def multi_codes + [ + :call_multi_perform, -1, + :ok, + :bad_handle, + :bad_easy_handle, + :out_of_memory, + :internal_error, + :bad_socket, + :unknown_option, + :last + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/constants.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/constants.rb new file mode 100644 index 0000000..3c7c32d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/constants.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +module Ethon + module Curl + # :nodoc: + VERSION_NOW = 3 + + # Flag. Initialize SSL. + GLOBAL_SSL = 0x01 + # Flag. Initialize win32 socket libraries. + GLOBAL_WIN32 = 0x02 + # Flag. Initialize everything possible. + GLOBAL_ALL = (GLOBAL_SSL | GLOBAL_WIN32) + # Flag. Initialize everything by default. 
+ GLOBAL_DEFAULT = GLOBAL_ALL + + # :nodoc: + EasyCode = enum(:easy_code, easy_codes) + # :nodoc: + MultiCode = enum(:multi_code, multi_codes) + + # :nodoc: + EasyOption = enum(:easy_option, easy_options(:enum).to_a.flatten) + # :nodoc: + MultiOption = enum(:multi_option, multi_options(:enum).to_a.flatten) + + # Used by curl_debug_callback when setting CURLOPT_DEBUGFUNCTION + # https://github.com/bagder/curl/blob/master/include/curl/curl.h#L378 for details + DebugInfoType = enum(:debug_info_type, debug_info_types) + + # :nodoc: + InfoType = enum(info_types.to_a.flatten) + + # Info details, refer + # https://github.com/bagder/curl/blob/master/src/tool_writeout.c#L66 for details + Info = enum(:info, infos.to_a.flatten) + + # Form options, used by FormAdd for temporary storage, refer + # https://github.com/bagder/curl/blob/master/lib/formdata.h#L51 for details + FormOption = enum(:form_option, form_options) + + # :nodoc: + MsgCode = enum(:msg_code, msg_codes) + + VERSION_IPV6 = (1<<0) # IPv6-enabled + VERSION_KERBEROS4 = (1<<1) # kerberos auth is supported + VERSION_SSL = (1<<2) # SSL options are present + VERSION_LIBZ = (1<<3) # libz features are present + VERSION_NTLM = (1<<4) # NTLM auth is supported + VERSION_GSSNEGOTIATE = (1<<5) # Negotiate auth supp + VERSION_DEBUG = (1<<6) # built with debug capabilities + VERSION_ASYNCHDNS = (1<<7) # asynchronous dns resolves + VERSION_SPNEGO = (1<<8) # SPNEGO auth is supported + VERSION_LARGEFILE = (1<<9) # supports files bigger than 2GB + VERSION_IDN = (1<<10) # International Domain Names support + VERSION_SSPI = (1<<11) # SSPI is supported + VERSION_CONV = (1<<12) # character conversions supported + VERSION_CURLDEBUG = (1<<13) # debug memory tracking supported + VERSION_TLSAUTH_SRP = (1<<14) # TLS-SRP auth is supported + VERSION_NTLM_WB = (1<<15) # NTLM delegating to winbind helper + VERSION_HTTP2 = (1<<16) # HTTP2 support built + VERSION_GSSAPI = (1<<17) # GSS-API is supported + + SOCKET_BAD = -1 + SOCKET_TIMEOUT = SOCKET_BAD + + PollAction = enum(:poll_action, [ + :none, + :in, + :out, + :inout, + :remove + ]) + + SocketReadiness = bitmask(:socket_readiness, [ + :in, # CURL_CSELECT_IN - 0x01 (bit 0) + :out, # CURL_CSELECT_OUT - 0x02 (bit 1) + :err, # CURL_CSELECT_ERR - 0x04 (bit 2) + ]) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/form_options.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/form_options.rb new file mode 100644 index 0000000..ce5f079 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/form_options.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains the available options for forms. 
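+    # @example (sketch) The list below backs the :form_option FFI enum used
+    #   with curl_formadd, numbering sequentially from :none = 0:
+    #   Ethon::Curl::FormOption[:copyname]     #=> 1
+    #   Ethon::Curl::FormOption[:copycontents] #=> 4
+    #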
+ module FormOptions + + # Form options, used by FormAdd for temporary storage, refer + # https://github.com/bagder/curl/blob/master/lib/formdata.h#L51 for details + def form_options + [ + :none, + :copyname, + :ptrname, + :namelength, + :copycontents, + :ptrcontents, + :contentslength, + :filecontent, + :array, + :obsolete, + :file, + :buffer, + :bufferptr, + :bufferlength, + :contenttype, + :contentheader, + :filename, + :end, + :obsolete2, + :stream, + :last + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/functions.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/functions.rb new file mode 100644 index 0000000..2c01bea --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/functions.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains the functions to be attached in order to work with + # libcurl. + module Functions + + # :nodoc: + def self.extended(base) + base.attach_function :global_init, :curl_global_init, [:long], :int + base.attach_function :global_cleanup, :curl_global_cleanup, [], :void + base.attach_function :free, :curl_free, [:pointer], :void + + base.attach_function :easy_init, :curl_easy_init, [], :pointer + base.attach_function :easy_cleanup, :curl_easy_cleanup, [:pointer], :void + base.attach_function :easy_getinfo, :curl_easy_getinfo, [:pointer, :info, :varargs], :easy_code + base.attach_function :easy_setopt, :curl_easy_setopt, [:pointer, :easy_option, :varargs], :easy_code + base.instance_variable_set(:@blocking, true) + base.attach_function :easy_perform, :curl_easy_perform, [:pointer], :easy_code + base.attach_function :easy_strerror, :curl_easy_strerror, [:easy_code], :string + base.attach_function :easy_escape, :curl_easy_escape, [:pointer, :pointer, :int], :pointer + base.attach_function :easy_reset, :curl_easy_reset, [:pointer], :void + base.attach_function :easy_duphandle, :curl_easy_duphandle, [:pointer], :pointer + + base.attach_function :formadd, :curl_formadd, [:pointer, :pointer, :varargs], :int + base.attach_function :formfree, :curl_formfree, [:pointer], :void + + base.attach_function :multi_init, :curl_multi_init, [], :pointer + base.attach_function :multi_cleanup, :curl_multi_cleanup, [:pointer], :void + base.attach_function :multi_add_handle, :curl_multi_add_handle, [:pointer, :pointer], :multi_code + base.attach_function :multi_remove_handle, :curl_multi_remove_handle, [:pointer, :pointer], :multi_code + base.attach_function :multi_info_read, :curl_multi_info_read, [:pointer, :pointer], Curl::Msg.ptr + base.attach_function :multi_perform, :curl_multi_perform, [:pointer, :pointer], :multi_code + base.attach_function :multi_timeout, :curl_multi_timeout, [:pointer, :pointer], :multi_code + base.attach_function :multi_fdset, :curl_multi_fdset, [:pointer, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::FDSet.ptr, :pointer], :multi_code + base.attach_function :multi_strerror, :curl_multi_strerror, [:int], :string + base.attach_function :multi_setopt, :curl_multi_setopt, [:pointer, :multi_option, :varargs], :multi_code + base.attach_function :multi_socket_action, :curl_multi_socket_action, [:pointer, :int, :socket_readiness, :pointer], :multi_code + + base.attach_function :version, :curl_version, [], :string + base.attach_function :version_info, :curl_version_info, [], Curl::VersionInfoData.ptr + + base.attach_function :slist_append, :curl_slist_append, [:pointer, :string], :pointer + base.attach_function :slist_free_all, 
:curl_slist_free_all, [:pointer], :void + base.instance_variable_set(:@blocking, true) + + if Curl.windows? + base.ffi_lib 'ws2_32' + else + base.ffi_lib ::FFI::Library::LIBC + end + + base.attach_function :select, [:int, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::FDSet.ptr, Curl::Timeval.ptr], :int + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/infos.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/infos.rb new file mode 100644 index 0000000..d285f5b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/infos.rb @@ -0,0 +1,151 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains logic for the available informations + # on an easy, eg.: connect_time. + module Infos + + # Return info types. + # + # @example Return info types. + # Ethon::Curl.info_types + # + # @return [ Hash ] The info types. + def info_types + { + :string =>0x100000, + :long => 0x200000, + :double =>0x300000, + :slist => 0x400000 + } + end + + # http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTDEBUGFUNCTION + # https://github.com/bagder/curl/blob/master/include/curl/curl.h#L378 + # + # @example Return debug info types. + # Ethon::Curl.debug_info_types + # + # @return [ Hash ] The info types available to curl_debug_callback. + def debug_info_types + [ + :text, 0, + :header_in, + :header_out, + :data_in, + :data_out, + :ssl_data_in, + :ssl_data_out + ] + end + + # Return Info details, refer + # https://github.com/bagder/curl/blob/master/src/tool_writeout.c#L66 for details + # + # @example Return infos. + # Ethon::Curl.infos + # + # @return [ Hash ] The infos. + def infos + { + :effective_url => info_types[:string] + 1, + :response_code => info_types[:long] + 2, + :total_time => info_types[:double] + 3, + :namelookup_time => info_types[:double] + 4, + :connect_time => info_types[:double] + 5, + :pretransfer_time => info_types[:double] + 6, + :size_upload => info_types[:double] + 7, + :size_download => info_types[:double] + 8, + :speed_download => info_types[:double] + 9, + :speed_upload => info_types[:double] + 10, + :header_size => info_types[:long] + 11, + :request_size => info_types[:long] + 12, + :ssl_verifyresult => info_types[:long] + 13, + :filetime => info_types[:long] + 14, + :content_length_download =>info_types[:double] + 15, + :content_length_upload => info_types[:double] + 16, + :starttransfer_time => info_types[:double] + 17, + :content_type => info_types[:string] + 18, + :redirect_time => info_types[:double] + 19, + :redirect_count => info_types[:long] + 20, + :private => info_types[:string] + 21, + :http_connectcode => info_types[:long] + 22, + :httpauth_avail => info_types[:long] + 23, + :proxyauth_avail => info_types[:long] + 24, + :os_errno => info_types[:long] + 25, + :num_connects => info_types[:long] + 26, + :ssl_engines => info_types[:slist] + 27, + :cookielist => info_types[:slist] + 28, + :lastsocket => info_types[:long] + 29, + :ftp_entry_path => info_types[:string] + 30, + :redirect_url => info_types[:string] + 31, + :primary_ip => info_types[:string] + 32, + :appconnect_time => info_types[:double] + 33, + :certinfo => info_types[:slist] + 34, + :condition_unmet => info_types[:long] + 35, + :rtsp_session_id => info_types[:string] + 36, + :rtsp_client_cseq => info_types[:long] + 37, + :rtsp_server_cseq => info_types[:long] + 38, + :rtsp_cseq_recv => info_types[:long] + 39, + :primary_port => info_types[:long] + 40, + :local_ip => info_types[:string] + 41, + :local_port 
=> info_types[:long] + 42, + :last =>42 + } + end + + # Return info as string. + # + # @example Return info. + # Curl.get_info_string(:primary_ip, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ String ] The info. + def get_info_string(option, handle) + string_ptr = ::FFI::MemoryPointer.new(:pointer) + + if easy_getinfo(handle, option, :pointer, string_ptr) == :ok + ptr=string_ptr.read_pointer + ptr.null? ? nil : ptr.read_string + end + end + + # Return info as integer. + # + # @example Return info. + # Curl.get_info_long(:response_code, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ Integer ] The info. + def get_info_long(option, handle) + long_ptr = ::FFI::MemoryPointer.new(:long) + + if easy_getinfo(handle, option, :pointer, long_ptr) == :ok + long_ptr.read_long + end + end + + # Return info as float + # + # @example Return info. + # Curl.get_info_double(:response_code, easy) + # + # @param [ Symbol ] option The option name. + # @param [ ::FFI::Pointer ] handle The easy handle. + # + # @return [ Float ] The info. + def get_info_double(option, handle) + double_ptr = ::FFI::MemoryPointer.new(:double) + + if easy_getinfo(handle, option, :pointer, double_ptr) == :ok + double_ptr.read_double + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/messages.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/messages.rb new file mode 100644 index 0000000..72bc54d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/messages.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains available message codes. + module Messages + + # Return message codes. + # + # @example Return message codes. + # Ethon::Curl.msg_codes + # + # @return [ Array ] The messages codes. + def msg_codes + [:none, :done, :last] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/options.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/options.rb new file mode 100644 index 0000000..0c391d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/options.rb @@ -0,0 +1,503 @@ +# frozen_string_literal: true +module Ethon + module Curls + + # This module contains logic for setting options on + # easy or multi interface. + module Options + + OPTION_STRINGS = { :easy => 'easy_options', :multi => 'multi_options' }.freeze + FOPTION_STRINGS = { :easy => 'EASY_OPTIONS', :multi => 'MULTI_OPTIONS' }.freeze + FUNCS = { :easy => 'easy_setopt', :multi => 'multi_setopt' }.freeze + # Sets appropriate option for easy, depending on value type. + def set_option(option, value, handle, type = :easy) + type = type.to_sym unless type.is_a?(Symbol) + raise NameError, "Ethon::Curls::Options unknown type #{type}." unless respond_to?(OPTION_STRINGS[type]) + opthash=send(OPTION_STRINGS[type], nil) + raise Errors::InvalidOption.new(option) unless opthash.include?(option) + + case opthash[option][:type] + when :none + return if value.nil? + value=1 + va_type=:long + when :int + return if value.nil? + va_type=:long + value=value.to_i + when :bool + return if value.nil? + va_type=:long + value=(value&&value!=0) ? 1 : 0 + when :time + return if value.nil? + va_type=:long + value=value.to_i + when :enum + return if value.nil? 
+        va_type=:long
+        value = case value
+        when Symbol
+          opthash[option][:opts][value]
+        when String
+          opthash[option][:opts][value.to_sym]
+        else
+          value
+        end.to_i
+      when :bitmask
+        return if value.nil?
+        va_type=:long
+        value = case value
+        when Symbol
+          opthash[option][:opts][value]
+        when Array
+          value.inject(0) { |res,v| res|opthash[option][:opts][v] }
+        else
+          value
+        end.to_i
+      when :string
+        va_type=:string
+        value=value.to_s unless value.nil?
+      when :string_as_pointer
+        va_type = :pointer
+        s = ''
+        s = value.to_s unless value.nil?
+        value = FFI::MemoryPointer.new(:char, s.bytesize)
+        value.put_bytes(0, s)
+      when :string_escape_null
+        va_type=:string
+        value=Util.escape_zero_byte(value) unless value.nil?
+      when :ffipointer
+        va_type=:pointer
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? FFI::Pointer
+      when :curl_slist
+        va_type=:pointer
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? FFI::Pointer
+      when :buffer
+        raise NotImplementedError, "Ethon::Curls::Options option #{option} buffer type not implemented."
+      when :dontuse_object
+        raise NotImplementedError, "Ethon::Curls::Options option #{option} type not implemented."
+      when :cbdata
+        raise NotImplementedError, "Ethon::Curls::Options option #{option} callback data type not implemented. Use Ruby closures."
+      when :callback
+        va_type=:callback
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc
+      when :socket_callback
+        va_type=:socket_callback
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc
+      when :timer_callback
+        va_type=:timer_callback
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc
+      when :debug_callback
+        va_type=:debug_callback
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc
+      when :progress_callback
+        va_type=:progress_callback
+        raise Errors::InvalidValue.new(option,value) unless value.nil? or value.is_a? Proc
+      when :off_t
+        return if value.nil?
+        va_type=:int64
+        value=value.to_i
+      end
+
+      if va_type==:long or va_type==:int64 then
+        bits=FFI.type_size(va_type)*8
+        tv=((value<0) ? value.abs-1 : value)
+        raise Errors::InvalidValue.new(option,value) unless tv<(1<<(bits-1))
+      end
+
+      send(FUNCS[type], handle, opthash[option][:opt], va_type, value)
+    end
+
+    OPTION_TYPE_BASE = {
+      :long => 0,
+      :objectpoint => 10000,
+      :functionpoint => 20000,
+      :off_t => 30000
+    }
+    OPTION_TYPE_MAP = {
+      :none => :long,
+      :int => :long,
+      :bool => :long,
+      :time => :long,
+      :enum => :long, # Two ways to specify values (as opts parameter):
+                      # * Array of symbols, these will number sequentially
+                      #   starting at 0. Skip elements with nil. (see :netrc)
+                      # * Hash of :symbol => enum_value (See :proxytype)
+      :bitmask => :long, # Three ways to specify values (as opts parameter):
+                      # * Hash of :symbol => bitmask_value or Array.
+                      #   An Array can be an array of already defined
+                      #   Symbols, which represents a bitwise or of those
+                      #   symbols. (See :httpauth)
+                      # * Array of symbols, these will number the bits
+                      #   sequentially (i.e. 0, 1, 2, 4, etc.). Skip
+                      #   elements with nil. The last element can be a
+                      #   Hash, which will be interpreted as above.
+                      #   (See :protocols)
+                      # :all defaults to all bits set
+      :string => :objectpoint,
+      :string_escape_null => :objectpoint,
+      :string_as_pointer => :objectpoint,
+      :ffipointer => :objectpoint, # FFI::Pointer
+      :curl_slist => :objectpoint,
+      :buffer => :objectpoint, # A memory buffer of size defined in the options
+      :dontuse_object => :objectpoint, # An object we don't support (e.g.
FILE*) + :cbdata => :objectpoint, + :callback => :functionpoint, + :socket_callback => :functionpoint, + :timer_callback => :functionpoint, + :debug_callback => :functionpoint, + :progress_callback => :functionpoint, + :off_t => :off_t, + } + + def self.option(ftype,name,type,num,opts=nil) + case type + when :enum + if opts.is_a? Array then + opts=Hash[opts.each_with_index.to_a] + elsif not opts.is_a? Hash then + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." + end + + when :bitmask + if opts.is_a? Array then + if opts.last.is_a? Hash then + hopts=opts.pop + else + hopts={} + end + opts.each_with_index do |v,i| + next if v.nil? + if i==0 then + hopts[v]=0 + else + hopts[v]=1<<(i-1) + end + end + opts=hopts + elsif not opts.is_a? Hash then + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." + end + opts[:all]=-1 unless opts.include? :all + opts.each do |k,v| + if v.is_a? Array then + opts[k]=v.map { |b| opts[b] }.inject :| + end + end + + when :buffer + raise TypeError, "Ethon::Curls::Options #{ftype} #{name} Expected opts to be an Array or a Hash." unless opts.is_a? Integer + + else + raise ArgumentError, "Ethon::Curls::Options #{ftype} #{name} Expected no opts." unless opts.nil? + end + opthash=const_get(FOPTION_STRINGS[ftype]) + opthash[name] = { :type => type, + :opt => OPTION_TYPE_BASE[OPTION_TYPE_MAP[type]] + num, + :opts => opts } + end + + def self.option_alias(ftype,name,*aliases) + opthash=const_get(FOPTION_STRINGS[ftype]) + aliases.each { |a| opthash[a]=opthash[name] } + end + + def self.option_type(type) + cname = FOPTION_STRINGS[type] + const_set(cname, {}) + define_method(OPTION_STRINGS[type]) do |rt| + return Ethon::Curls::Options.const_get(cname).map { |k, v| [k, v[:opt]] } if rt == :enum + Ethon::Curls::Options.const_get(cname) + end + end + + # Curl multi options, refer + # Defined @ https://github.com/bagder/curl/blob/master/include/curl/multi.h + # Documentation @ http://curl.haxx.se/libcurl/c/curl_multi_setopt.html + option_type :multi + + option :multi, :socketfunction, :socket_callback, 1 + option :multi, :socketdata, :cbdata, 2 + option :multi, :pipelining, :int, 3 + option :multi, :timerfunction, :timer_callback, 4 + option :multi, :timerdata, :cbdata, 5 + option :multi, :maxconnects, :int, 6 + option :multi, :max_host_connections, :int, 7 + option :multi, :max_pipeline_length, :int, 8 + option :multi, :content_length_penalty_size, :off_t, 9 + option :multi, :chunk_length_penalty_size, :off_t, 10 + option :multi, :pipelining_site_bl, :dontuse_object, 11 + option :multi, :pipelining_server_bl, :dontuse_object, 12 + option :multi, :max_total_connections, :int, 3 + + # Curl easy options + # Defined @ https://github.com/bagder/curl/blob/master/include/curl/curl.h + # Documentation @ http://curl.haxx.se/libcurl/c/curl_easy_setopt.html + ## BEHAVIOR OPTIONS + option_type :easy + + option :easy, :verbose, :bool, 41 + option :easy, :header, :bool, 42 + option :easy, :noprogress, :bool, 43 + option :easy, :nosignal, :bool, 99 + option :easy, :wildcardmatch, :bool, 197 + ## CALLBACK OPTIONS + option :easy, :writefunction, :callback, 11 + option :easy, :file, :cbdata, 1 + option_alias :easy, :file, :writedata + option :easy, :readfunction, :callback, 12 + option :easy, :infile, :cbdata, 9 + option_alias :easy, :infile, :readdata + option :easy, :ioctlfunction, :callback, 130 + option :easy, :ioctldata, :cbdata, 131 + option :easy, :seekfunction, :callback, 167 + option :easy, 
:seekdata, :cbdata, 168 + option :easy, :sockoptfunction, :callback, 148 + option :easy, :sockoptdata, :cbdata, 149 + option :easy, :opensocketfunction, :callback, 163 + option :easy, :opensocketdata, :cbdata, 164 + option :easy, :closesocketfunction, :callback, 208 + option :easy, :closesocketdata, :cbdata, 209 + option :easy, :path_as_is, :bool, 234 + option :easy, :progressfunction, :progress_callback, 56 + option :easy, :progressdata, :cbdata, 57 + option :easy, :headerfunction, :callback, 79 + option :easy, :writeheader, :cbdata, 29 + option_alias :easy, :writeheader, :headerdata + option :easy, :debugfunction, :debug_callback, 94 + option :easy, :debugdata, :cbdata, 95 + option :easy, :ssl_ctx_function, :callback, 108 + option :easy, :ssl_ctx_data, :cbdata, 109 + option :easy, :conv_to_network_function, :callback, 143 + option :easy, :conv_from_network_function, :callback, 142 + option :easy, :conv_from_utf8_function, :callback, 144 + option :easy, :interleavefunction, :callback, 196 + option :easy, :interleavedata, :cbdata, 195 + option :easy, :chunk_bgn_function, :callback, 198 + option :easy, :chunk_end_function, :callback, 199 + option :easy, :chunk_data, :cbdata, 201 + option :easy, :fnmatch_function, :callback, 200 + option :easy, :fnmatch_data, :cbdata, 202 + option :easy, :xferinfofunction, :progress_callback, 219 + option :easy, :xferinfodata, :cbdata, 57 + ## ERROR OPTIONS + option :easy, :errorbuffer, :buffer, 10, 256 + option :easy, :stderr, :dontuse_object, 37 + option :easy, :failonerror, :bool, 45 + ## NETWORK OPTIONS + option :easy, :url, :string, 2 + option :easy, :protocols, :bitmask, 181, [nil, :http, :https, :ftp, :ftps, :scp, :sftp, :telnet, :ldap, :ldaps, :dict, :file, :tftp, :imap, :imaps, :pop3, :pop3s, :smtp, :smtps, :rtsp, :rtmp, :rtmpt, :rtmpe, :rtmpte, :rtmps, :rtmpts, :gopher] + option :easy, :redir_protocols, :bitmask, 182, [nil, :http, :https, :ftp, :ftps, :scp, :sftp, :telnet, :ldap, :ldaps, :dict, :file, :tftp, :imap, :imaps, :pop3, :pop3s, :smtp, :smtps, :rtsp, :rtmp, :rtmpt, :rtmpe, :rtmpte, :rtmps, :rtmpts, :gopher] + option :easy, :proxy, :string, 4 + option :easy, :proxyport, :int, 59 + option :easy, :proxytype, :enum, 101, [:http, :http_1_0, :https, nil, :socks4, :socks5, :socks4a, :socks5_hostname] + option :easy, :noproxy, :string, 177 + option :easy, :httpproxytunnel, :bool, 61 + option :easy, :socks5_gssapi_service, :string, 179 + option :easy, :socks5_gssapi_nec, :bool, 180 + option :easy, :interface, :string, 62 + option :easy, :localport, :int, 139 + option :easy, :localportrange, :int, 140 + option :easy, :dns_cache_timeout, :int, 92 + option :easy, :dns_use_global_cache, :bool, 91 # Obsolete + option :easy, :dns_interface, :string, 221 + option :easy, :dns_local_ip4, :string, 222 + option :easy, :dns_shuffle_addresses, :bool, 275 + option :easy, :buffersize, :int, 98 + option :easy, :port, :int, 3 + option :easy, :tcp_nodelay, :bool, 121 + option :easy, :address_scope, :int, 171 + option :easy, :tcp_fastopen, :bool, 212 + option :easy, :tcp_keepalive, :bool, 213 + option :easy, :tcp_keepidle, :int, 214 + option :easy, :tcp_keepintvl, :int, 215 + ## NAMES and PASSWORDS OPTIONS (Authentication) + option :easy, :netrc, :enum, 51, [:ignored, :optional, :required] + option :easy, :netrc_file, :string, 118 + option :easy, :userpwd, :string, 5 + option :easy, :proxyuserpwd, :string, 6 + option :easy, :username, :string, 173 + option :easy, :password, :string, 174 + option :easy, :proxyusername, :string, 175 + option :easy, :proxypassword, 
:string, 176 + option :easy, :httpauth, :bitmask, 107, [:none, :basic, :digest, :gssnegotiate, :ntlm, :digest_ie, :ntlm_wb, {:only => 1<<31, :any => ~0x10, :anysafe => ~0x11, :auto => 0x1f}] + option :easy, :tlsauth_type, :enum, 206, [:none, :srp] + option :easy, :tlsauth_username, :string, 204 + option :easy, :tlsauth_password, :string, 205 + option :easy, :proxyauth, :bitmask, 111, [:none, :basic, :digest, :gssnegotiate, :ntlm, :digest_ie, :ntlm_wb, {:only => 1<<31, :any => ~0x10, :anysafe => ~0x11, :auto => 0x1f}] + option :easy, :sasl_ir, :bool, 218 + ## HTTP OPTIONS + option :easy, :autoreferer, :bool, 58 + option :easy, :accept_encoding, :string, 102 + option_alias :easy, :accept_encoding, :encoding + option :easy, :transfer_encoding, :bool, 207 + option :easy, :followlocation, :bool, 52 + option :easy, :unrestricted_auth, :bool, 105 + option :easy, :maxredirs, :int, 68 + option :easy, :postredir, :bitmask, 161, [:get_all, :post_301, :post_302, :post_303, {:post_all => [:post_301, :post_302, :post_303]}] + option_alias :easy, :postredir, :post301 + option :easy, :put, :bool, 54 + option :easy, :post, :bool, 47 + option :easy, :postfields, :string, 15 + option :easy, :postfieldsize, :int, 60 + option :easy, :postfieldsize_large, :off_t, 120 + option :easy, :copypostfields, :string_as_pointer, 165 + option :easy, :httppost, :ffipointer, 24 + option :easy, :referer, :string, 16 + option :easy, :useragent, :string, 18 + option :easy, :httpheader, :curl_slist, 23 + option :easy, :http200aliases, :curl_slist, 104 + option :easy, :cookie, :string, 22 + option :easy, :cookiefile, :string, 31 + option :easy, :cookiejar, :string, 82 + option :easy, :cookiesession, :bool, 96 + option :easy, :cookielist, :string, 135 + option :easy, :httpget, :bool, 80 + option :easy, :http_version, :enum, 84, [:none, :httpv1_0, :httpv1_1, :httpv2_0, :httpv2_tls, :httpv2_prior_knowledge] + option :easy, :ignore_content_length, :bool, 136 + option :easy, :http_content_decoding, :bool, 158 + option :easy, :http_transfer_decoding, :bool, 157 + ## SMTP OPTIONS + option :easy, :mail_from, :string, 186 + option :easy, :mail_rcpt, :curl_slist, 187 + option :easy, :mail_auth, :string, 217 + ## TFTP OPTIONS + option :easy, :tftp_blksize, :int, 178 + ## FTP OPTIONS + option :easy, :ftpport, :string, 17 + option :easy, :quote, :curl_slist, 28 + option :easy, :postquote, :curl_slist, 39 + option :easy, :prequote, :curl_slist, 93 + option :easy, :dirlistonly, :bool, 48 + option_alias :easy, :dirlistonly, :ftplistonly + option :easy, :append, :bool, 50 + option_alias :easy, :append, :ftpappend + option :easy, :ftp_use_eprt, :bool, 106 + option :easy, :ftp_use_epsv, :bool, 85 + option :easy, :ftp_use_pret, :bool, 188 + option :easy, :ftp_create_missing_dirs, :bool, 110 + option :easy, :ftp_response_timeout, :int, 112 + option_alias :easy, :ftp_response_timeout, :server_response_timeout + option :easy, :ftp_alternative_to_user, :string, 147 + option :easy, :ftp_skip_pasv_ip, :bool, 137 + option :easy, :ftpsslauth, :enum, 129, [:default, :ssl, :tls] + option :easy, :ftp_ssl_ccc, :enum, 154, [:none, :passive, :active] + option :easy, :ftp_account, :string, 134 + option :easy, :ftp_filemethod, :enum, 138, [:default, :multicwd, :nocwd, :singlecwd] + ## RTSP OPTIONS + option :easy, :rtsp_request, :enum, 189, [:none, :options, :describe, :announce, :setup, :play, :pause, :teardown, :get_parameter, :set_parameter, :record, :receive] + option :easy, :rtsp_session_id, :string, 190 + option :easy, :rtsp_stream_uri, :string, 191 + option 
:easy, :rtsp_transport, :string, 192 + option_alias :easy, :httpheader, :rtspheader + option :easy, :rtsp_client_cseq, :int, 193 + option :easy, :rtsp_server_cseq, :int, 194 + ## PROTOCOL OPTIONS + option :easy, :transfertext, :bool, 53 + option :easy, :proxy_transfer_mode, :bool, 166 + option :easy, :crlf, :bool, 27 + option :easy, :range, :string, 7 + option :easy, :resume_from, :int, 21 + option :easy, :resume_from_large, :off_t, 116 + option :easy, :customrequest, :string, 36 + option :easy, :filetime, :bool, 69 + option :easy, :nobody, :bool, 44 + option :easy, :infilesize, :int, 14 + option :easy, :infilesize_large, :off_t, 115 + option :easy, :upload, :bool, 46 + option :easy, :maxfilesize, :int, 114 + option :easy, :maxfilesize_large, :off_t, 117 + option :easy, :timecondition, :enum, 33, [:none, :ifmodsince, :ifunmodsince, :lastmod] + option :easy, :timevalue, :time, 34 + ## CONNECTION OPTIONS + option :easy, :timeout, :int, 13 + option :easy, :timeout_ms, :int, 155 + option :easy, :low_speed_limit, :int, 19 + option :easy, :low_speed_time, :int, 20 + option :easy, :max_send_speed_large, :off_t, 145 + option :easy, :max_recv_speed_large, :off_t, 146 + option :easy, :maxconnects, :int, 71 + option :easy, :fresh_connect, :bool, 74 + option :easy, :forbid_reuse, :bool, 75 + option :easy, :connecttimeout, :int, 78 + option :easy, :connecttimeout_ms, :int, 156 + option :easy, :ipresolve, :enum, 113, [:whatever, :v4, :v6] + option :easy, :connect_only, :bool, 141 + option :easy, :use_ssl, :enum, 119, [:none, :try, :control, :all] + option_alias :easy, :use_ssl, :ftp_ssl + option :easy, :resolve, :curl_slist, 203 + option :easy, :dns_servers, :string, 211 + option :easy, :accepttimeout_ms, :int, 212 + option :easy, :unix_socket_path, :string, 231 + option :easy, :pipewait, :bool, 237 + option_alias :easy, :unix_socket_path, :unix_socket + ## SSL and SECURITY OPTIONS + option :easy, :sslcert, :string, 25 + option :easy, :sslcerttype, :string, 86 + option :easy, :sslkey, :string, 87 + option :easy, :sslkeytype, :string, 88 + option :easy, :keypasswd, :string, 26 + option_alias :easy, :keypasswd, :sslcertpasswd + option_alias :easy, :keypasswd, :sslkeypasswd + option :easy, :sslengine, :string, 89 + option :easy, :sslengine_default, :none, 90 + option :easy, :sslversion, :enum, 32, [:default, :tlsv1, :sslv2, :sslv3, :tlsv1_0, :tlsv1_1, :tlsv1_2, :tlsv1_3] + option :easy, :ssl_verifypeer, :bool, 64 + option :easy, :cainfo, :string, 65 + option :easy, :issuercert, :string, 170 + option :easy, :capath, :string, 97 + option :easy, :crlfile, :string, 169 + option :easy, :ssl_verifyhost, :int, 81 + option :easy, :certinfo, :bool, 172 + option :easy, :random_file, :string, 76 + option :easy, :egdsocket, :string, 77 + option :easy, :ssl_cipher_list, :string, 83 + option :easy, :ssl_sessionid_cache, :bool, 150 + option :easy, :ssl_options, :bitmask, 216, [nil, :allow_beast] + option :easy, :krblevel, :string, 63 + option_alias :easy, :krblevel, :krb4level + option :easy, :gssapi_delegation, :bitmask, 210, [:none, :policy_flag, :flag] + option :easy, :pinnedpublickey, :string, 230 + option_alias :easy, :pinnedpublickey, :pinned_public_key + ## PROXY SSL OPTIONS + option :easy, :proxy_cainfo, :string, 246 + option :easy, :proxy_capath, :string, 247 + option :easy, :proxy_ssl_verifypeer, :bool, 248 + option :easy, :proxy_ssl_verifyhost, :int, 249 + option :easy, :proxy_sslversion, :enum, 250, [:default, :tlsv1, :sslv2, :sslv3, :tlsv1_0, :tlsv1_1, :tlsv1_2, :tlsv1_3] + option :easy, 
:proxy_tlsauth_username, :string, 251 + option :easy, :proxy_tlsauth_password, :string, 252 + option :easy, :proxy_tlsauth_type, :enum, 253, [:none, :srp] + option :easy, :proxy_sslcert, :string, 254 + option :easy, :proxy_sslcerttype, :string, 255 + option :easy, :proxy_sslkey, :string, 256 + option :easy, :proxy_sslkeytype, :string, 257 + option :easy, :proxy_keypasswd, :string, 258 + option_alias :easy, :proxy_keypasswd, :proxy_sslcertpasswd + option_alias :easy, :proxy_keypasswd, :proxy_sslkeypasswd + option :easy, :proxy_ssl_cipher_list, :string, 259 + option :easy, :proxy_crlfile, :string, 260 + option :easy, :proxy_ssl_options, :bitmask, 261, [nil, :allow_beast] + option :easy, :pre_proxy, :string, 262 + option :easy, :proxy_pinnedpublickey, :string, 263 + option_alias :easy, :proxy_pinnedpublickey, :proxy_pinned_public_key + option :easy, :proxy_issuercert, :string, 296 + ## SSH OPTIONS + option :easy, :ssh_auth_types, :bitmask, 151, [:none, :publickey, :password, :host, :keyboard, :agent, {:any => [:all], :default => [:any]}] + option :easy, :ssh_host_public_key_md5, :string, 162 + option :easy, :ssh_public_keyfile, :string, 152 + option :easy, :ssh_private_keyfile, :string, 153 + option :easy, :ssh_knownhosts, :string, 183 + option :easy, :ssh_keyfunction, :callback, 184 + option :easy, :khstat, :enum, -1, [:fine_add_to_file, :fine, :reject, :defer] # Kludge to make this enum available... Access via CurL::EASY_OPTIONS[:khstat][:opt] + option :easy, :ssh_keydata, :cbdata, 185 + ## OTHER OPTIONS + option :easy, :private, :cbdata, 103 + option :easy, :share, :dontuse_object, 100 + option :easy, :new_file_perms, :int, 159 + option :easy, :new_directory_perms, :int, 160 + ## TELNET OPTIONS + option :easy, :telnetoptions, :curl_slist, 70 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/settings.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/settings.rb new file mode 100644 index 0000000..8c0161b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/curls/settings.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Ethon + module Curl + callback :callback, [:pointer, :size_t, :size_t, :pointer], :size_t + callback :socket_callback, [:pointer, :int, :poll_action, :pointer, :pointer], :multi_code + callback :timer_callback, [:pointer, :long, :pointer], :multi_code + callback :debug_callback, [:pointer, :debug_info_type, :pointer, :size_t, :pointer], :int + callback :progress_callback, [:pointer, :long_long, :long_long, :long_long, :long_long], :int + ffi_lib_flags :now, :global + ffi_lib ['libcurl', 'libcurl.so.4'] + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy.rb new file mode 100644 index 0000000..355a519 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy.rb @@ -0,0 +1,315 @@ +# frozen_string_literal: true +require 'ethon/easy/informations' +require 'ethon/easy/features' +require 'ethon/easy/callbacks' +require 'ethon/easy/options' +require 'ethon/easy/header' +require 'ethon/easy/util' +require 'ethon/easy/params' +require 'ethon/easy/form' +require 'ethon/easy/http' +require 'ethon/easy/operations' +require 'ethon/easy/response_callbacks' +require 'ethon/easy/debug_info' +require 'ethon/easy/mirror' + +module Ethon + + # This is the class representing the libcurl easy interface + # See http://curl.haxx.se/libcurl/c/libcurl-easy.html for more informations. 
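+  # @example (sketch, not from the upstream docs) A HEAD-style request that
+  #   follows redirects; nobody: true maps to CURLOPT_NOBODY:
+  #   e = Ethon::Easy.new(url: "www.example.com", nobody: true, followlocation: true)
+  #   e.perform
+  #   #=> :ok
+  #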
+ # + # @example You can access the libcurl easy interface through this class, every request is based on it. The simplest setup looks like that: + # + # e = Ethon::Easy.new(url: "www.example.com") + # e.perform + # #=> :ok + # + # @example You can the reuse this Easy for the next request: + # + # e.reset # reset easy handle + # e.url = "www.google.com" + # e.followlocation = true + # e.perform + # #=> :ok + # + # @see initialize + class Easy + include Ethon::Easy::Informations + include Ethon::Easy::Callbacks + include Ethon::Easy::Options + include Ethon::Easy::Header + include Ethon::Easy::Http + include Ethon::Easy::Operations + include Ethon::Easy::ResponseCallbacks + extend Ethon::Easy::Features + + # Returns the curl return code. + # + # @return [ Symbol ] The return code. + # * :ok: All fine. Proceed as usual. + # * :unsupported_protocol: The URL you passed to libcurl used a + # protocol that this libcurl does not support. The support + # might be a compile-time option that you didn't use, it can + # be a misspelled protocol string or just a protocol + # libcurl has no code for. + # * :failed_init: Very early initialization code failed. This + # is likely to be an internal error or problem, or a + # resource problem where something fundamental couldn't + # get done at init time. + # * :url_malformat: The URL was not properly formatted. + # * :not_built_in: A requested feature, protocol or option + # was not found built-in in this libcurl due to a build-time + # decision. This means that a feature or option was not enabled + # or explicitly disabled when libcurl was built and in + # order to get it to function you have to get a rebuilt libcurl. + # * :couldnt_resolve_proxy: Couldn't resolve proxy. The given + # proxy host could not be resolved. + # * :couldnt_resolve_host: Couldn't resolve host. The given remote + # host was not resolved. + # * :couldnt_connect: Failed to connect() to host or proxy. + # * :ftp_weird_server_reply: After connecting to a FTP server, + # libcurl expects to get a certain reply back. This error + # code implies that it got a strange or bad reply. The given + # remote server is probably not an OK FTP server. + # * :remote_access_denied: We were denied access to the resource + # given in the URL. For FTP, this occurs while trying to + # change to the remote directory. + # * :ftp_accept_failed: While waiting for the server to connect + # back when an active FTP session is used, an error code was + # sent over the control connection or similar. + # * :ftp_weird_pass_reply: After having sent the FTP password to + # the server, libcurl expects a proper reply. This error code + # indicates that an unexpected code was returned. + # * :ftp_accept_timeout: During an active FTP session while + # waiting for the server to connect, the CURLOPT_ACCEPTTIMOUT_MS + # (or the internal default) timeout expired. + # * :ftp_weird_pasv_reply: libcurl failed to get a sensible result + # back from the server as a response to either a PASV or a + # EPSV command. The server is flawed. + # * :ftp_weird_227_format: FTP servers return a 227-line as a response + # to a PASV command. If libcurl fails to parse that line, + # this return code is passed back. + # * :ftp_cant_get_host: An internal failure to lookup the host used + # for the new connection. + # * :ftp_couldnt_set_type: Received an error when trying to set + # the transfer mode to binary or ASCII. + # * :partial_file: A file transfer was shorter or larger than + # expected. 
This happens when the server first reports an expected + # transfer size, and then delivers data that doesn't match the + # previously given size. + # * :ftp_couldnt_retr_file: This was either a weird reply to a + # 'RETR' command or a zero byte transfer complete. + # * :quote_error: When sending custom "QUOTE" commands to the + # remote server, one of the commands returned an error code that + # was 400 or higher (for FTP) or otherwise indicated unsuccessful + # completion of the command. + # * :http_returned_error: This is returned if CURLOPT_FAILONERROR is + # set TRUE and the HTTP server returns an error code that is >= 400. + # * :write_error: An error occurred when writing received data to a + # local file, or an error was returned to libcurl from a write callback. + # * :upload_failed: Failed starting the upload. For FTP, the server + # typically denied the STOR command. The error buffer usually + # contains the server's explanation for this. + # * :read_error: There was a problem reading a local file or an error + # returned by the read callback. + # * :out_of_memory: A memory allocation request failed. This is serious + # badness and things are severely screwed up if this ever occurs. + # * :operation_timedout: Operation timeout. The specified time-out + # period was reached according to the conditions. + # * :ftp_port_failed: The FTP PORT command returned error. This mostly + # happens when you haven't specified a good enough address for + # libcurl to use. See CURLOPT_FTPPORT. + # * :ftp_couldnt_use_rest: The FTP REST command returned error. This + # should never happen if the server is sane. + # * :range_error: The server does not support or accept range requests. + # * :http_post_error: This is an odd error that mainly occurs due to + # internal confusion. + # * :ssl_connect_error: A problem occurred somewhere in the SSL/TLS + # handshake. You really want the error buffer and read the message + # there as it pinpoints the problem slightly more. Could be + # certificates (file formats, paths, permissions), passwords, and others. + # * :bad_download_resume: The download could not be resumed because + # the specified offset was out of the file boundary. + # * :file_couldnt_read_file: A file given with FILE:// couldn't be + # opened. Most likely because the file path doesn't identify an + # existing file. Did you check file permissions? + # * :ldap_cannot_bind: LDAP cannot bind. LDAP bind operation failed. + # * :ldap_search_failed: LDAP search failed. + # * :function_not_found: Function not found. A required zlib function was not found. + # * :aborted_by_callback: Aborted by callback. A callback returned + # "abort" to libcurl. + # * :bad_function_argument: Internal error. A function was called with + # a bad parameter. + # * :interface_failed: Interface error. A specified outgoing interface + # could not be used. Set which interface to use for outgoing + # connections' source IP address with CURLOPT_INTERFACE. + # * :too_many_redirects: Too many redirects. When following redirects, + # libcurl hit the maximum amount. Set your limit with CURLOPT_MAXREDIRS. + # * :unknown_option: An option passed to libcurl is not recognized/known. + # Refer to the appropriate documentation. This is most likely a + # problem in the program that uses libcurl. The error buffer might + # contain more specific information about which exact option it concerns. + # * :telnet_option_syntax: A telnet option string was Illegally formatted. 
+ # * :peer_failed_verification: The remote server's SSL certificate or + # SSH md5 fingerprint was deemed not OK. + # * :got_nothing: Nothing was returned from the server, and under the + # circumstances, getting nothing is considered an error. + # * :ssl_engine_notfound: The specified crypto engine wasn't found. + # * :ssl_engine_setfailed: Failed setting the selected SSL crypto engine as default! + # * :send_error: Failed sending network data. + # * :recv_error: Failure with receiving network data. + # * :ssl_certproblem: problem with the local client certificate. + # * :ssl_cipher: Couldn't use specified cipher. + # * :bad_content_encoding: Unrecognized transfer encoding. + # * :ldap_invalid_url: Invalid LDAP URL. + # * :filesize_exceeded: Maximum file size exceeded. + # * :use_ssl_failed: Requested FTP SSL level failed. + # * :send_fail_rewind: When doing a send operation curl had to rewind the data to + # retransmit, but the rewinding operation failed. + # * :ssl_engine_initfailed: Initiating the SSL Engine failed. + # * :login_denied: The remote server denied curl to login + # * :tftp_notfound: File not found on TFTP server. + # * :tftp_perm: Permission problem on TFTP server. + # * :remote_disk_full: Out of disk space on the server. + # * :tftp_illegal: Illegal TFTP operation. + # * :tftp_unknownid: Unknown TFTP transfer ID. + # * :remote_file_exists: File already exists and will not be overwritten. + # * :tftp_nosuchuser: This error should never be returned by a properly + # functioning TFTP server. + # * :conv_failed: Character conversion failed. + # * :conv_reqd: Caller must register conversion callbacks. + # * :ssl_cacert_badfile: Problem with reading the SSL CA cert (path? access rights?): + # * :remote_file_not_found: The resource referenced in the URL does not exist. + # * :ssh: An unspecified error occurred during the SSH session. + # * :ssl_shutdown_failed: Failed to shut down the SSL connection. + # * :again: Socket is not ready for send/recv wait till it's ready and try again. + # This return code is only returned from curl_easy_recv(3) and curl_easy_send(3) + # * :ssl_crl_badfile: Failed to load CRL file + # * :ssl_issuer_error: Issuer check failed + # * :ftp_pret_failed: The FTP server does not understand the PRET command at + # all or does not support the given argument. Be careful when + # using CURLOPT_CUSTOMREQUEST, a custom LIST command will be sent with PRET CMD + # before PASV as well. + # * :rtsp_cseq_error: Mismatch of RTSP CSeq numbers. + # * :rtsp_session_error: Mismatch of RTSP Session Identifiers. + # * :ftp_bad_file_list: Unable to parse FTP file list (during FTP wildcard downloading). + # * :chunk_failed: Chunk callback reported error. + # * :obsolete: These error codes will never be returned. They were used in an old + # libcurl version and are currently unused. + # + # @see http://curl.haxx.se/libcurl/c/libcurl-errors.html + attr_accessor :return_code + + # Initialize a new Easy. + # It initializes curl, if not already done and applies the provided options. + # Look into {Ethon::Easy::Options Options} to see what you can provide in the + # options hash. + # + # @example Create a new Easy. + # Easy.new(url: "www.google.de") + # + # @param [ Hash ] options The options to set. + # @option options :headers [ Hash ] Request headers. + # + # @return [ Easy ] A new Easy. 
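+    # @example (sketch; the headers hash is illustrative) Request headers are
+    #   passed like any other option:
+    #   Easy.new(url: "www.example.com", headers: { "Accept" => "application/json" })
+    #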
+ # + # @see Ethon::Easy::Options + # @see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html + def initialize(options = {}) + Curl.init + set_attributes(options) + set_callbacks + end + + # Set given options. + # + # @example Set options. + # easy.set_attributes(options) + # + # @param [ Hash ] options The options. + # + # @raise InvalidOption + # + # @see initialize + def set_attributes(options) + options.each_pair do |key, value| + method = "#{key}=" + unless respond_to?(method) + raise Errors::InvalidOption.new(key) + end + send(method, value) + end + end + + # Reset easy. This means resetting all options and instance variables. + # Also the easy handle is resetted. + # + # @example Reset. + # easy.reset + def reset + @url = nil + @escape = nil + @hash = nil + @on_complete = nil + @on_headers = nil + @on_body = nil + @on_progress = nil + @procs = nil + @mirror = nil + Curl.easy_reset(handle) + set_callbacks + end + + # Clones libcurl session handle. This means that all options that is set in + # the current handle will be set on duplicated handle. + def dup + e = super + e.handle = Curl.easy_duphandle(handle) + e.instance_variable_set(:@body_write_callback, nil) + e.instance_variable_set(:@header_write_callback, nil) + e.instance_variable_set(:@debug_callback, nil) + e.instance_variable_set(:@progress_callback, nil) + e.set_callbacks + e + end + # Url escapes the value. + # + # @example Url escape. + # easy.escape(value) + # + # @param [ String ] value The value to escape. + # + # @return [ String ] The escaped value. + # + # @api private + def escape(value) + string_pointer = Curl.easy_escape(handle, value, value.bytesize) + returned_string = string_pointer.read_string + Curl.free(string_pointer) + returned_string + end + + # Returns the informations available through libcurl as + # a hash. + # + # @return [ Hash ] The informations hash. + def to_hash + Kernel.warn("Ethon: Easy#to_hash is deprecated and will be removed, please use #mirror.") + mirror.to_hash + end + + def mirror + @mirror ||= Mirror.from_easy(self) + end + + # Return pretty log out. + # + # @example Return log out. + # easy.log_inspect + # + # @return [ String ] The log out. + def log_inspect + "EASY #{mirror.log_informations.map{|k, v| "#{k}=#{v}"}.flatten.join(' ')}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/callbacks.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/callbacks.rb new file mode 100644 index 0000000..73f2f21 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/callbacks.rb @@ -0,0 +1,149 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains all the logic around the callbacks, + # which are needed to interact with libcurl. + # + # @api private + module Callbacks + + # :nodoc: + def self.included(base) + base.send(:attr_accessor, *[:response_body, :response_headers, :debug_info]) + end + + # Set writefunction and headerfunction callback. + # They are called by libcurl in order to provide the header and + # the body from the request. + # + # @example Set callbacks. + # easy.set_callbacks + def set_callbacks + Curl.set_option(:writefunction, body_write_callback, handle) + Curl.set_option(:headerfunction, header_write_callback, handle) + Curl.set_option(:debugfunction, debug_callback, handle) + @response_body = String.new + @response_headers = String.new + @headers_called = false + @debug_info = Ethon::Easy::DebugInfo.new + end + + # Returns the body write callback. 
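+      # libcurl aborts the transfer when a write callback returns anything
+      # other than the number of bytes handed to it, which is why an :abort
+      # result from the body hook is translated to -1 below.
+      #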
+ #
+ # @example Return the callback.
+ # easy.body_write_callback
+ #
+ # @return [ Proc ] The callback.
+ def body_write_callback
+ @body_write_callback ||= proc do |stream, size, num, object|
+ headers
+ result = body(chunk = stream.read_string(size * num))
+ @response_body << chunk if result == :unyielded
+ result != :abort ? size * num : -1
+ end
+ end
+
+ # Returns the header write callback.
+ #
+ # @example Return the callback.
+ # easy.header_write_callback
+ #
+ # @return [ Proc ] The callback.
+ def header_write_callback
+ @header_write_callback ||= proc {|stream, size, num, object|
+ result = headers
+ @response_headers << stream.read_string(size * num)
+ result != :abort ? size * num : -1
+ }
+ end
+
+ # Returns the debug callback. This callback is currently used
+ # to write the raw HTTP request headers.
+ #
+ # @example Return the callback.
+ # easy.debug_callback
+ #
+ # @return [ Proc ] The callback.
+ def debug_callback
+ @debug_callback ||= proc {|handle, type, data, size, udata|
+ message = data.read_string(size)
+ @debug_info.add type, message
+ print message unless [:data_in, :data_out].include?(type)
+ 0
+ }
+ end
+
+ def set_progress_callback
+ if Curl.version_info[:version] >= "7.32.0"
+ Curl.set_option(:xferinfofunction, progress_callback, handle)
+ else
+ Curl.set_option(:progressfunction, progress_callback, handle)
+ end
+ end
+
+ # Returns the progress callback.
+ #
+ # @example Return the callback.
+ # easy.progress_callback
+ #
+ # @return [ Proc ] The callback.
+ def progress_callback
+ @progress_callback ||= proc { |_, dltotal, dlnow, ultotal, ulnow|
+ progress(dltotal, dlnow, ultotal, ulnow)
+ 0
+ }
+ end
+
+ # Set the read callback. This callback is used by libcurl to
+ # read data when performing a PUT request.
+ #
+ # @example Set the callback.
+ # easy.set_read_callback("a=1")
+ #
+ # @param [ String ] body The body.
+ def set_read_callback(body)
+ @request_body_read = 0
+ readfunction do |stream, size, num, object|
+ size = size * num
+ body_size = if body.respond_to?(:bytesize)
+ body.bytesize
+ elsif body.respond_to?(:size)
+ body.size
+ elsif body.is_a?(File)
+ File.size(body.path)
+ end
+
+ left = body_size - @request_body_read
+ size = left if size > left
+
+ if size > 0
+ chunk = if body.respond_to?(:byteslice)
+ body.byteslice(@request_body_read, size)
+ elsif body.respond_to?(:read)
+ body.read(size)
+ else
+ body[@request_body_read, size]
+ end
+
+ stream.write_string(
+ chunk, size
+ )
+ @request_body_read += size
+ end
+ size
+ end
+ end
+
+ # Returns the body read callback.
+ #
+ # @example Return the callback.
+ # easy.read_callback
+ #
+ # @return [ Proc ] The callback.
+ def read_callback
+ @read_callback
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/debug_info.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/debug_info.rb
new file mode 100644
index 0000000..d8f15ed
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/debug_info.rb
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+module Ethon
+ class Easy
+
+ # This class is used to store and retrieve debug information,
+ # which is only saved when verbose is set to true.
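+ #
+ # A minimal usage sketch (hypothetical URL; assumes verbose mode is on
+ # so that libcurl emits debug data):
+ # easy = Ethon::Easy.new(url: "http://example.com", verbose: true)
+ # easy.perform
+ # easy.debug_info.header_out #=> the raw request headers, if captured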
+ # + # @api private + class DebugInfo + + MESSAGE_TYPES = Ethon::Curl::DebugInfoType.to_h.keys + + class Message + attr_reader :type, :message + + def initialize(type, message) + @type = type + @message = message + end + end + + def initialize + @messages = [] + end + + def add(type, message) + @messages << Message.new(type, message) + end + + def messages_for(type) + @messages.select {|m| m.type == type }.map(&:message) + end + + MESSAGE_TYPES.each do |type| + eval %Q|def #{type}; messages_for(:#{type}); end| + end + + def to_a + @messages.map(&:message) + end + + def to_h + Hash[MESSAGE_TYPES.map {|k| [k, send(k)] }] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/features.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/features.rb new file mode 100644 index 0000000..c4a8962 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/features.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains class methods for feature checks + module Features + # Returns true if this curl version supports zlib. + # + # @example Return wether zlib is supported. + # Ethon::Easy.supports_zlib? + # + # @return [ Boolean ] True if supported, else false. + def supports_zlib? + !!(Curl.version_info[:features] & Curl::VERSION_LIBZ) + end + + # Returns true if this curl version supports AsynchDNS. + # + # @example + # Ethon::Easy.supports_asynch_dns? + # + # @return [ Boolean ] True if supported, else false. + def supports_asynch_dns? + !!(Curl.version_info[:features] & Curl::VERSION_ASYNCHDNS) + end + + alias :supports_timeout_ms? :supports_asynch_dns? + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/form.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/form.rb new file mode 100644 index 0000000..681d0f9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/form.rb @@ -0,0 +1,107 @@ +# frozen_string_literal: true +require 'ethon/easy/util' +require 'ethon/easy/queryable' + +module Ethon + class Easy + + # This class represents a form and is used to send a payload in the + # request body via POST/PUT. + # It handles multipart forms, too. + # + # @api private + class Form + include Ethon::Easy::Util + include Ethon::Easy::Queryable + + # Return a new Form. + # + # @example Return a new Form. + # Form.new({}) + # + # @param [ Hash ] params The parameter with which to initialize the form. + # + # @return [ Form ] A new Form. + def initialize(easy, params, multipart = nil) + @easy = easy + @params = params || {} + @multipart = multipart + end + + # Return a pointer to the first form element in libcurl. + # + # @example Return the first form element. + # form.first + # + # @return [ FFI::Pointer ] The first element. + def first + @first ||= FFI::MemoryPointer.new(:pointer) + end + + # Return a pointer to the last form element in libcurl. + # + # @example Return the last form element. + # form.last + # + # @return [ FFI::Pointer ] The last element. + def last + @last ||= FFI::MemoryPointer.new(:pointer) + end + + # Return if form is multipart. The form is multipart + # when it contains a file or multipart option is set on the form during creation. + # + # @example Return if form is multipart. + # form.multipart? + # + # @return [ Boolean ] True if form is multipart, else false. + def multipart? 
+ return true if @multipart + query_pairs.any?{|pair| pair.respond_to?(:last) && pair.last.is_a?(Array)} + end + + # Add form elements to libcurl. + # + # @example Add form to libcurl. + # form.materialize + def materialize + query_pairs.each { |pair| form_add(pair.first.to_s, pair.last) } + end + + private + + def form_add(name, content) + case content + when Array + Curl.formadd(first, last, + :form_option, :copyname, :pointer, name, + :form_option, :namelength, :long, name.bytesize, + :form_option, :file, :string, content[2], + :form_option, :filename, :string, content[0], + :form_option, :contenttype, :string, content[1], + :form_option, :end + ) + else + Curl.formadd(first, last, + :form_option, :copyname, :pointer, name, + :form_option, :namelength, :long, name.bytesize, + :form_option, :copycontents, :pointer, content.to_s, + :form_option, :contentslength, :long, content ? content.to_s.bytesize : 0, + :form_option, :end + ) + end + + setup_garbage_collection + end + + def setup_garbage_collection + # first is a pointer to a pointer. Since it's a MemoryPointer it will + # auto clean itself up, but we need to clean up the object it points + # to. So this results in (pseudo-c): + # form_data_cleanup_handler = *first + # curl_form_free(form_data_cleanup_handler) + @form_data_cleanup_handler ||= FFI::AutoPointer.new(@first.get_pointer(0), Curl.method(:formfree)) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/header.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/header.rb new file mode 100644 index 0000000..8984a24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/header.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true +module Ethon + class Easy + # This module contains the logic around adding headers to libcurl. + # + # @api private + module Header + # Return headers, return empty hash if none. + # + # @example Return the headers. + # easy.headers + # + # @return [ Hash ] The headers. + def headers + @headers ||= {} + end + + # Set the headers. + # + # @example Set the headers. + # easy.headers = {'User-Agent' => 'ethon'} + # + # @param [ Hash ] headers The headers. + def headers=(headers) + headers ||= {} + header_list = nil + headers.each do |k, v| + header_list = Curl.slist_append(header_list, compose_header(k,v)) + end + Curl.set_option(:httpheader, header_list, handle) + + @header_list = header_list && FFI::AutoPointer.new(header_list, Curl.method(:slist_free_all)) + end + + # Return header_list. + # + # @example Return header_list. + # easy.header_list + # + # @return [ FFI::Pointer ] The header list. + def header_list + @header_list + end + + # Compose libcurl header string from key and value. + # Also replaces null bytes, because libcurl will complain + # otherwise. + # + # @example Compose header. + # easy.compose_header('User-Agent', 'Ethon') + # + # @param [ String ] key The header name. + # @param [ String ] value The header value. + # + # @return [ String ] The composed header. 
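+ #
+ # A short illustrative sketch (hypothetical values): each pair set via
+ # Easy#headers= is passed through this method before being handed to
+ # libcurl.
+ # easy.headers = { 'User-Agent' => 'ethon' }
+ # # compose_header('User-Agent', 'ethon') #=> "User-Agent: ethon"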
+ def compose_header(key, value) + Util.escape_zero_byte("#{key}: #{value}") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http.rb new file mode 100644 index 0000000..4359830 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true +require 'ethon/easy/http/actionable' +require 'ethon/easy/http/post' +require 'ethon/easy/http/get' +require 'ethon/easy/http/head' +require 'ethon/easy/http/put' +require 'ethon/easy/http/delete' +require 'ethon/easy/http/patch' +require 'ethon/easy/http/options' +require 'ethon/easy/http/custom' + +module Ethon + class Easy + + # This module contains logic about making valid HTTP requests. + module Http + + # Set specified options in order to make a HTTP request. + # Look at {Ethon::Easy::Options Options} to see what you can + # provide in the options hash. + # + # @example Set options for HTTP request. + # easy.http_request("www.google.com", :get, {}) + # + # @param [ String ] url The url. + # @param [ String ] action_name The HTTP action name. + # @param [ Hash ] options The options hash. + # + # @option options :params [ Hash ] Params hash which + # is attached to the url. + # @option options :body [ Hash ] Body hash which + # becomes the request body. It is a PUT body for + # PUT requests and a POST for everything else. + # @option options :headers [ Hash ] Request headers. + # + # @return [ void ] + # + # @see Ethon::Easy::Options + def http_request(url, action_name, options = {}) + fabricate(url, action_name, options).setup(self) + end + + private + + # Return the corresponding action class. + # + # @example Return the action. + # Action.fabricate(:get) + # Action.fabricate(:smash) + # + # @param [ String ] url The url. + # @param [ String ] action_name The HTTP action name. + # @param [ Hash ] options The option hash. + # + # @return [ Easy::Ethon::Actionable ] The request instance. + def fabricate(url, action_name, options) + constant_name = action_name.to_s.capitalize + + if Ethon::Easy::Http.const_defined?(constant_name) + Ethon::Easy::Http.const_get(constant_name).new(url, options) + else + Ethon::Easy::Http::Custom.new(constant_name.upcase, url, options) + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/actionable.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/actionable.rb new file mode 100644 index 0000000..374a21c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/actionable.rb @@ -0,0 +1,157 @@ +# frozen_string_literal: true +require 'ethon/easy/http/putable' +require 'ethon/easy/http/postable' + +module Ethon + class Easy + module Http + # This module represents a Http Action and is a factory + # for more real actions like GET, HEAD, POST and PUT. + module Actionable + + QUERY_OPTIONS = [ :params, :body, :params_encoding ] + + # Create a new action. + # + # @example Create a new action. + # Action.new("www.example.com", {}) + # + # @param [ String ] url The url. + # @param [ Hash ] options The options. + # + # @return [ Action ] A new action. + def initialize(url, options) + @url = url + @options, @query_options = parse_options(options) + end + + # Return the url. + # + # @example Return url. + # action.url + # + # @return [ String ] The url. + def url + @url + end + + # Return the options hash. + # + # @example Return options. 
+ # action.options + # + # @return [ Hash ] The options. + def options + @options + end + + # Returns the query options hash. + # + # @example Return query options. + # action.query_options + # + # @return [ Hash ] The query options. + def query_options + @query_options + end + + # Return the params. + # + # @example Return params. + # action.params + # + # @return [ Params ] The params. + def params + @params ||= Params.new(@easy, query_options.fetch(:params, nil)) + end + + # Return the form. + # + # @example Return form. + # action.form + # + # @return [ Form ] The form. + def form + @form ||= Form.new(@easy, query_options.fetch(:body, nil), options.fetch(:multipart, nil)) + end + + # Get the requested array encoding. By default it's + # :typhoeus, but it can also be set to :rack. + # + # @example Get encoding from options + # action.params_encoding + # + def params_encoding + @params_encoding ||= query_options.fetch(:params_encoding, :typhoeus) + end + + # Setup everything necessary for a proper request. + # + # @example setup. + # action.setup(easy) + # + # @param [ easy ] easy the easy to setup. + def setup(easy) + @easy = easy + + # Order is important, @easy will be used to provide access to options + # relevant to the following operations (like whether or not to escape + # values). + easy.set_attributes(options) + + set_form(easy) unless form.empty? + + if params.empty? + easy.url = url + else + set_params(easy) + end + end + + # Setup request with params. + # + # @example Setup nothing. + # action.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_params(easy) + params.escape = easy.escape? + params.params_encoding = params_encoding + + base_url, base_params = url.split('?') + base_url << '?' + base_url << base_params.to_s + base_url << '&' if base_params + base_url << params.to_s + + easy.url = base_url + end + + # Setup request with form. + # + # @example Setup nothing. + # action.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + end + + private + + def parse_options(options) + query_options = {} + options = options.dup + + QUERY_OPTIONS.each do |query_option| + if options.key?(query_option) + query_options[query_option] = options.delete(query_option) + end + end + + return options, query_options + end + end + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/custom.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/custom.rb new file mode 100644 index 0000000..a00f3e3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/custom.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making requests for custom HTTP verbs. + class Custom + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + def initialize(verb, url, options) + @verb = verb + super(url, options) + end + + # Setup easy to make a request. + # + # @example Setup. + # custom.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. 
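+ #
+ # An illustrative sketch (hypothetical URL and verb), assuming the
+ # usual entry point Easy#http_request; a file in the body makes the
+ # form multipart:
+ # easy.http_request("www.example.com", :purge, {})
+ # easy.http_request("www.example.com", :post,
+ # body: { file: File.open("/tmp/upload.txt", "r") })
+ # easy.perform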
+ def setup(easy) + super + easy.customrequest = @verb + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/delete.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/delete.rb new file mode 100644 index 0000000..9cc2226 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/delete.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making DELETE requests. + class Delete + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a DELETE request. + # + # @example Setup customrequest. + # delete.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "DELETE" + end + end + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/get.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/get.rb new file mode 100644 index 0000000..6175e42 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/get.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making GET requests. + class Get + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a GET request. + # + # @example Setup. + # get.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "GET" unless form.empty? + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/head.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/head.rb new file mode 100644 index 0000000..d9bdbd2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/head.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making HEAD requests. + class Head + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a HEAD request. + # + # @example Setup. + # get.set_params(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.nobody = true + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/options.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/options.rb new file mode 100644 index 0000000..6e12c29 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/options.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making OPTIONS requests. + class Options + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a OPTIONS request. + # + # @example Setup. + # options.setup(easy) + # + # @param [ Easy ] easy The easy to setup. 
+ def setup(easy) + super + easy.customrequest = "OPTIONS" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/patch.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/patch.rb new file mode 100644 index 0000000..e616383 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/patch.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making PATCH requests. + class Patch + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a PATCH request. + # + # @example Setup. + # patch.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + easy.customrequest = "PATCH" + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/post.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/post.rb new file mode 100644 index 0000000..f773f79 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/post.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + # This class knows everything about making POST requests. + class Post + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Postable + + # Setup easy to make a POST request. + # + # @example Setup. + # post.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + if form.empty? + easy.postfieldsize = 0 + easy.copypostfields = "" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/postable.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/postable.rb new file mode 100644 index 0000000..bd42f63 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/postable.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This module contains logic for setting up a [multipart] POST body. + module Postable + + # Set things up when form is provided. + # Deals with multipart forms. + # + # @example Setup. + # post.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + easy.url ||= url + form.params_encoding = params_encoding + if form.multipart? + form.escape = false + form.materialize + easy.httppost = form.first.read_pointer + else + form.escape = easy.escape? + easy.postfieldsize = form.to_s.bytesize + easy.copypostfields = form.to_s + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/put.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/put.rb new file mode 100644 index 0000000..202cbee --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/put.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This class knows everything about making PUT requests. + class Put + include Ethon::Easy::Http::Actionable + include Ethon::Easy::Http::Putable + + # Setup easy to make a PUT request. + # + # @example Setup. + # put.setup(easy) + # + # @param [ Easy ] easy The easy to setup. + def setup(easy) + super + if form.empty? 
+ easy.upload = true + easy.infilesize = 0 + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/putable.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/putable.rb new file mode 100644 index 0000000..a639f06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/http/putable.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true +module Ethon + class Easy + module Http + + # This module contains logic about setting up a PUT body. + module Putable + # Set things up when form is provided. + # Deals with multipart forms. + # + # @example Setup. + # put.set_form(easy) + # + # @param [ Easy ] easy The easy to setup. + def set_form(easy) + easy.upload = true + form.escape = true + form.params_encoding = params_encoding + easy.infilesize = form.to_s.bytesize + easy.set_read_callback(form.to_s) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/informations.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/informations.rb new file mode 100644 index 0000000..cac62a3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/informations.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the methods to return informations + # from the easy handle. See http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html + # for more information. + module Informations + + # Holds available informations and their type, which is needed to + # request the informations from libcurl. + AVAILABLE_INFORMATIONS = { + # Return the available HTTP auth methods. + :httpauth_avail => :long, + + # Return the total time in seconds for the previous + # transfer, including name resolution, TCP connection, etc. + :total_time => :double, + + # Return the time, in seconds, it took from the start + # until the first byte was received by libcurl. This + # includes pre-transfer time and also the time the + # server needs to calculate the result. + :starttransfer_time => :double, + + # Return the time, in seconds, it took from the start + # until the SSL/SSH connect/handshake to the remote + # host was completed. This time is most often very near + # to the pre-transfer time, except for cases such as HTTP + # pipelining where the pre-transfer time can be delayed + # due to waits in line for the pipeline and more. + :appconnect_time => :double, + + # Return the time, in seconds, it took from the start + # until the file transfer was just about to begin. This + # includes all pre-transfer commands and negotiations + # that are specific to the particular protocol(s) involved. + # It does not involve the sending of the protocol- + # specific request that triggers a transfer. + :pretransfer_time => :double, + + # Return the time, in seconds, it took from the start + # until the connect to the remote host (or proxy) was completed. + :connect_time => :double, + + # Return the time, in seconds, it took from the + # start until the name resolution was completed. + :namelookup_time => :double, + + # Return the time, in seconds, it took for all redirection steps + # include name lookup, connect, pretransfer and transfer before the + # final transaction was started. time_redirect shows the complete + # execution time for multiple redirections. (Added in 7.12.3) + :redirect_time => :double, + + # Return the last used effective url. 
+ :effective_url => :string,
+
+ # Return the string holding the IP address of the most recent
+ # connection done with this curl handle. This string
+ # may be IPv6 if that's enabled.
+ :primary_ip => :string,
+
+ # Return the last received HTTP, FTP or SMTP response code.
+ # The value will be zero if no server response code has
+ # been received. Note that a proxy's CONNECT response should
+ # be read with http_connect_code and not this.
+ :response_code => :long,
+
+ :request_size => :long,
+
+ # Return the total number of redirections that were
+ # actually followed.
+ :redirect_count => :long,
+
+ # URL a redirect would take you to, had you enabled redirects (Added in 7.18.2)
+ :redirect_url => :string,
+
+ # Return the total number of bytes that were uploaded.
+ :size_upload => :double,
+
+ # Return the total number of bytes that were downloaded.
+ # The amount is only for the latest transfer and will be reset again
+ # for each new transfer. This counts actual payload data, what's
+ # also commonly called body. All meta and header data are excluded
+ # and will not be counted in this number.
+ :size_download => :double,
+
+ # Return the bytes/second, the average upload speed that curl
+ # measured for the complete upload.
+ :speed_upload => :double,
+
+ # Return the bytes/second, the average download speed that curl
+ # measured for the complete download.
+ :speed_download => :double
+ }
+
+ AVAILABLE_INFORMATIONS.each do |name, type|
+ eval %Q|def #{name}; Curl.send(:get_info_#{type}, :#{name}, handle); end|
+ end
+
+ # Returns true if this curl version supports zlib.
+ #
+ # @example Return whether zlib is supported.
+ # easy.supports_zlib?
+ #
+ # @return [ Boolean ] True if supported, else false.
+ # @deprecated Please use the static version instead
+ def supports_zlib?
+ Kernel.warn("Ethon: Easy#supports_zlib? is deprecated and will be removed, please use Easy.supports_zlib? instead.")
+ Easy.supports_zlib?
+ end
+
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/mirror.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/mirror.rb
new file mode 100644
index 0000000..f485c9f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/mirror.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+module Ethon
+ class Easy
+ class Mirror
+ attr_reader :options
+ alias_method :to_hash, :options
+
+ INFORMATIONS_TO_MIRROR = Informations::AVAILABLE_INFORMATIONS.keys +
+ [:return_code, :response_headers, :response_body, :debug_info]
+
+ INFORMATIONS_TO_LOG = [:effective_url, :response_code, :return_code, :total_time]
+
+ def self.from_easy(easy)
+ options = {}
+ INFORMATIONS_TO_MIRROR.each do |info|
+ options[info] = easy.send(info)
+ end
+ new(options)
+ end
+
+ def initialize(options = {})
+ @options = options
+ end
+
+ def log_informations
+ Hash[*INFORMATIONS_TO_LOG.map do |info|
+ [info, options[info]]
+ end.flatten]
+ end
+
+ INFORMATIONS_TO_MIRROR.each do |info|
+ eval %Q|def #{info}; options[:#{info}]; end|
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/operations.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/operations.rb
new file mode 100644
index 0000000..ad5397d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/operations.rb
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+module Ethon
+ class Easy
+ # This module contains the logic to prepare and perform
+ # an easy.
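+ #
+ # A minimal perform sketch (hypothetical URL):
+ # easy = Ethon::Easy.new(url: "http://example.com")
+ # easy.perform #=> return code as a symbol, e.g. :ok
+ # easy.mirror.log_informations #=> url, status, return code and timing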
+ module Operations + # Returns a pointer to the curl easy handle. + # + # @example Return the handle. + # easy.handle + # + # @return [ FFI::Pointer ] A pointer to the curl easy handle. + def handle + @handle ||= FFI::AutoPointer.new(Curl.easy_init, Curl.method(:easy_cleanup)) + end + + # Sets a pointer to the curl easy handle. + # @param [ ::FFI::Pointer ] Easy handle that will be assigned. + def handle=(h) + @handle = h + end + + # Perform the easy request. + # + # @example Perform the request. + # easy.perform + # + # @return [ Integer ] The return code. + def perform + @return_code = Curl.easy_perform(handle) + if Ethon.logger.debug? + Ethon.logger.debug { "ETHON: performed #{log_inspect}" } + end + complete + @return_code + end + + # Clean up the easy. + # + # @example Perform clean up. + # easy.cleanup + # + # @return the result of the free which is nil + def cleanup + handle.free + end + + # Prepare the easy. Options, headers and callbacks + # were set. + # + # @example Prepare easy. + # easy.prepare + # + # @deprecated It is no longer necessary to call prepare. + def prepare + Ethon.logger.warn( + "ETHON: It is no longer necessary to call "+ + "Easy#prepare. It's going to be removed "+ + "in future versions." + ) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/options.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/options.rb new file mode 100644 index 0000000..30c4df6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/options.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the logic and knowledge about the + # available options on easy. + module Options + attr_reader :url + + def url=(value) + @url = value + Curl.set_option(:url, value, handle) + end + + def escape=( b ) + @escape = b + end + + def escape? + return true if !defined?(@escape) || @escape.nil? + @escape + end + + def multipart=(b) + @multipart = b + end + + def multipart? + !!@multipart + end + + Curl.easy_options(nil).each do |opt, props| + method_name = "#{opt}=".freeze + unless method_defined? method_name + define_method(method_name) do |value| + Curl.set_option(opt, value, handle) + value + end + end + next if props[:type] != :callback || method_defined?(opt) + define_method(opt) do |&block| + @procs ||= {} + @procs[opt.to_sym] = block + Curl.set_option(opt, block, handle) + nil + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/params.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/params.rb new file mode 100644 index 0000000..41dad8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/params.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true +require 'ethon/easy/util' +require 'ethon/easy/queryable' + +module Ethon + class Easy + + # This class represents HTTP request parameters. + # + # @api private + class Params + include Ethon::Easy::Util + include Ethon::Easy::Queryable + + # Create a new Params. + # + # @example Create a new Params. + # Params.new({}) + # + # @param [ Hash ] params The params to use. + # + # @return [ Params ] A new Params. 
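+ #
+ # An illustrative sketch (hypothetical values): params given to
+ # Easy#http_request end up appended to the url as a query string.
+ # easy.http_request("www.example.com", :get, params: { a: 1 })
+ # # the url becomes www.example.com?a=1 when the request is set up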
+ def initialize(easy, params) + @easy = easy + @params = params || {} + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/queryable.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/queryable.rb new file mode 100644 index 0000000..47f7901 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/queryable.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains logic about building + # query parameters for url or form. + module Queryable + + # :nodoc: + def self.included(base) + base.send(:attr_accessor, :escape) + base.send(:attr_accessor, :params_encoding) + end + + # Return wether there are elements in params or not. + # + # @example Return if params is empty. + # form.empty? + # + # @return [ Boolean ] True if params is empty, else false. + def empty? + @params.empty? + end + + # Return the string representation of params. + # + # @example Return string representation. + # params.to_s + # + # @return [ String ] The string representation. + def to_s + @to_s ||= query_pairs.map{ |pair| + return pair if pair.is_a?(String) + + if escape && @easy + pair.map{ |e| @easy.escape(e.to_s) }.join("=") + else + pair.join("=") + end + }.join('&') + end + + # Return the query pairs. + # + # @example Return the query pairs. + # params.query_pairs + # + # @return [ Array ] The query pairs. + def query_pairs + @query_pairs ||= build_query_pairs(@params) + end + + # Return query pairs build from a hash. + # + # @example Build query pairs. + # action.build_query_pairs({a: 1, b: 2}) + # #=> [[:a, 1], [:b, 2]] + # + # @param [ Hash ] hash The hash to go through. + # + # @return [ Array ] The array of query pairs. + def build_query_pairs(hash) + return [hash] if hash.is_a?(String) + + pairs = [] + recursively_generate_pairs(hash, nil, pairs) + pairs + end + + # Return file info for a file. + # + # @example Return file info. + # action.file_info(File.open('fubar', 'r')) + # + # @param [ File ] file The file to handle. + # + # @return [ Array ] Array of informations. + def file_info(file) + filename = File.basename(file.path) + [ + filename, + mime_type(filename), + File.expand_path(file.path) + ] + end + + private + + def mime_type(filename) + if defined?(MIME) && t = MIME::Types.type_for(filename).first + t.to_s + else + 'application/octet-stream' + end + end + + def recursively_generate_pairs(h, prefix, pairs) + case h + when Hash + encode_hash_pairs(h, prefix, pairs) + when Array + if params_encoding == :rack + encode_rack_array_pairs(h, prefix, pairs) + elsif params_encoding == :multi + encode_multi_array_pairs(h, prefix, pairs) + elsif params_encoding == :none + pairs << [prefix, h] + else + encode_indexed_array_pairs(h, prefix, pairs) + end + end + end + + def encode_hash_pairs(h, prefix, pairs) + h.each_pair do |k,v| + key = prefix.nil? ? 
k : "#{prefix}[#{k}]" + pairs_for(v, key, pairs) + end + end + + def encode_indexed_array_pairs(h, prefix, pairs) + h.each_with_index do |v, i| + key = "#{prefix}[#{i}]" + pairs_for(v, key, pairs) + end + end + + def encode_rack_array_pairs(h, prefix, pairs) + h.each do |v| + key = "#{prefix}[]" + pairs_for(v, key, pairs) + end + end + + def encode_multi_array_pairs(h, prefix, pairs) + h.each_with_index do |v, i| + key = prefix + pairs_for(v, key, pairs) + end + end + + def pairs_for(v, key, pairs) + case v + when Hash, Array + recursively_generate_pairs(v, key, pairs) + when File, Tempfile + pairs << [key, file_info(v)] + else + pairs << [key, v] + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/response_callbacks.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/response_callbacks.rb new file mode 100644 index 0000000..c5dd1a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/response_callbacks.rb @@ -0,0 +1,136 @@ +# frozen_string_literal: true +module Ethon + class Easy + + # This module contains the logic for the response callbacks. + # The on_complete callback is the only one at the moment. + # + # You can set multiple callbacks, which are then executed + # in the same order. + # + # easy.on_complete { p 1 } + # easy.on_complete { p 2 } + # easy.complete + # #=> 1 + # #=> 2 + # + # You can clear the callbacks: + # + # easy.on_complete { p 1 } + # easy.on_complete { p 2 } + # easy.on_complete.clear + # easy.on_complete + # #=> [] + module ResponseCallbacks + + # Set on_headers callback. + # + # @example Set on_headers. + # request.on_headers { p "yay" } + # + # @param [ Block ] block The block to execute. + def on_headers(&block) + @on_headers ||= [] + @on_headers << block if block_given? + @on_headers + end + + # Execute on_headers callbacks. + # + # @example Execute on_headers. + # request.headers + def headers + return if @headers_called + @headers_called = true + if defined?(@on_headers) and not @on_headers.nil? + result = nil + @on_headers.each do |callback| + result = callback.call(self) + break if result == :abort + end + result + end + end + + # Set on_complete callback. + # + # @example Set on_complete. + # request.on_complete { p "yay" } + # + # @param [ Block ] block The block to execute. + def on_complete(&block) + @on_complete ||= [] + @on_complete << block if block_given? + @on_complete + end + + # Execute on_complete callbacks. + # + # @example Execute on_completes. + # request.complete + def complete + headers unless @response_headers.empty? + if defined?(@on_complete) and not @on_complete.nil? + @on_complete.each{ |callback| callback.call(self) } + end + end + + # Set on_progress callback. + # + # @example Set on_progress. + # request.on_progress {|dltotal, dlnow, ultotal, ulnow| p "#{dltotal} #{dlnow} #{ultotal} #{ulnow}" } + # + # @param [ Block ] block The block to execute. + def on_progress(&block) + @on_progress ||= [] + if block_given? + @on_progress << block + set_progress_callback + self.noprogress = 0 + end + @on_progress + end + + # Execute on_progress callbacks. + # + # @example Execute on_progress. + # request.body(1, 1, 1, 1) + def progress(dltotal, dlnow, ultotal, ulnow) + if defined?(@on_progress) and not @on_progress.nil? + @on_progress.each{ |callback| callback.call(dltotal, dlnow, ultotal, ulnow) } + end + end + + # Set on_body callback. + # + # @example Set on_body. + # request.on_body { |chunk| p "yay" } + # + # @param [ Block ] block The block to execute. 
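+ #
+ # @example An illustrative streaming sketch (file is any IO you opened
+ # yourself); returning :abort from a callback cancels the transfer:
+ # easy.on_body { |chunk, easy| file.write(chunk) }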
+ def on_body(&block) + @on_body ||= [] + @on_body << block if block_given? + @on_body + end + + # Execute on_body callbacks. + # + # @example Execute on_body. + # request.body("This data came from HTTP.") + # + # @return [ Object ] If there are no on_body callbacks, returns the symbol :unyielded. + def body(chunk) + if defined?(@on_body) and not @on_body.nil? + result = nil + @on_body.each do |callback| + result = callback.call(chunk, self) + break if result == :abort + end + result + else + :unyielded + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/util.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/util.rb new file mode 100644 index 0000000..7649782 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/easy/util.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +module Ethon + class Easy # :nodoc: + + # This module contains small helpers. + # + # @api private + module Util + + # Escapes zero bytes in strings. + # + # @example Escape zero bytes. + # Util.escape_zero_byte("1\0") + # #=> "1\\0" + # + # @param [ Object ] value The value to escape. + # + # @return [ String, Object ] Escaped String if + # zero byte found, original object if not. + def escape_zero_byte(value) + return value unless value.to_s.include?(0.chr) + value.to_s.gsub(0.chr, '\\\0') + end + + extend self + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors.rb new file mode 100644 index 0000000..17eecad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true +require 'ethon/errors/ethon_error' +require 'ethon/errors/global_init' +require 'ethon/errors/multi_timeout' +require 'ethon/errors/multi_fdset' +require 'ethon/errors/multi_add' +require 'ethon/errors/multi_remove' +require 'ethon/errors/select' +require 'ethon/errors/invalid_option' +require 'ethon/errors/invalid_value' + +module Ethon + + # This namespace contains all errors raised by ethon. + module Errors + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/ethon_error.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/ethon_error.rb new file mode 100644 index 0000000..84dbf6c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/ethon_error.rb @@ -0,0 +1,9 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Default Ethon error class for all custom errors. + class EthonError < StandardError + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/global_init.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/global_init.rb new file mode 100644 index 0000000..816b774 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/global_init.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true +module Ethon + module Errors + + # Raises when global_init failed. 
+ class GlobalInit < EthonError
+ def initialize
+ super("An error occurred initializing curl.")
+ end
+ end
+ end
+end
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_option.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_option.rb
new file mode 100644
index 0000000..4e558f5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_option.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raises when option is invalid.
+ class InvalidOption < EthonError
+ def initialize(option)
+ super("The option: #{option} is invalid.")
+ end
+ end
+ end
+end
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_value.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_value.rb
new file mode 100644
index 0000000..aedb109
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/invalid_value.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raises when value is invalid.
+ class InvalidValue < EthonError
+ def initialize(option, value)
+ super("The value: #{value} is invalid for option: #{option}.")
+ end
+ end
+ end
+end
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_add.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_add.rb
new file mode 100644
index 0000000..a569a31
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_add.rb
@@ -0,0 +1,12 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raises when multi_add_handle failed.
+ class MultiAdd < EthonError
+ def initialize(code, easy)
+ super("An error occurred adding the easy handle: #{easy} to the multi: #{code}")
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_fdset.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_fdset.rb
new file mode 100644
index 0000000..e84be72
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_fdset.rb
@@ -0,0 +1,12 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raises when multi_fdset failed.
+ class MultiFdset < EthonError
+ def initialize(code)
+ super("An error occurred getting the fdset: #{code}")
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_remove.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_remove.rb
new file mode 100644
index 0000000..36074b3
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_remove.rb
@@ -0,0 +1,12 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raises when multi_remove_handle failed.
+ class MultiRemove < EthonError
+ def initialize(code, easy)
+ super("An error occurred removing the easy handle: #{easy} from the multi: #{code}")
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_timeout.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_timeout.rb
new file mode 100644
index 0000000..79be3a7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/multi_timeout.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raised when multi_timeout failed.
+ class MultiTimeout < EthonError
+ def initialize(code)
+ super("An error occurred getting the timeout: #{code}")
+ # "An error occurred getting the timeout: #{code}: #{Curl.multi_strerror(code)}"
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/select.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/select.rb
new file mode 100644
index 0000000..464a43b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/errors/select.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+module Ethon
+ module Errors
+
+ # Raised when select failed.
+ class Select < EthonError
+ def initialize(errno)
+ super("An error occurred on select: #{errno}")
+ end
+ end
+ end
+end
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/libc.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/libc.rb
new file mode 100644
index 0000000..1fddb8a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/libc.rb
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+module Ethon
+
+ # FFI Wrapper module for Libc.
+ #
+ # @api private
+ module Libc
+ extend FFI::Library
+ ffi_lib 'c'
+
+ # :nodoc:
+ def self.windows?
+ Gem.win_platform?
+ end
+
+ unless windows?
+ attach_function :getdtablesize, [], :int
+ attach_function :free, [:pointer], :void
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/loggable.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/loggable.rb
new file mode 100644
index 0000000..02b7478
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/loggable.rb
@@ -0,0 +1,59 @@
+# encoding: utf-8
+# frozen_string_literal: true
+module Ethon
+
+ # Contains logging behaviour.
+ module Loggable
+
+ # Get the logger.
+ #
+ # @note Will try to grab Rails' logger first before creating a new logger
+ # with stdout.
+ #
+ # @example Get the logger.
+ # Loggable.logger
+ #
+ # @return [ Logger ] The logger.
+ def logger
+ return @logger if defined?(@logger)
+ @logger = rails_logger || default_logger
+ end
+
+ # Set the logger.
+ #
+ # @example Set the logger.
+ # Loggable.logger = Logger.new($stdout)
+ #
+ # @param [ Logger ] logger The logger to set.
+ #
+ # @return [ Logger ] The new logger.
+ def logger=(logger)
+ @logger = logger
+ end
+
+ private
+
+ # Gets the default Ethon logger - stdout.
+ #
+ # @example Get the default logger.
+ # Loggable.default_logger
+ #
+ # @return [ Logger ] The default logger.
+ def default_logger
+ logger = Logger.new($stdout)
+ logger.level = Logger::INFO
+ logger
+ end
+
+ # Get the Rails logger if it's defined.
+ #
+ # @example Get Rails' logger.
+ # Loggable.rails_logger
+ #
+ # @return [ Logger ] The Rails logger.
+ def rails_logger
+ defined?(::Rails) && ::Rails.respond_to?(:logger) && ::Rails.logger
+ end
+ end
+ extend Loggable
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi.rb
new file mode 100644
index 0000000..0919583
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi.rb
@@ -0,0 +1,126 @@
+# frozen_string_literal: true
+require 'ethon/easy/util'
+require 'ethon/multi/stack'
+require 'ethon/multi/operations'
+require 'ethon/multi/options'
+
+module Ethon
+
+ # This class represents a libcurl multi.
+ class Multi
+ include Ethon::Multi::Stack
+ include Ethon::Multi::Operations
+ include Ethon::Multi::Options
+
+ # Create a new multi. Initialize curl in case
+ # it didn't happen before.
+ # + # @example Create a new Multi. + # Multi.new + # + # @param [ Hash ] options The options. + # + # @option options :socketdata [String] + # Pass a pointer to whatever you want passed to the + # curl_socket_callback's forth argument, the userp pointer. This is not + # used by libcurl but only passed-thru as-is. Set the callback pointer + # with CURLMOPT_SOCKETFUNCTION. + # @option options :pipelining [Boolean] + # Pass a long set to 1 to enable or 0 to disable. Enabling pipelining + # on a multi handle will make it attempt to perform HTTP Pipelining as + # far as possible for transfers using this handle. This means that if + # you add a second request that can use an already existing connection, + # the second request will be "piped" on the same connection rather than + # being executed in parallel. (Added in 7.16.0) + # @option options :timerfunction [Proc] + # Pass a pointer to a function matching the curl_multi_timer_callback + # prototype. This function will then be called when the timeout value + # changes. The timeout value is at what latest time the application + # should call one of the "performing" functions of the multi interface + # (curl_multi_socket_action(3) and curl_multi_perform(3)) - to allow + # libcurl to keep timeouts and retries etc to work. A timeout value of + # -1 means that there is no timeout at all, and 0 means that the + # timeout is already reached. Libcurl attempts to limit calling this + # only when the fixed future timeout time actually changes. See also + # CURLMOPT_TIMERDATA. This callback can be used instead of, or in + # addition to, curl_multi_timeout(3). (Added in 7.16.0) + # @option options :timerdata [String] + # Pass a pointer to whatever you want passed to the + # curl_multi_timer_callback's third argument, the userp pointer. This + # is not used by libcurl but only passed-thru as-is. Set the callback + # pointer with CURLMOPT_TIMERFUNCTION. (Added in 7.16.0) + # @option options :maxconnects [Integer] + # Pass a long. The set number will be used as the maximum amount of + # simultaneously open connections that libcurl may cache. Default is + # 10, and libcurl will enlarge the size for each added easy handle to + # make it fit 4 times the number of added easy handles. + # By setting this option, you can prevent the cache size from growing + # beyond the limit set by you. + # When the cache is full, curl closes the oldest one in the cache to + # prevent the number of open connections from increasing. + # This option is for the multi handle's use only, when using the easy + # interface you should instead use the CURLOPT_MAXCONNECTS option. + # (Added in 7.16.3) + # @option options :max_total_connections [Integer] + # Pass a long. The set number will be used as the maximum amount of + # simultaneously open connections in total. For each new session, + # libcurl will open a new connection up to the limit set by + # CURLMOPT_MAX_TOTAL_CONNECTIONS. When the limit is reached, the + # sessions will be pending until there are available connections. + # If CURLMOPT_PIPELINING is 1, libcurl will try to pipeline if the host + # is capable of it. + # The default value is 0, which means that there is no limit. However, + # for backwards compatibility, setting it to 0 when CURLMOPT_PIPELINING + # is 1 will not be treated as unlimited. Instead it will open only 1 + # connection and try to pipeline on it. 
+ # (Added in 7.30.0)
+ # @option options :execution_mode [Symbol]
+ # Either :perform (default) or :socket_action, specifies the usage
+ # method that will be used on this multi object. The default :perform
+ # mode provides a #perform function that uses curl_multi_perform
+ # behind the scenes to automatically continue execution until all
+ # requests have completed. The :socket_action mode provides an API
+ # that allows the {Multi} object to be integrated into an external
+ # IO loop, by calling #socket_action and responding to the
+ # socketfunction and timerfunction callbacks, using the underlying
+ # curl_multi_socket_action semantics.
+ #
+ # @return [ Multi ] The new multi.
+ def initialize(options = {})
+ Curl.init
+ @execution_mode = options.delete(:execution_mode) || :perform
+ set_attributes(options)
+ init_vars
+ end
+
+ # Set given options.
+ #
+ # @example Set options.
+ # multi.set_attributes(options)
+ #
+ # @raise InvalidOption
+ #
+ # @see initialize
+ #
+ # @api private
+ def set_attributes(options)
+ options.each_pair do |key, value|
+ unless respond_to?("#{key}=")
+ raise Errors::InvalidOption.new(key)
+ end
+ method("#{key}=").call(value)
+ end
+ end
+
+ private
+
+ # Internal function to gate functions to a specific execution mode.
+ #
+ # @raise ArgumentError
+ #
+ # @api private
+ def ensure_execution_mode(expected_mode)
+ raise ArgumentError, "Expected the Multi to be in #{expected_mode} but it was in #{@execution_mode}" if expected_mode != @execution_mode
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/operations.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/operations.rb
new file mode 100644
index 0000000..b5c2796
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/operations.rb
@@ -0,0 +1,228 @@
+# frozen_string_literal: true
+module Ethon
+ class Multi # :nodoc:
+ # This module contains logic to run a multi.
+ module Operations
+ STARTED_MULTI = "ETHON: started MULTI"
+ PERFORMED_MULTI = "ETHON: performed MULTI"
+
+ # Return the multi handle. Initialize the multi handle,
+ # in case that didn't happen already.
+ #
+ # @example Return multi handle.
+ # multi.handle
+ #
+ # @return [ FFI::Pointer ] The multi handle.
+ def handle
+ @handle ||= FFI::AutoPointer.new(Curl.multi_init, Curl.method(:multi_cleanup))
+ end
+
+ # Initialize variables.
+ #
+ # @example Initialize variables.
+ # multi.init_vars
+ #
+ # @return [ void ]
+ def init_vars
+ if @execution_mode == :perform
+ @timeout = ::FFI::MemoryPointer.new(:long)
+ @timeval = Curl::Timeval.new
+ @fd_read = Curl::FDSet.new
+ @fd_write = Curl::FDSet.new
+ @fd_excep = Curl::FDSet.new
+ @max_fd = ::FFI::MemoryPointer.new(:int)
+ elsif @execution_mode == :socket_action
+ @running_count_pointer = FFI::MemoryPointer.new(:int)
+ end
+ end
+
+ # Perform multi.
+ #
+ # @return [ nil ]
+ #
+ # @example Perform multi.
+ # multi.perform
+ def perform
+ ensure_execution_mode(:perform)
+
+ Ethon.logger.debug(STARTED_MULTI)
+ while ongoing?
+ run
+ timeout = get_timeout
+ next if timeout == 0
+ reset_fds
+ set_fds(timeout)
+ end
+ Ethon.logger.debug(PERFORMED_MULTI)
+ nil
+ end
+
+ # Prepare multi.
+ #
+ # @return [ nil ]
+ #
+ # @example Prepare multi.
+ # multi.prepare
+ #
+ # @deprecated It is no longer necessary to call prepare.
+ def prepare
+ Ethon.logger.warn(
+ "ETHON: It is no longer necessary to call "+
+ "Multi#prepare. It's going to be removed "+
+ "in future versions."
+ )
+ end
+
+ # Continue execution with an external IO loop.
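+ #
+ # A rough event-loop sketch (illustrative only, not a drop-in loop):
+ # assumes a multi created with execution_mode: :socket_action whose
+ # socketfunction callback keeps watched_ios up to date.
+ # multi.socket_action # kick off with a timeout event
+ # while multi.ongoing?
+ # readable, writable = IO.select(watched_ios, watched_ios, [], 1)
+ # (readable || []).each { |io| multi.socket_action(io, [:in]) }
+ # (writable || []).each { |io| multi.socket_action(io, [:out]) }
+ # end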
+ # + # @example When no sockets are ready yet, or to begin. + # multi.socket_action + # + # @example When a socket is readable + # multi.socket_action(io_object, [:in]) + # + # @example When a socket is readable and writable + # multi.socket_action(io_object, [:in, :out]) + # + # @return [ Symbol ] The Curl.multi_socket_action return code. + def socket_action(io = nil, readiness = 0) + ensure_execution_mode(:socket_action) + + fd = if io.nil? + ::Ethon::Curl::SOCKET_TIMEOUT + elsif io.is_a?(Integer) + io + else + io.fileno + end + + code = Curl.multi_socket_action(handle, fd, readiness, @running_count_pointer) + @running_count = @running_count_pointer.read_int + + check + + code + end + + # Return whether the multi still contains requests or not. + # + # @example Return if ongoing. + # multi.ongoing? + # + # @return [ Boolean ] True if ongoing, else false. + def ongoing? + easy_handles.size > 0 || (!defined?(@running_count) || running_count > 0) + end + + private + + # Get timeout. + # + # @example Get timeout. + # multi.get_timeout + # + # @return [ Integer ] The timeout. + # + # @raise [ Ethon::Errors::MultiTimeout ] If getting the timeout fails. + def get_timeout + code = Curl.multi_timeout(handle, @timeout) + raise Errors::MultiTimeout.new(code) unless code == :ok + timeout = @timeout.read_long + timeout = 1 if timeout < 0 + timeout + end + + # Reset file describtors. + # + # @example Reset fds. + # multi.reset_fds + # + # @return [ void ] + def reset_fds + @fd_read.clear + @fd_write.clear + @fd_excep.clear + end + + # Set fds. + # + # @example Set fds. + # multi.set_fds + # + # @return [ void ] + # + # @raise [ Ethon::Errors::MultiFdset ] If setting the file descriptors fails. + # @raise [ Ethon::Errors::Select ] If select fails. + def set_fds(timeout) + code = Curl.multi_fdset(handle, @fd_read, @fd_write, @fd_excep, @max_fd) + raise Errors::MultiFdset.new(code) unless code == :ok + max_fd = @max_fd.read_int + if max_fd == -1 + sleep(0.001) + else + @timeval[:sec] = timeout / 1000 + @timeval[:usec] = (timeout * 1000) % 1000000 + loop do + code = Curl.select(max_fd + 1, @fd_read, @fd_write, @fd_excep, @timeval) + break unless code < 0 && ::FFI.errno == Errno::EINTR::Errno + end + raise Errors::Select.new(::FFI.errno) if code < 0 + end + end + + # Check. + # + # @example Check. + # multi.check + # + # @return [ void ] + def check + msgs_left = ::FFI::MemoryPointer.new(:int) + while true + msg = Curl.multi_info_read(handle, msgs_left) + break if msg.null? + next if msg[:code] != :done + easy = easy_handles.find{ |e| e.handle == msg[:easy_handle] } + easy.return_code = msg[:data][:code] + Ethon.logger.debug { "ETHON: performed #{easy.log_inspect}" } + delete(easy) + easy.complete + end + end + + # Run. + # + # @example Run + # multi.run + # + # @return [ void ] + def run + running_count_pointer = FFI::MemoryPointer.new(:int) + begin code = trigger(running_count_pointer) end while code == :call_multi_perform + check + end + + # Trigger. + # + # @example Trigger. + # multi.trigger + # + # @return [ Symbol ] The Curl.multi_perform return code. + def trigger(running_count_pointer) + code = Curl.multi_perform(handle, running_count_pointer) + @running_count = running_count_pointer.read_int + code + end + + # Return number of running requests. + # + # @example Return count. + # multi.running_count + # + # @return [ Integer ] Number running requests. 
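+ #
+ # @example Illustrative relationship to a perform-mode run
+ # (hypothetical URL): running_count drops to zero as transfers
+ # finish, which ends the #perform loop.
+ # multi = Ethon::Multi.new
+ # multi.add(Ethon::Easy.new(url: "http://example.com"))
+ # multi.perform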
+      def running_count
+        @running_count ||= nil
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/options.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/options.rb
new file mode 100644
index 0000000..08a3c03
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/options.rb
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+module Ethon
+  class Multi
+
+    # This module contains the logic and knowledge about the
+    # available options on multi.
+    module Options
+
+      # Sets max_total_connections option.
+      #
+      # @example Set max_total_connections option.
+      #   multi.max_total_connections = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def max_total_connections=(value)
+        Curl.set_option(:max_total_connections, value_for(value, :int), handle, :multi)
+      end
+
+      # Sets maxconnects option.
+      #
+      # @example Set maxconnects option.
+      #   multi.maxconnects = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def maxconnects=(value)
+        Curl.set_option(:maxconnects, value_for(value, :int), handle, :multi)
+      end
+
+      # Sets pipelining option.
+      #
+      # @example Set pipelining option.
+      #   multi.pipelining = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def pipelining=(value)
+        Curl.set_option(:pipelining, value_for(value, :int), handle, :multi)
+      end
+
+      # Sets socketdata option.
+      #
+      # @example Set socketdata option.
+      #   multi.socketdata = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def socketdata=(value)
+        Curl.set_option(:socketdata, value_for(value, :string), handle, :multi)
+      end
+
+      # Sets socketfunction option.
+      #
+      # @example Set socketfunction option.
+      #   multi.socketfunction = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def socketfunction=(value)
+        Curl.set_option(:socketfunction, value_for(value, :string), handle, :multi)
+      end
+
+      # Sets timerdata option.
+      #
+      # @example Set timerdata option.
+      #   multi.timerdata = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def timerdata=(value)
+        Curl.set_option(:timerdata, value_for(value, :string), handle, :multi)
+      end
+
+      # Sets timerfunction option.
+      #
+      # @example Set timerfunction option.
+      #   multi.timerfunction = $value
+      #
+      # @param [ String ] value The value to set.
+      #
+      # @return [ void ]
+      def timerfunction=(value)
+        Curl.set_option(:timerfunction, value_for(value, :string), handle, :multi)
+      end
+
+      private
+
+      # Return the value to set to the multi handle. It is converted with the help
+      # of bool_options, enum_options and int_options.
+      #
+      # @example Return the casted value.
+      #   multi.value_for(:verbose)
+      #
+      # @return [ Object ] The casted value.
+      def value_for(value, type, option = nil)
+        return nil if value.nil?
+
+        if type == :bool
+          value ? 1 : 0
+        elsif type == :int
+          value.to_i
+        elsif value.is_a?(String)
+          Ethon::Easy::Util.escape_zero_byte(value)
+        else
+          value
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/stack.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/stack.rb
new file mode 100644
index 0000000..4c6381c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/multi/stack.rb
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+module Ethon
+  class Multi
+
+    # This module provides the multi stack behaviour.
+    module Stack
+
+      # Return easy handles.
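+      #
+      # An illustrative round trip through the stack (the URL is a
+      # placeholder, assuming the default :perform mode):
+      #
+      #   multi = Ethon::Multi.new
+      #   easy  = Ethon::Easy.new(url: "http://localhost:3001/")
+      #   multi.add(easy)    # easy_handles => [easy]
+      #   multi.perform      # finished easies are deleted again in #check
+      #   multi.easy_handles # => []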
+ # + # @example Return easy handles. + # multi.easy_handles + # + # @return [ Array ] The easy handles. + def easy_handles + @easy_handles ||= [] + end + + # Add an easy to the stack. + # + # @example Add easy. + # multi.add(easy) + # + # @param [ Easy ] easy The easy to add. + # + # @raise [ Ethon::Errors::MultiAdd ] If adding an easy failed. + def add(easy) + return nil if easy_handles.include?(easy) + + code = Curl.multi_add_handle(handle, easy.handle) + raise Errors::MultiAdd.new(code, easy) unless code == :ok + easy_handles << easy + end + + # Delete an easy from stack. + # + # @example Delete easy from stack. + # + # @param [ Easy ] easy The easy to delete. + # + # @raise [ Ethon::Errors::MultiRemove ] If removing an easy failed. + def delete(easy) + if easy_handles.delete(easy) + code = Curl.multi_remove_handle(handle, easy.handle) + raise Errors::MultiRemove.new(code, handle) unless code == :ok + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/version.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/version.rb new file mode 100644 index 0000000..640f68d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/lib/ethon/version.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true +module Ethon + + # Ethon version. + VERSION = '0.16.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/benchmarks.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/benchmarks.rb new file mode 100644 index 0000000..6cd68ca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/benchmarks.rb @@ -0,0 +1,104 @@ +# encoding: utf-8 +# frozen_string_literal: true +require 'ethon' +require 'open-uri' +require 'patron' +require 'curb' +require 'net/http' +require 'cgi' +require 'benchmark/ips' + +require_relative '../spec/support/server' +require_relative '../spec/support/localhost_server' + +LocalhostServer.new(TESTSERVER.new, 3000) +LocalhostServer.new(TESTSERVER.new, 3001) +LocalhostServer.new(TESTSERVER.new, 3002) + +url = 'http://localhost:3000/'.freeze +uri = URI.parse('http://localhost:3000/').freeze +ethon = Ethon::Easy.new(url: url) +patron = Patron::Session.new +patron_url = Patron::Session.new(base_url: url) +curb = Curl::Easy.new(url) + +puts '[Creation]' +Benchmark.ips do |x| + x.report('String.new') { '' } + x.report('Easy.new') { Ethon::Easy.new } +end + +puts '[Escape]' +Benchmark.ips do |x| + x.report('CGI.escape') { CGI.escape("ぞつもと") } + x.report('Easy.escape') { ethon.escape("ぞつもと") } +end + +puts '[Requests]' +Benchmark.ips do |x| + x.report('net/http') { Net::HTTP.get_response(uri) } + x.report('open-uri') { open url } + + x.report('patron') do + patron.base_url = url + patron.get('/') + end + + x.report('patron reuse') { patron_url.get('/') } + + x.report('curb') do + curb.url = url + curb.perform + end + + x.report('curb reuse') { curb.perform } + + x.report('Easy.perform') do + ethon.url = url + ethon.perform + end + + x.report('Easy.perform reuse') { ethon.perform } +end + +puts "[ 4 delayed Requests ]" +Benchmark.ips do |x| + x.report('net/http') do + 3.times do |i| + uri = URI.parse("http://localhost:300#{i}/?delay=1") + Net::HTTP.get_response(uri) + end + end + + x.report("open-uri") do + 3.times do |i| + open("http://localhost:300#{i}/?delay=1") + end + end + + x.report("patron") do + sess = Patron::Session.new + 3.times do |i| + sess.base_url = "http://localhost:300#{i}/?delay=1" + sess.get("/") + end + end + + x.report("Easy.perform") do + easy = Ethon::Easy.new + 3.times do |i| + 
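+      # Each request is delayed ~1s server-side, so this sequential loop
+      # needs roughly 3s, while the Multi.perform report further down
+      # overlaps all three requests and finishes in roughly 1s.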
easy.url = "http://localhost:300#{i}/?delay=1" + easy.perform + end + end + + x.report("Multi.perform") do + multi = Ethon::Multi.new + 3.times do |i| + easy = Ethon::Easy.new + easy.url = "http://localhost:300#{i}/?delay=1" + multi.add(easy) + end + multi.perform + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/memory_leaks.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/memory_leaks.rb new file mode 100644 index 0000000..32000b8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/memory_leaks.rb @@ -0,0 +1,114 @@ +# frozen_string_literal: true +require 'ethon' +require 'ethon/easy' + +require_relative 'perf_spec_helper' +require 'rspec/autorun' + +describe "low-level interactions with libcurl" do + describe Ethon::Multi do + memory_leak_test("init") do + Ethon::Multi.new + end + + memory_leak_test("handle") do + Ethon::Multi.new.handle + end + end + + describe Ethon::Easy do + memory_leak_test("init") do + Ethon::Easy.new + end + + memory_leak_test("handle") do + Ethon::Easy.new.handle + end + + memory_leak_test("headers") do + Ethon::Easy.new.headers = { "a" => 1, "b" => 2, "c" => 3, "d" => 4} + end + + memory_leak_test("escape") do + Ethon::Easy.new.escape("the_sky&is_blue") + end + end + + + describe Ethon::Easy::Form do + memory_leak_test("init") do + Ethon::Easy::Form.new(nil, {}) + end + + memory_leak_test("first") do + Ethon::Easy::Form.new(nil, {}).first + end + + memory_leak_test("last") do + Ethon::Easy::Form.new(nil, {}).last + end + + memory_leak_test("materialized with some params") do + form = Ethon::Easy::Form.new(nil, { "a" => "1" }) + form.materialize + end + + memory_leak_test("materialized with a file") do + File.open(__FILE__, "r") do |file| + form = Ethon::Easy::Form.new(nil, { "a" => file }) + form.materialize + end + end + end +end + +describe "higher level operations" do + memory_leak_test("a simple request") do + Ethon::Easy.new(:url => "http://localhost:3001/", + :forbid_reuse => true).perform + end + + memory_leak_test("a request with headers") do + Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true).perform + end + + memory_leak_test("a request with headers and params") do + easy = Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true) + easy.http_request("http://localhost:3001/", + :get, + :params => { "param1" => "value1", + "param2" => "value2", + "param3" => "value3", + "param4" => "value4"}) + end + + memory_leak_test("a request with headers, params, and body") do + easy = Ethon::Easy.new(:url => "http://localhost:3001/", + :headers => { "Content-Type" => "application/json", + "Something" => "1", + "Else" => "qwerty", + "Long-String" => "aassddffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz"}, + :forbid_reuse => true) + easy.http_request("http://localhost:3001/", + :get, + :params => { "param1" => "value1", + "param2" => "value2", + "param3" => "value3", + "param4" => "value4"}, + :body => { + "body1" => "value1", + "body2" => "value2", + "body3" => "value3" + }) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/perf_spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/perf_spec_helper.rb new 
file mode 100644 index 0000000..35fd623 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/perf_spec_helper.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true +#### SETUP +require 'bundler' +Bundler.setup +require 'rspec' + +require 'support/localhost_server' +require 'support/server' +require_relative 'support/memory_test_helpers' + +require 'logger' + +if ENV['VERBOSE'] + Ethon.logger = Logger.new($stdout) + Ethon.logger.level = Logger::DEBUG +end + +RSpec.configure do |config| + config.before(:suite) do + LocalhostServer.new(TESTSERVER.new, 3001) + end + config.include(MemoryTestHelpers) + config.extend(MemoryTestHelpers::TestMethods) +end + +MemoryTestHelpers.setup +MemoryTestHelpers.logger = Logger.new($stdout) +MemoryTestHelpers.logger.level = Logger::INFO +MemoryTestHelpers.logger.formatter = proc do |severity, datetime, progname, msg| + "\t\t#{msg}\n" +end + +if ENV['VERBOSE'] + MemoryTestHelpers.logger.level = Logger::DEBUG +end + +MemoryTestHelpers.iterations = ENV.fetch("ITERATIONS", 10_000).to_i diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/memory_test_helpers.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/memory_test_helpers.rb new file mode 100644 index 0000000..bfb6ff4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/memory_test_helpers.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true +require_relative 'ruby_object_leak_tracker' +require_relative 'os_memory_leak_tracker' + +module MemoryTestHelpers + class << self + attr_accessor :gc_proc, :iterations, :logger + + def setup + if RUBY_PLATFORM == "java" + # for leak detection + JRuby.objectspace = true if defined?(JRuby) + # for gc + require 'java' + java_import 'java.lang.System' + self.gc_proc = proc { System.gc } + else + self.gc_proc = proc { GC.start } + end + end + end + + module TestMethods + def memory_leak_test(description, &block) + context(description) do + it "doesn't leak ruby objects" do + object_leak_tracker = RubyObjectLeakTracker.new + track_memory_usage(object_leak_tracker, &block) + object_leak_tracker.total_difference_between_runs.should be <= 10 + end + + it "doesn't leak OS memory (C interop check)" do + os_memory_leak_tracker = OSMemoryLeakTracker.new + track_memory_usage(os_memory_leak_tracker, &block) + os_memory_leak_tracker.total_difference_between_runs.should be <= 10 + end + end + end + end + + def track_memory_usage(tracker) + # Intentionally do all this setup before we do any testing + logger = MemoryTestHelpers.logger + iterations = MemoryTestHelpers.iterations + + checkpoint_frequency = (iterations / 10.0).to_i + gc_frequency = 20 + + warmup_iterations = [(iterations / 3.0).to_i, 500].min + logger.info "Performing #{warmup_iterations} warmup iterations" + warmup_iterations.times do + yield + MemoryTestHelpers.gc_proc.call + end + tracker.capture_initial_memory_usage + + logger.info "Performing #{iterations} iterations (checkpoint every #{checkpoint_frequency})" + + iterations.times do |i| + yield + + last_iteration = (i == iterations - 1) + checkpoint = last_iteration || (i % checkpoint_frequency == 0) + + if checkpoint || (i % gc_frequency == 0) + MemoryTestHelpers.gc_proc.call + end + + if checkpoint + logger.info "Iteration #{i} checkpoint" + tracker.capture_memory_usage + tracker.dump_status(logger) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/os_memory_leak_tracker.rb 
b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/os_memory_leak_tracker.rb new file mode 100644 index 0000000..d9e62d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/os_memory_leak_tracker.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true +class OSMemoryLeakTracker + attr_reader :current_run + + def initialize + @previous_run = @current_run = 0 + end + + def difference_between_runs(basis=@previous_run) + @current_run - basis + end + + def total_difference_between_runs + difference_between_runs(@initial_count_run) + end + + def capture_initial_memory_usage + capture_memory_usage + @initial_count_run = @current_run + end + + def capture_memory_usage + @previous_run = @current_run + @current_run = rss_bytes + end + + def dump_status(logger) + delta = difference_between_runs + logger.add(log_level(delta), sprintf("\tTotal memory usage (kb): %d (%+d)", current_run, delta)) + end + + private + # amount of memory the current process "is using", in RAM + # (doesn't include any swap memory that it may be using, just that in actual RAM) + # Code loosely based on https://github.com/rdp/os/blob/master/lib/os.rb + # returns 0 on windows + def rss_bytes + if ENV['OS'] == 'Windows_NT' + 0 + else + `ps -o rss= -p #{Process.pid}`.to_i # in kilobytes + end + end + + def log_level(delta) + delta > 0 ? Logger::WARN : Logger::DEBUG + end +end \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/ruby_object_leak_tracker.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/ruby_object_leak_tracker.rb new file mode 100644 index 0000000..afcce5a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/profile/support/ruby_object_leak_tracker.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true +class RubyObjectLeakTracker + attr_reader :previous_count_hash, :current_count_hash + + def initialize + @previous_count_hash = @current_count_hash = {} + end + + def difference_between_runs(basis=@previous_count_hash) + @difference_between_runs ||= Hash[@current_count_hash.map do |object_class, count| + [object_class, count - (basis[object_class] || 0)] + end] + end + + def total_difference_between_runs + difference_between_runs(@initial_count_hash).values.inject(0) { |sum, count| sum + count } + end + + def capture_initial_memory_usage + capture_memory_usage + @initial_count_hash = @current_count_hash + end + + def capture_memory_usage + @difference_between_runs = nil + @previous_count_hash = @current_count_hash + + class_to_count = Hash.new { |hash, key| hash[key] = 0 } + ObjectSpace.each_object { |obj| class_to_count[obj.class] += 1 } + + sorted_class_to_count = class_to_count.sort_by { |k, v| -v } + @current_count_hash = Hash[sorted_class_to_count] + end + + def dump_status(logger) + diff = difference_between_runs + most_used_objects = current_count_hash.to_a.sort_by(&:last).reverse[0, 20] + + most_used_objects.each do |object_class, count| + delta = diff[object_class] + logger.add(log_level(delta), sprintf("\t%s: %d (%+d)", object_class, count, delta)) + end + end + + private + def log_level(delta) + delta > 0 ? 
Logger::WARN : Logger::DEBUG
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/curl_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/curl_spec.rb
new file mode 100644
index 0000000..24d3726
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/curl_spec.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Curl do
+  describe ".init" do
+    before { Ethon::Curl.send(:class_variable_set, :@@initialized, false) }
+
+    context "when global_init fails" do
+      it "raises global init error" do
+        expect(Ethon::Curl).to receive(:global_init).and_return(1)
+        expect{ Ethon::Curl.init }.to raise_error(Ethon::Errors::GlobalInit)
+      end
+    end
+
+    context "when global_init works" do
+      before { expect(Ethon::Curl).to receive(:global_init).and_return(0) }
+
+      it "doesn't raise global init error" do
+        expect{ Ethon::Curl.init }.to_not raise_error
+      end
+
+      it "logs" do
+        expect(Ethon.logger).to receive(:debug)
+        Ethon::Curl.init
+      end
+    end
+
+    context "when global_cleanup is called" do
+      before { expect(Ethon::Curl).to receive(:global_cleanup) }
+
+      it "logs" do
+        expect(Ethon.logger).to receive(:debug).twice
+        Ethon::Curl.init
+        Ethon::Curl.cleanup
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/callbacks_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/callbacks_spec.rb
new file mode 100644
index 0000000..2fa75da
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/callbacks_spec.rb
@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Callbacks do
+  let!(:easy) { Ethon::Easy.new }
+
+  describe "#set_callbacks" do
+    before do
+      expect(Ethon::Curl).to receive(:set_option).exactly(3).times
+    end
+
+    it "sets write-, debug-, and headerfunction" do
+      easy.set_callbacks
+    end
+
+    it "resets @response_body" do
+      easy.set_callbacks
+      expect(easy.instance_variable_get(:@response_body)).to eq("")
+    end
+
+    it "resets @response_headers" do
+      easy.set_callbacks
+      expect(easy.instance_variable_get(:@response_headers)).to eq("")
+    end
+
+    it "resets @debug_info" do
+      easy.set_callbacks
+      expect(easy.instance_variable_get(:@debug_info).to_a).to eq([])
+    end
+  end
+
+  describe "#progress_callback" do
+    it "returns 0" do
+      expect(easy.progress_callback.call(0,1,1,1,1)).to be(0)
+    end
+  end
+
+  describe "#body_write_callback" do
+    let(:body_write_callback) { easy.instance_variable_get(:@body_write_callback) }
+    let(:stream) { double(:read_string => "") }
+    context "when body returns not :abort" do
+      it "returns a number greater than 0" do
+        expect(body_write_callback.call(stream, 1, 1, nil) > 0).to be(true)
+      end
+    end
+
+    context "when body returns :abort" do
+      before do
+        easy.on_body.clear
+        easy.on_body { :abort }
+      end
+      let(:body_write_callback) { easy.instance_variable_get(:@body_write_callback) }
+
+      it "returns -1 to indicate abort to libcurl" do
+        expect(body_write_callback.call(stream, 1, 1, nil)).to eq(-1)
+      end
+    end
+  end
+
+  describe "#header_write_callback" do
+    let(:header_write_callback) { easy.instance_variable_get(:@header_write_callback) }
+    let(:stream) { double(:read_string => "") }
+    context "when header returns not :abort" do
+      it "returns a number greater than 0" do
+        expect(header_write_callback.call(stream, 1, 1, nil) > 0).to be(true)
+      end
+    end
+
+    context "when header returns :abort" do
+      before do
+        easy.on_headers.clear
+        easy.on_headers { :abort }
+      end
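+      # libcurl counts any return value other than the number of bytes
+      # passed to the callback as a write error, so returning -1 below
+      # reliably aborts the transfer.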
let(:header_write_callback) { easy.instance_variable_get(:@header_write_callback) } + + it "returns -1 to indicate abort to libcurl" do + expect(header_write_callback.call(stream, 1, 1, nil)).to eq(-1) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/debug_info_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/debug_info_spec.rb new file mode 100644 index 0000000..8dccc38 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/debug_info_spec.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::DebugInfo do + let(:easy) { Ethon::Easy.new } + + before do + easy.url = "http://localhost:3001/" + easy.perform + end + + describe "#debug_info" do + context "when verbose is not set to true" do + it "does not save any debug info after a request" do + expect(easy.debug_info.to_a.length).to eq(0) + expect(easy.debug_info.to_h.values.flatten.length).to eq(0) + end + end + + context "when verbose is set to true" do + before do + easy.verbose = true + easy.perform + end + + after do + easy.verbose = false + easy.reset + end + + it "saves debug info after a request" do + expect(easy.debug_info.to_a.length).to be > 0 + end + + it "saves request headers" do + expect(easy.debug_info.header_out.join).to include('GET / HTTP/1.1') + end + + it "saves response headers" do + expect(easy.debug_info.header_in.length).to be > 0 + expect(easy.response_headers).to include(easy.debug_info.header_in.join) + end + + it "saves incoming data" do + expect(easy.debug_info.data_in.length).to be > 0 + expect(easy.response_body).to include(easy.debug_info.data_in.join) + end + + it "saves debug text" do + expect(easy.debug_info.text.length).to be > 0 + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/features_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/features_spec.rb new file mode 100644 index 0000000..b174948 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/features_spec.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Informations do + + describe "#supports_asynch_dns?" do + it "returns boolean" do + expect([true, false].include? Ethon::Easy.supports_asynch_dns?).to be_truthy + end + end + + describe "#supports_zlib?" do + it "returns boolean" do + expect([true, false].include? Ethon::Easy.supports_zlib?).to be_truthy + end + end + + describe "#supports_timeout_ms?" do + it "returns boolean" do + expect([true, false].include? 
Ethon::Easy.supports_timeout_ms?).to be_truthy
+    end
+  end
+
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/form_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/form_spec.rb
new file mode 100644
index 0000000..4cd34d9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/form_spec.rb
@@ -0,0 +1,104 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Form do
+  let(:hash) { {} }
+  let!(:easy) { Ethon::Easy.new }
+  let(:form) { Ethon::Easy::Form.new(easy, hash) }
+
+  describe ".new" do
+    it "assigns attribute to @params" do
+      expect(form.instance_variable_get(:@params)).to eq(hash)
+    end
+  end
+
+  describe "#first" do
+    it "returns a pointer" do
+      expect(form.first).to be_a(FFI::Pointer)
+    end
+  end
+
+  describe "#last" do
+    it "returns a pointer" do
+      expect(form.last).to be_a(FFI::Pointer)
+    end
+  end
+
+  describe "#multipart?" do
+    before { form.instance_variable_set(:@query_pairs, pairs) }
+
+    context "when query_pairs contains string values" do
+      let(:pairs) { [['a', '1'], ['b', '2']] }
+
+      it "returns false" do
+        expect(form.multipart?).to be_falsey
+      end
+    end
+
+    context "when query_pairs contains file" do
+      let(:pairs) { [['a', '1'], ['b', ['path', 'encoding', 'abs_path']]] }
+
+      it "returns true" do
+        expect(form.multipart?).to be_truthy
+      end
+    end
+
+    context "when options contains multipart=true" do
+      before { form.instance_variable_set(:@multipart, true) }
+      let(:pairs) { [['a', '1'], ['b', '2']] }
+
+      it "returns true" do
+        expect(form.multipart?).to be_truthy
+      end
+    end
+  end
+
+  describe "#materialize" do
+    before { form.instance_variable_set(:@query_pairs, pairs) }
+
+    context "when query_pairs contains string values" do
+      let(:pairs) { [['a', '1']] }
+
+      it "adds params to form" do
+        expect(Ethon::Curl).to receive(:formadd)
+        form.materialize
+      end
+    end
+
+    context "when query_pairs contains nil" do
+      let(:pairs) { [['a', nil]] }
+
+      it "adds params to form" do
+        expect(Ethon::Curl).to receive(:formadd)
+        form.materialize
+      end
+    end
+
+    context "when query_pairs contains file" do
+      let(:pairs) { [['a', ["file", "type", "path/file"]]] }
+
+      it "adds file to form" do
+        expect(Ethon::Curl).to receive(:formadd)
+        form.materialize
+      end
+    end
+
+    context "when query_pairs contains file and string values" do
+      let(:pairs) { [['a', ["file", "type", "path/file"]], ['b', '1']] }
+
+      it "adds file to form" do
+        expect(Ethon::Curl).to receive(:formadd).twice
+        form.materialize
+      end
+    end
+
+    context "when query_pairs contains file, string and int values" do
+      let(:pairs) { [['a', ["file", "type", "path/file"]], ['b', '1'], ['c', 1]] }
+
+      it "adds file to form" do
+        expect(Ethon::Curl).to receive(:formadd).exactly(3).times
+        form.materialize
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/header_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/header_spec.rb
new file mode 100644
index 0000000..4931fc7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/header_spec.rb
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Header do
+  let(:easy) { Ethon::Easy.new }
+
+  describe "#headers=" do
+    let(:headers) { { 'User-Agent' => 'Ethon' } }
+
+    it "sets header" do
+      expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks)
+      expect(Ethon::Curl).to receive(:set_option)
+      easy.headers = headers
+    end
+
+    context "when requesting" do
+      before do
+        easy.headers = headers
+        easy.url = "http://localhost:3001"
+        easy.perform
+      end
+
+      it "sends" do
+        expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"')
+      end
+
+      context "when header value contains null byte" do
+        let(:headers) { { 'User-Agent' => "Ethon\0" } }
+
+        it "escapes" do
+          expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon\\\\0"')
+        end
+      end
+
+      context "when header value has leading whitespace" do
+        let(:headers) { { 'User-Agent' => " Ethon" } }
+
+        it "removes" do
+          expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"')
+        end
+      end
+
+      context "when header value has trailing whitespace" do
+        let(:headers) { { 'User-Agent' => "Ethon " } }
+
+        it "removes" do
+          expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"')
+        end
+      end
+    end
+  end
+
+  describe "#compose_header" do
+    it "has space in between" do
+      expect(easy.compose_header('a', 'b')).to eq('a: b')
+    end
+
+    context "when value is a symbol" do
+      it "works" do
+        expect{ easy.compose_header('a', :b) }.to_not raise_error
+      end
+    end
+  end
+
+  describe "#header_list" do
+    context "when no set_headers" do
+      it "returns nil" do
+        expect(easy.header_list).to eq(nil)
+      end
+    end
+
+    context "when set_headers" do
+      it "returns pointer to header list" do
+        easy.headers = {'User-Agent' => 'Custom'}
+        expect(easy.header_list).to be_a(FFI::Pointer)
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/custom_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/custom_spec.rb
new file mode 100644
index 0000000..ba0978b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/custom_spec.rb
@@ -0,0 +1,177 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Http::Custom do
+  let(:easy) { Ethon::Easy.new }
+  let(:url) { "http://localhost:3001/" }
+  let(:params) { nil }
+  let(:form) { nil }
+  let(:custom) { described_class.new("PURGE", url, {:params => params, :body => form}) }
+
+  describe "#setup" do
+    context "when nothing" do
+      it "sets url" do
+        custom.setup(easy)
+        expect(easy.url).to eq(url)
+      end
+
+      it "makes a custom request" do
+        custom.setup(easy)
+        easy.perform
+        expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"')
+      end
+    end
+
+    context "when params" do
+      let(:params) { {:a => "1&"} }
+
+      it "attaches escaped to url" do
+        custom.setup(easy)
+        expect(easy.url).to eq("#{url}?a=1%26")
+      end
+
+      context "when requesting" do
+        before do
+          easy.headers = { 'Expect' => '' }
+          custom.setup(easy)
+          easy.perform
+        end
+
+        it "is a custom verb" do
+          expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"')
+        end
+
+        it "does not use application/x-www-form-urlencoded content type" do
+          expect(easy.response_body).to_not include('"CONTENT_TYPE":"application/x-www-form-urlencoded"')
+        end
+
+        it "requests parameterized url" do
+          expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?a=1%26"')
+        end
+      end
+    end
+
+    context "when body" do
+      context "when multipart" do
+        let(:form) { {:a => File.open(__FILE__, 'r')} }
+
+        it "sets httppost" do
+          expect(easy).to receive(:httppost=)
+          custom.setup(easy)
+        end
+
+        context "when requesting" do
+          before do
+            easy.headers = { 'Expect' => '' }
+            custom.setup(easy)
+            easy.perform
+          end
+
+          it "returns ok" do
+            expect(easy.return_code).to eq(:ok)
+          end
+
+          it "is a custom verb" do
+            expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"')
+          end
+
+          it "uses multipart/form-data content type" do
+            expect(easy.response_body).to include('"CONTENT_TYPE":"multipart/form-data')
+          end
+
+          it "submits a body" do
+            expect(easy.response_body).to match('"body":".+"')
+          end
+
+          it "submits the data" do
+            expect(easy.response_body).to include('"filename":"custom_spec.rb"')
+          end
+        end
+      end
+
+      context "when not multipart" do
+        let(:form) { {:a => "1&b=2"} }
+        let(:encoded) { "a=1%26b%3D2" }
+
+        it "sets escaped copypostfields" do
+          expect(easy).to receive(:copypostfields=).with(encoded)
+          custom.setup(easy)
+        end
+
+        it "sets postfieldsize" do
+          expect(easy).to receive(:postfieldsize=).with(encoded.bytesize)
+          custom.setup(easy)
+        end
+
+        context "when requesting" do
+          before do
+            easy.headers = { 'Expect' => '' }
+            custom.setup(easy)
+            easy.perform
+          end
+
+          it "returns ok" do
+            expect(easy.return_code).to eq(:ok)
+          end
+
+          it "is a custom verb" do
+            expect(easy.response_body).to include('"REQUEST_METHOD":"PURGE"')
+          end
+
+          it "uses application/x-www-form-urlencoded content type" do
+            expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded')
+          end
+
+          it "submits a body" do
+            expect(easy.response_body).to match('"body":"a=1%26b%3D2"')
+          end
+
+          it "submits the data" do
+            expect(easy.response_body).to include('"rack.request.form_hash":{"a":"1&b=2"}')
+          end
+        end
+      end
+
+      context "when string" do
+        let(:form) { "{a: 1}" }
+
+        context "when requesting" do
+          before do
+            easy.headers = { 'Expect' => '' }
+            custom.setup(easy)
+            easy.perform
+          end
+
+          it "returns ok" do
+            expect(easy.return_code).to eq(:ok)
+          end
+
+          it "sends string" do
+            expect(easy.response_body).to include('"body":"{a: 1}"')
+          end
+        end
+      end
+    end
+
+    context "when params and body" do
+      let(:form) { {:a => "1"} }
+      let(:params) { {:b => "2"} }
+
+      context "when requesting" do
+        before do
+          easy.headers = { 'Expect' => '' }
+          custom.setup(easy)
+          easy.perform
+        end
+
+        it "url contains params" do
+          expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?b=2"')
+        end
+
+        it "body contains form" do
+          expect(easy.response_body).to include('"body":"a=1"')
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/delete_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/delete_spec.rb
new file mode 100644
index 0000000..fe7e55e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/delete_spec.rb
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Http::Delete do
+  let(:easy) { Ethon::Easy.new }
+  let(:url) { "http://localhost:3001/" }
+  let(:params) { nil }
+  let(:form) { nil }
+  let(:delete) { described_class.new(url, {:params => params, :body => form}) }
+
+  context "when requesting" do
+    before do
+      delete.setup(easy)
+      easy.perform
+    end
+
+    it "makes a delete request" do
+      expect(easy.response_body).to include('"REQUEST_METHOD":"DELETE"')
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/get_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/get_spec.rb
new file mode 100644
index 0000000..e3a7e4f
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/get_spec.rb
@@ -0,0 +1,126 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Http::Get do
+  let(:easy) { Ethon::Easy.new }
+  let(:url) { "http://localhost:3001/" }
+  let(:params) { nil }
+  let(:form) { nil }
+  let(:options) { {} }
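+
+  # Note: passing a :body to a GET makes the setup below force
+  # customrequest, since libcurl would otherwise turn a request
+  # carrying POSTFIELDS into a POST.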
let(:get) { described_class.new(url, {:params => params, :body => form}.merge(options)) } + + describe "#setup" do + it "sets url" do + get.setup(easy) + expect(easy.url).to eq(url) + end + + context "when body" do + let(:form) { { :a => 1 } } + + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("GET") + get.setup(easy) + end + end + + context "when no body" do + it "doesn't set customrequest" do + expect(easy).to receive(:customrequest=).never + get.setup(easy) + end + end + + context "when requesting" do + before do + get.setup(easy) + easy.perform + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + let(:params) { {:a => "1&b=2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + + context "when params and no body" do + let(:params) { {:a => "1&b=2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + end + + context "when params and body" do + let(:params) { {:a => "1&b=2"} } + let(:form) { {:b => "2"} } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a get request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + end + + context "with :escape" do + let(:params) { {:a => "1&b=2"} } + + context 'missing' do + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'nil' do + let(:options) { {:escape => nil} } + + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'true' do + let(:options) { {:escape => true} } + + it "escapes values" do + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + end + + context 'false' do + let(:options) { {:escape => false} } + + it "sends raw values" do + expect(easy.url).to eq("#{url}?a=1&b=2") + end + end + end + + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/head_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/head_spec.rb new file mode 100644 index 0000000..5a04f06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/head_spec.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Head do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:head) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + context "when nothing" do + it "sets nobody" do + expect(easy).to receive(:nobody=).with(true) + head.setup(easy) + end + + it "sets url" do + head.setup(easy) + expect(easy.url).to eq(url) + end + end + + context "when params" do + let(:params) { {:a => "1&b=2"} } + + it "sets nobody" do + expect(easy).to receive(:nobody=).with(true) + head.setup(easy) + end + + it "attaches escaped to url" do + head.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26b%3D2") + end + + context 
"when requesting" do + before do + head.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "has no body" do + expect(easy.response_body).to be_empty + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end + + context "when body" do + let(:form) { {:a => 1} } + + context "when requesting" do + before do + head.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/options_spec.rb new file mode 100644 index 0000000..99ca96f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/options_spec.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Options do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("OPTIONS") + options.setup(easy) + end + + it "sets url" do + options.setup(easy) + expect(easy.url).to eq(url) + end + + context "when requesting" do + let(:params) { {:a => "1&b=2"} } + + before do + options.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a options request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"OPTIONS"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/patch_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/patch_spec.rb new file mode 100644 index 0000000..a74cec7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/patch_spec.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Patch do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:patch) { described_class.new(url, {:params => params, :body => form}) } + + describe "#setup" do + it "sets customrequest" do + expect(easy).to receive(:customrequest=).with("PATCH") + patch.setup(easy) + end + + it "sets url" do + patch.setup(easy) + expect(easy.url).to eq(url) + end + + context "when requesting" do + let(:params) { {:a => "1&b=2"} } + + before do + patch.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a patch request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PATCH"') + end + + it "requests parameterized url" do + expect(easy.effective_url).to 
eq("http://localhost:3001/?a=1%26b%3D2") + end + + context "when url already contains params" do + let(:url) { "http://localhost:3001/?query=here" } + + it "requests parameterized url" do + expect(easy.effective_url).to eq("http://localhost:3001/?query=here&a=1%26b%3D2") + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/post_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/post_spec.rb new file mode 100644 index 0000000..a65ceff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/post_spec.rb @@ -0,0 +1,317 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Post do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { Hash.new } + let(:post) { described_class.new(url, options.merge({:params => params, :body => form})) } + + describe "#setup" do + context "when nothing" do + it "sets url" do + post.setup(easy) + expect(easy.url).to eq(url) + end + + it "sets postfield_size" do + expect(easy).to receive(:postfieldsize=).with(0) + post.setup(easy) + end + + it "sets copy_postfields" do + expect(easy).to receive(:copypostfields=).with("") + post.setup(easy) + end + + it "makes a post request" do + post.setup(easy) + easy.perform + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + end + + context "when params" do + let(:params) { {:a => "1&"} } + + it "attaches escaped to url" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + + context "with arrays" do + let(:params) { {:a => %w( foo bar )} } + + context "by default" do + it "encodes them with indexes" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a%5B0%5D=foo&a%5B1%5D=bar") + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + it "encodes them without indexes" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a%5B%5D=foo&a%5B%5D=bar") + end + end + end + + context "with :escape" do + context 'missing' do + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'nil' do + let(:options) { {:escape => nil} } + + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'true' do + let(:options) { {:escape => true} } + + it "escapes values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + end + + context 'false' do + let(:options) { {:escape => false} } + + it "sends raw values" do + post.setup(easy) + expect(easy.url).to eq("#{url}?a=1&") + end + end + end + + it "sets postfieldsize" do + expect(easy).to receive(:postfieldsize=).with(0) + post.setup(easy) + end + + it "sets copypostfields" do + expect(easy).to receive(:copypostfields=).with("") + post.setup(easy) + end + + context "when requesting" do + let(:postredir) { nil } + + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.postredir = postredir + easy.followlocation = true + easy.perform + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses application/x-www-form-urlencoded content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded"') + end + + it "requests parameterized url" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?a=1%26"') + end + + context "when 
redirection" do + let(:url) { "localhost:3001/redirect" } + + context "when no postredirs" do + it "is a get" do + expect(easy.response_body).to include('"REQUEST_METHOD":"GET"') + end + end + + unless ENV['TRAVIS'] + context "when postredirs" do + let(:postredir) { :post_all } + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + end + end + end + end + end + + context "when body" do + context "when multipart" do + let(:form) { {:a => File.open(__FILE__, 'r')} } + + it "sets httppost" do + expect(easy).to receive(:httppost=) + post.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses multipart/form-data content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"multipart/form-data') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":".+"') + end + + it "submits the data" do + expect(easy.response_body).to include('"filename":"post_spec.rb"') + end + end + end + + context "when not multipart" do + let(:form) { {:a => "1&b=2"} } + let(:encoded) { "a=1%26b%3D2" } + + it "sets escaped copypostfields" do + expect(easy).to receive(:copypostfields=).with(encoded) + post.setup(easy) + end + + it "sets postfieldsize" do + expect(easy).to receive(:postfieldsize=).with(encoded.bytesize) + post.setup(easy) + end + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "is a post" do + expect(easy.response_body).to include('"REQUEST_METHOD":"POST"') + end + + it "uses multipart/form-data content type" do + expect(easy.response_body).to include('"CONTENT_TYPE":"application/x-www-form-urlencoded') + end + + it "submits a body" do + expect(easy.response_body).to match('"body":"a=1%26b%3D2"') + end + + it "submits the data" do + expect(easy.response_body).to include('"rack.request.form_hash":{"a":"1&b=2"}') + end + end + end + + context "when string" do + let(:form) { "{a: 1}" } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sends string" do + expect(easy.response_body).to include('"body":"{a: 1}"') + end + end + end + + context "when binary with null bytes" do + let(:form) { [1, 0, 1].pack('c*') } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sends binary data" do + expect(easy.response_body).to include('"body":"\\u0001\\u0000\\u0001"') + end + end + end + + context "when arrays" do + let(:form) { {:a => %w( foo bar )} } + + context "by default" do + it "sets copypostfields with indexed, escaped representation" do + expect(easy).to receive(:copypostfields=).with('a%5B0%5D=foo&a%5B1%5D=bar') + post.setup(easy) + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + + it "sets copypostfields with non-indexed, escaped representation" do + expect(easy).to receive(:copypostfields=).with('a%5B%5D=foo&a%5B%5D=bar') + post.setup(easy) + end + end + end + end + + context "when params and 
body" do + let(:form) { {:a => "1"} } + let(:params) { {:b => "2"} } + + context "when requesting" do + before do + easy.headers = { 'Expect' => '' } + post.setup(easy) + easy.perform + end + + it "url contains params" do + expect(easy.response_body).to include('"REQUEST_URI":"http://localhost:3001/?b=2"') + end + + it "body contains form" do + expect(easy.response_body).to include('"body":"a=1"') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/put_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/put_spec.rb new file mode 100644 index 0000000..f294c37 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http/put_spec.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Http::Put do + let(:easy) { Ethon::Easy.new } + let(:url) { "http://localhost:3001/" } + let(:params) { nil } + let(:form) { nil } + let(:options) { Hash.new } + let(:put) { described_class.new(url, options.merge({:params => params, :body => form})) } + + describe "#setup" do + context "when nothing" do + it "sets url" do + put.setup(easy) + expect(easy.url).to eq(url) + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(0) + put.setup(easy) + end + + context "when requesting" do + it "makes a put request" do + put.setup(easy) + easy.perform + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + end + end + + context "when params" do + let(:params) { {:a => "1&"} } + + it "attaches escaped to url" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a=1%26") + end + + context "with arrays" do + let(:params) { {:a => %w( foo bar )} } + + context "by default" do + it "encodes them with indexes" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a%5B0%5D=foo&a%5B1%5D=bar") + end + end + + context "when params_encoding is :rack" do + let(:options) { {:params_encoding => :rack} } + it "encodes them without indexes" do + put.setup(easy) + expect(easy.url).to eq("#{url}?a%5B%5D=foo&a%5B%5D=bar") + end + end + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(0) + put.setup(easy) + end + + context "when requesting" do + before do + put.setup(easy) + easy.perform + end + + it "makes a put request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + end + end + + context "when body" do + let(:form) { {:a => "1&b=2"} } + + it "sets infilesize" do + expect(easy).to receive(:infilesize=).with(11) + put.setup(easy) + end + + it "sets readfunction" do + expect(easy).to receive(:readfunction) + put.setup(easy) + end + + it "sets upload" do + expect(easy).to receive(:upload=).with(true) + put.setup(easy) + end + + context "when requesting" do + context "sending string body" do + before do + easy.headers = { 'Expect' => '' } + put.setup(easy) + easy.perform + end + + it "makes a put request" do + expect(easy.response_body).to include('"REQUEST_METHOD":"PUT"') + end + + it "submits a body" do + expect(easy.response_body).to include('"body":"a=1%26b%3D2"') + end + end + + context "when injecting a file as body" do + let(:file) { File.open(__FILE__) } + let(:easy) do + e = Ethon::Easy.new(:url => url, :upload => true) + e.set_read_callback(file) + e.infilesize = File.size(file.path) + e + end + + before do 
+            easy.headers = { 'Expect' => '' }
+            easy.perform
+          end
+
+          it "submits file" do
+            expect(easy.response_body).to include("injecting")
+          end
+        end
+      end
+
+      context "when arrays" do
+        let(:form) { {:a => %w( foo bar )} }
+
+        before do
+          put.setup(easy)
+          easy.perform
+        end
+
+        context "by default" do
+          it "submits an indexed, escaped representation" do
+            expect(easy.response_body).to include('"body":"a%5B0%5D=foo&a%5B1%5D=bar"')
+          end
+        end
+
+        context "when params_encoding is :rack" do
+          let(:options) { {:params_encoding => :rack} }
+
+          it "submits a non-indexed, escaped representation" do
+            expect(easy.response_body).to include('"body":"a%5B%5D=foo&a%5B%5D=bar"')
+          end
+        end
+      end
+    end
+
+    context "when params and body"
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http_spec.rb
new file mode 100644
index 0000000..b74a2df
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/http_spec.rb
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Http do
+  let(:easy) { Ethon::Easy.new }
+
+  describe "#http_request" do
+    let(:url) { "http://localhost:3001/" }
+    let(:action_name) { :get }
+    let(:options) { {} }
+
+    let(:get) { double(:setup) }
+    let(:get_class) { Ethon::Easy::Http::Get }
+
+    it "instantiates action" do
+      expect(get).to receive(:setup)
+      expect(get_class).to receive(:new).and_return(get)
+      easy.http_request(url, action_name, options)
+    end
+
+    context "when requesting" do
+      [ :get, :post, :put, :delete, :head, :patch, :options ].map do |action|
+        it "returns ok" do
+          easy.http_request(url, action, options)
+          easy.perform
+          expect(easy.return_code).to be(:ok)
+        end
+
+        unless action == :head
+          it "makes a #{action.to_s.upcase} request" do
+            easy.http_request(url, action, options)
+            easy.perform
+            expect(easy.response_body).to include("\"REQUEST_METHOD\":\"#{action.to_s.upcase}\"")
+          end
+
+          it "streams the response body from the #{action.to_s.upcase} request" do
+            bytes_read = 0
+            easy.on_body { |chunk, response| bytes_read += chunk.bytesize }
+            easy.http_request(url, action, options)
+            easy.perform
+            content_length = ((easy.response_headers =~ /Content-Length: (\d+)/) && $1.to_i)
+            expect(bytes_read).to eq(content_length)
+            expect(easy.response_body).to eq("")
+          end
+
+          it "notifies when headers are ready" do
+            headers = []
+            easy.on_headers { |r| headers << r.response_headers }
+            easy.http_request(url, action, options)
+            easy.perform
+            expect(headers).to eq([easy.response_headers])
+            expect(headers.first).to match(/Content-Length: (\d+)/)
+          end
+        end
+      end
+
+      it "makes requests with custom HTTP verbs" do
+        easy.http_request(url, :purge, options)
+        easy.perform
+        expect(easy.response_body).to include(%{"REQUEST_METHOD":"PURGE"})
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/informations_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/informations_spec.rb
new file mode 100644
index 0000000..3526847
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/informations_spec.rb
@@ -0,0 +1,126 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Informations do
+  let(:easy) { Ethon::Easy.new }
+
+  before do
+    easy.url = "http://localhost:3001"
+    easy.perform
+  end
+
+  describe "#httpauth_avail" do
+    it "returns" do
+      expect(easy.httpauth_avail).to be
+    end
+  end
+
+  describe "#total_time" do
+    it "returns float" do
+      expect(easy.total_time).to be_a(Float)
+    end
+  end
+
+  describe "#starttransfer_time" do
+    it "returns float" do
+      expect(easy.starttransfer_time).to be_a(Float)
+    end
+  end
+
+  describe "#appconnect_time" do
+    it "returns float" do
+      expect(easy.appconnect_time).to be_a(Float)
+    end
+  end
+
+  describe "#pretransfer_time" do
+    it "returns float" do
+      expect(easy.pretransfer_time).to be_a(Float)
+    end
+  end
+
+  describe "#connect_time" do
+    it "returns float" do
+      expect(easy.connect_time).to be_a(Float)
+    end
+  end
+
+  describe "#namelookup_time" do
+    it "returns float" do
+      expect(easy.namelookup_time).to be_a(Float)
+    end
+  end
+
+  describe "#redirect_time" do
+    it "returns float" do
+      expect(easy.redirect_time).to be_a(Float)
+    end
+  end
+
+  describe "#effective_url" do
+    it "returns url" do
+      expect(easy.effective_url).to match(/^http:\/\/localhost:3001\/?/)
+    end
+  end
+
+  describe "#primary_ip" do
+    it "returns localhost" do
+      expect(easy.primary_ip).to match(/::1|127\.0\.0\.1/)
+    end
+  end
+
+  describe "#response_code" do
+    it "returns 200" do
+      expect(easy.response_code).to eq(200)
+    end
+  end
+
+  describe "#redirect_count" do
+    it "returns 0" do
+      expect(easy.redirect_count).to eq(0)
+    end
+  end
+
+  describe "#redirect_url" do
+    it "returns nil as there is no redirect" do
+      expect(easy.redirect_url).to be(nil)
+    end
+  end
+
+  describe "#request_size" do
+    it "returns 53" do
+      expect(easy.request_size).to eq(53)
+    end
+  end
+
+  describe "#supports_zlib?" do
+    it "returns true" do
+      expect(Kernel).to receive(:warn)
+      expect(easy.supports_zlib?).to be_truthy
+    end
+  end
+
+  describe "#size_upload" do
+    it "returns float" do
+      expect(easy.size_upload).to be_a(Float)
+    end
+  end
+
+  describe "#size_download" do
+    it "returns float" do
+      expect(easy.size_download).to be_a(Float)
+    end
+  end
+
+  describe "#speed_upload" do
+    it "returns float" do
+      expect(easy.speed_upload).to be_a(Float)
+    end
+  end
+
+  describe "#speed_download" do
+    it "returns float" do
+      expect(easy.speed_download).to be_a(Float)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/mirror_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/mirror_spec.rb
new file mode 100644
index 0000000..29d150d
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/mirror_spec.rb
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+describe Ethon::Easy::Mirror do
+  let(:options) { nil }
+  let(:mirror) { described_class.new(options) }
+
+  describe "::INFORMATIONS_TO_MIRROR" do
+    [
+      :return_code, :response_code, :response_body, :response_headers,
+      :total_time, :starttransfer_time, :appconnect_time,
+      :pretransfer_time, :connect_time, :namelookup_time, :redirect_time,
+      :size_upload, :size_download, :speed_upload, :speed_download,
+      :effective_url, :primary_ip, :redirect_count, :redirect_url, :debug_info
+    ].each do |name|
+      it "contains #{name}" do
+        expect(described_class::INFORMATIONS_TO_MIRROR).to include(name)
+      end
+    end
+  end
+
+  describe "#to_hash" do
+    let(:options) { {:return_code => 1} }
+
+    it "returns mirror as hash" do
+      expect(mirror.to_hash).to eq(options)
+    end
+  end
+
+  describe "#log_informations" do
+    let(:options) { {:return_code => 1} }
+
+    it "returns hash" do
+      expect(mirror.log_informations).to be_a(Hash)
+    end
+
+    it "only calls methods that exist" do
+      described_class::INFORMATIONS_TO_LOG.each do |method_name|
+        expect(mirror.respond_to? 
method_name).to eql(true) + end + end + + it "includes return code" do + expect(mirror.log_informations).to include(options) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/operations_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/operations_spec.rb new file mode 100644 index 0000000..a3ddc86 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/operations_spec.rb @@ -0,0 +1,271 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Operations do + let(:easy) { Ethon::Easy.new } + + describe "#handle" do + it "returns a pointer" do + expect(easy.handle).to be_a(FFI::Pointer) + end + end + + + describe "#perform" do + let(:url) { nil } + let(:timeout) { nil } + let(:connect_timeout) { nil } + let(:follow_location) { nil } + let(:max_redirs) { nil } + let(:user_pwd) { nil } + let(:http_auth) { nil } + let(:headers) { nil } + let(:protocols) { nil } + let(:redir_protocols) { nil } + let(:username) { nil } + let(:password) { nil } + + before do + Ethon.logger.level = Logger::DEBUG + easy.url = url + easy.timeout = timeout + easy.connecttimeout = connect_timeout + easy.followlocation = follow_location + easy.maxredirs = max_redirs + easy.httpauth = http_auth + easy.headers = headers + easy.protocols = protocols + easy.redir_protocols = redir_protocols + + if user_pwd + easy.userpwd = user_pwd + else + easy.username = username + easy.password = password + end + + easy.perform + end + + it "calls Curl.easy_perform" do + expect(Ethon::Curl).to receive(:easy_perform) + easy.perform + end + + it "calls Curl.easy_cleanup" do + expect_any_instance_of(FFI::AutoPointer).to receive(:free) + easy.cleanup + end + + it "logs" do + expect(Ethon.logger).to receive(:debug) + easy.perform + end + + it "doesn't log after completing because completing could reset" do + easy.on_complete{ expect(Ethon.logger).to receive(:debug).never } + easy.perform + end + + context "when url" do + let(:url) { "http://localhost:3001/" } + + it "returns ok" do + expect(easy.return_code).to eq(:ok) + end + + it "sets response body" do + expect(easy.response_body).to be + end + + it "sets response headers" do + expect(easy.response_headers).to be + end + + context "when request timed out" do + let(:url) { "http://localhost:3001/?delay=1" } + let(:timeout) { 1 } + + it "returns operation_timedout" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + + context "when connection timed out" do + let(:url) { "http://localhost:3009" } + let(:connect_timeout) { 1 } + + it "returns couldnt_connect" do + expect(easy.return_code).to eq(:couldnt_connect) + end + end + + context "when no follow location" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { false } + + it "doesn't follow" do + expect(easy.response_code).to eq(302) + expect(easy.redirect_url).to eq("http://localhost:3001/") + end + end + + context "when follow location" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { true } + + it "follows" do + expect(easy.response_code).to eq(200) + expect(easy.redirect_url).to eq(nil) + end + + context "when infinite redirect loop" do + let(:url) { "http://localhost:3001/bad_redirect" } + let(:max_redirs) { 5 } + + context "when max redirect set" do + it "follows only x times" do + expect(easy.response_code).to eq(302) + expect(easy.redirect_url).to eq("http://localhost:3001/bad_redirect") + end + end + end + end + + context "when user agent" do + 
let(:headers) { { 'User-Agent' => 'Ethon' } } + + it "sets" do + expect(easy.response_body).to include('"HTTP_USER_AGENT":"Ethon"') + end + end + end + + context "when auth url" do + before { easy.url = url } + + context "when basic auth" do + let(:url) { "http://localhost:3001/auth_basic/username/password" } + + context "when no user_pwd" do + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when invalid user_pwd" do + let(:user_pwd) { "invalid:invalid" } + + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when valid user_pwd" do + let(:user_pwd) { "username:password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + + context "when user and password" do + let(:username) { "username" } + let(:password) { "password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + end + + context "when ntlm" do + let(:url) { "http://localhost:3001/auth_ntlm" } + let(:http_auth) { :ntlm } + + context "when no user_pwd" do + it "returns 401" do + expect(easy.response_code).to eq(401) + end + end + + context "when user_pwd" do + let(:user_pwd) { "username:password" } + + it "returns 200" do + expect(easy.response_code).to eq(200) + end + end + end + end + + context "when protocols" do + context "when asking for a allowed url" do + let(:url) { "http://localhost:3001" } + let(:protocols) { :http } + + it "returns ok" do + expect(easy.return_code).to be(:ok) + end + end + + context "when asking for a not allowed url" do + let(:url) { "http://localhost:3001" } + let(:protocols) { :https } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when multiple protocols" do + context "when asking for a allowed url" do + let(:protocols) { [:http, :https] } + + context "when http" do + let(:url) { "http://localhost:3001" } + + it "returns ok for http" do + expect(easy.return_code).to be(:ok) + end + end + + context "when https" do + let(:url) { "https://localhost:3001" } + + it "returns ssl_connect_error for https" do + expect(easy.return_code).to be(:ssl_connect_error) + end + end + end + + context "when asking for a not allowed url" do + let(:url) { "ssh://localhost" } + let(:protocols) { [:https, :http] } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when redir_protocols" do + context "when redirecting to a not allowed url" do + let(:url) { "http://localhost:3001/redirect" } + let(:follow_location) { true } + let(:redir_protocols) { :https } + + it "returns unsupported_protocol" do + expect(easy.return_code).to be(:unsupported_protocol) + end + end + end + + context "when no url" do + it "returns url_malformat" do + expect(easy.perform).to eq(:url_malformat) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/options_spec.rb new file mode 100644 index 0000000..e135bd9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/options_spec.rb @@ -0,0 +1,193 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Options do + let(:easy) { Ethon::Easy.new } + + [ + :accept_encoding, :cainfo, :capath, :connecttimeout, :connecttimeout_ms, :cookie, + :cookiejar, :cookiefile, :copypostfields, :customrequest, :dns_cache_timeout, + :followlocation, :forbid_reuse, :http_version, 
:httpauth, :httpget, :httppost, + :infilesize, :interface, :keypasswd, :maxredirs, :nobody, :nosignal, + :postfieldsize, :postredir, :protocols, :proxy, :proxyauth, :proxyport, :proxytype, + :proxyuserpwd, :readdata, :readfunction, :redir_protocols, :ssl_verifyhost, + :ssl_verifypeer, :sslcert, :sslcerttype, :sslkey, :sslkeytype, :sslversion, + :timeout, :timeout_ms, :unrestricted_auth, :upload, :url, :useragent, + :userpwd, :verbose, :pipewait, :dns_shuffle_addresses, :path_as_is + ].each do |name| + describe "#{name}=" do + it "responds_to" do + expect(easy).to respond_to("#{name}=") + end + + it "sets option" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Curl).to receive(:set_option).with(name, anything, anything) + value = case name + when :http_version + :httpv1_0 + when :httpauth + :basic + when :protocols, :redir_protocols + :http + when :postredir + :post_301 + when :proxytype + :http + when :sslversion + :default + when :httppost + FFI::Pointer::NULL + else + 1 + end + easy.method("#{name}=").call(value) + end + end + end + + describe '#escape?' do + context 'by default' do + it 'returns true' do + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=nil' do + it 'returns true' do + easy.escape = nil + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=true' do + it 'returns true' do + easy.escape = true + expect(easy.escape?).to be_truthy + end + end + + context 'when #escape=false' do + it 'returns true' do + easy.escape = false + expect(easy.escape?).to be_falsey + end + end + end + + describe '#multipart?' do + context 'by default' do + it 'returns false' do + expect(easy.multipart?).to be_falsey + end + end + + context 'when #multipart=nil' do + it 'returns false' do + easy.multipart = nil + expect(easy.multipart?).to be_falsey + end + end + + context 'when #multipart=true' do + it 'returns true' do + easy.multipart = true + expect(easy.multipart?).to be_truthy + end + end + + context 'when #multipart=false' do + it 'returns false' do + easy.multipart = false + expect(easy.multipart?).to be_falsey + end + end + end + + describe "#httppost=" do + it "raises unless given a FFI::Pointer" do + expect{ easy.httppost = 1 }.to raise_error(Ethon::Errors::InvalidValue) + end + end + + context "when requesting" do + let(:url) { "localhost:3001" } + let(:timeout) { nil } + let(:timeout_ms) { nil } + let(:connecttimeout) { nil } + let(:connecttimeout_ms) { nil } + let(:userpwd) { nil } + + before do + easy.url = url + easy.timeout = timeout + easy.timeout_ms = timeout_ms + easy.connecttimeout = connecttimeout + easy.connecttimeout_ms = connecttimeout_ms + easy.userpwd = userpwd + easy.perform + end + + context "when userpwd" do + context "when contains /" do + let(:url) { "localhost:3001/auth_basic/test/te%2Fst" } + let(:userpwd) { "test:te/st" } + + it "works" do + expect(easy.response_code).to eq(200) + end + end + end + + context "when timeout" do + let(:timeout) { 1 } + + context "when request takes longer" do + let(:url) { "localhost:3001?delay=2" } + + it "times out" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + end + + context "when connecttimeout" do + let(:connecttimeout) { 1 } + + context "when cannot connect" do + let(:url) { "localhost:3002" } + + it "times out" do + expect(easy.return_code).to eq(:couldnt_connect) + end + end + end + + if Ethon::Easy.supports_timeout_ms? 
+ context "when timeout_ms" do + let(:timeout_ms) { 100 } + + context "when request takes longer" do + let(:url) { "localhost:3001?delay=1" } + + it "times out" do + expect(easy.return_code).to eq(:operation_timedout) + end + end + end + + context "when connecttimeout_ms" do + let(:connecttimeout_ms) { 100 } + + context "when cannot connect" do + let(:url) { "localhost:3002" } + + it "times out" do + # this can either lead to a timeout or couldnt connect depending on which happens first + expect([:couldnt_connect, :operation_timedout]).to include(easy.return_code) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/queryable_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/queryable_spec.rb new file mode 100644 index 0000000..89c4d11 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/queryable_spec.rb @@ -0,0 +1,235 @@ +# encoding: utf-8 +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Queryable do + let(:hash) { {} } + let!(:easy) { Ethon::Easy.new } + let(:params) { Ethon::Easy::Params.new(easy, hash) } + + describe "#to_s" do + context "when query_pairs empty" do + before { params.instance_variable_set(:@query_pairs, []) } + + it "returns empty string" do + expect(params.to_s).to eq("") + end + end + + context "when query_pairs not empty" do + context "when escape" do + before do + params.escape = true + end + + { + '!' => '%21', '*' => '%2A', "'" => '%27', '(' => '%28', + ')' => '%29', ';' => '%3B', ':' => '%3A', '@' => '%40', + '&' => '%26', '=' => '%3D', '+' => '%2B', '$' => '%24', + ',' => '%2C', '/' => '%2F', '?' => '%3F', '#' => '%23', + '[' => '%5B', ']' => '%5D', + + '<' => '%3C', '>' => '%3E', '"' => '%22', '{' => '%7B', + '}' => '%7D', '|' => '%7C', '\\' => '%5C', '`' => '%60', + '^' => '%5E', '%' => '%25', ' ' => '%20', "\0" => '%00', + + 'まつもと' => '%E3%81%BE%E3%81%A4%E3%82%82%E3%81%A8', + }.each do |value, percent| + it "turns #{value.inspect} into #{percent}" do + params.instance_variable_set(:@query_pairs, [[:a, value]]) + expect(params.to_s).to eq("a=#{percent}") + end + end + + { + '.'
=> '%2E', '-' => '%2D', '_' => '%5F', '~' => '%7E', + }.each do |value, percent| + it "leaves #{value.inspect} instead of turning into #{percent}" do + params.instance_variable_set(:@query_pairs, [[:a, value]]) + expect(params.to_s).to eq("a=#{value}") + end + end + end + + context "when no escape" do + before { params.instance_variable_set(:@query_pairs, [[:a, 1], [:b, 2]]) } + + it "returns concatenated query string" do + expect(params.to_s).to eq("a=1&b=2") + end + end + end + + context "when query_pairs contains a string" do + before { params.instance_variable_set(:@query_pairs, ["{a: 1}"]) } + + it "returns correct string" do + expect(params.to_s).to eq("{a: 1}") + end + end + end + + describe "#build_query_pairs" do + let(:pairs) { params.method(:build_query_pairs).call(hash) } + + context "when params is empty" do + it "returns empty array" do + expect(pairs).to eq([]) + end + end + + context "when params is string" do + let(:hash) { "{a: 1}" } + + it "wraps it in an array" do + expect(pairs).to eq([hash]) + end + end + + context "when params is simple hash" do + let(:hash) { {:a => 1, :b => 2} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, 2]) + end + end + + context "when params is a nested hash" do + let(:hash) { {:a => 1, :b => {:c => 2}} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[c]", 2]) + end + end + + context "when params contains an array" do + let(:hash) { {:a => 1, :b => [2, 3]} } + + context "by default" do + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[0]", 2]) + expect(pairs).to include(["b[1]", 3]) + end + end + + context "when params_encoding is :rack" do + before { params.params_encoding = :rack } + it "transforms without indexes" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include(["b[]", 2]) + expect(pairs).to include(["b[]", 3]) + end + end + + context "when params_encoding is :none" do + before { params.params_encoding = :none } + it "does no transformation" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, [2, 3]]) + end + end + end + + context "when params contains something nested in an array" do + context "when string" do + let(:hash) { {:a => {:b => ["hello", "world"]}} } + + it "transforms" do + expect(pairs).to eq([["a[b][0]", "hello"], ["a[b][1]", "world"]]) + end + end + + context "when hash" do + let(:hash) { {:a => {:b => [{:c =>1}, {:d => 2}]}} } + + it "transforms" do + expect(pairs).to eq([["a[b][0][c]", 1], ["a[b][1][d]", 2]]) + end + end + + context "when file" do + let(:file) { File.open("spec/spec_helper.rb") } + let(:file_info) { params.method(:file_info).call(file) } + let(:hash) { {:a => {:b => [file]}} } + let(:mime_type) { file_info[1] } + + it "transforms" do + expect(pairs).to eq([["a[b][0]", file_info]]) + end + + context "when MIME" do + if defined?(MIME) + context "when mime type" do + it "sets mime type to text" do + expect(mime_type).to eq("application/x-ruby") + end + end + end + + context "when no mime type" do + let(:file) { Tempfile.new("fubar") } + + it "sets mime type to default application/octet-stream" do + expect(mime_type).to eq("application/octet-stream") + end + end + end + + context "when no MIME" do + before { hide_const("MIME") } + + it "sets mime type to default application/octet-stream" do + expect(mime_type).to eq("application/octet-stream") + end + end + end + end + + + context "when params contains file" do + let(:file) { Tempfile.new("fubar") } 
+ let(:file_info) { params.method(:file_info).call(file) } + let(:hash) { {:a => 1, :b => file} } + + it "transforms" do + expect(pairs).to include([:a, 1]) + expect(pairs).to include([:b, file_info]) + end + end + + context "when params key contains a null byte" do + let(:hash) { {:a => "1\0" } } + + it "preserves" do + expect(pairs).to eq([[:a, "1\0"]]) + end + end + + context "when params value contains a null byte" do + let(:hash) { {"a\0" => 1 } } + + it "preserves" do + expect(pairs).to eq([["a\0", 1]]) + end + end + end + + describe "#empty?" do + context "when params empty" do + it "returns true" do + expect(params.empty?).to be_truthy + end + end + + context "when params not empty" do + let(:hash) { {:a => 1} } + + it "returns false" do + expect(params.empty?).to be_falsey + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/response_callbacks_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/response_callbacks_spec.rb new file mode 100644 index 0000000..f142fad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/response_callbacks_spec.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::ResponseCallbacks do + let(:easy) { Ethon::Easy.new } + + [:on_complete, :on_headers, :on_body, :on_progress].each do |callback_type| + describe "##{callback_type}" do + it "responds" do + expect(easy).to respond_to("#{callback_type}") + end + + context "when no block given" do + it "returns @#{callback_type}" do + expect(easy.send("#{callback_type}")).to eq([]) + end + end + + context "when block given" do + it "stores" do + easy.send(callback_type) { p 1 } + expect(easy.instance_variable_get("@#{callback_type}").size).to eq(1) + end + end + + context "when multiple blocks given" do + it "stores" do + easy.send(callback_type) { p 1 } + easy.send(callback_type) { p 2 } + expect(easy.instance_variable_get("@#{callback_type}").size).to eq(2) + end + end + end + end + + describe "#complete" do + before do + easy.on_complete {|r| String.new(r.url) } + end + + it "executes blocks and passes self" do + expect(String).to receive(:new).with(easy.url) + easy.complete + end + + context "when @on_complete nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_complete, nil) + expect{ easy.complete }.to_not raise_error + end + end + end + + describe "#headers" do + before do + easy.on_headers {|r| String.new(r.url) } + end + + it "executes blocks and passes self" do + expect(String).to receive(:new).with(easy.url) + easy.headers + end + + context "when @on_headers nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_headers, nil) + expect{ easy.headers }.to_not raise_error + end + end + end + + describe "#progress" do + context "when requesting for realz" do + it "executes callback" do + post = Ethon::Easy::Http::Post.new("http://localhost:3001", {:body => "bar=fu"}) + post.setup(easy) + @called = false + @has_dltotal = false + @has_ultotal = false + easy.on_progress { @called = true } + easy.on_progress { |dltotal, _, _, _| @has_dltotal ||= true } + easy.on_progress { |_, _, ultotal, _| @has_ultotal ||= true } + easy.perform + expect(@called).to be true + expect(@has_dltotal).to be true + expect(@has_ultotal).to be true + end + end + + context "when pretending" do + before do + @dltotal = nil + @dlnow = nil + @ultotal = nil + @ulnow = nil + easy.on_progress { |dltotal, dlnow, ultotal, ulnow| @dltotal = dltotal ; @dlnow = dlnow; @ultotal = ultotal; 
@ulnow = ulnow } + end + + it "executes blocks and passes dltotal" do + easy.progress(1, 2, 3, 4) + expect(@dltotal).to eq(1) + end + + it "executes blocks and passes dlnow" do + easy.progress(1, 2, 3, 4) + expect(@dlnow).to eq(2) + end + + it "executes blocks and passes ultotal" do + easy.progress(1, 2, 3, 4) + expect(@ultotal).to eq(3) + end + + it "executes blocks and passes ulnow" do + easy.progress(1, 2, 3, 4) + expect(@ulnow).to eq(4) + end + + context "when @on_progress nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_progress, nil) + expect{ easy.progress(1, 2, 3, 4) }.to_not raise_error + end + end + end + end + + describe "#body" do + before do + @chunk = nil + @r = nil + easy.on_body { |chunk, r| @chunk = chunk ; @r = r } + end + + it "executes blocks and passes self" do + easy.body("the chunk") + expect(@r).to be(easy) + end + + it "executes blocks and passes chunk" do + easy.body("the chunk") + expect(@chunk).to eq("the chunk") + end + + context "when @on_body nil" do + it "doesn't raise" do + easy.instance_variable_set(:@on_body, nil) + expect{ easy.body("the chunk") }.to_not raise_error + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/util_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/util_spec.rb new file mode 100644 index 0000000..048c5bd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy/util_spec.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy::Util do + class Dummy + include Ethon::Easy::Util + end + + let(:klass) { Dummy.new } + + describe "escape_zero_byte" do + context "when value has no zero byte" do + let(:value) { "hello world" } + + it "returns same value" do + expect(klass.escape_zero_byte(value)).to be(value) + end + end + + context "when value has zero byte" do + let(:value) { "hello \0world" } + + it "returns escaped" do + expect(klass.escape_zero_byte(value)).to eq("hello \\0world") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy_spec.rb new file mode 100644 index 0000000..c0e9565 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/easy_spec.rb @@ -0,0 +1,203 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Easy do + let(:easy) { Ethon::Easy.new } + + describe ".new" do + it "inits curl" do + expect(Ethon::Curl).to receive(:init) + easy + end + + context "when options are empty" do + it "sets only callbacks" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Easy).to receive(:set_option).never + easy + end + end + + context "when options not empty" do + context "when followlocation is set" do + let(:options) { { :followlocation => true } } + let(:easy) { Ethon::Easy.new(options) } + + it "sets followlocation" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Curl).to receive(:set_option).with(:followlocation, true, anything) + easy + end + end + end + end + + describe "#set_attributes" do + context "when options are empty" do + it "sets only callbacks" do + expect_any_instance_of(Ethon::Easy).to receive(:set_callbacks) + expect(Ethon::Easy).to receive(:set_option).never + easy + end + end + + context "when options aren't empty" do + context "when valid key" do + it "sets" do + expect(easy).to receive(:verbose=).with(true) + easy.set_attributes({:verbose => true}) + end 
+ end + + context "when invalid key" do + it "raises invalid option error" do + expect{ easy.set_attributes({:fubar => 1}) }.to raise_error(Ethon::Errors::InvalidOption) + end + end + end + end + + describe "#reset" do + before { easy.url = "www.example.com" } + + it "resets url" do + easy.reset + expect(easy.url).to be_nil + end + + it "resets escape?" do + easy.escape = false + easy.reset + expect(easy.escape?).to be_truthy + end + + it "resets hash" do + easy.reset + expect(easy.instance_variable_get(:@hash)).to be_nil + end + + it "resets easy handle" do + expect(Ethon::Curl).to receive(:easy_reset) + easy.reset + end + + it "resets on_complete" do + easy.on_complete { p 1 } + easy.reset + expect(easy.on_complete).to be_empty + end + + it "resets on_headers" do + easy.on_headers { p 1 } + easy.reset + expect(easy.on_headers).to be_empty + end + + it "resets on_body" do + easy.on_body { p 1 } + easy.reset + expect(easy.on_body).to be_empty + end + end + + describe "#dup" do + let!(:easy) do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/" + easy.on_complete { 'on_complete' } + easy.on_headers { 'on_headers' } + easy.on_progress { 'on_progress' } + easy.response_body = String.new('test_body') + easy.response_headers = String.new('test_headers') + easy + end + let!(:e) { easy.dup } + + it "sets a new handle" do + expect(e.handle).not_to eq(easy.handle) + end + + it "preserves url" do + expect(e.url).to eq(easy.url) + end + + it "preserves on_complete callback" do + expect(e.on_complete).to be(easy.on_complete) + end + + it "preserves on_headers callback" do + expect(e.on_headers).to be(easy.on_headers) + end + + it 'preserves body_write_callback of original handle' do + expect { easy.perform }.to change { easy.response_body } + expect { easy.perform }.not_to change { e.response_body } + end + + it "preserves on_progress callback" do + expect(e.on_progress).to be(easy.on_progress) + end + + it 'sets new body_write_callback of duplicated handle' do + expect { e.perform }.to change { e.response_body } + expect { e.perform }.not_to change { easy.response_body } + end + + it 'preserves headers_write_callback of original handle' do + expect { easy.perform }.to change { easy.response_headers } + expect { easy.perform }.not_to change { e.response_headers } + end + + it 'sets new headers_write_callback of duplicated handle' do + expect { e.perform }.to change { e.response_headers } + expect { e.perform }.not_to change { easy.response_headers } + end + + it "resets response_body" do + expect(e.response_body).to be_empty + end + + it "resets response_headers" do + expect(e.response_headers).to be_empty + end + + it "sets response_body for duplicated Easy" do + e.perform + expect(e.response_body).not_to be_empty + end + + it "sets response_headers for duplicated Easy" do + e.perform + expect(e.response_headers).not_to be_empty + end + + it "preserves response_body for original Easy" do + e.perform + expect(easy.response_body).to eq('test_body') + end + + it "preserves response_headers for original Easy" do + e.perform + expect(easy.response_headers).to eq('test_headers') + end + end + + describe "#mirror" do + it "returns a Mirror" do + expect(easy.mirror).to be_a(Ethon::Easy::Mirror) + end + + it "builds from easy" do + expect(Ethon::Easy::Mirror).to receive(:from_easy).with(easy) + easy.mirror + end + end + + describe "#log_inspect" do + [ :url, :response_code, :return_code, :total_time ].each do |name| + it "contains #{name}" do + expect(easy.log_inspect).to match name.to_s + end + 
end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/libc_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/libc_spec.rb new file mode 100644 index 0000000..a323151 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/libc_spec.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Libc do + describe "#getdtablesize", :if => !Ethon::Curl.windows? do + it "returns an integer" do + expect(Ethon::Libc.getdtablesize).to be_a(Integer) + end + + it "returns bigger zero", :if => !Ethon::Curl.windows? do + expect(Ethon::Libc.getdtablesize).to_not be_zero + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/loggable_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/loggable_spec.rb new file mode 100644 index 0000000..117cba3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/loggable_spec.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true +require "spec_helper" + +describe Ethon::Loggable do + + describe "#logger=" do + + let(:logger) do + Logger.new($stdout).tap do |log| + log.level = Logger::INFO + end + end + + before do + Ethon.logger = logger + end + + it "sets the logger" do + expect(Ethon.logger).to eq(logger) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/operations_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/operations_spec.rb new file mode 100644 index 0000000..781b62a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/operations_spec.rb @@ -0,0 +1,298 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Operations do + let(:multi) { Ethon::Multi.new } + let(:easy) { Ethon::Easy.new } + let(:pointer) { FFI::MemoryPointer.new(:int) } + + describe "#handle" do + it "returns a pointer" do + expect(multi.handle).to be_a(FFI::Pointer) + end + end + + describe "#running_count" do + context "when hydra has no easy" do + it "returns nil" do + expect(multi.send(:running_count)).to be_nil + end + end + + context "when hydra has easy" do + before do + easy.url = "http://localhost:3001/" + multi.add(easy) + multi.send(:trigger, pointer) + end + + it "returns 1" do + expect(multi.send(:running_count)).to eq(1) + end + end + + context "when hydra has more easys" do + let(:another_easy) { Ethon::Easy.new } + + before do + easy.url = "http://localhost:3001/" + another_easy.url = "http://localhost:3001/" + multi.add(easy) + multi.add(another_easy) + multi.send(:trigger, pointer) + end + + it "returns 2" do + expect(multi.send(:running_count)).to eq(2) + end + end + end + + describe "#get_timeout" do + context "when code ok" do + let(:timeout) { 1 } + + before do + expect(Ethon::Curl).to receive(:multi_timeout).and_return(:ok) + multi.instance_variable_set(:@timeout, double(:read_long => timeout)) + end + + it "doesn't raise" do + expect{ multi.send(:get_timeout) }.to_not raise_error + end + + context "when timeout smaller zero" do + let(:timeout) { -1 } + + it "returns 1" do + expect(multi.send(:get_timeout)).to eq(1) + end + end + + context "when timeout bigger or equal zero" do + let(:timeout) { 2 } + + it "returns timeout" do + expect(multi.send(:get_timeout)).to eq(timeout) + end + end + end + + context "when code not ok" do + before { expect(Ethon::Curl).to receive(:multi_timeout).and_return(:not_ok) } + + it "raises MultiTimeout error" do + expect{ multi.send(:get_timeout) }.to 
raise_error(Ethon::Errors::MultiTimeout) + end + end + end + + describe "#set_fds" do + let(:timeout) { 1 } + let(:max_fd) { 1 } + + context "when code ok" do + before { expect(Ethon::Curl).to receive(:multi_fdset).and_return(:ok) } + + it "doesn't raise" do + expect{ multi.method(:set_fds).call(timeout) }.to_not raise_error + end + + context "when max_fd -1" do + let(:max_fd) { -1 } + + before do + multi.instance_variable_set(:@max_fd, double(:read_int => max_fd)) + expect(multi).to receive(:sleep).with(0.001) + end + + it "waits 100ms" do + multi.method(:set_fds).call(timeout) + end + end + + context "when max_fd not -1" do + context "when code smaller zero" do + before { expect(Ethon::Curl).to receive(:select).and_return(-1) } + + it "raises Select error" do + expect{ multi.method(:set_fds).call(timeout) }.to raise_error(Ethon::Errors::Select) + end + end + + context "when code bigger or equal zero" do + before { expect(Ethon::Curl).to receive(:select).and_return(0) } + + it "doesn't raise" do + expect{ multi.method(:set_fds).call(timeout) }.to_not raise_error + end + end + end + end + + context "when code not ok" do + before { expect(Ethon::Curl).to receive(:multi_fdset).and_return(:not_ok) } + + it "raises MultiFdset error" do + expect{ multi.method(:set_fds).call(timeout) }.to raise_error(Ethon::Errors::MultiFdset) + end + end + end + + describe "#perform" do + context "when no easy handles" do + it "returns nil" do + expect(multi.perform).to be_nil + end + + it "logs" do + expect(Ethon.logger).to receive(:debug).twice + multi.perform + end + end + + context "when easy handle" do + before do + easy.url = "http://localhost:3001/" + multi.add(easy) + end + + it "requests" do + multi.perform + end + + it "sets easy" do + multi.perform + expect(easy.response_code).to eq(200) + end + end + + context "when four easy handles" do + let(:easies) do + ary = [] + 4.times do + ary << another_easy = Ethon::Easy.new + another_easy.url = "http://localhost:3001/" + end + ary + end + + before do + easies.each { |e| multi.add(e) } + multi.perform + end + + it "sets response codes" do + expect(easies.all?{ |e| e.response_code == 200 }).to be_truthy + end + end + end + + describe "#ongoing?" 
do + context "when easy_handles" do + before { multi.easy_handles << 1 } + + context "when running_count not greater 0" do + before { multi.instance_variable_set(:@running_count, 0) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + + context "when running_count greater 0" do + before { multi.instance_variable_set(:@running_count, 1) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + end + + context "when no easy_handles" do + context "when running_count not greater 0" do + before { multi.instance_variable_set(:@running_count, 0) } + + it "returns false" do + expect(multi.method(:ongoing?).call).to be_falsey + end + end + + context "when running_count greater 0" do + before { multi.instance_variable_set(:@running_count, 1) } + + it "returns true" do + expect(multi.method(:ongoing?).call).to be_truthy + end + end + end + end + + describe "#init_vars" do + it "sets @timeout" do + expect(multi.instance_variable_get(:@timeout)).to be_a(FFI::MemoryPointer) + end + + it "sets @timeval" do + expect(multi.instance_variable_get(:@timeval)).to be_a(Ethon::Curl::Timeval) + end + + it "sets @fd_read" do + expect(multi.instance_variable_get(:@fd_read)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @fd_write" do + expect(multi.instance_variable_get(:@fd_write)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @fd_excep" do + expect(multi.instance_variable_get(:@fd_excep)).to be_a(Ethon::Curl::FDSet) + end + + it "sets @max_fd" do + expect(multi.instance_variable_get(:@max_fd)).to be_a(FFI::MemoryPointer) + end + end + + describe "#reset_fds" do + after { multi.method(:reset_fds).call } + + it "resets @fd_read" do + expect(multi.instance_variable_get(:@fd_read)).to receive(:clear) + end + + it "resets @fd_write" do + expect(multi.instance_variable_get(:@fd_write)).to receive(:clear) + end + + it "resets @fd_excep" do + expect(multi.instance_variable_get(:@fd_excep)).to receive(:clear) + end + end + + describe "#check" do + it { skip("untested") } + end + + describe "#run" do + it { skip("untested") } + end + + describe "#trigger" do + it "calls multi perform" do + expect(Ethon::Curl).to receive(:multi_perform) + multi.send(:trigger, pointer) + end + + it "sets running count" do + multi.instance_variable_set(:@running_count, nil) + multi.send(:trigger, pointer) + expect(multi.instance_variable_get(:@running_count)).to_not be_nil + end + + it "returns multi perform code" do + expect(Ethon::Curl).to receive(:multi_perform).and_return(:ok) + expect(multi.send(:trigger, pointer)).to eq(:ok) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/options_spec.rb new file mode 100644 index 0000000..b4832c6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/options_spec.rb @@ -0,0 +1,182 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Options do + let(:multi) { Ethon::Multi.new } + + [ + :maxconnects, :pipelining, :socketdata, :socketfunction, + :timerdata, :timerfunction, :max_total_connections + ].each do |name| + describe "#{name}=" do + it "responds_to" do + expect(multi).to respond_to("#{name}=") + end + + it "sets option" do + expect(Ethon::Curl).to receive(:set_option).with(name, anything, anything, anything) + multi.method("#{name}=").call(1) + end + end + end + + context "socket_action mode" do + let(:multi) { Ethon::Multi.new(execution_mode: 
:socket_action) } + + describe "#socketfunction callbacks" do + it "allows multi_code return values" do + calls = [] + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + calls << what + :ok + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect(calls).to eq([]) + 5.times do + multi.socket_action + break unless calls.empty? + sleep 0.1 + end + expect(calls.last).to eq(:in).or(eq(:out)) + multi.delete(easy) + expect(calls.last).to eq(:remove) + end + + it "allows integer return values (compatibility)" do + called = false + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + called = true + 0 + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + 5.times do + multi.socket_action + break if called + sleep 0.1 + end + multi.delete(easy) + + expect(called).to be_truthy + end + + it "errors on invalid return codes" do + called = false + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + called = true + "hi" + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect { + 5.times do + multi.socket_action + break if called + sleep 0.1 + end + }.to raise_error(ArgumentError) + expect { multi.delete(easy) }.to raise_error(ArgumentError) + end + end + + describe "#timerfunction callbacks" do + it "allows multi_code return values" do + calls = [] + multi.timerfunction = proc do |handle, timeout_ms, userp| + calls << timeout_ms + :ok + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + expect(calls.last).to be >= 0 # adds an immediate timeout + + multi.delete(easy) + expect(calls.last).to eq(-1) # cancels the timer + end + + it "allows integer return values (compatibility)" do + called = false + multi.timerfunction = proc do |handle, timeout_ms, userp| + called = true + 0 + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + multi.socket_action + multi.delete(easy) + + expect(called).to be_truthy + end + + it "errors on invalid return codes" do + called = false + multi.timerfunction = proc do |handle, timeout_ms, userp| + called = true + "hi" + end + + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + expect { multi.add(easy) }.to raise_error(ArgumentError) + end + end + end + + describe "#value_for" do + context "when option in bool" do + context "when value true" do + let(:value) { true } + + it "returns 1" do + expect(multi.method(:value_for).call(value, :bool)).to eq(1) + end + end + + context "when value false" do + let(:value) { false } + + it "returns 0" do + expect(multi.method(:value_for).call(value, :bool)).to eq(0) + end + end + end + + + context "when value in int" do + let(:value) { "2" } + + it "returns value casted to int" do + expect(multi.method(:value_for).call(value, :int)).to eq(2) + end + end + + context "when value in unspecific_options" do + context "when value a string" do + let(:value) { "www.example.\0com" } + + it "returns zero byte escaped string" do + expect(multi.method(:value_for).call(value, nil)).to eq("www.example.\\0com") + end + end + + context "when value not a string" do + let(:value) { 1 } + + it "returns value" do + expect(multi.method(:value_for).call(value, nil)).to eq(1) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/stack_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/stack_spec.rb 
new file mode 100644 index 0000000..5eb5900 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi/stack_spec.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi::Stack do + let(:multi) { Ethon::Multi.new } + let(:easy) { Ethon::Easy.new } + + describe "#add" do + context "when easy already added" do + before { multi.add(easy) } + + it "returns nil" do + expect(multi.add(easy)).to be_nil + end + end + + context "when easy new" do + it "adds easy to multi" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:ok) + multi.add(easy) + end + + it "adds easy to easy_handles" do + multi.add(easy) + expect(multi.easy_handles).to include(easy) + end + end + + context "when multi_add_handle fails" do + it "raises multi add error" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:bad_easy_handle) + expect{ multi.add(easy) }.to raise_error(Ethon::Errors::MultiAdd) + end + end + + context "when multi cleaned up before" do + it "raises multi add error" do + Ethon::Curl.multi_cleanup(multi.handle) + expect{ multi.add(easy) }.to raise_error(Ethon::Errors::MultiAdd) + end + end + end + + describe "#delete" do + context "when easy in easy_handles" do + before { multi.add(easy) } + + it "deletes easy from multi" do + expect(Ethon::Curl).to receive(:multi_remove_handle).and_return(:ok) + multi.delete(easy) + end + + it "deletes easy from easy_handles" do + multi.delete(easy) + expect(multi.easy_handles).to_not include(easy) + end + end + + context "when easy is not in easy_handles" do + it "does nothing" do + expect(Ethon::Curl).to receive(:multi_add_handle).and_return(:ok) + multi.add(easy) + end + + it "adds easy to easy_handles" do + multi.add(easy) + expect(multi.easy_handles).to include(easy) + end + end + + context "when multi_remove_handle fails" do + before { multi.add(easy) } + + it "raises multi remove error" do + expect(Ethon::Curl).to receive(:multi_remove_handle).and_return(:bad_easy_handle) + expect{ multi.delete(easy) }.to raise_error(Ethon::Errors::MultiRemove) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi_spec.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi_spec.rb new file mode 100644 index 0000000..a483357 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/ethon/multi_spec.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true +require 'spec_helper' + +describe Ethon::Multi do + describe ".new" do + it "inits curl" do + expect(Ethon::Curl).to receive(:init) + Ethon::Multi.new + end + + context "with default options" do + it "allows running #perform with the default execution_mode" do + Ethon::Multi.new.perform + end + + it "refuses to run #socket_action" do + expect { Ethon::Multi.new.socket_action }.to raise_error(ArgumentError) + end + end + + context "when options not empty" do + context "when pipelining is set" do + let(:options) { { :pipelining => true } } + + it "sets pipelining" do + expect_any_instance_of(Ethon::Multi).to receive(:pipelining=).with(true) + Ethon::Multi.new(options) + end + end + + context "when execution_mode option is :socket_action" do + let(:options) { { :execution_mode => :socket_action } } + let(:multi) { Ethon::Multi.new(options) } + + it "refuses to run #perform" do + expect { multi.perform }.to raise_error(ArgumentError) + end + + it "allows running #socket_action" do + multi.socket_action + end + end + end + end + + describe "#socket_action" do + let(:options) { { 
:execution_mode => :socket_action } } + let(:select_state) { { :readers => [], :writers => [], :timeout => 0 } } + let(:multi) { + multi = Ethon::Multi.new(options) + multi.timerfunction = proc do |handle, timeout_ms, userp| + timeout_ms = nil if timeout_ms == -1 + select_state[:timeout] = timeout_ms + :ok + end + multi.socketfunction = proc do |handle, sock, what, userp, socketp| + case what + when :remove + select_state[:readers].delete(sock) + select_state[:writers].delete(sock) + when :in + select_state[:readers].push(sock) unless select_state[:readers].include? sock + select_state[:writers].delete(sock) + when :out + select_state[:readers].delete(sock) + select_state[:writers].push(sock) unless select_state[:writers].include? sock + when :inout + select_state[:readers].push(sock) unless select_state[:readers].include? sock + select_state[:writers].push(sock) unless select_state[:writers].include? sock + else + raise ArgumentError, "invalid value for 'what' in socketfunction callback" + end + :ok + end + multi + } + + def fds_to_ios(fds) + fds.map do |fd| + IO.for_fd(fd).tap { |io| io.autoclose = false } + end + end + + def perform_socket_action_until_complete + multi.socket_action # start things off + + while multi.ongoing? + readers, writers, _ = IO.select( + fds_to_ios(select_state[:readers]), + fds_to_ios(select_state[:writers]), + [], + select_state[:timeout] + ) + + to_notify = Hash.new { |hash, key| hash[key] = [] } + unless readers.nil? + readers.each do |reader| + to_notify[reader] << :in + end + end + unless writers.nil? + writers.each do |writer| + to_notify[writer] << :out + end + end + + to_notify.each do |io, readiness| + multi.socket_action(io, readiness) + end + + # if we didn't have anything to notify, then we timed out + multi.socket_action if to_notify.empty? + end + ensure + multi.easy_handles.dup.each do |h| + multi.delete(h) + end + end + + it "supports an end-to-end request" do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/" + multi.add(easy) + + perform_socket_action_until_complete + + expect(multi.ongoing?).to eq(false) + end + + it "supports multiple concurrent requests" do + handles = [] + 10.times do + easy = Ethon::Easy.new + easy.url = "http://localhost:3001/?delay=1" + multi.add(easy) + handles << easy + end + + start = Time.now + perform_socket_action_until_complete + duration = Time.now - start + + # these should have happened concurrently + expect(duration).to be < 2 + expect(multi.ongoing?).to eq(false) + + handles.each do |handle| + expect(handle.response_code).to eq(200) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/spec_helper.rb new file mode 100644 index 0000000..55bc9ec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/spec_helper.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true +$LOAD_PATH.unshift(File.dirname(__FILE__)) +$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), "..", "lib")) + +require 'bundler' +Bundler.setup +require "ethon" +require 'rspec' + +if defined? 
require_relative + require_relative 'support/localhost_server' + require_relative 'support/server' +else + require 'support/localhost_server' + require 'support/server' +end + +# Ethon.logger = Logger.new($stdout).tap do |log| +# log.level = Logger::DEBUG +# end + +RSpec.configure do |config| + # config.order = :rand + + config.before(:suite) do + LocalhostServer.new(TESTSERVER.new, 3001) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/localhost_server.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/localhost_server.rb new file mode 100644 index 0000000..a7b119a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/localhost_server.rb @@ -0,0 +1,95 @@ +# frozen_string_literal: true +require 'rack' +require 'rack/handler/webrick' +require 'net/http' + +# The code for this is inspired by Capybara's server: +# http://github.com/jnicklas/capybara/blob/0.3.9/lib/capybara/server.rb +class LocalhostServer + READY_MESSAGE = "Server ready" + + class Identify + def initialize(app) + @app = app + end + + def call(env) + if env["PATH_INFO"] == "/__identify__" + [200, {}, [LocalhostServer::READY_MESSAGE]] + else + @app.call(env) + end + end + end + + attr_reader :port + + def initialize(rack_app, port = nil) + @port = port || find_available_port + @rack_app = rack_app + concurrently { boot } + wait_until(10, "Boot failed.") { booted? } + end + + private + + def find_available_port + server = TCPServer.new('127.0.0.1', 0) + server.addr[1] + ensure + server.close if server + end + + def boot + # Use WEBrick since it's part of the ruby standard library and is available on all ruby interpreters. + options = { :Port => port } + options.merge!(:AccessLog => [], :Logger => WEBrick::BasicLog.new(StringIO.new)) unless ENV['VERBOSE_SERVER'] + Rack::Handler::WEBrick.run(Identify.new(@rack_app), **options) + end + + def booted? + res = ::Net::HTTP.get_response("localhost", '/__identify__', port) + if res.is_a?(::Net::HTTPSuccess) or res.is_a?(::Net::HTTPRedirection) + return res.body == READY_MESSAGE + end + rescue Errno::ECONNREFUSED, Errno::EBADF + return false + end + + def concurrently + if should_use_subprocess? + pid = Process.fork do + trap(:INT) { ::Rack::Handler::WEBrick.shutdown } + yield + exit # manually exit; otherwise this sub-process will re-run the specs that haven't run yet. + end + + at_exit do + Process.kill('INT', pid) + begin + Process.wait(pid) + rescue Errno::ECHILD + # ignore this error...I think it means the child process has already exited. + end + end + else + Thread.new { yield } + end + end + + def should_use_subprocess? 
+ # !ENV['THREADED'] + false + end + + def wait_until(timeout, error_message, &block) + start_time = Time.now + + while true + return if yield + raise TimeoutError.new(error_message) if (Time.now - start_time) > timeout + sleep(0.05) + end + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/server.rb b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/server.rb new file mode 100644 index 0000000..ab8ffd3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/ethon-0.16.0/spec/support/server.rb @@ -0,0 +1,115 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true +require 'json' +require 'zlib' +require 'sinatra/base' + +TESTSERVER = Sinatra.new do + set :logging, nil + + fail_count = 0 + + post '/file' do + { + 'content-type' => params[:file][:type], + 'filename' => params[:file][:filename], + 'content' => params[:file][:tempfile].read, + 'request-content-type' => request.env['CONTENT_TYPE'] + }.to_json + end + + get '/multiple-headers' do + [200, { 'Set-Cookie' => %w[ foo bar ], 'Content-Type' => 'text/plain' }, ['']] + end + + get '/fail/:number' do + if fail_count >= params[:number].to_i + "ok" + else + fail_count += 1 + error 500, "oh noes!" + end + end + + get '/fail_forever' do + error 500, "oh noes!" + end + + get '/redirect' do + redirect '/' + end + + post '/redirect' do + redirect '/' + end + + get '/bad_redirect' do + redirect '/bad_redirect' + end + + get '/auth_basic/:username/:password' do + @auth ||= Rack::Auth::Basic::Request.new(request.env) + # Check that we've got a basic auth, and that its credentials match the ones + # provided in the request + if @auth.provided? && @auth.basic? && @auth.credentials == [ params[:username], params[:password] ] + # auth is valid - confirm it + true + else + # invalid auth - request the authentication + response['WWW-Authenticate'] = %(Basic realm="Testing HTTP Auth") + throw(:halt, [401, "Not authorized\n"]) + end + end + + get '/auth_ntlm' do + # we're just checking for the existence of the NTLM auth header here. Its validation
+ # is too troublesome and really doesn't bother us much; it's up to libcurl to make + # it valid + response['WWW-Authenticate'] = 'NTLM' + is_ntlm_auth = /^NTLM/ =~ request.env['HTTP_AUTHORIZATION'] + true if is_ntlm_auth + throw(:halt, [401, "Not authorized\n"]) if !is_ntlm_auth + end + + get '/gzipped' do + req_env = request.env.to_json + z = Zlib::Deflate.new + gzipped_env = z.deflate(req_env, Zlib::FINISH) + z.close + response['Content-Encoding'] = 'gzip' + gzipped_env + end + + get '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + request.env.merge!(:body => request.body.read).to_json + end + + head '/**' do + sleep params["delay"].to_i if params.has_key?("delay") + end + + put '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + post '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + delete '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + patch '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + options '/**' do + request.env.merge!(:body => request.body.read).to_json + end + + route 'PURGE', '/**' do + request.env.merge!(:body => request.body.read).to_json + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTING.md b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTING.md new file mode 100644 index 0000000..2a59feb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTING.md @@ -0,0 +1,22 @@ +## Getting Involved + +New contributors are always welcome; when in doubt, please ask questions. We strive to be an open and welcoming community. Please be nice to one another. + +### Coding + +* Pick a task: + * Offer feedback on open [pull requests](https://github.com/excon/excon/pulls). + * Review open [issues](https://github.com/excon/excon/issues) for things to help on. + * [Create an issue](https://github.com/excon/excon/issues/new) to start a discussion on additions or features. +* Fork the project, add your changes and tests to cover them in a topic branch. +* Commit your changes and rebase against `excon/excon` to ensure everything is up to date. +* [Submit a pull request](https://github.com/excon/excon/compare/). + +### Non-Coding + +* Work for [twitter](http://twitter.com)? I'd love to reclaim the unused [@excon](http://twitter.com/excon) account! +* Offer feedback on open [issues](https://github.com/excon/excon/issues). +* Write and help edit [documentation](https://github.com/excon/excon.github.com). +* Translate [documentation](https://github.com/excon/excon.github.com) into other languages. +* Organize or volunteer at events. +* Discuss other ideas for contribution with [geemus](mailto:geemus+excon@gmail.com). diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTORS.md b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTORS.md new file mode 100644 index 0000000..f8e2dd1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/CONTRIBUTORS.md @@ -0,0 +1,182 @@ +* Aaron Stone +* Adam Avilla +* Adam Esterline +* Alexander Sandström +* Alexandr Burov +* Andrew Katz +* Andrew Metcalf +* Andrew Sullivan Cant +* André Diego Piske +* Andy Delcambre +* Anshul Khandelwal +* Anton Chuchkalov +* Antonio Terceiro +* Ash Wilson +* Atul Bhosale +* Bart de Water +* Ben Burkert +* Benedikt Böhm +* Bill Mill +* Bo Jeanes +* Brandur +* Brian D.
Burns +* Brian Hartsock +* Bryan Paxton +* Caio Chassot +* Caius Durling +* Carl Hörberg +* Carlos Sanchez +* Casper Thomsen +* Chris Hanks +* Christoph Rieß +* Christophe Taton +* Claudio Poli +* Craig Shannon +* Damien Mathieu +* Dan Hensgen +* Dan Peterson +* Dan Prince +* Dane Harrigan +* Daniel Berger +* Dave Myron +* Dave Newton +* Dave Vasilevsky +* David Biehl +* David Taylor +* Dimitrij Denissenko +* Dominik Richter +* Doug McInnes +* Esteban Pastorino +* Eugene Howe +* Evan Phoenix +* Fabian Wiesel +* Federico Ravasio +* Felix Wolfsteller +* Glenn Pratt +* Graeme Nelson +* Grey Baker +* Guillaume Balaine +* Hakan Ensari +* Hiroshi Hatake +* Ian Neubert +* Igor Fedoronchuk +* Jacob Atzen +* James Cox +* James Watling +* Jean Mertz +* Jeremy Hinegardner +* Jesse Kempf +* Jessica Jiang +* Joe Rafaniello +* John Keiser +* John Leach +* Jonas Pfenniger +* Jonathan Dance +* Jonathan Roes +* Joshua B. Smith +* Joshua Gross +* Joshua Mckinney +* Joshua Napoli +* Kelly Mahan +* Kensuke Nagae +* Kimmo Lehto +* Koen Rouwhorst +* Konstantin Shabanov +* Kyle Purkiss +* Kyle Rames +* Lewis Marshall +* Lincoln Stoll +* Louis Sobel +* Mahemoff +* Marco Costa +* Markus Bucher +* Mathias Meyer +* Matt Gauger +* Matt Palmer +* Matt Sanders +* Matt Snyder +* Matt Todd +* Maurice Schreiber +* Max Lincoln +* Michael Brodhead +* Michael Hale +* Michael Rowe +* Michael Rykov +* Mike Heffner +* Milovan Zogovic +* Myron Marston +* Nathan Long +* Nathan Sutton +* Nick Osborn +* Nicolas Leger +* Nicolas Sanguinetti +* Paul Gideon Dann +* Pavel +* Pavel Valena +* Peter Meier +* Peter Weldon +* Phil Ross +* Raul Murciano +* Richard Godbee +* Richard Ramsden +* Rohan Mendon +* Ruslan Korolev +* Ruslan Kyrychuk +* Ryan Bigg +* Ryan Mohr +* Ryan Schlesinger +* Ryoji Yoshioka +* Sam +* Sam Lehman +* Sam Withrow +* Scott Gonyea +* Scott Walkinshaw +* Sean Cribbs +* Sergio Rubio +* Shai Rosenfeld +* Stan Hu +* Stefan Merettig +* Stephen Chu +* Swanand Pagnis +* Terry Howe +* Thom Mahoney & Josh Lane +* Thom May +* Tim Carey-Smith +* Timothée Peignier +* Tobias Schmidt +* Todd Lunter +* Tom Maher +* Trym Skaar +* Tuomas Silen +* Victor Costan +* Viven +* Vít Ondruch +* Wesley Beary +* Yusuke Nakamura +* Zach Anker +* chrisrhoden +* dependabot[bot] +* dickeyxxx +* geemus +* geemus (Wesley Beary) +* ggoodale +* ivan.filenko +* jasquat +* karimb +* marios +* mkb +* nathannaveen +* ojab +* patrick brisbin +* pavel +* phiggins +* rin_ne +* rinrinne +* rkyrychuk +* shale +* sshaw +* starbelly +* twrodriguez +* wsnarski +* zimbatm \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/LICENSE.md new file mode 100644 index 0000000..05f6718 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2009-2019 [CONTRIBUTORS.md](https://github.com/excon/excon/blob/master/CONTRIBUTORS.md) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/README.md b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/README.md new file mode 100644 index 0000000..a68bb28 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/README.md @@ -0,0 +1,523 @@ +# excon + +Usable, fast, simple Ruby HTTP 1.1 + +Excon was designed to be simple, fast and performant. It works great as a general HTTP(s) client and is particularly well suited to usage in API clients. + +[![Build Status](https://github.com/excon/excon/actions/workflows/ruby.yml/badge.svg)](https://github.com/excon/excon/actions/workflows/ruby.yml) +[![Gem Version](https://badge.fury.io/rb/excon.svg)](https://badge.fury.io/rb/excon) + +- [Getting Started](#getting-started) +- [Options](#options) +- [Chunked Requests](#chunked-requests) +- [Pipelining Requests](#pipelining-requests) +- [Streaming Responses](#streaming-responses) +- [Proxy Support](#proxy-support) +- [Reusable ports](#reusable-ports) +- [Unix Socket Support](#unix-socket-support) +- [Stubs](#stubs) +- [Instrumentation](#instrumentation) +- [HTTPS client certificate](#https-client-certificate) +- [HTTPS/SSL Issues](#httpsssl-issues) +- [Getting Help](#getting-help) +- [Contributing](#contributing) +- [Plugins and Middlewares](#plugins-and-middlewares) +- [License](#license) + +## Getting Started + +Install the gem. + +``` +$ sudo gem install excon +``` + +Require with rubygems. + +```ruby +require 'rubygems' +require 'excon' +``` + +The easiest way to get started is by using one-off requests. Supported one-off request methods are `connect`, `delete`, `get`, `head`, `options`, `post`, `put`, and `trace`. Requests return a response object which has `body`, `headers`, `remote_ip` and `status` attributes. + +```ruby +response = Excon.get('http://geemus.com') +response.body # => "..." +response.headers # => {...} +response.remote_ip # => "..." +response.status # => 200 +``` + +For API clients or other ongoing usage, reuse a connection across multiple requests to share options and improve performance. + +```ruby +connection = Excon.new('http://geemus.com') +get_response = connection.get +post_response = connection.post(:path => '/foo') +delete_response = connection.delete(:path => '/bar') +``` + +By default, each connection is non-persistent. This means that each request made against a connection behaves like a +one-off request. Each request will establish a socket connection to the server, then close the socket once the request +is complete. + +To use a persistent connection, use the `:persistent` option: + +```ruby +connection = Excon.new('http://geemus.com', :persistent => true) +``` + +The initial request will establish a socket connection to the server and leave the socket open. Subsequent requests +will reuse that socket. You may call `Connection#reset` at any time to close the underlying socket, and the next request +will establish a new socket connection. + +You may also control persistence on a per-request basis by setting the `:persistent` option for each request. 
+
+```ruby
+connection = Excon.new('http://geemus.com') # non-persistent by default
+connection.get # socket established, then closed
+connection.get(:persistent => true) # socket established, left open
+connection.get(:persistent => true) # socket reused
+connection.get # socket reused, then closed
+
+connection = Excon.new('http://geemus.com', :persistent => true)
+connection.get # socket established, left open
+connection.get(:persistent => false) # socket reused, then closed
+connection.get(:persistent => false) # socket established, then closed
+connection.get # socket established, left open
+connection.get # socket reused
+```
+
+Note that sending a request with `:persistent => false` to close the socket will also send `Connection: close` to inform
+the server the connection is no longer needed. `Connection#reset` will simply close our end of the socket.
+
+## Options
+
+Both one-off and persistent connections support many other options. The final options for a request are built up by starting with `Excon.defaults`, then merging in options from the connection and finally merging in any request options. In this way you have plenty of options on where and how to set options and can easily set up connections or defaults to match common options for a particular endpoint.
+
+Here are a few common examples:
+
+```ruby
+# Output debug info, similar to ENV['EXCON_DEBUG']
+connection = Excon.new('http://geemus.com/', :debug => true)
+
+# Custom headers
+Excon.get('http://geemus.com', :headers => {'Authorization' => 'Basic 0123456789ABCDEF'})
+connection.get(:headers => {'Authorization' => 'Basic 0123456789ABCDEF'})
+
+# Changing query strings
+connection = Excon.new('http://geemus.com/')
+connection.get(:query => {:foo => 'bar'})
+
+# POST body encoded with application/x-www-form-urlencoded
+Excon.post('http://geemus.com',
+  :body => 'language=ruby&class=fog',
+  :headers => { "Content-Type" => "application/x-www-form-urlencoded" })
+
+# same again, but using URI to build the body of parameters
+Excon.post('http://geemus.com',
+  :body => URI.encode_www_form(:language => 'ruby', :class => 'fog'),
+  :headers => { "Content-Type" => "application/x-www-form-urlencoded" })
+
+# request takes a method option, accepting either a symbol or string
+connection.request(:method => :get)
+connection.request(:method => 'GET')
+
+# expect one or more status codes, or raise an error
+connection.request(:expects => [200, 201], :method => :get)
+
+# use basic authentication by supplying credentials in the URL or as parameters
+connection = Excon.new('http://username:password@secure.geemus.com')
+# Note: the username & password are used unescaped in the request, so you should provide escaped values here,
+# i.e. instead of `password: 'pa%%word'` you should use `password: Excon::Utils.escape_uri('pa%%word')`,
+# which returns `pa%25%25word`
+connection = Excon.new('http://secure.geemus.com',
+  :user => 'username', :password => 'password')
+
+# use custom uri parser
+require 'addressable/uri'
+connection = Excon.new('http://geemus.com/', uri_parser: Addressable::URI)
+```
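+
+To make the merging order described above concrete, here is a minimal sketch; the header and timeout values are arbitrary placeholders:
+
+```ruby
+# Excon.defaults -> connection options -> request options; the most specific wins
+Excon.defaults[:headers] = { 'User-Agent' => 'my-app/1.0' } # applies to every request
+
+connection = Excon.new('http://geemus.com', :read_timeout => 30) # connection level
+connection.get                     # read_timeout 30, plus the default header
+connection.get(:read_timeout => 5) # the request-level value wins for this call only
+```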
+
+Compared to web browsers and other http client libraries, e.g. curl, Excon is a bit more low-level and doesn't assume much by default. If you are seeing different results compared to other clients, the following options might help:
+
+```ruby
+# opt-in to omitting port from http:80 and https:443
+connection = Excon.new('http://geemus.com/', :omit_default_port => true)
+
+# accept gzip encoding
+connection = Excon.new('http://geemus.com/', :headers => { "Accept-Encoding" => "gzip" })
+
+# turn off peer verification (less secure)
+Excon.defaults[:ssl_verify_peer] = false
+connection = Excon.new('https://...')
+```
+
+## Timeouts and Retries
+
+You can modify timeouts and define whether and how many (blocking) retries Excon should attempt if errors occur.
+
+```ruby
+# this request can be repeated safely, so retry on errors up to 4 times
+connection.request(:idempotent => true)
+
+# this request can be repeated safely, retry up to 6 times
+connection.request(:idempotent => true, :retry_limit => 6)
+
+# this request can be repeated safely, retry up to 6 times and sleep 5 seconds
+# in between each retry
+connection.request(:idempotent => true, :retry_limit => 6, :retry_interval => 5)
+
+# specify the errors on which to retry (default Timeout, Socket, HTTPStatus)
+# only retry on timeouts
+connection.request(:idempotent => true, :retry_limit => 6, :retry_interval => 5, :retry_errors => [Excon::Error::Timeout])
+
+# set longer read_timeout (default is 60 seconds)
+connection.request(:read_timeout => 360)
+
+# set longer write_timeout (default is 60 seconds)
+connection.request(:write_timeout => 360)
+
+# Enable the socket option TCP_NODELAY on the underlying socket.
+#
+# This can improve response time when sending frequent short
+# requests in time-sensitive scenarios.
+#
+connection = Excon.new('http://geemus.com/', :tcp_nodelay => true)
+
+# set longer connect_timeout (default is 60 seconds)
+connection = Excon.new('http://geemus.com/', :connect_timeout => 360)
+
+# opt out of nonblocking operations for performance and/or as a workaround
+connection = Excon.new('http://geemus.com/', :nonblock => false)
+```
+
+## Chunked Requests
+
+You can make `Transfer-Encoding: chunked` requests by passing a block that will deliver chunks, delivering an empty chunk to signal completion.
+
+```ruby
+file = File.open('data')
+
+chunker = lambda do
+  # Excon.defaults[:chunk_size] defaults to 1048576, i.e. 1MB
+  # to_s will convert the nil received after everything is read to the final empty chunk
+  file.read(Excon.defaults[:chunk_size]).to_s
+end
+
+Excon.post('http://geemus.com', :request_block => chunker)
+
+file.close
+```
+
+Iterating in this way allows you to have more granular control over writes and to write things where you cannot calculate the overall length up front.
+
+## Pipelining Requests
+
+You can make use of HTTP pipelining to improve performance. Instead of the normal request/response cycle, pipelining sends a series of requests and then receives a series of responses. You can take advantage of this using the `requests` method, which takes an array of params, where each element is a hash like `request` would receive, and returns an array of responses.
+
+```ruby
+connection = Excon.new('http://geemus.com/')
+connection.requests([{:method => :get}, {:method => :get}])
+```
+
+By default, each call to `requests` will use a separate persistent socket connection. To make multiple `requests` calls
+using a single persistent connection, set `:persistent => true` when establishing the connection, as in the sketch below.
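+
+A minimal sketch of that, reusing one socket across batches (the URL is just a placeholder):
+
+```ruby
+connection = Excon.new('http://geemus.com/', :persistent => true)
+connection.requests([{:method => :get}, {:method => :get}]) # one socket for this batch
+connection.requests([{:method => :get}])                    # same socket is reused
+```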
+
+For large numbers of simultaneous requests, please consider using the `batch_requests` method. This will automatically slice up the requests into batches based on the file descriptor limit of your operating system. The results are the same as with the `requests` method, but batching can help prevent timeout errors.
+
+```ruby
+large_array_of_requests = [{:method => :get, :path => 'some_path'}, { ... }] # Hundreds of items
+connection.batch_requests(large_array_of_requests)
+```
+
+## Streaming Responses
+
+You can stream responses by passing a block that will receive each chunk.
+
+```ruby
+streamer = lambda do |chunk, remaining_bytes, total_bytes|
+  puts chunk
+  puts "Remaining: #{remaining_bytes.to_f / total_bytes * 100}%"
+end
+
+Excon.get('http://geemus.com', :response_block => streamer)
+```
+
+Iterating over each chunk will allow you to do work on the response incrementally without buffering the entire response first. For very large responses this can lead to significant memory savings.
+
+## Proxy Support
+
+You can specify a proxy URL that Excon will use with both HTTP and HTTPS connections:
+
+```ruby
+connection = Excon.new('http://geemus.com', :proxy => 'http://my.proxy:3128')
+connection.request(:method => 'GET')
+
+Excon.get('http://geemus.com', :proxy => 'http://my.proxy:3128')
+```
+
+The proxy URL must be fully specified, including scheme (e.g. "http://") and port.
+
+Proxy support must be set when establishing a connection object and cannot be overridden in individual requests.
+
+NOTE: Excon will use the `HTTP_PROXY` and `HTTPS_PROXY` environment variables. If set, they will take precedence over any `:proxy` option specified in code. If `HTTPS_PROXY` is not set, `HTTP_PROXY` will be used for both HTTP and HTTPS connections. To disable this behavior, set the `NO_PROXY` environment variable; the other proxy environment variables will then be disregarded.
+
+## Reusable ports
+
+For advanced cases where you'd like to reuse the local port assigned to the excon socket in another socket, use the `:reuseaddr` option.
+
+```ruby
+connection = Excon.new('http://geemus.com', :reuseaddr => true)
+connection.get
+
+s = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
+s.setsockopt(Socket::SOL_SOCKET, Socket::SO_REUSEADDR, true)
+if defined?(Socket::SO_REUSEPORT)
+  s.setsockopt(Socket::SOL_SOCKET, Socket::SO_REUSEPORT, true)
+end
+
+s.bind(Socket.pack_sockaddr_in(connection.local_port, connection.local_address))
+s.connect(Socket.pack_sockaddr_in(80, '1.2.3.4'))
+puts s.read
+s.close
+```
+
+## Unix Socket Support
+
+The Unix socket will work for one-off requests and multiuse connections. A Unix socket path must be provided separately from the resource path.
+
+```ruby
+connection = Excon.new('unix:///', :socket => '/tmp/unicorn.sock')
+connection.request(:method => :get, :path => '/ping')
+
+Excon.get('unix:///ping', :socket => '/tmp/unicorn.sock')
+```
+
+NOTE: Proxies will be ignored when using a Unix socket, since a Unix socket has to be local.
+
+## Stubs
+
+You can stub out requests for testing purposes by enabling mock mode on a connection.
+
+```ruby
+connection = Excon.new('http://example.com', :mock => true)
+```
+
+Or by enabling mock mode for a request.
+
+```ruby
+connection.request(:method => :get, :path => 'example', :mock => true)
+```
+
+Add stubs by providing the request attributes to match and response attributes to return. Response params can be specified as either a hash or a block that will be called with the request params.
+
+```ruby
+Excon.stub({}, {:body => 'body', :status => 200})
+Excon.stub({}, lambda {|request_params| {:body => request_params[:body], :status => 200}})
+```
+
+Omitted attributes are assumed to match, so this stub will match _any_ request and return an Excon::Response with a body of 'body' and status of 200.
+
+```ruby
+Excon.stub({ :scheme => 'https', :host => 'example.com', :path => /\/examples\/\d+/, :port => 443 }, { body: 'body', status: 200 })
+```
+
+The above code will stub this:
+
+```ruby
+Excon.get('https://example.com/examples/123', mock: true)
+```
+
+You can add whatever stubs you might like this way and they will be checked against in the order they were added. If none of them match, excon will raise an `Excon::Errors::StubNotFound` error to let you know.
+
+If you want to allow unstubbed requests without raising `StubNotFound`, set the `allow_unstubbed_requests` option either globally or per request.
+
+```ruby
+connection = Excon.new('http://example.com', :mock => true, :allow_unstubbed_requests => true)
+```
+
+To remove a previously defined stub, or all stubs:
+
+```ruby
+Excon.unstub({}) # remove first/oldest stub matching {}
+Excon.stubs.clear # remove all stubs
+```
+
+For example, if using RSpec for your test suite you can clear stubs after running each example:
+
+```ruby
+config.after(:each) do
+  Excon.stubs.clear
+end
+```
+
+You can also modify `Excon.defaults` to set a stub for all requests, so for a test suite you might do this:
+
+```ruby
+# Mock by default and stub any request as success
+config.before(:all) do
+  Excon.defaults[:mock] = true
+  Excon.stub({}, {:body => 'Fallback', :status => 200})
+  # Add your own stubs here or in specific tests...
+end
+```
+
+By default, stubs are shared globally. To make stubs unique to each thread, use `Excon.defaults[:stubs] = :local`.
+
+## Instrumentation
+
+Excon calls can be timed using the [ActiveSupport::Notifications](http://api.rubyonrails.org/classes/ActiveSupport/Notifications.html) API.
+
+```ruby
+connection = Excon.new(
+  'http://geemus.com',
+  :instrumentor => ActiveSupport::Notifications
+)
+```
+
+Excon will then instrument each request, retry, and error. The corresponding events are named `excon.request`, `excon.retry`, and `excon.error` respectively.
+
+```ruby
+ActiveSupport::Notifications.subscribe(/excon/) do |*args|
+  puts "Excon did stuff!"
+end
+```
+
+If you prefer to label each event with a namespace other than "excon", you may specify
+an alternate name in the constructor:
+
+```ruby
+connection = Excon.new(
+  'http://geemus.com',
+  :instrumentor => ActiveSupport::Notifications,
+  :instrumentor_name => 'my_app'
+)
+```
+
+Note: Excon's ActiveSupport::Notifications implementation uses the event format `<namespace>.<event>` (e.g. `excon.request`), which is the opposite of the Rails implementation.
+
+ActiveSupport provides a [subscriber](http://api.rubyonrails.org/classes/ActiveSupport/Subscriber.html) interface which lets you attach a subscriber to a namespace. Due to the incompatibility above, you won't be able to attach a subscriber to the "excon" namespace out of the box.
+
+If you want this functionality, you can use a simple adapter such as this one:
+
+```ruby
+class ExconToRailsInstrumentor
+  def self.instrument(name, datum, &block)
+    namespace, *event = name.split(".")
+    rails_name = [event, namespace].flatten.join(".")
+    ActiveSupport::Notifications.instrument(rails_name, datum, &block)
+  end
+end
+```
+
+If you don't want to add ActiveSupport to your application, simply define a class which implements the same `#instrument` method, like so:
+
+```ruby
+class SimpleInstrumentor
+  class << self
+    attr_accessor :events
+
+    def instrument(name, params = {}, &block)
+      puts "#{name} just happened."
+      yield if block_given?
+    end
+  end
+end
+```
+
+The `#instrument` method will be called for each HTTP request, response, retry, and error.
+
+For debugging purposes you can also use `Excon::StandardInstrumentor` to output all events to stderr. This can also be specified by setting the `EXCON_DEBUG` ENV var.
+
+See [the documentation for ActiveSupport::Notifications](http://api.rubyonrails.org/classes/ActiveSupport/Notifications.html) for more detail on using the subscription interface. See excon's [instrumentation_tests.rb](https://github.com/excon/excon/blob/master/tests/middlewares/instrumentation_tests.rb) for more examples of instrumenting excon.
+
+## HTTPS client certificate
+
+You can supply a client side certificate if the server requires it for authentication:
+
+```ruby
+connection = Excon.new('https://example.com',
+                       client_cert: 'mycert.pem',
+                       client_key: 'mycert.key',
+                       client_key_pass: 'my pass phrase')
+```
+
+`client_key_pass` is optional.
+
+Optionally, you can also pass the whole chain by passing the extra certificates through `client_chain`:
+
+```ruby
+connection = Excon.new('https://example.com',
+                       client_cert: 'mycert.pem',
+                       client_chain: 'mychain.pem',
+                       client_key: 'mycert.key')
+```
+
+If you have already loaded the certificate, key and chain into memory, then pass them through like:
+
+```ruby
+client_cert_data = File.read 'mycert.pem'
+client_chain_data = File.read 'mychain.pem'
+client_key_data = File.read 'mycert.key'
+
+connection = Excon.new('https://example.com',
+                       client_cert_data: client_cert_data,
+                       client_chain_data: client_chain_data,
+                       client_key_data: client_key_data)
+```
+
+This can be useful if your program has already loaded the assets through
+another mechanism (e.g. a remote API call to a secure K/V system like Vault).
+
+## HTTPS/SSL Issues
+
+By default excon will try to verify peer certificates when using HTTPS. Unfortunately on some operating systems the defaults will not work. This will likely manifest itself as something like `Excon::Errors::CertificateError: SSL_connect returned=1 ...`
+
+If you have the misfortune of running into this problem, you have a couple of options. If you have certificates but they aren't being auto-discovered, you can specify the path to your certificates:
+
+```ruby
+Excon.defaults[:ssl_ca_path] = '/path/to/certs'
+```
+
+Failing that, you can turn off peer verification (less secure):
+
+```ruby
+Excon.defaults[:ssl_verify_peer] = false
+```
+
+Either of these should allow you to work around the socket error and continue with your work.
+
+## Getting Help
+
+- Ask specific questions on [Stack Overflow](http://stackoverflow.com/questions/tagged/excon).
+- Report bugs and discuss potential features in [Github issues](https://github.com/excon/excon/issues).
+
+## Contributing
+
+Please refer to [CONTRIBUTING.md](https://github.com/excon/excon/blob/master/CONTRIBUTING.md).
+
+## Plugins and Middlewares
+
+Using Excon's [Middleware system][middleware], you can easily extend Excon's
+functionality with your own; as one example, a minimal request-logging middleware is sketched below.
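+
+This is only a sketch, assuming the hook API from `lib/excon/middlewares/base.rb` (each middleware passes a datum hash along the stack via `request_call`/`response_call`); `RequestLogger` and its output format are hypothetical:
+
+```ruby
+class RequestLogger < Excon::Middleware::Base
+  def request_call(datum)
+    # datum carries the request params (:method, :path, :host, ...)
+    puts "--> #{datum[:method].to_s.upcase} #{datum[:path]}"
+    @stack.request_call(datum)
+  end
+
+  def response_call(datum)
+    # datum[:response] is present once a response has been read
+    puts "<-- #{datum[:response][:status]}" if datum.key?(:response)
+    @stack.response_call(datum)
+  end
+end
+
+connection = Excon.new(
+  'http://geemus.com',
+  :middlewares => Excon.defaults[:middlewares] + [RequestLogger]
+)
+```
+
+Appending to `Excon.defaults[:middlewares]` keeps the default stack intact and adds the logger at the end for this connection only.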
+
+The following plugins extend Excon in their own way:
+
+- [excon-addressable](https://github.com/JeanMertz/excon-addressable)
+
+  Sets [addressable](https://github.com/sporkmonger/addressable) as the default
+  URI parser, and adds support for [URI templating][templating].
+
+- [excon-hypermedia](https://github.com/JeanMertz/excon-hypermedia)
+
+  Teaches Excon to talk with [HyperMedia APIs][hypermedia], allowing you to use
+  all of Excon's functionality while traversing APIs in an easy and
+  self-discovering way.
+
+## License
+
+Please refer to [LICENSE.md](https://github.com/excon/excon/blob/master/LICENSE.md).
+
+[middleware]: lib/excon/middlewares/base.rb
+[hypermedia]: https://en.wikipedia.org/wiki/HATEOAS
+[templating]: https://www.rfc-editor.org/rfc/rfc6570.txt
diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/data/cacert.pem b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/data/cacert.pem
new file mode 100644
index 0000000..2ae7b6c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/data/cacert.pem
@@ -0,0 +1,3372 @@
+##
+## Bundle of CA Root Certificates
+##
+## Certificate data from Mozilla as of: Tue Jan 10 04:12:06 2023 GMT
+##
+## This is a bundle of X.509 certificates of public Certificate Authorities
+## (CA). These were automatically extracted from Mozilla's root certificates
+## file (certdata.txt). This file can be found in the mozilla source tree:
+## https://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt
+##
+## It contains the certificates in PEM format and therefore
+## can be directly used with curl / libcurl / php_curl, or with
+## an Apache+mod_ssl webserver for SSL client authentication.
+## Just configure this file as the SSLCACertificateFile.
+##
+## Conversion done with mk-ca-bundle.pl version 1.29.
+## SHA256: 90c470e705b4b5f36f09684dc50e2b79c8b86989a848b62cd1a7bd6460ee65f6 +## + + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME 
+uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK 
+Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp 
+p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp 
+dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo 
+bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG 
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT 
+BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG 
+EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X 
+4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r 
+GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE
+xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx
+gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy
+sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD
+BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+certSIGN ROOT CA
+================
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD
+VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa
+Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE
+CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I
+JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH
+rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2
+ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943
+AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
+Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB
+AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8
+SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0
+x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz
+TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+NetLock Arany (Class Gold) Főtanúsítvány
+========================================
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
+A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
+dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
+cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO
+ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6
+c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu
+0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw
+/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk
+H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw
+fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1
+neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW
+qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta
+YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna
+NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu
+dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+Hongkong Post Root CA 1
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT
+DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx
+NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n
+IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX 
+I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl 
+osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC 
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + 
+AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 
+/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG 
+EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 
+jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR 
+5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv 
+KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE 
+koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 
+sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm 
+zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD 
+ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN +ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI 
+hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t +oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o +MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U 
+VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 
+======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa +vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud 
+DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn +dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg +tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK 
+ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW +FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c +J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV +HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 
+yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix +uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn +ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL +4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A 
+mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx +OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- 
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GB CA +=============================== +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBtMQswCQYDVQQG +EwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAw +MzJaFw0zOTEyMDExNTEwMzFaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEds +b2JhbCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3HEokKtaX +scriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGxWuR51jIjK+FTzJlFXHtP +rby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk +9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNku7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4o +Qnc/nSMbsrY9gBQHTC5P99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvg +GUpuuy9rM2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZI +hvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrghcViXfa43FK8+5/ea4n32cZiZBKpD +dHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0 +VQreUGdNZtGn//3ZwLWoo4rOZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEui +HZeeevJuQHHfaPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +SZAFIR ROOT CA2 +=============== +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQELBQAwUTELMAkG 
+A1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6ZW5pb3dhIFMuQS4xGDAWBgNV +BAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkwNzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJ +BgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYD +VQQDDA9TWkFGSVIgUk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5Q +qEvNQLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT3PSQ1hNK +DJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw3gAeqDRHu5rr/gsUvTaE +2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr63fE9biCloBK0TXC5ztdyO4mTp4CEHCdJ +ckm1/zuVnsHMyAHs6A6KCpbns6aH5db5BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwi +ieDhZNRnvDF5YTy7ykHNXGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0P +AQH/BAQDAgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsFAAOC +AQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw8PRBEew/R40/cof5 +O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOGnXkZ7/e7DDWQw4rtTw/1zBLZpD67 +oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCPoky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul +4+vJhaAlIDf7js4MNIThPIGyd05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6 ++/NNIxuZMzSgLvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +Certum Trusted Network CA 2 +=========================== +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCBgDELMAkGA1UE +BhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMuQS4xJzAlBgNVBAsTHkNlcnR1 +bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIGA1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29y +ayBDQSAyMCIYDzIwMTExMDA2MDgzOTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQ +TDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENB +IDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWADGSdhhuWZGc/IjoedQF9 +7/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+o +CgCXhVqqndwpyeI1B+twTUrWwbNWuKFBOJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40b +Rr5HMNUuctHFY9rnY3lEfktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2p +uTRZCr+ESv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1mo130 +GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02isx7QBlrd9pPPV3WZ +9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOWOZV7bIBaTxNyxtd9KXpEulKkKtVB +Rgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgezTv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pye +hizKV/Ma5ciSixqClnrDvFASadgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vM +BhBgu4M1t15n3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZI +hvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQF/xlhMcQSZDe28cmk4gmb3DW +Al45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTfCVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuA +L55MYIR4PSFk1vtBHxgP58l1cb29XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMo +clm2q8KMZiYcdywmdjWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tM +pkT/WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jbAoJnwTnb +w3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksqP/ujmv5zMnHCnsZy4Ypo +J/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Kob7a6bINDd82Kkhehnlt4Fj1F4jNy3eFm +ypnTycUm/Q1oBEauttmbjL4ZvrHG8hnjXALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLX +is7VmFxWlgPF7ncGNf/P5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7 +zAYspsbiDrW5viSP +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2015 +======================================================= +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcT 
+BkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0 +aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAx +MTIxWjCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMg +QWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNV +BAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIw +MTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDC+Kk/G4n8PDwEXT2QNrCROnk8Zlrv +bTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+eh +iGsxr/CL0BgzuNtFajT0AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+ +6PAQZe104S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06CojXd +FPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV9Cz82XBST3i4vTwr +i5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrDgfgXy5I2XdGj2HUb4Ysn6npIQf1F +GQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2 +fu/Z8VFRfS0myGlZYeCsargqNhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9mu +iNX6hME6wGkoLfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVdctA4GGqd83EkVAswDQYJKoZI +hvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0IXtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+ +D1hYc2Ryx+hFjtyp8iY/xnmMsVMIM4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrM +d/K4kPFox/la/vot9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+y +d+2VZ5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/eaj8GsGsVn +82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnhX9izjFk0WaSrT2y7Hxjb +davYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQl033DlZdwJVqwjbDG2jJ9SrcR5q+ss7F +Jej6A7na+RZukYT1HCjI/CbM1xyQVqdfbzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVt +J94Cj8rDtSvK6evIIVM4pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGa +JI7ZjnHKe7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0vm9q +p/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions ECC RootCA 2015 +=========================================================== +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0 +aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u +cyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgRUNDIFJvb3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEw +MzcxMlowgaoxCzAJBgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmlj +IEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUQwQgYD +VQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIEVDQyBSb290 +Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKgQehLgoRc4vgxEZmGZE4JJS+dQS8KrjVP +dJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJajq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoK +Vlp8aQuqgAkkbH7BRqNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFLQiC4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaeplSTA +GiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7SofTUwJCA3sS61kFyjn +dc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +ISRG Root X1 +============ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UE +BhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQD +EwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQG 
+EwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMT +DElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54r +Vygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj1 +3Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8K +b4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCN +Aymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ +4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf +1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFu +hjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQH +usEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/r +OPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY +9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV +0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwt +hDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJw +TdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nx +e5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZA +JzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahD +YVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9n +JEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJ +m+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +AC RAIZ FNMT-RCM +================ +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsxCzAJBgNVBAYT +AkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTAeFw0wODEw +MjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJD +TTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBALpxgHpMhm5/yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcf +qQgfBBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAzWHFctPVr +btQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxFtBDXaEAUwED653cXeuYL +j2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z374jNUUeAlz+taibmSXaXvMiwzn15Cou +08YfxGyqxRxqAQVKL9LFwag0Jl1mpdICIfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mw +WsXmo8RZZUc1g16p6DULmbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnT +tOmlcYF7wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peSMKGJ +47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2ZSysV4999AeU14EC +ll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMetUqIJ5G+GR4of6ygnXYMgrwTJbFaa +i0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FPd9xf3E6Jobd2Sn9R2gzL+HYJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1o +dHRwOi8vd3d3LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1RXxlDPiyN8+s +D8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYMLVN0V2Ue1bLdI4E7pWYjJ2cJ +j+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrT +Qfv6MooqtyuGC2mDOL7Nii4LcK2NJpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW ++YJF1DngoABd15jmfZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7 +Ixjp6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp1txyM/1d 
+8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B9kiABdcPUXmsEKvU7ANm +5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wokRqEIr9baRRmW1FMdW4R58MD3R++Lj8UG +rp1MYp3/RgT408m2ECVAdf4WqslKYIYvuu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +Amazon Root CA 1 +================ +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsFADA5MQswCQYD +VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAxMB4XDTE1 +MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv +bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALJ4gHHKeNXjca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgH +FzZM9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qwIFAGbHrQ +gLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6VOujw5H5SNz/0egwLX0t +dHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L93FcXmn/6pUCyziKrlA4b9v7LWIbxcce +VOF34GfID5yHI9Y/QCB/IIDEgEw+OyQmjgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3 +DQEBCwUAA4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDIU5PM +CCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUsN+gDS63pYaACbvXy +8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vvo/ufQJVtMVT8QtPHRh8jrdkPSHCa +2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2 +xJNDd2ZhwLnoQdeXeGADbkpyrqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +Amazon Root CA 2 +================ +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwFADA5MQswCQYD +VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAyMB4XDTE1 +MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv +bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAK2Wny2cSkxKgXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4 +kHbZW0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg1dKmSYXp +N+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K8nu+NQWpEjTj82R0Yiw9 +AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvd +fLC6HM783k81ds8P+HgfajZRRidhW+mez/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAEx +kv8LV/SasrlX6avvDXbR8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSS +btqDT6ZjmUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz7Mt0 +Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6+XUyo05f7O0oYtlN +c/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI0u1ufm8/0i2BWSlmy5A5lREedCf+ +3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSw +DPBMMPQFWAJI/TPlUq9LhONmUjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oA +A7CXDpO8Wqj2LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kSk5Nrp+gvU5LE +YFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl7uxMMne0nxrpS10gxdr9HIcW +xkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygmbtmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQ +gj9sAq+uEjonljYE1x2igGOpm/HlurR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbW +aQbLU8uz/mtBzUF+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoV +Yh63n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE76KlXIx3 +KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H9jVlpNMKVv/1F2Rs76gi +JUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT4PsJYGw= +-----END CERTIFICATE----- + +Amazon Root CA 3 +================ +-----BEGIN CERTIFICATE----- 
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5MQswCQYDVQQG +EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAzMB4XDTE1MDUy +NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ +MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZB +f8ANm+gBG1bG8lKlui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjr +Zt6jQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSrttvXBp43 +rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkrBqWTrBqYaGFy+uGh0Psc +eGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteMYyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +Amazon Root CA 4 +================ +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5MQswCQYDVQQG +EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSA0MB4XDTE1MDUy +NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ +MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN +/sGKe0uoe0ZLY7Bi9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri +83BkM6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WBMAoGCCqGSM49BAMDA2gA +MGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlwCkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1 +AE47xDqUEpHJWEadIRNyp4iciuRMStuW1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIxGDAWBgNVBAcT +D0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxpbXNlbCB2ZSBUZWtub2xvamlr +IEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0wKwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24g +TWVya2V6aSAtIEthbXUgU00xNjA0BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRp +ZmlrYXNpIC0gU3VydW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYD +VQQGEwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXllIEJpbGlt +c2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklUQUsxLTArBgNVBAsTJEth +bXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBTTTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11 +IFNNIFNTTCBLb2sgU2VydGlmaWthc2kgLSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAr3UwM6q7a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y8 +6Ij5iySrLqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INrN3wc +wv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2XYacQuFWQfw4tJzh0 +3+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/iSIzL+aFCr2lqBs23tPcLG07xxO9 +WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4fAJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQU +ZT/HiobGPN08VFw1+DrtUgxHV8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJ +KoZIhvcNAQELBQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPfIPP54+M638yc +lNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4lzwDGrpDxpa5RXI4s6ehlj2R +e37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0j +q5Rm+K37DwhuJi1/FwcJsoz7UMCflo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +GDCA TrustAUTH R5 ROOT +====================== +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCQ04xMjAw +BgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8wHQYDVQQD +DBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVow +YjELMAkGA1UEBhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ 
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJjDp6L3TQs +AlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBjTnnEt1u9ol2x8kECK62p +OqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+uKU49tm7srsHwJ5uu4/Ts765/94Y9cnrr +pftZTqfrlYwiOXnhLQiPzLyRuEH3FMEjqcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ +9Cy5WmYqsBebnh52nUpmMUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQ +xXABZG12ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloPzgsM +R6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3GkL30SgLdTMEZeS1SZ +D2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeCjGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4 +oR24qoAATILnsn8JuLwwoC8N9VKejveSswoAHQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx +9hoh49pwBiFYFIeFd3mqgnkCAwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlR +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZmDRd9FBUb1Ov9 +H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5COmSdI31R9KrO9b7eGZONn35 +6ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ryL3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd ++PwyvzeG5LuOmCd+uh8W4XAR8gPfJWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQ +HtZa37dG/OaG+svgIHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBD +F8Io2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV09tL7ECQ +8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQXR4EzzffHqhmsYzmIGrv +/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrqT8p+ck0LcIymSLumoRT2+1hEmRSuqguT +aaApJUqlyyvdimYHFngVV3Eb7PVHhPOeMTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +SSL.com Root Certification Authority RSA +======================================== +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxDjAM +BgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24x +MTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYw +MjEyMTczOTM5WhcNNDEwMjEyMTczOTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx +EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NM +LmNvbSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2RxFdHaxh3a3by/ZPkPQ/C +Fp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aXqhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8 +P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcCC52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/ge +oeOy3ZExqysdBP+lSgQ36YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkp +k8zruFvh/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrFYD3Z +fBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93EJNyAKoFBbZQ+yODJ +gUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVcUS4cK38acijnALXRdMbX5J+tB5O2 +UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi8 +1xtZPCvM8hnIk2snYxnP/Okm+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4s +bE6x/c+cCbqiM+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGVcpNxJK1ok1iOMq8bs3AD/CUr +dIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBcHadm47GUBwwyOabqG7B52B2ccETjit3E+ZUf +ijhDPwGFpUenPUayvOUiaPd7nNgsPgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAsl +u1OJD7OAUN5F7kR/q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjq +erQ0cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jra6x+3uxj 
+MxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90IH37hVZkLId6Tngr75qNJ +vTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/YK9f1JmzJBjSWFupwWRoyeXkLtoh/D1JI +Pb9s2KJELtFOt3JY04kTlf5Eq/jXixtunLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406y +wKBjYZC6VWg3dGq2ktufoYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NI +WuuA8ShYIc2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +SSL.com Root Certification Authority ECC +======================================== +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMCVVMxDjAMBgNV +BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xMTAv +BgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEy +MTgxNDAzWhcNNDEwMjEyMTgxNDAzWjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAO +BgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuBBAAiA2IA +BEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI7Z4INcgn64mMU1jrYor+ +8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPgCemB+vNH06NjMGEwHQYDVR0OBBYEFILR +hXMw5zUE044CkvvlpNHEIejNMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTT +jgKS++Wk0cQh6M0wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCW +e+0F+S8Tkdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+gA0z +5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +SSL.com EV Root Certification Authority RSA R2 +============================================== +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNVBAYTAlVTMQ4w +DAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9u +MTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MB4XDTE3MDUzMTE4MTQzN1oXDTQyMDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQI +DAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYD +VQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvqM0fNTPl9fb69LT3w23jh +hqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssufOePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7w +cXHswxzpY6IXFJ3vG2fThVUCAtZJycxa4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTO +Zw+oz12WGQvE43LrrdF9HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+ +B6KjBSYRaZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcAb9Zh +CBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQGp8hLH94t2S42Oim +9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQVPWKchjgGAGYS5Fl2WlPAApiiECto +RHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMOpgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+Slm +JuwgUHfbSguPvuUCYHBBXtSuUDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48 ++qvWBkofZ6aYMBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa49QaAJadz20Zp +qJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBWs47LCp1Jjr+kxJG7ZhcFUZh1 +++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nx +Y/hoLVUE0fKNsKTPvDxeH3jnpaAgcLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2G +guDKBAdRUNf/ktUM79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDz +OFSz/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXtll9ldDz7 +CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEmKf7GUmG6sXP/wwyc5Wxq +lD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKKQbNmC1r7fSOl8hqw/96bg5Qu0T/fkreR +rwU7ZcegbLHNYhLDkBvjJc40vG93drEQw/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1 
+hlMYegouCRw2n5H9gooiS9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX +9hwJ1C07mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +SSL.com EV Root Certification Authority ECC +=========================================== +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMCVVMxDjAMBgNV +BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xNDAy +BgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYw +MjEyMTgxNTIzWhcNNDEwMjEyMTgxNTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx +EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NM +LmNvbSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMAVIbc/R/fALhBYlzccBYy +3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1KthkuWnBaBu2+8KGwytAJKaNjMGEwHQYDVR0O +BBYEFFvKXuXe0oGqzagtZFG22XKbl+ZPMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe +5d7SgarNqC1kUbbZcpuX5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJ +N+vp1RPZytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZgh5Mm +m7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +GlobalSign Root CA - R6 +======================= +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEgMB4GA1UECxMX +R2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQxMjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9i +YWxTaWduIFJvb3QgQ0EgLSBSNjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFs +U2lnbjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQss +grRIxutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1kZguSgMpE +3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxDaNc9PIrFsmbVkJq3MQbF +vuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJwLnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqM +PKq0pPbzlUoSB239jLKJz9CgYXfIWHSw1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+ +azayOeSsJDa38O+2HBNXk7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05O +WgtH8wY2SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/hbguy +CLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4nWUx2OVvq+aWh2IMP +0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpYrZxCRXluDocZXFSxZba/jJvcE+kN +b7gu3GduyYsRtYQUigAZcIN5kZeR1BonvzceMgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNV +HSMEGDAWgBSubAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGtIxg93eFyRJa0 +lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr6155wsTLxDKZmOMNOsIeDjHfrY +BzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLjvUYAGm0CuiVdjaExUd1URhxN25mW7xocBFym +Fe944Hn+Xds+qkxV/ZoVqW/hpvvfcDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr +3TsTjxKM4kEaSHpzoHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB1 +0jZpnOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfspA9MRf/T +uTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+vJJUEeKgDu+6B5dpffItK +oZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+t +JDfLRVpOoERIyNiwmcUVhAn21klJwGW45hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GC CA +=============================== +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQswCQYDVQQGEwJD +SDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEo 
+MCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRa +Fw00MjA1MDkwOTU4MzNaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQL +ExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4nieUqjFqdr +VCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4Wp2OQ0jnUsYd4XxiWD1Ab +NTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7TrYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0E +AwMDaAAwZQIwJsdpW9zV57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtk +AjEA2zQgMgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +UCA Global G2 Root +================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9MQswCQYDVQQG +EwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBHbG9iYWwgRzIgUm9vdDAeFw0x +NjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0xCzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlU +cnVzdDEbMBkGA1UEAwwSVUNBIEdsb2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxeYrb3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmT +oni9kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzmVHqUwCoV +8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/RVogvGjqNO7uCEeBHANBS +h6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDcC/Vkw85DvG1xudLeJ1uK6NjGruFZfc8o +LTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIjtm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/ +R+zvWr9LesGtOxdQXGLYD0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBe +KW4bHAyvj5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6DlNaBa +4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6iIis7nCs+dwp4wwc +OxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznPO6Q0ibd5Ei9Hxeepl2n8pndntd97 +8XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFIHEjMz15DD/pQwIX4wVZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo +5sOASD0Ee/ojL3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl1qnN3e92mI0A +Ds0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oUb3n09tDh05S60FdRvScFDcH9 +yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LVPtateJLbXDzz2K36uGt/xDYotgIVilQsnLAX +c47QN6MUPJiVAAwpBVueSUmxX8fjy88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHo +jhJi6IjMtX9Gl8CbEGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZk +bxqgDMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI+Vg7RE+x +ygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGyYiGqhkCyLmTTX8jjfhFn +RR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bXUB+K+wb1whnw0A== +-----END CERTIFICATE----- + +UCA Extended Validation Root +============================ +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBHMQswCQYDVQQG +EwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9u +IFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMxMDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8G +A1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrs +iWogD4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvSsPGP2KxF +Rv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aopO2z6+I9tTcg1367r3CTu +eUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dksHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR +59mzLC52LqGj3n5qiAno8geK+LLNEOfic0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH +0mK1lTnj8/FtDw5lhIpjVMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KR 
+el7sFsLzKuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/TuDv +B0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41Gsx2VYVdWf6/wFlth +WG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs1+lvK9JKBZP8nm9rZ/+I8U6laUpS +NwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQDfwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS +3H5aBZ8eNJr34RQwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL +BQADggIBADaNl8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQVBcZEhrxH9cM +aVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5c6sq1WnIeJEmMX3ixzDx/BR4 +dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb ++7lsq+KePRXBOy5nAliRn+/4Qh8st2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOW +F3sGPjLtx7dCvHaj2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwi +GpWOvpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2CxR9GUeOc +GMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmxcmtpzyKEC2IPrNkZAJSi +djzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbMfjKaiJUINlK73nZfdklJrX+9ZSCyycEr +dhh2n1ax +-----END CERTIFICATE----- + +Certigna Root CA +================ +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAwWjELMAkGA1UE +BhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAwMiA0ODE0NjMwODEwMDAzNjEZ +MBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0xMzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjda +MFoxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYz +MDgxMDAwMzYxGTAXBgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sOty3tRQgX +stmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9MCiBtnyN6tMbaLOQdLNyz +KNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPuI9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8 +JXrJhFwLrN1CTivngqIkicuQstDuI7pmTLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16 +XdG+RCYyKfHx9WzMfgIhC59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq +4NYKpkDfePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3YzIoej +wpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWTCo/1VTp2lc5ZmIoJ +lXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1kJWumIWmbat10TWuXekG9qxf5kBdI +jzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp/ +/TBt2dzhauH8XwIDAQABo4IBGjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczovL3d3d3cuY2Vy +dGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilodHRwOi8vY3JsLmNlcnRpZ25h +LmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYraHR0cDovL2NybC5kaGlteW90aXMuY29tL2Nl +cnRpZ25hcm9vdGNhLmNybDANBgkqhkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOIt +OoldaDgvUSILSo3L6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxP +TGRGHVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH60BGM+RFq +7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncBlA2c5uk5jR+mUYyZDDl3 +4bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdio2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd +8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS +6Cvu5zHbugRqh5jnxV/vfaci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaY +tlu3zM63Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayhjWZS +aX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw3kAP+HwV96LOPNde +E4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +emSign Root CA - G1 +=================== +-----BEGIN CERTIFICATE----- 
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQGEwJJTjET +MBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRl +ZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBHMTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgx +ODMwMDBaMGcxCzAJBgNVBAYTAklOMRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVk +aHJhIFRlY2hub2xvZ2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQzf2N4aLTN +LnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO8oG0x5ZOrRkVUkr+PHB1 +cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aqd7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHW +DV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhMtTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ +6DqS0hdW5TUaQBw+jSztOd9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrH +hQIDAQABo0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQDAgEG +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31xPaOfG1vR2vjTnGs2 +vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjMwiI/aTvFthUvozXGaCocV685743Q +NcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6dGNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q ++Mri/Tm3R7nrft8EI6/6nAYH6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeih +U80Bv2noWgbyRQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +emSign ECC Root CA - G3 +======================= +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQGEwJJTjETMBEG +A1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRlZDEg +MB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4 +MTgzMDAwWjBrMQswCQYDVQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11 +ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0WXTsuwYc +58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xySfvalY8L1X44uT6EYGQIr +MgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuBzhccLikenEhjQjAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+D +CBeQyh+KTOgNG3qxrdWBCUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7 +jHvrZQnD+JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +emSign Root CA - C1 +=================== +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkGA1UEBhMCVVMx +EzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNp +Z24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAwMFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UE +BhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQD +ExNlbVNpZ24gUm9vdCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+up +ufGZBczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZHdPIWoU/ +Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH3DspVpNqs8FqOp099cGX +OFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvHGPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4V +I5b2P/AgNBbeCsbEBEV5f6f9vtKppa+cxSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleooms +lMuoaJuvimUnzYnu3Yy1aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+ +XJGFehiqTbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQAD +ggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87/kOXSTKZEhVb3xEp +/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4kqNPEjE2NuLe/gDEo2APJ62gsIq1 +NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrGYQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9 +wC68AivTxEDkigcxHpvOJpkT+xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQ +BmIMMMAVSKeoWXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +emSign ECC Root CA - C3 
+======================= +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQGEwJVUzETMBEG +A1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMxIDAeBgNVBAMTF2VtU2lnbiBF +Q0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAwMFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UE +BhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQD +ExdlbVNpZ24gRUNDIFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd +6bciMK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4OjavtisIGJAnB9 +SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0OBBYEFPtaSNCAIEDyqOkA +B2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gA +MGUCMQC02C8Cif22TGK6Q04ThHK1rt0c3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwU +ZOR8loMRnLDRWmFLpg9J0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +Hongkong Post Root CA 3 +======================= +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQELBQAwbzELMAkG +A1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJSG9uZyBLb25nMRYwFAYDVQQK +Ew1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25na29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2 +MDMwMjI5NDZaFw00MjA2MDMwMjI5NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtv +bmcxEjAQBgNVBAcTCUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMX +SG9uZ2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz +iNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFOdem1p+/l6TWZ5Mwc50tf +jTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mIVoBc+L0sPOFMV4i707mV78vH9toxdCim +5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOe +sL4jpNrcyCse2m5FHomY2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj +0mRiikKYvLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+TtbNe/ +JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZbx39ri1UbSsUgYT2u +y1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+l2oBlKN8W4UdKjk60FSh0Tlxnf0h ++bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YKTE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsG +xVd7GYYKecsAyVKvQv83j+GjHno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwID +AQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEwDQYJKoZIhvcN +AQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG7BJ8dNVI0lkUmcDrudHr9Egw +W62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCkMpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWld +y8joRTnU+kLBEUx3XZL7av9YROXrgZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov ++BS5gLNdTaqX4fnkGMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDc +eqFS3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJmOzj/2ZQw +9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+l6mc1X5VTMbeRRAc6uk7 +nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6cJfTzPV4e0hz5sy229zdcxsshTrD3mUcY +hcErulWuBurQB7Lcq9CClnXO0lD+mefPL5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB +60PZ2Pierc+xYw5F9KBaLJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fq +dBb9HxEGmpv0 +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G4 +========================================= +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAwgb4xCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3Qu +bmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1 +dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eSAtIEc0MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYT 
+AlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEc0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3D +umSXbcr3DbVZwbPLqGgZ2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV +3imz/f3ET+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j5pds +8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAMC1rlLAHGVK/XqsEQ +e9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73TDtTUXm6Hnmo9RR3RXRv06QqsYJn7 +ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNXwbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5X +xNMhIWNlUpEbsZmOeX7m640A2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV +7rtNOzK+mndmnqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwlN4y6mACXi0mW +Hv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNjc0kCAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9n +MA0GCSqGSIb3DQEBCwUAA4ICAQAS5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4Q +jbRaZIxowLByQzTSGwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht +7LGrhFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/B7NTeLUK +YvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uIAeV8KEsD+UmDfLJ/fOPt +jqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbwH5Lk6rWS02FREAutp9lfx1/cH6NcjKF+ +m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKW +RGhXxNUzzxkvFMSUHHuk2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjA +JOgc47OlIQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk5F6G ++TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuYn/PIjhs4ViFqUZPT +kcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- + +Microsoft ECC Root Certificate Authority 2017 +============================================= +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNyb3NvZnQgRUND +IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4 +MjMxNjA0WjBlMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZRogPZnZH6 +thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYbhGBKia/teQ87zvH2RPUB +eMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTIy5lycFIM ++Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlf +Xu5gKcs68tvWMoQZP3zVL8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaR +eNtUjGUBiudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +Microsoft RSA Root Certificate Authority 2017 +============================================= +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNyb3NvZnQg +UlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIw +NzE4MjMwMDIzWjBlMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9u +MTYwNAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZNt9GkMml +7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0ZdDMbRnMlfl7rEqUrQ7e 
+S0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw7 +1VdyvD/IybLeS2v4I2wDwAW9lcfNcztmgGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+ +dkC0zVJhUXAoP8XFWvLJjEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49F +yGcohJUcaDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaGYaRS +MLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6W6IYZVcSn2i51BVr +lMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4KUGsTuqwPN1q3ErWQgR5WrlcihtnJ +0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH+FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJ +ClTUFLkqqNfs+avNJVgyeY+QW5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZCLgLNFgVZJ8og +6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OCgMNPOsduET/m4xaRhPtthH80 +dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk ++ONVFT24bcMKpBLBaYVu32TxU5nhSnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex +/2kskZGT4d9Mozd2TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDy +AmH3pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGRxpl/j8nW +ZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiAppGWSZI1b7rCoucL5mxAyE +7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKT +c0QWbej09+CVgI+WXTik9KveCjCHk9hNAHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D +5KbvtwEwXlGjefVwaaZBRA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +e-Szigno Root CA 2017 +===================== +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNVBAYTAkhVMREw +DwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRkLjEXMBUGA1UEYQwOVkFUSFUt +MjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJvb3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZa +Fw00MjA4MjIxMjA3MDZaMHExCzAJBgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UE +CgwNTWljcm9zZWMgTHRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3pp +Z25vIFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtvxie+RJCx +s1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+HWyx7xf58etqjYzBhMA8G +A1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSHERUI0arBeAyxr87GyZDv +vzAEwDAfBgNVHSMEGDAWgBSHERUI0arBeAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEA +tVfd14pVCzbhhkT61NlojbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxO +svxyqltZ+efcMQ== +-----END CERTIFICATE----- + +certSIGN Root CA G2 +=================== +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNVBAYTAlJPMRQw +EgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04gUk9PVCBDQSBHMjAeFw0xNzAy +MDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJBgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lH +TiBTQTEcMBoGA1UECxMTY2VydFNJR04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAMDFdRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05 +N0IwvlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZuIt4Imfk +abBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhpn+Sc8CnTXPnGFiWeI8Mg +wT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKscpc/I1mbySKEwQdPzH/iV8oScLumZfNp +dWO9lfsbl83kqK/20U6o2YpxJM02PbyWxPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91Qqh +ngLjYl/rNUssuHLoPj1PrCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732 +jcZZroiFDsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fxDTvf +95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgyLcsUDFDYg2WD7rlc +z8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6CeWRgKRM+o/1Pcmqr4tTluCRVLERL +iohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1Ud 
+DgQWBBSCIS1mxteg4BXrzkwJd8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOB +ywaK8SJJ6ejqkX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQlqiCA2ClV9+BB +/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0OJD7uNGzcgbJceaBxXntC6Z5 +8hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+cNywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5 +BiKDUyUM/FHE5r7iOZULJK2v0ZXkltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklW +atKcsWMy5WHgUyIOpwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tU +Sxfj03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZkPuXaTH4M +NMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE1LlSVHJ7liXMvGnjSG4N +0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MXQRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +Trustwave Global Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 +ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0xNzA4MjMxOTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 +ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALldUShLPDeS0YLOvR29 +zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0XznswuvCAAJWX/NKSqIk4cXGIDtiLK0thAf +LdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4Bq +stTnoApTAbqOl5F2brz81Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9o +WN0EACyW80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotPJqX+ +OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1lRtzuzWniTY+HKE40 +Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfwhI0Vcnyh78zyiGG69Gm7DIwLdVcE +uE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm ++9jaJXLE9gCxInm943xZYkqcBW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqj +ifLJS3tBEW1ntwiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1UdDwEB/wQEAwIB +BjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W0OhUKDtkLSGm+J1WE2pIPU/H +PinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfeuyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0H +ZJDmHvUqoai7PF35owgLEQzxPy0QlG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla +4gt5kNdXElE1GYhBaCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5R +vbbEsLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPTMaCm/zjd +zyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qequ5AvzSxnI9O4fKSTx+O +856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxhVicGaeVyQYHTtgGJoC86cnn+OjC/QezH +Yj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu +3R3y4G5OBVixwJAWKqQ9EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP +29FpHOTKyeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +Trustwave Global ECC P256 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYDVQQGEwJVUzER +MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy 
+dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1 +NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH77bOYj +43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoNFWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqm +P62jQzBBMA8GA1UdEwEB/wQFMAMBAf8wDwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt +0UrrdaVKEJmzsaGLSvcwCgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjz +RM4q3wghDDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +Trustwave Global ECC P384 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYDVQQGEwJVUzER +MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy +dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4 +NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGvaDXU1CDFH +Ba5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJj9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr +/TklZvFe/oyujUF5nQlgziip04pt89ZF1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNV +HQ8BAf8EBQMDBwYAMB0GA1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNn +ADBkAjA3AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsCMGcl +CrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVuSw== +-----END CERTIFICATE----- + +NAVER Global Root Certification Authority +========================================= +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEMBQAwaTELMAkG +A1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRGT1JNIENvcnAuMTIwMAYDVQQD +DClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4 +NDJaFw0zNzA4MTgyMzU5NTlaMGkxCzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVT +UyBQTEFURk9STSBDb3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVAiQqrDZBb +UGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH38dq6SZeWYp34+hInDEW ++j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lEHoSTGEq0n+USZGnQJoViAbbJAh2+g1G7 +XNr4rRVqmfeSVPc0W+m/6imBEtRTkZazkVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2 +aacp+yPOiNgSnABIqKYPszuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4 +Yb8ObtoqvC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHfnZ3z +VHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaGYQ5fG8Ir4ozVu53B +A0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo0es+nPxdGoMuK8u180SdOqcXYZai +cdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3aCJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejy +YhbLgGvtPe31HzClrkvJE+2KAQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNV +HQ4EFgQU0p+I36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoNqo0hV4/GPnrK +21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatjcu3cvuzHV+YwIHHW1xDBE1UB +jCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm+LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bx +hYTeodoS76TiEJd6eN4MUZeoIUCLhr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTg +E34h5prCy8VCZLQelHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTH +D8z7p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8piKCk5XQ +A76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLRLBT/DShycpWbXgnbiUSY +qqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oG 
+I/hGoiLtk/bdmuYqh7GYVPEi92tF4+KOdh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmg +kpzNNIaRkPpkUZ3+/uul9XXeifdy +-----END CERTIFICATE----- + +AC RAIZ FNMT-RCM SERVIDORES SEGUROS +=================================== +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQswCQYDVQQGEwJF +UzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgwFgYDVQRhDA9WQVRFUy1RMjgy +NjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1SQ00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4 +MTIyMDA5MzczM1oXDTQzMTIyMDA5MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQt +UkNNMQ4wDAYDVQQLDAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNB +QyBSQUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuBBAAiA2IA +BPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LHsbI6GA60XYyzZl2hNPk2 +LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oKUm8BA06Oi6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqG +SM49BAMDA2kAMGYCMQCuSuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoD +zBOQn5ICMQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJyv+c= +-----END CERTIFICATE----- + +GlobalSign Root R46 +=================== +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUAMEYxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJv +b3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAX +BgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08Es +CVeJOaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQGvGIFAha/ +r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud316HCkD7rRlr+/fKYIje +2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo0q3v84RLHIf8E6M6cqJaESvWJ3En7YEt +bWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSEy132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvj +K8Cd+RTyG/FWaha/LIWFzXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD4 +12lPFzYE+cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCNI/on +ccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzsx2sZy/N78CsHpdls +eVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqaByFrgY/bxFn63iLABJzjqls2k+g9 +vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEM +BQADggIBAHx47PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti2kM3S+LGteWy +gxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIkpnnpHs6i58FZFZ8d4kuaPp92 +CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRFFRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZm +OUdkLG5NrmJ7v2B0GbhWrJKsFjLtrWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qq +JZ4d16GLuc1CLgSkZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwye +qiv5u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP4vkYxboz +nxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6N3ec592kD3ZDZopD8p/7 +DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3vouXsXgxT7PntgMTzlSdriVZzH81Xwj3 +QEUxeCp6 +-----END CERTIFICATE----- + +GlobalSign Root E46 +=================== +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYxCzAJBgNVBAYT +AkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJvb3Qg +RTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNV +BAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcq 
+hkjOPQIBBgUrgQQAIgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkB +jtjqR+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGddyXqBPCCj +QjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQxCpCPtsad0kRL +gLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZk +vLtoURMMA/cVi4RguYv/Uo7njLwcAjA8+RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+ +CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +GLOBALTRUST 2020 +================ +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCQVQx +IzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVT +VCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYxMDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAh +BgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAy +MDIwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWi +D59bRatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9ZYybNpyrO +VPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3QWPKzv9pj2gOlTblzLmM +CcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPwyJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCm +fecqQjuCgGOlYx8ZzHyyZqjC0203b+J+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKA +A1GqtH6qRNdDYfOiaxaJSaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9OR +JitHHmkHr96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj04KlG +DfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9MedKZssCz3AwyIDMvU +clOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIwq7ejMZdnrY8XD2zHc+0klGvIg5rQ +mjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1Ud +IwQYMBaAFNwuH9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJCXtzoRlgHNQIw +4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd6IwPS3BD0IL/qMy/pJTAvoe9 +iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf+I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS +8cE54+X1+NZK3TTN+2/BT+MAi1bikvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2 +HcqtbepBEX4tdJP7wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxS +vTOBTI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6CMUO+1918 +oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn4rnvyOL2NSl6dPrFf4IF +YqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+IaFvowdlxfv1k7/9nR4hYJS8+hge9+6jl +gqispdNpQ80xiEmEU5LAsTkbOYMBMMTyqfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +ANF Secure Server Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNVBAUTCUc2MzI4 +NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lv +bjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNVBAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3Qg +Q0EwHhcNMTkwOTA0MTAwMDM4WhcNMzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEw +MQswCQYDVQQGEwJFUzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQw +EgYDVQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9vdCBDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCjcqQZAZ2cC4Ffc0m6p6zz +BE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9qyGFOtibBTI3/TO80sh9l2Ll49a2pcbnv +T1gdpd50IJeh7WhM3pIXS7yr/2WanvtH2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcv +B2VSAKduyK9o7PQUlrZXH1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXse +zx76W0OLzc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyRp1RM +VwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQzW7i1o0TJrH93PB0j 
+7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/SiOL9V8BY9KHcyi1Swr1+KuCLH5z +JTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJnLNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe +8TZBAQIvfXOn3kLMTOmJDVb3n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVO +Hj1tyRRM4y5Bu8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEATh65isagmD9uw2nAalxJ +UqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzx +j6ptBZNscsdW699QIyjlRRA96Gejrw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDt +dD+4E5UGUcjohybKpFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM +5gf0vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjqOknkJjCb +5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ/zo1PqVUSlJZS2Db7v54 +EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ92zg/LFis6ELhDtjTO0wugumDLmsx2d1H +hk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI+PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGy +g77FGr8H6lnco4g175x2MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3 +r5+qPeoott7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +Certum EC-384 CA +================ +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQswCQYDVQQGEwJQ +TDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2 +MDcyNDU0WhcNNDMwMzI2MDcyNDU0WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERh +dGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +GTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATEKI6rGFtq +vm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7TmFy8as10CW4kjPMIRBSqn +iBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68KjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFI0GZnQkdjrzife81r1HfS+8EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNo +ADBlAjADVS2m5hjEfO/JUG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0 +QoSZ/6vnnvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +Certum Trusted Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6MQswCQYDVQQG +EwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0g +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0Ew +HhcNMTgwMzE2MTIxMDEzWhcNNDMwMzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMY +QXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZn0EGze2jusDbCSzBfN8p +fktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/qp1x4EaTByIVcJdPTsuclzxFUl6s1wB52 +HO8AU5853BSlLCIls3Jy/I2z5T4IHhQqNwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2 +fJmItdUDmj0VDT06qKhF8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGt +g/BKEiJ3HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGamqi4 +NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi7VdNIuJGmj8PkTQk +fVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSFytKAQd8FqKPVhJBPC/PgP5sZ0jeJ +P/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0PqafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSY +njYJdmZm/Bo/6khUHL4wvYBQv3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHK +HRzQ+8S1h9E6Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQADggIBAEii1QAL 
+LtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4WxmB82M+w85bj/UvXgF2Ez8s +ALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvozMrnadyHncI013nR03e4qllY/p0m+jiGPp2K +h2RX5Rc64vmNueMzeMGQ2Ljdt4NR5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8 +CYyqOhNf6DR5UMEQGfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA +4kZf5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq0Uc9Nneo +WWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7DP78v3DSk+yshzWePS/Tj +6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTMqJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmT +OPQD8rv7gmsHINFSH5pkAnuYZttcTVoP0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZck +bxJF0WddCajJFdr60qZfE2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +TunTrust Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQELBQAwYTELMAkG +A1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUgQ2VydGlmaWNhdGlvbiBFbGVj +dHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJvb3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQw +NDI2MDg1NzU2WjBhMQswCQYDVQQGEwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBD +ZXJ0aWZpY2F0aW9uIEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZn56eY+hz +2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd2JQDoOw05TDENX37Jk0b +bjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgFVwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7 +NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZGoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAd +gjH8KcwAWJeRTIAAHDOFli/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViW +VSHbhlnUr8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2eY8f +Tpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIbMlEsPvLfe/ZdeikZ +juXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISgjwBUFfyRbVinljvrS5YnzWuioYas +DXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwS +VXAkPcvCFDVDXSdOvsC9qnyW5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI +04Y+oXNZtPdEITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+zxiD2BkewhpMl +0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYuQEkHDVneixCwSQXi/5E/S7fd +Ao74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRY +YdZ2vyJ/0Adqp2RT8JeNnYA/u8EH22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJp +adbGNjHh/PqAulxPxOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65x +xBzndFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5Xc0yGYuP +jCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7bnV2UqL1g52KAdoGDDIzM +MEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQCvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9z +ZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZHu/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3r +AZ3r2OvEhJn7wAzMMujjd9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +HARICA TLS RSA Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQG +EwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u +cyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0EgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUz +OFoXDTQ1MDIxMzEwNTUzN1owbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRl +bWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNB +IFJvb3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569lmwVnlskN +JLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE4VGC/6zStGndLuwRo0Xu +a2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uva9of08WRiFukiZLRgeaMOVig1mlDqa2Y 
+Ulhu2wr7a89o+uOkXjpFc5gH6l8Cct4MpbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K +5FrZx40d/JiZ+yykgmvwKh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEv +dmn8kN3bLW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcYAuUR +0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqBAGMUuTNe3QvboEUH +GjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYqE613TBoYm5EPWNgGVMWX+Ko/IIqm +haZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHrW2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQ +CPxrvrNQKlr9qEgYRtaQQJKQCoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAUX15QvWiWkKQU +EapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3f5Z2EMVGpdAgS1D0NTsY9FVq +QRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxajaH6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxD +QpSbIPDRzbLrLFPCU3hKTwSUQZqPJzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcR +j88YxeMn/ibvBZ3PzzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5 +vZStjBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0/L5H9MG0 +qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pTBGIBnfHAT+7hOtSLIBD6 +Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79aPib8qXPMThcFarmlwDB31qlpzmq6YR/ +PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YWxw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnn +kf3/W9b3raYvAwtt41dU63ZTGI0RmLo= +-----END CERTIFICATE----- + +HARICA TLS ECC Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQswCQYDVQQGEwJH +UjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBD +QTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoX +DTQ1MDIxMzExMDEwOVowbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWlj +IGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJv +b3QgQ0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7KKrxcm1l +AEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9YSTHMmE5gEYd103KUkE+b +ECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW +0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAi +rcJRQO9gcS3ujwLEXQNwSaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/Qw +CZ61IygNnxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU 
+vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1Ud +DgQWBBRlzeurNR4APn7VdMActHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4w +gZswgZgGBFUdIAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABCAG8AbgBhAG4A +bwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAwADEANzAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9miWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL +4QjbEwj4KKE1soCzC1HA01aajTNFSa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDb +LIpgD7dvlAceHabJhfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1il +I45PVf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZEEAEeiGaP +cjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV1aUsIC+nmCjuRfzxuIgA +LI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2tCsvMo2ebKHTEm9caPARYpoKdrcd7b/+A +lun4jWq9GJAd/0kakFI3ky88Al2CdgtR5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH +9IBk9W6VULgRfhVwOEqwf9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpf +NIbnYrX9ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNKGbqE +ZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +vTrus ECC Root CA +================= +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMwRzELMAkGA1UE +BhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBS +b290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDczMTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAa +BgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+c +ToL0v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUde4BdS49n +TPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIwV53dVvHH4+m4SVBrm2nDb+zDfSXkV5UT +QJtS0zvzQBm8JsctBp61ezaf9SXUY2sAAjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQL +YgmRWAD5Tfs0aNoJrSEGGJTO +-----END CERTIFICATE----- + +vTrus Root CA +============= +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQELBQAwQzELMAkG +A1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xFjAUBgNVBAMTDXZUcnVzIFJv +b3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMxMDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoG +A1UEChMTaVRydXNDaGluYSBDby4sTHRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZots +SKYcIrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykUAyyNJJrI +ZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+GrPSbcKvdmaVayqwlHeF +XgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z98Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KA +YPxMvDVTAWqXcoKv8R1w6Jz1717CbMdHflqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70 +kLJrxLT5ZOrpGgrIDajtJ8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2 +AXPKBlim0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZNpGvu +/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQUqqzApVg+QxMaPnu +1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHWOXSuTEGC2/KmSNGzm/MzqvOmwMVO +9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMBAAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYg +scasGrz2iTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOC +AgEAKbqSSaet8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1jbhd47F18iMjr +jld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvMKar5CKXiNxTKsbhm7xqC5PD4 +8acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIivTDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJn 
+xDHO2zTlJQNgJXtxmOTAGytfdELSS8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554Wg +icEFOwE30z9J4nfrI8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4 +sEb9b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNBUvupLnKW +nyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1PTi07NEPhmg4NpGaXutIc +SkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929vensBxXVsFy6K2ir40zSbofitzmdHxghm+H +l3s= +-----END CERTIFICATE----- + +ISRG Root X2 +============ +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQswCQYDVQQGEwJV +UzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElT +UkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVT +MSkwJwYDVQQKEyBJbnRlcm5ldCBTZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNS +RyBSb290IFgyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0H +ttwW+1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9ItgKbppb +d9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZIzj0EAwMDaAAwZQIwe3lORlCEwkSHRhtF +cP9Ymd70/aTSVaYgLXTWNLxBo1BfASdWtL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5 +U6VR5CmD1/iQMVtCnwr1/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +HiPKI Root CA - G1 +================== +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBPMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xGzAZBgNVBAMMEkhpUEtJ +IFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRaFw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYT +AlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kg +Um9vdCBDQSAtIEcxMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0 +o9QwqNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twvVcg3Px+k +wJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6lZgRZq2XNdZ1AYDgr/SE +YYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnzQs7ZngyzsHeXZJzA9KMuH5UHsBffMNsA +GJZMoYFL3QRtU6M9/Aes1MU3guvklQgZKILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfd +hSi8MEyr48KxRURHH+CKFgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj +1jOXTyFjHluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDry+K4 +9a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ/W3c1pzAtH2lsN0/ +Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgMa/aOEmem8rJY5AIJEzypuxC00jBF +8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQD +AgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqcSE5XCV0vrPSl +tJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6FzaZsT0pPBWGTMpWmWSBUdGSquE +wx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9TcXzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07Q +JNBAsNB1CI69aO4I1258EHBGG3zgiLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv +5wiZqAxeJoBF1PhoL5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+Gpz +jLrFNe85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wrkkVbbiVg +hUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+vhV4nYWBSipX3tUZQ9rb +yltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQUYDksswBVLuT1sw5XxJFBAJw/6KXf6vb/ +yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds 
+b2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgwMTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkW +ymOxuYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/+wpu+74zyTyjhNUwCgYI +KoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147bmF0774BxL4YSFlhgjICICadVGNA3jdg +UM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +GTS Root R1 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7wCl7raKb0 +xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjwTcLCeoiKu7rPWRnWr4+w +B7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0PfyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXW +nOunVmSPlk9orj2XwoSPwLxAwAtcvfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk +9+aCEI3oncKKiPo4Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zq +kUspzBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92wO1A +K/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70paDPvOmbsB4om3xPX +V2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDW +cfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQAD +ggIBAJ+qQibbC5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuyh6f88/qBVRRi +ClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM47HLwEXWdyzRSjeZ2axfG34ar +J45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8JZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYci +NuaCp+0KueIHoI17eko8cdLiA6EfMgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5me +LMFrUKTX5hgUvYU/Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJF +fbdT6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ0E6yove+ +7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm2tIMPNuzjsmhDYAPexZ3 +FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bbbP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3 +gm3c +-----END CERTIFICATE----- + +GTS Root R2 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY6Dlo7JUl +e3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAuMC6C/Pq8tBcKSOWIm8Wb +a96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS ++LFjKBC4swm4VndAoiaYecb+3yXuPuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7M +kogwTZq9TwtImoS1mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJG +r61K8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RWIr9q +S34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKaG73VululycslaVNV 
+J1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCqgc7dGtxRcw1PcOnlthYhGXmy5okL +dWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQAD +ggIBAB/Kzt3HvqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyCB19m3H0Q/gxh +swWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2uNmSRXbBoGOqKYcl3qJfEycel +/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMgyALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVn +jWQye+mew4K6Ki3pHrTgSAai/GevHyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y5 +9PYjJbigapordwj6xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M +7YNRTOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924SgJPFI/2R8 +0L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV7LXTWtiBmelDGDfrs7vR +WGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjW +HYbL +-----END CERTIFICATE----- + +GTS Root R3 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ +R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL24CejQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTB8Sa6oC2uhYHP0/Eq +Er24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azT +L818+FsuVbu/3ZL3pAzcMeGiAjEA/JdmZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV +11RZt+cRLInUue4X +-----END CERTIFICATE----- + +GTS Root R4 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ +R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvRHYqjQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSATNbrdP9JNqPV2Py1 +PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/C +r8deVl5c1RxYIigL9zC2L7F8AjEA8GE8p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh +4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +Telia Root CA v2 +================ +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQxCzAJBgNVBAYT +AkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2 +MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQK +DBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ7 +6zBqAMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9vVYiQJ3q +9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9lRdU2HhE8Qx3FZLgmEKn +pNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTODn3WhUidhOPFZPY5Q4L15POdslv5e2QJl +tI5c0BE0312/UqeBAMN/mUWZFdUXyApT7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW +5olWK8jjfN7j/4nlNW4o6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNr +RBH0pUPCTEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6WT0E 
+BXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63RDolUK5X6wK0dmBR4 +M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZIpEYslOqodmJHixBTB0hXbOKSTbau +BcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGjYzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7W +xy+G2CQ5MB0GA1UdDgQWBBRyrOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi0f6X+J8wfBj5 +tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMMA8iZGok1GTzTyVR8qPAs5m4H +eW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBSSRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+C +y748fdHif64W1lZYudogsYMVoe+KTTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygC +QMez2P2ccGrGKMOF6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15 +h2Er3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMtTy3EHD70 +sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pTVmBds9hCG1xLEooc6+t9 +xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAWysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQ +raVplI/owd8k+BsHMYeB2F326CjYSlKArBPuUBQemMc= +-----END CERTIFICATE----- + +D-TRUST BR Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE +RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEJSIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7 +dPYSzuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0QVK5buXu +QqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/VbNafAkl1bK6CKBrqx9t +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2JyX3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFWwKrY7RjEsK70Pvom +AjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHVdWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +D-TRUST EV Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE +RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEVWIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8 +ZRCC/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rDwpdhQntJ +raOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3OqQo5FD4pPfsazK2/umL +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2V2X3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CAy/m0sRtW9XLS/BnR +AjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJbgfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +DigiCert TLS ECC P384 Root G5 +============================= +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQswCQYDVQQGEwJV 
+UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURpZ2lDZXJ0IFRMUyBFQ0MgUDM4 +NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDkRpZ2lDZXJ0LCBJbmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQg +Um9vdCBHNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1Tzvd +lHJS7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp0zVozptj +n4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICISB4CIfBFqMA4GA1UdDwEB +/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQCJao1H5+z8blUD2Wds +Jk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQLgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIx +AJSdYsiJvRmEFOml+wG4DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +DigiCert TLS RSA4096 Root G5 +============================ +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBNMQswCQYDVQQG +EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0 +MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcNNDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2 +IFJvb3QgRzUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS8 +7IE+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG02C+JFvuU +AT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgpwgscONyfMXdcvyej/Ces +tyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZMpG2T6T867jp8nVid9E6P/DsjyG244gXa +zOvswzH016cpVIDPRFtMbzCe88zdH5RDnU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnV +DdXifBBiqmvwPXbzP6PosMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9q +TXeXAaDxZre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cdLvvy +z6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvXKyY//SovcfXWJL5/ +MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNeXoVPzthwiHvOAbWWl9fNff2C+MIk +wcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPLtgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4E +FgQUUTMc7TZArxfTJc1paPKvTiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7HPNtQOa27PShN +lnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLFO4uJ+DQtpBflF+aZfTCIITfN +MBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQREtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/ +u4cnYiWB39yhL/btp/96j1EuMPikAdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9G +OUrYU9DzLjtxpdRv/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh +47a+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilwMUc/dNAU +FvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WFqUITVuwhd4GTWgzqltlJ +yqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCKovfepEWFJqgejF0pW8hL2JpqA15w8oVP +bEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +Certainly Root R1 +================= +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAwPTELMAkGA1UE +BhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2VydGFpbmx5IFJvb3QgUjEwHhcN +MjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2Vy +dGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBANA21B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O +5MQTvqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbedaFySpvXl +8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b01C7jcvk2xusVtyWMOvwl +DbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGI +XsXwClTNSaa/ApzSRKft43jvRl5tcdF5cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkN 
+KPl6I7ENPT2a/Z2B7yyQwHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQ +AjeZjOVJ6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA2Cnb +rlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyHWyf5QBGenDPBt+U1 +VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMReiFPCyEQtkA6qyI6BJyLm4SGcprS +p6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTgqj8ljZ9EXME66C6ud0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAsz +HQNTVfSVcOQrPbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi1wrykXprOQ4v +MMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrdrRT90+7iIgXr0PK3aBLXWopB +GsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9ditaY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+ +gjwN/KUD+nsa2UUeYNrEjvn8K8l7lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgH +JBu6haEaBQmAupVjyTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7 +fpYnKx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLyyCwzk5Iw +x06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5nwXARPbv0+Em34yaXOp/S +X3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +Certainly Root E1 +================= +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQswCQYDVQQGEwJV +UzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBFMTAeFw0yMTA0 +MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJBgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlu +bHkxGjAYBgNVBAMTEUNlcnRhaW5seSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4 +fxzf7flHh4axpMCK+IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9 +YBk2QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4hevIIgcwCgYIKoZIzj0E +AwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozmut6Dacpps6kFtZaSF4fC0urQe87YQVt8 +rgIwRt7qy12a7DLCZRawTDBcMPPaTnOGBtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +E-Tugra Global Root CA RSA v3 +============================= +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQELBQAwgYAxCzAJ +BgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAb +BgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290 +IENBIFJTQSB2MzAeFw0yMDAzMTgwOTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJU +UjEPMA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRF +LVR1Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBSU0Eg +djMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J77gnJY9LTQ91ew6aEOErx +jYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscxuj7X/iWpKo429NEvx7epXTPcMHD4QGxL +sqYxYdE0PD0xesevxKenhOGXpOhL9hd87jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF +/YP9f4RtNGx/ardLAQO/rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8q +QedmCeFLl+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bGwzrw +bMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4znKS4iicvObpCdg6 +04nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBOM/J+JjKsBY04pOZ2PJ8QaQ5tndLB +eSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiM +bIedBi3x7+PmBvrFZhNb/FAHnnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbg +h3cXTJ2w2AmoDVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSytK7mLfcm1ap1 +LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAImocn+M684uGMQQ +gC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN4 
+38o2Fi+CiJ+8EUdPdk3ILY7r3y18Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/q +ln0F7psTpURs+APQ3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3s +SdPkvmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn99t2HVhjY +sCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQmhty3QUBjYZgv6Rn7rWl +DdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YAVSgU7NbHEqIbZULpkejLPoeJVF3Zr52X +nGnnCv8PWniLYypMfUeUP95L6VPQMPHF9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFH +IK+WEj5jlB0E5y67hscMmoi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiX +YY60MGo8bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +E-Tugra Global Root CA ECC v3 +============================= +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMwgYAxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAbBgNV +BAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENB +IEVDQyB2MzAeFw0yMDAzMTgwOTQ2NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEP +MA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBFQ0MgdjMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQKczLWYHMjLiSF4mDKpL2 +w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YKfWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31 +Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQ +zPUwHQYDVR0OBBYEFP+CMXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO +PQQDAwNpADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/67W4W +Aie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFxvmjkI6TZraE3 +-----END CERTIFICATE----- + +Security Communication RootCA3 +============================== +-----BEGIN CERTIFICATE----- +MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNVBAYTAkpQMSUw +IwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScwJQYDVQQDEx5TZWN1cml0eSBD +b21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQsw +CQYDVQQGEwJKUDElMCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UE +AxMeU2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4rCmDvu20r +hvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzAlrenfna84xtSGc4RHwsE +NPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MGTfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2 +/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF79+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGm +npjKIG58u4iFW/vAEGK78vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtY +XLVqAvO4g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3weGVPK +p7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst+3A7caoreyYn8xrC +3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M0V9hvqG8OmpI6iZVIhZdXw3/JzOf +GAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQT9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0Vcw +CBEF/VfR2ccCAwEAAaNCMEAwHQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS +YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PAFNr0Y/Dq9HHu +Tofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd9XbXv8S2gVj/yP9kaWJ5rW4O +H3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQIUYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASx +YfQAW0q3nHE3GYV5v4GwxxMOdnE+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZ +XSEIx2C/pHF7uNkegr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml ++LLfiAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUVnuiZIesn 
+KwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD2NCcnWXL0CsnMQMeNuE9 +dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm +6Vwdp6POXiUyK+OVrCoHzrQoeIY8LaadTdJ0MN1kURXbg4NR16/9M51NZg== +-----END CERTIFICATE----- + +Security Communication ECC RootCA1 +================================== +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYTAkpQMSUwIwYD +VQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYDVQQDEyJTZWN1cml0eSBDb21t +dW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYxNjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTEL +MAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNV +BAMTIlNlY3VyaXR5IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+CnnfdldB9sELLo +5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpKULGjQjBAMB0GA1UdDgQW +BBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAK +BggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3L +snNdo4gIxwwCMQDAqy0Obe0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70e +N9k= +-----END CERTIFICATE----- diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/excon.gemspec b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/excon.gemspec new file mode 100644 index 0000000..3e19c0a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/excon.gemspec @@ -0,0 +1,44 @@ +$LOAD_PATH.unshift File.join(File.dirname(__FILE__), 'lib') +require 'excon/version' + +Gem::Specification.new do |s| + s.name = 'excon' + s.version = Excon::VERSION + s.summary = "speed, persistence, http(s)" + s.description = "EXtended http(s) CONnections" + s.authors = ["dpiddy (Dan Peterson)", "geemus (Wesley Beary)", "nextmat (Matt Sanders)"] + s.email = 'geemus@gmail.com' + s.homepage = 'https://github.com/excon/excon' + s.license = 'MIT' + s.rdoc_options = ["--charset=UTF-8"] + s.extra_rdoc_files = %w[README.md CONTRIBUTORS.md CONTRIBUTING.md] + s.files = `git ls-files -- data/* lib/*`.split("\n") + [ + "CONTRIBUTING.md", + "CONTRIBUTORS.md", + "LICENSE.md", + "README.md", + "excon.gemspec" + ] + + s.add_development_dependency('rspec', '>= 3.5.0') + s.add_development_dependency('activesupport') + s.add_development_dependency('delorean') + s.add_development_dependency('eventmachine', '>= 1.0.4') + s.add_development_dependency('open4') + s.add_development_dependency('rake') + s.add_development_dependency('shindo') + s.add_development_dependency('sinatra') + s.add_development_dependency('sinatra-contrib') + s.add_development_dependency('json', '>= 1.8.5') + s.add_development_dependency('puma') + s.add_development_dependency('webrick') + + s.metadata = { + 'homepage_uri' => 'https://github.com/excon/excon', + 'bug_tracker_uri' => 'https://github.com/excon/excon/issues', + 'changelog_uri' => 'https://github.com/excon/excon/blob/master/changelog.txt', + 'documentation_uri' => 'https://github.com/excon/excon/blob/master/README.md', + 'source_code_uri' => 'https://github.com/excon/excon', + 'wiki_uri' => 'https://github.com/excon/excon/wiki' + } +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon.rb new file mode 100644 index 0000000..b7c6291 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon.rb @@ -0,0 +1,265 @@ +# frozen_string_literal: true +$:.unshift(File.dirname(__FILE__)) unless + $:.include?(File.dirname(__FILE__)) || $:.include?(File.expand_path(File.dirname(__FILE__))) + +require 
'cgi' +require 'forwardable' +require 'openssl' +require 'rbconfig' +require 'socket' +require 'timeout' +require 'uri' +require 'zlib' +require 'stringio' + +require 'excon/version' + +require 'excon/extensions/uri' + +require 'excon/middlewares/base' +require 'excon/middlewares/expects' +require 'excon/middlewares/idempotent' +require 'excon/middlewares/instrumentor' +require 'excon/middlewares/mock' +require 'excon/middlewares/response_parser' + +require 'excon/error' +require 'excon/constants' +require 'excon/utils' + +require 'excon/connection' +require 'excon/headers' +require 'excon/response' +require 'excon/middlewares/decompress' +require 'excon/middlewares/escape_path' +require 'excon/middlewares/redirect_follower' +require 'excon/middlewares/capture_cookies' +require 'excon/pretty_printer' +require 'excon/socket' +require 'excon/ssl_socket' +require 'excon/instrumentors/standard_instrumentor' +require 'excon/instrumentors/logging_instrumentor' +require 'excon/unix_socket' + +# Define defaults first so they will be available to other files +module Excon + class << self + + # @return [Hash] defaults for Excon connections + def defaults + @defaults ||= DEFAULTS + end + + # Change defaults for Excon connections + # @return [Hash] defaults for Excon connections + def defaults=(new_defaults) + @defaults = new_defaults + end + + def display_warning(warning) + # Show warning if $VERBOSE or ENV['EXCON_DEBUG'] is set + if $VERBOSE || ENV['EXCON_DEBUG'] + $stderr.puts "[excon][WARNING] #{warning}\n#{ caller.join("\n") }" + end + + if @raise_on_warnings + raise Error::Warning.new(warning) + end + end + + def set_raise_on_warnings!(should_raise) + @raise_on_warnings = should_raise + end + + # Status of mocking + def mock + display_warning('Excon#mock is deprecated, use Excon.defaults[:mock] instead.') + self.defaults[:mock] + end + + # Change the status of mocking + # false is the default and works as expected + # true returns a value from stubs or raises + def mock=(new_mock) + display_warning('Excon#mock is deprecated, use Excon.defaults[:mock]= instead.') + self.defaults[:mock] = new_mock + end + + # @return [String] The filesystem path to the SSL Certificate Authority + def ssl_ca_path + display_warning('Excon#ssl_ca_path is deprecated, use Excon.defaults[:ssl_ca_path] instead.') + self.defaults[:ssl_ca_path] + end + + # Change path to the SSL Certificate Authority + # @return [String] The filesystem path to the SSL Certificate Authority + def ssl_ca_path=(new_ssl_ca_path) + display_warning('Excon#ssl_ca_path= is deprecated, use Excon.defaults[:ssl_ca_path]= instead.') + self.defaults[:ssl_ca_path] = new_ssl_ca_path + end + + # @return [true, false] Whether or not to verify the peer's SSL certificate / chain + def ssl_verify_peer + display_warning('Excon#ssl_verify_peer is deprecated, use Excon.defaults[:ssl_verify_peer] instead.') + self.defaults[:ssl_verify_peer] + end + + # Change the status of ssl peer verification + # @see Excon#ssl_verify_peer (attr_reader) + def ssl_verify_peer=(new_ssl_verify_peer) + display_warning('Excon#ssl_verify_peer= is deprecated, use Excon.defaults[:ssl_verify_peer]= instead.') + self.defaults[:ssl_verify_peer] = new_ssl_verify_peer + end + + # @see Connection#initialize + # Initializes a new keep-alive session for a given remote host + # @param [String] url The destination URL + # @param [Hash] params One or more option params to set on the Connection instance + # @return [Connection] A new Excon::Connection instance + def new(url, params = {}) + 
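      # Illustrative sketch (editorial comment, not part of excon): the URL is
+      # parsed below into :scheme/:host/:hostname/:port/:path/:query params and
+      # handed to Excon::Connection.new; explicitly supplied params override
+      # the parsed values. Host and path here are placeholders:
+      #
+      #   connection = Excon.new('https://api.example.com', :persistent => true)
+      #   response   = connection.get(:path => '/status', :expects => [200])
+      #
+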
uri_parser = params[:uri_parser] || defaults[:uri_parser] + uri = uri_parser.parse(url) + if params[:path] + uri_parser.parse(params[:path]) + end + unless uri.scheme + raise ArgumentError.new("Invalid URI: #{uri}") + end + params = { + :host => uri.host, + :hostname => uri.hostname, + :path => uri.path, + :port => uri.port, + :query => uri.query, + :scheme => uri.scheme + }.merge(params) + if uri.password + params[:password] = Utils.unescape_uri(uri.password) + end + if uri.user + params[:user] = Utils.unescape_uri(uri.user) + end + Excon::Connection.new(params) + end + + # push an additional stub onto the list to check for mock requests + # @param request_params [Hash] request params to match against, omitted params match all + # @param response_params [Hash] response params to return from matched request or block to call with params + def stub(request_params = {}, response_params = nil, &block) + if (method = request_params.delete(:method)) + request_params[:method] = method.to_s.downcase.to_sym + end + if (url = request_params.delete(:url)) + uri = URI.parse(url) + request_params = { + :host => uri.host, + :path => uri.path, + :port => uri.port, + :query => uri.query, + :scheme => uri.scheme + }.merge!(request_params) + if uri.user || uri.password + request_params[:headers] ||= {} + user, pass = Utils.unescape_form(uri.user.to_s), Utils.unescape_form(uri.password.to_s) + request_params[:headers]['Authorization'] ||= 'Basic ' + ["#{user}:#{pass}"].pack('m').delete(Excon::CR_NL) + end + end + if request_params.has_key?(:headers) + headers = Excon::Headers.new + request_params[:headers].each do |key, value| + headers[key] = value + end + request_params[:headers] = headers + end + if block_given? + if response_params + raise(ArgumentError.new("stub requires either response_params OR a block")) + else + stub = [request_params, block] + end + elsif response_params + stub = [request_params, response_params] + else + raise(ArgumentError.new("stub requires either response_params OR a block")) + end + stubs.unshift(stub) + stub + end + + # get a stub matching params or nil + # @param request_params [Hash] request params to match against, omitted params match all + # @return [Hash] response params to return from matched request or block to call with params + def stub_for(request_params={}) + if (method = request_params.delete(:method)) + request_params[:method] = method.to_s.downcase.to_sym + end + Excon.stubs.each do |stub, response_params| + captures = { :headers => {} } + headers_match = !stub.has_key?(:headers) || stub[:headers].keys.all? do |key| + case value = stub[:headers][key] + when Regexp + case request_params[:headers][key] + when String + if (match = value.match(request_params[:headers][key])) + captures[:headers][key] = match.captures + end + when Regexp # for unstub on regex params + match = (value == request_params[:headers][key]) + end + match + else + value == request_params[:headers][key] + end + end + non_headers_match = (stub.keys - [:headers]).all? 
do |key| + case value = stub[key] + when Regexp + case request_params[key] + when String + if (match = value.match(request_params[key])) + captures[key] = match.captures + end + when Regexp # for unstub on regex params + match = (value == request_params[key]) + end + match + else + value == request_params[key] + end + end + if headers_match && non_headers_match + request_params[:captures] = captures + return [stub, response_params] + end + end + nil + end + + # get a list of defined stubs + def stubs + case Excon.defaults[:stubs] + when :global + @stubs ||= [] + when :local + Thread.current[:_excon_stubs] ||= [] + end + end + + # remove first/oldest stub matching request_params + # @param request_params [Hash] request params to match against, omitted params match all + # @return [Hash] response params from deleted stub + def unstub(request_params = {}) + stub = stub_for(request_params) + Excon.stubs.delete_at(Excon.stubs.index(stub)) + end + + # Generic non-persistent HTTP methods + HTTP_VERBS.each do |method| + module_eval <<-DEF, __FILE__, __LINE__ + 1 + def #{method}(url, params = {}, &block) + new(url, params).request(:method => :#{method}, &block) + end + DEF + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/connection.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/connection.rb new file mode 100644 index 0000000..37c8669 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/connection.rb @@ -0,0 +1,614 @@ +# frozen_string_literal: true +require 'ipaddr' + +module Excon + class Connection + include Utils + + attr_reader :data + + def connection + Excon.display_warning('Excon::Connection#connection is deprecated use Excon::Connection#data instead.') + @data + end + def connection=(new_params) + Excon.display_warning('Excon::Connection#connection= is deprecated. Use of this method may cause unexpected results.') + @data = new_params + end + + def params + Excon.display_warning('Excon::Connection#params is deprecated use Excon::Connection#data instead.') + @data + end + def params=(new_params) + Excon.display_warning('Excon::Connection#params= is deprecated. Use of this method may cause unexpected results.') + @data = new_params + end + + def proxy + Excon.display_warning('Excon::Connection#proxy is deprecated use Excon::Connection#data[:proxy] instead.') + @data[:proxy] + end + def proxy=(new_proxy) + Excon.display_warning('Excon::Connection#proxy= is deprecated. Use of this method may cause unexpected results.') + @data[:proxy] = new_proxy + end + + def logger + if @data[:instrumentor] && @data[:instrumentor].respond_to?(:logger) + @data[:instrumentor].logger + end + end + def logger=(logger) + @data[:instrumentor] = Excon::LoggingInstrumentor + @data[:logger] = logger + end + + # Initializes a new Connection instance + # @param [Hash] params One or more optional params + # @option params [String] :body Default text to be sent over a socket. Only used if :body absent in Connection#request params + # @option params [Hash] :headers The default headers to supply in a request. Only used if params[:headers] is not supplied to Connection#request + # @option params [String] :host The destination host's reachable DNS name or IP, in the form of a String. IPv6 addresses must be wrapped (e.g. [::1]). See URI#host. + # @option params [String] :hostname Same as host, but usable for socket connections. IPv6 addresses must not be wrapped (e.g. ::1). See URI#hostname. 
+    # @option params [String] :path Default path; appears after 'scheme://host:port/'. Only used if params[:path] is not supplied to Connection#request
+    # @option params [Fixnum] :port The port on which to connect, to the destination host
+    # @option params [Hash] :query Default query; appended to the 'scheme://host:port/path/' in the form of '?key=value'. Will only be used if params[:query] is not supplied to Connection#request
+    # @option params [String] :scheme The protocol; 'https' causes OpenSSL to be used
+    # @option params [String] :socket The path to the unix socket (required for 'unix://' connections)
+    # @option params [String] :ciphers Only use the specified SSL/TLS cipher suites; use OpenSSL cipher spec format e.g. 'HIGH:!aNULL:!3DES' or 'AES256-SHA:DES-CBC3-SHA'
+    # @option params [String] :proxy Proxy server; e.g. 'http://myproxy.com:8888'
+    # @option params [Fixnum] :retry_limit Set how many times we'll retry a failed request. (Default 4)
+    # @option params [Fixnum] :retry_interval Set how long to wait between retries. (Default 0)
+    # @option params [Class] :instrumentor Responds to #instrument as in ActiveSupport::Notifications
+    # @option params [String] :instrumentor_name Name prefix for #instrument events. Defaults to 'excon'
+    def initialize(params = {})
+      @pid = Process.pid
+      @data = Excon.defaults.dup
+      # merge does not deep-dup, so make sure headers is not the original
+      @data[:headers] = @data[:headers].dup
+
+      # the same goes for :middlewares
+      @data[:middlewares] = @data[:middlewares].dup
+
+      @data.merge!(params)
+      validate_params(:connection, @data, @data[:middlewares])
+
+      if @data.key?(:host) && !@data.key?(:hostname)
+        Excon.display_warning('hostname is missing! For IPv6 support, provide both host and hostname: Excon::Connection#new(:host => uri.host, :hostname => uri.hostname, ...).')
+        @data[:hostname] = @data[:host]
+      end
+
+      setup_proxy
+
+      if ENV.has_key?('EXCON_STANDARD_INSTRUMENTOR')
+        @data[:instrumentor] = Excon::StandardInstrumentor
+      end
+
+      if @data[:debug] || ENV.has_key?('EXCON_DEBUG')
+        @data[:debug_request] = @data[:debug_response] = true
+        @data[:instrumentor] = Excon::StandardInstrumentor
+      end
+
+      if @data[:scheme] == UNIX
+        # 'uri' >= v0.12.0 returns an empty string instead of nil for no host.
+        # So treat the parameter as present if and only if it is both non-nil and non-empty.
+        if @data[:host] && !@data[:host].empty?
+          raise ArgumentError, "The `:host` parameter should not be set for `unix://` connections.\n" +
+                               "When supplying a `unix://` URI, it should start with `unix:/` or `unix:///`."
+ elsif !@data[:socket] + raise ArgumentError, 'You must provide a `:socket` for `unix://` connections' + else + @socket_key = "#{@data[:scheme]}://#{@data[:socket]}" + end + else + @socket_key = "#{@data[:scheme]}://#{@data[:host]}#{port_string(@data)}" + end + reset + end + + def error_call(datum) + if datum[:error] + raise(datum[:error]) + end + end + + def request_call(datum) + begin + if datum.has_key?(:response) + # we already have data from a middleware, so bail + return datum + else + socket(datum).data = datum + # start with "METHOD /path" + request = datum[:method].to_s.upcase + ' ' + if datum[:proxy] && datum[:scheme] != HTTPS + request << datum[:scheme] << '://' << datum[:host] << port_string(datum) + end + request << datum[:path] + + # add query to path, if there is one + request << query_string(datum) + + # finish first line with "HTTP/1.1\r\n" + request << HTTP_1_1 + + if datum.has_key?(:request_block) + datum[:headers]['Transfer-Encoding'] = 'chunked' + else + body = datum[:body].is_a?(String) ? StringIO.new(datum[:body]) : datum[:body] + + # The HTTP spec isn't clear on it, but specifically, GET requests don't usually send bodies; + # if they don't, sending Content-Length:0 can cause issues. + unless datum[:method].to_s.casecmp('GET') == 0 && body.nil? + unless datum[:headers].has_key?('Content-Length') + datum[:headers]['Content-Length'] = detect_content_length(body) + end + end + end + + # add headers to request + request << Utils.headers_hash_to_s(datum[:headers]) + + # add additional "\r\n" to indicate end of headers + request << CR_NL + + if datum.has_key?(:request_block) + socket(datum).write(request) # write out request + headers + while true # write out body with chunked encoding + chunk = datum[:request_block].call + chunk = binary_encode(chunk) + if chunk.length > 0 + socket(datum).write(chunk.length.to_s(16) << CR_NL << chunk << CR_NL) + else + socket(datum).write(String.new("0#{CR_NL}#{CR_NL}")) + break + end + end + elsif body.nil? + socket(datum).write(request) # write out request + headers + else # write out body + if body.respond_to?(:binmode) && !body.is_a?(StringIO) + body.binmode + end + if body.respond_to?(:rewind) + body.rewind rescue nil + end + + # if request + headers is less than chunk size, fill with body + request = binary_encode(request) + chunk = body.read([datum[:chunk_size] - request.length, 0].max) + if chunk + chunk = binary_encode(chunk) + socket(datum).write(request << chunk) + else + socket(datum).write(request) # write out request + headers + end + + while (chunk = body.read(datum[:chunk_size])) + socket(datum).write(chunk) + end + end + end + rescue => error + case error + when Excon::Errors::InvalidHeaderKey, Excon::Errors::InvalidHeaderValue, Excon::Errors::StubNotFound, Excon::Errors::Timeout + raise(error) + when Errno::EPIPE + # Read whatever remains in the pipe to aid in debugging + response = socket.read + error = Excon::Error.new(response + error.message) + raise_socket_error(error) + else + raise_socket_error(error) + end + end + + datum + end + + def response_call(datum) + # ensure response_block is yielded to and body is empty from middlewares + if datum.has_key?(:response_block) && !(datum[:response][:body].nil? || datum[:response][:body].empty?) 
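+        # The branch below re-streams a body that a middleware has already
+        # buffered: it is sliced into :chunk_size pieces and handed to the
+        # caller's :response_block as (chunk, bytes_remaining, total_bytes).
+        # Caller-side sketch (illustrative only; names and paths are placeholders):
+        #
+        #   streamer = lambda do |chunk, remaining, total|
+        #     File.open('/tmp/download', 'ab') { |file| file.write(chunk) }
+        #   end
+        #   connection.request(:method => :get, :path => '/big', :response_block => streamer)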
+ response_body = datum[:response][:body].dup + datum[:response][:body] = '' + content_length = remaining = response_body.bytesize + while remaining > 0 + datum[:response_block].call(response_body.slice!(0, [datum[:chunk_size], remaining].min), [remaining - datum[:chunk_size], 0].max, content_length) + remaining -= datum[:chunk_size] + end + end + datum + end + + # Sends the supplied request to the destination host. + # @yield [chunk] @see Response#self.parse + # @param [Hash] params One or more optional params, override defaults set in Connection.new + # @option params [String] :body text to be sent over a socket + # @option params [Hash] :headers The default headers to supply in a request + # @option params [String] :path appears after 'scheme://host:port/' + # @option params [Hash] :query appended to the 'scheme://host:port/path/' in the form of '?key=value' + def request(params={}, &block) + # @data has defaults, merge in new params to override + datum = @data.merge(params) + datum[:headers] = @data[:headers].merge(datum[:headers] || {}) + + validate_params(:request, params, datum[:middlewares]) + # If the user passed in new middleware, we want to validate that the original connection parameters + # are still valid with the provided middleware. + if params[:middlewares] + validate_params(:connection, @data, datum[:middlewares]) + end + + if datum[:user] || datum[:password] + user, pass = Utils.unescape_uri(datum[:user].to_s), Utils.unescape_uri(datum[:password].to_s) + datum[:headers]['Authorization'] ||= 'Basic ' + ["#{user}:#{pass}"].pack('m').delete(Excon::CR_NL) + end + + if datum[:scheme] == UNIX + datum[:headers]['Host'] ||= '' + else + datum[:headers]['Host'] ||= datum[:host] + port_string(datum) + end + + # RFC 7230, section 5.4, states that the Host header SHOULD be the first one # to be present. + # Some web servers will reject the request if it comes too late, so let's hoist it to the top. + if (host = datum[:headers].delete('Host')) + datum[:headers] = { 'Host' => host }.merge(datum[:headers]) + end + + # default to GET if no method specified + unless datum[:method] + datum[:method] = :get + end + + # if path is empty or doesn't start with '/', insert one + unless datum[:path][0, 1] == '/' + datum[:path] = datum[:path].dup.insert(0, '/') + end + + if block_given? 
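+        # A bare block is the deprecated way to stream a response; it is simply
+        # captured as :response_block below. The equivalent explicit form would
+        # be (illustrative; my_block is a placeholder proc):
+        #   connection.request(:method => :get, :response_block => my_block)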
+ Excon.display_warning('Excon requests with a block are deprecated, pass :response_block instead.') + datum[:response_block] = block + end + + datum[:connection] = self + + # cleanup data left behind on persistent connection after interrupt + if datum[:persistent] && !@persistent_socket_reusable + reset + end + + datum[:stack] = datum[:middlewares].map do |middleware| + lambda {|stack| middleware.new(stack)} + end.reverse.inject(self) do |middlewares, middleware| + middleware.call(middlewares) + end + datum = datum[:stack].request_call(datum) + + unless datum[:pipeline] + @persistent_socket_reusable = false + datum = response(datum) + @persistent_socket_reusable = true + + if datum[:persistent] + if (key = datum[:response][:headers].keys.detect {|k| k.casecmp('Connection') == 0 }) + if datum[:response][:headers][key].casecmp('close') == 0 + reset + end + end + else + reset + end + + Excon::Response.new(datum[:response]) + else + datum + end + rescue => error + reset + + # If we didn't get far enough to initialize datum and the middleware stack, just raise + raise error if !datum + + datum[:error] = error + if datum[:stack] + datum[:stack].error_call(datum) + else + raise error + end + end + + # Sends the supplied requests to the destination host using pipelining. + # @param pipeline_params [Array] An array of one or more optional params, override defaults set in Connection.new, see #request for details + def requests(pipeline_params) + pipeline_params.each {|params| params.merge!(:pipeline => true, :persistent => true) } + pipeline_params.last.merge!(:persistent => @data[:persistent]) + + responses = pipeline_params.map do |params| + request(params) + end.map do |datum| + Excon::Response.new(response(datum)[:response]) + end + + if @data[:persistent] + if (key = responses.last[:headers].keys.detect {|k| k.casecmp('Connection') == 0 }) + if responses.last[:headers][key].casecmp('close') == 0 + reset + end + end + else + reset + end + + responses + end + + # Sends the supplied requests to the destination host using pipelining in + # batches of @limit [Numeric] requests. This is your soft file descriptor + # limit by default, typically 256. + # @param pipeline_params [Array] An array of one or more optional params, override defaults set in Connection.new, see #request for details + def batch_requests(pipeline_params, limit = nil) + limit ||= Process.respond_to?(:getrlimit) ? 
Process.getrlimit(:NOFILE).first : 256
+      responses = []
+
+      pipeline_params.each_slice(limit) do |params|
+        responses.concat(requests(params))
+      end
+
+      responses
+    end
+
+    def reset
+      if (old_socket = sockets.delete(@socket_key))
+        old_socket.close rescue nil
+      end
+      @persistent_socket_reusable = true
+    end
+
+    # Generate HTTP request verb methods
+    Excon::HTTP_VERBS.each do |method|
+      class_eval <<-DEF, __FILE__, __LINE__ + 1
+        def #{method}(params={}, &block)
+          request(params.merge!(:method => :#{method}), &block)
+        end
+      DEF
+    end
+
+    def retry_limit=(new_retry_limit)
+      Excon.display_warning('Excon::Connection#retry_limit= is deprecated, pass :retry_limit to the initializer.')
+      @data[:retry_limit] = new_retry_limit
+    end
+
+    def retry_limit
+      Excon.display_warning('Excon::Connection#retry_limit is deprecated, use Excon::Connection#data[:retry_limit].')
+      @data[:retry_limit] ||= DEFAULT_RETRY_LIMIT
+    end
+
+    def inspect
+      vars = instance_variables.inject({}) do |accum, var|
+        accum.merge!(var.to_sym => instance_variable_get(var))
+      end
+      vars[:'@data'] = Utils.redact(vars[:'@data'])
+      inspection = '#<Excon::Connection:'
+      inspection << (object_id << 1).to_s(16)
+      vars.each do |key, value|
+        inspection << ' ' << key.to_s << '=' << value.inspect
+      end
+      inspection << '>'
+      inspection
+    end
+
+    def valid_request_keys(middlewares)
+      valid_middleware_keys(middlewares) + Excon::VALID_REQUEST_KEYS
+    end
+
+    private
+
+    def detect_content_length(body)
+      if body.respond_to?(:size)
+        # IO object: File, Tempfile, StringIO, etc.
+        body.size
+      elsif body.respond_to?(:stat)
+        body.stat.size
+      else
+        0
+      end
+    end
+
+    def valid_middleware_keys(middlewares)
+      middlewares.flat_map do |middleware|
+        if middleware.respond_to?(:valid_parameter_keys)
+          middleware.valid_parameter_keys
+        else
+          Excon.display_warning("Excon middleware #{middleware} does not define #valid_parameter_keys")
+          []
+        end
+      end
+    end
+
+    def validate_params(validation, params, middlewares)
+      valid_keys = case validation
+                   when :connection
+                     valid_middleware_keys(middlewares) + Excon::VALID_CONNECTION_KEYS
+                   when :request
+                     valid_request_keys(middlewares)
+                   else
+                     raise ArgumentError.new("Invalid validation type '#{validation}'")
+                   end
+
+      invalid_keys = params.keys - valid_keys
+      unless invalid_keys.empty?
+        Excon.display_warning("Invalid Excon #{validation} keys: #{invalid_keys.map(&:inspect).join(', ')}")
+      end
+    end
+
+    def response(datum={})
+      datum[:stack].response_call(datum)
+    rescue => error
+      case error
+      when Excon::Errors::HTTPStatusError, Excon::Errors::Timeout, Excon::Errors::TooManyRedirects
+        raise(error)
+      else
+        raise_socket_error(error)
+      end
+    end
+
+    def socket(datum = @data)
+      unix_proxy = datum[:proxy] ? datum[:proxy][:scheme] == UNIX : false
+      sockets[@socket_key] ||= if datum[:scheme] == UNIX || unix_proxy
+                                 Excon::UnixSocket.new(datum)
+                               elsif datum[:ssl_uri_schemes].include?(datum[:scheme])
+                                 Excon::SSLSocket.new(datum)
+                               else
+                                 Excon::Socket.new(datum)
+                               end
+    end
+
+    def sockets
+      @_excon_sockets ||= {}
+      @_excon_sockets.compare_by_identity
+
+      if @pid != Process.pid
+        @_excon_sockets.clear # GC will take care of closing sockets
+        @pid = Process.pid
+      end
+
+      if @data[:thread_safe_sockets]
+        # In a multi-threaded world, if the same connection is used by multiple
+        # threads at the same time to connect to the same destination, they may
+        # stomp on each other's sockets. This ensures every thread gets their
+        # own socket cache, within the context of a single connection.
+        @_excon_sockets[Thread.current] ||= {}
+      else
+        @_excon_sockets
+      end
+    end
+
+    def raise_socket_error(error)
+      if error.message =~ /certificate verify failed/
+        raise(Excon::Errors::CertificateError.new(error))
+      else
+        raise(Excon::Errors::SocketError.new(error))
+      end
+    end
+
+    def proxy_match_host_port(host, port)
+      host_match = if host.is_a? IPAddr
+                     begin
+                       host.include? @data[:host]
+                     rescue IPAddr::Error
+                       false
+                     end
+                   else
+                     /(^|\.)#{host}$/.match(@data[:host])
+                   end
+      host_match && (port.nil? || port.to_i == @data[:port])
+    end
+
+    def proxy_from_env
+      if (no_proxy_env = ENV['no_proxy'] || ENV['NO_PROXY'])
+        no_proxy_list = no_proxy_env.scan(/\s*(?:\[([\dA-Fa-f:\/]+)\]|\*?\.?([^\s,:]+))(?::(\d+))?\s*/i).map { |e|
+          if e[0]
+            begin
+              [IPAddr.new(e[0]), e[2]]
+            rescue IPAddr::Error
+              nil
+            end
+          else
+            begin
+              [IPAddr.new(e[1]), e[2]]
+            rescue IPAddr::Error
+              [e[1], e[2]]
+            end
+          end
+        }.reject { |e| e.nil? || e[0].nil?
} + end + + unless no_proxy_env && no_proxy_list.index { |h| proxy_match_host_port(h[0], h[1]) } + if @data[:scheme] == HTTPS && (ENV.has_key?('https_proxy') || ENV.has_key?('HTTPS_PROXY')) + @data[:proxy] = ENV['https_proxy'] || ENV['HTTPS_PROXY'] + elsif (ENV.has_key?('http_proxy') || ENV.has_key?('HTTP_PROXY')) + @data[:proxy] = ENV['http_proxy'] || ENV['HTTP_PROXY'] + end + end + end + + def setup_proxy + if @data[:disable_proxy] + if @data[:proxy] + raise ArgumentError, "`:disable_proxy` parameter and `:proxy` parameter cannot both be set at the same time." + end + return + end + + return if @data[:scheme] == UNIX + + proxy_from_env + + case @data[:proxy] + when nil + @data.delete(:proxy) + when '' + @data.delete(:proxy) + when Hash + # no processing needed + when String, URI + uri = @data[:proxy].is_a?(String) ? URI.parse(@data[:proxy]) : @data[:proxy] + @data[:proxy] = { + :host => uri.host, + :hostname => uri.hostname, + # path is only sensible for a Unix socket proxy + :path => uri.scheme == UNIX ? uri.path : nil, + :port => uri.port, + :scheme => uri.scheme, + } + if uri.password + @data[:proxy][:password] = uri.password + end + if uri.user + @data[:proxy][:user] = uri.user + end + if @data[:ssl_proxy_headers] && !@data[:ssl_uri_schemes].include?(@data[:scheme]) + raise ArgumentError, "The `:ssl_proxy_headers` parameter should only be used with HTTPS requests." + end + if @data[:proxy][:scheme] == UNIX + # URI.parse might return empty string for security reasons. + @data[:proxy][:host] = nil if @data[:proxy][:host] == "" + if @data[:proxy][:host] + raise ArgumentError, "The `:host` parameter should not be set for `unix://` proxies.\n" + + "When supplying a `unix://` URI, it should start with `unix:/` or `unix:///`." + end + else + unless uri.host && uri.port && uri.scheme + raise Excon::Errors::ProxyParse, "Proxy is invalid" + end + end + else + raise Excon::Errors::ProxyParse, "Proxy is invalid" + end + + if @data.has_key?(:proxy) && @data[:scheme] == 'http' + @data[:headers]['Proxy-Connection'] ||= 'Keep-Alive' + # https credentials happen in handshake + if @data[:proxy].has_key?(:user) || @data[:proxy].has_key?(:password) + user, pass = Utils.unescape_form(@data[:proxy][:user].to_s), Utils.unescape_form(@data[:proxy][:password].to_s) + auth = ["#{user}:#{pass}"].pack('m').delete(Excon::CR_NL) + @data[:headers]['Proxy-Authorization'] = 'Basic ' + auth + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/constants.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/constants.rb new file mode 100644 index 0000000..4a67c6c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/constants.rb @@ -0,0 +1,174 @@ +# frozen_string_literal: true +module Excon + + CR_NL = "\r\n" + + DEFAULT_CA_FILE = File.expand_path(File.join(File.dirname(__FILE__), "..", "..", "data", "cacert.pem")) + + DEFAULT_CHUNK_SIZE = 1048576 # 1 megabyte + + # avoid overwrite if somebody has redefined + unless const_defined?(:CHUNK_SIZE) + CHUNK_SIZE = DEFAULT_CHUNK_SIZE + end + + DEFAULT_REDIRECT_LIMIT = 10 + + DEFAULT_RETRY_LIMIT = 4 + + DEFAULT_RETRY_ERRORS = [ + Excon::Error::Timeout, + Excon::Error::Socket, + Excon::Error::HTTPStatus + ] + + FORCE_ENC = CR_NL.respond_to?(:force_encoding) + + HTTP_1_1 = " HTTP/1.1\r\n" + + HTTP_VERBS = %w{connect delete get head options patch post put trace} + + HTTPS = 'https' + + NO_ENTITY = [204, 205, 304].freeze + + REDACTED = 'REDACTED' + + UNIX = 'unix' + + USER_AGENT = "excon/#{VERSION}" + + VERSIONS = 
"#{USER_AGENT} (#{RUBY_PLATFORM}) ruby/#{RUBY_VERSION}" + + VALID_REQUEST_KEYS = [ + :allow_unstubbed_requests, + :body, + :chunk_size, + :debug_request, + :debug_response, + :headers, + :instrumentor, # Used for setting logging within Connection + :logger, + :method, + :middlewares, + :password, + :path, + :persistent, + :pipeline, + :query, + :read_timeout, + :request_block, + :response_block, + :stubs, + :user, + :versions, + :write_timeout + ] + + VALID_CONNECTION_KEYS = VALID_REQUEST_KEYS + [ + :ciphers, + :client_key, + :client_key_data, + :client_key_pass, + :client_cert, + :client_cert_data, + :client_chain, + :client_chain_data, + :certificate, + :certificate_path, + :disable_proxy, + :private_key, + :private_key_path, + :connect_timeout, + :family, + :keepalive, + :host, + :hostname, + :omit_default_port, + :nonblock, + :reuseaddr, + :port, + :proxy, + :scheme, + :socket, + :ssl_ca_file, + :ssl_ca_path, + :ssl_cert_store, + :ssl_verify_callback, + :ssl_verify_peer, + :ssl_verify_peer_host, + :ssl_verify_hostname, + :ssl_version, + :ssl_min_version, + :ssl_max_version, + :ssl_security_level, + :ssl_proxy_headers, + :ssl_uri_schemes, + :tcp_nodelay, + :thread_safe_sockets, + :uri_parser, + ] + + DEPRECATED_VALID_REQUEST_KEYS = { + :captures => 'Mock', + :expects => 'Expects', + :idempotent => 'Idempotent', + :instrumentor_name => 'Instrumentor', + :mock => 'Mock', + :retries_remaining => 'Idempotent', # referenced in Instrumentor, but only relevant with Idempotent + :retry_errors => 'Idempotent', + :retry_interval => 'Idempotent', + :retry_limit => 'Idempotent' # referenced in Instrumentor, but only relevant with Idempotent + } + + unless ::IO.const_defined?(:WaitReadable) + class ::IO + module WaitReadable; end + end + end + + unless ::IO.const_defined?(:WaitWritable) + class ::IO + module WaitWritable; end + end + end + # these come last as they rely on the above + DEFAULTS = { + :chunk_size => CHUNK_SIZE || DEFAULT_CHUNK_SIZE, + # see https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + # list provided then had DES related things sorted to the end + :ciphers => 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:DES-CBC3-SHA:!DSS', + :connect_timeout => 60, + :debug_request => false, + :debug_response => false, + :headers => { + 'User-Agent' => USER_AGENT, + 'Accept' => '*/*' + }, + :idempotent => false, + :instrumentor_name => 'excon', + :middlewares => [ + Excon::Middleware::ResponseParser, + Excon::Middleware::Expects, + Excon::Middleware::Idempotent, + Excon::Middleware::Instrumentor, + Excon::Middleware::Mock + ], + :mock => false, + :nonblock => true, + :omit_default_port => false, + :persistent => false, + :read_timeout => 60, + :retry_errors => DEFAULT_RETRY_ERRORS, + :retry_limit => DEFAULT_RETRY_LIMIT, + :ssl_verify_peer => true, + :ssl_uri_schemes => [HTTPS], + :stubs => :global, + :tcp_nodelay => false, + :thread_safe_sockets => true, + 
:uri_parser => URI, + :versions => VERSIONS, + :write_timeout => 60 + } + +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/error.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/error.rb new file mode 100644 index 0000000..58945a1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/error.rb @@ -0,0 +1,229 @@ +# frozen_string_literal: true +module Excon + # Excon exception classes + class Error < StandardError + @default_status_error = :HTTPStatus + + class StubNotFound < Error; end + class InvalidStub < Error; end + class Warning < Error; end + + # Socket related errors + class Socket < Error + attr_reader :socket_error + + def initialize(socket_error = Excon::Error.new) + if is_a?(Certificate) || is_a?(Excon::Errors::CertificateError) + super + else + super("#{socket_error.message} (#{socket_error.class})") + set_backtrace(socket_error.backtrace) + @socket_error = socket_error + end + end + end + + # Certificate related errors + class Certificate < Socket + def initialize(socket_error = Excon::Error.new) + msg = <<-EOL +Unable to verify certificate. This may be an issue with the remote host or with Excon. Excon has certificates bundled, but these can be customized: + + `Excon.defaults[:ssl_ca_path] = path_to_certs` + `ENV['SSL_CERT_DIR'] = path_to_certs` + `Excon.defaults[:ssl_ca_file] = path_to_file` + `ENV['SSL_CERT_FILE'] = path_to_file` + `Excon.defaults[:ssl_verify_callback] = callback` + (see OpenSSL::SSL::SSLContext#verify_callback) +or: + `Excon.defaults[:ssl_verify_peer] = false` (less secure). + EOL + full_message = "#{socket_error.message} (#{socket_error.class})" + + ' ' + msg + super(full_message) + set_backtrace(socket_error.backtrace) + @socket_error = socket_error + end + end + + class InvalidHeaderKey < Error; end + class InvalidHeaderValue < Error; end + class Timeout < Error; end + class ResponseParse < Error; end + + class ProxyConnectionError < Error + attr_reader :request, :response + + def initialize(msg, request = nil, response = nil) + super(msg) + @request = request + @response = response + end + end + + class ProxyParse < Error; end + class TooManyRedirects < Error; end + + # Base class for HTTP Error classes + class HTTPStatus < Error + attr_reader :request, :response + + def initialize(msg, request = nil, response = nil) + super(msg) + @request = request + @response = response + end + end + + # HTTP Error classes + class Informational < HTTPStatus; end + class Success < HTTPStatus; end + class Redirection < HTTPStatus; end + class Client < HTTPStatus; end + class Server < HTTPStatus; end + + class Continue < Informational; end # 100 + class SwitchingProtocols < Informational; end # 101 + class OK < Success; end # 200 + class Created < Success; end # 201 + class Accepted < Success; end # 202 + class NonAuthoritativeInformation < Success; end # 203 + class NoContent < Success; end # 204 + class ResetContent < Success; end # 205 + class PartialContent < Success; end # 206 + class MultipleChoices < Redirection; end # 300 + class MovedPermanently < Redirection; end # 301 + class Found < Redirection; end # 302 + class SeeOther < Redirection; end # 303 + class NotModified < Redirection; end # 304 + class UseProxy < Redirection; end # 305 + class TemporaryRedirect < Redirection; end # 307 + class BadRequest < Client; end # 400 + class Unauthorized < Client; end # 401 + class PaymentRequired < Client; end # 402 + class Forbidden < Client; end # 403 + class NotFound < Client; end # 404 + class MethodNotAllowed < 
Client; end # 405 + class NotAcceptable < Client; end # 406 + class ProxyAuthenticationRequired < Client; end # 407 + class RequestTimeout < Client; end # 408 + class Conflict < Client; end # 409 + class Gone < Client; end # 410 + class LengthRequired < Client; end # 411 + class PreconditionFailed < Client; end # 412 + class RequestEntityTooLarge < Client; end # 413 + class RequestURITooLong < Client; end # 414 + class UnsupportedMediaType < Client; end # 415 + class RequestedRangeNotSatisfiable < Client; end # 416 + class ExpectationFailed < Client; end # 417 + class UnprocessableEntity < Client; end # 422 + class TooManyRequests < Client; end # 429 + class InternalServerError < Server; end # 500 + class NotImplemented < Server; end # 501 + class BadGateway < Server; end # 502 + class ServiceUnavailable < Server; end # 503 + class GatewayTimeout < Server; end # 504 + + def self.status_errors + @status_errors ||= { + 100 => [Excon::Error::Continue, 'Continue'], + 101 => [Excon::Error::SwitchingProtocols, 'Switching Protocols'], + 200 => [Excon::Error::OK, 'OK'], + 201 => [Excon::Error::Created, 'Created'], + 202 => [Excon::Error::Accepted, 'Accepted'], + 203 => [Excon::Error::NonAuthoritativeInformation, 'Non-Authoritative Information'], + 204 => [Excon::Error::NoContent, 'No Content'], + 205 => [Excon::Error::ResetContent, 'Reset Content'], + 206 => [Excon::Error::PartialContent, 'Partial Content'], + 300 => [Excon::Error::MultipleChoices, 'Multiple Choices'], + 301 => [Excon::Error::MovedPermanently, 'Moved Permanently'], + 302 => [Excon::Error::Found, 'Found'], + 303 => [Excon::Error::SeeOther, 'See Other'], + 304 => [Excon::Error::NotModified, 'Not Modified'], + 305 => [Excon::Error::UseProxy, 'Use Proxy'], + 307 => [Excon::Error::TemporaryRedirect, 'Temporary Redirect'], + 400 => [Excon::Error::BadRequest, 'Bad Request'], + 401 => [Excon::Error::Unauthorized, 'Unauthorized'], + 402 => [Excon::Error::PaymentRequired, 'Payment Required'], + 403 => [Excon::Error::Forbidden, 'Forbidden'], + 404 => [Excon::Error::NotFound, 'Not Found'], + 405 => [Excon::Error::MethodNotAllowed, 'Method Not Allowed'], + 406 => [Excon::Error::NotAcceptable, 'Not Acceptable'], + 407 => [Excon::Error::ProxyAuthenticationRequired, 'Proxy Authentication Required'], + 408 => [Excon::Error::RequestTimeout, 'Request Timeout'], + 409 => [Excon::Error::Conflict, 'Conflict'], + 410 => [Excon::Error::Gone, 'Gone'], + 411 => [Excon::Error::LengthRequired, 'Length Required'], + 412 => [Excon::Error::PreconditionFailed, 'Precondition Failed'], + 413 => [Excon::Error::RequestEntityTooLarge, 'Request Entity Too Large'], + 414 => [Excon::Error::RequestURITooLong, 'Request-URI Too Long'], + 415 => [Excon::Error::UnsupportedMediaType, 'Unsupported Media Type'], + 416 => [Excon::Error::RequestedRangeNotSatisfiable, 'Request Range Not Satisfiable'], + 417 => [Excon::Error::ExpectationFailed, 'Expectation Failed'], + 422 => [Excon::Error::UnprocessableEntity, 'Unprocessable Entity'], + 429 => [Excon::Error::TooManyRequests, 'Too Many Requests'], + 500 => [Excon::Error::InternalServerError, 'InternalServerError'], + 501 => [Excon::Error::NotImplemented, 'Not Implemented'], + 502 => [Excon::Error::BadGateway, 'Bad Gateway'], + 503 => [Excon::Error::ServiceUnavailable, 'Service Unavailable'], + 504 => [Excon::Error::GatewayTimeout, 'Gateway Timeout'] + } + end + + # Messages for nicer exceptions, from rfc2616 + def self.status_error(request, response) + error_class, error_message = status_errors[response[:status]] + if 
error_class.nil? + default_class = Excon::Error.const_get(@default_status_error) + error_class, error_message = [default_class, 'Unknown'] + end + message = StringIO.new + str = "Expected(#{request[:expects].inspect}) <=>" + + " Actual(#{response[:status]} #{error_message})" + message.puts(str) + if request[:debug_request] + message.puts('excon.error.request') + Excon::PrettyPrinter.pp(message, request) + end + + if request[:debug_response] + message.puts('excon.error.response') + Excon::PrettyPrinter.pp(message, response.data) + end + message.rewind + error_class.new(message.read, request, response) + end + end + + # Legacy + module Errors + Excon::Errors::Error = Excon::Error + + legacy_re = / + \A + Client + |Server + |Socket + |Certificate + |HTTPStatus + |InternalServer + \Z + /x + + klasses = Excon::Error.constants.select do |c| + Excon::Error.const_get(c).is_a? Class + end + + klasses.each do |klass| + class_name = klass.to_s + unless class_name =~ /Error\Z/ + class_name = klass.to_s + 'Error' if class_name =~ legacy_re + end + Excon::Errors.const_set(class_name, Excon::Error.const_get(klass)) + end + + def self.status_error(request, response) + Excon::Error.status_error(request, response) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/extensions/uri.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/extensions/uri.rb new file mode 100644 index 0000000..eae9580 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/extensions/uri.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true +# TODO: Remove this monkey patch once ruby 1.9.3+ is the minimum supported version. +# +# This patch backports URI#hostname to ruby 1.9.2 and older. +# URI#hostname is used for IPv6 support in Excon. +# +# URI#hostname was added in stdlib in v1_9_3_0 in this commit: +# https://github.com/ruby/ruby/commit/5fd45a4b79dd26f9e7b6dc41142912df911e4d7d +# +# Addressable::URI is also an URI parser accepted in some parts of Excon. +# Addressable::URI#hostname was added in addressable-2.3.5+ in this commit: +# https://github.com/sporkmonger/addressable/commit/1b94abbec1f914d5f707c92a10efbb9e69aab65e +# +# Users who want to use Addressable::URI to parse URIs must upgrade to 2.3.5 or newer. +require 'uri' +unless URI("http://foo/bar").respond_to?(:hostname) + module URI + class Generic + # extract the host part of the URI and unwrap brackets for IPv6 addresses. + # + # This method is same as URI::Generic#host except + # brackets for IPv6 (and future IP) addresses are removed. + # + # u = URI("http://[::1]/bar") + # p u.hostname #=> "::1" + # p u.host #=> "[::1]" + # + def hostname + v = self.host + /\A\[(.*)\]\z/ =~ v ? $1 : v + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/headers.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/headers.rb new file mode 100644 index 0000000..b29d5a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/headers.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true +module Excon + class Headers < Hash + + SENTINEL = {} + + alias_method :raw_writer, :[]= + alias_method :raw_reader, :[] + if SENTINEL.respond_to?(:assoc) + alias_method :raw_assoc, :assoc + end + alias_method :raw_delete, :delete + alias_method :raw_fetch, :fetch + alias_method :raw_has_key?, :has_key? + alias_method :raw_include?, :include? + alias_method :raw_key?, :key? + alias_method :raw_member?, :member? + alias_method :raw_merge, :merge + alias_method :raw_merge!, :merge! 
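+    # The raw_* aliases keep Hash's case-sensitive methods reachable, while
+    # the overrides below consult @downcased so header access is
+    # case-insensitive. Illustrative sketch:
+    #
+    #   headers = Excon::Headers.new
+    #   headers['Content-Type'] = 'application/json'
+    #   headers['content-type'] # => 'application/json'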
+ alias_method :raw_rehash, :rehash + alias_method :raw_store, :store + alias_method :raw_values_at, :values_at + + def initialize + super + @downcased = {} + end + + def [](key) + @downcased[key.to_s.downcase] + end + + def []=(key, value) + raw_writer(key, value) + @downcased[key.to_s.downcase] = value + end + alias_method :store, :[]= + + if SENTINEL.respond_to? :assoc + def assoc(obj) + @downcased.assoc(obj.downcase) + end + end + + def delete(key, &proc) + raw_delete(key, &proc) + @downcased.delete(key.to_s.downcase, &proc) + end + + def fetch(key, default = nil, &proc) + if proc + @downcased.fetch(key.to_s.downcase, &proc) + else + @downcased.fetch(key.to_s.downcase, default) + end + end + + def has_key?(key) + raw_key?(key) || @downcased.has_key?(key.to_s.downcase) + end + alias_method :key?, :has_key? + alias_method :member?, :has_key? + + def merge(other_hash) + self.dup.merge!(other_hash) + end + + def merge!(other_hash) + other_hash.each do |key, value| + self[key] = value + end + raw_merge!(other_hash) + end + + def rehash + @downcased.rehash + raw_rehash + end + + def values_at(*keys) + @downcased.values_at(*keys.map {|key| key.to_s.downcase}) + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/logging_instrumentor.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/logging_instrumentor.rb new file mode 100644 index 0000000..9a68080 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/logging_instrumentor.rb @@ -0,0 +1,48 @@ +require 'logger' + +module Excon + class LoggingInstrumentor + + def self.instrument(name, params = {}) + params = params.dup + + logger = params[:logger] || Logger.new($stderr) + + # reduce duplication/noise of output + params.delete(:connection) + params.delete(:stack) + + if params.has_key?(:headers) && params[:headers].has_key?('Authorization') + params[:headers] = params[:headers].dup + params[:headers]['Authorization'] = "REDACTED" + end + + if params.has_key?(:password) + params[:password] = "REDACTED" + end + + if name.include?('request') + info = "request: " + params[:scheme] + "://" + File.join(params[:host], params[:path]) + + if params[:query] + info << "?" + + if params[:query].is_a?(Hash) + info << params[:query].to_a.map { |key,value| "#{key}=#{value}" }.join('&') + else + info << params[:query] + end + end + else + response_type = name.split('.').last + if params[:body] + info = "#{response_type}: " + params[:body] + end + end + + logger.info(info) if info + + yield if block_given? + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/standard_instrumentor.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/standard_instrumentor.rb new file mode 100644 index 0000000..e19dce2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/instrumentors/standard_instrumentor.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true +module Excon + class StandardInstrumentor + def self.instrument(name, params = {}) + params = params.dup + + # reduce duplication/noise of output + params.delete(:connection) + params.delete(:stack) + + params = Utils.redact(params) + + $stderr.puts(name) + Excon::PrettyPrinter.pp($stderr, params) + + if block_given? 
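+      # #instrument prints the redacted event and then yields so the wrapped
+      # request still runs, mirroring ActiveSupport::Notifications. It is wired
+      # up via (illustrative): Excon.new(url, :instrumentor => Excon::StandardInstrumentor)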
+ yield + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/base.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/base.rb new file mode 100644 index 0000000..be3fe35 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/base.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true +module Excon + module Middleware + class Base + # Returns the list of parameters that this middleware uses that are valid + # as arguments to `Connection#request` or `Connection#new`. + def self.valid_parameter_keys + [] + end + + def initialize(stack) + @stack = stack + end + + def error_call(datum) + # do stuff + @stack.error_call(datum) + end + + def request_call(datum) + # do stuff + @stack.request_call(datum) + end + + def response_call(datum) + @stack.response_call(datum) + # do stuff + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/capture_cookies.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/capture_cookies.rb new file mode 100644 index 0000000..e642592 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/capture_cookies.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true +module Excon + module Middleware + class CaptureCookies < Excon::Middleware::Base + + def extract_cookies_from_set_cookie(set_cookie) + set_cookie.split(',').map { |full| full.split(';').first.strip }.join('; ') + end + + def get_header(datum, header) + _, header_value = datum[:response][:headers].detect do |key, _| + key.casecmp(header) == 0 + end + header_value + end + + def response_call(datum) + cookie = get_header(datum, 'Set-Cookie') + if cookie + cookie = extract_cookies_from_set_cookie(cookie) + unless datum[:headers].key?("Cookie") + datum[:headers]["Cookie"] = cookie + else + original_cookies = datum[:headers]["Cookie"] + datum[:headers]["Cookie"] = original_cookies.empty? ? cookie : [original_cookies,cookie].join('; ') + end + end + super(datum) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/decompress.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/decompress.rb new file mode 100644 index 0000000..65e95c2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/decompress.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true +module Excon + module Middleware + class Decompress < Excon::Middleware::Base + + INFLATE_ZLIB_OR_GZIP = 47 # Zlib::MAX_WBITS + 32 + INFLATE_RAW = -15 # Zlib::MAX_WBITS * -1 + + def request_call(datum) + unless datum.has_key?(:response_block) + key = datum[:headers].keys.detect {|k| k.to_s.casecmp('Accept-Encoding') == 0 } || 'Accept-Encoding' + if datum[:headers][key].to_s.empty? + datum[:headers][key] = 'deflate, gzip' + end + end + @stack.request_call(datum) + end + + def response_call(datum) + body = datum[:response][:body] + unless datum.has_key?(:response_block) || body.nil? || body.empty? 
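+          # The body is only inflated when it was fully buffered (no
+          # :response_block) and the server set a Content-Encoding header; the
+          # handled token is popped from that header below so the response
+          # reads as already decoded. This middleware is not in the default
+          # stack; enabling it might look like (illustrative):
+          #   Excon.new(url, :middlewares => Excon.defaults[:middlewares] + [Excon::Middleware::Decompress])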
+ if (key = datum[:response][:headers].keys.detect {|k| k.casecmp('Content-Encoding') == 0 }) + encodings = Utils.split_header_value(datum[:response][:headers][key]) + if (encoding = encodings.last) + if encoding.casecmp('deflate') == 0 + datum[:response][:body] = begin + Zlib::Inflate.new(INFLATE_ZLIB_OR_GZIP).inflate(body) + rescue Zlib::DataError # fallback to raw on error + Zlib::Inflate.new(INFLATE_RAW).inflate(body) + end + encodings.pop + elsif encoding.casecmp('gzip') == 0 || encoding.casecmp('x-gzip') == 0 + datum[:response][:body] = Zlib::GzipReader.new(StringIO.new(body)).read + encodings.pop + end + datum[:response][:headers][key] = encodings.join(', ') + end + end + end + @stack.response_call(datum) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/escape_path.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/escape_path.rb new file mode 100644 index 0000000..33bed67 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/escape_path.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true +module Excon + module Middleware + class EscapePath < Excon::Middleware::Base + def request_call(datum) + # make sure path is encoded, prevent double encoding + datum[:path] = Excon::Utils.escape_uri(Excon::Utils.unescape_uri(datum[:path])) + @stack.request_call(datum) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/expects.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/expects.rb new file mode 100644 index 0000000..cbb6447 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/expects.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true +module Excon + module Middleware + class Expects < Excon::Middleware::Base + def self.valid_parameter_keys + [ + :expects + ] + end + + def response_call(datum) + if datum.has_key?(:expects) && ![*datum[:expects]].include?(datum[:response][:status]) + raise( + Excon::Errors.status_error( + datum.reject {|key,_| key == :response}, + Excon::Response.new(datum[:response]) + ) + ) + else + @stack.response_call(datum) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/idempotent.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/idempotent.rb new file mode 100644 index 0000000..103506f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/idempotent.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true +require 'set' + +module Excon + module Middleware + class Idempotent < Excon::Middleware::Base + def self.valid_parameter_keys + [ + :idempotent, + :retries_remaining, + :retry_errors, + :retry_interval, + :retry_limit + ] + end + + def request_call(datum) + datum[:retries_remaining] ||= datum[:retry_limit] + @stack.request_call(datum) + end + + def error_call(datum) + if datum[:idempotent] + if datum.has_key?(:request_block) + if datum[:request_block].respond_to?(:rewind) + datum[:request_block].rewind + else + Excon.display_warning('Excon requests with a :request_block must implement #rewind in order to be :idempotent.') + datum[:idempotent] = false + end + end + if datum.has_key?(:response_block) && datum[:response_block].respond_to?(:rewind) + datum[:response_block].rewind + end + if datum.has_key?(:pipeline) + Excon.display_warning("Excon requests can not be :idempotent when pipelining.") + datum[:idempotent] = false + end + end + + if datum[:idempotent] && 
datum[:retry_errors].any? {|ex| datum[:error].kind_of?(ex) } && datum[:retries_remaining] > 1 + + sleep(datum[:retry_interval]) if datum[:retry_interval] + + # reduces remaining retries, reset connection, and restart request_call + datum[:retries_remaining] -= 1 + connection = datum.delete(:connection) + valid_keys = Set.new(connection.valid_request_keys(datum[:middlewares])) + datum.select! {|key, _| valid_keys.include?(key) } + connection.request(datum) + else + @stack.error_call(datum) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/instrumentor.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/instrumentor.rb new file mode 100644 index 0000000..7ff9fec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/instrumentor.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true +module Excon + module Middleware + class Instrumentor < Excon::Middleware::Base + def self.valid_parameter_keys + [ + :logger, + :instrumentor, + :instrumentor_name + ] + end + + def error_call(datum) + if datum.has_key?(:instrumentor) + datum[:instrumentor].instrument("#{datum[:instrumentor_name]}.error", :error => datum[:error]) do + @stack.error_call(datum) + end + else + @stack.error_call(datum) + end + end + + def request_call(datum) + if datum.has_key?(:instrumentor) + if datum[:retries_remaining] < datum[:retry_limit] + event_name = "#{datum[:instrumentor_name]}.retry" + else + event_name = "#{datum[:instrumentor_name]}.request" + end + datum[:instrumentor].instrument(event_name, datum) do + @stack.request_call(datum) + end + else + @stack.request_call(datum) + end + end + + def response_call(datum) + if datum.has_key?(:instrumentor) + datum[:instrumentor].instrument("#{datum[:instrumentor_name]}.response", datum[:response]) do + @stack.response_call(datum) + end + else + @stack.response_call(datum) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/mock.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/mock.rb new file mode 100644 index 0000000..2f018ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/mock.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true +module Excon + module Middleware + class Mock < Excon::Middleware::Base + def self.valid_parameter_keys + [ + :allow_unstubbed_requests, + :captures, + :mock + ] + end + + def request_call(datum) + if datum[:mock] + # convert File/Tempfile body to string before matching: + if datum[:body].respond_to?(:read) + if datum[:body].respond_to?(:binmode) + datum[:body].binmode + end + if datum[:body].respond_to?(:rewind) + datum[:body].rewind + end + datum[:body] = datum[:body].read + elsif !datum[:body].nil? && !datum[:body].is_a?(String) + raise Excon::Errors::InvalidStub.new("Request body should be a string or an IO object. 
#{datum[:body].class} provided") + end + + if (stub = Excon.stub_for(datum)) + datum[:remote_ip] ||= '127.0.0.1' + datum[:response] = { + :body => '', + :headers => {}, + :status => 200, + :remote_ip => datum[:remote_ip] + } + + stub_datum = case stub.last + when Proc + stub.last.call(datum) + else + stub.last + end + + datum[:response].merge!(stub_datum.reject {|key,_| key == :headers}) + if stub_datum.has_key?(:headers) + datum[:response][:headers].merge!(stub_datum[:headers]) + end + elsif datum[:allow_unstubbed_requests] != true + # if we reach here no stubs matched + message = StringIO.new + message.puts('no stubs matched') + Excon::PrettyPrinter.pp(message, datum) + raise(Excon::Errors::StubNotFound.new(message.string)) + end + end + + @stack.request_call(datum) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/redirect_follower.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/redirect_follower.rb new file mode 100644 index 0000000..dd494b8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/redirect_follower.rb @@ -0,0 +1,87 @@ +# frozen_string_literal: true +module Excon + module Middleware + class RedirectFollower < Excon::Middleware::Base + def self.valid_parameter_keys + [ + :redirects_remaining, + :redirect_limit + ] + end + + def request_call(datum) + datum[:redirects_remaining] ||= datum[:redirect_limit] || + Excon::DEFAULT_REDIRECT_LIMIT + @stack.request_call(datum) + end + + def get_header(datum, header) + _, header_value = datum[:response][:headers].detect do |key, _| + key.casecmp(header) == 0 + end + header_value + end + + def response_call(datum) + if datum.has_key?(:response) + case datum[:response][:status] + when 301, 302, 303, 307, 308 + if datum[:redirects_remaining] <= 0 + raise Excon::Errors::TooManyRedirects + end + + datum[:redirects_remaining] -= 1 + + uri_parser = datum[:uri_parser] || Excon.defaults[:uri_parser] + + location = get_header(datum, 'Location') + base_uri = Excon::Utils.request_uri(datum) + uri = uri_parser.join(base_uri, location) + + # delete old/redirect response and remote_ip + response = datum.delete(:response) + datum.delete(:remote_ip) + + params = datum.dup + params.delete(:connection) + params.delete(:password) + params.delete(:stack) + params.delete(:user) + + if [301, 302, 303].include?(response[:status]) + params[:method] = :get + params.delete(:body) + params[:headers].delete('Content-Length') + end + params[:headers] = datum[:headers].dup + params[:headers].delete('Authorization') + params[:headers].delete('Proxy-Connection') + params[:headers].delete('Proxy-Authorization') + params[:headers].delete('Host') + params.merge!( + :scheme => uri.scheme || datum[:scheme], + :host => uri.host || datum[:host], + :hostname => uri.hostname || datum[:hostname], + :port => uri.port || datum[:port], + :path => uri.path, + :query => uri.query + ) + + params.merge!(:user => Utils.unescape_uri(uri.user)) if uri.user + params.merge!(:password => Utils.unescape_uri(uri.password)) if uri.password + + response = Excon::Connection.new(params).request + datum.merge!({ + :remote_ip => response.remote_ip, + :response => response.data + }) + else + @stack.response_call(datum) + end + else + @stack.response_call(datum) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/response_parser.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/response_parser.rb new file mode 100644 index 
0000000..93a07cf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/middlewares/response_parser.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true +module Excon + module Middleware + class ResponseParser < Excon::Middleware::Base + def response_call(datum) + unless datum.has_key?(:response) + datum = Excon::Response.parse(datum[:connection].send(:socket), datum) + end + if datum.has_key?(:logger) + datum[:response][:logger] = datum[:logger] + end + @stack.response_call(datum) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/pretty_printer.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/pretty_printer.rb new file mode 100644 index 0000000..5aeed26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/pretty_printer.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true +module Excon + class PrettyPrinter + def self.pp(io, datum, indent=0) + datum = datum.dup + + # reduce duplication/noise of output + unless datum.is_a?(Excon::Headers) + datum.delete(:connection) + datum.delete(:stack) + + datum = Utils.redact(datum) + end + + indent += 2 + max_key_length = datum.keys.map {|key| key.inspect.length}.max + datum.keys.sort_by {|key| key.to_s}.each do |key| + value = datum[key] + io.write("#{' ' * indent}#{key.inspect.ljust(max_key_length)} => ") + case value + when Array + io.puts("[") + value.each do |v| + io.puts("#{' ' * indent} #{v.inspect}") + end + io.write("#{' ' * indent}]") + when Hash + io.puts("{") + self.pp(io, value, indent) + io.write("#{' ' * indent}}") + else + io.write("#{value.inspect}") + end + io.puts + end + indent -= 2 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/response.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/response.rb new file mode 100644 index 0000000..de6ea33 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/response.rb @@ -0,0 +1,247 @@ +# frozen_string_literal: true +module Excon + class Response + + attr_accessor :data + + # backwards compatability reader/writers + def body=(new_body) + @data[:body] = new_body + end + def body + @data[:body] + end + def headers=(new_headers) + @data[:headers] = new_headers + end + def headers + @data[:headers] + end + def host + @data[:host] + end + def scheme + @data[:scheme] + end + def local_address + @data[:local_address] + end + def local_port + @data[:local_port] + end + def http_method # can't be named "method" + @data[:method] + end + def path + @data[:path] + end + def query + @data[:query] + end + def port + @data[:port] + end + def reason_phrase=(new_reason_phrase) + @data[:reason_phrase] = new_reason_phrase + end + def reason_phrase + @data[:reason_phrase] + end + def remote_ip=(new_remote_ip) + @data[:remote_ip] = new_remote_ip + end + def remote_ip + @data[:remote_ip] + end + def status=(new_status) + @data[:status] = new_status + end + def status + @data[:status] + end + def status_line + @data[:status_line] + end + def status_line=(new_status_line) + @data[:status_line] = new_status_line + end + + def self.parse(socket, datum) + # this will discard any trailing lines from the previous response if any. 
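+ # (keep reading until the three bytes at offset 9 parse as a non-zero integer, i.e. the status code of a real HTTP status line such as "HTTP/1.1 200 OK")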
+ line = nil + loop do + line = socket.readline + break if line[9,3].to_i != 0 + end + + status = line[9, 3].to_i + reason_phrase = line[13..-3] # -3 strips the trailing "\r\n" + + datum[:response] = { + :body => String.new, + :cookies => [], + :host => datum[:host], + :scheme => datum[:scheme], + :method => datum[:method], + :headers => Excon::Headers.new, + :path => datum[:path], + :query => datum[:query], + :port => datum[:port], + :omit_default_port => datum[:omit_default_port], + :status => status, + :status_line => line, + :reason_phrase => reason_phrase + } + + unix_proxy = datum[:proxy] ? datum[:proxy][:scheme] == UNIX : false + unless datum[:scheme] == UNIX || unix_proxy + datum[:response].merge!( + :remote_ip => socket.remote_ip, + :local_port => socket.local_port, + :local_address => socket.local_address + ) + end + + parse_headers(socket, datum) + + unless (['HEAD', 'CONNECT'].include?(datum[:method].to_s.upcase)) || NO_ENTITY.include?(datum[:response][:status]) + + if (key = datum[:response][:headers].keys.detect {|k| k.casecmp('Transfer-Encoding') == 0 }) + encodings = Utils.split_header_value(datum[:response][:headers][key]) + if (encoding = encodings.last) && encoding.casecmp('chunked') == 0 + transfer_encoding_chunked = true + if encodings.length == 1 + datum[:response][:headers].delete(key) + else + datum[:response][:headers][key] = encodings[0...-1].join(', ') + end + end + end + + # use :response_block unless :expects would fail + if (response_block = datum[:response_block]) + if datum[:middlewares].include?(Excon::Middleware::Expects) && datum[:expects] && + !Array(datum[:expects]).include?(datum[:response][:status]) + response_block = nil + end + end + + if transfer_encoding_chunked + if response_block + while (chunk_size = socket.readline.chomp!.to_i(16)) > 0 + while chunk_size > 0 + chunk = socket.read(chunk_size) || raise(EOFError) + chunk_size -= chunk.bytesize + response_block.call(chunk, nil, nil) + end + new_line_size = 2 # 2 == "\r\n".length + while new_line_size > 0 + chunk = socket.read(new_line_size) || raise(EOFError) + new_line_size -= chunk.length + end + end + else + while (chunk_size = socket.readline.chomp!.to_i(16)) > 0 + while chunk_size > 0 + chunk = socket.read(chunk_size) || raise(EOFError) + chunk_size -= chunk.bytesize + datum[:response][:body] << chunk + end + new_line_size = 2 # 2 == "\r\n".length + while new_line_size > 0 + chunk = socket.read(new_line_size) || raise(EOFError) + new_line_size -= chunk.length + end + end + end + parse_headers(socket, datum) # merge trailers into headers + else + if (key = datum[:response][:headers].keys.detect {|k| k.casecmp('Content-Length') == 0 }) + content_length = datum[:response][:headers][key].to_i + end + + if (remaining = content_length) + if response_block + while remaining > 0 + chunk = socket.read([datum[:chunk_size], remaining].min) || raise(EOFError) + response_block.call(chunk, [remaining - chunk.bytesize, 0].max, content_length) + remaining -= chunk.bytesize + end + else + while remaining > 0 + chunk = socket.read([datum[:chunk_size], remaining].min) || raise(EOFError) + datum[:response][:body] << chunk + remaining -= chunk.bytesize + end + end + else + if response_block + while (chunk = socket.read(datum[:chunk_size])) + response_block.call(chunk, nil, nil) + end + else + while (chunk = socket.read(datum[:chunk_size])) + datum[:response][:body] << chunk + end + end + end + end + end + datum + end + + def self.parse_headers(socket, datum) + last_key = nil + until (data = 
socket.readline.chomp).empty? + if !data.lstrip!.nil? + raise Excon::Error::ResponseParse, 'malformed header' unless last_key + # append to last_key's last value + datum[:response][:headers][last_key] << ' ' << data.rstrip + else + key, value = data.split(':', 2) + raise Excon::Error::ResponseParse, 'malformed header' unless value + # add key/value or append value to existing values + datum[:response][:headers][key] = ([datum[:response][:headers][key]] << value.strip).compact.join(', ') + if key.casecmp('Set-Cookie') == 0 + datum[:response][:cookies] << value.strip + end + last_key = key + end + end + end + + def initialize(params={}) + @data = { + :body => '' + }.merge(params) + @data[:headers] = Excon::Headers.new.merge!(params[:headers] || {}) + + @body = @data[:body] + @headers = @data[:headers] + @status = @data[:status] + @remote_ip = @data[:remote_ip] + @local_port = @data[:local_port] + @local_address = @data[:local_address] + end + + def [](key) + @data[key] + end + + def params + Excon.display_warning('Excon::Response#params is deprecated use Excon::Response#data instead.') + data + end + + def pp + Excon::PrettyPrinter.pp($stdout, @data) + end + + # Retrieve a specific header value. Header names are treated case-insensitively. + # @param [String] name Header name + def get_header(name) + headers[name] + end + + end # class Response +end # module Excon diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/socket.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/socket.rb new file mode 100644 index 0000000..7097fb5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/socket.rb @@ -0,0 +1,318 @@ +# frozen_string_literal: true +require 'resolv' + +module Excon + class Socket + include Utils + + extend Forwardable + + attr_accessor :data + + # read/write drawn from https://github.com/ruby-amqp/bunny/commit/75d9dd79551b31a5dd3d1254c537bad471f108cf + CONNECT_RETRY_EXCEPTION_CLASSES = if defined?(IO::EINPROGRESSWaitWritable) # Ruby >= 2.1 + [Errno::EINPROGRESS, IO::EINPROGRESSWaitWritable] + else # Ruby <= 2.0 + [Errno::EINPROGRESS] + end + READ_RETRY_EXCEPTION_CLASSES = if defined?(IO::EAGAINWaitReadable) # Ruby >= 2.1 + [Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable, IO::EAGAINWaitReadable, IO::EWOULDBLOCKWaitReadable] + else # Ruby <= 2.0 + [Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable] + end + WRITE_RETRY_EXCEPTION_CLASSES = if defined?(IO::EAGAINWaitWritable) # Ruby >= 2.1 + [Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable, IO::EAGAINWaitWritable, IO::EWOULDBLOCKWaitWritable] + else # Ruby <= 2.0 + [Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable] + end + + def params + Excon.display_warning('Excon::Socket#params is deprecated use Excon::Socket#data instead.') + @data + end + + def params=(new_params) + Excon.display_warning('Excon::Socket#params= is deprecated use Excon::Socket#data= instead.') + @data = new_params + end + + attr_reader :remote_ip + + def_delegators(:@socket, :close) + + + def initialize(data = {}) + @data = data + @nonblock = data[:nonblock] + @port ||= @data[:port] || 80 + @read_buffer = String.new + @eof = false + @backend_eof = false + connect + end + + def read(max_length = nil) + if @eof + return max_length ? nil : '' + elsif @nonblock + read_nonblock(max_length) + else + read_block(max_length) + end + end + + def readline + if @nonblock && RUBY_VERSION.to_f > 1.8_7 + result = String.new + block = @read_buffer + @read_buffer = String.new + + loop do + idx = block.index("\n") + if idx.nil? 
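+ # no newline in this chunk yet: keep the whole block and read another chunk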
+ result << block + else + result << block.slice!(0, idx+1) + add_to_read_buffer(block) + break + end + block = read_nonblock(@data[:chunk_size]) || raise(EOFError) + end + result + else # nonblock/legacy + begin + Timeout.timeout(@data[:read_timeout]) do + @socket.readline + end + rescue Timeout::Error + raise Excon::Errors::Timeout.new('read timeout reached') + end + end + end + + def write(data) + if @nonblock + write_nonblock(data) + else + write_block(data) + end + end + + def local_address + unpacked_sockaddr[1] + end + + def local_port + unpacked_sockaddr[0] + end + + private + + def connect + @socket = nil + exception = nil + hostname = @data[:hostname] + port = @port + family = @data[:family] + + if @data[:proxy] + hostname = @data[:proxy][:hostname] + port = @data[:proxy][:port] + family = @data[:proxy][:family] + end + + Resolv.each_address(hostname) do |ip| + # already succeeded on previous addrinfo + if @socket + break + end + + @remote_ip = ip + @data[:remote_ip] = ip + + # nonblocking connect + begin + sockaddr = ::Socket.sockaddr_in(port, ip) + addrinfo = Addrinfo.getaddrinfo(ip, port, family, :STREAM).first + socket = ::Socket.new(addrinfo.pfamily, addrinfo.socktype, addrinfo.protocol) + + if @data[:reuseaddr] + socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEADDR, true) + if defined?(::Socket::Constants::SO_REUSEPORT) + socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEPORT, true) + end + end + + if @nonblock + socket.connect_nonblock(sockaddr) + else + socket.connect(sockaddr) + end + @socket = socket + rescue *CONNECT_RETRY_EXCEPTION_CLASSES + select_with_timeout(socket, :connect_write) + begin + socket.connect_nonblock(sockaddr) + @socket = socket + rescue Errno::EISCONN + @socket = socket + rescue SystemCallError => exception + socket.close rescue nil + end + rescue SystemCallError => exception + socket.close rescue nil if socket + end + end + + exception ||= Resolv::ResolvError.new("no address for #{hostname}") + + # this will be our last encountered exception + fail exception unless @socket + + if @data[:tcp_nodelay] + @socket.setsockopt(::Socket::IPPROTO_TCP, + ::Socket::TCP_NODELAY, + true) + end + + if @data[:keepalive] + if [:SOL_SOCKET, :SO_KEEPALIVE, :SOL_TCP, :TCP_KEEPIDLE, :TCP_KEEPINTVL, :TCP_KEEPCNT].all?{|c| ::Socket.const_defined? c} + @socket.setsockopt(::Socket::SOL_SOCKET, ::Socket::SO_KEEPALIVE, true) + @socket.setsockopt(::Socket::SOL_TCP, ::Socket::TCP_KEEPIDLE, @data[:keepalive][:time]) + @socket.setsockopt(::Socket::SOL_TCP, ::Socket::TCP_KEEPINTVL, @data[:keepalive][:intvl]) + @socket.setsockopt(::Socket::SOL_TCP, ::Socket::TCP_KEEPCNT, @data[:keepalive][:probes]) + else + Excon.display_warning('Excon::Socket keepalive was set, but is not supported by Ruby version.') + end + end + end + + def add_to_read_buffer(str) + @read_buffer << str + @eof = false + end + + def read_nonblock(max_length) + begin + if max_length + until @backend_eof || @read_buffer.length >= max_length + @read_buffer << @socket.read_nonblock(max_length - @read_buffer.length) + end + else + while !@backend_eof + @read_buffer << @socket.read_nonblock(@data[:chunk_size]) + end + end + rescue OpenSSL::SSL::SSLError => error + if error.message == 'read would block' + if @read_buffer.empty? + select_with_timeout(@socket, :read) && retry + end + else + raise(error) + end + rescue *READ_RETRY_EXCEPTION_CLASSES + if @read_buffer.empty? + # if we didn't read anything, try again... 
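+ # ...otherwise (partial data already buffered) fall through and return it below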
+ select_with_timeout(@socket, :read) && retry + end + rescue EOFError + @backend_eof = true + end + + ret = if max_length + if @read_buffer.empty? + nil # EOF met at beginning + else + @read_buffer.slice!(0, max_length) + end + else + # read until EOFError, so return everything + @read_buffer.slice!(0, @read_buffer.length) + end + @eof = @backend_eof && @read_buffer.empty? + ret + end + + def read_block(max_length) + @socket.read(max_length) + rescue OpenSSL::SSL::SSLError => error + if error.message == 'read would block' + select_with_timeout(@socket, :read) && retry + else + raise(error) + end + rescue *READ_RETRY_EXCEPTION_CLASSES + select_with_timeout(@socket, :read) && retry + rescue EOFError + @eof = true + end + + def write_nonblock(data) + data = binary_encode(data) + loop do + written = nil + begin + # I wish that this API accepted a start position, then we wouldn't + # have to slice data when there is a short write. + written = @socket.write_nonblock(data) + rescue Errno::EFAULT => error + if OpenSSL.const_defined?(:OPENSSL_LIBRARY_VERSION) && OpenSSL::OPENSSL_LIBRARY_VERSION.split(' ')[1] == '1.0.2' + msg = "The version of OpenSSL this ruby is built against (1.0.2) has a vulnerability + which causes a fault. For more, see https://github.com/excon/excon/issues/467" + raise SecurityError.new(msg) + else + raise error + end + rescue OpenSSL::SSL::SSLError, *WRITE_RETRY_EXCEPTION_CLASSES => error + if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block' + raise error + else + select_with_timeout(@socket, :write) && retry + end + end + + # Fast, common case. + break if written == data.size + + # This takes advantage of the fact that most ruby implementations + # have Copy-On-Write strings. Thusly why requesting a subrange + # of data, we actually don't copy data because the new string + # simply references a subrange of the original. + data = data[written, data.size] + end + end + + def write_block(data) + @socket.write(data) + rescue OpenSSL::SSL::SSLError, *WRITE_RETRY_EXCEPTION_CLASSES => error + if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block' + raise error + else + select_with_timeout(@socket, :write) && retry + end + end + + def select_with_timeout(socket, type) + select = case type + when :connect_read + IO.select([socket], nil, nil, @data[:connect_timeout]) + when :connect_write + IO.select(nil, [socket], nil, @data[:connect_timeout]) + when :read + IO.select([socket], nil, nil, @data[:read_timeout]) + when :write + IO.select(nil, [socket], nil, @data[:write_timeout]) + end + select || raise(Excon::Errors::Timeout.new("#{type} timeout reached")) + end + + def unpacked_sockaddr + @unpacked_sockaddr ||= ::Socket.unpack_sockaddr_in(@socket.to_io.getsockname) + rescue ArgumentError => e + unless e.message == 'not an AF_INET/AF_INET6 sockaddr' + raise + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/ssl_socket.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/ssl_socket.rb new file mode 100644 index 0000000..6c8a5a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/ssl_socket.rb @@ -0,0 +1,212 @@ +# frozen_string_literal: true +module Excon + class SSLSocket < Socket + HAVE_NONBLOCK = [:connect_nonblock, :read_nonblock, :write_nonblock].all? 
do |m| + OpenSSL::SSL::SSLSocket.public_method_defined?(m) + end + + def initialize(data = {}) + @port = data[:port] || 443 + super + + # create ssl context + ssl_context = OpenSSL::SSL::SSLContext.new + + # set the security level before setting other parameters affected by it + if @data[:ssl_security_level] + ssl_context.security_level = @data[:ssl_security_level] + end + + # disable less secure options, when supported + ssl_context_options = OpenSSL::SSL::SSLContext::DEFAULT_PARAMS[:options] + if defined?(OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS) + ssl_context_options &= ~OpenSSL::SSL::OP_DONT_INSERT_EMPTY_FRAGMENTS + end + + if defined?(OpenSSL::SSL::OP_NO_COMPRESSION) + ssl_context_options |= OpenSSL::SSL::OP_NO_COMPRESSION + end + ssl_context.options = ssl_context_options + + ssl_context.ciphers = @data[:ciphers] + if @data[:ssl_version] + ssl_context.ssl_version = @data[:ssl_version] + end + if @data[:ssl_min_version] + ssl_context.min_version = @data[:ssl_min_version] + end + if @data[:ssl_max_version] + ssl_context.max_version = @data[:ssl_max_version] + end + + + if @data[:ssl_verify_peer] + # turn verification on + ssl_context.verify_mode = OpenSSL::SSL::VERIFY_PEER + + if (ca_file = @data[:ssl_ca_file] || ENV['SSL_CERT_FILE']) + ssl_context.ca_file = ca_file + end + if (ca_path = @data[:ssl_ca_path] || ENV['SSL_CERT_DIR']) + ssl_context.ca_path = ca_path + end + if (cert_store = @data[:ssl_cert_store]) + ssl_context.cert_store = cert_store + end + + if cert_store.nil? + ssl_context.cert_store = OpenSSL::X509::Store.new + ssl_context.cert_store.set_default_paths + end + + # no defaults, fallback to bundled + unless ca_file || ca_path || cert_store + # workaround issue #257 (JRUBY-6970) + ca_file = DEFAULT_CA_FILE + ca_file = ca_file.gsub(/^jar:/, '') if ca_file =~ /^jar:file:\// + + begin + ssl_context.cert_store.add_file(ca_file) + rescue + Excon.display_warning("Excon unable to add file to cert store, ignoring: #{ca_file}\n[#{$!.class}] #{$!.message}") + end + end + + if (verify_callback = @data[:ssl_verify_callback]) + ssl_context.verify_callback = verify_callback + end + else + # turn verification off + ssl_context.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + + # Verify certificate hostname if supported (ruby >= 2.4.0) + ssl_context.verify_hostname = @data[:ssl_verify_hostname] if ssl_context.respond_to?(:verify_hostname=) + + if client_cert_data && client_key_data + ssl_context.cert = OpenSSL::X509::Certificate.new client_cert_data + if OpenSSL::PKey.respond_to? :read + ssl_context.key = OpenSSL::PKey.read(client_key_data, client_key_pass) + else + ssl_context.key = OpenSSL::PKey::RSA.new(client_key_data, client_key_pass) + end + if client_chain_data && OpenSSL::X509::Certificate.respond_to?(:load) + ssl_context.extra_chain_cert = OpenSSL::X509::Certificate.load(client_chain_data) + elsif client_chain_data + certs = client_chain_data.scan(/-----BEGIN CERTIFICATE-----(?:.|\n)+?-----END CERTIFICATE-----/) + ssl_context.extra_chain_cert = certs.map do |cert| + OpenSSL::X509::Certificate.new(cert) + end + end + elsif @data.key?(:certificate) && @data.key?(:private_key) + ssl_context.cert = OpenSSL::X509::Certificate.new(@data[:certificate]) + if OpenSSL::PKey.respond_to? 
:read + ssl_context.key = OpenSSL::PKey.read(@data[:private_key], client_key_pass) + else + ssl_context.key = OpenSSL::PKey::RSA.new(@data[:private_key], client_key_pass) + end + end + + if @data[:proxy] + request = "CONNECT #{@data[:host]}#{port_string(@data.merge(:omit_default_port => false))}#{Excon::HTTP_1_1}" + + "Host: #{@data[:host]}#{port_string(@data)}#{Excon::CR_NL}" + + if @data[:proxy].has_key?(:user) || @data[:proxy].has_key?(:password) + user, pass = Utils.unescape_form(@data[:proxy][:user].to_s), Utils.unescape_form(@data[:proxy][:password].to_s) + auth = ["#{user}:#{pass}"].pack('m').delete(Excon::CR_NL) + request += "Proxy-Authorization: Basic #{auth}#{Excon::CR_NL}" + end + + request += "Proxy-Connection: Keep-Alive#{Excon::CR_NL}" + + if @data[:ssl_proxy_headers] + request << Utils.headers_hash_to_s(@data[:ssl_proxy_headers]) + end + + request += Excon::CR_NL + + # write out the proxy setup request + @socket.write(request) + + # eat the proxy's connection response + response = Excon::Response.parse(self, :expects => 200, :method => 'CONNECT') + if response[:response][:status] != 200 + raise(Excon::Errors::ProxyConnectionError.new("proxy connection could not be established", request, response)) + end + end + + # convert Socket to OpenSSL::SSL::SSLSocket + @socket = OpenSSL::SSL::SSLSocket.new(@socket, ssl_context) + @socket.sync_close = true + + # Server Name Indication (SNI) RFC 3546 + if @socket.respond_to?(:hostname=) + @socket.hostname = @data[:ssl_verify_peer_host] || @data[:host] + end + + begin + if @nonblock + begin + @socket.connect_nonblock + rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable + select_with_timeout(@socket, :connect_read) && retry + rescue IO::WaitWritable + select_with_timeout(@socket, :connect_write) && retry + end + else + @socket.connect + end + rescue Errno::ETIMEDOUT, Timeout::Error + raise Excon::Errors::Timeout.new('connect timeout reached') + end + + # verify connection + if @data[:ssl_verify_peer] + @socket.post_connection_check(@data[:ssl_verify_peer_host] || @data[:host]) + end + end + + private + + def client_cert_data + @client_cert_data ||= if (ccd = @data[:client_cert_data]) + ccd + elsif (path = @data[:client_cert]) + File.read path + elsif (path = @data[:certificate_path]) + warn ":certificate_path is no longer supported and will be deprecated. Please use :client_cert or :client_cert_data" + File.read path + end + end + + def client_chain_data + @client_chain_data ||= if (ccd = @data[:client_chain_data]) + ccd + elsif (path = @data[:client_chain]) + File.read path + end + end + + def connect + # backwards compatability for things lacking nonblock + @nonblock = HAVE_NONBLOCK && @nonblock + super + end + + def client_key_data + @client_key_data ||= if (ckd = @data[:client_key_data]) + ckd + elsif (path = @data[:client_key]) + File.read path + elsif (path = @data[:private_key_path]) + warn ":private_key_path is no longer supported and will be deprecated. 
Please use :client_key or :client_key_data" + File.read path + end + end + + def client_key_pass + @data[:client_key_pass] || @data[:private_key_pass] + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/exec.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/exec.rb new file mode 100644 index 0000000..f790d95 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/exec.rb @@ -0,0 +1,26 @@ +module Excon + module Test + module Plugin + module Server + module Exec + def start(app_str = app) + open_process(app_str) + process_stderr = "" + line = '' + until line =~ /\Aready\Z/ + line = error.gets + raise process_stderr if line.nil? + process_stderr << line + fatal_time = elapsed_time > timeout + if fatal_time + msg = "executable #{app_str} has taken too long to start" + raise msg + end + end + true + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/puma.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/puma.rb new file mode 100644 index 0000000..f22b94f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/puma.rb @@ -0,0 +1,23 @@ +module Excon + module Test + module Plugin + module Server + module Puma + def start(app_str = app, bind_uri = bind) + open_process(RbConfig.ruby, '-S', 'puma', '-b', bind_uri.to_s, app_str) + process_stderr = "" + line = '' + until line =~ /Use Ctrl-C to stop/ + line = read.gets + raise process_stderr if line.nil? + process_stderr << line + fatal_time = elapsed_time > timeout + raise 'puma server has taken too long to start' if fatal_time + end + true + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/unicorn.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/unicorn.rb new file mode 100644 index 0000000..6eee827 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/unicorn.rb @@ -0,0 +1,40 @@ +module Excon + module Test + module Plugin + module Server + module Unicorn + def start(app_str = app, bind_uri = bind) + bind_uri = URI.parse(bind_uri) unless bind_uri.is_a? URI::Generic + is_unix_socket = (bind_uri.scheme == "unix") + if is_unix_socket + bind_str = bind_uri.to_s + else + host = bind_uri.host.gsub(/[\[\]]/, '') + bind_str = "#{host}:#{bind_uri.port}" + end + args = [ + RbConfig.ruby, + '-S', + 'unicorn', + '--no-default-middleware', + '-l', + bind_str, + app_str + ] + open_process(*args) + process_stderr = '' + line = '' + until line =~ /worker\=0 ready/ + line = error.gets + raise process_stderr if line.nil? + process_stderr << line + fatal_time = elapsed_time > timeout + raise 'unicorn server has taken too long to start' if fatal_time + end + true + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/webrick.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/webrick.rb new file mode 100644 index 0000000..556d990 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/plugin/server/webrick.rb @@ -0,0 +1,26 @@ +module Excon + module Test + module Plugin + module Server + module Webrick + def start(app_str = app, bind_uri = bind) + bind_uri = URI.parse(bind_uri) unless bind_uri.is_a? 
URI::Generic + host = bind_uri.host.gsub(/[\[\]]/, '') + port = bind_uri.port.to_s + open_process(RbConfig.ruby, '-S', 'rackup', '-s', 'webrick', '--host', host, '--port', port, app_str) + process_stderr = "" + line = '' + until line =~ /HTTPServer#start/ + line = error.gets + raise process_stderr if line.nil? + process_stderr << line + fatal_time = elapsed_time > timeout + raise 'webrick server has taken too long to start' if fatal_time + end + true + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/server.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/server.rb new file mode 100644 index 0000000..077dc31 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/test/server.rb @@ -0,0 +1,106 @@ +require 'open4' +require 'excon' +require 'excon/test/plugin/server/webrick' +require 'excon/test/plugin/server/unicorn' +require 'excon/test/plugin/server/puma' +require 'excon/test/plugin/server/exec' + + +module Excon + module Test + class Server + attr_accessor :app, :server, :bind, :pid, :read, :write, :error, :started_at, :timeout + + # Methods that must be implemented by a plugin + INSTANCE_REQUIRES = [:start] + Excon.defaults.merge!( + connect_timeout: 5, + read_timeout: 5, + write_timeout: 5 + ) + + def initialize(args) + # TODO: Validate these args + @server = args.keys.first + @app = args[server] + args[:bind] ||= 'tcp://127.0.0.1:9292' + @bind = URI.parse(args[:bind]) + @is_unix_socket = (@bind.scheme == 'unix') + @bind.host = @bind.host.gsub(/[\[\]]/, '') unless @is_unix_socket + if args[:timeout] + @timeout = args[:timeout] + else + @timeout = 20 + end + name = @server.to_s.split('_').collect(&:capitalize).join + plug = nested_const_get("Excon::Test::Plugin::Server::#{name}") + self.extend plug + check_implementation(plug) + end + + def open_process(*args) + if RUBY_PLATFORM == 'java' + @pid, @write, @read, @error = IO.popen4(*args) + else + GC.disable if RUBY_VERSION < '1.9' + @pid, @write, @read, @error = Open4.popen4(*args) + end + @started_at = Time.now + end + + def elapsed_time + Time.now - started_at + end + + def stop + if RUBY_PLATFORM == 'java' + Process.kill('USR1', pid) + else + Process.kill(9, pid) + GC.enable if RUBY_VERSION < '1.9' + Process.wait(pid) + end + + if @is_unix_socket + socket = @bind.path + File.delete(socket) if File.exist?(socket) + end + + # TODO: Ensure process is really dead + dump_errors + true + end + def dump_errors + lines = error.read.split($/) + while (line = lines.shift) + case line + when /(ERROR|Error)/ + unless line =~ /(null cert chain|did not return a certificate|SSL_read:: internal error)/ + in_err = true + puts + end + when /^(127|localhost)/ + in_err = false + end + puts line if in_err + end + end + + private + + def nested_const_get(namespace) + namespace.split('::').inject(Object) do |mod, klass| + mod.const_get(klass) + end + end + + def check_implementation(plug) + INSTANCE_REQUIRES.each do |m| + unless self.respond_to? 
m + raise "FATAL: #{plug} does not implement ##{m}" + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/unix_socket.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/unix_socket.rb new file mode 100644 index 0000000..46dee11 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/unix_socket.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true +module Excon + class UnixSocket < Excon::Socket + + private + + def connect + @socket = ::Socket.new(::Socket::AF_UNIX, ::Socket::SOCK_STREAM, 0) + # If a Unix proxy was specified, the :path option will be set for it, + # otherwise fall back to the :socket option. + proxy_path = @data[:proxy] ? @data[:proxy][:path] : nil + sockaddr = ::Socket.sockaddr_un(proxy_path || @data[:socket]) + if @nonblock + begin + @socket.connect_nonblock(sockaddr) + rescue Errno::EINPROGRESS + unless IO.select(nil, [@socket], nil, @data[:connect_timeout]) + raise(Excon::Errors::Timeout.new("connect timeout reached")) + end + begin + @socket.connect_nonblock(sockaddr) + rescue Errno::EISCONN + 0 # same return as connect_nonblock success + end + end + else + begin + Timeout.timeout(@data[:connect_timeout]) do + @socket.connect(sockaddr) + end + rescue Timeout::Error + raise Excon::Errors::Timeout.new('connect timeout reached') + end + end + + rescue => error + @socket.close rescue nil if @socket + raise error + end + + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/utils.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/utils.rb new file mode 100644 index 0000000..d40d7a8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/utils.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true +module Excon + module Utils + extend self + + CONTROL = (0x0..0x1f).map {|c| c.chr }.join + "\x7f" + DELIMS = '<>#%"' + UNWISE = '{}|\\^[]`' + NONASCII = (0x80..0xff).map {|c| c.chr }.join + UNESCAPED = /([#{ Regexp.escape(CONTROL + ' ' + DELIMS + UNWISE + NONASCII) }])/ + ESCAPED = /%([0-9a-fA-F]{2})/ + + def binary_encode(string) + if FORCE_ENC && string.encoding != Encoding::ASCII_8BIT + if string.frozen? + string.dup.force_encoding('BINARY') + else + string.force_encoding('BINARY') + end + else + string + end + end + + def connection_uri(datum = @data) + unless datum + raise ArgumentError, '`datum` must be given unless called on a Connection' + end + if datum[:scheme] == UNIX + "#{datum[:scheme]}://#{datum[:socket]}" + else + "#{datum[:scheme]}://#{datum[:host]}#{port_string(datum)}" + end + end + + # Redact sensitive info from provided data + def redact(datum) + datum = datum.dup + if datum.has_key?(:headers) + if datum[:headers].has_key?('Authorization') || datum[:headers].has_key?('Proxy-Authorization') + datum[:headers] = datum[:headers].dup + end + if datum[:headers].has_key?('Authorization') + datum[:headers]['Authorization'] = REDACTED + end + if datum[:headers].has_key?('Proxy-Authorization') + datum[:headers]['Proxy-Authorization'] = REDACTED + end + end + if datum.has_key?(:password) + datum[:password] = REDACTED + end + if datum.has_key?(:proxy) && datum[:proxy] && datum[:proxy].has_key?(:password) + datum[:proxy] = datum[:proxy].dup + datum[:proxy][:password] = REDACTED + end + datum + end + + def request_uri(datum) + connection_uri(datum) + datum[:path] + query_string(datum) + end + + def port_string(datum) + if datum[:port].nil? 
|| (datum[:omit_default_port] && ((datum[:scheme].casecmp('http') == 0 && datum[:port] == 80) || (datum[:scheme].casecmp('https') == 0 && datum[:port] == 443))) + '' + else + ':' + datum[:port].to_s + end + end + + def query_string(datum) + str = String.new + case datum[:query] + when String + str << '?' << datum[:query] + when Hash + str << '?' + datum[:query].sort_by {|k,_| k.to_s }.each do |key, values| + key = CGI.escape(key.to_s) + if values.nil? + str << key << '&' + else + [values].flatten.each do |value| + str << key << '=' << CGI.escape(value.to_s) << '&' + end + end + end + str.chop! # remove trailing '&' + end + str + end + + # Splits a header value +str+ according to HTTP specification. + def split_header_value(str) + return [] if str.nil? + str = str.dup.strip + str = binary_encode(str) + str.scan(%r'\G((?:"(?:\\.|[^"])+?"|[^",])+) + (?:,\s*|\Z)'xn).flatten + end + + # Escapes HTTP reserved and unwise characters in +str+ + def escape_uri(str) + str = str.dup + str = binary_encode(str) + str.gsub(UNESCAPED) { "%%%02X" % $1[0].ord } + end + + # Unescapes HTTP reserved and unwise characters in +str+ + def unescape_uri(str) + str = str.dup + str = binary_encode(str) + str.gsub(ESCAPED) { $1.hex.chr } + end + + # Unescape form encoded values in +str+ + def unescape_form(str) + str = str.dup + str = binary_encode(str) + str.gsub!(/\+/, ' ') + str.gsub(ESCAPED) { $1.hex.chr } + end + + # Performs validation on the passed header hash and returns a string representation of the headers + def headers_hash_to_s(headers) + headers_str = String.new + headers.each do |key, values| + if key.to_s.match(/[\r\n]/) + raise Excon::Errors::InvalidHeaderKey.new(key.to_s.inspect + ' contains forbidden "\r" or "\n"') + end + [values].flatten.each do |value| + if value.to_s.match(/[\r\n]/) + # Don't include the potentially sensitive header value (i.e. authorization token) in the message + raise Excon::Errors::InvalidHeaderValue.new(key.to_s + ' header value contains forbidden "\r" or "\n"') + end + headers_str << key.to_s << ': ' << value.to_s << CR_NL + end + end + headers_str + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/version.rb b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/version.rb new file mode 100644 index 0000000..f989235 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/excon-0.99.0/lib/excon/version.rb @@ -0,0 +1,4 @@ +# frozen_string_literal: true +module Excon + VERSION = '0.99.0' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/CHANGELOG.md new file mode 100644 index 0000000..e83d566 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/CHANGELOG.md @@ -0,0 +1,380 @@ +# Faraday Changelog + +## [v1.3.0](https://github.com/lostisland/faraday/releases/tag/v1.3.0) (2020-12-31) + +### Highlights +Faraday v1.3.0 is the first release to officially support Ruby 3.0 in the CI pipeline 🎉 🍾! + +This is also the first release with a previously "included" adapter (Net::HTTP) being isolated into a [separate gem](https://github.com/lostisland/faraday-net_http) 🎊! +The new adapter is added to Faraday as a dependency for now, so that means full backwards-compatibility, but just to be safe, be careful when upgrading! + +This is a huge step towards our Faraday v2.0 objective of pushing adapters and middleware into separate gems.
+Many thanks to the Faraday Team, @JanDintel and everyone who attended the [ROSS Conf remote event](https://www.rossconf.io/event/remote/) + +### Features + +* Improves consistency with Faraday::Error and Faraday::RaiseError (#1229, @qsona, @iMacTia) + +### Fixes + +* Don't assign to global ::Timer (#1227, @bpo) + +### Documentation + +* CHANGELOG: add releases after 1.0 (#1225, @olleolleolle) +* Improves retry middleware documentation. (#1228, @iMacTia) + +### Misc + +* Move out Net::HTTP adapter (#1222, @JanDintel, @iMacTia) +* Adds Ruby 3.0 to CI Matrix (#1226, @iMacTia) + + +## [v1.2.0](https://github.com/lostisland/faraday/releases/tag/v1.2.0) (2020-12-23) + +### Features + +* Introduces `on_request` and `on_complete` methods in `Faraday::Middleware`. (#1194, @iMacTia) + +### Fixes + +* Require 'date' to avoid retry exception (#1206, @rustygeldmacher) +* Fix rdebug recursion issue (#1205, @native-api) +* Update call to `em_http_ssl_patch` (#1202, @kylekeesling) +* `EmHttp` adapter: drop superfluous loaded? check (#1213, @olleolleolle) +* Avoid 1 use of keyword hackery (#1211, @grosser) +* Fix #1219 `Net::HTTP` still uses env proxy (#1221, @iMacTia) + +### Documentation + +* Add comment in gemspec to explain exposure of `examples` and `spec` folders. (#1192, @iMacTia) +* Adapters, how to create them (#1193, @olleolleolle) +* Update documentation on using the logger (#1196, @tijmenb) +* Adjust the retry documentation and spec to align with implementation (#1198, @nbeyer) + +### Misc + +* Test against ruby head (#1208, @grosser) + +## [v1.1.0](https://github.com/lostisland/faraday/releases/tag/v1.1.0) (2020-10-17) + +### Features + +* Makes parameters sorting configurable (#1162 @wishdev) +* Introduces `flat_encode` option for multipart adapter. (#1163 @iMacTia) +* Include request info in exceptions raised by RaiseError Middleware (#1181 @SandroDamilano) + +### Fixes + +* Avoid `last arg as keyword param` warning when building user middleware on Ruby 2.7 (#1153 @dgholz) +* Limits net-http-persistent version to < 4.0 (#1156 @iMacTia) +* Update `typhoeus` to new stable version (`1.4`) (#1159 @AlexWayfer) +* Properly fix test failure with Rack 2.1+. (#1171 @voxik) + +### Documentation + +* Improves documentation on how to contribute to the site by using Docker. 
(#1175 @iMacTia) +* Remove retry_change_requests from documentation (#1185 @stim371) + +### Misc + +* Link from GitHub Actions badge to CI workflow (#1141 @olleolleolle) +* Return tests of `Test` adapter (#1147 @AlexWayfer) +* Add 1.0 release to wording in CONTRIBUTING (#1155 @olleolleolle) +* Fix linting bumping Rubocop to 0.90.0 (#1182 @iMacTia) +* Drop `git ls-files` in gemspec (#1183 @utkarsh2102) +* Upgrade CI to ruby/setup-ruby (#1187 @gogainda) + +## [v1.0.1](https://github.com/lostisland/faraday/releases/tag/v1.0.1) (2020-03-29) + +### Fixes + +* Use Net::HTTP#start(&block) to ensure closed TCP connections (#1117) +* Fully qualify constants to be checked (#1122) +* Allows `parse` method to be private/protected in response middleware (#1123) +* Encode Spaces in Query Strings as '%20' Instead of '+' (#1125) +* Limits rack to v2.0.x (#1127) +* Adapter Registry reads also use mutex (#1136) + +### Documentation + +* Retry middleware documentation fix (#1109) +* Docs(retry): precise usage of retry-after (#1111) +* README: Link the logo to the website (#1112) +* Website: add search bar (#1116) +* Fix request/response mix-up in docs text (#1132) + +## v1.0 + +Features: + +* Add #trace support to Faraday::Connection #861 (@technoweenie) +* Add the log formatter that is easy to override and safe to inherit #889 (@prikha) +* Support standalone adapters #941 (@iMacTia) +* Introduce Faraday::ConflictError for 409 response code #979 (@lucasmoreno) +* Add support for setting `read_timeout` option separately #1003 (@springerigor) +* Refactor and cleanup timeout settings across adapters #1022 (@technoweenie) +* Create ParamPart class to allow multipart posts with JSON content and file upload at the same time #1017 (@jeremy-israel) +* Copy UploadIO const -> FilePart for consistency with ParamPart #1018, #1021 (@technoweenie) +* Implement streaming responses in the Excon adapter #1026 (@technoweenie) +* Add default implementation of `Middleware#close`. #1069 (@ioquatix) +* Add `Adapter#close` so that derived classes can call super. #1091 (@ioquatix) +* Add log_level option to logger default formatter #1079 (@amrrbakry) +* Fix empty array for FlatParamsEncoder `{key: []} -> "key="` #1084 (@mrexox) + +Bugs: + +* Explicitly require date for DateTime library in Retry middleware #844 (@nickpresta) +* Refactor Adapter as final endpoints #846 (@iMacTia) +* Separate Request and Response bodies in Faraday::Env #847 (@iMacTia) +* Implement Faraday::Connection#options to make HTTP requests with the OPTIONS verb. #857 (@technoweenie) +* Multipart: Drop Ruby 1.8 String behavior compat #892 (@olleolleolle) +* Fix Ruby warnings in Faraday::Options.memoized #962 (@technoweenie) +* Allow setting min/max SSL version for a Net::HTTP::Persistent connection #972, #973 (@bdewater, @olleolleolle) +* Fix instances of frozen empty string literals #1040 (@BobbyMcWho) +* remove temp_proxy and improve proxy tests #1063 (@technoweenie) +* improve error initializer consistency #1095 (@technoweenie) + +Misc: + +* Convert minitest suite to RSpec #832 (@iMacTia, with help from @gaynetdinov, @Insti, @technoweenie) +* Major effort to update code to RuboCop standards. 
#854 (@olleolleolle, @iMacTia, @technoweenie, @htwroclau, @jherdman, @Drenmi, @Insti) +* Rubocop #1044, #1047 (@BobbyMcWho, @olleolleolle) +* Documentation tweaks (@adsteel, @Hubro, @iMacTia, @olleolleolle, @technoweenie) +* Update license year #981 (@Kevin-Kawai) +* Configure Jekyll plugin jekyll-remote-theme to support Docker usage #999 (@Lewiscowles1986) +* Fix Ruby 2.7 warnings #1009 (@tenderlove) +* Cleanup adapter connections #1023 (@technoweenie) +* Describe clearing cached stubs #1045 (@viraptor) +* Add project metadata to the gemspec #1046 (@orien) + +## v0.17.3 + +Fixes: + +* Reverts changes in error classes hierarchy. #1092 (@iMacTia) +* Fix Ruby 1.9 syntax errors and improve Error class testing #1094 (@BanzaiMan, + @mrexox, @technoweenie) + +Misc: + +* Stops using `&Proc.new` for block forwarding. #1083 (@olleolleolle) +* Update CI to test against ruby 2.0-2.7 #1087, #1099 (@iMacTia, @olleolleolle, + @technoweenie) +* require FARADAY_DEPRECATE=warn to show Faraday v1.0 deprecation warnings + #1098 (@technoweenie) + +## v0.17.1 + +Final release before Faraday v1.0, with important fixes for Ruby 2.7. + +Fixes: + +* RaiseError response middleware raises exception if HTTP client returns a nil + status. #1042 (@jonnyom, @BobbyMcWho) + +Misc: + +* Fix Ruby 2.7 warnings (#1009) +* Add `Faraday::Deprecate` to warn about upcoming v1.0 changes. (#1054, #1059, + #1076, #1077) +* Add release notes up to current in CHANGELOG.md (#1066) +* Port minimal rspec suite from main branch to run backported tests. (#1058) + +## v0.17.0 + +This release is the same as v0.15.4. It was pushed to cover up releases +v0.16.0-v0.16.2. + +## v0.15.4 + +* Expose `pool_size` as a option for the NetHttpPersistent adapter (#834) + +## v0.15.3 + +* Make Faraday::Request serialisable with Marshal. (#803) +* Add DEFAULT_EXCEPTIONS constant to Request::Retry (#814) +* Add support for Ruby 2.6 Net::HTTP write_timeout (#824) + +## v0.15.2 + +* Prevents `Net::HTTP` adapters to retry request internally by setting `max_retries` to 0 if available (Ruby 2.5+). (#799) +* Fixes `NestedParamsEncoder` handling of empty array values (#801) + +## v0.15.1 + +* NetHttpPersistent adapter better reuse of SSL connections (#793) +* Refactor: inline cached_connection (#797) +* Logger middleware: use $stdout instead of STDOUT (#794) +* Fix: do not memoize/reuse Patron session (#796) + +Also in this release: + +* Allow setting min/max ssl version for Net::HTTP (#792) +* Allow setting min/max ssl version for Excon (#795) + +## v0.15.0 + +Features: + +* Added retry block option to retry middleware. 
(#770) +* Retry middleware improvements (honour Retry-After header, retry statuses) (#773) +* Improve response logger middleware output (#784) + +Fixes: + +* Remove unused class error (#767) +* Fix minor typo in README (#760) +* Reuse persistent connections when using net-http-persistent (#778) +* Fix Retry middleware documentation (#781) +* Returns the http response when giving up on retrying by status (#783) + +## v0.14.0 + +Features: + +* Allow overriding env proxy #754 (@iMacTia) +* Remove legacy Typhoeus adapter #715 (@olleolleolle) +* External Typhoeus Adapter Compatibility #748 (@iMacTia) +* Warn about missing adapter when making a request #743 (@antstorm) +* Faraday::Adapter::Test stubs now support entire urls (with host) #741 (@erik-escobedo) + +Fixes: + +* If proxy is manually provided, this takes priority over `find_proxy` #724 (@iMacTia) +* Fixes the behaviour for Excon's open_timeout (not setting write_timeout anymore) #731 (@apachelogger) +* Handle all connection timeout messages in Patron #687 (@stayhero) + +## v0.13.1 + +* Fixes an incompatibility with Addressable::URI being used as uri_parser + +## v0.13.0 + +Features: + +* Dynamically reloads the proxy when performing a request on an absolute domain (#701) +* Adapter support for Net::HTTP::Persistent v3.0.0 (#619) + +Fixes: + +* Prefer #hostname over #host. (#714) +* Fixes an edge-case issue with response headers parsing (missing HTTP header) (#719) + +## v0.12.2 + +* Parse headers from aggregated proxy requests/responses (#681) +* Guard against invalid middleware configuration with warning (#685) +* Do not use :insecure option by default in Patron (#691) +* Fixes an issue with HTTPClient not raising a `Faraday::ConnectionFailed` (#702) +* Fixes YAML serialization/deserialization for `Faraday::Utils::Headers` (#690) +* Fixes an issue with Options having a nil value (#694) +* Fixes an issue with Faraday.default_connection not using Faraday.default_connection_options (#698) +* Fixes an issue with Options.merge! 
and Faraday instrumentation middleware (#710) + +## v0.12.1 + +* Fix an issue with Patron tests failing on jruby +* Fix an issue with new `rewind_files` feature that was causing an exception when the body was not an Hash +* Expose wrapped_exception in all client errors +* Add Authentication Section to the ReadMe + +## v0.12.0.1 + +* Hotfix release to address an issue with TravisCI deploy on Rubygems + +## v0.12.0 + +Features: + +* Proxy feature now relies on Ruby `URI::Generic#find_proxy` and can use `no_proxy` ENV variable (not compatible with ruby < 2.0) +* Adds support for `context` request option to pass arbitrary information to middlewares + +Fixes: + +* Fix an issue with options that was causing new options to override defaults ones unexpectedly +* Rewind `UploadIO`s on retry to fix a compatibility issue +* Make multipart boundary unique +* Improvements in `README.md` + +## v0.11.0 + +Features: + +* Add `filter` method to Logger middleware +* Add support for Ruby2.4 and Minitest 6 +* Introduce block syntax to customise the adapter + +Fixes: + +* Fix an issue that was allowing to override `default_connection_options` from a connection instance +* Fix a bug that was causing newline escape characters ("\n") to be used when building the Authorization header + +## v0.10.1 + +- Fix an issue with HTTPClient adapter that was causing the SSL to be reset on every request +- Rescue `IOError` instead of specific subclass +- `Faraday::Utils::Headers` can now be successfully serialised in YAML +- Handle `default_connection_options` set with hash + +## v0.10.0 + +Breaking changes: +- Drop support for Ruby 1.8 + +Features: +- Include wrapped exception/reponse in ClientErrors +- Add `response.reason_phrase` +- Provide option to selectively skip logging request/response headers +- Add regex support for pattern matching in `test` adapter + +Fixes: +- Add `Faraday.respond_to?` to find methods managed by `method_missing` +- em-http: `request.host` instead of `connection.host` should be taken for SSL validations +- Allow `default_connection_options` to be merged when options are passed as url parameter +- Improve splitting key-value pairs in raw HTTP headers + +## v0.9.2 + +Adapters: +- Enable gzip compression for httpclient +- Fixes default certificate store for httpclient not having default paths. +- Make excon adapter compatible with 0.44 excon version +- Add compatibility with Patron 0.4.20 +- Determine default port numbers in Net::HTTP adapters (Addressable compatibility) +- em-http: wrap "connection closed by server" as ConnectionFailed type +- Wrap Errno::ETIMEDOUT in Faraday::Error::TimeoutError + +Utils: +- Add Rack-compatible support for parsing `a[][b]=c` nested queries +- Encode nil values in queries different than empty strings. Before: `a=`; now: `a`. +- Have `Faraday::Utils::Headers#replace` clear internal key cache +- Dup the internal key cache when a Headers hash is copied + +Env and middleware: +- Ensure `env` stored on middleware response has reference to the response +- Ensure that Response properties are initialized during `on_complete` (VCR compatibility) +- Copy request options in Faraday::Connection#dup +- Env custom members should be copied by Env.from(env) +- Honour per-request `request.options.params_encoder` +- Fix `interval_randomness` data type for Retry middleware +- Add maximum interval option for Retry middleware + +## v0.9.1 + +* Refactor Net:HTTP adapter so that with_net_http_connection can be overridden to allow pooled connections. 
(@Ben-M) +* Add configurable methods that bypass `retry_if` in the Retry request middleware. (@mike-bourgeous) + +## v0.9.0 + +* Add HTTPClient adapter (@hakanensari) +* Improve Retry handler (@mislav) +* Remove autoloading by default (@technoweenie) +* Improve internal docs (@technoweenie, @mislav) +* Respect user/password in http proxy string (@mislav) +* Adapter options are structs. Reinforces consistent options across adapters + (@technoweenie) +* Stop stripping trailing / off base URLs in a Faraday::Connection. (@technoweenie) +* Add a configurable URI parser. (@technoweenie) +* Remove need to manually autoload when using the authorization header helpers on `Faraday::Connection`. (@technoweenie) +* `Faraday::Adapter::Test` respects the `Faraday::RequestOptions#params_encoder` option. (@technoweenie) diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/LICENSE.md new file mode 100644 index 0000000..faa68e6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/LICENSE.md @@ -0,0 +1,20 @@ +Copyright (c) 2009-2020 Rick Olson, Zack Hobson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/README.md new file mode 100644 index 0000000..2eca737 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/README.md @@ -0,0 +1,53 @@ +# [![Faraday](./docs/assets/img/repo-card-slim.png)][website] + +[![Gem Version](https://badge.fury.io/rb/faraday.svg)](https://rubygems.org/gems/faraday) +[![GitHub Actions CI](https://github.com/lostisland/faraday/workflows/CI/badge.svg)](https://github.com/lostisland/faraday/actions?query=workflow%3ACI) +[![Gitter](https://badges.gitter.im/lostisland/faraday.svg)](https://gitter.im/lostisland/faraday?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + + +Faraday is an HTTP client library that provides a common interface over many +adapters (such as Net::HTTP) and embraces the concept of Rack middleware when +processing the request/response cycle. + +## Getting Started + +The best starting point is the [Faraday Website][website], with its introduction and explanation. +Need more details? See the [Faraday API Documentation][apidoc] to see how it works internally. + +## Supported Ruby versions + +This library aims to support and is [tested against][actions] the following Ruby +implementations: + +* Ruby 2.4+ + +If something doesn't work on one of these Ruby versions, it's a bug. 
+
+This library may inadvertently work (or seem to work) on other Ruby
+implementations and versions; however, support will only be provided for the
+versions listed above.
+
+If you would like this library to support another Ruby version, you may
+volunteer to be a maintainer. Being a maintainer entails making sure all tests
+run and pass on that implementation. When something breaks on your
+implementation, you will be responsible for providing patches in a timely
+fashion. If critical issues for a particular implementation exist at the time
+of a major release, support for that Ruby version may be dropped.
+
+## Contribute
+
+Do you want to contribute to Faraday?
+Open the issues page and check for the `help wanted` label!
+But before you start coding, please read our [Contributing Guide][contributing].
+
+## Copyright
+© 2009 - 2020, the [Faraday Team][faraday_team]. Website and branding design by [Elena Lo Piccolo](https://elelopic.design).
+
+[website]: https://lostisland.github.io/faraday
+[faraday_team]: https://lostisland.github.io/faraday/team
+[contributing]: https://github.com/lostisland/faraday/blob/master/.github/CONTRIBUTING.md
+[apidoc]: https://www.rubydoc.info/github/lostisland/faraday
+[actions]: https://github.com/lostisland/faraday/actions
+[jruby]: http://jruby.org/
+[rubinius]: http://rubini.us/
+[license]: LICENSE.md
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/Rakefile b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/Rakefile
new file mode 100644
index 0000000..cffdd09
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/Rakefile
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+require 'rspec/core/rake_task'
+
+RSpec::Core::RakeTask.new(:spec)
+
+task default: :spec
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_spec.rb
new file mode 100644
index 0000000..647bcf8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_spec.rb
@@ -0,0 +1,97 @@
+# frozen_string_literal: true
+
+# Requires Ruby with rspec and faraday gems.
+# rspec client_spec.rb + +require 'faraday' +require 'json' + +# Example API client +class Client + def initialize(conn) + @conn = conn + end + + def sushi(jname, params: {}) + res = @conn.get("/#{jname}", params) + data = JSON.parse(res.body) + data['name'] + end +end + +RSpec.describe Client do + let(:stubs) { Faraday::Adapter::Test::Stubs.new } + let(:conn) { Faraday.new { |b| b.adapter(:test, stubs) } } + let(:client) { Client.new(conn) } + + it 'parses name' do + stubs.get('/ebi') do |env| + # optional: you can inspect the Faraday::Env + expect(env.url.path).to eq('/ebi') + [ + 200, + { 'Content-Type': 'application/javascript' }, + '{"name": "shrimp"}' + ] + end + + # uncomment to trigger stubs.verify_stubbed_calls failure + # stubs.get('/unused') { [404, {}, ''] } + + expect(client.sushi('ebi')).to eq('shrimp') + stubs.verify_stubbed_calls + end + + it 'handles 404' do + stubs.get('/ebi') do + [ + 404, + { 'Content-Type': 'application/javascript' }, + '{}' + ] + end + expect(client.sushi('ebi')).to be_nil + stubs.verify_stubbed_calls + end + + it 'handles exception' do + stubs.get('/ebi') do + raise Faraday::ConnectionFailed, nil + end + + expect { client.sushi('ebi') }.to raise_error(Faraday::ConnectionFailed) + stubs.verify_stubbed_calls + end + + context 'When the test stub is run in strict_mode' do + let(:stubs) { Faraday::Adapter::Test::Stubs.new(strict_mode: true) } + + it 'verifies the all parameter values are identical' do + stubs.get('/ebi?abc=123') do + [ + 200, + { 'Content-Type': 'application/javascript' }, + '{"name": "shrimp"}' + ] + end + + # uncomment to raise Stubs::NotFound + # expect(client.sushi('ebi', params: { abc: 123, foo: 'Kappa' })).to eq('shrimp') + expect(client.sushi('ebi', params: { abc: 123 })).to eq('shrimp') + stubs.verify_stubbed_calls + end + end + + context 'When the Faraday connection is configured with FlatParamsEncoder' do + let(:conn) { Faraday.new(request: { params_encoder: Faraday::FlatParamsEncoder }) { |b| b.adapter(:test, stubs) } } + + it 'handles the same multiple URL parameters' do + stubs.get('/ebi?a=x&a=y&a=z') { [200, { 'Content-Type' => 'application/json' }, '{"name": "shrimp"}'] } + + # uncomment to raise Stubs::NotFound + # expect(client.sushi('ebi', params: { a: %w[x y] })).to eq('shrimp') + expect(client.sushi('ebi', params: { a: %w[x y z] })).to eq('shrimp') + stubs.verify_stubbed_calls + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_test.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_test.rb new file mode 100644 index 0000000..31d7e26 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/examples/client_test.rb @@ -0,0 +1,118 @@ +# frozen_string_literal: true + +# Requires Ruby with test-unit and faraday gems. 
+# ruby client_test.rb + +require 'faraday' +require 'json' +require 'test/unit' + +# Example API client +class Client + def initialize(conn) + @conn = conn + end + + def sushi(jname, params: {}) + res = @conn.get("/#{jname}", params) + data = JSON.parse(res.body) + data['name'] + end +end + +# Example API client test +class ClientTest < Test::Unit::TestCase + def test_sushi_name + stubs = Faraday::Adapter::Test::Stubs.new + stubs.get('/ebi') do |env| + # optional: you can inspect the Faraday::Env + assert_equal '/ebi', env.url.path + [ + 200, + { 'Content-Type': 'application/javascript' }, + '{"name": "shrimp"}' + ] + end + + # uncomment to trigger stubs.verify_stubbed_calls failure + # stubs.get('/unused') { [404, {}, ''] } + + cli = client(stubs) + assert_equal 'shrimp', cli.sushi('ebi') + stubs.verify_stubbed_calls + end + + def test_sushi_404 + stubs = Faraday::Adapter::Test::Stubs.new + stubs.get('/ebi') do + [ + 404, + { 'Content-Type': 'application/javascript' }, + '{}' + ] + end + + cli = client(stubs) + assert_nil cli.sushi('ebi') + stubs.verify_stubbed_calls + end + + def test_sushi_exception + stubs = Faraday::Adapter::Test::Stubs.new + stubs.get('/ebi') do + raise Faraday::ConnectionFailed, nil + end + + cli = client(stubs) + assert_raise Faraday::ConnectionFailed do + cli.sushi('ebi') + end + stubs.verify_stubbed_calls + end + + def test_strict_mode + stubs = Faraday::Adapter::Test::Stubs.new(strict_mode: true) + stubs.get('/ebi?abc=123') do + [ + 200, + { 'Content-Type': 'application/javascript' }, + '{"name": "shrimp"}' + ] + end + + cli = client(stubs) + assert_equal 'shrimp', cli.sushi('ebi', params: { abc: 123 }) + + # uncomment to raise Stubs::NotFound + # assert_equal 'shrimp', cli.sushi('ebi', params: { abc: 123, foo: 'Kappa' }) + stubs.verify_stubbed_calls + end + + def test_non_default_params_encoder + stubs = Faraday::Adapter::Test::Stubs.new(strict_mode: true) + stubs.get('/ebi?a=x&a=y&a=z') do + [ + 200, + { 'Content-Type': 'application/javascript' }, + '{"name": "shrimp"}' + ] + end + conn = Faraday.new(request: { params_encoder: Faraday::FlatParamsEncoder }) do |builder| + builder.adapter :test, stubs + end + + cli = Client.new(conn) + assert_equal 'shrimp', cli.sushi('ebi', params: { a: %w[x y z] }) + + # uncomment to raise Stubs::NotFound + # assert_equal 'shrimp', cli.sushi('ebi', params: { a: %w[x y] }) + stubs.verify_stubbed_calls + end + + def client(stubs) + conn = Faraday.new do |builder| + builder.adapter :test, stubs + end + Client.new(conn) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday.rb new file mode 100644 index 0000000..8562af9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday.rb @@ -0,0 +1,193 @@ +# frozen_string_literal: true + +require 'cgi' +require 'date' +require 'set' +require 'forwardable' +require 'faraday/middleware_registry' +require 'faraday/dependency_loader' + +unless defined?(::Faraday::Timer) + require 'timeout' + ::Faraday::Timer = Timeout +end + +require 'faraday/version' +require 'faraday/methods' +require 'faraday/utils' +require 'faraday/options' +require 'faraday/connection' +require 'faraday/rack_builder' +require 'faraday/parameters' +require 'faraday/middleware' +require 'faraday/adapter' +require 'faraday/request' +require 'faraday/response' +require 'faraday/error' +require 'faraday/request/url_encoded' # needed by multipart + +# External Middleware gems and their aliases +require 
'faraday/multipart' +require 'faraday/retry' +Faraday::Request::Multipart = Faraday::Multipart::Middleware +Faraday::Request::Retry = Faraday::Retry::Middleware + +# External Adapters gems +unless defined?(JRUBY_VERSION) + require 'faraday/em_http' + require 'faraday/em_synchrony' +end +require 'faraday/excon' +require 'faraday/httpclient' +require 'faraday/net_http' +require 'faraday/net_http_persistent' +require 'faraday/patron' +require 'faraday/rack' + +# This is the main namespace for Faraday. +# +# It provides methods to create {Connection} objects, and HTTP-related +# methods to use directly. +# +# @example Helpful class methods for easy usage +# Faraday.get "http://faraday.com" +# +# @example Helpful class method `.new` to create {Connection} objects. +# conn = Faraday.new "http://faraday.com" +# conn.get '/' +# +module Faraday + CONTENT_TYPE = 'Content-Type' + + class << self + # The root path that Faraday is being loaded from. + # + # This is the root from where the libraries are auto-loaded. + # + # @return [String] + attr_accessor :root_path + + # Gets or sets the path that the Faraday libs are loaded from. + # @return [String] + attr_accessor :lib_path + + # @overload default_adapter + # Gets the Symbol key identifying a default Adapter to use + # for the default {Faraday::Connection}. Defaults to `:net_http`. + # @return [Symbol] the default adapter + # @overload default_adapter=(adapter) + # Updates default adapter while resetting {.default_connection}. + # @return [Symbol] the new default_adapter. + attr_reader :default_adapter + + # Documented below, see default_connection + attr_writer :default_connection + + # Tells Faraday to ignore the environment proxy (http_proxy). + # Defaults to `false`. + # @return [Boolean] + attr_accessor :ignore_env_proxy + + # Initializes a new {Connection}. + # + # @param url [String,Hash] The optional String base URL to use as a prefix + # for all requests. Can also be the options Hash. Any of these + # values will be set on every request made, unless overridden + # for a specific request. + # @param options [Hash] + # @option options [String] :url Base URL + # @option options [Hash] :params Hash of unencoded URI query params. + # @option options [Hash] :headers Hash of unencoded HTTP headers. + # @option options [Hash] :request Hash of request options. + # @option options [Hash] :ssl Hash of SSL options. + # @option options [Hash] :proxy Hash of Proxy options. + # @return [Faraday::Connection] + # + # @example With an URL argument + # Faraday.new 'http://faraday.com' + # # => Faraday::Connection to http://faraday.com + # + # @example With an URL argument and an options hash + # Faraday.new 'http://faraday.com', params: { page: 1 } + # # => Faraday::Connection to http://faraday.com?page=1 + # + # @example With everything in an options hash + # Faraday.new url: 'http://faraday.com', + # params: { page: 1 } + # # => Faraday::Connection to http://faraday.com?page=1 + def new(url = nil, options = {}, &block) + options = default_connection_options.merge(options) + Faraday::Connection.new(url, options, &block) + end + + # @private + # Internal: Requires internal Faraday libraries. + # + # @param libs [Array] one or more relative String names to Faraday classes. 
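+    # @example (illustrative sketch, not upstream docs) paths resolve against
+    #   Faraday.lib_path, per the method body below:
+    #   Faraday.require_libs 'utils', 'options'
+    #   # requires "#{Faraday.lib_path}/utils" and "#{Faraday.lib_path}/options"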
+ # @return [void] + def require_libs(*libs) + libs.each do |lib| + require "#{lib_path}/#{lib}" + end + end + + alias require_lib require_libs + + # Documented elsewhere, see default_adapter reader + def default_adapter=(adapter) + @default_connection = nil + @default_adapter = adapter + end + + def respond_to_missing?(symbol, include_private = false) + default_connection.respond_to?(symbol, include_private) || super + end + + # @overload default_connection + # Gets the default connection used for simple scripts. + # @return [Faraday::Connection] a connection configured with + # the default_adapter. + # @overload default_connection=(connection) + # @param connection [Faraday::Connection] + # Sets the default {Faraday::Connection} for simple scripts that + # access the Faraday constant directly, such as + # Faraday.get "https://faraday.com". + def default_connection + @default_connection ||= Connection.new(default_connection_options) + end + + # Gets the default connection options used when calling {Faraday#new}. + # + # @return [Faraday::ConnectionOptions] + def default_connection_options + @default_connection_options ||= ConnectionOptions.new + end + + # Sets the default options used when calling {Faraday#new}. + # + # @param options [Hash, Faraday::ConnectionOptions] + def default_connection_options=(options) + @default_connection = nil + @default_connection_options = ConnectionOptions.from(options) + end + + private + + # Internal: Proxies method calls on the Faraday constant to + # .default_connection. + def method_missing(name, *args, &block) + if default_connection.respond_to?(name) + default_connection.send(name, *args, &block) + else + super + end + end + end + + self.ignore_env_proxy = false + self.root_path = File.expand_path __dir__ + self.lib_path = File.expand_path 'faraday', __dir__ + self.default_adapter = :net_http + + require_lib 'autoload' unless ENV['FARADAY_NO_AUTOLOAD'] +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter.rb new file mode 100644 index 0000000..44828c7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module Faraday + # Base class for all Faraday adapters. Adapters are + # responsible for fulfilling a Faraday request. + class Adapter + extend MiddlewareRegistry + extend DependencyLoader + + CONTENT_LENGTH = 'Content-Length' + + register_middleware File.expand_path('adapter', __dir__), + test: [:Test, 'test'], + typhoeus: [:Typhoeus, 'typhoeus'] + + # This module marks an Adapter as supporting parallel requests. + module Parallelism + attr_writer :supports_parallel + + def supports_parallel? + @supports_parallel + end + + def inherited(subclass) + super + subclass.supports_parallel = supports_parallel? + end + end + + extend Parallelism + self.supports_parallel = false + + def initialize(_app = nil, opts = {}, &block) + @app = ->(env) { env.response } + @connection_options = opts + @config_block = block + end + + # Yields or returns an adapter's configured connection. Depends on + # #build_connection being defined on this adapter. + # + # @param env [Faraday::Env, Hash] The env object for a faraday request. + # + # @return The return value of the given block, or the HTTP connection object + # if no block is given. + def connection(env) + conn = build_connection(env) + return conn unless block_given? + + yield conn + end + + # Close any persistent connections. 
The adapter should still be usable + # after calling close. + def close + # Possible implementation: + # @app.close if @app.respond_to?(:close) + end + + def call(env) + env.clear_body if env.needs_body? + env.response = Response.new + end + + private + + def save_response(env, status, body, headers = nil, reason_phrase = nil) + env.status = status + env.body = body + env.reason_phrase = reason_phrase&.to_s&.strip + env.response_headers = Utils::Headers.new.tap do |response_headers| + response_headers.update headers unless headers.nil? + yield(response_headers) if block_given? + end + + env.response.finish(env) unless env.parallel? + env.response + end + + # Fetches either a read, write, or open timeout setting. Defaults to the + # :timeout value if a more specific one is not given. + # + # @param type [Symbol] Describes which timeout setting to get: :read, + # :write, or :open. + # @param options [Hash] Hash containing Symbol keys like :timeout, + # :read_timeout, :write_timeout, :open_timeout, or + # :timeout + # + # @return [Integer, nil] Timeout duration in seconds, or nil if no timeout + # has been set. + def request_timeout(type, options) + key = TIMEOUT_KEYS.fetch(type) do + msg = "Expected :read, :write, :open. Got #{type.inspect} :(" + raise ArgumentError, msg + end + options[key] || options[:timeout] + end + + TIMEOUT_KEYS = { + read: :read_timeout, + open: :open_timeout, + write: :write_timeout + }.freeze + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/test.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/test.rb new file mode 100644 index 0000000..a21edd8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/test.rb @@ -0,0 +1,262 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # @example + # test = Faraday::Connection.new do + # use Faraday::Adapter::Test do |stub| + # # Define matcher to match the request + # stub.get '/resource.json' do + # # return static content + # [200, {'Content-Type' => 'application/json'}, 'hi world'] + # end + # + # # response with content generated based on request + # stub.get '/showget' do |env| + # [200, {'Content-Type' => 'text/plain'}, env[:method].to_s] + # end + # + # # A regular expression can be used as matching filter + # stub.get /\A\/items\/(\d+)\z/ do |env, meta| + # # in case regular expression is used, an instance of MatchData + # # can be received + # [200, + # {'Content-Type' => 'text/plain'}, + # "showing item: #{meta[:match_data][1]}" + # ] + # end + # + # # You can set strict_mode to exactly match the stubbed requests. + # stub.strict_mode = true + # end + # end + # + # resp = test.get '/resource.json' + # resp.body # => 'hi world' + # + # resp = test.get '/showget' + # resp.body # => 'get' + # + # resp = test.get '/items/1' + # resp.body # => 'showing item: 1' + # + # resp = test.get '/items/2' + # resp.body # => 'showing item: 2' + class Test < Faraday::Adapter + attr_accessor :stubs + + # A stack of Stubs + class Stubs + class NotFound < StandardError + end + + def initialize(strict_mode: false) + # { get: [Stub, Stub] } + @stack = {} + @consumed = {} + @strict_mode = strict_mode + yield(self) if block_given? + end + + def empty? + @stack.empty? 
+ end + + # @param env [Faraday::Env] + def match(env) + request_method = env[:method] + return false unless @stack.key?(request_method) + + stack = @stack[request_method] + consumed = (@consumed[request_method] ||= []) + + stub, meta = matches?(stack, env) + if stub + consumed << stack.delete(stub) + return stub, meta + end + matches?(consumed, env) + end + + def get(path, headers = {}, &block) + new_stub(:get, path, headers, &block) + end + + def head(path, headers = {}, &block) + new_stub(:head, path, headers, &block) + end + + def post(path, body = nil, headers = {}, &block) + new_stub(:post, path, headers, body, &block) + end + + def put(path, body = nil, headers = {}, &block) + new_stub(:put, path, headers, body, &block) + end + + def patch(path, body = nil, headers = {}, &block) + new_stub(:patch, path, headers, body, &block) + end + + def delete(path, headers = {}, &block) + new_stub(:delete, path, headers, &block) + end + + def options(path, headers = {}, &block) + new_stub(:options, path, headers, &block) + end + + # Raises an error if any of the stubbed calls have not been made. + def verify_stubbed_calls + failed_stubs = [] + @stack.each do |method, stubs| + next if stubs.empty? + + failed_stubs.concat( + stubs.map do |stub| + "Expected #{method} #{stub}." + end + ) + end + raise failed_stubs.join(' ') unless failed_stubs.empty? + end + + # Set strict_mode. If the value is true, this adapter tries to find matched requests strictly, + # which means that all of a path, parameters, and headers must be the same as an actual request. + def strict_mode=(value) + @strict_mode = value + @stack.each do |_method, stubs| + stubs.each do |stub| + stub.strict_mode = value + end + end + end + + protected + + def new_stub(request_method, path, headers = {}, body = nil, &block) + normalized_path, host = + if path.is_a?(Regexp) + path + else + [ + Faraday::Utils.normalize_path(path), + Faraday::Utils.URI(path).host + ] + end + path, query = normalized_path.respond_to?(:split) ? normalized_path.split('?') : normalized_path + headers = Utils::Headers.new(headers) + + stub = Stub.new(host, path, query, headers, body, @strict_mode, block) + (@stack[request_method] ||= []) << stub + end + + # @param stack [Hash] + # @param env [Faraday::Env] + def matches?(stack, env) + stack.each do |stub| + match_result, meta = stub.matches?(env) + return stub, meta if match_result + end + nil + end + end + + # Stub request + class Stub < Struct.new(:host, :path, :query, :headers, :body, :strict_mode, :block) # rubocop:disable Style/StructInheritance + # @param env [Faraday::Env] + def matches?(env) + request_host = env[:url].host + request_path = Faraday::Utils.normalize_path(env[:url].path) + request_headers = env.request_headers + request_body = env[:body] + + # meta is a hash used as carrier + # that will be yielded to consumer block + meta = {} + [(host.nil? || host == request_host) && + path_match?(request_path, meta) && + params_match?(env) && + (body.to_s.size.zero? || request_body == body) && + headers_match?(request_headers), meta] + end + + def path_match?(request_path, meta) + if path.is_a?(Regexp) + !!(meta[:match_data] = path.match(request_path)) + else + path == request_path + end + end + + # @param env [Faraday::Env] + def params_match?(env) + request_params = env[:params] + params = env.params_encoder.decode(query) || {} + + if strict_mode + return Set.new(params) == Set.new(request_params) + end + + params.keys.all? 
do |key| + request_params[key] == params[key] + end + end + + def headers_match?(request_headers) + if strict_mode + headers_with_user_agent = headers.dup.tap do |hs| + # NOTE: Set User-Agent in case it's not set when creating Stubs. + # Users would not want to set Faraday's User-Agent explicitly. + hs[:user_agent] ||= Connection::USER_AGENT + end + return Set.new(headers_with_user_agent) == Set.new(request_headers) + end + + headers.keys.all? do |key| + request_headers[key] == headers[key] + end + end + + def to_s + "#{path} #{body}" + end + end + + def initialize(app, stubs = nil, &block) + super(app) + @stubs = stubs || Stubs.new + configure(&block) if block + end + + def configure + yield(stubs) + end + + # @param env [Faraday::Env] + def call(env) + super + + env.request.params_encoder ||= Faraday::Utils.default_params_encoder + env[:params] = env.params_encoder.decode(env[:url].query) || {} + stub, meta = stubs.match(env) + + unless stub + raise Stubs::NotFound, "no stubbed request for #{env[:method]} "\ + "#{env[:url]} #{env[:body]}" + end + + block_arity = stub.block.arity + status, headers, body = + if block_arity >= 0 + stub.block.call(*[env, meta].take(block_arity)) + else + stub.block.call(env, meta) + end + save_response(env, status, body, headers) + + @app.call(env) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/typhoeus.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/typhoeus.rb new file mode 100644 index 0000000..2f6ed68 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter/typhoeus.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # Typhoeus adapter. This class is just a stub, the real adapter is in + # https://github.com/typhoeus/typhoeus/blob/master/lib/typhoeus/adapters/faraday.rb + class Typhoeus < Faraday::Adapter + # Needs to define this method in order to support Typhoeus <= 1.3.0 + def call; end + + dependency 'typhoeus' + dependency 'typhoeus/adapters/faraday' + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter_registry.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter_registry.rb new file mode 100644 index 0000000..1cd1e7e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/adapter_registry.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +require 'monitor' + +module Faraday + # AdapterRegistry registers adapter class names so they can be looked up by a + # String or Symbol name. + class AdapterRegistry + def initialize + @lock = Monitor.new + @constants = {} + end + + def get(name) + klass = @lock.synchronize do + @constants[name] + end + return klass if klass + + Object.const_get(name).tap { |c| set(c, name) } + end + + def set(klass, name = nil) + name ||= klass.to_s + @lock.synchronize do + @constants[name] = klass + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/autoload.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/autoload.rb new file mode 100644 index 0000000..76a0e0e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/autoload.rb @@ -0,0 +1,87 @@ +# frozen_string_literal: true + +module Faraday + # Adds the ability for other modules to manage autoloadable + # constants. + # + # @api private + module AutoloadHelper + # Registers the constants to be auto loaded. + # + # @param prefix [String] The require prefix. 
If the path is inside Faraday, + # then it will be prefixed with the root path of this loaded + # Faraday version. + # @param options [{ Symbol => String }] library names. + # + # @example + # + # Faraday.autoload_all 'faraday/foo', + # Bar: 'bar' + # + # # requires faraday/foo/bar to load Faraday::Bar. + # Faraday::Bar + # + # @return [void] + def autoload_all(prefix, options) + if prefix.match? %r{^faraday(/|$)}i + prefix = File.join(Faraday.root_path, prefix) + end + + options.each do |const_name, path| + autoload const_name, File.join(prefix, path) + end + end + + # Loads each autoloaded constant. If thread safety is a concern, + # wrap this in a Mutex. + # + # @return [void] + def load_autoloaded_constants + constants.each do |const| + const_get(const) if autoload?(const) + end + end + + # Filters the module's contents with those that have been already + # autoloaded. + # + # @return [Array] + def all_loaded_constants + constants + .map { |c| const_get(c) } + .select { |a| a.respond_to?(:loaded?) && a.loaded? } + end + end + + # Adapter is the base class for all Faraday adapters. + # @see lib/faraday/adapter.rb Original class location + class Adapter + extend AutoloadHelper + autoload_all 'faraday/adapter', + Typhoeus: 'typhoeus', + Test: 'test' + end + + # Request represents a single HTTP request for a Faraday adapter to make. + # @see lib/faraday/request.rb Original class location + class Request + extend AutoloadHelper + autoload_all 'faraday/request', + UrlEncoded: 'url_encoded', + Multipart: 'multipart', + Retry: 'retry', + Authorization: 'authorization', + BasicAuthentication: 'basic_authentication', + TokenAuthentication: 'token_authentication', + Instrumentation: 'instrumentation' + end + + # Response represents the returned value of a sent Faraday request. + # @see lib/faraday/response.rb Original class location + class Response + extend AutoloadHelper + autoload_all 'faraday/response', + RaiseError: 'raise_error', + Logger: 'logger' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/connection.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/connection.rb new file mode 100644 index 0000000..fdd5f82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/connection.rb @@ -0,0 +1,642 @@ +# frozen_string_literal: true + +require 'faraday/deprecate' + +module Faraday + # Connection objects manage the default properties and the middleware + # stack for fulfilling an HTTP request. + # + # @example + # + # conn = Faraday::Connection.new 'http://sushi.com' + # + # # GET http://sushi.com/nigiri + # conn.get 'nigiri' + # # => # + # + class Connection + # A Set of allowed HTTP verbs. + METHODS = Set.new %i[get post put delete head patch options trace] + USER_AGENT = "Faraday v#{VERSION}" + + # @return [Hash] URI query unencoded key/value pairs. + attr_reader :params + + # @return [Hash] unencoded HTTP header key/value pairs. + attr_reader :headers + + # @return [String] a URI with the prefix used for all requests from this + # Connection. This includes a default host name, scheme, port, and path. + attr_reader :url_prefix + + # @return [Faraday::RackBuilder] Builder for this Connection. + attr_reader :builder + + # @return [Hash] SSL options. + attr_reader :ssl + + # @return [Object] the parallel manager for this Connection. + attr_reader :parallel_manager + + # Sets the default parallel manager for this connection. + attr_writer :default_parallel_manager + + # @return [Hash] proxy options. 
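+    # @example (illustrative; `proxy.example.com` is a placeholder) an explicit
+    #   proxy can be assigned via {#proxy=} defined further below:
+    #   conn.proxy = 'http://proxy.example.com:8080'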
+ attr_reader :proxy + + # Initializes a new Faraday::Connection. + # + # @param url [URI, String] URI or String base URL to use as a prefix for all + # requests (optional). + # @param options [Hash, Faraday::ConnectionOptions] + # @option options [URI, String] :url ('http:/') URI or String base URL + # @option options [Hash String>] :params URI query unencoded + # key/value pairs. + # @option options [Hash String>] :headers Hash of unencoded HTTP + # header key/value pairs. + # @option options [Hash] :request Hash of request options. + # @option options [Hash] :ssl Hash of SSL options. + # @option options [Hash, URI, String] :proxy proxy options, either as a URL + # or as a Hash + # @option options [URI, String] :proxy[:uri] + # @option options [String] :proxy[:user] + # @option options [String] :proxy[:password] + # @yield [self] after all setup has been done + def initialize(url = nil, options = nil) + options = ConnectionOptions.from(options) + + if url.is_a?(Hash) || url.is_a?(ConnectionOptions) + options = options.merge(url) + url = options.url + end + + @parallel_manager = nil + @headers = Utils::Headers.new + @params = Utils::ParamsHash.new + @options = options.request + @ssl = options.ssl + @default_parallel_manager = options.parallel_manager + @manual_proxy = nil + + @builder = options.builder || begin + # pass an empty block to Builder so it doesn't assume default middleware + options.new_builder(block_given? ? proc { |b| } : nil) + end + + self.url_prefix = url || 'http:/' + + @params.update(options.params) if options.params + @headers.update(options.headers) if options.headers + + initialize_proxy(url, options) + + yield(self) if block_given? + + @headers[:user_agent] ||= USER_AGENT + end + + def initialize_proxy(url, options) + @manual_proxy = !!options.proxy + @proxy = + if options.proxy + ProxyOptions.from(options.proxy) + else + proxy_from_env(url) + end + end + + # Sets the Hash of URI query unencoded key/value pairs. + # @param hash [Hash] + def params=(hash) + @params.replace hash + end + + # Sets the Hash of unencoded HTTP header key/value pairs. + # @param hash [Hash] + def headers=(hash) + @headers.replace hash + end + + extend Forwardable + + def_delegators :builder, :build, :use, :request, :response, :adapter, :app + + # Closes the underlying resources and/or connections. In the case of + # persistent connections, this closes all currently open connections + # but does not prevent new connections from being made. + def close + app.close + end + + # @!method get(url = nil, params = nil, headers = nil) + # Makes a GET HTTP request without a body. + # @!scope class + # + # @param url [String] The optional String base URL to use as a prefix for + # all requests. Can also be the options Hash. + # @param params [Hash] Hash of URI query unencoded key/value pairs. + # @param headers [Hash] unencoded HTTP header key/value pairs. + # + # @example + # conn.get '/items', { page: 1 }, :accept => 'application/json' + # + # # ElasticSearch example sending a body with GET. + # conn.get '/twitter/tweet/_search' do |req| + # req.headers[:content_type] = 'application/json' + # req.params[:routing] = 'kimchy' + # req.body = JSON.generate(query: {...}) + # end + # + # @yield [Faraday::Request] for further request customizations + # @return [Faraday::Response] + + # @!method head(url = nil, params = nil, headers = nil) + # Makes a HEAD HTTP request without a body. + # @!scope class + # + # @param url [String] The optional String base URL to use as a prefix for + # all requests. 
Can also be the options Hash.
+    # @param params [Hash] Hash of URI query unencoded key/value pairs.
+    # @param headers [Hash] unencoded HTTP header key/value pairs.
+    #
+    # @example
+    #   conn.head '/items/1'
+    #
+    # @yield [Faraday::Request] for further request customizations
+    # @return [Faraday::Response]
+
+    # @!method delete(url = nil, params = nil, headers = nil)
+    # Makes a DELETE HTTP request without a body.
+    # @!scope class
+    #
+    # @param url [String] The optional String base URL to use as a prefix for
+    #   all requests. Can also be the options Hash.
+    # @param params [Hash] Hash of URI query unencoded key/value pairs.
+    # @param headers [Hash] unencoded HTTP header key/value pairs.
+    #
+    # @example
+    #   conn.delete '/items/1'
+    #
+    # @yield [Faraday::Request] for further request customizations
+    # @return [Faraday::Response]
+
+    # @!method trace(url = nil, params = nil, headers = nil)
+    # Makes a TRACE HTTP request without a body.
+    # @!scope class
+    #
+    # @param url [String] The optional String base URL to use as a prefix for
+    #   all requests. Can also be the options Hash.
+    # @param params [Hash] Hash of URI query unencoded key/value pairs.
+    # @param headers [Hash] unencoded HTTP header key/value pairs.
+    #
+    # @example
+    #   conn.trace '/items/1'
+    #
+    # @yield [Faraday::Request] for further request customizations
+    # @return [Faraday::Response]
+
+    # @!visibility private
+    METHODS_WITH_QUERY.each do |method|
+      class_eval <<-RUBY, __FILE__, __LINE__ + 1
+        def #{method}(url = nil, params = nil, headers = nil)
+          run_request(:#{method}, url, nil, headers) do |request|
+            request.params.update(params) if params
+            yield request if block_given?
+          end
+        end
+      RUBY
+    end
+
+    # @overload options()
+    #   Returns current Connection options.
+    #
+    # @overload options(url, params = nil, headers = nil)
+    #   Makes an OPTIONS HTTP request to the given URL.
+    #   @param url [String] String base URL to use as a prefix for all requests.
+    #   @param params [Hash] Hash of URI query unencoded key/value pairs.
+    #   @param headers [Hash] unencoded HTTP header key/value pairs.
+    #
+    # @example
+    #   conn.options '/items/1'
+    #
+    # @yield [Faraday::Request] for further request customizations
+    # @return [Faraday::Response]
+    def options(*args)
+      return @options if args.size.zero?
+
+      url, params, headers = *args
+      run_request(:options, url, nil, headers) do |request|
+        request.params.update(params) if params
+        yield request if block_given?
+      end
+    end
+
+    # @!method post(url = nil, body = nil, headers = nil)
+    # Makes a POST HTTP request with a body.
+    # @!scope class
+    #
+    # @param url [String] The optional String base URL to use as a prefix for
+    #   all requests. Can also be the options Hash.
+    # @param body [String] body for the request.
+    # @param headers [Hash] unencoded HTTP header key/value pairs.
+    #
+    # @example
+    #   conn.post '/items', data, content_type: 'application/json'
+    #
+    #   # Simple ElasticSearch indexing sample.
+    #   conn.post '/twitter/tweet' do |req|
+    #     req.headers[:content_type] = 'application/json'
+    #     req.params[:routing] = 'kimchy'
+    #     req.body = JSON.generate(user: 'kimchy', ...)
+    #   end
+    #
+    # @yield [Faraday::Request] for further request customizations
+    # @return [Faraday::Response]
+
+    # @!method put(url = nil, body = nil, headers = nil)
+    # Makes a PUT HTTP request with a body.
+    # @!scope class
+    #
+    # @param url [String] The optional String base URL to use as a prefix for
+    #   all requests. Can also be the options Hash.
+    # @param body [String] body for the request.
+ # @param headers [Hash] unencoded HTTP header key/value pairs. + # + # @example + # # TODO: Make it a PUT example + # conn.post '/items', data, content_type: 'application/json' + # + # # Simple ElasticSearch indexing sample. + # conn.post '/twitter/tweet' do |req| + # req.headers[:content_type] = 'application/json' + # req.params[:routing] = 'kimchy' + # req.body = JSON.generate(user: 'kimchy', ...) + # end + # + # @yield [Faraday::Request] for further request customizations + # @return [Faraday::Response] + + # @!visibility private + METHODS_WITH_BODY.each do |method| + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(url = nil, body = nil, headers = nil, &block) + run_request(:#{method}, url, body, headers, &block) + end + RUBY + end + + # Sets up the Authorization header with these credentials, encoded + # with base64. + # + # @param login [String] The authentication login. + # @param pass [String] The authentication password. + # + # @example + # + # conn.basic_auth 'Aladdin', 'open sesame' + # conn.headers['Authorization'] + # # => "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" + # + # @return [void] + def basic_auth(login, pass) + set_authorization_header(:basic_auth, login, pass) + end + + extend Faraday::Deprecate + deprecate :basic_auth, '#request(:basic_auth, ...)', '2.0' + + # Sets up the Authorization header with the given token. + # + # @param token [String] + # @param options [Hash] extra token options. + # + # @example + # + # conn.token_auth 'abcdef', foo: 'bar' + # conn.headers['Authorization'] + # # => "Token token=\"abcdef\", + # foo=\"bar\"" + # + # @return [void] + def token_auth(token, options = nil) + set_authorization_header(:token_auth, token, options) + end + + deprecate :token_auth, + '#request(:token_auth, ...)', + '2.0', + 'See https://lostisland.github.io/faraday/middleware/authentication for more usage info.' + + # Sets up a custom Authorization header. + # + # @param type [String] authorization type + # @param token [String, Hash] token. A String value is taken literally, and + # a Hash is encoded into comma-separated key/value pairs. + # + # @example + # + # conn.authorization :Bearer, 'mF_9.B5f-4.1JqM' + # conn.headers['Authorization'] + # # => "Bearer mF_9.B5f-4.1JqM" + # + # conn.authorization :Token, token: 'abcdef', foo: 'bar' + # conn.headers['Authorization'] + # # => "Token token=\"abcdef\", + # foo=\"bar\"" + # + # @return [void] + def authorization(type, token) + set_authorization_header(:authorization, type, token) + end + + deprecate :authorization, + '#request(:authorization, ...)', + '2.0', + 'See https://lostisland.github.io/faraday/middleware/authentication for more usage info.' + + # Check if the adapter is parallel-capable. + # + # @yield if the adapter isn't parallel-capable, or if no adapter is set yet. + # + # @return [Object, nil] a parallel manager or nil if yielded + # @api private + def default_parallel_manager + @default_parallel_manager ||= begin + adapter = @builder.adapter.klass if @builder.adapter + + if support_parallel?(adapter) + adapter.setup_parallel_manager + elsif block_given? + yield + end + end + end + + # Determine if this Faraday::Connection can make parallel requests. + # + # @return [Boolean] + def in_parallel? + !!@parallel_manager + end + + # Sets up the parallel manager to make a set of requests. + # + # @param manager [Object] The parallel manager that this Connection's + # Adapter uses. + # + # @yield a block to execute multiple requests. 
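+    # @example A minimal sketch; assumes a parallel-capable adapter (such as
+    #   one of the EventMachine-based adapters loaded above) is configured on
+    #   the connection:
+    #   conn.in_parallel do
+    #     @resp1 = conn.get('/one')
+    #     @resp2 = conn.get('/two')
+    #   end
+    #   # Both responses are populated here, after the block has run.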
+ # @return [void] + def in_parallel(manager = nil) + @parallel_manager = manager || default_parallel_manager do + warn 'Warning: `in_parallel` called but no parallel-capable adapter ' \ + 'on Faraday stack' + warn caller[2, 10].join("\n") + nil + end + yield + @parallel_manager&.run + ensure + @parallel_manager = nil + end + + # Sets the Hash proxy options. + # + # @param new_value [Object] + def proxy=(new_value) + @manual_proxy = true + @proxy = new_value ? ProxyOptions.from(new_value) : nil + end + + def_delegators :url_prefix, :scheme, :scheme=, :host, :host=, :port, :port= + def_delegator :url_prefix, :path, :path_prefix + + # Parses the given URL with URI and stores the individual + # components in this connection. These components serve as defaults for + # requests made by this connection. + # + # @param url [String, URI] + # @param encoder [Object] + # + # @example + # + # conn = Faraday::Connection.new { ... } + # conn.url_prefix = "https://sushi.com/api" + # conn.scheme # => https + # conn.path_prefix # => "/api" + # + # conn.get("nigiri?page=2") # accesses https://sushi.com/api/nigiri + def url_prefix=(url, encoder = nil) + uri = @url_prefix = Utils.URI(url) + self.path_prefix = uri.path + + params.merge_query(uri.query, encoder) + uri.query = nil + + with_uri_credentials(uri) do |user, password| + set_basic_auth(user, password) + uri.user = uri.password = nil + end + + @proxy = proxy_from_env(url) unless @manual_proxy + end + + def set_basic_auth(user, password) + header = Faraday::Request::BasicAuthentication.header(user, password) + headers[Faraday::Request::Authorization::KEY] = header + end + + # Sets the path prefix and ensures that it always has a leading + # slash. + # + # @param value [String] + # + # @return [String] the new path prefix + def path_prefix=(value) + url_prefix.path = if value + value = "/#{value}" unless value[0, 1] == '/' + value + end + end + + # Takes a relative url for a request and combines it with the defaults + # set on the connection instance. + # + # @param url [String] + # @param extra_params [Hash] + # + # @example + # conn = Faraday::Connection.new { ... } + # conn.url_prefix = "https://sushi.com/api?token=abc" + # conn.scheme # => https + # conn.path_prefix # => "/api" + # + # conn.build_url("nigiri?page=2") + # # => https://sushi.com/api/nigiri?token=abc&page=2 + # + # conn.build_url("nigiri", page: 2) + # # => https://sushi.com/api/nigiri?token=abc&page=2 + # + def build_url(url = nil, extra_params = nil) + uri = build_exclusive_url(url) + + query_values = params.dup.merge_query(uri.query, options.params_encoder) + query_values.update(extra_params) if extra_params + uri.query = + if query_values.empty? + nil + else + query_values.to_query(options.params_encoder) + end + + uri + end + + # Builds and runs the Faraday::Request. + # + # @param method [Symbol] HTTP method. + # @param url [String, URI] String or URI to access. + # @param body [Object] The request body that will eventually be converted to + # a string. + # @param headers [Hash] unencoded HTTP header key/value pairs. + # + # @return [Faraday::Response] + def run_request(method, url, body, headers) + unless METHODS.include?(method) + raise ArgumentError, "unknown http method: #{method}" + end + + request = build_request(method) do |req| + req.options.proxy = proxy_for_request(url) + req.url(url) if url + req.headers.update(headers) if headers + req.body = body if body + yield(req) if block_given? 
+ end + + builder.build_response(self, request) + end + + # Creates and configures the request object. + # + # @param method [Symbol] + # + # @yield [Faraday::Request] if block given + # @return [Faraday::Request] + def build_request(method) + Request.create(method) do |req| + req.params = params.dup + req.headers = headers.dup + req.options = options.dup + yield(req) if block_given? + end + end + + # Build an absolute URL based on url_prefix. + # + # @param url [String, URI] + # @param params [Faraday::Utils::ParamsHash] A Faraday::Utils::ParamsHash to + # replace the query values + # of the resulting url (default: nil). + # + # @return [URI] + def build_exclusive_url(url = nil, params = nil, params_encoder = nil) + url = nil if url.respond_to?(:empty?) && url.empty? + base = url_prefix.dup + if url && base.path && base.path !~ %r{/$} + base.path = "#{base.path}/" # ensure trailing slash + end + url = url && URI.parse(url.to_s).opaque ? url.to_s.gsub(':', '%3A') : url + uri = url ? base + url : base + if params + uri.query = params.to_query(params_encoder || options.params_encoder) + end + # rubocop:disable Style/SafeNavigation + uri.query = nil if uri.query && uri.query.empty? + # rubocop:enable Style/SafeNavigation + uri + end + + # Creates a duplicate of this Faraday::Connection. + # + # @api private + # + # @return [Faraday::Connection] + def dup + self.class.new(build_exclusive_url, + headers: headers.dup, + params: params.dup, + builder: builder.dup, + ssl: ssl.dup, + request: options.dup) + end + + # Yields username and password extracted from a URI if they both exist. + # + # @param uri [URI] + # @yield [username, password] any username and password + # @yieldparam username [String] any username from URI + # @yieldparam password [String] any password from URI + # @return [void] + # @api private + def with_uri_credentials(uri) + return unless uri.user && uri.password + + yield(Utils.unescape(uri.user), Utils.unescape(uri.password)) + end + + def set_authorization_header(header_type, *args) + header = Faraday::Request + .lookup_middleware(header_type) + .header(*args) + + headers[Faraday::Request::Authorization::KEY] = header + end + + def proxy_from_env(url) + return if Faraday.ignore_env_proxy + + uri = nil + if URI.parse('').respond_to?(:find_proxy) + case url + when String + uri = Utils.URI(url) + uri = if uri.host.nil? + find_default_proxy + else + URI.parse("#{uri.scheme}://#{uri.host}").find_proxy + end + when URI + uri = url.find_proxy + when nil + uri = find_default_proxy + end + else + warn 'no_proxy is unsupported' if ENV['no_proxy'] || ENV['NO_PROXY'] + uri = find_default_proxy + end + ProxyOptions.from(uri) if uri + end + + def find_default_proxy + uri = ENV['http_proxy'] + return unless uri && !uri.empty? + + uri = "http://#{uri}" unless uri.match?(/^http/i) + uri + end + + def proxy_for_request(url) + return proxy if @manual_proxy + + if url && Utils.URI(url).absolute? + proxy_from_env(url) + else + proxy + end + end + + def support_parallel?(adapter) + adapter&.respond_to?(:supports_parallel?) && adapter&.supports_parallel? 
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/dependency_loader.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/dependency_loader.rb new file mode 100644 index 0000000..3f1708f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/dependency_loader.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'ruby2_keywords' + +module Faraday + # DependencyLoader helps Faraday adapters and middleware load dependencies. + module DependencyLoader + attr_reader :load_error + + # Executes a block which should try to require and reference dependent + # libraries + def dependency(lib = nil) + lib ? require(lib) : yield + rescue LoadError, NameError => e + self.load_error = e + end + + ruby2_keywords def new(*) + unless loaded? + raise "missing dependency for #{self}: #{load_error.message}" + end + + super + end + + def loaded? + load_error.nil? + end + + def inherited(subclass) + super + subclass.send(:load_error=, load_error) + end + + private + + attr_writer :load_error + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/deprecate.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/deprecate.rb new file mode 100644 index 0000000..66005d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/deprecate.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module Faraday + # @param new_klass [Class] new Klass to use + # + # @return [Class] A modified version of new_klass that warns on + # usage about deprecation. + # @see Faraday::Deprecate + module DeprecatedClass + def self.proxy_class(origclass, ver = '1.0') + proxy = Class.new(origclass) do + const_set('ORIG_CLASS', origclass) + + class << self + extend Faraday::Deprecate + + def ===(other) + (superclass == const_get('ORIG_CLASS') && other.is_a?(superclass)) || super + end + end + end + proxy.singleton_class.send(:deprecate, :new, "#{origclass}.new", ver) + proxy.singleton_class.send(:deprecate, :inherited, origclass.name, ver) + proxy + end + end + + # Deprecation using semver instead of date, based on Gem::Deprecate + # Provides a single method +deprecate+ to be used to declare when + # something is going away. + # + # class Legacy + # def self.klass_method + # # ... + # end + # + # def instance_method + # # ... + # end + # + # extend Faraday::Deprecate + # deprecate :instance_method, "X.z", '1.0' + # + # class << self + # extend Faraday::Deprecate + # deprecate :klass_method, :none, '1.0' + # end + # end + module Deprecate + def self.skip # :nodoc: + @skip ||= begin + case ENV['FARADAY_DEPRECATE'].to_s.downcase + when '1', 'warn' then :warn + else :skip + end + end + @skip == :skip + end + + def self.skip=(value) # :nodoc: + @skip = value ? :skip : :warn + end + + # Temporarily turn off warnings. Intended for tests only. + def skip_during + original = Faraday::Deprecate.skip + Faraday::Deprecate.skip = true + yield + ensure + Faraday::Deprecate.skip = original + end + + # Simple deprecation method that deprecates +name+ by wrapping it up + # in a dummy method. It warns on each call to the dummy method + # telling the user of +repl+ (unless +repl+ is :none) and the + # semver that it is planned to go away. + # @param name [Symbol] the method symbol to deprecate + # @param repl [#to_s, :none] the replacement to use, when `:none` it will + # alert the user that no replacement is present. + # @param ver [String] the semver the method will be removed. 
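+    # @param custom_message [String, nil] optional extra text appended to the
+    #   deprecation warning.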
+ def deprecate(name, repl, ver, custom_message = nil) + class_eval do + gem_ver = Gem::Version.new(ver) + old = "_deprecated_#{name}" + alias_method old, name + define_method name do |*args, &block| + mod = is_a? Module + target = mod ? "#{self}." : "#{self.class}#" + target_message = if name == :inherited + "Inheriting #{self}" + else + "#{target}#{name}" + end + + msg = [ + "NOTE: #{target_message} is deprecated", + repl == :none ? ' with no replacement' : "; use #{repl} instead. ", + "It will be removed in or after version #{gem_ver} ", + custom_message, + "\n#{target}#{name} called from #{Gem.location_of_caller.join(':')}" + ] + warn "#{msg.join}." unless Faraday::Deprecate.skip + send old, *args, &block + end + end + end + + module_function :deprecate, :skip_during + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/flat_params_encoder.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/flat_params_encoder.rb new file mode 100644 index 0000000..bc10c8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/flat_params_encoder.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +module Faraday + # FlatParamsEncoder manages URI params as a flat hash. Any Array values repeat + # the parameter multiple times. + module FlatParamsEncoder + class << self + extend Forwardable + def_delegators :'Faraday::Utils', :escape, :unescape + end + + # Encode converts the given param into a URI querystring. Keys and values + # will converted to strings and appropriately escaped for the URI. + # + # @param params [Hash] query arguments to convert. + # + # @example + # + # encode({a: %w[one two three], b: true, c: "C"}) + # # => 'a=one&a=two&a=three&b=true&c=C' + # + # @return [String] the URI querystring (without the leading '?') + def self.encode(params) + return nil if params.nil? + + unless params.is_a?(Array) + unless params.respond_to?(:to_hash) + raise TypeError, + "Can't convert #{params.class} into Hash." + end + params = params.to_hash + params = params.map do |key, value| + key = key.to_s if key.is_a?(Symbol) + [key, value] + end + + # Only to be used for non-Array inputs. Arrays should preserve order. + params.sort! if @sort_params + end + + # The params have form [['key1', 'value1'], ['key2', 'value2']]. + buffer = +'' + params.each do |key, value| + encoded_key = escape(key) + if value.nil? + buffer << "#{encoded_key}&" + elsif value.is_a?(Array) + if value.empty? + buffer << "#{encoded_key}=&" + else + value.each do |sub_value| + encoded_value = escape(sub_value) + buffer << "#{encoded_key}=#{encoded_value}&" + end + end + else + encoded_value = escape(value) + buffer << "#{encoded_key}=#{encoded_value}&" + end + end + buffer.chop + end + + # Decode converts the given URI querystring into a hash. + # + # @param query [String] query arguments to parse. + # + # @example + # + # decode('a=one&a=two&a=three&b=true&c=C') + # # => {"a"=>["one", "two", "three"], "b"=>"true", "c"=>"C"} + # + # @return [Hash] parsed keys and value strings from the querystring. + def self.decode(query) + return nil if query.nil? + + empty_accumulator = {} + + split_query = (query.split('&').map do |pair| + pair.split('=', 2) if pair && !pair.empty? + end).compact + split_query.each_with_object(empty_accumulator.dup) do |pair, accu| + pair[0] = unescape(pair[0]) + pair[1] = true if pair[1].nil? 
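+        # A bare key with no '=' was mapped to true above; for string values,
+        # '+' is converted to a space before the value is unescaped.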
+ if pair[1].respond_to?(:to_str) + pair[1] = unescape(pair[1].to_str.tr('+', ' ')) + end + if accu[pair[0]].is_a?(Array) + accu[pair[0]] << pair[1] + elsif accu[pair[0]] + accu[pair[0]] = [accu[pair[0]], pair[1]] + else + accu[pair[0]] = pair[1] + end + end + end + + class << self + attr_accessor :sort_params + end + + # Useful default for OAuth and caching. + @sort_params = true + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/nested_params_encoder.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/nested_params_encoder.rb new file mode 100644 index 0000000..705cd3e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/encoders/nested_params_encoder.rb @@ -0,0 +1,176 @@ +# frozen_string_literal: true + +module Faraday + # Sub-module for encoding parameters into query-string. + module EncodeMethods + # @param params [nil, Array, #to_hash] parameters to be encoded + # + # @return [String] the encoded params + # + # @raise [TypeError] if params can not be converted to a Hash + def encode(params) + return nil if params.nil? + + unless params.is_a?(Array) + unless params.respond_to?(:to_hash) + raise TypeError, "Can't convert #{params.class} into Hash." + end + + params = params.to_hash + params = params.map do |key, value| + key = key.to_s if key.is_a?(Symbol) + [key, value] + end + + # Only to be used for non-Array inputs. Arrays should preserve order. + params.sort! if @sort_params + end + + # The params have form [['key1', 'value1'], ['key2', 'value2']]. + buffer = +'' + params.each do |parent, value| + encoded_parent = escape(parent) + buffer << "#{encode_pair(encoded_parent, value)}&" + end + buffer.chop + end + + protected + + def encode_pair(parent, value) + if value.is_a?(Hash) + encode_hash(parent, value) + elsif value.is_a?(Array) + encode_array(parent, value) + elsif value.nil? + parent + else + encoded_value = escape(value) + "#{parent}=#{encoded_value}" + end + end + + def encode_hash(parent, value) + value = value.map { |key, val| [escape(key), val] }.sort + + buffer = +'' + value.each do |key, val| + new_parent = "#{parent}%5B#{key}%5D" + buffer << "#{encode_pair(new_parent, val)}&" + end + buffer.chop + end + + def encode_array(parent, value) + new_parent = "#{parent}%5B%5D" + return new_parent if value.empty? + + buffer = +'' + value.each { |val| buffer << "#{encode_pair(new_parent, val)}&" } + buffer.chop + end + end + + # Sub-module for decoding query-string into parameters. + module DecodeMethods + # @param query [nil, String] + # + # @return [Array] the decoded params + # + # @raise [TypeError] if the nesting is incorrect + def decode(query) + return nil if query.nil? + + params = {} + query.split('&').each do |pair| + next if pair.empty? 
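+        # Unescape the raw key/value, then fold the pair into the nested
+        # params structure; dehash later converts hashes whose keys are all
+        # numeric into arrays.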
+ + key, value = pair.split('=', 2) + key = unescape(key) + value = unescape(value.tr('+', ' ')) if value + decode_pair(key, value, params) + end + + dehash(params, 0) + end + + protected + + SUBKEYS_REGEX = /[^\[\]]+(?:\]?\[\])?/.freeze + + def decode_pair(key, value, context) + subkeys = key.scan(SUBKEYS_REGEX) + subkeys.each_with_index do |subkey, i| + is_array = subkey =~ /[\[\]]+\Z/ + subkey = $` if is_array + last_subkey = i == subkeys.length - 1 + + context = prepare_context(context, subkey, is_array, last_subkey) + add_to_context(is_array, context, value, subkey) if last_subkey + end + end + + def prepare_context(context, subkey, is_array, last_subkey) + if !last_subkey || is_array + context = new_context(subkey, is_array, context) + end + if context.is_a?(Array) && !is_array + context = match_context(context, subkey) + end + context + end + + def new_context(subkey, is_array, context) + value_type = is_array ? Array : Hash + if context[subkey] && !context[subkey].is_a?(value_type) + raise TypeError, "expected #{value_type.name} " \ + "(got #{context[subkey].class.name}) for param `#{subkey}'" + end + + context[subkey] ||= value_type.new + end + + def match_context(context, subkey) + context << {} if !context.last.is_a?(Hash) || context.last.key?(subkey) + context.last + end + + def add_to_context(is_array, context, value, subkey) + is_array ? context << value : context[subkey] = value + end + + # Internal: convert a nested hash with purely numeric keys into an array. + # FIXME: this is not compatible with Rack::Utils.parse_nested_query + # @!visibility private + def dehash(hash, depth) + hash.each do |key, value| + hash[key] = dehash(value, depth + 1) if value.is_a?(Hash) + end + + if depth.positive? && !hash.empty? && hash.keys.all? { |k| k =~ /^\d+$/ } + hash.sort.map(&:last) + else + hash + end + end + end + + # This is the default encoder for Faraday requests. + # Using this encoder, parameters will be encoded respecting their structure, + # so you can send objects such as Arrays or Hashes as parameters + # for your requests. + module NestedParamsEncoder + class << self + attr_accessor :sort_params + + extend Forwardable + def_delegators :'Faraday::Utils', :escape, :unescape + end + + # Useful default for OAuth and caching. + @sort_params = true + + extend EncodeMethods + extend DecodeMethods + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/error.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/error.rb new file mode 100644 index 0000000..93c446d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/error.rb @@ -0,0 +1,146 @@ +# frozen_string_literal: true + +# Faraday namespace. +module Faraday + # Faraday error base class. + class Error < StandardError + attr_reader :response, :wrapped_exception + + def initialize(exc, response = nil) + @wrapped_exception = nil unless defined?(@wrapped_exception) + @response = nil unless defined?(@response) + super(exc_msg_and_response!(exc, response)) + end + + def backtrace + if @wrapped_exception + @wrapped_exception.backtrace + else + super + end + end + + def inspect + inner = +'' + inner << " wrapped=#{@wrapped_exception.inspect}" if @wrapped_exception + inner << " response=#{@response.inspect}" if @response + inner << " #{super}" if inner.empty? 
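+      # (illustrative) e.g. #<Faraday::ClientError response={:status=>404}>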
+ %(#<#{self.class}#{inner}>) + end + + def response_status + @response[:status] if @response + end + + def response_headers + @response[:headers] if @response + end + + def response_body + @response[:body] if @response + end + + protected + + # Pulls out potential parent exception and response hash, storing them in + # instance variables. + # exc - Either an Exception, a string message, or a response hash. + # response - Hash + # :status - Optional integer HTTP response status + # :headers - String key/value hash of HTTP response header + # values. + # :body - Optional string HTTP response body. + # :request - Hash + # :method - Symbol with the request HTTP method. + # :url_path - String with the url path requested. + # :params - String key/value hash of query params + # present in the request. + # :headers - String key/value hash of HTTP request + # header values. + # :body - String HTTP request body. + # + # If a subclass has to call this, then it should pass a string message + # to `super`. See NilStatusError. + def exc_msg_and_response!(exc, response = nil) + if @response.nil? && @wrapped_exception.nil? + @wrapped_exception, msg, @response = exc_msg_and_response(exc, response) + return msg + end + + exc.to_s + end + + # Pulls out potential parent exception and response hash. + def exc_msg_and_response(exc, response = nil) + return [exc, exc.message, response] if exc.respond_to?(:backtrace) + + return [nil, "the server responded with status #{exc[:status]}", exc] \ + if exc.respond_to?(:each_key) + + [nil, exc.to_s, response] + end + end + + # Faraday client error class. Represents 4xx status responses. + class ClientError < Error + end + + # Raised by Faraday::Response::RaiseError in case of a 400 response. + class BadRequestError < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 401 response. + class UnauthorizedError < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 403 response. + class ForbiddenError < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 404 response. + class ResourceNotFound < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 407 response. + class ProxyAuthError < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 409 response. + class ConflictError < ClientError + end + + # Raised by Faraday::Response::RaiseError in case of a 422 response. + class UnprocessableEntityError < ClientError + end + + # Faraday server error class. Represents 5xx status responses. + class ServerError < Error + end + + # A unified client error for timeouts. + class TimeoutError < ServerError + def initialize(exc = 'timeout', response = nil) + super(exc, response) + end + end + + # Raised by Faraday::Response::RaiseError in case of a nil status in response. + class NilStatusError < ServerError + def initialize(exc, response = nil) + exc_msg_and_response!(exc, response) + super('http status could not be derived from the server response') + end + end + + # A unified error for failed connections. + class ConnectionFailed < Error + end + + # A unified client error for SSL errors. 
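+ # Adapters typically raise this after rescuing the underlying
+ # OpenSSL::SSL::SSLError, e.g. on a certificate verification failure.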
+ class SSLError < Error + end + + # Raised by FaradayMiddleware::ResponseMiddleware + class ParsingError < Error + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/logging/formatter.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/logging/formatter.rb new file mode 100644 index 0000000..ba3e497 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/logging/formatter.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +require 'pp' +module Faraday + module Logging + # Serves as an integration point to customize logging + class Formatter + extend Forwardable + + DEFAULT_OPTIONS = { headers: true, bodies: false, + log_level: :info }.freeze + + def initialize(logger:, options:) + @logger = logger + @filter = [] + @options = DEFAULT_OPTIONS.merge(options) + end + + def_delegators :@logger, :debug, :info, :warn, :error, :fatal + + def request(env) + request_log = proc do + "#{env.method.upcase} #{apply_filters(env.url.to_s)}" + end + public_send(log_level, 'request', &request_log) + + log_headers('request', env.request_headers) if log_headers?(:request) + log_body('request', env[:body]) if env[:body] && log_body?(:request) + end + + def response(env) + status = proc { "Status #{env.status}" } + public_send(log_level, 'response', &status) + + log_headers('response', env.response_headers) if log_headers?(:response) + log_body('response', env[:body]) if env[:body] && log_body?(:response) + end + + def filter(filter_word, filter_replacement) + @filter.push([filter_word, filter_replacement]) + end + + private + + def dump_headers(headers) + headers.map { |k, v| "#{k}: #{v.inspect}" }.join("\n") + end + + def dump_body(body) + if body.respond_to?(:to_str) + body.to_str + else + pretty_inspect(body) + end + end + + def pretty_inspect(body) + body.pretty_inspect + end + + def log_headers?(type) + case @options[:headers] + when Hash + @options[:headers][type] + else + @options[:headers] + end + end + + def log_body?(type) + case @options[:bodies] + when Hash + @options[:bodies][type] + else + @options[:bodies] + end + end + + def apply_filters(output) + @filter.each do |pattern, replacement| + output = output.to_s.gsub(pattern, replacement) + end + output + end + + def log_level + unless %i[debug info warn error fatal].include?(@options[:log_level]) + return :info + end + + @options[:log_level] + end + + def log_headers(type, headers) + headers_log = proc { apply_filters(dump_headers(headers)) } + public_send(log_level, type, &headers_log) + end + + def log_body(type, body) + body_log = proc { apply_filters(dump_body(body)) } + public_send(log_level, type, &body_log) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/methods.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/methods.rb new file mode 100644 index 0000000..53e3903 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/methods.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +module Faraday + METHODS_WITH_QUERY = %w[get head delete trace].freeze + METHODS_WITH_BODY = %w[post put patch].freeze +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware.rb new file mode 100644 index 0000000..fcf9f4f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module Faraday + # Middleware is the basic base class of any 
Faraday middleware. + class Middleware + extend MiddlewareRegistry + extend DependencyLoader + + attr_reader :app, :options + + def initialize(app = nil, options = {}) + @app = app + @options = options + end + + def call(env) + on_request(env) if respond_to?(:on_request) + app.call(env).on_complete do |environment| + on_complete(environment) if respond_to?(:on_complete) + end + end + + def close + if app.respond_to?(:close) + app.close + else + warn "#{app} does not implement \#close!" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware_registry.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware_registry.rb new file mode 100644 index 0000000..021038f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/middleware_registry.rb @@ -0,0 +1,129 @@ +# frozen_string_literal: true + +require 'monitor' + +module Faraday + # Adds the ability for other modules to register and lookup + # middleware classes. + module MiddlewareRegistry + # Register middleware class(es) on the current module. + # + # @param autoload_path [String] Middleware autoload path + # @param mapping [Hash{ + # Symbol => Module, + # Symbol => Array, + # }] Middleware mapping from a lookup symbol to a reference to the + # middleware. + # Classes can be expressed as: + # - a fully qualified constant + # - a Symbol + # - a Proc that will be lazily called to return the former + # - an array is given, its first element is the constant or symbol, + # and its second is a file to `require`. + # @return [void] + # + # @example Lookup by a constant + # + # module Faraday + # class Whatever + # # Middleware looked up by :foo returns Faraday::Whatever::Foo. + # register_middleware foo: Foo + # end + # end + # + # @example Lookup by a symbol + # + # module Faraday + # class Whatever + # # Middleware looked up by :bar returns + # # Faraday::Whatever.const_get(:Bar) + # register_middleware bar: :Bar + # end + # end + # + # @example Lookup by a symbol and string in an array + # + # module Faraday + # class Whatever + # # Middleware looked up by :baz requires 'baz' and returns + # # Faraday::Whatever.const_get(:Baz) + # register_middleware baz: [:Baz, 'baz'] + # end + # end + # + def register_middleware(autoload_path = nil, mapping = nil) + if mapping.nil? + mapping = autoload_path + autoload_path = nil + end + middleware_mutex do + @middleware_autoload_path = autoload_path if autoload_path + (@registered_middleware ||= {}).update(mapping) + end + end + + # Unregister a previously registered middleware class. + # + # @param key [Symbol] key for the registered middleware. + def unregister_middleware(key) + @registered_middleware.delete(key) + end + + # Lookup middleware class with a registered Symbol shortcut. + # + # @param key [Symbol] key for the registered middleware. + # @return [Class] a middleware Class. 
+ # @raise [Faraday::Error] if given key is not registered + # + # @example + # + # module Faraday + # class Whatever + # register_middleware foo: Foo + # end + # end + # + # Faraday::Whatever.lookup_middleware(:foo) + # # => Faraday::Whatever::Foo + # + def lookup_middleware(key) + load_middleware(key) || + raise(Faraday::Error, "#{key.inspect} is not registered on #{self}") + end + + def middleware_mutex(&block) + @middleware_mutex ||= Monitor.new + @middleware_mutex.synchronize(&block) + end + + def fetch_middleware(key) + defined?(@registered_middleware) && @registered_middleware[key] + end + + def load_middleware(key) + value = fetch_middleware(key) + case value + when Module + value + when Symbol, String + middleware_mutex do + @registered_middleware[key] = const_get(value) + end + when Proc + middleware_mutex do + @registered_middleware[key] = value.call + end + when Array + middleware_mutex do + const, path = value + if (root = @middleware_autoload_path) + path = "#{root}/#{path}" + end + require(path) + @registered_middleware[key] = const + end + load_middleware(key) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options.rb new file mode 100644 index 0000000..ee198d8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options.rb @@ -0,0 +1,218 @@ +# frozen_string_literal: true + +module Faraday + # Subclasses Struct with some special helpers for converting from a Hash to + # a Struct. + class Options < Struct + # Public + def self.from(value) + value ? new.update(value) : new + end + + # Public + def each + return to_enum(:each) unless block_given? + + members.each do |key| + yield(key.to_sym, send(key)) + end + end + + # Public + def update(obj) + obj.each do |key, value| + sub_options = self.class.options_for(key) + if sub_options + new_value = sub_options.from(value) if value + elsif value.is_a?(Hash) + new_value = value.dup + else + new_value = value + end + + send("#{key}=", new_value) unless new_value.nil? + end + self + end + + # Public + def delete(key) + value = send(key) + send("#{key}=", nil) + value + end + + # Public + def clear + members.each { |member| delete(member) } + end + + # Public + def merge!(other) + other.each do |key, other_value| + self_value = send(key) + sub_options = self.class.options_for(key) + new_value = if self_value && sub_options && other_value + self_value.merge(other_value) + else + other_value + end + send("#{key}=", new_value) unless new_value.nil? + end + self + end + + # Public + def merge(other) + dup.merge!(other) + end + + # Public + def deep_dup + self.class.from(self) + end + + # Public + def fetch(key, *args) + unless symbolized_key_set.include?(key.to_sym) + key_setter = "#{key}=" + if !args.empty? + send(key_setter, args.first) + elsif block_given? + send(key_setter, yield(key)) + else + raise self.class.fetch_error_class, "key not found: #{key.inspect}" + end + end + send(key) + end + + # Public + def values_at(*keys) + keys.map { |key| send(key) } + end + + # Public + def keys + members.reject { |member| send(member).nil? } + end + + # Public + def empty? + keys.empty? + end + + # Public + def each_key(&block) + return to_enum(:each_key) unless block_given? + + keys.each(&block) + end + + # Public + def key?(key) + keys.include?(key) + end + + alias has_key? key? + + # Public + def each_value(&block) + return to_enum(:each_value) unless block_given? 
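+ # Note: unlike #each_key, this iterates Struct#values, so members
+ # whose value is nil are yielded as well.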
+ + values.each(&block) + end + + # Public + def value?(value) + values.include?(value) + end + + alias has_value? value? + + # Public + def to_hash + hash = {} + members.each do |key| + value = send(key) + hash[key.to_sym] = value unless value.nil? + end + hash + end + + # Internal + def inspect + values = [] + members.each do |member| + value = send(member) + values << "#{member}=#{value.inspect}" if value + end + values = values.empty? ? '(empty)' : values.join(', ') + + %(#<#{self.class} #{values}>) + end + + # Internal + def self.options(mapping) + attribute_options.update(mapping) + end + + # Internal + def self.options_for(key) + attribute_options[key] + end + + # Internal + def self.attribute_options + @attribute_options ||= {} + end + + def self.memoized(key, &block) + unless block_given? + raise ArgumentError, '#memoized must be called with a block' + end + + memoized_attributes[key.to_sym] = block + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{key}() self[:#{key}]; end + RUBY + end + + def self.memoized_attributes + @memoized_attributes ||= {} + end + + def [](key) + key = key.to_sym + if (method = self.class.memoized_attributes[key]) + super(key) || (self[key] = instance_eval(&method)) + else + super + end + end + + def symbolized_key_set + @symbolized_key_set ||= Set.new(keys.map(&:to_sym)) + end + + def self.inherited(subclass) + super + subclass.attribute_options.update(attribute_options) + subclass.memoized_attributes.update(memoized_attributes) + end + + def self.fetch_error_class + @fetch_error_class ||= if Object.const_defined?(:KeyError) + ::KeyError + else + ::IndexError + end + end + end +end + +require 'faraday/options/request_options' +require 'faraday/options/ssl_options' +require 'faraday/options/proxy_options' +require 'faraday/options/connection_options' +require 'faraday/options/env' diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/connection_options.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/connection_options.rb new file mode 100644 index 0000000..5a72940 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/connection_options.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module Faraday + # ConnectionOptions contains the configurable properties for a Faraday + # connection object. + class ConnectionOptions < Options.new(:request, :proxy, :ssl, :builder, :url, + :parallel_manager, :params, :headers, + :builder_class) + + options request: RequestOptions, ssl: SSLOptions + + memoized(:request) { self.class.options_for(:request).new } + + memoized(:ssl) { self.class.options_for(:ssl).new } + + memoized(:builder_class) { RackBuilder } + + def new_builder(block) + builder_class.new(&block) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/env.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/env.rb new file mode 100644 index 0000000..d7dac71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/env.rb @@ -0,0 +1,181 @@ +# frozen_string_literal: true + +module Faraday + # @!attribute method + # @return [Symbol] HTTP method (`:get`, `:post`) + # + # @!attribute body + # @return [String] The request body that will eventually be converted to a + # string. + # + # @!attribute url + # @return [URI] URI instance for the current request. + # + # @!attribute request + # @return [Hash] options for configuring the request. + # Options for configuring the request. 
+ # + # - `:timeout` open/read timeout Integer in seconds + # - `:open_timeout` - read timeout Integer in seconds + # - `:on_data` - Proc for streaming + # - `:proxy` - Hash of proxy options + # - `:uri` - Proxy Server URI + # - `:user` - Proxy server username + # - `:password` - Proxy server password + # + # @!attribute request_headers + # @return [Hash] HTTP Headers to be sent to the server. + # + # @!attribute ssl + # @return [Hash] options for configuring SSL requests + # + # @!attribute parallel_manager + # @return [Object] sent if the connection is in parallel mode + # + # @!attribute params + # @return [Hash] + # + # @!attribute response + # @return [Response] + # + # @!attribute response_headers + # @return [Hash] HTTP headers from the server + # + # @!attribute status + # @return [Integer] HTTP response status code + # + # @!attribute reason_phrase + # @return [String] + class Env < Options.new(:method, :request_body, :url, :request, + :request_headers, :ssl, :parallel_manager, :params, + :response, :response_headers, :status, + :reason_phrase, :response_body) + + # rubocop:disable Naming/ConstantName + ContentLength = 'Content-Length' + StatusesWithoutBody = Set.new [204, 304] + SuccessfulStatuses = (200..299).freeze + # rubocop:enable Naming/ConstantName + + # A Set of HTTP verbs that typically send a body. If no body is set for + # these requests, the Content-Length header is set to 0. + MethodsWithBodies = Set.new(Faraday::METHODS_WITH_BODY.map(&:to_sym)) + + options request: RequestOptions, + request_headers: Utils::Headers, response_headers: Utils::Headers + + extend Forwardable + + def_delegators :request, :params_encoder + + # Build a new Env from given value. Respects and updates `custom_members`. + # + # @param value [Object] a value fitting Option.from(v). + # @return [Env] from given value + def self.from(value) + env = super(value) + if value.respond_to?(:custom_members) + env.custom_members.update(value.custom_members) + end + env + end + + # @param key [Object] + def [](key) + return self[current_body] if key == :body + + if in_member_set?(key) + super(key) + else + custom_members[key] + end + end + + # @param key [Object] + # @param value [Object] + def []=(key, value) + if key == :body + super(current_body, value) + return + end + + if in_member_set?(key) + super(key, value) + else + custom_members[key] = value + end + end + + def current_body + !!status ? :response_body : :request_body + end + + def body + self[:body] + end + + def body=(value) + self[:body] = value + end + + # @return [Boolean] true if status is in the set of {SuccessfulStatuses}. + def success? + SuccessfulStatuses.include?(status) + end + + # @return [Boolean] true if there's no body yet, and the method is in the + # set of {MethodsWithBodies}. + def needs_body? + !body && MethodsWithBodies.include?(method) + end + + # Sets content length to zero and the body to the empty string. + def clear_body + request_headers[ContentLength] = '0' + self.body = +'' + end + + # @return [Boolean] true if the status isn't in the set of + # {StatusesWithoutBody}. + def parse_body? + !StatusesWithoutBody.include?(status) + end + + # @return [Boolean] true if there is a parallel_manager + def parallel? + !!parallel_manager + end + + def inspect + attrs = [nil] + members.each do |mem| + if (value = send(mem)) + attrs << "@#{mem}=#{value.inspect}" + end + end + attrs << "@custom=#{custom_members.inspect}" unless custom_members.empty? 
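+ # Illustrative output (hypothetical values):
+ #   #<Faraday::Env @method=:get @custom={:foo=>"bar"}>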
+ %(#<#{self.class}#{attrs.join(' ')}>) + end + + # @private + def custom_members + @custom_members ||= {} + end + + # @private + if members.first.is_a?(Symbol) + def in_member_set?(key) + self.class.member_set.include?(key.to_sym) + end + else + def in_member_set?(key) + self.class.member_set.include?(key.to_s) + end + end + + # @private + def self.member_set + @member_set ||= Set.new(members) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/proxy_options.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/proxy_options.rb new file mode 100644 index 0000000..f94fe57 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/proxy_options.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +module Faraday + # ProxyOptions contains the configurable properties for the proxy + # configuration used when making an HTTP request. + class ProxyOptions < Options.new(:uri, :user, :password) + extend Forwardable + def_delegators :uri, :scheme, :scheme=, :host, :host=, :port, :port=, + :path, :path= + + def self.from(value) + case value + when String + # URIs without a scheme should default to http (like 'example:123'). + # This fixes #1282 and prevents a silent failure in some adapters. + value = "http://#{value}" unless value.include?('://') + value = { uri: Utils.URI(value) } + when URI + value = { uri: value } + when Hash, Options + if (uri = value.delete(:uri)) + value[:uri] = Utils.URI(uri) + end + end + + super(value) + end + + memoized(:user) { uri&.user && Utils.unescape(uri.user) } + memoized(:password) { uri&.password && Utils.unescape(uri.password) } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/request_options.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/request_options.rb new file mode 100644 index 0000000..1a96fb8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/request_options.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module Faraday + # RequestOptions contains the configurable properties for a Faraday request. + class RequestOptions < Options.new(:params_encoder, :proxy, :bind, + :timeout, :open_timeout, :read_timeout, + :write_timeout, :boundary, :oauth, + :context, :on_data) + + def []=(key, value) + if key && key.to_sym == :proxy + super(key, value ? ProxyOptions.from(value) : nil) + else + super(key, value) + end + end + + def stream_response? + on_data.is_a?(Proc) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/ssl_options.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/ssl_options.rb new file mode 100644 index 0000000..1fa5811 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/options/ssl_options.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module Faraday + # SSL-related options. 
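+ # @example Disabling certificate verification (illustrative; not
+ #   recommended outside development)
+ #   Faraday.new('https://example.com', ssl: { verify: false })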
+ # + # @!attribute verify + # @return [Boolean] whether to verify SSL certificates or not + # + # @!attribute ca_file + # @return [String] CA file + # + # @!attribute ca_path + # @return [String] CA path + # + # @!attribute verify_mode + # @return [Integer] Any `OpenSSL::SSL::` constant (see https://ruby-doc.org/stdlib-2.5.1/libdoc/openssl/rdoc/OpenSSL/SSL.html) + # + # @!attribute cert_store + # @return [OpenSSL::X509::Store] certificate store + # + # @!attribute client_cert + # @return [String, OpenSSL::X509::Certificate] client certificate + # + # @!attribute client_key + # @return [String, OpenSSL::PKey::RSA, OpenSSL::PKey::DSA] client key + # + # @!attribute certificate + # @return [OpenSSL::X509::Certificate] certificate (Excon only) + # + # @!attribute private_key + # @return [OpenSSL::PKey::RSA, OpenSSL::PKey::DSA] private key (Excon only) + # + # @!attribute verify_depth + # @return [Integer] maximum depth for the certificate chain verification + # + # @!attribute version + # @return [String, Symbol] SSL version (see https://ruby-doc.org/stdlib-2.5.1/libdoc/openssl/rdoc/OpenSSL/SSL/SSLContext.html#method-i-ssl_version-3D) + # + # @!attribute min_version + # @return [String, Symbol] minimum SSL version (see https://ruby-doc.org/stdlib-2.5.1/libdoc/openssl/rdoc/OpenSSL/SSL/SSLContext.html#method-i-min_version-3D) + # + # @!attribute max_version + # @return [String, Symbol] maximum SSL version (see https://ruby-doc.org/stdlib-2.5.1/libdoc/openssl/rdoc/OpenSSL/SSL/SSLContext.html#method-i-max_version-3D) + class SSLOptions < Options.new(:verify, :ca_file, :ca_path, :verify_mode, + :cert_store, :client_cert, :client_key, + :certificate, :private_key, :verify_depth, + :version, :min_version, :max_version) + + # @return [Boolean] true if should verify + def verify? + verify != false + end + + # @return [Boolean] true if should not verify + def disable? + !verify? + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/parameters.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/parameters.rb new file mode 100644 index 0000000..cfb35d0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/parameters.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require 'forwardable' +require 'faraday/encoders/nested_params_encoder' +require 'faraday/encoders/flat_params_encoder' diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/rack_builder.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/rack_builder.rb new file mode 100644 index 0000000..42da634 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/rack_builder.rb @@ -0,0 +1,249 @@ +# frozen_string_literal: true + +require 'ruby2_keywords' +require 'faraday/adapter_registry' + +module Faraday + # A Builder that processes requests into responses by passing through an inner + # middleware stack (heavily inspired by Rack). 
+ # + # @example + # Faraday::Connection.new(url: 'http://sushi.com') do |builder| + # builder.request :url_encoded # Faraday::Request::UrlEncoded + # builder.adapter :net_http # Faraday::Adapter::NetHttp + # end + class RackBuilder + # Used to detect missing arguments + NO_ARGUMENT = Object.new + + attr_accessor :handlers + + # Error raised when trying to modify the stack after calling `lock!` + class StackLocked < RuntimeError; end + + # borrowed from ActiveSupport::Dependencies::Reference & + # ActionDispatch::MiddlewareStack::Middleware + class Handler + REGISTRY = Faraday::AdapterRegistry.new + + attr_reader :name + + ruby2_keywords def initialize(klass, *args, &block) + @name = klass.to_s + REGISTRY.set(klass) if klass.respond_to?(:name) + @args = args + @block = block + end + + def klass + REGISTRY.get(@name) + end + + def inspect + @name + end + + def ==(other) + if other.is_a? Handler + name == other.name + elsif other.respond_to? :name + klass == other + else + @name == other.to_s + end + end + + def build(app = nil) + klass.new(app, *@args, &@block) + end + end + + def initialize(handlers = [], adapter = nil, &block) + @adapter = adapter + @handlers = handlers + if block_given? + build(&block) + elsif @handlers.empty? + # default stack, if nothing else is configured + request :url_encoded + self.adapter Faraday.default_adapter + end + end + + def build(options = {}) + raise_if_locked + @handlers.clear unless options[:keep] + yield(self) if block_given? + adapter(Faraday.default_adapter) unless @adapter + end + + def [](idx) + @handlers[idx] + end + + # Locks the middleware stack to ensure no further modifications are made. + def lock! + @handlers.freeze + end + + def locked? + @handlers.frozen? + end + + ruby2_keywords def use(klass, *args, &block) + if klass.is_a? Symbol + use_symbol(Faraday::Middleware, klass, *args, &block) + else + raise_if_locked + raise_if_adapter(klass) + @handlers << self.class::Handler.new(klass, *args, &block) + end + end + + ruby2_keywords def request(key, *args, &block) + use_symbol(Faraday::Request, key, *args, &block) + end + + ruby2_keywords def response(key, *args, &block) + use_symbol(Faraday::Response, key, *args, &block) + end + + ruby2_keywords def adapter(klass = NO_ARGUMENT, *args, &block) + return @adapter if klass == NO_ARGUMENT + + klass = Faraday::Adapter.lookup_middleware(klass) if klass.is_a?(Symbol) + @adapter = self.class::Handler.new(klass, *args, &block) + end + + ## methods to push onto the various positions in the stack: + + ruby2_keywords def insert(index, *args, &block) + raise_if_locked + index = assert_index(index) + handler = self.class::Handler.new(*args, &block) + @handlers.insert(index, handler) + end + + alias insert_before insert + + ruby2_keywords def insert_after(index, *args, &block) + index = assert_index(index) + insert(index + 1, *args, &block) + end + + ruby2_keywords def swap(index, *args, &block) + raise_if_locked + index = assert_index(index) + @handlers.delete_at(index) + insert(index, *args, &block) + end + + def delete(handler) + raise_if_locked + @handlers.delete(handler) + end + + # Processes a Request into a Response by passing it through this Builder's + # middleware stack. + # + # @param connection [Faraday::Connection] + # @param request [Faraday::Request] + # + # @return [Faraday::Response] + def build_response(connection, request) + app.call(build_env(connection, request)) + end + + # The "rack app" wrapped in middleware. All requests are sent here. 
+ # + # The builder is responsible for creating the app object. After this, + # the builder gets locked to ensure no further modifications are made + # to the middleware stack. + # + # Returns an object that responds to `call` and returns a Response. + def app + @app ||= begin + lock! + to_app + end + end + + def to_app + # last added handler is the deepest and thus closest to the inner app + # adapter is always the last one + @handlers.reverse.inject(@adapter.build) do |app, handler| + handler.build(app) + end + end + + def ==(other) + other.is_a?(self.class) && + @handlers == other.handlers && + @adapter == other.adapter + end + + def dup + self.class.new(@handlers.dup, @adapter.dup) + end + + # ENV Keys + # :http_method - a symbolized request HTTP method (:get, :post) + # :body - the request body that will eventually be converted to a string. + # :url - URI instance for the current request. + # :status - HTTP response status code + # :request_headers - hash of HTTP Headers to be sent to the server + # :response_headers - Hash of HTTP headers from the server + # :parallel_manager - sent if the connection is in parallel mode + # :request - Hash of options for configuring the request. + # :timeout - open/read timeout Integer in seconds + # :open_timeout - read timeout Integer in seconds + # :proxy - Hash of proxy options + # :uri - Proxy Server URI + # :user - Proxy server username + # :password - Proxy server password + # :ssl - Hash of options for configuring SSL requests. + def build_env(connection, request) + exclusive_url = connection.build_exclusive_url( + request.path, request.params, + request.options.params_encoder + ) + + Env.new(request.http_method, request.body, exclusive_url, + request.options, request.headers, connection.ssl, + connection.parallel_manager) + end + + private + + LOCK_ERR = "can't modify middleware stack after making a request" + + def raise_if_locked + raise StackLocked, LOCK_ERR if locked? + end + + def raise_if_adapter(klass) + return unless is_adapter?(klass) + + raise 'Adapter should be set using the `adapter` method, not `use`' + end + + def adapter_set? + !@adapter.nil? + end + + def is_adapter?(klass) # rubocop:disable Naming/PredicateName + klass <= Faraday::Adapter + end + + ruby2_keywords def use_symbol(mod, key, *args, &block) + use(mod.lookup_middleware(key), *args, &block) + end + + def assert_index(index) + idx = index.is_a?(Integer) ? index : @handlers.index(index) + raise "No such handler: #{index.inspect}" unless idx + + idx + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request.rb new file mode 100644 index 0000000..0358a47 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +module Faraday + # Used to setup URLs, params, headers, and the request body in a sane manner. 
+ # + # @example + # @connection.post do |req| + # req.url 'http://localhost', 'a' => '1' # 'http://localhost?a=1' + # req.headers['b'] = '2' # Header + # req.params['c'] = '3' # GET Param + # req['b'] = '2' # also Header + # req.body = 'abc' + # end + # + # @!attribute http_method + # @return [Symbol] the HTTP method of the Request + # @!attribute path + # @return [URI, String] the path + # @!attribute params + # @return [Hash] query parameters + # @!attribute headers + # @return [Faraday::Utils::Headers] headers + # @!attribute body + # @return [Hash] body + # @!attribute options + # @return [RequestOptions] options + # + # rubocop:disable Style/StructInheritance + class Request < Struct.new( + :http_method, :path, :params, :headers, :body, :options + ) + # rubocop:enable Style/StructInheritance + + extend MiddlewareRegistry + + register_middleware File.expand_path('request', __dir__), + url_encoded: [:UrlEncoded, 'url_encoded'], + authorization: [:Authorization, 'authorization'], + basic_auth: [ + :BasicAuthentication, + 'basic_authentication' + ], + token_auth: [ + :TokenAuthentication, + 'token_authentication' + ], + instrumentation: [:Instrumentation, 'instrumentation'], + json: [:Json, 'json'] + + # @param request_method [String] + # @yield [request] for block customization, if block given + # @yieldparam request [Request] + # @return [Request] + def self.create(request_method) + new(request_method).tap do |request| + yield(request) if block_given? + end + end + + def method + http_method + end + + extend Faraday::Deprecate + deprecate :method, :http_method, '2.0' + + # Replace params, preserving the existing hash type. + # + # @param hash [Hash] new params + def params=(hash) + if params + params.replace hash + else + super + end + end + + # Replace request headers, preserving the existing hash type. + # + # @param hash [Hash] new headers + def headers=(hash) + if headers + headers.replace hash + else + super + end + end + + # Update path and params. + # + # @param path [URI, String] + # @param params [Hash, nil] + # @return [void] + def url(path, params = nil) + if path.respond_to? :query + if (query = path.query) + path = path.dup + path.query = nil + end + else + anchor_index = path.index('#') + path = path.slice(0, anchor_index) unless anchor_index.nil? + path, query = path.split('?', 2) + end + self.path = path + self.params.merge_query query, options.params_encoder + self.params.update(params) if params + end + + # @param key [Object] key to look up in headers + # @return [Object] value of the given header name + def [](key) + headers[key] + end + + # @param key [Object] key of header to write + # @param value [Object] value of header + def []=(key, value) + headers[key] = value + end + + # Marshal serialization support. + # + # @return [Hash] the hash ready to be serialized in Marshal. + def marshal_dump + { + http_method: http_method, + body: body, + headers: headers, + path: path, + params: params, + options: options + } + end + + # Marshal serialization support. + # Restores the instance variables according to the +serialised+. + # @param serialised [Hash] the serialised object. 
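+ # @example Round-trip (illustrative): a dumped and re-loaded request
+ #   keeps its fields, since Request is a Struct:
+ #   Marshal.load(Marshal.dump(request)).path == request.path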
+ def marshal_load(serialised) + self.http_method = serialised[:http_method] + self.body = serialised[:body] + self.headers = serialised[:headers] + self.path = serialised[:path] + self.params = serialised[:params] + self.options = serialised[:options] + end + + # @return [Env] the Env for this Request + def to_env(connection) + Env.new(http_method, body, connection.build_exclusive_url(path, params), + options, headers, connection.ssl, connection.parallel_manager) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/authorization.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/authorization.rb new file mode 100644 index 0000000..45bb21c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/authorization.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require 'base64' + +module Faraday + class Request + # Request middleware for the Authorization HTTP header + class Authorization < Faraday::Middleware + unless defined?(::Faraday::Request::Authorization::KEY) + KEY = 'Authorization' + end + + # @param type [String, Symbol] + # @param token [String, Symbol, Hash] + # @return [String] a header value + def self.header(type, token) + case token + when String, Symbol, Proc + token = token.call if token.is_a?(Proc) + "#{type} #{token}" + when Hash + build_hash(type.to_s, token) + else + raise ArgumentError, + "Can't build an Authorization #{type}" \ + "header from #{token.inspect}" + end + end + + # @param type [String] + # @param hash [Hash] + # @return [String] type followed by comma-separated key=value pairs + # @api private + def self.build_hash(type, hash) + comma = ', ' + values = [] + hash.each do |key, value| + value = value.call if value.is_a?(Proc) + values << "#{key}=#{value.to_s.inspect}" + end + "#{type} #{values * comma}" + end + + # @param app [#call] + # @param type [String, Symbol] Type of Authorization + # @param param [String, Symbol, Hash, Proc] parameter to build the Authorization header. + # This value can be a proc, in which case it will be invoked on each request. + def initialize(app, type, param) + @type = type + @param = param + super(app) + end + + # @param env [Faraday::Env] + def on_request(env) + return if env.request_headers[KEY] + + env.request_headers[KEY] = self.class.header(@type, @param) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/basic_authentication.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/basic_authentication.rb new file mode 100644 index 0000000..61c9a5b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/basic_authentication.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +require 'base64' + +module Faraday + class Request + # Authorization middleware for Basic Authentication. 
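+ # @example Typical usage via the registered key (illustrative)
+ #   conn = Faraday.new('http://example.com') do |f|
+ #     f.request :basic_auth, 'username', 'password'
+ #   end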
+ class BasicAuthentication < load_middleware(:authorization) + # @param login [String] + # @param pass [String] + # + # @return [String] a Basic Authentication header line + def self.header(login, pass) + value = Base64.encode64([login, pass].join(':')) + value.delete!("\n") + super(:Basic, value) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/instrumentation.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/instrumentation.rb new file mode 100644 index 0000000..a24442f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/instrumentation.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Faraday + class Request + # Middleware for instrumenting Requests. + class Instrumentation < Faraday::Middleware + # Options class used in Request::Instrumentation class. + class Options < Faraday::Options.new(:name, :instrumenter) + # @return [String] + def name + self[:name] ||= 'request.faraday' + end + + # @return [Class] + def instrumenter + self[:instrumenter] ||= ActiveSupport::Notifications + end + end + + # Instruments requests using Active Support. + # + # Measures time spent only for synchronous requests. + # + # @example Using ActiveSupport::Notifications to measure time spent + # for Faraday requests. + # ActiveSupport::Notifications + # .subscribe('request.faraday') do |name, starts, ends, _, env| + # url = env[:url] + # http_method = env[:method].to_s.upcase + # duration = ends - starts + # $stderr.puts '[%s] %s %s (%.3f s)' % + # [url.host, http_method, url.request_uri, duration] + # end + # @param app [#call] + # @param options [nil, Hash] Options hash + # @option options [String] :name ('request.faraday') + # Name of the instrumenter + # @option options [Class] :instrumenter (ActiveSupport::Notifications) + # Active Support instrumenter class. + def initialize(app, options = nil) + super(app) + @name, @instrumenter = Options.from(options) + .values_at(:name, :instrumenter) + end + + # @param env [Faraday::Env] + def call(env) + @instrumenter.instrument(@name, env) do + @app.call(env) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/json.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/json.rb new file mode 100644 index 0000000..fa219c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/json.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require 'json' + +module Faraday + class Request + # Request middleware that encodes the body as JSON. + # + # Processes only requests with matching Content-type or those without a type. + # If a request doesn't have a type but has a body, it sets the Content-type + # to JSON MIME-type. + # + # Doesn't try to encode bodies that already are in string form. + class Json < Middleware + MIME_TYPE = 'application/json' + MIME_TYPE_REGEX = %r{^application/(vnd\..+\+)?json$}.freeze + + def on_request(env) + match_content_type(env) do |data| + env[:body] = encode(data) + end + end + + private + + def encode(data) + ::JSON.generate(data) + end + + def match_content_type(env) + return unless process_request?(env) + + env[:request_headers][CONTENT_TYPE] ||= MIME_TYPE + yield env[:body] unless env[:body].respond_to?(:to_str) + end + + def process_request?(env) + type = request_type(env) + body?(env) && (type.empty? 
|| type.match?(MIME_TYPE_REGEX)) + end + + def body?(env) + (body = env[:body]) && !(body.respond_to?(:to_str) && body.empty?) + end + + def request_type(env) + type = env[:request_headers][CONTENT_TYPE].to_s + type = type.split(';', 2).first if type.index(';') + type + end + end + end +end + +Faraday::Request.register_middleware(json: Faraday::Request::Json) diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/token_authentication.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/token_authentication.rb new file mode 100644 index 0000000..f28264b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/token_authentication.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module Faraday + class Request + # TokenAuthentication is a middleware that adds a 'Token' header to a + # Faraday request. + class TokenAuthentication < load_middleware(:authorization) + # Public + def self.header(token, options = nil) + options ||= {} + options[:token] = token + super(:Token, options) + end + + def initialize(app, token, options = nil) + super(app, token, options) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/url_encoded.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/url_encoded.rb new file mode 100644 index 0000000..00cc973 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/request/url_encoded.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module Faraday + class Request + # Middleware for supporting urlencoded requests. + class UrlEncoded < Faraday::Middleware + unless defined?(::Faraday::Request::UrlEncoded::CONTENT_TYPE) + CONTENT_TYPE = 'Content-Type' + end + + class << self + attr_accessor :mime_type + end + self.mime_type = 'application/x-www-form-urlencoded' + + # Encodes as "application/x-www-form-urlencoded" if not already encoded or + # of another type. + # + # @param env [Faraday::Env] + def call(env) + match_content_type(env) do |data| + params = Faraday::Utils::ParamsHash[data] + env.body = params.to_query(env.params_encoder) + end + @app.call env + end + + # @param env [Faraday::Env] + # @yield [request_body] Body of the request + def match_content_type(env) + return unless process_request?(env) + + env.request_headers[CONTENT_TYPE] ||= self.class.mime_type + yield(env.body) unless env.body.respond_to?(:to_str) + end + + # @param env [Faraday::Env] + # + # @return [Boolean] True if the request has a body and its Content-Type is + # urlencoded. + def process_request?(env) + type = request_type(env) + env.body && (type.empty? || (type == self.class.mime_type)) + end + + # @param env [Faraday::Env] + # + # @return [String] + def request_type(env) + type = env.request_headers[CONTENT_TYPE].to_s + type = type.split(';', 2).first if type.index(';') + type + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response.rb new file mode 100644 index 0000000..d588698 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response.rb @@ -0,0 +1,103 @@ +# frozen_string_literal: true + +require 'forwardable' + +module Faraday + # Response represents an HTTP response from making an HTTP request. + class Response + # Used for simple response middleware. + class Middleware < Faraday::Middleware + # Override this to modify the environment after the response has finished. 
+ # Calls the `parse` method if defined + # `parse` method can be defined as private, public and protected + def on_complete(env) + return unless respond_to?(:parse, true) && env.parse_body? + + env.body = parse(env.body) + end + end + + extend Forwardable + extend MiddlewareRegistry + + register_middleware File.expand_path('response', __dir__), + raise_error: [:RaiseError, 'raise_error'], + logger: [:Logger, 'logger'], + json: [:Json, 'json'] + + def initialize(env = nil) + @env = Env.from(env) if env + @on_complete_callbacks = [] + end + + attr_reader :env + + def status + finished? ? env.status : nil + end + + def reason_phrase + finished? ? env.reason_phrase : nil + end + + def headers + finished? ? env.response_headers : {} + end + + def_delegator :headers, :[] + + def body + finished? ? env.body : nil + end + + def finished? + !!env + end + + def on_complete(&block) + if !finished? + @on_complete_callbacks << block + else + yield(env) + end + self + end + + def finish(env) + raise 'response already finished' if finished? + + @env = env.is_a?(Env) ? env : Env.from(env) + @on_complete_callbacks.each { |callback| callback.call(@env) } + self + end + + def success? + finished? && env.success? + end + + def to_hash + { + status: env.status, body: env.body, + response_headers: env.response_headers + } + end + + # because @on_complete_callbacks cannot be marshalled + def marshal_dump + finished? ? to_hash : nil + end + + def marshal_load(env) + @env = Env.from(env) + end + + # Expand the env with more properties, without overriding existing ones. + # Useful for applying request params after restoring a marshalled Response. + def apply_request(request_env) + raise "response didn't finish yet" unless finished? + + @env = Env.from(request_env).update(@env) + self + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/json.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/json.rb new file mode 100644 index 0000000..a5bffcb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/json.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +require 'json' + +module Faraday + class Response + # Parse response bodies as JSON. + class Json < Middleware + def initialize(app = nil, options = {}) + super(app) + @parser_options = options[:parser_options] + @content_types = Array(options[:content_type] || /\bjson$/) + @preserve_raw = options[:preserve_raw] + end + + def on_complete(env) + process_response(env) if parse_response?(env) + end + + private + + def process_response(env) + env[:raw_body] = env[:body] if @preserve_raw + env[:body] = parse(env[:body]) + rescue StandardError, SyntaxError => e + raise Faraday::ParsingError.new(e, env[:response]) + end + + def parse(body) + ::JSON.parse(body, @parser_options || {}) unless body.strip.empty? + end + + def parse_response?(env) + process_response_type?(env) && + env[:body].respond_to?(:to_str) + end + + def process_response_type?(env) + type = response_type(env) + @content_types.empty? || @content_types.any? do |pattern| + pattern.is_a?(Regexp) ? 
type.match?(pattern) : type == pattern + end + end + + def response_type(env) + type = env[:response_headers][CONTENT_TYPE].to_s + type = type.split(';', 2).first if type.index(';') + type + end + end + end +end + +Faraday::Response.register_middleware(json: Faraday::Response::Json) diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/logger.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/logger.rb new file mode 100644 index 0000000..2ad4587 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/logger.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +require 'forwardable' +require 'logger' +require 'faraday/logging/formatter' + +module Faraday + class Response + # Logger is a middleware that logs internal events in the HTTP request + # lifecycle to a given Logger object. By default, this logs to STDOUT. See + # Faraday::Logging::Formatter to see specifically what is logged. + class Logger < Middleware + def initialize(app, logger = nil, options = {}) + super(app) + logger ||= ::Logger.new($stdout) + formatter_class = options.delete(:formatter) || Logging::Formatter + @formatter = formatter_class.new(logger: logger, options: options) + yield @formatter if block_given? + end + + def call(env) + @formatter.request(env) + super + end + + def on_complete(env) + @formatter.response(env) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/raise_error.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/raise_error.rb new file mode 100644 index 0000000..467f3cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/response/raise_error.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +module Faraday + class Response + # RaiseError is a Faraday middleware that raises exceptions on common HTTP + # client or server error responses. 
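+ # @example Enabling the middleware and rescuing the resulting error
+ #   (illustrative)
+ #   conn = Faraday.new('http://example.com') { |f| f.response :raise_error }
+ #   begin
+ #     conn.get('/missing')
+ #   rescue Faraday::ResourceNotFound => e
+ #     e.response[:status] # => 404
+ #   end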
+ class RaiseError < Middleware + # rubocop:disable Naming/ConstantName + ClientErrorStatuses = (400...500).freeze + ServerErrorStatuses = (500...600).freeze + # rubocop:enable Naming/ConstantName + + def on_complete(env) + case env[:status] + when 400 + raise Faraday::BadRequestError, response_values(env) + when 401 + raise Faraday::UnauthorizedError, response_values(env) + when 403 + raise Faraday::ForbiddenError, response_values(env) + when 404 + raise Faraday::ResourceNotFound, response_values(env) + when 407 + # mimic the behavior that we get with proxy requests with HTTPS + msg = %(407 "Proxy Authentication Required") + raise Faraday::ProxyAuthError.new(msg, response_values(env)) + when 409 + raise Faraday::ConflictError, response_values(env) + when 422 + raise Faraday::UnprocessableEntityError, response_values(env) + when ClientErrorStatuses + raise Faraday::ClientError, response_values(env) + when ServerErrorStatuses + raise Faraday::ServerError, response_values(env) + when nil + raise Faraday::NilStatusError, response_values(env) + end + end + + def response_values(env) + { + status: env.status, + headers: env.response_headers, + body: env.body, + request: { + method: env.method, + url_path: env.url.path, + params: env.params, + headers: env.request_headers, + body: env.request_body + } + } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils.rb new file mode 100644 index 0000000..b052f9b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils.rb @@ -0,0 +1,117 @@ +# frozen_string_literal: true + +require 'faraday/utils/headers' +require 'faraday/utils/params_hash' + +module Faraday + # Utils contains various static helper methods. + module Utils + module_function + + def build_query(params) + FlatParamsEncoder.encode(params) + end + + def build_nested_query(params) + NestedParamsEncoder.encode(params) + end + + def default_space_encoding + @default_space_encoding ||= '+' + end + + class << self + attr_writer :default_space_encoding + end + + ESCAPE_RE = /[^a-zA-Z0-9 .~_-]/.freeze + + def escape(str) + str.to_s.gsub(ESCAPE_RE) do |match| + "%#{match.unpack('H2' * match.bytesize).join('%').upcase}" + end.gsub(' ', default_space_encoding) + end + + def unescape(str) + CGI.unescape str.to_s + end + + DEFAULT_SEP = /[&;] */n.freeze + + # Adapted from Rack + def parse_query(query) + FlatParamsEncoder.decode(query) + end + + def parse_nested_query(query) + NestedParamsEncoder.decode(query) + end + + def default_params_encoder + @default_params_encoder ||= NestedParamsEncoder + end + + class << self + attr_writer :default_params_encoder + end + + # Normalize URI() behavior across Ruby versions + # + # url - A String or URI. + # + # Returns a parsed URI. + def URI(url) # rubocop:disable Naming/MethodName + if url.respond_to?(:host) + url + elsif url.respond_to?(:to_str) + default_uri_parser.call(url) + else + raise ArgumentError, 'bad argument (expected URI object or URI string)' + end + end + + def default_uri_parser + @default_uri_parser ||= begin + require 'uri' + Kernel.method(:URI) + end + end + + def default_uri_parser=(parser) + @default_uri_parser = if parser.respond_to?(:call) || parser.nil? + parser + else + parser.method(:parse) + end + end + + # Receives a String or URI and returns just + # the path with the query string sorted. + def normalize_path(url) + url = URI(url) + (url.path.start_with?('/') ? 
url.path : "/#{url.path}") + + (url.query ? "?#{sort_query_params(url.query)}" : '') + end + + # Recursive hash update + def deep_merge!(target, hash) + hash.each do |key, value| + target[key] = if value.is_a?(Hash) && target[key].is_a?(Hash) + deep_merge(target[key], value) + else + value + end + end + target + end + + # Recursive hash merge + def deep_merge(source, hash) + deep_merge!(source.dup, hash) + end + + def sort_query_params(query) + query.split('&').sort.join('&') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/headers.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/headers.rb new file mode 100644 index 0000000..9883dc2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/headers.rb @@ -0,0 +1,139 @@ +# frozen_string_literal: true + +module Faraday + module Utils + # A case-insensitive Hash that preserves the original case of a header + # when set. + # + # Adapted from Rack::Utils::HeaderHash + class Headers < ::Hash + def self.from(value) + new(value) + end + + def self.allocate + new_self = super + new_self.initialize_names + new_self + end + + def initialize(hash = nil) + super() + @names = {} + update(hash || {}) + end + + def initialize_names + @names = {} + end + + # on dup/clone, we need to duplicate @names hash + def initialize_copy(other) + super + @names = other.names.dup + end + + # need to synchronize concurrent writes to the shared KeyMap + keymap_mutex = Mutex.new + + # symbol -> string mapper + cache + KeyMap = Hash.new do |map, key| + value = if key.respond_to?(:to_str) + key + else + key.to_s.split('_') # user_agent: %w(user agent) + .each(&:capitalize!) # => %w(User Agent) + .join('-') # => "User-Agent" + end + keymap_mutex.synchronize { map[key] = value } + end + KeyMap[:etag] = 'ETag' + + def [](key) + key = KeyMap[key] + super(key) || super(@names[key.downcase]) + end + + def []=(key, val) + key = KeyMap[key] + key = (@names[key.downcase] ||= key) + # join multiple values with a comma + val = val.to_ary.join(', ') if val.respond_to?(:to_ary) + super(key, val) + end + + def fetch(key, *args, &block) + key = KeyMap[key] + key = @names.fetch(key.downcase, key) + super(key, *args, &block) + end + + def delete(key) + key = KeyMap[key] + key = @names[key.downcase] + return unless key + + @names.delete key.downcase + super(key) + end + + def include?(key) + @names.include? key.downcase + end + + alias has_key? include? + alias member? include? + alias key? include? + + def merge!(other) + other.each { |k, v| self[k] = v } + self + end + + alias update merge! + + def merge(other) + hash = dup + hash.merge! other + end + + def replace(other) + clear + @names.clear + update other + self + end + + def to_hash + {}.update(self) + end + + def parse(header_string) + return unless header_string && !header_string.empty? + + headers = header_string.split(/\r\n/) + + # Find the last set of response headers. + start_index = headers.rindex { |x| x.start_with?('HTTP/') } || 0 + last_response = headers.slice(start_index, headers.size) + + last_response + .tap { |a| a.shift if a.first.start_with?('HTTP/') } + .map { |h| h.split(/:\s*/, 2) } # split key and value + .reject { |p| p[0].nil? } # ignore blank lines + .each { |key, value| add_parsed(key, value) } + end + + protected + + attr_reader :names + + private + + # Join multiple values with a comma. + def add_parsed(key, value) + self[key] ? 
self[key] << ', ' << value : self[key] = value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/params_hash.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/params_hash.rb new file mode 100644 index 0000000..0e16d93 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/utils/params_hash.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +module Faraday + module Utils + # A hash with stringified keys. + class ParamsHash < Hash + def [](key) + super(convert_key(key)) + end + + def []=(key, value) + super(convert_key(key), value) + end + + def delete(key) + super(convert_key(key)) + end + + def include?(key) + super(convert_key(key)) + end + + alias has_key? include? + alias member? include? + alias key? include? + + def update(params) + params.each do |key, value| + self[key] = value + end + self + end + alias merge! update + + def merge(params) + dup.update(params) + end + + def replace(other) + clear + update(other) + end + + def merge_query(query, encoder = nil) + return self unless query && !query.empty? + + update((encoder || Utils.default_params_encoder).decode(query)) + end + + def to_query(encoder = nil) + (encoder || Utils.default_params_encoder).encode(self) + end + + private + + def convert_key(key) + key.to_s + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/version.rb new file mode 100644 index 0000000..d8a100d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/lib/faraday/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module Faraday + VERSION = '1.10.3' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/external_adapters/faraday_specs_setup.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/external_adapters/faraday_specs_setup.rb new file mode 100644 index 0000000..ac7f7b6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/external_adapters/faraday_specs_setup.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require 'webmock/rspec' +WebMock.disable_net_connect!(allow_localhost: true) + +require_relative '../support/helper_methods' +require_relative '../support/disabling_stub' +require_relative '../support/streaming_response_checker' +require_relative '../support/shared_examples/adapter' +require_relative '../support/shared_examples/request_method' + +RSpec.configure do |config| + config.include Faraday::HelperMethods +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_http_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_http_spec.rb new file mode 100644 index 0000000..721bd82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_http_spec.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +unless defined?(JRUBY_VERSION) + RSpec.describe Faraday::Adapter::EMHttp do + features :request_body_on_query_methods, :reason_phrase_parse, :trace_method, + :skip_response_body_on_head, :parallel, :local_socket_binding + + it_behaves_like 'an adapter' + + it 'allows to provide adapter specific configs' do + url = URI('https://example.com:1234') + adapter = described_class.new nil, inactivity_timeout: 20 + req = adapter.create_request(url: url, request: {}) + + expect(req.connopts.inactivity_timeout).to eq(20) + end + + context 'Options' do + let(:request) { Faraday::RequestOptions.new } + let(:env) { { request: 
request } } + let(:options) { {} } + let(:adapter) { Faraday::Adapter::EMHttp.new } + + it 'configures timeout' do + request.timeout = 5 + adapter.configure_timeout(options, env) + expect(options[:inactivity_timeout]).to eq(5) + expect(options[:connect_timeout]).to eq(5) + end + + it 'configures timeout and open_timeout' do + request.timeout = 5 + request.open_timeout = 1 + adapter.configure_timeout(options, env) + expect(options[:inactivity_timeout]).to eq(5) + expect(options[:connect_timeout]).to eq(1) + end + + it 'configures all timeout settings' do + request.timeout = 5 + request.read_timeout = 3 + request.open_timeout = 1 + adapter.configure_timeout(options, env) + expect(options[:inactivity_timeout]).to eq(3) + expect(options[:connect_timeout]).to eq(1) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_synchrony_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_synchrony_spec.rb new file mode 100644 index 0000000..ce29e71 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/em_synchrony_spec.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +unless defined?(JRUBY_VERSION) + RSpec.describe Faraday::Adapter::EMSynchrony do + features :request_body_on_query_methods, :reason_phrase_parse, + :skip_response_body_on_head, :parallel, :local_socket_binding + + it_behaves_like 'an adapter' + + it 'allows to provide adapter specific configs' do + url = URI('https://example.com:1234') + adapter = described_class.new nil, inactivity_timeout: 20 + req = adapter.create_request(url: url, request: {}) + + expect(req.connopts.inactivity_timeout).to eq(20) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/excon_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/excon_spec.rb new file mode 100644 index 0000000..d80abc3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/excon_spec.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::Excon do + features :request_body_on_query_methods, :reason_phrase_parse, :trace_method + + it_behaves_like 'an adapter' + + it 'allows to provide adapter specific configs' do + url = URI('https://example.com:1234') + + adapter = described_class.new(nil, debug_request: true) + + conn = adapter.build_connection(url: url) + + expect(conn.data[:debug_request]).to be_truthy + end + + context 'config' do + let(:adapter) { Faraday::Adapter::Excon.new } + let(:request) { Faraday::RequestOptions.new } + let(:uri) { URI.parse('https://example.com') } + let(:env) { { request: request, url: uri } } + + it 'sets timeout' do + request.timeout = 5 + options = adapter.send(:opts_from_env, env) + expect(options[:read_timeout]).to eq(5) + expect(options[:write_timeout]).to eq(5) + expect(options[:connect_timeout]).to eq(5) + end + + it 'sets timeout and open_timeout' do + request.timeout = 5 + request.open_timeout = 3 + options = adapter.send(:opts_from_env, env) + expect(options[:read_timeout]).to eq(5) + expect(options[:write_timeout]).to eq(5) + expect(options[:connect_timeout]).to eq(3) + end + + it 'sets open_timeout' do + request.open_timeout = 3 + options = adapter.send(:opts_from_env, env) + expect(options[:read_timeout]).to eq(nil) + expect(options[:write_timeout]).to eq(nil) + expect(options[:connect_timeout]).to eq(3) + end + end +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/httpclient_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/httpclient_spec.rb new file mode 100644 index 0000000..3cbf2c6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/httpclient_spec.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::HTTPClient do + # ruby gem defaults for testing purposes + HTTPCLIENT_OPEN = 60 + HTTPCLIENT_READ = 60 + HTTPCLIENT_WRITE = 120 + + features :request_body_on_query_methods, :reason_phrase_parse, :compression, + :trace_method, :local_socket_binding + + it_behaves_like 'an adapter' + + it 'allows to provide adapter specific configs' do + adapter = described_class.new do |client| + client.keep_alive_timeout = 20 + client.ssl_config.timeout = 25 + end + + client = adapter.build_connection(url: URI.parse('https://example.com')) + expect(client.keep_alive_timeout).to eq(20) + expect(client.ssl_config.timeout).to eq(25) + end + + context 'Options' do + let(:request) { Faraday::RequestOptions.new } + let(:env) { { request: request } } + let(:options) { {} } + let(:adapter) { Faraday::Adapter::HTTPClient.new } + let(:client) { adapter.connection(url: URI.parse('https://example.com')) } + + it 'configures timeout' do + assert_default_timeouts! + + request.timeout = 5 + adapter.configure_timeouts(client, request) + + expect(client.connect_timeout).to eq(5) + expect(client.send_timeout).to eq(5) + expect(client.receive_timeout).to eq(5) + end + + it 'configures open timeout' do + assert_default_timeouts! + + request.open_timeout = 1 + adapter.configure_timeouts(client, request) + + expect(client.connect_timeout).to eq(1) + expect(client.send_timeout).to eq(HTTPCLIENT_WRITE) + expect(client.receive_timeout).to eq(HTTPCLIENT_READ) + end + + it 'configures multiple timeouts' do + assert_default_timeouts! + + request.open_timeout = 1 + request.write_timeout = 10 + request.read_timeout = 5 + adapter.configure_timeouts(client, request) + + expect(client.connect_timeout).to eq(1) + expect(client.send_timeout).to eq(10) + expect(client.receive_timeout).to eq(5) + end + + def assert_default_timeouts! 
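+        # guard: the client should still carry httpclient's stock defaults (60s connect/receive, 120s send) before each example overrides them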
+ expect(client.connect_timeout).to eq(HTTPCLIENT_OPEN) + expect(client.send_timeout).to eq(HTTPCLIENT_WRITE) + expect(client.receive_timeout).to eq(HTTPCLIENT_READ) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/net_http_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/net_http_spec.rb new file mode 100644 index 0000000..fda9b64 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/net_http_spec.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::NetHttp do + features :request_body_on_query_methods, :reason_phrase_parse, :compression, :streaming, :trace_method + + it_behaves_like 'an adapter' + + context 'checking http' do + let(:url) { URI('http://example.com') } + let(:adapter) { described_class.new } + let(:http) { adapter.send(:connection, url: url, request: {}) } + + it { expect(http.port).to eq(80) } + + it 'sets max_retries to 0' do + adapter.send(:configure_request, http, {}) + + expect(http.max_retries).to eq(0) if http.respond_to?(:max_retries=) + end + + it 'supports write_timeout' do + adapter.send(:configure_request, http, write_timeout: 10) + + expect(http.write_timeout).to eq(10) if http.respond_to?(:write_timeout=) + end + + it 'supports open_timeout' do + adapter.send(:configure_request, http, open_timeout: 10) + + expect(http.open_timeout).to eq(10) + end + + it 'supports read_timeout' do + adapter.send(:configure_request, http, read_timeout: 10) + + expect(http.read_timeout).to eq(10) + end + + context 'with https url' do + let(:url) { URI('https://example.com') } + + it { expect(http.port).to eq(443) } + end + + context 'with http url including port' do + let(:url) { URI('https://example.com:1234') } + + it { expect(http.port).to eq(1234) } + end + + context 'with custom adapter config' do + let(:adapter) do + described_class.new do |http| + http.continue_timeout = 123 + end + end + + it do + adapter.send(:configure_request, http, {}) + expect(http.continue_timeout).to eq(123) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/patron_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/patron_spec.rb new file mode 100644 index 0000000..2408ab0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/patron_spec.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::Patron, unless: defined?(JRUBY_VERSION) do + features :reason_phrase_parse + + it_behaves_like 'an adapter' + + it 'allows to provide adapter specific configs' do + conn = Faraday.new do |f| + f.adapter :patron do |session| + session.max_redirects = 10 + raise 'Configuration block called' + end + end + + expect { conn.get('/') }.to raise_error(RuntimeError, 'Configuration block called') + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/rack_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/rack_spec.rb new file mode 100644 index 0000000..4fe6cba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/rack_spec.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::Rack do + features :request_body_on_query_methods, :trace_method, + :skip_response_body_on_head + + it_behaves_like 'an adapter', adapter_options: WebmockRackApp.new +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/test_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/test_spec.rb new file mode 100644 index 0000000..4475917 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/test_spec.rb @@ -0,0 +1,377 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::Test do + let(:stubs) do + described_class::Stubs.new do |stub| + stub.get('http://domain.test/hello') do + [200, { 'Content-Type' => 'text/html' }, 'domain: hello'] + end + + stub.get('http://wrong.test/hello') do + [200, { 'Content-Type' => 'text/html' }, 'wrong: hello'] + end + + stub.get('http://wrong.test/bait') do + [404, { 'Content-Type' => 'text/html' }] + end + + stub.get('/hello') do + [200, { 'Content-Type' => 'text/html' }, 'hello'] + end + + stub.get('/method-echo') do |env| + [200, { 'Content-Type' => 'text/html' }, env[:method].to_s] + end + + stub.get(%r{\A/resources/\d+(?:\?|\z)}) do + [200, { 'Content-Type' => 'text/html' }, 'show'] + end + + stub.get(%r{\A/resources/(specified)\z}) do |_env, meta| + [200, { 'Content-Type' => 'text/html' }, "show #{meta[:match_data][1]}"] + end + end + end + + let(:connection) do + Faraday.new do |builder| + builder.adapter :test, stubs + end + end + + let(:response) { connection.get('/hello') } + + context 'with simple path sets status' do + subject { response.status } + + it { is_expected.to eq 200 } + end + + context 'with simple path sets headers' do + subject { response.headers['Content-Type'] } + + it { is_expected.to eq 'text/html' } + end + + context 'with simple path sets body' do + subject { response.body } + + it { is_expected.to eq 'hello' } + end + + context 'with host points to the right stub' do + subject { connection.get('http://domain.test/hello').body } + + it { is_expected.to eq 'domain: hello' } + end + + describe 'can be called several times' do + subject { connection.get('/hello').body } + + it { is_expected.to eq 'hello' } + end + + describe 'can handle regular expression path' do + subject { connection.get('/resources/1').body } + + it { is_expected.to eq 'show' } + end + + describe 'can handle single parameter block' do + subject { connection.get('/method-echo').body } + + it { is_expected.to eq 'get' } + end + + describe 'can handle regular expression path with captured result' do + subject { connection.get('/resources/specified').body } + + it { is_expected.to eq 'show specified' } + end + + context 'with get params' do + subject { connection.get('/param?a=1').body } + + before do + stubs.get('/param?a=1') { [200, {}, 'a'] } + end + + it { is_expected.to eq 'a' } + end + + describe 'ignoring unspecified get params' do + before do + stubs.get('/optional?a=1') { [200, {}, 'a'] } + end + + context 'with multiple params' do + subject { connection.get('/optional?a=1&b=1').body } + + it { is_expected.to eq 'a' } + end + + context 'with single param' do + subject { connection.get('/optional?a=1').body } + + it { is_expected.to eq 'a' } + end + + context 'without params' do + subject(:request) { connection.get('/optional') } + + it do + expect { request }.to raise_error( + Faraday::Adapter::Test::Stubs::NotFound + ) + end + end + end + + context 'with http headers' do + before do + stubs.get('/yo', 'X-HELLO' => 'hello') { [200, {}, 'a'] } + stubs.get('/yo') { [200, {}, 'b'] } + end + + context 'with header' do + subject do + connection.get('/yo') { |env| env.headers['X-HELLO'] = 'hello' }.body + end + + it { is_expected.to eq 'a' } + end 
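+      # a request without the X-HELLO header skips the header-matching stub and falls through to the plain '/yo' stub below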
+ + context 'without header' do + subject do + connection.get('/yo').body + end + + it { is_expected.to eq 'b' } + end + end + + describe 'different outcomes for the same request' do + def make_request + connection.get('/foo') + end + + subject(:request) { make_request.body } + + before do + stubs.get('/foo') { [200, { 'Content-Type' => 'text/html' }, 'hello'] } + stubs.get('/foo') { [200, { 'Content-Type' => 'text/html' }, 'world'] } + end + + context 'the first request' do + it { is_expected.to eq 'hello' } + end + + context 'the second request' do + before do + make_request + end + + it { is_expected.to eq 'world' } + end + end + + describe 'yielding env to stubs' do + subject { connection.get('http://foo.com/foo?a=1').body } + + before do + stubs.get '/foo' do |env| + expect(env[:url].path).to eq '/foo' + expect(env[:url].host).to eq 'foo.com' + expect(env[:params]['a']).to eq '1' + expect(env[:request_headers]['Accept']).to eq 'text/plain' + [200, {}, 'a'] + end + + connection.headers['Accept'] = 'text/plain' + end + + it { is_expected.to eq 'a' } + end + + describe 'params parsing' do + subject { connection.get('http://foo.com/foo?a[b]=1').body } + + context 'with default encoder' do + before do + stubs.get '/foo' do |env| + expect(env[:params]['a']['b']).to eq '1' + [200, {}, 'a'] + end + end + + it { is_expected.to eq 'a' } + end + + context 'with nested encoder' do + before do + stubs.get '/foo' do |env| + expect(env[:params]['a']['b']).to eq '1' + [200, {}, 'a'] + end + + connection.options.params_encoder = Faraday::NestedParamsEncoder + end + + it { is_expected.to eq 'a' } + end + + context 'with flat encoder' do + before do + stubs.get '/foo' do |env| + expect(env[:params]['a[b]']).to eq '1' + [200, {}, 'a'] + end + + connection.options.params_encoder = Faraday::FlatParamsEncoder + end + + it { is_expected.to eq 'a' } + end + end + + describe 'raising an error if no stub was found' do + describe 'for request' do + subject(:request) { connection.get('/invalid') { [200, {}, []] } } + + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + + describe 'for specified host' do + subject(:request) { connection.get('http://domain.test/bait') } + + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + + describe 'for request without specified header' do + subject(:request) { connection.get('/yo') } + + before do + stubs.get('/yo', 'X-HELLO' => 'hello') { [200, {}, 'a'] } + end + + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + end + + describe 'for request with non default params encoder' do + let(:connection) do + Faraday.new(request: { params_encoder: Faraday::FlatParamsEncoder }) do |builder| + builder.adapter :test, stubs + end + end + let(:stubs) do + described_class::Stubs.new do |stubs| + stubs.get('/path?a=x&a=y&a=z') { [200, {}, 'a'] } + end + end + + context 'when all flat param values are correctly set' do + subject(:request) { connection.get('/path?a=x&a=y&a=z') } + + it { expect(request.status).to eq 200 } + end + + shared_examples 'raise NotFound when params do not satisfy the flat param values' do |params| + subject(:request) { connection.get('/path', params) } + + context "with #{params.inspect}" do + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + end + + it_behaves_like 'raise NotFound when params do not satisfy the flat param values', { a: %w[x] } + it_behaves_like 'raise NotFound when params do not satisfy the flat param values', { a: %w[x y] } 
+ it_behaves_like 'raise NotFound when params do not satisfy the flat param values', { a: %w[x z y] } # NOTE: The order of the value is also compared. + it_behaves_like 'raise NotFound when params do not satisfy the flat param values', { b: %w[x y z] } + end + + describe 'strict_mode' do + let(:stubs) do + described_class::Stubs.new(strict_mode: true) do |stubs| + stubs.get('/strict?a=12&b=xy', 'Authorization' => 'Bearer m_ck', 'X-C' => 'hello') { [200, {}, 'a'] } + stubs.get('/with_user_agent?a=12&b=xy', authorization: 'Bearer m_ck', 'User-Agent' => 'My Agent') { [200, {}, 'a'] } + end + end + + context 'when params and headers are exactly set' do + subject(:request) { connection.get('/strict', { a: '12', b: 'xy' }, { authorization: 'Bearer m_ck', x_c: 'hello' }) } + + it { expect(request.status).to eq 200 } + end + + context 'when params and headers are exactly set with a custom user agent' do + subject(:request) { connection.get('/with_user_agent', { a: '12', b: 'xy' }, { authorization: 'Bearer m_ck', 'User-Agent' => 'My Agent' }) } + + it { expect(request.status).to eq 200 } + end + + shared_examples 'raise NotFound when params do not satisfy the strict check' do |params| + subject(:request) { connection.get('/strict', params, { 'Authorization' => 'Bearer m_ck', 'X-C' => 'hello' }) } + + context "with #{params.inspect}" do + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + end + + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { a: '12' } + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { b: 'xy' } + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { a: '123', b: 'xy' } + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { a: '12', b: 'xyz' } + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { a: '12', b: 'xy', c: 'hello' } + it_behaves_like 'raise NotFound when params do not satisfy the strict check', { additional: 'special', a: '12', b: 'xy', c: 'hello' } + + shared_examples 'raise NotFound when headers do not satisfy the strict check' do |path, headers| + subject(:request) { connection.get(path, { a: 12, b: 'xy' }, headers) } + + context "with #{headers.inspect}" do + it { expect { request }.to raise_error described_class::Stubs::NotFound } + end + end + + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/strict', { 'X-C' => 'hello' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck', 'x-c': 'Hi' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/strict', { authorization: 'Basic m_ck', 'x-c': 'hello' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck', 'x-c': 'hello', x_special: 'special' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/with_user_agent', { authorization: 'Bearer m_ck' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/with_user_agent', { authorization: 'Bearer m_ck', user_agent: 'Unknown' } + it_behaves_like 'raise NotFound when headers do not satisfy the strict check', '/with_user_agent', { authorization: 'Bearer m_ck', user_agent: 'My Agent', x_special: 'special' } + + context 'when 
strict_mode is disabled' do + before do + stubs.strict_mode = false + end + + shared_examples 'does not raise NotFound even when params do not satisfy the strict check' do |params| + subject(:request) { connection.get('/strict', params, { 'Authorization' => 'Bearer m_ck', 'X-C' => 'hello' }) } + + context "with #{params.inspect}" do + it { expect(request.status).to eq 200 } + end + end + + it_behaves_like 'does not raise NotFound even when params do not satisfy the strict check', { a: '12', b: 'xy' } + it_behaves_like 'does not raise NotFound even when params do not satisfy the strict check', { a: '12', b: 'xy', c: 'hello' } + it_behaves_like 'does not raise NotFound even when params do not satisfy the strict check', { additional: 'special', a: '12', b: 'xy', c: 'hello' } + + shared_examples 'does not raise NotFound even when headers do not satisfy the strict check' do |path, headers| + subject(:request) { connection.get(path, { a: 12, b: 'xy' }, headers) } + + context "with #{headers.inspect}" do + it { expect(request.status).to eq 200 } + end + end + + it_behaves_like 'does not raise NotFound even when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck', 'x-c': 'hello' } + it_behaves_like 'does not raise NotFound even when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck', 'x-c': 'hello', x_special: 'special' } + it_behaves_like 'does not raise NotFound even when headers do not satisfy the strict check', '/strict', { authorization: 'Bearer m_ck', 'x-c': 'hello', user_agent: 'Special Agent' } + it_behaves_like 'does not raise NotFound even when headers do not satisfy the strict check', '/with_user_agent', { authorization: 'Bearer m_ck', user_agent: 'My Agent' } + it_behaves_like 'does not raise NotFound even when headers do not satisfy the strict check', '/with_user_agent', { authorization: 'Bearer m_ck', user_agent: 'My Agent', x_special: 'special' } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/typhoeus_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/typhoeus_spec.rb new file mode 100644 index 0000000..7f63f97 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter/typhoeus_spec.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter::Typhoeus do + features :request_body_on_query_methods, :parallel, :trace_method + + it_behaves_like 'an adapter' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_registry_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_registry_spec.rb new file mode 100644 index 0000000..222e65e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_registry_spec.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::AdapterRegistry do + describe '#initialize' do + subject(:registry) { described_class.new } + + it { expect { registry.get(:FinFangFoom) }.to raise_error(NameError) } + it { expect { registry.get('FinFangFoom') }.to raise_error(NameError) } + + it 'looks up class by string name' do + expect(registry.get('Faraday::Connection')).to eq(Faraday::Connection) + end + + it 'looks up class by symbol name' do + expect(registry.get(:Faraday)).to eq(Faraday) + end + + it 'caches lookups with implicit name' do + registry.set :symbol + expect(registry.get('symbol')).to eq(:symbol) + end + + it 'caches lookups with explicit name' do + registry.set 'string', 
:name + expect(registry.get(:name)).to eq('string') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_spec.rb new file mode 100644 index 0000000..22ef1d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/adapter_spec.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Adapter do + let(:adapter) { Faraday::Adapter.new } + let(:request) { {} } + + context '#request_timeout' do + it 'gets :read timeout' do + expect(timeout(:read)).to eq(nil) + + request[:timeout] = 5 + request[:write_timeout] = 1 + + expect(timeout(:read)).to eq(5) + + request[:read_timeout] = 2 + + expect(timeout(:read)).to eq(2) + end + + it 'gets :open timeout' do + expect(timeout(:open)).to eq(nil) + + request[:timeout] = 5 + request[:write_timeout] = 1 + + expect(timeout(:open)).to eq(5) + + request[:open_timeout] = 2 + + expect(timeout(:open)).to eq(2) + end + + it 'gets :write timeout' do + expect(timeout(:write)).to eq(nil) + + request[:timeout] = 5 + request[:read_timeout] = 1 + + expect(timeout(:write)).to eq(5) + + request[:write_timeout] = 2 + + expect(timeout(:write)).to eq(2) + end + + it 'attempts unknown timeout type' do + expect { timeout(:unknown) }.to raise_error(ArgumentError) + end + + def timeout(type) + adapter.send(:request_timeout, type, request) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/composite_read_io_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/composite_read_io_spec.rb new file mode 100644 index 0000000..ccba34f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/composite_read_io_spec.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +require 'stringio' + +RSpec.describe Faraday::CompositeReadIO do + Part = Struct.new(:to_io) do + def length + to_io.string.length + end + end + + def part(str) + Part.new StringIO.new(str) + end + + def composite_io(*parts) + Faraday::CompositeReadIO.new(*parts) + end + + context 'with empty composite_io' do + subject { composite_io } + + it { expect(subject.length).to eq(0) } + it { expect(subject.read).to eq('') } + it { expect(subject.read(1)).to be_nil } + end + + context 'with empty parts' do + subject { composite_io(part(''), part('')) } + + it { expect(subject.length).to eq(0) } + it { expect(subject.read).to eq('') } + it { expect(subject.read(1)).to be_nil } + end + + context 'with 2 parts' do + subject { composite_io(part('abcd'), part('1234')) } + + it { expect(subject.length).to eq(8) } + it { expect(subject.read).to eq('abcd1234') } + it 'allows to read in chunks' do + expect(subject.read(3)).to eq('abc') + expect(subject.read(3)).to eq('d12') + expect(subject.read(3)).to eq('34') + expect(subject.read(3)).to be_nil + end + it 'allows to rewind while reading in chunks' do + expect(subject.read(3)).to eq('abc') + expect(subject.read(3)).to eq('d12') + subject.rewind + expect(subject.read(3)).to eq('abc') + expect(subject.read(5)).to eq('d1234') + expect(subject.read(3)).to be_nil + subject.rewind + expect(subject.read(2)).to eq('ab') + end + end + + context 'with mix of empty and non-empty parts' do + subject { composite_io(part(''), part('abcd'), part(''), part('1234'), part('')) } + + it 'allows to read in chunks' do + expect(subject.read(6)).to eq('abcd12') + expect(subject.read(6)).to eq('34') + expect(subject.read(6)).to be_nil + end + end + + context 'with utf8 multibyte part' 
do + subject { composite_io(part("\x86"), part('ファイル')) } + + it { expect(subject.read).to eq(String.new("\x86\xE3\x83\x95\xE3\x82\xA1\xE3\x82\xA4\xE3\x83\xAB", encoding: 'BINARY')) } + it 'allows to read in chunks' do + expect(subject.read(3)).to eq(String.new("\x86\xE3\x83", encoding: 'BINARY')) + expect(subject.read(3)).to eq(String.new("\x95\xE3\x82", encoding: 'BINARY')) + expect(subject.read(8)).to eq(String.new("\xA1\xE3\x82\xA4\xE3\x83\xAB", encoding: 'BINARY')) + expect(subject.read(3)).to be_nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/connection_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/connection_spec.rb new file mode 100644 index 0000000..0a12981 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/connection_spec.rb @@ -0,0 +1,736 @@ +# frozen_string_literal: true + +shared_examples 'initializer with url' do + context 'with simple url' do + let(:address) { 'http://sushi.com' } + + it { expect(subject.host).to eq('sushi.com') } + it { expect(subject.port).to eq(80) } + it { expect(subject.scheme).to eq('http') } + it { expect(subject.path_prefix).to eq('/') } + it { expect(subject.params).to eq({}) } + end + + context 'with complex url' do + let(:address) { 'http://sushi.com:815/fish?a=1' } + + it { expect(subject.port).to eq(815) } + it { expect(subject.path_prefix).to eq('/fish') } + it { expect(subject.params).to eq('a' => '1') } + end + + context 'with IPv6 address' do + let(:address) { 'http://[::1]:85/' } + + it { expect(subject.host).to eq('[::1]') } + it { expect(subject.port).to eq(85) } + end +end + +shared_examples 'default connection options' do + after { Faraday.default_connection_options = nil } + + it 'works with implicit url' do + conn = Faraday.new 'http://sushi.com/foo' + expect(conn.options.timeout).to eq(10) + end + + it 'works with option url' do + conn = Faraday.new url: 'http://sushi.com/foo' + expect(conn.options.timeout).to eq(10) + end + + it 'works with instance connection options' do + conn = Faraday.new 'http://sushi.com/foo', request: { open_timeout: 1 } + expect(conn.options.timeout).to eq(10) + expect(conn.options.open_timeout).to eq(1) + end + + it 'default connection options persist with an instance overriding' do + conn = Faraday.new 'http://nigiri.com/bar' + conn.options.timeout = 1 + expect(Faraday.default_connection_options.request.timeout).to eq(10) + + other = Faraday.new url: 'https://sushi.com/foo' + other.options.timeout = 1 + + expect(Faraday.default_connection_options.request.timeout).to eq(10) + end + + it 'default connection uses default connection options' do + expect(Faraday.default_connection.options.timeout).to eq(10) + end +end + +RSpec.describe Faraday::Connection do + let(:conn) { Faraday::Connection.new(url, options) } + let(:url) { nil } + let(:options) { nil } + + describe '.new' do + subject { conn } + + context 'with implicit url param' do + # Faraday::Connection.new('http://sushi.com') + let(:url) { address } + + it_behaves_like 'initializer with url' + end + + context 'with explicit url param' do + # Faraday::Connection.new(url: 'http://sushi.com') + let(:url) { { url: address } } + + it_behaves_like 'initializer with url' + end + + context 'with custom builder' do + let(:custom_builder) { Faraday::RackBuilder.new } + let(:options) { { builder: custom_builder } } + + it { expect(subject.builder).to eq(custom_builder) } + end + + context 'with custom params' do + let(:options) { { params: { a: 1 } } } + + it { 
expect(subject.params).to eq('a' => 1) } + end + + context 'with custom params and params in url' do + let(:url) { 'http://sushi.com/fish?a=1&b=2' } + let(:options) { { params: { a: 3 } } } + it { expect(subject.params).to eq('a' => 3, 'b' => '2') } + end + + context 'with custom headers' do + let(:options) { { headers: { user_agent: 'Faraday' } } } + + it { expect(subject.headers['User-agent']).to eq('Faraday') } + end + + context 'with ssl false' do + let(:options) { { ssl: { verify: false } } } + + it { expect(subject.ssl.verify?).to be_falsey } + end + + context 'with empty block' do + let(:conn) { Faraday::Connection.new {} } + + it { expect(conn.builder.handlers.size).to eq(0) } + end + + context 'with block' do + let(:conn) do + Faraday::Connection.new(params: { 'a' => '1' }) do |faraday| + faraday.adapter :net_http + faraday.url_prefix = 'http://sushi.com/omnom' + end + end + + it { expect(conn.builder.handlers.size).to eq(0) } + it { expect(conn.path_prefix).to eq('/omnom') } + end + end + + describe '#close' do + it 'can close underlying app' do + expect(conn.app).to receive(:close) + conn.close + end + end + + describe 'basic_auth' do + subject { conn } + + context 'calling the #basic_auth method' do + before { subject.basic_auth 'Aladdin', 'open sesame' } + + it { expect(subject.headers['Authorization']).to eq('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==') } + end + + context 'adding basic auth info to url' do + let(:url) { 'http://Aladdin:open%20sesame@sushi.com/fish' } + + it { expect(subject.headers['Authorization']).to eq('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==') } + end + end + + describe '#token_auth' do + before { subject.token_auth('abcdef', nonce: 'abc') } + + it { expect(subject.headers['Authorization']).to eq('Token nonce="abc", token="abcdef"') } + end + + describe '#build_exclusive_url' do + context 'with relative path' do + subject { conn.build_exclusive_url('sake.html') } + + it 'uses connection host as default host' do + conn.host = 'sushi.com' + expect(subject.host).to eq('sushi.com') + expect(subject.scheme).to eq('http') + end + + it do + conn.path_prefix = '/fish' + expect(subject.path).to eq('/fish/sake.html') + end + + it do + conn.path_prefix = '/' + expect(subject.path).to eq('/sake.html') + end + + it do + conn.path_prefix = 'fish' + expect(subject.path).to eq('/fish/sake.html') + end + + it do + conn.path_prefix = '/fish/' + expect(subject.path).to eq('/fish/sake.html') + end + end + + context 'with absolute path' do + subject { conn.build_exclusive_url('/sake.html') } + + after { expect(subject.path).to eq('/sake.html') } + + it { conn.path_prefix = '/fish' } + it { conn.path_prefix = '/' } + it { conn.path_prefix = 'fish' } + it { conn.path_prefix = '/fish/' } + end + + context 'with complete url' do + subject { conn.build_exclusive_url('http://sushi.com/sake.html?a=1') } + + it { expect(subject.scheme).to eq('http') } + it { expect(subject.host).to eq('sushi.com') } + it { expect(subject.port).to eq(80) } + it { expect(subject.path).to eq('/sake.html') } + it { expect(subject.query).to eq('a=1') } + end + + it 'overrides connection port for absolute url' do + conn.port = 23 + uri = conn.build_exclusive_url('http://sushi.com') + expect(uri.port).to eq(80) + end + + it 'does not add ending slash given nil url' do + conn.url_prefix = 'http://sushi.com/nigiri' + uri = conn.build_exclusive_url + expect(uri.path).to eq('/nigiri') + end + + it 'does not add ending slash given empty url' do + conn.url_prefix = 'http://sushi.com/nigiri' + uri = 
conn.build_exclusive_url('') + expect(uri.path).to eq('/nigiri') + end + + it 'does not use connection params' do + conn.url_prefix = 'http://sushi.com/nigiri' + conn.params = { a: 1 } + expect(conn.build_exclusive_url.to_s).to eq('http://sushi.com/nigiri') + end + + it 'allows to provide params argument' do + conn.url_prefix = 'http://sushi.com/nigiri' + conn.params = { a: 1 } + params = Faraday::Utils::ParamsHash.new + params[:a] = 2 + uri = conn.build_exclusive_url(nil, params) + expect(uri.to_s).to eq('http://sushi.com/nigiri?a=2') + end + + it 'handles uri instances' do + uri = conn.build_exclusive_url(URI('/sake.html')) + expect(uri.path).to eq('/sake.html') + end + + it 'always returns new URI instance' do + conn.url_prefix = 'http://sushi.com' + uri1 = conn.build_exclusive_url(nil) + uri2 = conn.build_exclusive_url(nil) + expect(uri1).not_to equal(uri2) + end + + context 'with url_prefixed connection' do + let(:url) { 'http://sushi.com/sushi/' } + + it 'parses url and changes scheme' do + conn.scheme = 'https' + uri = conn.build_exclusive_url('sake.html') + expect(uri.to_s).to eq('https://sushi.com/sushi/sake.html') + end + + it 'joins url to base with ending slash' do + uri = conn.build_exclusive_url('sake.html') + expect(uri.to_s).to eq('http://sushi.com/sushi/sake.html') + end + + it 'used default base with ending slash' do + uri = conn.build_exclusive_url + expect(uri.to_s).to eq('http://sushi.com/sushi/') + end + + it 'overrides base' do + uri = conn.build_exclusive_url('/sake/') + expect(uri.to_s).to eq('http://sushi.com/sake/') + end + end + + context 'with colon in path' do + let(:url) { 'http://service.com' } + + it 'joins url to base when used absolute path' do + conn = Faraday.new(url: url) + uri = conn.build_exclusive_url('/service:search?limit=400') + expect(uri.to_s).to eq('http://service.com/service:search?limit=400') + end + + it 'joins url to base when used relative path' do + conn = Faraday.new(url: url) + uri = conn.build_exclusive_url('service:search?limit=400') + expect(uri.to_s).to eq('http://service.com/service%3Asearch?limit=400') + end + + it 'joins url to base when used with path prefix' do + conn = Faraday.new(url: url) + conn.path_prefix = '/api' + uri = conn.build_exclusive_url('service:search?limit=400') + expect(uri.to_s).to eq('http://service.com/api/service%3Asearch?limit=400') + end + end + end + + describe '#build_url' do + let(:url) { 'http://sushi.com/nigiri' } + + it 'uses params' do + conn.params = { a: 1, b: 1 } + expect(conn.build_url.to_s).to eq('http://sushi.com/nigiri?a=1&b=1') + end + + it 'merges params' do + conn.params = { a: 1, b: 1 } + url = conn.build_url(nil, b: 2, c: 3) + expect(url.to_s).to eq('http://sushi.com/nigiri?a=1&b=2&c=3') + end + end + + describe '#build_request' do + let(:url) { 'https://asushi.com/sake.html' } + let(:request) { conn.build_request(:get) } + + before do + conn.headers = { 'Authorization' => 'token abc123' } + request.headers.delete('Authorization') + end + + it { expect(conn.headers.keys).to eq(['Authorization']) } + it { expect(conn.headers.include?('Authorization')).to be_truthy } + it { expect(request.headers.keys).to be_empty } + it { expect(request.headers.include?('Authorization')).to be_falsey } + end + + describe '#to_env' do + subject { conn.build_request(:get).to_env(conn).url } + + let(:url) { 'http://sushi.com/sake.html' } + let(:options) { { params: @params } } + + it 'parses url params into query' do + @params = { 'a[b]' => '1 + 2' } + expect(subject.query).to eq('a%5Bb%5D=1+%2B+2') + end 
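+      # spec-compliant escaping: spaces encode as '+', a literal '+' becomes %2B, and unreserved ~ . - stay as-is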
+ + it 'escapes per spec' do + @params = { 'a' => '1+2 foo~bar.-baz' } + expect(subject.query).to eq('a=1%2B2+foo~bar.-baz') + end + + it 'bracketizes nested params in query' do + @params = { 'a' => { 'b' => 'c' } } + expect(subject.query).to eq('a%5Bb%5D=c') + end + + it 'bracketizes repeated params in query' do + @params = { 'a' => [1, 2] } + expect(subject.query).to eq('a%5B%5D=1&a%5B%5D=2') + end + + it 'without braketizing repeated params in query' do + @params = { 'a' => [1, 2] } + conn.options.params_encoder = Faraday::FlatParamsEncoder + expect(subject.query).to eq('a=1&a=2') + end + end + + describe 'proxy support' do + it 'accepts string' do + with_env 'http_proxy' => 'http://env-proxy.com:80' do + conn.proxy = 'http://proxy.com' + expect(conn.proxy.host).to eq('proxy.com') + end + end + + it 'accepts uri' do + with_env 'http_proxy' => 'http://env-proxy.com:80' do + conn.proxy = URI.parse('http://proxy.com') + expect(conn.proxy.host).to eq('proxy.com') + end + end + + it 'accepts hash with string uri' do + with_env 'http_proxy' => 'http://env-proxy.com:80' do + conn.proxy = { uri: 'http://proxy.com', user: 'rick' } + expect(conn.proxy.host).to eq('proxy.com') + expect(conn.proxy.user).to eq('rick') + end + end + + it 'accepts hash' do + with_env 'http_proxy' => 'http://env-proxy.com:80' do + conn.proxy = { uri: URI.parse('http://proxy.com'), user: 'rick' } + expect(conn.proxy.host).to eq('proxy.com') + expect(conn.proxy.user).to eq('rick') + end + end + + it 'accepts http env' do + with_env 'http_proxy' => 'http://env-proxy.com:80' do + expect(conn.proxy.host).to eq('env-proxy.com') + end + end + + it 'accepts http env with auth' do + with_env 'http_proxy' => 'http://a%40b:my%20pass@proxy.com:80' do + expect(conn.proxy.user).to eq('a@b') + expect(conn.proxy.password).to eq('my pass') + end + end + + it 'accepts env without scheme' do + with_env 'http_proxy' => 'localhost:8888' do + uri = conn.proxy[:uri] + expect(uri.host).to eq('localhost') + expect(uri.port).to eq(8888) + end + end + + it 'fetches no proxy from nil env' do + with_env 'http_proxy' => nil do + expect(conn.proxy).to be_nil + end + end + + it 'fetches no proxy from blank env' do + with_env 'http_proxy' => '' do + expect(conn.proxy).to be_nil + end + end + + it 'does not accept uppercase env' do + with_env 'HTTP_PROXY' => 'http://localhost:8888/' do + expect(conn.proxy).to be_nil + end + end + + it 'allows when url in no proxy list' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example.com' do + conn = Faraday::Connection.new('http://example.com') + expect(conn.proxy).to be_nil + end + end + + it 'allows when url in no proxy list with url_prefix' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example.com' do + conn = Faraday::Connection.new + conn.url_prefix = 'http://example.com' + expect(conn.proxy).to be_nil + end + end + + it 'allows when prefixed url is not in no proxy list' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example.com' do + conn = Faraday::Connection.new('http://prefixedexample.com') + expect(conn.proxy.host).to eq('proxy.com') + end + end + + it 'allows when subdomain url is in no proxy list' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example.com' do + conn = Faraday::Connection.new('http://subdomain.example.com') + expect(conn.proxy).to be_nil + end + end + + it 'allows when url not in no proxy list' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example2.com' do + conn = 
Faraday::Connection.new('http://example.com') + expect(conn.proxy.host).to eq('proxy.com') + end + end + + it 'allows when ip address is not in no proxy list but url is' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'localhost' do + conn = Faraday::Connection.new('http://127.0.0.1') + expect(conn.proxy).to be_nil + end + end + + it 'allows when url is not in no proxy list but ip address is' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => '127.0.0.1' do + conn = Faraday::Connection.new('http://localhost') + expect(conn.proxy).to be_nil + end + end + + it 'allows in multi element no proxy list' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example0.com,example.com,example1.com' do + expect(Faraday::Connection.new('http://example0.com').proxy).to be_nil + expect(Faraday::Connection.new('http://example.com').proxy).to be_nil + expect(Faraday::Connection.new('http://example1.com').proxy).to be_nil + expect(Faraday::Connection.new('http://example2.com').proxy.host).to eq('proxy.com') + end + end + + it 'test proxy requires uri' do + expect { conn.proxy = { uri: :bad_uri, user: 'rick' } }.to raise_error(ArgumentError) + end + + it 'uses env http_proxy' do + with_env 'http_proxy' => 'http://proxy.com' do + conn = Faraday.new + expect(conn.instance_variable_get('@manual_proxy')).to be_falsey + expect(conn.proxy_for_request('http://google.co.uk').host).to eq('proxy.com') + end + end + + it 'uses processes no_proxy before http_proxy' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'google.co.uk' do + conn = Faraday.new + expect(conn.instance_variable_get('@manual_proxy')).to be_falsey + expect(conn.proxy_for_request('http://google.co.uk')).to be_nil + end + end + + it 'uses env https_proxy' do + with_env 'https_proxy' => 'https://proxy.com' do + conn = Faraday.new + expect(conn.instance_variable_get('@manual_proxy')).to be_falsey + expect(conn.proxy_for_request('https://google.co.uk').host).to eq('proxy.com') + end + end + + it 'uses processes no_proxy before https_proxy' do + with_env 'https_proxy' => 'https://proxy.com', 'no_proxy' => 'google.co.uk' do + conn = Faraday.new + expect(conn.instance_variable_get('@manual_proxy')).to be_falsey + expect(conn.proxy_for_request('https://google.co.uk')).to be_nil + end + end + + it 'gives priority to manually set proxy' do + with_env 'https_proxy' => 'https://proxy.com', 'no_proxy' => 'google.co.uk' do + conn = Faraday.new + conn.proxy = 'http://proxy2.com' + + expect(conn.instance_variable_get('@manual_proxy')).to be_truthy + expect(conn.proxy_for_request('https://google.co.uk').host).to eq('proxy2.com') + end + end + + it 'ignores env proxy if set that way' do + with_env_proxy_disabled do + with_env 'http_proxy' => 'http://duncan.proxy.com:80' do + expect(conn.proxy).to be_nil + end + end + end + + context 'performing a request' do + before { stub_request(:get, 'http://example.com') } + + it 'dynamically checks proxy' do + with_env 'http_proxy' => 'http://proxy.com:80' do + conn = Faraday.new + expect(conn.proxy.uri.host).to eq('proxy.com') + + conn.get('http://example.com') do |req| + expect(req.options.proxy.uri.host).to eq('proxy.com') + end + end + + conn.get('http://example.com') + expect(conn.instance_variable_get('@temp_proxy')).to be_nil + end + + it 'dynamically check no proxy' do + with_env 'http_proxy' => 'http://proxy.com', 'no_proxy' => 'example.com' do + conn = Faraday.new + + expect(conn.proxy.uri.host).to eq('proxy.com') + + conn.get('http://example.com') do |req| + 
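+            # example.com is covered by the no_proxy list, so the per-request proxy resolves to nil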
expect(req.options.proxy).to be_nil + end + end + end + end + end + + describe '#dup' do + subject { conn.dup } + + let(:url) { 'http://sushi.com/foo' } + let(:options) do + { + ssl: { verify: :none }, + headers: { 'content-type' => 'text/plain' }, + params: { 'a' => '1' }, + request: { timeout: 5 } + } + end + + it { expect(subject.build_exclusive_url).to eq(conn.build_exclusive_url) } + it { expect(subject.headers['content-type']).to eq('text/plain') } + it { expect(subject.params['a']).to eq('1') } + + context 'after manual changes' do + before do + subject.basic_auth('', '') + subject.headers['content-length'] = 12 + subject.params['b'] = '2' + subject.options[:open_timeout] = 10 + end + + it { expect(subject.builder.handlers.size).to eq(1) } + it { expect(conn.builder.handlers.size).to eq(1) } + it { expect(conn.headers.key?('content-length')).to be_falsey } + it { expect(conn.params.key?('b')).to be_falsey } + it { expect(subject.options[:timeout]).to eq(5) } + it { expect(conn.options[:open_timeout]).to be_nil } + end + end + + describe '#respond_to?' do + it { expect(Faraday.respond_to?(:get)).to be_truthy } + it { expect(Faraday.respond_to?(:post)).to be_truthy } + end + + describe 'default_connection_options' do + context 'assigning a default value' do + before do + Faraday.default_connection_options = nil + Faraday.default_connection_options.request.timeout = 10 + end + + it_behaves_like 'default connection options' + end + + context 'assigning a hash' do + before { Faraday.default_connection_options = { request: { timeout: 10 } } } + + it_behaves_like 'default connection options' + end + end + + describe 'request params' do + context 'with simple url' do + let(:url) { 'http://example.com' } + let!(:stubbed) { stub_request(:get, 'http://example.com?a=a&p=3') } + + after { expect(stubbed).to have_been_made.once } + + it 'test_overrides_request_params' do + conn.get('?p=2&a=a', p: 3) + end + + it 'test_overrides_request_params_block' do + conn.get('?p=1&a=a', p: 2) do |req| + req.params[:p] = 3 + end + end + + it 'test_overrides_request_params_block_url' do + conn.get(nil, p: 2) do |req| + req.url('?p=1&a=a', 'p' => 3) + end + end + end + + context 'with url and extra params' do + let(:url) { 'http://example.com?a=1&b=2' } + let(:options) { { params: { c: 3 } } } + + it 'merges connection and request params' do + stubbed = stub_request(:get, 'http://example.com?a=1&b=2&c=3&limit=5&page=1') + conn.get('?page=1', limit: 5) + expect(stubbed).to have_been_made.once + end + + it 'allows to override all params' do + stubbed = stub_request(:get, 'http://example.com?b=b') + conn.get('?p=1&a=a', p: 2) do |req| + expect(req.params[:a]).to eq('a') + expect(req.params['c']).to eq(3) + expect(req.params['p']).to eq(2) + req.params = { b: 'b' } + expect(req.params['b']).to eq('b') + end + expect(stubbed).to have_been_made.once + end + + it 'allows to set params_encoder for single request' do + encoder = Object.new + def encoder.encode(params) + params.map { |k, v| "#{k.upcase}-#{v.to_s.upcase}" }.join(',') + end + stubbed = stub_request(:get, 'http://example.com/?A-1,B-2,C-3,FEELING-BLUE') + + conn.get('/', feeling: 'blue') do |req| + req.options.params_encoder = encoder + end + expect(stubbed).to have_been_made.once + end + end + + context 'with default params encoder' do + let!(:stubbed) { stub_request(:get, 'http://example.com?color%5B%5D=red&color%5B%5D=blue') } + after { expect(stubbed).to have_been_made.once } + + it 'supports array params in url' do + 
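+        # the default (nested) encoder re-encodes the bracketed query, so it matches the color%5B%5D stub above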
conn.get('http://example.com?color[]=red&color[]=blue') + end + + it 'supports array params in params' do + conn.get('http://example.com', color: %w[red blue]) + end + end + + context 'with flat params encoder' do + let(:options) { { request: { params_encoder: Faraday::FlatParamsEncoder } } } + let!(:stubbed) { stub_request(:get, 'http://example.com?color=blue') } + after { expect(stubbed).to have_been_made.once } + + it 'supports array params in params' do + conn.get('http://example.com', color: %w[red blue]) + end + + context 'with array param in url' do + let(:url) { 'http://example.com?color[]=red&color[]=blue' } + + it do + conn.get('/') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/deprecate_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/deprecate_spec.rb new file mode 100644 index 0000000..b5d20a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/deprecate_spec.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::DeprecatedClass do + class SampleClass < StandardError + attr_accessor :foo + + def initialize(foo = nil) + @foo = foo || :foo + end + end + + SampleDeprecatedClass = Faraday::DeprecatedClass.proxy_class(SampleClass) + + it 'does not raise error for deprecated classes but prints an error message' do + error_message, foobar = with_warn_squelching { SampleDeprecatedClass.new(:foo_bar) } + expect(foobar).to be_a(SampleClass) + expect(foobar.foo).to eq(:foo_bar) + expect(error_message).to match( + Regexp.new( + 'NOTE: SampleDeprecatedClass.new is deprecated; '\ + 'use SampleClass.new instead. It will be removed in or after version 1.0' + ) + ) + end + + it 'does not raise an error for inherited error-namespaced classes but prints an error message' do + error_message, = with_warn_squelching { Class.new(SampleDeprecatedClass) } + + expect(error_message).to match( + Regexp.new( + 'NOTE: Inheriting SampleDeprecatedClass is deprecated; '\ + 'use SampleClass instead. 
It will be removed in or after version 1.0' + ) + ) + end + + it 'allows backward-compatible class to be subclassed' do + expect do + with_warn_squelching { Class.new(SampleDeprecatedClass) } + end.not_to raise_error + end + + it 'allows rescuing of a current error with a deprecated error' do + expect { raise SampleClass, nil }.to raise_error(SampleDeprecatedClass) + end + + it 'allows rescuing of a current error with a current error' do + expect { raise SampleClass, nil }.to raise_error(SampleClass) + end + + it 'allows rescuing of a deprecated error with a deprecated error' do + expect { raise SampleDeprecatedClass, nil }.to raise_error(SampleDeprecatedClass) + end + + it 'allows rescuing of a deprecated error with a current error' do + expect { raise SampleDeprecatedClass, nil }.to raise_error(SampleClass) + end + + describe 'match behavior' do + class SampleDeprecatedClassA < SampleDeprecatedClass; end + class SampleDeprecatedClassB < SampleDeprecatedClass; end + + class SampleDeprecatedClassAX < SampleDeprecatedClassA; end + + class SampleClassA < SampleClass; end + + describe 'undeprecated class' do + it 'is === to instance of deprecated class' do + expect(SampleDeprecatedClass.new.is_a?(SampleClass)).to be true + end + + it 'is === to instance of subclass of deprecated class' do + expect(SampleDeprecatedClassA.new.is_a?(SampleClass)).to be true + end + + it 'is === to instance of subclass of subclass of deprecated class' do + expect(SampleDeprecatedClassAX.new.is_a?(SampleClass)).to be true + end + end + + describe 'subclass of undeprecated class' do + it 'is not === to instance of undeprecated class' do + expect(SampleClass.new.is_a?(SampleClassA)).to be false + end + + it 'is not === to instance of deprecated class' do + expect(SampleDeprecatedClass.new.is_a?(SampleClassA)).to be false + end + end + + describe 'deprecated class' do + it 'is === to instance of undeprecated class' do + expect(SampleDeprecatedClass.new.is_a?(SampleClass)).to be true + end + + it 'is === to instance of subclass of undeprecated class' do + expect(SampleClassA.superclass == SampleDeprecatedClass.superclass).to be true + end + + it 'is === to instance of subclass of deprecated class' do + expect(SampleDeprecatedClassA.new.is_a?(SampleDeprecatedClass)).to be true + end + + it 'is === to instance of subclass of subclass of deprecated class' do + expect(SampleDeprecatedClassAX.new.is_a?(SampleDeprecatedClass)).to be true + end + end + + describe 'subclass of deprecated class' do + it 'is not === to instance of subclass of undeprecated class' do + expect(SampleClass.new.is_a?(SampleDeprecatedClassA)).to be false + end + + it 'is not === to instance of another subclass of deprecated class' do + expect(SampleDeprecatedClassB.new.is_a?(SampleDeprecatedClassA)).to be false + end + + it 'is === to instance of its subclass' do + expect(SampleDeprecatedClassAX.new.is_a?(SampleDeprecatedClassA)).to be true + end + + it 'is === to instance of deprecated class' do + expect(SampleDeprecatedClassB.new.is_a?(SampleDeprecatedClass)).to be true + end + end + + describe 'subclass of subclass of deprecated class' do + it 'is not === to instance of subclass of another subclass of deprecated class' do + expect(SampleDeprecatedClassB.new.is_a?(SampleDeprecatedClassAX)).to be false + end + + it 'is not === to instance of its superclass' do + expect(SampleDeprecatedClass.new.is_a?(SampleDeprecatedClassA)).to be false + end + end + end + + def with_warn_squelching + stderr_catcher = StringIO.new + original_stderr = $stderr + 
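+    # point $stderr at a StringIO so the deprecation warning can be captured and returned alongside the block's result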
$stderr = stderr_catcher + result = yield if block_given? + [stderr_catcher.tap(&:rewind).string, result] + ensure + $stderr = original_stderr + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/error_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/error_spec.rb new file mode 100644 index 0000000..bea9f0c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/error_spec.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::ClientError do + describe '.initialize' do + subject { described_class.new(exception, response) } + let(:response) { nil } + + context 'with exception only' do + let(:exception) { RuntimeError.new('test') } + + it { expect(subject.wrapped_exception).to eq(exception) } + it { expect(subject.response).to be_nil } + it { expect(subject.message).to eq(exception.message) } + it { expect(subject.backtrace).to eq(exception.backtrace) } + it { expect(subject.inspect).to eq('#<Faraday::ClientError wrapped=#<RuntimeError: test>>') } + it { expect(subject.response_status).to be_nil } + end + + context 'with response hash' do + let(:exception) { { status: 400 } } + + it { expect(subject.wrapped_exception).to be_nil } + it { expect(subject.response).to eq(exception) } + it { expect(subject.message).to eq('the server responded with status 400') } + it { expect(subject.inspect).to eq('#<Faraday::ClientError response={:status=>400}>') } + it { expect(subject.response_status).to eq(400) } + end + + context 'with string' do + let(:exception) { 'custom message' } + + it { expect(subject.wrapped_exception).to be_nil } + it { expect(subject.response).to be_nil } + it { expect(subject.message).to eq('custom message') } + it { expect(subject.inspect).to eq('#<Faraday::ClientError #<Faraday::ClientError: custom message>>') } + it { expect(subject.response_status).to be_nil } + end + + context 'with anything else #to_s' do + let(:exception) { %w[error1 error2] } + + it { expect(subject.wrapped_exception).to be_nil } + it { expect(subject.response).to be_nil } + it { expect(subject.message).to eq('["error1", "error2"]') } + it { expect(subject.inspect).to eq('#<Faraday::ClientError #<Faraday::ClientError: ["error1", "error2"]>>') } + it { expect(subject.response_status).to be_nil } + end + + context 'with exception string and response hash' do + let(:exception) { 'custom message' } + let(:response) { { status: 400 } } + + it { expect(subject.wrapped_exception).to be_nil } + it { expect(subject.response).to eq(response) } + it { expect(subject.message).to eq('custom message') } + it { expect(subject.inspect).to eq('#<Faraday::ClientError response={:status=>400}>') } + it { expect(subject.response_status).to eq(400) } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/middleware_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/middleware_spec.rb new file mode 100644 index 0000000..50b8ee1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/middleware_spec.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Middleware do + subject { described_class.new(app) } + let(:app) { double } + + describe 'options' do + context 'when options are passed to the middleware' do + subject { described_class.new(app, options) } + let(:options) { { field: 'value' } } + + it 'accepts options when initialized' do + expect(subject.options[:field]).to eq('value') + end + end + end + + describe '#on_request' do + subject do + Class.new(described_class) do + def on_request(env) + # do nothing + end + end.new(app) + end + + it 'is called by #call' do + expect(app).to receive(:call).and_return(app) + expect(app).to receive(:on_complete) + is_expected.to
receive(:call).and_call_original + is_expected.to receive(:on_request) + subject.call(double) + end + end + + describe '#close' do + context "with app that doesn't support \#close" do + it 'should issue warning' do + is_expected.to receive(:warn) + subject.close + end + end + + context "with app that supports \#close" do + it 'should issue warning' do + expect(app).to receive(:close) + is_expected.to_not receive(:warn) + subject.close + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/env_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/env_spec.rb new file mode 100644 index 0000000..04a4b5e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/env_spec.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Env do + subject(:env) { described_class.new } + + it 'allows to access members' do + expect(env.method).to be_nil + env.method = :get + expect(env.method).to eq(:get) + end + + it 'allows to access symbol non members' do + expect(env[:custom]).to be_nil + env[:custom] = :boom + expect(env[:custom]).to eq(:boom) + end + + it 'allows to access string non members' do + expect(env['custom']).to be_nil + env['custom'] = :boom + expect(env['custom']).to eq(:boom) + end + + it 'ignores false when fetching' do + ssl = Faraday::SSLOptions.new + ssl.verify = false + expect(ssl.fetch(:verify, true)).to be_falsey + end + + it 'retains custom members' do + env[:foo] = 'custom 1' + env[:bar] = :custom_2 + env2 = Faraday::Env.from(env) + env2[:baz] = 'custom 3' + + expect(env2[:foo]).to eq('custom 1') + expect(env2[:bar]).to eq(:custom_2) + expect(env[:baz]).to be_nil + end + + describe '#body' do + subject(:env) { described_class.from(body: { foo: 'bar' }) } + + context 'when response is not finished yet' do + it 'returns the request body' do + expect(env.body).to eq(foo: 'bar') + end + end + + context 'when response is finished' do + before do + env.status = 200 + env.body = { bar: 'foo' } + env.response = Faraday::Response.new(env) + end + + it 'returns the response body' do + expect(env.body).to eq(bar: 'foo') + end + + it 'allows to access request_body' do + expect(env.request_body).to eq(foo: 'bar') + end + + it 'allows to access response_body' do + expect(env.response_body).to eq(bar: 'foo') + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/options_spec.rb new file mode 100644 index 0000000..9758ecc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/options_spec.rb @@ -0,0 +1,297 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Options do + SubOptions = Class.new(Faraday::Options.new(:sub_a, :sub_b)) + class ParentOptions < Faraday::Options.new(:a, :b, :c) + options c: SubOptions + end + + describe '#merge' do + it 'merges options with hashes' do + options = ParentOptions.new(1) + expect(options.a).to eq(1) + expect(options.b).to be_nil + + dup = options.merge a: 2, b: 3 + expect(dup.a).to eq(2) + expect(dup.b).to eq(3) + expect(options.a).to eq(1) + expect(options.b).to be_nil + end + + it 'deeply merges two options' do + sub_opts1 = SubOptions.from(sub_a: 3) + sub_opts2 = SubOptions.from(sub_b: 4) + opt1 = ParentOptions.from(a: 1, c: sub_opts1) + opt2 = ParentOptions.from(b: 2, c: sub_opts2) + + merged = opt1.merge(opt2) + + expected_sub_opts = SubOptions.from(sub_a: 3, sub_b: 4) + 
expected = ParentOptions.from(a: 1, b: 2, c: expected_sub_opts) + expect(merged).to eq(expected) + end + + it 'deeply merges options with hashes' do + sub_opts1 = SubOptions.from(sub_a: 3) + sub_opts2 = { sub_b: 4 } + opt1 = ParentOptions.from(a: 1, c: sub_opts1) + opt2 = { b: 2, c: sub_opts2 } + + merged = opt1.merge(opt2) + + expected_sub_opts = SubOptions.from(sub_a: 3, sub_b: 4) + expected = ParentOptions.from(a: 1, b: 2, c: expected_sub_opts) + expect(merged).to eq(expected) + end + + it 'deeply merges options with nil' do + sub_opts = SubOptions.new(3, 4) + options = ParentOptions.new(1, 2, sub_opts) + expect(options.a).to eq(1) + expect(options.b).to eq(2) + expect(options.c.sub_a).to eq(3) + expect(options.c.sub_b).to eq(4) + + options2 = ParentOptions.from(b: 5, c: nil) + + merged = options.merge(options2) + + expect(merged.b).to eq(5) + expect(merged.c).to eq(sub_opts) + end + + it 'deeply merges options with options having nil sub-options' do + options = ParentOptions.from(a: 1) + + sub_opts = SubOptions.new(3, 4) + options2 = ParentOptions.from(b: 2, c: sub_opts) + + expect(options.a).to eq(1) + expect(options2.b).to eq(2) + expect(options2.c.sub_a).to eq(3) + expect(options2.c.sub_b).to eq(4) + + merged = options.merge(options2) + + expect(merged.c).to eq(sub_opts) + end + + describe '#dup' do + it 'duplicate options but not sub-options' do + sub_opts = SubOptions.from(sub_a: 3) + opts = ParentOptions.from(b: 1, c: sub_opts) + + duped = opts.dup + duped.b = 2 + duped.c.sub_a = 4 + + expect(opts.b).to eq(1) + expect(opts.c.sub_a).to eq(4) + end + end + + describe '#deep_dup' do + it 'duplicate options and also suboptions' do + sub_opts = SubOptions.from(sub_a: 3) + opts = ParentOptions.from(b: 1, c: sub_opts) + + duped = opts.deep_dup + duped.b = 2 + duped.c.sub_a = 4 + + expect(opts.b).to eq(1) + expect(opts.c.sub_a).to eq(3) + end + end + + describe '#clear' do + it 'clears the options' do + options = SubOptions.new(1) + expect(options.empty?).not_to be_truthy + options.clear + expect(options.empty?).to be_truthy + end + end + + describe '#empty?' do + it 'returns true only if all options are nil' do + options = SubOptions.new + expect(options.empty?).to be_truthy + options.sub_a = 1 + expect(options.empty?).not_to be_truthy + options.delete(:sub_a) + expect(options.empty?).to be_truthy + end + end + + describe '#each_key' do + it 'allows to iterate through keys' do + options = ParentOptions.new(1, 2, 3) + enum = options.each_key + expect(enum.next.to_sym).to eq(:a) + expect(enum.next.to_sym).to eq(:b) + expect(enum.next.to_sym).to eq(:c) + end + end + + describe '#key?' do + it 'returns true if the key exists and is not nil' do + options = SubOptions.new + expect(options.key?(:sub_a)).not_to be_truthy + options.sub_a = 1 + expect(options.key?(:sub_a)).to be_truthy + end + end + + describe '#each_value' do + it 'allows to iterate through values' do + options = ParentOptions.new(1, 2, 3) + enum = options.each_value + expect(enum.next).to eq(1) + expect(enum.next).to eq(2) + expect(enum.next).to eq(3) + end + end + + describe '#value?' 
do + it 'returns true if any key has that value' do + options = SubOptions.new + expect(options.value?(1)).not_to be_truthy + options.sub_a = 1 + expect(options.value?(1)).to be_truthy + end + end + + describe '#update' do + it 'updates options from hashes' do + options = ParentOptions.new(1) + expect(options.a).to eq(1) + expect(options.b).to be_nil + + updated = options.update a: 2, b: 3 + expect(options.a).to eq(2) + expect(options.b).to eq(3) + expect(updated).to eq(options) + end + end + + describe '#delete' do + it 'allows to remove value for key' do + options = ParentOptions.new(1) + expect(options.a).to eq(1) + expect(options.delete(:a)).to eq(1) + expect(options.a).to be_nil + end + end + + describe '#from' do + it { expect { ParentOptions.from invalid: 1 }.to raise_error(NoMethodError) } + + it 'works with options' do + options = ParentOptions.new(1) + + value = ParentOptions.from(options) + expect(value.a).to eq(1) + expect(value.b).to be_nil + end + + it 'works with options with sub object' do + sub = SubOptions.new(1) + options = ParentOptions.from a: 1, c: sub + expect(options).to be_a_kind_of(ParentOptions) + expect(options.a).to eq(1) + expect(options.b).to be_nil + expect(options.c).to be_a_kind_of(SubOptions) + expect(options.c.sub_a).to eq(1) + end + + it 'works with hash' do + options = ParentOptions.from a: 1 + expect(options).to be_a_kind_of(ParentOptions) + expect(options.a).to eq(1) + expect(options.b).to be_nil + end + + it 'works with hash with sub object' do + options = ParentOptions.from a: 1, c: { sub_a: 1 } + expect(options).to be_a_kind_of(ParentOptions) + expect(options.a).to eq(1) + expect(options.b).to be_nil + expect(options.c).to be_a_kind_of(SubOptions) + expect(options.c.sub_a).to eq(1) + end + + it 'works with deep hash' do + hash = { b: 1 } + options = ParentOptions.from a: hash + expect(options.a[:b]).to eq(1) + + hash[:b] = 2 + expect(options.a[:b]).to eq(1) + + options.a[:b] = 3 + expect(hash[:b]).to eq(2) + expect(options.a[:b]).to eq(3) + end + + it 'works with nil' do + options = ParentOptions.from(nil) + expect(options).to be_a_kind_of(ParentOptions) + expect(options.a).to be_nil + expect(options.b).to be_nil + end + + it 'respects inheritance' do + subclass = Class.new(ParentOptions) + options = subclass.from(c: { sub_a: 'hello' }) + expect(options.c).to be_a_kind_of(SubOptions) + expect(options.c.sub_a).to eq('hello') + end + end + + describe '#memoized' do + subject(:options_class) { Class.new(ParentOptions) } + it 'requires block' do + expect { options_class.memoized(:a) }.to raise_error(ArgumentError) + end + + it 'accepts block' do + options_class.memoized(:a) { :foo } + expect(options_class.new.a).to eql(:foo) + end + end + + describe '#fetch' do + subject { SubOptions.new } + + context 'when the fetched key has no value' do + it 'uses falsey default' do + expect(subject.fetch(:sub_a, false) { |_| :blah }).to be_falsey + end + + it 'accepts block' do + expect(subject.fetch(:sub_a) { |k| "yo #{k.inspect}" }).to eq('yo :sub_a') + end + + it 'needs a default if key is missing' do + expect { subject.fetch(:sub_a) }.to raise_error(Faraday::Options.fetch_error_class) + end + end + + context 'when the fetched key has a value' do + before do + subject.sub_a = 1 + end + + it 'grabs value' do + expect(subject.fetch(:sub_a, false) { |_| :blah }).to eq(1) + end + + it 'works with key' do + expect(subject.fetch(:sub_a)).to eq(1) + end + end + end + end +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/proxy_options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/proxy_options_spec.rb new file mode 100644 index 0000000..7951554 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/proxy_options_spec.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::ProxyOptions do + describe '#from' do + it 'works with string' do + options = Faraday::ProxyOptions.from 'http://user:pass@example.org' + expect(options.user).to eq('user') + expect(options.password).to eq('pass') + expect(options.uri).to be_a_kind_of(URI) + expect(options.path).to eq('') + expect(options.port).to eq(80) + expect(options.host).to eq('example.org') + expect(options.scheme).to eq('http') + expect(options.inspect).to match('#<Faraday::ProxyOptions uri=') + end + + it 'works with no auth' do + proxy = Faraday::ProxyOptions.from 'http://example.org' + expect(proxy.user).to be_nil + expect(proxy.password).to be_nil + end + end + + it 'allows hash access' do + proxy = Faraday::ProxyOptions.from 'http://a%40b:pw%20d@example.org' + expect(proxy.user).to eq('a@b') + expect(proxy[:user]).to eq('a@b') + expect(proxy.password).to eq('pw d') + expect(proxy[:password]).to eq('pw d') + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/request_options_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/request_options_spec.rb new file mode 100644 index 0000000..8c1bb99 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/options/request_options_spec.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::RequestOptions do + subject(:options) { Faraday::RequestOptions.new } + + it 'allows to set the request proxy' do + expect(options.proxy).to be_nil + + expect { options[:proxy] = { booya: 1 } }.to raise_error(NoMethodError) + + options[:proxy] = { user: 'user' } + expect(options.proxy).to be_a_kind_of(Faraday::ProxyOptions) + expect(options.proxy.user).to eq('user') + + options.proxy = nil + expect(options.proxy).to be_nil + expect(options.inspect).to eq('#<Faraday::RequestOptions (empty)>') + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/flat_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/flat_spec.rb new file mode 100644 index 0000000..115342e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/flat_spec.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require 'rack/utils' + +RSpec.describe Faraday::FlatParamsEncoder do + it_behaves_like 'a params encoder' + + it 'decodes arrays' do + query = 'a=one&a=two&a=three' + expected = { 'a' => %w[one two three] } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes boolean values' do + query = 'a=true&b=false' + expected = { 'a' => 'true', 'b' => 'false' } + expect(subject.decode(query)).to eq(expected) + end + + it 'encodes boolean values' do + params = { a: true, b: false } + expect(subject.encode(params)).to eq('a=true&b=false') + end + + it 'encodes boolean values in array' do + params = { a: [true, false] } + expect(subject.encode(params)).to eq('a=true&a=false') + end + + it 'encodes empty array in hash' do + params = { a: [] } + expect(subject.encode(params)).to eq('a=') + end + + it 'encodes unsorted when asked' do + params = { b: false, a: true } + expect(subject.encode(params)).to eq('a=true&b=false') +
Faraday::FlatParamsEncoder.sort_params = false + expect(subject.encode(params)).to eq('b=false&a=true') + Faraday::FlatParamsEncoder.sort_params = true + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/nested_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/nested_spec.rb new file mode 100644 index 0000000..98c372e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/params_encoders/nested_spec.rb @@ -0,0 +1,142 @@ +# frozen_string_literal: true + +require 'rack/utils' + +RSpec.describe Faraday::NestedParamsEncoder do + it_behaves_like 'a params encoder' + + it 'decodes arrays' do + query = 'a[1]=one&a[2]=two&a[3]=three' + expected = { 'a' => %w[one two three] } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes hashes' do + query = 'a[b1]=one&a[b2]=two&a[b][c]=foo' + expected = { 'a' => { 'b1' => 'one', 'b2' => 'two', 'b' => { 'c' => 'foo' } } } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested arrays rack compat' do + query = 'a[][one]=1&a[][two]=2&a[][one]=3&a[][two]=4' + expected = Rack::Utils.parse_nested_query(query) + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested array mixed types' do + query = 'a[][one]=1&a[]=2&a[]=&a[]' + expected = Rack::Utils.parse_nested_query(query) + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested ignores invalid array' do + query = '[][a]=1&b=2' + expected = { 'a' => '1', 'b' => '2' } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested ignores repeated array notation' do + query = 'a[][][]=1' + expected = { 'a' => ['1'] } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested ignores malformed keys' do + query = '=1&[]=2' + expected = {} + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested subkeys dont have to be in brackets' do + query = 'a[b]c[d]e=1' + expected = { 'a' => { 'b' => { 'c' => { 'd' => { 'e' => '1' } } } } } + expect(subject.decode(query)).to eq(expected) + end + + it 'decodes nested final value overrides any type' do + query = 'a[b][c]=1&a[b]=2' + expected = { 'a' => { 'b' => '2' } } + expect(subject.decode(query)).to eq(expected) + end + + it 'encodes rack compat' do + params = { a: [{ one: '1', two: '2' }, '3', ''] } + result = Faraday::Utils.unescape(Faraday::NestedParamsEncoder.encode(params)).split('&') + expected = Rack::Utils.build_nested_query(params).split('&') + expect(result).to match_array(expected) + end + + it 'encodes empty string array value' do + expected = 'baz=&foo%5Bbar%5D=' + result = Faraday::NestedParamsEncoder.encode(foo: { bar: '' }, baz: '') + expect(result).to eq(expected) + end + + it 'encodes nil array value' do + expected = 'baz&foo%5Bbar%5D' + result = Faraday::NestedParamsEncoder.encode(foo: { bar: nil }, baz: nil) + expect(result).to eq(expected) + end + + it 'encodes empty array value' do + expected = 'baz%5B%5D&foo%5Bbar%5D%5B%5D' + result = Faraday::NestedParamsEncoder.encode(foo: { bar: [] }, baz: []) + expect(result).to eq(expected) + end + + it 'encodes boolean values' do + params = { a: true, b: false } + expect(subject.encode(params)).to eq('a=true&b=false') + end + + it 'encodes boolean values in array' do + params = { a: [true, false] } + expect(subject.encode(params)).to eq('a%5B%5D=true&a%5B%5D=false') + end + + it 'encodes unsorted when asked' do + params = { b: false, a: true } + expect(subject.encode(params)).to 
eq('a=true&b=false') + Faraday::NestedParamsEncoder.sort_params = false + expect(subject.encode(params)).to eq('b=false&a=true') + Faraday::NestedParamsEncoder.sort_params = true + end + + shared_examples 'a wrong decoding' do + it do + expect { subject.decode(query) }.to raise_error(TypeError) do |e| + expect(e.message).to eq(error_message) + end + end + end + + context 'when expecting hash but getting string' do + let(:query) { 'a=1&a[b]=2' } + let(:error_message) { "expected Hash (got String) for param `a'" } + it_behaves_like 'a wrong decoding' + end + + context 'when expecting hash but getting array' do + let(:query) { 'a[]=1&a[b]=2' } + let(:error_message) { "expected Hash (got Array) for param `a'" } + it_behaves_like 'a wrong decoding' + end + + context 'when expecting nested hash but getting non nested' do + let(:query) { 'a[b]=1&a[b][c]=2' } + let(:error_message) { "expected Hash (got String) for param `b'" } + it_behaves_like 'a wrong decoding' + end + + context 'when expecting array but getting hash' do + let(:query) { 'a[b]=1&a[]=2' } + let(:error_message) { "expected Array (got Hash) for param `a'" } + it_behaves_like 'a wrong decoding' + end + + context 'when expecting array but getting string' do + let(:query) { 'a=1&a[]=2' } + let(:error_message) { "expected Array (got String) for param `a'" } + it_behaves_like 'a wrong decoding' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/rack_builder_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/rack_builder_spec.rb new file mode 100644 index 0000000..aa28564 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/rack_builder_spec.rb @@ -0,0 +1,345 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::RackBuilder do + # mock handler classes + (Handler = Struct.new(:app)).class_eval do + def call(env) + env[:request_headers]['X-Middleware'] ||= '' + env[:request_headers]['X-Middleware'] += ":#{self.class.name.split('::').last}" + app.call(env) + end + end + + class Apple < Handler + end + class Orange < Handler + end + class Banana < Handler + end + + class Broken < Faraday::Middleware + dependency 'zomg/i_dont/exist' + end + + subject { conn.builder } + + context 'with default stack' do + let(:conn) { Faraday::Connection.new } + + it { expect(subject[0]).to eq(Faraday::Request.lookup_middleware(:url_encoded)) } + it { expect(subject.adapter).to eq(Faraday::Adapter.lookup_middleware(Faraday.default_adapter)) } + end + + context 'with custom empty block' do + let(:conn) { Faraday::Connection.new {} } + + it { expect(subject[0]).to be_nil } + it { expect(subject.adapter).to eq(Faraday::Adapter.lookup_middleware(Faraday.default_adapter)) } + end + + context 'with custom adapter only' do + let(:conn) do + Faraday::Connection.new do |builder| + builder.adapter :test do |stub| + stub.get('/') { |_| [200, {}, ''] } + end + end + end + + it { expect(subject[0]).to be_nil } + it { expect(subject.adapter).to eq(Faraday::Adapter.lookup_middleware(:test)) } + end + + context 'with custom handler and adapter' do + let(:conn) do + Faraday::Connection.new do |builder| + builder.use Apple + builder.adapter :test do |stub| + stub.get('/') { |_| [200, {}, ''] } + end + end + end + + it 'locks the stack after making a request' do + expect(subject.locked?).to be_falsey + conn.get('/') + expect(subject.locked?).to be_truthy + expect { subject.use(Orange) }.to raise_error(Faraday::RackBuilder::StackLocked) + end + + it 'dup stack is unlocked' do + 
expect(subject.locked?).to be_falsey + subject.lock! + expect(subject.locked?).to be_truthy + dup = subject.dup + expect(dup).to eq(subject) + expect(dup.locked?).to be_falsey + end + + it 'allows to compare handlers' do + expect(subject.handlers.first).to eq(Faraday::RackBuilder::Handler.new(Apple)) + end + end + + context 'when having a single handler' do + let(:conn) { Faraday::Connection.new {} } + + before { subject.use(Apple) } + + it { expect(subject.handlers).to eq([Apple]) } + + it 'allows rebuilding' do + subject.build do |builder| + builder.use(Orange) + end + expect(subject.handlers).to eq([Orange]) + end + + it 'allows use' do + subject.use(Orange) + expect(subject.handlers).to eq([Apple, Orange]) + end + + it 'allows insert_before' do + subject.insert_before(Apple, Orange) + expect(subject.handlers).to eq([Orange, Apple]) + end + + it 'allows insert_after' do + subject.insert_after(Apple, Orange) + expect(subject.handlers).to eq([Apple, Orange]) + end + + it 'raises an error trying to use an unregistered symbol' do + expect { subject.use(:apple) }.to raise_error(Faraday::Error) do |err| + expect(err.message).to eq(':apple is not registered on Faraday::Middleware') + end + end + end + + context 'with custom registered middleware' do + let(:conn) { Faraday::Connection.new {} } + + after { Faraday::Middleware.unregister_middleware(:apple) } + + it 'allows to register with constant' do + Faraday::Middleware.register_middleware(apple: Apple) + subject.use(:apple) + expect(subject.handlers).to eq([Apple]) + end + + it 'allows to register with symbol' do + Faraday::Middleware.register_middleware(apple: :Apple) + subject.use(:apple) + expect(subject.handlers).to eq([Apple]) + end + + it 'allows to register with string' do + Faraday::Middleware.register_middleware(apple: 'Apple') + subject.use(:apple) + expect(subject.handlers).to eq([Apple]) + end + + it 'allows to register with Proc' do + Faraday::Middleware.register_middleware(apple: -> { Apple }) + subject.use(:apple) + expect(subject.handlers).to eq([Apple]) + end + end + + context 'when having two handlers' do + let(:conn) { Faraday::Connection.new {} } + + before do + subject.use(Apple) + subject.use(Orange) + end + + it 'allows insert_before' do + subject.insert_before(Orange, Banana) + expect(subject.handlers).to eq([Apple, Banana, Orange]) + end + + it 'allows insert_after' do + subject.insert_after(Apple, Banana) + expect(subject.handlers).to eq([Apple, Banana, Orange]) + end + + it 'allows to swap handlers' do + subject.swap(Apple, Banana) + expect(subject.handlers).to eq([Banana, Orange]) + end + + it 'allows to delete a handler' do + subject.delete(Apple) + expect(subject.handlers).to eq([Orange]) + end + end + + context 'when having a handler with broken dependency' do + let(:conn) do + Faraday::Connection.new do |builder| + builder.adapter :test do |stub| + stub.get('/') { |_| [200, {}, ''] } + end + end + end + + before { subject.use(Broken) } + + it 'raises an error while making a request' do + expect { conn.get('/') }.to raise_error(RuntimeError) do |err| + expect(err.message).to match(%r{missing dependency for Broken: .+ -- zomg/i_dont/exist}) + end + end + end + + context 'when middleware is added with named arguments' do + let(:conn) { Faraday::Connection.new {} } + + let(:dog_middleware) do + Class.new(Faraday::Middleware) do + attr_accessor :name + + def initialize(app, name:) + super(app) + @name = name + end + end + end + let(:dog) do + subject.handlers.find { |handler| handler == dog_middleware }.build + end 
+ + it 'adds a handler to construct middleware with options passed to use' do + subject.use dog_middleware, name: 'Rex' + expect { dog }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(dog.name).to eq('Rex') + end + end + + context 'when a request adapter is added with named arguments' do + let(:conn) { Faraday::Connection.new {} } + + let(:cat_request) do + Class.new(Faraday::Middleware) do + attr_accessor :name + + def initialize(app, name:) + super(app) + @name = name + end + end + end + let(:cat) do + subject.handlers.find { |handler| handler == cat_request }.build + end + + it 'adds a handler to construct request adapter with options passed to request' do + Faraday::Request.register_middleware cat_request: cat_request + subject.request :cat_request, name: 'Felix' + expect { cat }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(cat.name).to eq('Felix') + end + end + + context 'when a response adapter is added with named arguments' do + let(:conn) { Faraday::Connection.new {} } + + let(:fish_response) do + Class.new(Faraday::Response::Middleware) do + attr_accessor :name + + def initialize(app, name:) + super(app) + @name = name + end + end + end + let(:fish) do + subject.handlers.find { |handler| handler == fish_response }.build + end + + it 'adds a handler to construct response adapter with options passed to response' do + Faraday::Response.register_middleware fish_response: fish_response + subject.response :fish_response, name: 'Bubbles' + expect { fish }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(fish.name).to eq('Bubbles') + end + end + + context 'when a plain adapter is added with named arguments' do + let(:conn) { Faraday::Connection.new {} } + + let(:rabbit_adapter) do + Class.new(Faraday::Adapter) do + attr_accessor :name + + def initialize(app, name:) + super(app) + @name = name + end + end + end + let(:rabbit) do + subject.adapter.build + end + + it 'adds a handler to construct adapter with options passed to adapter' do + Faraday::Adapter.register_middleware rabbit_adapter: rabbit_adapter + subject.adapter :rabbit_adapter, name: 'Thumper' + expect { rabbit }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(rabbit.name).to eq('Thumper') + end + end + + context 'when handlers are directly added or updated' do + let(:conn) { Faraday::Connection.new {} } + + let(:rock_handler) do + Class.new do + attr_accessor :name + + def initialize(_app, name:) + @name = name + end + end + end + let(:rock) do + subject.handlers.find { |handler| handler == rock_handler }.build + end + + it 'adds a handler to construct adapter with options passed to insert' do + subject.insert 0, rock_handler, name: 'Stony' + expect { rock }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(rock.name).to eq('Stony') + end + + it 'adds a handler with options passed to insert_after' do + subject.insert_after 0, rock_handler, name: 'Rocky' + expect { rock }.to_not output( + /warning: Using the last argument as keyword parameters is deprecated/ + ).to_stderr + expect(rock.name).to eq('Rocky') + end + + it 'adds a handler with options passed to swap' do + subject.insert 0, rock_handler, name: 'Flint' + subject.swap 0, rock_handler, name: 'Chert' + expect { rock }.to_not output( + /warning: Using the last 
argument as keyword parameters is deprecated/ + ).to_stderr + expect(rock.name).to eq('Chert') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/authorization_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/authorization_spec.rb new file mode 100644 index 0000000..9bc23f0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/authorization_spec.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Request::Authorization do + let(:conn) do + Faraday.new do |b| + b.request auth_type, *auth_config + b.adapter :test do |stub| + stub.get('/auth-echo') do |env| + [200, {}, env[:request_headers]['Authorization']] + end + end + end + end + + shared_examples 'does not interfere with existing authentication' do + context 'and request already has an authentication header' do + let(:response) { conn.get('/auth-echo', nil, authorization: 'Token token="bar"') } + + it 'does not interfere with existing authorization' do + expect(response.body).to eq('Token token="bar"') + end + end + end + + let(:response) { conn.get('/auth-echo') } + + describe 'basic_auth' do + let(:auth_type) { :basic_auth } + + context 'when passed correct params' do + let(:auth_config) { %w[aladdin opensesame] } + + it { expect(response.body).to eq('Basic YWxhZGRpbjpvcGVuc2VzYW1l') } + + include_examples 'does not interfere with existing authentication' + end + + context 'when passed very long values' do + let(:auth_config) { ['A' * 255, ''] } + + it { expect(response.body).to eq("Basic #{'QUFB' * 85}Og==") } + + include_examples 'does not interfere with existing authentication' + end + end + + describe 'token_auth' do + let(:auth_type) { :token_auth } + + context 'when passed correct params' do + let(:auth_config) { 'quux' } + + it { expect(response.body).to eq('Token token="quux"') } + + include_examples 'does not interfere with existing authentication' + end + + context 'when other values are provided' do + let(:auth_config) { ['baz', { foo: 42 }] } + + it { expect(response.body).to match(/^Token /) } + it { expect(response.body).to match(/token="baz"/) } + it { expect(response.body).to match(/foo="42"/) } + + include_examples 'does not interfere with existing authentication' + end + end + + describe 'authorization' do + let(:auth_type) { :authorization } + + context 'when passed two strings' do + let(:auth_config) { ['custom', 'abc def'] } + + it { expect(response.body).to eq('custom abc def') } + + include_examples 'does not interfere with existing authentication' + end + + context 'when passed a string and a hash' do + let(:auth_config) { ['baz', { foo: 42 }] } + + it { expect(response.body).to eq('baz foo="42"') } + + include_examples 'does not interfere with existing authentication' + end + + context 'when passed a string and a proc' do + let(:auth_config) { ['Bearer', -> { 'custom_from_proc' }] } + + it { expect(response.body).to eq('Bearer custom_from_proc') } + + include_examples 'does not interfere with existing authentication' + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/instrumentation_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/instrumentation_spec.rb new file mode 100644 index 0000000..f8af4c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/instrumentation_spec.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Request::Instrumentation do + 
class FakeInstrumenter + attr_reader :instrumentations + + def initialize + @instrumentations = [] + end + + def instrument(name, env) + @instrumentations << [name, env] + yield + end + end + + let(:config) { {} } + let(:options) { Faraday::Request::Instrumentation::Options.from config } + let(:instrumenter) { FakeInstrumenter.new } + let(:conn) do + Faraday.new do |f| + f.request :instrumentation, config.merge(instrumenter: instrumenter) + f.adapter :test do |stub| + stub.get '/' do + [200, {}, 'ok'] + end + end + end + end + + it { expect(options.name).to eq('request.faraday') } + it 'defaults to ActiveSupport::Notifications' do + begin + res = options.instrumenter + rescue NameError => e + expect(e.to_s).to match('ActiveSupport') + else + expect(res).to eq(ActiveSupport::Notifications) + end + end + + it 'instruments with default name' do + expect(instrumenter.instrumentations.size).to eq(0) + + res = conn.get '/' + expect(res.body).to eq('ok') + expect(instrumenter.instrumentations.size).to eq(1) + + name, env = instrumenter.instrumentations.first + expect(name).to eq('request.faraday') + expect(env[:url].path).to eq('/') + end + + context 'with custom name' do + let(:config) { { name: 'custom' } } + + it { expect(options.name).to eq('custom') } + it 'instruments with custom name' do + expect(instrumenter.instrumentations.size).to eq(0) + + res = conn.get '/' + expect(res.body).to eq('ok') + expect(instrumenter.instrumentations.size).to eq(1) + + name, env = instrumenter.instrumentations.first + expect(name).to eq('custom') + expect(env[:url].path).to eq('/') + end + end + + context 'with custom instrumenter' do + let(:config) { { instrumenter: :custom } } + + it { expect(options.instrumenter).to eq(:custom) } + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/json_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/json_spec.rb new file mode 100644 index 0000000..89949bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/json_spec.rb @@ -0,0 +1,111 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Request::Json do + let(:middleware) { described_class.new(->(env) { Faraday::Response.new(env) }) } + + def process(body, content_type = nil) + env = { body: body, request_headers: Faraday::Utils::Headers.new } + env[:request_headers]['content-type'] = content_type if content_type + middleware.call(Faraday::Env.from(env)).env + end + + def result_body + result[:body] + end + + def result_type + result[:request_headers]['content-type'] + end + + context 'no body' do + let(:result) { process(nil) } + + it "doesn't change body" do + expect(result_body).to be_nil + end + + it "doesn't add content type" do + expect(result_type).to be_nil + end + end + + context 'empty body' do + let(:result) { process('') } + + it "doesn't change body" do + expect(result_body).to be_empty + end + + it "doesn't add content type" do + expect(result_type).to be_nil + end + end + + context 'string body' do + let(:result) { process('{"a":1}') } + + it "doesn't change body" do + expect(result_body).to eq('{"a":1}') + end + + it 'adds content type' do + expect(result_type).to eq('application/json') + end + end + + context 'object body' do + let(:result) { process(a: 1) } + + it 'encodes body' do + expect(result_body).to eq('{"a":1}') + end + + it 'adds content type' do + expect(result_type).to eq('application/json') + end + end + + context 'empty object body' do + let(:result) { process({}) } + + it 'encodes 
body' do + expect(result_body).to eq('{}') + end + end + + context 'object body with json type' do + let(:result) { process({ a: 1 }, 'application/json; charset=utf-8') } + + it 'encodes body' do + expect(result_body).to eq('{"a":1}') + end + + it "doesn't change content type" do + expect(result_type).to eq('application/json; charset=utf-8') + end + end + + context 'object body with vendor json type' do + let(:result) { process({ a: 1 }, 'application/vnd.myapp.v1+json; charset=utf-8') } + + it 'encodes body' do + expect(result_body).to eq('{"a":1}') + end + + it "doesn't change content type" do + expect(result_type).to eq('application/vnd.myapp.v1+json; charset=utf-8') + end + end + + context 'object body with incompatible type' do + let(:result) { process({ a: 1 }, 'application/xml; charset=utf-8') } + + it "doesn't change body" do + expect(result_body).to eq(a: 1) + end + + it "doesn't change content type" do + expect(result_type).to eq('application/xml; charset=utf-8') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/url_encoded_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/url_encoded_spec.rb new file mode 100644 index 0000000..9f89a56 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request/url_encoded_spec.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Request::UrlEncoded do + let(:conn) do + Faraday.new do |b| + b.request :multipart + b.request :url_encoded + b.adapter :test do |stub| + stub.post('/echo') do |env| + posted_as = env[:request_headers]['Content-Type'] + [200, { 'Content-Type' => posted_as }, env[:body]] + end + end + end + end + + it 'does nothing without payload' do + response = conn.post('/echo') + expect(response.headers['Content-Type']).to be_nil + expect(response.body.empty?).to be_truthy + end + + it 'ignores custom content type' do + response = conn.post('/echo', { some: 'data' }, 'content-type' => 'application/x-foo') + expect(response.headers['Content-Type']).to eq('application/x-foo') + expect(response.body).to eq(some: 'data') + end + + it 'works with no headers' do + response = conn.post('/echo', fruit: %w[apples oranges]) + expect(response.headers['Content-Type']).to eq('application/x-www-form-urlencoded') + expect(response.body).to eq('fruit%5B%5D=apples&fruit%5B%5D=oranges') + end + + it 'works with headers' do + response = conn.post('/echo', { 'a' => 123 }, 'content-type' => 'application/x-www-form-urlencoded') + expect(response.headers['Content-Type']).to eq('application/x-www-form-urlencoded') + expect(response.body).to eq('a=123') + end + + it 'works with nested params' do + response = conn.post('/echo', user: { name: 'Mislav', web: 'mislav.net' }) + expect(response.headers['Content-Type']).to eq('application/x-www-form-urlencoded') + expected = { 'user' => { 'name' => 'Mislav', 'web' => 'mislav.net' } } + expect(Faraday::Utils.parse_nested_query(response.body)).to eq(expected) + end + + it 'works with non nested params' do + response = conn.post('/echo', dimensions: %w[date location]) do |req| + req.options.params_encoder = Faraday::FlatParamsEncoder + end + expect(response.headers['Content-Type']).to eq('application/x-www-form-urlencoded') + expected = { 'dimensions' => %w[date location] } + expect(Faraday::Utils.parse_query(response.body)).to eq(expected) + expect(response.body).to eq('dimensions=date&dimensions=location') + end + + it 'works with unicode' do + err = capture_warnings do + response = conn.post('/echo', str: 'eé cç aã aâ') + expect(response.body).to eq('str=e%C3%A9+c%C3%A7+a%C3%A3+a%C3%A2') + end + expect(err.empty?).to be_truthy + end + + it 'works with nested keys' do + response = conn.post('/echo', 'a' => { 'b' => { 'c' => ['d'] } }) + expect(response.body).to eq('a%5Bb%5D%5Bc%5D%5B%5D=d') + end + + context 'customising default_space_encoding' do + around do |example| + Faraday::Utils.default_space_encoding = '%20' + example.run + Faraday::Utils.default_space_encoding = nil + end + + it 'uses the custom character to encode spaces' do + response = conn.post('/echo', str: 'apple banana') + expect(response.body).to eq('str=apple%20banana') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request_spec.rb new file mode 100644 index 0000000..3c86734 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/request_spec.rb @@ -0,0 +1,120 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Request do + let(:conn) do + Faraday.new(url: 'http://sushi.com/api', + headers: { 'Mime-Version' => '1.0' }, + request: { oauth: { consumer_key: 'anonymous' } }) + end + let(:http_method) { :get } + let(:block) { nil } + + subject { conn.build_request(http_method, &block) } + + context 'when nothing particular is configured' do + it { expect(subject.http_method).to eq(:get) } + it { expect(subject.to_env(conn).ssl.verify).to be_falsey } + end + + context 'when HTTP method is post' do + let(:http_method) { :post } + + it { expect(subject.http_method).to eq(:post) } + end + + describe 'deprecate method for HTTP method' do + let(:http_method) { :post } + let(:expected_warning) do + %r{NOTE: Faraday::Request#method is deprecated; use http_method instead\.
It will be removed in or after version 2.0 \nFaraday::Request#method called from .+/spec/faraday/request_spec.rb:\d+.} + end + + it { expect(subject.method).to eq(:post) } + + it { expect { subject.method }.to output(expected_warning).to_stderr } + end + + context 'when setting the url on setup with a URI' do + let(:block) { proc { |req| req.url URI.parse('foo.json?a=1') } } + + it { expect(subject.path).to eq(URI.parse('foo.json')) } + it { expect(subject.params).to eq('a' => '1') } + it { expect(subject.to_env(conn).url.to_s).to eq('http://sushi.com/api/foo.json?a=1') } + end + + context 'when setting the url on setup with a string path and params' do + let(:block) { proc { |req| req.url 'foo.json', 'a' => 1 } } + + it { expect(subject.path).to eq('foo.json') } + it { expect(subject.params).to eq('a' => 1) } + it { expect(subject.to_env(conn).url.to_s).to eq('http://sushi.com/api/foo.json?a=1') } + end + + context 'when setting the url on setup with a path including params' do + let(:block) { proc { |req| req.url 'foo.json?b=2&a=1#qqq' } } + + it { expect(subject.path).to eq('foo.json') } + it { expect(subject.params).to eq('a' => '1', 'b' => '2') } + it { expect(subject.to_env(conn).url.to_s).to eq('http://sushi.com/api/foo.json?a=1&b=2') } + end + + context 'when setting a header on setup with []= syntax' do + let(:block) { proc { |req| req['Server'] = 'Faraday' } } + let(:headers) { subject.to_env(conn).request_headers } + + it { expect(subject.headers['Server']).to eq('Faraday') } + it { expect(headers['mime-version']).to eq('1.0') } + it { expect(headers['server']).to eq('Faraday') } + end + + context 'when setting the body on setup' do + let(:block) { proc { |req| req.body = 'hi' } } + + it { expect(subject.body).to eq('hi') } + it { expect(subject.to_env(conn).body).to eq('hi') } + end + + context 'with global request options set' do + let(:env_request) { subject.to_env(conn).request } + + before do + conn.options.timeout = 3 + conn.options.open_timeout = 5 + conn.ssl.verify = false + conn.proxy = 'http://proxy.com' + end + + it { expect(subject.options.timeout).to eq(3) } + it { expect(subject.options.open_timeout).to eq(5) } + it { expect(env_request.timeout).to eq(3) } + it { expect(env_request.open_timeout).to eq(5) } + + context 'and per-request options set' do + let(:block) do + proc do |req| + req.options.timeout = 10 + req.options.boundary = 'boo' + req.options.oauth[:consumer_secret] = 'xyz' + req.options.context = { + foo: 'foo', + bar: 'bar' + } + end + end + + it { expect(subject.options.timeout).to eq(10) } + it { expect(subject.options.open_timeout).to eq(5) } + it { expect(env_request.timeout).to eq(10) } + it { expect(env_request.open_timeout).to eq(5) } + it { expect(env_request.boundary).to eq('boo') } + it { expect(env_request.context).to eq(foo: 'foo', bar: 'bar') } + it do + oauth_expected = { consumer_secret: 'xyz', consumer_key: 'anonymous' } + expect(env_request.oauth).to eq(oauth_expected) + end + end + end + + it 'supports marshal serialization' do + expect(Marshal.load(Marshal.dump(subject))).to eq(subject) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/json_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/json_spec.rb new file mode 100644 index 0000000..a98e8a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/json_spec.rb @@ -0,0 +1,119 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Response::Json, type: :response do + 
let(:options) { {} } + let(:headers) { {} } + let(:middleware) do + described_class.new(lambda { |env| + Faraday::Response.new(env) + }, **options) + end + + def process(body, content_type = 'application/json', options = {}) + env = { + body: body, request: options, + request_headers: Faraday::Utils::Headers.new, + response_headers: Faraday::Utils::Headers.new(headers) + } + env[:response_headers]['content-type'] = content_type if content_type + yield(env) if block_given? + middleware.call(Faraday::Env.from(env)) + end + + context 'no type matching' do + it "doesn't change nil body" do + expect(process(nil).body).to be_nil + end + + it 'nullifies empty body' do + expect(process('').body).to be_nil + end + + it 'parses json body' do + response = process('{"a":1}') + expect(response.body).to eq('a' => 1) + expect(response.env[:raw_body]).to be_nil + end + end + + context 'with preserving raw' do + let(:options) { { preserve_raw: true } } + + it 'parses json body' do + response = process('{"a":1}') + expect(response.body).to eq('a' => 1) + expect(response.env[:raw_body]).to eq('{"a":1}') + end + end + + context 'with default regexp type matching' do + it 'parses json body of correct type' do + response = process('{"a":1}', 'application/x-json') + expect(response.body).to eq('a' => 1) + end + + it 'ignores json body of incorrect type' do + response = process('{"a":1}', 'text/json-xml') + expect(response.body).to eq('{"a":1}') + end + end + + context 'with array type matching' do + let(:options) { { content_type: %w[a/b c/d] } } + + it 'parses json body of correct type' do + expect(process('{"a":1}', 'a/b').body).to be_a(Hash) + expect(process('{"a":1}', 'c/d').body).to be_a(Hash) + end + + it 'ignores json body of incorrect type' do + expect(process('{"a":1}', 'a/d').body).not_to be_a(Hash) + end + end + + it 'chokes on invalid json' do + expect { process('{!') }.to raise_error(Faraday::ParsingError) + end + + it 'includes the response on the ParsingError instance' do + begin + process('{') { |env| env[:response] = Faraday::Response.new } + raise 'Parsing should have failed.' 
+ rescue Faraday::ParsingError => e + expect(e.response).to be_a(Faraday::Response) + end + end + + context 'HEAD responses' do + it "nullifies the body if it's only one space" do + response = process(' ') + expect(response.body).to be_nil + end + + it "nullifies the body if it's two spaces" do + response = process('  ') + expect(response.body).to be_nil + end + end + + context 'JSON options' do + let(:body) { '{"a": 1}' } + let(:result) { { a: 1 } } + let(:options) do + { + parser_options: { + symbolize_names: true + } + } + end + + it 'passes relevant options to JSON parse' do + expect(::JSON).to receive(:parse) + .with(body, options[:parser_options]) + .and_return(result) + + response = process(body) + expect(response.body).to eq(result) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/logger_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/logger_spec.rb new file mode 100644 index 0000000..10eeff5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/logger_spec.rb @@ -0,0 +1,220 @@ +# frozen_string_literal: true + +require 'stringio' +require 'logger' + +RSpec.describe Faraday::Response::Logger do + let(:string_io) { StringIO.new } + let(:logger) { Logger.new(string_io) } + let(:logger_options) { {} } + let(:conn) do + rubbles = ['Barney', 'Betty', 'Bam Bam'] + + Faraday.new do |b| + b.response :logger, logger, logger_options do |logger| + logger.filter(/(soylent green is) (.+)/, '\1 tasty') + logger.filter(/(api_key:).*"(.+)."/, '\1[API_KEY]') + logger.filter(/(password)=(.+)/, '\1=[HIDDEN]') + end + b.adapter :test do |stubs| + stubs.get('/hello') { [200, { 'Content-Type' => 'text/html' }, 'hello'] } + stubs.post('/ohai') { [200, { 'Content-Type' => 'text/html' }, 'fred'] } + stubs.post('/ohyes') { [200, { 'Content-Type' => 'text/html' }, 'pebbles'] } + stubs.get('/rubbles') { [200, { 'Content-Type' => 'application/json' }, rubbles] } + stubs.get('/filtered_body') { [200, { 'Content-Type' => 'text/html' }, 'soylent green is people'] } + stubs.get('/filtered_headers') { [200, { 'Content-Type' => 'text/html' }, 'headers response'] } + stubs.get('/filtered_params') { [200, { 'Content-Type' => 'text/html' }, 'params response'] } + stubs.get('/filtered_url') { [200, { 'Content-Type' => 'text/html' }, 'url response'] } + end + end + end + + before do + logger.level = Logger::DEBUG + end + + it 'still returns output' do + resp = conn.get '/hello', nil, accept: 'text/html' + expect(resp.body).to eq('hello') + end + + context 'without configuration' do + let(:conn) do + Faraday.new do |b| + b.response :logger + b.adapter :test do |stubs| + stubs.get('/hello') { [200, { 'Content-Type' => 'text/html' }, 'hello'] } + end + end + end + + it 'defaults to stdout' do + expect(Logger).to receive(:new).with($stdout).and_return(Logger.new(nil)) + conn.get('/hello') + end + end + + context 'with default formatter' do + let(:formatter) { instance_double(Faraday::Logging::Formatter, request: true, response: true, filter: []) } + + before { allow(Faraday::Logging::Formatter).to receive(:new).and_return(formatter) } + + it 'delegates logging to the formatter' do + expect(formatter).to receive(:request).with(an_instance_of(Faraday::Env)) + expect(formatter).to receive(:response).with(an_instance_of(Faraday::Env)) + conn.get '/hello' + end + end + + context 'with custom formatter' do + let(:formatter_class) do + Class.new(Faraday::Logging::Formatter) do + def request(_env) + info 'Custom log formatter
request' + end + + def response(_env) + info 'Custom log formatter response' + end + end + end + + let(:logger_options) { { formatter: formatter_class } } + + it 'logs with custom formatter' do + conn.get '/hello' + + expect(string_io.string).to match('Custom log formatter request') + expect(string_io.string).to match('Custom log formatter response') + end + end + + it 'logs method and url' do + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).to match('GET http:/hello') + end + + it 'logs request headers by default' do + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).to match(%(Accept: "text/html)) + end + + it 'logs response headers by default' do + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).to match(%(Content-Type: "text/html)) + end + + it 'does not log request body by default' do + conn.post '/ohai', 'name=Unagi', accept: 'text/html' + expect(string_io.string).not_to match(%(name=Unagi)) + end + + it 'does not log response body by default' do + conn.post '/ohai', 'name=Toro', accept: 'text/html' + expect(string_io.string).not_to match(%(fred)) + end + + it 'logs filter headers' do + conn.headers = { 'api_key' => 'ABC123' } + conn.get '/filtered_headers', nil, accept: 'text/html' + expect(string_io.string).to match(%(api_key:)) + expect(string_io.string).to match(%([API_KEY])) + expect(string_io.string).not_to match(%(ABC123)) + end + + it 'logs filter url' do + conn.get '/filtered_url?password=hunter2', nil, accept: 'text/html' + expect(string_io.string).to match(%([HIDDEN])) + expect(string_io.string).not_to match(%(hunter2)) + end + + context 'when not logging request headers' do + let(:logger_options) { { headers: { request: false } } } + + it 'does not log request headers if option is false' do + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).not_to match(%(Accept: "text/html)) + end + end + + context 'when not logging response headers' do + let(:logger_options) { { headers: { response: false } } } + + it 'does not log response headers if option is false' do + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).not_to match(%(Content-Type: "text/html)) + end + end + + context 'when logging request body' do + let(:logger_options) { { bodies: { request: true } } } + + it 'log only request body' do + conn.post '/ohyes', 'name=Tamago', accept: 'text/html' + expect(string_io.string).to match(%(name=Tamago)) + expect(string_io.string).not_to match(%(pebbles)) + end + end + + context 'when logging response body' do + let(:logger_options) { { bodies: { response: true } } } + + it 'log only response body' do + conn.post '/ohyes', 'name=Hamachi', accept: 'text/html' + expect(string_io.string).to match(%(pebbles)) + expect(string_io.string).not_to match(%(name=Hamachi)) + end + end + + context 'when logging request and response bodies' do + let(:logger_options) { { bodies: true } } + + it 'log request and response body' do + conn.post '/ohyes', 'name=Ebi', accept: 'text/html' + expect(string_io.string).to match(%(name=Ebi)) + expect(string_io.string).to match(%(pebbles)) + end + + it 'log response body object' do + conn.get '/rubbles', nil, accept: 'text/html' + expect(string_io.string).to match(%([\"Barney\", \"Betty\", \"Bam Bam\"]\n)) + end + + it 'logs filter body' do + conn.get '/filtered_body', nil, accept: 'text/html' + expect(string_io.string).to match(%(soylent green is)) + expect(string_io.string).to match(%(tasty)) + expect(string_io.string).not_to match(%(people)) 
+ end + end + + context 'when using log_level' do + let(:logger_options) { { bodies: true, log_level: :debug } } + + it 'logs request/request body on the specified level (debug)' do + logger.level = Logger::DEBUG + conn.post '/ohyes', 'name=Ebi', accept: 'text/html' + expect(string_io.string).to match(%(name=Ebi)) + expect(string_io.string).to match(%(pebbles)) + end + + it 'logs headers on the debug level' do + logger.level = Logger::DEBUG + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).to match(%(Content-Type: "text/html)) + end + + it 'does not log request/response body on the info level' do + logger.level = Logger::INFO + conn.post '/ohyes', 'name=Ebi', accept: 'text/html' + expect(string_io.string).not_to match(%(name=Ebi)) + expect(string_io.string).not_to match(%(pebbles)) + end + + it 'does not log headers on the info level' do + logger.level = Logger::INFO + conn.get '/hello', nil, accept: 'text/html' + expect(string_io.string).not_to match(%(Content-Type: "text/html)) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/middleware_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/middleware_spec.rb new file mode 100644 index 0000000..a79208c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/middleware_spec.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Response::Middleware do + let(:conn) do + Faraday.new do |b| + b.use custom_middleware + b.adapter :test do |stub| + stub.get('ok') { [200, { 'Content-Type' => 'text/html' }, '<body></body>'] } + stub.get('not_modified') { [304, nil, nil] } + stub.get('no_content') { [204, nil, nil] } + end + end + end + + context 'with a custom ResponseMiddleware' do + let(:custom_middleware) do + Class.new(Faraday::Response::Middleware) do + def parse(body) + body.upcase + end + end + end + + it 'parses the response' do + expect(conn.get('ok').body).to eq('<BODY></BODY>') + end + end + + context 'with a custom ResponseMiddleware and private parse' do + let(:custom_middleware) do + Class.new(Faraday::Response::Middleware) do + private + + def parse(body) + body.upcase + end + end + end + + it 'parses the response' do + expect(conn.get('ok').body).to eq('<BODY></BODY>') + end + end + + context 'with a custom ResponseMiddleware but empty response' do + let(:custom_middleware) do + Class.new(Faraday::Response::Middleware) do + def parse(_body) + raise 'this should not be called' + end + end + end + + it 'raises exception for 200 responses' do + expect { conn.get('ok') }.to raise_error(StandardError) + end + + it 'doesn\'t call the middleware for 204 responses' do + expect_any_instance_of(custom_middleware).not_to receive(:parse) + expect(conn.get('no_content').body).to be_nil + end + + it 'doesn\'t call the middleware for 304 responses' do + expect_any_instance_of(custom_middleware).not_to receive(:parse) + expect(conn.get('not_modified').body).to be_nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/raise_error_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/raise_error_spec.rb new file mode 100644 index 0000000..a80472d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response/raise_error_spec.rb @@ -0,0 +1,169 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Response::RaiseError do + let(:conn) do + Faraday.new do |b| + b.response :raise_error + b.adapter :test do |stub| + stub.get('ok') { [200, { 'Content-Type' => 'text/html' }, '<body></body>'] } + stub.get('bad-request') { [400, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('unauthorized') { [401, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('forbidden') { [403, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('not-found') { [404, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('proxy-error') { [407, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('conflict') { [409, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('unprocessable-entity') { [422, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('4xx') { [499, { 'X-Reason' => 'because' }, 'keep looking'] } + stub.get('nil-status') { [nil, { 'X-Reason' => 'nil' }, 'fail'] } + stub.get('server-error') { [500, { 'X-Error' => 'bailout' }, 'fail'] } + end + end + end + + it 'raises no exception for 200 responses' do + expect { conn.get('ok') }.not_to raise_error + end + + it 'raises Faraday::BadRequestError for 400 responses' do + expect { conn.get('bad-request') }.to raise_error(Faraday::BadRequestError) do |ex| + expect(ex.message).to eq('the server responded with status 400') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(400) + expect(ex.response_status).to eq(400) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::UnauthorizedError for 401 responses' do + expect { conn.get('unauthorized') }.to raise_error(Faraday::UnauthorizedError) do |ex| + expect(ex.message).to eq('the server responded with status 401') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(401) + expect(ex.response_status).to eq(401) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::ForbiddenError for 403 responses' do + expect { conn.get('forbidden') }.to raise_error(Faraday::ForbiddenError) do |ex| + expect(ex.message).to eq('the server responded with status 403') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(403) + expect(ex.response_status).to eq(403) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::ResourceNotFound for 404 responses' do + expect { conn.get('not-found') }.to raise_error(Faraday::ResourceNotFound) do |ex| + expect(ex.message).to eq('the server responded with status 404') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(404) + expect(ex.response_status).to eq(404) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::ProxyAuthError for 407 responses' do + expect { conn.get('proxy-error') }.to raise_error(Faraday::ProxyAuthError) do |ex| + expect(ex.message).to eq('407 "Proxy Authentication Required"') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(407) + expect(ex.response_status).to eq(407) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::ConflictError for 409 responses' do + expect { conn.get('conflict') }.to raise_error(Faraday::ConflictError) do |ex| + expect(ex.message).to eq('the server responded with status 409') +
expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(409) + expect(ex.response_status).to eq(409) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::UnprocessableEntityError for 422 responses' do + expect { conn.get('unprocessable-entity') }.to raise_error(Faraday::UnprocessableEntityError) do |ex| + expect(ex.message).to eq('the server responded with status 422') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(422) + expect(ex.response_status).to eq(422) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::NilStatusError for nil status in response' do + expect { conn.get('nil-status') }.to raise_error(Faraday::NilStatusError) do |ex| + expect(ex.message).to eq('http status could not be derived from the server response') + expect(ex.response[:headers]['X-Reason']).to eq('nil') + expect(ex.response[:status]).to be_nil + expect(ex.response_status).to be_nil + expect(ex.response_body).to eq('fail') + expect(ex.response_headers['X-Reason']).to eq('nil') + end + end + + it 'raises Faraday::ClientError for other 4xx responses' do + expect { conn.get('4xx') }.to raise_error(Faraday::ClientError) do |ex| + expect(ex.message).to eq('the server responded with status 499') + expect(ex.response[:headers]['X-Reason']).to eq('because') + expect(ex.response[:status]).to eq(499) + expect(ex.response_status).to eq(499) + expect(ex.response_body).to eq('keep looking') + expect(ex.response_headers['X-Reason']).to eq('because') + end + end + + it 'raises Faraday::ServerError for 500 responses' do + expect { conn.get('server-error') }.to raise_error(Faraday::ServerError) do |ex| + expect(ex.message).to eq('the server responded with status 500') + expect(ex.response[:headers]['X-Error']).to eq('bailout') + expect(ex.response[:status]).to eq(500) + expect(ex.response_status).to eq(500) + expect(ex.response_body).to eq('fail') + expect(ex.response_headers['X-Error']).to eq('bailout') + end + end + + describe 'request info' do + let(:conn) do + Faraday.new do |b| + b.response :raise_error + b.adapter :test do |stub| + stub.post('request?full=true', request_body, request_headers) do + [400, { 'X-Reason' => 'because' }, 'keep looking'] + end + end + end + end + let(:request_body) { JSON.generate({ 'item' => 'sth' }) } + let(:request_headers) { { 'Authorization' => 'Basic 123' } } + + subject(:perform_request) do + conn.post 'request' do |req| + req.headers['Authorization'] = 'Basic 123' + req.params[:full] = true + req.body = request_body + end + end + + it 'returns the request info in the exception' do + expect { perform_request }.to raise_error(Faraday::BadRequestError) do |ex| + expect(ex.response[:request][:method]).to eq(:post) + expect(ex.response[:request][:url_path]).to eq('/request') + expect(ex.response[:request][:params]).to eq({ 'full' => 'true' }) + expect(ex.response[:request][:headers]).to match(a_hash_including(request_headers)) + expect(ex.response[:request][:body]).to eq(request_body) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response_spec.rb new file mode 100644 index 0000000..1715947 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/response_spec.rb @@ -0,0 +1,75 @@ +# 
frozen_string_literal: true + +RSpec.describe Faraday::Response do + subject { Faraday::Response.new(env) } + + let(:env) do + Faraday::Env.from(status: 404, body: 'yikes', + response_headers: { 'Content-Type' => 'text/plain' }) + end + + it { expect(subject.finished?).to be_truthy } + it { expect { subject.finish({}) }.to raise_error(RuntimeError) } + it { expect(subject.success?).to be_falsey } + it { expect(subject.status).to eq(404) } + it { expect(subject.body).to eq('yikes') } + it { expect(subject.headers['Content-Type']).to eq('text/plain') } + it { expect(subject['content-type']).to eq('text/plain') } + + describe '#apply_request' do + before { subject.apply_request(body: 'a=b', method: :post) } + + it { expect(subject.body).to eq('yikes') } + it { expect(subject.env[:method]).to eq(:post) } + end + + describe '#to_hash' do + let(:hash) { subject.to_hash } + + it { expect(hash).to be_a(Hash) } + it { expect(hash[:status]).to eq(subject.status) } + it { expect(hash[:response_headers]).to eq(subject.headers) } + it { expect(hash[:body]).to eq(subject.body) } + end + + describe 'marshal serialization support' do + subject { Faraday::Response.new } + let(:loaded) { Marshal.load(Marshal.dump(subject)) } + + before do + subject.on_complete {} + subject.finish(env.merge(params: 'moo')) + end + + it { expect(loaded.env[:params]).to be_nil } + it { expect(loaded.env[:body]).to eq(env[:body]) } + it { expect(loaded.env[:response_headers]).to eq(env[:response_headers]) } + it { expect(loaded.env[:status]).to eq(env[:status]) } + end + + describe '#on_complete' do + subject { Faraday::Response.new } + + it 'parse body on finish' do + subject.on_complete { |env| env[:body] = env[:body].upcase } + subject.finish(env) + + expect(subject.body).to eq('YIKES') + end + + it 'can access response body in on_complete callback' do + subject.on_complete { |env| env[:body] = subject.body.upcase } + subject.finish(env) + + expect(subject.body).to eq('YIKES') + end + + it 'can access response body in on_complete callback' do + callback_env = nil + subject.on_complete { |env| callback_env = env } + subject.finish({}) + + expect(subject.env).to eq(callback_env) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils/headers_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils/headers_spec.rb new file mode 100644 index 0000000..ffe0c33 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils/headers_spec.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Utils::Headers do + subject { Faraday::Utils::Headers.new } + + context 'when Content-Type is set to application/json' do + before { subject['Content-Type'] = 'application/json' } + + it { expect(subject.keys).to eq(['Content-Type']) } + it { expect(subject['Content-Type']).to eq('application/json') } + it { expect(subject['CONTENT-TYPE']).to eq('application/json') } + it { expect(subject['content-type']).to eq('application/json') } + it { is_expected.to include('content-type') } + end + + context 'when Content-Type is set to application/xml' do + before { subject['Content-Type'] = 'application/xml' } + + it { expect(subject.keys).to eq(['Content-Type']) } + it { expect(subject['Content-Type']).to eq('application/xml') } + it { expect(subject['CONTENT-TYPE']).to eq('application/xml') } + it { expect(subject['content-type']).to eq('application/xml') } + it { is_expected.to include('content-type') } + end + + describe '#fetch' do + before { 
subject['Content-Type'] = 'application/json' } + + it { expect(subject.fetch('Content-Type')).to eq('application/json') } + it { expect(subject.fetch('CONTENT-TYPE')).to eq('application/json') } + it { expect(subject.fetch(:content_type)).to eq('application/json') } + it { expect(subject.fetch('invalid', 'default')).to eq('default') } + it { expect(subject.fetch('invalid', false)).to eq(false) } + it { expect(subject.fetch('invalid', nil)).to be_nil } + it { expect(subject.fetch('Invalid') { |key| "#{key} key" }).to eq('Invalid key') } + it 'calls a block when provided' do + block_called = false + expect(subject.fetch('content-type') { block_called = true }).to eq('application/json') + expect(block_called).to be_falsey + end + it 'raises an error if key not found' do + expected_error = defined?(KeyError) ? KeyError : IndexError + expect { subject.fetch('invalid') }.to raise_error(expected_error) + end + end + + describe '#delete' do + before do + subject['Content-Type'] = 'application/json' + @deleted = subject.delete('content-type') + end + + it { expect(@deleted).to eq('application/json') } + it { expect(subject.size).to eq(0) } + it { is_expected.not_to include('content-type') } + it { expect(subject.delete('content-type')).to be_nil } + end + + describe '#parse' do + before { subject.parse(headers) } + + context 'when response headers leave http status line out' do + let(:headers) { "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n" } + + it { expect(subject.keys).to eq(%w[Content-Type]) } + it { expect(subject['Content-Type']).to eq('text/html') } + it { expect(subject['content-type']).to eq('text/html') } + end + + context 'when response headers values include a colon' do + let(:headers) { "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nLocation: http://sushi.com/\r\n\r\n" } + + it { expect(subject['location']).to eq('http://sushi.com/') } + end + + context 'when response headers include a blank line' do + let(:headers) { "HTTP/1.1 200 OK\r\n\r\nContent-Type: text/html\r\n\r\n" } + + it { expect(subject['content-type']).to eq('text/html') } + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils_spec.rb new file mode 100644 index 0000000..c3da746 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday/utils_spec.rb @@ -0,0 +1,56 @@ +# frozen_string_literal: true + +RSpec.describe Faraday::Utils do + describe 'headers parsing' do + let(:multi_response_headers) do + "HTTP/1.x 500 OK\r\nContent-Type: text/html; charset=UTF-8\r\n" \ + "HTTP/1.x 200 OK\r\nContent-Type: application/json; charset=UTF-8\r\n\r\n" + end + + it 'parse headers for aggregated responses' do + headers = Faraday::Utils::Headers.new + headers.parse(multi_response_headers) + + result = headers.to_hash + + expect(result['Content-Type']).to eq('application/json; charset=UTF-8') + end + end + + describe 'URI parsing' do + let(:url) { 'http://example.com/abc' } + + it 'escapes safe buffer' do + str = FakeSafeBuffer.new('$32,000.00') + expect(Faraday::Utils.escape(str)).to eq('%2432%2C000.00') + end + + it 'parses with default parser' do + with_default_uri_parser(nil) do + uri = normalize(url) + expect(uri.host).to eq('example.com') + end + end + + it 'parses with URI' do + with_default_uri_parser(::URI) do + uri = normalize(url) + expect(uri.host).to eq('example.com') + end + end + + it 'parses with block' do + with_default_uri_parser(->(u) { "booya#{'!' 
* u.size}" }) do + expect(normalize(url)).to eq('booya!!!!!!!!!!!!!!!!!!!!!!') + end + end + + it 'replaces headers hash' do + headers = Faraday::Utils::Headers.new('authorization' => 't0ps3cr3t!') + expect(headers).to have_key('authorization') + + headers.replace('content-type' => 'text/plain') + expect(headers).not_to have_key('authorization') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday_spec.rb new file mode 100644 index 0000000..8b603eb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/faraday_spec.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +RSpec.describe Faraday do + it 'has a version number' do + expect(Faraday::VERSION).not_to be nil + end + + context 'proxies to default_connection' do + let(:mock_connection) { double('Connection') } + before do + Faraday.default_connection = mock_connection + end + + it 'proxies methods that exist on the default_connection' do + expect(mock_connection).to receive(:this_should_be_proxied) + + Faraday.this_should_be_proxied + end + + it 'uses method_missing on Faraday if there is no proxyable method' do + expect { Faraday.this_method_does_not_exist }.to raise_error( + NoMethodError, + "undefined method `this_method_does_not_exist' for Faraday:Module" + ) + end + + it 'proxied methods can be accessed' do + allow(mock_connection).to receive(:this_should_be_proxied) + + expect(Faraday.method(:this_should_be_proxied)).to be_a(Method) + end + + after do + Faraday.default_connection = nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/spec_helper.rb new file mode 100644 index 0000000..4971479 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/spec_helper.rb @@ -0,0 +1,134 @@ +# frozen_string_literal: true + +# This file was generated by the `rspec --init` command. Conventionally, all +# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. +# The generated `.rspec` file contains `--require spec_helper` which will cause +# this file to always be loaded, without a need to explicitly require it in any +# files. +# +# Given that it is always loaded, you are encouraged to keep this file as +# light-weight as possible. Requiring heavyweight dependencies from this file +# will add to the boot time of your test suite on EVERY test run, even for an +# individual file that may not need all of that loaded. Instead, consider making +# a separate helper file that requires the additional dependencies and performs +# the additional setup, and require it from the spec files that actually need +# it. +# +# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration + +require 'simplecov' +require 'coveralls' +require 'webmock/rspec' +WebMock.disable_net_connect!(allow_localhost: true) + +SimpleCov.formatters = [SimpleCov::Formatter::HTMLFormatter, Coveralls::SimpleCov::Formatter] + +SimpleCov.start do + add_filter '/spec/' + minimum_coverage 84 + minimum_coverage_by_file 26 +end + +# Ensure all /lib files are loaded +# so they will be included in the test coverage report. +Dir['./lib/**/*.rb'].sort.each { |file| require file } + +require 'faraday' +require 'pry' + +Dir['./spec/support/**/*.rb'].sort.each { |f| require f } + +Faraday::Deprecate.skip = false + +RSpec.configure do |config| + # rspec-expectations config goes here. 
You can use an alternate + # assertion/expectation library such as wrong or the stdlib/minitest + # assertions if you prefer. + config.expect_with :rspec do |expectations| + # This option will default to `true` in RSpec 4. It makes the `description` + # and `failure_message` of custom matchers include text for helper methods + # defined using `chain`, e.g.: + # be_bigger_than(2).and_smaller_than(4).description + # # => "be bigger than 2 and smaller than 4" + # ...rather than: + # # => "be bigger than 2" + expectations.include_chain_clauses_in_custom_matcher_descriptions = true + end + + # rspec-mocks config goes here. You can use an alternate test double + # library (such as bogus or mocha) by changing the `mock_with` option here. + config.mock_with :rspec do |mocks| + # Prevents you from mocking or stubbing a method that does not exist on + # a real object. This is generally recommended, and will default to + # `true` in RSpec 4. + mocks.verify_partial_doubles = true + end + + # This option will default to `:apply_to_host_groups` in RSpec 4 (and will + # have no way to turn it off -- the option exists only for backwards + # compatibility in RSpec 3). It causes shared context metadata to be + # inherited by the metadata hash of host groups and examples, rather than + # triggering implicit auto-inclusion in groups with matching metadata. + config.shared_context_metadata_behavior = :apply_to_host_groups + + # This allows you to limit a spec run to individual examples or groups + # you care about by tagging them with `:focus` metadata. When nothing + # is tagged with `:focus`, all examples get run. RSpec also provides + # aliases for `it`, `describe`, and `context` that include `:focus` + # metadata: `fit`, `fdescribe` and `fcontext`, respectively. + # config.filter_run_when_matching :focus + + # Allows RSpec to persist some state between runs in order to support + # the `--only-failures` and `--next-failure` CLI options. We recommend + # you configure your source control system to ignore this file. + # config.example_status_persistence_file_path = "spec/examples.txt" + + # Limits the available syntax to the non-monkey patched syntax that is + # recommended. For more details, see: + # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/ + # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/ + # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode + # config.disable_monkey_patching! + + # This setting enables warnings. It's recommended, but in some cases may + # be too noisy due to issues in dependencies. + # config.warnings = true + + # Many RSpec users commonly either run the entire suite or an individual + # file, and it's useful to allow more verbose output when running an + # individual spec file. + # if config.files_to_run.one? + # # Use the documentation formatter for detailed output, + # # unless a formatter has already been configured + # # (e.g. via a command-line flag). + # config.default_formatter = "doc" + # end + + # Print the 10 slowest examples and example groups at the + # end of the spec run, to help surface which specs are running + # particularly slow. + # config.profile_examples = 10 + + # Run specs in random order to surface order dependencies. If you find an + # order dependency and want to debug it, you can fix the order by providing + # the seed, which is printed after each run. 
+ # --seed 1234 + config.order = :random + + # Seed global randomization in this process using the `--seed` CLI option. + # Setting this allows you to use `--seed` to deterministically reproduce + # test failures related to randomization by passing the same `--seed` value + # as the one that triggered the failure. + Kernel.srand config.seed + + config.include Faraday::HelperMethods +end + +# Extends RSpec DocumentationFormatter to hide skipped tests. +module FormatterOverrides + def example_pending(_arg); end + + def dump_pending(_arg); end + + RSpec::Core::Formatters::DocumentationFormatter.prepend self +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/disabling_stub.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/disabling_stub.rb new file mode 100644 index 0000000..3df2f21 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/disabling_stub.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +# Allows to disable WebMock stubs +module DisablingStub + def disable + @disabled = true + end + + def disabled? + @disabled + end + + WebMock::RequestStub.prepend self +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/fake_safe_buffer.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/fake_safe_buffer.rb new file mode 100644 index 0000000..62a56aa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/fake_safe_buffer.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +# emulates ActiveSupport::SafeBuffer#gsub +FakeSafeBuffer = Struct.new(:string) do + def to_s + self + end + + def gsub(regex) + string.gsub(regex) do + match, = $&, '' =~ /a/ + yield(match) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/helper_methods.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/helper_methods.rb new file mode 100644 index 0000000..d63bd59 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/helper_methods.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require 'multipart_parser/reader' + +module Faraday + module HelperMethods + def self.included(base) + base.extend ClassMethods + end + + module ClassMethods + def features(*features) + @features = features + end + + def on_feature(name) + yield if block_given? && feature?(name) + end + + def feature?(name) + if @features.nil? + superclass.feature?(name) if superclass.respond_to?(:feature?) + elsif @features.include?(name) + true + end + end + + def method_with_body?(method) + METHODS_WITH_BODY.include?(method.to_s) + end + end + + def ssl_mode? + ENV['SSL'] == 'yes' + end + + def normalize(url) + Faraday::Utils::URI(url) + end + + def with_default_uri_parser(parser) + old_parser = Faraday::Utils.default_uri_parser + begin + Faraday::Utils.default_uri_parser = parser + yield + ensure + Faraday::Utils.default_uri_parser = old_parser + end + end + + def with_env(new_env) + old_env = {} + + new_env.each do |key, value| + old_env[key] = ENV.fetch(key, false) + ENV[key] = value + end + + begin + yield + ensure + old_env.each do |key, value| + value == false ? 
ENV.delete(key) : ENV[key] = value + end + end + end + + def with_env_proxy_disabled + Faraday.ignore_env_proxy = true + + begin + yield + ensure + Faraday.ignore_env_proxy = false + end + end + + def capture_warnings + old = $stderr + $stderr = StringIO.new + begin + yield + $stderr.string + ensure + $stderr = old + end + end + + def multipart_file + Faraday::FilePart.new(__FILE__, 'text/x-ruby') + end + + # parse boundary out of a Content-Type header like: + # Content-Type: multipart/form-data; boundary=gc0p4Jq0M2Yt08jU534c0p + def parse_multipart_boundary(ctype) + MultipartParser::Reader.extract_boundary_value(ctype) + end + + # parse a multipart MIME message, returning a hash of any multipart errors + def parse_multipart(boundary, body) + reader = MultipartParser::Reader.new(boundary) + result = { errors: [], parts: [] } + def result.part(name) + hash = self[:parts].detect { |h| h[:part].name == name } + [hash[:part], hash[:body].join] + end + + reader.on_part do |part| + result[:parts] << thispart = { + part: part, + body: [] + } + part.on_data do |chunk| + thispart[:body] << chunk + end + end + reader.on_error do |msg| + result[:errors] << msg + end + reader.write(body) + result + end + + def method_with_body?(method) + self.class.method_with_body?(method) + end + + def big_string + kb = 1024 + (32..126).map(&:chr).cycle.take(50 * kb).join + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/adapter.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/adapter.rb new file mode 100644 index 0000000..47c0d24 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/adapter.rb @@ -0,0 +1,105 @@ +# frozen_string_literal: true + +shared_examples 'an adapter' do |**options| + before { skip } if options[:skip] + + context 'with SSL enabled' do + before { ENV['SSL'] = 'yes' } + include_examples 'adapter examples', options + end + + context 'with SSL disabled' do + before { ENV['SSL'] = 'no' } + include_examples 'adapter examples', options + end +end + +shared_examples 'adapter examples' do |**options| + include Faraday::StreamingResponseChecker + + let(:adapter) { described_class.name.split('::').last } + + let(:conn_options) { { headers: { 'X-Faraday-Adapter' => adapter } }.merge(options[:conn_options] || {}) } + + let(:adapter_options) do + return [] unless options[:adapter_options] + + if options[:adapter_options].is_a?(Array) + options[:adapter_options] + else + [options[:adapter_options]] + end + end + + let(:protocol) { ssl_mode? ? 'https' : 'http' } + let(:remote) { "#{protocol}://example.com" } + let(:stub_remote) { remote } + + let(:conn) do + conn_options[:ssl] ||= {} + conn_options[:ssl][:ca_file] ||= ENV['SSL_FILE'] + + Faraday.new(remote, conn_options) do |conn| + conn.request :multipart + conn.request :url_encoded + conn.response :raise_error + conn.adapter described_class, *adapter_options + end + end + + let!(:request_stub) { stub_request(http_method, stub_remote) } + + after do + expect(request_stub).to have_been_requested unless request_stub.disabled? 
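+    # (the assertion above lets a spec opt out of stub verification by calling
+    # `request_stub.disable` -- see the DisablingStub helper)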
+ end + + describe '#delete' do + let(:http_method) { :delete } + + it_behaves_like 'a request method', :delete + end + + describe '#get' do + let(:http_method) { :get } + + it_behaves_like 'a request method', :get + end + + describe '#head' do + let(:http_method) { :head } + + it_behaves_like 'a request method', :head + end + + describe '#options' do + let(:http_method) { :options } + + it_behaves_like 'a request method', :options + end + + describe '#patch' do + let(:http_method) { :patch } + + it_behaves_like 'a request method', :patch + end + + describe '#post' do + let(:http_method) { :post } + + it_behaves_like 'a request method', :post + end + + describe '#put' do + let(:http_method) { :put } + + it_behaves_like 'a request method', :put + end + + on_feature :trace_method do + describe '#trace' do + let(:http_method) { :trace } + + it_behaves_like 'a request method', :trace + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/params_encoder.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/params_encoder.rb new file mode 100644 index 0000000..38c8567 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/params_encoder.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +shared_examples 'a params encoder' do + it 'escapes safe buffer' do + monies = FakeSafeBuffer.new('$32,000.00') + expect(subject.encode('a' => monies)).to eq('a=%2432%2C000.00') + end + + it 'raises type error for empty string' do + expect { subject.encode('') }.to raise_error(TypeError) do |error| + expect(error.message).to eq("Can't convert String into Hash.") + end + end + + it 'encodes nil' do + expect(subject.encode('a' => nil)).to eq('a') + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/request_method.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/request_method.rb new file mode 100644 index 0000000..cb71251 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/shared_examples/request_method.rb @@ -0,0 +1,262 @@ +# frozen_string_literal: true + +shared_examples 'proxy examples' do + it 'handles requests with proxy' do + res = conn.public_send(http_method, '/') + + expect(res.status).to eq(200) + end + + it 'handles proxy failures' do + request_stub.to_return(status: 407) + + expect { conn.public_send(http_method, '/') }.to raise_error(Faraday::ProxyAuthError) + end +end + +shared_examples 'a request method' do |http_method| + let(:query_or_body) { method_with_body?(http_method) ? 
:body : :query } + let(:response) { conn.public_send(http_method, '/') } + + unless http_method == :head && feature?(:skip_response_body_on_head) + it 'retrieves the response body' do + res_body = 'test' + request_stub.to_return(body: res_body) + expect(conn.public_send(http_method, '/').body).to eq(res_body) + end + end + + it 'handles headers with multiple values' do + request_stub.to_return(headers: { 'Set-Cookie' => 'name=value' }) + expect(response.headers['set-cookie']).to eq('name=value') + end + + it 'retrieves the response headers' do + request_stub.to_return(headers: { 'Content-Type' => 'text/plain' }) + expect(response.headers['Content-Type']).to match(%r{text/plain}) + expect(response.headers['content-type']).to match(%r{text/plain}) + end + + it 'sends user agent' do + request_stub.with(headers: { 'User-Agent' => 'Agent Faraday' }) + conn.public_send(http_method, '/', nil, user_agent: 'Agent Faraday') + end + + it 'represents empty body response as blank string' do + expect(response.body).to eq('') + end + + it 'handles connection error' do + request_stub.disable + expect { conn.public_send(http_method, 'http://localhost:4') }.to raise_error(Faraday::ConnectionFailed) + end + + on_feature :local_socket_binding do + it 'binds local socket' do + stub_request(http_method, 'http://example.com') + + host = '1.2.3.4' + port = 1234 + conn_options[:request] = { bind: { host: host, port: port } } + + conn.public_send(http_method, '/') + + expect(conn.options[:bind][:host]).to eq(host) + expect(conn.options[:bind][:port]).to eq(port) + end + end + + # context 'when wrong ssl certificate is provided' do + # let(:ca_file_path) { 'tmp/faraday-different-ca-cert.crt' } + # before { conn_options.merge!(ssl: { ca_file: ca_file_path }) } + # + # it do + # expect { conn.public_send(http_method, '/') }.to raise_error(Faraday::SSLError) # do |ex| + # expect(ex.message).to include?('certificate') + # end + # end + # end + + on_feature :request_body_on_query_methods do + it 'sends request body' do + request_stub.with(Hash[:body, 'test']) + res = if query_or_body == :body + conn.public_send(http_method, '/', 'test') + else + conn.public_send(http_method, '/') do |req| + req.body = 'test' + end + end + expect(res.env.request_body).to eq('test') + end + end + + it 'sends url encoded parameters' do + payload = { name: 'zack' } + request_stub.with(Hash[query_or_body, payload]) + res = conn.public_send(http_method, '/', payload) + if query_or_body == :query + expect(res.env.request_body).to be_nil + else + expect(res.env.request_body).to eq('name=zack') + end + end + + it 'sends url encoded nested parameters' do + payload = { name: { first: 'zack' } } + request_stub.with(Hash[query_or_body, payload]) + conn.public_send(http_method, '/', payload) + end + + # TODO: This needs reimplementation: see https://github.com/lostisland/faraday/issues/718 + # Should raise Faraday::TimeoutError + it 'supports timeout option' do + conn_options[:request] = { timeout: 1 } + request_stub.to_timeout + exc = adapter == 'NetHttp' ? Faraday::ConnectionFailed : Faraday::TimeoutError + expect { conn.public_send(http_method, '/') }.to raise_error(exc) + end + + # TODO: This needs reimplementation: see https://github.com/lostisland/faraday/issues/718 + # Should raise Faraday::ConnectionFailed + it 'supports open_timeout option' do + conn_options[:request] = { open_timeout: 1 } + request_stub.to_timeout + exc = adapter == 'NetHttp' ? 
Faraday::ConnectionFailed : Faraday::TimeoutError + expect { conn.public_send(http_method, '/') }.to raise_error(exc) + end + + # Can't send files on get, head and delete methods + if method_with_body?(http_method) + it 'sends files' do + payload = { uploaded_file: multipart_file } + request_stub.with(headers: { 'Content-Type' => %r{\Amultipart/form-data} }) do |request| + # WebMock does not support matching body for multipart/form-data requests yet :( + # https://github.com/bblimke/webmock/issues/623 + request.body.include?('RubyMultipartPost') + end + conn.public_send(http_method, '/', payload) + end + end + + on_feature :reason_phrase_parse do + it 'parses the reason phrase' do + request_stub.to_return(status: [200, 'OK']) + expect(response.reason_phrase).to eq('OK') + end + end + + on_feature :compression do + # Accept-Encoding header not sent for HEAD requests as body is not expected in the response. + unless http_method == :head + it 'handles gzip compression' do + request_stub.with(headers: { 'Accept-Encoding' => /\bgzip\b/ }) + conn.public_send(http_method, '/') + end + + it 'handles deflate compression' do + request_stub.with(headers: { 'Accept-Encoding' => /\bdeflate\b/ }) + conn.public_send(http_method, '/') + end + end + end + + on_feature :streaming do + describe 'streaming' do + let(:streamed) { [] } + + context 'when response is empty' do + it do + conn.public_send(http_method, '/') do |req| + req.options.on_data = proc { |*args| streamed << args } + end + + expect(streamed).to eq([['', 0]]) + end + end + + context 'when response contains big data' do + before { request_stub.to_return(body: big_string) } + + it 'handles streaming' do + response = conn.public_send(http_method, '/') do |req| + req.options.on_data = proc { |*args| streamed << args } + end + + expect(response.body).to eq('') + check_streaming_response(streamed, chunk_size: 16 * 1024) + end + end + end + end + + on_feature :parallel do + context 'with parallel setup' do + before do + @resp1 = nil + @resp2 = nil + @payload1 = { a: '1' } + @payload2 = { b: '2' } + + request_stub + .with(Hash[query_or_body, @payload1]) + .to_return(body: @payload1.to_json) + + stub_request(http_method, remote) + .with(Hash[query_or_body, @payload2]) + .to_return(body: @payload2.to_json) + + conn.in_parallel do + @resp1 = conn.public_send(http_method, '/', @payload1) + @resp2 = conn.public_send(http_method, '/', @payload2) + + expect(conn.in_parallel?).to be_truthy + expect(@resp1.body).to be_nil + expect(@resp2.body).to be_nil + end + + expect(conn.in_parallel?).to be_falsey + end + + it 'handles parallel requests status' do + expect(@resp1&.status).to eq(200) + expect(@resp2&.status).to eq(200) + end + + unless http_method == :head && feature?(:skip_response_body_on_head) + it 'handles parallel requests body' do + expect(@resp1&.body).to eq(@payload1.to_json) + expect(@resp2&.body).to eq(@payload2.to_json) + end + end + end + end + + context 'when a proxy is provided as option' do + before do + conn_options[:proxy] = 'http://env-proxy.com:80' + end + + include_examples 'proxy examples' + end + + context 'when http_proxy env variable is set' do + let(:proxy_url) { 'http://env-proxy.com:80' } + + around do |example| + with_env 'http_proxy' => proxy_url do + example.run + end + end + + include_examples 'proxy examples' + + context 'when the env proxy is ignored' do + around do |example| + with_env_proxy_disabled(&example) + end + + include_examples 'proxy examples' + end + end +end diff --git 
a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/streaming_response_checker.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/streaming_response_checker.rb new file mode 100644 index 0000000..8ef2599 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/streaming_response_checker.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Faraday + module StreamingResponseChecker + def check_streaming_response(streamed, options = {}) + opts = { + prefix: '', + streaming?: true + }.merge(options) + + expected_response = opts[:prefix] + big_string + + chunks, sizes = streamed.transpose + + # Check that the total size of the chunks (via the last size returned) + # is the same size as the expected_response + expect(sizes.last).to eq(expected_response.bytesize) + + start_index = 0 + expected_chunks = [] + chunks.each do |actual_chunk| + expected_chunk = expected_response[start_index..((start_index + actual_chunk.bytesize) - 1)] + expected_chunks << expected_chunk + start_index += expected_chunk.bytesize + end + + # it's easier to read a smaller portion, so we check that first + expect(expected_chunks[0][0..255]).to eq(chunks[0][0..255]) + + [expected_chunks, chunks].transpose.each do |expected, actual| + expect(actual).to eq(expected) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/webmock_rack_app.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/webmock_rack_app.rb new file mode 100644 index 0000000..a3212c7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-1.10.3/spec/support/webmock_rack_app.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +# Rack app used to test the Rack adapter. +# Uses Webmock to check if requests are registered, in which case it returns +# the registered response. +class WebmockRackApp + def call(env) + req_signature = WebMock::RequestSignature.new( + req_method(env), + req_uri(env), + body: req_body(env), + headers: req_headers(env) + ) + + WebMock::RequestRegistry + .instance + .requested_signatures + .put(req_signature) + + process_response(req_signature) + end + + def req_method(env) + env['REQUEST_METHOD'].downcase.to_sym + end + + def req_uri(env) + scheme = env['rack.url_scheme'] + host = env['SERVER_NAME'] + port = env['SERVER_PORT'] + path = env['PATH_INFO'] + query = env['QUERY_STRING'] + + url = +"#{scheme}://#{host}:#{port}#{path}" + url += "?#{query}" if query + + uri = WebMock::Util::URI.heuristic_parse(url) + uri.path = uri.normalized_path.gsub('[^:]//', '/') + uri + end + + def req_headers(env) + http_headers = env.select { |k, _| k.start_with?('HTTP_') } + .map { |k, v| [k[5..-1], v] } + .to_h + + special_headers = Faraday::Adapter::Rack::SPECIAL_HEADERS + http_headers.merge(env.select { |k, _| special_headers.include?(k) }) + end + + def req_body(env) + env['rack.input'].read + end + + def process_response(req_signature) + res = WebMock::StubRegistry.instance.response_for_request(req_signature) + + if res.nil? 
&& req_signature.uri.host == 'localhost' + raise Faraday::ConnectionFailed, 'Trying to connect to localhost' + end + + raise WebMock::NetConnectNotAllowedError, req_signature unless res + + raise Faraday::TimeoutError if res.should_timeout + + [res.status[0], res.headers || {}, [res.body || '']] + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/.gitignore b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/.gitignore new file mode 100644 index 0000000..d87d4be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/.gitignore @@ -0,0 +1,17 @@ +*.gem +*.rbc +.bundle +.config +.yardoc +Gemfile.lock +InstalledFiles +_yardoc +coverage +doc/ +lib/bundler/man +pkg +rdoc +spec/reports +test/tmp +test/version_tmp +tmp diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/CHANGELOG.md new file mode 100644 index 0000000..1c29957 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/CHANGELOG.md @@ -0,0 +1,31 @@ +## 0.0.7 Tue Sep 1 20:25:00 PDT 2020 + +- Avoid loading Faraday by assuming Faraday registry API (chesterbr) +- To support change above, require Faraday 0.8 as a minimum (chesterbr) + +## 0.0.6 Tue Jan 21 16:34:35 PST 2014 + +- Support Faraday 0.9 registry API (cameron-martin) +- Support specifying CookieJar in configuration (cameron-martin) + +## 0.0.5 Mon Jan 20 21:53:13 PST 2014 + +- Lock faraday dependency to < 0.9.0 for now +- Ability to add extra cookies in addition to the ones in Cookie Jar (nanjingboy) + +## 0.0.4 Mon Aug 12 23:11:46 PDT 2013 + +- Fix a crash when there's no valid response header #3 + +## 0.0.3 Sun Apr 21 08:14:32 CST 2013 + +- Switch to http-cookie gem (knu) #2 + +## 0.0.2 + +- Repackaging +- Typo in README (tmaesaka) #1 + +## 0.0.1 + +- Initial release diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Gemfile b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Gemfile new file mode 100644 index 0000000..4806224 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in faraday-cookie_jar.gemspec +gemspec diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/LICENSE.txt b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/LICENSE.txt new file mode 100644 index 0000000..853e927 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2013 Tatsuhiko Miyagawa + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/README.md new file mode 100644 index 0000000..49e5f21 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/README.md @@ -0,0 +1,39 @@ +# Faraday::CookieJar + +Faraday middleware to manage client-side cookies + +## Description + +This gem is a piece of Faraday middleware that adds client-side Cookies management, using [http-cookie gem](https://github.com/sparklemotion/http-cookie). + +## Installation + +Add this line to your application's Gemfile: + + gem 'faraday-cookie_jar' + +And then execute: + + $ bundle + +Or install it yourself as: + + $ gem install faraday-cookie_jar + +## Usage + +```ruby +require 'faraday-cookie_jar' + +conn = Faraday.new(:url => "http://example.com") do |builder| + builder.use :cookie_jar + builder.adapter Faraday.default_adapter +end + +conn.get "/foo" # gets cookie +conn.get "/bar" # sends cookie +``` + +## Author + +Tatsuhiko Miyagawa diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Rakefile b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Rakefile new file mode 100644 index 0000000..cea94d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/Rakefile @@ -0,0 +1,5 @@ +require "bundler/gem_tasks" + +require 'rspec/core/rake_task' +RSpec::Core::RakeTask.new(:spec) +task :default => :spec diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/faraday-cookie_jar.gemspec b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/faraday-cookie_jar.gemspec new file mode 100644 index 0000000..8351b6d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/faraday-cookie_jar.gemspec @@ -0,0 +1,29 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'faraday/cookie_jar/version' + +Gem::Specification.new do |spec| + spec.name = "faraday-cookie_jar" + spec.version = Faraday::CookieJarVersion::VERSION + spec.authors = ["Tatsuhiko Miyagawa"] + spec.email = ["miyagawa@bulknews.net"] + spec.description = %q{Cookie jar middleware for Faraday} + spec.summary = %q{Manages client-side cookie jar for Faraday HTTP client} + spec.homepage = "https://github.com/miyagawa/faraday-cookie_jar" + spec.license = "MIT" + + spec.files = `git ls-files`.split($/) + spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) } + spec.test_files = spec.files.grep(%r{^(test|spec|features)/}) + spec.require_paths = ["lib"] + + spec.add_dependency "faraday", ">= 0.8.0" + spec.add_dependency "http-cookie", "~> 1.0.0" + + spec.add_development_dependency "bundler", "~> 1.3" + spec.add_development_dependency "rake" + spec.add_development_dependency "rspec", "~> 3.2" + spec.add_development_dependency "sinatra" + spec.add_development_dependency "sham_rack" +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday-cookie_jar.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday-cookie_jar.rb new file mode 100644 index 0000000..9c96987 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday-cookie_jar.rb @@ -0,0 +1 @@ +require 'faraday/cookie_jar' diff 
--git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar.rb new file mode 100644 index 0000000..9ae7e07 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar.rb @@ -0,0 +1,37 @@ +require "faraday" +require "http/cookie_jar" + +module Faraday + class CookieJar < Faraday::Middleware + def initialize(app, options = {}) + super(app) + @jar = options[:jar] || HTTP::CookieJar.new + end + + def call(env) + cookies = @jar.cookies(env[:url]) + unless cookies.empty? + cookie_value = HTTP::Cookie.cookie_value(cookies) + if env[:request_headers]["Cookie"] + unless env[:request_headers]["Cookie"] == cookie_value + env[:request_headers]["Cookie"] = cookie_value + ';' + env[:request_headers]["Cookie"] + end + else + env[:request_headers]["Cookie"] = cookie_value + end + end + + @app.call(env).on_complete do |res| + if res[:response_headers] + if set_cookie = res[:response_headers]["Set-Cookie"] + @jar.parse(set_cookie, env[:url]) + end + end + end + end + end +end + +if Faraday::Middleware.respond_to? :register_middleware + Faraday::Middleware.register_middleware :cookie_jar => Faraday::CookieJar +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar/version.rb new file mode 100644 index 0000000..9196007 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/lib/faraday/cookie_jar/version.rb @@ -0,0 +1,5 @@ +module Faraday + module CookieJarVersion + VERSION = "0.0.7" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/faraday-cookie_jar/cookie_jar_spec.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/faraday-cookie_jar/cookie_jar_spec.rb new file mode 100644 index 0000000..cd8016b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/faraday-cookie_jar/cookie_jar_spec.rb @@ -0,0 +1,53 @@ +require 'spec_helper' + +describe Faraday::CookieJar do + let(:conn) { Faraday.new(:url => 'http://faraday.example.com') } + let(:cookie_jar) { HTTP::CookieJar.new } + + before do + conn.use :cookie_jar + conn.adapter :net_http # for sham_rock + end + + it 'get default cookie' do + conn.get('/default') + expect(conn.get('/dump').body).to eq('foo=bar') + end + + it 'does not send cookies to wrong path' do + conn.get('/path') + expect(conn.get('/dump').body).to_not eq('foo=bar') + end + + it 'expires cookie' do + conn.get('/expires') + expect(conn.get('/dump').body).to eq('foo=bar') + sleep 2 + expect(conn.get('/dump').body).to_not eq('foo=bar') + end + + it 'fills an injected cookie jar' do + + conn_with_jar = Faraday.new(:url => 'http://faraday.example.com') do |conn| + conn.use :cookie_jar, jar: cookie_jar + conn.adapter :net_http # for sham_rock + end + + conn_with_jar.get('/default') + + expect(cookie_jar.empty?).to be false + + end + + it 'multiple cookies' do + conn.get('/default') + + response = conn.send('get') do |request| + request.url '/multiple_cookies' + request.headers.merge!({:Cookie => 'language=english'}) + end + + expect(response.body).to eq('foo=bar;language=english') + end +end + diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/spec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/spec_helper.rb new file mode 100644 index 0000000..b83bbc8 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/spec_helper.rb @@ -0,0 +1,9 @@ +require 'rspec' +require 'faraday-cookie_jar' + +require 'sham_rack' +require_relative 'support/fake_app' + +ShamRack.at('faraday.example.com').rackup do + run FakeApp +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/support/fake_app.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/support/fake_app.rb new file mode 100644 index 0000000..638a6d6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-cookie_jar-0.0.7/spec/support/fake_app.rb @@ -0,0 +1,23 @@ +require 'sinatra' + +class FakeApp < Sinatra::Application + get '/dump' do + "foo=#{request.cookies['foo']}" + end + + get '/default' do + response.set_cookie "foo", :value => "bar" + end + + get '/path' do + response.set_cookie "foo", :value => "bar", :path => "/path" + end + + get '/expires' do + response.set_cookie "foo", :value => "bar", :expires => Time.now + 1 + end + + get '/multiple_cookies' do + "foo=#{request.cookies['foo']};language=#{request.cookies['language']}" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/README.md new file mode 100644 index 0000000..4a8c9d4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/README.md @@ -0,0 +1,61 @@ +# Faraday Em::Http adapter + +This gem is a [Faraday][faraday] adapter for the [Em::Http::Request][em_http_request] library. +Faraday is an HTTP client library that provides a common interface over many adapters. +Every adapter is defined into its own gem. This gem defines the adapter for Em::Http::Request. 
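+
+As a quick preview (a minimal sketch -- the URL and paths below are
+placeholders), the adapter also supports Faraday's parallel mode, running
+several requests inside a single EventMachine reactor loop:
+
+```ruby
+require 'faraday'
+
+conn = Faraday.new('http://example.com') do |f|
+  f.adapter :em_http
+end
+
+# Responses are only populated once the in_parallel block returns.
+conn.in_parallel do
+  @page_a = conn.get('/a')
+  @page_b = conn.get('/b')
+end
+
+puts @page_a.status, @page_b.status
+```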
+ +## Installation + +Add these lines to your application's Gemfile: + +```ruby +gem 'em-http-request', '>= 1.1' +gem 'faraday-em_http' +``` + +And then execute: + + $ bundle install + +Or install them yourself as: + + $ gem install em-http-request -v '>= 1.1' + $ gem install faraday-em_http + +## Usage + +Configure your Faraday connection to use this adapter like this: + +```ruby +connection = Faraday.new(url, conn_options) do |conn| + conn.adapter(:em_http) +end +``` + +For more information on how to setup your Faraday connection and adapters usage, please refer to the [Faraday Website][faraday-website]. + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](rubygems). + +## Contributing + +Bug reports and pull requests are welcome on [GitHub][repo]. + +## License + +The gem is available as open source under the terms of the [license][license]. + +## Code of Conduct + +Everyone interacting in the Faraday Em::Http adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct]. + +[faraday]: https://github.com/lostisland/faraday +[faraday-website]: https://lostisland.github.io/faraday +[em_http_request]: https://github.com/igrigorik/em-http-request +[rubygems]: https://rubygems.org +[repo]: https://github.com/lostisland/faraday-em_http +[license]: https://github.com/lostisland/faraday-em_http/blob/main/LICENSE.md +[code-of-conduct]: https://github.com/lostisland/faraday-em_http/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http.rb new file mode 100644 index 0000000..dac13c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http.rb @@ -0,0 +1,289 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # EventMachine adapter. This adapter is useful for either asynchronous + # requests when in an EM reactor loop, or for making parallel requests in + # synchronous code. + class EMHttp < Faraday::Adapter + # Options is a module containing helpers to convert the Faraday env object + # into options hashes for EMHTTP method calls. + module Options + # @return [Hash] + def connection_config(env) + options = {} + configure_proxy(options, env) + configure_timeout(options, env) + configure_socket(options, env) + configure_ssl(options, env) + options + end + + def request_config(env) + options = { + body: read_body(env), + head: env[:request_headers] + # keepalive: true, + # file: 'path/to/file', # stream data off disk + } + configure_compression(options, env) + options + end + + def read_body(env) + body = env[:body] + body.respond_to?(:read) ? 
body.read : body + end + + # Reads out proxy settings from env into options + def configure_proxy(options, env) + proxy = request_options(env)[:proxy] + return unless proxy + + options[:proxy] = { + host: proxy[:uri].host, + port: proxy[:uri].port, + authorization: [proxy[:user], proxy[:password]] + } + end + + # Reads out host and port settings from env into options + def configure_socket(options, env) + bind = request_options(env)[:bind] + return unless bind + + options[:bind] = { + host: bind[:host], + port: bind[:port] + } + end + + # Reads out SSL certificate settings from env into options + def configure_ssl(options, env) + return unless env[:url].scheme == 'https' && env[:ssl] + + options[:ssl] = { + cert_chain_file: env[:ssl][:ca_file], + verify_peer: env[:ssl].fetch(:verify, true) + } + end + + # Reads out timeout settings from env into options + def configure_timeout(options, env) + req = request_options(env) + options[:inactivity_timeout] = request_timeout(:read, req) + options[:connect_timeout] = request_timeout(:open, req) + end + + # Reads out compression header settings from env into options + def configure_compression(options, env) + return unless (env[:method] == :get) && + !options[:head].key?('accept-encoding') + + options[:head]['accept-encoding'] = 'gzip, compressed' + end + + def request_options(env) + env[:request] + end + end + + include Options + + dependency do + require 'em-http' + + begin + require 'openssl' + rescue LoadError + warn 'Warning: no such file to load -- openssl. ' \ + 'Make sure it is installed if you want HTTPS support' + else + require 'em-http/version' + if EventMachine::HttpRequest::VERSION < '1.1.6' + require 'faraday/adapter/em_http_ssl_patch' + end + end + end + + self.supports_parallel = true + + # @return [Manager] + def self.setup_parallel_manager(_options = nil) + Manager.new + end + + def call(env) + super + perform_request env + @app.call env + end + + def perform_request(env) + if parallel?(env) + manager = env[:parallel_manager] + manager.add do + perform_single_request(env) + .callback { env[:response].finish(env) } + end + elsif EventMachine.reactor_running? + # EM is running: instruct upstream that this is an async request + env[:parallel_manager] = true + perform_single_request(env) + .callback { env[:response].finish(env) } + .errback do + # TODO: no way to communicate the error in async mode + raise NotImplementedError + end + else + error = nil + # start EM, block until request is completed + EventMachine.run do + perform_single_request(env) + .callback { EventMachine.stop } + .errback do |client| + error = error_message(client) + EventMachine.stop + end + end + raise_error(error) if error + end + rescue EventMachine::Connectify::CONNECTError => e + if e.message.include?('Proxy Authentication Required') + raise Faraday::ConnectionFailed, + %(407 "Proxy Authentication Required ") + end + + raise Faraday::ConnectionFailed, e + rescue StandardError => e + if defined?(::OpenSSL::SSL::SSLError) && \ + e.is_a?(::OpenSSL::SSL::SSLError) + raise Faraday::SSLError, e + end + + raise + end + + # TODO: reuse the connection to support pipelining + def perform_single_request(env) + req = create_request(env) + req = req.setup_request(env[:method], request_config(env)) + req.callback do |client| + if env[:request].stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' 
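+          # No real streaming support here: the already-buffered response body
+          # is handed to the on_data callback in a single call below.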
+ env[:request].on_data.call( + client.response, + client.response.bytesize + ) + end + status = client.response_header.status + reason = client.response_header.http_reason + save_response(env, status, client.response, nil, reason) do |headers| + client.response_header.each do |name, value| + headers[name.to_sym] = value + end + end + end + end + + def create_request(env) + EventMachine::HttpRequest.new( + env[:url], connection_config(env).merge(@connection_options) + ) + end + + def error_message(client) + client.error || 'request failed' + end + + def raise_error(msg) + error_class = Faraday::ClientError + if timeout_message?(msg) + error_class = Faraday::TimeoutError + msg = 'request timed out' + elsif msg == Errno::ECONNREFUSED + error_class = Faraday::ConnectionFailed + msg = 'connection refused' + elsif msg == 'connection closed by server' + error_class = Faraday::ConnectionFailed + end + raise error_class, msg + end + + def timeout_message?(msg) + msg == Errno::ETIMEDOUT || + (msg.is_a?(String) && msg.include?('timeout error')) + end + + # @return [Boolean] + def parallel?(env) + !!env[:parallel_manager] + end + + # This parallel manager is designed to start an EventMachine loop + # and block until all registered requests have been completed. + class Manager + # @see reset + def initialize + reset + end + + # Re-initializes instance variables + def reset + @registered_procs = [] + @num_registered = 0 + @num_succeeded = 0 + @errors = [] + @running = false + end + + # @return [Boolean] + def running? + @running + end + + def add(&block) + if running? + perform_request(&block) + else + @registered_procs << block + end + @num_registered += 1 + end + + def run + if @num_registered.positive? + @running = true + EventMachine.run do + @registered_procs.each do |proc| + perform_request(&proc) + end + end + unless @errors.empty? + raise Faraday::ClientError, @errors.first || 'connection failed' + end + end + ensure + reset + end + + def perform_request + client = yield + client.callback do + @num_succeeded += 1 + check_finished + end + client.errback do + @errors << client.error + check_finished + end + end + + def check_finished + EventMachine.stop if @num_succeeded + @errors.size == @num_registered + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http_ssl_patch.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http_ssl_patch.rb new file mode 100644 index 0000000..d33a9c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/adapter/em_http_ssl_patch.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +require 'openssl' +require 'em-http' + +# EventMachine patch to make SSL work. +module EmHttpSslPatch + def ssl_verify_peer(cert_string) + begin + @last_seen_cert = OpenSSL::X509::Certificate.new(cert_string) + rescue OpenSSL::X509::CertificateError + return false + end + + unless certificate_store.verify(@last_seen_cert) + raise OpenSSL::SSL::SSLError, + %(unable to verify the server certificate for "#{host}") + end + + begin + certificate_store.add_cert(@last_seen_cert) + rescue OpenSSL::X509::StoreError => e + raise e unless e.message == 'cert already in hash table' + end + true + end + + def ssl_handshake_completed + return true unless verify_peer? + + unless verified_cert_identity? + raise OpenSSL::SSL::SSLError, + %(host "#{host}" does not match the server certificate) + end + + true + end + + def verify_peer? 
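+    # em-http exposes the connection's TLS options via the parent request;
+    # :verify_peer here is the flag the EMHttp adapter sets from Faraday's
+    # ssl[:verify] option.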
+ parent.connopts.tls[:verify_peer] + end + + def verified_cert_identity? + OpenSSL::SSL.verify_certificate_identity(@last_seen_cert, host) + end + + def host + parent.uri.host + end + + def certificate_store + @certificate_store ||= begin + store = OpenSSL::X509::Store.new + store.set_default_paths + ca_file = parent.connopts.tls[:cert_chain_file] + store.add_file(ca_file) if ca_file + store + end + end +end + +EventMachine::HttpStubConnection.include(EmHttpSslPatch) diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http.rb new file mode 100644 index 0000000..ef77e5e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative 'adapter/em_http' +require_relative 'em_http/version' + +module Faraday + # Main Faraday::EmHttp module + module EmHttp + Faraday::Adapter.register_middleware(em_http: Faraday::Adapter::EMHttp) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http/version.rb new file mode 100644 index 0000000..c1a135a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_http-1.0.0/lib/faraday/em_http/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module EmHttp + VERSION = '1.0.0' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/README.md new file mode 100644 index 0000000..fabb732 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/README.md @@ -0,0 +1,65 @@ +# Faraday Em::Synchrony adapter + +This gem is a [Faraday][faraday] adapter for the [Em::Synchrony][em_synchrony] library. +Faraday is an HTTP client library that provides a common interface over many adapters. +Every adapter is defined into its own gem. This gem defines the adapter for Em::Synchrony. 
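+
+Beyond single synchronous requests, the adapter supports Faraday's parallel mode. A minimal sketch (once the gems below are installed; the host and paths are placeholders):
+
+```ruby
+require 'faraday'
+require 'faraday/em_synchrony'
+
+conn = Faraday.new('http://example.com') do |f|
+  f.adapter :em_synchrony
+end
+
+responses = []
+conn.in_parallel do
+  # Each call returns a pending response immediately...
+  responses << conn.get('/a')
+  responses << conn.get('/b')
+end
+
+# ...and the bodies are populated once the in_parallel block has finished.
+responses.map(&:body)
+```
+
+Under the hood, `in_parallel` asks the adapter for a `ParallelManager`, which queues the requests and replays them through `EM::Synchrony::Multi`.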
+
+## Installation
+
+Add these lines to your application's Gemfile:
+
+```ruby
+gem 'em-http-request', '>= 1.1'
+gem 'em-synchrony', '>= 1.0.3'
+gem 'faraday-em_http', '~> 1.0'
+gem 'faraday-em_synchrony'
+```
+
+And then execute:
+
+    $ bundle install
+
+Or install them yourself as:
+
+    $ gem install em-http-request -v '>= 1.1'
+    $ gem install em-synchrony -v '>= 1.0.3'
+    $ gem install faraday-em_http -v '~> 1.0'
+    $ gem install faraday-em_synchrony
+
+## Usage
+
+Configure your Faraday connection to use this adapter like this:
+
+```ruby
+connection = Faraday.new(url, conn_options) do |conn|
+  conn.adapter(:em_synchrony)
+end
+```
+
+For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website].
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems].
+
+## Contributing
+
+Bug reports and pull requests are welcome on [GitHub][repo].
+
+## License
+
+The gem is available as open source under the terms of the [license][license].
+
+## Code of Conduct
+
+Everyone interacting in the Faraday Em::Synchrony adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
+
+[faraday]: https://github.com/lostisland/faraday
+[faraday-website]: https://lostisland.github.io/faraday
+[em_synchrony]: https://github.com/igrigorik/em-synchrony
+[rubygems]: https://rubygems.org
+[repo]: https://github.com/lostisland/faraday-em_synchrony
+[license]: https://github.com/lostisland/faraday-em_synchrony/blob/main/LICENSE.md
+[code-of-conduct]: https://github.com/lostisland/faraday-em_synchrony/blob/main/CODE_OF_CONDUCT.md
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony.rb
new file mode 100644
index 0000000..94331ba
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony.rb
@@ -0,0 +1,153 @@
+# frozen_string_literal: true
+
+require 'uri'
+
+module Faraday
+  class Adapter
+    # EventMachine Synchrony adapter.
+    class EMSynchrony < Faraday::Adapter
+      include EMHttp::Options
+
+      dependency do
+        require 'em-synchrony/em-http'
+        require 'em-synchrony/em-multi'
+        require 'fiber'
+
+        require 'faraday/adapter/em_synchrony/parallel_manager'
+
+        if Faraday::Adapter::EMSynchrony.loaded?
+          begin
+            require 'openssl'
+          rescue LoadError
+            warn 'Warning: no such file to load -- openssl. ' \
+                 'Make sure it is installed if you want HTTPS support'
+          else
+            require 'em-http/version'
+            if EventMachine::HttpRequest::VERSION < '1.1.6'
+              require 'faraday/adapter/em_http_ssl_patch'
+            end
+          end
+        end
+      end
+
+      self.supports_parallel = true
+
+      # @return [ParallelManager]
+      def self.setup_parallel_manager(_options = nil)
+        ParallelManager.new
+      end
+
+      def call(env)
+        super
+        request = create_request(env)
+
+        http_method = env[:method].to_s.downcase.to_sym
+
+        if env[:parallel_manager]
+          # Queue requests for parallel execution.
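+          # Nothing is sent yet: ParallelManager#add only stores the request,
+          # and ParallelManager#run later fires the whole queue through
+          # EM::Synchrony::Multi.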
+ execute_parallel_request(env, request, http_method) + else + # Execute single request. + execute_single_request(env, request, http_method) + end + + @app.call env + rescue Errno::ECONNREFUSED + raise Faraday::ConnectionFailed, $ERROR_INFO + rescue EventMachine::Connectify::CONNECTError => e + if e.message.include?('Proxy Authentication Required') + raise Faraday::ConnectionFailed, + %(407 "Proxy Authentication Required") + end + + raise Faraday::ConnectionFailed, e + rescue Errno::ETIMEDOUT => e + raise Faraday::TimeoutError, e + rescue RuntimeError => e + if e.message == 'connection closed by server' + raise Faraday::ConnectionFailed, e + end + + raise Faraday::TimeoutError, e if e.message.include?('timeout error') + + raise + rescue StandardError => e + if defined?(OpenSSL) && e.is_a?(OpenSSL::SSL::SSLError) + raise Faraday::SSLError, e + end + + raise + end + + def create_request(env) + EventMachine::HttpRequest.new( + Utils::URI(env[:url].to_s), + connection_config(env).merge(@connection_options) + ) + end + + private + + def execute_parallel_request(env, request, http_method) + env[:parallel_manager].add(request, http_method, + request_config(env)) do |resp| + if (req = env[:request]).stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' + req.on_data.call(resp.response, resp.response.bytesize) + end + + save_response(env, resp.response_header.status, + resp.response) do |resp_headers| + resp.response_header.each do |name, value| + resp_headers[name.to_sym] = value + end + end + + # Finalize the response object with values from `env`. + env[:response].finish(env) + end + end + + def execute_single_request(env, request, http_method) + block = -> { request.send(http_method, request_config(env)) } + client = call_block(block) + + raise client.error if client&.error + + if env[:request].stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' + env[:request].on_data.call( + client.response, + client.response.bytesize + ) + end + status = client.response_header.status + reason = client.response_header.http_reason + save_response(env, status, client.response, nil, reason) do |headers| + client.response_header.each do |name, value| + headers[name.to_sym] = value + end + end + end + + def call_block(block) + client = nil + + if EM.reactor_running? + client = block.call + else + EM.run do + Fiber.new do + client = block.call + EM.stop + end.resume + end + end + + client + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony/parallel_manager.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony/parallel_manager.rb new file mode 100644 index 0000000..1701ae8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/adapter/em_synchrony/parallel_manager.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + class EMSynchrony < Faraday::Adapter + # A parallel manager for EMSynchrony. + class ParallelManager + # Add requests to queue. + # + # @param request [EM::HttpRequest] + # @param method [Symbol, String] HTTP method + # @param args [Array] the rest of the positional arguments + def add(request, method, *args, &block) + queue << { + request: request, + method: method, + args: args, + block: block + } + end + + # Run all requests on queue with `EM::Synchrony::Multi`, wrapping + # it in a reactor and fiber if needed. 
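+        # Multi#perform blocks the calling fiber until every queued request
+        # has answered, so #run returns only when the queue is fully drained.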
+ def run + result = nil + if !EM.reactor_running? + EM.run do + Fiber.new do + result = perform + EM.stop + end.resume + end + else + result = perform + end + result + end + + private + + # The request queue. + def queue + @queue ||= [] + end + + # Main `EM::Synchrony::Multi` performer. + def perform + multi = ::EM::Synchrony::Multi.new + + queue.each do |item| + method = "a#{item[:method]}".to_sym + + req = item[:request].send(method, *item[:args]) + req.callback(&item[:block]) + + req_name = "req_#{multi.requests.size}".to_sym + multi.add(req_name, req) + end + + # Clear the queue, so parallel manager objects can be reused. + @queue = [] + + # Block fiber until all requests have returned. + multi.perform + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony.rb new file mode 100644 index 0000000..a56713b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require 'faraday/em_http' +require_relative 'adapter/em_synchrony' +require_relative 'em_synchrony/version' + +module Faraday + # Main Faraday::EmSynchrony module + module EmSynchrony + Faraday::Adapter.register_middleware(em_synchrony: Faraday::Adapter::EMSynchrony) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony/version.rb new file mode 100644 index 0000000..db1c99c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-em_synchrony-1.0.0/lib/faraday/em_synchrony/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module EmSynchrony + VERSION = '1.0.0' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/README.md
new file mode 100644
index 0000000..cf18e04
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/README.md
@@ -0,0 +1,61 @@
+# Faraday Excon adapter
+
+This gem is a [Faraday][faraday] adapter for the [Excon][excon] library.
+Faraday is an HTTP client library that provides a common interface over many adapters.
+Every adapter is defined into its own gem. This gem defines the adapter for Excon.
+
+## Installation
+
+Add these lines to your application's Gemfile:
+
+```ruby
+gem 'excon', '>= 0.27.4'
+gem 'faraday-excon'
+```
+
+And then execute:
+
+    $ bundle install
+
+Or install them yourself as:
+
+    $ gem install excon -v '>= 0.27.4'
+    $ gem install faraday-excon
+
+## Usage
+
+Configure your Faraday connection to use this adapter like this:
+
+```ruby
+connection = Faraday.new(url, conn_options) do |conn|
+  conn.adapter(:excon)
+end
+```
+
+For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website].
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems].
+
+## Contributing
+
+Bug reports and pull requests are welcome on [GitHub][repo].
+
+## License
+
+The gem is available as open source under the terms of the [license][license].
+
+## Code of Conduct
+
+Everyone interacting in the Faraday Excon adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
+
+[faraday]: https://github.com/lostisland/faraday
+[faraday-website]: https://lostisland.github.io/faraday
+[excon]: https://github.com/excon/excon
+[rubygems]: https://rubygems.org
+[repo]: https://github.com/lostisland/faraday-excon
+[license]: https://github.com/lostisland/faraday-excon/blob/main/LICENSE.md
+[code-of-conduct]: https://github.com/lostisland/faraday-excon/blob/main/CODE_OF_CONDUCT.md
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/adapter/excon.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/adapter/excon.rb
new file mode 100644
index 0000000..7febdf9
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/adapter/excon.rb
@@ -0,0 +1,124 @@
+# frozen_string_literal: true
+
+module Faraday
+  class Adapter
+    # Excon adapter.
+    class Excon < Faraday::Adapter
+      dependency 'excon'
+
+      def build_connection(env)
+        opts = opts_from_env(env)
+        ::Excon.new(env[:url].to_s, opts.merge(@connection_options))
+      end
+
+      def call(env)
+        super
+
+        req_opts = {
+          method: env[:method].to_s.upcase,
+          headers: env[:request_headers],
+          body: read_body(env)
+        }
+
+        req = env[:request]
+        if req&.stream_response?
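+          # Excon hands :response_block each chunk along with the remaining
+          # and total byte counts; the running total below adapts that to
+          # Faraday's on_data(chunk, received_bytes) contract.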
+ total = 0 + req_opts[:response_block] = lambda do |chunk, _remain, _total| + req.on_data.call(chunk, total += chunk.size) + end + end + + resp = connection(env) { |http| http.request(req_opts) } + save_response(env, resp.status.to_i, resp.body, resp.headers, + resp.reason_phrase) + + @app.call(env) + rescue ::Excon::Errors::SocketError => e + raise Faraday::TimeoutError, e if e.message.match?(/\btimeout\b/) + + raise Faraday::SSLError, e if e.message.match?(/\bcertificate\b/) + + raise Faraday::ConnectionFailed, e + rescue ::Excon::Errors::Timeout => e + raise Faraday::TimeoutError, e + end + + # TODO: support streaming requests + def read_body(env) + env[:body].respond_to?(:read) ? env[:body].read : env[:body] + end + + private + + def opts_from_env(env) + opts = {} + amend_opts_with_ssl!(opts, env[:ssl]) if needs_ssl_settings?(env) + + if (req = env[:request]) + amend_opts_with_timeouts!(opts, req) + amend_opts_with_proxy_settings!(opts, req) + end + + opts + end + + def needs_ssl_settings?(env) + env[:url].scheme == 'https' && env[:ssl] + end + + OPTS_KEYS = [ + %i[client_cert client_cert], + %i[client_key client_key], + %i[certificate certificate], + %i[private_key private_key], + %i[ssl_ca_path ca_path], + %i[ssl_ca_file ca_file], + %i[ssl_version version], + %i[ssl_min_version min_version], + %i[ssl_max_version max_version] + ].freeze + + def amend_opts_with_ssl!(opts, ssl) + opts[:ssl_verify_peer] = !!ssl.fetch(:verify, true) + # https://github.com/geemus/excon/issues/106 + # https://github.com/jruby/jruby-ossl/issues/19 + opts[:nonblock] = false + + OPTS_KEYS.each do |(key_in_opts, key_in_ssl)| + next unless ssl[key_in_ssl] + + opts[key_in_opts] = ssl[key_in_ssl] + end + end + + def amend_opts_with_timeouts!(opts, req) + if (sec = request_timeout(:read, req)) + opts[:read_timeout] = sec + end + + if (sec = request_timeout(:write, req)) + opts[:write_timeout] = sec + end + + return unless (sec = request_timeout(:open, req)) + + opts[:connect_timeout] = sec + end + + def amend_opts_with_proxy_settings!(opts, req) + opts[:proxy] = proxy_settings_for_opts(req[:proxy]) if req[:proxy] + end + + def proxy_settings_for_opts(proxy) + { + host: proxy[:uri].host, + hostname: proxy[:uri].hostname, + port: proxy[:uri].port, + scheme: proxy[:uri].scheme, + user: proxy[:user], + password: proxy[:password] + } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon.rb new file mode 100644 index 0000000..d1448fe --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative 'adapter/excon' +require_relative 'excon/version' + +module Faraday + # Main Faraday::Excon module + module Excon + Faraday::Adapter.register_middleware(excon: Faraday::Adapter::Excon) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon/version.rb new file mode 100644 index 0000000..4d300b7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-excon-1.1.0/lib/faraday/excon/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module Excon + VERSION = '1.1.0' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- 
/dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2020 Jan van der Pas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/README.md
new file mode 100644
index 0000000..2b65d79
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/README.md
@@ -0,0 +1,62 @@
+# Faraday HTTPClient adapter
+
+This gem is a [Faraday][faraday] adapter for the [HTTPClient][httpclient] library.
+Faraday is an HTTP client library that provides a common interface over many adapters.
+Every adapter is defined into its own gem. This gem defines the adapter for HTTPClient.
+
+## Installation
+
+Add these lines to your application's Gemfile:
+
+```ruby
+gem 'httpclient', '>= 2.2'
+gem 'faraday'
+gem 'faraday-httpclient'
+```
+
+And then execute:
+
+    $ bundle install
+
+Or install them yourself as:
+
+    $ gem install httpclient -v '>= 2.2'
+    $ gem install faraday faraday-httpclient
+
+## Usage
+
+Configure your Faraday connection to use this adapter like this:
+
+```ruby
+connection = Faraday.new(url, conn_options) do |conn|
+  conn.adapter(:httpclient)
+end
+```
+
+For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website].
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems].
+
+## Contributing
+
+Bug reports and pull requests are welcome on [GitHub][repo].
+
+## License
+
+The gem is available as open source under the terms of the [license][license].
+
+## Code of Conduct
+
+Everyone interacting in the Faraday HTTPClient adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
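+
+As an addendum to the Usage section above: the adapter also accepts a configuration block, which is handed the underlying `HTTPClient` instance when the connection is built (via the adapter's `configure_client` hook). A minimal sketch; the timeout value is purely illustrative:
+
+```ruby
+require 'faraday'
+require 'faraday/httpclient'
+
+connection = Faraday.new(url, conn_options) do |conn|
+  conn.adapter(:httpclient) do |client|
+    # client is the raw HTTPClient object
+    client.receive_timeout = 120
+  end
+end
+```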
+ +[faraday]: https://github.com/lostisland/faraday +[faraday-website]: https://lostisland.github.io/faraday +[httpclient]: https://github.com/nahi/httpclient +[rubygems]: https://rubygems.org +[repo]: https://github.com/lostisland/faraday-httpclient +[license]: https://github.com/lostisland/faraday-httpclient/blob/main/LICENSE.md +[code-of-conduct]: https://github.com/lostisland/faraday-httpclient/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/adapter/httpclient.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/adapter/httpclient.rb new file mode 100644 index 0000000..2d17f75 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/adapter/httpclient.rb @@ -0,0 +1,154 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # This class provides the main implementation for your adapter. + # There are some key responsibilities that your adapter should satisfy: + # * Initialize and store internally the client you chose (e.g. Net::HTTP) + # * Process requests and save the response (see `#call`) + class HTTPClient < Faraday::Adapter + dependency 'httpclient' + + def build_connection(env) + @client ||= ::HTTPClient.new.tap do |cli| + # enable compression + cli.transparent_gzip_decompression = true + end + + if (req = env[:request]) + if (proxy = req[:proxy]) + configure_proxy @client, proxy + end + + if (bind = req[:bind]) + configure_socket @client, bind + end + + configure_timeouts @client, req + end + + if env[:url].scheme == 'https' && (ssl = env[:ssl]) + configure_ssl @client, ssl + end + + configure_client @client + + @client + end + + def call(env) + super + + # TODO: Don't stream yet. + # https://github.com/nahi/httpclient/pull/90 + env[:body] = env[:body].read if env[:body].respond_to? :read + + connection(env) do |http| + resp = http.request env[:method], env[:url], + body: env[:body], + header: env[:request_headers] + + if (req = env[:request]).stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' + req.on_data.call(resp.body, resp.body.bytesize) + end + save_response env, resp.status, resp.body, resp.headers, resp.reason + + @app.call env + end + rescue ::HTTPClient::TimeoutError, Errno::ETIMEDOUT + raise Faraday::TimeoutError, $ERROR_INFO + rescue ::HTTPClient::BadResponseError => e + if e.message.include?('status 407') + raise Faraday::ConnectionFailed, + %(407 "Proxy Authentication Required ") + end + + raise Faraday::ClientError, $ERROR_INFO + rescue Errno::EADDRNOTAVAIL, Errno::ECONNREFUSED, IOError, SocketError + raise Faraday::ConnectionFailed, $ERROR_INFO + rescue StandardError => e + if defined?(::OpenSSL::SSL::SSLError) && \ + e.is_a?(::OpenSSL::SSL::SSLError) + raise Faraday::SSLError, e + end + + raise + end + + # @param bind [Hash] + def configure_socket(client, bind) + client.socket_local.host = bind[:host] + client.socket_local.port = bind[:port] + end + + # Configure proxy URI and any user credentials. 
+      #
+      # @param proxy [Hash]
+      def configure_proxy(client, proxy)
+        client.proxy = proxy[:uri]
+        return unless proxy[:user] && proxy[:password]
+
+        client.set_proxy_auth(proxy[:user], proxy[:password])
+      end
+
+      # @param ssl [Hash]
+      def configure_ssl(client, ssl)
+        ssl_config = client.ssl_config
+        ssl_config.verify_mode = ssl_verify_mode(ssl)
+        ssl_config.cert_store = ssl_cert_store(ssl)
+
+        ssl_config.add_trust_ca ssl[:ca_file] if ssl[:ca_file]
+        ssl_config.add_trust_ca ssl[:ca_path] if ssl[:ca_path]
+        ssl_config.client_cert = ssl[:client_cert] if ssl[:client_cert]
+        ssl_config.client_key = ssl[:client_key] if ssl[:client_key]
+        ssl_config.verify_depth = ssl[:verify_depth] if ssl[:verify_depth]
+      end
+
+      # @param req [Hash]
+      def configure_timeouts(client, req)
+        if (sec = request_timeout(:open, req))
+          client.connect_timeout = sec
+        end
+
+        if (sec = request_timeout(:write, req))
+          client.send_timeout = sec
+        end
+
+        return unless (sec = request_timeout(:read, req))
+
+        client.receive_timeout = sec
+      end
+
+      def configure_client(client)
+        @config_block&.call(client)
+      end
+
+      # @param ssl [Hash]
+      # @return [OpenSSL::X509::Store]
+      def ssl_cert_store(ssl)
+        return ssl[:cert_store] if ssl[:cert_store]
+
+        # Memoize the cert store so that the same one is passed to
+        # HTTPClient each time, to avoid resyncing SSL sessions when
+        # it's changed
+
+        # Use the default cert store by default, i.e. system ca certs
+        @ssl_cert_store ||= OpenSSL::X509::Store.new.tap(&:set_default_paths)
+      end
+
+      # @param ssl [Hash]
+      def ssl_verify_mode(ssl)
+        ssl[:verify_mode] || begin
+          if ssl.fetch(:verify, true)
+            OpenSSL::SSL::VERIFY_PEER |
+              OpenSSL::SSL::VERIFY_FAIL_IF_NO_PEER_CERT
+          else
+            OpenSSL::SSL::VERIFY_NONE
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient.rb
new file mode 100644
index 0000000..5b746b1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+require_relative 'adapter/httpclient'
+require_relative 'httpclient/version'
+
+module Faraday
+  # Main Faraday::HTTPClient module
+  module HTTPClient
+    Faraday::Adapter.register_middleware(httpclient: Faraday::Adapter::HTTPClient)
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient/version.rb
new file mode 100644
index 0000000..5d5c033
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-httpclient-1.0.1/lib/faraday/httpclient/version.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+module Faraday
+  module HTTPClient
+    VERSION = '1.0.1'
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/CHANGELOG.md
new file mode 100644
index 0000000..361dab4
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/CHANGELOG.md
@@ -0,0 +1,52 @@
+# Changelog
+
+## [1.0.4](https://github.com/lostisland/faraday-multipart/releases/tag/v1.0.4) (2022-06-07)
+
+### What's Changed
+
+* Drop support for 'multipart-post' < 2.0.0. This is not a breaking change, as this gem's code didn't work with 1.x.
+* Change references to `UploadIO` and `Parts` according to class reorganization in the 'multipart-post' gem 2.2.0 (see [multipart-post gem PR #89](https://github.com/socketry/multipart-post/pull/89)) +* Introduce a backwards compatible safeguard so the gem still works with previous 'multipart-post' 2.x releases. + +## [1.0.3](https://github.com/lostisland/faraday-multipart/releases/tag/v1.0.3) (2022-01-08) + +### What's Changed + +* Add `Faraday::ParamPart` alias back by @iMacTia in https://github.com/lostisland/faraday-multipart/pull/2 + +**Full Changelog**: https://github.com/lostisland/faraday-multipart/compare/v1.0.2...v1.0.3 + +## [1.0.2](https://github.com/lostisland/faraday-multipart/releases/tag/v1.0.2) (2022-01-06) + +### Fixes + +* Add missing UploadIO alias +* Re-add support for Ruby 2.4+ + +**Full Changelog**: https://github.com/lostisland/faraday-multipart/compare/v1.0.1...v1.0.2 + +## [1.0.1](https://github.com/lostisland/faraday-multipart/releases/tag/v1.0.1) (2022-01-06) + +### What's Changed +* Add support for Faraday v1 by @iMacTia in https://github.com/lostisland/faraday-multipart/pull/1 + +**Full Changelog**: https://github.com/lostisland/faraday-multipart/compare/v1.0.0...v1.0.1 + +## [1.0.0](https://github.com/lostisland/faraday-multipart/releases/tag/v1.0.0) (2022-01-04) + +### Summary + +The initial release of the `faraday-multipart` gem. + +This middleware was previously bundled with Faraday but was removed as of v2.0. + +### MIGRATION NOTES + +If you're upgrading from Faraday 1.0 and including this middleware as a gem, please be aware the namespacing for helper classes has changed: + +* `Faraday::FilePart` is now `Faraday::Multipart::FilePart` +* `Faraday::Parts` is now `Faraday::Multipart::Parts` +* `Faraday::CompositeReadIO` is now `Faraday::Multipart::CompositeReadIO` +* `Faraday::ParamPart` is now `Faraday::Multipart::ParamPart` + +Moreover, in case you're adding the middleware to your faraday connection with the full qualified name rather than the `:multipart` alias, please be aware the middleware class is now `Faraday::Multipart::Middleware`. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/LICENSE.md new file mode 100644 index 0000000..6815a51 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2022 The Faraday Team + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/README.md new file mode 100644 index 0000000..c17b5e8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/README.md @@ -0,0 +1,158 @@ +# Faraday Multipart + +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/lostisland/faraday-multipart/ci)](https://github.com/lostisland/faraday-multipart/actions?query=branch%3Amain) +[![Gem](https://img.shields.io/gem/v/faraday-multipart.svg?style=flat-square)](https://rubygems.org/gems/faraday-multipart) +[![License](https://img.shields.io/github/license/lostisland/faraday-multipart.svg?style=flat-square)](LICENSE.md) + +The `Multipart` middleware converts a `Faraday::Request#body` Hash of key/value pairs into a multipart form request, but +only under these conditions: + +* The request's Content-Type is "multipart/form-data" +* Content-Type is unspecified, AND one of the values in the Body responds to + `#content_type`. + +Faraday contains a couple helper classes for multipart values: + +* `Faraday::Multipart::FilePart` wraps binary file data with a Content-Type. The file data can be specified with a String path to a + local file, or an IO object. +* `Faraday::Multipart::ParamPart` wraps a String value with a Content-Type, and optionally a Content-ID. + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'faraday-multipart' +``` + +And then execute: + +```shell +bundle install +``` + +Or install it yourself as: + +```shell +gem install faraday-multipart +``` + +## Usage + +First of all, you'll need to add the multipart middleware to your Faraday connection: + +```ruby +require 'faraday' +require 'faraday/multipart' + +conn = Faraday.new(...) do |f| + f.request :multipart, **options + # ... +end +``` + + +Payload can be a mix of POST data and multipart values. + +```ruby +# regular POST form value +payload = { string: 'value' } + +# filename for this value is File.basename(__FILE__) +payload[:file] = Faraday::Multipart::FilePart.new(__FILE__, 'text/x-ruby') + +# specify filename because IO object doesn't know it +payload[:file_with_name] = Faraday::Multipart::FilePart.new( + File.open(__FILE__), + 'text/x-ruby', + File.basename(__FILE__) +) + +# Sets a custom Content-Disposition: +# nil filename still defaults to File.basename(__FILE__) +payload[:file_with_header] = Faraday::Multipart::FilePart.new( + __FILE__, + 'text/x-ruby', + nil, + 'Content-Disposition' => 'form-data; foo=1' +) + +# Upload raw json with content type +payload[:raw_data] = Faraday::Multipart::ParamPart.new( + { a: 1 }.to_json, + 'application/json' +) + +# optionally sets Content-ID too +payload[:raw_with_id] = Faraday::Multipart::ParamPart.new( + { a: 1 }.to_json, + 'application/json', + 'foo-123' +) + +conn.post('/', payload) +``` + +### Sending an array of documents + +Sometimes, the server you're calling will expect an array of documents or other values for the same key. 
+The `multipart` middleware will automatically handle this scenario for you: + +```ruby +payload = { + files: [ + Faraday::Multipart::FilePart.new(__FILE__, 'text/x-ruby'), + Faraday::Multipart::FilePart.new(__FILE__, 'text/x-pdf') + ], + url: [ + 'http://mydomain.com/callback1', + 'http://mydomain.com/callback2' + ] +} + +conn.post(url, payload) +#=> POST url[]=http://mydomain.com/callback1&url[]=http://mydomain.com/callback2 +#=> and includes both files in the request under the `files[]` name +``` + +However, by default these will be sent with `files[]` key and the URLs with `url[]`, similarly to arrays in URL parameters. +Some servers (e.g. Mailgun) expect each document to have the same parameter key instead. +You can instruct the `multipart` middleware to do so by providing the `flat_encode` option: + +```ruby +require 'faraday' +require 'faraday/multipart' + +conn = Faraday.new(...) do |f| + f.request :multipart, flat_encode: true + # ... +end + +payload = ... # see example above + +conn.post(url, payload) +#=> POST url=http://mydomain.com/callback1&url=http://mydomain.com/callback2 +#=> and includes both files in the request under the `files` name +``` + +This works for both `UploadIO` and normal parameters alike. + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. + +Then, run `bin/test` to run the tests. + +To install this gem onto your local machine, run `rake build`. + +### Releasing a new version + +To release a new version, make a commit with a message such as "Bumped to 0.0.2", and change the _Unreleased_ heading in `CHANGELOG.md` to a heading like "0.0.2 (2022-01-01)", and then use GitHub Releases to author a release. A GitHub Actions workflow then publishes a new gem to [RubyGems.org](https://rubygems.org/gems/faraday-multipart). + +## Contributing + +Bug reports and pull requests are welcome on [GitHub](https://github.com/lostisland/faraday-multipart). + +## License + +The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart.rb new file mode 100644 index 0000000..5c54758 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +require_relative 'multipart/version' +require_relative 'multipart/file_part' +require_relative 'multipart/param_part' +require_relative 'multipart/middleware' + +module Faraday + # Main Faraday::Multipart module. 
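+  # Registering under the :multipart key is what allows connections to
+  # enable the middleware with `f.request :multipart` (see README).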
+  module Multipart
+    Faraday::Request.register_middleware(multipart: Faraday::Multipart::Middleware)
+  end
+
+  # Aliases for Faraday v1, these are all deprecated and will be removed in v2 of this middleware
+  FilePart = Multipart::FilePart
+  ParamPart = Multipart::ParamPart
+  Parts = Multipart::Parts
+  CompositeReadIO = Multipart::CompositeReadIO
+  # multipart-post v2.2.0 introduces a new class hierarchy for classes like Parts and UploadIO
+  # For backwards compatibility, detect the gem version and use the right class
+  UploadIO = if ::Gem::Requirement.new('>= 2.2.0').satisfied_by?(Multipart.multipart_post_version)
+               ::Multipart::Post::UploadIO
+             else
+               ::UploadIO
+             end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/file_part.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/file_part.rb
new file mode 100644
index 0000000..c5ab078
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/file_part.rb
@@ -0,0 +1,130 @@
+# frozen_string_literal: true
+
+require 'stringio'
+
+module Faraday
+  # Rubocop doesn't seem to understand that this is an extension to the
+  # Multipart module, so let's add a nodoc
+  # #:nodoc:
+  module Multipart
+    # Multipart value used to POST binary data from a file or IO object.
+    #
+    # @example
+    #   payload = { file: Faraday::FilePart.new("file_name.ext", "content/type") }
+    #   http.post("/upload", payload)
+    #
+
+    # @!method initialize(filename_or_io, content_type, filename = nil, opts = {})
+    #
+    # @param filename_or_io [String, IO] Either a String filename to a local
+    #   file or an open IO object.
+    # @param content_type [String] String content type of the file data.
+    # @param filename [String] Optional String filename, usually to add context
+    #   to a given IO object.
+    # @param opts [Hash] Optional Hash of String key/value pairs to describe
+    #   this uploaded file. Expected Header keys include:
+    #   * Content-Transfer-Encoding - Defaults to "binary"
+    #   * Content-Disposition - Defaults to "form-data"
+    #   * Content-Type - Defaults to the content_type argument.
+    #   * Content-ID - Optional.
+    #
+    # @return [Faraday::FilePart]
+    #
+    # @!attribute [r] content_type
+    #   The uploaded binary data's content type.
+    #
+    #   @return [String]
+    #
+    # @!attribute [r] original_filename
+    #   The base filename, taken either from the filename_or_io or filename
+    #   arguments in #initialize.
+    #
+    #   @return [String]
+    #
+    # @!attribute [r] opts
+    #   Extra String key/value pairs to make up the header for this uploaded file.
+    #
+    #   @return [Hash]
+    #
+    # @!attribute [r] io
+    #   The open IO object for the uploaded file.
+    #
+    #   @return [IO]
+    if ::Gem::Requirement.new('>= 2.2.0').satisfied_by?(multipart_post_version)
+      require 'multipart/post'
+      FilePart = ::Multipart::Post::UploadIO
+      Parts = ::Multipart::Post::Parts
+    else
+      require 'composite_io'
+      require 'parts'
+      FilePart = ::UploadIO
+      Parts = ::Parts
+    end
+
+    # Similar to, but not compatible with CompositeReadIO provided by the
+    # multipart-post gem.
+    # https://github.com/nicksieger/multipart-post/blob/master/lib/composite_io.rb
+    class CompositeReadIO
+      def initialize(*parts)
+        @parts = parts.flatten
+        @ios = @parts.map(&:to_io)
+        @index = 0
+      end
+
+      # @return [Integer] sum of the lengths of all the parts
+      def length
+        @parts.inject(0) { |sum, part| sum + part.length }
+      end
+
+      # Rewind each of the IOs and reset the index to 0.
+ # + # @return [void] + def rewind + @ios.each(&:rewind) + @index = 0 + end + + # Read from IOs in order until `length` bytes have been received. + # + # @param length [Integer, nil] + # @param outbuf [String, nil] + def read(length = nil, outbuf = nil) + got_result = false + outbuf = outbuf ? (+outbuf).replace('') : +'' + + while (io = current_io) + if (result = io.read(length)) + got_result ||= !result.nil? + result.force_encoding('BINARY') if result.respond_to?(:force_encoding) + outbuf << result + length -= result.length if length + break if length&.zero? + end + advance_io + end + !got_result && length ? nil : outbuf + end + + # Close each of the IOs. + # + # @return [void] + def close + @ios.each(&:close) + end + + def ensure_open_and_readable + # Rubinius compatibility + end + + private + + def current_io + @ios[@index] + end + + def advance_io + @index += 1 + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/middleware.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/middleware.rb new file mode 100644 index 0000000..4084142 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/middleware.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +require 'securerandom' + +module Faraday + module Multipart + # Middleware for supporting multi-part requests. + class Middleware < Faraday::Request::UrlEncoded + DEFAULT_BOUNDARY_PREFIX = '-----------RubyMultipartPost' + + self.mime_type = 'multipart/form-data' + + def initialize(app = nil, options = {}) + super(app) + @options = options + end + + # Checks for files in the payload, otherwise leaves everything untouched. + # + # @param env [Faraday::Env] + def call(env) + match_content_type(env) do |params| + env.request.boundary ||= unique_boundary + env.request_headers[CONTENT_TYPE] += + "; boundary=#{env.request.boundary}" + env.body = create_multipart(env, params) + end + @app.call env + end + + # @param env [Faraday::Env] + def process_request?(env) + type = request_type(env) + env.body.respond_to?(:each_key) && !env.body.empty? && ( + (type.empty? && has_multipart?(env.body)) || + (type == self.class.mime_type) + ) + end + + # Returns true if obj is an enumerable with values that are multipart. + # + # @param obj [Object] + # @return [Boolean] + def has_multipart?(obj) + if obj.respond_to?(:each) + (obj.respond_to?(:values) ? obj.values : obj).each do |val| + return true if val.respond_to?(:content_type) || has_multipart?(val) + end + end + false + end + + # @param env [Faraday::Env] + # @param params [Hash] + def create_multipart(env, params) + boundary = env.request.boundary + parts = process_params(params) do |key, value| + part(boundary, key, value) + end + parts << Faraday::Multipart::Parts::EpiloguePart.new(boundary) + + body = Faraday::Multipart::CompositeReadIO.new(parts) + env.request_headers[Faraday::Env::ContentLength] = body.length.to_s + body + end + + def part(boundary, key, value) + if value.respond_to?(:to_part) + value.to_part(boundary, key) + else + Faraday::Multipart::Parts::Part.new(boundary, key, value) + end + end + + # @return [String] + def unique_boundary + "#{DEFAULT_BOUNDARY_PREFIX}-#{SecureRandom.hex}" + end + + # @param params [Hash] + # @param prefix [String] + # @param pieces [Array] + def process_params(params, prefix = nil, pieces = nil, &block) + params.inject(pieces || []) do |all, (key, value)| + if prefix + key = @options[:flat_encode] ? 
prefix.to_s : "#{prefix}[#{key}]" + end + + case value + when Array + values = value.inject([]) { |a, v| a << [nil, v] } + process_params(values, key, all, &block) + when Hash + process_params(value, key, all, &block) + else + all << block.call(key, value) # rubocop:disable Performance/RedundantBlockCall + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/param_part.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/param_part.rb new file mode 100644 index 0000000..aea18b4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/param_part.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module Faraday + module Multipart + # Multipart value used to POST data with a content type. + class ParamPart + # @param value [String] Uploaded content as a String. + # @param content_type [String] String content type of the value. + # @param content_id [String] Optional String of this value's Content-ID. + # + # @return [Faraday::ParamPart] + def initialize(value, content_type, content_id = nil) + @value = value + @content_type = content_type + @content_id = content_id + end + + # Converts this value to a form part. + # + # @param boundary [String] String multipart boundary that must not exist in + # the content exactly. + # @param key [String] String key name for this value. + # + # @return [Faraday::Parts::Part] + def to_part(boundary, key) + Faraday::Multipart::Parts::Part.new(boundary, key, value, headers) + end + + # Returns a Hash of String key/value pairs. + # + # @return [Hash] + def headers + { + 'Content-Type' => content_type, + 'Content-ID' => content_id + } + end + + # The content to upload. + # + # @return [String] + attr_reader :value + + # The value's content type. + # + # @return [String] + attr_reader :content_type + + # The value's content ID, if given. 
+      #
+      # @return [String, nil]
+      attr_reader :content_id
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/version.rb
new file mode 100644
index 0000000..984d702
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-multipart-1.0.4/lib/faraday/multipart/version.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Faraday
+  # #:nodoc:
+  module Multipart
+    VERSION = '1.0.4'
+
+    def self.multipart_post_version
+      require 'multipart/post/version'
+      ::Gem::Version.new(::Multipart::Post::VERSION)
+    rescue LoadError
+      require 'multipart_post'
+      ::Gem::Version.new(::MultipartPost::VERSION)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/LICENSE.md
new file mode 100644
index 0000000..b7aabc5
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2020 Jan van der Pas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/README.md
new file mode 100644
index 0000000..01068b2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/README.md
@@ -0,0 +1,57 @@
+# Faraday Net::HTTP adapter
+
+This gem is a [Faraday][faraday] adapter for the [Net::HTTP][net-http] library. Faraday is an HTTP client library that provides a common interface over many adapters. Every adapter is defined into its own gem. This gem defines the adapter for `Net::HTTP`, the HTTP library that's included in the standard library of Ruby.
+
+## Installation
+
+Add this line to your application's Gemfile:
+
+```ruby
+gem 'faraday-net_http'
+```
+
+And then execute:
+
+    $ bundle install
+
+Or install it yourself as:
+
+    $ gem install faraday-net_http
+
+## Usage
+
+Configure your Faraday connection to use this adapter like this:
+
+```ruby
+connection = Faraday.new(url, conn_options) do |conn|
+  conn.adapter(:net_http)
+end
+```
+
+For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website].
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems].
+
+## Contributing
+
+Bug reports and pull requests are welcome on [GitHub][repo].
+
+## License
+
+The gem is available as open source under the terms of the [license][license].
+
+## Code of Conduct
+
+Everyone interacting in the Faraday Net::HTTP adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
+
+[faraday]: https://github.com/lostisland/faraday
+[faraday-website]: https://lostisland.github.io/faraday
+[net-http]: https://ruby-doc.org/stdlib-2.7.0/libdoc/net/http/rdoc/Net/HTTP.html
+[rubygems]: https://rubygems.org
+[repo]: https://github.com/lostisland/faraday-net_http
+[license]: https://github.com/lostisland/faraday-net_http/blob/main/LICENSE.md
+[code-of-conduct]: https://github.com/lostisland/faraday-net_http/blob/main/CODE_OF_CONDUCT.md
\ No newline at end of file
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/adapter/net_http.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/adapter/net_http.rb
new file mode 100644
index 0000000..23b33a7
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/adapter/net_http.rb
@@ -0,0 +1,213 @@
+# frozen_string_literal: true
+
+begin
+  require 'net/https'
+rescue LoadError
+  warn 'Warning: no such file to load -- net/https. ' \
+       'Make sure openssl is installed if you want ssl support'
+  require 'net/http'
+end
+require 'zlib'
+
+module Faraday
+  class Adapter
+    class NetHttp < Faraday::Adapter
+      exceptions = [
+        IOError,
+        Errno::EADDRNOTAVAIL,
+        Errno::ECONNABORTED,
+        Errno::ECONNREFUSED,
+        Errno::ECONNRESET,
+        Errno::EHOSTUNREACH,
+        Errno::EINVAL,
+        Errno::ENETUNREACH,
+        Errno::EPIPE,
+        Net::HTTPBadResponse,
+        Net::HTTPHeaderSyntaxError,
+        Net::ProtocolError,
+        SocketError,
+        Zlib::GzipFile::Error
+      ]
+
+      exceptions << ::OpenSSL::SSL::SSLError if defined?(::OpenSSL::SSL::SSLError)
+      exceptions << ::Net::OpenTimeout if defined?(::Net::OpenTimeout)
+
+      NET_HTTP_EXCEPTIONS = exceptions.freeze
+
+      def initialize(app = nil, opts = {}, &block)
+        @ssl_cert_store = nil
+        super(app, opts, &block)
+      end
+
+      def build_connection(env)
+        net_http_connection(env).tap do |http|
+          http.use_ssl = env[:url].scheme == 'https' if http.respond_to?(:use_ssl=)
+          configure_ssl(http, env[:ssl])
+          configure_request(http, env[:request])
+        end
+      end
+
+      def net_http_connection(env)
+        proxy = env[:request][:proxy]
+        port = env[:url].port || (env[:url].scheme == 'https' ?
443 : 80) + if proxy + Net::HTTP.new(env[:url].hostname, port, + proxy[:uri].hostname, proxy[:uri].port, + proxy[:user], proxy[:password]) + else + Net::HTTP.new(env[:url].hostname, port, nil) + end + end + + def call(env) + super + http_response = connection(env) do |http| + begin + perform_request(http, env) + rescue *NET_HTTP_EXCEPTIONS => e + raise Faraday::SSLError, e if defined?(OpenSSL) && e.is_a?(OpenSSL::SSL::SSLError) + + raise Faraday::ConnectionFailed, e + end + end + + save_response(env, http_response.code.to_i, + http_response.body || +'', nil, + http_response.message) do |response_headers| + http_response.each_header do |key, value| + response_headers[key] = value + end + end + + @app.call env + rescue Timeout::Error, Errno::ETIMEDOUT => e + raise Faraday::TimeoutError, e + end + + private + + def create_request(env) + request = Net::HTTPGenericRequest.new \ + env[:method].to_s.upcase, # request method + !!env[:body], # is there request body + env[:method] != :head, # is there response body + env[:url].request_uri, # request uri path + env[:request_headers] # request headers + + if env[:body].respond_to?(:read) + request.body_stream = env[:body] + else + request.body = env[:body] + end + request + end + + def perform_request(http, env) + if env[:request].stream_response? + size = 0 + yielded = false + http_response = request_with_wrapped_block(http, env) do |chunk| + if chunk.bytesize.positive? || size.positive? + yielded = true + size += chunk.bytesize + env[:request].on_data.call(chunk, size) + end + end + env[:request].on_data.call(+'', 0) unless yielded + # Net::HTTP returns something, + # but it's not meaningful according to the docs. + http_response.body = nil + http_response + else + request_with_wrapped_block(http, env) + end + end + + def request_with_wrapped_block(http, env, &block) + if (env[:method] == :get) && !env[:body] + # prefer `get` to `request` because the former handles gzip (ruby 1.9) + request_via_get_method(http, env, &block) + else + request_via_request_method(http, env, &block) + end + end + + def request_via_get_method(http, env, &block) + # Must use Net::HTTP#start and pass it a block otherwise the server's + # TCP socket does not close correctly. + http.start do |opened_http| + opened_http.get env[:url].request_uri, env[:request_headers], &block + end + end + + def request_via_request_method(http, env, &block) + # Must use Net::HTTP#start and pass it a block otherwise the server's + # TCP socket does not close correctly. + http.start do |opened_http| + if block_given? 
+ opened_http.request create_request(env) do |response| + response.read_body(&block) + end + else + opened_http.request create_request(env) + end + end + end + + def configure_ssl(http, ssl) + return unless ssl + + http.verify_mode = ssl_verify_mode(ssl) + http.cert_store = ssl_cert_store(ssl) + + http.cert = ssl[:client_cert] if ssl[:client_cert] + http.key = ssl[:client_key] if ssl[:client_key] + http.ca_file = ssl[:ca_file] if ssl[:ca_file] + http.ca_path = ssl[:ca_path] if ssl[:ca_path] + http.verify_depth = ssl[:verify_depth] if ssl[:verify_depth] + http.ssl_version = ssl[:version] if ssl[:version] + http.min_version = ssl[:min_version] if ssl[:min_version] + http.max_version = ssl[:max_version] if ssl[:max_version] + end + + def configure_request(http, req) + if (sec = request_timeout(:read, req)) + http.read_timeout = sec + end + + if (sec = http.respond_to?(:write_timeout=) && + request_timeout(:write, req)) + http.write_timeout = sec + end + + if (sec = request_timeout(:open, req)) + http.open_timeout = sec + end + + # Only set if Net::Http supports it, since Ruby 2.5. + http.max_retries = 0 if http.respond_to?(:max_retries=) + + @config_block&.call(http) + end + + def ssl_cert_store(ssl) + return ssl[:cert_store] if ssl[:cert_store] + + @ssl_cert_store ||= begin + # Use the default cert store by default, i.e. system ca certs + OpenSSL::X509::Store.new.tap(&:set_default_paths) + end + end + + def ssl_verify_mode(ssl) + ssl[:verify_mode] || begin + if ssl.fetch(:verify, true) + OpenSSL::SSL::VERIFY_PEER + else + OpenSSL::SSL::VERIFY_NONE + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http.rb new file mode 100644 index 0000000..1416e07 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http.rb @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +require_relative 'adapter/net_http' +require_relative 'net_http/version' + +module Faraday + module NetHttp + Faraday::Adapter.register_middleware(net_http: Faraday::Adapter::NetHttp) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http/version.rb new file mode 100644 index 0000000..9eb6a77 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http-1.0.1/lib/faraday/net_http/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module NetHttp + VERSION = '1.0.1' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the 
Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/README.md new file mode 100644 index 0000000..bb846f8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/README.md @@ -0,0 +1,73 @@ +# Faraday::NetHttpPersistent + +[![Gem Version](https://badge.fury.io/rb/faraday-net_http_persistent.svg)](https://rubygems.org/gems/faraday-net_http_persistent) +[![GitHub Actions CI](https://github.com/lostisland/faraday-net_http_persistent/workflows/CI/badge.svg)](https://github.com/lostisland/faraday-net_http_persistent/actions?query=workflow%3ACI) + +This gem is a [Faraday][faraday] adapter for the [Net::HTTP::Persistent gem][net-http-persistent]. + +## Installation + +Add these lines to your application's Gemfile: + +```ruby +gem 'faraday-net_http_persistent' +gem 'net-http-persistent', '>= 3.1' +``` + +And then execute: + + $ bundle + +Or install them yourself as: + + $ gem install net-http-persistent -v '>= 3.1' + $ gem install faraday-net_http_persistent + +## Usage + +Configure your Faraday connection to use this adapter instead of the default one: + +```ruby +connection = Faraday.new(url, conn_options) do |conn| + # Your other middleware goes here... + conn.adapter :net_http_persistent +end +``` + +For more information on how to set up your Faraday connection and use adapters, +please refer to the [Faraday Website][faraday-website]. + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. +Then, run `rake spec` to run the tests. You can also run `bin/console` +for an interactive prompt that will allow you to experiment. + +To install this gem onto your local machine, run `bundle exec rake install`. +To release a new version, update the version number in `version.rb`, +and then run `bundle exec rake release`, which will create a git tag for the version, +push git commits and tags, and push the `.gem` file to [rubygems.org]. + +## Contributing + +Bug reports and pull requests are welcome on GitHub at https://github.com/lostisland/faraday-net_http_persistent. +This project is intended to be a safe, welcoming space for collaboration, +and contributors are expected to adhere to the [Contributor Covenant][covenant] code of conduct. + +## License + +The gem is available as open source under the terms of the [MIT License][mit-license]. + +## Code of Conduct + +This project is intended to be a safe, welcoming space for collaboration. +Everyone interacting in the Faraday::NetHttpPersistent project’s codebases, issue trackers, +chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
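
One option the adapter understands that the README above does not show is `pool_size`, which the adapter code later in this diff forwards to `Net::HTTP::Persistent` when the installed version accepts keyword arguments. A minimal sketch, with an illustrative pool size:

```ruby
require 'faraday'
require 'faraday/net_http_persistent'

connection = Faraday.new('https://example.com') do |conn|
  # Forwarded as Net::HTTP::Persistent.new(name: 'Faraday', pool_size: 5)
  # on net-http-persistent versions with keyword initializers; silently
  # ignored by older versions, which only take a name.
  conn.adapter :net_http_persistent, pool_size: 5
end

connection.get('/')
```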
+ +[code-of-conduct]: https://github.com/lostisland/faraday-http/blob/main/CODE_OF_CONDUCT.md +[covenant]: http://contributor-covenant.org +[faraday]: https://github.com/lostisland/faraday +[faraday-website]: https://lostisland.github.io/faraday +[net-http-persistent]: https://github.com/drbrain/net-http-persistent +[mit-license]: https://opensource.org/licenses/MIT +[rubygems.org]: https://rubygems.org diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/adapter/net_http_persistent.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/adapter/net_http_persistent.rb new file mode 100644 index 0000000..e978bbc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/adapter/net_http_persistent.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # Net::HTTP::Persistent adapter. + class NetHttpPersistent < NetHttp + dependency "net/http/persistent" + + private + + def net_http_connection(env) + @cached_connection ||= + if Net::HTTP::Persistent.instance_method(:initialize) + .parameters.first == %i[key name] + options = {name: "Faraday"} + if @connection_options.key?(:pool_size) + options[:pool_size] = @connection_options[:pool_size] + end + Net::HTTP::Persistent.new(**options) + else + Net::HTTP::Persistent.new("Faraday") + end + + proxy_uri = proxy_uri(env) + if @cached_connection.proxy_uri != proxy_uri + @cached_connection.proxy = proxy_uri + end + @cached_connection + end + + def proxy_uri(env) + proxy_uri = nil + if (proxy = env[:request][:proxy]) + proxy_uri = if proxy[:uri].is_a?(::URI::HTTP) + proxy[:uri].dup + else + ::URI.parse(proxy[:uri].to_s) + end + proxy_uri.user = proxy_uri.password = nil + # awful patch for net-http-persistent 2.8 + # not unescaping user/password + if proxy[:user] + (class << proxy_uri; self; end).class_eval do + define_method(:user) { proxy[:user] } + define_method(:password) { proxy[:password] } + end + end + end + proxy_uri + end + + def perform_request(http, env) + if env[:request].stream_response? + size = 0 + yielded = false + + http_response = http.request(env[:url], create_request(env)) do |response| + response.read_body do |chunk| + if chunk.bytesize.positive? || size.positive? + yielded = true + size += chunk.bytesize + env[:request].on_data.call(chunk, size) + end + end + end + + env[:request].on_data.call(+"", 0) unless yielded + http_response.body = nil + http_response + else + http.request(env[:url], create_request(env)) + end + rescue Errno::ETIMEDOUT, Net::OpenTimeout => e + raise Faraday::TimeoutError, e + rescue Net::HTTP::Persistent::Error => e + raise Faraday::TimeoutError, e if e.message.include? "Timeout" + + if e.message.include? 
"connection refused" + raise Faraday::ConnectionFailed, e + end + + raise + end + + SSL_CONFIGURATIONS = { + certificate: :client_cert, + private_key: :client_key, + ca_file: :ca_file, + ssl_version: :version, + min_version: :min_version, + max_version: :max_version + }.freeze + + def configure_ssl(http, ssl) + return unless ssl + + http_set(http, :verify_mode, ssl_verify_mode(ssl)) + http_set(http, :cert_store, ssl_cert_store(ssl)) + + SSL_CONFIGURATIONS + .select { |_, key| ssl[key] } + .each { |target, key| http_set(http, target, ssl[key]) } + end + + def http_set(http, attr, value) + http.send("#{attr}=", value) if http.send(attr) != value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent.rb new file mode 100644 index 0000000..895a3b7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require_relative "adapter/net_http_persistent" +require_relative "net_http_persistent/version" + +module Faraday + module NetHttpPersistent + # Faraday allows you to register your middleware for easier configuration. + # This step is totally optional, but it basically allows users to use a custom symbol (in this case, `:net_http_persistent`), + # to use your adapter in their connections. + # After calling this line, the following are both valid ways to set the adapter in a connection: + # * conn.adapter Faraday::Adapter::NetNttpPersistent + # * conn.adapter :net_http_persistent + # Without this line, only the former method is valid. + Faraday::Adapter.register_middleware(net_http_persistent: Faraday::Adapter::NetHttpPersistent) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent/version.rb new file mode 100644 index 0000000..c87ea2c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-net_http_persistent-1.2.0/lib/faraday/net_http_persistent/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module NetHttpPersistent + VERSION = "1.2.0" + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/README.md new file mode 100644 index 0000000..015cad7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/README.md @@ -0,0 +1,62 @@ +# Faraday Patron adapter + +This gem is a [Faraday][faraday] adapter for the [Patron][patron] library. +Faraday is an HTTP client library that provides a common interface over many adapters. +Every adapter is defined in its own gem. This gem defines the adapter for Patron. + +## Installation + +Add these lines to your application's Gemfile: + +```ruby +gem 'patron', '>= 2.2' +gem 'faraday' +gem 'faraday-patron' +``` + +And then execute: + + $ bundle install + +Or install them yourself as: + + $ gem install patron -v '>= 2.2' + $ gem install faraday faraday-patron + +## Usage + +Configure your Faraday connection to use this adapter like this: + +```ruby +connection = Faraday.new(url, conn_options) do |conn| + conn.adapter(:patron) +end +``` + +For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website]. + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems]. + +## Contributing + +Bug reports and pull requests are welcome on [GitHub][repo]. + +## License + +The gem is available as open source under the terms of the [license][license]. + +## Code of Conduct + +Everyone interacting in the Faraday Patron adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct].
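
The adapter also yields the underlying `Patron::Session` to a configuration block (via `@config_block` in the adapter code later in this diff), so session-level settings can be tuned inline. A minimal sketch, with illustrative values:

```ruby
require 'faraday'

connection = Faraday.new('https://example.com') do |conn|
  conn.adapter :patron do |session|
    # The block receives the Patron::Session when the connection is built
    session.timeout = 10
    session.max_redirects = 3
  end
end
```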
+ +[faraday]: https://github.com/lostisland/faraday +[faraday-website]: https://lostisland.github.io/faraday +[patron]: https://github.com/toland/patron +[rubygems]: https://rubygems.org +[repo]: https://github.com/lostisland/faraday-patron +[license]: https://github.com/lostisland/faraday-patron/blob/main/LICENSE.md +[code-of-conduct]: https://github.com/lostisland/faraday-patron/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/adapter/patron.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/adapter/patron.rb new file mode 100644 index 0000000..a762ac1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/adapter/patron.rb @@ -0,0 +1,132 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # Patron adapter + class Patron < Faraday::Adapter + dependency 'patron' + + def build_connection(env) + session = ::Patron::Session.new + @config_block&.call(session) + if (env[:url].scheme == 'https') && env[:ssl] + configure_ssl(session, env[:ssl]) + end + + if (req = env[:request]) + configure_timeouts(session, req) + configure_proxy(session, req[:proxy]) + end + + session + end + + def call(env) + super + # TODO: support streaming requests + env[:body] = env[:body].read if env[:body].respond_to? :read + + response = connection(env) do |session| + begin + data = env[:body] ? env[:body].to_s : nil + session.request(env[:method], env[:url].to_s, + env[:request_headers], data: data) + rescue Errno::ECONNREFUSED, ::Patron::ConnectionFailed + raise Faraday::ConnectionFailed, $ERROR_INFO + end + end + + if (req = env[:request]).stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' + req.on_data.call(response.body, response.body.bytesize) + end + # Remove the "HTTP/1.1 200", leaving just the reason phrase + reason_phrase = response.status_line.gsub(/^.* \d{3} /, '') + + save_response(env, response.status, response.body, + response.headers, reason_phrase) + + @app.call env + rescue ::Patron::TimeoutError => e + if connection_timed_out_message?(e.message) + raise Faraday::ConnectionFailed, e + end + + raise Faraday::TimeoutError, e + rescue ::Patron::Error => e + if e.message.include?('code 407') + raise Faraday::ConnectionFailed, + %(407 "Proxy Authentication Required ") + end + + raise Faraday::ConnectionFailed, e + end + + if loaded? && defined?(::Patron::Request::VALID_ACTIONS) + # HAX: helps but doesn't work completely + # https://github.com/toland/patron/issues/34 + ::Patron::Request::VALID_ACTIONS.tap do |actions| + if actions[0].is_a?(Symbol) + actions << :patch unless actions.include? :patch + actions << :options unless actions.include? :options + else + # Patron 0.4.20 and up + actions << 'PATCH' unless actions.include? 'PATCH' + actions << 'OPTIONS' unless actions.include? 
'OPTIONS' + end + end + end + + def configure_ssl(session, ssl) + if ssl.fetch(:verify, true) + session.cacert = ssl[:ca_file] + else + session.insecure = true + end + end + + def configure_timeouts(session, req) + return unless req + + if (sec = request_timeout(:read, req)) + session.timeout = sec + end + + return unless (sec = request_timeout(:open, req)) + + session.connect_timeout = sec + end + + def configure_proxy(session, proxy) + return unless proxy + + proxy_uri = proxy[:uri].dup + proxy_uri.user = proxy[:user] && + Utils.escape(proxy[:user]).gsub('+', '%20') + proxy_uri.password = proxy[:password] && + Utils.escape(proxy[:password]).gsub('+', '%20') + session.proxy = proxy_uri.to_s + end + + private + + CURL_TIMEOUT_MESSAGES = [ + 'Connection time-out', + 'Connection timed out', + 'Timed out before name resolve', + 'server connect has timed out', + 'Resolving timed out', + 'name lookup timed out', + 'timed out before SSL', + 'connect() timed out' + ].freeze + + def connection_timed_out_message?(message) + CURL_TIMEOUT_MESSAGES.any? do |curl_message| + message.include?(curl_message) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron.rb new file mode 100644 index 0000000..70bde8f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require_relative 'adapter/patron' +require_relative 'patron/version' + +module Faraday + # Main Faraday::Patron module + module Patron + Faraday::Adapter.register_middleware(patron: Faraday::Adapter::Patron) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron/version.rb new file mode 100644 index 0000000..eb13e9d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-patron-1.0.0/lib/faraday/patron/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module Patron + VERSION = '1.0.0' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/LICENSE.md new file mode 100644 index 0000000..b7aabc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2020 Jan van der Pas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
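
A note on the Patron adapter above: curl-level timeout messages (see `CURL_TIMEOUT_MESSAGES`) are translated into `Faraday::ConnectionFailed` or `Faraday::TimeoutError`, so calling code can rescue Faraday's exception classes regardless of the backend. A minimal sketch of relying on that mapping (URL and timeout values are illustrative):

```ruby
require 'faraday'

conn = Faraday.new('https://example.com') do |f|
  f.options.timeout = 5       # mapped to session.timeout by configure_timeouts
  f.options.open_timeout = 2  # mapped to session.connect_timeout
  f.adapter :patron
end

begin
  conn.get('/slow-endpoint')
rescue Faraday::TimeoutError
  warn 'request timed out'
rescue Faraday::ConnectionFailed => e
  warn "connection failed: #{e.message}"
end
```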
diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/README.md new file mode 100644 index 0000000..afe671e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/README.md @@ -0,0 +1,61 @@ +# Faraday Rack adapter + +This gem is a [Faraday][faraday] adapter for a [Rack][rack] app. +Faraday is an HTTP client library that provides a common interface over many adapters. +Every adapter is defined in its own gem. This gem defines the adapter for a Rack app. + +## Installation + +Add these lines to your application's Gemfile: + +```ruby +gem 'rack' +gem 'faraday' +gem 'faraday-rack' +``` + +And then execute: + + $ bundle install + +Or install them yourself as: + + $ gem install rack faraday faraday-rack + +## Usage + +Configure your Faraday connection to use this adapter like this: + +```ruby +connection = Faraday.new(url, conn_options) do |conn| + conn.adapter(:rack) +end +``` + +For more information on how to set up your Faraday connection and use adapters, please refer to the [Faraday Website][faraday-website]. + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. Then, run `bin/test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. + +To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org][rubygems]. + +## Contributing + +Bug reports and pull requests are welcome on [GitHub][repo]. + +## License + +The gem is available as open source under the terms of the [license][license]. + +## Code of Conduct + +Everyone interacting in the Faraday Rack adapter project's codebase, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct][code-of-conduct]. + +[faraday]: https://github.com/lostisland/faraday +[faraday-website]: https://lostisland.github.io/faraday +[rack]: https://github.com/rack/rack +[rubygems]: https://rubygems.org +[repo]: https://github.com/lostisland/faraday-rack +[license]: https://github.com/lostisland/faraday-rack/blob/main/LICENSE.md +[code-of-conduct]: https://github.com/lostisland/faraday-rack/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/adapter/rack.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/adapter/rack.rb new file mode 100644 index 0000000..2e322c0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/adapter/rack.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module Faraday + class Adapter + # Sends requests to a Rack app. + # + # @example + # + # class MyRackApp + # def call(env) + # [200, {'Content-Type' => 'text/html'}, ["hello world"]] + # end + # end + # + # Faraday.new do |conn| + # conn.adapter :rack, MyRackApp.new + # end + class Rack < Faraday::Adapter + dependency 'rack/test' + + # not prefixed with "HTTP_" + SPECIAL_HEADERS = %w[CONTENT_LENGTH CONTENT_TYPE].freeze + + def initialize(faraday_app, rack_app) + super(faraday_app) + mock_session = ::Rack::MockSession.new(rack_app) + @session = ::Rack::Test::Session.new(mock_session) + end + + def call(env) + super + rack_env = build_rack_env(env) + + env[:request_headers]&.each do |name, value| + name = name.upcase.tr('-', '_') + name = "HTTP_#{name}" unless SPECIAL_HEADERS.include?
name + rack_env[name] = value + end + + timeout = request_timeout(:open, env[:request]) + timeout ||= request_timeout(:read, env[:request]) + response = if timeout + Timer.timeout(timeout, Faraday::TimeoutError) do + execute_request(env, rack_env) + end + else + execute_request(env, rack_env) + end + + if (req = env[:request]).stream_response? + warn "Streaming downloads for #{self.class.name} " \ + 'are not yet implemented.' + req.on_data.call(response.body, response.body.bytesize) + end + + save_response(env, response.status, response.body, response.headers) + @app.call env + end + + private + + def execute_request(env, rack_env) + @session.request(env[:url].to_s, rack_env) + end + + def build_rack_env(env) + { + method: env[:method], + input: env[:body].respond_to?(:read) ? env[:body].read : env[:body], + 'rack.url_scheme' => env[:url].scheme + } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack.rb new file mode 100644 index 0000000..d0ca10a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require 'faraday/adapter/rack' +require 'faraday/rack/version' + +module Faraday + # Main Faraday::Rack module + module Rack + Faraday::Adapter.register_middleware(rack: Faraday::Adapter::Rack) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack/version.rb new file mode 100644 index 0000000..dd5dca0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-rack-1.0.0/lib/faraday/rack/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module Rack + VERSION = '1.0.0' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/CHANGELOG.md b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/CHANGELOG.md new file mode 100644 index 0000000..b24555c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## v1.0 + +Initial release. +This release consists of the same middleware that was previously bundled with Faraday but removed in Faraday v2.0, plus: + +### Fixed + +* Retry middleware `retry_block` is not called if retry will not happen due to `max_interval`, https://github.com/lostisland/faraday/pull/1350 diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/LICENSE.md new file mode 100644 index 0000000..389e644 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2021 Mattia Giuffrida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/README.md new file mode 100644 index 0000000..884410b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/README.md @@ -0,0 +1,161 @@ +# Faraday Retry + +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/lostisland/faraday-retry/CI)](https://github.com/lostisland/faraday-retry/actions?query=branch%3Amain) +[![Gem](https://img.shields.io/gem/v/faraday-retry.svg?style=flat-square)](https://rubygems.org/gems/faraday-retry) +[![License](https://img.shields.io/github/license/lostisland/faraday-retry.svg?style=flat-square)](LICENSE.md) + +The `Retry` middleware automatically retries requests that fail due to intermittent client +or server errors (such as network hiccups). +By default, it retries 2 times and handles only timeout exceptions. +It can be configured with an arbitrary number of retries, a list of exceptions to handle, +a retry interval, a percentage of randomness to add to the retry interval, and a backoff factor. +The middleware can also handle the [`Retry-After`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After) +header automatically when configured with the right status codes (see below for an example). + +## Installation + +Add this line to your application's Gemfile: + +```ruby +gem 'faraday-retry' +``` + +And then execute: + +```shell +bundle install +``` + +Or install it yourself as: + +```shell +gem install faraday-retry +``` + +## Usage + +This example will result in a first interval that is random between 0.05 and 0.075 +and a second interval that is random between 0.1 and 0.125. + +```ruby +require 'faraday' +require 'faraday/retry' + +retry_options = { + max: 2, + interval: 0.05, + interval_randomness: 0.5, + backoff_factor: 2 +} + +conn = Faraday.new(...) do |f| + f.request :retry, retry_options + #... +end + +conn.get('/') +``` + +### Control when the middleware will retry requests + +By default, the `Retry` middleware will only retry idempotent methods and the most common network-related exceptions. +You can change this behaviour by providing the right option when adding the middleware to your connection. + +#### Specify which methods will be retried + +You can provide a `methods` option with a list of HTTP methods. +This will replace the default list of HTTP methods: `delete`, `get`, `head`, `options`, `put`. + +```ruby +retry_options = { + methods: %i[get post] +} +``` + +#### Specify which exceptions should trigger a retry + +You can provide an `exceptions` option with a list of exceptions that will replace +the default list of network-related exceptions: `Errno::ETIMEDOUT`, `Timeout::Error`, `Faraday::TimeoutError`. +This can be particularly useful when combined with the [RaiseError][raise_error] middleware. 
+ +```ruby +retry_options = { + exceptions: [Faraday::ResourceNotFound, Faraday::UnauthorizedError] +} +``` + +#### Specify on which response statuses to retry + +By default, the `Retry` middleware will only retry the request if one of the expected exceptions arises. +However, you can specify a list of HTTP statuses you'd like to be retried. When you do so, the middleware will +check the response `status` code and will retry the request if it is included in the list. + +```ruby +retry_options = { + retry_statuses: [401, 409] +} +``` + +#### Automatically handle the `Retry-After` header + +Some APIs, like the [Slack API](https://api.slack.com/docs/rate-limits), will inform you when you reach their API limits by replying with a response status code of `429` and a response header of `Retry-After` containing a time in seconds. You should then only retry querying after the amount of time provided by the `Retry-After` header, otherwise you won't get a response. + +You can automatically handle this and have Faraday pause and retry for the right amount of time by including the `429` status code in the retry statuses list: + +```ruby +retry_options = { + retry_statuses: [429] +} +``` + +#### Specify custom retry logic + +You can also specify custom retry logic with the `retry_if` option. +This option accepts a block that will receive the `env` object and the exception raised +and should decide whether the action should still be retried, independently of the retry count. +This is useful if the exception produced is non-recoverable or if the HTTP method called is not idempotent. + +**NOTE:** this option will only be used for methods that are not included in the `methods` option. +If you want this to apply to all HTTP methods, pass `methods: []` as an additional option. + +```ruby +# Retries the request if response contains { success: false } +retry_options = { + retry_if: -> (env, _exc) { env.body[:success] == 'false' } +} +``` + +### Call a block on every retry + +You can specify a block through the `retry_block` option that will be called before every retry. +There are many different applications for this feature, ranging from instrumentation to monitoring. +The request environment, middleware options, current number of retries and the exception are passed to the block as parameters. +For example, you might want to keep track of the response statuses: + +```ruby +response_statuses = [] +retry_options = { + retry_block: -> (env, options, retries, exc) { response_statuses << env.status } +} +``` + +## Development + +After checking out the repo, run `bin/setup` to install dependencies. + +Then, run `bin/test` to run the tests. + +To install this gem onto your local machine, run `rake build`. + +To release a new version, make a commit with a message such as "Bumped to 0.0.2" and then run `rake release`. +See how it works [here](https://bundler.io/guides/creating_gem.html#releasing-the-gem). + +## Contributing + +Bug reports and pull requests are welcome on [GitHub](https://github.com/lostisland/faraday-retry). + +## License + +The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
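
Pulling the options above together, a sketch of a connection that backs off exponentially, honors `Retry-After` on HTTP 429, and logs every retry (the URL and values are illustrative):

```ruby
require 'faraday'
require 'faraday/retry'

retry_options = {
  max: 3,
  interval: 0.5,
  backoff_factor: 2,      # 0.5s, 1s, 2s between attempts
  retry_statuses: [429],  # also enables Retry-After handling
  retry_block: ->(env, _options, retries, exception) {
    warn "retrying #{env.url} (#{retries} attempts left): #{exception.class}"
  }
}

conn = Faraday.new('https://api.example.com') do |f|
  f.request :retry, retry_options
end

conn.get('/rate-limited-endpoint')
```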
+ +[raise_error]: https://lostisland.github.io/faraday/middleware/raise-error diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retriable_response.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retriable_response.rb new file mode 100644 index 0000000..115e2c2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retriable_response.rb @@ -0,0 +1,8 @@ +# frozen_string_literal: true + +# Faraday namespace. +module Faraday + # Exception used to control the Retry middleware. + class RetriableResponse < Error + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry.rb new file mode 100644 index 0000000..a488305 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require_relative 'retriable_response' +require_relative 'retry/middleware' +require_relative 'retry/version' + +module Faraday + # Middleware main module. + module Retry + Faraday::Request.register_middleware(retry: Faraday::Retry::Middleware) + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/middleware.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/middleware.rb new file mode 100644 index 0000000..6087e6e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/middleware.rb @@ -0,0 +1,230 @@ +# frozen_string_literal: true + +module Faraday + module Retry + # This class provides the main implementation for your middleware. + # Your middleware can implement any of the following methods: + # * on_request - called when the request is being prepared + # * on_complete - called when the response is being processed + # + # Optionally, you can also override the following methods from Faraday::Middleware + # * initialize(app, options = {}) - the initializer method + # * call(env) - the main middleware invocation method. + # This already calls on_request and on_complete, so you normally don't need to override it. + # You may need to in case you need to "wrap" the request or need more control + # (see "retry" middleware: https://github.com/lostisland/faraday/blob/main/lib/faraday/request/retry.rb#L142). + # IMPORTANT: Remember to call `@app.call(env)` or `super` to not interrupt the middleware chain! + class Middleware < Faraday::Middleware + DEFAULT_EXCEPTIONS = [ + Errno::ETIMEDOUT, 'Timeout::Error', + Faraday::TimeoutError, Faraday::RetriableResponse + ].freeze + IDEMPOTENT_METHODS = %i[delete get head options put].freeze + + # Options contains the configurable parameters for the Retry middleware. 
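# Illustrative note (editorial, not part of the upstream source): thanks to
# the Integer shortcut in `Options.from` below, `Options.from(3)` behaves the
# same as `Options.from(max: 3)`, so `f.request :retry, 3` is a quick way to
# set only the retry cap.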
+ class Options < Faraday::Options.new(:max, :interval, :max_interval, + :interval_randomness, + :backoff_factor, :exceptions, + :methods, :retry_if, :retry_block, + :retry_statuses) + + DEFAULT_CHECK = ->(_env, _exception) { false } + + def self.from(value) + if value.is_a?(Integer) + new(value) + else + super(value) + end + end + + def max + (self[:max] ||= 2).to_i + end + + def interval + (self[:interval] ||= 0).to_f + end + + def max_interval + (self[:max_interval] ||= Float::MAX).to_f + end + + def interval_randomness + (self[:interval_randomness] ||= 0).to_f + end + + def backoff_factor + (self[:backoff_factor] ||= 1).to_f + end + + def exceptions + Array(self[:exceptions] ||= DEFAULT_EXCEPTIONS) + end + + def methods + Array(self[:methods] ||= IDEMPOTENT_METHODS) + end + + def retry_if + self[:retry_if] ||= DEFAULT_CHECK + end + + def retry_block + self[:retry_block] ||= proc {} # rubocop:disable Lint/EmptyBlock + end + + def retry_statuses + Array(self[:retry_statuses] ||= []) + end + end + + # @param app [#call] + # @param options [Hash] + # @option options [Integer] :max (2) Maximum number of retries + # @option options [Integer] :interval (0) Pause in seconds between retries + # @option options [Integer] :interval_randomness (0) The maximum random + # interval amount expressed as a float between + # 0 and 1 to use in addition to the interval. + # @option options [Integer] :max_interval (Float::MAX) An upper limit + # for the interval + # @option options [Integer] :backoff_factor (1) The amount to multiply + # each successive retry's interval amount by in order to provide backoff + # @option options [Array] :exceptions ([ Errno::ETIMEDOUT, + # 'Timeout::Error', Faraday::TimeoutError, Faraday::RetriableResponse]) + # The list of exceptions to handle. Exceptions can be given as + # Class, Module, or String. + # @option options [Array] :methods (the idempotent HTTP methods + # in IDEMPOTENT_METHODS) A list of HTTP methods to retry without + # calling retry_if. Pass an empty Array to call retry_if + # for all exceptions. + # @option options [Block] :retry_if (false) block that will receive + # the env object and the exception raised + # and should decide whether the action should still be retried, + # independently of the retry count. This is useful + # if the exception produced is non-recoverable or if the + # HTTP method called is not idempotent. + # @option options [Block] :retry_block block that is executed before + # every retry. The request environment, middleware options, current number + # of retries and the exception are passed to the block as parameters. + # @option options [Array] :retry_statuses Array of Integer HTTP status + # codes or a single Integer value that determines whether to raise + # a Faraday::RetriableResponse exception based on the HTTP status code + # of an HTTP response.
+ def initialize(app, options = nil) + super(app) + @options = Options.from(options) + @errmatch = build_exception_matcher(@options.exceptions) + end + + def calculate_sleep_amount(retries, env) + retry_after = calculate_retry_after(env) + retry_interval = calculate_retry_interval(retries) + + return if retry_after && retry_after > @options.max_interval + + if retry_after && retry_after >= retry_interval + retry_after + else + retry_interval + end + end + + # @param env [Faraday::Env] + def call(env) + retries = @options.max + request_body = env[:body] + begin + # after failure env[:body] is set to the response body + env[:body] = request_body + @app.call(env).tap do |resp| + raise Faraday::RetriableResponse.new(nil, resp) if @options.retry_statuses.include?(resp.status) + end + rescue @errmatch => e + if retries.positive? && retry_request?(env, e) + retries -= 1 + rewind_files(request_body) + if (sleep_amount = calculate_sleep_amount(retries + 1, env)) + @options.retry_block.call(env, @options, retries, e) + sleep sleep_amount + retry + end + end + + raise unless e.is_a?(Faraday::RetriableResponse) + + e.response + end + end + + # An exception matcher for the rescue clause can usually be any object + # that responds to `===`, but for Ruby 1.8 it has to be a Class or Module. + # + # @param exceptions [Array] + # @api private + # @return [Module] an exception matcher + def build_exception_matcher(exceptions) + matcher = Module.new + ( + class << matcher + self + end).class_eval do + define_method(:===) do |error| + exceptions.any? do |ex| + if ex.is_a? Module + error.is_a? ex + else + Object.const_defined?(ex.to_s) && error.is_a?(Object.const_get(ex.to_s)) + end + end + end + end + matcher + end + + private + + def retry_request?(env, exception) + @options.methods.include?(env[:method]) || + @options.retry_if.call(env, exception) + end + + def rewind_files(body) + return unless defined?(UploadIO) + return unless body.is_a?(Hash) + + body.each do |_, value| + value.rewind if value.is_a?(UploadIO) + end + end + + # MDN spec for Retry-After header: + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After + def calculate_retry_after(env) + response_headers = env[:response_headers] + return unless response_headers + + retry_after_value = env[:response_headers]['Retry-After'] + + # Try to parse date from the header value + begin + datetime = DateTime.rfc2822(retry_after_value) + datetime.to_time - Time.now.utc + rescue ArgumentError + retry_after_value.to_f + end + end + + def calculate_retry_interval(retries) + retry_index = @options.max - retries + current_interval = @options.interval * + (@options.backoff_factor**retry_index) + current_interval = [current_interval, @options.max_interval].min + random_interval = rand * @options.interval_randomness.to_f * + @options.interval + + current_interval + random_interval + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/version.rb new file mode 100644 index 0000000..fc0920e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday-retry-1.0.3/lib/faraday/retry/version.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module Faraday + module Retry + VERSION = '1.0.3' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/LICENSE.md b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/LICENSE.md new file mode 100644 index 0000000..242ddbf --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/LICENSE.md @@ -0,0 +1,20 @@ +Copyright (c) 2011 Erik Michaels-Ober, Wynn Netherland, et al. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/README.md b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/README.md new file mode 100644 index 0000000..3f77e89 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/README.md @@ -0,0 +1,48 @@ +Faraday Middleware +================== +[![Gem Version](https://badge.fury.io/rb/faraday_middleware.svg)](https://rubygems.org/gems/faraday_middleware) +![GitHub Actions CI](https://github.com/lostisland/faraday_middleware/workflows/CI/badge.svg) + +A collection of useful [Faraday][] middleware. [See the documentation][docs]. + + gem install faraday_middleware + +Dependencies +------------ + +Ruby >= 2.3.0 + +#### As of v0.16.0, `faraday` and `faraday_middleware` no longer officially support JRuby or Rubinius. + +Some dependent libraries are needed only when using specific middleware: + +| Middleware | Library | Notes | +| --------------------------- | -------------- | ----- | +| [FaradayMiddleware::Instrumentation](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/instrumentation.rb) | [`activesupport`](https://rubygems.org/gems/activesupport) | | +| [FaradayMiddleware::OAuth](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/request/oauth.rb) | [`simple_oauth`](https://rubygems.org/gems/simple_oauth) | | +| [FaradayMiddleware::ParseXml](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/response/parse_xml.rb) | [`multi_xml`](https://rubygems.org/gems/multi_xml) | | +| [FaradayMiddleware::ParseYaml](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/response/parse_yaml.rb) | [`safe_yaml`](https://rubygems.org/gems/safe_yaml) | Not backwards compatible with versions of this middleware prior to `faraday_middleware` v0.12. See code comments for alternatives. | +| [FaradayMiddleware::Mashify](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/response/mashify.rb) | [`hashie`](https://rubygems.org/gems/hashie) | | +| [FaradayMiddleware::Rashify](https://github.com/lostisland/faraday_middleware/blob/main/lib/faraday_middleware/response/rashify.rb) | [`rash_alt`](https://rubygems.org/gems/rash_alt) | Make sure to uninstall original `rash` gem to avoid conflict. 
| + +Examples +-------- + +``` rb +require 'faraday_middleware' + +connection = Faraday.new 'http://example.com/api' do |conn| + conn.request :oauth2, 'TOKEN' + conn.request :json + + conn.response :xml, content_type: /\bxml$/ + conn.response :json, content_type: /\bjson$/ + + conn.use :instrumentation + conn.adapter Faraday.default_adapter +end +``` + + + [faraday]: https://github.com/lostisland/faraday#readme + [docs]: https://github.com/lostisland/faraday_middleware/wiki diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware.rb new file mode 100644 index 0000000..4cf0a80 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +require 'faraday' + +# Main FaradayMiddleware module. +module FaradayMiddleware + autoload :OAuth, 'faraday_middleware/request/oauth' + autoload :OAuth2, 'faraday_middleware/request/oauth2' + autoload :EncodeJson, 'faraday_middleware/request/encode_json' + autoload :MethodOverride, 'faraday_middleware/request/method_override' + autoload :Mashify, 'faraday_middleware/response/mashify' + autoload :Rashify, 'faraday_middleware/response/rashify' + autoload :ParseJson, 'faraday_middleware/response/parse_json' + autoload :ParseXml, 'faraday_middleware/response/parse_xml' + autoload :ParseMarshal, 'faraday_middleware/response/parse_marshal' + autoload :ParseYaml, 'faraday_middleware/response/parse_yaml' + autoload :ParseDates, 'faraday_middleware/response/parse_dates' + autoload :Caching, 'faraday_middleware/response/caching' + autoload :Chunked, 'faraday_middleware/response/chunked' + autoload :RackCompatible, 'faraday_middleware/rack_compatible' + autoload :RedirectLimitReached, 'faraday_middleware/redirect_limit_reached' + autoload :FollowRedirects, 'faraday_middleware/response/follow_redirects' + autoload :Instrumentation, 'faraday_middleware/instrumentation' + autoload :Gzip, 'faraday_middleware/gzip' + + if Faraday::Middleware.respond_to? :register_middleware + Faraday::Request.register_middleware \ + oauth: -> { OAuth }, + oauth2: -> { OAuth2 }, + json: -> { EncodeJson }, + method_override: -> { MethodOverride } + + Faraday::Response.register_middleware \ + mashify: -> { Mashify }, + rashify: -> { Rashify }, + json: -> { ParseJson }, + json_fix: -> { ParseJson::MimeTypeFix }, + xml: -> { ParseXml }, + marshal: -> { ParseMarshal }, + yaml: -> { ParseYaml }, + dates: -> { ParseDates }, + caching: -> { Caching }, + follow_redirects: -> { FollowRedirects }, + chunked: -> { Chunked } + + Faraday::Middleware.register_middleware \ + instrumentation: -> { Instrumentation }, + gzip: -> { Gzip } + end +end + +require 'faraday_middleware/backwards_compatibility' diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/backwards_compatibility.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/backwards_compatibility.rb new file mode 100644 index 0000000..b5cbd29 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/backwards_compatibility.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +module Faraday + # Autoload classes for Faraday::Request. + class Request + autoload :OAuth, 'faraday_middleware/request/oauth' + autoload :OAuth2, 'faraday_middleware/request/oauth2' + end + + # Autoload classes for Faraday::Response.
+ class Response + autoload :Mashify, 'faraday_middleware/response/mashify' + autoload :Rashify, 'faraday_middleware/response/rashify' + autoload :ParseJson, 'faraday_middleware/response/parse_json' + autoload :ParseXml, 'faraday_middleware/response/parse_xml' + autoload :ParseMarshal, 'faraday_middleware/response/parse_marshal' + autoload :ParseYaml, 'faraday_middleware/response/parse_yaml' + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/gzip.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/gzip.rb new file mode 100644 index 0000000..af9e0f7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/gzip.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Middleware to automatically decompress response bodies. If the + # "Accept-Encoding" header wasn't set in the request, this sets it to + # "gzip,deflate" and appropriately handles the compressed response from the + # server. This resembles what Ruby 1.9+ does internally in Net::HTTP#get. + # + # This middleware is NOT necessary when these adapters are used: + # - net_http on Ruby 1.9+ + # - net_http_persistent on Ruby 2.0+ + # - em_http + class Gzip < Faraday::Middleware + dependency 'zlib' + + def self.optional_dependency(lib = nil) + lib ? require(lib) : yield + true + rescue LoadError, NameError + false + end + + BROTLI_SUPPORTED = optional_dependency 'brotli' + + def self.supported_encodings + encodings = %w[gzip deflate] + encodings << 'br' if BROTLI_SUPPORTED + encodings + end + + ACCEPT_ENCODING = 'Accept-Encoding' + CONTENT_ENCODING = 'Content-Encoding' + CONTENT_LENGTH = 'Content-Length' + SUPPORTED_ENCODINGS = supported_encodings.join(',').freeze + + def call(env) + env[:request_headers][ACCEPT_ENCODING] ||= SUPPORTED_ENCODINGS + @app.call(env).on_complete do |response_env| + if response_env[:body].empty? 
+ reset_body(response_env, &method(:raw_body)) + else + case response_env[:response_headers][CONTENT_ENCODING] + when 'gzip' + reset_body(response_env, &method(:uncompress_gzip)) + when 'deflate' + reset_body(response_env, &method(:inflate)) + when 'br' + reset_body(response_env, &method(:brotli_inflate)) + end + end + end + end + + def reset_body(env) + env[:body] = yield(env[:body]) + env[:response_headers].delete(CONTENT_ENCODING) + env[:response_headers][CONTENT_LENGTH] = env[:body].length + end + + def uncompress_gzip(body) + io = StringIO.new(body) + gzip_reader = Zlib::GzipReader.new(io, encoding: 'ASCII-8BIT') + gzip_reader.read + end + + def inflate(body) + # Inflate as a DEFLATE (RFC 1950+RFC 1951) stream + Zlib::Inflate.inflate(body) + rescue Zlib::DataError + # Fall back to inflating as a "raw" deflate stream which + # Microsoft servers return + inflate = Zlib::Inflate.new(-Zlib::MAX_WBITS) + begin + inflate.inflate(body) + ensure + inflate.close + end + end + + def brotli_inflate(body) + Brotli.inflate(body) + end + + def raw_body(body) + body + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/instrumentation.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/instrumentation.rb new file mode 100644 index 0000000..270ed4d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/instrumentation.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Public: Instruments requests using Active Support. + # + # Measures time spent only for synchronous requests. + # + # Examples + # + # ActiveSupport::Notifications. + # subscribe('request.faraday') do |name, starts, ends, _, env| + # url = env[:url] + # http_method = env[:method].to_s.upcase + # duration = ends - starts + # $stderr.puts '[%s] %s %s (%.3f s)' % [url.host, + # http_method, + # url.request_uri, + # duration] + # end + class Instrumentation < Faraday::Middleware + dependency 'active_support/notifications' + + def initialize(app, options = {}) + super(app) + @name = options.fetch(:name, 'request.faraday') + end + + def call(env) + ::ActiveSupport::Notifications.instrument(@name, env) do + @app.call(env) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/rack_compatible.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/rack_compatible.rb new file mode 100644 index 0000000..5109d09 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/rack_compatible.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require 'stringio' + +module FaradayMiddleware + # Wraps a handler originally written for Rack for Faraday compatibility. + # + # Experimental. Only handles changes in request headers. 
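# Illustrative usage (assumed, not taken from the upstream docs): wrapping a
# Rack middleware such as Rack::Cache so it runs inside a Faraday stack:
#
#   conn = Faraday.new('http://example.com') do |f|
#     f.use FaradayMiddleware::RackCompatible, Rack::Cache,
#           metastore: 'file:/tmp/rack-cache/meta',
#           entitystore: 'file:/tmp/rack-cache/body'
#     f.adapter Faraday.default_adapter
#   end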
+ class RackCompatible + def initialize(app, rack_handler, *args) + # tiny middleware that decomposes a Faraday::Response to standard Rack + # array: [status, headers, body] + compatible_app = lambda do |rack_env| + env = restore_env(rack_env) + response = app.call(env) + [response.status, response.headers, Array(response.body)] + end + @rack = rack_handler.new(compatible_app, *args) + end + + def call(env) + rack_env = prepare_env(env) + rack_response = @rack.call(rack_env) + finalize_response(env, rack_response) + end + + NON_PREFIXED_HEADERS = %w[CONTENT_LENGTH CONTENT_TYPE].freeze + + # faraday to rack-compatible + def prepare_env(faraday_env) + env = headers_to_rack(faraday_env) + + url = faraday_env[:url] + env['rack.url_scheme'] = url.scheme + env['PATH_INFO'] = url.path + env['SERVER_PORT'] = if url.respond_to?(:inferred_port) + url.inferred_port + else + url.port + end + env['QUERY_STRING'] = url.query + env['REQUEST_METHOD'] = faraday_env[:method].to_s.upcase + + env['rack.errors'] ||= StringIO.new + env['faraday'] = faraday_env + + env + end + + def headers_to_rack(env) + rack_env = {} + env[:request_headers].each do |name, value| + name = name.upcase.tr('-', '_') + name = "HTTP_#{name}" unless NON_PREFIXED_HEADERS.include? name + rack_env[name] = value + end + rack_env + end + + # rack to faraday-compatible + def restore_env(rack_env) + env = rack_env.fetch('faraday') + headers = env[:request_headers] + headers.clear + + rack_env.each do |name, value| + next unless name.is_a?(String) && value.is_a?(String) + + if NON_PREFIXED_HEADERS.include?(name) || name.start_with?('HTTP_') + name = name.sub(/^HTTP_/, '').downcase.tr('_', '-') + headers[name] = value + end + end + + env[:method] = rack_env['REQUEST_METHOD'].downcase.to_sym + env[:rack_errors] = rack_env['rack.errors'] + env + end + + def finalize_response(env, rack_response) + status, headers, body = rack_response + body = body.inject { |str, part| str << part } + headers = Faraday::Utils::Headers.new(headers) unless headers.is_a?(Faraday::Utils::Headers) + + env.update status: status.to_i, + body: body, + response_headers: headers + + env[:response] ||= Faraday::Response.new(env) + env[:response] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/redirect_limit_reached.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/redirect_limit_reached.rb new file mode 100644 index 0000000..3e6ccfd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/redirect_limit_reached.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Exception thrown when the maximum number of redirects is + # exceeded. + class RedirectLimitReached < Faraday::ClientError + attr_reader :response + + def initialize(response) + super "too many redirects; last one to: #{response['location']}" + @response = response + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/encode_json.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/encode_json.rb new file mode 100644 index 0000000..11efb47 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/encode_json.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Request middleware that encodes the body as JSON.
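The RackCompatible wrapper is easiest to follow with a concrete stack. Below is a sketch of its classic use, mounting rack-cache inside Faraday; the rack-cache gem and the file-store paths are assumptions for illustration, not something this diff installs:

    require 'faraday'
    require 'faraday_middleware'
    require 'rack/cache'

    conn = Faraday.new('https://example.com') do |f|
      # RackCompatible translates the Faraday env to a Rack env on the way in
      # (prepare_env) and back to a Faraday response on the way out.
      f.use FaradayMiddleware::RackCompatible, Rack::Cache::Context,
            metastore: 'file:/tmp/rack-cache/meta',
            entitystore: 'file:/tmp/rack-cache/body'
      f.adapter Faraday.default_adapter
    end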
+ # + # Processes only requests with matching Content-type or those without a type. + # If a request doesn't have a type but has a body, it sets the Content-type + # to JSON MIME-type. + # + # Doesn't try to encode bodies that already are in string form. + class EncodeJson < Faraday::Middleware + CONTENT_TYPE = 'Content-Type' + MIME_TYPE = 'application/json' + MIME_TYPE_REGEX = %r{^application/(vnd\..+\+)?json$}.freeze + + dependency do + require 'json' unless defined?(::JSON) + end + + def call(env) + match_content_type(env) do |data| + env[:body] = encode data + end + @app.call env + end + + def encode(data) + ::JSON.generate data + end + + def match_content_type(env) + return unless process_request?(env) + + env[:request_headers][CONTENT_TYPE] ||= MIME_TYPE + yield env[:body] unless env[:body].respond_to?(:to_str) + end + + def process_request?(env) + type = request_type(env) + has_body?(env) && (type.empty? || MIME_TYPE_REGEX =~ type) + end + + def has_body?(env) + (body = env[:body]) && !(body.respond_to?(:to_str) && body.empty?) + end + + def request_type(env) + type = env[:request_headers][CONTENT_TYPE].to_s + type = type.split(';', 2).first if type.index(';') + type + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/method_override.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/method_override.rb new file mode 100644 index 0000000..5db8d36 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/method_override.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Public: Writes the original HTTP method to "X-Http-Method-Override" header + # and sends the request as POST. + # + # This can be used to work around technical issues with making non-POST + # requests, e.g. faulty HTTP client or server router. + # + # This header is recognized in Rack apps by default, courtesy of the + # Rack::MethodOverride module. See + # http://rack.rubyforge.org/doc/classes/Rack/MethodOverride.html + class MethodOverride < Faraday::Middleware + HEADER = 'X-Http-Method-Override' + + # Public: Initialize the middleware. + # + # app - the Faraday app to wrap + # options - (optional) + # :rewrite - Array of HTTP methods to rewrite + # (default: all but GET and POST) + def initialize(app, options = nil) + super(app) + @methods = options&.fetch(:rewrite)&.map do |method| + method = method.downcase if method.respond_to? :downcase + method.to_sym + end + end + + def call(env) + method = env[:method] + rewrite_request(env, method) if rewrite_request?(method) + @app.call(env) + end + + def rewrite_request?(method) + if @methods.nil? || @methods.empty? + (method != :get) && (method != :post) + else + @methods.include? method + end + end + + # Internal: Write the original HTTP method to header, change method to POST. 
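A short usage sketch for the two request middlewares above (the endpoint and path are placeholders); :rewrite lists the HTTP methods to tunnel through POST:

    require 'faraday'
    require 'faraday_middleware'

    conn = Faraday.new('https://example.com') do |f|
      f.use FaradayMiddleware::EncodeJson                      # Hash body becomes a JSON string
      f.use FaradayMiddleware::MethodOverride, rewrite: [:put] # send PUT as POST
      f.adapter Faraday.default_adapter
    end

    # Goes out as POST with "X-Http-Method-Override: PUT" and a JSON body.
    conn.put('/widgets/1', name: 'sprocket')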
+ def rewrite_request(env, original_method) + env[:request_headers][HEADER] = original_method.to_s.upcase + env[:method] = :post + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth.rb new file mode 100644 index 0000000..ea0b030 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require 'faraday' +require 'forwardable' + +module FaradayMiddleware + # Public: Uses the simple_oauth library to sign requests according to the + # OAuth protocol. + # + # The options for this middleware are forwarded to SimpleOAuth::Header: + # :consumer_key, :consumer_secret, :token, :token_secret. All these + # parameters are optional. + # + # The signature is added to the "Authorization" HTTP request header. If the + # value for this header already exists, it is not overridden. + # + # If no Content-Type header is specified, this middleware assumes that + # request body parameters should be included while signing the request. + # Otherwise, it only includes them if the Content-Type is + # "application/x-www-form-urlencoded", as per OAuth 1.0. + # + # For better performance while signing requests, this middleware should be + # positioned before UrlEncoded middleware on the stack, but after any other + # body-encoding middleware (such as EncodeJson). + class OAuth < Faraday::Middleware + dependency 'simple_oauth' + + AUTH_HEADER = 'Authorization' + CONTENT_TYPE = 'Content-Type' + TYPE_URLENCODED = 'application/x-www-form-urlencoded' + + extend Forwardable + def_delegator :'Faraday::Utils', :parse_nested_query + + def initialize(app, options) + super(app) + @options = options + end + + def call(env) + env[:request_headers][AUTH_HEADER] ||= oauth_header(env).to_s if sign_request?(env) + @app.call(env) + end + + def oauth_header(env) + SimpleOAuth::Header.new env[:method], + env[:url].to_s, + signature_params(body_params(env)), + oauth_options(env) + end + + def sign_request?(env) + !!env[:request].fetch(:oauth, true) + end + + def oauth_options(env) + if (extra = env[:request][:oauth]) && extra.is_a?(Hash) && !extra.empty? + @options.merge extra + else + @options + end + end + + def body_params(env) + if include_body_params?(env) + if env[:body].respond_to?(:to_str) + parse_nested_query env[:body] + else + env[:body] + end + end || {} + end + + def include_body_params?(env) + # see RFC 5849, section 3.4.1.3.1 for details + !(type = env[:request_headers][CONTENT_TYPE]) || (type == TYPE_URLENCODED) + end + + def signature_params(params) + if params.empty? + params + else + params.reject { |_k, v| v.respond_to?(:content_type) } + end + end + end +end + +# deprecated alias +Faraday::Request::OAuth = FaradayMiddleware::OAuth diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth2.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth2.rb new file mode 100644 index 0000000..78e6d34 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/request/oauth2.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +require 'faraday' +require 'forwardable' + +module FaradayMiddleware + # Public: A simple middleware that adds an access token to each request.
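A usage sketch for the OAuth middleware just defined. The simple_oauth gem must be available and every credential below is a placeholder; per the comment above, the middleware is listed before :url_encoded so it signs the raw parameters:

    require 'faraday'
    require 'faraday_middleware'

    conn = Faraday.new('https://api.example.com') do |f|
      f.use FaradayMiddleware::OAuth,
            consumer_key: 'key', consumer_secret: 'secret',
            token: 'token', token_secret: 'token-secret'
      f.request :url_encoded # encodes the body after OAuth has signed it
      f.adapter Faraday.default_adapter
    end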
+ # + # By default, the token is added as both "access_token" query parameter + # and the "Authorization" HTTP request header. It can alternatively be + # added exclusively as a bearer token "Authorization" header by specifying + # a "token_type" option of "bearer". However, an explicit "access_token" + # parameter or "Authorization" header for the current request is not + # overridden. + # + # Examples + # + # # configure default token: + # OAuth2.new(app, 'abc123') + # + # # configure query parameter name: + # OAuth2.new(app, 'abc123', :param_name => 'my_oauth_token') + # + # # use bearer token authorization header only + # OAuth2.new(app, 'abc123', :token_type => 'bearer') + # + # # default token value is optional: + # OAuth2.new(app, :param_name => 'my_oauth_token') + class OAuth2 < Faraday::Middleware + PARAM_NAME = 'access_token' + TOKEN_TYPE = 'param' + AUTH_HEADER = 'Authorization' + + attr_reader :param_name, :token_type + + extend Forwardable + def_delegators :'Faraday::Utils', :parse_query, :build_query + + def call(env) + params = { param_name => @token }.update query_params(env[:url]) + token = params[param_name] + + if token.respond_to?(:empty?) && !token.empty? + case @token_type.downcase + when 'param' + env[:url].query = build_query params + env[:request_headers][AUTH_HEADER] ||= %(Token token="#{token}") + when 'bearer' + env[:request_headers][AUTH_HEADER] ||= %(Bearer #{token}) + end + end + + @app.call env + end + + def initialize(app, token = nil, options = {}) + super(app) + if token.is_a? Hash + options = token + token = nil + end + @token = token&.to_s + @param_name = options.fetch(:param_name, PARAM_NAME).to_s + @token_type = options.fetch(:token_type, TOKEN_TYPE).to_s + + raise ArgumentError, ":param_name can't be blank" if @token_type == 'param' && @param_name.empty? + + return unless options[:token_type].nil? + + warn "\nWarning: FaradayMiddleware::OAuth2 initialized with default "\ + 'token_type - token will be added as both a query string parameter '\ + 'and an Authorization header. In the next major release, tokens will '\ + 'be added exclusively as an Authorization header by default. Please '\ + 'see https://github.com/lostisland/faraday_middleware/wiki.' + end + + def query_params(url) + if url.query.nil? || url.query.empty? + {} + else + parse_query url.query + end + end + end +end + +# deprecated alias +Faraday::Request::OAuth2 = FaradayMiddleware::OAuth2 diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/caching.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/caching.rb new file mode 100644 index 0000000..aff33cf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/caching.rb @@ -0,0 +1,133 @@ +# frozen_string_literal: true + +require 'faraday' +require 'forwardable' +require 'digest/sha1' + +module FaradayMiddleware + # Public: Caches GET responses and pulls subsequent ones from the cache. + class Caching < Faraday::Middleware + attr_reader :cache + + # Internal: List of status codes that can be cached: + # * 200 - 'OK' + # * 203 - 'Non-Authoritative Information' + # * 300 - 'Multiple Choices' + # * 301 - 'Moved Permanently' + # * 302 - 'Found' + # * 404 - 'Not Found' + # * 410 - 'Gone' + CACHEABLE_STATUS_CODES = [200, 203, 300, 301, 302, 404, 410].freeze + + extend Forwardable + def_delegators :'Faraday::Utils', :parse_query, :build_query + + # Public: Initialize the middleware.
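To opt out of the deprecated dual param-plus-header behavior described above, pass an explicit :token_type. A sketch with a placeholder token:

    require 'faraday'
    require 'faraday_middleware'

    conn = Faraday.new('https://api.example.com') do |f|
      # 'bearer' sends the token only as an Authorization header and
      # suppresses the deprecation warning printed for the default mode.
      f.use FaradayMiddleware::OAuth2, 'abc123', token_type: 'bearer'
      f.adapter Faraday.default_adapter
    end

    conn.get('/me') # carries "Authorization: Bearer abc123"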
+ # + # cache - An object that responds to read and write (default: nil). + # options - An options Hash (default: {}): + # :ignore_params - String name or Array names of query + # params that should be ignored when forming + # the cache key (default: []). + # :write_options - Hash of settings that should be passed as the + # third options parameter to the cache's #write + # method. If not specified, no options parameter + # will be passed. + # :full_key - Boolean - use full URL as cache key: + # (url.host + url.request_uri) + # :status_codes - Array of HTTP status codes to be cached + # (default: CACHEABLE_STATUS_CODES) + # + # Yields if no cache is given. The block should return a cache object. + def initialize(app, cache = nil, options = {}) + super(app) + if cache.is_a?(Hash) && block_given? + options = cache + cache = nil + end + @cache = cache || yield + @options = options + end + + def call(env) + if env[:method] == :get + if env[:parallel_manager] + # callback mode + cache_on_complete(env) + else + # synchronous mode + key = cache_key(env) + unless (response = cache.read(key)) && response + response = @app.call(env) + store_response_in_cache(key, response) + end + finalize_response(response, env) + end + else + @app.call(env) + end + end + + def cache_key(env) + url = env[:url].dup + if url.query && params_to_ignore.any? + params = parse_query url.query + params.reject! { |k,| params_to_ignore.include? k } + url.query = params.any? ? build_query(params) : nil + end + url.normalize! + digest = full_key? ? url.host + url.request_uri : url.request_uri + Digest::SHA1.hexdigest(digest) + end + + def params_to_ignore + @params_to_ignore ||= Array(@options[:ignore_params]).map(&:to_s) + end + + def full_key? + @full_key ||= @options[:full_key] + end + + def custom_status_codes + @custom_status_codes ||= begin + codes = CACHEABLE_STATUS_CODES & Array(@options[:status_codes]).map(&:to_i) + codes.any? ? codes : CACHEABLE_STATUS_CODES + end + end + + def cache_on_complete(env) + key = cache_key(env) + if (cached_response = cache.read(key)) + finalize_response(cached_response, env) + else + # response.status is nil at this point + # any checks need to be done inside on_complete block + @app.call(env).on_complete do |response_env| + store_response_in_cache(key, response_env.response) + response_env + end + end + end + + def store_response_in_cache(key, response) + return unless custom_status_codes.include?(response.status) + + if @options[:write_options] + cache.write(key, response, @options[:write_options]) + else + cache.write(key, response) + end + end + + def finalize_response(response, env) + response = response.dup if response.frozen? + env[:response] = response + unless env[:response_headers] + env.update response.env + # FIXME: omg hax + response.instance_variable_set('@env', env) + end + response + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/chunked.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/chunked.rb new file mode 100644 index 0000000..5ddea0a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/chunked.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response_middleware' + +module FaradayMiddleware + # Public: Parse a Transfer-Encoding.
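Before the Chunked middleware continues, a sketch of the Caching contract documented above: any object that responds to #read and #write can act as the store. The endpoint and parameter names here are placeholders:

    require 'faraday'
    require 'faraday_middleware'

    # Anything with #read and #write satisfies the cache interface.
    class TinyCache
      def initialize
        @store = {}
      end

      def read(key)
        @store[key]
      end

      def write(key, value, _options = nil)
        @store[key] = value
      end
    end

    conn = Faraday.new('https://example.com') do |f|
      f.use FaradayMiddleware::Caching, TinyCache.new, ignore_params: %w[utm_source]
      f.adapter Faraday.default_adapter
    end

    conn.get('/feed') # hits the network and stores the response
    conn.get('/feed') # served from TinyCache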
Chunks response to just the original data + class Chunked < FaradayMiddleware::ResponseMiddleware + TRANSFER_ENCODING = 'transfer-encoding' + + define_parser do |raw_body| + decoded_body = [] + until raw_body.empty? + chunk_len, raw_body = raw_body.split("\r\n", 2) + chunk_len = chunk_len.split(';', 2).first.hex + break if chunk_len.zero? + + decoded_body << raw_body[0, chunk_len] + # The 2 is to strip the extra CRLF at the end of the chunk + raw_body = raw_body[chunk_len + 2, raw_body.length - chunk_len - 2] + end + decoded_body.join('') + end + + def parse_response?(env) + super && chunked_encoding?(env[:response_headers]) + end + + def chunked_encoding?(headers) + (encoding = headers[TRANSFER_ENCODING]) && + encoding.split(',').include?('chunked') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/follow_redirects.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/follow_redirects.rb new file mode 100644 index 0000000..2341a66 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/follow_redirects.rb @@ -0,0 +1,157 @@ +# frozen_string_literal: true + +require 'faraday' +require 'set' + +module FaradayMiddleware + # Public: Follow HTTP 301, 302, 303, 307, and 308 redirects. + # + # For HTTP 301, 302, and 303, the original GET, POST, PUT, DELETE, or PATCH + # request gets converted into a GET. With `:standards_compliant => true`, + # however, the HTTP method after 301/302 remains unchanged. This allows you + # to opt into HTTP/1.1 compliance and act unlike the major web browsers. + # + # This middleware currently only works with synchronous requests; i.e. it + # doesn't support parallelism. + # + # If you wish to persist cookies across redirects, you could use + # the faraday-cookie_jar gem: + # + # Faraday.new(:url => url) do |faraday| + # faraday.use FaradayMiddleware::FollowRedirects + # faraday.use :cookie_jar + # faraday.adapter Faraday.default_adapter + # end + class FollowRedirects < Faraday::Middleware + # HTTP methods for which 30x redirects can be followed + ALLOWED_METHODS = Set.new %i[head options get post put patch delete] + # HTTP redirect status codes that this middleware implements + REDIRECT_CODES = Set.new [301, 302, 303, 307, 308] + # Keys in env hash which will get cleared between requests + ENV_TO_CLEAR = Set.new %i[status response response_headers] + + # Default value for max redirects followed + FOLLOW_LIMIT = 3 + + # Regex that matches characters that need to be escaped in URLs, sans + # the "%" character which we assume already represents an escaped sequence. + URI_UNSAFE = %r{[^\-_.!~*'()a-zA-Z\d;/?:@&=+$,\[\]%]}.freeze + + AUTH_HEADER = 'Authorization' + + # Public: Initialize the middleware. + # + # options - An options Hash (default: {}): + # :limit - A Numeric redirect limit (default: 3) + # :standards_compliant - A Boolean indicating whether to respect + # the HTTP spec when following 301/302 + # (default: false) + # :callback - A callable used on redirects + # with the old and new envs + # :cookies - An Array of Strings (e.g. + # ['cookie1', 'cookie2']) to choose + # cookies to be kept, or :all to keep + # all cookies (default: []). 
+ # :clear_authorization_header - A Boolean indicating whether the request + # Authorization header should be cleared on + # redirects (default: true) + def initialize(app, options = {}) + super(app) + @options = options + + @convert_to_get = Set.new [303] + @convert_to_get << 301 << 302 unless standards_compliant? + end + + def call(env) + perform_with_redirection(env, follow_limit) + end + + private + + def convert_to_get?(response) + !%i[head options].include?(response.env[:method]) && + @convert_to_get.include?(response.status) + end + + def perform_with_redirection(env, follows) + request_body = env[:body] + response = @app.call(env) + + response.on_complete do |response_env| + if follow_redirect?(response_env, response) + raise RedirectLimitReached, response if follows.zero? + + new_request_env = update_env(response_env.dup, request_body, response) + callback&.call(response_env, new_request_env) + response = perform_with_redirection(new_request_env, follows - 1) + end + end + response + end + + def update_env(env, request_body, response) + redirect_from_url = env[:url].to_s + redirect_to_url = safe_escape(response['location'] || '') + env[:url] += redirect_to_url + + ENV_TO_CLEAR.each { |key| env.delete key } + + if convert_to_get?(response) + env[:method] = :get + env[:body] = nil + else + env[:body] = request_body + end + + clear_authorization_header(env, redirect_from_url, redirect_to_url) + + env + end + + def follow_redirect?(env, response) + ALLOWED_METHODS.include?(env[:method]) && + REDIRECT_CODES.include?(response.status) + end + + def follow_limit + @options.fetch(:limit, FOLLOW_LIMIT) + end + + def standards_compliant? + @options.fetch(:standards_compliant, false) + end + + def callback + @options[:callback] + end + + # Internal: escapes unsafe characters from an URL which might be a path + # component only or a fully qualified URI so that it can be joined onto an + # URI:HTTP using the `+` operator. Doesn't escape "%" characters so to not + # risk double-escaping. + def safe_escape(uri) + uri = uri.split('#')[0] # we want to remove the fragment if present + uri.to_s.gsub(URI_UNSAFE) do |match| + "%#{match.unpack('H2' * match.bytesize).join('%').upcase}" + end + end + + def clear_authorization_header(env, from_url, to_url) + return env if redirect_to_same_host?(from_url, to_url) + return env unless @options.fetch(:clear_authorization_header, true) + + env[:request_headers].delete(AUTH_HEADER) + end + + def redirect_to_same_host?(from_url, to_url) + return true if to_url.start_with?('/') + + from_uri = URI.parse(from_url) + to_uri = URI.parse(to_url) + + [from_uri.scheme, from_uri.host, from_uri.port] == + [to_uri.scheme, to_uri.host, to_uri.port] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/mashify.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/mashify.rb new file mode 100644 index 0000000..6933af8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/mashify.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'faraday' + +module FaradayMiddleware + # Public: Converts parsed response bodies to a Hashie::Mash if they were of + # Hash or Array type. 
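A sketch of FollowRedirects in use, together with the RedirectLimitReached exception defined earlier (all URLs are placeholders):

    require 'faraday'
    require 'faraday_middleware'

    conn = Faraday.new('http://example.com') do |f|
      f.use FaradayMiddleware::FollowRedirects,
            limit: 5,                 # default is 3
            standards_compliant: true # keep the verb on 301/302
      f.adapter Faraday.default_adapter
    end

    begin
      conn.get('/moved')
    rescue FaradayMiddleware::RedirectLimitReached => e
      warn "gave up at: #{e.response['location']}"
    end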
+ class Mashify < Faraday::Response::Middleware + attr_accessor :mash_class + + class << self + attr_accessor :mash_class + end + + dependency do + require 'hashie/mash' + self.mash_class = ::Hashie::Mash + end + + def initialize(app = nil, options = {}) + super(app) + self.mash_class = options[:mash_class] || self.class.mash_class + end + + def parse(body) + case body + when Hash + mash_class.new(body) + when Array + body.map { |item| parse(item) } + else + body + end + end + end +end + +# deprecated alias +Faraday::Response::Mashify = FaradayMiddleware::Mashify diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_dates.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_dates.rb new file mode 100644 index 0000000..f297a0d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_dates.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require 'time' +require 'faraday' + +module FaradayMiddleware + # Parse dates from response body + class ParseDates < ::Faraday::Response::Middleware + ISO_DATE_FORMAT = /\A\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)? + (Z|((\+|-)\d{2}:?\d{2}))\Z/xm.freeze + + def initialize(app, options = {}) + @regexp = options[:match] || ISO_DATE_FORMAT + super(app) + end + + def call(env) + response = @app.call(env) + parse_dates! response.env[:body] + response + end + + private + + def parse_dates!(value) + case value + when Hash + value.each do |key, element| + value[key] = parse_dates!(element) + end + when Array + value.each_with_index do |element, index| + value[index] = parse_dates!(element) + end + when @regexp + Time.parse(value) + else + value + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_json.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_json.rb new file mode 100644 index 0000000..58b5386 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_json.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response_middleware' + +module FaradayMiddleware + # Public: Parse response bodies as JSON. + class ParseJson < ResponseMiddleware + dependency do + require 'json' unless defined?(::JSON) + end + + define_parser do |body, parser_options| + ::JSON.parse(body, parser_options || {}) unless body.strip.empty? + end + + # Public: Override the content-type of the response with "application/json" + # if the response body looks like it might be JSON, i.e. starts with an + # open bracket. + # + # This is to fix responses from certain API providers that insist on serving + # JSON with wrong MIME-types such as "text/javascript". 
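Mashify is usually stacked together with a parser such as the ParseJson middleware that follows; the entry closest to the adapter runs first on the response, so ParseJson produces the Hash that Mashify then wraps. A sketch assuming the hashie gem and a placeholder API:

    require 'faraday'
    require 'faraday_middleware'

    conn = Faraday.new('https://api.example.com') do |f|
      f.use FaradayMiddleware::Mashify
      f.use FaradayMiddleware::ParseJson, content_type: /\bjson$/
      f.adapter Faraday.default_adapter
    end

    user = conn.get('/users/1').body
    user.name # method-style access courtesy of Hashie::Mash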
+ class MimeTypeFix < ResponseMiddleware + MIME_TYPE = 'application/json' + + def process_response(env) + old_type = env[:response_headers][CONTENT_TYPE].to_s + new_type = MIME_TYPE.dup + new_type << ';' << old_type.split(';', 2).last if old_type.index(';') + env[:response_headers][CONTENT_TYPE] = new_type + end + + BRACKETS = %w-[ {-.freeze + WHITESPACE = [' ', "\n", "\r", "\t"].freeze + + def parse_response?(env) + super && BRACKETS.include?(first_char(env[:body])) + end + + def first_char(body) + idx = -1 + char = body[idx += 1] + char = body[idx += 1] while char && WHITESPACE.include?(char) + char + end + end + end +end + +# deprecated alias +Faraday::Response::ParseJson = FaradayMiddleware::ParseJson diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_marshal.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_marshal.rb new file mode 100644 index 0000000..ece165e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_marshal.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response_middleware' + +module FaradayMiddleware + # Public: Restore marshalled Ruby objects in response bodies. + class ParseMarshal < ResponseMiddleware + define_parser do |body| + ::Marshal.load(body) unless body.empty? + end + end +end + +# deprecated alias +Faraday::Response::ParseMarshal = FaradayMiddleware::ParseMarshal diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_xml.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_xml.rb new file mode 100644 index 0000000..1262cc5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_xml.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response_middleware' + +module FaradayMiddleware + # Public: parses response bodies with MultiXml. + class ParseXml < ResponseMiddleware + dependency 'multi_xml' + + define_parser do |body, parser_options| + ::MultiXml.parse(body, parser_options || {}) + end + end +end + +# deprecated alias +Faraday::Response::ParseXml = FaradayMiddleware::ParseXml diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_yaml.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_yaml.rb new file mode 100644 index 0000000..bf8873b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/parse_yaml.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response_middleware' + +module FaradayMiddleware + # Public: Parse response bodies as YAML. + # + # Warning: This is not backwards compatible with versions of this middleware + # prior to faraday_middleware v0.12 - prior to this version, we used + # YAML.load rather than YAMl.safe_load, which exposes serious remote code + # execution risks - see https://github.com/ruby/psych/issues/119 for details. + # If you're sure you can trust YAML you're passing, you can set up an unsafe + # version of this middleware like this: + # + # class UnsafelyParseYaml < FaradayMiddleware::ResponseMiddleware + # dependency do + # require 'yaml' + # end + # + # define_parser do |body| + # YAML.load body + # end + # end + # + # Faraday.new(..) 
do |config| + # config.use UnsafelyParseYaml + # ... + # end + class ParseYaml < ResponseMiddleware + dependency 'safe_yaml/load' + + define_parser do |body, parser_options| + SafeYAML.load(body, nil, parser_options || {}) + end + end +end + +# deprecated alias +Faraday::Response::ParseYaml = FaradayMiddleware::ParseYaml diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/rashify.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/rashify.rb new file mode 100644 index 0000000..9611eb4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response/rashify.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require 'faraday_middleware/response/mashify' + +module FaradayMiddleware + # Public: Converts parsed response bodies to a Hashie::Rash if they were of + # Hash or Array type. + class Rashify < Mashify + dependency do + require 'rash' + self.mash_class = ::Hashie::Mash::Rash + end + end +end + +# deprecated alias +Faraday::Response::Rashify = FaradayMiddleware::Rashify diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response_middleware.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response_middleware.rb new file mode 100644 index 0000000..54bf67e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/response_middleware.rb @@ -0,0 +1,119 @@ +# frozen_string_literal: true + +require 'faraday' + +# Main FaradayMiddleware module. +module FaradayMiddleware + # Internal: The base class for middleware that parses responses. + class ResponseMiddleware < Faraday::Middleware + CONTENT_TYPE = 'Content-Type' + + class << self + attr_accessor :parser + end + + # Store a Proc that receives the body and returns the parsed result. + def self.define_parser(parser = nil, &block) + @parser = parser || + block || + raise(ArgumentError, 'Define parser with a block') + end + + def self.inherited(subclass) + super + subclass.load_error = load_error if subclass.respond_to? :load_error= + subclass.parser = parser + end + + def initialize(app = nil, options = {}) + super(app) + @options = options + @parser_options = options[:parser_options] + @content_types = Array(options[:content_type]) + end + + def call(environment) + @app.call(environment).on_complete do |env| + process_response(env) if process_response_type?(response_type(env)) && parse_response?(env) + end + end + + def process_response(env) + env[:raw_body] = env[:body] if preserve_raw?(env) + env[:body] = parse(env[:body]) + rescue Faraday::ParsingError => e + raise Faraday::ParsingError.new(e.wrapped_exception, env[:response]) + end + + # Parse the response body. + # + # Instead of overriding this method, consider using `define_parser`. + def parse(body) + if self.class.parser + begin + self.class.parser.call(body, @parser_options) + rescue StandardError, SyntaxError => e + raise e if e.is_a?(SyntaxError) && + e.class.name != 'Psych::SyntaxError' + + raise Faraday::ParsingError, e + end + else + body + end + end + + def response_type(env) + type = env[:response_headers][CONTENT_TYPE].to_s + type = type.split(';', 2).first if type.index(';') + type + end + + def process_response_type?(type) + @content_types.empty? || @content_types.any? do |pattern| + pattern.is_a?(Regexp) ? type =~ pattern : type == pattern + end + end + + def parse_response?(env) + env[:body].respond_to? 
:to_str + end + + def preserve_raw?(env) + env[:request].fetch(:preserve_raw, @options[:preserve_raw]) + end + end + + # DRAGONS + module OptionsExtension + attr_accessor :preserve_raw + + def to_hash + super.update(preserve_raw: preserve_raw) + end + + def each + return to_enum(:each) unless block_given? + + super + yield :preserve_raw, preserve_raw + end + + def fetch(key, *args) + if key == :preserve_raw + value = __send__(key) + value.nil? ? args.fetch(0) : value + else + super + end + end + end + + if defined?(Faraday::RequestOptions) + begin + Faraday::RequestOptions.from(preserve_raw: true) + rescue NoMethodError + Faraday::RequestOptions.include OptionsExtension + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/version.rb b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/version.rb new file mode 100644 index 0000000..33c4e60 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/faraday_middleware-1.2.0/lib/faraday_middleware/version.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +# Main FaradayMiddleware module. +module FaradayMiddleware + VERSION = '1.2.0' unless defined?(FaradayMiddleware::VERSION) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/MIT-LICENSE b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/MIT-LICENSE new file mode 100644 index 0000000..24a670f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/MIT-LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2008-2013 Stephen Sykes + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/README.textile b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/README.textile new file mode 100644 index 0000000..c112c4c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/README.textile @@ -0,0 +1,163 @@ +!https://img.shields.io/gem/dt/fastimage.svg!:https://rubygems.org/gems/fastimage +!https://travis-ci.org/sdsykes/fastimage.svg?branch=master!:https://travis-ci.org/sdsykes/fastimage + +h1. FastImage + +h4. FastImage finds the size or type of an image given its uri by fetching as little as needed + +h2. The problem + +Your app needs to find the size or type of an image. This could be for adding width and height attributes to an image tag, for adjusting layouts or overlays to fit an image or any other of dozens of reasons. + +But the image is not locally stored - it's on another asset server, or in the cloud - at Amazon S3 for example. 
+ +You don't want to download the entire image to your app server - it could be many tens of kilobytes, or even megabytes just to get this information. For most common image types (GIF, PNG, BMP etc.), the size of the image is simply stored at the start of the file. For JPEG files it's a little bit more complex, but even so you do not need to fetch much of the image to find the size. + +FastImage does this minimal fetch for image types GIF, JPEG, PNG, TIFF, BMP, ICO, CUR, PSD, SVG and WEBP. And it doesn't rely on installing external libraries such as RMagick (which relies on ImageMagick or GraphicsMagick) or ImageScience (which relies on FreeImage). + +You need only supply the uri, and FastImage will do the rest. + +h2. Features + +FastImage can also read local (and other) files - anything that is not parseable as a URI will be interpreted as a filename, and FastImage will attempt to open it with @File#open@. + +FastImage will also automatically read from any object that responds to @:read@ - for instance an IO object if that is passed instead of a URI. + +FastImage will follow up to 4 HTTP redirects to get the image. + +FastImage will obey the @http_proxy@ setting in your environment to route requests via a proxy. You can also pass a @:proxy@ argument if you want to specify the proxy address in the call. + +You can add a timeout to the request which will limit the request time by passing @:timeout => number_of_seconds@. + +FastImage normally replies with @nil@ if it encounters an error, but you can pass @:raise_on_failure => true@ to get an exception. + +FastImage also provides a reader for the content length header provided in HTTP. This may be useful to assess the file size of an image, but do not rely on it exclusively - it will not be present in chunked responses for instance. + +FastImage accepts additional HTTP headers. This can be used to set a user agent or referrer which some servers require. Pass an @:http_header@ argument to specify headers, e.g., @:http_header => {'User-Agent' => 'Fake Browser'}@. + +FastImage can give you information about the parsed display orientation of an image with Exif data (jpeg or tiff). + +FastImage also handles "Data URIs":https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs correctly. + +h2. Security + +As of v1.6.7 FastImage no longer uses @openuri@ to open files, but directly calls @File.open@. Take care to sanitise the strings passed to FastImage; it will try to read from whatever is passed. + +h2. Examples + +
+<pre><code>
+require 'fastimage'
+
+FastImage.size("http://stephensykes.com/images/ss.com_x.gif")
+=> [266, 56]  # width, height
+FastImage.type("http://stephensykes.com/images/pngimage")
+=> :png
+FastImage.type("/some/local/file.gif")
+=> :gif
+FastImage.size("http://upload.wikimedia.org/wikipedia/commons/b/b4/Mardin_1350660_1350692_33_images.jpg", :raise_on_failure=>true, :timeout=>0.1)
+=> FastImage::ImageFetchFailure: FastImage::ImageFetchFailure
+FastImage.size("http://upload.wikimedia.org/wikipedia/commons/b/b4/Mardin_1350660_1350692_33_images.jpg", :raise_on_failure=>true, :timeout=>2.0)
+=> [9545, 6623]
+FastImage.new("http://stephensykes.com/images/pngimage").content_length
+=> 432
+FastImage.size("http://stephensykes.com/images/ss.com_x.gif", :http_header => {'User-Agent' => 'Fake Browser'})
+=> [266, 56]
+FastImage.new("http://stephensykes.com/images/ExifOrientation3.jpg").orientation
+=> 3
+FastImage.size("data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==")
+=> [1, 1]
+</code></pre>
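For completeness, the @:proxy@ and @:timeout@ options described above can be combined in one call; the proxy address here is only a placeholder:

<pre><code>FastImage.size("http://stephensykes.com/images/ss.com_x.gif", :proxy => "http://proxy.example.com:3128", :timeout => 1.0)
=> [266, 56]
</code></pre>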
+ +h2. Installation + +h4. Required Ruby version + +FastImage version 2.0.0 and above work with Ruby 1.9.2 and above. + +FastImage version 1.9.0 was the last version that supported Ruby 1.8.7. + +h4. Gem + +bc. gem install fastimage + +h4. Rails + +Add fastimage to your Gemfile, and bundle. + +bc. gem 'fastimage' + +Then you're off - just use @FastImage.size()@ and @FastImage.type()@ in your code as in the examples. + +h2. Documentation + +"http://sdsykes.github.io/fastimage/rdoc/FastImage.html":http://sdsykes.github.io/fastimage/rdoc/FastImage.html + +h2. Maintainer + +FastImage is maintained by Stephen Sykes (@sdsykes). Support this project by using "LibPixel":https://libpixel.com cloud based image resizing and processing service. + +h2. Benchmark + +It's way faster than conventional methods (for example the image_size gem) for most types of file when fetching over the wire. + +
+<pre><code>
+irb> uri = "http://upload.wikimedia.org/wikipedia/commons/b/b4/Mardin_1350660_1350692_33_images.jpg"
+irb> puts Benchmark.measure {open(uri, 'rb') {|fh| p ImageSize.new(fh).size}}
+[9545, 6623]
+  0.680000   0.250000   0.930000 (  7.571887)
+
+irb> puts Benchmark.measure {p FastImage.size(uri)}
+[9545, 6623]
+  0.010000   0.000000   0.010000 (  0.090640)
+</code></pre>
+ +The file is fetched in about 7.5 seconds in this test (the number in brackets is the total time taken), but as FastImage doesn't need to fetch the whole thing, it completes in less than 0.1s. + +You'll see similar excellent results for the other file types, except for TIFF. Unfortunately TIFFs tend to have their +metadata towards the end of the file, so it makes little difference to do a minimal fetch. The result shown below is +mostly dependent on the exact internet conditions during the test, and little to do with the library used. + +
+<pre><code>
+irb> uri = "http://upload.wikimedia.org/wikipedia/commons/1/11/Shinbutsureijoushuincho.tiff"
+irb> puts Benchmark.measure {open(uri, 'rb') {|fh| p ImageSize.new(fh).size}}
+[1120, 1559]
+  1.080000   0.370000   1.450000 ( 13.766962)
+
+irb> puts Benchmark.measure {p FastImage.size(uri)}
+[1120, 1559]
+  3.490000   3.810000   7.300000 ( 11.754315)
+</code></pre>
+ +h2. Tests + +You'll need to @gem install fakeweb@ and possibly also @gem install test-unit@ to be able to run the tests. + +bc.. $ ruby test/test.rb +Run options: + +# Running tests: + +Finished tests in 1.033640s, 23.2189 tests/s, 82.2337 assertions/s. +24 tests, 85 assertions, 0 failures, 0 errors, 0 skips + +h2. References + +* "Pennysmalls - Find jpeg dimensions fast in pure Ruby, no image library needed":http://pennysmalls.wordpress.com/2008/08/19/find-jpeg-dimensions-fast-in-pure-ruby-no-ima/ +* "Antti Kupila - Getting JPG dimensions with AS3 without loading the entire file":http://www.anttikupila.com/flash/getting-jpg-dimensions-with-as3-without-loading-the-entire-file/ +* "imagesize gem":https://rubygems.org/gems/imagesize +* "EXIF Reader":https://github.com/remvee/exifr + +h2. FastImage in other languages + +* "Python by bmuller":https://github.com/bmuller/fastimage +* "Swift by kaishin":https://github.com/kaishin/ImageScout +* "Go by rubenfonseca":https://github.com/rubenfonseca/fastimage +* "PHP by tommoor":https://github.com/tommoor/fastimage +* "Node.js by ShogunPanda":https://github.com/ShogunPanda/fastimage +* "Objective C by kylehickinson":https://github.com/kylehickinson/FastImage +* "Android by qstumn":https://github.com/qstumn/FastImageSize +* "Flutter by ky1vstar":https://github.com/ky1vstar/fastimage.dart + +h2. Licence + +MIT, see file "MIT-LICENSE":MIT-LICENSE diff --git a/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/lib/fastimage.rb b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/lib/fastimage.rb new file mode 100644 index 0000000..9b6c26d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastimage-2.2.6/lib/fastimage.rb @@ -0,0 +1,1091 @@ +# frozen_string_literal: true +# coding: ASCII-8BIT + +# FastImage finds the size or type of an image given its uri. +# It is careful to only fetch and parse as much of the image as is needed to determine the result. +# It does this by using a feature of Net::HTTP that yields strings from the resource being fetched +# as soon as the packets arrive. +# +# No external libraries such as ImageMagick are used here, this is a very lightweight solution to +# finding image information. +# +# FastImage knows about GIF, JPEG, BMP, TIFF, ICO, CUR, PNG, PSD, SVG and WEBP files. +# +# FastImage can also read files from the local filesystem by supplying the path instead of a uri. +# In this case FastImage reads the file in chunks of 256 bytes until +# it has enough. This is possibly a useful bandwidth-saving feature if the file is on a network +# attached disk rather than truly local. +# +# FastImage will automatically read from any object that responds to :read - for +# instance an IO object if that is passed instead of a URI. +# +# FastImage will follow up to 4 HTTP redirects to get the image. +# +# FastImage also provides a reader for the content length header provided in HTTP. +# This may be useful to assess the file size of an image, but do not rely on it exclusively - +# it will not be present in chunked responses for instance. +# +# FastImage accepts additional HTTP headers. This can be used to set a user agent +# or referrer which some servers require. Pass an :http_header argument to specify headers, +# e.g., :http_header => {'User-Agent' => 'Fake Browser'}. +# +# FastImage can give you information about the parsed display orientation of an image with Exif +# data (jpeg or tiff). 
+# +# === Examples +# require 'fastimage' +# +# FastImage.size("http://stephensykes.com/images/ss.com_x.gif") +# => [266, 56] +# FastImage.type("http://stephensykes.com/images/pngimage") +# => :png +# FastImage.type("/some/local/file.gif") +# => :gif +# File.open("/some/local/file.gif", "r") {|io| FastImage.type(io)} +# => :gif +# FastImage.new("http://stephensykes.com/images/pngimage").content_length +# => 432 +# FastImage.new("http://stephensykes.com/images/ExifOrientation3.jpg").orientation +# => 3 +# +# === References +# * http://www.anttikupila.com/flash/getting-jpg-dimensions-with-as3-without-loading-the-entire-file/ +# * http://pennysmalls.wordpress.com/2008/08/19/find-jpeg-dimensions-fast-in-pure-ruby-no-ima/ +# * https://rubygems.org/gems/imagesize +# * https://github.com/remvee/exifr +# + +require 'net/https' +require 'delegate' +require 'pathname' +require 'zlib' +require 'base64' +require 'uri' +require_relative 'fastimage/version' + +# see http://stackoverflow.com/questions/5208851/i/41048816#41048816 +if RUBY_VERSION < "2.2" + module URI + DEFAULT_PARSER = Parser.new(:HOSTNAME => "(?:(?:[a-zA-Z\\d](?:[-\\_a-zA-Z\\d]*[a-zA-Z\\d])?)\\.)*(?:[a-zA-Z](?:[-\\_a-zA-Z\\d]*[a-zA-Z\\d])?)\\.?") + end +end + +class FastImage + attr_reader :size, :type, :content_length, :orientation, :animated + + attr_reader :bytes_read + + class FastImageException < StandardError # :nodoc: + end + class UnknownImageType < FastImageException # :nodoc: + end + class ImageFetchFailure < FastImageException # :nodoc: + end + class SizeNotFound < FastImageException # :nodoc: + end + class CannotParseImage < FastImageException # :nodoc: + end + + DefaultTimeout = 2 unless const_defined?(:DefaultTimeout) + + LocalFileChunkSize = 256 unless const_defined?(:LocalFileChunkSize) + + SUPPORTED_IMAGE_TYPES = [:bmp, :gif, :jpeg, :png, :tiff, :psd, :heic, :heif, :webp, :svg, :ico, :cur].freeze + + # Returns an array containing the width and height of the image. + # It will return nil if the image could not be fetched, or if the image type was not recognised. + # + # By default there is a timeout of 2 seconds for opening and reading from a remote server. + # This can be changed by passing a :timeout => number_of_seconds in the options. + # + # If you wish FastImage to raise if it cannot size the image for any reason, then pass + # :raise_on_failure => true in the options. + # + # FastImage knows about GIF, JPEG, BMP, TIFF, ICO, CUR, PNG, PSD, SVG and WEBP files. 
+ # + # === Example + # + # require 'fastimage' + # + # FastImage.size("http://stephensykes.com/images/ss.com_x.gif") + # => [266, 56] + # FastImage.size("http://stephensykes.com/images/pngimage") + # => [16, 16] + # FastImage.size("http://farm4.static.flickr.com/3023/3047236863_9dce98b836.jpg") + # => [500, 375] + # FastImage.size("http://www-ece.rice.edu/~wakin/images/lena512.bmp") + # => [512, 512] + # FastImage.size("test/fixtures/test.jpg") + # => [882, 470] + # FastImage.size("http://pennysmalls.com/does_not_exist") + # => nil + # FastImage.size("http://pennysmalls.com/does_not_exist", :raise_on_failure=>true) + # => raises FastImage::ImageFetchFailure + # FastImage.size("http://stephensykes.com/favicon.ico", :raise_on_failure=>true) + # => [16, 16] + # FastImage.size("http://stephensykes.com/images/squareBlue.icns", :raise_on_failure=>true) + # => raises FastImage::UnknownImageType + # FastImage.size("http://stephensykes.com/favicon.ico", :raise_on_failure=>true, :timeout=>0.01) + # => raises FastImage::ImageFetchFailure + # FastImage.size("http://stephensykes.com/images/faulty.jpg", :raise_on_failure=>true) + # => raises FastImage::SizeNotFound + # + # === Supported options + # [:timeout] + # Overrides the default timeout of 2 seconds. Applies both to reading from and opening the http connection. + # [:raise_on_failure] + # If set to true causes an exception to be raised if the image size cannot be found for any reason. + # + def self.size(uri, options={}) + new(uri, options).size + end + + # Returns an symbol indicating the image type fetched from a uri. + # It will return nil if the image could not be fetched, or if the image type was not recognised. + # + # By default there is a timeout of 2 seconds for opening and reading from a remote server. + # This can be changed by passing a :timeout => number_of_seconds in the options. + # + # If you wish FastImage to raise if it cannot find the type of the image for any reason, then pass + # :raise_on_failure => true in the options. + # + # === Example + # + # require 'fastimage' + # + # FastImage.type("http://stephensykes.com/images/ss.com_x.gif") + # => :gif + # FastImage.type("http://stephensykes.com/images/pngimage") + # => :png + # FastImage.type("http://farm4.static.flickr.com/3023/3047236863_9dce98b836.jpg") + # => :jpeg + # FastImage.type("http://www-ece.rice.edu/~wakin/images/lena512.bmp") + # => :bmp + # FastImage.type("test/fixtures/test.jpg") + # => :jpeg + # FastImage.type("http://stephensykes.com/does_not_exist") + # => nil + # File.open("/some/local/file.gif", "r") {|io| FastImage.type(io)} + # => :gif + # FastImage.type("test/fixtures/test.tiff") + # => :tiff + # FastImage.type("test/fixtures/test.psd") + # => :psd + # + # === Supported options + # [:timeout] + # Overrides the default timeout of 2 seconds. Applies both to reading from and opening the http connection. + # [:raise_on_failure] + # If set to true causes an exception to be raised if the image type cannot be found for any reason. + # + def self.type(uri, options={}) + new(uri, options.merge(:type_only=>true)).type + end + + # Returns a boolean value indicating the image is animated. + # It will return nil if the image could not be fetched, or if the image type was not recognised. + # + # By default there is a timeout of 2 seconds for opening and reading from a remote server. + # This can be changed by passing a :timeout => number_of_seconds in the options. 
+ # + # If you wish FastImage to raise if it cannot find the type of the image for any reason, then pass + # :raise_on_failure => true in the options. + # + # === Example + # + # require 'fastimage' + # + # FastImage.animated?("test/fixtures/test.gif") + # => false + # FastImage.animated?("test/fixtures/animated.gif") + # => true + # + # === Supported options + # [:timeout] + # Overrides the default timeout of 2 seconds. Applies both to reading from and opening the http connection. + # [:raise_on_failure] + # If set to true causes an exception to be raised if the image type cannot be found for any reason. + # + def self.animated?(uri, options={}) + new(uri, options.merge(:animated_only=>true)).animated + end + + def initialize(uri, options={}) + @uri = uri + @options = { + :type_only => false, + :timeout => DefaultTimeout, + :raise_on_failure => false, + :proxy => nil, + :http_header => {} + }.merge(options) + + @property = if @options[:animated_only] + :animated + elsif @options[:type_only] + :type + else + :size + end + + @type, @state = nil + + if uri.respond_to?(:read) + fetch_using_read(uri) + elsif uri.start_with?('data:') + fetch_using_base64(uri) + else + begin + @parsed_uri = URI.parse(uri) + rescue URI::InvalidURIError + fetch_using_file_open + else + if @parsed_uri.scheme == "http" || @parsed_uri.scheme == "https" + fetch_using_http + else + fetch_using_file_open + end + end + end + + raise SizeNotFound if @options[:raise_on_failure] && @property == :size && !@size + + rescue Timeout::Error, SocketError, Errno::ECONNREFUSED, Errno::EHOSTUNREACH, Errno::ECONNRESET, + Errno::ENETUNREACH, ImageFetchFailure, Net::HTTPBadResponse, EOFError, Errno::ENOENT, + OpenSSL::SSL::SSLError + raise ImageFetchFailure if @options[:raise_on_failure] + rescue UnknownImageType + raise if @options[:raise_on_failure] + rescue CannotParseImage + if @options[:raise_on_failure] + if @property == :size + raise SizeNotFound + else + raise ImageFetchFailure + end + end + + ensure + uri.rewind if uri.respond_to?(:rewind) + + end + + private + + def fetch_using_http + @redirect_count = 0 + + fetch_using_http_from_parsed_uri + end + + # Some invalid locations need escaping + def escaped_location(location) + begin + URI(location) + rescue URI::InvalidURIError + ::URI::DEFAULT_PARSER.escape(location) + else + location + end + end + + def fetch_using_http_from_parsed_uri + http_header = {'Accept-Encoding' => 'identity'}.merge(@options[:http_header]) + + setup_http + @http.request_get(@parsed_uri.request_uri, http_header) do |res| + if res.is_a?(Net::HTTPRedirection) && @redirect_count < 4 + @redirect_count += 1 + begin + location = res['Location'] + raise ImageFetchFailure if location.nil? || location.empty? 
+ + @parsed_uri = URI.join(@parsed_uri, escaped_location(location)) + rescue URI::InvalidURIError + else + fetch_using_http_from_parsed_uri + break + end + end + + raise ImageFetchFailure unless res.is_a?(Net::HTTPSuccess) + + @content_length = res.content_length + + read_fiber = Fiber.new do + res.read_body do |str| + Fiber.yield str + end + end + + case res['content-encoding'] + when 'deflate', 'gzip', 'x-gzip' + begin + gzip = Zlib::GzipReader.new(FiberStream.new(read_fiber)) + rescue FiberError, Zlib::GzipFile::Error + raise CannotParseImage + end + + read_fiber = Fiber.new do + while data = gzip.readline + Fiber.yield data + end + end + end + + parse_packets FiberStream.new(read_fiber) + + break # needed to actively quit out of the fetch + end + end + + def protocol_relative_url?(url) + url.start_with?("//") + end + + def proxy_uri + begin + if @options[:proxy] + proxy = URI.parse(@options[:proxy]) + else + proxy = ENV['http_proxy'] && ENV['http_proxy'] != "" ? URI.parse(ENV['http_proxy']) : nil + end + rescue URI::InvalidURIError + proxy = nil + end + proxy + end + + def setup_http + proxy = proxy_uri + + if proxy + @http = Net::HTTP::Proxy(proxy.host, proxy.port, proxy.user, proxy.password).new(@parsed_uri.host, @parsed_uri.port) + else + @http = Net::HTTP.new(@parsed_uri.host, @parsed_uri.port) + end + @http.use_ssl = (@parsed_uri.scheme == "https") + @http.verify_mode = OpenSSL::SSL::VERIFY_NONE + @http.open_timeout = @options[:timeout] + @http.read_timeout = @options[:timeout] + end + + def fetch_using_read(readable) + readable.rewind if readable.respond_to?(:rewind) + # Pathnames respond to read, but always return the first + # chunk of the file unlike an IO (even though the + # docuementation for it refers to IO). Need to supply + # an offset in this case. + if readable.is_a?(Pathname) + read_fiber = Fiber.new do + offset = 0 + while str = readable.read(LocalFileChunkSize, offset) + Fiber.yield str + offset += LocalFileChunkSize + end + end + else + read_fiber = Fiber.new do + while str = readable.read(LocalFileChunkSize) + Fiber.yield str + end + end + end + + parse_packets FiberStream.new(read_fiber) + end + + def fetch_using_file_open + @content_length = File.size?(@uri) + File.open(@uri) do |s| + fetch_using_read(s) + end + end + + def parse_packets(stream) + @stream = stream + + begin + result = send("parse_#{@property}") + if result != nil + # extract exif orientation if it was found + if @property == :size && result.size == 3 + @orientation = result.pop + else + @orientation = 1 + end + + instance_variable_set("@#{@property}", result) + else + raise CannotParseImage + end + rescue FiberError + raise CannotParseImage + end + end + + def parse_size + @type = parse_type unless @type + send("parse_size_for_#{@type}") + end + + def parse_animated + @type = parse_type unless @type + @type == :gif ? 
send("parse_animated_for_#{@type}") : nil + end + + def fetch_using_base64(uri) + decoded = begin + Base64.decode64(uri.split(',')[1]) + rescue + raise CannotParseImage + end + @content_length = decoded.size + fetch_using_read StringIO.new(decoded) + end + + module StreamUtil # :nodoc: + def read_byte + read(1)[0].ord + end + + def read_int + read(2).unpack('n')[0] + end + + def read_string_int + value = [] + while read(1) =~ /(\d)/ + value << $1 + end + value.join.to_i + end + end + + class FiberStream # :nodoc: + include StreamUtil + attr_reader :pos + + def initialize(read_fiber) + @read_fiber = read_fiber + @pos = 0 + @strpos = 0 + @str = '' + end + + # Peeking beyond the end of the input will raise + def peek(n) + while @strpos + n > @str.size + unused_str = @str[@strpos..-1] + + new_string = @read_fiber.resume + new_string = @read_fiber.resume if new_string.is_a? Net::ReadAdapter + raise CannotParseImage if !new_string + # we are dealing with bytes here, so force the encoding + new_string.force_encoding("ASCII-8BIT") if new_string.respond_to? :force_encoding + + @str = unused_str + new_string + @strpos = 0 + end + + @str[@strpos, n] + end + + def read(n) + result = peek(n) + @strpos += n + @pos += n + result + end + + def skip(n) + discarded = 0 + fetched = @str[@strpos..-1].size + while n > fetched + discarded += @str[@strpos..-1].size + new_string = @read_fiber.resume + raise CannotParseImage if !new_string + + new_string.force_encoding("ASCII-8BIT") if new_string.respond_to? :force_encoding + + fetched += new_string.size + @str = new_string + @strpos = 0 + end + @strpos = @strpos + n - discarded + @pos += n + end + end + + class IOStream < SimpleDelegator # :nodoc: + include StreamUtil + end + + def parse_type + parsed_type = case @stream.peek(2) + when "BM" + :bmp + when "GI" + :gif + when 0xff.chr + 0xd8.chr + :jpeg + when 0x89.chr + "P" + :png + when "II", "MM" + case @stream.peek(11)[8..10] + when "APC", "CR\002" + nil # do not recognise CRW or CR2 as tiff + else + :tiff + end + when '8B' + :psd + when "\0\0" + case @stream.peek(3).bytes.to_a.last + when 0 + # http://www.ftyps.com/what.html + case @stream.peek(12)[4..-1] + when "ftypavif" + :avif + when "ftypheic" + :heic + when "ftypmif1" + :heif + end + # ico has either a 1 (for ico format) or 2 (for cursor) at offset 3 + when 1 then :ico + when 2 then :cur + end + when "RI" + :webp if @stream.peek(12)[8..11] == "WEBP" + when "= end_pos + + box_type, box_size = read_box_header! + + case box_type + when "meta" + handle_meta_box(box_size) + when "pitm" + handle_pitm_box(box_size) + when "ipma" + handle_ipma_box(box_size) + when "hdlr" + handle_hdlr_box(box_size) + when "iprp", "ipco" + read_boxes!(box_size) + when "irot" + handle_irot_box + when "ispe" + handle_ispe_box(box_size, index) + when "mdat" + throw :finish + else + @stream.read(box_size) + end + + index += 1 + end + end + + def handle_irot_box + @rotation = (read_uint8! & 0x3) * 90 + end + + def handle_ispe_box(box_size, index) + throw :finish if box_size < 12 + + data = @stream.read(box_size) + width, height = data[4...12].unpack("N2") + @ispe_boxes << { index: index, size: [width, height] } + end + + def handle_hdlr_box(box_size) + throw :finish if box_size < 12 + + data = @stream.read(box_size) + throw :finish if data[8...12] != "pict" + end + + def handle_ipma_box(box_size) + @stream.read(3) + flags3 = read_uint8! + entries_count = read_uint32! + + entries_count.times do + id = read_uint16! + essen_count = read_uint8! 
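+        # Each entry associates an item ID with one or more property indices;
+        # when bit 0 of flags3 is set, each index is 15 bits wide (two bytes).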
+ + essen_count.times do + property_index = read_uint8! & 0x7F + + if flags3 & 1 == 1 + property_index = (property_index << 7) + read_uint8! + end + + @ipma_boxes << { id: id, property_index: property_index - 1 } + end + end + end + + def handle_pitm_box(box_size) + data = @stream.read(box_size) + @primary_box = data[4...6].unpack("S>")[0] + end + + def handle_meta_box(box_size) + throw :finish if box_size < 4 + + @stream.read(4) + read_boxes!(box_size - 4) + + throw :finish if !@primary_box + + primary_indices = @ipma_boxes + .select { |box| box[:id] == @primary_box } + .map { |box| box[:property_index] } + + ispe_box = @ispe_boxes.find do |box| + primary_indices.include?(box[:index]) + end + + if ispe_box + @final_size = ispe_box[:size] + end + + throw :finish + end + + def read_box_header! + size = read_uint32! + type = @stream.read(4) + [type, size - 8] + end + + def read_uint8! + @stream.read(1).unpack("C")[0] + end + + def read_uint16! + @stream.read(2).unpack("S>")[0] + end + + def read_uint32! + @stream.read(4).unpack("N")[0] + end + end + + def parse_size_for_avif + bmff = IsoBmff.new(@stream) + bmff.width_and_height + end + + def parse_size_for_heic + bmff = IsoBmff.new(@stream) + bmff.width_and_height + end + + def parse_size_for_heif + bmff = IsoBmff.new(@stream) + bmff.width_and_height + end + + class Gif # :nodoc: + def initialize(stream) + @stream = stream + end + + def width_and_height + @stream.read(11)[6..10].unpack('SS') + end + + # Checks if a delay between frames exists and if it does, then the GIFs is + # animated + def animated? + frames = 0 + + # "GIF" + version (3) + width (2) + height (2) + @stream.skip(10) + + # fields (1) + bg color (1) + pixel ratio (1) + fields = @stream.read(3).unpack("CCC")[0] + if fields & 0x80 != 0 # Global Color Table + # 2 * (depth + 1) colors, each occupying 3 bytes (RGB) + @stream.skip(3 * 2 ** ((fields & 0x7) + 1)) + end + + loop do + block_type = @stream.read(1).unpack("C")[0] + + if block_type == 0x21 # Graphic Control Extension + # extension type (1) + size (1) + size = @stream.read(2).unpack("CC")[1] + @stream.skip(size) + skip_sub_blocks + elsif block_type == 0x2C # Image Descriptor + frames += 1 + return true if frames > 1 + + # left position (2) + top position (2) + width (2) + height (2) + fields (1) + fields = @stream.read(9).unpack("SSSSC")[4] + if fields & 0x80 != 0 # Local Color Table + # 2 * (depth + 1) colors, each occupying 3 bytes (RGB) + @stream.skip(3 * 2 ** ((fields & 0x7) + 1)) + end + + @stream.skip(1) # LZW min code size (1) + skip_sub_blocks + else + break # unrecognized block + end + end + + false + end + + private + + def skip_sub_blocks + loop do + size = @stream.read(1).unpack("C")[0] + if size == 0 + break + else + @stream.skip(size) + end + end + end + end + + def parse_size_for_gif + gif = Gif.new(@stream) + gif.width_and_height + end + + def parse_size_for_png + @stream.read(25)[16..24].unpack('NN') + end + + def parse_size_for_jpeg + exif = nil + loop do + @state = case @state + when nil + @stream.skip(2) + :started + when :started + @stream.read_byte == 0xFF ? 
:sof : :started + when :sof + case @stream.read_byte + when 0xe1 # APP1 + skip_chars = @stream.read_int - 2 + data = @stream.read(skip_chars) + io = StringIO.new(data) + if io.read(4) == "Exif" + io.read(2) + new_exif = Exif.new(IOStream.new(io)) rescue nil + exif ||= new_exif # only use the first APP1 segment + end + :started + when 0xe0..0xef + :skipframe + when 0xC0..0xC3, 0xC5..0xC7, 0xC9..0xCB, 0xCD..0xCF + :readsize + when 0xFF + :sof + else + :skipframe + end + when :skipframe + skip_chars = @stream.read_int - 2 + @stream.skip(skip_chars) + :started + when :readsize + @stream.skip(3) + height = @stream.read_int + width = @stream.read_int + width, height = height, width if exif && exif.rotated? + return [width, height, exif ? exif.orientation : 1] + end + end + end + + def parse_size_for_bmp + d = @stream.read(32)[14..28] + header = d.unpack("C")[0] + + result = if header == 12 + d[4..8].unpack('SS') + else + d[4..-1].unpack('l> 6))] + end + + def parse_size_vp8x + flags = @stream.read(4).unpack("C")[0] + b1, b2, b3, b4, b5, b6 = @stream.read(6).unpack("CCCCCC") + width, height = 1 + b1 + (b2 << 8) + (b3 << 16), 1 + b4 + (b5 << 8) + (b6 << 16) + + if flags & 8 > 0 # exif + # parse exif for orientation + # TODO: find or create test images for this + end + + return [width, height] + end + + class Exif # :nodoc: + attr_reader :width, :height, :orientation + + def initialize(stream) + @stream = stream + @width, @height, @orientation = nil + parse_exif + end + + def rotated? + @orientation >= 5 + end + + private + + def get_exif_byte_order + byte_order = @stream.read(2) + case byte_order + when 'II' + @short, @long = 'v', 'V' + when 'MM' + @short, @long = 'n', 'N' + else + raise CannotParseImage + end + end + + def parse_exif_ifd + tag_count = @stream.read(2).unpack(@short)[0] + tag_count.downto(1) do + type = @stream.read(2).unpack(@short)[0] + @stream.read(6) + data = @stream.read(2).unpack(@short)[0] + case type + when 0x0100 # image width + @width = data + when 0x0101 # image height + @height = data + when 0x0112 # orientation + @orientation = data + end + if @width && @height && @orientation + return # no need to parse more + end + @stream.read(2) + end + end + + def parse_exif + @start_byte = @stream.pos + + get_exif_byte_order + + @stream.read(2) # 42 + + offset = @stream.read(4).unpack(@long)[0] + if @stream.respond_to?(:skip) + @stream.skip(offset - 8) + else + @stream.read(offset - 8) + end + + parse_exif_ifd + + @orientation ||= 1 + end + + end + + def parse_size_for_tiff + exif = Exif.new(@stream) + if exif.rotated? 
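+          # EXIF orientations 5-8 are transposed, so swap width and height.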
+ [exif.height, exif.width, exif.orientation] + else + [exif.width, exif.height, exif.orientation] + end + end + + def parse_size_for_psd + @stream.read(26).unpack("x14NN").reverse + end + + class Svg # :nodoc: + def initialize(stream) + @stream = stream + @width, @height, @ratio, @viewbox_width, @viewbox_height = nil + parse_svg + end + + def width_and_height + if @width && @height + [@width, @height] + elsif @width && @ratio + [@width, @width / @ratio] + elsif @height && @ratio + [@height * @ratio, @height] + elsif @viewbox_width && @viewbox_height + [@viewbox_width, @viewbox_height] + else + nil + end + end + + private + + def parse_svg + attr_name = [] + state = nil + + while (char = @stream.read(1)) && state != :stop do + case char + when "=" + if attr_name.join =~ /width/i + @stream.read(1) + @width = @stream.read_string_int + return if @height + elsif attr_name.join =~ /height/i + @stream.read(1) + @height = @stream.read_string_int + return if @width + elsif attr_name.join =~ /viewbox/i + values = attr_value.split(/\s/) + if values[2].to_f > 0 && values[3].to_f > 0 + @ratio = values[2].to_f / values[3].to_f + @viewbox_width = values[2].to_i + @viewbox_height = values[3].to_i + end + end + when /\w/ + attr_name << char + when "<" + attr_name = [char] + when ">" + state = :stop if state == :started + else + state = :started if attr_name.join == " + + fastlane Logo + + + +[![Twitter: @FastlaneTools](https://img.shields.io/badge/contact-@FastlaneTools-blue.svg?style=flat)](https://twitter.com/FastlaneTools) +[![License](https://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://github.com/fastlane/fastlane/blob/master/LICENSE) +[![Gem](https://img.shields.io/gem/v/fastlane.svg?style=flat)](https://rubygems.org/gems/fastlane) +[![Homebrew](https://img.shields.io/badge/dynamic/json.svg?url=https://formulae.brew.sh/api/formula/fastlane.json&query=$.versions.stable&label=homebrew)](https://formulae.brew.sh/formula/fastlane) +[![Build Status](https://img.shields.io/circleci/project/github/fastlane/fastlane/master.svg)](https://circleci.com/gh/fastlane/fastlane) +[![PRs welcome!](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/fastlane/fastlane/blob/master/CONTRIBUTING.md) + +_fastlane_ is a tool for iOS and Android developers to automate tedious tasks like generating screenshots, dealing with provisioning profiles, and releasing your application. + +
+✨ All fastlane docs were moved to [docs.fastlane.tools](https://docs.fastlane.tools) ✨
+ +## Need Help? + +Before submitting a new GitHub issue, please make sure to + +- Check out [docs.fastlane.tools](https://docs.fastlane.tools) +- Search for [existing GitHub issues](https://github.com/fastlane/fastlane/issues) + +If the above doesn't help, please [submit an issue](https://github.com/fastlane/fastlane/issues) on GitHub and provide information about your setup, in particular the output of the `fastlane env` command. + +**Note**: If you want to report a regression in _fastlane_ (something that has worked before, but broke with a new release), please mark your issue title as such using `[Regression] Your title here`. This enables us to quickly detect and fix regressions. + +## _fastlane_ team + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+- Maksym Grebenets
+- Iulian Onofrei
+- Josh Holtz
+- Max Ott
+- Olivier Halligon
+- Matthew Ellis
+- Helmut Januschka
+- Luka Mirosevic
+- Roger Oba
+- Łukasz Grabowski
+- Jan Piotrowski
+- Kohki Miki
+- Aaron Brager
+- Daniel Jankowski
+- Jorge Revuelta H
+- Danielle Tomlinson
+- Fumiya Nakamura
+- Felix Krause
+- Satoshi Namai
+- Stefan Natchev
+- Jimmy Dee
+- Manu Wallner
+- Jérôme Lacoste
+- Manish Rathi
+- Joshua Liebowitz
+- Andrew McBurney
+ +Special thanks to all [contributors](https://github.com/fastlane/fastlane/graphs/contributors) for extending and improving _fastlane_. + +## Contribute to _fastlane_ + +Check out [CONTRIBUTING.md](CONTRIBUTING.md) for more information on how to help with _fastlane_. + +## Code of Conduct + +Help us keep _fastlane_ open and inclusive. Please read and follow our [Code of Conduct](https://github.com/fastlane/fastlane/blob/master/CODE_OF_CONDUCT.md). + +## Metrics + +_fastlane_ tracks a few key metrics to understand how developers are using the tool and to help us know what areas need improvement. No personal/sensitive information is ever collected. Metrics that are collected include: + +* The number of _fastlane_ runs +* A salted hash of the app identifier or package name, which helps us anonymously identify unique usage of _fastlane_ + +You can easily opt-out of metrics collection by adding `opt_out_usage` at the top of your `Fastfile` or by setting the environment variable `FASTLANE_OPT_OUT_USAGE`. [Check out the metrics code on GitHub](https://github.com/fastlane/fastlane/tree/master/fastlane_core/lib/fastlane_core/analytics) + +## License + +This project is licensed under the terms of the MIT license. See the [LICENSE](LICENSE) file. + +> This project and all fastlane tools are in no way affiliated with Apple Inc. This project is open source under the MIT license, which means you have full access to the source code and can modify it to fit your own needs. All fastlane tools run on your own computer or server, so your credentials or other sensitive information will never leave your own computer. You are responsible for how you use fastlane tools. + +
+✨ All fastlane docs were moved to [docs.fastlane.tools](https://docs.fastlane.tools) ✨
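+## Example: opting out of metrics
+
+A minimal sketch of the opt-out described in the Metrics section above; the
+platform and lane shown are illustrative:
+
+```ruby
+# Fastfile: opt out of fastlane's usage metrics for every lane below.
+opt_out_usage
+
+default_platform(:ios)
+
+platform :ios do
+  lane :beta do
+    # ... your build and upload steps
+  end
+end
+```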
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/bin-proxy b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/bin-proxy new file mode 100644 index 0000000..e67a7ac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/bin-proxy @@ -0,0 +1,19 @@ +#!/usr/bin/env ruby + +require "colored" +require "shellwords" + +tool_name = File.basename($0) + +full_params = ARGV.shelljoin + +puts("[WARNING] You are calling #{tool_name} directly. Usage of the tool name without the `fastlane` prefix is deprecated in fastlane 2.0".yellow) +puts("Please update your scripts to use `fastlane #{tool_name} #{full_params}` instead.".yellow) + +exec_arr = ["fastlane", tool_name] + ARGV + +# The * turns the array into a parameter list +# This is using the form of exec which takes a variable parameter list, e.g. `exec(command, param1, param2, ...)` +# We need to use that, because otherwise invocations like +# `spaceauth -u user@fastlane.tools` would recognize "-u user@fastlane.tools" as a single parameter and throw errors +exec(*exec_arr) diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/fastlane b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/fastlane new file mode 100644 index 0000000..2fd9a00 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/bin/fastlane @@ -0,0 +1,23 @@ +#!/usr/bin/env ruby + +if RUBY_VERSION < '2.0.0' + abort("fastlane requires Ruby 2.0.0 or higher") +end + +def self.windows? + # taken from: https://stackoverflow.com/a/171011/1945875 + (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil +end + +require "fastlane/cli_tools_distributor" + +if Fastlane::CLIToolsDistributor.running_version_command? + # This will print out the fastlane binary path right above the + # version number. Very often, users are not aware they have + # e.g. bundled fastlane installed + puts("fastlane installation at path:") + puts(File.expand_path(__FILE__)) + puts("-----------------------------") +end + +Fastlane::CLIToolsDistributor.take_off diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/README.md new file mode 100644 index 0000000..35ccc5e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/README.md @@ -0,0 +1,17 @@ +

+fastlane
+
+------
+
+The cert docs were moved to [docs.fastlane.tools](https://docs.fastlane.tools/actions/cert/)
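+## Example
+
+A minimal sketch of driving cert from a lane, assuming its `get_certificates`
+lane alias; the team ID and output path below are illustrative:
+
+```ruby
+lane :certs do
+  get_certificates(
+    development: false,     # same switch as the CERT_DEVELOPMENT option
+    output_path: "./certs", # where the .cer and .p12 files are written
+    team_id: "ABCDE12345"   # illustrative Developer Portal team ID
+  )
+end
+```
+
+The same flow can be run directly with `fastlane cert create`.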

diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert.rb new file mode 100644 index 0000000..1069b82 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert.rb @@ -0,0 +1,4 @@ +require_relative 'cert/runner' +require_relative 'cert/options' + +require_relative 'cert/module' diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/commands_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/commands_generator.rb new file mode 100644 index 0000000..9a5ba35 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/commands_generator.rb @@ -0,0 +1,61 @@ +require 'commander' +require 'fastlane/version' +require 'fastlane_core/ui/help_formatter' +require 'fastlane_core/configuration/configuration' +require 'fastlane_core/globals' + +require_relative 'options' +require_relative 'runner' + +HighLine.track_eof = false + +module Cert + class CommandsGenerator + include Commander::Methods + + def self.start + self.new.run + end + + def run + program :name, 'cert' + program :version, Fastlane::VERSION + program :description, 'CLI for \'cert\' - Create new iOS code signing certificates' + program :help, 'Author', 'Felix Krause ' + program :help, 'Website', 'https://fastlane.tools' + program :help, 'Documentation', 'https://docs.fastlane.tools/actions/cert/' + program :help_formatter, FastlaneCore::HelpFormatter + + global_option('--verbose') { FastlaneCore::Globals.verbose = true } + global_option('--env STRING[,STRING2]', String, 'Add environment(s) to use with `dotenv`') + + command :create do |c| + c.syntax = 'fastlane cert create' + c.description = 'Create new iOS code signing certificates' + + FastlaneCore::CommanderGenerator.new.generate(Cert::Options.available_options, command: c) + + c.action do |args, options| + Cert.config = FastlaneCore::Configuration.create(Cert::Options.available_options, options.__hash__) + Cert::Runner.new.launch + end + end + + command :revoke_expired do |c| + c.syntax = 'fastlane cert revoke_expired' + c.description = 'Revoke expired iOS code signing certificates' + + FastlaneCore::CommanderGenerator.new.generate(Cert::Options.available_options, command: c) + + c.action do |args, options| + Cert.config = FastlaneCore::Configuration.create(Cert::Options.available_options, options.__hash__) + Cert::Runner.new.revoke_expired_certs! + end + end + + default_command(:create) + + run! 
+ end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/module.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/module.rb new file mode 100644 index 0000000..c0ca4ed --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/module.rb @@ -0,0 +1,16 @@ +require 'fastlane_core/helper' +require 'fastlane/boolean' + +module Cert + # Use this to just setup the configuration attribute and set it later somewhere else + class << self + attr_accessor :config + end + + Helper = FastlaneCore::Helper # you gotta love Ruby: Helper.* should use the Helper class contained in FastlaneCore + UI = FastlaneCore::UI + Boolean = Fastlane::Boolean + ROOT = Pathname.new(File.expand_path('../../..', __FILE__)) + + ENV['FASTLANE_TEAM_ID'] ||= ENV["CERT_TEAM_ID"] +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/options.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/options.rb new file mode 100644 index 0000000..6f43554 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/options.rb @@ -0,0 +1,133 @@ +require 'credentials_manager/appfile_config' +require 'fastlane_core/configuration/config_item' + +require_relative 'module' + +module Cert + class Options + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :development, + env_name: "CERT_DEVELOPMENT", + description: "Create a development certificate instead of a distribution one", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :type, + env_name: "CERT_TYPE", + description: "Create specific certificate type (takes precedence over :development)", + optional: true, + verify_block: proc do |value| + value = value.to_s + types = %w(mac_installer_distribution developer_id_installer developer_id_application developer_id_kext) + UI.user_error!("Unsupported types, must be: #{types}") unless types.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "CERT_FORCE", + description: "Create a certificate even if an existing certificate exists", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :generate_apple_certs, + env_name: "CERT_GENERATE_APPLE_CERTS", + description: "Create a certificate type for Xcode 11 and later (Apple Development or Apple Distribution)", + type: Boolean, + default_value: FastlaneCore::Helper.mac? 
&& FastlaneCore::Helper.xcode_at_least?('11'), + default_value_dynamic: true), + + # App Store Connect API + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["CERT_API_KEY_PATH", "DELIVER_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["CERT_API_KEY", "DELIVER_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + + # Apple ID + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "CERT_USERNAME", + description: "Your Apple ID Username", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-b", + env_name: "CERT_TEAM_ID", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true, + description: "The ID of your Developer Portal team if you're in multiple teams", + optional: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-l", + env_name: "CERT_TEAM_NAME", + description: "The name of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_NAME"] = value.to_s + end), + + # Other Options + FastlaneCore::ConfigItem.new(key: :filename, + short_option: "-q", + env_name: "CERT_FILE_NAME", + optional: true, + description: "The filename of certificate to store"), + FastlaneCore::ConfigItem.new(key: :output_path, + short_option: "-o", + env_name: "CERT_OUTPUT_PATH", + description: "The path to a directory in which all certificates and private keys should be stored", + default_value: "."), + FastlaneCore::ConfigItem.new(key: :keychain_path, + short_option: "-k", + env_name: "CERT_KEYCHAIN_PATH", + description: "Path to a custom keychain", + code_gen_sensitive: true, + default_value: Dir["#{Dir.home}/Library/Keychains/login.keychain", "#{Dir.home}/Library/Keychains/login.keychain-db"].last, + default_value_dynamic: true, + verify_block: proc do |value| + value = File.expand_path(value) + UI.user_error!("Keychain not found at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :keychain_password, + short_option: "-p", + env_name: "CERT_KEYCHAIN_PASSWORD", + sensitive: true, + description: "This might be required the first time you access certificates on a new mac. For the login/default keychain this is your macOS account password", + optional: true), + FastlaneCore::ConfigItem.new(key: :skip_set_partition_list, + short_option: "-P", + env_name: "CERT_SKIP_SET_PARTITION_LIST", + description: "Skips setting the partition list (which can sometimes take a long time). 
Setting the partition list is usually needed to prevent Xcode from prompting to allow a cert to be used for signing", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "CERT_PLATFORM", + description: "Set the provisioning profile's platform (ios, macos, tvos)", + default_value: "ios", + verify_block: proc do |value| + value = value.to_s + pt = %w(macos ios tvos) + UI.user_error!("Unsupported platform, must be: #{pt}") unless pt.include?(value) + end) + ] + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/runner.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/runner.rb new file mode 100644 index 0000000..5d1778c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/cert/lib/cert/runner.rb @@ -0,0 +1,250 @@ +require 'fileutils' +require 'fastlane_core/globals' +require 'fastlane_core/cert_checker' +require 'fastlane_core/keychain_importer' +require 'fastlane_core/print_table' +require 'spaceship' + +require_relative 'module' + +module Cert + class Runner + def launch + run + + installed = FastlaneCore::CertChecker.installed?(ENV["CER_FILE_PATH"], in_keychain: ENV["CER_KEYCHAIN_PATH"]) + UI.message("Verifying the certificate is properly installed locally...") + UI.user_error!("Could not find the newly generated certificate installed", show_github_issues: true) unless installed + UI.success("Successfully installed certificate #{ENV['CER_CERTIFICATE_ID']}") + return ENV["CER_FILE_PATH"] + end + + def login + if (api_token = Spaceship::ConnectAPI::Token.from(hash: Cert.config[:api_key], filepath: Cert.config[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? + UI.message("Using existing authorization token for App Store Connect API") + else + # Username is now optional since addition of App Store Connect API Key + # Force asking for username to prompt user if not already set + Cert.config.fetch(:username, force_ask: true) + + UI.message("Starting login with user '#{Cert.config[:username]}'") + Spaceship::ConnectAPI.login(Cert.config[:username], nil, use_portal: true, use_tunes: false) + UI.message("Successfully logged in") + end + end + + def run + FileUtils.mkdir_p(Cert.config[:output_path]) + + FastlaneCore::PrintTable.print_values(config: Cert.config, hide_keys: [:output_path], title: "Summary for cert #{Fastlane::VERSION}") + + login + + should_create = Cert.config[:force] + unless should_create + cert_path = find_existing_cert + should_create = cert_path.nil? + end + + return unless should_create + + if create_certificate # no certificate here, creating a new one + return # success + else + UI.user_error!("Something went wrong when trying to create a new certificate...") + end + end + + # Command method for the :revoke_expired sub-command + def revoke_expired_certs! + FastlaneCore::PrintTable.print_values(config: Cert.config, hide_keys: [:output_path], title: "Summary for cert #{Fastlane::VERSION}") + + login + + to_revoke = expired_certs + + if to_revoke.empty? + UI.success("No expired certificates were found to revoke! 👍") + return + end + + revoke_count = 0 + + to_revoke.each do |certificate| + begin + UI.message("#{certificate.id} #{certificate.display_name} has expired, revoking...") + certificate.delete! 
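+            # delete! revokes the certificate through the App Store Connect API.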
+ revoke_count += 1 + rescue => e + UI.error("An error occurred while revoking #{certificate.id} #{certificate.display_name}") + UI.error("#{e.message}\n#{e.backtrace.join("\n")}") if FastlaneCore::Globals.verbose? + end + end + + UI.success("#{revoke_count} expired certificate#{'s' if revoke_count != 1} #{revoke_count == 1 ? 'has' : 'have'} been revoked! 👍") + end + + def expired_certs + certificates.reject(&:valid?) + end + + def find_existing_cert + certificates.each do |certificate| + unless certificate.certificate_content + next + end + + path = store_certificate(certificate, Cert.config[:filename]) + private_key_path = File.expand_path(File.join(Cert.config[:output_path], "#{certificate.id}.p12")) + + # As keychain is specific to macOS, this will likely fail on non macOS systems. + # See also: https://github.com/fastlane/fastlane/pull/14462 + keychain = File.expand_path(Cert.config[:keychain_path]) unless Cert.config[:keychain_path].nil? + if FastlaneCore::CertChecker.installed?(path, in_keychain: keychain) + # This certificate is installed on the local machine + ENV["CER_CERTIFICATE_ID"] = certificate.id + ENV["CER_FILE_PATH"] = path + ENV["CER_KEYCHAIN_PATH"] = keychain + + UI.success("Found the certificate #{certificate.id} (#{certificate.display_name}) which is installed on the local machine. Using this one.") + + return path + elsif File.exist?(private_key_path) + password = Cert.config[:keychain_password] + FastlaneCore::KeychainImporter.import_file(private_key_path, keychain, keychain_password: password, skip_set_partition_list: Cert.config[:skip_set_partition_list]) + FastlaneCore::KeychainImporter.import_file(path, keychain, keychain_password: password, skip_set_partition_list: Cert.config[:skip_set_partition_list]) + + ENV["CER_CERTIFICATE_ID"] = certificate.id + ENV["CER_FILE_PATH"] = path + ENV["CER_KEYCHAIN_PATH"] = keychain + + UI.success("Found the cached certificate #{certificate.id} (#{certificate.display_name}). Using this one.") + + return path + else + UI.error("Certificate #{certificate.id} (#{certificate.display_name}) can't be found on your local computer") + end + + File.delete(path) # as apparently this certificate is pretty useless without a private key + end + + UI.important("Couldn't find an existing certificate... creating a new one") + return nil + end + + # All certificates of this type + def certificates + filter = { + certificateType: certificate_types.join(",") + } + return Spaceship::ConnectAPI::Certificate.all(filter: filter) + end + + # The kind of certificate we're interested in (for creating) + def certificate_type + return certificate_types.first + end + + # The kind of certificates we're interested in (for listing) + def certificate_types + if Cert.config[:type] + case Cert.config[:type].to_sym + when :mac_installer_distribution + return [Spaceship::ConnectAPI::Certificate::CertificateType::MAC_INSTALLER_DISTRIBUTION] + when :developer_id_application + return [ + Spaceship::ConnectAPI::Certificate::CertificateType::DEVELOPER_ID_APPLICATION_G2, + Spaceship::ConnectAPI::Certificate::CertificateType::DEVELOPER_ID_APPLICATION + ] + when :developer_id_kext + return [Spaceship::ConnectAPI::Certificate::CertificateType::DEVELOPER_ID_KEXT] + when :developer_id_installer + if !Spaceship::ConnectAPI.token.nil? + raise "As of 2021-11-09, the App Store Connect API does not allow accessing DEVELOPER_ID_INSTALLER with the API Key. 
Please file an issue on GitHub if this has changed and needs to be updated" + else + return [Spaceship::ConnectAPI::Certificate::CertificateType::DEVELOPER_ID_INSTALLER] + end + else + UI.user_error("Unaccepted value for :type - #{Cert.config[:type]}") + end + end + + # Check if apple certs (Xcode 11 and later) should be used + if Cert.config[:generate_apple_certs] + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::DISTRIBUTION + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::IOS_DISTRIBUTION if Spaceship::ConnectAPI.client.in_house? # Enterprise doesn't use Apple Distribution + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::DEVELOPMENT if Cert.config[:development] + else + case Cert.config[:platform].to_s + when 'ios', 'tvos' + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::IOS_DISTRIBUTION + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::IOS_DISTRIBUTION if Spaceship::ConnectAPI.client.in_house? + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::IOS_DEVELOPMENT if Cert.config[:development] + + when 'macos' + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::MAC_APP_DISTRIBUTION + cert_type = Spaceship::ConnectAPI::Certificate::CertificateType::MAC_APP_DEVELOPMENT if Cert.config[:development] + end + end + + return [cert_type] + end + + def create_certificate + # Create a new certificate signing request + csr, pkey = Spaceship::ConnectAPI::Certificate.create_certificate_signing_request + + # Use the signing request to create a new (development|distribution) certificate + begin + certificate = Spaceship::ConnectAPI::Certificate.create( + certificate_type: certificate_type, + csr_content: csr.to_pem + ) + rescue => ex + type_name = (Cert.config[:development] ? "Development" : "Distribution") + if ex.to_s.include?("You already have a current") + UI.user_error!("Could not create another #{type_name} certificate, reached the maximum number of available #{type_name} certificates.", show_github_issues: true) + elsif ex.to_s.include?("You are not allowed to perform this operation.") && type_name == "Distribution" + UI.user_error!("You do not have permission to create this certificate. 
Only Team Admins can create Distribution certificates\n 🔍 See https://developer.apple.com/library/content/documentation/IDEs/Conceptual/AppDistributionGuide/ManagingYourTeam/ManagingYourTeam.html for more information.") + end + raise ex + end + + # Store all that onto the filesystem + + request_path = File.expand_path(File.join(Cert.config[:output_path], "#{certificate.id}.certSigningRequest")) + File.write(request_path, csr.to_pem) + + private_key_path = File.expand_path(File.join(Cert.config[:output_path], "#{certificate.id}.p12")) + File.write(private_key_path, pkey) + + cert_path = store_certificate(certificate, Cert.config[:filename]) + + # Import all the things into the Keychain + keychain = File.expand_path(Cert.config[:keychain_path]) + password = Cert.config[:keychain_password] + FastlaneCore::KeychainImporter.import_file(private_key_path, keychain, keychain_password: password, skip_set_partition_list: Cert.config[:skip_set_partition_list]) + FastlaneCore::KeychainImporter.import_file(cert_path, keychain, keychain_password: password, skip_set_partition_list: Cert.config[:skip_set_partition_list]) + + # Environment variables for the fastlane action + ENV["CER_CERTIFICATE_ID"] = certificate.id + ENV["CER_FILE_PATH"] = cert_path + + UI.success("Successfully generated #{certificate.id} which was imported to the local machine.") + + return cert_path + end + + def store_certificate(certificate, filename = nil) + cert_name = filename ? filename : certificate.id + cert_name = "#{cert_name}.cer" unless File.extname(cert_name) == ".cer" + path = File.expand_path(File.join(Cert.config[:output_path], cert_name)) + raw_data = Base64.decode64(certificate.certificate_content) + File.write(path, raw_data.force_encoding("UTF-8")) + return path + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/README.md new file mode 100644 index 0000000..94745be --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/README.md @@ -0,0 +1,64 @@ +CredentialsManager +=================== + +`CredentialsManager` is used by most components in the [fastlane.tools](https://fastlane.tools) toolchain. + +All code related to your username and password can be found here: [account_manager.rb](https://github.com/fastlane/fastlane/blob/master/credentials_manager/lib/credentials_manager/account_manager.rb) + +## Usage +Along with the [Ruby libraries](https://github.com/fastlane/fastlane/tree/master/credentials_manager#implementing-a-custom-solution) you can use the command line interface to add credentials to the keychain. + +**Adding Credentials** +``` +fastlane fastlane-credentials add --username felix@krausefx.com +Password: ********* +Credential felix@krausefx.com:********* added to keychain. +``` + +**Removing Credentials** +``` +fastlane fastlane-credentials remove --username felix@krausefx.com +password has been deleted. +``` + +## Storing in the keychain + +By default, your Apple credentials are stored in the macOS Keychain. + +Your password is only stored locally on your computer. + +## Change Password + +You can easily delete the stored password by opening the "Keychain Access" app, switching to *All Items*, and searching for "*deliver*". Select the item you want to change and delete it. Next time running one of the tools, you'll be asked for the new password. 
+ +## Using environment variables + +Pass the user credentials via the following environment variables: + +``` +FASTLANE_USER +FASTLANE_PASSWORD +``` + +If you don't want to have your password stored in the Keychain set the `FASTLANE_DONT_STORE_PASSWORD` environment variable to `"1"`. + +## Implementing a custom solution + +All _fastlane_ tools are Ruby-based, and you can take a look at the source code to easily implement your own authentication solution. + +```ruby +require 'credentials_manager' + +data = CredentialsManager::AccountManager.new(user: user, password: password) +puts data.user +puts data.password +``` + +# Code of Conduct +Help us keep _fastlane_ open and inclusive. Please read and follow our [Code of Conduct](https://github.com/fastlane/fastlane/blob/master/CODE_OF_CONDUCT.md). + +# License + +This project is licensed under the terms of the MIT license. See the LICENSE file. + +> This project and all fastlane tools are in no way affiliated with Apple Inc. This project is open source under the MIT license, which means you have full access to the source code and can modify it to fit your own needs. All fastlane tools run on your own computer or server, so your credentials or other sensitive information will never leave your own computer. You are responsible for how you use fastlane tools. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager.rb new file mode 100644 index 0000000..8263134 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager.rb @@ -0,0 +1,7 @@ +require_relative 'credentials_manager/account_manager' +require_relative 'credentials_manager/cli' +require_relative 'credentials_manager/appfile_config' + +module CredentialsManager + ROOT = Pathname.new(File.expand_path('../..', __FILE__)) +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/account_manager.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/account_manager.rb new file mode 100644 index 0000000..29a098d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/account_manager.rb @@ -0,0 +1,157 @@ +require 'security' +require 'highline/import' # to hide the entered password + +require_relative 'appfile_config' + +module CredentialsManager + class AccountManager + DEFAULT_PREFIX = "deliver" + + # Is used for iTunes Transporter + attr_reader :prefix + + # @param prefix [String] Very optional, is used for the + # iTunes Transporter which uses application specific passwords + # @param note [String] An optional note that will be shown next + # to the password and username prompt + def initialize(user: nil, password: nil, prefix: nil, note: nil) + @prefix = prefix || DEFAULT_PREFIX + + @user = user + @password = password + @note = note + end + + # Is the default prefix "deliver" + def default_prefix? + @prefix == DEFAULT_PREFIX + end + + def user + if default_prefix? 
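+        # Only the default "deliver" prefix falls back to the shared
+        # FASTLANE_USER / DELIVER_USER variables and the Appfile's apple_id.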
+ @user ||= ENV["FASTLANE_USER"] + @user ||= ENV["DELIVER_USER"] + @user ||= AppfileConfig.try_fetch_value(:apple_id) + end + + ask_for_login if @user.to_s.length == 0 + return @user + end + + def fetch_password_from_env + password = ENV["FASTLANE_PASSWORD"] || ENV["DELIVER_PASSWORD"] + return password if password.to_s.length > 0 + return nil + end + + def password(ask_if_missing: true) + if default_prefix? + @password ||= fetch_password_from_env + end + + unless @password + item = Security::InternetPassword.find(server: server_name) + @password ||= item.password if item + end + ask_for_login while ask_if_missing && @password.to_s.length == 0 + return @password + end + + # Call this method to ask the user to re-enter the credentials + # @param force: if false, the user is asked before it gets deleted + # @return: Did the user decide to remove the old entry and enter a new password? + def invalid_credentials(force: false) + puts("The login credentials for '#{user}' seem to be wrong".red) + + if fetch_password_from_env + puts("The password was taken from the environment variable") + puts("Please make sure it is correct") + return false + end + + if force || agree("Do you want to re-enter your password? (y/n)", true) + puts("Removing Keychain entry for user '#{user}'...".yellow) if mac? + remove_from_keychain + ask_for_login + return true + end + false + end + + def add_to_keychain + if options + Security::InternetPassword.add(server_name, user, password, options) + else + Security::InternetPassword.add(server_name, user, password) + end + end + + def remove_from_keychain + Security::InternetPassword.delete(server: server_name) + @password = nil + end + + def server_name + "#{@prefix}.#{user}" + end + + # Use env variables from this method to augment internet password item with additional data. + # These variables are used by Xamarin Studio to authenticate Apple developers. + def options + hash = {} + hash[:p] = ENV["FASTLANE_PATH"] if ENV["FASTLANE_PATH"] + hash[:P] = ENV["FASTLANE_PORT"] if ENV["FASTLANE_PORT"] + hash[:r] = ENV["FASTLANE_PROTOCOL"] if ENV["FASTLANE_PROTOCOL"] + hash.empty? ? nil : hash + end + + private + + def ask_for_login + if ENV["FASTLANE_HIDE_LOGIN_INFORMATION"].to_s.length == 0 + puts("-------------------------------------------------------------------------------------".green) + puts("Please provide your Apple Developer Program account credentials".green) + puts("The login information you enter will be stored in your macOS Keychain".green) if mac? + if default_prefix? + # We don't want to show this message, if we ask for the application specific password + # which has a different prefix + puts("You can also pass the password using the `FASTLANE_PASSWORD` environment variable".green) + puts("See more information about it on GitHub: https://github.com/fastlane/fastlane/tree/master/credentials_manager".green) if mac? + end + puts("-------------------------------------------------------------------------------------".green) + end + + if @user.to_s.length == 0 + raise "Missing username, and running in non-interactive shell" if $stdout.isatty == false + prompt_text = "Username" + prompt_text += " (#{@note})" if @note + prompt_text += ": " + @user = ask(prompt_text) while @user.to_s.length == 0 + # Returning here since only the username was asked for. This method will be called again when a password is needed. 
+ return + end + + while @password.to_s.length == 0 + raise "Missing password for user #{@user}, and running in non-interactive shell" if $stdout.isatty == false + note = @note + " " if @note + @password = ask("Password (#{note}for #{@user}): ") { |q| q.echo = "*" } + end + + return true if ENV["FASTLANE_DONT_STORE_PASSWORD"] + return true unless mac? + + # Now we store this information in the keychain + if add_to_keychain + return true + else + puts("Could not store password in keychain".red) + return false + end + end + + # Helper.mac? - but we don't have access to the helper here + def mac? + (/darwin/ =~ RUBY_PLATFORM) != nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/appfile_config.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/appfile_config.rb new file mode 100644 index 0000000..a320b67 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/appfile_config.rb @@ -0,0 +1,196 @@ +require 'fastlane_core/globals' + +module CredentialsManager + # Access the content of the app file (e.g. app identifier and Apple ID) + class AppfileConfig + def self.try_fetch_value(key) + # We need to load the file every time we call this method + # to support the `for_lane` keyword + begin + return self.new.data[key] + rescue => ex + puts(ex.to_s) + return nil + end + nil + end + + def self.default_path + ["./fastlane/Appfile", "./.fastlane/Appfile", "./Appfile"].each do |current| + return current if File.exist?(current) + end + nil + end + + def initialize(path = nil) + if path + raise "Could not find Appfile at path '#{path}'".red unless File.exist?(File.expand_path(path)) + end + + path ||= self.class.default_path + + if path && File.exist?(path) # it might not exist, we still want to use the default values + full_path = File.expand_path(path) + Dir.chdir(File.expand_path('..', path)) do + content = File.read(full_path, encoding: "utf-8") + + # From https://github.com/orta/danger/blob/master/lib/danger/danger_core/dangerfile.rb + if content.tr!('“”‘’‛', %(""''')) + puts("Your #{File.basename(path)} has had smart quotes sanitised. " \ + 'To avoid issues in the future, you should not use ' \ + 'TextEdit for editing it. If you are not using TextEdit, ' \ + 'you should turn off smart quotes in your editor of choice.'.red) + end + + # rubocop:disable Security/Eval + eval(content) + # rubocop:enable Security/Eval + + print_debug_information(path: full_path) if FastlaneCore::Globals.verbose? + end + end + + fallback_to_default_values + end + + def print_debug_information(path: nil) + self.class.already_printed_debug_information ||= {} + return if self.class.already_printed_debug_information[self.data] + # self.class.already_printed_debug_information is a hash, we use to detect if we already printed this data + # this is necessary, as on the course of a fastlane run, the values might change, e.g. when using + # the `for_lane` keyword. 
+ + puts("Successfully loaded Appfile at path '#{path}'".yellow) + + self.data.each do |key, value| + puts("- #{key.to_s.cyan}: '#{value.to_s.green}'") + end + puts("-------") + + self.class.already_printed_debug_information[self.data] = true + end + + def self.already_printed_debug_information + @already_printed_debug_information ||= {} + end + + def fallback_to_default_values + data[:apple_id] ||= ENV["FASTLANE_USER"] || ENV["DELIVER_USER"] || ENV["DELIVER_USERNAME"] + end + + def data + @data ||= {} + end + + # Setters + + # iOS + def app_identifier(*args, &block) + setter(:app_identifier, *args, &block) + end + + def apple_id(*args, &block) + setter(:apple_id, *args, &block) + end + + def apple_dev_portal_id(*args, &block) + setter(:apple_dev_portal_id, *args, &block) + end + + def itunes_connect_id(*args, &block) + setter(:itunes_connect_id, *args, &block) + end + + # Developer Portal + def team_id(*args, &block) + setter(:team_id, *args, &block) + end + + def team_name(*args, &block) + setter(:team_name, *args, &block) + end + + # App Store Connect + def itc_team_id(*args, &block) + setter(:itc_team_id, *args, &block) + end + + def itc_team_name(*args, &block) + setter(:itc_team_name, *args, &block) + end + + def itc_provider(*args, &block) + setter(:itc_provider, *args, &block) + end + + # Android + def json_key_file(*args, &block) + setter(:json_key_file, *args, &block) + end + + def json_key_data_raw(*args, &block) + setter(:json_key_data_raw, *args, &block) + end + + def issuer(*args, &block) + puts("Appfile: DEPRECATED issuer: use json_key_file instead".red) + setter(:issuer, *args, &block) + end + + def package_name(*args, &block) + setter(:package_name, *args, &block) + end + + def keyfile(*args, &block) + puts("Appfile: DEPRECATED keyfile: use json_key_file instead".red) + setter(:keyfile, *args, &block) + end + + # Override Appfile configuration for a specific lane. + # + # lane_name - Symbol representing a lane name. (Can be either :name, 'name' or 'platform name') + # block - Block to execute to override configuration values. + # + # Discussion If received lane name does not match the lane name available as environment variable, no changes will + # be applied. + def for_lane(lane_name) + if lane_name.to_s.split(" ").count > 1 + # That's the legacy syntax 'platform name' + puts("You use deprecated syntax '#{lane_name}' in your Appfile.".yellow) + puts("Please follow the Appfile guide: https://docs.fastlane.tools/advanced/#appfile".yellow) + platform, lane_name = lane_name.split(" ") + + return unless platform == ENV["FASTLANE_PLATFORM_NAME"] + # the lane name will be verified below + end + + if ENV["FASTLANE_LANE_NAME"] == lane_name.to_s + yield + end + end + + # Override Appfile configuration for a specific platform. + # + # platform_name - Symbol representing a platform name. + # block - Block to execute to override configuration values. + # + # Discussion If received platform name does not match the platform name available as environment variable, no changes will + # be applied. + def for_platform(platform_name) + if ENV["FASTLANE_PLATFORM_NAME"] == platform_name.to_s + yield + end + end + + private + + def setter(key, *args) + if block_given? 
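+        # A block takes precedence over a positional value and is evaluated
+        # lazily; blank values are discarded below so defaults still apply.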
+ value = yield + else + value = args.shift + end + data[key] = value if value && value.to_s.length > 0 + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/cli.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/cli.rb new file mode 100644 index 0000000..531d5fc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/credentials_manager/lib/credentials_manager/cli.rb @@ -0,0 +1,69 @@ +require 'commander' + +require_relative 'account_manager' + +module CredentialsManager + class CLI + include Commander::Methods + + # Parses command options and executes actions + def run + program :name, 'CredentialsManager' + program :version, Fastlane::VERSION + program :description, 'Manage credentials for fastlane tools.' + + global_option('--env STRING[,STRING2]', String, 'Add environment(s) to use with `dotenv`') + + # Command to add entry to Keychain + command :add do |c| + c.syntax = 'fastlane fastlane-credentials add' + c.description = 'Adds a fastlane credential to the keychain.' + + c.option('--username username', String, 'Username to add.') + c.option('--password password', String, 'Password to add.') + + c.action do |args, options| + username = options.username || ask('Username: ') + password = options.password || ask('Password: ') { |q| q.echo = '*' } + + add(username, password) + + puts("Credential #{username}:#{'*' * password.length} added to keychain.") + end + end + + # Command to remove credential from Keychain + command :remove do |c| + c.syntax = 'fastlane fastlane-credentials remove' + c.description = 'Removes a fastlane credential from the keychain.' + + c.option('--username username', String, 'Username to remove.') + + c.action do |args, options| + username = options.username || ask('Username: ') + + remove(username) + end + end + + run! + end + + private + + # Add entry to Apple Keychain using AccountManager + def add(username, password) + CredentialsManager::AccountManager.new( + user: username, + password: password + ).add_to_keychain + end + + # Remove entry from Apple Keychain using AccountManager + def remove(username) + CredentialsManager::AccountManager.new( + user: username + ).remove_from_keychain + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/README.md new file mode 100644 index 0000000..f98819f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/README.md @@ -0,0 +1,17 @@ +

+fastlane
+
+------
+
+The deliver docs were moved to [docs.fastlane.tools](https://docs.fastlane.tools/actions/deliver/)
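+## Example
+
+A minimal sketch of invoking deliver from a lane, assuming its
+`upload_to_app_store` lane alias; the options shown are illustrative:
+
+```ruby
+lane :release do
+  upload_to_app_store(
+    skip_screenshots: true, # keep the screenshots already on App Store Connect
+    force: true             # skip the interactive HTML report confirmation
+  )
+end
+```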

diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault new file mode 100644 index 0000000..74739f7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault @@ -0,0 +1,3 @@ +# The Deliverfile allows you to store various App Store Connect metadata +# For more information, check out the docs +# https://docs.fastlane.tools/actions/deliver/ diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault.swift b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault.swift new file mode 100644 index 0000000..f91feb1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/DeliverfileDefault.swift @@ -0,0 +1,13 @@ +// The Deliverfile allows you to store various App Store Connect metadata +// For more information, check out the docs +// https://docs.fastlane.tools/actions/deliver/ + +// In general, you can use the options available +// fastlane deliver --help + +// Remove the // in front of the line to enable the option + +class Deliverfile: DeliverfileProtocol { + //var username: String { return "" } + //var appIdentifier: String? { return "" } +} diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/ScreenshotsHelp b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/ScreenshotsHelp new file mode 100644 index 0000000..948c580 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/ScreenshotsHelp @@ -0,0 +1,30 @@ +## Screenshots Naming Rules + +Put all screenshots you want to use inside the folder of its language (e.g. `en-US`). +The device type will automatically be recognized using the image resolution. + +The screenshots can be named whatever you want, but keep in mind they are sorted +alphabetically, in a human-friendly way. See https://github.com/fastlane/fastlane/pull/18200 for more details. + +### Exceptions + +#### iPad Pro (3rd Gen) 12.9" + +Since iPad Pro (3rd Gen) 12.9" and iPad Pro (2nd Gen) 12.9" have the same image +resolution, screenshots of the iPad Pro (3rd gen) 12.9" must contain either the +string `iPad Pro (12.9-inch) (3rd generation)`, `IPAD_PRO_3GEN_129`, or `ipadPro129` +(App Store Connect's internal naming of the display family for the 3rd generation iPad Pro) +in its filename to be assigned the correct display family and to be uploaded to +the correct screenshot slot in your app's metadata. + +### Other Platforms + +#### Apple TV + +Apple TV screenshots should be stored in a subdirectory named `appleTV` with language +folders inside of it. + +#### iMessage + +iMessage screenshots, like the Apple TV ones, should also be stored in a subdirectory +named `iMessage`, with language folders inside of it. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/summary.html.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/summary.html.erb new file mode 100644 index 0000000..6f429ed --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/assets/summary.html.erb @@ -0,0 +1,299 @@ + + + + deliver - <%= @app_name %> + + + + + +
[summary.html.erb body: the HTML markup of this 299-line template was stripped during extraction and cannot be faithfully reconstructed. Its surviving ERB logic shows what the page renders: the large app icon and Apple Watch app icon when set; for each language, the app name, subtitle, support_url and marketing_url, the keywords (via split_keywords), the description, the release notes ("Changelog"), and the promotional text; a screenshots section grouped by screen size that honors skip_screenshots and, when no screenshots are found, explains the effect of --overwrite_screenshots and points to `deliver download_screenshots`; the trade representative contact information; and the app review information. The `<% end %>` that follows closes this last block.]
+ <% end %> + + diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver.rb new file mode 100644 index 0000000..84dee19 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver.rb @@ -0,0 +1,15 @@ +require_relative 'deliver/languages' +require_relative 'deliver/loader' +require_relative 'deliver/options' +require_relative 'deliver/commands_generator' +require_relative 'deliver/detect_values' +require_relative 'deliver/runner' +require_relative 'deliver/upload_metadata' +require_relative 'deliver/upload_screenshots' +require_relative 'deliver/upload_price_tier' +require_relative 'deliver/submit_for_review' +require_relative 'deliver/app_screenshot' +require_relative 'deliver/html_generator' +require_relative 'deliver/generate_summary' + +require_relative 'deliver/module' diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot.rb new file mode 100644 index 0000000..04a0b81 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot.rb @@ -0,0 +1,387 @@ +require 'fastimage' + +require_relative 'module' +require 'spaceship/connect_api/models/app_screenshot_set' + +module Deliver + # AppScreenshot represents one screenshots for one specific locale and + # device type. + class AppScreenshot + # + module ScreenSize + # iPhone 4 + IOS_35 = "iOS-3.5-in" + # iPhone 5 + IOS_40 = "iOS-4-in" + # iPhone 6, 7, & 8 + IOS_47 = "iOS-4.7-in" + # iPhone 6 Plus, 7 Plus, & 8 Plus + IOS_55 = "iOS-5.5-in" + # iPhone XS + IOS_58 = "iOS-5.8-in" + # iPhone XR + IOS_61 = "iOS-6.1-in" + # iPhone XS Max + IOS_65 = "iOS-6.5-in" + # iPhone 14 Pro Max + IOS_67 = "iOS-6.7-in" + + # iPad + IOS_IPAD = "iOS-iPad" + # iPad 10.5 + IOS_IPAD_10_5 = "iOS-iPad-10.5" + # iPad 11 + IOS_IPAD_11 = "iOS-iPad-11" + # iPad Pro + IOS_IPAD_PRO = "iOS-iPad-Pro" + # iPad Pro (12.9-inch) (3rd generation) + IOS_IPAD_PRO_12_9 = "iOS-iPad-Pro-12.9" + + # iPhone 5 iMessage + IOS_40_MESSAGES = "iOS-4-in-messages" + # iPhone 6, 7, & 8 iMessage + IOS_47_MESSAGES = "iOS-4.7-in-messages" + # iPhone 6 Plus, 7 Plus, & 8 Plus iMessage + IOS_55_MESSAGES = "iOS-5.5-in-messages" + # iPhone XS iMessage + IOS_58_MESSAGES = "iOS-5.8-in-messages" + # iPhone XR iMessage + IOS_61_MESSAGES = "iOS-6.1-in-messages" + # iPhone XS Max iMessage + IOS_65_MESSAGES = "iOS-6.5-in-messages" + # iPhone 14 Pro Max iMessage + IOS_67_MESSAGES = "iOS-6.7-in-messages" + + # iPad iMessage + IOS_IPAD_MESSAGES = "iOS-iPad-messages" + # iPad 10.5 iMessage + IOS_IPAD_10_5_MESSAGES = "iOS-iPad-10.5-messages" + # iPad 11 iMessage + IOS_IPAD_11_MESSAGES = "iOS-iPad-11-messages" + # iPad Pro iMessage + IOS_IPAD_PRO_MESSAGES = "iOS-iPad-Pro-messages" + # iPad Pro (12.9-inch) (3rd generation) iMessage + IOS_IPAD_PRO_12_9_MESSAGES = "iOS-iPad-Pro-12.9-messages" + + # Apple Watch + IOS_APPLE_WATCH = "iOS-Apple-Watch" + # Apple Watch Series 4 + IOS_APPLE_WATCH_SERIES4 = "iOS-Apple-Watch-Series4" + # Apple Watch Series 7 + IOS_APPLE_WATCH_SERIES7 = "iOS-Apple-Watch-Series7" + + # Apple TV + APPLE_TV = "Apple-TV" + + # Mac + MAC = "Mac" + end + + # @return [Deliver::ScreenSize] the screen size (device type) + # specified at {Deliver::ScreenSize} + attr_accessor :screen_size + + attr_accessor :path + + attr_accessor :language + + # @param path (String) path to the screenshot file + # @param language (String) 
Language of this screenshot (e.g. English) + # @param screen_size (Deliver::AppScreenshot::ScreenSize) the screen size, which + # will automatically be calculated when you don't set it. (Deprecated) + def initialize(path, language, screen_size = nil) + UI.deprecated('`screen_size` for Deliver::AppScreenshot.new is deprecated in favor of the default behavior to calculate size automatically. Passed value is no longer validated.') if screen_size + self.path = path + self.language = language + self.screen_size = screen_size || self.class.calculate_screen_size(path) + end + + # The iTC API requires a different notation for the device + def device_type + matching = { + ScreenSize::IOS_35 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_35, + ScreenSize::IOS_40 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_40, + ScreenSize::IOS_47 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_47, # also 7 & 8 + ScreenSize::IOS_55 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_55, # also 7 Plus & 8 Plus + ScreenSize::IOS_58 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_58, + ScreenSize::IOS_65 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_65, + ScreenSize::IOS_67 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPHONE_67, + ScreenSize::IOS_IPAD => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPAD_97, + ScreenSize::IOS_IPAD_10_5 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPAD_105, + ScreenSize::IOS_IPAD_11 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPAD_PRO_3GEN_11, + ScreenSize::IOS_IPAD_PRO => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPAD_PRO_129, + ScreenSize::IOS_IPAD_PRO_12_9 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_IPAD_PRO_3GEN_129, + ScreenSize::IOS_40_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_40, + ScreenSize::IOS_47_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_47, # also 7 & 8 + ScreenSize::IOS_55_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_55, # also 7 Plus & 8 Plus + ScreenSize::IOS_58_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_58, + ScreenSize::IOS_65_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_65, + ScreenSize::IOS_67_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPHONE_67, + ScreenSize::IOS_IPAD_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPAD_97, + ScreenSize::IOS_IPAD_PRO_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPAD_PRO_129, + ScreenSize::IOS_IPAD_PRO_12_9_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPAD_PRO_3GEN_129, + ScreenSize::IOS_IPAD_10_5_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPAD_105, + ScreenSize::IOS_IPAD_11_MESSAGES => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::IMESSAGE_APP_IPAD_PRO_3GEN_11, + ScreenSize::MAC => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_DESKTOP, + ScreenSize::IOS_APPLE_WATCH => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_WATCH_SERIES_3, + ScreenSize::IOS_APPLE_WATCH_SERIES4 => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_WATCH_SERIES_4, + ScreenSize::IOS_APPLE_WATCH_SERIES7 => 
Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_WATCH_SERIES_7, + ScreenSize::APPLE_TV => Spaceship::ConnectAPI::AppScreenshotSet::DisplayType::APP_APPLE_TV + } + return matching[self.screen_size] + end + + # Nice name + def formatted_name + matching = { + ScreenSize::IOS_35 => "iPhone 4", + ScreenSize::IOS_40 => "iPhone 5", + ScreenSize::IOS_47 => "iPhone 6", # also 7 & 8 + ScreenSize::IOS_55 => "iPhone 6 Plus", # also 7 Plus & 8 Plus + ScreenSize::IOS_58 => "iPhone XS", + ScreenSize::IOS_61 => "iPhone XR", + ScreenSize::IOS_65 => "iPhone XS Max", + ScreenSize::IOS_67 => "iPhone 14 Pro Max", + ScreenSize::IOS_IPAD => "iPad", + ScreenSize::IOS_IPAD_10_5 => "iPad 10.5", + ScreenSize::IOS_IPAD_11 => "iPad 11", + ScreenSize::IOS_IPAD_PRO => "iPad Pro", + ScreenSize::IOS_IPAD_PRO_12_9 => "iPad Pro (12.9-inch) (3rd generation)", + ScreenSize::IOS_40_MESSAGES => "iPhone 5 (iMessage)", + ScreenSize::IOS_47_MESSAGES => "iPhone 6 (iMessage)", # also 7 & 8 + ScreenSize::IOS_55_MESSAGES => "iPhone 6 Plus (iMessage)", # also 7 Plus & 8 Plus + ScreenSize::IOS_58_MESSAGES => "iPhone XS (iMessage)", + ScreenSize::IOS_61_MESSAGES => "iPhone XR (iMessage)", + ScreenSize::IOS_65_MESSAGES => "iPhone XS Max (iMessage)", + ScreenSize::IOS_67_MESSAGES => "iPhone 14 Pro Max (iMessage)", + ScreenSize::IOS_IPAD_MESSAGES => "iPad (iMessage)", + ScreenSize::IOS_IPAD_PRO_MESSAGES => "iPad Pro (iMessage)", + ScreenSize::IOS_IPAD_PRO_12_9_MESSAGES => "iPad Pro (12.9-inch) (3rd generation) (iMessage)", + ScreenSize::IOS_IPAD_10_5_MESSAGES => "iPad 10.5 (iMessage)", + ScreenSize::IOS_IPAD_11_MESSAGES => "iPad 11 (iMessage)", + ScreenSize::MAC => "Mac", + ScreenSize::IOS_APPLE_WATCH => "Watch", + ScreenSize::IOS_APPLE_WATCH_SERIES4 => "Watch Series4", + ScreenSize::IOS_APPLE_WATCH_SERIES7 => "Watch Series7", + ScreenSize::APPLE_TV => "Apple TV" + } + return matching[self.screen_size] + end + + # Validates the given screenshots (size and format) + def is_valid? + UI.deprecated('Deliver::AppScreenshot#is_valid? is deprecated in favor of Deliver::AppScreenshotValidator') + return false unless ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG"].include?(self.path.split(".").last) + + return self.screen_size == self.class.calculate_screen_size(self.path) + end + + def is_messages? 
+ return [ + ScreenSize::IOS_40_MESSAGES, + ScreenSize::IOS_47_MESSAGES, + ScreenSize::IOS_55_MESSAGES, + ScreenSize::IOS_58_MESSAGES, + ScreenSize::IOS_65_MESSAGES, + ScreenSize::IOS_67_MESSAGES, + ScreenSize::IOS_IPAD_MESSAGES, + ScreenSize::IOS_IPAD_PRO_MESSAGES, + ScreenSize::IOS_IPAD_PRO_12_9_MESSAGES, + ScreenSize::IOS_IPAD_10_5_MESSAGES, + ScreenSize::IOS_IPAD_11_MESSAGES + ].include?(self.screen_size) + end + + def self.device_messages + # This list does not include iPad Pro 12.9-inch (3rd generation) + # because it has same resoluation as IOS_IPAD_PRO and will clobber + return { + ScreenSize::IOS_67_MESSAGES => [ + [1290, 2796], + [2796, 1290] + ], + ScreenSize::IOS_65_MESSAGES => [ + [1242, 2688], + [2688, 1242], + [1284, 2778], + [2778, 1284] + ], + ScreenSize::IOS_61_MESSAGES => [ + [828, 1792], + [1792, 828] + ], + ScreenSize::IOS_58_MESSAGES => [ + [1125, 2436], + [2436, 1125], + [1170, 2532], + [2532, 1170] + ], + ScreenSize::IOS_55_MESSAGES => [ + [1242, 2208], + [2208, 1242] + ], + ScreenSize::IOS_47_MESSAGES => [ + [750, 1334], + [1334, 750] + ], + ScreenSize::IOS_40_MESSAGES => [ + [640, 1096], + [640, 1136], + [1136, 600], + [1136, 640] + ], + ScreenSize::IOS_IPAD_MESSAGES => [ + [1024, 748], + [1024, 768], + [2048, 1496], + [2048, 1536], + [768, 1004], + [768, 1024], + [1536, 2008], + [1536, 2048] + ], + ScreenSize::IOS_IPAD_10_5_MESSAGES => [ + [1668, 2224], + [2224, 1668] + ], + ScreenSize::IOS_IPAD_11_MESSAGES => [ + [1668, 2388], + [2388, 1668] + ], + ScreenSize::IOS_IPAD_PRO_MESSAGES => [ + [2732, 2048], + [2048, 2732] + ] + } + end + + # reference: https://help.apple.com/app-store-connect/#/devd274dd925 + def self.devices + # This list does not include iPad Pro 12.9-inch (3rd generation) + # because it has same resoluation as IOS_IPAD_PRO and will clobber + return { + ScreenSize::IOS_67 => [ + [1290, 2796], + [2796, 1290] + ], + ScreenSize::IOS_65 => [ + [1242, 2688], + [2688, 1242], + [1284, 2778], + [2778, 1284] + ], + ScreenSize::IOS_61 => [ + [828, 1792], + [1792, 828] + ], + ScreenSize::IOS_58 => [ + [1125, 2436], + [2436, 1125], + [1170, 2532], + [2532, 1170] + ], + ScreenSize::IOS_55 => [ + [1242, 2208], + [2208, 1242] + ], + ScreenSize::IOS_47 => [ + [750, 1334], + [1334, 750] + ], + ScreenSize::IOS_40 => [ + [640, 1096], + [640, 1136], + [1136, 600], + [1136, 640] + ], + ScreenSize::IOS_35 => [ + [640, 920], + [640, 960], + [960, 600], + [960, 640] + ], + ScreenSize::IOS_IPAD => [ # 9.7 inch + [1024, 748], + [1024, 768], + [2048, 1496], + [2048, 1536], + [768, 1004], # portrait without status bar + [768, 1024], + [1536, 2008], # portrait without status bar + [1536, 2048] + ], + ScreenSize::IOS_IPAD_10_5 => [ + [1668, 2224], + [2224, 1668] + ], + ScreenSize::IOS_IPAD_11 => [ + [1668, 2388], + [2388, 1668] + ], + ScreenSize::IOS_IPAD_PRO => [ + [2732, 2048], + [2048, 2732] + ], + ScreenSize::MAC => [ + [1280, 800], + [1440, 900], + [2560, 1600], + [2880, 1800] + ], + ScreenSize::IOS_APPLE_WATCH => [ + [312, 390] + ], + ScreenSize::IOS_APPLE_WATCH_SERIES4 => [ + [368, 448] + ], + ScreenSize::IOS_APPLE_WATCH_SERIES7 => [ + [396, 484] + ], + ScreenSize::APPLE_TV => [ + [1920, 1080], + [3840, 2160] + ] + } + end + + def self.resolve_ipadpro_conflict_if_needed(screen_size, filename) + is_3rd_gen = [ + "iPad Pro (12.9-inch) (3rd generation)", # Default simulator has this name + "iPad Pro (12.9-inch) (4th generation)", # Default simulator has this name + "iPad Pro (12.9-inch) (5th generation)", # Default simulator has this name + "iPad Pro (12.9-inch) (6th 
generation)", # Default simulator has this name + "IPAD_PRO_3GEN_129", # Screenshots downloaded from App Store Connect has this name + "ipadPro129" # Legacy: screenshots downloaded from iTunes Connect used to have this name + ].any? { |key| filename.include?(key) } + if is_3rd_gen + if screen_size == ScreenSize::IOS_IPAD_PRO + return ScreenSize::IOS_IPAD_PRO_12_9 + elsif screen_size == ScreenSize::IOS_IPAD_PRO_MESSAGES + return ScreenSize::IOS_IPAD_PRO_12_9_MESSAGES + end + end + screen_size + end + + def self.calculate_screen_size(path) + size = FastImage.size(path) + + UI.user_error!("Could not find or parse file at path '#{path}'") if size.nil? || size.count == 0 + + # iMessage screenshots have same resolution as app screenshots so we need to distinguish them + path_component = Pathname.new(path).each_filename.to_a[-3] + devices = path_component.eql?("iMessage") ? self.device_messages : self.devices + + devices.each do |screen_size, resolutions| + if resolutions.include?(size) + filename = Pathname.new(path).basename.to_s + return resolve_ipadpro_conflict_if_needed(screen_size, filename) + end + end + + nil + end + end + + ScreenSize = AppScreenshot::ScreenSize +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_iterator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_iterator.rb new file mode 100644 index 0000000..170c575 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_iterator.rb @@ -0,0 +1,95 @@ +module Deliver + # This is a convinient class that enumerates app store connect's screenshots in various degrees. + class AppScreenshotIterator + NUMBER_OF_THREADS = Helper.test? ? 1 : [ENV.fetch("DELIVER_NUMBER_OF_THREADS", 10).to_i, 10].min + + # @param localizations [Array] + def initialize(localizations) + @localizations = localizations + end + + # Iterate app_screenshot_set over localizations + # + # @yield [localization, app_screenshot_set] + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreVersionLocalization] localization + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreScreenshotSet] app_screenshot_set + def each_app_screenshot_set(localizations = @localizations, &block) + return enum_for(__method__, localizations) unless block_given? + + # Collect app_screenshot_sets from localizations in parallel but + # limit the number of threads working at a time with using `lazy` and `force` controls + # to not attack App Store Connect + results = localizations.each_slice(NUMBER_OF_THREADS).lazy.map do |localizations_grouped| + localizations_grouped.map do |localization| + Thread.new do + [localization, localization.get_app_screenshot_sets] + end + end + end.flat_map do |threads| + threads.map { |t| t.join.value } + end.force + + results.each do |localization, app_screenshot_sets| + app_screenshot_sets.each do |app_screenshot_set| + yield(localization, app_screenshot_set) + end + end + end + + # Iterate app_screenshot over localizations and app_screenshot_sets + # + # @yield [localization, app_screenshot_set, app_screenshot] + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreVersionLocalization] localization + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreScreenshotSet] app_screenshot_set + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreScreenshot] app_screenshot + def each_app_screenshot(&block) + return enum_for(__method__) unless block_given? 
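(Editor's aside on the `AppScreenshot` class above: the screen size is inferred from the image's pixel dimensions via FastImage, an `iMessage` path component switches to the iMessage resolution table, and a filename marker disambiguates the 12.9-inch iPad Pro generations. A hypothetical sketch — the paths are invented and would need real PNGs of the stated resolutions to run:)

```ruby
require 'deliver'

# A 1290x2796 PNG resolves to the 6.7" slot purely by resolution.
shot = Deliver::AppScreenshot.new('screenshots/en-US/01_home.png', 'en-US')
shot.screen_size     # => "iOS-6.7-in"
shot.formatted_name  # => "iPhone 14 Pro Max"

# A 2048x2732 PNG is ambiguous between iPad Pro generations, so
# resolve_ipadpro_conflict_if_needed checks for the filename marker.
pro = Deliver::AppScreenshot.new(
  'screenshots/en-US/iPad Pro (12.9-inch) (3rd generation)-01.png', 'en-US')
pro.screen_size      # => "iOS-iPad-Pro-12.9"
```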
+ + each_app_screenshot_set do |localization, app_screenshot_set| + app_screenshot_set.app_screenshots.each do |app_screenshot| + yield(localization, app_screenshot_set, app_screenshot) + end + end + end + + # Iterate given local app_screenshot over localizations and app_screenshot_sets + # + # @param screenshots_per_language [Hash] + # @yield [localization, app_screenshot_set, app_screenshot] + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreVersionLocalization] localization + # @yieldparam [optional, Spaceship::ConnectAPI::AppStoreScreenshotSet] app_screenshot_set + # @yieldparam [optional, Deliver::AppScreenshot] screenshot + # @yieldparam [optional, Integer] index a number reperesents which position the screenshot will be + def each_local_screenshot(screenshots_per_language, &block) + return enum_for(__method__, screenshots_per_language) unless block_given? + + # filter unnecessary localizations + supported_localizations = @localizations.reject { |l| screenshots_per_language[l.locale].nil? } + + # build a hash that can access app_screenshot_set corresponding to given locale and display_type + # via parallelized each_app_screenshot_set to gain performance + app_screenshot_set_per_locale_and_display_type = each_app_screenshot_set(supported_localizations) + .each_with_object({}) do |(localization, app_screenshot_set), hash| + hash[localization.locale] ||= {} + hash[localization.locale][app_screenshot_set.screenshot_display_type] = app_screenshot_set + end + + # iterate over screenshots per localization + screenshots_per_language.each do |language, screenshots_for_language| + localization = supported_localizations.find { |l| l.locale == language } + screenshots_per_display_type = screenshots_for_language.reject { |screenshot| screenshot.device_type.nil? }.group_by(&:device_type) + + screenshots_per_display_type.each do |display_type, screenshots| + # create AppScreenshotSet for given display_type if it doesn't exist + app_screenshot_set = (app_screenshot_set_per_locale_and_display_type[language] || {})[display_type] + app_screenshot_set ||= localization.create_app_screenshot_set(attributes: { screenshotDisplayType: display_type }) + + # iterate over screenshots per display size with index + screenshots.each.with_index do |screenshot, index| + yield(localization, app_screenshot_set, screenshot, index) + end + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_validator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_validator.rb new file mode 100644 index 0000000..de57417 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/app_screenshot_validator.rb @@ -0,0 +1,108 @@ +require 'fastimage' + +module Deliver + class AppScreenshotValidator + # A simple structure that holds error information as well as formatted error messages consistently + # Set `to_skip` to `true` when just needing to skip uploading rather than causing a crash. 
+ class ValidationError + # Constants that can be given to `type` param + INVALID_SCREEN_SIZE = 'Invalid screen size'.freeze + UNACCEPTABLE_DEVICE = 'Not an accepted App Store Connect device'.freeze + INVALID_FILE_EXTENSION = 'Invalid file extension'.freeze + FILE_EXTENSION_MISMATCH = 'File extension mismatches its image format'.freeze + + attr_reader :type, :path, :debug_info, :to_skip + + def initialize(type: nil, path: nil, debug_info: nil, to_skip: false) + @type = type + @path = path + @debug_info = debug_info + @to_skip = to_skip + end + + def to_s + "#{to_skip ? '🏃 Skipping' : 'đŸšĢ Error'}: #{path} - #{type} (#{debug_info})" + end + + def inspect + "\"#{type}\"" + end + end + + # Access each array by symbol returned from FastImage.type + ALLOWED_SCREENSHOT_FILE_EXTENSION = { png: ['png', 'PNG'], jpeg: ['jpg', 'JPG', 'jpeg', 'JPEG'] }.freeze + + APP_SCREENSHOT_SPEC_URL = 'https://help.apple.com/app-store-connect/#/devd274dd925'.freeze + + # Validate a screenshot and inform an error message via `errors` parameter. `errors` is mutated + # to append the messages and each message should contain the corresponding path to let users know which file is throwing the error. + # + # @param screenshot [AppScreenshot] + # @param errors [Array] Pass an array object to add validation errors when detecting errors. + # This will be mutated to add more error objects as validation detects errors. + # @return [Boolean] true if given screenshot is valid + def self.validate(screenshot, errors) + # Given screenshot will be diagnosed and errors found are accumulated + errors_found = [] + + validate_screen_size(screenshot, errors_found) + validate_device_type(screenshot, errors_found) + validate_file_extension_and_format(screenshot, errors_found) + + # Merge errors found into given errors array + errors_found.each { |error| errors.push(error) } + errors_found.empty? + end + + def self.validate_screen_size(screenshot, errors_found) + if screenshot.screen_size.nil? + errors_found << ValidationError.new(type: ValidationError::INVALID_SCREEN_SIZE, + path: screenshot.path, + debug_info: "Actual size is #{get_formatted_size(screenshot)}. See the specifications to fix #{APP_SCREENSHOT_SPEC_URL}") + end + end + + # Checking if the device type exists in spaceship + # Ex: iPhone 6.1 inch isn't supported in App Store Connect but need + # to have it in there for frameit support + def self.validate_device_type(screenshot, errors_found) + if !screenshot.screen_size.nil? && screenshot.device_type.nil? + errors_found << ValidationError.new(type: ValidationError::UNACCEPTABLE_DEVICE, + path: screenshot.path, + debug_info: "Screen size #{screenshot.screen_size} is not accepted. 
See the specifications to fix #{APP_SCREENSHOT_SPEC_URL}", + to_skip: true) + end + end + + def self.validate_file_extension_and_format(screenshot, errors_found) + extension = File.extname(screenshot.path).delete('.') + valid_file_extensions = ALLOWED_SCREENSHOT_FILE_EXTENSION.values.flatten + is_valid_extension = valid_file_extensions.include?(extension) + + unless is_valid_extension + errors_found << ValidationError.new(type: ValidationError::INVALID_FILE_EXTENSION, + path: screenshot.path, + debug_info: "Only #{valid_file_extensions.join(', ')} are allowed") + end + + format = FastImage.type(screenshot.path) + is_extension_matched = ALLOWED_SCREENSHOT_FILE_EXTENSION[format] && + ALLOWED_SCREENSHOT_FILE_EXTENSION[format].include?(extension) + + # This error only appears when file extension is valid + if is_valid_extension && !is_extension_matched + expected_extension = ALLOWED_SCREENSHOT_FILE_EXTENSION[format].first + expected_filename = File.basename(screenshot.path, File.extname(screenshot.path)) + ".#{expected_extension}" + errors_found << ValidationError.new(type: ValidationError::FILE_EXTENSION_MISMATCH, + path: screenshot.path, + debug_info: %(Actual format is "#{format}". Rename the filename to "#{expected_filename}".)) + end + end + + def self.get_formatted_size(screenshot) + size = FastImage.size(screenshot.path) + return size.join('x') if size + nil + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/commands_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/commands_generator.rb new file mode 100644 index 0000000..bee8f6e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/commands_generator.rb @@ -0,0 +1,191 @@ +require 'commander' +require 'fastlane/version' +require 'fastlane_core/ui/help_formatter' + +require_relative 'download_screenshots' +require_relative 'options' +require_relative 'module' +require_relative 'generate_summary' +require_relative 'runner' + +HighLine.track_eof = false + +module Deliver + class CommandsGenerator + include Commander::Methods + + def self.start + self.new.run + end + + def deliverfile_options(skip_verification: false) + available_options = Deliver::Options.available_options + return available_options unless skip_verification + + # These don't matter for downloading metadata, so verification can be skipped + irrelevant_options_keys = [:ipa, :pkg, :app_rating_config_path] + + available_options.each do |opt| + next unless irrelevant_options_keys.include?(opt.key) + opt.verify_block = nil + opt.conflicting_options = nil + end + + return available_options + end + + def self.force_overwrite_metadata?(options, path) + res = options[:force] + res ||= ENV["DELIVER_FORCE_OVERWRITE"] # for backward compatibility + res ||= UI.confirm("Do you want to overwrite existing metadata on path '#{File.expand_path(path)}'?") if UI.interactive? + res + end + + # rubocop:disable Metrics/PerceivedComplexity + def run + program :name, 'deliver' + program :version, Fastlane::VERSION + program :description, Deliver::DESCRIPTION + program :help, 'Author', 'Felix Krause ' + program :help, 'Website', 'https://fastlane.tools' + program :help, 'Documentation', 'https://docs.fastlane.tools/actions/deliver/' + program :help_formatter, FastlaneCore::HelpFormatter + + global_option('--verbose') { FastlaneCore::Globals.verbose = true } + global_option('--env STRING[,STRING2]', String, 'Add environment(s) to use with `dotenv`') + + always_trace! 
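(Editor's aside on `AppScreenshotValidator` above: `validate` returns a boolean and appends `ValidationError` objects to the array you pass in, with `to_skip` errors downgrading a hard failure to a skipped upload. A hypothetical call, assuming `screenshot` is a `Deliver::AppScreenshot`:)

```ruby
errors = []
valid = Deliver::AppScreenshotValidator.validate(screenshot, errors)

unless valid
  # Each error formats itself via #to_s, e.g.:
  # "đŸšĢ Error: en-US/01_home.png - Invalid screen size (Actual size is 100x100. ...)"
  errors.each { |error| puts(error) }
end
```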
+ + command :run do |c| + c.syntax = 'fastlane deliver' + c.description = 'Upload metadata and binary to App Store Connect' + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + options = FastlaneCore::Configuration.create(deliverfile_options, options.__hash__) + loaded = options.load_configuration_file("Deliverfile") + + # Check if we already have a deliver setup in the current directory + loaded = true if options[:description] || options[:ipa] || options[:pkg] + loaded = true if File.exist?(File.join(FastlaneCore::FastlaneFolder.path || ".", "metadata")) + unless loaded + if UI.confirm("No deliver configuration found in the current directory. Do you want to setup deliver?") + is_swift = UI.confirm("Would you like to use Swift instead of Ruby?") + require 'deliver/setup' + Deliver::Runner.new(options) # to login... + Deliver::Setup.new.run(options, is_swift: is_swift) + return 0 + end + end + + Deliver::Runner.new(options).run + end + end + + command :submit_build do |c| + c.syntax = 'fastlane deliver submit_build' + c.description = 'Submit a specific build-nr for review, use latest for the latest build' + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + options = FastlaneCore::Configuration.create(deliverfile_options, options.__hash__) + options.load_configuration_file("Deliverfile") + options[:submit_for_review] = true + options[:build_number] = "latest" unless options[:build_number] + Deliver::Runner.new(options).run + end + end + + command :init do |c| + c.syntax = 'fastlane deliver init' + c.description = 'Create the initial `deliver` configuration based on an existing app' + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + if File.exist?("Deliverfile") || File.exist?("fastlane/Deliverfile") + UI.important("You already have a running deliver setup in this directory") + return 0 + end + + require 'deliver/setup' + + options = FastlaneCore::Configuration.create(deliverfile_options, options.__hash__) + options[:run_precheck_before_submit] = false # precheck doesn't need to run during init + + Deliver::Runner.new(options) # to login... + Deliver::Setup.new.run(options) + end + end + + command :generate_summary do |c| + c.syntax = 'fastlane deliver generate_summary' + c.description = 'Generate HTML Summary without uploading/downloading anything' + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + options = FastlaneCore::Configuration.create(deliverfile_options, options.__hash__) + options.load_configuration_file("Deliverfile") + Deliver::Runner.new(options) + html_path = Deliver::GenerateSummary.new.run(options) + UI.success("Successfully generated HTML report at '#{html_path}'") + system("open '#{html_path}'") unless options[:force] + end + end + + command :download_screenshots do |c| + c.syntax = 'fastlane deliver download_screenshots' + c.description = "Downloads all existing screenshots from App Store Connect and stores them in the screenshots folder" + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + options = FastlaneCore::Configuration.create(deliverfile_options(skip_verification: true), options.__hash__) + options.load_configuration_file("Deliverfile") + Deliver::Runner.new(options, skip_version: true) # to login... 
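(Editor's aside: the subcommands defined here all call `options.load_configuration_file("Deliverfile")`, so shared settings can live in a `Deliverfile`. A hypothetical minimal one — the keys mirror options defined in `deliver/options.rb` later in this diff, and all values are placeholders:)

```ruby
# fastlane/Deliverfile (hypothetical)
app_identifier "com.example.app"   # placeholder bundle identifier
username "appleid@example.com"     # placeholder Apple ID
force true                         # skip the HTML preview confirmation
skip_screenshots true              # metadata-only runs
```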
+ containing = FastlaneCore::Helper.fastlane_enabled_folder_path + path = options[:screenshots_path] || File.join(containing, 'screenshots') + Deliver::DownloadScreenshots.run(options, path) + end + end + + command :download_metadata do |c| + c.syntax = 'fastlane deliver download_metadata' + c.description = "Downloads existing metadata and stores it locally. This overwrites the local files." + + FastlaneCore::CommanderGenerator.new.generate(deliverfile_options, command: c) + + c.action do |args, options| + options = FastlaneCore::Configuration.create(deliverfile_options(skip_verification: true), options.__hash__) + options.load_configuration_file("Deliverfile") + Deliver::Runner.new(options) # to login... + containing = FastlaneCore::Helper.fastlane_enabled_folder_path + path = options[:metadata_path] || File.join(containing, 'metadata') + res = Deliver::CommandsGenerator.force_overwrite_metadata?(options, path) + return 0 unless res + + require 'deliver/setup' + app = Deliver.cache[:app] + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + v = app.get_latest_app_store_version(platform: platform) + if options[:app_version].to_s.length > 0 + v = app.get_live_app_store_version(platform: platform) if v.version_string != options[:app_version] + if v.nil? || v.version_string != options[:app_version] + raise "Neither the current nor live version match specified app_version \"#{options[:app_version]}\"" + end + end + + Deliver::Setup.new.generate_metadata_files(app, v, path, options) + end + end + + default_command(:run) + + run! + end + # rubocop:enable Metrics/PerceivedComplexity + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/detect_values.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/detect_values.rb new file mode 100644 index 0000000..fbe7fbc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/detect_values.rb @@ -0,0 +1,104 @@ +require 'fastlane_core/fastlane_folder' +require 'fastlane_core/ipa_file_analyser' +require 'fastlane_core/pkg_file_analyser' +require 'spaceship/tunes/tunes' +require 'spaceship/tunes/application' + +require_relative 'module' +require_relative 'languages' + +module Deliver + class DetectValues + def run!(options, skip_params = {}) + Deliver.cache = {} + + find_platform(options) + find_app_identifier(options) + find_app(options) + find_folders(options) + ensure_folders_created(options) + find_version(options) unless skip_params[:skip_version] + + verify_languages!(options) + end + + def find_app_identifier(options) + return if options[:app_identifier] + + if options[:ipa] + identifier = FastlaneCore::IpaFileAnalyser.fetch_app_identifier(options[:ipa]) + elsif options[:pkg] + identifier = FastlaneCore::PkgFileAnalyser.fetch_app_identifier(options[:pkg]) + end + + options[:app_identifier] = identifier if identifier.to_s.length > 0 + options[:app_identifier] ||= UI.input("The Bundle Identifier of your App: ") + rescue => ex + UI.error("#{ex.message}\n#{ex.backtrace.join('\n')}") + UI.user_error!("Could not infer your App's Bundle Identifier") + end + + def find_app(options) + app_identifier = options[:app_identifier] + app_id = options[:app] if app_identifier.to_s.empty? + + if !app_identifier.to_s.empty? + app = Spaceship::ConnectAPI::App.find(app_identifier) + elsif !app_id.kind_of?(Spaceship::ConnectAPI::App) && !app_id.to_s.empty? 
+ app = Spaceship::ConnectAPI::App.get(app_id: app_id) + end + + Deliver.cache[:app] = app + + unless app + UI.user_error!("Could not find app with app identifier '#{options[:app_identifier]}' in your App Store Connect account (#{options[:username]} - Team: #{Spaceship::Tunes.client.team_id})") + end + end + + def find_folders(options) + containing = Helper.fastlane_enabled? ? FastlaneCore::FastlaneFolder.path : '.' + options[:screenshots_path] ||= File.join(containing, 'screenshots') + options[:metadata_path] ||= File.join(containing, 'metadata') + end + + def ensure_folders_created(options) + FileUtils.mkdir_p(options[:screenshots_path]) + FileUtils.mkdir_p(options[:metadata_path]) + end + + def find_version(options) + return if options[:app_version] + + if options[:ipa] + options[:app_version] ||= FastlaneCore::IpaFileAnalyser.fetch_app_version(options[:ipa]) + elsif options[:pkg] + options[:app_version] ||= FastlaneCore::PkgFileAnalyser.fetch_app_version(options[:pkg]) + end + rescue => ex + UI.error("#{ex.message}\n#{ex.backtrace.join('\n')}") + UI.user_error!("Could not infer your app's version") + end + + def find_platform(options) + if options[:ipa] + options[:platform] ||= FastlaneCore::IpaFileAnalyser.fetch_app_platform(options[:ipa]) + elsif options[:pkg] + options[:platform] = 'osx' + end + end + + def verify_languages!(options) + languages = options[:languages] + return unless languages + + # 2020-08-24 - Available locales are not available as an endpoint in App Store Connect + # Update with Spaceship::Tunes.client.available_languages.sort (as long as endpoint is avilable) + all_languages = Deliver::Languages::ALL_LANGUAGES + diff = languages - all_languages + + unless diff.empty? + UI.user_error!("The following languages are invalid and cannot be activated: #{diff.join(',')}\n\nValid languages are: #{all_languages}") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/download_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/download_screenshots.rb new file mode 100644 index 0000000..70e6b91 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/download_screenshots.rb @@ -0,0 +1,75 @@ +require_relative 'module' +require 'spaceship' + +module Deliver + class DownloadScreenshots + def self.run(options, path) + UI.message("Downloading all existing screenshots...") + download(options, path) + UI.success("Successfully downloaded all existing screenshots") + rescue => ex + UI.error(ex) + UI.error("Couldn't download already existing screenshots from App Store Connect.") + end + + def self.download(options, folder_path) + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + if options[:use_live_version] + version = app.get_live_app_store_version(platform: platform) + UI.user_error!("Could not find a live version on App Store Connect. Try using '--use_live_version false'") if version.nil? + else + version = app.get_edit_app_store_version(platform: platform) + UI.user_error!("Could not find an edit version on App Store Connect. Try using '--use_live_version true'") if version.nil? 
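(Editor's aside: the `download_screenshots` helper defined just below names each file `<index>_<display_type>_<index>.<extension>` and nests Apple TV and iMessage shots one folder deeper, so they can be told apart from same-resolution iPhone shots on re-upload. A hypothetical resulting layout — the locale and display types are examples:)

```
screenshots/
├── en-US/
│   └── 0_APP_IPHONE_65_0.png
├── appleTV/
│   └── en-US/
│       └── 0_APP_APPLE_TV_0.png
└── iMessage/
    └── en-US/
        └── 0_IMESSAGE_APP_IPHONE_65_0.png
```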
+ end + + localizations = version.get_app_store_version_localizations + threads = [] + localizations.each do |localization| + threads << Thread.new do + download_screenshots(folder_path, localization) + end + end + threads.each(&:join) + end + + def self.download_screenshots(folder_path, localization) + language = localization.locale + screenshot_sets = localization.get_app_screenshot_sets + screenshot_sets.each do |screenshot_set| + screenshot_set.app_screenshots.each_with_index do |screenshot, index| + file_name = [index, screenshot_set.screenshot_display_type, index].join("_") + original_file_extension = File.extname(screenshot.file_name).strip.downcase[1..-1] + file_name += "." + original_file_extension + + url = screenshot.image_asset_url(type: original_file_extension) + next if url.nil? + + UI.message("Downloading existing screenshot '#{file_name}' for language '#{language}'") + + # If the screen shot is for an appleTV we need to store it in a way that we'll know it's an appleTV + # screen shot later as the screen size is the same as an iPhone 6 Plus in landscape. + if screenshot_set.apple_tv? + containing_folder = File.join(folder_path, "appleTV", language) + else + containing_folder = File.join(folder_path, language) + end + + if screenshot_set.imessage? + containing_folder = File.join(folder_path, "iMessage", language) + end + + begin + FileUtils.mkdir_p(containing_folder) + rescue + # if it's already there + end + + path = File.join(containing_folder, file_name) + File.binwrite(path, FastlaneCore::Helper.open_uri(url).read) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/generate_summary.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/generate_summary.rb new file mode 100644 index 0000000..5088f0b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/generate_summary.rb @@ -0,0 +1,13 @@ +require_relative 'upload_metadata' +require_relative 'html_generator' +require_relative 'upload_screenshots' + +module Deliver + class GenerateSummary + def run(options) + screenshots = UploadScreenshots.new.collect_screenshots(options) + UploadMetadata.new.load_from_filesystem(options) + HtmlGenerator.new.render(options, screenshots, '.') + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/html_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/html_generator.rb new file mode 100644 index 0000000..4c573fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/html_generator.rb @@ -0,0 +1,78 @@ +require 'spaceship' + +require_relative 'module' + +module Deliver + class HtmlGenerator + # Splits keywords supporting: + # * separated by commas (with optional whitespace) + # * separated by newlines + KEYWORD_SPLITTER = /(?:,\s?|\r?\n)/ + + def run(options, screenshots) + begin + # Use fastlane folder or default to current directory + fastlane_path = FastlaneCore::FastlaneFolder.path || "." + html_path = self.render(options, screenshots, fastlane_path) + rescue => ex + UI.error(ex.inspect) + UI.error(ex.backtrace.join("\n")) + okay = UI.input("Could not render HTML preview. 
Do you still want to continue?") + return if okay + UI.crash!("Could not render HTML page") + end + UI.important("Verifying the upload via the HTML file can be disabled by either adding") + UI.important("`force true` to your Deliverfile or using `fastlane deliver --force`") + + system("open '#{html_path}'") + okay = UI.confirm("Does the Preview on path '#{html_path}' look okay for you?") + + if okay + UI.success("HTML file confirmed...") # print this to give feedback to the user immediately + else + UI.user_error!("Did not upload the metadata, because the HTML file was rejected by the user") + end + end + + # Returns a path relative to FastlaneFolder.path + # This is needed as the Preview.html file is located inside FastlaneFolder.path + def render_relative_path(export_path, path) + export_path = Pathname.new(File.expand_path(export_path)) + path = Pathname.new(File.expand_path(path)).relative_path_from(export_path) + return path.to_path + end + + # Renders all data available to quickly see if everything was correctly generated. + # @param export_path (String) The path to a folder where the resulting HTML file should be stored. + def render(options, screenshots, export_path = nil) + @screenshots = screenshots || [] + @options = options + @export_path = export_path + + @app_name = (options[:name]['en-US'] || options[:name].values.first) if options[:name] + @app_name ||= Deliver.cache[:app].name + + @languages = options[:description].keys if options[:description] + @languages ||= begin + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + version = Deliver.cache[:app].get_edit_app_store_version(platform: platform) + + version.get_app_store_version_localizations.collect(&:locale) + end + + html_path = File.join(Deliver::ROOT, "lib/assets/summary.html.erb") + html = ERB.new(File.read(html_path)).result(binding) # https://web.archive.org/web/20160430190141/www.rrn.dk/rubys-erb-templating-system + + export_path = File.join(export_path, "Preview.html") + File.write(export_path, html) + + return export_path + end + + # Splits a string of keywords separated by comma or newlines into a presentable list + # @param keywords (String) + def split_keywords(keywords) + keywords.split(KEYWORD_SPLITTER) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/languages.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/languages.rb new file mode 100644 index 0000000..f154ce4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/languages.rb @@ -0,0 +1,7 @@ +module Deliver + module Languages + # 2020-08-24 - Available locales are not available as an endpoint in App Store Connect + # Update with Spaceship::Tunes.client.available_languages.sort (as long as endpoint is avilable) + ALL_LANGUAGES = %w[ar-SA ca cs da de-DE el en-AU en-CA en-GB en-US es-ES es-MX fi fr-CA fr-FR he hi hr hu id it ja ko ms nl-NL no pl pt-BR pt-PT ro ru sk sv th tr uk vi zh-Hans zh-Hant] + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/loader.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/loader.rb new file mode 100644 index 0000000..3762559 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/loader.rb @@ -0,0 +1,157 @@ +require_relative 'module' +require_relative 'app_screenshot' +require_relative 'app_screenshot_validator' +require_relative 'upload_metadata' +require_relative 'languages' + +module Deliver + module Loader + # 
The directory 'appleTV' and `iMessage` are special folders that will cause our screenshot gathering code to iterate + # through it as well searching for language folders. + APPLE_TV_DIR_NAME = "appleTV".freeze + IMESSAGE_DIR_NAME = "iMessage".freeze + DEFAULT_DIR_NAME = "default".freeze + + EXPANDABLE_DIR_NAMES = [APPLE_TV_DIR_NAME, IMESSAGE_DIR_NAME].freeze + SPECIAL_DIR_NAMES = [APPLE_TV_DIR_NAME, IMESSAGE_DIR_NAME, DEFAULT_DIR_NAME].freeze + + # Some exception directories may exist from other actions that should not be iterated through + SUPPLY_DIR_NAME = "android".freeze + FRAMEIT_FONTS_DIR_NAME = "fonts".freeze + META_DIR_NAMES = UploadMetadata::ALL_META_SUB_DIRS.map(&:downcase) + + EXCEPTION_DIRECTORIES = (META_DIR_NAMES << SUPPLY_DIR_NAME << FRAMEIT_FONTS_DIR_NAME).freeze + + # A class that represents language folder under screenshots or metadata folder + class LanguageFolder + attr_reader :path + + # @return [String] A normalized language name that corresponds to the directory's name + attr_reader :language + + def self.available_languages + # 2020-08-24 - Available locales are not available as an endpoint in App Store Connect + # Update with Spaceship::Tunes.client.available_languages.sort (as long as endpoint is avilable) + Deliver::Languages::ALL_LANGUAGES + end + + def self.allowed_directory_names_with_case + available_languages + SPECIAL_DIR_NAMES + end + + # @param path [String] A directory path otherwise this initializer fails + # @param nested [Boolan] Whether given path is nested of another special directory. + # This affects `expandable?` to return `false` when this set to `true`. + def initialize(path, nested: false) + raise(ArgumentError, "Given path must be a directory path - #{path}") unless File.directory?(path) + @path = path + @language = self.class.available_languages.find { |lang| basename.casecmp?(lang) } + @nested = nested + end + + def nested? + @nested + end + + def valid? + self.class.allowed_directory_names_with_case.any? { |name| name.casecmp?(basename) } + end + + def expandable? + !nested? && EXPANDABLE_DIR_NAMES.any? { |name| name.casecmp?(basename) } + end + + def skip? + EXCEPTION_DIRECTORIES.map(&:downcase).include?(basename.downcase) + end + + def file_paths(extensions = '{png,jpg,jpeg}') + Dir.glob(File.join(path, "*.#{extensions}"), File::FNM_CASEFOLD).sort + end + + def framed_file_paths(extensions = '{png,jpg,jpeg}') + Dir.glob(File.join(path, "*_framed.#{extensions}"), File::FNM_CASEFOLD).sort + end + + def basename + File.basename(@path) + end + end + + # Returns the list of valid app screenshot. When detecting invalid screenshots, this will cause an error. + # + # @param root [String] A directory path + # @param ignore_validation [String] Set false not to raise the error when finding invalid folder name + # @return [Array] The list of AppScreenshot that exist under given `root` directory + def self.load_app_screenshots(root, ignore_validation) + screenshots = language_folders(root, ignore_validation, true).flat_map do |language_folder| + paths = if language_folder.framed_file_paths.count > 0 + UI.important("Framed screenshots are detected! đŸ–ŧ Non-framed screenshot files may be skipped. 
🏃") + # watchOS screenshots can be picked up even when framed ones were found since frameit doesn't support watchOS screenshots + framed_or_watch, skipped = language_folder.file_paths.partition { |path| path.downcase.include?('framed') || path.downcase.include?('watch') } + skipped.each { |path| UI.important("🏃 Skipping screenshot file: #{path}") } + framed_or_watch + else + language_folder.file_paths + end + paths.map { |path| AppScreenshot.new(path, language_folder.language) } + end + + errors = [] + valid_screenshots = screenshots.select { |screenshot| Deliver::AppScreenshotValidator.validate(screenshot, errors) } + + errors_to_skip, errors_to_crash = errors.partition(&:to_skip) + + unless errors_to_skip.empty? + UI.important("🏃 Screenshots to be skipped are detected!") + errors_to_skip.each { |error| UI.message(error) } + end + + unless errors_to_crash.empty? + UI.important("đŸšĢ Invalid screenshots were detected! Here are the reasons:") + errors_to_crash.each { |error| UI.error(error) } + UI.user_error!("Canceled uploading screenshots. Please check the error messages above and fix the screenshots.") + end + + valid_screenshots + end + + # Returns the list of language folders + # + # @param roort [String] A directory path to get the list of language folders + # @param ignore_validation [Boolean] Set false not to raise the error when finding invalid folder name + # @param expand_sub_folders [Boolean] Set true to expand special folders; such as "iMessage" to nested language folders + # @return [Array] The list of LanguageFolder whose each of them + def self.language_folders(root, ignore_validation, expand_sub_folders = false) + folders = Dir.glob(File.join(root, '*')) + .select { |path| File.directory?(path) } + .map { |path| LanguageFolder.new(path, nested: false) } + .reject(&:skip?) + + selected_folders, rejected_folders = folders.partition(&:valid?) + + if !ignore_validation && !rejected_folders.empty? + rejected_folders = rejected_folders.map(&:basename) + UI.user_error!("Unsupported directory name(s) for screenshots/metadata in '#{root}': #{rejected_folders.join(', ')}" \ + "\nValid directory names are: #{LanguageFolder.allowed_directory_names_with_case}" \ + "\n\nEnable 'ignore_language_directory_validation' to prevent this validation from happening") + end + + # Expand selected_folders for the special directories + if expand_sub_folders + selected_folders = selected_folders.flat_map do |folder| + if folder.expandable? + Dir.glob(File.join(folder.path, '*')) + .select { |p| File.directory?(p) } + .map { |p| LanguageFolder.new(p, nested: true) } + .select(&:valid?) + else + folder + end + end + end + + selected_folders + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/module.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/module.rb new file mode 100644 index 0000000..7c2bf31 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/module.rb @@ -0,0 +1,24 @@ +require 'fastlane_core/helper' +require 'fastlane_core/ui/ui' +require 'fastlane/boolean' + +module Deliver + class << self + attr_accessor :cache + + def cache + @cache ||= {} + @cache + end + end + + Helper = FastlaneCore::Helper # you gotta love Ruby: Helper.* should use the Helper class contained in FastlaneCore + UI = FastlaneCore::UI + Boolean = Fastlane::Boolean + + # Constant that captures the root Pathname for the project. 
Should be used for building paths to assets or other + # resources that code needs to locate locally + ROOT = Pathname.new(File.expand_path('../../..', __FILE__)) + + DESCRIPTION = 'Upload screenshots, metadata and your app to the App Store using a single command' +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/options.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/options.rb new file mode 100644 index 0000000..b63cb95 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/options.rb @@ -0,0 +1,461 @@ +require 'fastlane_core/configuration/config_item' +require 'credentials_manager/appfile_config' + +require_relative 'module' + +module Deliver + # rubocop:disable Metrics/ClassLength + class Options + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + user ||= ENV["DELIVER_USER"] + + [ + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["DELIVER_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["DELIVER_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "DELIVER_USERNAME", + description: "Your Apple ID Username", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :app_identifier, + short_option: "-a", + env_name: "DELIVER_APP_IDENTIFIER", + description: "The bundle identifier of your app", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + # version + FastlaneCore::ConfigItem.new(key: :app_version, + short_option: '-z', + env_name: "DELIVER_APP_VERSION", + description: "The version that should be edited or created", + optional: true), + + # binary / build + FastlaneCore::ConfigItem.new(key: :ipa, + short_option: "-i", + optional: true, + env_name: "DELIVER_IPA_PATH", + description: "Path to your ipa file", + code_gen_sensitive: true, + default_value: Dir["*.ipa"].sort_by { |x| File.mtime(x) }.last, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Could not find ipa file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("'#{value}' doesn't seem to be an ipa file") unless value.end_with?(".ipa") + end, + conflicting_options: [:pkg], + conflict_block: proc do |value| + UI.user_error!("You can't use 'ipa' and '#{value.key}' options in one run.") + end), + FastlaneCore::ConfigItem.new(key: :pkg, + short_option: "-c", + optional: true, + env_name: "DELIVER_PKG_PATH", + description: "Path to your pkg file", + code_gen_sensitive: true, + default_value: Dir["*.pkg"].sort_by { |x| File.mtime(x) }.last, + default_value_dynamic: true, + 
verify_block: proc do |value| + UI.user_error!("Could not find pkg file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("'#{value}' doesn't seem to be a pkg file") unless value.end_with?(".pkg") + end, + conflicting_options: [:ipa], + conflict_block: proc do |value| + UI.user_error!("You can't use 'pkg' and '#{value.key}' options in one run.") + end), + FastlaneCore::ConfigItem.new(key: :build_number, + short_option: "-n", + env_name: "DELIVER_BUILD_NUMBER", + description: "If set, the given build number (already uploaded to iTC) will be used instead of the currently built one", + optional: true, + conflicting_options: [:ipa, :pkg], + conflict_block: proc do |value| + UI.user_error!("You can't use 'build_number' and '#{value.key}' options in one run.") + end), + FastlaneCore::ConfigItem.new(key: :platform, + short_option: "-j", + env_name: "DELIVER_PLATFORM", + description: "The platform to use (optional)", + optional: true, + default_value: "ios", + verify_block: proc do |value| + UI.user_error!("The platform can only be ios, appletvos, or osx") unless %w(ios appletvos osx).include?(value) + end), + + # live version + FastlaneCore::ConfigItem.new(key: :edit_live, + short_option: "-o", + optional: true, + default_value: false, + env_name: "DELIVER_EDIT_LIVE", + description: "Modify live metadata, this option disables ipa upload and screenshot upload", + type: Boolean), + FastlaneCore::ConfigItem.new(key: :use_live_version, + env_name: "DELIVER_USE_LIVE_VERSION", + description: "Force usage of the live version rather than the edit version", + type: Boolean, + default_value: false), + + # paths + FastlaneCore::ConfigItem.new(key: :metadata_path, + short_option: '-m', + env_name: "DELIVER_METADATA_PATH", + description: "Path to the folder containing the metadata files", + optional: true), + FastlaneCore::ConfigItem.new(key: :screenshots_path, + short_option: '-w', + env_name: "DELIVER_SCREENSHOTS_PATH", + description: "Path to the folder containing the screenshots", + optional: true), + + # skip + FastlaneCore::ConfigItem.new(key: :skip_binary_upload, + env_name: "DELIVER_SKIP_BINARY_UPLOAD", + description: "Skip uploading an ipa or pkg to App Store Connect", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_screenshots, + env_name: "DELIVER_SKIP_SCREENSHOTS", + description: "Don't upload the screenshots", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_metadata, + env_name: "DELIVER_SKIP_METADATA", + description: "Don't upload the metadata (e.g. title, description). This will still upload screenshots", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_app_version_update, + env_name: "DELIVER_SKIP_APP_VERSION_UPDATE", + description: "Don’t create or update the app version that is being prepared for submission", + type: Boolean, + default_value: false), + + # how to operate + FastlaneCore::ConfigItem.new(key: :force, + short_option: "-f", + env_name: "DELIVER_FORCE", + description: "Skip verification of the HTML preview file", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :overwrite_screenshots, + env_name: "DELIVER_OVERWRITE_SCREENSHOTS", + description: "Clear all previously uploaded screenshots before uploading the new ones", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :sync_screenshots, + env_name: "DELIVER_SYNC_SCREENSHOTS", + description: "Sync screenshots with local ones. This is currently a beta option, so also set the 'FASTLANE_ENABLE_BETA_DELIVER_SYNC_SCREENSHOTS' environment variable to true", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :submit_for_review, + env_name: "DELIVER_SUBMIT_FOR_REVIEW", + description: "Submit the new version for Review after uploading everything", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :verify_only, + env_name: "DELIVER_VERIFY_ONLY", + description: "Verifies the archive with App Store Connect without uploading", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :reject_if_possible, + env_name: "DELIVER_REJECT_IF_POSSIBLE", + description: "Rejects the previously submitted build if it's in a state where it's possible", + type: Boolean, + default_value: false), + + # release + FastlaneCore::ConfigItem.new(key: :automatic_release, + env_name: "DELIVER_AUTOMATIC_RELEASE", + description: "Should the app be automatically released once it's approved? (Cannot be used together with `auto_release_date`)", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :auto_release_date, + env_name: "DELIVER_AUTO_RELEASE_DATE", + description: "Date in milliseconds for automatically releasing on pending approval (Cannot be used together with `automatic_release`)", + type: Integer, + optional: true, + conflicting_options: [:automatic_release], + conflict_block: proc do |value| + UI.user_error!("You can't use 'auto_release_date' and '#{value.key}' options together.") + end, + verify_block: proc do |value| + now_in_ms = Time.now.to_i * 1000 + if value < now_in_ms + UI.user_error!("'#{value}' needs to be in the future and in milliseconds (current time is '#{now_in_ms}')") + end + end), + FastlaneCore::ConfigItem.new(key: :phased_release, + env_name: "DELIVER_PHASED_RELEASE", + description: "Enable the phased release feature of iTC", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :reset_ratings, + env_name: "DELIVER_RESET_RATINGS", + description: "Reset the summary rating when you release a new version of the application", + optional: true, + type: Boolean, + default_value: false), + + # other app configuration + FastlaneCore::ConfigItem.new(key: :price_tier, + short_option: "-r", + env_name: "DELIVER_PRICE_TIER", + description: "The price tier of this application", + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :app_rating_config_path, + short_option: "-g", + env_name: "DELIVER_APP_RATING_CONFIG_PATH", + description: "Path to the app rating's config", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not find config file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("'#{value}' doesn't seem to be a JSON file") unless FastlaneCore::Helper.json_file?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :submission_information, + short_option: "-b", + description: "Extra information for the submission (e.g.
compliance specifications, IDFA settings)", + type: Hash, + optional: true), + + # affiliation + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-k", + env_name: "DELIVER_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # as we also allow integers, which we convert to strings anyway + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-e", + env_name: "DELIVER_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_NAME"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :dev_portal_team_id, + short_option: "-s", + env_name: "DELIVER_DEV_PORTAL_TEAM_ID", + description: "The short ID of your Developer Portal team, if you're in multiple teams. Different from your iTC team ID!", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :dev_portal_team_name, + short_option: "-y", + env_name: "DELIVER_DEV_PORTAL_TEAM_NAME", + description: "The name of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_NAME"] = value.to_s + end), + # rubocop:disable Layout/LineLength + FastlaneCore::ConfigItem.new(key: :itc_provider, + env_name: "DELIVER_ITC_PROVIDER", + description: "The provider short name to be used with the iTMSTransporter to identify your team. This value will override the automatically detected provider short name. To get provider short name run `pathToXcode.app/Contents/Applications/Application\\ Loader.app/Contents/itms/bin/iTMSTransporter -m provider -u 'USERNAME' -p 'PASSWORD' -account_type itunes_connect -v off`. The short names of providers should be listed in the second column", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_provider), + default_value_dynamic: true), + # rubocop:enable Layout/LineLength + + # precheck + FastlaneCore::ConfigItem.new(key: :run_precheck_before_submit, + short_option: "-x", + env_name: "DELIVER_RUN_PRECHECK_BEFORE_SUBMIT", + description: "Run precheck before submitting to app review", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :precheck_default_rule_level, + short_option: "-d", + env_name: "DELIVER_PRECHECK_DEFAULT_RULE_LEVEL", + description: "The default precheck rule level unless otherwise configured", + type: Symbol, + default_value: :warn), + + # App Metadata + FastlaneCore::ConfigItem.new(key: :individual_metadata_items, + env_name: "DELIVER_INDIVUDAL_METADATA_ITEMS", + description: "An array of localized metadata items to upload individually by language so that errors can be identified. E.g. 
['name', 'keywords', 'description']. Note: slow", + deprecated: "Removed after the migration to the new App Store Connect API in June 2020", + type: Array, + optional: true), + + # Non Localised + FastlaneCore::ConfigItem.new(key: :app_icon, + env_name: "DELIVER_APP_ICON_PATH", + description: "Metadata: The path to the app icon", + deprecated: "Removed after the migration to the new App Store Connect API in June 2020", + optional: true, + short_option: "-l"), + FastlaneCore::ConfigItem.new(key: :apple_watch_app_icon, + env_name: "DELIVER_APPLE_WATCH_APP_ICON_PATH", + description: "Metadata: The path to the Apple Watch app icon", + deprecated: "Removed after the migration to the new App Store Connect API in June 2020", + optional: true, + short_option: "-q"), + FastlaneCore::ConfigItem.new(key: :copyright, + env_name: "DELIVER_COPYRIGHT", + description: "Metadata: The copyright notice", + optional: true), + FastlaneCore::ConfigItem.new(key: :primary_category, + env_name: "DELIVER_PRIMARY_CATEGORY", + description: "Metadata: The english name of the primary category (e.g. `Business`, `Books`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :secondary_category, + env_name: "DELIVER_SECONDARY_CATEGORY", + description: "Metadata: The english name of the secondary category (e.g. `Business`, `Books`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :primary_first_sub_category, + env_name: "DELIVER_PRIMARY_FIRST_SUB_CATEGORY", + description: "Metadata: The english name of the primary first sub category (e.g. `Educational`, `Puzzle`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :primary_second_sub_category, + env_name: "DELIVER_PRIMARY_SECOND_SUB_CATEGORY", + description: "Metadata: The english name of the primary second sub category (e.g. `Educational`, `Puzzle`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :secondary_first_sub_category, + env_name: "DELIVER_SECONDARY_FIRST_SUB_CATEGORY", + description: "Metadata: The english name of the secondary first sub category (e.g. `Educational`, `Puzzle`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :secondary_second_sub_category, + env_name: "DELIVER_SECONDARY_SECOND_SUB_CATEGORY", + description: "Metadata: The english name of the secondary second sub category (e.g. 
`Educational`, `Puzzle`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :trade_representative_contact_information, + description: "Metadata: A hash containing the trade representative contact information", + optional: true, + deprecated: "This is no longer used by App Store Connect", + type: Hash), + FastlaneCore::ConfigItem.new(key: :app_review_information, + description: "Metadata: A hash containing the review information", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :app_review_attachment_file, + env_name: "DELIVER_APP_REVIEW_ATTACHMENT_FILE", + description: "Metadata: Path to the app review attachment file", + optional: true), + # Localised + FastlaneCore::ConfigItem.new(key: :description, + description: "Metadata: The localised app description", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :name, + description: "Metadata: The localised app name", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :subtitle, + description: "Metadata: The localised app subtitle", + optional: true, + type: Hash, + verify_block: proc do |value| + UI.user_error!(":subtitle must be a hash, with the language being the key") unless value.kind_of?(Hash) + end), + FastlaneCore::ConfigItem.new(key: :keywords, + description: "Metadata: An array of localised keywords", + optional: true, + type: Hash, + verify_block: proc do |value| + UI.user_error!(":keywords must be a hash, with the language being the key") unless value.kind_of?(Hash) + value.each do |language, keywords| + # Auto-convert array to string + keywords = keywords.join(", ") if keywords.kind_of?(Array) + value[language] = keywords + + UI.user_error!("keywords must be a hash with all values being strings") unless keywords.kind_of?(String) + end + end), + FastlaneCore::ConfigItem.new(key: :promotional_text, + description: "Metadata: An array of localised promotional texts", + optional: true, + type: Hash, + verify_block: proc do |value| + UI.user_error!(":keywords must be a hash, with the language being the key") unless value.kind_of?(Hash) + end), + FastlaneCore::ConfigItem.new(key: :release_notes, + description: "Metadata: Localised release notes for this version", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :privacy_url, + description: "Metadata: Localised privacy url", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :apple_tv_privacy_policy, + description: "Metadata: Localised Apple TV privacy policy text", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :support_url, + description: "Metadata: Localised support url", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :marketing_url, + description: "Metadata: Localised marketing url", + optional: true, + type: Hash), + # The verify_block has been removed from here and verification now happens in Deliver::DetectValues + # Verification needed Spaceship::Tunes.client which required the Deliver::Runner to already by started + FastlaneCore::ConfigItem.new(key: :languages, + env_name: "DELIVER_LANGUAGES", + description: "Metadata: List of languages to activate", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :ignore_language_directory_validation, + env_name: "DELIVER_IGNORE_LANGUAGE_DIRECTORY_VALIDATION", + description: "Ignore errors when invalid languages are found in metadata and screenshot directories", + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :precheck_include_in_app_purchases, + 
env_name: "PRECHECK_INCLUDE_IN_APP_PURCHASES", + description: "Should precheck check in-app purchases?", + type: Boolean, + optional: true, + default_value: true), + + # internal + FastlaneCore::ConfigItem.new(key: :app, + short_option: "-p", + env_name: "DELIVER_APP_ID", + description: "The (spaceship) app ID of the app you want to use/modify", + optional: true, + type: Integer) + ] + end + end + # rubocop:enable Metrics/ClassLength +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/runner.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/runner.rb new file mode 100644 index 0000000..2494af5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/runner.rb @@ -0,0 +1,310 @@ +require 'precheck/options' +require 'precheck/runner' +require 'fastlane_core/configuration/configuration' +require 'fastlane_core/ipa_upload_package_builder' +require 'fastlane_core/pkg_upload_package_builder' +require 'fastlane_core/itunes_transporter' +require 'spaceship' +require_relative 'html_generator' +require_relative 'submit_for_review' +require_relative 'upload_price_tier' +require_relative 'upload_metadata' +require_relative 'upload_screenshots' +require_relative 'sync_screenshots' +require_relative 'detect_values' + +module Deliver + class Runner + attr_accessor :options + + def initialize(options, skip_auto_detection = {}) + self.options = options + + login + + Deliver::DetectValues.new.run!(self.options, skip_auto_detection) + FastlaneCore::PrintTable.print_values(config: options, hide_keys: [:app], mask_keys: ['app_review_information.demo_password'], title: "deliver #{Fastlane::VERSION} Summary") + end + + def login + if (api_token = Spaceship::ConnectAPI::Token.from(hash: options[:api_key], filepath: options[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? 
+ UI.message("Using existing authorization token for App Store Connect API") + else + # Username is now optional since addition of App Store Connect API Key + # Force asking for username to prompt user if not already set + options.fetch(:username, force_ask: true) + + # Team selection passed though FASTLANE_TEAM_ID and FASTLANE_TEAM_NAME environment variables + # Prompts select team if multiple teams and none specified + UI.message("Login to App Store Connect (#{options[:username]})") + Spaceship::ConnectAPI.login(options[:username], nil, use_portal: false, use_tunes: true) + UI.message("Login successful") + end + end + + def run + if options[:verify_only] + UI.important("Verify flag is set, only package validation will take place and no submission will be made") + verify_binary + return + end + + verify_version if options[:app_version].to_s.length > 0 && !options[:skip_app_version_update] + + # Rejecting before upload meta + # Screenshots can not be update/deleted if in waiting for review + reject_version_if_possible if options[:reject_if_possible] + + upload_metadata + + has_binary = (options[:ipa] || options[:pkg]) + if !options[:skip_binary_upload] && !options[:build_number] && has_binary + upload_binary + end + + UI.success("Finished the upload to App Store Connect") unless options[:skip_binary_upload] + + precheck_success = precheck_app + submit_for_review if options[:submit_for_review] && precheck_success + end + + # Make sure we pass precheck before uploading + def precheck_app + return true unless options[:run_precheck_before_submit] + UI.message("Running precheck before submitting to review, if you'd like to disable this check you can set run_precheck_before_submit to false") + + if options[:submit_for_review] + UI.message("Making sure we pass precheck 👮‍♀ī¸ 👮 before we submit đŸ›Ģ") + else + UI.message("Running precheck 👮‍♀ī¸ 👮") + end + + precheck_options = { + default_rule_level: options[:precheck_default_rule_level], + include_in_app_purchases: options[:precheck_include_in_app_purchases], + app_identifier: options[:app_identifier] + } + + if options[:api_key] || options[:api_key_path] + if options[:precheck_include_in_app_purchases] + UI.user_error!("Precheck cannot check In-app purchases with the App Store Connect API Key (yet). Exclude In-app purchases from precheck, disable the precheck step in your build step, or use Apple ID login") + end + + precheck_options[:api_key] = options[:api_key] + precheck_options[:api_key_path] = options[:api_key_path] + else + precheck_options[:username] = options[:username] + precheck_options[:platform] = options[:platform] + end + + precheck_config = FastlaneCore::Configuration.create(Precheck::Options.available_options, precheck_options) + Precheck.config = precheck_config + + precheck_success = true + begin + precheck_success = Precheck::Runner.new.run + rescue => ex + UI.error("fastlane precheck just tried to inspect your app's metadata for App Store guideline violations and ran into a problem. We're not sure what the problem was, but precheck failed to finished. You can run it in verbose mode if you want to see the whole error. 
We'll have a fix out soon 🚀") + UI.verbose(ex.inspect) + UI.verbose(ex.backtrace.join("\n")) + end + + return precheck_success + end + + # Make sure the version on App Store Connect matches the one in the ipa + # If not, the new version will automatically be created + def verify_version + app_version = options[:app_version] + UI.message("Making sure the latest version on App Store Connect matches '#{app_version}'...") + + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + changed = app.ensure_version!(app_version, platform: platform) + + if changed + UI.success("Successfully set the version to '#{app_version}'") + else + UI.success("'#{app_version}' is the latest version on App Store Connect") + end + end + + # Upload all metadata, screenshots, pricing information, etc. to App Store Connect + def upload_metadata + upload_metadata = UploadMetadata.new + upload_screenshots = UploadScreenshots.new + + # First, collect all the things for the HTML Report + screenshots = upload_screenshots.collect_screenshots(options) + upload_metadata.load_from_filesystem(options) + + # Assign "default" values to all languages + upload_metadata.assign_defaults(options) + + # Validate + validate_html(screenshots) + + # Commit + upload_metadata.upload(options) + + if options[:sync_screenshots] + sync_screenshots = SyncScreenshots.new(app: Deliver.cache[:app], platform: Spaceship::ConnectAPI::Platform.map(options[:platform])) + sync_screenshots.sync(screenshots) + else + upload_screenshots.upload(options, screenshots) + end + + UploadPriceTier.new.upload(options) + end + + # Verify the binary with App Store Connect + def verify_binary + UI.message("Verifying binary with App Store Connect") + + ipa_path = options[:ipa] + pkg_path = options[:pkg] + + platform = options[:platform] + transporter = transporter_for_selected_team + + case platform + when "ios", "appletvos" + package_path = FastlaneCore::IpaUploadPackageBuilder.new.generate( + app_id: Deliver.cache[:app].id, + ipa_path: ipa_path, + package_path: "/tmp", + platform: platform + ) + result = transporter.verify(package_path: package_path, asset_path: ipa_path, platform: platform) + when "osx" + package_path = FastlaneCore::PkgUploadPackageBuilder.new.generate( + app_id: Deliver.cache[:app].id, + pkg_path: pkg_path, + package_path: "/tmp", + platform: platform + ) + result = transporter.verify(package_path: package_path, asset_path: pkg_path, platform: platform) + else + UI.user_error!("No suitable file found for verify for platform: #{options[:platform]}") + end + + unless result + transporter_errors = transporter.displayable_errors + UI.user_error!("Error verifying the binary file: \n #{transporter_errors}") + end + end + + # Upload the binary to App Store Connect + def upload_binary + UI.message("Uploading binary to App Store Connect") + + ipa_path = options[:ipa] + pkg_path = options[:pkg] + + platform = options[:platform] + transporter = transporter_for_selected_team + + case platform + when "ios", "appletvos" + package_path = FastlaneCore::IpaUploadPackageBuilder.new.generate( + app_id: Deliver.cache[:app].id, + ipa_path: ipa_path, + package_path: "/tmp", + platform: platform + ) + result = transporter.upload(package_path: package_path, asset_path: ipa_path, platform: platform) + when "osx" + package_path = FastlaneCore::PkgUploadPackageBuilder.new.generate( + app_id: Deliver.cache[:app].id, + pkg_path: pkg_path, + package_path: "/tmp", + platform: platform + ) + result = transporter.upload(package_path: 
package_path, asset_path: pkg_path, platform: platform) + else + UI.user_error!("No suitable file found for upload for platform: #{options[:platform]}") + end + + unless result + transporter_errors = transporter.displayable_errors + UI.user_error!("Error uploading ipa file: \n #{transporter_errors}") + end + end + + def reject_version_if_possible + app = Deliver.cache[:app] + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + + submission = app.get_in_progress_review_submission(platform: platform) + if submission + submission.cancel_submission + UI.message("Review submission cancellation has been requested") + + # An app version won't get removed from review instantly + # Polling until there is no longer an in-progress version + loop do + break if app.get_in_progress_review_submission(platform: platform).nil? + UI.message("Waiting for cancellation to take effect...") + sleep(15) + end + + UI.success("Successfully cancelled previous submission!") + end + end + + def submit_for_review + SubmitForReview.new.submit!(options) + end + + private + + # If App Store Connect API token, use token. + # If itc_provider was explicitly specified, use it. + # If there are multiple teams, infer the provider from the selected team name. + # If there are fewer than two teams, don't infer the provider. + def transporter_for_selected_team + # Use JWT auth + api_token = Spaceship::ConnectAPI.token + api_key = if options[:api_key].nil? && !api_token.nil? + # Load api key info if user set api_key_path, not api_key + { key_id: api_token.key_id, issuer_id: api_token.issuer_id, key: api_token.key_raw } + elsif !options[:api_key].nil? + api_key = options[:api_key].transform_keys(&:to_sym).dup + # key is still base 64 style if api_key is loaded from option + api_key[:key] = Base64.decode64(api_key[:key]) if api_key[:is_key_content_base64] + api_key + end + + unless api_token.nil? + api_token.refresh! if api_token.expired? + return FastlaneCore::ItunesTransporter.new(nil, nil, false, nil, api_token.text, altool_compatible_command: true, api_key: api_key) + end + + tunes_client = Spaceship::ConnectAPI.client.tunes_client + + generic_transporter = FastlaneCore::ItunesTransporter.new(options[:username], nil, false, options[:itc_provider], altool_compatible_command: true, api_key: api_key) + return generic_transporter unless options[:itc_provider].nil? && tunes_client.teams.count > 1 + + begin + team = tunes_client.teams.find { |t| t['providerId'].to_s == tunes_client.team_id } + name = team['name'] + provider_id = generic_transporter.provider_ids[name] + UI.verbose("Inferred provider id #{provider_id} for team #{name}.") + return FastlaneCore::ItunesTransporter.new(options[:username], nil, false, provider_id, altool_compatible_command: true, api_key: api_key) + rescue => ex + UI.verbose("Couldn't infer a provider short name for team with id #{tunes_client.team_id} automatically: #{ex}. 
Proceeding without provider short name.") + return generic_transporter + end + end + + def validate_html(screenshots) + return if options[:force] + return if options[:skip_metadata] && options[:skip_screenshots] + HtmlGenerator.new.run(options, screenshots) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/screenshot_comparable.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/screenshot_comparable.rb new file mode 100644 index 0000000..88ee5b7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/screenshot_comparable.rb @@ -0,0 +1,62 @@ +require 'spaceship/connect_api/models/app_screenshot' +require 'spaceship/connect_api/models/app_screenshot_set' + +require_relative 'app_screenshot' + +module Deliver + # This class enables you to compare equality between different representations of the screenshots + # in the standard API `Array#-`, which requires objects to implement `eql?` and `hash`. + class ScreenshotComparable + # A unique key value that consists of locale, filename, and checksum. + attr_reader :key + + # A hash object that contains the source data of this representation class + attr_reader :context + + def self.create_from_local(screenshot:, app_screenshot_set:) + raise ArgumentError unless screenshot.kind_of?(Deliver::AppScreenshot) + raise ArgumentError unless app_screenshot_set.kind_of?(Spaceship::ConnectAPI::AppScreenshotSet) + + new( + path: "#{screenshot.language}/#{File.basename(screenshot.path)}", + checksum: calculate_checksum(screenshot.path), + context: { + screenshot: screenshot, + app_screenshot_set: app_screenshot_set + } + ) + end + + def self.create_from_remote(app_screenshot:, locale:) + raise ArgumentError unless app_screenshot.kind_of?(Spaceship::ConnectAPI::AppScreenshot) + raise ArgumentError unless locale.kind_of?(String) + + new( + path: "#{locale}/#{app_screenshot.file_name}", + checksum: app_screenshot.source_file_checksum, + context: { + app_screenshot: app_screenshot, + locale: locale + } + ) + end + + def self.calculate_checksum(path) + bytes = File.binread(path) + Digest::MD5.hexdigest(bytes) + end + + def initialize(path:, checksum:, context:) + @key = "#{path}/#{checksum}" + @context = context + end + + def eql?(other) + key == other.key + end + + def hash + key.hash + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/setup.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/setup.rb new file mode 100644 index 0000000..2e8f46c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/setup.rb @@ -0,0 +1,203 @@ +require 'spaceship/tunes/tunes' + +require_relative 'module' +require_relative 'download_screenshots' +require_relative 'upload_metadata' + +module Deliver + class Setup + attr_accessor :is_swift + + def run(options, is_swift: false) + containing = Helper.fastlane_enabled_folder_path + self.is_swift = is_swift + + if is_swift + file_path = File.join(containing, 'Deliverfile.swift') + else + file_path = File.join(containing, 'Deliverfile') + end + data = generate_deliver_file(containing, options) + setup_deliver(file_path, data, containing, options) + end + + def setup_deliver(file_path, data, deliver_path, options) + File.write(file_path, data) + + screenshots_path = options[:screenshots_path] || File.join(deliver_path, 'screenshots') + unless options[:skip_screenshots] + download_screenshots(screenshots_path, options) + + # Add a README to the
screenshots folder + FileUtils.mkdir_p(screenshots_path) # just in case the fetching didn't work + File.write(File.join(screenshots_path, 'README.txt'), File.read("#{Deliver::ROOT}/lib/assets/ScreenshotsHelp")) + end + + UI.success("Successfully created new Deliverfile at path '#{file_path}'") + end + + # This method takes care of creating a new 'deliver' folder, containing the app metadata + # and screenshots folders + def generate_deliver_file(deliver_path, options) + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + v = app.get_latest_app_store_version(platform: platform) + + metadata_path = options[:metadata_path] || File.join(deliver_path, 'metadata') + generate_metadata_files(app, v, metadata_path, options) + + # Generate the final Deliverfile here + return File.read(deliverfile_path) + end + + def deliverfile_path + if self.is_swift + return "#{Deliver::ROOT}/lib/assets/DeliverfileDefault.swift" + else + return "#{Deliver::ROOT}/lib/assets/DeliverfileDefault" + end + end + + def generate_metadata_files(app, version, path, options) + # App info localizations + if options[:use_live_version] + app_info = app.fetch_live_app_info + UI.user_error!("The option `use_live_version` was set to `true`, however no live app was found on App Store Connect.") unless app_info + else + app_info = app.fetch_edit_app_info || app.fetch_live_app_info + end + app_info_localizations = app_info.get_app_info_localizations + app_info_localizations.each do |localization| + language = localization.locale + + UploadMetadata::LOCALISED_APP_VALUES.each do |file_key, attribute_name| + content = localization.send(attribute_name) || "" + content += "\n" + + resulting_path = File.join(path, language, "#{file_key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + + UI.message("Writing to '#{resulting_path}'") + end + end + + # Version localizations + version_localizations = version.get_app_store_version_localizations + version_localizations.each do |localization| + language = localization.locale + + UploadMetadata::LOCALISED_VERSION_VALUES.each do |file_key, attribute_name| + content = localization.send(attribute_name) || "" + content += "\n" + + resulting_path = File.join(path, language, "#{file_key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + + UI.message("Writing to '#{resulting_path}'") + end + end + + # App info (categories) + UploadMetadata::NON_LOCALISED_APP_VALUES.each do |file_key, attribute_name| + category = app_info.send(attribute_name) + + content = category ?
category.id.to_s : "" + content += "\n" + + resulting_path = File.join(path, "#{file_key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + + UI.message("Writing to '#{resulting_path}'") + end + + # Version + UploadMetadata::NON_LOCALISED_VERSION_VALUES.each do |file_key, attribute_name| + content = version.send(attribute_name) || "" + content += "\n" + + resulting_path = File.join(path, "#{file_key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + + UI.message("Writing to '#{resulting_path}'") + end + + # Review information + app_store_review_detail = begin + version.fetch_app_store_review_detail + rescue + nil + end # errors if doesn't exist + UploadMetadata::REVIEW_INFORMATION_VALUES.each do |file_key, attribute_name| + if app_store_review_detail + content = app_store_review_detail.send(attribute_name) || "" + else + content = "" + end + content += "\n" + + base_dir = File.join(path, UploadMetadata::REVIEW_INFORMATION_DIR) + resulting_path = File.join(base_dir, "#{file_key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + + UI.message("Writing to '#{resulting_path}'") + end + end + + def generate_metadata_files_old(v, path) + app_details = v.application.details + + # All the localised metadata + (UploadMetadata::LOCALISED_VERSION_VALUES.keys + UploadMetadata::LOCALISED_APP_VALUES.keys).each do |key| + v.description.languages.each do |language| + if UploadMetadata::LOCALISED_VERSION_VALUES.keys.include?(key) + content = v.send(key)[language].to_s + else + content = app_details.send(key)[language].to_s + end + content += "\n" + resulting_path = File.join(path, language, "#{key}.txt") + FileUtils.mkdir_p(File.expand_path('..', resulting_path)) + File.write(resulting_path, content) + UI.message("Writing to '#{resulting_path}'") + end + end + + # All non-localised metadata + (UploadMetadata::NON_LOCALISED_VERSION_VALUES.keys + UploadMetadata::NON_LOCALISED_APP_VALUES).each do |key| + if UploadMetadata::NON_LOCALISED_VERSION_VALUES.keys.include?(key) + content = v.send(key).to_s + else + content = app_details.send(key).to_s + end + content += "\n" + resulting_path = File.join(path, "#{key}.txt") + File.write(resulting_path, content) + UI.message("Writing to '#{resulting_path}'") + end + + # Review information + UploadMetadata::REVIEW_INFORMATION_VALUES_LEGACY.each do |key, option_name| + content = v.send(key).to_s + content += "\n" + base_dir = File.join(path, UploadMetadata::REVIEW_INFORMATION_DIR) + FileUtils.mkdir_p(base_dir) + resulting_path = File.join(base_dir, "#{option_name}.txt") + File.write(resulting_path, content) + UI.message("Writing to '#{resulting_path}'") + end + + UI.success("Successfully created new configuration files.") + end + + def download_screenshots(path, options) + FileUtils.mkdir_p(path) + Deliver::DownloadScreenshots.run(options, path) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/submit_for_review.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/submit_for_review.rb new file mode 100644 index 0000000..46fba55 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/submit_for_review.rb @@ -0,0 +1,236 @@ +require_relative 'module' + +require 'fastlane_core/build_watcher' +require 'fastlane_core/ipa_file_analyser' +require 'fastlane_core/pkg_file_analyser' + +module Deliver + class 
SubmitForReview + def submit!(options) + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + version = app.get_edit_app_store_version(platform: platform) + + unless version + UI.user_error!("Cannot submit for review - could not find an editable version for '#{platform}'") + return + end + + build = select_build(options, app, version, platform) + + update_export_compliance(options, app, build) + update_idfa(options, app, version) + update_submission_information(options, app) + + create_review_submission(options, app, version, platform) + UI.success("Successfully submitted the app for review!") + end + + private + + def create_review_submission(options, app, version, platform) + # Can't submit a review if there is already a review in progress + if app.get_in_progress_review_submission(platform: platform) + UI.user_error!("Cannot submit for review - A review submission is already in progress") + end + + # There can only be one open submission per platform per app + # There might be a submission already created so we need to check + # 1. Create the submission if it's not already created + # 2. Error if submission already contains some items for review (because we don't know what they are) + submission = app.get_ready_review_submission(platform: platform, includes: "items") + if submission.nil? + submission = app.create_review_submission(platform: platform) + elsif !submission.items.empty? + UI.user_error!("Cannot submit for review - A review submission already exists with items not managed by fastlane. Please cancel or remove items from the submission in the App Store Connect website") + end + + submission.add_app_store_version_to_review_items(app_store_version_id: version.id) + submission.submit_for_review + end + + def select_build(options, app, version, platform) + if options[:build_number] && options[:build_number] != "latest" + UI.message("Selecting existing build-number: #{options[:build_number]}") + + build = Spaceship::ConnectAPI::Build.all( + app_id: app.id, + version: options[:app_version], + build_number: options[:build_number], + platform: platform + ).first + + unless build + UI.user_error!("Build number: #{options[:build_number]} does not exist") + end + else + UI.message("Selecting the latest build...") + build = wait_for_build_processing_to_be_complete(app: app, platform: platform, options: options) + end + UI.message("Selecting build #{build.app_version} (#{build.version})...") + + version.select_build(build_id: build.id) + + UI.success("Successfully selected build") + + return build + end + + def update_export_compliance(options, app, build) + submission_information = options[:submission_information] || {} + submission_information = submission_information.transform_keys(&:to_sym) + + uses_encryption = submission_information[:export_compliance_uses_encryption] + + if build.uses_non_exempt_encryption.nil? + UI.verbose("Updating build for export compliance status of '#{uses_encryption}'") + + if uses_encryption.to_s.empty?
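The `submission_information` hash inspected here (and again in `update_idfa` below) normally arrives from the caller's Fastfile. A hedged sketch of a typical invocation, with illustrative values only:

  lane :release do
    deliver(
      submit_for_review: true,
      build_number: "42", # illustrative; "latest" is handled by select_build above
      submission_information: {
        export_compliance_uses_encryption: false,
        add_id_info_uses_idfa: false
      }
    )
  end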
+ message = [ + "Export compliance is required to submit", + "Add information to the :submission_information option...", + " Docs: http://docs.fastlane.tools/actions/deliver/#compliance-and-idfa-settings", + " Example: submission_information: { export_compliance_uses_encryption: false }", + " Example CLI:", + " --submission_information \"{\\\"export_compliance_uses_encryption\\\": false}\"", + "This can also be set in your Info.plist with key 'ITSAppUsesNonExemptEncryption'" + ].join("\n") + UI.user_error!(message) + end + + build = build.update(attributes: { + usesNonExemptEncryption: uses_encryption + }) + + UI.verbose("Successfully updated build for export compliance status of '#{build.uses_non_exempt_encryption}' on App Store Connect") + end + end + + def update_idfa(options, app, version) + submission_information = options[:submission_information] || {} + submission_information = submission_information.transform_keys(&:to_sym) + + uses_idfa = submission_information[:add_id_info_uses_idfa] + + idfa_declaration = begin + version.fetch_idfa_declaration + rescue + nil + end + + updated_idfa = false + + # Set IDFA on version + unless uses_idfa.nil? + UI.verbose("Updating app store version for IDFA status of '#{uses_idfa}'") + version = version.update(attributes: { + usesIdfa: uses_idfa + }) + UI.verbose("Updated app store version for IDFA status of '#{version.uses_idfa}'") + updated_idfa = true + end + + # Error if uses_idfa not set + if version.uses_idfa.nil? + message = [ + "Use of Advertising Identifier (IDFA) is required to submit", + "Add information to the :submission_information option...", + " Docs: http://docs.fastlane.tools/actions/deliver/#compliance-and-idfa-settings", + " Example: submission_information: { add_id_info_uses_idfa: false }", + " Example: submission_information: {", + " add_id_info_uses_idfa: true,", + " add_id_info_serves_ads: false,", + " add_id_info_tracks_install: true,", + " add_id_info_tracks_action: true,", + " add_id_info_limits_tracking: true", + " }", + " Example CLI:", + " --submission_information \"{\\\"add_id_info_uses_idfa\\\": false}\"" + ].join("\n") + UI.user_error!(message) + end + + # Create, update, or delete IDFA declaration + if uses_idfa == false + if idfa_declaration + UI.verbose("Deleting IDFA declaration") + idfa_declaration.delete! + updated_idfa = true + UI.verbose("Deleted IDFA declaration") + end + elsif uses_idfa == true + attributes = { + honorsLimitedAdTracking: !!submission_information[:add_id_info_limits_tracking], + servesAds: !!submission_information[:add_id_info_serves_ads], + attributesAppInstallationToPreviousAd: !!submission_information[:add_id_info_tracks_install], + attributesActionWithPreviousAd: !!submission_information[:add_id_info_tracks_action] + } + + if idfa_declaration + UI.verbose("Updating IDFA declaration") + idfa_declaration.update(attributes: attributes) + UI.verbose("Updated IDFA declaration") + else + UI.verbose("Creating IDFA declaration") + version.create_idfa_declaration(attributes: attributes) + UI.verbose("Created IDFA declaration") + end + + updated_idfa = true + end + + UI.success("Successfully updated IDFA declarations on App Store Connect") if updated_idfa + end + + def update_submission_information(options, app) + submission_information = options[:submission_information] || {} + submission_information = submission_information.transform_keys(&:to_sym) + + content_rights = submission_information[:content_rights_contains_third_party_content] + + unless content_rights.nil? 
+ value = if content_rights + Spaceship::ConnectAPI::App::ContentRightsDeclaration::USES_THIRD_PARTY_CONTENT + else + Spaceship::ConnectAPI::App::ContentRightsDeclaration::DOES_NOT_USE_THIRD_PARTY_CONTENT + end + + UI.verbose("Updating contents rights declaration on App Store Connect") + app.update(attributes: { + contentRightsDeclaration: value + }) + UI.success("Successfully updated contents rights declaration on App Store Connect") + end + end + + def wait_for_build_processing_to_be_complete(app: nil, platform: nil, options: nil) + app_version = options[:app_version] + + app_version ||= FastlaneCore::IpaFileAnalyser.fetch_app_version(options[:ipa]) if options[:ipa] + app_version ||= FastlaneCore::PkgFileAnalyser.fetch_app_version(options[:pkg]) if options[:pkg] + + app_build ||= FastlaneCore::IpaFileAnalyser.fetch_app_build(options[:ipa]) if options[:ipa] + app_build ||= FastlaneCore::PkgFileAnalyser.fetch_app_build(options[:pkg]) if options[:pkg] + + latest_build = FastlaneCore::BuildWatcher.wait_for_build_processing_to_be_complete( + app_id: app.id, + platform: platform, + app_version: app_version, + build_version: app_build, + poll_interval: 15, + return_when_build_appears: false, + return_spaceship_testflight_build: false, + select_latest: true + ) + + if !app_version.nil? && !app_build.nil? + unless latest_build.app_version == app_version && latest_build.version == app_build + UI.important("Uploaded app #{app_version} - #{app_build}, but received build #{latest_build.app_version} - #{latest_build.version}.") + end + end + + return latest_build + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/sync_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/sync_screenshots.rb new file mode 100644 index 0000000..051ff62 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/sync_screenshots.rb @@ -0,0 +1,200 @@ +require 'fastlane_core' +require 'digest/md5' +require 'naturally' + +require_relative 'app_screenshot' +require_relative 'app_screenshot_iterator' +require_relative 'loader' +require_relative 'screenshot_comparable' + +module Deliver + class SyncScreenshots + DeleteScreenshotJob = Struct.new(:app_screenshot, :locale) + UploadScreenshotJob = Struct.new(:app_screenshot_set, :path) + + class UploadResult + attr_reader :asset_delivery_state_counts, :failing_screenshots + + def initialize(asset_delivery_state_counts:, failing_screenshots:) + @asset_delivery_state_counts = asset_delivery_state_counts + @failing_screenshots = failing_screenshots + end + + def processing? + @asset_delivery_state_counts.fetch('UPLOAD_COMPLETE', 0) > 0 + end + + def screenshot_count + @asset_delivery_state_counts.fetch('COMPLETE', 0) + end + end + + def initialize(app:, platform:) + @app = app + @platform = platform + end + + def sync_from_path(screenshots_path) + # load local screenshots + screenshots = Deliver::Loader.load_app_screenshots(screenshots_path, true) + sync(screenshots) + end + + def sync(screenshots) + UI.important('This is currently a beta feature in fastlane. 
This may cause some errors in your environment.') + + unless FastlaneCore::Feature.enabled?('FASTLANE_ENABLE_BETA_DELIVER_SYNC_SCREENSHOTS') + UI.user_error!('Please set a value for the "FASTLANE_ENABLE_BETA_DELIVER_SYNC_SCREENSHOTS" environment variable ' \ + 'if you acknowledge the risk and want to try this out.') + end + + UI.important("Will begin uploading screenshots for '#{version.version_string}' on App Store Connect") + + # enable localizations that will be used + screenshots_per_language = screenshots.group_by(&:language) + enable_localizations(screenshots_per_language.keys) + + # create iterator + localizations = fetch_localizations + iterator = Deliver::AppScreenshotIterator.new(localizations) + + # sync local screenshots with remote settings by deleting and uploading + UI.message("Starting with the upload of screenshots...") + replace_screenshots(iterator, screenshots) + + # ensure screenshots within screenshot sets are sorted in the right order + sort_screenshots(iterator) + + UI.important('Screenshots were synced successfully!') + end + + def enable_localizations(locales) + localizations = fetch_localizations + locales_to_enable = locales - localizations.map(&:locale) + Helper.show_loading_indicator("Activating localizations for #{locales_to_enable.join(', ')}...") + locales_to_enable.each do |locale| + version.create_app_store_version_localization(attributes: { locale: locale }) + end + Helper.hide_loading_indicator + end + + def replace_screenshots(iterator, screenshots, retries = 3) + # delete and upload screenshots to get App Store Connect in sync + do_replace_screenshots(iterator, screenshots, create_delete_worker, create_upload_worker) + + # wait for screenshots to be processed on the App Store Connect end and + # ensure the number of uploaded screenshots matches the local count + result = wait_for_complete(iterator) + return if !result.processing? && result.screenshot_count == screenshots.count + + if retries.zero? + UI.crash!("Retried uploading screenshots #{retries} times but there are still failures when processing screenshots. " \ + "Check the App Store Connect console to work out which screenshots were processed unsuccessfully.") + end + + # retry with deleting failing screenshots + result.failing_screenshots.each(&:delete!) + replace_screenshots(iterator, screenshots, retries - 1) + end + + # This is a testable method that focuses on figuring out what to update + def do_replace_screenshots(iterator, screenshots, delete_worker, upload_worker) + remote_screenshots = iterator.each_app_screenshot.map do |localization, app_screenshot_set, app_screenshot| + ScreenshotComparable.create_from_remote(app_screenshot: app_screenshot, locale: localization.locale) + end + + local_screenshots = iterator.each_local_screenshot(screenshots.group_by(&:language)).map do |localization, app_screenshot_set, screenshot, index| + if index >= 10 + UI.user_error!("Found that #{localization.locale} has more than 10 screenshots for #{app_screenshot_set.screenshot_display_type}. "\ + "Make sure it contains only necessary screenshots.") + end + ScreenshotComparable.create_from_local(screenshot: screenshot, app_screenshot_set: app_screenshot_set) + end + + # Thanks to the `Array#-` API and `ScreenshotComparable`, working out diffs between the local screenshot directory and App Store Connect + # is as easy as you can see below. The former finds what is missing locally, and the latter vice versa.
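To make the comment above concrete, here is a self-contained sketch of the `eql?`/`hash` contract that `Array#-` relies on; `Shot` is an illustrative stand-in for `ScreenshotComparable`, not part of deliver:

  Shot = Struct.new(:key) do
    def eql?(other)
      key == other.key
    end

    def hash
      key.hash
    end
  end

  remote = [Shot.new("en-US/home.png/d41d8c"), Shot.new("en-US/pay.png/0a1b2c")]
  local  = [Shot.new("en-US/home.png/d41d8c"), Shot.new("en-US/pay.png/9f8e7d")]

  (remote - local).map(&:key) # => ["en-US/pay.png/0a1b2c"] -- stale remote copy, delete
  (local - remote).map(&:key) # => ["en-US/pay.png/9f8e7d"] -- new local file, upload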
+ screenshots_to_delete = remote_screenshots - local_screenshots + screenshots_to_upload = local_screenshots - remote_screenshots + + delete_jobs = screenshots_to_delete.map { |x| DeleteScreenshotJob.new(x.context[:app_screenshot], x.context[:locale]) } + delete_worker.batch_enqueue(delete_jobs) + delete_worker.start + + upload_jobs = screenshots_to_upload.map { |x| UploadScreenshotJob.new(x.context[:app_screenshot_set], x.context[:screenshot].path) } + upload_worker.batch_enqueue(upload_jobs) + upload_worker.start + end + + def wait_for_complete(iterator) + retry_count = 0 + Helper.show_loading_indicator("Waiting for all the screenshots processed...") + loop do + failing_screenshots = [] + state_counts = iterator.each_app_screenshot.map { |_, _, app_screenshot| app_screenshot }.each_with_object({}) do |app_screenshot, hash| + state = app_screenshot.asset_delivery_state['state'] + hash[state] ||= 0 + hash[state] += 1 + failing_screenshots << app_screenshot if app_screenshot.error? + end + + result = UploadResult.new(asset_delivery_state_counts: state_counts, failing_screenshots: failing_screenshots) + return result unless result.processing? + + # sleep with exponential backoff + interval = 5 + (2**retry_count) + UI.message("There are still incomplete screenshots. Will check the states again in #{interval} secs - #{state_counts}") + sleep(interval) + retry_count += 1 + end + ensure + Helper.hide_loading_indicator + end + + def sort_screenshots(iterator) + Helper.show_loading_indicator("Sorting screenshots uploaded...") + sort_worker = create_sort_worker + sort_worker.batch_enqueue(iterator.each_app_screenshot_set.to_a.map { |_, set| set }) + sort_worker.start + Helper.hide_loading_indicator + end + + private + + def version + @version ||= @app.get_edit_app_store_version(platform: @platform) + end + + def fetch_localizations + version.get_app_store_version_localizations + end + + def create_upload_worker + FastlaneCore::QueueWorker.new do |job| + UI.verbose("Uploading '#{job.path}'...") + start_time = Time.now + job.app_screenshot_set.upload_screenshot(path: job.path, wait_for_processing: false) + UI.message("Uploaded '#{job.path}'... (#{Time.now - start_time} secs)") + end + end + + def create_delete_worker + FastlaneCore::QueueWorker.new do |job| + target = "id=#{job.app_screenshot.id} #{job.locale} #{job.app_screenshot.file_name}" + UI.verbose("Deleting '#{target}'") + start_time = Time.now + job.app_screenshot.delete! + UI.message("Deleted '#{target}' - (#{Time.now - start_time} secs)") + end + end + + def create_sort_worker + FastlaneCore::QueueWorker.new do |app_screenshot_set| + original_ids = app_screenshot_set.app_screenshots.map(&:id) + sorted_ids = Naturally.sort(app_screenshot_set.app_screenshots, by: :file_name).map(&:id) + if original_ids != sorted_ids + app_screenshot_set.reorder_screenshots(app_screenshot_ids: sorted_ids) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_metadata.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_metadata.rb new file mode 100644 index 0000000..7bfd3e2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_metadata.rb @@ -0,0 +1,699 @@ +require 'fastlane_core' +require 'spaceship' + +require_relative 'module' + +module Deliver + # upload description, rating, etc. 
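The localised options this class consumes are language-keyed hashes, as declared in `Deliver::Options` earlier in this diff. A sketch of the expected shape, with illustrative values only (the keyword array is auto-joined by the `:keywords` verify_block shown earlier):

  options = {
    description: { "en-US" => "Send and receive bitcoin.", "default" => "Send and receive bitcoin." },
    keywords: { "en-US" => ["bitcoin", "wallet"] }, # verified and joined to "bitcoin, wallet"
    copyright: "2023 Example Inc."                  # non-localised values stay plain strings
  }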
+ # rubocop:disable Metrics/ClassLength + class UploadMetadata + # All the localised values attached to the version + LOCALISED_VERSION_VALUES = { + description: "description", + keywords: "keywords", + release_notes: "whats_new", + support_url: "support_url", + marketing_url: "marketing_url", + promotional_text: "promotional_text" + } + + # Everything attached to the version but not being localised + NON_LOCALISED_VERSION_VALUES = { + copyright: "copyright" + } + + # Localised app details values + LOCALISED_APP_VALUES = { + name: "name", + subtitle: "subtitle", + privacy_url: "privacy_policy_url", + apple_tv_privacy_policy: "privacy_policy_text" + } + + # Non localized app details values + NON_LOCALISED_APP_VALUES = { + primary_category: :primary_category, + secondary_category: :secondary_category, + primary_first_sub_category: :primary_subcategory_one, + primary_second_sub_category: :primary_subcategory_two, + secondary_first_sub_category: :secondary_subcategory_one, + secondary_second_sub_category: :secondary_subcategory_two + } + + # Review information values + REVIEW_INFORMATION_VALUES_LEGACY = { + review_first_name: :first_name, + review_last_name: :last_name, + review_phone_number: :phone_number, + review_email: :email_address, + review_demo_user: :demo_user, + review_demo_password: :demo_password, + review_notes: :notes + } + REVIEW_INFORMATION_VALUES = { + first_name: "contact_first_name", + last_name: "contact_last_name", + phone_number: "contact_phone", + email_address: "contact_email", + demo_user: "demo_account_name", + demo_password: "demo_account_password", + notes: "notes" + } + + # Localized app details values that are editable in live state + LOCALISED_LIVE_VALUES = [:description, :release_notes, :support_url, :marketing_url, :promotional_text, :privacy_url] + + # Non localized app details values that are editable in live state + NON_LOCALISED_LIVE_VALUES = [:copyright] + + # Directory name that contains the trade representative contact information + TRADE_REPRESENTATIVE_CONTACT_INFORMATION_DIR = "trade_representative_contact_information" + + # Directory name that contains the review information + REVIEW_INFORMATION_DIR = "review_information" + + ALL_META_SUB_DIRS = [TRADE_REPRESENTATIVE_CONTACT_INFORMATION_DIR, REVIEW_INFORMATION_DIR] + + # rubocop:disable Metrics/PerceivedComplexity + + require_relative 'loader' + + # Make sure to call `load_from_filesystem` before calling upload + def upload(options) + return if options[:skip_metadata] + + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + + enabled_languages = detect_languages(options) + + app_store_version_localizations = verify_available_version_languages!(options, app, enabled_languages) unless options[:edit_live] + app_info_localizations = verify_available_info_languages!(options, app, enabled_languages) unless options[:edit_live] + + if options[:edit_live] + # not all values are editable when using live_version + version = app.get_live_app_store_version(platform: platform) + localised_options = LOCALISED_LIVE_VALUES + non_localised_options = NON_LOCALISED_LIVE_VALUES + + if version.nil?
+ UI.message("Couldn't find live version, editing the current version on App Store Connect instead") + version = fetch_edit_app_store_version(app, platform) + # we don't want to update the localised_options and non_localised_options + # as we also check for `options[:edit_live]` at other areas in the code + # by not touching those 2 variables, deliver is more consistent with what the option says + # in the documentation + else + UI.message("Found live version") + end + else + version = fetch_edit_app_store_version(app, platform) + localised_options = (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys) + non_localised_options = NON_LOCALISED_VERSION_VALUES.keys + end + + # Needed for to filter out release notes from being sent up + number_of_versions = Spaceship::ConnectAPI.get_app_store_versions( + app_id: app.id, + filter: { platform: platform }, + limit: 2 + ).count + is_first_version = number_of_versions == 1 + UI.verbose("Version '#{version.version_string}' is the first version on App Store Connect") if is_first_version + + UI.important("Will begin uploading metadata for '#{version.version_string}' on App Store Connect") + + localized_version_attributes_by_locale = {} + localized_info_attributes_by_locale = {} + + localised_options.each do |key| + current = options[key] + next unless current + + unless current.kind_of?(Hash) + UI.error("Error with provided '#{key}'. Must be a hash, the key being the language.") + next + end + + if key == :release_notes && is_first_version + UI.error("Skipping 'release_notes'... this is the first version of the app") + next + end + + current.each do |language, value| + next unless value.to_s.length > 0 + strip_value = value.to_s.strip + + if LOCALISED_VERSION_VALUES.include?(key) && !strip_value.empty? + attribute_name = LOCALISED_VERSION_VALUES[key] + + localized_version_attributes_by_locale[language] ||= {} + localized_version_attributes_by_locale[language][attribute_name] = strip_value + end + + next unless LOCALISED_APP_VALUES.include?(key) && !strip_value.empty? + attribute_name = LOCALISED_APP_VALUES[key] + + localized_info_attributes_by_locale[language] ||= {} + localized_info_attributes_by_locale[language][attribute_name] = strip_value + end + end + + non_localized_version_attributes = {} + non_localised_options.each do |key| + strip_value = options[key].to_s.strip + next unless strip_value.to_s.length > 0 + + if NON_LOCALISED_VERSION_VALUES.include?(key) && !strip_value.empty? + attribute_name = NON_LOCALISED_VERSION_VALUES[key] + non_localized_version_attributes[attribute_name] = strip_value + end + end + + release_type = if options[:auto_release_date] + # Convert time format to 2020-06-17T12:00:00-07:00 + time_in_ms = options[:auto_release_date] + date = convert_ms_to_iso8601(time_in_ms) + + non_localized_version_attributes['earliestReleaseDate'] = date + Spaceship::ConnectAPI::AppStoreVersion::ReleaseType::SCHEDULED + elsif options[:automatic_release] == true + Spaceship::ConnectAPI::AppStoreVersion::ReleaseType::AFTER_APPROVAL + elsif options[:automatic_release] == false + Spaceship::ConnectAPI::AppStoreVersion::ReleaseType::MANUAL + end + if release_type.nil? + UI.important("Release type will not be set because neither `automatic_release` nor `auto_release_date` were provided. 
Please explicitly set one of these options if you need a release type set") + else + non_localized_version_attributes['releaseType'] = release_type + end + + # Update app store version + # This needs to happen before updating localizations (https://openradar.appspot.com/radar?id=4925914991296512) + # + # Adding some sleeps because the API will sometimes be in a state where releaseType can't be modified + # https://github.com/fastlane/fastlane/issues/16911 + UI.message("Uploading metadata to App Store Connect for version") + sleep(2) + version.update(attributes: non_localized_version_attributes) + sleep(1) + + # Update app store version localizations + store_version_worker = FastlaneCore::QueueWorker.new do |app_store_version_localization| + attributes = localized_version_attributes_by_locale[app_store_version_localization.locale] + if attributes + UI.message("Uploading metadata to App Store Connect for localized version '#{app_store_version_localization.locale}'") + app_store_version_localization.update(attributes: attributes) + end + end + store_version_worker.batch_enqueue(app_store_version_localizations) + store_version_worker.start + + # Update app info localizations + app_info_worker = FastlaneCore::QueueWorker.new do |app_info_localization| + attributes = localized_info_attributes_by_locale[app_info_localization.locale] + if attributes + UI.message("Uploading metadata to App Store Connect for localized info '#{app_info_localization.locale}'") + app_info_localization.update(attributes: attributes) + end + end + app_info_worker.batch_enqueue(app_info_localizations) + app_info_worker.start + + # Update categories + app_info = fetch_edit_app_info(app) + if app_info + category_id_map = {} + + primary_category = options[:primary_category].to_s.strip + secondary_category = options[:secondary_category].to_s.strip + primary_first_sub_category = options[:primary_first_sub_category].to_s.strip + primary_second_sub_category = options[:primary_second_sub_category].to_s.strip + secondary_first_sub_category = options[:secondary_first_sub_category].to_s.strip + secondary_second_sub_category = options[:secondary_second_sub_category].to_s.strip + + mapped_values = {} + + # Only update primary and secondary category if explicitly set + unless primary_category.empty? + mapped = Spaceship::ConnectAPI::AppCategory.map_category_from_itc( + primary_category + ) + + mapped_values[primary_category] = mapped + category_id_map[:primary_category_id] = mapped + end + unless secondary_category.empty? + mapped = Spaceship::ConnectAPI::AppCategory.map_category_from_itc( + secondary_category + ) + + mapped_values[secondary_category] = mapped + category_id_map[:secondary_category_id] = mapped + end + + # Only set if primary category is going to be set + unless primary_category.empty? + mapped = Spaceship::ConnectAPI::AppCategory.map_subcategory_from_itc( + primary_first_sub_category + ) + + mapped_values[primary_first_sub_category] = mapped + category_id_map[:primary_subcategory_one_id] = mapped + end + unless primary_category.empty? + mapped = Spaceship::ConnectAPI::AppCategory.map_subcategory_from_itc( + primary_second_sub_category + ) + + mapped_values[primary_second_sub_category] = mapped + category_id_map[:primary_subcategory_two_id] = mapped + end + + # Only set if secondary category is going to be set + unless secondary_category.empty?
+ mapped = Spaceship::ConnectAPI::AppCategory.map_subcategory_from_itc( + secondary_first_sub_category + ) + + mapped_values[secondary_first_sub_category] = mapped + category_id_map[:secondary_subcategory_one_id] = mapped + end + unless secondary_category.empty? + mapped = Spaceship::ConnectAPI::AppCategory.map_subcategory_from_itc( + secondary_second_sub_category + ) + + mapped_values[secondary_second_sub_category] = mapped + category_id_map[:secondary_subcategory_two_id] = mapped + end + + # Print deprecation warnings if category was mapped + has_mapped_values = false + mapped_values.each do |k, v| + next if k.nil? || v.nil? + next if k == v + has_mapped_values = true + UI.deprecated("Category '#{k}' from iTunesConnect has been deprecated. Please replace with '#{v}'") + end + UI.deprecated("You can find more info at https://docs.fastlane.tools/actions/deliver/#reference") if has_mapped_values + + app_info.update_categories(category_id_map: category_id_map) + end + + # Update phased release + unless options[:phased_release].nil? + phased_release = begin + version.fetch_app_store_version_phased_release + rescue + nil + end # returns no data error so need to rescue + if !!options[:phased_release] + unless phased_release + UI.message("Creating phased release on App Store Connect") + version.create_app_store_version_phased_release(attributes: { + phasedReleaseState: Spaceship::ConnectAPI::AppStoreVersionPhasedRelease::PhasedReleaseState::INACTIVE + }) + end + elsif phased_release + UI.message("Removing phased release on App Store Connect") + phased_release.delete! + end + end + + # Update rating reset + unless options[:reset_ratings].nil? + reset_rating_request = begin + version.fetch_reset_ratings_request + rescue + nil + end # returns no data error so need to rescue + if !!options[:reset_ratings] + unless reset_rating_request + UI.message("Creating reset ratings request on App Store Connect") + version.create_reset_ratings_request + end + elsif reset_rating_request + UI.message("Removing reset ratings request on App Store Connect") + reset_rating_request.delete! 
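+          # Option sketch (illustrative, hypothetical values) for the phased-release
+          # and reset-ratings blocks above; a `nil` option leaves the current state untouched:
+          #   deliver(phased_release: true)   # ensure an INACTIVE phased release exists
+          #   deliver(phased_release: false)  # delete any existing phased release
+          #   deliver(reset_ratings: true)    # create a reset-ratings request
+          #   deliver(reset_ratings: false)   # delete a pending reset-ratings request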
+ end + end + + set_review_information(version, options) + set_review_attachment_file(version, options) + set_app_rating(app_info, options) + end + + # rubocop:enable Metrics/PerceivedComplexity + + def convert_ms_to_iso8601(time_in_ms) + time_in_s = time_in_ms / 1000 + + # Remove minutes and seconds (whole hour) + seconds_in_hour = 60 * 60 + time_in_s_to_hour = (time_in_s / seconds_in_hour).to_i * seconds_in_hour + + return Time.at(time_in_s_to_hour).utc.strftime("%Y-%m-%dT%H:%M:%S%:z") + end + + # If the user is using the 'default' language, then assign values where they are needed + def assign_defaults(options) + # Normalizes languages keys from symbols to strings + normalize_language_keys(options) + + # Build a complete list of the required languages + enabled_languages = detect_languages(options) + + # Get all languages used in existing settings + (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys).each do |key| + current = options[key] + next unless current && current.kind_of?(Hash) + current.each do |language, value| + enabled_languages << language unless enabled_languages.include?(language) + end + end + + # Check folder list (an empty folder signifies a language is required) + ignore_validation = options[:ignore_language_directory_validation] + Loader.language_folders(options[:metadata_path], ignore_validation).each do |lang_folder| + enabled_languages << lang_folder.basename unless enabled_languages.include?(lang_folder.basename) + end + + return unless enabled_languages.include?("default") + UI.message("Detected languages: " + enabled_languages.to_s) + + (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys).each do |key| + current = options[key] + next unless current && current.kind_of?(Hash) + + default = current["default"] + next if default.nil? + + enabled_languages.each do |language| + value = current[language] + next unless value.nil? + + current[language] = default + end + current.delete("default") + end + end + + def detect_languages(options) + # Build a complete list of the required languages + enabled_languages = options[:languages] || [] + + # Get all languages used in existing settings + (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys).each do |key| + current = options[key] + next unless current && current.kind_of?(Hash) + current.each do |language, value| + enabled_languages << language unless enabled_languages.include?(language) + end + end + + # Check folder list (an empty folder signifies a language is required) + ignore_validation = options[:ignore_language_directory_validation] + Loader.language_folders(options[:metadata_path], ignore_validation).each do |lang_folder| + enabled_languages << lang_folder.basename unless enabled_languages.include?(lang_folder.basename) + end + + # Mapping to strings because :default symbol can be passed in + enabled_languages + .map(&:to_s) + .uniq + end + + def fetch_edit_app_store_version(app, platform, wait_time: 10) + retry_if_nil("Cannot find edit app store version", wait_time: wait_time) do + app.get_edit_app_store_version(platform: platform) + end + end + + def fetch_edit_app_info(app, wait_time: 10) + retry_if_nil("Cannot find edit app info", wait_time: wait_time) do + app.fetch_edit_app_info + end + end + + def retry_if_nil(message, tries: 5, wait_time: 10) + loop do + tries -= 1 + + value = yield + return value if value + + UI.message("#{message}... Retrying after #{wait_time} seconds (remaining: #{tries})") + sleep(wait_time) + + return nil if tries.zero? 
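+        # Usage sketch for retry_if_nil (mirrors the fetch_edit_* wrappers above):
+        # poll a block that may return nil while App Store Connect propagates state:
+        #   version = retry_if_nil("Cannot find edit app store version", tries: 3, wait_time: 5) do
+        #     app.get_edit_app_store_version(platform: platform)
+        #   end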
+ end + end + + # Finding languages to enable + def verify_available_info_languages!(options, app, languages) + app_info = fetch_edit_app_info(app) + + unless app_info + UI.user_error!("Cannot update languages - could not find an editable info") + return + end + + localizations = app_info.get_app_info_localizations + + languages = (languages || []).reject { |lang| lang == "default" } + locales_to_enable = languages - localizations.map(&:locale) + + if locales_to_enable.count > 0 + lng_text = "language" + lng_text += "s" if locales_to_enable.count != 1 + Helper.show_loading_indicator("Activating info #{lng_text} #{locales_to_enable.join(', ')}...") + + locales_to_enable.each do |locale| + app_info.create_app_info_localization(attributes: { + locale: locale + }) + end + + Helper.hide_loading_indicator + + # Refresh version localizations + localizations = app_info.get_app_info_localizations + end + + return localizations + end + + # Finding languages to enable + def verify_available_version_languages!(options, app, languages) + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + version = fetch_edit_app_store_version(app, platform) + + unless version + UI.user_error!("Cannot update languages - could not find an editable version for '#{platform}'") + return + end + + localizations = version.get_app_store_version_localizations + + languages = (languages || []).reject { |lang| lang == "default" } + locales_to_enable = languages - localizations.map(&:locale) + + if locales_to_enable.count > 0 + lng_text = "language" + lng_text += "s" if locales_to_enable.count != 1 + Helper.show_loading_indicator("Activating version #{lng_text} #{locales_to_enable.join(', ')}...") + + locales_to_enable.each do |locale| + version.create_app_store_version_localization(attributes: { + locale: locale + }) + end + + Helper.hide_loading_indicator + + # Refresh version localizations + localizations = version.get_app_store_version_localizations + end + + return localizations + end + + # Loads the metadata files and stores them into the options object + def load_from_filesystem(options) + return if options[:skip_metadata] + + # Load localised data + ignore_validation = options[:ignore_language_directory_validation] + Loader.language_folders(options[:metadata_path], ignore_validation).each do |lang_folder| + (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys).each do |key| + path = File.join(lang_folder.path, "#{key}.txt") + next unless File.exist?(path) + + UI.message("Loading '#{path}'...") + options[key] ||= {} + options[key][lang_folder.basename] ||= File.read(path) + end + end + + # Load non localised data + (NON_LOCALISED_VERSION_VALUES.keys + NON_LOCALISED_APP_VALUES.keys).each do |key| + path = File.join(options[:metadata_path], "#{key}.txt") + next unless File.exist?(path) + + UI.message("Loading '#{path}'...") + options[key] ||= File.read(path) + end + + # Load review information + # This is used to find the file path for both new and legacy review information filenames + resolve_review_info_path = lambda do |option_name| + path = File.join(options[:metadata_path], REVIEW_INFORMATION_DIR, "#{option_name}.txt") + return nil unless File.exist?(path) + return nil if options[:app_review_information][option_name].to_s.length > 0 + + UI.message("Loading '#{path}'...") + return path + end + + # First try and load review information from legacy filenames + options[:app_review_information] ||= {} + REVIEW_INFORMATION_VALUES_LEGACY.each do |legacy_option_name, option_name| + path = 
resolve_review_info_path.call(legacy_option_name)
+        next if path.nil?
+        options[:app_review_information][option_name] ||= File.read(path)
+
+        UI.deprecated("Review rating option '#{legacy_option_name}' from iTunesConnect has been deprecated. Please replace with '#{option_name}'")
+      end
+
+      # Then load review information from new App Store Connect filenames
+      REVIEW_INFORMATION_VALUES.keys.each do |option_name|
+        path = resolve_review_info_path.call(option_name)
+        next if path.nil?
+        options[:app_review_information][option_name] ||= File.read(path)
+      end
+    end
+
+    private
+
+    # Normalizes language keys from symbols to strings
+    def normalize_language_keys(options)
+      (LOCALISED_VERSION_VALUES.keys + LOCALISED_APP_VALUES.keys).each do |key|
+        current = options[key]
+        next unless current && current.kind_of?(Hash)
+
+        current.keys.each do |language|
+          current[language.to_s] = current.delete(language)
+        end
+      end
+
+      options
+    end
+
+    def set_review_information(version, options)
+      info = options[:app_review_information]
+      return if info.nil? || info.empty?
+
+      info = info.transform_keys(&:to_sym)
+      UI.user_error!("`app_review_information` must be a hash", show_github_issues: true) unless info.kind_of?(Hash)
+
+      attributes = {}
+      REVIEW_INFORMATION_VALUES.each do |key, attribute_name|
+        strip_value = info[key].to_s.strip
+        attributes[attribute_name] = strip_value unless strip_value.empty?
+      end
+
+      if !attributes["demoAccountName"].to_s.empty? && !attributes["demoAccountPassword"].to_s.empty?
+        attributes["demoAccountRequired"] = true
+      else
+        attributes["demoAccountRequired"] = false
+      end
+
+      UI.message("Uploading app review information to App Store Connect")
+      app_store_review_detail = begin
+        version.fetch_app_store_review_detail
+      rescue => error
+        UI.error("Error fetching app store review detail - #{error.message}")
+        nil
+      end # errors if doesn't exist
+      if app_store_review_detail
+        app_store_review_detail.update(attributes: attributes)
+      else
+        version.create_app_store_review_detail(attributes: attributes)
+      end
+    end
+
+    def set_review_attachment_file(version, options)
+      app_store_review_detail = version.fetch_app_store_review_detail
+      app_store_review_attachments = app_store_review_detail.app_store_review_attachments || []
+
+      if options[:app_review_attachment_file]
+        app_store_review_attachments.each do |app_store_review_attachment|
+          UI.message("Removing previous review attachment file from App Store Connect")
+          app_store_review_attachment.delete!
+        end
+
+        UI.message("Uploading review attachment file to App Store Connect")
+        app_store_review_detail.upload_attachment(path: options[:app_review_attachment_file])
+      else
+        app_store_review_attachments.each(&:delete!)
+        UI.message("Removing review attachment file from App Store Connect") unless app_store_review_attachments.empty?
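+        # For reference, a hypothetical `app_review_information` hash as consumed
+        # by set_review_information above (keys per REVIEW_INFORMATION_VALUES;
+        # all values are made up for illustration):
+        #   {
+        #     first_name: "Felix", last_name: "Krause",
+        #     phone_number: "+1 123 456 7890", email_address: "review@example.com",
+        #     demo_user: "demo@example.com", demo_password: "secret", notes: "..."
+        #   }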
+      end
+    end
+
+    def set_app_rating(app_info, options)
+      return unless options[:app_rating_config_path]
+
+      require 'json'
+      begin
+        json = JSON.parse(File.read(options[:app_rating_config_path]))
+      rescue => ex
+        UI.error(ex.to_s)
+        UI.user_error!("Error parsing JSON file at path '#{options[:app_rating_config_path]}'")
+      end
+      UI.message("Setting the app's age rating...")
+
+      # Mapping from legacy ITC values to App Store Connect values
+      mapped_values = {}
+      attributes = {}
+      json.each do |k, v|
+        new_key = Spaceship::ConnectAPI::AgeRatingDeclaration.map_key_from_itc(k)
+        new_value = Spaceship::ConnectAPI::AgeRatingDeclaration.map_value_from_itc(new_key, v)
+
+        mapped_values[k] = new_key
+        mapped_values[v] = new_value
+
+        attributes[new_key] = new_value
+      end
+
+      # Print deprecation warnings if category was mapped
+      has_mapped_values = false
+      mapped_values.each do |k, v|
+        next if k.nil? || v.nil?
+        next if k == v
+        has_mapped_values = true
+        UI.deprecated("Age rating '#{k}' from iTunesConnect has been deprecated. Please replace with '#{v}'")
+      end
+
+      # Handle App Store Connect deprecation/migrations of keys/values if possible
+      attributes, deprecation_messages, errors = Spaceship::ConnectAPI::AgeRatingDeclaration.map_deprecation_if_possible(attributes)
+      deprecation_messages.each do |message|
+        UI.deprecated(message)
+      end
+
+      unless errors.empty?
+        errors.each do |error|
+          UI.error(error)
+        end
+        UI.user_error!("There are Age Rating deprecation errors that cannot be solved automatically... Please apply any fixes and try again")
+      end
+
+      UI.deprecated("You can find more info at https://docs.fastlane.tools/actions/deliver/#reference") if has_mapped_values || !deprecation_messages.empty?
+
+      age_rating_declaration = app_info.fetch_age_rating_declaration
+      age_rating_declaration.update(attributes: attributes)
+    end
+  end
+  # rubocop:enable Metrics/ClassLength
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_price_tier.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_price_tier.rb
new file mode 100644
index 0000000..47062fa
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_price_tier.rb
@@ -0,0 +1,42 @@
+require_relative 'module'
+require 'spaceship'
+
+module Deliver
+  # Set the app's pricing
+  class UploadPriceTier
+    def upload(options)
+      return unless options[:price_tier]
+
+      price_tier = options[:price_tier].to_s
+
+      app = Deliver.cache[:app]
+
+      attributes = {}
+
+      # Check the App update method to understand how to use territory_ids.
+      territory_ids = nil # nil won't update the app's territory_ids; an empty array would remove the app from sale.
+
+      # As of 2020-09-14:
+      # The official App Store Connect API does not have an endpoint to get app prices for an app,
+      # so we need to get prices from the app's relationships.
+      # Prices from the app's relationship do not include the price tier, so we fetch the app price with the price tier relationship.
+      app_prices = app.prices
+      if app_prices.first
+        app_price = Spaceship::ConnectAPI.get_app_price(app_price_id: app_prices.first.id, includes: "priceTier").first
+        old_price = app_price.price_tier.id
+      else
+        UI.message("App has no prices yet...
Enabling all countries in App Store Connect") + territory_ids = Spaceship::ConnectAPI::Territory.all.map(&:id) + attributes[:availableInNewTerritories] = true + end + + if price_tier == old_price + UI.success("Price Tier unchanged (tier #{old_price})") + return + end + + app.update(attributes: attributes, app_price_tier_id: price_tier, territory_ids: territory_ids) + UI.success("Successfully updated the pricing from #{old_price} to #{price_tier}") + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_screenshots.rb new file mode 100644 index 0000000..6138fda --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/deliver/lib/deliver/upload_screenshots.rb @@ -0,0 +1,271 @@ +require 'fastlane_core' +require 'spaceship/tunes/tunes' +require 'digest/md5' + +require_relative 'app_screenshot' +require_relative 'module' +require_relative 'loader' +require_relative 'app_screenshot_iterator' + +module Deliver + # upload screenshots to App Store Connect + class UploadScreenshots + DeleteScreenshotSetJob = Struct.new(:app_screenshot_set, :localization) + UploadScreenshotJob = Struct.new(:app_screenshot_set, :path) + + def upload(options, screenshots) + return if options[:skip_screenshots] + return if options[:edit_live] + + app = Deliver.cache[:app] + + platform = Spaceship::ConnectAPI::Platform.map(options[:platform]) + version = app.get_edit_app_store_version(platform: platform) + UI.user_error!("Could not find a version to edit for app '#{app.name}' for '#{platform}'") unless version + + UI.important("Will begin uploading snapshots for '#{version.version_string}' on App Store Connect") + + UI.message("Starting with the upload of screenshots...") + screenshots_per_language = screenshots.group_by(&:language) + + localizations = version.get_app_store_version_localizations + + if options[:overwrite_screenshots] + delete_screenshots(localizations, screenshots_per_language) + end + + # Finding languages to enable + languages = screenshots_per_language.keys + locales_to_enable = languages - localizations.map(&:locale) + + if locales_to_enable.count > 0 + lng_text = "language" + lng_text += "s" if locales_to_enable.count != 1 + Helper.show_loading_indicator("Activating #{lng_text} #{locales_to_enable.join(', ')}...") + + locales_to_enable.each do |locale| + version.create_app_store_version_localization(attributes: { + locale: locale + }) + end + + Helper.hide_loading_indicator + + # Refresh version localizations + localizations = version.get_app_store_version_localizations + end + + upload_screenshots(localizations, screenshots_per_language) + + Helper.show_loading_indicator("Sorting screenshots uploaded...") + sort_screenshots(localizations) + Helper.hide_loading_indicator + + UI.success("Successfully uploaded screenshots to App Store Connect") + end + + def delete_screenshots(localizations, screenshots_per_language, tries: 5) + tries -= 1 + + worker = FastlaneCore::QueueWorker.new do |job| + start_time = Time.now + target = "#{job.localization.locale} #{job.app_screenshot_set.screenshot_display_type}" + begin + UI.verbose("Deleting '#{target}'") + job.app_screenshot_set.delete! 
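+          # Aside - assumed FastlaneCore::QueueWorker contract, as used throughout
+          # this class: the block given to `new` handles one enqueued job at a time,
+          # and `start` drains the queue:
+          #   worker = FastlaneCore::QueueWorker.new { |job| process(job) }
+          #   jobs.each { |job| worker.enqueue(job) }
+          #   worker.start # returns once every job has been processed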
+ UI.message("Deleted '#{target}' - (#{Time.now - start_time} secs)") + rescue => error + UI.error("Failed to delete screenshot #{target} - (#{Time.now - start_time} secs)") + UI.error(error.message) + end + end + + iterator = AppScreenshotIterator.new(localizations) + iterator.each_app_screenshot_set do |localization, app_screenshot_set| + # Only delete screenshots if trying to upload + next unless screenshots_per_language.keys.include?(localization.locale) + + UI.verbose("Queued delete sceeenshot set job for #{localization.locale} #{app_screenshot_set.screenshot_display_type}") + worker.enqueue(DeleteScreenshotSetJob.new(app_screenshot_set, localization)) + end + + worker.start + + # Verify all screenshots have been deleted + # Sometimes API requests will fail but screenshots will still be deleted + count = iterator.each_app_screenshot_set.map { |_, app_screenshot_set| app_screenshot_set } + .reduce(0) { |sum, app_screenshot_set| sum + app_screenshot_set.app_screenshots.size } + + UI.important("Number of screenshots not deleted: #{count}") + if count > 0 + if tries.zero? + UI.user_error!("Failed verification of all screenshots deleted... #{count} screenshot(s) still exist") + else + UI.error("Failed to delete all screenshots... Tries remaining: #{tries}") + delete_screenshots(localizations, screenshots_per_language, tries: tries) + end + else + UI.message("Successfully deleted all screenshots") + end + end + + def upload_screenshots(localizations, screenshots_per_language, tries: 5) + tries -= 1 + + # Upload screenshots + worker = FastlaneCore::QueueWorker.new do |job| + begin + UI.verbose("Uploading '#{job.path}'...") + start_time = Time.now + job.app_screenshot_set.upload_screenshot(path: job.path, wait_for_processing: false) + UI.message("Uploaded '#{job.path}'... (#{Time.now - start_time} secs)") + rescue => error + UI.error(error) + end + end + + # Each app_screenshot_set can have only 10 images + number_of_screenshots_per_set = {} + total_number_of_screenshots = 0 + + iterator = AppScreenshotIterator.new(localizations) + iterator.each_local_screenshot(screenshots_per_language) do |localization, app_screenshot_set, screenshot| + # Initialize counter on each app screenshot set + number_of_screenshots_per_set[app_screenshot_set] ||= (app_screenshot_set.app_screenshots || []).count + + if number_of_screenshots_per_set[app_screenshot_set] >= 10 + UI.error("Too many screenshots found for device '#{screenshot.device_type}' in '#{screenshot.language}', skipping this one (#{screenshot.path})") + next + end + + checksum = UploadScreenshots.calculate_checksum(screenshot.path) + duplicate = (app_screenshot_set.app_screenshots || []).any? { |s| s.source_file_checksum == checksum } + + # Enqueue uploading job if it's not duplicated otherwise screenshot will be skipped + if duplicate + UI.message("Previous uploaded. 
Skipping '#{screenshot.path}'...") + else + UI.verbose("Queued upload sceeenshot job for #{localization.locale} #{app_screenshot_set.screenshot_display_type} #{screenshot.path}") + worker.enqueue(UploadScreenshotJob.new(app_screenshot_set, screenshot.path)) + number_of_screenshots_per_set[app_screenshot_set] += 1 + end + + total_number_of_screenshots += 1 + end + + worker.start + + UI.verbose('Uploading jobs are completed') + + Helper.show_loading_indicator("Waiting for all the screenshots to finish being processed...") + states = wait_for_complete(iterator) + Helper.hide_loading_indicator + retry_upload_screenshots_if_needed(iterator, states, total_number_of_screenshots, tries, localizations, screenshots_per_language) + + UI.message("Successfully uploaded all screenshots") + end + + # Verify all screenshots have been processed + def wait_for_complete(iterator) + loop do + states = iterator.each_app_screenshot.map { |_, _, app_screenshot| app_screenshot }.each_with_object({}) do |app_screenshot, hash| + state = app_screenshot.asset_delivery_state['state'] + hash[state] ||= 0 + hash[state] += 1 + end + + is_processing = states.fetch('UPLOAD_COMPLETE', 0) > 0 + return states unless is_processing + + UI.verbose("There are still incomplete screenshots - #{states}") + sleep(5) + end + end + + # Verify all screenshots states on App Store Connect are okay + def retry_upload_screenshots_if_needed(iterator, states, number_of_screenshots, tries, localizations, screenshots_per_language) + is_failure = states.fetch("FAILED", 0) > 0 + is_missing_screenshot = !screenshots_per_language.empty? && !verify_local_screenshots_are_uploaded(iterator, screenshots_per_language) + return unless is_failure || is_missing_screenshot + + if tries.zero? + iterator.each_app_screenshot.select { |_, _, app_screenshot| app_screenshot.error? }.each do |localization, _, app_screenshot| + UI.error("#{app_screenshot.file_name} for #{localization.locale} has error(s) - #{app_screenshot.error_messages.join(', ')}") + end + incomplete_screenshot_count = states.reject { |k, v| k == 'COMPLETE' }.reduce(0) { |sum, (k, v)| sum + v } + UI.user_error!("Failed verification of all screenshots uploaded... #{incomplete_screenshot_count} incomplete screenshot(s) still exist") + else + UI.error("Failed to upload all screenshots... Tries remaining: #{tries}") + # Delete bad entries before retry + iterator.each_app_screenshot do |_, _, app_screenshot| + app_screenshot.delete! unless app_screenshot.complete? 
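+        # The duplicate and verification checks in this class compare MD5 checksums;
+        # UploadScreenshots.calculate_checksum (defined below) is essentially:
+        #   Digest::MD5.hexdigest(File.binread(path))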
+        end
+        upload_screenshots(localizations, screenshots_per_language, tries: tries)
+      end
+    end
+
+    # Return `true` if all the local screenshots are uploaded to App Store Connect
+    def verify_local_screenshots_are_uploaded(iterator, screenshots_per_language)
+      # Check if local screenshots' checksums exist on App Store Connect
+      checksum_to_app_screenshot = iterator.each_app_screenshot.map { |_, _, app_screenshot| [app_screenshot.source_file_checksum, app_screenshot] }.to_h
+
+      number_of_screenshots_per_set = {}
+      missing_local_screenshots = iterator.each_local_screenshot(screenshots_per_language).select do |_, app_screenshot_set, local_screenshot|
+        number_of_screenshots_per_set[app_screenshot_set] ||= (app_screenshot_set.app_screenshots || []).count
+        checksum = UploadScreenshots.calculate_checksum(local_screenshot.path)
+
+        if checksum_to_app_screenshot[checksum]
+          next(false)
+        else
+          is_missing = number_of_screenshots_per_set[app_screenshot_set] < 10 # if it's more than 10, it's skipped
+          number_of_screenshots_per_set[app_screenshot_set] += 1
+          next(is_missing)
+        end
+      end
+
+      missing_local_screenshots.each do |_, _, screenshot|
+        UI.error("#{screenshot.path} is missing on App Store Connect.")
+      end
+
+      missing_local_screenshots.empty?
+    end
+
+    def sort_screenshots(localizations)
+      require 'naturally'
+      iterator = AppScreenshotIterator.new(localizations)
+
+      # Re-order screenshots within app_screenshot_set
+      worker = FastlaneCore::QueueWorker.new do |app_screenshot_set|
+        original_ids = app_screenshot_set.app_screenshots.map(&:id)
+        sorted_ids = Naturally.sort(app_screenshot_set.app_screenshots, by: :file_name).map(&:id)
+        if original_ids != sorted_ids
+          app_screenshot_set.reorder_screenshots(app_screenshot_ids: sorted_ids)
+        end
+      end
+
+      iterator.each_app_screenshot_set do |_, app_screenshot_set|
+        worker.enqueue(app_screenshot_set)
+      end
+
+      worker.start
+    end
+
+    def collect_screenshots(options)
+      return [] if options[:skip_screenshots]
+      return Loader.load_app_screenshots(options[:screenshots_path], options[:ignore_language_directory_validation])
+    end
+
+    # helper method so Spaceship::Tunes.client.available_languages is easier to test
+    def self.available_languages
+      # 2020-08-24 - Available locales are not available as an endpoint in App Store Connect
+      # Update with Spaceship::Tunes.client.available_languages.sort (as long as the endpoint is available)
+      Deliver::Languages::ALL_LANGUAGES
+    end
+
+    # helper method to mock this step in tests
+    def self.calculate_checksum(path)
+      bytes = File.binread(path)
+      Digest::MD5.hexdigest(bytes)
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/README.md
new file mode 100644
index 0000000..412f2f1
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/README.md
@@ -0,0 +1,11 @@

+<!-- HTML header (the fastlane logo linking to docs.fastlane.tools) was stripped in extraction -->
+
+------
+
+The fastlane docs were moved to docs.fastlane.tools

diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/ActionDetails.md.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/ActionDetails.md.erb new file mode 100644 index 0000000..806a256 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/ActionDetails.md.erb @@ -0,0 +1,106 @@ + + +# <%= @action.action_name %> + +<% action = @action %> +<%= action.description %> + +<%= "> #{action.deprecated_notes.gsub("\n", "
")}" unless action.deprecated_notes.to_s.empty? %> + +<% if @custom_content %> +<%= @custom_content %> +
+
+<% else %>
+<%= "> #{action.details.gsub(/(?<!\n)\n(?!\n)/, "<br>").gsub(/\|(?=\n|$)/, "")}" unless action.details.to_s.empty? %>
+<% end %>
+
+<%= action.action_name %> ||
+---|---
+Supported platforms | <%= [:ios, :android, :mac].find_all { |a| action.is_supported?(a) }.join(", ") %>
+Author | @<%= Array(action.author || action.authors).join(", @") %>
+<%- unless action.return_value.to_s.empty? -%>
+Returns | <%= action.return_value.gsub("\n", "
") %> +<%- end -%> + +<% if (action.example_code || []).count > 0 %> + +## <%= action.example_code.count %> Example<%= (action.example_code.count > 1) ? "s" : "" %> +<% action.example_code.each do |current_sample| %> +```ruby +<%= current_sample.gsub(" ", "") %> +``` +<% end %><%# End of action.example_code... %> +<% end %><%# End of if %> + +<% if action.available_options && action.available_options.first.kind_of?(FastlaneCore::ConfigItem) %> + +## Parameters + +Key | Description | Default +----|-------------|-------- +<%- (action.available_options || []).each do |config_item| -%> + <%- next unless config_item.kind_of?(FastlaneCore::ConfigItem) -%> + <%- next if config_item.description.to_s.length == 0 -%> + `<%= config_item.key %>` | <%= config_item.description.gsub(/(? | <%= config_item.doc_default_value %> +<%- end %> +* = default value is dependent on the user's system +<% end %><%# End of action.available_options... %> + +
+
+
+<% if action.output && action.output.kind_of?(Array) && action.output.length > 0 %>
+
+## Lane Variables
+
+Actions can communicate with each other using a shared hash `lane_context` that can be accessed in other actions, plugins or your lanes: `lane_context[SharedValues::XYZ]`. The `<%= @action.action_name %>` action generates the following Lane Variables:
+
+SharedValue | Description
+------------|-------------
+<%- (action.output || []).each do |array_item| -%>
+  <%- next unless array_item.kind_of?(Array) -%>
+  <%- next if array_item.length != 2 -%>
+  `SharedValues::<%= array_item[0] %>` | <%= array_item[1] %>
+<%- end %>
+To get more information check the [Lanes documentation](https://docs.fastlane.tools/advanced/lanes/#lane-context).
+
+<% end %><%# End of action.output... %> + +## Documentation + +To show the documentation in your terminal, run +```no-highlight +fastlane action <%= action.action_name %> +``` + +
+ +## CLI + +It is recommended to add the above action into your `Fastfile`, however sometimes you might want to run one-offs. To do so, you can run the following command from your terminal + +```no-highlight +fastlane run <%= @action.action_name %> +``` + +To pass parameters, make use of the `:` symbol, for example + +```no-highlight +fastlane run <%= @action.action_name %> parameter1:"value1" parameter2:"value2" +``` + +It's important to note that the CLI supports primitive types like integers, floats, booleans, and strings. Arrays can be passed as a comma delimited string (e.g. `param:"1,2,3"`). Hashes are not currently supported. + +It is recommended to add all _fastlane_ actions you use to your `Fastfile`. + +
+ +## Source code + +This action, just like the rest of _fastlane_, is fully open source, view the source code on GitHub + +
+ +Back to actions diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/Actions.md.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/Actions.md.erb new file mode 100644 index 0000000..786513e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/Actions.md.erb @@ -0,0 +1,43 @@ + + +{!docs/includes/setup-fastlane-header.md!} + +# fastlane actions + +This page contains a list of all built-in fastlane actions and their available options. + +To get the most up-to-date information from the command line on your current version you can also run + +```sh +fastlane actions # list all available fastlane actions +fastlane action [action_name] # more information for a specific action +``` + +You can import another `Fastfile` by using the `import` action. This is useful if you have shared lanes across multiple apps and you want to store a `Fastfile` in a separate folder. The path must be relative to the `Fastfile` this is called from. + +```ruby +import './path/to/other/Fastfile' +``` + +For _fastlane_ plugins, check out the [available plugins](/plugins/available-plugins/) page. +If you want to create your own action, check out the [local actions](/create-action/#local-actions) page. + +<%- @categories.each do |category, actions| -%> +- [<%= category %>](#<%= category.gsub(" ", "-").downcase %>) +<%- end -%> +- [Plugins](/plugins/available-plugins/) + +<%- @categories.each do |category, actions| %> +# <%= category %> + +Action | Description | Supported Platforms +---|---|--- +<%- actions.sort.to_h.each do |_number_of_launches, action| -%> +<%- link = "/actions/#{action.action_name}/" -%> +<%= action.action_name %> | <%= action.description %> | <%= [:ios, :android, :mac].find_all { |a| action.is_supported?(a) }.join(", ") %> +<%- end %><%# End of actions.sort... 
%> + +<%- end %><%# End of categories.each %> diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate new file mode 100644 index 0000000..79dbdbb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate @@ -0,0 +1,6 @@ +# app_identifier("[[APP_IDENTIFIER]]") # The bundle identifier of your app +# apple_id("[[APPLE_ID]]") # Your Apple Developer Portal username + +[[TEAMS]] +# For more information about the Appfile, see: +# https://docs.fastlane.tools/advanced/#appfile diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate.swift b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate.swift new file mode 100644 index 0000000..6edca66 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplate.swift @@ -0,0 +1,7 @@ +var appIdentifier: String { return "[[APP_IDENTIFIER]]" } // The bundle identifier of your app +var appleID: String { return "[[APPLE_ID]]" } // Your Apple Developer Portal username + +[[TEAMS]] + +// For more information about the Appfile, see: +// https://docs.fastlane.tools/advanced/#appfile diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplateAndroid b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplateAndroid new file mode 100644 index 0000000..81a4faf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/AppfileTemplateAndroid @@ -0,0 +1,2 @@ +json_key_file("[[JSON_KEY_FILE]]") # Path to the json secret file - Follow https://docs.fastlane.tools/actions/supply/#setup to get one +package_name("[[PACKAGE_NAME]]") # e.g. 
com.krausefx.app diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate new file mode 100644 index 0000000..511d397 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate @@ -0,0 +1,20 @@ +# This file contains the fastlane.tools configuration +# You can find the documentation at https://docs.fastlane.tools +# +# For a list of all available actions, check out +# +# https://docs.fastlane.tools/actions +# +# For a list of all available plugins, check out +# +# https://docs.fastlane.tools/plugins/available-plugins +# + +# Uncomment the line if you want fastlane to automatically update itself +# update_fastlane + +default_platform(:ios) + +platform :ios do +[[LANES]] +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate.swift b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate.swift new file mode 100644 index 0000000..32a598c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/DefaultFastfileTemplate.swift @@ -0,0 +1,13 @@ +// This file contains the fastlane.tools configuration +// You can find the documentation at https://docs.fastlane.tools +// +// For a list of all available actions, check out +// +// https://docs.fastlane.tools/actions +// + +import Foundation + +class Fastfile: LaneFile { +[[LANES]] +} diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.bash b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.bash new file mode 100644 index 0000000..2ee434f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.bash @@ -0,0 +1,26 @@ +#!/bin/bash + +_fastlane_complete() { + COMPREPLY=() + local word="${COMP_WORDS[COMP_CWORD]}" + local completions="" + local file + + # look for Fastfile either in this directory or fastlane/ then grab the lane names + if [[ -e "Fastfile" ]]; then + file="Fastfile" + elif [[ -e "fastlane/Fastfile" ]]; then + file="fastlane/Fastfile" + elif [[ -e ".fastlane/Fastfile" ]]; then + file=".fastlane/Fastfile" + else + return 1 + fi + + # parse 'beta' out of 'lane :beta do', etc + completions="$(sed -En 's/^[ ]*lane +:([^ ]+).*$/\1/p' "$file")" + completions="$completions update_fastlane" + + COMPREPLY=( $(compgen -W "$completions" -- "$word") ) +} + diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.fish b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.fish new file mode 100644 index 0000000..a9af901 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.fish @@ -0,0 +1,39 @@ +# This function was taken from https://github.com/Carthage/Carthage/blob/master/Source/Scripts/carthage-fish-completion +function __fish_fastlane_needs_subcommand + set cmd (commandline -opc) + if [ (count $cmd) -eq 1 -a $cmd[1] = 'fastlane' ] + return 0 + end + return 1 +end + +if test -e "Fastfile" + set file "Fastfile" +else if test -e "fastlane/Fastfile" + set file "fastlane/Fastfile" +else if test -e ".fastlane/Fastfile" + set file ".fastlane/Fastfile" +else + exit 1 +end + +set commands (string match --regex '.*lane\ \:(?!private_)([^\s]*)\ do' (cat $file)) + +set 
commands_string + +# Fish returns the fully matched string, plus the capture group. The actual captured value +# is every other line, starting at line 2. +set use_command false + +for line in $commands + if [ $use_command = true ] + set commands_string "$commands_string $line" + set use_command false + else + set use_command true + end +end + +set commands_string "$commands_string update_fastlane" + +complete -c fastlane -n '__fish_fastlane_needs_subcommand' -a (string trim $commands_string) -f \ No newline at end of file diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.sh b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.sh new file mode 100644 index 0000000..3888e67 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# shellcheck disable=SC2155 +# shellcheck disable=SC1090 +# shellcheck disable=SC2039 + +if [ -n "$BASH_VERSION" ]; then + source ~/.fastlane/completions/completion.bash +elif [ -n "$ZSH_VERSION" ]; then + source ~/.fastlane/completions/completion.zsh +fi + +# Do not remove v0.0.1 diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.zsh b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.zsh new file mode 100644 index 0000000..7876bf5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/completions/completion.zsh @@ -0,0 +1,24 @@ +#!/bin/zsh + +_fastlane_complete() { + local word completions file + word="$1" + + # look for Fastfile either in this directory or fastlane/ then grab the lane names + if [[ -e "Fastfile" ]] then + file="Fastfile" + elif [[ -e "fastlane/Fastfile" ]] then + file="fastlane/Fastfile" + elif [[ -e ".fastlane/Fastfile" ]] then + file=".fastlane/Fastfile" + else + return 1 + fi + + # parse 'beta' out of 'lane :beta do', etc + completions="$(sed -En 's/^[ ]*lane +:([^ ]+).*$/\1/p' "$file")" + completions="$completions update_fastlane" + + reply=( "${=completions}" ) +} + diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/custom_action_template.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/custom_action_template.rb new file mode 100644 index 0000000..e81f28c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/custom_action_template.rb @@ -0,0 +1,82 @@ +module Fastlane + module Actions + module SharedValues + [[NAME_UP]]_CUSTOM_VALUE = :[[NAME_UP]]_CUSTOM_VALUE + end + + class [[NAME_CLASS]] < Action + def self.run(params) + # fastlane will take care of reading in the parameter and fetching the environment variable: + UI.message "Parameter API Token: #{params[:api_token]}" + + # sh "shellcommand ./path" + + # Actions.lane_context[SharedValues::[[NAME_UP]]_CUSTOM_VALUE] = "my_val" + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "A short description with <= 80 characters of what this action does" + end + + def self.details + # Optional: + # this is your chance to provide a more detailed description of this action + "You can use this action to do cool things..." + end + + def self.available_options + # Define all options your action supports. 
+ + # Below a few examples + [ + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_[[NAME_UP]]_API_TOKEN", # The name of the environment variable + description: "API Token for [[NAME_CLASS]]", # a short description of this parameter + verify_block: proc do |value| + UI.user_error!("No API token for [[NAME_CLASS]] given, pass using `api_token: 'token'`") unless (value and not value.empty?) + # UI.user_error!("Couldn't find file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :development, + env_name: "FL_[[NAME_UP]]_DEVELOPMENT", + description: "Create a development certificate instead of a distribution one", + is_string: false, # true: verifies the input is a string, false: every kind of value + default_value: false) # the default value if the user didn't provide one + ] + end + + def self.output + # Define the shared values you are going to provide + # Example + [ + ['[[NAME_UP]]_CUSTOM_VALUE', 'A description of what this value contains'] + ] + end + + def self.return_value + # If your method provides a return value, you can describe here what it does + end + + def self.authors + # So no one will ever forget your contribution to fastlane :) You are awesome btw! + ["Your GitHub/Twitter Name"] + end + + def self.is_supported?(platform) + # you can do things like + # + # true + # + # platform == :ios + # + # [:ios, :mac].include?(platform) + # + + platform == :ios + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/mailgun_html_template.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/mailgun_html_template.erb new file mode 100644 index 0000000..2254cdf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/mailgun_html_template.erb @@ -0,0 +1,142 @@ + + + + + + + Fastlane Build + + + + + + + + + + + + + +
+<%# The original HTML email markup (tables, inline styles) was lost in extraction; only the text content and ERB tags below survived. %>
+Fastlane Build
+
+Git Author: <%= author %>
+Git Commit: <%= last_commit %>
+<% if ci_build_link %>
+CI build: Build
+<% end %>
+
+<%= message %>
+
+Download Release
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/report_template.xml.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/report_template.xml.erb
new file mode 100644
index 0000000..1501f6c
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/report_template.xml.erb
@@ -0,0 +1,15 @@
+<%# The XML element tags of this JUnit-style report were lost in extraction; the testsuites/testsuite/testcase/failure tags below are reconstructed from the surviving attribute fragments. %>
+<testsuites>
+  <testsuite>
+  <%# prefix index with leading zeroes to help alpha-numeric sorting. Note that index starts at 0 %>
+  <% log = (@steps.count-1).to_s.length %>
+  <% @steps.each_with_index do |step, index| %>
+    <% name = [index.to_s.rjust(log, "0"), step[:name]].join(": ") %>
+    <testcase name=<%= name.encode(:xml => :attr) %> time="<%= step[:time] %>">
+      <% if step[:error] %>
+        <failure message=<%= step[:error].encode(:xml => :attr).gsub("\n", " ") %> />
+      <% end %>
+    </testcase>
+  <% end %>
+  </testsuite>
+</testsuites>
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_html_template.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_html_template.erb
new file mode 100644
index 0000000..45d4584
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_html_template.erb
@@ -0,0 +1,105 @@
+<%# The HTML/CSS/JS markup of this install page was lost in extraction; only the text content and ERB tags below survived. %>
+Install <%= title %>

+<%= title %>
+
+Please open this page on your iPhone!
+
+App is being installed. Close Safari using the home button.
+
+This is a beta version and is not meant to share with the public.
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_plist_template.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_plist_template.erb
new file mode 100644
index 0000000..d888401
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_plist_template.erb
@@ -0,0 +1,31 @@
+<%# The plist XML tags below are reconstructed following the standard itms-services OTA manifest layout; only the keys and ERB values survived extraction. %>
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+  <key>items</key>
+  <array>
+    <dict>
+      <key>assets</key>
+      <array>
+        <dict>
+          <key>kind</key>
+          <string>software-package</string>
+          <key>url</key>
+          <string><%= ipa_url %></string>
+        </dict>
+      </array>
+      <key>metadata</key>
+      <dict>
+        <key>bundle-identifier</key>
+        <string><%= bundle_id %></string>
+        <key>bundle-version</key>
+        <string><%= bundle_version %></string>
+        <key>kind</key>
+        <string>software</string>
+        <key>title</key>
+        <string><%= title %></string>
+      </dict>
+    </dict>
+  </array>
+</dict>
+</plist>
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_version_template.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_version_template.erb
new file mode 100644
index 0000000..7f82883
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/assets/s3_version_template.erb
@@ -0,0 +1,4 @@
+{
+  "latestVersion": "<%= full_version %>",
+  "updateUrl": "itms-services://?action=download-manifest&url=<%= plist_url %>"
+}
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane.rb
new file mode 100644
index 0000000..9e0f170
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane.rb
@@ -0,0 +1,53 @@
+require 'fastlane_core'
+
+require 'fastlane/version'
+require 'fastlane/features'
+require 'fastlane/shells'
+require 'fastlane/tools'
+require 'fastlane/documentation/actions_list'
+require 'fastlane/actions/actions_helper' # has to be before fast_file
+require 'fastlane/fast_file'
+require 'fastlane/runner'
+require 'fastlane/setup/setup'
+require 'fastlane/lane'
+require 'fastlane/junit_generator'
+require 'fastlane/lane_manager'
+require 'fastlane/lane_manager_base'
+require 'fastlane/swift_lane_manager'
+require 'fastlane/action'
+require 'fastlane/action_collector'
+require 'fastlane/supported_platforms'
+require 'fastlane/configuration_helper'
+require 'fastlane/one_off'
+require 'fastlane/server/socket_server_action_command_executor'
+require 'fastlane/server/socket_server'
+require 'fastlane/command_line_handler'
+require 'fastlane/documentation/docs_generator'
+require 'fastlane/other_action'
+require 'fastlane/plugins/plugins'
+require 'fastlane/fastlane_require'
+require "fastlane/swift_fastlane_api_generator.rb"
+
+module Fastlane
+  Helper = FastlaneCore::Helper # you gotta love Ruby: Helper.* should use the Helper class contained in FastlaneCore
+  UI = FastlaneCore::UI
+  ROOT = Pathname.new(File.expand_path('../..', __FILE__))
+
+  class << self
+    def load_actions
+      Fastlane::Actions.load_default_actions
+      Fastlane::Actions.load_helpers
+
+      if FastlaneCore::FastlaneFolder.path
+        actions_path = File.join(FastlaneCore::FastlaneFolder.path, 'actions')
+        @external_actions = Fastlane::Actions.load_external_actions(actions_path) if File.directory?(actions_path)
+      end
+    end
+
+    attr_reader :external_actions
+
+    def plugin_manager
+      @plugin_manager ||= Fastlane::PluginManager.new
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action.rb
new file mode 100644
index 0000000..254c1ca
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action.rb
@@ -0,0 +1,198 @@
+require 'fastlane/actions/actions_helper'
+require 'forwardable'
+
+module 
Fastlane + class Action + AVAILABLE_CATEGORIES = [ + :testing, + :building, + :screenshots, + :project, + :code_signing, + :documentation, + :beta, + :push, + :production, + :source_control, + :notifications, + :app_store_connect, + :misc, + :deprecated # This should be the last item + ] + + RETURN_TYPES = [ + :string, + :array_of_strings, + :hash_of_strings, + :hash, + :bool, + :int + ] + + class << self + attr_accessor :runner + + extend(Forwardable) + + # to allow a simple `sh` in the custom actions + def_delegator(Actions, :sh_control_output, :sh) + end + + def self.run(params) + end + + # Implement in subclasses + def self.description + "No description provided".red + end + + def self.details + nil # this is your chance to provide a more detailed description of this action + end + + def self.available_options + # [ + # FastlaneCore::ConfigItem.new(key: :ipa_path, + # env_name: "CRASHLYTICS_IPA_PATH", + # description: "Value Description") + # ] + nil + end + + def self.output + # Return the keys you provide on the shared area + # [ + # ['IPA_OUTPUT_PATH', 'The path to the newly generated ipa file'] + # ] + nil + end + + def self.return_type + # Describes what type of data is expected to be returned, see RETURN_TYPES + nil + end + + def self.return_value + # Describes what this method returns + nil + end + + def self.sample_return_value + # Very optional + # You can return a sample return value, that might be returned by the actual action + # This is currently only used when generating the documentation and running its tests + nil + end + + def self.author + nil + end + + def self.authors + nil + end + + def self.is_supported?(platform) + # you can do things like + # true + # + # platform == :ios + # + # [:ios, :mac].include?(platform) + # + UI.crash!("Implementing `is_supported?` for all actions is mandatory. Please update #{self}") + end + + # Returns an array of string of sample usage of this action + def self.example_code + nil + end + + # Is printed out in the Steps: output in the terminal + # Return nil if you don't want any logging in the terminal/JUnit Report + def self.step_text(params) + self.action_name + end + + # Documentation category, available values defined in AVAILABLE_CATEGORIES + def self.category + :undefined + end + + # instead of "AddGitAction", this will return "add_git" to print it to the user + def self.action_name + self.name.split('::').last.gsub(/Action$/, '').fastlane_underscore + end + + def self.lane_context + Actions.lane_context + end + + # Allows the user to call an action from an action + def self.method_missing(method_sym, *arguments, &_block) + UI.error("Unknown method '#{method_sym}'") + UI.user_error!("To call another action from an action use `other_action.#{method_sym}` instead") + end + + # When shelling out from the action, should we use `bundle exec`? + def self.shell_out_should_use_bundle_exec? + return File.exist?('Gemfile') && !Helper.contained_fastlane? + end + + # Return a new instance of the OtherAction action + # We need to do this, since it has to have access to + # the runner object + def self.other_action + return OtherAction.new(self.runner) + end + + # Describes how the user should handle deprecated an action if its deprecated + # Returns a string (or nil) + def self.deprecated_notes + nil + end + end +end + +class String + def markdown_preserve_newlines + self.gsub(/(\n|$)/, '|\1') # prepend new lines with "|" so the erb template knows *not* to replace them with "
"s + end + + def markdown_sample(is_first = false) + self.markdown_clean_heredoc! + self.markdown_details(is_first) + end + + def markdown_list(is_first = false) + self.markdown_clean_heredoc! + self.gsub!(/^/, "- ") # add list dashes + self.prepend(">") unless is_first # the empty line that will be added breaks the quote + self.markdown_details(is_first) + end + + def markdown_details(is_first) + self.prepend("\n") unless is_first + self << "\n>" # continue the quote + self.markdown_preserve_newlines + end + + def markdown_clean_heredoc! + self.chomp! # remove the last new line added by the heredoc + self.dedent! # remove the leading whitespace (similar to the squigly heredoc `<<~`) + end + + def dedent! + first_line_indent = self.match(/^\s*/)[0] + + self.gsub!(/^#{first_line_indent}/, "") + end + + def remove_markdown + string = self.gsub(/^>/, "") # remove Markdown quotes + string = string.gsub(/\[http[^\]]+\]\(([^)]+)\)/, '\1 🔗') # remove Markdown links + string = string.gsub(/\[([^\]]+)\]\(([^\)]+)\)/, '"\1" (\2 🔗)') # remove Markdown links with custom text + string = string.gsub("|", "") # remove new line preserve markers + return string + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action_collector.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action_collector.rb new file mode 100644 index 0000000..4e7a03b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/action_collector.rb @@ -0,0 +1,35 @@ +module Fastlane + class ActionCollector + def show_message + UI.message("Sending Crash/Success information. Learn more at https://docs.fastlane.tools/#metrics") + UI.message("No personal/sensitive data is sent. Only sharing the following:") + UI.message(launches) + UI.message(@error) if @error + UI.message("This information is used to fix failing actions and improve integrations that are often used.") + UI.message("You can disable this by adding `opt_out_usage` at the top of your Fastfile") + end + + def determine_version(name) + self.class.determine_version(name) + end + + # e.g. + # :gym + # :xcversion + # "fastlane-plugin-my_plugin/xcversion" + def self.determine_version(name) + if name.to_s.include?(PluginManager.plugin_prefix) + # That's an action from a plugin, we need to fetch its version number + begin + plugin_name = name.split("/").first.gsub(PluginManager.plugin_prefix, '') + return Fastlane.const_get(plugin_name.fastlane_class)::VERSION + rescue => ex + UI.verbose(ex) + return "undefined" + end + end + + return Fastlane::VERSION # that's the case for all built-in actions + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/README.md new file mode 100644 index 0000000..8b27ca4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/README.md @@ -0,0 +1,5 @@ +All built-in integrations are available in this directory. Use the `fastlane new_action` command to create a new action. + +_fastlane_ will automatically detect the files in this folder + +All available actions are listed and documented on https://docs.fastlane.tools/actions/. Documentation for individual actions is available on https://docs.fastlane.tools/actions/%action_name%, e.g. https://docs.fastlane.tools/actions/copy_artifacts/. 
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/actions_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/actions_helper.rb new file mode 100644 index 0000000..e1cb4ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/actions_helper.rb @@ -0,0 +1,185 @@ +module Fastlane + module Actions + module SharedValues + LANE_NAME = :LANE_NAME + PLATFORM_NAME = :PLATFORM_NAME + ENVIRONMENT = :ENVIRONMENT + + # A slightly decorated hash that will store and fetch sensitive data + # but not display it while iterating keys and values + class LaneContextValues < Hash + def initialize + @sensitive_context = {} + end + + def set_sensitive(key, value) + @sensitive_context[key] = value + end + + def [](key) + if @sensitive_context.key?(key) + return @sensitive_context[key] + end + super + end + end + end + + def self.reset_aliases + @alias_actions = nil + end + + def self.alias_actions + unless @alias_actions + @alias_actions = {} + ActionsList.all_actions do |action, name| + next unless action.respond_to?(:aliases) + @alias_actions[name] = action.aliases + end + end + @alias_actions + end + + def self.executed_actions + @executed_actions ||= [] + end + + # The shared hash can be accessed by any action and contains information like the screenshots path or beta URL + def self.lane_context + @lane_context ||= SharedValues::LaneContextValues.new + end + + # Used in tests to get a clear lane before every test + def self.clear_lane_context + @lane_context = nil + end + + # Pass a block which should be tracked. One block = one testcase + # @param step_name (String) the name of the currently built code (e.g. snapshot, sigh, ...) + # This might be nil, in which case the step is not printed out to the terminal + def self.execute_action(step_name) + start = Time.now # before the raise block, since `start` is required in the ensure block + UI.crash!("No block given") unless block_given? 
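+      # Call sketch (hypothetical step name): each step executed through this
+      # helper is timed and recorded, e.g.
+      #   Actions.execute_action("deliver") { run_deliver_step }
+      # appends { name: "deliver", error: ..., time: ... } to executed_actions.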
+ + error = nil + exc = nil + + begin + UI.header("Step: " + step_name) if step_name + yield + rescue => ex + exc = ex + error = caller.join("\n") + "\n\n" + ex.to_s + end + ensure + # This is also called, when the block has a return statement + if step_name + duration = Time.now - start + + executed_actions << { + name: step_name, + error: error, + time: duration + } + end + + raise exc if exc + end + + # returns a list of official integrations + # rubocop:disable Naming/AccessorMethodName + def self.get_all_official_actions + Dir[File.expand_path('*.rb', File.dirname(__FILE__))].collect do |file| + File.basename(file).gsub('.rb', '').to_sym + end + end + # rubocop:enable Naming/AccessorMethodName + + # Returns the class ref to the action based on the action name + # Returns nil if the action is not available + def self.action_class_ref(action_name) + class_name = action_name.to_s.fastlane_class + 'Action' + class_ref = nil + begin + class_ref = Fastlane::Actions.const_get(class_name) + rescue NameError + return nil + end + return class_ref + end + + def self.load_default_actions + Dir[File.expand_path('*.rb', File.dirname(__FILE__))].each do |file| + require file + end + end + + # Import all the helpers + def self.load_helpers + Dir[File.expand_path('../helper/*.rb', File.dirname(__FILE__))].each do |file| + require file + end + end + + def self.load_external_actions(path) + UI.user_error!("You need to pass a valid path") unless File.exist?(path) + + class_refs = [] + Dir[File.expand_path('*.rb', path)].each do |file| + begin + require file + rescue SyntaxError => ex + content = File.read(file, encoding: "utf-8") + ex.to_s.lines + .collect { |error| error.match(/#{file}:(\d+):(.*)/) } + .reject(&:nil?) + .each { |error| UI.content_error(content, error[1]) } + UI.user_error!("Syntax error in #{File.basename(file)}") + next + end + + file_name = File.basename(file).gsub('.rb', '') + + class_name = file_name.fastlane_class + 'Action' + begin + class_ref = Fastlane::Actions.const_get(class_name) + class_refs << class_ref + + if class_ref.respond_to?(:run) + UI.success("Successfully loaded custom action '#{file}'.") if FastlaneCore::Globals.verbose? + else + UI.error("Could not find method 'run' in class #{class_name}.") + UI.error('For more information, check out the docs: https://docs.fastlane.tools/') + UI.user_error!("Action '#{file_name}' is damaged!", show_github_issues: true) + end + rescue NameError + # Action not found + UI.error("Could not find '#{class_name}' class defined.") + UI.error('For more information, check out the docs: https://docs.fastlane.tools/') + UI.user_error!("Action '#{file_name}' is damaged!", show_github_issues: true) + end + end + Actions.reset_aliases + + return class_refs + end + + def self.formerly_bundled_actions + ["xcake"] + end + + # Returns a boolean indicating whether the class + # reference is a Fastlane::Action + def self.is_class_action?(class_ref) + return false if class_ref.nil? 
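+ # Illustrative note (not part of the vendored source) on the LaneContextValues hash
+ # defined above: sensitive entries are readable by key but never surface when the
+ # hash is iterated or printed. `:MY_TOKEN` is a hypothetical key.
+ #
+ #   Actions.lane_context.set_sensitive(:MY_TOKEN, "s3cr3t")
+ #   Actions.lane_context[:MY_TOKEN]              # => "s3cr3t"
+ #   Actions.lane_context.each { |k, v| ... }     # :MY_TOKEN is not yielded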
+ is_an_action = class_ref < Fastlane::Action + return is_an_action || false + end + + # Returns a boolean indicating if the class + # reference is a deprecated Fastlane::Action + def self.is_deprecated?(class_ref) + is_class_action?(class_ref) && class_ref.category == :deprecated + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb.rb new file mode 100644 index 0000000..ce72496 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb.rb @@ -0,0 +1,75 @@ +module Fastlane + module Actions + module SharedValues + end + + class AdbAction < Action + def self.run(params) + adb = Helper::AdbHelper.new(adb_path: params[:adb_path]) + result = adb.trigger(command: params[:command], serial: params[:serial]) + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Run ADB Actions" + end + + def self.details + "see adb --help for more details" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :serial, + env_name: "FL_ANDROID_SERIAL", + description: "Android serial of the device to use for this command", + default_value: ""), + FastlaneCore::ConfigItem.new(key: :command, + env_name: "FL_ADB_COMMAND", + description: "All commands you want to pass to the adb command, e.g. `kill-server`", + optional: true), + FastlaneCore::ConfigItem.new(key: :adb_path, + env_name: "FL_ADB_PATH", + optional: true, + description: "The path to your `adb` binary (can be left blank if the ANDROID_SDK_ROOT, ANDROID_HOME or ANDROID_SDK environment variable is set)", + default_value: "adb") + ] + end + + def self.output + end + + def self.category + :building + end + + def self.example_code + [ + 'adb( + command: "shell ls" + )' + ] + end + + def self.return_value + "The output of the adb command" + end + + def self.return_type + :string + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + platform == :android + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb_devices.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb_devices.rb new file mode 100644 index 0000000..93f358e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/adb_devices.rb @@ -0,0 +1,70 @@ +module Fastlane + module Actions + module SharedValues + end + + class AdbDevicesAction < Action + def self.run(params) + adb = Helper::AdbHelper.new(adb_path: params[:adb_path]) + result = adb.load_all_devices + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Get an array of Connected android device serials" + end + + def self.details + "Fetches device list via adb, e.g. run an adb command on all connected devices." 
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :adb_path, + env_name: "FL_ADB_PATH", + description: "The path to your `adb` binary (can be left blank if the ANDROID_SDK_ROOT environment variable is set)", + optional: true, + default_value: "adb") + ] + end + + def self.output + end + + def self.example_code + [ + 'adb_devices.each do |device| + model = adb(command: "shell getprop ro.product.model", + serial: device.serial).strip + + puts "Model #{model} is connected" + end' + ] + end + + def self.sample_return_value + [] + end + + def self.category + :misc + end + + def self.return_value + "Returns an array of all currently connected android devices" + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + platform == :android + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_extra_platforms.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_extra_platforms.rb new file mode 100644 index 0000000..1fe62b4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_extra_platforms.rb @@ -0,0 +1,45 @@ +module Fastlane + module Actions + class AddExtraPlatformsAction < Action + def self.run(params) + UI.verbose("Before injecting extra platforms: #{Fastlane::SupportedPlatforms.all}") + Fastlane::SupportedPlatforms.extra = params[:platforms] + UI.verbose("After injecting extra platforms (#{params[:platforms]})...: #{Fastlane::SupportedPlatforms.all}") + end + + def self.description + "Modify the default list of supported platforms" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :platforms, + optional: false, + type: Array, + default_value: "", + description: "The optional extra platforms to support") + ] + end + + def self.authors + ["lacostej"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'add_extra_platforms( + platforms: [:windows, :neogeo] + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_git_tag.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_git_tag.rb new file mode 100644 index 0000000..d42ecf0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/add_git_tag.rb @@ -0,0 +1,138 @@ +module Fastlane + module Actions + # Adds a git tag to the current commit + class AddGitTagAction < Action + def self.run(options) + # lane name in lane_context could be nil because you can just call $fastlane add_git_tag which has no context + lane_name = Actions.lane_context[Actions::SharedValues::LANE_NAME].to_s.delete(' ') # no spaces allowed + + if options[:tag] + tag = options[:tag] + elsif options[:build_number] + tag_components = [options[:grouping]] + tag_components << lane_name if options[:includes_lane] + tag_components << "#{options[:prefix]}#{options[:build_number]}#{options[:postfix]}" + tag = tag_components.join('/') + else + UI.user_error!("No value found for 'tag' or 'build_number'. At least one of them must be provided. 
Note that if you do specify a tag, all other arguments are ignored.") + end + message = options[:message] || "#{tag} (fastlane)" + + cmd = ['git tag'] + + cmd << ["-am #{message.shellescape}"] + cmd << '--force' if options[:force] + cmd << '-s' if options[:sign] + cmd << tag.shellescape + cmd << options[:commit].to_s if options[:commit] + + UI.message("Adding git tag '#{tag}' 🎯.") + Actions.sh(cmd.join(' ')) + end + + def self.description + "This will add an annotated git tag to the current branch" + end + + def self.details + list = <<-LIST.markdown_list + `grouping` is just to keep your tags organised under one 'folder', defaults to 'builds' + `lane` is the name of the current fastlane lane, if chosen to be included via 'includes_lane' option, which defaults to 'true' + `prefix` is anything you want to stick in front of the version number, e.g. 'v' + `postfix` is anything you want to stick at the end of the version number, e.g. '-RC1' + `build_number` is the build number, which defaults to the value emitted by the `increment_build_number` action + LIST + + [ + "This will automatically tag your build with the following format: `<grouping>/<lane>/<prefix><build_number><postfix>`, where:".markdown_preserve_newlines, + list, + "For example, for build 1234 in the 'appstore' lane, it will tag the commit with `builds/appstore/1234`." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :tag, + env_name: "FL_GIT_TAG_TAG", + description: "Define your own tag text. This will replace all other parameters", + optional: true), + FastlaneCore::ConfigItem.new(key: :grouping, + env_name: "FL_GIT_TAG_GROUPING", + description: "Is used to keep your tags organised under one 'folder'", + default_value: 'builds'), + FastlaneCore::ConfigItem.new(key: :includes_lane, + env_name: "FL_GIT_TAG_INCLUDES_LANE", + description: "Whether the current lane should be included in the tag and message composition, e.g. '<grouping>/<lane>/<prefix><build_number><postfix>'", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :prefix, + env_name: "FL_GIT_TAG_PREFIX", + description: "Anything you want to put in front of the version number (e.g. 'v')", + default_value: ''), + FastlaneCore::ConfigItem.new(key: :postfix, + env_name: "FL_GIT_TAG_POSTFIX", + description: "Anything you want to put at the end of the version number (e.g. '-RC1')", + default_value: ''), + FastlaneCore::ConfigItem.new(key: :build_number, + env_name: "FL_GIT_TAG_BUILD_NUMBER", + description: "The build number. Defaults to the result of increment_build_number if you\'re using it", + default_value: Actions.lane_context[Actions::SharedValues::BUILD_NUMBER], + default_value_dynamic: true, + skip_type_validation: true, # skipping validation because we both allow integer and string + optional: true), + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_GIT_TAG_MESSAGE", + description: "The tag message. Defaults to the tag's name", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :commit, + env_name: "FL_GIT_TAG_COMMIT", + description: "The commit or object where the tag will be set.
Defaults to the current HEAD", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_GIT_TAG_FORCE", + description: "Force adding the tag", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :sign, + env_name: "FL_GIT_TAG_SIGN", + description: "Make a GPG-signed tag, using the default e-mail address's key", + optional: true, + type: Boolean, + default_value: false) + ] + end + + def self.example_code + [ + 'add_git_tag # simple tag with default values', + 'add_git_tag( + grouping: "fastlane-builds", + includes_lane: true, + prefix: "v", + postfix: "-RC1", + build_number: 123 + )', + '# Alternatively, you can specify your own tag. Note that if you do specify a tag, all other arguments are ignored. + add_git_tag( + tag: "my_custom_tag" + )' + ] + end + + def self.category + :source_control + end + + def self.authors + ["lmirosevic", "maschall"] + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_build_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_build_number.rb new file mode 100644 index 0000000..df01a3c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_build_number.rb @@ -0,0 +1,255 @@ +require 'ostruct' + +module Fastlane + module Actions + module SharedValues + LATEST_BUILD_NUMBER = :LATEST_BUILD_NUMBER + LATEST_VERSION = :LATEST_VERSION + end + + class AppStoreBuildNumberAction < Action + def self.run(params) + build_v, build_nr = get_build_version_and_number(params) + + Actions.lane_context[SharedValues::LATEST_BUILD_NUMBER] = build_nr + Actions.lane_context[SharedValues::LATEST_VERSION] = build_v + + return build_nr + end + + def self.get_build_version_and_number(params) + require 'spaceship' + + result = get_build_info(params) + build_nr = result.build_nr + + # Convert build_nr to int (for legacy use) if no "." in string + if build_nr.kind_of?(String) && !build_nr.include?(".") + build_nr = build_nr.to_i + end + + return result.build_v, build_nr + end + + def self.get_build_info(params) + # Prompts select team if multiple teams and none specified + if (api_token = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? 
+ UI.message("Using existing authorization token for App Store Connect API") + else + # Username is now optional since addition of App Store Connect API Key + # Force asking for username to prompt user if not already set + params.fetch(:username, force_ask: true) + + UI.message("Login to App Store Connect (#{params[:username]})") + Spaceship::ConnectAPI.login(params[:username], use_portal: false, use_tunes: true, tunes_team_id: params[:team_id], team_name: params[:team_name]) + UI.message("Login successful") + end + + platform = Spaceship::ConnectAPI::Platform.map(params[:platform]) + + app = Spaceship::ConnectAPI::App.find(params[:app_identifier]) + UI.user_error!("Could not find an app on App Store Connect with app_identifier: #{params[:app_identifier]}") unless app + if params[:live] + UI.message("Fetching the latest build number for live-version") + live_version = app.get_live_app_store_version(platform: platform) + + UI.user_error!("Could not find a live-version of #{params[:app_identifier]} on App Store Connect") unless live_version + build_nr = live_version.build.version + + UI.message("Latest upload for live-version #{live_version.version_string} is build: #{build_nr}") + + return OpenStruct.new({ build_nr: build_nr, build_v: live_version.version_string }) + else + version_number = params[:version] + platform = params[:platform] + + # Create filter for get_builds with optional version number + filter = { app: app.id } + if version_number + filter["preReleaseVersion.version"] = version_number + version_number_message = "version #{version_number}" + else + version_number_message = "any version" + end + + if platform + filter["preReleaseVersion.platform"] = Spaceship::ConnectAPI::Platform.map(platform) + platform_message = "#{platform} platform" + else + platform_message = "any platform" + end + + UI.message("Fetching the latest build number for #{version_number_message}") + + # Get latest build for optional version number and return build number if found + build = Spaceship::ConnectAPI.get_builds(filter: filter, sort: "-uploadedDate", includes: "preReleaseVersion", limit: 1).first + if build + build_nr = build.version + UI.message("Latest upload for version #{build.app_version} on #{platform_message} is build: #{build_nr}") + return OpenStruct.new({ build_nr: build_nr, build_v: build.app_version }) + end + + # Let user know that build couldn't be found + UI.important("Could not find a build for #{version_number_message} on #{platform_message} on App Store Connect") + + if params[:initial_build_number].nil? 
+ UI.user_error!("Could not find a build on App Store Connect - and 'initial_build_number' option is not set") + else + build_nr = params[:initial_build_number] + UI.message("Using initial build number of #{build_nr}") + return OpenStruct.new({ build_nr: build_nr, build_v: version_number }) + end + end + end + + def self.order_versions(versions) + versions.map(&:to_s).sort_by { |v| Gem::Version.new(v) } + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Returns the current build_number of either live or edit version" + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + [ + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["APPSTORE_BUILD_NUMBER_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["APPSTORE_BUILD_NUMBER_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + default_value: Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::APP_STORE_CONNECT_API_KEY], + default_value_dynamic: true, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :initial_build_number, + env_name: "INITIAL_BUILD_NUMBER", + description: "sets the build number to given value if no build is in current train", + skip_type_validation: true), # as we also allow integers, which we convert to strings anyway + FastlaneCore::ConfigItem.new(key: :app_identifier, + short_option: "-a", + env_name: "FASTLANE_APP_IDENTIFIER", + description: "The bundle identifier of your app", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "ITUNESCONNECT_USER", + description: "Your Apple ID Username", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-k", + env_name: "APPSTORE_BUILD_NUMBER_LIVE_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # as we also allow integers, which we convert to strings anyway + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :live, + short_option: "-l", + env_name: "APPSTORE_BUILD_NUMBER_LIVE", + description: "Query the live version (ready-for-sale)", + optional: true, + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "LATEST_VERSION", + description: "The version number whose latest build number we want", + optional: 
true), + FastlaneCore::ConfigItem.new(key: :platform, + short_option: "-j", + env_name: "APPSTORE_PLATFORM", + description: "The platform to use (optional)", + optional: true, + default_value: "ios", + verify_block: proc do |value| + UI.user_error!("The platform can only be ios, appletvos, or osx") unless %('ios', 'appletvos', 'osx').include?(value) + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-e", + env_name: "LATEST_TESTFLIGHT_BUILD_NUMBER_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_NAME"] = value.to_s + end) + ] + end + + def self.output + [ + ['LATEST_BUILD_NUMBER', 'The latest build number of either live or testflight version'], + ['LATEST_VERSION', 'The version of the latest build number'] + ] + end + + def self.details + [ + "Returns the current build number of either the live or testflight version - it is useful for getting the build_number of the current or ready-for-sale app version, and it also works on non-live testflight version.", + "If you need to handle more build-trains please see `latest_testflight_build_number`." + ].join("\n") + end + + def self.example_code + [ + 'app_store_build_number', + 'app_store_build_number( + app_identifier: "app.identifier", + username: "user@host.com" + )', + 'app_store_build_number( + live: false, + app_identifier: "app.identifier", + version: "1.2.9" + )', + 'api_key = app_store_connect_api_key( + key_id: "MyKeyID12345", + issuer_id: "00000000-0000-0000-0000-000000000000", + key_filepath: "./AuthKey.p8" + ) + build_num = app_store_build_number( + api_key: api_key + )' + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.category + :app_store_connect + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_connect_api_key.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_connect_api_key.rb new file mode 100644 index 0000000..aaf0386 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/app_store_connect_api_key.rb @@ -0,0 +1,148 @@ +require 'base64' +require 'spaceship' + +module Fastlane + module Actions + module SharedValues + APP_STORE_CONNECT_API_KEY = :APP_STORE_CONNECT_API_KEY + end + + class AppStoreConnectApiKeyAction < Action + def self.run(options) + key_id = options[:key_id] + issuer_id = options[:issuer_id] + key_content = options[:key_content] + is_key_content_base64 = options[:is_key_content_base64] + key_filepath = options[:key_filepath] + duration = options[:duration] + in_house = options[:in_house] + + if key_content.nil? && key_filepath.nil? 
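+ # Illustrative note (not part of the vendored source): the hash assembled below
+ # mirrors the named arguments of Spaceship::ConnectAPI::Token.create; the values
+ # shown here are hypothetical:
+ #
+ #   { key_id: "D83848D23", issuer_id: "227b0bbf-ada8-458c-9d62-3d8022b7d07f",
+ #     key: "-----BEGIN EC PRIVATE KEY-----\n...\n-----END EC PRIVATE KEY-----",
+ #     is_key_content_base64: false, duration: 500, in_house: false }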
+ UI.user_error!(":key_content or :key_filepath is required") + end + + # New lines don't get read properly when coming from an ENV + # Replacing them literal version with a new line + key_content = key_content.gsub('\n', "\n") if key_content + + # This hash matches the named arguments on + # the Spaceship::ConnectAPI::Token.create method + key = { + key_id: key_id, + issuer_id: issuer_id, + key: key_content || File.binread(File.expand_path(key_filepath)), + is_key_content_base64: is_key_content_base64, + duration: duration, + in_house: in_house + } + + Actions.lane_context.set_sensitive(SharedValues::APP_STORE_CONNECT_API_KEY, key) + + # Creates Spaceship API Key session + # User does not need to pass the token into any actions because of this + Spaceship::ConnectAPI.token = Spaceship::ConnectAPI::Token.create(**key) if options[:set_spaceship_token] + + return key + end + + def self.description + "Load the App Store Connect API token to use in other fastlane tools and actions" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :key_id, + env_name: "APP_STORE_CONNECT_API_KEY_KEY_ID", + description: "The key ID"), + FastlaneCore::ConfigItem.new(key: :issuer_id, + env_name: "APP_STORE_CONNECT_API_KEY_ISSUER_ID", + description: "The issuer ID"), + FastlaneCore::ConfigItem.new(key: :key_filepath, + env_name: "APP_STORE_CONNECT_API_KEY_KEY_FILEPATH", + description: "The path to the key p8 file", + optional: true, + conflicting_options: [:key_content], + verify_block: proc do |value| + UI.user_error!("Couldn't find key p8 file at path '#{value}'") unless File.exist?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :key_content, + env_name: "APP_STORE_CONNECT_API_KEY_KEY", + description: "The content of the key p8 file", + sensitive: true, + optional: true, + conflicting_options: [:filepath]), + FastlaneCore::ConfigItem.new(key: :is_key_content_base64, + env_name: "APP_STORE_CONNECT_API_KEY_IS_KEY_CONTENT_BASE64", + description: "Whether :key_content is Base64 encoded or not", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :duration, + env_name: "APP_STORE_CONNECT_API_KEY_DURATION", + description: "The token session duration", + optional: true, + default_value: Spaceship::ConnectAPI::Token::DEFAULT_TOKEN_DURATION, + type: Integer, + verify_block: proc do |value| + UI.user_error!("The duration can't be more than 1200 (20 minutes) and the value entered was '#{value}'") unless value <= 1200 + end), + FastlaneCore::ConfigItem.new(key: :in_house, + env_name: "APP_STORE_CONNECT_API_KEY_IN_HOUSE", + description: "Is App Store or Enterprise (in house) team? App Store Connect API cannot determine this on its own (yet)", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :set_spaceship_token, + env_name: "APP_STORE_CONNECT_API_KEY_SET_SPACESHIP_TOKEN", + description: "Authorizes all Spaceship::ConnectAPI requests by automatically setting Spaceship::ConnectAPI.token", + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['APP_STORE_CONNECT_API_KEY', 'The App Store Connect API key information used for authorization requests. 
This hash can be passed directly into the :api_key options on other tools or into Spaceship::ConnectAPI::Token.create method'] + ] + end + + def self.author + ["joshdholtz"] + end + + def self.is_supported?(platform) + [:ios, :mac, :tvos].include?(platform) + end + + def self.details + [ + "Load the App Store Connect API token to use in other fastlane tools and actions" + ].join("\n") + end + + def self.example_code + [ + 'app_store_connect_api_key( + key_id: "D83848D23", + issuer_id: "227b0bbf-ada8-458c-9d62-3d8022b7d07f", + key_filepath: "D83848D23.p8" + )', + 'app_store_connect_api_key( + key_id: "D83848D23", + issuer_id: "227b0bbf-ada8-458c-9d62-3d8022b7d07f", + key_filepath: "D83848D23.p8", + duration: 200, + in_house: true + )', + 'app_store_connect_api_key( + key_id: "D83848D23", + issuer_id: "227b0bbf-ada8-458c-9d62-3d8022b7d07f", + key_content: "-----BEGIN EC PRIVATE KEY-----\nfewfawefawfe\n-----END EC PRIVATE KEY-----" + )' + ] + end + + def self.category + :app_store_connect + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appaloosa.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appaloosa.rb new file mode 100644 index 0000000..3e4270a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appaloosa.rb @@ -0,0 +1,271 @@ +module Fastlane + module Actions + class AppaloosaAction < Action + APPALOOSA_SERVER = 'https://www.appaloosa-store.com/api/v2'.freeze + def self.run(params) + api_key = params[:api_token] + store_id = params[:store_id] + binary = params[:binary] + remove_extra_screenshots_file(params[:screenshots]) + binary_url = get_binary_link(binary, api_key, store_id, params[:group_ids]) + return if binary_url.nil? + screenshots_url = get_screenshots_links(api_key, store_id, params[:screenshots], params[:locale], params[:device]) + upload_on_appaloosa(api_key, store_id, binary_url, screenshots_url, params[:group_ids], params[:description], params[:changelog]) + end + + def self.get_binary_link(binary, api_key, store_id, group_ids) + key_s3 = upload_on_s3(binary, api_key, store_id, group_ids) + return if key_s3.nil? 
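+ # Illustrative summary (not part of the vendored source) of the upload flow the
+ # helpers below implement:
+ #   1. GET /upload_services/presign_form                      -> Base64-encoded signed S3 URL + storage key
+ #   2. PUT to the decoded S3 URL with the binary as the body  -> uploads the file
+ #   3. GET /<store_id>/upload_services/url_for_download       -> exchanges the key for a binary_url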
+ get_s3_url(api_key, store_id, key_s3) + end + + def self.upload_on_s3(file, api_key, store_id, group_ids = '') + file_name = file.split('/').last + uri = URI("#{APPALOOSA_SERVER}/upload_services/presign_form") + params = { file: file_name, store_id: store_id, group_ids: group_ids, api_key: api_key } + uri.query = URI.encode_www_form(params) + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + presign_form_response = http.request(Net::HTTP::Get.new(uri.request_uri)) + json_res = JSON.parse(presign_form_response.body) + return if error_detected(json_res['errors']) + s3_sign = json_res['s3_sign'] + path = json_res['path'] + uri = URI.parse(Base64.decode64(s3_sign)) + File.open(file, 'rb') do |f| + http = Net::HTTP.new(uri.host) + put = Net::HTTP::Put.new(uri.request_uri) + put.body = f.read + put['content-type'] = '' + http.request(put) + end + path + end + + def self.get_s3_url(api_key, store_id, path) + uri = URI("#{APPALOOSA_SERVER}/#{store_id}/upload_services/url_for_download") + params = { store_id: store_id, api_key: api_key, key: path } + uri.query = URI.encode_www_form(params) + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + url_for_download_response = http.request(Net::HTTP::Get.new(uri.request_uri)) + if invalid_response?(url_for_download_response) + UI.user_error!("ERROR: A problem occurred with your API token and your store id. Please try again.") + end + json_res = JSON.parse(url_for_download_response.body) + return if error_detected(json_res['errors']) + json_res['binary_url'] + end + + def self.remove_extra_screenshots_file(screenshots_env) + extra_file = "#{screenshots_env}/screenshots.html" + File.unlink(extra_file) if File.exist?(extra_file) + end + + def self.upload_screenshots(screenshots, api_key, store_id) + return if screenshots.nil? + list = [] + list << screenshots.map do |screen| + upload_on_s3(screen, api_key, store_id) + end + end + + def self.get_uploaded_links(uploaded_screenshots, api_key, store_id) + return if uploaded_screenshots.nil? + urls = [] + urls << uploaded_screenshots.flatten.map do |url| + get_s3_url(api_key, store_id, url) + end + end + + def self.get_screenshots_links(api_key, store_id, screenshots_path, locale, device) + screenshots = get_screenshots(screenshots_path, locale, device) + return if screenshots.nil? + uploaded = upload_screenshots(screenshots, api_key, store_id) + links = get_uploaded_links(uploaded, api_key, store_id) + links.kind_of?(Array) ? links.flatten : nil + end + + def self.get_screenshots(screenshots_path, locale, device) + get_env_value('screenshots').nil? ? locale = '' : locale.concat('/') + device.nil? ? device = '' : device.concat('-') + screenshots_path.strip.empty? ? nil : screenshots_list(screenshots_path, locale, device) + end + + def self.screenshots_list(path, locale, device) + return warning_detected("screenshots folder not found") unless Dir.exist?("#{path}/#{locale}") + list = Dir.entries("#{path}/#{locale}") - ['.', '..'] + list.map do |screen| + next if screen.match(device).nil? 
+ "#{path}/#{locale}#{screen}" unless Dir.exist?("#{path}/#{locale}#{screen}") + end.compact + end + + def self.upload_on_appaloosa(api_key, store_id, binary_path, screenshots, group_ids, description, changelog) + screenshots = all_screenshots_links(screenshots) + uri = URI("#{APPALOOSA_SERVER}/#{store_id}/mobile_application_updates/upload") + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + req = Net::HTTP::Post.new(uri.path, { 'Content-Type' => 'application/json' }) + req.body = { store_id: store_id, + api_key: api_key, + mobile_application_update: { + description: description, + changelog: changelog, + binary_path: binary_path, + screenshot1: screenshots[0], + screenshot2: screenshots[1], + screenshot3: screenshots[2], + screenshot4: screenshots[3], + screenshot5: screenshots[4], + group_ids: group_ids, + provider: 'fastlane' + } }.to_json + uoa_response = http.request(req) + json_res = JSON.parse(uoa_response.body) + if json_res['errors'] + UI.error("App: #{json_res['errors']}") + else + UI.success("Binary processing: Check your app': #{json_res['link']}") + end + end + + def self.all_screenshots_links(screenshots) + if screenshots.nil? + screens = %w(screenshot1 screenshot2 screenshot3 screenshot4 screenshot5) + screenshots = screens.map do |_k, _v| + '' + end + else + missings = 5 - screenshots.count + (1..missings).map do |_i| + screenshots << '' + end + end + screenshots + end + + def self.get_env_value(option) + available_options.map do |opt| + opt if opt.key == option.to_sym + end.compact[0].default_value + end + + def self.error_detected(errors) + if errors + UI.user_error!("ERROR: #{errors}") + else + false + end + end + + def self.warning_detected(warning) + UI.important("WARNING: #{warning}") + nil + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + 'Upload your app to [Appaloosa Store](https://www.appaloosa-store.com/)' + end + + def self.details + [ + "Appaloosa is a private mobile application store. This action offers a quick deployment on the platform.", + "You can create an account, push to your existing account, or manage your user groups.", + "We accept iOS and Android applications." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :binary, + env_name: 'FL_APPALOOSA_BINARY', + description: 'Binary path. Optional for ipa if you use the `ipa` or `xcodebuild` action', + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find ipa || apk file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: 'FL_APPALOOSA_API_TOKEN', + sensitive: true, + description: "Your API token"), + FastlaneCore::ConfigItem.new(key: :store_id, + env_name: 'FL_APPALOOSA_STORE_ID', + description: "Your Store id"), + FastlaneCore::ConfigItem.new(key: :group_ids, + env_name: 'FL_APPALOOSA_GROUPS', + description: 'Your app is limited to special users? 
Give us the group ids', + default_value: '', + optional: true), + FastlaneCore::ConfigItem.new(key: :screenshots, + env_name: 'FL_APPALOOSA_SCREENSHOTS', + description: 'Add some screenshots application to your store or hit [enter]', + default_value: Actions.lane_context[SharedValues::SNAPSHOT_SCREENSHOTS_PATH], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :locale, + env_name: 'FL_APPALOOSA_LOCALE', + description: 'Select the folder locale for your screenshots', + default_value: 'en-US', + optional: true), + FastlaneCore::ConfigItem.new(key: :device, + env_name: 'FL_APPALOOSA_DEVICE', + description: 'Select the device format for your screenshots', + optional: true), + FastlaneCore::ConfigItem.new(key: :description, + env_name: 'FL_APPALOOSA_DESCRIPTION', + description: 'Your app description', + optional: true), + FastlaneCore::ConfigItem.new(key: :changelog, + env_name: 'FL_APPALOOSA_CHANGELOG', + description: 'Your app changelog', + optional: true) + ] + end + + def self.authors + ['Appaloosa'] + end + + def self.is_supported?(platform) + [:ios, :mac, :android].include?(platform) + end + + def self.invalid_response?(url_for_download_response) + url_for_download_response.kind_of?(Net::HTTPNotFound) || + url_for_download_response.kind_of?(Net::HTTPForbidden) + end + + def self.example_code + [ + "appaloosa( + # Path tor your IPA or APK + binary: '/path/to/binary.ipa', + # You can find your store’s id at the bottom of the “Settings” page of your store + store_id: 'your_store_id', + # You can find your api_token at the bottom of the “Settings” page of your store + api_token: 'your_api_key', + # User group_ids visibility, if it's not specified we'll publish the app for all users in your store' + group_ids: '112, 232, 387', + # You can use fastlane/snapshot or specify your own screenshots folder. + # If you use snapshot please specify a local and a device to upload your screenshots from. + # When multiple values are specified in the Snapfile, we default to 'en-US' + locale: 'en-US', + # By default, the screenshots from the last device will be used + device: 'iPhone6', + # Screenshots' filenames should start with device's name like 'iphone6-s1.png' if device specified + screenshots: '/path/to_your/screenshots' + )" + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize.rb new file mode 100644 index 0000000..58306c6 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize.rb @@ -0,0 +1,193 @@ +module Fastlane + module Actions + module SharedValues + APPETIZE_PUBLIC_KEY = :APPETIZE_PUBLIC_KEY + APPETIZE_APP_URL = :APPETIZE_APP_URL + APPETIZE_MANAGE_URL = :APPETIZE_MANAGE_URL + APPETIZE_API_HOST = :APPETIZE_API_HOST + end + + class AppetizeAction < Action + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + + def self.run(options) + require 'net/http' + require 'net/http/post/multipart' + require 'uri' + require 'json' + + params = { + platform: options[:platform] + } + + if options[:path] + params[:file] = UploadIO.new(options[:path], 'application/zip') + else + UI.user_error!('url parameter is required if no file path is specified') if options[:url].nil? 
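+ # Illustrative note (not part of the vendored source): create_request below builds a
+ # multipart POST (via UploadIO) when a local :path is supplied, and a plain JSON POST
+ # when only a remote :url is given, e.g. (values hypothetical):
+ #
+ #   appetize(path: "./MyApp.zip", api_token: "yourapitoken")                  # multipart upload
+ #   appetize(url: "https://example.com/MyApp.zip", api_token: "yourapitoken") # JSON body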
+ params[:url] = options[:url] + end + + params[:note] = options[:note] if options[:note].to_s.length > 0 + + if options[:timeout] + params[:timeout] = options[:timeout] + end + + uri = URI.parse(appetize_url(options)) + req = create_request(uri, params) + req.basic_auth(options[:api_token], nil) + + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + + if params[:platform] == 'ios' + UI.message("Uploading ipa to appetize... this might take a while") + else + UI.message("Uploading apk to appetize... this might take a while") + end + + response = http.request(req) + + parse_response(response) # this will raise an exception if something goes wrong + + UI.message("App URL: #{Actions.lane_context[SharedValues::APPETIZE_APP_URL]}") + UI.message("Manage URL: #{Actions.lane_context[SharedValues::APPETIZE_MANAGE_URL]}") + UI.message("Public Key: #{Actions.lane_context[SharedValues::APPETIZE_PUBLIC_KEY]}") + UI.success("Build successfully uploaded to Appetize.io") + end + + def self.appetize_url(options) + Actions.lane_context[SharedValues::APPETIZE_API_HOST] = options[:api_host] + "https://#{options[:api_host]}/v1/apps/#{options[:public_key]}" + end + private_class_method :appetize_url + + def self.create_request(uri, params) + if params[:url] + req = Net::HTTP::Post.new(uri.request_uri, { 'Content-Type' => 'application/json' }) + req.body = JSON.generate(params) + else + req = Net::HTTP::Post::Multipart.new(uri.path, params) + end + + req + end + private_class_method :create_request + + def self.parse_response(response) + body = JSON.parse(response.body) + app_url = body['appURL'] + manage_url = body['manageURL'] + public_key = body['publicKey'] + + Actions.lane_context[SharedValues::APPETIZE_PUBLIC_KEY] = public_key + Actions.lane_context[SharedValues::APPETIZE_APP_URL] = app_url + Actions.lane_context[SharedValues::APPETIZE_MANAGE_URL] = manage_url + return true + rescue => ex + UI.error(ex) + UI.user_error!("Error uploading to Appetize.io: #{response.body}") + end + private_class_method :parse_response + + def self.description + "Upload your app to [Appetize.io](https://appetize.io/) to stream it in browser" + end + + def self.details + [ + "If you provide a `public_key`, this will overwrite an existing application. If you want to have this build as a new app version, you shouldn't provide this value.", + "", + "To integrate appetize into your GitHub workflow check out the [device_grid guide](https://github.com/fastlane/fastlane/blob/master/fastlane/lib/fastlane/actions/device_grid/README.md)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_host, + env_name: "APPETIZE_API_HOST", + description: "Appetize API host", + default_value: 'api.appetize.io', + verify_block: proc do |value| + UI.user_error!("API host should not contain the scheme e.g. `https`") if value.start_with?('https') + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "APPETIZE_API_TOKEN", + sensitive: true, + description: "Appetize.io API Token", + verify_block: proc do |value| + UI.user_error!("No API Token for Appetize.io given, pass using `api_token: 'token'`") unless value.to_s.length > 0 + end), + FastlaneCore::ConfigItem.new(key: :url, + env_name: "APPETIZE_URL", + description: "URL from which the ipa file can be fetched. Alternative to :path", + optional: true), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "APPETIZE_PLATFORM", + description: "Platform. 
Either `ios` or `android`", + default_value: 'ios'), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "APPETIZE_FILE_PATH", + description: "Path to zipped build on the local filesystem. Either this or `url` must be specified", + optional: true), + FastlaneCore::ConfigItem.new(key: :public_key, + env_name: "APPETIZE_PUBLICKEY", + description: "If not provided, a new app will be created. If provided, the existing build will be overwritten", + optional: true, + verify_block: proc do |value| + if value.start_with?("private_") + UI.user_error!("You provided a private key to appetize, please provide the public key") + end + end), + FastlaneCore::ConfigItem.new(key: :note, + env_name: "APPETIZE_NOTE", + description: "Notes you wish to add to the uploaded app", + optional: true), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "APPETIZE_TIMEOUT", + description: "The number of seconds to wait until automatically ending the session due to user inactivity. Must be 30, 60, 90, 120, 180, 300, 600, 1800, 3600 or 7200. Default is 120", + type: Integer, + optional: true, + verify_block: proc do |value| + UI.user_error!("The value provided doesn't match any of the supported options.") unless [30, 60, 90, 120, 180, 300, 600, 1800, 3600, 7200].include?(value) + end) + ] + end + + def self.output + [ + ['APPETIZE_API_HOST', 'Appetize API host.'], + ['APPETIZE_PUBLIC_KEY', 'a public identifier for your app. Use this to update your app after it has been initially created.'], + ['APPETIZE_APP_URL', 'a page to test and share your app.'], + ['APPETIZE_MANAGE_URL', 'a page to manage your app.'] + ] + end + + def self.authors + ["klundberg", "giginet", "steprescott"] + end + + def self.category + :beta + end + + def self.example_code + [ + 'appetize( + path: "./MyApp.zip", + api_token: "yourapitoken", # get it from https://appetize.io/docs#request-api-token + public_key: "your_public_key" # get it from https://appetize.io/dashboard + )', + 'appetize( + path: "./MyApp.zip", + api_host: "company.appetize.io", # only needed for enterprise hosted solution + api_token: "yourapitoken", # get it from https://appetize.io/docs#request-api-token + public_key: "your_public_key" # get it from https://appetize.io/dashboard + )' + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize_viewing_url_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize_viewing_url_generator.rb new file mode 100644 index 0000000..6acdfbc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appetize_viewing_url_generator.rb @@ -0,0 +1,135 @@ +module Fastlane + module Actions + module SharedValues + end + + class AppetizeViewingUrlGeneratorAction < Action + def self.run(params) + link = "#{params[:base_url]}/#{params[:public_key]}" + + if params[:scale].nil? 
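+ # Illustrative result (not part of the vendored source): with the defaults below and
+ # a hypothetical public key "abc123", the generated link looks like:
+ #
+ #   https://appetize.io/embed/abc123?autoplay=true&orientation=portrait&device=iphone5s&deviceColor=black&scale=75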
# sensible default values for scaling + case params[:device].downcase.to_sym + when :iphone6splus, :iphone6plus + params[:scale] = "50" + when :ipadair, :ipadair2 + params[:scale] = "50" + else + params[:scale] = "75" + end + end + + url_params = [] + url_params << "autoplay=true" + url_params << "orientation=#{params[:orientation]}" + url_params << "device=#{params[:device]}" + url_params << "deviceColor=#{params[:color]}" + url_params << "scale=#{params[:scale]}" + url_params << "launchUrl=#{params[:launch_url]}" if params[:launch_url] + url_params << "language=#{params[:language]}" if params[:language] + url_params << "osVersion=#{params[:os_version]}" if params[:os_version] + url_params << "params=#{CGI.escape(params[:params])}" if params[:params] + url_params << "proxy=#{CGI.escape(params[:proxy])}" if params[:proxy] + + return link + "?" + url_params.join("&") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Generate an URL for appetize simulator" + end + + def self.details + "Check out the [device_grid guide](https://github.com/fastlane/fastlane/blob/master/fastlane/lib/fastlane/actions/device_grid/README.md) for more information" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :public_key, + env_name: "APPETIZE_PUBLICKEY", + description: "Public key of the app you wish to update", + sensitive: true, + default_value: Actions.lane_context[SharedValues::APPETIZE_PUBLIC_KEY], + default_value_dynamic: true, + optional: false, + verify_block: proc do |value| + if value.start_with?("private_") + UI.user_error!("You provided a private key to appetize, please provide the public key") + end + end), + FastlaneCore::ConfigItem.new(key: :base_url, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_BASE", + description: "Base URL of Appetize service", + default_value: "https://appetize.io/embed", + optional: true), + FastlaneCore::ConfigItem.new(key: :device, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_DEVICE", + description: "Device type: iphone4s, iphone5s, iphone6, iphone6plus, ipadair, iphone6s, iphone6splus, ipadair2, nexus5, nexus7 or nexus9", + default_value: "iphone5s"), + FastlaneCore::ConfigItem.new(key: :scale, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_SCALE", + description: "Scale of the simulator", + optional: true, + verify_block: proc do |value| + available = ["25", "50", "75", "100"] + UI.user_error!("Invalid scale, available: #{available.join(', ')}") unless available.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :orientation, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_ORIENTATION", + description: "Device orientation", + default_value: "portrait", + verify_block: proc do |value| + available = ["portrait", "landscape"] + UI.user_error!("Invalid device, available: #{available.join(', ')}") unless available.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :language, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_LANGUAGE", + description: "Device language in ISO 639-1 language code, e.g. 
'de'", + optional: true), + FastlaneCore::ConfigItem.new(key: :color, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_COLOR", + description: "Color of the device", + default_value: "black", + verify_block: proc do |value| + available = ["black", "white", "silver", "gray"] + UI.user_error!("Invalid device color, available: #{available.join(', ')}") unless available.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :launch_url, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_LAUNCH_URL", + description: "Specify a deep link to open when your app is launched", + optional: true), + FastlaneCore::ConfigItem.new(key: :os_version, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_OS_VERSION", + description: "The operating system version on which to run your app, e.g. 10.3, 8.0", + optional: true), + FastlaneCore::ConfigItem.new(key: :params, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_PARAMS", + description: "Specify params value to be passed to Appetize", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy, + env_name: "APPETIZE_VIEWING_URL_GENERATOR_PROXY", + description: "Specify a HTTP proxy to be passed to Appetize", + optional: true) + ] + end + + def self.category + :misc + end + + def self.return_value + "The URL to preview the iPhone app" + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appium.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appium.rb new file mode 100644 index 0000000..ff2dfdc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appium.rb @@ -0,0 +1,181 @@ +module Fastlane + module Actions + class AppiumAction < Action + INVOKE_TIMEOUT = 30 + APPIUM_PATH_HOMEBREW = '/usr/local/bin/appium' + APPIUM_APP_PATH = '/Applications/Appium.app' + APPIUM_APP_BUNDLE_PATH = 'Contents/Resources/node_modules/.bin/appium' + + def self.run(params) + Actions.verify_gem!('rspec') + Actions.verify_gem!('appium_lib') + + require 'rspec' + require 'appium_lib' unless Helper.test? + + FastlaneCore::PrintTable.print_values( + config: params, + title: "Summary for Appium Action" + ) + + if params[:invoke_appium_server] + appium_pid = invoke_appium_server(params) + wait_for_appium_server(params) + end + + configure_rspec(params) + + rspec_args = [] + rspec_args << params[:spec_path] + status = RSpec::Core::Runner.run(rspec_args).to_i + if status != 0 + UI.user_error!("Failed to run Appium spec. status code: #{status}") + end + ensure + Actions.sh("kill #{appium_pid}") if appium_pid + end + + def self.invoke_appium_server(params) + appium = detect_appium(params) + Process.spawn("#{appium} -a #{params[:host]} -p #{params[:port]}") + end + + def self.detect_appium(params) + appium_path = params[:appium_path] || `which appium`.to_s.strip + + if appium_path.empty? 
+ if File.exist?(APPIUM_PATH_HOMEBREW) + appium_path = APPIUM_PATH_HOMEBREW + elsif File.exist?(APPIUM_APP_PATH) + appium_path = APPIUM_APP_PATH + end + end + + unless File.exist?(appium_path) + UI.user_error!("You have to install Appium using `npm install -g appium`") + end + + if appium_path.end_with?('.app') + appium_path = "#{appium_path}/#{APPIUM_APP_BUNDLE_PATH}" + end + + UI.message("Appium executable detected: #{appium_path}") + appium_path + end + + def self.wait_for_appium_server(params) + loop.with_index do |_, count| + break if `lsof -i:#{params[:port]}`.to_s.length != 0 + + if count * 5 > INVOKE_TIMEOUT + UI.user_error!("Invoke Appium server timed out") + end + sleep(5) + end + end + + def self.configure_rspec(params) + RSpec.configure do |c| + c.before(:each) do + caps = params[:caps] || {} + caps[:platformName] ||= params[:platform] + caps[:autoAcceptAlerts] ||= true + caps[:app] = params[:app_path] + + appium_lib = params[:appium_lib] || {} + + @driver = Appium::Driver.new( + caps: caps, + server_url: params[:host], + port: params[:port], + appium_lib: appium_lib + ).start_driver + Appium.promote_appium_methods(RSpec::Core::ExampleGroup) + end + + c.after(:each) do + @driver.quit unless @driver.nil? + end + end + end + + def self.description + 'Run UI test by Appium with RSpec' + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :platform, + env_name: 'FL_APPIUM_PLATFORM', + description: 'Appium platform name'), + FastlaneCore::ConfigItem.new(key: :spec_path, + env_name: 'FL_APPIUM_SPEC_PATH', + description: 'Path to Appium spec directory'), + FastlaneCore::ConfigItem.new(key: :app_path, + env_name: 'FL_APPIUM_APP_FILE_PATH', + description: 'Path to Appium target app file'), + FastlaneCore::ConfigItem.new(key: :invoke_appium_server, + env_name: 'FL_APPIUM_INVOKE_APPIUM_SERVER', + description: 'Use local Appium server with invoke automatically', + type: Boolean, + default_value: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :host, + env_name: 'FL_APPIUM_HOST', + description: 'Hostname of Appium server', + default_value: '0.0.0.0', + optional: true), + FastlaneCore::ConfigItem.new(key: :port, + env_name: 'FL_APPIUM_PORT', + description: 'HTTP port of Appium server', + type: Integer, + default_value: 4723, + optional: true), + FastlaneCore::ConfigItem.new(key: :appium_path, + env_name: 'FL_APPIUM_EXECUTABLE_PATH', + description: 'Path to Appium executable', + optional: true), + FastlaneCore::ConfigItem.new(key: :caps, + env_name: 'FL_APPIUM_CAPS', + description: 'Hash of caps for Appium::Driver', + type: Hash, + optional: true), + FastlaneCore::ConfigItem.new(key: :appium_lib, + env_name: 'FL_APPIUM_LIB', + description: 'Hash of appium_lib for Appium::Driver', + type: Hash, + optional: true) + ] + end + + def self.author + 'yonekawa' + end + + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + + def self.category + :testing + end + + def self.example_code + [ + 'appium( + app_path: "appium/apps/TargetApp.app", + spec_path: "appium/spec", + platform: "iOS", + caps: { + versionNumber: "9.1", + deviceName: "iPhone 6" + }, + appium_lib: { + wait: 10 + } + )' + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appledoc.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appledoc.rb new file mode 100644 index 0000000..7855726 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appledoc.rb @@ -0,0 +1,226 @@ +module Fastlane + module Actions + module SharedValues + APPLEDOC_DOCUMENTATION_OUTPUT = :APPLEDOC_DOCUMENTATION_OUTPUT + end + + class AppledocAction < Action + ARGS_MAP = { + input: "", + output: "--output", + templates: "--templates", + docset_install_path: "--docset-install-path", + include: "--include", + ignore: "--ignore", + exclude_output: "--exclude-output", + index_desc: "--index-desc", + project_name: "--project-name", + project_version: "--project-version", + project_company: "--project-company", + company_id: "--company-id", + create_html: "--create-html", + create_docset: "--create-docset", + install_docset: "--install-docset", + publish_docset: "--publish-docset", + no_create_docset: "--no-create-docset", + html_anchors: "--html-anchors", + clean_output: "--clean-output", + docset_bundle_id: "--docset-bundle-id", + docset_bundle_name: "--docset-bundle-name", + docset_desc: "--docset-desc", + docset_copyright: "--docset-copyright", + docset_feed_name: "--docset-feed-name", + docset_feed_url: "--docset-feed-url", + docset_feed_formats: "--docset-feed-formats", + docset_package_url: "--docset-package-url", + docset_fallback_url: "--docset-fallback-url", + docset_publisher_id: "--docset-publisher-id", + docset_publisher_name: "--docset-publisher-name", + docset_min_xcode_version: "--docset-min-xcode-version", + docset_platform_family: "--docset-platform-family", + docset_cert_issuer: "--docset-cert-issuer", + docset_cert_signer: "--docset-cert-signer", + docset_bundle_filename: "--docset-bundle-filename", + docset_atom_filename: "--docset-atom-filename", + docset_xml_filename: "--docset-xml-filename", + docset_package_filename: "--docset-package-filename", + options: "", + crossref_format: "--crossref-format", + exit_threshold: "--exit-threshold", + docs_section_title: "--docs-section-title", + warnings: "", + logformat: "--logformat", + verbose: "--verbose" + } + + def self.run(params) + unless Helper.test? + UI.message("Install using `brew install appledoc`") + UI.user_error!("appledoc not installed") if `which appledoc`.length == 0 + end + + params_hash = params.values + + # Check if an output path was given + if params_hash[:output] + Actions.lane_context[SharedValues::APPLEDOC_DOCUMENTATION_OUTPUT] = File.expand_path(params_hash[:output]) + create_output_dir_if_not_exists(params_hash[:output]) + end + + # Maps parameter hash to CLI args + appledoc_args = params_hash_to_cli_args(params_hash) + UI.success("Generating documentation.") + cli_args = appledoc_args.join(' ') + input_cli_arg = Array(params_hash[:input]).map(&:shellescape).join(' ') + command = "appledoc #{cli_args}".strip + " " + input_cli_arg + UI.verbose(command) + Actions.sh(command) + end + + def self.params_hash_to_cli_args(params) + # Remove nil and false value params + params = params.delete_if { |_, v| v.nil? || v == false } + + cli_args = [] + params.each do |key, value| + args = ARGS_MAP[key] + if args.empty? + if key != :input + cli_args << value + end + elsif value.kind_of?(Array) + value.each do |v| + cli_args << cli_param(args, v) + end + else + cli_args << cli_param(args, value) + end + end + + return cli_args + end + + def self.cli_param(k, v) + value = (v != true && v.to_s.length > 0 ? 
"\"#{v}\"" : "") + "#{k} #{value}".strip + end + + def self.create_output_dir_if_not_exists(output_path) + output_dir = File.dirname(output_path) + + # If the output directory doesn't exist, create it + unless Dir.exist?(output_dir) + FileUtils.mkpath(output_dir) + end + end + + def self.description + "Generate Apple-like source code documentation from the source code" + end + + def self.details + "Runs `appledoc [OPTIONS] ` for the project" + end + + def self.available_options + [ + # PATHS + FastlaneCore::ConfigItem.new(key: :input, env_name: "FL_APPLEDOC_INPUT", description: "Path(s) to source file directories or individual source files. Accepts a single path or an array of paths", type: Array), + FastlaneCore::ConfigItem.new(key: :output, env_name: "FL_APPLEDOC_OUTPUT", description: "Output path", optional: true), + FastlaneCore::ConfigItem.new(key: :templates, env_name: "FL_APPLEDOC_TEMPLATES", description: "Template files path", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_install_path, env_name: "FL_APPLEDOC_DOCSET_INSTALL_PATH", description: "DocSet installation path", optional: true), + FastlaneCore::ConfigItem.new(key: :include, env_name: "FL_APPLEDOC_INCLUDE", description: "Include static doc(s) at path", optional: true), + FastlaneCore::ConfigItem.new(key: :ignore, env_name: "FL_APPLEDOC_IGNORE", description: "Ignore given path", type: Array, optional: true), + FastlaneCore::ConfigItem.new(key: :exclude_output, env_name: "FL_APPLEDOC_EXCLUDE_OUTPUT", description: "Exclude given path from output", type: Array, optional: true), + FastlaneCore::ConfigItem.new(key: :index_desc, env_name: "FL_APPLEDOC_INDEX_DESC", description: "File including main index description", optional: true), + + # PROJECT INFO + FastlaneCore::ConfigItem.new(key: :project_name, env_name: "FL_APPLEDOC_PROJECT_NAME", description: "Project name"), + FastlaneCore::ConfigItem.new(key: :project_version, env_name: "FL_APPLEDOC_PROJECT_VERSION", description: "Project version", optional: true), + FastlaneCore::ConfigItem.new(key: :project_company, env_name: "FL_APPLEDOC_PROJECT_COMPANY", description: "Project company"), + FastlaneCore::ConfigItem.new(key: :company_id, env_name: "FL_APPLEDOC_COMPANY_ID", description: "Company UTI (i.e. 
reverse DNS name)", optional: true), + + # OUTPUT GENERATION + FastlaneCore::ConfigItem.new(key: :create_html, env_name: "FL_APPLEDOC_CREATE_HTML", description: "Create HTML", type: Boolean, default_value: false), + FastlaneCore::ConfigItem.new(key: :create_docset, env_name: "FL_APPLEDOC_CREATE_DOCSET", description: "Create documentation set", type: Boolean, default_value: false), + FastlaneCore::ConfigItem.new(key: :install_docset, env_name: "FL_APPLEDOC_INSTALL_DOCSET", description: "Install documentation set to Xcode", type: Boolean, default_value: false), + FastlaneCore::ConfigItem.new(key: :publish_docset, env_name: "FL_APPLEDOC_PUBLISH_DOCSET", description: "Prepare DocSet for publishing", type: Boolean, default_value: false), + FastlaneCore::ConfigItem.new(key: :no_create_docset, env_name: "FL_APPLEDOC_NO_CREATE_DOCSET", description: "Create HTML and skip creating a DocSet", type: Boolean, default_value: false), + FastlaneCore::ConfigItem.new(key: :html_anchors, env_name: "FL_APPLEDOC_HTML_ANCHORS", description: "The html anchor format to use in DocSet HTML", optional: true), + FastlaneCore::ConfigItem.new(key: :clean_output, env_name: "FL_APPLEDOC_CLEAN_OUTPUT", description: "Remove contents of output path before starting", type: Boolean, default_value: false), + + # DOCUMENTATION SET INFO + FastlaneCore::ConfigItem.new(key: :docset_bundle_id, env_name: "FL_APPLEDOC_DOCSET_BUNDLE_ID", description: "DocSet bundle identifier", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_bundle_name, env_name: "FL_APPLEDOC_DOCSET_BUNDLE_NAME", description: "DocSet bundle name", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_desc, env_name: "FL_APPLEDOC_DOCSET_DESC", description: "DocSet description", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_copyright, env_name: "FL_APPLEDOC_DOCSET_COPYRIGHT", description: "DocSet copyright message", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_feed_name, env_name: "FL_APPLEDOC_DOCSET_FEED_NAME", description: "DocSet feed name", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_feed_url, env_name: "FL_APPLEDOC_DOCSET_FEED_URL", description: "DocSet feed URL", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_feed_formats, env_name: "FL_APPLEDOC_DOCSET_FEED_FORMATS", description: "DocSet feed formats. Separated by a comma [atom,xml]", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_package_url, env_name: "FL_APPLEDOC_DOCSET_PACKAGE_URL", description: "DocSet package (.xar) URL", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_fallback_url, env_name: "FL_APPLEDOC_DOCSET_FALLBACK_URL", description: "DocSet fallback URL", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_publisher_id, env_name: "FL_APPLEDOC_DOCSET_PUBLISHER_ID", description: "DocSet publisher identifier", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_publisher_name, env_name: "FL_APPLEDOC_DOCSET_PUBLISHER_NAME", description: "DocSet publisher name", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_min_xcode_version, env_name: "FL_APPLEDOC_DOCSET_MIN_XCODE_VERSION", description: "DocSet min. 
Xcode version", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_platform_family, env_name: "FL_APPLEDOC_DOCSET_PLATFORM_FAMILY", description: "DocSet platform family", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_cert_issuer, env_name: "FL_APPLEDOC_DOCSET_CERT_ISSUER", description: "DocSet certificate issuer", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_cert_signer, env_name: "FL_APPLEDOC_DOCSET_CERT_SIGNER", description: "DocSet certificate signer", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_bundle_filename, env_name: "FL_APPLEDOC_DOCSET_BUNDLE_FILENAME", description: "DocSet bundle filename", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_atom_filename, env_name: "FL_APPLEDOC_DOCSET_ATOM_FILENAME", description: "DocSet atom feed filename", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_xml_filename, env_name: "FL_APPLEDOC_DOCSET_XML_FILENAME", description: "DocSet xml feed filename", optional: true), + FastlaneCore::ConfigItem.new(key: :docset_package_filename, env_name: "FL_APPLEDOC_DOCSET_PACKAGE_FILENAME", description: "DocSet package (.xar,.tgz) filename", optional: true), + + # OPTIONS + FastlaneCore::ConfigItem.new(key: :options, env_name: "FL_APPLEDOC_OPTIONS", description: "Documentation generation options", optional: true), + FastlaneCore::ConfigItem.new(key: :crossref_format, env_name: "FL_APPLEDOC_OPTIONS_CROSSREF_FORMAT", description: "Cross reference template regex", optional: true), + FastlaneCore::ConfigItem.new(key: :exit_threshold, env_name: "FL_APPLEDOC_OPTIONS_EXIT_THRESHOLD", description: "Exit code threshold below which 0 is returned", type: Integer, default_value: 2, optional: true), + FastlaneCore::ConfigItem.new(key: :docs_section_title, env_name: "FL_APPLEDOC_OPTIONS_DOCS_SECTION_TITLE", description: "Title of the documentation section (defaults to \"Programming Guides\"", optional: true), + + # WARNINGS + FastlaneCore::ConfigItem.new(key: :warnings, env_name: "FL_APPLEDOC_WARNINGS", description: "Documentation generation warnings", optional: true), + + # MISCELLANEOUS + FastlaneCore::ConfigItem.new(key: :logformat, env_name: "FL_APPLEDOC_LOGFORMAT", description: "Log format [0-3]", type: Integer, optional: true), + FastlaneCore::ConfigItem.new(key: :verbose, env_name: "FL_APPLEDOC_VERBOSE", description: "Log verbosity level [0-6,xcode]", skip_type_validation: true, optional: true) # allow Integer, String + ] + end + + def self.output + [ + ['APPLEDOC_DOCUMENTATION_OUTPUT', 'Documentation set output path'] + ] + end + + def self.authors + ["alexmx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.category + :documentation + end + + def self.example_code + [ + 'appledoc( + project_name: "MyProjectName", + project_company: "Company Name", + input: [ + "MyProjectSources", + "MyProjectSourceFile.h" + ], + ignore: [ + "ignore/path/1", + "ingore/path/2" + ], + options: "--keep-intermediate-files --search-undocumented-doc", + warnings: "--warn-missing-output-path --warn-missing-company-id" + )' + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appstore.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appstore.rb new file mode 100644 index 0000000..4753479 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/appstore.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 
'fastlane/actions/upload_to_app_store' + class AppstoreAction < UploadToAppStoreAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `upload_to_app_store` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/apteligent.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/apteligent.rb new file mode 100644 index 0000000..f42a68c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/apteligent.rb @@ -0,0 +1,106 @@ +module Fastlane + module Actions + class ApteligentAction < Action + def self.run(params) + command = [] + command << "curl" + command += upload_options(params) + command << upload_url(params[:app_id].shellescape) + + # Fastlane::Actions.sh has buffering issues, no progress bar is shown in real time + # will re-enable it when it is fixed + # result = Fastlane::Actions.sh(command.join(' '), log: false) + shell_command = command.join(' ') + return shell_command if Helper.test? + result = Actions.sh(shell_command) + fail_on_error(result) + end + + def self.fail_on_error(result) + if result != "200" + UI.crash!("Server error, failed to upload the dSYM file.") + else + UI.success('dSYM successfully uploaded to Apteligent!') + end + end + + def self.upload_url(app_id) + "https://api.crittercism.com/api_beta/dsym/#{app_id}" + end + + def self.dsym_path(params) + file_path = params[:dsym] + file_path ||= Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH] || ENV[SharedValues::DSYM_OUTPUT_PATH.to_s] + file_path ||= Actions.lane_context[SharedValues::DSYM_ZIP_PATH] || ENV[SharedValues::DSYM_ZIP_PATH.to_s] + + if file_path + expanded_file_path = File.expand_path(file_path) + UI.user_error!("Couldn't find file at path '#{expanded_file_path}'") unless File.exist?(expanded_file_path) + return expanded_file_path + else + UI.user_error!("Couldn't find dSYM file") + end + end + + def self.upload_options(params) + file_path = dsym_path(params).shellescape + + # rubocop: disable Style/FormatStringToken + options = [] + options << "--write-out %{http_code} --silent --output /dev/null" + options << "-F dsym=@#{file_path}" + options << "-F key=#{params[:api_key].shellescape}" + options + # rubocop: enable Style/FormatStringToken + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload dSYM file to [Apteligent (Crittercism)](http://www.apteligent.com/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :dsym, + env_name: "FL_APTELIGENT_FILE", + description: "dSYM.zip file to upload to Apteligent", + optional: true), + FastlaneCore::ConfigItem.new(key: :app_id, + env_name: "FL_APTELIGENT_APP_ID", + description: "Apteligent App ID key e.g. 569f5c87cb99e10e00c7xxxx", + optional: false), + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "FL_APTELIGENT_API_KEY", + sensitive: true, + code_gen_sensitive: true, + description: "Apteligent App API key e.g. IXPQIi8yCbHaLliqzRoo065tH0lxxxxx", + optional: false) + ] + end + + def self.authors + ["Mo7amedFouad"] + end + + def self.is_supported?(platform) + platform == :ios + end
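+ # NOTE (editor): the command assembled in run is equivalent to this curl call
+ # (a sketch; the dSYM path and credentials are placeholders):
+ #
+ #   curl --write-out %{http_code} --silent --output /dev/null \
+ #     -F dsym=@/path/to/App.app.dSYM.zip \
+ #     -F key=YOUR_API_KEY \
+ #     https://api.crittercism.com/api_beta/dsym/YOUR_APP_ID
+ + def self.example_code + [ + 'apteligent( + app_id: "...", + api_key: "..." 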
+ )' + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/artifactory.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/artifactory.rb new file mode 100644 index 0000000..46aabb8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/artifactory.rb @@ -0,0 +1,189 @@ +module Fastlane + module Actions + module SharedValues + ARTIFACTORY_DOWNLOAD_URL = :ARTIFACTORY_DOWNLOAD_URL + ARTIFACTORY_DOWNLOAD_SIZE = :ARTIFACTORY_DOWNLOAD_SIZE + end + + class ArtifactoryAction < Action + def self.run(params) + Actions.verify_gem!('artifactory') + + require 'artifactory' + + UI.user_error!("Cannot connect to Artifactory - 'username' was provided but it's missing 'password'") if params[:username] && !params[:password] + UI.user_error!("Cannot connect to Artifactory - 'password' was provided but it's missing 'username'") if !params[:username] && params[:password] + UI.user_error!("Cannot connect to Artifactory - either 'api_key', or 'username' and 'password' must be provided") if !params[:api_key] && !params[:username] + file_path = File.absolute_path(params[:file]) + + if File.exist?(file_path) + client = connect_to_artifactory(params) + artifact = Artifactory::Resource::Artifact.new + artifact.client = client + artifact.local_path = file_path + artifact.checksums = { + "sha1" => Digest::SHA1.file(file_path), + "md5" => Digest::MD5.file(file_path) + } + UI.message("Uploading file: #{artifact.local_path} ...") + upload = artifact.upload(params[:repo], params[:repo_path], params[:properties]) + + Actions.lane_context[SharedValues::ARTIFACTORY_DOWNLOAD_URL] = upload.uri + Actions.lane_context[SharedValues::ARTIFACTORY_DOWNLOAD_SIZE] = upload.size + + UI.message("Uploaded Artifact:") + UI.message("Repo: #{upload.repo}") + UI.message("URI: #{upload.uri}") + UI.message("Size: #{upload.size}") + UI.message("SHA1: #{upload.sha1}") + else + UI.message("File not found: '#{file_path}'") + end + end + + def self.connect_to_artifactory(params) + config_keys = [:endpoint, :username, :password, :api_key, :ssl_pem_file, :ssl_verify, :proxy_username, :proxy_password, :proxy_address, :proxy_port, :read_timeout] + config = params.values.select do |key| + config_keys.include?(key) + end + Artifactory::Client.new(config) + end + + def self.description + 'This action uploads an artifact to artifactory' + end + + def self.details + 'Connect to the artifactory server using either a username/password or an api_key' + end + + def self.is_supported?(platform) + true + end + + def self.author + ["koglinjg", "tommeier"] + end + + def self.output + [ + ['ARTIFACTORY_DOWNLOAD_URL', 'The download url for file uploaded'], + ['ARTIFACTORY_DOWNLOAD_SIZE', 'The reported file size for file uploaded'] + ] + end + + def self.example_code + [ + 'artifactory( + username: "username", + password: "password", + endpoint: "https://artifactory.example.com/artifactory/", + file: "example.ipa", # File to upload + repo: "mobile_artifacts", # Artifactory repo + repo_path: "/ios/appname/example-major.minor.ipa" # Path to place the artifact including its filename + )', + 'artifactory( + api_key: "api_key", + endpoint: "https://artifactory.example.com/artifactory/", + file: "example.ipa", # File to upload + repo: "mobile_artifacts", # Artifactory repo + repo_path: "/ios/appname/example-major.minor.ipa" # Path to place the artifact including its filename + )' + ] + end + + def 
self.category + :misc + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :file, + env_name: "FL_ARTIFACTORY_FILE", + description: "File to be uploaded to artifactory", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo, + env_name: "FL_ARTIFACTORY_REPO", + description: "Artifactory repo to put the file in", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo_path, + env_name: "FL_ARTIFACTORY_REPO_PATH", + description: "Path to deploy within the repo, including filename", + optional: false), + FastlaneCore::ConfigItem.new(key: :endpoint, + env_name: "FL_ARTIFACTORY_ENDPOINT", + description: "Artifactory endpoint", + optional: false), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FL_ARTIFACTORY_USERNAME", + description: "Artifactory username", + optional: true, + conflicting_options: [:api_key], + conflict_block: proc do |value| + UI.user_error!("You can't use option '#{value.key}' along with 'username'") + end), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_ARTIFACTORY_PASSWORD", + description: "Artifactory password", + sensitive: true, + code_gen_sensitive: true, + optional: true, + conflicting_options: [:api_key], + conflict_block: proc do |value| + UI.user_error!("You can't use option '#{value.key}' along with 'password'") + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "FL_ARTIFACTORY_API_KEY", + description: "Artifactory API key", + sensitive: true, + code_gen_sensitive: true, + optional: true, + conflicting_options: [:username, :password], + conflict_block: proc do |value| + UI.user_error!("You can't use option '#{value.key}' along with 'api_key'") + end), + FastlaneCore::ConfigItem.new(key: :properties, + env_name: "FL_ARTIFACTORY_PROPERTIES", + description: "Artifact properties hash", + type: Hash, + default_value: {}, + optional: true), + FastlaneCore::ConfigItem.new(key: :ssl_pem_file, + env_name: "FL_ARTIFACTORY_SSL_PEM_FILE", + description: "Location of pem file to use for ssl verification", + default_value: nil, + optional: true), + FastlaneCore::ConfigItem.new(key: :ssl_verify, + env_name: "FL_ARTIFACTORY_SSL_VERIFY", + description: "Verify SSL", + type: Boolean, + default_value: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_username, + env_name: "FL_ARTIFACTORY_PROXY_USERNAME", + description: "Proxy username", + default_value: nil, + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_password, + env_name: "FL_ARTIFACTORY_PROXY_PASSWORD", + description: "Proxy password", + sensitive: true, + code_gen_sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_address, + env_name: "FL_ARTIFACTORY_PROXY_ADDRESS", + description: "Proxy address", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_port, + env_name: "FL_ARTIFACTORY_PROXY_PORT", + description: "Proxy port", + optional: true), + FastlaneCore::ConfigItem.new(key: :read_timeout, + env_name: "FL_ARTIFACTORY_READ_TIMEOUT", + description: "Read timeout", + optional: true) + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/automatic_code_signing.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/automatic_code_signing.rb new file mode 100644 index 0000000..737a0d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/automatic_code_signing.rb @@ -0,0 +1,220 @@ +require 'xcodeproj' +module Fastlane + module Actions + class 
AutomaticCodeSigningAction < Action + def self.run(params) + UI.deprecated("The `automatic_code_signing` action has been deprecated,") + UI.deprecated("Please use `update_code_signing_settings` action instead.") + FastlaneCore::PrintTable.print_values(config: params, title: "Summary for Automatic Codesigning") + path = params[:path] + path = File.join(File.expand_path(path), "project.pbxproj") + + project = Xcodeproj::Project.open(params[:path]) + UI.user_error!("Could not find path to project config '#{path}'. Pass the path to your project (not workspace)!") unless File.exist?(path) + UI.message("Updating the Automatic Codesigning flag to #{params[:use_automatic_signing] ? 'enabled' : 'disabled'} for the given project '#{path}'") + + unless project.root_object.attributes["TargetAttributes"] + UI.user_error!("Seems to be a very old project file format - please open your project file in a more recent version of Xcode") + return false + end + + target_dictionary = project.targets.map { |f| { name: f.name, uuid: f.uuid, build_configuration_list: f.build_configuration_list } } + target_attributes = project.root_object.attributes["TargetAttributes"] + changed_targets = [] + + # make sure TargetAttributes exist for all targets + target_dictionary.each do |props| + unless target_attributes.key?(props[:uuid]) + target_attributes[props[:uuid]] = {} + end + end + + target_attributes.each do |target, sett| + found_target = target_dictionary.detect { |h| h[:uuid] == target } + if params[:targets] + # get target name + unless params[:targets].include?(found_target[:name]) + UI.important("Skipping #{found_target[:name]} not selected (#{params[:targets].join(',')})") + next + end + end + + style_value = params[:use_automatic_signing] ? 'Automatic' : 'Manual' + build_configuration_list = found_target[:build_configuration_list] + build_configuration_list.set_setting("CODE_SIGN_STYLE", style_value) + sett["ProvisioningStyle"] = style_value + + if params[:team_id] + sett["DevelopmentTeam"] = params[:team_id] + build_configuration_list.set_setting("DEVELOPMENT_TEAM", params[:team_id]) + UI.important("Set Team id to: #{params[:team_id]} for target: #{found_target[:name]}") + end + if params[:code_sign_identity] + build_configuration_list.set_setting("CODE_SIGN_IDENTITY", params[:code_sign_identity]) + + # We also need to update the value if it was overridden for a specific SDK + build_configuration_list.build_configurations.each do |build_configuration| + codesign_build_settings_keys = build_configuration.build_settings.keys.select { |key| key.to_s.match(/CODE_SIGN_IDENTITY.*/) } + codesign_build_settings_keys.each do |setting| + build_configuration_list.set_setting(setting, params[:code_sign_identity]) + end + end + UI.important("Set Code Sign identity to: #{params[:code_sign_identity]} for target: #{found_target[:name]}") + end + if params[:profile_name] + build_configuration_list.set_setting("PROVISIONING_PROFILE_SPECIFIER", params[:profile_name]) + UI.important("Set Provisioning Profile name to: #{params[:profile_name]} for target: #{found_target[:name]}") + end + # Since Xcode 8, this is no longer needed, you simply use PROVISIONING_PROFILE_SPECIFIER + if params[:profile_uuid] + build_configuration_list.set_setting("PROVISIONING_PROFILE", params[:profile_uuid]) + UI.important("Set Provisioning Profile UUID to: #{params[:profile_uuid]} for target: #{found_target[:name]}") + end + if params[:bundle_identifier] + build_configuration_list.set_setting("PRODUCT_BUNDLE_IDENTIFIER", params[:bundle_identifier]) + 
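+ # NOTE (editor): set_setting on a target's configuration list applies the value
+ # to every build configuration of that target (e.g. Debug and Release), so the
+ # overrides above do not need to be repeated per configuration.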
UI.important("Set Bundle identifier to: #{params[:bundle_identifier]} for target: #{found_target[:name]}") + end + + changed_targets << found_target[:name] + end + project.save + + if changed_targets.empty? + UI.important("None of the specified targets has been modified") + UI.important("available targets:") + target_dictionary.each do |target| + UI.important("\t* #{target[:name]}") + end + else + UI.success("Successfully updated project settings to use Code Sign Style = '#{params[:use_automatic_signing] ? 'Automatic' : 'Manual'}'") + UI.success("Modified Targets:") + changed_targets.each do |target| + UI.success("\t * #{target}") + end + end + + params[:use_automatic_signing] + end + + def self.alias_used(action_alias, params) + params[:use_automatic_signing] = true if action_alias == "enable_automatic_code_signing" + end + + def self.aliases + ["enable_automatic_code_signing", "disable_automatic_code_signing"] + end + + def self.description + "Configures Xcode's Codesigning options" + end + + def self.details + "Configures Xcode's Codesigning options of all targets in the project" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_PROJECT_SIGNING_PROJECT_PATH", + description: "Path to your Xcode project", + code_gen_sensitive: true, + default_value: Dir['*.xcodeproj'].first, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Path is invalid") unless File.exist?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :use_automatic_signing, + env_name: "FL_PROJECT_USE_AUTOMATIC_SIGNING", + description: "Defines if project should use automatic signing", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :team_id, + env_name: "FASTLANE_TEAM_ID", + optional: true, + description: "Team ID, is used when upgrading project"), + FastlaneCore::ConfigItem.new(key: :targets, + env_name: "FL_PROJECT_SIGNING_TARGETS", + optional: true, + type: Array, + description: "Specify targets you want to toggle the signing mech. 
(default to all targets)"), + FastlaneCore::ConfigItem.new(key: :code_sign_identity, + env_name: "FL_CODE_SIGN_IDENTITY", + description: "Code signing identity type (iPhone Developer, iPhone Distribution)", + optional: true), + FastlaneCore::ConfigItem.new(key: :profile_name, + env_name: "FL_PROVISIONING_PROFILE_SPECIFIER", + description: "Provisioning profile name to use for code signing", + optional: true), + FastlaneCore::ConfigItem.new(key: :profile_uuid, + env_name: "FL_PROVISIONING_PROFILE", + description: "Provisioning profile UUID to use for code signing", + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_identifier, + env_name: "FL_APP_IDENTIFIER", + description: "Application Product Bundle Identifier", + optional: true) + ] + end + + def self.output + end + + def self.example_code + [ + '# enable automatic code signing + enable_automatic_code_signing', + 'enable_automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj" + )', + '# disable automatic code signing + disable_automatic_code_signing', + 'disable_automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj" + )', + '# also set team id + disable_automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj", + team_id: "XXXX" + )', + '# Only specific targets + disable_automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj", + use_automatic_signing: false, + targets: ["demo"] + ) + ', + ' # via generic action + automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj", + use_automatic_signing: false + )', + 'automatic_code_signing( + path: "demo-project/demo/demo.xcodeproj", + use_automatic_signing: true + )' + + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + "Please use `update_code_signing_settings` action instead." 
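+ # NOTE (editor): a minimal migration sketch to the successor action, using the
+ # same option names this deprecated action accepts (values are illustrative):
+ #
+ #   update_code_signing_settings(
+ #     path: "demo-project/demo/demo.xcodeproj",
+ #     use_automatic_signing: false,
+ #     team_id: "XXXX"
+ #   )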
+ end + + def self.return_value + "The current status (boolean) of codesigning after modification" + end + + def self.authors + ["mathiasAichinger", "hjanuschka", "p4checo", "portellaa", "aeons"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_file.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_file.rb new file mode 100644 index 0000000..ff503ac --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_file.rb @@ -0,0 +1,41 @@ +module Fastlane + module Actions + class BackupFileAction < Action + def self.run(params) + path = params[:path] + FileUtils.cp(path, "#{path}.back", preserve: true) + UI.message("Successfully created a backup 💾") + end + + def self.description + 'This action backs up your file to "[path].back"' + end + + def self.is_supported?(platform) + true + end + + def self.author + "gin0606" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + description: "Path to the file you want to backup", + optional: false) + ] + end + + def self.example_code + [ + 'backup_file(path: "/path/to/file")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_xcarchive.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_xcarchive.rb new file mode 100644 index 0000000..5d44572 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/backup_xcarchive.rb @@ -0,0 +1,135 @@ +module Fastlane + module Actions + module SharedValues + BACKUP_XCARCHIVE_FILE = :BACKUP_XCARCHIVE_FILE + end + + class BackupXcarchiveAction < Action + require 'fileutils' + + def self.run(params) + # Get params + xcarchive = params[:xcarchive] + base_destination = params[:destination] + zipped = params[:zip] + zip_filename = params[:zip_filename] + versioned = params[:versioned] + + # Prepare destination folder + full_destination = base_destination + + if versioned + date = Time.now.strftime("%Y-%m-%d") + version = `agvtool what-marketing-version -terse1` + subfolder = "#{date} #{version.strip}" + full_destination = File.expand_path(subfolder, base_destination) + end + + FileUtils.mkdir(full_destination) unless File.exist?(full_destination) + + # Save archive to destination + if zipped + Dir.mktmpdir("backup_xcarchive") do |dir| + UI.message("Compressing #{xcarchive}") + xcarchive_folder = File.expand_path(File.dirname(xcarchive)) + xcarchive_file = File.basename(xcarchive) + zip_file = if zip_filename + File.join(dir, "#{zip_filename}.xcarchive.zip") + else + File.join(dir, "#{xcarchive_file}.zip") + end + + # Create zip + Actions.sh(%(cd "#{xcarchive_folder}" && zip -r -X -y "#{zip_file}" "#{xcarchive_file}" > /dev/null)) + + # Move to its final destination + FileUtils.mv(zip_file, full_destination) + + Actions.lane_context[SharedValues::BACKUP_XCARCHIVE_FILE] = "#{full_destination}/#{File.basename(zip_file)}" + end + else + # Copy xcarchive file + FileUtils.cp_r(xcarchive, full_destination) + + Actions.lane_context[SharedValues::BACKUP_XCARCHIVE_FILE] = "#{full_destination}/#{File.basename(xcarchive)}" + end + end
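+ # NOTE (editor): typical use right after an archive step (a sketch; when
+ # :xcarchive is omitted it defaults to lane_context[XCODEBUILD_ARCHIVE]):
+ #
+ #   backup_xcarchive(destination: "/Volumes/backups/")
+ #   UI.message(lane_context[SharedValues::BACKUP_XCARCHIVE_FILE])
+ + ##################################################### + # @!group Documentation + ##################################################### + + def 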
self.description + "Save your [zipped] xcarchive elsewhere from default path" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcarchive, + description: 'Path to your xcarchive file. Optional if you use the `xcodebuild` action', + default_value: Actions.lane_context[SharedValues::XCODEBUILD_ARCHIVE], + default_value_dynamic: true, + optional: false, + env_name: 'BACKUP_XCARCHIVE_ARCHIVE', + verify_block: proc do |value| + UI.user_error!("Couldn't find xcarchive file at path '#{value}'") if !Helper.test? && !File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :destination, + description: 'Where your archive will be placed', + optional: false, + env_name: 'BACKUP_XCARCHIVE_DESTINATION', + verify_block: proc do |value| + UI.user_error!("Couldn't find the destination folder at '#{value}'") if !Helper.test? && !File.directory?(value) && !File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :zip, + description: 'Enable compression of the archive', + type: Boolean, + default_value: true, + optional: true, + env_name: 'BACKUP_XCARCHIVE_ZIP'), + FastlaneCore::ConfigItem.new(key: :zip_filename, + description: 'Filename of the compressed archive. Will be appended by `.xcarchive.zip`. Default value is the output xcarchive filename', + default_value_dynamic: true, + optional: true, + env_name: 'BACKUP_XCARCHIVE_ZIP_FILENAME'), + FastlaneCore::ConfigItem.new(key: :versioned, + description: 'Create a versioned (date and app version) subfolder where to put the archive', + type: Boolean, + default_value: true, + optional: true, + env_name: 'BACKUP_XCARCHIVE_VERSIONED') + ] + end + + def self.output + [ + ['BACKUP_XCARCHIVE_FILE', 'Path to your saved xcarchive (compressed) file'] + ] + end + + def self.author + ['dral3x'] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'backup_xcarchive( + xcarchive: "/path/to/file.xcarchive", # Optional if you use the `xcodebuild` action + destination: "/somewhere/else/", # Where the backup should be created + zip_filename: "file.xcarchive", # The name of the backup file + zip: false, # Enable compression of the archive. Defaults to `true`. 
+ versioned: true # Create a versioned (date and app version) subfolder where to put the archive + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/badge.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/badge.rb new file mode 100644 index 0000000..d2be6bb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/badge.rb @@ -0,0 +1,151 @@ +module Fastlane + module Actions + class BadgeAction < Action + def self.run(params) + UI.important('The badge action has been deprecated,') + UI.important('please check out the badge plugin here:') + UI.important('https://github.com/HazAT/fastlane-plugin-badge') + Actions.verify_gem!('badge') + require 'badge' + options = { + dark: params[:dark], + custom: params[:custom], + no_badge: params[:no_badge], + shield: params[:shield], + alpha: params[:alpha], + shield_io_timeout: params[:shield_io_timeout], + glob: params[:glob], + alpha_channel: params[:alpha_channel], + shield_gravity: params[:shield_gravity], + shield_no_resize: params[:shield_no_resize] + } + begin + Badge::Runner.new.run(params[:path], options) + rescue => e + # We want to catch this error and raise our own so that we are not counting this as a crash in our metrics + UI.verbose(e.backtrace.join("\n")) + UI.user_error!("Something went wrong while running badge: #{e}") + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Automatically add a badge to your app icon" + end + + def self.details + [ + "Please use the [badge plugin](https://github.com/HazAT/fastlane-plugin-badge) instead.", + "This action will add a light/dark badge onto your app icon.", + "You can also provide your custom badge/overlay or add a shield for more customization.", + "More info: [https://github.com/HazAT/badge](https://github.com/HazAT/badge)", + "**Note**: If you want to reset the badge back to default, you can use `sh 'git checkout -- <path>/Assets.xcassets/'`." 
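+ # NOTE (editor): with the plugin installed (`fastlane add_plugin badge`), the
+ # equivalent call is the plugin's `add_badge` action, e.g. `add_badge(dark: true)`.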
+ ].join("\n") + end + + def self.example_code + [ + 'badge(dark: true)', + 'badge(alpha: true)', + 'badge(custom: "/Users/xxx/Desktop/badge.png")', + 'badge(shield: "Version-0.0.3-blue", no_badge: true)' + ] + end + + def self.category + :deprecated + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :dark, + env_name: "FL_BADGE_DARK", + description: "Adds a dark flavored badge on top of your icon", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.user_error!("dark is only a flag and should always be true") unless value == true + end), + FastlaneCore::ConfigItem.new(key: :custom, + env_name: "FL_BADGE_CUSTOM", + description: "Add your custom overlay/badge image", + optional: true, + verify_block: proc do |value| + UI.user_error!("custom should be a valid file path") unless value && File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :no_badge, + env_name: "FL_BADGE_NO_BADGE", + description: "Hides the beta badge", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.user_error!("no_badge is only a flag and should always be true") unless value == true + end), + FastlaneCore::ConfigItem.new(key: :shield, + env_name: "FL_BADGE_SHIELD", + description: "Add a shield to your app icon from shields.io", + optional: true), + FastlaneCore::ConfigItem.new(key: :alpha, + env_name: "FL_BADGE_ALPHA", + description: "Adds and alpha badge instead of the default beta one", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.user_error!("alpha is only a flag and should always be true") unless value == true + end), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_BADGE_PATH", + description: "Sets the root path to look for AppIcons", + optional: true, + default_value: '.', + verify_block: proc do |value| + UI.user_error!("path needs to be a valid directory") if Dir[value].empty? + end), + FastlaneCore::ConfigItem.new(key: :shield_io_timeout, + env_name: "FL_BADGE_SHIELD_IO_TIMEOUT", + description: "Set custom duration for the timeout of the shields.io request in seconds", + optional: true, + type: Integer, # allow integers, strings both + verify_block: proc do |value| + UI.user_error!("shield_io_timeout needs to be an integer > 0") if value.to_i < 1 + end), + FastlaneCore::ConfigItem.new(key: :glob, + env_name: "FL_BADGE_GLOB", + description: "Glob pattern for finding image files", + optional: true), + FastlaneCore::ConfigItem.new(key: :alpha_channel, + env_name: "FL_BADGE_ALPHA_CHANNEL", + description: "Keeps/adds an alpha channel to the icon (useful for android icons)", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.user_error!("alpha_channel is only a flag and should always be true") unless value == true + end), + FastlaneCore::ConfigItem.new(key: :shield_gravity, + env_name: "FL_BADGE_SHIELD_GRAVITY", + description: "Position of shield on icon. Default: North - Choices include: NorthWest, North, NorthEast, West, Center, East, SouthWest, South, SouthEast", + optional: true), + FastlaneCore::ConfigItem.new(key: :shield_no_resize, + env_name: "FL_BADGE_SHIELD_NO_RESIZE", + description: "Shield image will no longer be resized to aspect fill the full icon. 
Instead it will only be shrunk to not exceed the icon graphic", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.user_error!("shield_no_resize is only a flag and should always be true") unless value == true + end) + ] + end + + def self.authors + ["DanielGri"] + end + + def self.is_supported?(platform) + [:ios, :mac, :android].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_and_upload_to_appetize.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_and_upload_to_appetize.rb new file mode 100644 index 0000000..db9852b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_and_upload_to_appetize.rb @@ -0,0 +1,113 @@ +module Fastlane + module Actions + class BuildAndUploadToAppetizeAction < Action + def self.run(params) + tmp_path = "/tmp/fastlane_build" + + xcodebuild_configs = params[:xcodebuild] + xcodebuild_configs[:sdk] = "iphonesimulator" + xcodebuild_configs[:derivedDataPath] = tmp_path + xcodebuild_configs[:xcargs] = "CONFIGURATION_BUILD_DIR=" + tmp_path + xcodebuild_configs[:scheme] ||= params[:scheme] if params[:scheme].to_s.length > 0 + + Actions::XcodebuildAction.run(xcodebuild_configs) + + app_path = Dir[File.join(tmp_path, "**", "*.app")].last + UI.user_error!("Couldn't find app") unless app_path + + zipped_bundle = Actions::ZipAction.run(path: app_path, + output_path: File.join(tmp_path, "Result.zip")) + + other_action.appetize(path: zipped_bundle, + api_token: params[:api_token], + public_key: params[:public_key], + note: params[:note], + timeout: params[:timeout]) + + public_key = Actions.lane_context[SharedValues::APPETIZE_PUBLIC_KEY] + UI.success("Generated Public Key: #{Actions.lane_context[SharedValues::APPETIZE_PUBLIC_KEY]}") + + FileUtils.rm_rf(tmp_path) + + return public_key + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Generate and upload an ipa file to appetize.io" + end + + def self.details + [ + "This should be called from danger.", + "More information in the [device_grid guide](https://github.com/fastlane/fastlane/blob/master/fastlane/lib/fastlane/actions/device_grid/README.md)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcodebuild, + description: "Parameters that are passed to the xcodebuild action", + type: Hash, + default_value: {}, + short_option: '-x', + optional: true), + FastlaneCore::ConfigItem.new(key: :scheme, + description: "The scheme to build. Can also be passed using the `xcodebuild` parameter", + type: String, + short_option: '-s', + optional: true), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "APPETIZE_API_TOKEN", + description: "Appetize.io API Token", + sensitive: true, + code_gen_sensitive: true), + FastlaneCore::ConfigItem.new(key: :public_key, + description: "If not provided, a new app will be created. 
If provided, the existing build will be overwritten", + optional: true, + verify_block: proc do |value| + if value.start_with?("private_") + UI.user_error!("You provided a private key to appetize, please provide the public key") + end + end), + FastlaneCore::ConfigItem.new(key: :note, + description: "Notes you wish to add to the uploaded app", + optional: true), + FastlaneCore::ConfigItem.new(key: :timeout, + description: "The number of seconds to wait until automatically ending the session due to user inactivity. Must be 30, 60, 90, 120, 180, 300, 600, 1800, 3600 or 7200. Default is 120", + type: Integer, + optional: true, + verify_block: proc do |value| + UI.user_error!("The value provided doesn't match any of the supported options.") unless [30, 60, 90, 120, 180, 300, 600, 1800, 3600, 7200].include?(value) + end) + ] + end + + def self.output + end + + def self.return_value + "" + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + nil + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_android_app.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_android_app.rb new file mode 100644 index 0000000..e0fd357 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_android_app.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/gradle' + class BuildAndroidAppAction < GradleAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `gradle` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_app.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_app.rb new file mode 100644 index 0000000..2d99d5d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_app.rb @@ -0,0 +1,171 @@ +module Fastlane + module Actions + module SharedValues + IPA_OUTPUT_PATH ||= :IPA_OUTPUT_PATH + PKG_OUTPUT_PATH ||= :PKG_OUTPUT_PATH + DSYM_OUTPUT_PATH ||= :DSYM_OUTPUT_PATH + XCODEBUILD_ARCHIVE ||= :XCODEBUILD_ARCHIVE # originally defined in XcodebuildAction + end + + class BuildAppAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(values) + require 'gym' + + unless Actions.lane_context[SharedValues::SIGH_PROFILE_TYPE].to_s == "development" + values[:export_method] ||= Actions.lane_context[SharedValues::SIGH_PROFILE_TYPE] + end + + if Actions.lane_context[SharedValues::MATCH_PROVISIONING_PROFILE_MAPPING] + # Since Xcode 9 you need to explicitly provide the provisioning profile per app target + # If the user is smart and uses match and gym together with fastlane, we can do all + # the heavy lifting for them + values[:export_options] ||= {} + # It's not always a hash, because the user might have passed a string path to a ready plist file + # If that's the case, we won't set the provisioning profiles + # see https://github.com/fastlane/fastlane/issues/9490 + if values[:export_options].kind_of?(Hash) + match_mapping = (Actions.lane_context[SharedValues::MATCH_PROVISIONING_PROFILE_MAPPING] || {}).dup + existing_mapping = (values[:export_options][:provisioningProfiles] || {}).dup + + # Be smart about how we merge those 
mappings in case there are conflicts + mapping_object = Gym::CodeSigningMapping.new + hash_to_use = mapping_object.merge_profile_mapping(primary_mapping: existing_mapping, + secondary_mapping: match_mapping, + export_method: values[:export_method]) + + values[:export_options][:provisioningProfiles] = hash_to_use + else + self.show_xcode_9_warning + end + elsif Actions.lane_context[SharedValues::SIGH_PROFILE_PATHS] + # Since Xcode 9 you need to explicitly provide the provisioning profile per app target + # If the user used sigh we can match the profiles from sigh + values[:export_options] ||= {} + if values[:export_options].kind_of?(Hash) + # It's not always a hash, because the user might have passed a string path to a ready plist file + # If that's the case, we won't set the provisioning profiles + # see https://github.com/fastlane/fastlane/issues/9684 + values[:export_options][:provisioningProfiles] ||= {} + Actions.lane_context[SharedValues::SIGH_PROFILE_PATHS].each do |profile_path| + begin + profile = FastlaneCore::ProvisioningProfile.parse(profile_path) + app_id_prefix = profile["ApplicationIdentifierPrefix"].first + entitlements = profile["Entitlements"] + bundle_id = (entitlements["application-identifier"] || entitlements["com.apple.application-identifier"]).gsub("#{app_id_prefix}.", "") + values[:export_options][:provisioningProfiles][bundle_id] = profile["Name"] + rescue => ex + UI.error("Couldn't load profile at path: #{profile_path}") + UI.error(ex) + UI.verbose(ex.backtrace.join("\n")) + end + end + else + self.show_xcode_9_warning + end + end + + gym_output_path = Gym::Manager.new.work(values) + if gym_output_path.nil? + UI.important("No output path received from gym") + return nil + end + + absolute_output_path = File.expand_path(gym_output_path) + + # Binary path + if File.extname(absolute_output_path) == ".ipa" + absolute_dsym_path = absolute_output_path.gsub(/.ipa$/, ".app.dSYM.zip") + + Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] = absolute_output_path + ENV[SharedValues::IPA_OUTPUT_PATH.to_s] = absolute_output_path # for deliver + elsif File.extname(absolute_output_path) == ".pkg" + absolute_dsym_path = absolute_output_path.gsub(/.pkg$/, ".dSYM.zip") + + Actions.lane_context[SharedValues::PKG_OUTPUT_PATH] = absolute_output_path + ENV[SharedValues::PKG_OUTPUT_PATH.to_s] = absolute_output_path # for deliver + end + + # xcarchive path + Actions.lane_context[SharedValues::XCODEBUILD_ARCHIVE] = Gym::BuildCommandGenerator.archive_path + + # dSYM path + if absolute_dsym_path && File.exist?(absolute_dsym_path) + Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH] = absolute_dsym_path + ENV[SharedValues::DSYM_OUTPUT_PATH.to_s] = absolute_dsym_path + end + + return absolute_output_path + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.description + "Easily build and sign your app (via _gym_)" + end + + def self.details + "More information: https://fastlane.tools/gym" + end + + def self.output + [ + ['IPA_OUTPUT_PATH', 'The path to the newly generated ipa file'], + ['PKG_OUTPUT_PATH', 'The path to the newly generated pkg file'], + ['DSYM_OUTPUT_PATH', 'The path to the dSYM files'], + ['XCODEBUILD_ARCHIVE', 'The path to the xcodebuild archive'] + ] + end + + def self.return_value + "The absolute path to the generated ipa file" + end + + def self.return_type + :string + end + + def self.author + "KrauseFx" + end + + def self.available_options + require 'gym' + Gym::Options.available_options + end + + def self.is_supported?(platform) + [:ios, 
:mac].include?(platform) + end + + def self.example_code + [ + 'build_app(scheme: "MyApp", workspace: "MyApp.xcworkspace")', + 'build_app( + workspace: "MyApp.xcworkspace", + configuration: "Debug", + scheme: "MyApp", + silent: true, + clean: true, + output_directory: "path/to/dir", # Destination directory. Defaults to current directory. + output_name: "my-app.ipa", # specify the name of the .ipa file to generate (including file extension) + sdk: "iOS 11.1" # use SDK as the name or path of the base SDK when building the project. + )', + 'gym # alias for "build_app"', + 'build_ios_app # alias for "build_app (only iOS options)"', + 'build_mac_app # alias for "build_app (only macOS options)"' + ] + end + + def self.category + :building + end + + def self.show_xcode_9_warning + return unless Helper.xcode_at_least?("9.0") + UI.message("You passed a path to a custom plist file for exporting the binary.") + UI.message("Make sure to include information about what provisioning profiles to use with Xcode 9") + UI.message("More information: https://docs.fastlane.tools/codesigning/xcode-project/#xcode-9-and-up") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_ios_app.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_ios_app.rb new file mode 100644 index 0000000..bd9ccf7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_ios_app.rb @@ -0,0 +1,47 @@ +module Fastlane + module Actions + require 'fastlane/actions/build_app' + class BuildIosAppAction < BuildAppAction + # Gym::Options.available_options keys that don't apply to ios apps. + REJECT_OPTIONS = [ + :pkg, + :skip_package_pkg, + :catalyst_platform, + :installer_cert_name + ] + + def self.run(params) + # Adding reject options back in so gym has everything it needs + params.available_options += Gym::Options.available_options.select do |option| + REJECT_OPTIONS.include?(option.key) + end + + # Defaulting to ios specific values + params[:catalyst_platform] = "ios" + + super(params) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.available_options + require 'gym' + require 'gym/options' + + Gym::Options.available_options.reject do |option| + REJECT_OPTIONS.include?(option.key) + end + end + + def self.is_supported?(platform) + [:ios].include?(platform) + end + + def self.description + "Alias for the `build_app` action but only for iOS" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_mac_app.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_mac_app.rb new file mode 100644 index 0000000..2837566 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/build_mac_app.rb @@ -0,0 +1,46 @@ +module Fastlane + module Actions + require 'fastlane/actions/build_app' + class BuildMacAppAction < BuildAppAction + # Gym::Options.available_options keys that don't apply to mac apps. 
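+ # NOTE (editor): the keys below are only hidden from this alias's public options;
+ # run re-adds them via params.available_options so Gym can still validate its full
+ # configuration, and catalyst_platform is then pinned to "macos".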
+ REJECT_OPTIONS = [ + :ipa, + :skip_package_ipa, + :catalyst_platform + ] + + def self.run(params) + # Adding reject options back in so gym has everything it needs + params.available_options += Gym::Options.available_options.select do |option| + REJECT_OPTIONS.include?(option.key) + end + + # Defaulting to mac specific values + params[:catalyst_platform] = "macos" + + super(params) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.available_options + require 'gym' + require 'gym/options' + + Gym::Options.available_options.reject do |option| + REJECT_OPTIONS.include?(option.key) + end + end + + def self.is_supported?(platform) + [:mac].include?(platform) + end + + def self.description + "Alias for the `build_app` action but only for macOS" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/bundle_install.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/bundle_install.rb new file mode 100644 index 0000000..ec9d856 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/bundle_install.rb @@ -0,0 +1,166 @@ +module Fastlane + module Actions + class BundleInstallAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + if gemfile_exists?(params) + cmd = ['bundle install'] + + cmd << "--binstubs #{params[:binstubs]}" if params[:binstubs] + cmd << "--clean" if params[:clean] + cmd << "--full-index" if params[:full_index] + cmd << "--gemfile #{params[:gemfile]}" if params[:gemfile] + cmd << "--jobs #{params[:jobs]}" if params[:jobs] + cmd << "--local" if params[:local] + cmd << "--deployment" if params[:deployment] + cmd << "--no-cache" if params[:no_cache] + cmd << "--no_prune" if params[:no_prune] + cmd << "--path #{params[:path]}" if params[:path] + cmd << "--system" if params[:system] + cmd << "--quiet" if params[:quiet] + cmd << "--retry #{params[:retry]}" if params[:retry] + cmd << "--shebang" if params[:shebang] + cmd << "--standalone #{params[:standalone]}" if params[:standalone] + cmd << "--trust-policy" if params[:trust_policy] + cmd << "--without #{params[:without]}" if params[:without] + cmd << "--with #{params[:with]}" if params[:with] + cmd << "--frozen" if params[:frozen] + cmd << "--redownload" if params[:redownload] + + return sh(cmd.join(' ')) + else + UI.message("No Gemfile found") + end + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.gemfile_exists?(params) + possible_gemfiles = ['Gemfile', 'gemfile'] + possible_gemfiles.insert(0, params[:gemfile]) if params[:gemfile] + possible_gemfiles.each do |gemfile| + gemfile = File.absolute_path(gemfile) + return true if File.exist?(gemfile) + UI.message("Gemfile not found at: '#{gemfile}'") + end + return false + end + + def self.description + 'This action runs `bundle install` (if available)' + end + + def self.is_supported?(platform) + true + end + + def self.author + ["birmacher", "koglinjg"] + end + + def self.example_code + nil + end + + def self.category + :misc + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :binstubs, + env_name: "FL_BUNDLE_INSTALL_BINSTUBS", + description: "Generate bin stubs for bundled gems to ./bin", + optional: true), + FastlaneCore::ConfigItem.new(key: :clean, + env_name: "FL_BUNDLE_INSTALL_CLEAN", + description: "Run bundle clean automatically after install", + type: Boolean, + 
default_value: false), + FastlaneCore::ConfigItem.new(key: :full_index, + env_name: "FL_BUNDLE_INSTALL_FULL_INDEX", + description: "Use the rubygems modern index instead of the API endpoint", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :gemfile, + env_name: "FL_BUNDLE_INSTALL_GEMFILE", + description: "Use the specified gemfile instead of Gemfile", + optional: true), + FastlaneCore::ConfigItem.new(key: :jobs, + env_name: "FL_BUNDLE_INSTALL_JOBS", + description: "Install gems using parallel workers", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :local, + env_name: "FL_BUNDLE_INSTALL_LOCAL", + description: "Do not attempt to fetch gems remotely and use the gem cache instead", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :deployment, + env_name: "FL_BUNDLE_INSTALL_DEPLOYMENT", + description: "Install using defaults tuned for deployment and CI environments", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :no_cache, + env_name: "FL_BUNDLE_INSTALL_NO_CACHE", + description: "Don't update the existing gem cache", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :no_prune, + env_name: "FL_BUNDLE_INSTALL_NO_PRUNE", + description: "Don't remove stale gems from the cache", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_BUNDLE_INSTALL_PATH", + description: "Specify a different path than the system default ($BUNDLE_PATH or $GEM_HOME). Bundler will remember this value for future installs on this machine", + optional: true), + FastlaneCore::ConfigItem.new(key: :system, + env_name: "FL_BUNDLE_INSTALL_SYSTEM", + description: "Install to the system location ($BUNDLE_PATH or $GEM_HOME) even if the bundle was previously installed somewhere else for this application", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :quiet, + env_name: "FL_BUNDLE_INSTALL_QUIET", + description: "Only output warnings and errors", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :retry, + env_name: "FL_BUNDLE_INSTALL_RETRY", + description: "Retry network and git requests that have failed", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :shebang, + env_name: "FL_BUNDLE_INSTALL_SHEBANG", + description: "Specify a different shebang executable name than the default (usually 'ruby')", + optional: true), + FastlaneCore::ConfigItem.new(key: :standalone, + env_name: "FL_BUNDLE_INSTALL_STANDALONE", + description: "Make a bundle that can work without the Bundler runtime", + optional: true), + FastlaneCore::ConfigItem.new(key: :trust_policy, + env_name: "FL_BUNDLE_INSTALL_TRUST_POLICY", + description: "Sets level of security when dealing with signed gems. 
Accepts `LowSecurity`, `MediumSecurity` and `HighSecurity` as values", + optional: true), + FastlaneCore::ConfigItem.new(key: :without, + env_name: "FL_BUNDLE_INSTALL_WITHOUT", + description: "Exclude gems that are part of the specified named group", + optional: true), + FastlaneCore::ConfigItem.new(key: :with, + env_name: "FL_BUNDLE_INSTALL_WITH", + description: "Include gems that are part of the specified named group", + optional: true), + FastlaneCore::ConfigItem.new(key: :frozen, + env_name: "FL_BUNDLE_INSTALL_FROZEN", + description: "Don't allow the Gemfile.lock to be updated after install", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :redownload, + env_name: "FL_BUNDLE_INSTALL_REDOWNLOAD", + description: "Force download every gem, even if the required versions are already available locally", + type: Boolean, + default_value: false) + ] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_android_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_android_screenshots.rb new file mode 100644 index 0000000..6565038 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_android_screenshots.rb @@ -0,0 +1,63 @@ +module Fastlane + module Actions + module SharedValues + SCREENGRAB_OUTPUT_DIRECTORY = :SCREENGRAB_OUTPUT_DIRECTORY + end + + class CaptureAndroidScreenshotsAction < Action + def self.run(params) + require 'screengrab' + + Screengrab.config = params + Screengrab.android_environment = Screengrab::AndroidEnvironment.new(params[:android_home], + params[:build_tools_version]) + Screengrab::DependencyChecker.check(Screengrab.android_environment) + Screengrab::Runner.new.run + + Actions.lane_context[SharedValues::SCREENGRAB_OUTPUT_DIRECTORY] = File.expand_path(params[:output_directory]) + + true + end + + def self.description + 'Automated localized screenshots of your Android app (via _screengrab_)' + end + + def self.available_options + require 'screengrab' + Screengrab::Options.available_options + end + + def self.output + [ + ['SCREENGRAB_OUTPUT_DIRECTORY', 'The path to the output directory'] + ] + end + + def self.author + ['asfalcone', 'i2amsam', 'mfurtak'] + end + + def self.is_supported?(platform) + platform == :android + end + + def self.example_code + [ + 'capture_android_screenshots', + 'screengrab # alias for "capture_android_screenshots"', + 'capture_android_screenshots( + locales: ["en-US", "fr-FR", "ja-JP"], + clear_previous_screenshots: true, + app_apk_path: "build/outputs/apk/example-debug.apk", + tests_apk_path: "build/outputs/apk/example-debug-androidTest-unaligned.apk" + )' + ] + end + + def self.category + :screenshots + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_ios_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_ios_screenshots.rb new file mode 100644 index 0000000..06f94ad --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_ios_screenshots.rb @@ -0,0 +1,61 @@ +module Fastlane + module Actions + module SharedValues + SNAPSHOT_SCREENSHOTS_PATH = :SNAPSHOT_SCREENSHOTS_PATH + end + + class CaptureIosScreenshotsAction < Action + def self.run(params) + return nil unless Helper.mac? 
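+      # Editor's note (illustrative, not part of the upstream source): the
+      # guard above makes this action a silent no-op on non-macOS hosts,
+      # because _snapshot_ drives iOS simulators, which only exist on macOS.
+      # A minimal Fastfile invocation, assuming a project already configured
+      # for snapshot (device name is an assumption), might look like:
+      #
+      #   capture_ios_screenshots(
+      #     devices: ["iPhone 14"],
+      #     languages: ["en-US"],
+      #     output_directory: "./screenshots"
+      #   )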
+ require 'snapshot' + + Snapshot.config = params + Snapshot::DependencyChecker.check_simulators + Snapshot::Runner.new.work + + Actions.lane_context[SharedValues::SNAPSHOT_SCREENSHOTS_PATH] = File.expand_path(params[:output_directory]) # absolute URL + + true + end + + def self.description + "Generate new localized screenshots on multiple devices (via _snapshot_)" + end + + def self.available_options + return [] unless Helper.mac? + require 'snapshot' + Snapshot::Options.available_options + end + + def self.output + [ + ['SNAPSHOT_SCREENSHOTS_PATH', 'The path to the screenshots'] + ] + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'capture_ios_screenshots', + 'snapshot # alias for "capture_ios_screenshots"', + 'capture_ios_screenshots( + skip_open_summary: true, + clean: true + )' + ] + end + + def self.category + :screenshots + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_screenshots.rb new file mode 100644 index 0000000..1e05379 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/capture_screenshots.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/capture_ios_screenshots' + class CaptureScreenshotsAction < CaptureIosScreenshotsAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `capture_ios_screenshots` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/carthage.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/carthage.rb new file mode 100644 index 0000000..ea26488 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/carthage.rb @@ -0,0 +1,238 @@ +module Fastlane + module Actions + class CarthageAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + validate(params) + + cmd = [params[:executable]] + command_name = params[:command] + cmd << command_name + + if command_name == "archive" && params[:frameworks].count > 0 + cmd.concat(params[:frameworks]) + # "update", "build" and "bootstrap" are the only commands that support "--derived-data" parameter + elsif ["update", "build", "bootstrap"].include?(command_name) + cmd.concat(params[:dependencies]) if params[:dependencies].count > 0 + cmd << "--derived-data #{params[:derived_data].shellescape}" if params[:derived_data] + end + + cmd << "--output #{params[:output]}" if params[:output] + cmd << "--use-ssh" if params[:use_ssh] + cmd << "--use-submodules" if params[:use_submodules] + cmd << "--use-netrc" if params[:use_netrc] + cmd << "--no-use-binaries" if params[:use_binaries] == false + cmd << "--no-checkout" if params[:no_checkout] == true + cmd << "--no-build" if params[:no_build] == true + cmd << "--no-skip-current" if params[:no_skip_current] == true + cmd << "--verbose" if params[:verbose] == true + cmd << "--platform #{params[:platform]}" if params[:platform] + cmd << "--configuration #{params[:configuration]}" if params[:configuration] + cmd << "--toolchain #{params[:toolchain]}" if params[:toolchain] + cmd << "--project-directory #{params[:project_directory]}" if 
params[:project_directory] + cmd << "--cache-builds" if params[:cache_builds] + cmd << "--new-resolver" if params[:new_resolver] + cmd << "--log-path #{params[:log_path]}" if params[:log_path] + cmd << "--use-xcframeworks" if params[:use_xcframeworks] + cmd << "--archive" if params[:archive] + + Actions.sh(cmd.join(' ')) + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.validate(params) + command_name = params[:command] + if command_name != "archive" && params[:frameworks].count > 0 + UI.user_error!("Frameworks option is available only for 'archive' command.") + end + if command_name != "archive" && params[:output] + UI.user_error!("Output option is available only for 'archive' command.") + end + + if params[:log_path] && !%w(build bootstrap update).include?(command_name) + UI.user_error!("Log path option is available only for 'build', 'bootstrap', and 'update' command.") + end + + if params[:use_xcframeworks] && !%w(build bootstrap update).include?(command_name) + UI.user_error!("Use XCFrameworks option is available only for 'build', 'bootstrap', and 'update' command.") + end + + if command_name != "build" && params[:archive] + UI.user_error!("Archive option is available only for 'build' command.") + end + end + + def self.description + "Runs `carthage` for your project" + end + + def self.available_commands + %w(build bootstrap update archive) + end + + def self.available_platforms + %w(all iOS Mac tvOS watchOS) + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :command, + env_name: "FL_CARTHAGE_COMMAND", + description: "Carthage command (one of: #{available_commands.join(', ')})", + default_value: 'bootstrap', + verify_block: proc do |value| + UI.user_error!("Please pass a valid command. Use one of the following: #{available_commands.join(', ')}") unless available_commands.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :dependencies, + description: "Carthage dependencies to update, build or bootstrap", + default_value: [], + type: Array), + FastlaneCore::ConfigItem.new(key: :use_ssh, + env_name: "FL_CARTHAGE_USE_SSH", + description: "Use SSH for downloading GitHub repositories", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :use_submodules, + env_name: "FL_CARTHAGE_USE_SUBMODULES", + description: "Add dependencies as Git submodules", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :use_netrc, + env_name: "FL_CARTHAGE_USE_NETRC", + description: "Use .netrc for downloading frameworks", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :use_binaries, + env_name: "FL_CARTHAGE_USE_BINARIES", + description: "Check out dependency repositories even when prebuilt frameworks exist", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :no_checkout, + env_name: "FL_CARTHAGE_NO_CHECKOUT", + description: "When bootstrapping Carthage do not checkout", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :no_build, + env_name: "FL_CARTHAGE_NO_BUILD", + description: "When bootstrapping Carthage do not build", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :no_skip_current, + env_name: "FL_CARTHAGE_NO_SKIP_CURRENT", + description: "Don't skip building the Carthage project (in addition to its dependencies)", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :derived_data, + env_name: "FL_CARTHAGE_DERIVED_DATA", + description: "Use derived data folder at path", + optional: true), + 
FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_CARTHAGE_VERBOSE", + description: "Print xcodebuild output inline", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "FL_CARTHAGE_PLATFORM", + description: "Define which platform to build for", + optional: true, + verify_block: proc do |value| + value.split(',').each do |platform| + UI.user_error!("Please pass a valid platform. Use one of the following: #{available_platforms.join(', ')}") unless available_platforms.map(&:downcase).include?(platform.downcase) + end + end), + FastlaneCore::ConfigItem.new(key: :cache_builds, + env_name: "FL_CARTHAGE_CACHE_BUILDS", + description: "By default Carthage will rebuild a dependency regardless of whether it's the same resolved version as before. Passing the --cache-builds will cause carthage to avoid rebuilding a dependency if it can", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :frameworks, + description: "Framework name or names to archive, could be applied only along with the archive command", + default_value: [], + type: Array), + FastlaneCore::ConfigItem.new(key: :output, + description: "Output name for the archive, could be applied only along with the archive command. Use following format *.framework.zip", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass a valid string for output. Use following format *.framework.zip") unless value.end_with?("framework.zip") + end), + FastlaneCore::ConfigItem.new(key: :configuration, + env_name: "FL_CARTHAGE_CONFIGURATION", + description: "Define which build configuration to use when building", + optional: true, + verify_block: proc do |value| + if value.chomp(' ').empty? + UI.user_error!("Please pass a valid build configuration. 
You can review the list of configurations for this project using the command: xcodebuild -list") + end + end), + FastlaneCore::ConfigItem.new(key: :toolchain, + env_name: "FL_CARTHAGE_TOOLCHAIN", + description: "Define which xcodebuild toolchain to use when building", + optional: true), + FastlaneCore::ConfigItem.new(key: :project_directory, + env_name: "FL_CARTHAGE_PROJECT_DIRECTORY", + description: "Define the directory containing the Carthage project", + optional: true), + FastlaneCore::ConfigItem.new(key: :new_resolver, + env_name: "FL_CARTHAGE_NEW_RESOLVER", + description: "Use new resolver when resolving dependency graph", + optional: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :log_path, + env_name: "FL_CARTHAGE_LOG_PATH", + description: "Path to the xcode build output", + optional: true), + FastlaneCore::ConfigItem.new(key: :use_xcframeworks, + env_name: "FL_CARTHAGE_USE_XCFRAMEWORKS", + description: "Create xcframework bundles instead of one framework per platform (requires Xcode 12+)", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :archive, + env_name: "FL_CARTHAGE_ARCHIVE", + description: "Archive built frameworks from the current project", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :executable, + env_name: "FL_CARTHAGE_EXECUTABLE", + description: "Path to the `carthage` executable on your machine", + default_value: 'carthage') + ] + end + + def self.example_code + [ + 'carthage', + 'carthage( + frameworks: ["MyFramework1", "MyFramework2"], # Specify which frameworks to archive (only for the archive command) + output: "MyFrameworkBundle.framework.zip", # Specify the output archive name (only for the archive command) + command: "bootstrap", # One of: build, bootstrap, update, archive. (default: bootstrap) + dependencies: ["Alamofire", "Notice"], # Specify which dependencies to update or build (only for update, build and bootstrap commands) + use_ssh: false, # Use SSH for downloading GitHub repositories. + use_submodules: false, # Add dependencies as Git submodules. + use_binaries: true, # Check out dependency repositories even when prebuilt frameworks exist + no_build: false, # When bootstrapping Carthage do not build + no_skip_current: false, # Don\'t skip building the current project (only for frameworks) + verbose: false, # Print xcodebuild output inline + platform: "all", # Define which platform to build for (one of ‘all’, ‘Mac’, ‘iOS’, ‘watchOS’, ‘tvOS‘, or comma-separated values of the formers except for ‘all’) + configuration: "Release", # Build configuration to use when building + cache_builds: true, # By default Carthage will rebuild a dependency regardless of whether its the same resolved version as before. 
+ toolchain: "com.apple.dt.toolchain.Swift_2_3", # Specify the xcodebuild toolchain + new_resolver: false, # Use the new resolver to resolve dependency graph + log_path: "carthage.log" # Path to the xcode build output + )' + ] + end + + def self.category + :building + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.authors + ["bassrock", "petester42", "jschmid", "JaviSoto", "uny", "phatblat", "bfcrampton", "antondomashnev", "gbrhaz"] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cert.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cert.rb new file mode 100644 index 0000000..18dad4c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cert.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/get_certificates' + class CertAction < GetCertificatesAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `get_certificates` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/changelog_from_git_commits.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/changelog_from_git_commits.rb new file mode 100644 index 0000000..6af9638 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/changelog_from_git_commits.rb @@ -0,0 +1,188 @@ +module Fastlane + module Actions + module SharedValues + FL_CHANGELOG ||= :FL_CHANGELOG + end + + class ChangelogFromGitCommitsAction < Action + def self.run(params) + if params[:commits_count] + UI.success("Collecting the last #{params[:commits_count]} Git commits") + else + if params[:between] + if params[:between].kind_of?(String) && params[:between].include?(",") # :between is string + from, to = params[:between].split(",", 2) + elsif params[:between].kind_of?(Array) + from, to = params[:between] + end + else + from = Actions.last_git_tag_name(params[:match_lightweight_tag], params[:tag_match_pattern]) + UI.verbose("Found the last Git tag: #{from}") + to = 'HEAD' + end + UI.success("Collecting Git commits between #{from} and #{to}") + end + + # Normally it is not good practice to take arbitrary input and convert it to a symbol + # because prior to Ruby 2.2, symbols are never garbage collected. 
However, we've + # already validated that the input matches one of our allowed values, so this is OK + merge_commit_filtering = params[:merge_commit_filtering].to_sym + + # We want to be specific and exclude nil for this comparison + if params[:include_merges] == false + merge_commit_filtering = :exclude_merges + end + + params[:path] = './' unless params[:path] + + Dir.chdir(params[:path]) do + if params[:commits_count] + changelog = Actions.git_log_last_commits(params[:pretty], params[:commits_count], merge_commit_filtering, params[:date_format], params[:ancestry_path]) + else + changelog = Actions.git_log_between(params[:pretty], from, to, merge_commit_filtering, params[:date_format], params[:ancestry_path]) + end + + changelog = changelog.gsub("\n\n", "\n") if changelog # as there are duplicate newlines + Actions.lane_context[SharedValues::FL_CHANGELOG] = changelog + + if params[:quiet] == false + puts("") + puts(changelog) + puts("") + end + + changelog + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Collect git commit messages into a changelog" + end + + def self.details + "By default, messages will be collected back to the last tag, but the range can be controlled" + end + + def self.output + [ + ['FL_CHANGELOG', 'The changelog string generated from the collected git commit messages'] + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :between, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_BETWEEN', + description: 'Array containing two Git revision values between which to collect messages, you mustn\'t use it with :commits_count key at the same time', + optional: true, + type: Array, # allow Array, String both + conflicting_options: [:commits_count], + verify_block: proc do |value| + UI.user_error!(":between must not contain nil values") if value.any?(&:nil?) 
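+                                         # Editor's note (illustrative): per the parsing in `run` above and the
+                                         # Array type coercion, :between accepts either a two-element array or a
+                                         # comma-separated string, so these two calls are equivalent:
+                                         #   changelog_from_git_commits(between: ["1.0.0", "HEAD"])
+                                         #   changelog_from_git_commits(between: "1.0.0,HEAD")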
+ UI.user_error!(":between must be an array of size 2") unless (value || []).size == 2 + end), + FastlaneCore::ConfigItem.new(key: :commits_count, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_COUNT', + description: 'Number of commits to include in changelog, you mustn\'t use it with :between key at the same time', + optional: true, + conflicting_options: [:between], + type: Integer, + verify_block: proc do |value| + UI.user_error!(":commits_count must be >= 1") unless value.to_i >= 1 + end), + FastlaneCore::ConfigItem.new(key: :path, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_PATH', + description: 'Path of the git repository', + optional: true, + default_value: './'), + FastlaneCore::ConfigItem.new(key: :pretty, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_PRETTY', + description: 'The format applied to each commit while generating the collected value', + optional: true, + default_value: '%B'), + FastlaneCore::ConfigItem.new(key: :date_format, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_DATE_FORMAT', + description: 'The date format applied to each commit while generating the collected value', + optional: true), + FastlaneCore::ConfigItem.new(key: :ancestry_path, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_ANCESTRY_PATH', + description: 'Whether or not to use ancestry-path param', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :tag_match_pattern, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_TAG_MATCH_PATTERN', + description: 'A glob(7) pattern to match against when finding the last git tag', + optional: true), + FastlaneCore::ConfigItem.new(key: :match_lightweight_tag, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_MATCH_LIGHTWEIGHT_TAG', + description: 'Whether or not to match a lightweight tag when searching for the last one', + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :quiet, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_TAG_QUIET', + description: 'Whether or not to disable changelog output', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :include_merges, + deprecated: "Use `:merge_commit_filtering` instead", + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_INCLUDE_MERGES', + description: "Whether or not to include any commits that are merges", + optional: true, + type: Boolean, + verify_block: proc do |value| + UI.important("The :include_merges option is deprecated. Please use :merge_commit_filtering instead") unless value.nil? + end), + FastlaneCore::ConfigItem.new(key: :merge_commit_filtering, + env_name: 'FL_CHANGELOG_FROM_GIT_COMMITS_MERGE_COMMIT_FILTERING', + description: "Controls inclusion of merge commits when collecting the changelog. Valid values: #{GIT_MERGE_COMMIT_FILTERING_OPTIONS.map { |o| "'#{o}'" }.join(', ')}", + optional: true, + default_value: 'include_merges', + verify_block: proc do |value| + matches_option = GIT_MERGE_COMMIT_FILTERING_OPTIONS.any? 
{ |opt| opt.to_s == value } + UI.user_error!("Valid values for :merge_commit_filtering are #{GIT_MERGE_COMMIT_FILTERING_OPTIONS.map { |o| "'#{o}'" }.join(', ')}") unless matches_option + end) + ] + end + + def self.return_value + "Returns a String containing your formatted git commits" + end + + def self.return_type + :string + end + + def self.author + ['mfurtak', 'asfalcone', 'SiarheiFedartsou', 'allewun'] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'changelog_from_git_commits', + 'changelog_from_git_commits( + between: ["7b092b3", "HEAD"], # Optional, lets you specify a revision/tag range between which to collect commit info + pretty: "- (%ae) %s", # Optional, lets you provide a custom format to apply to each commit when generating the changelog text + date_format: "short", # Optional, lets you provide an additional date format to dates within the pretty-formatted string + match_lightweight_tag: false, # Optional, lets you ignore lightweight (non-annotated) tags when searching for the last tag + merge_commit_filtering: "exclude_merges" # Optional, lets you filter out merge commits + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/chatwork.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/chatwork.rb new file mode 100644 index 0000000..18defd3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/chatwork.rb @@ -0,0 +1,95 @@ +module Fastlane + module Actions + module SharedValues + end + + class ChatworkAction < Action + def self.run(options) + require 'net/http' + require 'uri' + + emoticon = (options[:success] ? '(dance)' : ';(') + + uri = URI.parse("https://api.chatwork.com/v2/rooms/#{options[:roomid]}/messages") + https = Net::HTTP.new(uri.host, uri.port) + https.use_ssl = true + + req = Net::HTTP::Post.new(uri.request_uri) + req['X-ChatWorkToken'] = options[:api_token] + req.set_form_data({ + 'body' => "[info][title]Notification from fastlane[/title]#{emoticon} #{options[:message]}[/info]" + }) + + response = https.request(req) + case response.code.to_i + when 200..299 + UI.success('Successfully sent notification to ChatWork right now đŸ“ĸ') + else + require 'json' + json = JSON.parse(response.body) + UI.user_error!("HTTP Error: #{response.code} #{json['errors']}") + end + end + + def self.description + "Send a success/error message to [ChatWork](https://go.chatwork.com/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "CHATWORK_API_TOKEN", + description: "ChatWork API Token", + sensitive: true, + code_gen_sensitive: true, + verify_block: proc do |value| + unless value.to_s.length > 0 + UI.error("Please add 'ENV[\"CHATWORK_API_TOKEN\"] = \"your token\"' to your Fastfile's `before_all` section.") + UI.user_error!("No CHATWORK_API_TOKEN given.") + end + end), + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_CHATWORK_MESSAGE", + description: "The message to post on ChatWork"), + FastlaneCore::ConfigItem.new(key: :roomid, + env_name: "FL_CHATWORK_ROOMID", + description: "The room ID", + type: Integer), + FastlaneCore::ConfigItem.new(key: :success, + env_name: "FL_CHATWORK_SUCCESS", + description: "Was this build successful? 
(true/false)", + optional: true, + default_value: true, + type: Boolean) + ] + end + + def self.author + "astronaughts" + end + + def self.is_supported?(platform) + true + end + + def self.details + "Information on how to obtain an API token: [http://developer.chatwork.com/ja/authenticate.html](http://developer.chatwork.com/ja/authenticate.html)" + end + + def self.example_code + [ + 'chatwork( + message: "App successfully released!", + roomid: 12345, + success: true, + api_token: "Your Token" + )' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/check_app_store_metadata.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/check_app_store_metadata.rb new file mode 100644 index 0000000..7ad4129 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/check_app_store_metadata.rb @@ -0,0 +1,62 @@ +module Fastlane + module Actions + module SharedValues + end + + class CheckAppStoreMetadataAction < Action + def self.run(config) + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless config[:api_key_path] + config[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + require 'precheck' + Precheck.config = config + return Precheck::Runner.new.run + end + + def self.description + "Check your app's metadata before you submit your app to review (via _precheck_)" + end + + def self.details + "More information: https://fastlane.tools/precheck" + end + + def self.available_options + require 'precheck/options' + Precheck::Options.available_options + end + + def self.return_value + return "true if precheck passes, else, false" + end + + def self.return_type + :bool + end + + def self.authors + ["taquitos"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'check_app_store_metadata( + negative_apple_sentiment: [level: :skip], # Set to skip to not run the `negative_apple_sentiment` rule + curse_words: [level: :warn] # Set to warn to only warn on curse word check failures + )', + 'precheck # alias for "check_app_store_metadata"' + ] + end + + def self.category + :app_store_connect + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_build_artifacts.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_build_artifacts.rb new file mode 100644 index 0000000..8e4deca --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_build_artifacts.rb @@ -0,0 +1,71 @@ +module Fastlane + module Actions + class CleanBuildArtifactsAction < Action + def self.run(options) + paths = [ + Actions.lane_context[Actions::SharedValues::IPA_OUTPUT_PATH], + Actions.lane_context[Actions::SharedValues::DSYM_OUTPUT_PATH], + Actions.lane_context[Actions::SharedValues::CERT_FILE_PATH] + ] + + paths += Actions.lane_context[Actions::SharedValues::SIGH_PROFILE_PATHS] || [] + paths += Actions.lane_context[Actions::SharedValues::DSYM_PATHS] || [] + paths = paths.uniq + + paths.reject { |file| file.nil? 
|| !File.exist?(file) }.each do |file| + if options[:exclude_pattern] + next if file.match(options[:exclude_pattern]) + end + + UI.verbose("Cleaning up '#{file}'") + File.delete(file) + end + + Actions.lane_context[Actions::SharedValues::SIGH_PROFILE_PATHS] = nil + Actions.lane_context[Actions::SharedValues::DSYM_PATHS] = nil + Actions.lane_context[Actions::SharedValues::DSYM_LATEST_UPLOADED_DATE] = nil + + UI.success('Cleaned up build artifacts 🐙') + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :exclude_pattern, + env_name: "FL_CLEAN_BUILD_ARTIFACTS_EXCLUDE_PATTERN", + description: "Exclude all files from clearing that match the given Regex pattern: e.g. '.*\.mobileprovision'", + optional: true) + ] + end + + def self.description + "Deletes files created as result of running gym, cert, sigh or download_dsyms" + end + + def self.details + [ + "This action deletes the files that get created in your repo as a result of running the _gym_ and _sigh_ commands. It doesn't delete the `fastlane/report.xml` though, this is probably more suited for the .gitignore.", + "", + "Useful if you quickly want to send out a test build by dropping down to the command line and typing something like `fastlane beta`, without leaving your repo in a messy state afterwards." + ].join("\n") + end + + def self.author + "lmirosevic" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'clean_build_artifacts' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_cocoapods_cache.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_cocoapods_cache.rb new file mode 100644 index 0000000..dbce9dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clean_cocoapods_cache.rb @@ -0,0 +1,75 @@ +module Fastlane + module Actions + class CleanCocoapodsCacheAction < Action + def self.run(params) + Actions.verify_gem!('cocoapods') + + cmd = ['pod cache clean'] + + cmd << params[:name].to_s if params[:name] + cmd << '--no-ansi' if params[:no_ansi] + cmd << '--verbose' if params[:verbose] + cmd << '--silent' if params[:silent] + cmd << '--allow-root' if params[:allow_root] + cmd << '--all' + + Actions.sh(cmd.join(' ')) + end + + def self.description + 'Remove the cache for pods' + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :name, + env_name: "FL_CLEAN_COCOAPODS_CACHE_DEVELOPMENT", + description: "Pod name to be removed from cache", + optional: true, + verify_block: proc do |value| + UI.user_error!("You must specify pod name which should be removed from cache") if value.to_s.empty? 
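+                                         # Editor's note (illustrative): with name: "Alamofire" and the remaining
+                                         # options left at their defaults, `run` above assembles roughly:
+                                         #   pod cache clean Alamofire --all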
+ end), + FastlaneCore::ConfigItem.new(key: :no_ansi, + env_name: "FL_CLEAN_COCOAPODS_CACHE_NO_ANSI", + description: "Show output without ANSI codes", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_CLEAN_COCOAPODS_CACHE_VERBOSE", + description: "Show more debugging information", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :silent, + env_name: "FL_CLEAN_COCOAPODS_CACHE_SILENT", + description: "Show nothing", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :allow_root, + env_name: "FL_CLEAN_COCOAPODS_CACHE_ALLOW_ROOT", + description: "Allows CocoaPods to run as root", + type: Boolean, + default_value: false) + ] + end + + def self.authors + ["alexmx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'clean_cocoapods_cache', + 'clean_cocoapods_cache(name: "CACHED_POD")' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clear_derived_data.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clear_derived_data.rb new file mode 100644 index 0000000..32831d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clear_derived_data.rb @@ -0,0 +1,70 @@ +require 'fastlane_core/core_ext/cfpropertylist' + +module Fastlane + module Actions + class ClearDerivedDataAction < Action + def self.run(options) + path = File.expand_path(options[:derived_data_path]) + UI.message("Derived Data path located at: #{path}") + FileUtils.rm_rf(path) if File.directory?(path) + UI.success("Successfully cleared Derived Data â™ģī¸") + end + + # Helper Methods + def self.xcode_preferences + file = File.expand_path("~/Library/Preferences/com.apple.dt.Xcode.plist") + if File.exist?(file) + plist = CFPropertyList::List.new(file: file).value + return CFPropertyList.native_types(plist) unless plist.nil? + end + return nil + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Deletes the Xcode Derived Data" + end + + def self.details + "Deletes the Derived Data from path set on Xcode or a supplied path" + end + + def self.available_options + path = xcode_preferences ? 
xcode_preferences['IDECustomDerivedDataLocation'] : nil + path ||= "~/Library/Developer/Xcode/DerivedData" + [ + FastlaneCore::ConfigItem.new(key: :derived_data_path, + env_name: "DERIVED_DATA_PATH", + description: "Custom path for derivedData", + default_value_dynamic: true, + default_value: path) + ] + end + + def self.output + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'clear_derived_data', + 'clear_derived_data(derived_data_path: "/custom/")' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clipboard.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clipboard.rb new file mode 100644 index 0000000..14e96ab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/clipboard.rb @@ -0,0 +1,49 @@ +module Fastlane + module Actions + class ClipboardAction < Action + def self.run(params) + value = params[:value] + + truncated_value = value[0..800].gsub(/\s\w+\s*$/, '...') + UI.message("Storing '#{truncated_value}' in the clipboard 🎨") + + FastlaneCore::Clipboard.copy(content: value) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Copies a given string into the clipboard. Works only on macOS" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :value, + env_name: "FL_CLIPBOARD_VALUE", + description: "The string that should be copied into the clipboard") + ] + end + + def self.authors + ["KrauseFx", "joshdholtz", "rogerluan"] + end + + def self.is_supported?(platform) + FastlaneCore::Clipboard.is_supported? + end + + def self.example_code + [ + 'clipboard(value: "https://docs.fastlane.tools/")', + 'clipboard(value: lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] || "")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cloc.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cloc.rb new file mode 100644 index 0000000..c9e6ee7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cloc.rb @@ -0,0 +1,85 @@ +module Fastlane + module Actions + class ClocAction < Action + def self.run(params) + cloc_binary = params[:binary_path] + exclude_dirs = params[:exclude_dir].nil? ? '' : "--exclude-dir=#{params[:exclude_dir]}" + xml_format = params[:xml] + out_dir = params[:output_directory] + output_file = xml_format ? "#{out_dir}/cloc.xml" : "#{out_dir}/cloc.txt" + source_directory = params[:source_directory] + + command = [ + cloc_binary, + exclude_dirs, + '--by-file', + xml_format ? '--xml ' : '', + "--out=#{output_file}", + source_directory + ].join(' ').strip + + Actions.sh(command) + end + + def self.description + "Generates a Code Count that can be read by Jenkins (xml format)" + end + + def self.details + [ + "This action will run cloc to generate a SLOC report that the Jenkins SLOCCount plugin can read.", + "See [https://wiki.jenkins-ci.org/display/JENKINS/SLOCCount+Plugin](https://wiki.jenkins-ci.org/display/JENKINS/SLOCCount+Plugin) and [https://github.com/AlDanial/cloc](https://github.com/AlDanial/cloc) for more information." 
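+        # Editor's note (illustrative): with the parameters shown in
+        # `example_code` below and xml: true, `run` builds a command along
+        # the lines of:
+        #   /usr/local/bin/cloc --exclude-dir=ThirdParty,Resources --by-file \
+        #     --xml --out=reports/cloc.xml MyCoolApp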
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :binary_path, + env_name: "FL_CLOC_BINARY_PATH", + description: "Where the cloc binary lives on your system (full path including 'cloc')", + optional: true, + default_value: '/usr/local/bin/cloc'), + FastlaneCore::ConfigItem.new(key: :exclude_dir, + env_name: "FL_CLOC_EXCLUDE_DIR", + description: "Comma separated list of directories to exclude", + optional: true), + FastlaneCore::ConfigItem.new(key: :output_directory, + env_name: "FL_CLOC_OUTPUT_DIRECTORY", + description: "Where to put the generated report file", + default_value: "build"), + FastlaneCore::ConfigItem.new(key: :source_directory, + env_name: "FL_CLOC_SOURCE_DIRECTORY", + description: "Where to look for the source code (relative to the project root folder)", + default_value: ""), + FastlaneCore::ConfigItem.new(key: :xml, + env_name: "FL_CLOC_XML", + description: "Should we generate an XML File (if false, it will generate a plain text file)?", + type: Boolean, + default_value: true) + ] + end + + def self.authors + ["intere"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'cloc( + exclude_dir: "ThirdParty,Resources", + output_directory: "reports", + source_directory: "MyCoolApp" + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cocoapods.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cocoapods.rb new file mode 100644 index 0000000..4dafc25 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/cocoapods.rb @@ -0,0 +1,178 @@ +module Fastlane + module Actions + class CocoapodsAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + Actions.verify_gem!('cocoapods') + cmd = [] + + unless params[:podfile].nil? + if params[:podfile].end_with?('Podfile') + podfile_folder = File.dirname(params[:podfile]) + else + podfile_folder = params[:podfile] + end + cmd << ["cd '#{podfile_folder}' &&"] + end + + cmd << ['bundle exec'] if use_bundle_exec?(params) + cmd << ['pod install'] + + cmd << '--no-clean' unless params[:clean] + cmd << '--no-integrate' unless params[:integrate] + cmd << '--clean-install' if params[:clean_install] && pod_version_at_least("1.7", params) + cmd << '--allow-root' if params[:allow_root] && pod_version_at_least("1.10", params) + cmd << '--repo-update' if params[:repo_update] + cmd << '--silent' if params[:silent] + cmd << '--verbose' if params[:verbose] + cmd << '--no-ansi' unless params[:ansi] + cmd << '--deployment' if params[:deployment] + + Actions.sh(cmd.join(' '), error_callback: lambda { |result| + if !params[:repo_update] && params[:try_repo_update_on_error] + cmd << '--repo-update' + Actions.sh(cmd.join(' '), error_callback: lambda { |retry_result| + call_error_callback(params, retry_result) + }) + else + call_error_callback(params, result) + end + }) + end + + def self.use_bundle_exec?(params) + params[:use_bundle_exec] && shell_out_should_use_bundle_exec? + end + + def self.pod_version(params) + use_bundle_exec?(params) ? 
`bundle exec pod --version` : `pod --version` + end + + def self.pod_version_at_least(at_least_version, params) + version = pod_version(params) + return Gem::Version.new(version) >= Gem::Version.new(at_least_version) + end + + def self.call_error_callback(params, result) + if params[:error_callback] + Dir.chdir(FastlaneCore::FastlaneFolder.path) do + params[:error_callback].call(result) + end + else + UI.shell_error!(result) + end + end + + def self.description + "Runs `pod install` for the project" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :repo_update, + env_name: "FL_COCOAPODS_REPO_UPDATE", + description: "Add `--repo-update` flag to `pod install` command", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :clean_install, + env_name: "FL_COCOAPODS_CLEAN_INSTALL", + description: "Execute a full pod installation ignoring the content of the project cache", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :silent, + env_name: "FL_COCOAPODS_SILENT", + description: "Execute command without logging output", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_COCOAPODS_VERBOSE", + description: "Show more debugging information", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :ansi, + env_name: "FL_COCOAPODS_ANSI", + description: "Show output with ANSI codes", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + env_name: "FL_COCOAPODS_USE_BUNDLE_EXEC", + description: "Use bundle exec when there is a Gemfile presented", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :podfile, + env_name: "FL_COCOAPODS_PODFILE", + description: "Explicitly specify the path to the Cocoapods' Podfile. You can either set it to the Podfile's path or to the folder containing the Podfile file", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not find Podfile") unless File.exist?(value) || Helper.test? + end), + FastlaneCore::ConfigItem.new(key: :error_callback, + description: 'A callback invoked with the command output if there is a non-zero exit status', + optional: true, + type: :string_callback), + FastlaneCore::ConfigItem.new(key: :try_repo_update_on_error, + env_name: "FL_COCOAPODS_TRY_REPO_UPDATE_ON_ERROR", + description: 'Retry with --repo-update if action was finished with error', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :deployment, + env_name: "FL_COCOAPODS_DEPLOYMENT", + description: 'Disallow any changes to the Podfile or the Podfile.lock during installation', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :allow_root, + env_name: "FL_COCOAPODS_ALLOW_ROOT", + description: 'Allows CocoaPods to run as root', + optional: true, + default_value: false, + type: Boolean), + + # Deprecated + FastlaneCore::ConfigItem.new(key: :clean, + env_name: "FL_COCOAPODS_CLEAN", + description: "(Option renamed as clean_install) Remove SCM directories", + deprecated: true, + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :integrate, + env_name: "FL_COCOAPODS_INTEGRATE", + description: "(Option removed from cocoapods) Integrate the Pods libraries into the Xcode project(s)", + deprecated: true, + type: Boolean, + default_value: true) + ] + # Please don't add a version parameter to the `cocoapods` action. 
If you need to specify a version when running + # `cocoapods`, please start using a Gemfile and lock the version there + # More information https://docs.fastlane.tools/getting-started/ios/setup/#use-a-gemfile + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.authors + ["KrauseFx", "tadpol", "birmacher", "Liquidsoul"] + end + + def self.details + "If you use [CocoaPods](http://cocoapods.org) you can use the `cocoapods` integration to run `pod install` before building your app." + end + + def self.example_code + [ + 'cocoapods', + 'cocoapods( + clean_install: true, + podfile: "./CustomPodfile" + )' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_github_file.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_github_file.rb new file mode 100644 index 0000000..ce67a86 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_github_file.rb @@ -0,0 +1,194 @@ +module Fastlane + module Actions + module SharedValues + COMMIT_GITHUB_FILE_HTML_LINK = :COMMIT_GITHUB_FILE_HTML_LINK + COMMIT_GITHUB_FILE_SHA = :COMMIT_GITHUB_FILE_SHA + COMMIT_GITHUB_FILE_JSON = :COMMIT_GITHUB_FILE_JSON + end + + class CommitGithubFileAction < Action + def self.run(params) + repo_name = params[:repository_name] + branch = params[:branch] ||= 'master' + commit_message = params[:message] + + file_path = params[:path] + file_name = File.basename(file_path) + expanded_file_path = File.expand_path(file_path) + + UI.important("Creating commit on #{repo_name} on branch \"#{branch}\" for file \"#{file_path}\"") + + api_file_path = file_path + api_file_path = "/#{api_file_path}" unless api_file_path.start_with?('/') + api_file_path = api_file_path[0..-2] if api_file_path.end_with?('/') + + payload = { + path: api_file_path, + message: commit_message || "Updated : #{file_name}", + content: Base64.encode64(File.open(expanded_file_path).read), + branch: branch + } + + UI.message("Committing #{api_file_path}") + GithubApiAction.run({ + server_url: params[:server_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + secure: params[:secure], + http_method: "PUT", + path: File.join("repos", params[:repository_name], "contents", api_file_path), + body: payload, + error_handlers: { + 422 => proc do |result| + json = result[:json] + UI.error(json || result[:body]) + error = if json['message'] == "Invalid request.\n\n\"sha\" wasn't supplied." 
+ "File already exists - please remove from repo before uploading or rename this upload" + else + "Uprocessable error" + end + UI.user_error!(error) + end + } + }) do |result| + UI.success("Successfully committed file to GitHub") + json = result[:json] + html_url = json['commit']['html_url'] + download_url = json['content']['download_url'] + commit_sha = json['commit']['sha'] + + UI.important("Commit: \"#{html_url}\"") + UI.important("SHA: \"#{commit_sha}\"") + UI.important("Download at: \"#{download_url}\"") + + Actions.lane_context[SharedValues::COMMIT_GITHUB_FILE_HTML_LINK] = html_url + Actions.lane_context[SharedValues::COMMIT_GITHUB_FILE_SHA] = commit_sha + Actions.lane_context[SharedValues::COMMIT_GITHUB_FILE_JSON] = json + end + + Actions.lane_context[SharedValues::COMMIT_GITHUB_FILE_JSON] + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "This will commit a file directly on GitHub via the API" + end + + def self.details + [ + "Commits a file directly to GitHub. You must provide your GitHub Personal token (get one from [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new)), the repository name and the relative file path from the root git project.", + "Out parameters provide the commit sha created, which can be used for later usage for examples such as releases, the direct download link and the full response JSON.", + "Documentation: [https://developer.github.com/v3/repos/contents/#create-a-file](https://developer.github.com/v3/repos/contents/#create-a-file)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :repository_name, + env_name: "FL_COMMIT_GITHUB_FILE_REPOSITORY_NAME", + description: "The path to your repo, e.g. 'fastlane/fastlane'", + verify_block: proc do |value| + UI.user_error!("Please only pass the path, e.g. 'fastlane/fastlane'") if value.include?("github.com") + UI.user_error!("Please only pass the path, e.g. 'fastlane/fastlane'") if value.split('/').count != 2 + end), + FastlaneCore::ConfigItem.new(key: :server_url, + env_name: "FL_COMMIT_GITHUB_FILE_SERVER_URL", + description: "The server url. e.g. 'https://your.internal.github.host/api/v3' (Default: 'https://api.github.com')", + default_value: "https://api.github.com", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please include the protocol in the server url, e.g. https://your.github.server/api/v3") unless value.include?("//") + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_COMMIT_GITHUB_FILE_API_TOKEN", + description: "Personal API Token for GitHub - generate one at https://github.com/settings/tokens", + conflicting_options: [:api_bearer], + sensitive: true, + code_gen_sensitive: true, + default_value: ENV["GITHUB_API_TOKEN"], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :api_bearer, + env_name: "FL_COMMIT_GITHUB_FILE_API_BEARER", + sensitive: true, + code_gen_sensitive: true, + description: "Use a Bearer authorization token. Usually generated by Github Apps, e.g. 
GitHub Actions GITHUB_TOKEN environment variable", + conflicting_options: [:api_token], + optional: true, + default_value: nil), + FastlaneCore::ConfigItem.new(key: :branch, + env_name: "FL_COMMIT_GITHUB_FILE_BRANCH", + description: "The branch that the file should be committed on (default: master)", + default_value: 'master', + optional: true), + FastlaneCore::ConfigItem.new(key: :path, + env_name: 'FL_COMMIT_GITHUB_FILE_PATH', + description: 'The relative path to your file from project root e.g. assets/my_app.xcarchive', + optional: false, + verify_block: proc do |value| + value = File.expand_path(value) + UI.user_error!("File not found at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_COMMIT_GITHUB_FILE_MESSAGE", + description: "The commit message. Defaults to the file name", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :secure, + env_name: "FL_COMMIT_GITHUB_FILE_SECURE", + description: "Optionally disable secure requests (ssl_verify_peer)", + type: Boolean, + default_value: true, + optional: true) + ] + end + + def self.output + [ + ['COMMIT_GITHUB_FILE_HTML_LINK', 'Link to your committed file'], + ['COMMIT_GITHUB_FILE_SHA', 'Commit SHA generated'], + ['COMMIT_GITHUB_FILE_JSON', 'The whole commit JSON object response'] + ] + end + + def self.return_type + :hash_of_strings + end + + def self.return_value + [ + "A hash containing all relevant information for this commit", + "Access things like 'html_url', 'sha', 'message'" + ].join("\n") + end + + def self.authors + ["tommeier"] + end + + def self.example_code + [ + 'response = commit_github_file( + repository_name: "fastlane/fastlane", + server_url: "https://api.github.com", + api_token: ENV["GITHUB_TOKEN"], + message: "Add my new file", + branch: "master", + path: "assets/my_new_file.xcarchive" + )' + ] + end + + def self.is_supported?(platform) + true + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_version_bump.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_version_bump.rb new file mode 100644 index 0000000..c7271ef --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/commit_version_bump.rb @@ -0,0 +1,300 @@ +require 'pathname' + +module Fastlane + module Actions + module SharedValues + MODIFIED_FILES = :MODIFIED_FILES + end + + class << self + # Add an array of paths relative to the repo root or absolute paths that have been modified by + # an action. + # + # :files: An array of paths relative to the repo root or absolute paths + def add_modified_files(files) + modified_files = lane_context[SharedValues::MODIFIED_FILES] || Set.new + modified_files += files + lane_context[SharedValues::MODIFIED_FILES] = modified_files + end + end + + # Commits the current changes in the repo as a version bump, checking to make sure only files which contain version information have been changed. + class CommitVersionBumpAction < Action + def self.run(params) + require 'xcodeproj' + require 'set' + require 'shellwords' + + xcodeproj_path = params[:xcodeproj] ? 
File.expand_path(File.join('.', params[:xcodeproj])) : nil + + # find the repo root path + repo_path = Actions.sh('git rev-parse --show-toplevel').strip + repo_pathname = Pathname.new(repo_path) + + if xcodeproj_path + # ensure that the xcodeproj passed in was OK + UI.user_error!("Could not find the specified xcodeproj: #{xcodeproj_path}") unless File.directory?(xcodeproj_path) + else + # find an xcodeproj (ignoring dependencies) + xcodeproj_paths = Fastlane::Helper::XcodeprojHelper.find(repo_path) + + # no projects found: error + UI.user_error!('Could not find a .xcodeproj in the current repository\'s working directory.') if xcodeproj_paths.count == 0 + + # too many projects found: error + if xcodeproj_paths.count > 1 + relative_projects = xcodeproj_paths.map { |e| Pathname.new(e).relative_path_from(repo_pathname).to_s }.join("\n") + UI.user_error!("Found multiple .xcodeproj projects in the current repository's working directory. Please specify your app's main project: \n#{relative_projects}") + end + + # one project found: great + xcodeproj_path = xcodeproj_paths.first + end + + # find the pbxproj path, relative to git directory + pbxproj_pathname = Pathname.new(File.join(xcodeproj_path, 'project.pbxproj')) + pbxproj_path = pbxproj_pathname.relative_path_from(repo_pathname).to_s + + # find the info_plist files + project = Xcodeproj::Project.open(xcodeproj_path) + info_plist_files = project.objects.select do |object| + object.isa == 'XCBuildConfiguration' + end.map(&:to_hash).map do |object_hash| + object_hash['buildSettings'] + end.select do |build_settings| + build_settings.key?('INFOPLIST_FILE') + end.map do |build_settings| + build_settings['INFOPLIST_FILE'] + end.uniq.map do |info_plist_path| + Pathname.new(File.expand_path(File.join(xcodeproj_path, '..', info_plist_path))).relative_path_from(repo_pathname).to_s + end + + # Removes .plist files that matched the given expression in the 'ignore' parameter + ignore_expression = params[:ignore] + if ignore_expression + info_plist_files.reject! do |info_plist_file| + info_plist_file.match(ignore_expression) + end + end + + extra_files = params[:include] + extra_files += modified_files_relative_to_repo_root(repo_path) + + # create our list of files that we expect to have changed, they should all be relative to the project root, which should be equal to the git workdir root + expected_changed_files = extra_files + expected_changed_files << pbxproj_path + expected_changed_files << info_plist_files + + if params[:settings] + settings_plists_from_param(params[:settings]).each do |file| + settings_file_pathname = Pathname.new(settings_bundle_file_path(project, file)) + expected_changed_files << settings_file_pathname.relative_path_from(repo_pathname).to_s + end + end + + expected_changed_files.flatten!.uniq! + + # get the list of files that have actually changed in our git workdir + git_dirty_files = Actions.sh('git diff --name-only HEAD').split("\n") + Actions.sh('git ls-files --other --exclude-standard').split("\n") + + # little user hint + UI.user_error!("No file changes picked up. Make sure you run the `increment_build_number` action first.") if git_dirty_files.empty? 
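+        # Editor's note (illustrative): the comparison below is plain Set
+        # algebra, asserting that every dirty file is one we expected to
+        # change, e.g.:
+        #   Set.new(["a.plist"]).subset?(Set.new(["a.plist", "x.pbxproj"])) # => true
+        #   Set.new(["other.txt"]).subset?(Set.new(["a.plist"]))            # => false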
+ + # check if the files changed are the ones we expected to change (these should be only the files that have version info in them) + changed_files_as_expected = Set.new(git_dirty_files.map(&:downcase)).subset?(Set.new(expected_changed_files.map(&:downcase))) + unless changed_files_as_expected + unless params[:force] + error = [ + "Found unexpected uncommitted changes in the working directory. Expected these files to have ", + "changed: \n#{expected_changed_files.join("\n")}.\nBut found these actual changes: ", + "#{git_dirty_files.join("\n")}.\nMake sure you have cleaned up the build artifacts and ", + "are only left with the changed version files at this stage in your lane, and don't touch the ", + "working directory while your lane is running. You can also use the :force option to bypass this ", + "check, and always commit a version bump regardless of the state of the working directory." + ].join("\n") + UI.user_error!(error) + end + end + + # get the absolute paths to the files + git_add_paths = expected_changed_files.map do |path| + updated = path.gsub("$(SRCROOT)", ".").gsub("${SRCROOT}", ".") + File.expand_path(File.join(repo_pathname, updated)) + end + + # then create a commit with a message + Actions.sh("git add #{git_add_paths.map(&:shellescape).join(' ')}") + + begin + command = build_git_command(params) + + Actions.sh(command) + + UI.success("Committed \"#{params[:message]}\" 💾.") + rescue => ex + UI.error(ex) + UI.important("Didn't commit any changes.") + end + end + + def self.description + "Creates a 'Version Bump' commit. Run after `increment_build_number`" + end + + def self.output + [ + ['MODIFIED_FILES', 'The list of paths of modified files'] + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_COMMIT_BUMP_MESSAGE", + description: "The commit message when committing the version bump", + optional: true), + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_BUILD_NUMBER_PROJECT", + description: "The path to your project file (Not the workspace). 
If you have only one, this is optional", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_FORCE_COMMIT", + description: "Forces the commit, even if other files than the ones containing the version number have been modified", + type: Boolean, + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :settings, + env_name: "FL_COMMIT_INCLUDE_SETTINGS", + description: "Include Settings.bundle/Root.plist with version bump", + skip_type_validation: true, # allows Boolean, String, Array + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :ignore, + description: "A regular expression used to filter matched plist files to be modified", + skip_type_validation: true, # allows Regex + optional: true), + FastlaneCore::ConfigItem.new(key: :include, + description: "A list of extra files to be included in the version bump (string array or comma-separated string)", + optional: true, + default_value: [], + type: Array), + FastlaneCore::ConfigItem.new(key: :no_verify, + env_name: "FL_GIT_PUSH_USE_NO_VERIFY", + description: "Whether or not to use --no-verify", + type: Boolean, + default_value: false) + ] + end + + def self.details + list = <<-LIST.markdown_list + All `.plist` files + The `.xcodeproj/project.pbxproj` file + LIST + + [ + "This action will create a 'Version Bump' commit in your repo. Useful in conjunction with `increment_build_number`.", + "It checks the repo to make sure that only the relevant files have changed. These are the files that `increment_build_number` (`agvtool`) touches:".markdown_preserve_newlines, + list, + "Then commits those files to the repo.", + "Customize the message with the `:message` option. It defaults to 'Version Bump'.", + "If you have other uncommitted changes in your repo, this action will fail. If you started off in a clean repo, and used the _ipa_ and or _sigh_ actions, then you can use the [clean_build_artifacts](https://docs.fastlane.tools/actions/clean_build_artifacts/) action to clean those temporary files up before running this action." 
+ ].join("\n") + end + + def self.author + "lmirosevic" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'commit_version_bump', + 'commit_version_bump( + message: "Version Bump", # create a commit with a custom message + xcodeproj: "./path/to/MyProject.xcodeproj" # optional, if you have multiple Xcode project files, you must specify your main project here + )', + 'commit_version_bump( + settings: true # Include Settings.bundle/Root.plist + )', + 'commit_version_bump( + settings: "About.plist" # Include Settings.bundle/About.plist + )', + 'commit_version_bump( + settings: %w[About.plist Root.plist] # Include more than one plist from Settings.bundle + )', + 'commit_version_bump( + include: %w[package.json custom.cfg] # include other updated files as part of the version bump + )', + 'commit_version_bump( + ignore: /OtherProject/ # ignore files matching a regular expression + )', + 'commit_version_bump( + no_verify: true # optional, default: false + )' + ] + end + + def self.category + :source_control + end + + class << self + def settings_plists_from_param(param) + if param.kind_of?(String) + # commit_version_bump settings: "About.plist" + return [param] + elsif param.kind_of?(Array) + # commit_version_bump settings: ["Root.plist", "About.plist"] + return param + else + # commit_version_bump settings: true # Root.plist + return ["Root.plist"] + end + end + + def settings_bundle_file_path(project, settings_file_name) + settings_bundle = project.files.find { |f| f.path =~ /Settings.bundle/ } + raise "No Settings.bundle in project" if settings_bundle.nil? + + return File.join(settings_bundle.real_path, settings_file_name) + end + + def modified_files_relative_to_repo_root(repo_root) + return [] if Actions.lane_context[SharedValues::MODIFIED_FILES].nil? + + root_pathname = Pathname.new(repo_root) + all_modified_files = Actions.lane_context[SharedValues::MODIFIED_FILES].map do |path| + next path unless path =~ %r{^/} + Pathname.new(path).relative_path_from(root_pathname).to_s + end + return all_modified_files.uniq + end + + def build_git_command(params) + build_number = Actions.lane_context[Actions::SharedValues::BUILD_NUMBER] + + params[:message] ||= (build_number ? 
"Version Bump to #{build_number}" : "Version Bump") + + command = [ + 'git', + 'commit', + '-m', + "'#{params[:message]}'" + ] + + command << '--no-verify' if params[:no_verify] + + return command.join(' ') + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/copy_artifacts.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/copy_artifacts.rb new file mode 100644 index 0000000..76ff48a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/copy_artifacts.rb @@ -0,0 +1,113 @@ +require 'fileutils' + +module Fastlane + module Actions + class CopyArtifactsAction < Action + def self.run(params) + # expand the path to make sure we can deal with relative paths + target_path = File.expand_path(params[:target_path]) + + # we want to make sure that our target folder exist already + FileUtils.mkdir_p(target_path) + + # Ensure that artifacts is an array + artifacts_to_search = [params[:artifacts]].flatten + + # If any of the paths include "*", we assume that we are referring to the Unix entries + # e.g /tmp/fastlane/* refers to all the files in /tmp/fastlane + # We use Dir.glob to expand all those paths, this would create an array of arrays though, so flatten + artifacts = artifacts_to_search.flat_map { |f| f.include?("*") ? Dir.glob(f) : f } + + UI.verbose("Copying artifacts #{artifacts.join(', ')} to #{target_path}") + UI.verbose(params[:keep_original] ? "Keeping original files" : "Not keeping original files") + + if params[:fail_on_missing] + missing = artifacts.reject { |a| File.exist?(a) } + UI.user_error!("Not all files were present in copy artifacts. Missing #{missing.join(', ')}") unless missing.empty? + else + # If we don't fail on non-existent files, don't try to copy non-existent files + artifacts.select! { |artifact| File.exist?(artifact) } + end + + if params[:keep_original] + FileUtils.cp_r(artifacts, target_path, remove_destination: true) + else + FileUtils.mv(artifacts, target_path, force: true) + end + + UI.success('Build artifacts successfully copied!') + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Copy and save your build artifacts (useful when you use reset_git_repo)" + end + + def self.details + [ + "This action copies artifacts to a target directory. It's useful if you have a CI that will pick up these artifacts and attach them to the build. Useful e.g. for storing your `.ipa`s, `.dSYM.zip`s, `.mobileprovision`s, `.cert`s.", + "Make sure your `:target_path` is ignored from git, and if you use `reset_git_repo`, make sure the artifacts are added to the exclude list." 
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :keep_original, + description: "Set this to false if you want move, rather than copy, the found artifacts", + type: Boolean, + optional: true, + default_value: true), + FastlaneCore::ConfigItem.new(key: :target_path, + description: "The directory in which you want your artifacts placed", + optional: false, + default_value: 'artifacts'), + FastlaneCore::ConfigItem.new(key: :artifacts, + description: "An array of file patterns of the files/folders you want to preserve", + type: Array, + optional: false, + default_value: []), + FastlaneCore::ConfigItem.new(key: :fail_on_missing, + description: "Fail when a source file isn't found", + type: Boolean, + optional: true, + default_value: false) + ] + end + + def self.authors + ["lmirosevic"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'copy_artifacts( + target_path: "artifacts", + artifacts: ["*.cer", "*.mobileprovision", "*.ipa", "*.dSYM.zip", "path/to/file.txt", "another/path/*.extension"] + ) + + # Reset the git repo to a clean state, but leave our artifacts in place + reset_git_repo( + exclude: "artifacts" + )', + '# Copy the .ipa created by _gym_ if it was successfully created + artifacts = [] + artifacts << lane_context[SharedValues::IPA_OUTPUT_PATH] if lane_context[SharedValues::IPA_OUTPUT_PATH] + copy_artifacts( + artifacts: artifacts + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_on_managed_play_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_on_managed_play_store.rb new file mode 100644 index 0000000..6db1162 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_on_managed_play_store.rb @@ -0,0 +1,168 @@ +require 'google/apis/playcustomapp_v1' +require 'supply' + +module Fastlane + module Actions + class CreateAppOnManagedPlayStoreAction < Action + def self.run(params) + client = PlaycustomappClient.make_from_config(params: params) + + FastlaneCore::PrintTable.print_values( + config: params, + mask_keys: [:json_key_data], + title: "Summary for create_app_on_managed_play_store" + ) + + client.create_app( + app_title: params[:app_title], + language_code: params[:language], + developer_account: params[:developer_account_id], + apk_path: params[:apk] + ) + end + + def self.description + "Create Managed Google Play Apps" + end + + def self.authors + ["janpio"] + end + + def self.return_value + # If your method provides a return value, you can describe here what it does + end + + def self.details + "Create new apps on Managed Google Play." 
+ + def self.example_code + [ + "create_app_on_managed_play_store( + json_key: 'path/to/your/json/key/file', + developer_account_id: 'developer_account_id', # obtained using the `get_managed_play_store_publishing_rights` action (or looking at the Play Console url) + app_title: 'Your app title', + language: 'en_US', # primary app language in BCP 47 format + apk: '/files/app-release.apk' + )" + ] + end + + def self.available_options + [ + # Authorization + FastlaneCore::ConfigItem.new(key: :json_key, + env_name: "SUPPLY_JSON_KEY", + short_option: "-j", + conflicting_options: [:json_key_data], + optional: true, # optional until it is possible to require that either json_key OR json_key_data be specified + description: "The path to a file containing service account JSON, used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_file), + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Could not find service account json file at path '#{File.expand_path(value)}'") unless File.exist?(File.expand_path(value)) + UI.user_error!("'#{value}' doesn't seem to be a JSON file") unless FastlaneCore::Helper.json_file?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :json_key_data, + env_name: "SUPPLY_JSON_KEY_DATA", + short_option: "-c", + conflicting_options: [:json_key], + optional: true, + description: "The raw service account JSON data used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_data_raw), + default_value_dynamic: true, + verify_block: proc do |value| + begin + JSON.parse(value) + rescue JSON::ParserError + UI.user_error!("Could not parse service account json: JSON::ParseError") + end + end), + FastlaneCore::ConfigItem.new(key: :developer_account_id, + short_option: "-k", + env_name: "SUPPLY_DEVELOPER_ACCOUNT_ID", + description: "The ID of your Google Play Console account. Can be obtained from the URL when you log in (`https://play.google.com/apps/publish/?account=...`) or when you 'Obtain private app publishing rights' (https://developers.google.com/android/work/play/custom-app-api/get-started#retrieve_the_developer_account_id)", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:developer_account_id), + default_value_dynamic: true), + # APK + FastlaneCore::ConfigItem.new(key: :apk, + env_name: "SUPPLY_APK", + description: "Path to the APK file to upload", + short_option: "-b", + code_gen_sensitive: true, + default_value: Dir["*.apk"].last || Dir[File.join("app", "build", "outputs", "apk", "app-release.apk")].last, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("No value found for 'apk'") if value.to_s.length == 0 + UI.user_error!("Could not find apk file at path '#{value}'") unless File.exist?(value) + UI.user_error!("apk file is not an apk") unless value.end_with?('.apk') + end), + # Title + FastlaneCore::ConfigItem.new(key: :app_title, + env_name: "SUPPLY_APP_TITLE", + short_option: "-q", + description: "App Title"), + # Language + FastlaneCore::ConfigItem.new(key: :language, + short_option: "-m", + env_name: "SUPPLY_LANGUAGE", + description: "Default app language (e.g.
'en_US')", + default_value: "en_US", + verify_block: proc do |language| + unless Supply::Languages::ALL_LANGUAGES.include?(language) + UI.user_error!("Please enter one of the available languages: #{Supply::Languages::ALL_LANGUAGES}") + end + end), + # Google Play API + FastlaneCore::ConfigItem.new(key: :root_url, + env_name: "SUPPLY_ROOT_URL", + description: "Root URL for the Google Play API. The provided URL will be used for API calls in place of https://www.googleapis.com/", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not parse URL '#{value}'") unless value =~ URI.regexp + end), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "SUPPLY_TIMEOUT", + optional: true, + description: "Timeout for read, open, and send (in seconds)", + type: Integer, + default_value: 300) + ] + end + + def self.is_supported?(platform) + [:android].include?(platform) + end + + def self.category + :misc + end + end + end +end + +require 'supply/client' +class PlaycustomappClient < Supply::AbstractGoogleServiceClient + SERVICE = Google::Apis::PlaycustomappV1::PlaycustomappService + SCOPE = Google::Apis::PlaycustomappV1::AUTH_ANDROIDPUBLISHER + + ##################################################### + # @!group Create + ##################################################### + + def create_app(app_title: nil, language_code: nil, developer_account: nil, apk_path: nil) + custom_app = Google::Apis::PlaycustomappV1::CustomApp.new(title: app_title, language_code: language_code) + + call_google_api do + client.create_account_custom_app( + developer_account, + custom_app, + upload_source: apk_path + ) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_online.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_online.rb new file mode 100644 index 0000000..5b68bbb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_app_online.rb @@ -0,0 +1,75 @@ +module Fastlane + module Actions + module SharedValues + PRODUCE_APPLE_ID = :PRODUCE_APPLE_ID + end + + class CreateAppOnlineAction < Action + def self.run(params) + require 'produce' + + return if Helper.test? + + Produce.config = params # we alread have the finished config + + Dir.chdir(FastlaneCore::FastlaneFolder.path || Dir.pwd) do + # This should be executed in the fastlane folder + apple_id = Produce::Manager.start_producing.to_s + + Actions.lane_context[SharedValues::PRODUCE_APPLE_ID] = apple_id + ENV['PRODUCE_APPLE_ID'] = apple_id + end + end + + def self.description + "Creates the given application on iTC and the Dev Portal (via _produce_)" + end + + def self.details + [ + "Create new apps on App Store Connect and Apple Developer Portal via _produce_.", + "If the app already exists, `create_app_online` will not do anything.", + "For more information about _produce_, visit its documentation page: [https://docs.fastlane.tools/actions/produce/](https://docs.fastlane.tools/actions/produce/)." + ].join("\n") + end + + def self.available_options + require 'produce' + Produce::Options.available_options + end + + def self.output + [ + ['PRODUCE_APPLE_ID', 'The Apple ID of the newly created app. 
You probably need it for `deliver`'] + ] + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'create_app_online( + username: "felix@krausefx.com", + app_identifier: "com.krausefx.app", + app_name: "MyApp", + language: "English", + app_version: "1.0", + sku: "123", + team_name: "SunApps GmbH" # Only necessary when in multiple teams. + )', + 'produce # alias for "create_app_online"' + ] + end + + def self.category + :app_store_connect + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_keychain.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_keychain.rb new file mode 100644 index 0000000..fd33a45 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_keychain.rb @@ -0,0 +1,179 @@ +require 'shellwords' + +module Fastlane + module Actions + module SharedValues + ORIGINAL_DEFAULT_KEYCHAIN = :ORIGINAL_DEFAULT_KEYCHAIN + KEYCHAIN_PATH = :KEYCHAIN_PATH + end + + class CreateKeychainAction < Action + def self.run(params) + escaped_password = params[:password].shellescape + + if params[:name] + escaped_name = params[:name].shellescape + keychain_path = "~/Library/Keychains/#{escaped_name}" + else + keychain_path = params[:path].shellescape + end + + if keychain_path.nil? + UI.user_error!("You either have to set :name or :path") + end + + commands = [] + + if !exists?(keychain_path) + commands << Fastlane::Actions.sh("security create-keychain -p #{escaped_password} #{keychain_path}", log: false) + elsif params[:require_create] + UI.abort_with_message!("`require_create` option passed, but found keychain '#{keychain_path}', failing create_keychain action") + else + UI.important("Found keychain '#{keychain_path}', creation skipped") + UI.important("If creating a new Keychain DB is required please set the `require_create` option true to cause the action to fail") + end + + Actions.lane_context[Actions::SharedValues::KEYCHAIN_PATH] = keychain_path + + if params[:default_keychain] + # if there is no default keychain - setting the original will fail - silent this error + begin + Actions.lane_context[Actions::SharedValues::ORIGINAL_DEFAULT_KEYCHAIN] = Fastlane::Actions.sh("security default-keychain", log: false).strip + rescue + end + commands << Fastlane::Actions.sh("security default-keychain -s #{keychain_path}", log: false) + end + + commands << Fastlane::Actions.sh("security unlock-keychain -p #{escaped_password} #{keychain_path}", log: false) if params[:unlock] + + command = "security set-keychain-settings" + + # https://ss64.com/osx/security-keychain-settings.html + # omitting 'timeout' option to specify "no timeout" if required + command << " -t #{params[:timeout]}" if params[:timeout] > 0 + command << " -l" if params[:lock_when_sleeps] + command << " -u" if params[:lock_after_timeout] + command << " #{keychain_path}" + + commands << Fastlane::Actions.sh(command, log: false) + + if params[:add_to_search_list] + keychains = list_keychains + expanded_path = resolved_keychain_path(keychain_path) + if keychains.include?(expanded_path) + UI.important("Found keychain '#{expanded_path}' in list-keychains, adding to search list skipped") + else + keychains << expanded_path + commands << Fastlane::Actions.sh("security list-keychains -s #{keychains.shelljoin}", log: false) + end + end + + commands + end + + def self.list_keychains + Action.sh("security list-keychains -d 
user").shellsplit + end + + def self.exists?(keychain_path) + !resolved_keychain_path(keychain_path).nil? + end + + # returns the expanded and resolved path for the keychain, or nil if not found + def self.resolved_keychain_path(keychain_path) + keychain_path = File.expand_path(keychain_path) + + # Creating Keychains using the security + # CLI appends `-db` to the file name. + ["#{keychain_path}-db", keychain_path].each do |path| + return path if File.exist?(path) + end + nil + end + + def self.description + "Create a new Keychain" + end + + def self.output + [ + ['ORIGINAL_DEFAULT_KEYCHAIN', 'The path to the default keychain'], + ['KEYCHAIN_PATH', 'The path of the keychain'] + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :name, + env_name: "KEYCHAIN_NAME", + description: "Keychain name", + conflicting_options: [:path], + optional: true), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "KEYCHAIN_PATH", + description: "Path to keychain", + conflicting_options: [:name], + optional: true), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "KEYCHAIN_PASSWORD", + description: "Password for the keychain", + sensitive: true, + code_gen_sensitive: true, + optional: false), + FastlaneCore::ConfigItem.new(key: :default_keychain, + description: 'Should the newly created Keychain be the new system default keychain', + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :unlock, + description: 'Unlock keychain after create', + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :timeout, + description: 'timeout interval in seconds. Set `0` if you want to specify "no time-out"', + type: Integer, + default_value: 300), + FastlaneCore::ConfigItem.new(key: :lock_when_sleeps, + description: 'Lock keychain when the system sleeps', + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :lock_after_timeout, + description: 'Lock keychain after timeout interval', + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :add_to_search_list, + description: 'Add keychain to search list', + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :require_create, + description: 'Fail the action if the Keychain already exists', + type: Boolean, + default_value: false) + ] + end + + def self.authors + ["gin0606"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'create_keychain( + name: "KeychainName", + default_keychain: true, + unlock: true, + timeout: 3600, + lock_when_sleeps: true + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_pull_request.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_pull_request.rb new file mode 100644 index 0000000..9070e21 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_pull_request.rb @@ -0,0 +1,271 @@ +module Fastlane + module Actions + module SharedValues + CREATE_PULL_REQUEST_HTML_URL = :CREATE_PULL_REQUEST_HTML_URL + CREATE_PULL_REQUEST_NUMBER = :CREATE_PULL_REQUEST_NUMBER + end + + class CreatePullRequestAction < Action + def self.run(params) + UI.message("Creating new pull request from '#{params[:head]}' to branch '#{params[:base]}' of '#{params[:repo]}'") + + payload = { + 'title' => params[:title], + 'head' => params[:head], + 'base' => params[:base] + } + payload['body'] = 
params[:body] if params[:body] + payload['draft'] = params[:draft] if params[:draft] + + GithubApiAction.run( + server_url: params[:api_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'POST', + path: "repos/#{params[:repo]}/pulls", + body: payload, + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}: #{result[:body]}") + return nil + end + } + ) do |result| + json = result[:json] + number = json['number'] + html_url = json['html_url'] + UI.success("Successfully created pull request ##{number}. You can see it at '#{html_url}'") + + # Add labels to pull request + add_labels(params, number) if params[:labels] + + # Add assignees to pull request + add_assignees(params, number) if params[:assignees] + + # Add reviewers to pull request + add_reviewers(params, number) if params[:reviewers] || params[:team_reviewers] + + # Add a milestone to pull request + add_milestone(params, number) if params[:milestone] + + Actions.lane_context[SharedValues::CREATE_PULL_REQUEST_HTML_URL] = html_url + Actions.lane_context[SharedValues::CREATE_PULL_REQUEST_NUMBER] = number + return html_url + end + end + + def self.add_labels(params, number) + payload = { + 'labels' => params[:labels] + } + GithubApiAction.run( + server_url: params[:api_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'PATCH', + path: "repos/#{params[:repo]}/issues/#{number}", + body: payload, + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}: #{result[:body]}") + return nil + end + } + ) + end + + def self.add_assignees(params, number) + payload = { + 'assignees' => params[:assignees] + } + GithubApiAction.run( + server_url: params[:api_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'POST', + path: "repos/#{params[:repo]}/issues/#{number}/assignees", + body: payload, + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}: #{result[:body]}") + return nil + end + } + ) + end + + def self.add_reviewers(params, number) + payload = {} + if params[:reviewers] + payload["reviewers"] = params[:reviewers] + end + + if params[:team_reviewers] + payload["team_reviewers"] = params[:team_reviewers] + end + GithubApiAction.run( + server_url: params[:api_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'POST', + path: "repos/#{params[:repo]}/pulls/#{number}/requested_reviewers", + body: payload, + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}: #{result[:body]}") + return nil + end + } + ) + end + + def self.add_milestone(params, number) + payload = {} + if params[:milestone] + payload["milestone"] = params[:milestone] + end + + GithubApiAction.run( + server_url: params[:api_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'PATCH', + path: "repos/#{params[:repo]}/issues/#{number}", + body: payload, + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}: #{result[:body]}") + return nil + end + } + ) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "This will create a new pull request on GitHub" + end + + def self.output + [ + ['CREATE_PULL_REQUEST_HTML_URL', 'The HTML URL to the created pull request'], + 
['CREATE_PULL_REQUEST_NUMBER', 'The identifier number of the created pull request'] + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "GITHUB_PULL_REQUEST_API_TOKEN", + description: "Personal API Token for GitHub - generate one at https://github.com/settings/tokens", + sensitive: true, + code_gen_sensitive: true, + default_value: ENV["GITHUB_API_TOKEN"], + default_value_dynamic: true, + conflicting_options: [:api_bearer], + optional: true), + FastlaneCore::ConfigItem.new(key: :api_bearer, + env_name: "GITHUB_PULL_REQUEST_API_BEARER", + description: "Use a Bearer authorization token. Usually generated by Github Apps, e.g. GitHub Actions GITHUB_TOKEN environment variable", + sensitive: true, + code_gen_sensitive: true, + conflicting_options: [:api_token], + optional: true, + default_value: nil), + FastlaneCore::ConfigItem.new(key: :repo, + env_name: "GITHUB_PULL_REQUEST_REPO", + description: "The name of the repository you want to submit the pull request to", + optional: false), + FastlaneCore::ConfigItem.new(key: :title, + env_name: "GITHUB_PULL_REQUEST_TITLE", + description: "The title of the pull request", + optional: false), + FastlaneCore::ConfigItem.new(key: :body, + env_name: "GITHUB_PULL_REQUEST_BODY", + description: "The contents of the pull request", + optional: true), + FastlaneCore::ConfigItem.new(key: :draft, + env_name: "GITHUB_PULL_REQUEST_DRAFT", + description: "Indicates whether the pull request is a draft", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :labels, + env_name: "GITHUB_PULL_REQUEST_LABELS", + description: "The labels for the pull request", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :milestone, + env_name: "GITHUB_PULL_REQUEST_MILESTONE", + description: "The milestone ID (Integer) for the pull request", + type: Numeric, + optional: true), + FastlaneCore::ConfigItem.new(key: :head, + env_name: "GITHUB_PULL_REQUEST_HEAD", + description: "The name of the branch where your changes are implemented (defaults to the current branch name)", + default_value: Actions.git_branch, + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :base, + env_name: "GITHUB_PULL_REQUEST_BASE", + description: "The name of the branch you want your changes pulled into (defaults to `master`)", + default_value: 'master', + optional: true), + FastlaneCore::ConfigItem.new(key: :api_url, + env_name: "GITHUB_PULL_REQUEST_API_URL", + description: "The URL of the GitHub API - used for GitHub Enterprise (defaults to `https://api.github.com`)", + code_gen_default_value: 'https://api.github.com', + default_value: 'https://api.github.com', + optional: true), + FastlaneCore::ConfigItem.new(key: :assignees, + env_name: "GITHUB_PULL_REQUEST_ASSIGNEES", + description: "The assignees for the pull request", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :reviewers, + env_name: "GITHUB_PULL_REQUEST_REVIEWERS", + description: "The reviewers (slug) for the pull request", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :team_reviewers, + env_name: "GITHUB_PULL_REQUEST_TEAM_REVIEWERS", + description: "The team reviewers (slug) for the pull request", + type: Array, + optional: true) + ] + end + + def self.author + ["seei", "tommeier", "marumemomo", "elneruda", "kagemiku"] + end + + def self.is_supported?(platform) + return true + end + + def self.return_value + "The pull request URL when successful" + end + + def self.example_code + [ +
'create_pull_request( + api_token: "secret", # optional, defaults to ENV["GITHUB_API_TOKEN"] + repo: "fastlane/fastlane", + title: "Amazing new feature", + head: "my-feature", # optional, defaults to current branch name + base: "master", # optional, defaults to "master" + body: "Please pull this in!", # optional + api_url: "http://yourdomain/api/v3" # optional, for GitHub Enterprise, defaults to "https://api.github.com" + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_xcframework.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_xcframework.rb new file mode 100644 index 0000000..6c9f031 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/create_xcframework.rb @@ -0,0 +1,203 @@ +module Fastlane + module Actions + module SharedValues + XCFRAMEWORK_PATH ||= :XCFRAMEWORK_PATH + end + + class CreateXcframeworkAction < Action + PARAMETERS_TO_OPTIONS = { headers: '-headers', dsyms: '-debug-symbols' } + + def self.run(params) + artifacts = normalized_artifact_info(params[:frameworks], [:dsyms]) || + normalized_artifact_info(params[:frameworks_with_dsyms], [:dsyms]) || + normalized_artifact_info(params[:libraries], [:headers, :dsyms]) || + normalized_artifact_info(params[:libraries_with_headers_or_dsyms], [:headers, :dsyms]) + + UI.user_error!("Please provide either :frameworks, :frameworks_with_dsyms, :libraries or :libraries_with_headers_or_dsyms to be packaged into the xcframework") unless artifacts + + artifacts_type = params[:frameworks] || params[:frameworks_with_dsyms] ? '-framework' : '-library' + create_command = ['xcodebuild', '-create-xcframework'] + create_command << artifacts.map { |artifact, artifact_info| [artifacts_type, "\"#{artifact}\""] + artifact_info_as_options(artifact_info) }.flatten + create_command << ['-output', "\"#{params[:output]}\""] + create_command << ['-allow-internal-distribution'] if params[:allow_internal_distribution] + + if File.directory?(params[:output]) + UI.message("Deleting existing: #{params[:output]}") + FileUtils.remove_dir(params[:output]) + end + + Actions.lane_context[SharedValues::XCFRAMEWORK_PATH] = params[:output] + + sh(create_command) + end + + def self.normalized_artifact_info(artifacts_with_info, valid_info) + case artifacts_with_info + when Array + artifacts_with_info.map { |artifact| [artifact, {}] }.to_h + when Hash + # Convert keys of artifact info to symbols ('dsyms' to :dsyms) and only keep keys we are interested in + # For example with valid_info = [:dsyms] + # { 'FrameworkA.framework' => { 'dsyms' => 'FrameworkA.framework.dSYM', 'foo' => bar } } + # gets converted to + # { 'FrameworkA.framework' => { dsyms: 'FrameworkA.framework.dSYM' } } + artifacts_with_info.transform_values { |artifact_info| artifact_info.transform_keys(&:to_sym).slice(*valid_info) } + else + artifacts_with_info + end + end + + def self.artifact_info_as_options(artifact_info) + artifact_info.map { |type, file| [PARAMETERS_TO_OPTIONS[type], "\"#{file}\""] }.flatten + end + + def self.check_artifact_info(artifact_info) + UI.user_error!("Headers and dSYMs information should be a hash") unless artifact_info.kind_of?(Hash) + UI.user_error!("#{artifact_info[:headers]} doesn't exist or is not a directory") if artifact_info[:headers] && !File.directory?(artifact_info[:headers]) + UI.user_error!("#{artifact_info[:dsyms]} doesn't seem to be a dSYM archive") if 
artifact_info[:dsyms] && !File.directory?(artifact_info[:dsyms]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Package multiple build configs of a library/framework into a single xcframework" + end + + def self.details + <<~DETAILS + Utility for packaging multiple build configurations of a given library + or framework into a single xcframework. + + If you want to package several frameworks just provide one of: + + * An array containing the list of frameworks using the :frameworks parameter + (if they have no associated dSYMs): + ['FrameworkA.framework', 'FrameworkB.framework'] + + * A hash containing the list of frameworks with their dSYMs using the + :frameworks_with_dsyms parameter: + { + 'FrameworkA.framework' => {}, + 'FrameworkB.framework' => { dsyms: 'FrameworkB.framework.dSYM' } + } + + If you want to package several libraries just provide one of: + + * An array containing the list of libraries using the :libraries parameter + (if they have no associated headers or dSYMs): + ['LibraryA.so', 'LibraryB.so'] + + * A hash containing the list of libraries with their headers and dSYMs + using the :libraries_with_headers_or_dsyms parameter: + { + 'LibraryA.so' => { dsyms: 'libraryA.so.dSYM' }, + 'LibraryB.so' => { headers: 'headers' } + } + + Finally specify the location of the xcframework to be generated using the :output + parameter. + DETAILS + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :frameworks, + env_name: "FL_CREATE_XCFRAMEWORK_FRAMEWORKS", + description: "Frameworks (without dSYMs) to add to the target xcframework", + type: Array, + optional: true, + conflicting_options: [:frameworks_with_dsyms, :libraries, :libraries_with_headers_or_dsyms], + verify_block: proc do |value| + normalized_artifact_info(value, [:dsyms]).each do |framework, framework_info| + UI.user_error!("#{framework} doesn't end with '.framework'. Is this really a framework?") unless framework.end_with?('.framework') + UI.user_error!("Couldn't find framework at #{framework}") unless File.exist?(framework) + UI.user_error!("#{framework} doesn't seem to be a framework") unless File.directory?(framework) + check_artifact_info(framework_info) + end + end), + FastlaneCore::ConfigItem.new(key: :frameworks_with_dsyms, + env_name: "FL_CREATE_XCFRAMEWORK_FRAMEWORKS_WITH_DSYMS", + description: "Frameworks (with dSYMs) to add to the target xcframework", + type: Hash, + optional: true, + conflicting_options: [:frameworks, :libraries, :libraries_with_headers_or_dsyms], + verify_block: proc do |value| + normalized_artifact_info(value, [:dsyms]).each do |framework, framework_info| + UI.user_error!("#{framework} doesn't end with '.framework'. 
Is this really a framework?") unless framework.end_with?('.framework') + UI.user_error!("Couldn't find framework at #{framework}") unless File.exist?(framework) + UI.user_error!("#{framework} doesn't seem to be a framework") unless File.directory?(framework) + check_artifact_info(framework_info) + end + end), + FastlaneCore::ConfigItem.new(key: :libraries, + env_name: "FL_CREATE_XCFRAMEWORK_LIBRARIES", + description: "Libraries (without headers or dSYMs) to add to the target xcframework", + type: Array, + optional: true, + conflicting_options: [:frameworks, :frameworks_with_dsyms, :libraries_with_headers_or_dsyms], + verify_block: proc do |value| + normalized_artifact_info(value, [:headers, :dsyms]).each do |library, library_info| + UI.user_error!("Couldn't find library at #{library}") unless File.exist?(library) + check_artifact_info(library_info) + end + end), + FastlaneCore::ConfigItem.new(key: :libraries_with_headers_or_dsyms, + env_name: "FL_CREATE_XCFRAMEWORK_LIBRARIES_WITH_HEADERS_OR_DSYMS", + description: "Libraries (with headers or dSYMs) to add to the target xcframework", + type: Hash, + optional: true, + conflicting_options: [:frameworks, :frameworks_with_dsyms, :libraries], + verify_block: proc do |value| + normalized_artifact_info(value, [:headers, :dsyms]).each do |library, library_info| + UI.user_error!("Couldn't find library at #{library}") unless File.exist?(library) + check_artifact_info(library_info) + end + end), + FastlaneCore::ConfigItem.new(key: :output, + env_name: "FL_CREATE_XCFRAMEWORK_OUTPUT", + description: "The path to write the xcframework to", + type: String, + optional: false), + FastlaneCore::ConfigItem.new(key: :allow_internal_distribution, + env_name: "FL_CREATE_XCFRAMEWORK_ALLOW_INTERNAL_DISTRIBUTION", + description: "Specifies that the created xcframework contains information not suitable for public distribution", + type: Boolean, + optional: true, + default_value: false) + ] + end + + def self.output + [ + ['XCFRAMEWORK_PATH', 'Location of the generated xcframework'] + ] + end + + def self.return_value + end + + def self.example_code + [ + "create_xcframework(frameworks: ['FrameworkA.framework', 'FrameworkB.framework'], output: 'UniversalFramework.xcframework')", + "create_xcframework(frameworks_with_dsyms: {'FrameworkA.framework' => {}, 'FrameworkB.framework' => { dsyms: 'FrameworkB.framework.dSYM' } }, output: 'UniversalFramework.xcframework')", + "create_xcframework(libraries: ['LibraryA.so', 'LibraryB.so'], output: 'UniversalFramework.xcframework')", + "create_xcframework(libraries_with_headers_or_dsyms: { 'LibraryA.so' => { dsyms: 'libraryA.so.dSYM' }, 'LibraryB.so' => { headers: 'LibraryBHeaders' } }, output: 'UniversalFramework.xcframework')" + ] + end + + def self.category + :building + end + + def self.authors + ["jgongo"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/danger.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/danger.rb new file mode 100644 index 0000000..80f9a72 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/danger.rb @@ -0,0 +1,146 @@ +module Fastlane + module Actions + class DangerAction < Action + def self.run(params) + Actions.verify_gem!('danger') + cmd = [] + + cmd << 'bundle exec' if params[:use_bundle_exec] && shell_out_should_use_bundle_exec? 
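To make the flag assembly in this `run` method concrete, here is what the finished invocation looks like for one assumed set of parameters (a standalone sketch, not part of the vendored source):

```ruby
# Assumed parameter values for illustration only.
params = { use_bundle_exec: true, verbose: true,
           danger_id: "unit-tests", dangerfile: "tests/MyOtherDangerFile" }

cmd = []
cmd << "bundle exec" if params[:use_bundle_exec]
cmd << "danger"
cmd << "--verbose" if params[:verbose]
cmd << "--danger_id=#{params[:danger_id]}" if params[:danger_id]
cmd << "--dangerfile=#{params[:dangerfile]}" if params[:dangerfile]

puts cmd.join(" ")
# => bundle exec danger --verbose --danger_id=unit-tests --dangerfile=tests/MyOtherDangerFile
```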
+ cmd << 'danger' + cmd << '--verbose' if params[:verbose] + + danger_id = params[:danger_id] + dangerfile = params[:dangerfile] + base = params[:base] + head = params[:head] + pr = params[:pr] + cmd << "--danger_id=#{danger_id}" if danger_id + cmd << "--dangerfile=#{dangerfile}" if dangerfile + cmd << "--fail-on-errors=true" if params[:fail_on_errors] + cmd << "--fail-if-no-pr=true" if params[:fail_if_no_pr] + cmd << "--new-comment" if params[:new_comment] + cmd << "--remove-previous-comments" if params[:remove_previous_comments] + cmd << "--base=#{base}" if base + cmd << "--head=#{head}" if head + cmd << "pr #{pr}" if pr + + ENV['DANGER_GITHUB_API_TOKEN'] = params[:github_api_token] if params[:github_api_token] + ENV['DANGER_GITHUB_HOST'] = params[:github_enterprise_host] if params[:github_enterprise_host] + ENV['DANGER_GITHUB_API_BASE_URL'] = params[:github_enterprise_api_base_url] if params[:github_enterprise_api_base_url] + + Actions.sh(cmd.join(' ')) + end + + def self.description + "Runs `danger` for the project" + end + + def self.details + [ + "Formalize your Pull Request etiquette.", + "More information: [https://github.com/danger/danger](https://github.com/danger/danger)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + env_name: "FL_DANGER_USE_BUNDLE_EXEC", + description: "Use bundle exec when a Gemfile is present", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_DANGER_VERBOSE", + description: "Show more debugging information", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :danger_id, + env_name: "FL_DANGER_ID", + description: "The identifier of this Danger instance", + optional: true), + FastlaneCore::ConfigItem.new(key: :dangerfile, + env_name: "FL_DANGER_DANGERFILE", + description: "The location of your Dangerfile", + optional: true), + FastlaneCore::ConfigItem.new(key: :github_api_token, + env_name: "FL_DANGER_GITHUB_API_TOKEN", + description: "GitHub API token for danger", + sensitive: true, + code_gen_sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :github_enterprise_host, + env_name: "FL_DANGER_GITHUB_ENTERPRISE_HOST", + description: "GitHub host URL for GitHub Enterprise", + sensitive: true, + code_gen_sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :github_enterprise_api_base_url, + env_name: "FL_DANGER_GITHUB_ENTERPRISE_API_BASE_URL", + description: "GitHub API base URL for GitHub Enterprise", + sensitive: true, + code_gen_sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :fail_on_errors, + env_name: "FL_DANGER_FAIL_ON_ERRORS", + description: "Should always fail the build process, defaults to false", + type: Boolean, + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :new_comment, + env_name: "FL_DANGER_NEW_COMMENT", + description: "Makes Danger post a new comment instead of editing its previous one", + type: Boolean, + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :remove_previous_comments, + env_name: "FL_DANGER_REMOVE_PREVIOUS_COMMENT", + description: "Makes Danger remove all previous comments and create a new one at the end of the list", + type: Boolean, + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :base, + env_name: "FL_DANGER_BASE", + description: "A branch/tag/commit to use as the base of the diff. 
[master|dev|stable]", + optional: true), + FastlaneCore::ConfigItem.new(key: :head, + env_name: "FL_DANGER_HEAD", + description: "A branch/tag/commit to use as the head. [master|dev|stable]", + optional: true), + FastlaneCore::ConfigItem.new(key: :pr, + env_name: "FL_DANGER_PR", + description: "Run danger on a specific pull request. e.g. \"https://github.com/danger/danger/pull/518\"", + optional: true), + FastlaneCore::ConfigItem.new(key: :fail_if_no_pr, + env_name: "FL_DANGER_FAIL_IF_NO_PR", + description: "Fail Danger execution if no PR is found", + type: Boolean, + default_value: false) + ] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'danger', + 'danger( + danger_id: "unit-tests", + dangerfile: "tests/MyOtherDangerFile", + github_api_token: ENV["GITHUB_API_TOKEN"], + verbose: true + )' + ] + end + + def self.category + :misc + end + + def self.authors + ["KrauseFx"] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/debug.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/debug.rb new file mode 100644 index 0000000..41a19f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/debug.rb @@ -0,0 +1,32 @@ +module Fastlane + module Actions + class DebugAction < Action + def self.run(params) + puts("Lane Context".green) + puts(Actions.lane_context) + end + + def self.description + "Print out an overview of the lane context values" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'debug' + ] + end + + def self.category + :misc + end + + def self.author + "KrauseFx" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/default_platform.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/default_platform.rb new file mode 100644 index 0000000..9e5fc76 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/default_platform.rb @@ -0,0 +1,47 @@ +module Fastlane + module Actions + module SharedValues + DEFAULT_PLATFORM = :DEFAULT_PLATFORM + end + + class DefaultPlatformAction < Action + def self.run(params) + UI.user_error!("You forgot to pass the default platform") if params.first.nil? 
+ + platform = params.first.to_sym + + SupportedPlatforms.verify!(platform) + + Actions.lane_context[SharedValues::DEFAULT_PLATFORM] = platform + end + + def self.description + "Defines a default platform to not have to specify the platform" + end + + def self.output + [ + ['DEFAULT_PLATFORM', 'The default platform'] + ] + end + + def self.example_code + [ + 'default_platform(:android)' + ] + end + + def self.category + :misc + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/delete_keychain.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/delete_keychain.rb new file mode 100644 index 0000000..252d0eb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/delete_keychain.rb @@ -0,0 +1,68 @@ +require 'shellwords' + +module Fastlane + module Actions + class DeleteKeychainAction < Action + def self.run(params) + original = Actions.lane_context[Actions::SharedValues::ORIGINAL_DEFAULT_KEYCHAIN] + + if params[:keychain_path] + if File.exist?(params[:keychain_path]) + keychain_path = params[:keychain_path] + else + UI.user_error!("Unable to find the specified keychain.") + end + elsif params[:name] + keychain_path = FastlaneCore::Helper.keychain_path(params[:name]) + else + UI.user_error!("You either have to set :name or :keychain_path") + end + + Fastlane::Actions.sh("security default-keychain -s #{original}", log: false) unless original.nil? + Fastlane::Actions.sh("security delete-keychain #{keychain_path.shellescape}", log: false) + end + + def self.details + "Keychains can be deleted after being created with `create_keychain`" + end + + def self.description + "Delete keychains and remove them from the search list" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :name, + env_name: "KEYCHAIN_NAME", + description: "Keychain name", + conflicting_options: [:keychain_path], + optional: true), + FastlaneCore::ConfigItem.new(key: :keychain_path, + env_name: "KEYCHAIN_PATH", + description: "Keychain path", + conflicting_options: [:name], + optional: true) + ] + end + + def self.example_code + [ + 'delete_keychain(name: "KeychainName")', + 'delete_keychain(keychain_path: "/keychains/project.keychain")' + ] + end + + def self.category + :misc + end + + def self.authors + ["gin0606", "koenpunt"] + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deliver.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deliver.rb new file mode 100644 index 0000000..1aa9552 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deliver.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/upload_to_app_store' + class DeliverAction < UploadToAppStoreAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `upload_to_app_store` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deploygate.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deploygate.rb new file mode 100644 index 0000000..af57d59 --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/deploygate.rb @@ -0,0 +1,209 @@ +module Fastlane + module Actions + module SharedValues + DEPLOYGATE_URL = :DEPLOYGATE_URL + DEPLOYGATE_REVISION = :DEPLOYGATE_REVISION # auto increment revision number + DEPLOYGATE_APP_INFO = :DEPLOYGATE_APP_INFO # contains app revision, bundle identifier, etc. + end + + class DeploygateAction < Action + DEPLOYGATE_URL_BASE = 'https://deploygate.com' + + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + + def self.upload_build(api_token, user_name, binary, options) + require 'faraday' + require 'faraday_middleware' + + connection = Faraday.new(url: DEPLOYGATE_URL_BASE, request: { timeout: 120 }) do |builder| + builder.request(:multipart) + builder.request(:json) + builder.response(:json, content_type: /\bjson$/) + builder.use(FaradayMiddleware::FollowRedirects) + builder.adapter(:net_http) + end + + options.update({ + token: api_token, + file: Faraday::UploadIO.new(binary, 'application/octet-stream'), + message: options[:message] || '' + }) + options[:disable_notify] = 'yes' if options[:disable_notify] + + connection.post("/api/users/#{user_name}/apps", options) + rescue Faraday::TimeoutError + UI.crash!("Timed out while uploading build. Check https://deploygate.com/ to see if the upload was completed.") + end + + def self.run(options) + # Available options: https://deploygate.com/docs/api + UI.success('Starting with app upload to DeployGate... this could take some time ⏳') + + api_token = options[:api_token] + user_name = options[:user] + binary = options[:ipa] || options[:apk] + upload_options = options.values.select do |key, _| + [:message, :distribution_key, :release_note, :disable_notify, :distribution_name].include?(key) + end + + UI.user_error!('missing `ipa` and `apk`. The deploygate action needs at least one.') unless binary + + return binary if Helper.test? + + response = self.upload_build(api_token, user_name, binary, upload_options) + if parse_response(response) + UI.message("DeployGate URL: #{Actions.lane_context[SharedValues::DEPLOYGATE_URL]}") + UI.success("Build successfully uploaded to DeployGate as revision \##{Actions.lane_context[SharedValues::DEPLOYGATE_REVISION]}!") + else + UI.user_error!("Error when trying to upload app to DeployGate") + end + end + + def self.parse_response(response) + if response.body && response.body.key?('error') + + if response.body['error'] + UI.error("Error uploading to DeployGate: #{response.body['message']}") + help_message(response) + return + else + res = response.body['results'] + url = DEPLOYGATE_URL_BASE + res['path'] + + Actions.lane_context[SharedValues::DEPLOYGATE_URL] = url + Actions.lane_context[SharedValues::DEPLOYGATE_REVISION] = res['revision'] + Actions.lane_context[SharedValues::DEPLOYGATE_APP_INFO] = res + end + else + UI.error("Error uploading to DeployGate: #{response.body}") + return + end + true + end + private_class_method :parse_response + + def self.help_message(response) + message = + case response.body['message'] + when 'you are not authenticated' + 'Invalid API Token specified.' + when 'application create error: permit' + 'Access denied: you may be trying to upload to the wrong user, or updating an app you joined as a tester.' + when 'application create error: limit' + 'Plan limit: you have reached the limit of your current plan or your plan has expired.' + end + UI.error(message) if message + end + private_class_method :help_message
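The multipart upload in `upload_build` above is plain Faraday; a condensed, standalone sketch of the same request shape, with placeholder credentials and paths (not part of the vendored source):

```ruby
require 'faraday'
require 'faraday_middleware'

conn = Faraday.new(url: 'https://deploygate.com', request: { timeout: 120 }) do |builder|
  builder.request(:multipart)
  builder.request(:json)
  builder.response(:json, content_type: /\bjson$/)
  builder.use(FaradayMiddleware::FollowRedirects)
  builder.adapter(:net_http)
end

# POST the binary with the API token, mirroring the action's request.
conn.post('/api/users/your_user_name/apps',
          token: 'your_api_token',
          file: Faraday::UploadIO.new('./app-release.apk', 'application/octet-stream'),
          message: 'Uploaded from a sketch')
```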
+ + def self.description + "Upload a new build to [DeployGate](https://deploygate.com/)" + end + + def self.details + [ + "You can retrieve your username and API token on [your settings page](https://deploygate.com/settings).", + "More information about the available options can be found in the [DeployGate Push API document](https://deploygate.com/docs/api)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "DEPLOYGATE_API_TOKEN", + description: "DeployGate API Token", + sensitive: true, + verify_block: proc do |value| + UI.user_error!("No API Token for DeployGate given, pass using `api_token: 'token'`") unless value.to_s.length > 0 + end), + FastlaneCore::ConfigItem.new(key: :user, + env_name: "DEPLOYGATE_USER", + description: "Target username or organization name", + verify_block: proc do |value| + UI.user_error!("No User for DeployGate given, pass using `user: 'user'`") unless value.to_s.length > 0 + end), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "DEPLOYGATE_IPA_PATH", + description: "Path to your IPA file. Optional if you use the _gym_ or _xcodebuild_ action", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find ipa file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :apk, + env_name: "DEPLOYGATE_APK_PATH", + description: "Path to your APK file", + default_value: Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find apk file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :message, + env_name: "DEPLOYGATE_MESSAGE", + description: "Release Notes", + default_value: "No changelog provided"), + FastlaneCore::ConfigItem.new(key: :distribution_key, + optional: true, + env_name: "DEPLOYGATE_DISTRIBUTION_KEY", + sensitive: true, + description: "Target Distribution Key"), + FastlaneCore::ConfigItem.new(key: :release_note, + optional: true, + env_name: "DEPLOYGATE_RELEASE_NOTE", + description: "Release note for distribution page"), + FastlaneCore::ConfigItem.new(key: :disable_notify, + optional: true, + type: Boolean, + default_value: false, + env_name: "DEPLOYGATE_DISABLE_NOTIFY", + description: "Disables push notification emails"), + FastlaneCore::ConfigItem.new(key: :distribution_name, + optional: true, + env_name: "DEPLOYGATE_DISTRIBUTION_NAME", + description: "Target Distribution Name") + ] + end + + def self.output + [ + ['DEPLOYGATE_URL', 'URL of the newly uploaded build'], + ['DEPLOYGATE_REVISION', 'auto incremented revision number'], + ['DEPLOYGATE_APP_INFO', 'Contains app revision, bundle identifier, etc.'] + ] + end + + def self.example_code + [ + 'deploygate( + api_token: "...", + user: "target username or organization name", + ipa: "./ipa_file.ipa", + message: "Build #{lane_context[SharedValues::BUILD_NUMBER]}", + distribution_key: "(Optional) Target Distribution Key", + distribution_name: "(Optional) Target Distribution Name" + )', + 'deploygate( + api_token: "...", + user: "target username or organization name", + apk: "./apk_file.apk", + message: "Build #{lane_context[SharedValues::BUILD_NUMBER]}", + distribution_key: "(Optional) Target Distribution Key", + distribution_name: "(Optional)
Target Distribution Name"
+          )'
+        ]
+      end
+
+      def self.category
+        :beta
+      end
+
+      def self.authors
+        ["tnj", "tomorrowkey"]
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/device_grid/README.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/device_grid/README.md
new file mode 100644
index 0000000..2e5a125
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/device_grid/README.md
@@ -0,0 +1,157 @@
+# fastlane danger Device Grid
+
+Ever dream of testing your app straight from a pull request? Well now you can! With [_fastlane_](https://fastlane.tools), [danger](https://github.com/danger/danger) and [appetize.io](https://appetize.io/), you can stream your latest changes right from the browser.
+
+No more manually installing and testing your app just to review a PR.
+
+![assets/GridExampleScreenshot.png](assets/GridExampleScreenshot.png)
+
+[View Example Pull Request](https://github.com/Themoji/ios/pull/12#issuecomment-215836315)
+
+## Requirements
+
+- [_fastlane_](https://fastlane.tools)
+- [danger](https://github.com/danger/danger)
+- [appetize.io](https://appetize.io/) account
+- A Continuous Integration system
+
+## Getting started
+
+### Install fastlane and danger
+
+Create a `Gemfile` in your project's directory with the following content
+
+```ruby
+gem "fastlane"
+gem "danger"
+gem "danger-device_grid"
+```
+
+and run
+
+```
+bundle install
+```
+
+### Setup _fastlane_
+
+Skip this step if you're already using _fastlane_ (which you should)
+
+```
+fastlane init
+```
+
+### Setup `danger`
+
+```
+danger init
+```
+
+Follow the `danger` guide to authenticate with GitHub
+
+### Configure `danger`
+
+Edit `Dangerfile` and replace the content with
+
+```ruby
+puts("Running fastlane to generate and upload an ipa file...")
+
+options = {
+  xcodebuild: {
+    workspace: "YourApp.xcworkspace",
+    scheme: "YourScheme"
+  }
+}
+
+require 'fastlane'
+result = Fastlane::OneOff.run(action: "build_and_upload_to_appetize",
+                              parameters: options)
+
+device_grid.run(
+  public_key: result,
+  languages: ["en", "de"],
+  devices: ["iphone5s", "iphone6splus", "ipadair"]
+)
+```
+
+Make sure to fill in your actual workspace and scheme, or use the `project` parameter `project: "YourApp.xcodeproj"` if you don't use a workspace.
+
+### Try it
+
+Push everything to GitHub in its own branch and create a PR to trigger your CI system.
+
+### Make use of deep linking
+
+When you submit a PR you usually know what part of your app should be reviewed. Make it easier for everyone by providing a deep link, launching the app at the right point. To do so, use emojis (what else):
+
+Add this to the bottom of your PR-body:
+
+```
+:link: com.krausefx.app://bacons/show/937
+```
+
+### Make use of `NSUserDefaults`
+
+To do a runtime check if the app is running on `Appetize`, just use:
+
+```objective-c
+[[NSUserDefaults standardUserDefaults] objectForKey:@"isAppetize"]
+```
+
+### Generate `appetize` stream, without the grid
+
+Add the following to your `Fastfile` to build and upload your app to `appetize`.
+ +```ruby +desc "Build your app and upload it to Appetize to stream it in your browser" +lane :upload_to_appetize do + build_and_upload_to_appetize( + xcodebuild: { + workspace: "YourApp.xcworkspace", + scheme: "YourScheme" + } + ) +end +``` + +Run the newly created lane using + +``` +fastlane upload_to_appetize +``` + +### Manual way using `appetize_viewing_url_generator` + +If you want even more control over the way your app is built, you can also manually generate your `.app` and then upload it to `appetize`. + +Use the `appetize` action together with `appetize_viewing_url_generator`. Make sure to build with the `iphonesimulator` SDK, since `appetize` runs iOS simulators to stream your application. + +```ruby +tmp_path = "/tmp/fastlane_build" +xcodebuild( + workspace: "Themoji.xcworkspace", + sdk: "iphonesimulator", + scheme: "Themoji", + derivedDataPath: tmp_path +) + +app_path = Dir[File.join(tmp_path, "**", "*.app")].last +UI.user_error!("Couldn't find app") unless app_path + +zipped_bundle = zip(path: app_path, output_path: File.join(tmp_path, "Result.zip")) + +appetize( + path: zipped_bundle, + api_token: 'yourapitoken' # get it from https://appetize.io/docs#request-api-token +) + +url = appetize_viewing_url_generator(scale: "75", color: "black", public_key: "123123") +UI.message("Generated URL: #{url}") +``` + +#### Disclaimer + +All product names, logos, and brands are property of their respective owners. +The used device frames were provided by Facebook via the [Facebook Design Resources](https://facebook.github.io/design/devices.html). _fastlane_ is in no way affiliated with Facebook. + +> While Facebook has redrawn and shares these assets for the benefit of the design community, Facebook does not own any of the underlying product or user interface designs. By accessing these assets, you agree to obtain all necessary permissions from the underlying rights holders and/or adhere to any applicable brand use guidelines before using them. Facebook disclaims all express or implied warranties with respect to these assets, including non-infringement of intellectual property rights. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/build_app.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/build_app.md new file mode 100644 index 0000000..1f49b56 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/build_app.md @@ -0,0 +1,245 @@ +
+
+-------
+
+Features • Usage • Tips
+
+-------
+
+gym is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+
+# What's gym?
+
+_gym_ builds and packages iOS apps for you. It takes care of all the heavy lifting and makes it super easy to generate a signed `ipa` or `app` file 💪
+
+_gym_ is a replacement for [shenzhen](https://github.com/nomad/shenzhen).
+
+### Before _gym_
+
+```no-highlight
+xcodebuild clean archive -archivePath build/MyApp \
+  -scheme MyApp
+xcodebuild -exportArchive \
+  -exportFormat ipa \
+  -archivePath "build/MyApp.xcarchive" \
+  -exportPath "build/MyApp.ipa" \
+  -exportProvisioningProfile "ProvisioningProfileName"
+```
+
+### With _gym_
+
+```no-highlight
+fastlane gym
+```
+
+### Why _gym_?
+
+_gym_ uses the latest APIs to build and sign your application, which results in much faster build times.
+
+|          | Gym Features |
+|----------|----------------|
+🚀 | _gym_ builds 30% faster than other build tools like [shenzhen](https://github.com/nomad/shenzhen)
+🏁 | Beautiful inline build output
+📖 | Helps you resolve common build errors like code signing issues
+🚠 | Sensible defaults: Automatically detect the project, its schemes and more
+🔗 | Works perfectly with [_fastlane_](https://fastlane.tools) and other tools
+📦 | Automatically generates an `ipa` and a compressed `dSYM` file
+🚅 | Don't remember any complicated build commands, just _gym_
+🔧 | Easy and dynamic configuration using parameters and environment variables
+💾 | Store common build settings in a `Gymfile`
+📤 | All archives are stored and accessible in the Xcode Organizer
+💻 | Supports both iOS and Mac applications
+
+![/img/actions/gymScreenshot.png](/img/actions/gymScreenshot.png)
+
+-----
+
+![/img/actions/gym.gif](/img/actions/gym.gif)
+
+# Usage
+
+```no-highlight
+fastlane gym
+```
+
+That's all you need to build your application. If you want more control, here are some available parameters:
+
+```no-highlight
+fastlane gym --workspace "Example.xcworkspace" --scheme "AppName" --clean
+```
+
+If you need to use a different Xcode installation, use [_xcodes_](https://docs.fastlane.tools/actions/xcodes) or define `DEVELOPER_DIR`:
+
+```no-highlight
+DEVELOPER_DIR="/Applications/Xcode6.2.app" fastlane gym
+```
+
+For a list of all available parameters use
+
+```no-highlight
+fastlane action gym
+```
+
+If you run into any issues, use the `verbose` mode to get more information
+
+```no-highlight
+fastlane gym --verbose
+```
+
+Set the right export method if you're not uploading to App Store or TestFlight:
+
+```no-highlight
+fastlane gym --export_method ad-hoc
+```
+
+To pass boolean parameters make sure to use _gym_ like this:
+
+```no-highlight
+fastlane gym --include_bitcode true --include_symbols false
+```
+
+To access the raw `xcodebuild` output, open `~/Library/Logs/gym`
+
+# Gymfile
+
+Since you might want to manually trigger a new build but don't want to specify all the parameters every time, you can store your defaults in a so-called `Gymfile`.
+
+Run `fastlane gym init` to create a new configuration file. Example:
+
+```ruby-skip-tests
+scheme("Example")
+
+sdk("iphoneos9.0")
+
+clean(true)
+
+output_directory("./build") # store the ipa in this folder
+output_name("MyApp")        # the name of the ipa file
+```
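+The same defaults can also be expressed through the `gym` action from a lane instead of a standalone `Gymfile`. A minimal sketch — the lane name and values here are illustrative, but all options shown are documented _gym_ parameters:
+
+```ruby
+lane :adhoc_build do
+  gym(
+    workspace: "Example.xcworkspace",
+    scheme: "AppName",
+    clean: true,
+    export_method: "ad-hoc",  # same as --export_method above
+    include_symbols: false
+  )
+end
+```
+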
+## Export options
+
+Since Xcode 7, _gym_ uses the new Xcode API, which allows us to specify export options using a `plist` file. By default, _gym_ creates this file for you, and you are able to modify some parameters by using `export_method`, `export_team_id`, `include_symbols` or `include_bitcode`. If you want to have more options, like creating a manifest file for app thinning, you can provide your own `plist` file:
+
+```ruby-skip-tests
+export_options("./ExportOptions.plist")
+```
+
+or you can provide a hash of values directly in the `Gymfile`:
+
+```ruby-skip-tests
+export_options({
+  method: "ad-hoc",
+  manifest: {
+    appURL: "https://example.com/My App.ipa",
+  },
+  thinning: "<thin-for-all-variants>"
+})
+```
+
+Optional: If _gym_ can't automatically detect the provisioning profiles to use, you can pass a mapping of bundle identifiers to provisioning profiles:
+
+```ruby
+build_app(
+  scheme: "Release",
+  export_method: "app-store",
+  export_options: {
+    provisioningProfiles: {
+      "com.example.bundleid" => "Provisioning Profile Name",
+      "com.example.bundleid2" => "Provisioning Profile Name 2"
+    }
+  }
+)
+```
+
+**Note**: If you use [_fastlane_](https://fastlane.tools) with [_match_](https://fastlane.tools/match), you don't need to provide those values manually, unless you pass a plist file into `export_options`
+
+For the list of available options run `xcodebuild -help`.
+
+## Setup code signing
+
+- [More information on how to get started with codesigning](https://docs.fastlane.tools/codesigning/getting-started/)
+- [Docs on how to set up your Xcode project](https://docs.fastlane.tools/codesigning/xcode-project/)
+
+## Automating the whole process
+
+_gym_ works great together with [_fastlane_](https://fastlane.tools), which connects all deployment tools into one streamlined workflow.
+
+Using _fastlane_ you can define a configuration like
+
+```ruby
+lane :beta do
+  scan
+  gym(scheme: "MyApp")
+  crashlytics
+end
+
+# the error block is executed when an error occurs
+error do |lane, exception|
+  slack(
+    # short, human-friendly message
+    message: exception.to_s,
+    success: false,
+    # output containing extended log output
+    payload: { "Output" => exception.error_info.to_s }
+  )
+end
+```
+
+When _gym_ raises an error, the `error_info` property will contain the process output,
+in case you want to display the error in 3rd party tools such as Slack.
+
+You can then easily switch between beta providers (e.g. `testflight`, `hockey`, `s3` and more).
+
+# How does it work?
+
+_gym_ uses the latest APIs to build and sign your application. The 2 main components are
+
+- `xcodebuild`
+- [xcpretty](https://github.com/supermarin/xcpretty)
+
+When you run _gym_ without the `--silent` mode, it will print out every command it executes.
+
+To build the archive, _gym_ uses the following command:
+
+```no-highlight
+set -o pipefail && \
+xcodebuild -scheme 'Example' \
+-project './Example.xcodeproj' \
+-configuration 'Release' \
+-destination 'generic/platform=iOS' \
+-archivePath '/Users/felixkrause/Library/Developer/Xcode/Archives/2015-08-11/ExampleProductName 2015-08-11 18.15.30.xcarchive' \
+archive | xcpretty
+```
+
+After building, the archive is checked by _gym_. If it's valid, it gets packaged up and signed into an `ipa` file.
+
+_gym_ automatically chooses a different packaging method depending on the version of Xcode you're using.
+
+### Xcode 7 and above
+
+```no-highlight
+/usr/bin/xcrun path/to/xcbuild-safe.sh -exportArchive \
+-exportOptionsPlist '/tmp/gym_config_1442852529.plist' \
+-archivePath '/Users/fkrause/Library/Developer/Xcode/Archives/2015-09-21/App 2015-09-21 09.21.56.xcarchive' \
+-exportPath '/tmp/1442852529'
+```
+
+_gym_ makes use of the new Xcode 7 API, which allows us to specify the export options using a `plist` file.
You can find more information about the available options by running `xcodebuild --help`.
+
+Using this method, no workarounds for WatchKit or Swift are required, as it uses the same technique Xcode uses when exporting your binary.
+
+Note: the [xcbuild-safe.sh script](https://github.com/fastlane/fastlane/blob/master/gym/lib/assets/wrap_xcodebuild/xcbuild-safe.sh) wraps around xcodebuild to work around some incompatibilities.
+
+## Use 'ProvisionQL' for advanced Quick Look in Finder
+
+Install [ProvisionQL](https://github.com/ealeksandrov/ProvisionQL).
+
+It will show you `ipa` files like this:
+![img/actions/QuickLookScreenshot-App.png](/img/actions/QuickLookScreenshot-App.png)
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_android_screenshots.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_android_screenshots.md
new file mode 100644
index 0000000..189cb70
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_android_screenshots.md
@@ -0,0 +1,349 @@
+
+###### Automated localized screenshots of your Android app on every device
+
+_screengrab_ generates localized screenshots of your Android app for different device types and languages for Google Play and can be uploaded using [_supply_](https://fastlane.tools/supply).
+
+### Why should I automate this process?
+
+- Create hundreds of screenshots in multiple languages on emulators or real devices, saving you hours
+- Easily verify that localizations fit into labels on all screen dimensions to find UI mistakes before you ship
+- You only need to configure it once for anyone on your team to run it
+- Keep your screenshots perfectly up-to-date with every app update. Your customers deserve it!
+- Fully integrates with [_fastlane_](https://fastlane.tools) and [_supply_](https://fastlane.tools/supply)
+
+# Installation
+
+Install the gem
+
+```no-highlight
+gem install fastlane
+```
+
+##### Gradle dependency
+
+```java
+androidTestImplementation 'tools.fastlane:screengrab:x.x.x'
+```
+
+The latest version is [ ![Download](https://maven-badges.herokuapp.com/maven-central/tools.fastlane/screengrab/badge.svg)](https://search.maven.org/artifact/tools.fastlane/screengrab)
+
+As of Screengrab version 2.0.0, all Android test dependencies are AndroidX dependencies. This means a device with API 18+, Android 4.3 or greater is required. If you wish to capture screenshots with an older Android OS, then you must use a 1.x.x version.
+
+##### Configuring your Manifest Permissions
+
+Ensure that the following permissions exist in your **src/debug/AndroidManifest.xml**
+
+```xml
+<!-- Allows unlocking your device and activating its screen so UI tests can succeed -->
+<uses-permission android:name="android.permission.DISABLE_KEYGUARD"/>
+<uses-permission android:name="android.permission.WAKE_LOCK"/>
+
+<!-- Allows for storing and retrieving screenshots -->
+<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
+<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
+
+<!-- Allows changing locales -->
+<uses-permission android:name="android.permission.CHANGE_CONFIGURATION"/>
+```
+
+##### Configuring your UI Tests for Screenshots
+
+1. Add `LocaleTestRule` to your tests class to handle automatic switching of locales.
+
+   If you're using Java use:
+
+   ```java
+   @ClassRule
+   public static final LocaleTestRule localeTestRule = new LocaleTestRule();
+   ```
+
+   If you're using Kotlin use:
+
+   ```kotlin
+   @Rule @JvmField
+   val localeTestRule = LocaleTestRule()
+   ```
+
+   The `@JvmField` annotation is important. It won't work like this:
+
+   ```kotlin
+   companion object {
+       @get:ClassRule
+       val localeTestRule = LocaleTestRule()
+   }
+   ```
+
+2. To capture screenshots, add `Screengrab.screenshot("name_of_screenshot_here");` to your tests on the appropriate screens
+
+# Generating Screenshots with Screengrab
+- Before running `fastlane screengrab`, you'll need a debug and a test APK
+  - You can create your APKs manually with `./gradlew assembleDebug assembleAndroidTest`
+  - You can also create a lane and use `build_android_app`:
+
+    ```ruby
+    desc "Build debug and test APK for screenshots"
+    lane :build_and_screengrab do
+      build_android_app(
+        task: 'assemble',
+        build_type: 'Debug'
+      )
+      build_android_app(
+        task: 'assemble',
+        build_type: 'AndroidTest'
+      )
+      screengrab()
+    end
+    ```
+- Once complete, run `fastlane screengrab` in your app project directory to generate screenshots
+  - You will be prompted to provide any required parameters which are not in your **Screengrabfile** or provided as command line arguments
+- Your screenshots will be saved to `fastlane/metadata/android` in the directory where you ran _screengrab_
+
+## Improved screenshot capture with UI Automator
+
+As of _screengrab_ 0.5.0, you can specify different strategies to control the way _screengrab_ captures screenshots.
The newer strategy delegates to [UI Automator](https://developer.android.com/topic/libraries/testing-support-library/index.html#UIAutomator) which fixes a number of problems compared to the original strategy: + +* Shadows/elevation are correctly captured for Material UI +* Multi-window situations are correctly captured (dialogs, etc.) +* Works on Android N + +UI Automator is the default strategy. However, UI Automator requires a device with **API level >= 18**. If you need to grab screenshots on an older Android version, use the latest 1.x.x version of this library and set the DecorView ScreenshotStrategy. + +```java +Screengrab.setDefaultScreenshotStrategy(new DecorViewScreenshotStrategy()); +``` + +## Improved screenshot capture with Falcon + +As of _screengrab_ 1.2.0, you can specify a new strategy to delegate to [Falcon](https://github.com/jraska/Falcon). Falcon may work better than UI Automator in some situations and also provides similar benefits as UI Automator: + +* Multi-window situations are correctly captured (dialogs, etc.) +* Works on Android N + +Falcon requires a device with **API level >= 10**. To enable it for all screenshots by default, make the following call before your tests run: + +```java +Screengrab.setDefaultScreenshotStrategy(new FalconScreenshotStrategy(activityRule.getActivity())); +``` + +## Advanced Screengrabfile Configuration + +Running `fastlane screengrab init` generated a Screengrabfile which can store all of your configuration options. Since most values will not change often for your project, it is recommended to store them there. + +The `Screengrabfile` is written in Ruby, so you may find it helpful to use an editor that highlights Ruby syntax to modify this file. + +```ruby-skip-tests +# remove the leading '#' to uncomment lines + +# app_package_name('your.app.package') +# use_tests_in_packages(['your.screenshot.tests.package']) + +# app_apk_path('path/to/your/app.apk') +# tests_apk_path('path/to/your/tests.apk') + +locales(['en-US', 'fr-FR', 'it-IT']) + +# clear all previously generated screenshots in your local output directory before creating new ones +clear_previous_screenshots(true) +``` + +For more information about all available options run + +```no-highlight +fastlane action screengrab +``` + +# Tips + +## UI Tests + +Check out [Testing UI for a Single App](http://developer.android.com/training/testing/ui-testing/espresso-testing.html) for an introduction to using Espresso for UI testing. 
+
+##### Example UI Test Class (Using JUnit4)
+
+Java:
+
+```java
+@RunWith(JUnit4.class)
+public class JUnit4StyleTests {
+    @ClassRule
+    public static final LocaleTestRule localeTestRule = new LocaleTestRule();
+
+    @Rule
+    public ActivityScenarioRule<MainActivity> activityRule = new ActivityScenarioRule<>(MainActivity.class);
+
+    @Test
+    public void testTakeScreenshot() {
+        Screengrab.screenshot("before_button_click");
+
+        onView(withId(R.id.fab)).perform(click());
+
+        Screengrab.screenshot("after_button_click");
+    }
+}
+```
+
+Kotlin:
+
+```kotlin
+@RunWith(JUnit4::class)
+class JUnit4StyleTests {
+    @get:Rule
+    var activityRule = ActivityScenarioRule(MainActivity::class.java)
+
+    @Rule @JvmField
+    val localeTestRule = LocaleTestRule()
+
+    @Test
+    fun testTakeScreenshot() {
+        Screengrab.screenshot("before_button_click")
+
+        onView(withId(R.id.fab)).perform(click())
+
+        Screengrab.screenshot("after_button_click")
+    }
+}
+```
+
+There is an [example project](https://github.com/fastlane/fastlane/tree/master/screengrab/example/src/androidTest/java/tools/fastlane/localetester) showing how to use JUnit 3 or 4 and Espresso with the screengrab Java library to capture screenshots during a UI test run.
+
+Using JUnit 4 is preferable because of its ability to perform actions before and after the entire test class is run. This means you will change the device's locale far fewer times when compared with JUnit 3 running those commands before and after each test method.
+
+When using JUnit 3 you'll need to add a bit more code:
+
+- Use `LocaleUtil.changeDeviceLocaleTo(LocaleUtil.getTestLocale());` in `setUp()`
+- Use `LocaleUtil.changeDeviceLocaleTo(LocaleUtil.getEndingLocale());` in `tearDown()`
+- Use `Screengrab.screenshot("name_of_screenshot_here");` to capture screenshots at the appropriate points in your tests
+
+## Clean Status Bar
+
+Screengrab can clean your status bar to make your screenshots even more beautiful.
+It is simply a wrapper that allows configuring SystemUI DemoMode in your code.
+Note: the clean status bar feature is only supported on devices with *API level >= 23*.
+
+You can enable and disable the clean status bar at any moment during your tests.
+In most cases you probably want to do this in the @BeforeClass and @AfterClass methods.
+
+```java
+@BeforeClass
+public static void beforeAll() {
+    CleanStatusBar.enableWithDefaults();
+}
+
+@AfterClass
+public static void afterAll() {
+    CleanStatusBar.disable();
+}
+```
+
+Have a look at the methods of the `CleanStatusBar` class to customize the status bar even more.
+You could for example show the Bluetooth icon and the LTE text.
+
+```java
+new CleanStatusBar()
+    .setBluetoothState(BluetoothState.DISCONNECTED)
+    .setMobileNetworkDataType(MobileDataType.LTE)
+    .enable();
+```
+
+# Advanced _screengrab_
+
+## Launch Arguments
+
+You can provide additional arguments to your test cases on launch. These strings will be available in your tests through `InstrumentationRegistry.getArguments()`.
+
+```ruby
+screengrab(
+  launch_arguments: [
+    "username hjanuschka",
+    "build_number 201"
+  ]
+)
+```
+
+```java
+Bundle extras = InstrumentationRegistry.getArguments();
+String username = null;
+if (extras != null) {
+  if (extras.containsKey("username")) {
+    username = extras.getString("username");
+    System.out.println("Username: " + username);
+  } else {
+    System.out.println("No username in extras");
+  }
+} else {
+  System.out.println("No extras");
+}
+```
+
+ +
+## Detecting screengrab at runtime
+
+For some apps, it is helpful to know when _screengrab_ is running so that you can display specific data for your screenshots. For iOS fastlane users, this is much like "FASTLANE_SNAPSHOT". In order to do this, you'll need to have at least two product flavors of your app.
+
+Add two product flavors to the app-level build.gradle file:
+
+```
+android {
+...
+  flavorDimensions "mode"
+  productFlavors {
+    screengrab {
+      dimension "mode"
+    }
+    regular {
+      dimension "mode"
+    }
+  }
+...
+}
+```
+
+Check for the existence of that flavor (i.e. `screengrab`) in your app code as follows in order to use mock data or customize data for screenshots:
+
+```
+if ("screengrab".equals(BuildConfig.FLAVOR)) {
+  System.out.println("screengrab is running!");
+}
+```
+
+When running _screengrab_, do the following to build the flavor you want as well as the test apk. Note that you use `assembleFlavor_name`, where `Flavor_name` is the flavor name, capitalized (i.e. `Screengrab`).
+
+```
+./gradlew assembleScreengrab assembleAndroidTest
+```
+
+Run _screengrab_:
+
+```
+fastlane screengrab
+```
+
+_screengrab_ will ask you to select the debug and test apps (which you can then add to your Screengrabfile to skip this step later).
+
+The debug apk should be somewhere like this:
+
+`app/build/outputs/apk/screengrab/debug/app-screengrab-debug.apk`
+
+The test apk should be somewhere like this:
+
+`app/build/outputs/apk/androidTest/screengrab/debug/app-screengrab-debug-androidTest.apk`
+
+Sit back and enjoy your new screenshots!
+
+Note: while this could also be done by creating a new build variant (i.e. debug, release and creating a new one called screengrab), [Android only allows one build type to be tested](http://tools.android.com/tech-docs/new-build-system/user-guide#TOC-Testing) which defaults to debug. That's why we use product flavors.
+
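+The two shell steps above can also live in a single lane. A minimal sketch — the lane name is made up here, and the APK paths are the ones shown above (adjust them to your project):
+
+```ruby
+lane :screenshots_with_flavor do
+  # Build the screengrab flavor plus the instrumentation test APK
+  gradle(tasks: ["assembleScreengrab", "assembleAndroidTest"])
+  screengrab(
+    app_apk_path: "app/build/outputs/apk/screengrab/debug/app-screengrab-debug.apk",
+    tests_apk_path: "app/build/outputs/apk/androidTest/screengrab/debug/app-screengrab-debug-androidTest.apk"
+  )
+end
+```
+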
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_ios_screenshots.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_ios_screenshots.md new file mode 100644 index 0000000..800294b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/capture_ios_screenshots.md @@ -0,0 +1,375 @@ +
+ +###### Automate taking localized screenshots of your iOS, tvOS, and watchOS apps on every device + +
+Check out the new fastlane documentation on how to generate screenshots.
+
+_snapshot_ generates localized iOS, tvOS, and watchOS screenshots for different device types and languages for the App Store and can be uploaded using [_deliver_](https://docs.fastlane.tools/actions/deliver/).
+
+You have to manually create 20 (languages) x 6 (devices) x 5 (screenshots) = **600 screenshots**.
+
+It's hard to get everything right!
+
+- New screenshots with every (design) update
+- No loading indicators
+- Same content / screens
+- [Clean Status Bar](#use-a-clean-status-bar)
+- Uploading screenshots ([_deliver_](https://docs.fastlane.tools/actions/deliver/) is your friend)
+
+More information about [creating perfect screenshots](https://krausefx.com/blog/creating-perfect-app-store-screenshots-of-your-ios-app).
+
+_snapshot_ runs completely in the background - you can do something else, while your computer takes the screenshots for you.
+
+-------
+
+Features • UI Tests • Quick Start • Usage • Tips • How?
+ +------- + +# Features +- Create hundreds of screenshots in multiple languages on all simulators +- Take screenshots in multiple device simulators concurrently to cut down execution time (Xcode 9 only) +- Configure it once, store the configuration in git +- Do something else, while the computer takes the screenshots for you +- Integrates with [_fastlane_](https://fastlane.tools) and [_deliver_](https://docs.fastlane.tools/actions/deliver/) +- Generates a beautiful web page, which shows all screenshots on all devices. This is perfect to send to QA or the marketing team +- _snapshot_ automatically waits for network requests to be finished before taking a screenshot (we don't want loading images in the App Store screenshots) + +After _snapshot_ successfully created new screenshots, it will generate a beautiful HTML file to get a quick overview of all screens: + +![img/actions/htmlPagePreviewFade.jpg](/img/actions/htmlPagePreviewFade.jpg) + +## Why? + +This tool automatically switches the language and device type and runs UI Tests for every combination. + +### Why should I automate this process? + +- It takes **hours** to take screenshots +- You get a great overview of all your screens, running on all available simulators without the need to manually start it hundreds of times +- Easy verification for translators (without an iDevice) that translations do make sense in real App context +- Easy verification that localizations fit into labels on all screen dimensions +- It is an integration test: You can test for UI elements and other things inside your scripts +- Be so nice, and provide new screenshots with every App Store update. Your customers deserve it +- You realize, there is a spelling mistake in one of the screens? Well, just correct it and re-run the script + +# UI Tests + +## Getting started +This project uses Apple's newly announced UI Tests. We will not go into detail on how to write scripts. + +Here a few links to get started: + +- [WWDC 2015 Introduction to UI Tests](https://developer.apple.com/videos/play/wwdc2015-406/) +- [A first look into UI Tests](http://www.mokacoding.com/blog/xcode-7-ui-testing/) +- [UI Testing in Xcode 7](http://masilotti.com/ui-testing-xcode-7/) +- [HSTestingBackchannel : ‘Cheat’ by communicating directly with your app](https://github.com/ConfusedVorlon/HSTestingBackchannel) +- [Automating App Store screenshots using fastlane snapshot and frameit](https://tisunov.github.io/2015/11/06/automating-app-store-screenshots-generation-with-fastlane-snapshot-and-sketch.html) + +# Quick Start + +- Create a new UI Test target in your Xcode project ([top part of this article](https://krausefx.com/blog/run-xcode-7-ui-tests-from-the-command-line)) +- Run `fastlane snapshot init` in your project folder +- Add the ./SnapshotHelper.swift to your UI Test target (You can move the file anywhere you want) + - (Xcode 8 only) add the ./SnapshotHelperXcode8.swift to your UI Test target +- (Objective C only) add the bridging header to your test class: + - `#import "MYUITests-Swift.h"` + (The bridging header is named after your test target with `-Swift.h` appended.) 
+- In your UI Test class, click the `Record` button on the bottom left and record your interaction
+- To take a snapshot, call the following between interactions
+  - Swift: `snapshot("01LoginScreen")`
+  - Objective C: `[Snapshot snapshot:@"01LoginScreen" timeWaitingForIdle:10];`
+- Add the following code to your `setUp()` method:
+
+**Swift:**
+
+```swift
+let app = XCUIApplication()
+setupSnapshot(app)
+app.launch()
+```
+
+**Objective C:**
+
+```objective-c
+XCUIApplication *app = [[XCUIApplication alloc] init];
+[Snapshot setupSnapshot:app waitForAnimations:NO];
+[app launch];
+```
+
+_Make sure you only have one `launch` call in your test class, as Xcode adds one automatically on new test files._
+
+![img/actions/snapshot.gif](/img/actions/snapshot.gif)
+
+You can try the _snapshot_ [example project](https://github.com/fastlane/fastlane/tree/master/snapshot/example) by cloning this repo.
+
+To quick-start your UI tests, you can use the UI Test recorder. You only have to interact with the simulator, and Xcode will generate the UI Test code for you. You can find the red record button on the bottom of the screen (more information in [this blog post](https://krausefx.com/blog/run-xcode-7-ui-tests-from-the-command-line))
+
+# Usage
+
+```no-highlight
+fastlane snapshot
+```
+
+Your screenshots will be stored in the `./screenshots/` folder by default (or `./fastlane/screenshots` if you're using [_fastlane_](https://fastlane.tools))
+
+New with Xcode 9, *snapshot* can run multiple simulators concurrently. This is the default behavior in order to take your screenshots as quickly as possible. This can be disabled to run each device, one at a time, by setting the `:concurrent_simulators` option to `false`.
+
+**Note:** While running *snapshot* with Xcode 9, the simulators will not be visibly spawned. So, while you won't see the simulators running your tests, they will, in fact, be taking your screenshots.
+
+If any error occurs while running the snapshot script on a device, that device will not have any screenshots, and _snapshot_ will continue with the next device or language. To stop the flow after the first error, run
+
+```no-highlight
+fastlane snapshot --stop_after_first_error
+```
+
+Also by default, _snapshot_ will open the HTML summary after all is done. This can be skipped with the following command
+
+```no-highlight
+fastlane snapshot --stop_after_first_error --skip_open_summary
+```
+
+There are a lot of options available that define how to build your app, for example
+
+```no-highlight
+fastlane snapshot --scheme "UITests" --configuration "Release" --sdk "iphonesimulator"
+```
+
+Reinstall the app before running _snapshot_
+
+```no-highlight
+fastlane snapshot --reinstall_app --app_identifier "tools.fastlane.app"
+```
+
+By default _snapshot_ automatically retries running UI Tests if they fail. This is due to randomly failing UI Tests (e.g. [#2517](https://github.com/fastlane/fastlane/issues/2517)). You can adapt this number using
+
+```no-highlight
+fastlane snapshot --number_of_retries 3
+```
+
+Add photos and/or videos to the simulator before running _snapshot_
+
+```no-highlight
+fastlane snapshot --add_photos MyTestApp/Assets/demo.jpg --add_videos MyTestApp/Assets/demo.mp4
+```
+
+For a list of all available options run
+
+```no-highlight
+fastlane action snapshot
+```
+
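+The command-line flags above map directly onto the `snapshot` action, so the same run can be scripted from a lane. A minimal sketch — the lane name and values are illustrative, but the options shown are documented _snapshot_ parameters:
+
+```ruby
+lane :screenshots do
+  snapshot(
+    scheme: "UITests",
+    configuration: "Release",
+    number_of_retries: 3,
+    stop_after_first_error: true,
+    skip_open_summary: true
+  )
+end
+```
+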
+After running _snapshot_ you will get a nice summary:
+
+## Snapfile
+
+All of the available options can also be stored in a configuration file called the `Snapfile`. Since most values will not change often for your project, it is recommended to store them there:
+
+First make sure to have a `Snapfile` (you get it for free when running `fastlane snapshot init`)
+
+The `Snapfile` can contain all the options that are also available on `fastlane action snapshot`
+
+```ruby-skip-tests
+scheme("UITests")
+
+devices([
+  "iPad (7th generation)",
+  "iPad Air (3rd generation)",
+  "iPad Pro (11-inch)",
+  "iPad Pro (12.9-inch) (3rd generation)",
+  "iPad Pro (9.7-inch)",
+  "iPhone 11",
+  "iPhone 11 Pro",
+  "iPhone 11 Pro Max",
+  "iPhone 8",
+  "iPhone 8 Plus"
+])
+
+languages([
+  "en-US",
+  "de-DE",
+  "es-ES",
+  ["pt", "pt_BR"] # Portuguese with Brazilian locale
+])
+
+launch_arguments(["-username Felix"])
+
+# The directory in which the screenshots should be stored
+output_directory('./screenshots')
+
+clear_previous_screenshots(true)
+
+override_status_bar(true)
+
+add_photos(["MyTestApp/Assets/demo.jpg"])
+```
+
+### Completely reset all simulators
+
+You can run this command in the terminal to delete and re-create all iOS and tvOS simulators:
+
+```no-highlight
+fastlane snapshot reset_simulators
+```
+
+**Warning**: This will delete **all** your simulators and replace them with new ones! This is useful if you run into weird problems when running _snapshot_.
+
+You can use the environment variable `SNAPSHOT_FORCE_DELETE` to stop asking for confirmation before deleting.
+
+```no-highlight
+SNAPSHOT_FORCE_DELETE=1 fastlane snapshot reset_simulators
+```
+
+## Update snapshot helpers
+
+Some updates require the helper files to be updated. _snapshot_ will automatically warn you and tell you how to update.
+
+Basically you can run
+
+```no-highlight
+fastlane snapshot update
+```
+
+to update your `SnapshotHelper.swift` files. In case you modified your `SnapshotHelper.swift` and want to manually update the file, check out [SnapshotHelper.swift](https://github.com/fastlane/fastlane/blob/master/snapshot/lib/assets/SnapshotHelper.swift).
+
+## Launch Arguments
+
+You can provide additional arguments to your app on launch. These strings will be available in your app (i.e. not in the testing target) through `ProcessInfo.processInfo.arguments`. Alternatively, use user-default syntax (`-key value`) and they will be available as key-value pairs in `UserDefaults.standard`.
+
+```ruby-skip-tests
+launch_arguments([
+  "-firstName Felix -lastName Krause"
+])
+```
+
+```swift
+name.text = UserDefaults.standard.string(forKey: "firstName")
+// name.text = "Felix"
+```
+
+_snapshot_ includes `-FASTLANE_SNAPSHOT YES`, which will set a temporary user default for the key `FASTLANE_SNAPSHOT`; you may use this to detect when the app is run by _snapshot_.
+
+```swift
+if UserDefaults.standard.bool(forKey: "FASTLANE_SNAPSHOT") {
+  // runtime check that we are in snapshot mode
+}
+```
+
+Specify multiple argument strings and _snapshot_ will generate screenshots for each combination of arguments, devices, and languages. This is useful for comparing the same screenshots with different feature flags, dynamic text sizes, and different data sets.
+
+```ruby-skip-tests
+# Snapfile for A/B Test Comparison
+launch_arguments([
+  "-secretFeatureEnabled YES",
+  "-secretFeatureEnabled NO"
+])
+```
+
+## Xcode Environment Variables
+
+_snapshot_ includes `FASTLANE_SNAPSHOT=YES` and `FASTLANE_LANGUAGE=<language>` as arguments when executing `xcodebuild`. This means you may use these environment variables in a custom build phase run script to do any additional configuration.
+
+# How does it work?
+
+The easiest solution would be to just render the UIWindow into a file. That's not possible because UI Tests don't run on a main thread. So _snapshot_ uses a different approach:
+
+When you run unit tests in Xcode, the reporter generates a plist file, documenting all events that occurred during the tests ([More Information](http://michele.io/test-logs-in-xcode)). Additionally, Xcode generates screenshots before, during and after each of these events. There is no way to manually trigger a screenshot event. The screenshots and the plist files are stored in the DerivedData directory, which _snapshot_ stores in a temporary folder.
+
+When the user calls `snapshot(...)` in the UI Tests (Swift or Objective C) the script actually does a rotation to `.Unknown`, which doesn't have any effect on the actual app, but is enough to trigger a screenshot. It has no effect on the application and is not something you would do in your tests. The goal was to find *some* event that a user would never trigger, so that we know it's from _snapshot_. On tvOS, there is no orientation so we ask for a count of app views with type "Browser" (which should never exist on tvOS).
+
+_snapshot_ then iterates through all test events and checks where we either did this weird rotation (on iOS) or searched for browsers (on tvOS). Once _snapshot_ has all events triggered by _snapshot_, it collects an ordered list of all the file names of the actual screenshots of the application.
+
+In the test output, the Swift _snapshot_ function will print out something like this
+
+> snapshot: [some random text here]
+
+_snapshot_ finds all these entries using a regex. The number of _snapshot_ outputs in the terminal and the number of _snapshot_ events in the plist file should be the same. Knowing that, _snapshot_ automatically matches these 2 lists to identify the name of each of these screenshots. They are then copied over to the output directory and separated by language and device.
+
+Two things have to be passed on from _snapshot_ to the `xcodebuild` command line tool:
+
+- The device type is passed via the `destination` parameter of `xcodebuild`
+- The language is passed via a temporary file which is written by _snapshot_ before running the tests and read by the UI Tests when launching the application
+
+If you find a better way to do any of this, please submit an issue on GitHub or even a pull request :+1:
+
+Radar [23062925](https://openradar.appspot.com/radar?id=5056366381105152) has been resolved with Xcode 8.3, so it's now possible to actually take screenshots from the simulator. We'll keep using the old approach for now, since many of you still want to use older versions of Xcode.
+
+# Tips
+
+Check out the new fastlane documentation on how to generate screenshots.
+ +## Frame the screenshots + +If you want to add frames around the screenshots and even put a title on top, check out [_frameit_](https://docs.fastlane.tools/actions/frameit/). + +## Available language codes +```ruby +ALL_LANGUAGES = ["da", "de-DE", "el", "en-AU", "en-CA", "en-GB", "en-US", "es-ES", "es-MX", "fi", "fr-CA", "fr-FR", "id", "it", "ja", "ko", "ms", "nl-NL", "no", "pt-BR", "pt-PT", "ru", "sv", "th", "tr", "vi", "zh-Hans", "zh-Hant"] +``` + +To get more information about language and locale codes please read [Internationalization and Localization Guide](https://developer.apple.com/library/ios/documentation/MacOSX/Conceptual/BPInternational/LanguageandLocaleIDs/LanguageandLocaleIDs.html). + +## Use a clean status bar + +You can set `override_status_bar` to `true` to set the status bar to Tuesday January 9th at 9:41AM with full battery and reception. If you need more granular customization, to set a Carrier name for example, also set `override_status_bar_arguments` to the specific arguments to be passed to the `xcrun simctl status_bar override` command. Run `xcrun simctl status_bar --help` to see the options available. + +## Editing the `Snapfile` + +Change syntax highlighting to *Ruby*. + +### Simulator doesn't launch the application + +When the app dies directly after the application is launched there might be 2 problems + +- The simulator is somehow in a broken state and you need to re-create it. You can use `snapshot reset_simulators` to reset all simulators (this will remove all installed apps) +- A restart helps very often + +## Determine language + +To detect the currently used localization in your tests, access the `deviceLanguage` variable from `SnapshotHelper.swift`. + +## Speed up snapshots + +A lot of time in UI tests is spent waiting for animations. + +You can disable `UIView` animations in your app to make the tests faster: + +```swift +if ProcessInfo().arguments.contains("SKIP_ANIMATIONS") { + UIView.setAnimationsEnabled(false) +} +``` + +This requires you to pass the launch argument like so: + +```ruby +snapshot(launch_arguments: ["SKIP_ANIMATIONS"]) +``` + +By default, _snapshot_ will wait for a short time for the animations to finish. +If you're skipping the animations, this wait time is unnecessary and can be skipped: + +```swift +setupSnapshot(app, waitForAnimations: false) +``` diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/check_app_store_metadata.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/check_app_store_metadata.md new file mode 100644 index 0000000..da87dcf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/check_app_store_metadata.md @@ -0,0 +1,103 @@ +
+
+Precheck
+============
+
+###### Check your app using a community-driven set of App Store review rules to avoid being rejected
+
+Apple rejects builds for many avoidable metadata issues like including swear words 😮, other companies' trademarks, or even mentioning an iOS bug 🐛. _fastlane precheck_ takes a lot of the guesswork out by scanning your app's details in App Store Connect for avoidable problems. fastlane precheck helps you get your app through app review without rejections so you can ship faster 🚀
+
+-------
+
+Features • Usage • Example • How does it work?
+
+-------
+
+# Features
+
+|   | precheck Features |
+|----------|-----------------|
+🐛 |  product bug mentions
+🙅 | Swear word checker
+🤖 | Mentioning other platforms
+😵 | URL reachability checker
+📝 | Placeholder/test words/mentioning future features
+📅 | Copyright date checking
+🙈 | Customizable word list checking
+📢 | You can decide if you want to warn about potential problems and continue or have _fastlane_ show an error and stop after all scans are done
+
+# Usage
+Run _fastlane precheck_ to check the app metadata from App Store Connect
+
+```no-highlight
+fastlane precheck
+```
+
+To get a list of available options run
+
+```no-highlight
+fastlane action precheck
+```
+
+# Example
+
+Since you might want to manually trigger _precheck_ but don't want to specify all the parameters every time, you can store your defaults in a so-called `Precheckfile`.
+
+Run `fastlane precheck init` to create a new configuration file. Example:
+
+```ruby-skip-tests
+# indicates that your metadata will not be checked by this rule
+negative_apple_sentiment(level: :skip)
+
+# when triggered, this rule will warn you of a potential problem
+curse_words(level: :warn)
+
+# show error and prevent any further commands from running after fastlane precheck finishes
+unreachable_urls(level: :error)
+
+# pass in whatever words you want to check for
+custom_text(data: ["chrome", "webos"],
+            level: :warn)
+```
+
+### Use with [_fastlane_](https://fastlane.tools)
+
+_precheck_ is fully integrated with [_deliver_](https://docs.fastlane.tools/actions/deliver/), another [_fastlane_](https://fastlane.tools) tool.
+
+Update your `Fastfile` to contain the following code:
+
+```ruby
+lane :production do
+  # ...
+
+  # by default deliver will call precheck and warn you of any problems
+  # if you want precheck to halt submitting to app review, you can pass
+  # precheck_default_rule_level: :error
+  deliver(precheck_default_rule_level: :error)
+
+  # ...
+end
+
+# or if you prefer, you can run precheck alone
+lane :check_metadata do
+  precheck
+end
+```
+
+# How does it work?
+
+_precheck_ will access `App Store Connect` to download your app's metadata. It uses [_spaceship_](https://spaceship.airforce) to communicate with Apple's web services.
+
+# Want to improve precheck's rules?
+Please submit an issue on GitHub and provide information about your App Store rejection! Make sure you scrub out any personally identifiable information since this will be public.
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/create_app_online.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/create_app_online.md
new file mode 100644
index 0000000..01aaa04
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/create_app_online.md
@@ -0,0 +1,386 @@
+ +###### Create new iOS apps on App Store Connect and Apple Developer Portal using your command line + +_produce_ creates new iOS apps on both the Apple Developer Portal and App Store Connect with the minimum required information. + +------- + +
+Features • Usage • How does it work?
+ +------- + +# Features + +- **Create** new apps on both App Store Connect and the Apple Developer Portal +- **Modify** Application Services on the Apple Developer Portal +- **Create** App Groups on the Apple Developer Portal +- **Associate** apps with App Groups on the Apple Developer Portal +- **Create** iCloud Containers on the Apple Developer Portal +- **Associate** apps with iCloud Containers on the Apple Developer Portal +- **Create** Merchant Identifiers on the Apple Developer Portal +- **Associate** apps with Merchant Identifiers on the Apple Developer Portal +- Support for **multiple Apple accounts**, storing your credentials securely in the Keychain + +# Usage + +## Creating a new application + +```no-highlight +fastlane produce +``` + +To get a list of all available parameters: + +```no-highlight +fastlane produce --help +``` + +```no-highlight +Commands: (* default) + associate_group Associate with a group, which is created if needed or simply located otherwise + associate_merchant Associate with a merchant for use with Apple Pay. Apple Pay will be enabled for this app + create * Creates a new app on App Store Connect and the Apple Developer Portal + disable_services Disable specific Application Services for a specific app on the Apple Developer Portal + enable_services Enable specific Application Services for a specific app on the Apple Developer Portal + group Ensure that a specific App Group exists + cloud_container Ensure that a specific iCloud Container exists + help Display global or [command] help documentation + merchant Ensure that a specific Merchant exists + +Global Options: + --verbose + -h, --help Display help documentation + -v, --version Display version information + +Options for create: + -u, --username STRING Your Apple ID Username (PRODUCE_USERNAME) + -a, --app_identifier STRING App Identifier (Bundle ID, e.g. com.krausefx.app) (PRODUCE_APP_IDENTIFIER) + -e, --bundle_identifier_suffix STRING App Identifier Suffix (Ignored if App Identifier does not ends with .*) (PRODUCE_APP_IDENTIFIER_SUFFIX) + -q, --app_name STRING App Name (PRODUCE_APP_NAME) + -z, --app_version STRING Initial version number (e.g. '1.0') (PRODUCE_VERSION) + -y, --sku STRING SKU Number (e.g. '1234') (PRODUCE_SKU) + -j, --platform STRING The platform to use (optional) (PRODUCE_PLATFORM) + -m, --language STRING Primary Language (e.g. 'English', 'German') (PRODUCE_LANGUAGE) + -c, --company_name STRING The name of your company. It's used to set company name on App Store Connect team's app pages. Only required if it's the first app you create (PRODUCE_COMPANY_NAME) + -i, --skip_itc [VALUE] Skip the creation of the app on App Store Connect (PRODUCE_SKIP_ITC) + -d, --skip_devcenter [VALUE] Skip the creation of the app on the Apple Developer Portal (PRODUCE_SKIP_DEVCENTER) + -s, --itc_users ARRAY Array of App Store Connect users. 
If provided, you can limit access to this newly created app for users with the App Manager, Developer, Marketer or Sales roles (ITC_USERS) + -b, --team_id STRING The ID of your Developer Portal team if you're in multiple teams (PRODUCE_TEAM_ID) + -l, --team_name STRING The name of your Developer Portal team if you're in multiple teams (PRODUCE_TEAM_NAME) + -k, --itc_team_id [VALUE] The ID of your App Store Connect team if you're in multiple teams (PRODUCE_ITC_TEAM_ID) + -p, --itc_team_name STRING The name of your App Store Connect team if you're in multiple teams (PRODUCE_ITC_TEAM_NAME) +``` + +## Enabling / Disabling Application Services + +If you want to enable Application Services for an App ID (HomeKit and HealthKit in this example): + +```no-highlight +fastlane produce enable_services --homekit --healthkit +``` + +If you want to disable Application Services for an App ID (iCloud in this case): + +```no-highlight +fastlane produce disable_services --icloud +``` + +If you want to create a new App Group: + +```no-highlight +fastlane produce group -g group.krausefx -n "Example App Group" +``` + +If you want to associate an app with an App Group: + +```no-highlight +fastlane produce associate_group -a com.krausefx.app group.krausefx +``` + +If you want to create a new iCloud Container: + +```no-highlight +fastlane produce cloud_container -g iCloud.com.krausefx.app -n "Example iCloud Container" +``` + +If you want to associate an app with an iCloud Container: + +```no-highlight +fastlane produce associate_cloud_container -a com.krausefx.app iCloud.com.krausefx.app +``` + +If you want to associate an app with multiple iCloud Containers: + +```no-highlight +fastlane produce associate_cloud_container -a com.krausefx.app iCloud.com.krausefx.app1 iCloud.com.krausefx.app2 +``` + +# Parameters + +Get a list of all available options using + +```no-highlight +fastlane produce enable_services --help +``` + +```no-highlight +--access-wifi Enable Access Wifi +--app-attest Enable App Attest +--app-group Enable App Group +--apple-pay Enable Apple Pay +--associated-domains Enable Associated Domains +--auto-fill-credential Enable Auto Fill Credential +--class-kit Enable Class Kit +--icloud STRING Enable iCloud, suitable values are "xcode5_compatible" and "xcode6_compatible" +--custom-network-protocol Enable Custom Network Protocol +--data-protection STRING Enable Data Protection, suitable values are "complete", "unlessopen" and "untilfirstauth" +--extended-virtual-address-space Enable Extended Virtual Address Space +--game-center STRING Enable Game Center, suitable values are "ios" and "macos +--health-kit Enable Health Kit +--hls-interstitial-preview Enable Hls Interstitial Preview +--home-kit Enable Home Kit +--hotspot Enable Hotspot +--in-app-purchase Enable In App Purchase +--inter-app-audio Enable Inter App Audio +--low-latency-hls Enable Low Latency Hls +--managed-associated-domains Enable Managed Associated Domains +--maps Enable Maps +--multipath Enable Multipath +--network-extension Enable Network Extension +--nfc-tag-reading Enable NFC Tag Reading +--personal-vpn Enable Personal VPN +--passbook Enable Passbook (deprecated) +--push-notification Enable Push Notification +--sign-in-with-apple Enable Sign In With Apple +--siri-kit Enable Siri Kit +--system-extension Enable System Extension +--user-management Enable User Management +--vpn-configuration Enable Vpn Configuration (deprecated) +--wallet Enable Wallet +--wireless-accessory Enable Wireless Accessory +--car-play-audio-app Enable Car Play 
Audio App +--car-play-messaging-app Enable Car Play Messaging App +--car-play-navigation-app Enable Car Play Navigation App +--car-play-voip-calling-app Enable Car Play Voip Calling App +--critical-alerts Enable Critical Alerts +--hotspot-helper Enable Hotspot Helper +--driver-kit Enable DriverKit +--driver-kit-endpoint-security Enable DriverKit Endpoint Security +--driver-kit-family-hid-device Enable DriverKit Family HID Device +--driver-kit-family-networking Enable DriverKit Family Networking +--driver-kit-family-serial Enable DriverKit Family Serial +--driver-kit-hid-event-service Enable DriverKit HID EventService +--driver-kit-transport-hid Enable DriverKit Transport HID +--multitasking-camera-access Enable Multitasking Camera Access +--sf-universal-link-api Enable SFUniversalLink API +--vp9-decoder Enable VP9 Decoder +--music-kit Enable MusicKit +--shazam-kit Enable ShazamKit +--communication-notifications Enable Communication Notifications +--group-activities Enable Group Activities +--health-kit-estimate-recalibration Enable HealthKit Estimate Recalibration +--time-sensitive-notifications Enable Time Sensitive Notifications +``` + +```no-highlight +fastlane produce disable_services --help +``` + +```no-highlight +--access-wifi Disable Access Wifi +--app-attest Disable App Attest +--app-group Disable App Group +--apple-pay Disable Apple Pay +--associated-domains Disable Associated Domains +--auto-fill-credential Disable Auto Fill Credential +--class-kit Disable Class Kit +--icloud STRING Disable iCloud +--custom-network-protocol Disable Custom Network Protocol +--data-protection STRING Disable Data Protection +--extended-virtual-address-space Disable Extended Virtual Address Space +--game-center STRING Disable Game Center +--health-kit Disable Health Kit +--hls-interstitial-preview Disable Hls Interstitial Preview +--home-kit Disable Home Kit +--hotspot Disable Hotspot +--in-app-purchase Disable In App Purchase +--inter-app-audio Disable Inter App Audio +--low-latency-hls Disable Low Latency Hls +--managed-associated-domains Disable Managed Associated Domains +--maps Disable Maps +--multipath Disable Multipath +--network-extension Disable Network Extension +--nfc-tag-reading Disable NFC Tag Reading +--personal-vpn Disable Personal VPN +--passbook Disable Passbook (deprecated) +--push-notification Disable Push Notification +--sign-in-with-apple Disable Sign In With Apple +--siri-kit Disable Siri Kit +--system-extension Disable System Extension +--user-management Disable User Management +--vpn-configuration Disable Vpn Configuration (deprecated) +--wallet Disable Wallet +--wireless-accessory Disable Wireless Accessory +--car-play-audio-app Disable Car Play Audio App +--car-play-messaging-app Disable Car Play Messaging App +--car-play-navigation-app Disable Car Play Navigation App +--car-play-voip-calling-app Disable Car Play Voip Calling App +--critical-alerts Disable Critical Alerts +--hotspot-helper Disable Hotspot Helper +--driver-kit Disable DriverKit +--driver-kit-endpoint-security Disable DriverKit Endpoint Security +--driver-kit-family-hid-device Disable DriverKit Family HID Device +--driver-kit-family-networking Disable DriverKit Family Networking +--driver-kit-family-serial Disable DriverKit Family Serial +--driver-kit-hid-event-service Disable DriverKit HID EventService +--driver-kit-transport-hid Disable DriverKit Transport HID +--multitasking-camera-access Disable Multitasking Camera Access +--sf-universal-link-api Disable SFUniversalLink API +--vp9-decoder Disable VP9 
Decoder +--music-kit Disable MusicKit +--shazam-kit Disable ShazamKit +--communication-notifications Disable Communication Notifications +--group-activities Disable Group Activities +--health-kit-estimate-recalibration Disable HealthKit Estimate Recalibration +--time-sensitive-notifications Disable Time Sensitive Notifications +``` + +## Creating Apple Pay merchants and associating them with an App ID + +If you want to create a new Apple Pay Merchant Identifier: + +```no-highlight +fastlane produce merchant -o merchant.com.example.production -r "Example Merchant Production" +``` + +Use `--help` for more information about all available parameters + +```no-highlight +fastlane produce merchant --help +``` + +If you want to associate an app with a Merchant Identifier: + +```no-highlight +fastlane produce associate_merchant -a com.krausefx.app merchant.com.example.production +``` + +If you want to associate an app with multiple Merchant Identifiers: + +```no-highlight +fastlane produce associate_merchant -a com.krausefx.app merchant.com.example.production merchant.com.example.sandbox +``` + +Use --help for more information about all available parameters + +```no-highlight +fastlane produce associate_merchant --help +``` + +## Environment Variables + +All available values can also be passed using environment variables, run `fastlane produce --help` to get a list of all available parameters. + +## _fastlane_ Integration + +Your `Fastfile` should look like this + +```ruby +lane :release do + produce( + username: 'felix@krausefx.com', + app_identifier: 'com.krausefx.app', + app_name: 'MyApp', + language: 'English', + app_version: '1.0', + sku: '123', + team_name: 'SunApps GmbH', # only necessary when in multiple teams + + # Optional + # App services can be enabled during app creation + enable_services: { + access_wifi: "on", # Valid values: "on", "off" + app_attest: "on", # Valid values: "on", "off" + app_group: "on", # Valid values: "on", "off" + apple_pay: "on", # Valid values: "on", "off" + associated_domains: "on", # Valid values: "on", "off" + auto_fill_credential: "on", # Valid values: "on", "off" + car_play_audio_app: "on", # Valid values: "on", "off" + car_play_messaging_app: "on", # Valid values: "on", "off" + car_play_navigation_app: "on", # Valid values: "on", "off" + car_play_voip_calling_app: "on", # Valid values: "on", "off" + class_kit: "on", # Valid values: "on", "off" + icloud: "xcode5_compatible", # Valid values: "xcode5_compatible", "xcode6_compatible", "off" + critical_alerts: "on", # Valid values: "on", "off" + custom_network_protocol: "on", # Valid values: "on", "off" + data_protection: "complete", # Valid values: "complete", "unlessopen", "untilfirstauth", "off" + extended_virtual_address_space: "on", # Valid values: "on", "off" + file_provider_testing_mode: "on", # Valid values: "on", "off" + fonts: "on", # Valid values: "on", "off" + game_center: "ios", # Valid values: "ios", "macos", off" + health_kit: "on", # Valid values: "on", "off" + hls_interstitial_preview: "on", # Valid values: "on", "off" + home_kit: "on", # Valid values: "on", "off" + hotspot: "on", # Valid values: "on", "off" + hotspot_helper: "on", # Valid values: "on", "off" + in_app_purchase: "on", # Valid values: "on", "off" + inter_app_audio: "on", # Valid values: "on", "off" + low_latency_hls: "on", # Valid values: "on", "off" + managed_associated_domains: "on", # Valid values: "on", "off" + maps: "on", # Valid values: "on", "off" + multipath: "on", # Valid values: "on", "off" + network_extension: "on", # 
Valid values: "on", "off" + nfc_tag_reading: "on", # Valid values: "on", "off" + passbook: "on", # Valid values: "on", "off" (deprecated) + personal_vpn: "on", # Valid values: "on", "off" + push_notification: "on", # Valid values: "on", "off" + sign_in_with_apple: "on", # Valid values: "on", "off" + siri_kit: "on", # Valid values: "on", "off" + system_extension: "on", # Valid values: "on", "off" + user_management: "on", # Valid values: "on", "off" + vpn_configuration: "on", # Valid values: "on", "off" (deprecated) + wallet: "on", # Valid values: "on", "off" + wireless_accessory: "on", # Valid values: "on", "off" + driver_kit: "on", # Valid values: "on", "off" + driver_kit_endpoint_security: "on", # Valid values: "on", "off" + driver_kit_family_hid_device: "on", # Valid values: "on", "off" + driver_kit_family_networking: "on", # Valid values: "on", "off" + driver_kit_family_serial: "on", # Valid values: "on", "off" + driver_kit_hid_event_service: "on", # Valid values: "on", "off" + driver_kit_transport_hid: "on", # Valid values: "on", "off" + multitasking_camera_access: "on", # Valid values: "on", "off" + sf_universal_link_api: "on", # Valid values: "on", "off" + vp9_decoder: "on", # Valid values: "on", "off" + music_kit: "on", # Valid values: "on", "off" + shazam_kit: "on", # Valid values: "on", "off" + communication_notifications: "on", # Valid values: "on", "off" + group_activities: "on", # Valid values: "on", "off" + health_kit_estimate_recalibration: "on", # Valid values: "on", "off" + time_sensitive_notifications: "on", # Valid values: "on", "off" + } + ) + + deliver +end +``` + +To use the newly generated app in _deliver_, you need to add this line to your `Deliverfile`: + +```ruby-skip-tests +apple_id(ENV['PRODUCE_APPLE_ID']) +``` + +This will tell _deliver_, which `App ID` to use, since the app is not yet available in the App Store. + +You'll still have to fill out the remaining information (like screenshots, app description and pricing). You can use [_deliver_](https://docs.fastlane.tools/actions/deliver/) to upload your app metadata using a CLI + +## How is my password stored? + +_produce_ uses the [password manager](https://github.com/fastlane/fastlane/tree/master/credentials_manager) from _fastlane_. Take a look the [CredentialsManager README](https://github.com/fastlane/fastlane/tree/master/credentials_manager) for more information. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/frame_screenshots.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/frame_screenshots.md new file mode 100644 index 0000000..676c4ab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/frame_screenshots.md @@ -0,0 +1,366 @@ +

+###### Easily put your screenshots into the right device frames
+
+_frameit_ allows you to put a gorgeous device frame around your iOS, macOS and Android screenshots just by running one simple command. Use _frameit_ to prepare perfect screenshots for the App Store, your website, QA or emails.
+
+-------
+
+Features • Usage • Tips
+
+-------
frameit is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+ + +# Features + +## Frame screenshot + +Put a gorgeous device frame around your iOS, macOS and Android screenshots just by running one simple command. Support for: + +- iPhone, iPad and Mac +- Set of Android devices +- Portrait and Landscape modes +- Several device frame colors + +The complete and updated list of supported devices and colors can be found [here](https://github.com/fastlane/frameit-frames/tree/gh-pages/latest) + +Here is a nice gif, that shows _frameit_ in action: + +![img/actions/FrameitGit.gif](/img/actions/FrameitGit.gif?raw=1) + +## Advanced Features + +- Put framed screenshot on colored background, define padding +- add text above or under framed screenshot + - keyword + text + - choose text font and color + - multi line text + - "intelligent" positioning of text that always looks good(ish) + +## Results + +![img/actions/ScreenshotsBig.png](/img/actions/ScreenshotsBig.png?raw=1) + +------- + +![img/actions/ScreenshotsOverview.png](/img/actions/ScreenshotsOverview.png?raw=1) + +------- + +![img/actions/MacExample.png](/img/actions/MacExample.png?raw=1) + +
The frameit 2.0 update was kindly sponsored by MindNode, seen in the screenshots above.
+ + +The first time that _frameit_ is executed the frames will be downloaded automatically. Originally the frames are coming from [Facebook frameset](https://design.facebook.com/toolsandresources/devices/) and they are kept on [this repo](https://github.com/fastlane/frameit-frames). + +More information about this process and how to update the frames can be found [here](https://github.com/fastlane/fastlane/tree/master/frameit/frames_generator) + +# Usage + +## Basic Usage + +Why should you have to use Photoshop, just to add a frame around your screenshots? + +Just navigate to your folder of screenshots and use the following command (iOS and Mac OS are default platforms for backward compatibility): + +```no-highlight +fastlane frameit +``` + +To frame Android screenshots: + +```no-highlight +fastlane frameit android +``` + +To use the silver version of the frames: + +```no-highlight +fastlane frameit silver +``` + +To download the latest frames + +```no-highlight +fastlane frameit download_frames +``` + +Note: When using _frameit_ without titles on top, the screenshots will have the full resolution, which means they can't be uploaded to the App Store directly. They are supposed to be used for websites, print media and emails. Check out the section below to use the screenshots for the App Store. + +## Advanced Usage (optional) + +### Text and Background + +With _frameit_ it's possible to add a custom background and text below or above the framed screenshots in fonts and colors you define. + +A working example can be found in the [fastlane examples](https://github.com/fastlane/examples/tree/master/MindNode/screenshots) project. + +### `Framefile.json` + +The Framefile allows to define general and screenshot specific information. +It has the following general JSON structure: + +```json +{ + "device_frame_version": "latest", + "default": { + ... + }, + "data": [ + ... + ] +} +``` + +### General parameters + +The general parameters are defined in the `default` key and can be: + +| Key | Description | Default value | +|-----|-------------|---------------| +| `background` | The background that should be used for the framed screenshot. Specify the (relative) path to the image file (e.g. `*.jpg`). This parameter is mandatory. | NA | +| `keyword` | An object that contains up to 3 keys to describe the optional keyword. See [table](#keyword-and-title-parameters) below. | NA | +| `title` | An object that contains up to 3 keys to describe the mandatory title. See [table](#keyword-and-title-parameters) below. | NA | +| `stack_title` | Specifies whether _frameit_ should display the keyword above the title when both keyword and title are defined. If it is false, the title and keyword will be displayed side by side when both keyword and title are defined. | `false` | +| `title_below_image` | Specifies whether _frameit_ should place the title and optional keyword below the device frame. If it is false, it will be placed above the device frame. | `false` | +| `show_complete_frame` | Specifies whether _frameit_ should shrink the device frame so that it is completely shown in the framed screenshot. If it is false, clipping of the device frame might occur at the bottom (when `title_below_image` is `false`) or top (when `title_below_image` is `true`) of the framed screenshot. | `false` | +| `padding` | The content of the framed screenshot will be resized to match the specified `padding` around all edges. 
The vertical padding is also applied between the text and the top or bottom (depending on `title_below_image`) of the device frame. There are 3 different options of specifying the padding: 1. Default: An integer value that defines both horizontal and vertical padding in pixels. 2. A string that defines (different) padding values in pixels for horizontal and vertical padding. The syntax is `"<horizontal>x<vertical>"`, e.g. `"30x60"`. 3. A string that defines (different) padding values in percentage for horizontal and vertical padding. The syntax is `"<horizontal>%x<vertical>%"`, e.g. `"5%x10%"`. **Note:** The percentage is calculated from the smallest image dimension (height or width).
A combination of option 2 and 3 is possible, e.g. `"5%x40"`. | `50` | +| `interline_spacing` | Specifies whether _frameit_ should add or subtract this many pixels between the individual lines of text. This only applies to a multi-line `title` and/or `keyword` to expand or squash together the individual lines of text. | `0` | +| `font_scale_factor` | Specifies whether _frameit_ should increase or decrease the font size of the text. Is ignored for `keyword` or `title`, if `font_size` is specified. | `0.1` | +| `frame` | Overrides the color of the frame to be used. (Valid values are `BLACK`, `WHITE`, `GOLD` and `ROSE_GOLD`) | NA | +| `title_min_height` | Specifies a height always reserved for the title. Value can be a percentage of the height or an absolute value. The device will be placed below (or above) this area. Convenient to ensure the device top (or bottom) will be consistently placed at the same height on the different screenshots. | NA | +| `use_platform` | Overrides the platform used for the screenshot. Valid values are `IOS`, `ANDROID` and `ANY`. | `IOS` | +| `force_device_type` | Forces a specific device. Valid values are: Huawei P8, Motorola Moto E, Motorola Moto G, Nexus 4, Nexus 5X, Nexus 6P, Nexus 9, Samsung Galaxy Grand Prime, Samsung Galaxy Note 5, Samsung Galaxy S Duos, Samsung Galaxy S3, Samsung Galaxy S5, Samsung Galaxy S7, Samsung Galaxy S8, Samsung Galaxy S9, iPhone 5s, iPhone 5c, iPhone SE, iPhone 6s, iPhone 6s Plus, iPhone 7, iPhone 7 Plus, iPhone 8, iPhone 8 Plus, iPhone X, iPhone XS, iPhone XR, iPhone XS Max, iPad Air 2, iPad Mini 4, iPad Pro, MacBook, Google Pixel 3, Google Pixel 3 XL, HTC One A9, HTC One M8 | NA | + +### Specific parameters + +The screenshot specific parameters are related to the keyword and title texts. +These are defined in the `data` key. This is an array with the following keys for each screenshot: + +| Key | Description | +|-----|-------------| +| `filter` | This is mandatory to link the individual configuration to the screenshot, based on part of the file name.

Example:
If a screenshot is named `iPhone 8-Brainstorming.png` you can use value `Brainstorming` for `filter`. If there are more than one `filter` matching an entry, they will all be applied in order (which means that the last one has the highest precedence). All other keys from that array element will only be applied on this specific screenshot. | +| `keyword` | Similar use as in `default`, except that parameter `text` can be used here because it is screenshot specific. | +| `title` | Similar use as in `default`, except that parameter `text` can be used here because it is screenshot specific. | +| `frame` | Overrides the color of the frame to be used. (Valid values are `BLACK`, `WHITE`, `GOLD` and `ROSE_GOLD`) | NA | +| `use_platform` | Overrides the platform used for the screenshot. Valid values are `IOS`, `ANDROID` and `ANY`. | `IOS` | +| `force_device_type` | Forces a specific device. Valid values are the same as for the general parameter. | NA | + +### Framefile `keyword` and `title` parameters + +The `keyword` and `title` parameters are both used in `default` and `data`. They both consist of the following optional keys: + +| Key | Description | Default value | +|-----|-------------|---------------| +| `color` | The font color for the text. Specify a HEX/HTML color code. | `#000000` (black) | +| `font` | The font family for the text. Specify the (relative) path to the font file (e.g. an OpenType Font). | The default `imagemagick` font, which is system dependent. | +| `font_size` | The font size for the text specified in points. If not specified or `0`, font will be scaled automatically to fit the available space. _frameit_ still shrinks the text, if it would not fit. | NA | +| `font_weight` | The [font weight for the text](https://imagemagick.org/script/command-line-options.php#weight). Specify an integer value (e.g. 900). | NA | +| `text` | The text that should be used for the `keyword` or `title`.

Note: If you want to use localised text, use [`.strings` files](#strings-files). | NA | + +### Example + +```json +{ + "device_frame_version": "latest", + "default": { + "keyword": { + "font": "./fonts/MyFont-Rg.otf" + }, + "title": { + "font": "./fonts/MyFont-Th.otf", + "font_size": 128, + "color": "#545454" + }, + "background": "./background.jpg", + "padding": 50, + "show_complete_frame": false, + "stack_title" : false, + "title_below_image": true, + "frame": "WHITE", + "use_platform": "IOS" + }, + + "data": [ + { + "filter": "Brainstorming", + "keyword": { + "color": "#d21559" + } + }, + { + "filter": "Organizing", + "keyword": { + "color": "#feb909" + }, + "frame": "ROSE_GOLD" + }, + { + "filter": "Sharing", + "keyword": { + "color": "#aa4dbc" + } + }, + { + "filter": "Styling", + "keyword": { + "color": "#31bb48" + } + }, + { + "filter": "Android", + "use_platform": "ANDROID" + } + ] +} +``` + +You can find a more complex [configuration](https://github.com/fastlane/examples/blob/master/MindNode/screenshots/Framefile.json) to also support Chinese, Japanese and Korean languages. + +The `Framefile.json` should be in the `screenshots` folder, as seen in the [example](https://github.com/fastlane/examples/tree/master/MindNode/screenshots). + +### `.strings` files + +To define the title and optionally the keyword, put two `.strings` files into the language folder (e.g. [en-US in the example project](https://github.com/fastlane/examples/tree/master/MindNode/screenshots/en-US)) + +The `keyword.strings` and `title.strings` are standard `.strings` file you already use for your iOS apps, making it easy to use your existing translation service to get localized titles. + +**Notes** + +- These `.strings` files **MUST** be utf-8 (UTF-8) or utf-16 encoded (UTF-16 BE with BOM). They also must begin with an empty line. If you are having trouble see [issue #1740](https://github.com/fastlane/fastlane/issues/1740) +- You **MUST** provide a background if you want titles. _frameit_ will not add the titles if a background is not specified. + +### Screenshot orientation + +By default _frameit_ adds a frame to your screenshot based on an orientation you took it. For a portrait (vertical orientation) it is going to add portrait frame and for a landscape (horizontal orientation) - landscape left (= [Home button on the left side](https://developer.apple.com/documentation/uikit/uiinterfaceorientation/landscapeleft)). + +One way to override the default behavior is editing the file name by adding `force_landscaperight` to the end. + +### `force_orientation_block` + +If the default behavior doesn't fit your needs and you don't want or can't rename your screenshots, you can customize _frameit_'s orientation behavior by setting a `force_orientation_block` parameter. The valid values are: `:landscape_left` (home button on the left side), `:landscape_right` (home button on the right side), `:portrait` (home button on the bottom), `nil` (home button on the right side). 
+ +### Examples + +```ruby +# It matches the filename to the framed device orientation +frameit( + path: "./fastlane/screenshots", + force_orientation_block: proc do |filename| + case filename + when "iPad Pro (12.9-inch)-01LoginScreen" + :landscape_right + when "iPhone 6 Plus-01LoginScreen" + :portrait + # and so on + end + end +) +``` + +```ruby +# It frames the screenshots in landscape right whenever the filename contains `landscape` word +frameit( + silver: true, + path: "./fastlane/screenshots", + force_orientation_block: proc do |filename| + f = filename.downcase + if f.include?("landscape") + :landscape_right + end + end +) +``` + +# Mac + +With _frameit_ it's possible to also frame macOS Application screenshots. You have to provide the following: + +- A (relative) path to a `background` image file, which should contain both the background and the Mac. +- The `offset` information so _frameit_ knows where to position your screenshot on the `background`: + - `offset` : A string that specifies the horizontal and vertical offset in pixels, with respect to the top left corner of the `background` image. The syntax is `"++"`, e.g. `"+200+150"`. + - `titleHeight` : The height in pixels that should be used for the title. + +## Example +```json +{ + "default": { + "title": { + "color": "#545454" + }, + "background": "Mac.jpg", + "offset": { + "offset": "+676+479", + "titleHeight": 320 + } + }, + "data": [ + { + "filter": "Brainstorming", + "keyword": { + "color": "#d21559" + } + } + ] +} +``` + +Check out the [MindNode example project](https://github.com/fastlane/examples/tree/master/MindNode/screenshots). + +# Tips + +## Generate localized screenshots + +Check out [_snapshot_](https://docs.fastlane.tools/actions/snapshot/) to automatically generate screenshots using ```UI Automation```. + +## Resume framing + +Framing screenshots is a slow operation. In case you need to resume framing, or just frame a couple updated screenshots again, you can rely on the `--resume` flag. Only screenshots which have not been framed yet – or for which there isn't an up-to-date framed image – will be framed. This feature uses the file modification dates and will reframe screenshots if the screenshot is newer than the framed file. + +## Upload screenshots + +Use [_deliver_](https://docs.fastlane.tools/actions/deliver/) to upload iOS screenshots to App Store Connect, or [_supply_](https://docs.fastlane.tools/actions/supply/) to upload Android screenshots to Play Store completely automatically 🚀 + +## Use a clean status bar + +You can set `override_status_bar` to `true` in snapshot to set the status bar to Tuesday January 9th at 9:41AM with full battery and reception. If you need more granular customization, to set a Carrier name for example, also set `override_status_bar_arguments` to the specific arguments to be passed to the `xcrun simctl status_bar override` command. Run `xcrun simctl status_bar --help` to see the options available. 
+ +### Examples + +```ruby +# Sets the time to 9:41AM with full battery and reception, with the default carrier name: Carrier +capture_ios_screenshots( + override_status_bar: true +) +``` + +```ruby +# Set the time to 9:41AM, battery at 75% and charging, on the TELUS LTE network +capture_ios_screenshots( + override_status_bar: true, + override_status_bar_arguments: "--time 9:41 --dataNetwork lte --cellularMode active --cellularBars 4 --batteryState charging --batteryLevel 75 --operatorName TELUS" +) +``` + +## Gray artifacts around text + +If you run into any quality issues, like having a border around the font, it usually helps to just re-install `imagemagick`. You can do so by running + +```sh +brew uninstall imagemagick +brew install imagemagick +``` + +## Uninstall + +- `gem uninstall fastlane` +- `rm -rf ~/.frameit` diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_certificates.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_certificates.md new file mode 100644 index 0000000..fe7fc2e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_certificates.md @@ -0,0 +1,86 @@ +

+-------
+
+Why? • Usage • How does it work? • Tips
+
+-------
cert is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+ +![/img/actions/cert.gif](/img/actions/cert.gif) + +In the gif we used `cert && sigh`, which will first create an iOS code signing certificate and then a provisioning profile for your app if _cert_ succeeded. + +# Usage + +**Note**: It is recommended to use [_match_](/actions/match/) according to the [codesigning.guide](https://codesigning.guide) for generating and maintaining your certificates. Use _cert_ directly only if you want full control over what's going on and know more about codesigning. + +```no-highlight +fastlane cert +``` + +This will check if any of the available signing certificates is installed on your local machine. + +Only if a new certificate needs to be created, _cert_ will + +- Create a new private key +- Create a new signing request +- Generate, downloads and installs the certificate +- Import all the generated files into your Keychain + +_cert_ will never revoke your existing certificates. If you can't create any more certificates, _cert_ will raise an exception, which means, you have to revoke one of the existing certificates to make room for a new one. + + +You can pass your Apple ID: + +```no-highlight +fastlane cert -u cert@krausefx.com +``` + +For a list of available commands run + +```no-highlight +fastlane action cert +``` + +Keep in mind, there is no way for _cert_ to download existing certificates + private keys from the Apple Developer Portal, as the private key never leaves your computer. + +## Environment Variables + +Run `fastlane action cert` to get a list of all available environment variables. + +## Use with [_sigh_](https://docs.fastlane.tools/actions/sigh/) + +_cert_ becomes really interesting when used in [_fastlane_](https://fastlane.tools) in combination with [_sigh_](https://docs.fastlane.tools/actions/sigh/). + +Update your `Fastfile` to contain the following code: + +```ruby +lane :beta do + cert + sigh(force: true) +end +``` + +`force: true` will make sure to re-generate the provisioning profile on each run. +This will result in _sigh_ always using the correct signing certificate, which is installed on the local machine. + + +## How is my password stored? +_cert_ uses the [password manager](https://github.com/fastlane/fastlane/tree/master/credentials_manager) from _fastlane_. Take a look the [CredentialsManager README](https://github.com/fastlane/fastlane/blob/master/credentials_manager/README.md) for more information. + +# Tips + +## Use 'ProvisionQL' for advanced Quick Look in Finder + +Install [ProvisionQL](https://github.com/ealeksandrov/ProvisionQL). + +It will show you `mobileprovision` files like this: +![img/actions/QuickLookScreenshot-Provision.png](/img/actions/QuickLookScreenshot-Provision.png) diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_provisioning_profile.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_provisioning_profile.md new file mode 100644 index 0000000..1897ff3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_provisioning_profile.md @@ -0,0 +1,223 @@ +

+###### Because you would rather spend your time building stuff than fighting provisioning
+
+_sigh_ can create, renew, download and repair provisioning profiles (with one command). It supports App Store, Ad Hoc, Development and Enterprise profiles and offers nice features like auto-adding all test devices.
+
+-------
+
+Features • Usage • Resign • How does it work?
+ +------- + +# Features + +- **Download** the latest provisioning profile for your app +- **Renew** a provisioning profile, when it has expired +- **Repair** a provisioning profile, when it is broken +- **Create** a new provisioning profile, if it doesn't exist already +- Supports **App Store**, **Ad Hoc** and **Development** profiles +- Support for **multiple Apple accounts**, storing your credentials securely in the Keychain +- Support for **multiple Teams** +- Support for **Enterprise Profiles** + +To automate iOS Push profiles you can use [_pem_](https://docs.fastlane.tools/actions/pem/). + + +### Why not let Xcode do the work? + +- _sigh_ can easily be integrated into your CI-server (e.g. Jenkins) +- Xcode sometimes invalidates [all existing profiles](/img/actions/SignErrors.png) +- You have control over what happens +- You still get to have the signing files, which you can then use for your build scripts or store in git + +See _sigh_ in action: + +![img/actions/sighRecording.gif](/img/actions/sighRecording.gif) + +# Usage + +**Note**: It is recommended to use [_match_](https://docs.fastlane.tools/actions/match/) according to the [codesigning.guide](https://codesigning.guide) for generating and maintaining your provisioning profiles. Use _sigh_ directly only if you want full control over what's going on and know more about codesigning. + +```no-highlight +fastlane sigh +``` + +Yes, that's the whole command! + +_sigh_ will create, repair and download profiles for the App Store by default. + +You can pass your bundle identifier and username like this: + +```no-highlight +fastlane sigh -a com.krausefx.app -u username +``` + +If you want to generate an **Ad Hoc** profile instead of an App Store profile: + +```no-highlight +fastlane sigh --adhoc +``` + +If you want to generate a **Development** profile: + +```no-highlight +fastlane sigh --development +``` + +To generate the profile in a specific directory: + +```no-highlight +fastlane sigh -o "~/Certificates/" +``` + +To download all your provisioning profiles use + +```no-highlight +fastlane sigh download_all +``` + +Optionally, use `fastlane sigh download_all --download_xcode_profiles` to also include the Xcode managed provisioning profiles + +For a list of available parameters and commands run + +```no-highlight +fastlane action sigh +``` + +### Advanced + +By default, _sigh_ will install the downloaded profile on your machine. If you just want to generate the profile and skip the installation, use the following flag: + +```no-highlight +fastlane sigh --skip_install +``` + +To save the provisioning profile under a specific name, use the -q option: + +```no-highlight +fastlane sigh -a com.krausefx.app -u username -q "myProfile.mobileprovision" +``` + +If for some reason you don't want _sigh_ to verify that the code signing identity is installed on your local machine: + +```no-highlight +fastlane sigh --skip_certificate_verification +``` + +If you need the provisioning profile to be renewed regardless of its state use the `--force` option. This gives you a profile with the maximum lifetime. `--force` will also add all available devices to this profile. + +```no-highlight +fastlane sigh --force +``` + +By default, _sigh_ will include all certificates on development profiles, and first certificate on other types. 
If you need to specify which certificate to use you can either use the environment variable `SIGH_CERTIFICATE`, or pass the name or expiry date of the certificate as argument: + +```no-highlight +fastlane sigh -c "SunApps GmbH" +``` + +For a list of available parameters and commands run + +```no-highlight +fastlane action sigh +``` + + +### Use with [_fastlane_](https://fastlane.tools) + +_sigh_ becomes really interesting when used in [_fastlane_](https://fastlane.tools) in combination with [_cert_](https://docs.fastlane.tools/actions/cert/). + +Update your `Fastfile` to contain the following code: + +```ruby +lane :beta do + cert + sigh(force: true) +end +``` + +`force: true` will make sure to re-generate the provisioning profile on each run. +This will result in _sigh_ always using the correct signing certificate, which is installed on the local machine. + + +# Repair + +_sigh_ can automatically repair all your existing provisioning profiles which are expired or just invalid. + +```no-highlight +fastlane sigh repair +``` + +# Resign + +If you generated your `ipa` file but want to apply a different code signing onto the ipa file, you can use `sigh resign`: + +```no-highlight +fastlane sigh resign +``` + +_sigh_ will find the ipa file and the provisioning profile for you if they are located in the current folder. + +You can pass more information using the command line: + +```no-highlight +fastlane sigh resign ./path/app.ipa --signing_identity "iPhone Distribution: Felix Krause" -p "my.mobileprovision" +``` + +# Manage + +With `sigh manage` you can list all provisioning profiles installed locally. + +```no-highlight +fastlane sigh manage +``` + +Delete all expired provisioning profiles + +```no-highlight +fastlane sigh manage -e +``` + +Or delete all `iOS Team Provisioning Profile` by using a regular expression + +```no-highlight +fastlane sigh manage -p "iOS\ ?Team Provisioning Profile:" +``` + +## Environment Variables + +Run `fastlane action sigh` to get a list of all available environment variables. + +If you're using [_cert_](https://docs.fastlane.tools/actions/cert/) in combination with [_fastlane_](https://fastlane.tools) the signing certificate will automatically be selected for you. (make sure to run _cert_ before _sigh_) + +# How does it work? + +_sigh_ will access the `iOS Dev Center` to download, renew or generate the `.mobileprovision` file. It uses [_spaceship_](https://spaceship.airforce) to communicate with Apple's web services. + +## How is my password stored? +_sigh_ uses the [CredentialsManager](https://github.com/fastlane/fastlane/tree/master/credentials_manager) from _fastlane_. + +# Tips + +## Use 'ProvisionQL' for advanced Quick Look in Finder + +Install [ProvisionQL](https://github.com/ealeksandrov/ProvisionQL). + +It will show you `mobileprovision` files like this: +![img/actions/QuickLookScreenshot-Provision.png](/img/actions/QuickLookScreenshot-Provision.png) + +## App Identifier couldn't be found + +If you also want to create a new App Identifier on the Apple Developer Portal, check out [_produce_](https://docs.fastlane.tools/actions/produce/), which does exactly that. + +## What happens to my Xcode managed profiles? + +_sigh_ will never touch or use the profiles which are created and managed by Xcode. Instead _sigh_ will manage its own set of provisioning profiles. 
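+
+## Using the downloaded profile in a lane
+
+A minimal sketch (the scheme name is a placeholder): after _sigh_ runs, the path to the downloaded profile is available to later steps via the `SIGH_PROFILE_PATH` environment variable.
+
+```ruby
+lane :beta do
+  cert
+  # force: true re-generates the profile so it always matches
+  # the certificate _cert_ just ensured is installed locally
+  sigh(force: true)
+
+  # _sigh_ exposes the downloaded profile to later steps
+  UI.message("Using profile at #{ENV['SIGH_PROFILE_PATH']}")
+
+  gym(scheme: "MyScheme") # placeholder scheme name
+end
+```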
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_push_certificate.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_push_certificate.md new file mode 100644 index 0000000..caea5c1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/get_push_certificate.md @@ -0,0 +1,134 @@ +

+###### Automatically generate and renew your push notification profiles
+
+Tired of manually creating and maintaining your push notification profiles for your iOS apps? Tired of generating a _pem_ file for your server?
+
+_pem_ does all that for you, simply by running _pem_.
+
+_pem_ creates new .pem, .cer, and .p12 files to be uploaded to your push server if a valid push notification profile is needed. _pem_ does not cover uploading the file to your server.
+
+To automate iOS Provisioning profiles you can use [_match_](https://docs.fastlane.tools/actions/match/).
+
+-------
+
+Features • Usage • How does it work? • Tips • Need help?
+
+-------
pem is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+ +# Features +Well, it's actually just one: Generate the _pem_ file for your server. + +Check out this gif: + +![img/actions/PEMRecording.gif](/img/actions/PEMRecording.gif) + +# Usage + +```no-highlight +fastlane pem +``` + +Yes, that's the whole command! + +This does the following: + +- Create a new signing request +- Create a new push certification +- Downloads the certificate +- Generates a new ```.pem``` file in the current working directory, which you can upload to your server + +Note that _pem_ will never revoke your existing certificates. _pem_ can't download any of your existing push certificates, as the private key is only available on the machine it was created on. + +If you already have a push certificate enabled, which is active for at least 30 more days, _pem_ will not create a new certificate. If you still want to create one, use the `force`: + +```no-highlight +fastlane pem --force +``` + +You can pass parameters like this: + +```no-highlight +fastlane pem -a com.krausefx.app -u username +``` + +If you want to generate a development certificate instead: + +```no-highlight +fastlane pem --development +``` + +If you want to generate a Website Push certificate: + +```no-highlight +fastlane pem --website_push +``` + +Set a password for your `p12` file: + +```no-highlight +fastlane pem -p "MyPass" +``` + +You can specify a name for the output file: + +```no-highlight +fastlane pem -o my.pem +``` + +To get a list of available options run: + +```no-highlight +fastlane action pem +``` + + +### Note about empty `p12` passwords and Keychain Access.app + +_pem_ will produce a valid `p12` without specifying a password, or using the empty-string as the password. +While the file is valid, the Mac's Keychain Access will not allow you to open the file without specifying a passphrase. + +Instead, you may verify the file is valid using OpenSSL: + +```no-highlight +openssl pkcs12 -info -in my.p12 +``` + +If you need the `p12` in your keychain, perhaps to test push with an app like [Knuff](https://github.com/KnuffApp/Knuff) or [Pusher](https://github.com/noodlewerk/NWPusher), you can use `openssl` to export the `p12` to _pem_ and back to `p12`: + +```no-highlight +% openssl pkcs12 -in my.p12 -out my.pem +Enter Import Password: + +MAC verified OK +Enter PEM pass phrase: + + +% openssl pkcs12 -export -in my.pem -out my-with-passphrase.p12 +Enter pass phrase for temp.pem: + + +Enter Export Password: + +``` + +## Environment Variables + +Run `fastlane action pem` to get a list of available environment variables. + +# How does it work? + +_pem_ uses [_spaceship_](https://spaceship.airforce) to communicate with the Apple Developer Portal to request a new push certificate for you. + +## How is my password stored? +_pem_ uses the [password manager](https://github.com/fastlane/fastlane/tree/master/credentials_manager) from _fastlane_. Take a look the [CredentialsManager README](https://github.com/fastlane/fastlane/tree/master/credentials_manager) for more information. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/run_tests.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/run_tests.md new file mode 100644 index 0000000..0545cfa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/run_tests.md @@ -0,0 +1,151 @@ +

+###### The easiest way to run tests of your iOS and Mac app
+
+_scan_ makes it easy to run tests of your iOS and Mac app on a simulator or connected device.
+
+-------
+
+Features • Usage • Scanfile
+ +------- + +# What's scan? + +![https://pbs.twimg.com/media/CURcEpuWoAArE3d.png:large](https://pbs.twimg.com/media/CURcEpuWoAArE3d.png:large) + +### Before _scan_ + +```no-highlight +xcodebuild \ + -workspace MyApp.xcworkspace \ + -scheme "MyApp" \ + -sdk iphonesimulator \ + -destination 'platform=iOS Simulator,name=iPhone 6,OS=8.1' \ + test +``` + +As the output will look like this + +```no-highlight +/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/Objects-normal/arm64/main.o Example/main.m normal arm64 objective-c com.apple.compilers.llvm.clang.1_0.compiler + cd /Users/felixkrause/Developer/fastlane/gym/example/cocoapods + export LANG=en_US.US-ASCII + export PATH="/Applications/Xcode-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin:/Applications/Xcode-beta.app/Contents/Developer/usr/bin:/Users/felixkrause/.rvm/gems/ruby-2.2.0/bin:/Users/felixkrause/.rvm/gems/ruby-2.2.0@global/bin:/Users/felixkrause/.rvm/rubies/ruby-2.2.0/bin:/Users/felixkrause/.rvm/bin:/usr/local/heroku/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" + /Applications/Xcode-beta.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -x objective-c -arch arm64 -fmessage-length=126 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 -fcolor-diagnostics -std=gnu99 -fobjc-arc -fmodules -gmodules -fmodules-cache-path=/Users/felixkrause/Library/Developer/Xcode/DerivedData/ModuleCache -fmodules-prune-interval=86400 -fmodules-prune-after=345600 -fbuild-session-file=/Users/felixkrause/Library/Developer/Xcode/DerivedData/ModuleCache/Session.modulevalidation -fmodules-validate-once-per-build-session -Wnon-modular-include-in-framework-module -Werror=non-modular-include-in-framework-module -Wno-trigraphs -fpascal-strings -Os -fno-common -Wno-missing-field-initializers -Wno-missing-prototypes -Werror=return-type -Wunreachable-code -Wno-implicit-atomic-properties -Werror=deprecated-objc-isa-usage -Werror=objc-root-class -Wno-arc-repeated-use-of-weak -Wduplicate-method-match -Wno-missing-braces -Wparentheses -Wswitch -Wunused-function -Wno-unused-label -Wno-unused-parameter -Wunused-variable -Wunused-value -Wempty-body -Wconditional-uninitialized -Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wconstant-conversion -Wint-conversion -Wbool-conversion -Wenum-conversion -Wshorten-64-to-32 -Wpointer-sign -Wno-newline-eof -Wno-selector -Wno-strict-selector-match -Wundeclared-selector -Wno-deprecated-implementations -DCOCOAPODS=1 -DNS_BLOCK_ASSERTIONS=1 -DOBJC_OLD_DISPATCH_PROTOTYPES=0 -isysroot /Applications/Xcode-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.0.sdk -fstrict-aliasing -Wprotocol -Wdeprecated-declarations -miphoneos-version-min=9.0 -g -fvisibility=hidden -Wno-sign-conversion -fembed-bitcode -iquote /Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/ExampleProductName-generated-files.hmap -I/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/ExampleProductName-own-target-headers.hmap 
-I/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/ExampleProductName-all-target-headers.hmap -iquote /Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/ExampleProductName-project-headers.hmap -I/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/BuildProductsPath/Release-iphoneos/include -I/Users/felixkrause/Developer/fastlane/gym/example/cocoapods/Pods/Headers/Public -I/Users/felixkrause/Developer/fastlane/gym/example/cocoapods/Pods/Headers/Public/HexColors -I/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/DerivedSources/arm64 -I/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/DerivedSources -F/Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/BuildProductsPath/Release-iphoneos -isystem /Users/felixkrause/Developer/fastlane/gym/example/cocoapods/Pods/Headers/Public -isystem /Users/felixkrause/Developer/fastlane/gym/example/cocoapods/Pods/Headers/Public/HexColors -MMD -MT dependencies -MF /Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/Objects-normal/arm64/main.d --serialize-diagnostics /Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/Objects-normal/arm64/main.dia -c /Users/felixkrause/Developer/fastlane/gym/example/cocoapods/Example/main.m -o /Users/felixkrause/Library/Developer/Xcode/DerivedData/Example-fhlmxikmujknefgidqwqvtbatohi/Build/Intermediates/ArchiveIntermediates/Example/IntermediateBuildFilesPath/Example.build/Release-iphoneos/Example.build/Objects-normal/arm64/main.o +``` +you'll probably want to use something like [xcpretty](https://github.com/supermarin/xcpretty), which will look like this: + +```no-highlight +set -o pipefail && + xcodebuild \ + -workspace MyApp.xcworkspace \ + -scheme "MyApp" \ + -sdk iphonesimulator \ + -destination 'platform=iOS Simulator,name=iPhone 6,OS=8.1' \ + test \ + | xcpretty \ + -r "html" \ + -o "tests.html" +``` + +### With _scan_ + +```no-highlight +fastlane scan +``` + +### Why _scan_? + +_scan_ uses the latest APIs and tools to make running tests plain simple and offer a great integration into your existing workflow, like [_fastlane_](https://fastlane.tools) or Jenkins. + +| | scan Features | +|----------|-----------------| +🏁 | Beautiful inline build output while running the tests +🚠 | Sensible defaults: Automatically detect the project, schemes and more +📊 | Support for HTML, JSON and JUnit reports +🔎 | Xcode duplicated your simulators again? 
_scan_ will handle this for you +🔗 | Works perfectly with [_fastlane_](https://fastlane.tools) and other tools +🚅 | Don't remember any complicated build commands, just _scan_ +🔧 | Easy and dynamic configuration using parameters and environment variables +đŸ“ĸ | Beautiful slack notifications of the test results +💾 | Store common build settings in a `Scanfile` +📤 | The raw `xcodebuild` outputs are stored in `~/Library/Logs/scan` +đŸ’ģ | Supports both iOS and Mac applications +👱 | Automatically switches to the [travis formatter](https://github.com/kattrali/xcpretty-travis-formatter) when running on Travis +📖 | Helps you resolve common test errors like simulator not responding + +_scan_ uses a plain `xcodebuild` command, therefore keeping 100% compatible with `xcodebuild`. To generate the nice output, _scan_ uses [xcpretty](https://github.com/supermarin/xcpretty). You can always access the raw output in `~/Library/Logs/scan`. + +![img/actions/scanScreenshot.png](/img/actions/scanScreenshot.png) +![img/actions/slack.png](/img/actions/slack.png) +![img/actions/scanHTML.png](/img/actions/scanHTML.png) +![img/actions/scanHTMLFailing.png](/img/actions/scanHTMLFailing.png) + +# Usage + +```no-highlight +fastlane scan +``` + +That's all you need to run your tests. If you want more control, here are some available parameters: + +```no-highlight +fastlane scan --workspace "Example.xcworkspace" --scheme "AppName" --device "iPhone 6" --clean +``` + +If you need to use a different Xcode install, use `[xcodes](https://docs.fastlane.tools/actions/xcodes)` or define `DEVELOPER_DIR`: + +```no-highlight +DEVELOPER_DIR="/Applications/Xcode6.2.app" scan +``` + +To run _scan_ on multiple devices via [_fastlane_](https://fastlane.tools), add this to your `Fastfile`: + +```ruby +scan( + workspace: "Example.xcworkspace", + devices: ["iPhone 6s", "iPad Air"] +) +``` + +For a list of all available parameters use + +```no-highlight +fastlane action scan +``` + +To access the raw `xcodebuild` output open `~/Library/Logs/scan` + +# Scanfile + +Since you might want to manually trigger the tests but don't want to specify all the parameters every time, you can store your defaults in a so called `Scanfile`. + +Run `fastlane scan init` to create a new configuration file. Example: + +```ruby-skip-tests +scheme("Example") +devices(["iPhone 6s", "iPad Air"]) + +clean(true) + +output_types("html") +``` + +# Automating the whole process + +_scan_ works great together with [_fastlane_](https://fastlane.tools), which connects all deployment tools into one streamlined workflow. + +Using _fastlane_ you can define a configuration like + +```ruby +lane :test do + scan(scheme: "Example") +end +``` diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/sync_code_signing.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/sync_code_signing.md new file mode 100644 index 0000000..b090c5d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/sync_code_signing.md @@ -0,0 +1,579 @@ +

+###### Easily sync your certificates and profiles across your team
+
+A new approach to iOS and macOS code signing: Share one code signing identity across your development team to simplify your codesigning setup and prevent code signing issues.
+
+_match_ is the implementation of the [codesigning.guide concept](https://codesigning.guide). _match_ creates all required certificates & provisioning profiles and stores them in a separate git repository, Google Cloud, or Amazon S3. Every team member with access to the selected storage can use those credentials for code signing. _match_ also automatically repairs broken and expired credentials. It's the easiest way to share signing credentials across teams.
+
+[More information on how to get started with codesigning](https://docs.fastlane.tools/codesigning/getting-started/)
+
+-------
+
+Why? • Usage • Is this secure?
+
+-------
match is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+ +## Why match? + +Before starting to use _match_, make sure to read the [codesigning.guide](https://codesigning.guide): + +> When deploying an app to the App Store, beta testing service or even installing it on a device, most development teams have separate code signing identities for every member. This results in dozens of profiles including a lot of duplicates. + +> You have to manually renew and download the latest set of provisioning profiles every time you add a new device or a certificate expires. Additionally this requires spending a lot of time when setting up a new machine that will build your app. + +**A new approach** + +> Share one code signing identity across your development team to simplify your setup and prevent code signing issues. What if there was a central place where your code signing identity and profiles are kept, so anyone in the team can access them during the build process? + +For more information about the concept, visit [codesigning.guide](https://codesigning.guide). + +### Why not let Xcode handle all this? + +- You have full control over what happens +- You have access to all the certificates and profiles, which are all securely stored in git +- You share one code signing identity across the team to have fewer certificates and profiles +- Xcode sometimes revokes certificates which breaks your setup causing failed builds +- More predictable builds by settings profiles in an explicit way instead of using the `Automatic` setting +- It just worksâ„ĸ + +### What does _match_ do for you? + +| | match | +|----------|---------| +🔄 | Automatically sync your iOS and macOS keys and profiles across all your team members using git +đŸ“Ļ | Handle all the heavy lifting of creating and storing your certificates and profiles +đŸ’ģ | Setup codesigning on a new machine in under a minute +đŸŽ¯ | Designed to work with apps with multiple targets and bundle identifiers +🔒 | You have full control over your files and Git repo, no third party service involved +✨ | Provisioning profile will always match the correct certificate +đŸ’Ĩ | Easily reset your existing profiles and certificates if your current account has expired or invalid profiles +â™ģī¸ | Automatically renew your provisioning profiles to include all your devices using the `--force` option +đŸ‘Ĩ | Support for multiple Apple accounts and multiple teams +✨ | Tightly integrated with [_fastlane_](https://fastlane.tools) to work seamlessly with [_gym_](https://docs.fastlane.tools/actions/gym/) and other build tools + +## Usage + +### Setup + +1. Optional: Create a **new, shared Apple Developer Portal account**, something like `office@company.com`, that will be shared across your team from now on (for more information visit [codesigning.guide](https://codesigning.guide)) +1. Run the following in your project folder to start using _match_: + +```no-highlight +fastlane match init +``` + + + +You'll be asked if you want to store your code signing identities inside a **Git repo**, **Google Cloud** or **Amazon S3**. + +#### Git Storage + +Use Git Storage to store all code signing identities in a private git repo, owned and operated by you. The files will be encrypted using OpenSSL. + +First, enter the URL to your private (!) Git repo (You can create one for free on e.g. [GitHub](https://github.com/new) or [BitBucket](https://bitbucket.org/repo/create)). The URL you enter can be either a `https://` or a `git` URL. `fastlane match init` won't read or modify your certificates or profiles yet, and also won't validate your git URL. 
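+
+For example, both of these forms work (hypothetical repository):
+
+```no-highlight
+https://github.com/example/certificates.git
+git@github.com:example/certificates.git
+```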
+ +This will create a `Matchfile` in your current directory (or in your `./fastlane/` folder). + +Example content (for more advanced setups check out the [fastlane section](#fastlane)): + +```ruby-skip-tests +git_url("https://github.com/fastlane/certificates") + +app_identifier("tools.fastlane.app") +username("user@fastlane.tools") +``` + +##### Git Storage on GitHub + +If your machine is currently using SSH to authenticate with GitHub, you'll want to use a `git` URL, otherwise, you may see an authentication error when you attempt to use match. Alternatively, you can set a basic authorization for _match_: + +Using parameter: + +``` +match(git_basic_authorization: '') +``` + +Using environment variable: + +``` +ENV['MATCH_GIT_BASIC_AUTHORIZATION'] = '' +match +``` + +To generate your base64 key [according to RFC 7617](https://tools.ietf.org/html/rfc7617), run this: + +``` +echo -n your_github_username:your_personal_access_token | base64 +``` + +You can find more information about GitHub basic authentication and personal token generation here: [https://developer.github.com/v3/auth/#basic-authentication](https://developer.github.com/v3/auth/#basic-authentication) + +##### Git Storage on GitHub - Deploy keys + +If your machine does not have a private key set up for your certificates repository, you can give _match_ a path for one: + +Using parameter: + +``` +match(git_private_key: '') +``` + +Using environment variable: + +``` +ENV['MATCH_GIT_PRIVATE_KEY'] = '' +match +``` + +You can find more information about GitHub basic authentication and personal token generation here: [https://developer.github.com/v3/auth/#basic-authentication](https://developer.github.com/v3/auth/#basic-authentication) + +##### Git Storage on Azure DevOps + +If you're running a pipeline on Azure DevOps and using git storage in a another repository on the same project, you might want to use `bearer` token authentication. + +Using parameter: + +``` +match(git_bearer_authorization: '') +``` + +Using environment variable: + +``` +ENV['MATCH_GIT_BEARER_AUTHORIZATION'] = '' +match +``` + +You can find more information about this use case here: [https://docs.microsoft.com/en-us/azure/devops/pipelines/repos/azure-repos-git?view=azure-devops&tabs=yaml#authorize-access-to-your-repositories](https://docs.microsoft.com/en-us/azure/devops/pipelines/repos/azure-repos-git?view=azure-devops&tabs=yaml#authorize-access-to-your-repositories) + +#### Google Cloud Storage + +Use [Google Cloud Storage](https://cloud.google.com/storage/) for a fully hosted solution for your code signing identities. Certificates are stored on Google Cloud, encrypted using Google managed keys. Everything will be stored on your Google account, inside a storage bucket you provide. You can also directly access the files using the web console. + +This will create a `Matchfile` in your current directory (or in your `./fastlane/` folder). + +Example content (for more advanced setups check out the [fastlane section](#fastlane)): + +```ruby-skip-tests +google_cloud_bucket_name("major-key-certificates") +``` + +#### Amazon S3 + +Use [Amazon S3](https://aws.amazon.com/s3/) for a fully hosted solution for your code signing identities. Certificates are stored on S3, inside a storage bucket you provide. You can also directly access the files using the web console. + +This will create a `Matchfile` in your current directory (or in your `./fastlane/` folder). 
+ +Example content (for more advanced setups check out the [fastlane section](#fastlane)): + +```ruby-skip-tests +s3_bucket("ios-certificates") +``` + +### Multiple teams + +_match_ can store the codesigning files for multiple development teams: + +#### Git Storage + +Use one git branch per team. _match_ also supports storing certificates of multiple teams in one repo, by using separate git branches. If you work in multiple teams, make sure to set the `git_branch` parameter to a unique value per team. From there, _match_ will automatically create and use the specified branch for you. + +```ruby +match(git_branch: "team1", username: "user@team1.com") +match(git_branch: "team2", username: "user@team2.com") +``` + +#### Google Cloud or Amazon S3 Storage + +If you use Google Cloud or Amazon S3 Storage, you don't need to do anything manually. Just use Google Cloud or Amazon S3 Storage, and the top level folder will be the team ID. + +### Run + +> Before running _match_ for the first time, you should consider clearing your existing profiles and certificates using the [match nuke command](#nuke). + +After running `fastlane match init` you can run the following to generate new certificates and profiles: + +```no-highlight +fastlane match appstore +``` + +```no-highlight +fastlane match development +``` + + + +This will create a new certificate and provisioning profile (if required) and store them in your selected storage. +If you previously ran _match_ with the configured storage it will automatically install the existing profiles from your storage. + +The provisioning profiles are installed in `~/Library/MobileDevice/Provisioning Profiles` while the certificates and private keys are installed in your Keychain. + +To get a more detailed output of what _match_ is doing use + +```no-highlight +fastlane match --verbose +``` + +For a list of all available options run + +```no-highlight +fastlane action match +``` + +#### Handle multiple targets + +_match_ can use the same one Git repository, Google Cloud, or Amazon S3 Storage for all bundle identifiers. + +If you have several targets with different bundle identifiers, supply them as a comma-separated list: + +```no-highlight +fastlane match appstore -a tools.fastlane.app,tools.fastlane.app.watchkitapp +``` + +You can make this even easier using [_fastlane_](https://fastlane.tools) by creating a `certificates` lane like this: + +```ruby +lane :certificates do + match(app_identifier: ["tools.fastlane.app", "tools.fastlane.app.watchkitapp"]) +end +``` + +Then all your team has to do is run `fastlane certificates` and the keys, certificates and profiles for all targets will be synced. + +#### Handle multiple apps per developer/distribution certificate + +If you want to use a single developer and/or distribution certificate for multiple apps belonging to the same development team, you may use the same signing identities repository and branch to store the signing identities for your apps: + +`Matchfile` example for both App #1 and #2: + +```ruby-skip-tests +git_url("https://github.com/example/example-repo.git") +git_branch("master") +``` + +_match_ will reuse certificates and will create separate provisioning profiles for each app. + +#### Passphrase + +*Git Repo storage only* + +When running _match_ for the first time on a new machine, it will ask you for the passphrase for the Git repository. This is an additional layer of security: each of the files will be encrypted using `openssl`. 
Make sure to remember the password, as you'll need it when you run match on a different machine. + +To set the passphrase to decrypt your profiles using an environment variable (and avoid the prompt) use `MATCH_PASSWORD`. + +#### Migrate from Git Repo to Google Cloud + +If you're already using a Git Repo, but would like to switch to using Google Cloud Storage, run the following command to automatically migrate all your existing code signing identities and provisioning profiles + +```no-highlight +fastlane match migrate +``` + +After a successful migration you can safely delete your Git repo. + +#### Google Cloud access control + +*Google Cloud Storage only* + +There are two cases for reading and writing certificates stored in a Google Cloud storage bucket: + +1. Continuous integration jobs. These will authenticate to your Google Cloud project via a service account, and use a `gc_keys.json` file as credentials. +1. Developers on a local workstation. In this case, you should choose whether everyone on your team will create their own `gc_keys.json` file, or whether you want to manage access to the bucket directly using your developers' Google accounts. + +When running `fastlane match init` the first time, the setup process will give you the option to create your `gc_keys.json` file. This file contains the authentication credentials needed to access your Google Cloud storage bucket. Make sure to keep that file secret and never add it to version control. We recommend adding `gc_keys.json` to your `.gitignore` + +##### Managing developer access via keys + +If you want to manage developer access to your certificates via authentication keys, every developer should create their own `gc_keys.json` and add the file to all their work machines. This will give the admin full control over who has read/write access to the given Storage bucket. At the same time it allows your team to revoke a single key if a file gets compromised. + +##### Managing developer access via Google accounts + +If your developers already have Google accounts and access to your Google Cloud project, you can also manage access to the storage bucket via [Cloud Identity and Access Management (IAM)](https://cloud.google.com/storage/docs/access-control/iam). Just [set up](https://cloud.google.com/storage/docs/access-control/lists) individual developer accounts or an entire Google Group containing your team as readers and writers on your storage bucket. + +You can then specify the Google Cloud project id containing your storage bucket in your `Matchfile`: + +```ruby-skip-tests +storage_mode("google_cloud") +google_cloud_bucket_name("my-app-certificates") +google_cloud_project_id("my-app-project") +``` + +This lets developers on your team use [Application Default Credentials](https://cloud.google.com/docs/authentication/production) when accessing your storage bucket. After installing the [Google Cloud SDK](https://cloud.google.com/sdk/), they only need to run the following command once: +```no-highlight +gcloud auth application-default login +``` +... and log in with their Google account. Then, when they run `fastlane match`, _match_ will use these credentials to read from and write to the storage bucket. + +#### New machine + +To set up the certificates and provisioning profiles on a new machine, you just run the same command using: + +```no-highlight +fastlane match development +``` + +You can also run _match_ in a `readonly` mode to be sure it won't create any new certificates or profiles. 
+
+```no-highlight
+fastlane match development --readonly
+```
+
+We recommend always using `readonly` mode when running _fastlane_ on CI systems. This can be done using
+
+```ruby
+lane :beta do
+  match(type: "appstore", readonly: is_ci)
+
+  gym(scheme: "Release")
+end
+```
+
+#### Access Control
+
+A benefit of using _match_ is that it enables you to give the developers of your team access to the code signing certificates without having to give everyone access to the Developer Portal:
+
+1. Run _match_ to store the certificates in a Git repo or Google Cloud Storage
+2. Grant access to the Git repo / Google Cloud Storage Bucket to your developers and give them the passphrase (for git storage)
+3. The developers can now run _match_, which will install the latest code signing profiles so they can build and sign the application without needing access to the Apple Developer Portal
+4. Every time you run _match_ to update the profiles (e.g. add a new device), all your developers will automatically get the latest profiles when running _match_
+
+If you decide to run _match_ without access to the Developer Portal, make sure to use the `--readonly` option so that the commands don't ask you for the password to the Developer Portal.
+
+The advantage of this approach is that no one on your team can revoke a certificate by mistake, while all code signing secrets stay in one location.
+
+#### Folder structure
+
+After running _match_ for the first time, your Git repo or Google Cloud bucket will contain 2 directories:
+
+- The `certs` folder contains all certificates with their private keys
+- The `profiles` folder contains all provisioning profiles
+
+Additionally, _match_ creates a nice repo `README.md` for you, making it easy to onboard new team members:
+
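+For illustration, a freshly populated storage might look roughly like this. The top-level `certs` and `profiles` folders are as documented above; the subfolder names below are placeholders and depend on which certificate and profile types you have generated:
+
+```no-highlight
+├── README.md
+├── certs
+│   ├── development
+│   └── distribution
+└── profiles
+    ├── appstore
+    └── development
+```
+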

+ +

+ +In the case of Google Cloud, the top level folder will be the team ID. + +#### fastlane + +Add _match_ to your `Fastfile` to automatically fetch the latest code signing certificates with [_fastlane_](https://fastlane.tools). + +``` +match(type: "appstore") + +match(type: "development") + +match(type: "adhoc", + app_identifier: "tools.fastlane.app") + +match(type: "enterprise", + app_identifier: "tools.fastlane.app") + +# _match_ should be called before building the app with _gym_ +gym +# ... +``` + +##### Registering new devices + +By using _match_, you'll save a lot of time every time you add new device to your Ad Hoc or Development profiles. Use _match_ in combination with the [`register_devices`](https://docs.fastlane.tools/actions/register_devices/) action. + +```ruby +lane :beta do + register_devices(devices_file: "./devices.txt") + match(type: "adhoc", force_for_new_devices: true) +end +``` + +By using the `force_for_new_devices` parameter, _match_ will check if the (enabled) device count has changed since the last time you ran _match_, and automatically re-generate the provisioning profile if necessary. You can also use `force: true` to re-generate the provisioning profile on each run. + +_**Important:** The `force_for_new_devices` parameter is ignored for App Store provisioning profiles since they don't contain any device information._ + +If you're not using _fastlane_, you can also use the `force_for_new_devices` option from the command line: + +```no-highlight +fastlane match adhoc --force_for_new_devices +``` + +##### Templates (aka: custom entitlements) + +Match can generate profiles that contain custom entitlements by passing in the entitlement's name with the `template_name` parameter. + +``` +match(type: "development", + template_name: "Apple Pay Pass Suppression Development") +``` + +### Setup Xcode project + +[Docs on how to set up your Xcode project](/codesigning/xcode-project/) + +#### To build from the command line using [_fastlane_](https://fastlane.tools) + +_match_ automatically pre-fills environment variables with the UUIDs of the correct provisioning profiles, ready to be used in your Xcode project. + +More information about how to setup your Xcode project can be found [here](/codesigning/xcode-project/) + +#### To build from Xcode manually + +This is useful when installing your application on your device using the Development profile. + +You can statically select the right provisioning profile in your Xcode project (the name will be `match Development tools.fastlane.app`). + +[Docs on how to set up your Xcode project](/codesigning/xcode-project/) + +### Continuous Integration + +#### Git repo access + +There is one tricky part of setting up a CI system to work with _match_, which is enabling the CI to access the repo. Usually you'd just add your CI's public ssh key as a deploy key to your _match_ repo, but since your CI will already likely be using its public ssh key to access the codebase repo, [you won't be able to do that](https://help.github.com/articles/error-key-already-in-use/). + +Some repo hosts might allow you to use the same deploy key for different repos, but GitHub will not. If your host does, you don't need to worry about this, just add your CI's public ssh key as a deploy key for your _match_ repo and scroll down to "_Encryption password_". + +There are a few ways around this: + +1. Create a new account on your repo host with read-only access to your _match_ repo. 
Bitrise has a good description of this [here](http://devcenter.bitrise.io/faq/adding-projects-with-submodules/).
+2. Some CIs allow you to upload your signing credentials manually, but obviously this means that you'll have to re-upload the profiles/keys/certs each time they change.
+
+Neither solution is pretty. It's one of those _trade-off_ things. Do you care more about **not** having an extra account sitting around, or do you care more about having the :sparkles: of auto-syncing of credentials?
+
+#### Git repo encryption password
+
+Once you've decided which approach to take, all that's left to do is to set your encryption password as a secret environment variable named `MATCH_PASSWORD`. _match_ will pick this up when it's run.
+
+#### Google Cloud Storage access
+
+Accessing Google Cloud Storage from your CI system requires you to provide the `gc_keys.json` file as part of your build. How you implement this is your decision; for example, you can inject that file at build time.
+
+#### Amazon S3 Storage access
+
+Accessing Amazon S3 Storage from your CI system requires you to provide the `s3_region`, `s3_access_key`, `s3_secret_access_key` and `s3_bucket` options (or environment variables), with keys that have read access to the bucket.
+
+### Nuke
+
+If you never really cared about code signing and have a messy Apple Developer account with a lot of invalid, expired or Xcode managed profiles/certificates, you can use the `match nuke` command to revoke your certificates and provisioning profiles. Don't worry: apps that are already available in the App Store / TestFlight will still work. However, builds distributed via Ad Hoc or Enterprise will be disabled after nuking your account, so you'll have to re-upload a new build. After clearing your account you'll start from a clean state, and you can run _match_ to generate your certificates and profiles again.
+
+To revoke all certificates and provisioning profiles for a specific environment:
+
+```no-highlight
+fastlane match nuke development
+fastlane match nuke distribution
+fastlane match nuke enterprise
+```
+
+You'll have to confirm a list of profiles / certificates that will be deleted.
+
+## Advanced Git Storage features
+
+### Change Password
+
+To change the password of your repo, and thereby decrypt and re-encrypt all files, run:
+
+```no-highlight
+fastlane match change_password
+```
+
+You'll be asked for the new password on all your machines on the next run.
+
+### Import
+
+To import and encrypt a certificate (`.cer`), the private key (`.p12`) and the provisioning profiles (`.mobileprovision` or `.provisionprofile`) into the _match_ repo, run:
+
+```no-highlight
+fastlane match import
+```
+
+You'll be prompted for the certificate (`.cer`), the private key (`.p12`) and the provisioning profiles (`.mobileprovision` or `.provisionprofile`) paths. _match_ will first validate the certificate (`.cer`) against the Developer Portal before importing the certificate, the private key and the provisioning profiles into the specified _match_ repository.
+
+However, if you have no access to the Developer Portal but do have certificates, private keys and profiles at hand, you can use the `skip_certificate_matching` option to tell _match_ not to verify the certificates. Like this:
+
+```no-highlight
+fastlane match import --skip_certificate_matching true
+```
+This will skip the login to the Apple Developer Portal and will import the provided certificate, private key and profile directly to the certificates repo.
+
+Please be careful when using this option and ensure the certificates and profiles match the type (development, adhoc, appstore, enterprise, developer_id) and are not revoked or expired.
+
+### Manual Decrypt
+
+If you want to manually decrypt a file, you can use `openssl` directly (placeholders in angle brackets):
+
+```no-highlight
+openssl aes-256-cbc -k "<password>" -in "<encrypted file path>" -out "<decrypted file path>" -a -d
+```
+
+#### Export Distribution Certificate and Private Key as Single .p12 File
+
+_match_ stores the certificate (`.cer`) and the private key (`.p12`) files separately. The following steps will repackage the separate certificate and private key into a single `.p12` file.
+
+Decrypt your cert found in `certs/<type>/<certificate_id>.cer` as a pem file:
+
+```no-highlight
+openssl aes-256-cbc -k "<password>" -in "certs/<type>/<certificate_id>.cer" -out "cert.der" -a -d -md [md5|sha256]
+openssl x509 -inform der -in cert.der -out cert.pem
+```
+
+Decrypt your private key found in `certs/<type>/<certificate_id>.p12` as a pem file:
+
+```no-highlight
+openssl aes-256-cbc -k "<password>" -in "certs/distribution/<certificate_id>.p12" -out "key.pem" -a -d -md [md5|sha256]
+```
+
+Generate an encrypted p12 file with the same or a new password:
+
+```no-highlight
+openssl pkcs12 -export -out "cert.p12" -inkey "key.pem" -in "cert.pem" -password pass:<password>
+```
+
+## Is this secure?
+
+### Git
+
+Both your keys and provisioning profiles are encrypted with OpenSSL using a passphrase.
+
+Storing your private keys in a Git repo may sound off-putting at first. We did an analysis of potential security issues; see the section below.
+
+### Google Cloud Storage
+
+All your keys and provisioning profiles are encrypted using Google managed keys.
+
+### What could happen if someone stole a private key?
+
+If attackers had your certificate and provisioning profile, they could codesign an application with the same bundle identifier.
+
+What's the worst that could happen for each of the profile types?
+
+#### App Store Profiles
+
+An App Store profile can't be used for anything as long as it's not re-signed by Apple. The only way to get an app re-signed is to submit it for review, which could take anywhere from 24 hours to a few days (check out [appreviewtimes.com](http://appreviewtimes.com) for up-to-date expectations). Attackers could only submit an app for review if they also had access to your App Store Connect credentials (which are not stored in git, but in your local keychain). Additionally, you get an email notification every time a build gets uploaded, so you can cancel the submission even before your app gets into the review stage.
+
+#### Development and Ad Hoc Profiles
+
+In general, those profiles are harmless, as they can only be used to install a signed application on a small subset of devices. To add new devices, the attacker would also need your Apple Developer Portal credentials (which are not stored in git, but in your local keychain).
+
+#### Enterprise Profiles
+
+Attackers could use an In-House profile to distribute a signed application to a potentially unlimited number of devices. All this would run under your company name and it could eventually lead to Apple revoking your In-House account. However, it is very easy to revoke a certificate to remotely break the app on all devices.
+
+Because of the potentially dangerous nature of In-House profiles, please use _match_ with enterprise profiles with caution, ensure your git repository is private and use a secure password.
+ +#### To sum up + +- You have full control over the access list of your Git repo, no third party service involved +- Even if your certificates are leaked, they can't be used to cause any harm without your App Store Connect login credentials +- Use In-House enterprise profile with _match_ with caution +- If you use GitHub or Bitbucket we encourage enabling 2 factor authentication for all accounts that have access to the certificates repo +- The complete source code of _match_ is fully open source on [GitHub](https://github.com/fastlane/fastlane/) diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_app_store.md.erb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_app_store.md.erb new file mode 100644 index 0000000..c715b07 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_app_store.md.erb @@ -0,0 +1,748 @@ +

+ +

+ +_deliver_ uploads screenshots, metadata and binaries to App Store Connect. Use _deliver_ to submit your app for App Store review. + +------- + +

+    Features •
+    Quick Start •
+    Usage •
+    Tips

+ +------- + +
deliver is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+ +# Features +- Upload hundreds of localized screenshots completely automatically +- Upload a new ipa/pkg file to App Store Connect without Xcode from any Mac +- Maintain your app metadata locally and push changes back to App Store Connect +- Easily implement a real Continuous Deployment process using [_fastlane_](https://fastlane.tools) +- Store the configuration in git to easily deploy from **any** Mac, including your Continuous Integration server +- Get a HTML preview of the fetched metadata before uploading the app metadata and screenshots to iTC +- Automatically uses [_precheck_](/actions/precheck/) to ensure your app has the highest chances of passing app review the first time + +To upload builds to TestFlight check out [_pilot_](/actions/pilot/). + +# Quick Start + +The guide will create all the necessary files for you, using the existing app metadata from App Store Connect. + +- `cd [your_project_folder]` +- `fastlane deliver init` +- Enter your App Store Connect credentials +- Enter your app identifier +- Enjoy a good drink, while the computer does all the work for you + +From now on, you can run `fastlane deliver` to deploy a new update, or just upload new app metadata and screenshots. + +# Usage + +Check out your local `./fastlane/metadata` and `./fastlane/screenshots` folders (if you don't use [_fastlane_](https://fastlane.tools) it's `./metadata` instead) + +![/img/actions/deliver_metadata.png](/img/actions/deliver_metadata.png) + +You'll see your metadata from App Store Connect. Feel free to store the metadata in git (not the screenshots). You can now modify it locally and push the changes back to App Store Connect. + +Run `fastlane deliver` to upload the app metadata from your local machine + +```no-highlight +fastlane deliver +``` + +Provide the path to an `ipa` file to upload and submit your app for review: + +```no-highlight +fastlane deliver --ipa "App.ipa" --submit_for_review +``` + +or you can specify path to `pkg` file for macOS apps: + +```no-highlight +fastlane deliver --pkg "MacApp.pkg" +``` + +If you use [_fastlane_](https://fastlane.tools) you don't have to manually specify the path to your `ipa`/`pkg` file. + +This is just a small sub-set of what you can do with _deliver_, check out the full documentation in [#more-options](#more-options) + +Download existing screenshots from App Store Connect + +```no-highlight +fastlane deliver download_screenshots +``` + +Download existing metadata from App Store Connect + +```no-highlight +fastlane deliver download_metadata +``` + +To get a list of available options run + +```no-highlight +fastlane action deliver +``` + +### Use in a `Fastfile` + +```ruby +deliver +``` + +```ruby +deliver( + submit_for_review: true, + force: true, + metadata_path: "./metadata" +) +``` + +## More options + +
+
+View all available options and their valid values
+
+## Available options
+
+All the options below can easily be added to your `Deliverfile`. The great thing: if you use _fastlane_, you can use all these options from your `Fastfile` too, for example:
+
+```ruby
+deliver(
+  submit_for_review: true,
+  metadata_path: "../folder"
+)
+```
+
+##### app_identifier
+The bundle identifier (e.g. "com.krausefx.app")
+
+##### username
+Your Apple ID email address
+
+##### ipa
+
+A path to a signed ipa file, which will be uploaded. If you don't provide this value, only app metadata will be uploaded. If you want to submit the app for review, make sure to either use `fastlane deliver --submit_for_review` or add `submit_for_review true` to your `Deliverfile`.
+
+```ruby-skip-tests
+ipa("App.ipa")
+```
+
+If you use [_fastlane_](https://fastlane.tools), the ipa file will automatically be detected.
+
+##### pkg
+A path to a signed pkg file, which will be uploaded. The ipa submission logic applies to pkg files as well.
+```ruby-skip-tests
+pkg("MacApp.pkg")
+```
+
+##### app_version
+
+Optional, as it is usually automatically detected. Specify the version that should be created / edited on App Store Connect:
+
+```ruby-skip-tests
+app_version("2.0")
+```
+
+##### skip_app_version_update
+
+When _deliver_ uploads your application to App Store Connect, it automatically updates the "Prepare for submission" app version (which can be found on the App Store Connect -> My Apps -> App Store page).
+
+This option allows uploading your app without updating the "Prepare for submission" version, which can be useful if you are generating a lot of uploads without submitting the latest build for Apple review.
+
+The default value is false.
+
+```ruby-skip-tests
+skip_app_version_update(true)
+```
+
+##### submit_for_review
+
+Add this to your `Deliverfile` to automatically submit the app for review after uploading metadata/binary. This will select the latest build.
+
+```ruby-skip-tests
+submit_for_review(true)
+```
+
+##### screenshots_path
+A path to a folder containing subfolders for each language. This will automatically detect the device type based on the image resolution. Also includes Apple Watch support.
+
+![/img/actions/deliver_screenshots.png](/img/actions/deliver_screenshots.png)
+
+##### metadata_path
+Path to the metadata you want to use. The folder has to be structured like this:
+
+![/img/actions/deliver_metadata.png](/img/actions/deliver_metadata.png)
+
+If you run `deliver init`, this will automatically be created for you.
+
+##### force
+
+```ruby-skip-tests
+force(true)
+```
+If set to `true`, no HTML report will be generated before the actual upload. You can also pass `--force` when calling _deliver_.
+
+
+##### price_tier
+Pass the price tier as a number. This will be active from the current day.
+```ruby-skip-tests
+price_tier 0
+```
+##### trade_representative_contact_information
+Trade Representative Contact information for the Korean App Store. Available options: `first_name`, `last_name`, `address_line1`, `address_line2`, `address_line3`, `city_name`, `state`, `country`, `postal_code`, `phone_number`, `email_address`, `is_displayed_on_app_store`.
+
+
+```ruby-skip-tests
+trade_representative_contact_information(
+  first_name: "Felix",
+  last_name: "Krause",
+  address_line1: "1 Infinite Loop",
+  address_line2: "",
+  address_line3: nil,
+  city_name: "Cupertino",
+  state: "California",
+  country: "United States",
+  postal_code: "95014",
+  phone_number: "+43 123123123",
+  email_address: "github@krausefx.com",
+)
+```
+
+You can also provide these values by creating files in a `metadata/trade_representative_contact_information/` directory. The file names must match the pattern `<key>.txt` (e.g. `first_name.txt`, `address_line1.txt` etc.). The contents of each file will be used as the value for the matching key. Values provided in the `Deliverfile` or `Fastfile` take priority over values from these files.
+
+`is_displayed_on_app_store` is the option on App Store Connect described as: `Display Trade Representative Contact Information on the Korean App Store`
+
+##### app_review_information
+Contact information for the app review team. Available options: `first_name`, `last_name`, `phone_number`, `email_address`, `demo_user`, `demo_password`, `notes`.
+
+
+```ruby-skip-tests
+app_review_information(
+  first_name: "Felix",
+  last_name: "Krause",
+  phone_number: "+43 123123123",
+  email_address: "github@krausefx.com",
+  demo_user: "demoUser",
+  demo_password: "demoPass",
+  notes: "such notes, very text"
+)
+```
+
+You can also provide these values by creating files in a `metadata/review_information/` directory. The file names must match the pattern `<key>.txt` (e.g. `first_name.txt`, `notes.txt` etc.). The contents of each file will be used as the value for the matching key. Values provided in the `Deliverfile` or `Fastfile` take priority over values from these files.
+
+##### app_review_attachment_file
+You can provide additional information to the app review team as a file attachment. As of this writing, Apple supports the following file attachment formats: .pdf, .doc, .docx, .rtf, .pages, .xls, .xlsx, .numbers, .zip, .rar, .plist, .crash, .jpg, .png, .mp4, or .avi.
+
+Provide an empty string (i.e. "", not nil) to remove the existing attachment file (if any) from the review information being edited.
+
+```ruby-skip-tests
+app_review_attachment_file: "./readme.txt"
+```
+
+##### submission_information
+Must be a hash. This is used as the last step of the deployment process, where you define whether you use third party content or encryption. [A list of available options](https://github.com/fastlane/fastlane/blob/master/spaceship/lib/spaceship/tunes/app_submission.rb).
+
+```ruby-skip-tests
+submission_information({
+  add_id_info_serves_ads: true,
+  ...
+})
+```
+
+##### automatic_release
+Should the app be released to all users once Apple approves it? If set to `false`, you'll have to manually release the update once it got approved.
+
+```ruby-skip-tests
+automatic_release(true)
+# or
+automatic_release(false)
+```
+
+##### phased_release
+
+Enable or disable the phased release feature of App Store Connect. If set to `true`, the update will be released over a 7 day period. Default behavior is to leave whatever you defined on App Store Connect.
+
+```ruby-skip-tests
+phased_release(true)
+# or
+phased_release(false)
+```
+
+##### reset_ratings
+
+Reset your app's summary rating for all territories. If set to `true`, the rating will be reset when this version is released. Default behavior is to keep the existing rating.
+
+```ruby-skip-tests
+reset_ratings(true)
+# or
+reset_ratings(false)
+```
+
+##### app_rating_config_path
+You can set the app age ratings using _deliver_. You'll have to create and store a `JSON` configuration file. Copy the [template](https://github.com/fastlane/fastlane/blob/master/deliver/assets/example_rating_config.json) to your project folder and pass the path to the `JSON` file using the `app_rating_config_path` option.
+
+The keys/values on the top allow one of 3 strings: "NONE", "INFREQUENT_OR_MILD" or "FREQUENT_OR_INTENSE", and the items on the bottom allow false or true. More information in [#reference](#reference).
+
+
+## Metadata
+
+All options below are useful if you want to specify certain app metadata in your `Deliverfile` or `Fastfile`.
+
+### Localized
+
+Localized values should be set like this:
+
+```ruby-skip-tests
+description({
+  'en-US' => "English Description here",
+  'de-DE' => "Deutsche Beschreibung hier"
+})
+```
+
+##### name
+The title/name of the app
+
+##### subtitle
+
+Localized subtitle of the app
+
+```ruby-skip-tests
+subtitle(
+  "en-US" => "Awesome English subtitle here",
+  "de-DE" => "Jetzt mit deutschen Untertiteln!"
+)
+```
+
+##### description
+The description of the app
+
+##### release_notes
+The release_notes (What's new / Changelog) of the latest version
+
+##### support_url, marketing_url, privacy_url
+These URLs are shown in the App Store
+
+##### keywords
+
+Keywords separated using a comma.
+
+```ruby-skip-tests
+keywords(
+  "en-US" => "Keyword1, Keyword2"
+)
+```
+
+##### promotional_text
+
+Localized promotional text
+
+```ruby-skip-tests
+promotional_text(
+  "en-US" => "Hey, you should totally buy our app, it's the best",
+  "de-DE" => "App kaufen bitte"
+)
+```
+
+##### app_icon
+A path to a new app icon, which must be exactly 1024x1024px
+```ruby-skip-tests
+app_icon('./AppIcon.png')
+```
+
+##### apple_watch_app_icon
+A path to a new app icon for the Apple Watch, which must be exactly 1024x1024px
+```ruby-skip-tests
+apple_watch_app_icon('./AppleWatchAppIcon.png')
+```
+
+##### platform
+
+The platform of your application (e.g. ios, osx).
+
+This option is optional. The default value is "ios", and _deliver_ should be able to figure out the platform from your binary.
+
+However, if multiple binaries are present, you can explicitly specify the platform you want to deliver.
+
+The available options:
+
+- 'ios'
+- 'appletvos'
+- 'osx'
+
+
+### Non-Localized
+
+##### copyright
+The up to date copyright information.
+```ruby-skip-tests
+copyright("#{Time.now.year} Felix Krause")
+```
+
+##### primary_category
+The English name of the category you want to set (e.g. `Business`, `Books`)
+
+See [#reference](#reference) for a list of available categories
+
+##### secondary_category
+The English name of the secondary category you want to set
+
+##### primary_first_sub_category
+The English name of the primary first sub category you want to set
+
+##### primary_second_sub_category
+The English name of the primary second sub category you want to set
+
+##### secondary_first_sub_category
+The English name of the secondary first sub category you want to set
+
+##### secondary_second_sub_category
+The English name of the secondary second sub category you want to set
+
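+To tie several of these together: a minimal `Deliverfile` sketch built only from options documented above (all values are placeholders, not recommendations):
+
+```ruby-skip-tests
+app_identifier("com.krausefx.app")
+app_version("2.0")
+copyright("#{Time.now.year} Felix Krause")
+primary_category("Business")
+description({
+  'en-US' => "English Description here",
+  'de-DE' => "Deutsche Beschreibung hier"
+})
+```
+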
+
+# Submit Build
+_deliver_ allows you to promote an existing build to production. Below are examples to select a previously uploaded build and submit it for review.
+
+```no-highlight
+fastlane deliver submit_build --build_number 830
+```
+
+### Submit build in a `Fastfile`
+
+```ruby
+lane :submit_review do
+  deliver(
+    build_number: '830',
+    submit_for_review: true,
+    automatic_release: true,
+    force: true, # Skip HTML report verification
+    skip_metadata: true,
+    skip_screenshots: true,
+    skip_binary_upload: true
+  )
+end
+```
+
+Omit `build_number` to let _fastlane_ automatically select the latest build number for the current version being edited for release from App Store Connect.
+
+### Compliance and IDFA settings
+
+Use the `submission_information` parameter for additional submission specifiers, including compliance and IDFA settings. Look at Spaceship's [`app_submission.rb`](https://github.com/fastlane/fastlane/blob/master/spaceship/lib/spaceship/tunes/app_submission.rb) file for options. See [this example](https://github.com/artsy/eigen/blob/faa02e2746194d8d7c11899474de9c517435eca4/fastlane/Fastfile#L131-L149).
+
+```no-highlight
+fastlane deliver submit_build --build_number 830 --submission_information "{\"export_compliance_uses_encryption\": false, \"add_id_info_uses_idfa\": false }"
+```
+
+### App Privacy Details
+
+Starting December 8, 2020, Apple requires developers to provide app privacy details that help users understand an app's privacy practices. _deliver_ does not allow for updating of this information, but this can be done with the _upload_app_privacy_details_to_app_store_ action. More information on [Uploading App Privacy Details](https://docs.fastlane.tools/uploading-app-privacy-details)
+
+# Credentials
+
+A detailed description of how your credentials are handled is available in [credentials_manager](https://github.com/fastlane/fastlane/tree/master/credentials_manager).
+
+### How does this thing even work? Is magic involved? 🎩
+
+Your password will be stored in the macOS keychain, but can also be passed using environment variables. (More information available on [CredentialsManager](https://github.com/fastlane/fastlane/tree/master/credentials_manager))
+
+Before actually uploading anything to iTunes, _deliver_ will generate an HTML summary of the collected data.
+
+_deliver_ uses the following techniques under the hood:
+
+- The iTMSTransporter tool is used to upload the binary to App Store Connect. iTMSTransporter is a command line tool provided by Apple.
+- For all metadata related actions _deliver_ uses [_spaceship_](https://github.com/fastlane/fastlane/tree/master/spaceship)
+
+# Tips
+
+## Available language codes
+```no-highlight
+<%= FastlaneCore::Languages::ALL_LANGUAGES.join(', ') %>
+```
+
+## Available Metadata Folder Options
+
+_deliver_ allows for metadata to be set through `.txt` files in the metadata folder. This metadata folder location is defaulted to `./fastlane/metadata` but can be overridden through the `metadata_path` parameter. Below are all allowed metadata options.
+
+<%- require 'deliver' -%>
+
+### Non-Localized Metadata
+
+Key | Editable While Live | Directory | Filename
+----|--------|--------|--------
+<%- (Deliver::UploadMetadata::NON_LOCALISED_VERSION_VALUES.keys + Deliver::UploadMetadata::NON_LOCALISED_APP_VALUES.keys).each do |value| -%>
+  `<%= value %>` | <%= Deliver::UploadMetadata::NON_LOCALISED_LIVE_VALUES.include?(value) ? 
'Yes' : 'No' %> | `` | `<%= value %>.txt` +<%- end %> + +### Localized Metadata + +Key | Editable While Live | Directory | Filename +----|--------|--------|-------- +<%- (Deliver::UploadMetadata::LOCALISED_APP_VALUES.keys + Deliver::UploadMetadata::LOCALISED_VERSION_VALUES.keys).each do |value| -%> + `<%= value %>` | <%= Deliver::UploadMetadata::LOCALISED_LIVE_VALUES.include?(value) ? 'Yes' : 'No' %> | `//` | `<%= value %>.txt` +<%- end %> + +### Review Information Metadata + +Key | Editable While Live | Directory | Filename | Deprecated Filename +----|--------|--------|--------|-------- +<%- Deliver::UploadMetadata::REVIEW_INFORMATION_VALUES_LEGACY.each do |key, value| -%> + `<%= value %>` | Yes | `/<%= Deliver::UploadMetadata::REVIEW_INFORMATION_DIR %>` | `<%= value %>.txt` | `<%= key %>.txt` +<%- end %> + +## Reference + +
+View all available categories, etc. + +### Available Categories + +- `FOOD_AND_DRINK` +- `BUSINESS` +- `EDUCATION` +- `SOCIAL_NETWORKING` +- `BOOKS` +- `SPORTS` +- `FINANCE` +- `REFERENCE` +- `GRAPHICS_AND_DESIGN` +- `DEVELOPER_TOOLS` +- `HEALTH_AND_FITNESS` +- `MUSIC` +- `WEATHER` +- `TRAVEL` +- `ENTERTAINMENT` +- `STICKERS` +- `GAMES` +- `LIFESTYLE` +- `MEDICAL` +- `MAGAZINES_AND_NEWSPAPERS` +- `UTILITIES` +- `SHOPPING` +- `PRODUCTIVITY` +- `NEWS` +- `PHOTO_AND_VIDEO` +- `NAVIGATION` + +### Available Game Subcategories + +- `MZGenre.Action` +- `MZGenre.Adventure` +- `MZGenre.Arcade` +- `MZGenre.Board` +- `MZGenre.Card` +- `MZGenre.Casino` +- `MZGenre.Dice` +- `MZGenre.Educational` +- `MZGenre.Family` +- `MZGenre.Music` +- `MZGenre.Puzzle` +- `MZGenre.Racing` +- `MZGenre.RolePlaying` +- `MZGenre.Simulation` +- `MZGenre.Sports` +- `MZGenre.Strategy` +- `MZGenre.Trivia` +- `MZGenre.Word` + +- `GAMES_SPORTS` +- `GAMES_WORD` +- `GAMES_MUSIC` +- `GAMES_ADVENTURE` +- `GAMES_ACTION` +- `GAMES_ROLE_PLAYING` +- `GAMES_CASUAL` +- `GAMES_BOARD` +- `GAMES_TRIVIA` +- `GAMES_CARD` +- `GAMES_PUZZLE` +- `GAMES_CASINO` +- `GAMES_STRATEGY` +- `GAMES_SIMULATION` +- `GAMES_RACING` +- `GAMES_FAMILY` + +### Available Stickers Subcategories + +- `STICKERS_PLACES_AND_OBJECTS` +- `STICKERS_EMOJI_AND_EXPRESSIONS` +- `STICKERS_CELEBRATIONS` +- `STICKERS_CELEBRITIES` +- `STICKERS_MOVIES_AND_TV` +- `STICKERS_SPORTS_AND_ACTIVITIES` +- `STICKERS_EATING_AND_DRINKING` +- `STICKERS_CHARACTERS` +- `STICKERS_ANIMALS` +- `STICKERS_FASHION` +- `STICKERS_ART` +- `STICKERS_GAMING` +- `STICKERS_KIDS_AND_FAMILY` +- `STICKERS_PEOPLE` +- `STICKERS_MUSIC` + +### Available age rating groups + +#### Non Boolean + +**Values** + +- 0: None (Legacy value, use `NONE` instead) +- 1: Infrequent/Mild (Legacy value, use `INFREQUENT_OR_MILD` instead) +- 2: Frequent/Intense (Legacy value, use `FREQUENT_OR_INTENSE`instead) + +- `NONE` +- `INFREQUENT_OR_MILD` +- `FREQUENT_OR_INTENSE` + +**Keys** + +- 'alcoholTobaccoOrDrugUseOrReferences' +- 'contests' +- 'gamblingSimulated' +- 'medicalOrTreatmentInformation' +- 'profanityOrCrudeHumor' + +- 'sexualContentGraphicAndNudity' +- 'sexualContentOrNudity' +- 'horrorOrFearThemes' +- 'matureOrSuggestiveThemes' +- 'unrestrictedWebAccess' +- 'violenceCartoonOrFantasy' +- 'violenceRealisticProlongedGraphicOrSadistic' +- 'violenceRealistic' +- 'kidsAgeBand' + +#### Boolean + +**Keys** + +- `gambling` +- 'seventeenPlus' +- `unrestrictedWebAccess` + +#### Kids Age + +**Values** + +- `FIVE_AND_UNDER` +- `SIX_TO_EIGHT` +- `NINE_TO_ELEVEN` +- `null` + +**Keys** + +- `kidsAgeBand` + +
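+As a sketch of what such a `JSON` configuration file can look like, using only keys and values from the lists above (the bundled template linked in the `app_rating_config_path` section contains the full set):
+
+```json
+{
+  "alcoholTobaccoOrDrugUseOrReferences": "NONE",
+  "violenceCartoonOrFantasy": "INFREQUENT_OR_MILD",
+  "gambling": false,
+  "unrestrictedWebAccess": false,
+  "kidsAgeBand": null
+}
+```
+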
+ +
+
+## Default values
+
+Deliver has a special `default` language code which allows you to provide values that are not localized, and which will be used as defaults when you don’t provide a specific localized value.
+
+In order to use `default`, you will need to tell _deliver_ which languages your app uses. You can do this in either of two ways:
+
+1. Create the folders named with the language in the metadata folder (e.g. fastlane/metadata/en-US or fastlane/metadata/de-DE)
+2. Add the following to your `Deliverfile`: `languages(['en-US','de-DE'])`
+
+You can use either the `languages` setting in your `Deliverfile` and/or folders in your metadata folder. _deliver_ will take the union of both language sets from the `Deliverfile` and from the metadata folder and create one single set of languages which will be enabled.
+
+Imagine that you have localized data for the following language codes: ```en-US, de-DE, el, it```
+
+You can set the following in your `Deliverfile`
+
+```ruby-skip-tests
+release_notes({
+  'default' => "Shiny and new",
+  'de-DE' => "glaenzend und neu"
+})
+```
+
+Deliver will use "Shiny and new" for en-US, el and it.
+
+It will use "glaenzend und neu" for de-DE.
+
+You can do the same with folders:
+
+```
+   default
+      keywords.txt
+      marketing_url.txt
+      name.txt
+      privacy_url.txt
+      support_url.txt
+      release_notes.txt
+   en-US
+      description.txt
+   de-DE
+      description.txt
+   el
+      description.txt
+   it
+      description.txt
+```
+
+In this case, default values for keywords, URLs, name and release notes are used in all localizations, but each language has a fully localized description.
+
+## Uploading screenshots for "iPad Pro (12.9-inch) (3rd generation)"
+
+[Starting March 20, 2019 Apple's App Store](https://developer.apple.com/news/?id=03202019a) requires 12.9-inch iPad Pro (3rd generation) screenshots in addition to the iPad Pro 2nd generation [screenshots](https://help.apple.com/app-store-connect/#/devd274dd925). As fastlane historically uses the screenshot dimensions to determine the "display family" of a screenshot, this poses a problem as both use the same dimensions and are recognized as the same device family.
+
+To solve this, a screenshot of a 12.9-inch iPad Pro (3rd generation) must contain either the string `iPad Pro (12.9-inch) (3rd generation)`, `IPAD_PRO_3GEN_129`, or `ipadPro129` (Apple's internal naming of the display family for the 3rd generation iPad Pro) in its filename to be assigned the correct display family and to be uploaded to the correct screenshot slot in your app's metadata.
+
+## Automatically create screenshots
+
+If you want to integrate _deliver_ with [_snapshot_](https://docs.fastlane.tools/actions/snapshot/), check out [_fastlane_](https://fastlane.tools)!
+
+## Jenkins integration
+Detailed instructions about how to set up _deliver_ and _fastlane_ in `Jenkins` can be found in the [fastlane README](https://docs.fastlane.tools/best-practices/continuous-integration/#jenkins-integration).
+
+## Firewall Issues
+
+_deliver_ uses the iTunes Transporter to upload metadata and binaries. In case you are behind a firewall, you can specify a different transporter protocol using
+
+```no-highlight
+DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS="-t DAV" fastlane deliver
+```
+
+## HTTP Proxy
+iTunes Transporter is a Java application bundled with Xcode. 
In addition to utilizing the `DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS="-t DAV"`, you need to configure the transporter application to use the proxy independently from the system proxy or any environment proxy settings. You can find the configuration file within Xcode: + +**for Xcode11 and later** + +```no-highlight +TOOLS_PATH=$( xcode-select -p ) +REL_PATH='../SharedFrameworks/ContentDeliveryServices.framework/Versions/A/itms/java/lib/net.properties' +echo "$TOOLS_PATH/$REL_PATH" +``` + +**for Xcode10 or earlier** + +```no-highlight +TOOLS_PATH=$( xcode-select -p ) +REL_PATH='../Applications/Application Loader.app/Contents/itms/java/lib/net.properties' +echo "$TOOLS_PATH/$REL_PATH" +``` + +Add necessary proxy configuration values to the net.properties according to [Java Proxy Configuration](http://docs.oracle.com/javase/8/docs/technotes/guides/net/proxies.html). + +As an alternative to editing the properties files, proxy configuration can be specified on the command line directly: + +```no-highlight +DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS="-t DAV -Dhttp.proxyHost=myproxy.com -Dhttp.proxyPort=8080" +``` + +## Limit +App Store Connect has a limit of 150 binary uploads per day. + +## Editing the `Deliverfile` +Change syntax highlighting to *Ruby*. + +## Provider Short Name +If you are on multiple App Store Connect teams, _deliver_ needs a provider short name to know where to upload your binary. _deliver_ will try to use the long name of the selected team to detect the provider short name. To override the detected value with an explicit one, use the `itc_provider` option. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_play_store.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_play_store.md new file mode 100644 index 0000000..c7e57ec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_play_store.md @@ -0,0 +1,204 @@ +

+ +

+ +###### Command line tool for updating Android apps and their metadata on the Google Play Store + +_supply_ uploads app metadata, screenshots, binaries, and app bundles to Google Play. You can also select tracks for builds and promote builds to production. + +------- + +

+    Features •
+    Setup •
+    Quick Start •
+    Commands •
+    Uploading an APK •
+    Uploading an AAB •
+    Images

+ +------- + +## Features +- Update existing Android applications on Google Play via the command line +- Upload new builds (APKs and AABs) +- Retrieve and edit metadata, such as title and description, for multiple languages +- Upload the app icon, promo graphics and screenshots for multiple languages +- Have a local copy of the metadata in your git repository +- Retrieve version code numbers from existing Google Play tracks + + +## Setup + +Setup consists of setting up your Google Developers Service Account + +{!docs/includes/google-credentials.md!} + +### Migrating Google credential format (from .p12 key file to .json) + +In previous versions of supply, credentials to your Play Console were stored as `.p12` files. Since version 0.4.0, supply now supports the recommended `.json` key Service Account credential files. If you wish to upgrade: + +- follow the Setup procedure once again to make sure you create the appropriate JSON file +- update your fastlane configuration or your command line invocation to use the appropriate argument if necessary. + Note that you don't need to take note nor pass the `issuer` argument anymore. + + +The previous p12 configuration is still currently supported. + + +## Quick Start + +> Before using _supply_ to connect to Google Play Store, you'll need to set up your app manually first by uploading at least one build to Google Play Store. See [fastlane/fastlane#14686](https://github.com/fastlane/fastlane/issues/14686) for more info. + +- `cd [your_project_folder]` +- `fastlane supply init` +- Make changes to the downloaded metadata, add images, screenshots and/or an APK +- `fastlane supply` + +## Available Commands + +- `fastlane supply`: update an app with metadata, a build, images and screenshots +- `fastlane supply init`: download metadata for an existing app to a local directory +- `fastlane action supply`: show information on available commands, arguments and environment variables + +You can either run _supply_ on its own and use it interactively, or you can pass arguments or specify environment variables for all the options to skip the questions. + +## Uploading an APK + +To upload a new binary to Google Play, simply run + +```no-highlight +fastlane supply --apk path/to/app.apk +``` + +This will also upload app metadata if you previously ran `fastlane supply init`. 
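+If you drive _supply_ from a `Fastfile` instead of the command line, the equivalent of the command above is the `supply` action; a minimal sketch (the lane name and path are placeholders):
+
+```ruby
+lane :upload_beta do
+  # Uploads the given APK (and local metadata, if present) to the beta track
+  supply(
+    apk: "path/to/app.apk",
+    track: "beta"
+  )
+end
+```
+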
+ +To gradually roll out a new build use + +```no-highlight +fastlane supply --apk path/app.apk --track beta --rollout 0.5 +``` + +To set the in-app update priority level for a release, set a valid update priority (an integer value from 0 to 5) using option `in_app_update_priority` + +```no-highlight +fastlane supply --apk path/app.apk --track beta --in_app_update_priority 3 +``` + +### Expansion files (`.obb`) + +Expansion files (obbs) found under the same directory as your APK will also be uploaded together with your APK as long as: + +- they are identified as type 'main' or 'patch' (by containing 'main' or 'patch' in their file name) +- you have at most one of each type + +If you only want to update the APK, but keep the expansion files from the previous version on Google Play use + +```no-highlight +fastlane supply --apk path/app.apk --obb_main_references_version 21 --obb_main_file_size 666154207 +``` + +or + +```no-highlight +fastlane supply --apk path/app.apk --obb_patch_references_version 21 --obb_patch_file_size 666154207 +``` + +## Uploading an AAB + +To upload a new [Android application bundle](https://developer.android.com/guide/app-bundle/) to Google Play, simply run + +```no-highlight +fastlane supply --aab path/to/app.aab +``` + +This will also upload app metadata if you previously ran `fastlane supply init`. + +To gradually roll out a new build use + +```no-highlight +fastlane supply --aab path/app.aab --track beta --rollout 0.5 +``` + +To set the in-app update priority level for a release, set a valid update priority (an integer value from 0 to 5) using option `in_app_update_priority` + +```no-highlight +fastlane supply --aab path/app.aab --track beta --in_app_update_priority 3 +``` + +## Images and Screenshots + +After running `fastlane supply init`, you will have a metadata directory. This directory contains one or more locale directories (e.g. en-US, en-GB, etc.), and inside this directory are text files such as `title.txt` and `short_description.txt`. + +Inside of a given locale directory is a folder called `images`. Here you can supply images with the following file names (extension can be png, jpg or jpeg): + +- `featureGraphic` +- `icon` +- `promoGraphic` +- `tvBanner` + +You can also supply screenshots by creating directories within the `images` directory with the following names, containing PNGs or JPEGs: + +- `phoneScreenshots/` +- `sevenInchScreenshots/` (7-inch tablets) +- `tenInchScreenshots/` (10-inch tablets) +- `tvScreenshots/` +- `wearScreenshots/` + +You may name images anything you like, but screenshots will appear in the Play Store in alphanumerical filename order. +Note that these will replace the current images and screenshots on the play store listing, not add to them. + +## Changelogs (What's new) + +You can add changelog files under the `changelogs/` directory for each locale. The filename should exactly match the [version code](https://developer.android.com/studio/publish/versioning#appversioning) of the APK that it represents. You can also provide default notes that will be used if no files match the version code by adding a `default.txt` file. `fastlane supply init` will populate changelog files from existing data on Google Play if no `metadata/` directory exists when it is run. 
+
+```no-highlight
+└── fastlane
+    └── metadata
+        └── android
+            ├── en-US
+            │   └── changelogs
+            │       ├── default.txt
+            │       ├── 100000.txt
+            │       └── 100100.txt
+            └── fr-FR
+                └── changelogs
+                    ├── default.txt
+                    └── 100100.txt
+```
+
+## Track Promotion
+
+A common Play publishing scenario might involve uploading an APK version to a test track, testing it, and finally promoting that version to production.
+
+This can be done using the `--track_promote_to` parameter. The `--track_promote_to` parameter works with the `--track` parameter to command the Play API to promote existing Play track APK version(s) (those active on the track identified by the `--track` param value) to a new track (`--track_promote_to` value).
+
+## Retrieve Track Release Names & Version Codes
+
+Before performing a new APK upload you may want to check existing track version codes or release names, or you may simply want to provide an informational lane that displays the currently promoted version codes or release name for the production track. You can use the `google_play_track_version_codes` action to retrieve existing version codes for a package and track. You can use the `google_play_track_release_names` action to retrieve existing release names for a package and track.
+For more information, see the `fastlane action google_play_track_version_codes` and `fastlane action google_play_track_release_names` help output.
+
+## Migration from AndroidPublisherV2 to AndroidPublisherV3 in _fastlane_ 2.135.0
+
+### New Options
+- `:version_name`
+  - Used when uploading with `:apk_path`, `:apk_paths`, `:aab_path`, and `:aab_paths`
+  - Can be any string (for example: "October Release" or "Awesome New Feature")
+  - Defaults to the version name in app/build.gradle or AndroidManifest.xml
+- `:release_status`
+  - Used when uploading with `:apk_path`, `:apk_paths`, `:aab_path`, and `:aab_paths`
+  - Can be set to "draft" to complete the release at some other time
+  - Defaults to "completed"
+- `:version_code`
+  - Used for `:update_rollout`, `:track_promote_to`, and uploading of metadata and screenshots
+- `:skip_upload_changelogs`
+  - Changelogs were previously included in `:skip_upload_metadata` but now have their own option
+
+### Deprecated Options
+- `:check_superseded_tracks`
+  - Google Play will now automatically remove releases that are superseded
+- `:deactivate_on_promote`
+  - Google Play will automatically deactivate a release from its previous track on promote
+
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_testflight.md b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_testflight.md
new file mode 100644
index 0000000..17073ed
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/docs/upload_to_testflight.md
@@ -0,0 +1,260 @@
+

+ +

+ +###### The best way to manage your TestFlight testers and builds from your terminal + +Pilot makes it easier to manage your app on Apple’s TestFlight. You can: + +- Upload & distribute builds +- Add & remove testers +- Retrieve information about testers & devices +- Import/export all available testers + +_pilot_ uses [spaceship.airforce](https://spaceship.airforce) to interact with App Store Connect 🚀 + +------- + +

+    Usage •
+    Tips

+ +------- + +
pilot is part of fastlane: The easiest way to automate beta deployments and releases for your iOS and Android apps.
+
+# Usage
+
+For all commands, you can either use an [API Key](#app-store-connect-api-key) or your [Apple ID](#apple-id).
+
+### App Store Connect API Key
+
+The App Store Connect API Key is the preferred authentication method (if you are able to use it).
+
+- Uses the official App Store Connect API
+- No need for 2FA
+- Better performance than Apple ID
+
+Specify the API key using `--api_key_path ./path/to/api_key_info.json` or `--api_key "{\"key_id\": \"D83848D23\", \"issuer_id\": \"227b0bbf-ada8-458c-9d62-3d8022b7d07f\", \"key_filepath\": \"D83848D23.p8\"}"`
+
+Go to [Using App Store Connect API](/app-store-connect-api) for information on obtaining an API key, the _fastlane_ `api_key_info.json` format, and other API key usage.
+
+### Apple ID
+
+Specify the Apple ID to use with `-u felix@krausefx.com`. If you execute _pilot_ in a project already using [_fastlane_](https://fastlane.tools), the username and app identifier will automatically be determined.
+
+## Uploading builds
+
+To upload a new build, just run
+
+```no-highlight
+fastlane pilot upload
+```
+
+This will automatically look for an `ipa` in your current directory and try to fetch the login credentials from your [fastlane setup](https://fastlane.tools).
+
+You'll be asked for any missing information. Additionally, you can pass all kinds of parameters; to see them, run:
+
+```no-highlight
+fastlane action pilot
+```
+
+You can pass a changelog using
+
+```no-highlight
+fastlane pilot upload --changelog "Something that is new here"
+```
+
+You can also skip the submission of the binary, which means the `ipa` file will only be uploaded and not distributed to testers:
+
+```no-highlight
+fastlane pilot upload --skip_submission
+```
+
+_pilot_ does all kinds of magic for you:
+
+- Automatically detects the bundle identifier from your `ipa` file
+- Automatically fetches the AppID of your app based on the bundle identifier
+
+_pilot_ uses [_spaceship_](https://spaceship.airforce) to submit the build metadata and the iTunes Transporter to upload the binary. Because iTunes Transporter's upload capability is only supported on OS X, `pilot upload` does not work on Linux, as described [in this issue](https://github.com/fastlane/fastlane/issues/5789)
+
+## List builds
+
+To list all builds for a specific application, use
+
+```no-highlight
+fastlane pilot builds
+```
+
+The result lists all active builds and processing builds:
+
+```no-highlight
++-----------+---------+----------+
+|        Great App Builds        |
++-----------+---------+----------+
+| Version # | Build # | Installs |
++-----------+---------+----------+
+| 0.9.13    | 1       | 0        |
+| 0.9.13    | 2       | 0        |
+| 0.9.20    | 3       | 0        |
+| 0.9.20    | 4       | 3        |
++-----------+---------+----------+
+```
+
+## Managing beta testers
+
+### List of Testers
+
+This command will list all your testers, both internal and external.
+
+```no-highlight
+fastlane pilot list
+```
+
+The output will look like this:
+
+```no-highlight
++--------+--------+--------------------------+-----------+
+|                    Internal Testers                    |
++--------+--------+--------------------------+-----------+
+| First  | Last   | Email                    | # Devices |
++--------+--------+--------------------------+-----------+
+| Felix  | Krause | felix@krausefx.com       | 2         |
++--------+--------+--------------------------+-----------+
+
++-----------+---------+----------------------------+-----------+
+|                       External Testers                       |
++-----------+---------+----------------------------+-----------+
+| First     | Last    | Email                      | # Devices |
++-----------+---------+----------------------------+-----------+
+| Max       | Manfred | email@email.com            | 0         |
+| Detlef    | Müller  | detlef@krausefx.com        | 1         |
++-----------+---------+----------------------------+-----------+
+```
+
+### Add a new tester
+
+To add a new tester to your App Store Connect account and associate them with at least one testing group of your app, use the `pilot add` command. This will create a new tester (if necessary) or add an existing tester to the app to test.
+
+```no-highlight
+fastlane pilot add email@invite.com -g group-1,group-2
+```
+
+Additionally, you can specify the app identifier (if necessary):
+
+```no-highlight
+fastlane pilot add email@email.com -a com.krausefx.app -g group-1,group-2
+```
+
+### Find a tester
+
+To find a specific tester, use
+
+```no-highlight
+fastlane pilot find felix@krausefx.com
+```
+
+The resulting output will look like this:
+
+```no-highlight
++---------------------+---------------------+
+|            felix@krausefx.com             |
++---------------------+---------------------+
+| First name          | Felix               |
+| Last name           | Krause              |
+| Email               | felix@krausefx.com  |
+| Latest Version      | 0.9.14 (23          |
+| Latest Install Date | 03/28/15 19:00      |
+| 2 Devices           | • iPhone 6, iOS 8.3 |
+|                     | • iPhone 5, iOS 7.0 |
++---------------------+---------------------+
+```
+
+### Remove a tester
+
+This command will remove a beta tester from your app (from all internal and external groups):
+
+```no-highlight
+fastlane pilot remove felix@krausefx.com
+```
+
+You can also use the `groups` option to remove the tester only from the groups specified:
+
+```no-highlight
+fastlane pilot remove felix@krausefx.com -g group-1,group-2
+```
+
+### Export testers
+
+To export all external testers to a CSV file (useful if you need to import tester info to another system or a new account), run:
+
+```no-highlight
+fastlane pilot export
+```
+
+### Import testers
+
+Add external testers from a CSV file. Create a file (e.g. `testers.csv`) and fill it with the following format:
+
+```no-highlight
+John,Appleseed,appleseed_john@mac.com,group-1;group-2
+```
+
+```no-highlight
+fastlane pilot import
+```
+
+You can also specify the file path using
+
+```no-highlight
+fastlane pilot export -c ~/Desktop/testers.csv
+fastlane pilot import -c ~/Desktop/testers.csv
+```
+
+# Tips
+
+## Debug information
+
+If you run into any issues, you can use the `verbose` mode to get a more detailed output:
+
+```no-highlight
+fastlane pilot upload --verbose
+```
+
+## Firewall Issues
+
+_pilot_ uses the iTunes [Transporter](https://help.apple.com/itc/transporteruserguide/#/apdATD1E1288-D1E1A1303-D1E1288A1126) to upload metadata and binaries. In case you are behind a firewall, you can specify a different transporter protocol from the command line using
+
+```no-highlight
+DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS="-t DAV" pilot ...
+```
+
+If you are using _pilot_ via the [fastlane action](https://docs.fastlane.tools/actions#pilot), add the following to your `Fastfile`:
+
+```no-highlight
+ENV["DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS"] = "-t DAV"
+pilot...
+```
+
+Note, however, that Apple recommends you don’t specify the `-t transport` and instead allow Transporter to use automatic transport discovery to determine the best transport mode for your packages. For this reason, if the `t` option is passed, we will raise a warning.
+
+Also note that `-t` is not the only additional parameter that can be used. The string specified in the `DELIVER_ITMSTRANSPORTER_ADDITIONAL_UPLOAD_PARAMETERS` environment variable will be forwarded to Transporter. For all the available options, check [Apple's Transporter User Guide](https://help.apple.com/itc/transporteruserguide/#/apdATD1E1288-D1E1A1303-D1E1288A1126).
+
+## Credentials Issues
+
+If your password contains special characters, _pilot_ may throw a confusing error saying "Your Apple ID or password was entered incorrectly". The easiest way to fix this error is to change your password to something that **does not** contain special characters.
+
+## How is my password stored?
+
+_pilot_ uses the [CredentialsManager](https://github.com/fastlane/fastlane/tree/master/credentials_manager) from _fastlane_.
+
+## Provider Short Name
+If you are on multiple App Store Connect teams, iTunes Transporter may need a provider short name to know where to upload your binary. _pilot_ will try to use the long name of the selected team to detect the provider short name. To override the detected value with an explicit one, use the `itc_provider` option.
+
+## Use an Application Specific Password to upload
+
+_pilot_/`upload_to_testflight` can use an [Application Specific Password via the `FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD` environment variable](https://docs.fastlane.tools/best-practices/continuous-integration/#application-specific-passwords) to upload a binary if both the `skip_waiting_for_build_processing` and `apple_id` options are set. (If any of those are not set, it will use the normal Apple login process that might require 2FA authentication.)
+
+## Role for App Store Connect User
+_pilot_/`upload_to_testflight` updates build information and testers after the build has finished processing. App Store Connect requires the "App Manager" or "Admin" role for your Apple account to update this information. The "Developer" role will allow builds to be uploaded but _will not_ allow updating of build information and testers.
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dotgpg_environment.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dotgpg_environment.rb
new file mode 100644
index 0000000..e107e2e
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dotgpg_environment.rb
@@ -0,0 +1,57 @@
+module Fastlane
+  module Actions
+    module SharedValues
+    end
+
+    class DotgpgEnvironmentAction < Action
+      def self.run(options)
+        Actions.verify_gem!('dotgpg')
+        require 'dotgpg/environment'
+
+        UI.message("Reading secrets from #{options[:dotgpg_file]}")
+        Dotgpg::Environment.new(options[:dotgpg_file]).apply
+      end
+
+      def self.description
+        "Reads in production secrets set in a dotgpg file and puts them in ENV"
+      end
+
+      def self.details
+        "More information about dotgpg can be found at [https://github.com/ConradIrwin/dotgpg](https://github.com/ConradIrwin/dotgpg)."
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :dotgpg_file, + env_name: "DOTGPG_FILE", + description: "Path to your gpg file", + code_gen_sensitive: true, + default_value: Dir["dotgpg/*.gpg"].last, + default_value_dynamic: true, + optional: false, + verify_block: proc do |value| + UI.user_error!("Dotgpg file '#{File.expand_path(value)}' not found") unless File.exist?(value) + end) + ] + end + + def self.authors + ["simonlevy5"] + end + + def self.example_code + [ + "dotgpg_environment(dotgpg_file: './path/to/gpgfile')" + ] + end + + def self.category + :misc + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download.rb new file mode 100644 index 0000000..b572125 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download.rb @@ -0,0 +1,76 @@ +module Fastlane + module Actions + module SharedValues + DOWNLOAD_CONTENT = :DOWNLOAD_CONTENT + end + + class DownloadAction < Action + def self.run(params) + require 'net/http' + + begin + result = Net::HTTP.get(URI(params[:url])) + begin + result = JSON.parse(result) # try to parse and see if it's valid JSON data + rescue + # never mind, using standard text data instead + end + Actions.lane_context[SharedValues::DOWNLOAD_CONTENT] = result + rescue => ex + UI.user_error!("Error fetching remote file: #{ex}") + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Download a file from a remote server (e.g. JSON file)" + end + + def self.details + [ + "Specify the URL to download and get the content as a return value.", + "Automatically parses JSON into a Ruby data structure.", + "For more advanced networking code, use the Ruby functions instead: [http://docs.ruby-lang.org/en/2.0.0/Net/HTTP.html](http://docs.ruby-lang.org/en/2.0.0/Net/HTTP.html)." 
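+ # Note: besides being returned, the downloaded (and possibly JSON-parsed) content is also stored in lane_context[SharedValues::DOWNLOAD_CONTENT]; see self.run above.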
+ ].join("\n")
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :url,
+ env_name: "FL_DOWNLOAD_URL",
+ description: "The URL that should be downloaded",
+ verify_block: proc do |value|
+ UI.important("The URL doesn't start with http or https") unless value.start_with?("http")
+ end)
+ ]
+ end
+
+ def self.output
+ [
+ ['DOWNLOAD_CONTENT', 'The content of the file we just downloaded']
+ ]
+ end
+
+ def self.example_code
+ [
+ 'data = download(url: "https://host.com/api.json")'
+ ]
+ end
+
+ def self.category
+ :misc
+ end
+
+ def self.authors
+ ["KrauseFx"]
+ end
+
+ def self.is_supported?(platform)
+ true
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_app_privacy_details_from_app_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_app_privacy_details_from_app_store.rb
new file mode 100644
index 0000000..ef78214
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_app_privacy_details_from_app_store.rb
@@ -0,0 +1,142 @@
+module Fastlane
+ module Actions
+ class DownloadAppPrivacyDetailsFromAppStoreAction < Action
+ DEFAULT_PATH = Fastlane::Helper.fastlane_enabled_folder_path
+ DEFAULT_FILE_NAME = "app_privacy_details.json"
+
+ def self.run(params)
+ require 'spaceship'
+
+ # Prompts to select a team if there are multiple teams and none is specified
+ UI.message("Login to App Store Connect (#{params[:username]})")
+ Spaceship::ConnectAPI.login(params[:username], use_portal: false, use_tunes: true, tunes_team_id: params[:team_id], team_name: params[:team_name])
+ UI.message("Login successful")
+
+ # Get App
+ app = Spaceship::ConnectAPI::App.find(params[:app_identifier])
+ unless app
+ UI.user_error!("Could not find app with bundle identifier '#{params[:app_identifier]}' on account #{params[:username]}")
+ end
+
+ # Download usages and return a config
+ raw_usages = download_app_data_usages(params, app)
+
+ usages_config = []
+ if raw_usages.count == 1 && raw_usages.first.data_protection.id == Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_NOT_COLLECTED
+ usages_config << {
+ "data_protections" => [Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_NOT_COLLECTED]
+ }
+ else
+ grouped_usages = raw_usages.group_by do |usage|
+ usage.category.id
+ end
+ grouped_usages.sort_by(&:first).each do |key, usage_group|
+ purposes = usage_group.map(&:purpose).compact || []
+ data_protections = usage_group.map(&:data_protection).compact || []
+ usages_config << {
+ "category" => key,
+ "purposes" => purposes.map(&:id).sort.uniq,
+ "data_protections" => data_protections.map(&:id).sort.uniq
+ }
+ end
+ end
+
+ # Save to JSON file
+ json = JSON.pretty_generate(usages_config)
+ path = output_path(params)
+
+ UI.message("Writing file to #{path}")
+ File.write(path, json)
+ end
+
+ def self.output_path(params)
+ path = params[:output_json_path]
+ return File.absolute_path(path)
+ end
+
+ def self.download_app_data_usages(params, app)
+ UI.message("Downloading App Data Usage")
+
+ # Fetch all existing usages (including their category, grouping, purpose and data protection)
+ Spaceship::ConnectAPI::AppDataUsage.all(app_id: app.id, includes: "category,grouping,purpose,dataProtection", limit: 500)
+ end
+
+ def self.description
+ "Download App Privacy Details from an app in App Store Connect"
+ end
+
+ def self.available_options
+ user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id)
+ user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id)
+
+ [
+ FastlaneCore::ConfigItem.new(key: :username,
+ env_name: "FASTLANE_USER",
+ description: "Your Apple ID Username for App Store Connect",
+ default_value: user,
+ default_value_dynamic: true),
+ FastlaneCore::ConfigItem.new(key: :app_identifier,
+ env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_APP_IDENTIFIER",
+ description: "The bundle identifier of your app",
+ code_gen_sensitive: true,
+ default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier),
+ default_value_dynamic: true),
+ FastlaneCore::ConfigItem.new(key: :team_id,
+ env_name: "FASTLANE_ITC_TEAM_ID",
+ description: "The ID of your App Store Connect team if you're in multiple teams",
+ optional: true,
+ skip_type_validation: true, # as we also allow integers, which we convert to strings anyway
+ code_gen_sensitive: true,
+ default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id),
+ default_value_dynamic: true),
+ FastlaneCore::ConfigItem.new(key: :team_name,
+ env_name: "FASTLANE_ITC_TEAM_NAME",
+ description: "The name of your App Store Connect team if you're in multiple teams",
+ optional: true,
+ code_gen_sensitive: true,
+ default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name),
+ default_value_dynamic: true),
+
+ # JSON paths
+ FastlaneCore::ConfigItem.new(key: :output_json_path,
+ env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_OUTPUT_JSON_PATH",
+ description: "Path to the app usage data JSON file generated by interactive questions",
+ conflicting_options: [:skip_json_file_saving],
+ default_value: File.join(DEFAULT_PATH, DEFAULT_FILE_NAME))
+ ]
+ end
+
+ def self.author
+ "igor-makarov"
+ end
+
+ def self.is_supported?(platform)
+ [:ios, :mac, :tvos].include?(platform)
+ end
+
+ def self.details
+ "Download App Privacy Details from an app in App Store Connect. For more detailed information, see https://docs.fastlane.tools/uploading-app-privacy-details"
+ end
+
+ def self.example_code
+ [
+ 'download_app_privacy_details_from_app_store(
+ username: "your@email.com",
+ team_name: "Your Team",
+ app_identifier: "com.your.bundle"
+ )',
+ 'download_app_privacy_details_from_app_store(
+ username: "your@email.com",
+ team_name: "Your Team",
+ app_identifier: "com.your.bundle",
+ output_json_path: "fastlane/app_data_usages.json"
+ )'
+ ]
+ end
+
+ def self.category
+ :production
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_dsyms.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_dsyms.rb
new file mode 100644
index 0000000..a99ed64
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_dsyms.rb
@@ -0,0 +1,373 @@
+module Fastlane
+ module Actions
+ module SharedValues
+ DSYM_PATHS = :DSYM_PATHS
+ DSYM_LATEST_UPLOADED_DATE = :DSYM_LATEST_UPLOADED_DATE
+ end
+ class DownloadDsymsAction < Action
+ # rubocop:disable Metrics/PerceivedComplexity
+ def self.run(params)
+ require 'openssl'
+ require 'spaceship'
+ require 'net/http'
+ require 'date'
+
+ if (api_token = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path]))
+ UI.message("Creating authorization token for App Store Connect API")
+ Spaceship::ConnectAPI.token = api_token
+ elsif !Spaceship::ConnectAPI.token.nil?
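+ # An authorization token was already configured earlier in this fastlane run, so reuse it instead of falling back to the Apple ID login below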
+ UI.message("Using existing authorization token for App Store Connect API")
+ else
+ # Team selection passed through FASTLANE_ITC_TEAM_ID and FASTLANE_ITC_TEAM_NAME environment variables
+ # Prompts to select a team if there are multiple teams and none is specified
+ UI.message("Login to App Store Connect (#{params[:username]})")
+ Spaceship::ConnectAPI.login(params[:username], use_portal: false, use_tunes: true)
+ UI.message("Login successful")
+ end
+
+ # Get App
+ app = Spaceship::ConnectAPI::App.find(params[:app_identifier])
+ unless app
+ UI.user_error!("Could not find app with bundle identifier '#{params[:app_identifier]}' on account #{params[:username]}")
+ end
+
+ # Process options
+ version = params[:version]
+ build_number = params[:build_number].to_s unless params[:build_number].nil?
+ itc_platform = params[:platform]
+ output_directory = params[:output_directory]
+ wait_for_dsym_processing = params[:wait_for_dsym_processing]
+ wait_timeout = params[:wait_timeout]
+ min_version = Gem::Version.new(params[:min_version]) if params[:min_version]
+ after_uploaded_date = DateTime.parse(params[:after_uploaded_date]) unless params[:after_uploaded_date].nil?
+
+ platform = Spaceship::ConnectAPI::Platform.map(itc_platform)
+
+ # Set version if it is latest
+ if version == 'latest'
+ # Try to grab the edit version first, else fall back to the live version
+ UI.message("Looking for latest build...")
+ latest_build = get_latest_build!(app_id: app.id, platform: platform)
+ version = latest_build.app_version
+ build_number = latest_build.version
+ elsif version == 'live'
+ UI.message("Looking for live version...")
+ live_version = app.get_live_app_store_version(platform: platform)
+
+ UI.user_error!("Could not find live version for your app, please try setting 'latest' or a specific version") if live_version.nil?
+ + # No need to search for candidates, because released App Store version should only have one build + version = live_version.version_string + build_number = live_version.build.version + end + + # Make sure output_directory has a slash on the end + if output_directory && !output_directory.end_with?('/') + output_directory += '/' + end + + # Write a nice message + message = [] + message << "Looking for dSYM files for '#{params[:app_identifier]}' on platform #{platform}" + message << "v#{version}" if version + message << "(#{build_number})" if build_number + UI.message(message.join(" ")) + + filter = { app: app.id } + filter["preReleaseVersion.platform"] = platform + filter["preReleaseVersion.version"] = version if version + filter["version"] = build_number if build_number + build_resp = Spaceship::ConnectAPI.get_builds(filter: filter, sort: "-uploadedDate", includes: "preReleaseVersion,buildBundles") + + build_resp.all_pages_each do |build| + asc_app_version = build.app_version + asc_build_number = build.version + uploaded_date = DateTime.parse(build.uploaded_date) + + message = [] + message << "Found train (version): #{asc_app_version}" + message << ", comparing to supplied version: #{version}" if version + UI.verbose(message.join(" ")) + + if version && version != asc_app_version + UI.verbose("Version #{version} doesn't match: #{asc_app_version}") + next + end + + if min_version && min_version > Gem::Version.new(asc_app_version) + UI.verbose("Min version #{min_version} not reached: #{asc_app_version}") + next + end + + if after_uploaded_date && after_uploaded_date >= uploaded_date + UI.verbose("Upload date #{after_uploaded_date} not reached: #{uploaded_date}") + break + end + + message = [] + message << "Found build version: #{asc_build_number}" + message << ", comparing to supplied build_number: #{build_number}" if build_number + UI.verbose(message.join(" ")) + + if build_number && asc_build_number != build_number + UI.verbose("build_version: #{asc_build_number} doesn't match: #{build_number}") + next + end + + UI.verbose("Build_version: #{asc_build_number} matches #{build_number}, grabbing dsym_url") if build_number + download_dsym(build: build, app: app, wait_for_dsym_processing: wait_for_dsym_processing, wait_timeout: wait_timeout, output_directory: output_directory) + end + end + + def self.download_dsym(build: nil, app: nil, wait_for_dsym_processing: nil, wait_timeout: nil, output_directory: nil) + start = Time.now + dsym_urls = [] + + loop do + build_bundles = build.build_bundles.select { |b| b.includes_symbols == true } + dsym_urls = build_bundles.map(&:dsym_url).compact + + break if build_bundles.count == dsym_urls.count + + if !wait_for_dsym_processing || (Time.now - start) > wait_timeout + # In some cases, AppStoreConnect does not process the dSYMs, thus no error should be thrown. + UI.message("Could not find any dSYM for #{build.version} (#{build.app_version})") + break + else + UI.message("Waiting for dSYM file to appear...") + sleep(30) unless FastlaneCore::Helper.is_test? 
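+ # Fetch the build again so the next loop iteration sees freshly processed buildBundles (and their dsym_url values)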
+ build = Spaceship::ConnectAPI::Build.get(build_id: build.id) + end + end + + if dsym_urls.count == 0 + UI.message("No dSYM URL for #{build.version} (#{build.app_version})") + else + dsym_urls.each do |url| + self.download(url, build, app, output_directory) + end + end + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.get_latest_build!(app_id: nil, platform: nil) + filter = { app: app_id } + filter["preReleaseVersion.platform"] = platform + latest_build = Spaceship::ConnectAPI.get_builds(filter: filter, sort: "-uploadedDate", includes: "preReleaseVersion,buildBundles").first + + if latest_build.nil? + UI.user_error!("Could not find any build for platform #{platform}") if platform + UI.user_error!("Could not find any build") + end + + return latest_build + end + + def self.download(download_url, build, app, output_directory) + result = self.download_file(download_url) + path = write_dsym(result, app.bundle_id, build.app_version, build.version, output_directory) + UI.success("🔑 Successfully downloaded dSYM file for #{build.app_version} - #{build.version} to '#{path}'") + + Actions.lane_context[SharedValues::DSYM_PATHS] ||= [] + Actions.lane_context[SharedValues::DSYM_PATHS] << File.expand_path(path) + + unless build.uploaded_date.nil? + Actions.lane_context[SharedValues::DSYM_LATEST_UPLOADED_DATE] ||= build.uploaded_date + current_latest = Actions.lane_context[SharedValues::DSYM_LATEST_UPLOADED_DATE] + Actions.lane_context[SharedValues::DSYM_LATEST_UPLOADED_DATE] = [current_latest, build.uploaded_date].max + UI.verbose("Most recent build uploaded_date #{Actions.lane_context[SharedValues::DSYM_LATEST_UPLOADED_DATE]}") + end + end + + def self.write_dsym(data, bundle_id, train_number, build_number, output_directory) + file_name = "#{bundle_id}-#{train_number}-#{build_number}.dSYM.zip" + if output_directory + file_name = output_directory + file_name + end + File.binwrite(file_name, data) + file_name + end + + def self.download_file(url) + uri = URI.parse(url) + if ENV['http_proxy'] + UI.verbose("Found 'http_proxy' environment variable so connect via proxy") + proxy_uri = URI.parse(ENV['http_proxy']) + http = Net::HTTP.new(uri.host, uri.port, proxy_uri.host, proxy_uri.port) + else + http = Net::HTTP.new(uri.host, uri.port) + end + http.read_timeout = 300 + http.use_ssl = (uri.scheme == "https") + res = http.get(uri.request_uri) + res.body + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Download dSYM files from App Store Connect for Bitcode apps" + end + + def self.details + sample = <<-SAMPLE.markdown_sample + ```ruby + lane :refresh_dsyms do + download_dsyms # Download dSYM files from iTC + upload_symbols_to_crashlytics # Upload them to Crashlytics + clean_build_artifacts # Delete the local dSYM files + end + ``` + SAMPLE + + [ + "This action downloads dSYM files from App Store Connect after the ipa gets re-compiled by Apple. 
Useful if you have Bitcode enabled.".markdown_preserve_newlines, + sample + ].join("\n") + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["DOWNLOAD_DSYMS_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["DOWNLOAD_DSYMS_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#use-return-value-and-pass-in-as-an-option)", + type: Hash, + default_value: Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::APP_STORE_CONNECT_API_KEY], + default_value_dynamic: true, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "DOWNLOAD_DSYMS_USERNAME", + description: "Your Apple ID Username for App Store Connect", + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :app_identifier, + short_option: "-a", + env_name: "DOWNLOAD_DSYMS_APP_IDENTIFIER", + description: "The bundle identifier of your app", + optional: false, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-k", + env_name: "DOWNLOAD_DSYMS_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # as we also allow integers, which we convert to strings anyway + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-e", + env_name: "DOWNLOAD_DSYMS_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_NAME"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :platform, + short_option: "-p", + env_name: "DOWNLOAD_DSYMS_PLATFORM", + description: "The app platform for dSYMs you wish to download (ios, appletvos)", + default_value: :ios), + FastlaneCore::ConfigItem.new(key: :version, + short_option: "-v", + env_name: "DOWNLOAD_DSYMS_VERSION", + description: "The app version for dSYMs you wish to download, pass in 'latest' to download only the latest build's dSYMs or 'live' to download only the live version dSYMs", + optional: true), + FastlaneCore::ConfigItem.new(key: :build_number, + short_option: "-b", + env_name: "DOWNLOAD_DSYMS_BUILD_NUMBER", + description: "The app build_number for dSYMs you wish to download", + optional: true, + skip_type_validation: true), # as we also 
allow integers, which we convert to strings anyway
+ FastlaneCore::ConfigItem.new(key: :min_version,
+ short_option: "-m",
+ env_name: "DOWNLOAD_DSYMS_MIN_VERSION",
+ description: "The minimum app version for dSYMs you wish to download",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :after_uploaded_date,
+ short_option: "-d",
+ env_name: "DOWNLOAD_DSYMS_AFTER_UPLOADED_DATE",
+ description: "The uploaded date after which you wish to download dSYMs",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :output_directory,
+ short_option: "-s",
+ env_name: "DOWNLOAD_DSYMS_OUTPUT_DIRECTORY",
+ description: "Where to save the downloaded dSYMs, defaults to the current path",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :wait_for_dsym_processing,
+ short_option: "-w",
+ env_name: "DOWNLOAD_DSYMS_WAIT_FOR_DSYM_PROCESSING",
+ description: "Wait for dSYMs to process",
+ optional: true,
+ default_value: false,
+ type: Boolean),
+ FastlaneCore::ConfigItem.new(key: :wait_timeout,
+ short_option: "-t",
+ env_name: "DOWNLOAD_DSYMS_WAIT_TIMEOUT",
+ description: "Number of seconds to wait for dSYMs to process",
+ optional: true,
+ default_value: 300,
+ type: Integer)
+ ]
+ end
+
+ def self.output
+ [
+ ['DSYM_PATHS', 'An array of paths to all the zipped dSYM files'],
+ ['DSYM_LATEST_UPLOADED_DATE', 'Date of the most recent uploaded time of successfully downloaded dSYM files']
+ ]
+ end
+
+ def self.return_value
+ nil
+ end
+
+ def self.authors
+ ["KrauseFx"]
+ end
+
+ def self.is_supported?(platform)
+ [:ios, :appletvos].include?(platform)
+ end
+
+ def self.example_code
+ [
+ 'download_dsyms',
+ 'download_dsyms(version: "1.0.0", build_number: "345")',
+ 'download_dsyms(version: "1.0.1", build_number: 42)',
+ 'download_dsyms(version: "live")',
+ 'download_dsyms(min_version: "1.2.3")',
+ 'download_dsyms(after_uploaded_date: "2020-09-11T19:00:00+01:00")'
+ ]
+ end
+
+ def self.category
+ :app_store_connect
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_from_play_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_from_play_store.rb
new file mode 100644
index 0000000..00a1a85
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/download_from_play_store.rb
@@ -0,0 +1,61 @@
+module Fastlane
+ module Actions
+ class DownloadFromPlayStoreAction < Action
+ def self.run(params)
+ require 'supply'
+ require 'supply/options'
+
+ Supply.config = params # we already have the finished config
+
+ require 'supply/setup'
+ Supply::Setup.new.perform_download
+ end
+
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Download metadata and binaries from Google Play (via _supply_)"
+ end
+
+ def self.details
+ "More information: https://docs.fastlane.tools/actions/download_from_play_store/"
+ end
+
+ def self.available_options
+ require 'supply'
+ require 'supply/options'
+ options = Supply::Options.available_options.clone
+
+ # remove all the unnecessary (for this action) options
+ options_to_keep = [:package_name, :version_name, :track, :metadata_path, :json_key, :json_key_data, :root_url, :timeout, :key, :issuer]
+ options.delete_if { |option| options_to_keep.include?(option.key) == false }
+ end
+
+ def self.output
+ end
+
+ def self.return_value
+ end
+
+ def self.authors
+ ["janpio"]
+ end
+
+ def self.is_supported?(platform)
+ platform == :android
+ end
+
+ def self.example_code
+ [
+ 'download_from_play_store'
+ ]
+ end
+
+ def self.category
+ :production
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dsym_zip.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dsym_zip.rb
new file mode 100644
index 0000000..ec3314b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/dsym_zip.rb
@@ -0,0 +1,93 @@
+require 'plist'
+
+module Fastlane
+ module Actions
+ module SharedValues
+ DSYM_ZIP_PATH = :DSYM_ZIP_PATH
+ end
+
+ class DsymZipAction < Action
+ def self.run(params)
+ archive = params[:archive_path]
+ params[:dsym_path] ||= File.join("#{File.basename(archive, '.*')}.app.dSYM.zip")
+
+ dsym_folder_path = File.expand_path(File.join(archive, 'dSYMs'))
+ zipped_dsym_path = File.expand_path(params[:dsym_path])
+
+ Actions.lane_context[SharedValues::DSYM_ZIP_PATH] = zipped_dsym_path
+
+ if params[:all]
+ Actions.sh(%(cd "#{dsym_folder_path}" && zip -r "#{zipped_dsym_path}" "#{dsym_folder_path}"/*.dSYM))
+ else
+ plist = Plist.parse_xml(File.join(archive, 'Info.plist'))
+ app_name = Helper.test? ? 'MyApp.app' : File.basename(plist['ApplicationProperties']['ApplicationPath'])
+ dsym_name = "#{app_name}.dSYM"
+ Actions.sh(%(cd "#{dsym_folder_path}" && zip -r "#{zipped_dsym_path}" "#{dsym_name}"))
+ end
+ end
+
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.is_supported?(platform)
+ [:ios, :mac].include?(platform)
+ end
+
+ def self.description
+ 'Creates a zipped dSYM in the project root from the .xcarchive'
+ end
+
+ def self.details
+ "You can manually specify the path to the xcarchive (not needed if you use `xcodebuild`/`xcarchive` to build your archive)"
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :archive_path,
+ description: 'Path to your xcarchive file. Optional if you use the `xcodebuild` action',
+ default_value: Actions.lane_context[SharedValues::XCODEBUILD_ARCHIVE],
+ default_value_dynamic: true,
+ optional: true,
+ env_name: 'DSYM_ZIP_XCARCHIVE_PATH',
+ verify_block: proc do |value|
+ UI.user_error!("Couldn't find xcarchive file at path '#{value}'") if !Helper.test? && !File.exist?(value)
+ end),
+ FastlaneCore::ConfigItem.new(key: :dsym_path,
+ description: 'Path for generated dsym. Optional, default is your app\'s root directory',
+ optional: true,
+ env_name: 'DSYM_ZIP_DSYM_PATH'),
+ FastlaneCore::ConfigItem.new(key: :all,
+ description: 'Whether or not all dSYM files are to be included. Optional, default is false, in which case only your app dSYM is included',
+ default_value: false,
+ optional: true,
+ type: Boolean,
+ env_name: 'DSYM_ZIP_ALL')
+ ]
+ end
+
+ def self.output
+ [
+ ['DSYM_ZIP_PATH', 'The name of the zipped dSYM']
+ ]
+ end
+
+ def self.author
+ 'lmirosevic'
+ end
+
+ def self.example_code
+ [
+ 'dsym_zip',
+ 'dsym_zip(
+ archive_path: "MyApp.xcarchive"
+ )'
+ ]
+ end
+
+ def self.category
+ :misc
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/echo.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/echo.rb
new file mode 100644
index 0000000..030b03b
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/echo.rb
@@ -0,0 +1,14 @@
+module Fastlane
+ module Actions
+ require 'fastlane/actions/puts'
+ class EchoAction < PutsAction
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Alias for the `puts` action"
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_bundle_exec.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_bundle_exec.rb
new file mode 100644
index 0000000..caae2c8
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_bundle_exec.rb
@@ -0,0 +1,59 @@
+module Fastlane
+ module Actions
+ module SharedValues
+ end
+
+ # Raises an exception and stops the lane execution if not using bundle exec to run fastlane
+ class EnsureBundleExecAction < Action
+ def self.run(params)
+ return if PluginManager.new.gemfile_path.nil?
+ if FastlaneCore::Helper.bundler?
+ UI.success("Using bundled fastlane ✅")
+ else
+ UI.user_error!("fastlane detected a Gemfile in the current directory. However, it seems like you didn't use `bundle exec`. Use `bundle exec fastlane #{ARGV.join(' ')}` instead.")
+ end
+ end
+
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Raises an exception if not using `bundle exec` to run fastlane"
+ end
+
+ def self.details
+ [
+ "This action will check if you are using `bundle exec` to run fastlane.",
+ "You can put it into `before_all` to make sure that fastlane is run using the `bundle exec fastlane` command."
+ ].join("\n")
+ end
+
+ def self.available_options
+ []
+ end
+
+ def self.output
+ []
+ end
+
+ def self.author
+ ['rishabhtayal']
+ end
+
+ def self.example_code
+ [
+ "ensure_bundle_exec"
+ ]
+ end
+
+ def self.category
+ :misc
+ end
+
+ def self.is_supported?(platform)
+ true
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_env_vars.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_env_vars.rb
new file mode 100644
index 0000000..e2253ba
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_env_vars.rb
@@ -0,0 +1,54 @@
+module Fastlane
+ module Actions
+ class EnsureEnvVarsAction < Action
+ def self.run(params)
+ variables = params[:env_vars]
+ missing_variables = variables.select { |variable| ENV[variable].to_s.strip.empty? }
+
+ UI.user_error!("Missing environment variable(s) '#{missing_variables.join('\', \'')}'") unless missing_variables.empty?
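+ # Every requested variable is present; build a success message with the correct singular/plural wording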
+
+ is_one = variables.length == 1
+ UI.success("Environment variable#{is_one ? '' : 's'} '#{variables.join('\', \'')}' #{is_one ? 'is' : 'are'} set!")
+ end
+
+ def self.description
+ 'Raises an exception if the specified env vars are not set'
+ end
+
+ def self.details
+ 'This action will check if some environment variables are set.'
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :env_vars,
+ description: 'The environment variable names that should be checked',
+ type: Array,
+ verify_block: proc do |value|
+ UI.user_error!('Specify at least one environment variable name') if value.empty?
+ end)
+ ]
+ end
+
+ def self.authors
+ ['revolter']
+ end
+
+ def self.example_code
+ [
+ 'ensure_env_vars(
+ env_vars: [\'GITHUB_USER_NAME\', \'GITHUB_API_TOKEN\']
+ )'
+ ]
+ end
+
+ def self.category
+ :misc
+ end
+
+ def self.is_supported?(platform)
+ true
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_branch.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_branch.rb
new file mode 100644
index 0000000..5639cb2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_branch.rb
@@ -0,0 +1,68 @@
+module Fastlane
+ module Actions
+ module SharedValues
+ end
+
+ # Raises an exception and stops the lane execution if the repo is not on a specific branch
+ class EnsureGitBranchAction < Action
+ def self.run(params)
+ branch = params[:branch]
+ branch_expr = /#{branch}/
+ if Actions.git_branch =~ branch_expr
+ UI.success("Git branch matches `#{branch}`, all good! 💪")
+ else
+ UI.user_error!("Git is not on a branch matching `#{branch}`. Current branch is `#{Actions.git_branch}`! Please ensure the repo is checked out to the correct branch.")
+ end
+ end
+
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Raises an exception if not on a specific git branch"
+ end
+
+ def self.details
+ [
+ "This action will check if your git repo is checked out to a specific branch.",
+ "You may only want to make releases from a specific branch, so `ensure_git_branch` will stop a lane if it was accidentally executed on an incorrect branch."
+ ].join("\n")
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :branch,
+ env_name: "FL_ENSURE_GIT_BRANCH_NAME",
+ description: "The branch that should be checked for. String that can be either the full name of the branch or a regex e.g. `^feature\/.*$` to match",
+ default_value: 'master')
+ ]
+ end
+
+ def self.output
+ []
+ end
+
+ def self.author
+ ['dbachrach', 'Liquidsoul']
+ end
+
+ def self.example_code
+ [
+ "ensure_git_branch # defaults to `master` branch",
+ "ensure_git_branch(
+ branch: 'develop'
+ )"
+ ]
+ end
+
+ def self.category
+ :source_control
+ end
+
+ def self.is_supported?(platform)
+ true
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_status_clean.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_status_clean.rb
new file mode 100644
index 0000000..c3229ec
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_git_status_clean.rb
@@ -0,0 +1,102 @@
+module Fastlane
+ module Actions
+ module SharedValues
+ GIT_REPO_WAS_CLEAN_ON_START = :GIT_REPO_WAS_CLEAN_ON_START
+ end
+
+ # Raises an exception and stops the lane execution if the repo is not in a clean state
+ class EnsureGitStatusCleanAction < Action
+ def self.run(params)
+ if params[:ignored]
+ ignored_mode = params[:ignored]
+ ignored_mode = 'no' if ignored_mode == 'none'
+ repo_status = Actions.sh("git status --porcelain --ignored='#{ignored_mode}'")
+ else
+ repo_status = Actions.sh("git status --porcelain")
+ end
+
+ repo_clean = repo_status.empty?
+
+ if repo_clean
+ UI.success('Git status is clean, all good! 💪')
+ Actions.lane_context[SharedValues::GIT_REPO_WAS_CLEAN_ON_START] = true
+ else
+ error_message = "Git repository is dirty! Please ensure the repo is in a clean state by committing/stashing/discarding all changes first."
+ error_message += "\nUncommitted changes:\n#{repo_status}" if params[:show_uncommitted_changes]
+ if params[:show_diff]
+ repo_diff = Actions.sh("git diff")
+ error_message += "\nGit diff: \n#{repo_diff}"
+ end
+ UI.user_error!(error_message)
+ end
+ end
+
+ def self.description
+ "Raises an exception if there are uncommitted git changes"
+ end
+
+ def self.details
+ [
+ "A sanity check to make sure you are working in a repo that is clean.",
+ "Especially useful to put at the beginning of your Fastfile in the `before_all` block, if some of your other actions will touch your filesystem, do things to your git repo, or just as a general reminder to save your work.",
+ "Also needed as a prerequisite for some other actions like `reset_git_repo`."
+ ].join("\n")
+ end
+
+ def self.output
+ [
+ ['GIT_REPO_WAS_CLEAN_ON_START', 'Stores the fact that the git repo was clean at some point']
+ ]
+ end
+
+ def self.author
+ ["lmirosevic", "antondomashnev"]
+ end
+
+ def self.example_code
+ [
+ 'ensure_git_status_clean'
+ ]
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :show_uncommitted_changes,
+ env_name: "FL_ENSURE_GIT_STATUS_CLEAN_SHOW_UNCOMMITTED_CHANGES",
+ description: "Whether to show uncommitted changes if the repo is dirty",
+ optional: true,
+ default_value: false,
+ type: Boolean),
+ FastlaneCore::ConfigItem.new(key: :show_diff,
+ env_name: "FL_ENSURE_GIT_STATUS_CLEAN_SHOW_DIFF",
+ description: "Whether to show the git diff if the repo is dirty",
+ optional: true,
+ default_value: false,
+ type: Boolean),
+ FastlaneCore::ConfigItem.new(key: :ignored,
+ env_name: "FL_ENSURE_GIT_STATUS_CLEAN_IGNORED_FILE",
+ description: [
+ "The handling mode of the ignored files.
The available options are: `'traditional'`, `'none'` (default) and `'matching'`.", + "Specifying `'none'` to this parameter is the same as not specifying the parameter at all, which means that no ignored file will be used to check if the repo is dirty or not.", + "Specifying `'traditional'` or `'matching'` causes some ignored files to be used to check if the repo is dirty or not (more info in the official docs: https://git-scm.com/docs/git-status#Documentation/git-status.txt---ignoredltmodegt)" + ].join(" "), + optional: true, + verify_block: proc do |value| + mode = value.to_s + modes = %w(traditional none matching) + + UI.user_error!("Unsupported mode, must be: #{modes}") unless modes.include?(mode) + end) + ] + end + + def self.category + :source_control + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_no_debug_code.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_no_debug_code.rb new file mode 100644 index 0000000..2a7bc83 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_no_debug_code.rb @@ -0,0 +1,133 @@ +module Fastlane + module Actions + class EnsureNoDebugCodeAction < Action + def self.run(params) + command = "grep -RE '#{params[:text]}' '#{File.absolute_path(params[:path])}'" + + extensions = [] + extensions << params[:extension] unless params[:extension].nil? + + if params[:extensions] + params[:extensions].each do |extension| + extension.delete!('.') if extension.include?(".") + extensions << extension + end + end + + if extensions.count > 1 + command << " --include=\\*.{#{extensions.join(',')}}" + elsif extensions.count > 0 + command << " --include=\\*.#{extensions.join(',')}" + end + + command << " --exclude #{params[:exclude]}" if params[:exclude] + + if params[:exclude_dirs] + params[:exclude_dirs].each do |dir| + command << " --exclude-dir #{dir.shellescape}" + end + end + + return command if Helper.test? + + UI.important(command) + results = `#{command}` # we don't use `sh` as the return code of grep is wrong for some reason + + # Example Output + # ./fastlane.gemspec: spec.add_development_dependency 'my_word' + # ./Gemfile.lock: my_word (0.10.1) + + found = [] + results.split("\n").each do |current_raw| + found << current_raw.strip + end + + UI.user_error!("Found debug code '#{params[:text]}': \n\n#{found.join("\n")}") if found.count > 0 + UI.message("No debug code found in code base 🐛") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Ensures the given text is nowhere in the code base" + end + + def self.details + [ + "You don't want any debug code to slip into production.", + "This can be used to check if there is any debug code still in your codebase or if you have things like `// TO DO` or similar." 
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :text, + env_name: "FL_ENSURE_NO_DEBUG_CODE_TEXT", + description: "The text that must not be in the code base"), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_ENSURE_NO_DEBUG_CODE_PATH", + description: "The directory containing all the source files", + default_value: ".", + verify_block: proc do |value| + UI.user_error!("Couldn't find the folder at '#{File.absolute_path(value)}'") unless File.directory?(value) + end), + FastlaneCore::ConfigItem.new(key: :extension, + env_name: "FL_ENSURE_NO_DEBUG_CODE_EXTENSION", + description: "The extension that should be searched for", + optional: true, + verify_block: proc do |value| + value.delete!('.') if value.include?(".") + end), + FastlaneCore::ConfigItem.new(key: :extensions, + env_name: "FL_ENSURE_NO_DEBUG_CODE_EXTENSIONS", + description: "An array of file extensions that should be searched for", + optional: true, + type: Array), + FastlaneCore::ConfigItem.new(key: :exclude, + env_name: "FL_ENSURE_NO_DEBUG_CODE_EXCLUDE", + description: "Exclude a certain pattern from the search", + optional: true), + FastlaneCore::ConfigItem.new(key: :exclude_dirs, + env_name: "FL_ENSURE_NO_DEBUG_CODE_EXCLUDE_DIRS", + description: "An array of dirs that should not be included in the search", + optional: true, + type: Array) + ] + end + + def self.output + [] + end + + def self.authors + ["KrauseFx"] + end + + def self.example_code + [ + 'ensure_no_debug_code(text: "// TODO")', + 'ensure_no_debug_code(text: "Log.v", + extension: "java")', + 'ensure_no_debug_code(text: "NSLog", + path: "./lib", + extension: "m")', + 'ensure_no_debug_code(text: "(^#define DEBUG|NSLog)", + path: "./lib", + extension: "m")', + 'ensure_no_debug_code(text: "<<<<<<", + extensions: ["m", "swift", "java"])' + ] + end + + def self.category + :misc + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_xcode_version.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_xcode_version.rb new file mode 100644 index 0000000..d0d6c84 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ensure_xcode_version.rb @@ -0,0 +1,124 @@ +module Fastlane + module Actions + class EnsureXcodeVersionAction < Action + def self.run(params) + Actions.verify_gem!('xcode-install') + required_version = params[:version] + strict = params[:strict] + + if required_version.to_s.length == 0 + # The user didn't provide an Xcode version, let's see + # if the current project has a `.xcode-version` file + # + # The code below can be improved to also consider + # the directory of the Xcode project + xcode_version_paths = Dir.glob(".xcode-version") + + if xcode_version_paths.first + UI.verbose("Loading required version from #{xcode_version_paths.first}") + required_version = File.read(xcode_version_paths.first).strip + else + UI.user_error!("No version: provided when calling the `ensure_xcode_version` action") + end + end + + selected_version = sh("xcversion selected").match(/^Xcode (.*)$/)[1] + + begin + selected_version = Gem::Version.new(selected_version) + required_version = Gem::Version.new(required_version) + rescue ArgumentError => ex + UI.user_error!("Invalid version number provided, make sure it's valid: #{ex}") + end + + if strict == true + if selected_version == required_version + success(selected_version) + else + 
error(selected_version, required_version) + end + else + required_version_numbers = required_version.to_s.split(".") + selected_version_numbers = selected_version.to_s.split(".") + + required_version_numbers.each_with_index do |required_version_number, index| + selected_version_number = selected_version_numbers[index] + next unless required_version_number != selected_version_number + error(selected_version, required_version) + break + end + + success(selected_version) + end + end + + def self.success(selected_version) + UI.success("Selected Xcode version is correct: #{selected_version}") + end + + def self.error(selected_version, required_version) + UI.message("Selected Xcode version is not correct: #{selected_version}. You expected #{required_version}.") + UI.message("To correct this, use: `xcode_select(version: #{required_version})`.") + + UI.user_error!("Selected Xcode version doesn't match your requirement.\nExpected: Xcode #{required_version}\nActual: Xcode #{selected_version}\n") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Ensure the right version of Xcode is used" + end + + def self.details + [ + "If building your app requires a specific version of Xcode, you can invoke this command before using gym.", + "For example, to ensure that a beta version of Xcode is not accidentally selected to build, which would make uploading to TestFlight fail.", + "You can either manually provide a specific version using `version:` or you make use of the `.xcode-version` file.", + "Using the `strict` parameter, you can either verify the full set of version numbers strictly (i.e. `11.3.1`) or only a subset of them (i.e. `11.3` or `11`)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_ENSURE_XCODE_VERSION", + description: "Xcode version to verify that is selected", + optional: true), + FastlaneCore::ConfigItem.new(key: :strict, + description: "Should the version be verified strictly (all 3 version numbers), or matching only the given version numbers (i.e. `11.3` == `11.3.x`)", + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['FL_ENSURE_XCODE_VERSION', 'Xcode version to verify that is selected'] + ] + end + + def self.return_value + end + + def self.authors + ["JaviSoto", "KrauseFx"] + end + + def self.example_code + [ + 'ensure_xcode_version(version: "12.5")' + ] + end + + def self.category + :building + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/environment_variable.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/environment_variable.rb new file mode 100644 index 0000000..211d0c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/environment_variable.rb @@ -0,0 +1,69 @@ +module Fastlane + module Actions + class EnvironmentVariableAction < Action + def self.run(params) + values_to_set = params[:set] + value_to_get = params[:get] + value_to_remove = params[:remove] + + # clear out variables that were removed + ENV[value_to_remove] = nil unless value_to_remove.nil? + + # if we have to set variables, do that now + unless values_to_set.nil? 
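+ # Copy each provided key/value pair into this process's environment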
+ values_to_set.each do |key, value|
+ ENV[key] = value
+ end
+ end
+
+ # finally, get the variable we requested
+ return ENV[value_to_get] unless value_to_get.nil?
+
+ # if no variable is requested, just return an empty string
+ return ""
+ end
+
+ def self.author
+ "taquitos"
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :set,
+ env_name: 'FL_ENVIRONMENT_VARIABLE_SET',
+ description: 'Set the environment variables named',
+ optional: true,
+ type: Hash),
+ FastlaneCore::ConfigItem.new(key: :get,
+ env_name: 'FL_ENVIRONMENT_VARIABLE_GET',
+ description: 'Get the environment variable named',
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :remove,
+ env_name: 'FL_ENVIRONMENT_VARIABLE_REMOVE',
+ description: 'Remove the environment variable named',
+ optional: true)
+ ]
+ end
+
+ def self.description
+ "Sets/gets env vars for Fastlane.swift. Don't use in Ruby, use `ENV[key] = val`"
+ end
+
+ def self.step_text
+ nil
+ end
+
+ def self.category
+ :misc
+ end
+
+ def self.return_type
+ :string
+ end
+
+ def self.is_supported?(platform)
+ true
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/erb.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/erb.rb
new file mode 100644
index 0000000..3dc7549
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/erb.rb
@@ -0,0 +1,93 @@
+module Fastlane
+ module Actions
+ class ErbAction < Action
+ def self.run(params)
+ template = File.read(params[:template])
+ trim_mode = params[:trim_mode]
+
+ result = Fastlane::ErbTemplateHelper.render(template, params[:placeholders], trim_mode)
+
+ File.open(params[:destination], 'w') { |file| file.write(result) } if params[:destination]
+ UI.message("Successfully parsed template: '#{params[:template]}' and rendered output to: #{params[:destination]}") if params[:destination]
+ result
+ end
+
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Allows you to generate output files based on ERB templates"
+ end
+
+ def self.details
+ [
+ "Renders an ERB template with `:placeholders` given as a hash parameter.",
+ "If no `:destination` is set, it returns the rendered template as a string."
+ ].join("\n") + end + + def self.available_options + [ + + FastlaneCore::ConfigItem.new(key: :template, + short_option: "-T", + env_name: "FL_ERB_SRC", + description: "ERB Template File", + optional: false), + FastlaneCore::ConfigItem.new(key: :destination, + short_option: "-D", + env_name: "FL_ERB_DST", + description: "Destination file", + optional: true), + FastlaneCore::ConfigItem.new(key: :placeholders, + short_option: "-p", + env_name: "FL_ERB_PLACEHOLDERS", + description: "Placeholders given as a hash", + default_value: {}, + type: Hash), + FastlaneCore::ConfigItem.new(key: :trim_mode, + short_option: "-t", + env_name: "FL_ERB_TRIM_MODE", + description: "Trim mode applied to the ERB", + optional: true) + + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.example_code + [ + '# Example `erb` template: + + # Variable1 <%= var1 %> + # Variable2 <%= var2 %> + # <% for item in var3 %> + # <%= item %> + # <% end %> + + erb( + template: "1.erb", + destination: "/tmp/rendered.out", + placeholders: { + :var1 => 123, + :var2 => "string", + :var3 => ["element1", "element2"] + } + )' + ] + end + + def self.category + :misc + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/fastlane_version.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/fastlane_version.rb new file mode 100644 index 0000000..14861f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/fastlane_version.rb @@ -0,0 +1,15 @@ +require "fastlane/actions/min_fastlane_version" + +module Fastlane + module Actions + class FastlaneVersionAction < MinFastlaneVersionAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `min_fastlane_version` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/flock.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/flock.rb new file mode 100644 index 0000000..2774a92 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/flock.rb @@ -0,0 +1,82 @@ +module Fastlane + module Actions + class FlockAction < Action + BASE_URL = 'https://api.flock.co/hooks/sendMessage'.freeze + + def self.run(options) + require 'net/http' + require 'uri' + require 'json' + + notify_incoming_message_webhook(options[:base_url], options[:message], options[:token]) + end + + def self.notify_incoming_message_webhook(base_url, message, token) + uri = URI.join(base_url + '/', token) + response = Net::HTTP.start( + uri.host, uri.port, use_ssl: uri.scheme == 'https' + ) do |http| + request = Net::HTTP::Post.new(uri.path) + request.content_type = 'application/json' + request.body = JSON.generate("text" => message) + http.request(request) + end + if response.kind_of?(Net::HTTPSuccess) + UI.success('Message sent to Flock.') + else + UI.error("HTTP request to '#{uri}' with message '#{message}' failed with a #{response.code} response.") + UI.user_error!('Error sending message to Flock. 
Please verify the Flock webhook token.') + end + end + + def self.description + "Send a message to a [Flock](https://flock.com/) group" + end + + def self.details + "To obtain the token, create a new [incoming message webhook](https://dev.flock.co/wiki/display/FlockAPI/Incoming+Webhooks) in your Flock admin panel." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: 'FL_FLOCK_MESSAGE', + description: 'Message text'), + FastlaneCore::ConfigItem.new(key: :token, + env_name: 'FL_FLOCK_TOKEN', + sensitive: true, + description: 'Token for the Flock incoming webhook'), + FastlaneCore::ConfigItem.new(key: :base_url, + env_name: 'FL_FLOCK_BASE_URL', + description: 'Base URL of the Flock incoming message webhook', + optional: true, + default_value: BASE_URL, + verify_block: proc do |value| + UI.user_error!('Invalid https URL') unless value.start_with?('https://') + end) + ] + end + + def self.author + "Manav" + end + + def self.example_code + [ + 'flock( + message: "Hello", + token: "xxx" + )' + ] + end + + def self.category + :notifications + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frame_screenshots.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frame_screenshots.rb new file mode 100644 index 0000000..fcfecf4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frame_screenshots.rb @@ -0,0 +1,64 @@ +module Fastlane + module Actions + class FrameScreenshotsAction < Action + def self.run(config) + return if Helper.test? + + require 'frameit' + + UI.message("Framing screenshots at path #{config[:path]} (via frameit)") + + Dir.chdir(config[:path]) do + Frameit.config = config + Frameit::Runner.new.run('.') + end + end + + def self.description + "Adds device frames around all screenshots (via _frameit_)" + end + + def self.details + [ + "Uses [frameit](https://docs.fastlane.tools/actions/frameit/) to prepare perfect screenshots for the App Store, your website, QA or emails.", + "You can add background and titles to the framed screenshots as well." 
+ ].join("\n")
+ end
+
+ def self.available_options
+ require "frameit"
+ require "frameit/options"
+ FastlaneCore::CommanderGenerator.new.generate(Frameit::Options.available_options) + [
+ FastlaneCore::ConfigItem.new(key: :path,
+ env_name: "FRAMEIT_SCREENSHOTS_PATH",
+ description: "The path to the directory containing the screenshots",
+ default_value: Actions.lane_context[SharedValues::SNAPSHOT_SCREENSHOTS_PATH] || FastlaneCore::FastlaneFolder.path,
+ default_value_dynamic: true)
+ ]
+ end
+
+ def self.author
+ "KrauseFx"
+ end
+
+ def self.example_code
+ [
+ 'frame_screenshots',
+ 'frameit # alias for "frame_screenshots"',
+ 'frame_screenshots(use_platform: "ANDROID")',
+ 'frame_screenshots(silver: true)',
+ 'frame_screenshots(path: "/screenshots")',
+ 'frame_screenshots(rose_gold: true)'
+ ]
+ end
+
+ def self.category
+ :screenshots
+ end
+
+ def self.is_supported?(platform)
+ [:ios, :mac, :android].include?(platform)
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frameit.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frameit.rb
new file mode 100644
index 0000000..52d6d1a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/frameit.rb
@@ -0,0 +1,14 @@
+module Fastlane
+ module Actions
+ require 'fastlane/actions/frame_screenshots'
+ class FrameitAction < FrameScreenshotsAction
+ #####################################################
+ # @!group Documentation
+ #####################################################
+
+ def self.description
+ "Alias for the `frame_screenshots` action"
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gcovr.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gcovr.rb
new file mode 100644
index 0000000..3997702
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gcovr.rb
@@ -0,0 +1,163 @@
+module Fastlane
+ module Actions
+ # --object-directory=OBJDIR Specify the directory that contains the gcov data files.
+ # -o OUTPUT, --output=OUTPUT Print output to this filename.
+ # -k, --keep Keep the temporary *.gcov files generated by gcov.
+ # -d, --delete Delete the coverage files after they are processed.
+ # -f FILTER, --filter=FILTER Keep only the data files that match this regular expression
+ # -e EXCLUDE, --exclude=EXCLUDE Exclude data files that match this regular expression
+ # --gcov-filter=GCOV_FILTER Keep only gcov data files that match this regular expression
+ # --gcov-exclude=GCOV_EXCLUDE Exclude gcov data files that match this regular expression
+ # -r ROOT, --root=ROOT Defines the root directory for source files.
+ # -x, --xml Generate XML instead of the normal tabular output.
+ # --xml-pretty Generate pretty XML instead of the normal dense format.
+ # --html Generate HTML instead of the normal tabular output.
+ # --html-details Generate HTML output for source file coverage.
+ # --html-absolute-paths Set the paths in the HTML report to be absolute instead of relative
+ # -b, --branches Tabulate the branch coverage instead of the line coverage.
+ # -u, --sort-uncovered Sort entries by increasing number of uncovered lines.
+ # -p, --sort-percentage Sort entries by decreasing percentage of covered lines.
+ # --gcov-executable=GCOV_CMD Defines the name/path to the gcov executable [defaults to the GCOV environment variable, if present; else 'gcov']. + # --exclude-unreachable-branches Exclude from coverage branches which are marked to be excluded by LCOV/GCOV markers or are determined to be from lines containing only compiler-generated "dead" code. + # -g, --use-gcov-files Use preprocessed gcov files for analysis. + # -s, --print-summary Prints a small report to stdout with line & branch percentage coverage + + class GcovrAction < Action + ARGS_MAP = { + object_directory: "--object-directory", + output: "-o", + keep: "-k", + delete: "-d", + filter: "-f", + exclude: "-e", + gcov_filter: "--gcov-filter", + gcov_exclude: "--gcov-exclude", + root: "-r", + xml: "-x", + xml_pretty: "--xml-pretty", + html: "--html", + html_details: "--html-details", + html_absolute_paths: "--html-absolute-paths", + branches: "-b", + sort_uncovered: "-u", + sort_percentage: "-p", + gcov_executable: "--gcov-executable", + exclude_unreachable_branches: "--exclude-unreachable-branches", + use_gcov_files: "-g", + print_summary: "-s" + } + + def self.is_supported?(platform) + platform == :ios + end + + def self.run(params) + unless Helper.test? + UI.user_error!("gcovr not installed") if `which gcovr`.length == 0 + end + + # The args we will build with + gcovr_args = nil + + # Allows for a whole variety of configurations + if params.kind_of?(Hash) + params_hash = params + + # Check if an output path was given + if params_hash.key?(:output) + create_output_dir_if_not_exists(params_hash[:output]) + end + + # Maps parameter hash to CLI args + gcovr_args = params_hash_to_cli_args(params_hash) + else + gcovr_args = params + end + + # Joins args into space delimited string + gcovr_args = gcovr_args.join(" ") + + command = "gcovr #{gcovr_args}" + UI.success("Generating code coverage.") + UI.verbose(command) + Actions.sh(command) + end + + def self.create_output_dir_if_not_exists(output_path) + output_dir = File.dirname(output_path) + + # If the output directory doesn't exist, create it + unless Dir.exist?(output_dir) + FileUtils.mkpath(output_dir) + end + end + + def self.params_hash_to_cli_args(params) + # Remove nil value params + params = params.delete_if { |_, v| v.nil? } + + # Maps nice developer param names to CLI arguments + params.map do |k, v| + v ||= "" + args = ARGS_MAP[k] + if args + value = (v != true && v.to_s.length > 0 ? 
"\"#{v}\"" : "") + "#{args} #{value}".strip + end + end.compact + end + + def self.description + "Runs test coverage reports for your Xcode project" + end + + def self.available_options + [ + ['object_directory', 'Specify the directory that contains the gcov data files.'], + ['output', 'Print output to this filename Keep the temporary *.gcov files generated by gcov.'], + ['keep', 'Keep the temporary *.gcov files generated by gcov.'], + ['delete', 'Delete the coverage files after they are processed.'], + ['filter', 'Keep only the data files that match this regular expression'], + ['exclude', 'Exclude data files that match this regular expression'], + ['gcov_filter', 'Keep only gcov data files that match this regular expression'], + ['gcov_exclude', 'Exclude gcov data files that match this regular expression'], + ['root', 'Defines the root directory for source files.'], + ['xml', 'Generate XML instead of the normal tabular output.'], + ['xml_pretty', 'Generate pretty XML instead of the normal dense format.'], + ['html', 'Generate HTML instead of the normal tabular output.'], + ['html_details', 'Generate HTML output for source file coverage.'], + ['html_absolute_paths', 'Set the paths in the HTML report to be absolute instead of relative'], + ['branches', 'Tabulate the branch coverage instead of the line coverage.'], + ['sort_uncovered', 'Sort entries by increasing number of uncovered lines.'], + ['sort_percentage', 'Sort entries by decreasing percentage of covered lines.'], + ['gcov_executable', 'Defines the name/path to the gcov executable].'], + ['exclude_unreachable_branches', 'Exclude from coverage branches which are marked to be excluded by LCOV/GCOV markers'], + ['use_gcov_files', 'Use preprocessed gcov files for analysis.'], + ['print_summary', 'Prints a small report to stdout with line & branch percentage coverage'] + ] + end + + def self.author + "dtrenz" + end + + def self.example_code + [ + 'gcovr( + html: true, + html_details: true, + output: "./code-coverage/report.html" + )' + ] + end + + def self.details + "Generate summarized code coverage reports using [gcovr](http://gcovr.com/)" + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number.rb new file mode 100644 index 0000000..e5c7783 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number.rb @@ -0,0 +1,106 @@ +module Fastlane + module Actions + module SharedValues + BUILD_NUMBER ||= :BUILD_NUMBER # originally defined in IncrementBuildNumberAction + end + + class GetBuildNumberAction < Action + require 'shellwords' + + def self.run(params) + # More information about how to set up your project and how it works: + # https://developer.apple.com/library/ios/qa/qa1827/_index.html + + folder = params[:xcodeproj] ? File.join(params[:xcodeproj], '..') : '.' + + command_prefix = [ + 'cd', + File.expand_path(folder).shellescape, + '&&' + ].join(' ') + + command = [ + command_prefix, + 'agvtool', + 'what-version', + '-terse' + ].join(' ') + + if Helper.test? 
+ Actions.lane_context[SharedValues::BUILD_NUMBER] = command + else + build_number = Actions.sh(command).split("\n").last.strip + + # Store the number in the shared hash + Actions.lane_context[SharedValues::BUILD_NUMBER] = build_number + end + return build_number + rescue => ex + return false if params[:hide_error_when_versioning_disabled] + UI.error('Before being able to increment and read the version number from your Xcode project, you first need to setup your project properly. Please follow the guide at https://developer.apple.com/library/content/qa/qa1827/_index.html') + raise ex + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Get the build number of your project" + end + + def self.details + [ + "This action will return the current build number set on your project.", + "You first have to set up your Xcode project, if you haven't done it already: [https://developer.apple.com/library/ios/qa/qa1827/_index.html](https://developer.apple.com/library/ios/qa/qa1827/_index.html)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_BUILD_NUMBER_PROJECT", + description: "optional, you must specify the path to your main Xcode project if it is not in the project root directory", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") if !File.exist?(value) && !Helper.test? + end), + FastlaneCore::ConfigItem.new(key: :hide_error_when_versioning_disabled, + env_name: "FL_BUILD_NUMBER_HIDE_ERROR_WHEN_VERSIONING_DISABLED", + description: "Used during `fastlane init` to hide the error message", + default_value: false, + type: Boolean) + ] + end + + def self.output + [ + ['BUILD_NUMBER', 'The build number'] + ] + end + + def self.authors + ["Liquidsoul"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'build_number = get_build_number(xcodeproj: "Project.xcodeproj")' + ] + end + + def self.return_type + :string + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number_repository.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number_repository.rb new file mode 100644 index 0000000..954833f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_build_number_repository.rb @@ -0,0 +1,120 @@ +module Fastlane + module Actions + module SharedValues + BUILD_NUMBER_REPOSITORY = :BUILD_NUMBER_REPOSITORY + end + + class GetBuildNumberRepositoryAction < Action + def self.is_svn? + Actions.sh('svn info') + return true + rescue + return false + end + + def self.is_git? + Actions.sh('git rev-parse HEAD') + return true + rescue + return false + end + + def self.is_git_svn? + Actions.sh('git svn info') + return true + rescue + return false + end + + def self.is_hg? + Actions.sh('hg status') + return true + rescue + return false + end + + def self.command(use_hg_revision_number) + if is_svn? + UI.message("Detected repo: svn") + return 'svn info | grep Revision | egrep -o "[0-9]+"' + elsif is_git_svn? + UI.message("Detected repo: git-svn") + return 'git svn info | grep Revision | egrep -o "[0-9]+"' + elsif is_git? 
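
`get_build_number` simply shells out to `agvtool what-version -terse` next to the project, so it only works once apple-generic versioning is set up. A minimal sketch with a placeholder project path:

```ruby
# Sketch: read the current build number and log it; the value is also
# mirrored into lane_context under SharedValues::BUILD_NUMBER.
lane :show_build do
  build = get_build_number(xcodeproj: "Project.xcodeproj")
  UI.message("Current build number: #{build}")
  UI.message("From lane_context: #{lane_context[SharedValues::BUILD_NUMBER]}")
end
```
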
+ UI.message("Detected repo: git") + return 'git rev-parse --short HEAD' + elsif is_hg? + UI.message("Detected repo: hg") + if use_hg_revision_number + return 'hg parent --template {rev}' + else + return 'hg parent --template "{node|short}"' + end + else + UI.user_error!("No repository detected") + end + end + + def self.run(params) + build_number = Action.sh(command(params[:use_hg_revision_number])).strip + Actions.lane_context[SharedValues::BUILD_NUMBER_REPOSITORY] = build_number + return build_number + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Get the build number from the current repository" + end + + def self.details + [ + "This action will get the **build number** according to what the SCM HEAD reports.", + "Currently supported SCMs are svn (uses root revision), git-svn (uses svn revision), git (uses short hash) and mercurial (uses short hash or revision number).", + "There is an option, `:use_hg_revision_number`, which allows to use mercurial revision number instead of hash." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_hg_revision_number, + env_name: "USE_HG_REVISION_NUMBER", + description: "Use hg revision number instead of hash (ignored for non-hg repos)", + optional: true, + type: Boolean, + default_value: false) + ] + end + + def self.output + [ + ['BUILD_NUMBER_REPOSITORY', 'The build number from the current repository'] + ] + end + + def self.return_value + "The build number from the current repository" + end + + def self.authors + ["bartoszj", "pbrooks", "armadsen"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'get_build_number_repository' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_certificates.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_certificates.rb new file mode 100644 index 0000000..3e4a2c1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_certificates.rb @@ -0,0 +1,81 @@ +module Fastlane + module Actions + module SharedValues + CERT_FILE_PATH = :CERT_FILE_PATH + CERT_CERTIFICATE_ID = :CERT_CERTIFICATE_ID + end + + class GetCertificatesAction < Action + def self.run(params) + require 'cert' + + return if Helper.test? 
+ + begin + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless params[:api_key_path] + params[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + Cert.config = params # we already have the finished config + + Cert::Runner.new.launch + cert_file_path = ENV["CER_FILE_PATH"] + certificate_id = ENV["CER_CERTIFICATE_ID"] + Actions.lane_context[SharedValues::CERT_FILE_PATH] = cert_file_path + Actions.lane_context[SharedValues::CERT_CERTIFICATE_ID] = certificate_id + + UI.success("Use signing certificate '#{certificate_id}' from now on!") + + ENV["SIGH_CERTIFICATE_ID"] = certificate_id # for further use in the sigh action + end + end + + def self.description + "Create new iOS code signing certificates (via _cert_)" + end + + def self.details + [ + "**Important**: It is recommended to use [match](https://docs.fastlane.tools/actions/match/) according to the [codesigning.guide](https://codesigning.guide) for generating and maintaining your certificates. Use _cert_ directly only if you want full control over what's going on and know more about codesigning.", + "Use this action to download the latest code signing identity." + ].join("\n") + end + + def self.available_options + require 'cert' + Cert::Options.available_options + end + + def self.output + [ + ['CERT_FILE_PATH', 'The path to the certificate'], + ['CERT_CERTIFICATE_ID', 'The id of the certificate'] + ] + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'get_certificates', + 'cert # alias for "get_certificates"', + 'get_certificates( + development: true, + username: "user@email.com" + )' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_github_release.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_github_release.rb new file mode 100644 index 0000000..fd454c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_github_release.rb @@ -0,0 +1,176 @@ +module Fastlane + module Actions + module SharedValues + GET_GITHUB_RELEASE_INFO = :GET_GITHUB_RELEASE_INFO + end + + class GetGithubReleaseAction < Action + def self.run(params) + UI.message("Getting release on GitHub (#{params[:server_url]}/#{params[:url]}: #{params[:version]})") + + GithubApiAction.run( + server_url: params[:server_url], + api_token: params[:api_token], + api_bearer: params[:api_bearer], + http_method: 'GET', + path: "repos/#{params[:url]}/releases", + error_handlers: { + 404 => proc do |result| + UI.error("Repository #{params[:url]} cannot be found, please double check its name and that you provided a valid API token (if it's a private repository).") + return nil + end, + 401 => proc do |result| + UI.error("You are not authorized to access #{params[:url]}, please make sure you provided a valid API token.") + return nil + end, + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}:#{result[:body]}") + return nil + end + } + ) do |result| + json = result[:json] + json.each do |current| + next unless current['tag_name'] == params[:version] + + # Found it + Actions.lane_context[SharedValues::GET_GITHUB_RELEASE_INFO] = current + UI.message("Version is already live on GitHub.com 🚁") + return current + end + end + + UI.important("Couldn't find GitHub release #{params[:version]}") + return nil + end +
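
As the `run` method above shows, `get_certificates` picks up an App Store Connect API key from `lane_context` when `:api_key_path` is not set. A sketch of pairing it with `app_store_connect_api_key`; all key values are placeholders:

```ruby
# Sketch: create the API key first so get_certificates can reuse it via
# SharedValues::APP_STORE_CONNECT_API_KEY.
lane :certs do
  app_store_connect_api_key(
    key_id: "D383SF739",
    issuer_id: "6053b7fe-68a8-4acb-89be-165aa6465141",
    key_filepath: "./AuthKey_D383SF739.p8"
  )
  get_certificates(development: true)
end
```
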
+ ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "This will verify if a given release version is available on GitHub" + end + + def self.details + sample = <<-SAMPLE.markdown_sample + ```no-highlight + { + "url"=>"https://api.github.com/repos/KrauseFx/fastlane/releases/1537713", + "assets_url"=>"https://api.github.com/repos/KrauseFx/fastlane/releases/1537713/assets", + "upload_url"=>"https://uploads.github.com/repos/KrauseFx/fastlane/releases/1537713/assets{?name}", + "html_url"=>"https://github.com/fastlane/fastlane/releases/tag/1.8.0", + "id"=>1537713, + "tag_name"=>"1.8.0", + "target_commitish"=>"master", + "name"=>"1.8.0 Switch Lanes & Pass Parameters", + "draft"=>false, + "author"=> + {"login"=>"KrauseFx", + "id"=>869950, + "avatar_url"=>"https://avatars.githubusercontent.com/u/869950?v=3", + "gravatar_id"=>"", + "url"=>"https://api.github.com/users/KrauseFx", + "html_url"=>"https://github.com/fastlane", + "followers_url"=>"https://api.github.com/users/KrauseFx/followers", + "following_url"=>"https://api.github.com/users/KrauseFx/following{/other_user}", + "gists_url"=>"https://api.github.com/users/KrauseFx/gists{/gist_id}", + "starred_url"=>"https://api.github.com/users/KrauseFx/starred{/owner}{/repo}", + "subscriptions_url"=>"https://api.github.com/users/KrauseFx/subscriptions", + "organizations_url"=>"https://api.github.com/users/KrauseFx/orgs", + "repos_url"=>"https://api.github.com/users/KrauseFx/repos", + "events_url"=>"https://api.github.com/users/KrauseFx/events{/privacy}", + "received_events_url"=>"https://api.github.com/users/KrauseFx/received_events", + "type"=>"User", + "site_admin"=>false}, + "prerelease"=>false, + "created_at"=>"2015-07-14T23:33:01Z", + "published_at"=>"2015-07-14T23:44:10Z", + "assets"=>[], + "tarball_url"=>"https://api.github.com/repos/KrauseFx/fastlane/tarball/1.8.0", + "zipball_url"=>"https://api.github.com/repos/KrauseFx/fastlane/zipball/1.8.0", + "body"=> ...Markdown... + "This is one of the biggest updates of _fastlane_ yet" + } + ``` + SAMPLE + + [ + "This will return all information about a release. For example:".markdown_preserve_newlines, + sample + ].join("\n") + end + + def self.output + [ + ['GET_GITHUB_RELEASE_INFO', 'Contains all the information about this release'] + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :url, + env_name: "FL_GET_GITHUB_RELEASE_URL", + description: "The path to your repo, e.g. 'KrauseFx/fastlane'", + verify_block: proc do |value| + UI.user_error!("Please only pass the path, e.g. 'KrauseFx/fastlane'") if value.include?("github.com") + UI.user_error!("Please only pass the path, e.g. 'KrauseFx/fastlane'") if value.split('/').count != 2 + end), + FastlaneCore::ConfigItem.new(key: :server_url, + env_name: "FL_GITHUB_RELEASE_SERVER_URL", + description: "The server url. e.g. 'https://your.github.server/api/v3' (Default: 'https://api.github.com')", + default_value: "https://api.github.com", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please include the protocol in the server url, e.g. 
https://your.github.server") unless value.include?("//") + end), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_GET_GITHUB_RELEASE_VERSION", + description: "The version tag of the release to check"), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_GITHUB_RELEASE_API_TOKEN", + sensitive: true, + code_gen_sensitive: true, + default_value: ENV["GITHUB_API_TOKEN"], + default_value_dynamic: true, + description: "GitHub Personal Token (required for private repositories)", + conflicting_options: [:api_bearer], + optional: true), + FastlaneCore::ConfigItem.new(key: :api_bearer, + env_name: "FL_GITHUB_RELEASE_API_BEARER", + sensitive: true, + code_gen_sensitive: true, + description: "Use a Bearer authorization token. Usually generated by Github Apps, e.g. GitHub Actions GITHUB_TOKEN environment variable", + conflicting_options: [:api_token], + optional: true, + default_value: nil) + ] + end + + def self.authors + ["KrauseFx", "czechboy0", "jaleksynas", "tommeier"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'release = get_github_release(url: "fastlane/fastlane", version: "1.0.0") + puts release["name"]' + ] + end + + def self.sample_return_value + { + "name" => "name" + } + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_info_plist_value.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_info_plist_value.rb new file mode 100644 index 0000000..c3cc258 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_info_plist_value.rb @@ -0,0 +1,78 @@ +module Fastlane + module Actions + module SharedValues + GET_INFO_PLIST_VALUE_CUSTOM_VALUE = :GET_INFO_PLIST_VALUE_CUSTOM_VALUE + end + + class GetInfoPlistValueAction < Action + def self.run(params) + require "plist" + + begin + path = File.expand_path(params[:path]) + + plist = File.open(path) { |f| Plist.parse_xml(f) } + + value = plist[params[:key]] + Actions.lane_context[SharedValues::GET_INFO_PLIST_VALUE_CUSTOM_VALUE] = value + + return value + rescue => ex + UI.error(ex) + end + end + + def self.description + "Returns value from Info.plist of your project as native Ruby data structures" + end + + def self.details + "Get a value from a plist file, which can be used to fetch the app identifier and more information about your app" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :key, + env_name: "FL_GET_INFO_PLIST_PARAM_NAME", + description: "Name of parameter", + optional: false), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_GET_INFO_PLIST_PATH", + description: "Path to plist file you want to read", + optional: false, + verify_block: proc do |value| + UI.user_error!("Couldn't find plist file at path '#{value}'") unless File.exist?(value) + end) + ] + end + + def self.output + [ + ['GET_INFO_PLIST_VALUE_CUSTOM_VALUE', 'The value of the last plist file that was parsed'] + ] + end + + def self.authors + ["kohtenko"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'identifier = get_info_plist_value(path: "./Info.plist", key: "CFBundleIdentifier")' + ] + end + + def self.return_type + :string + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_ipa_info_plist_value.rb 
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_ipa_info_plist_value.rb new file mode 100644 index 0000000..95fa2d3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_ipa_info_plist_value.rb @@ -0,0 +1,81 @@ +module Fastlane + module Actions + module SharedValues + GET_IPA_INFO_PLIST_VALUE_CUSTOM_VALUE = :GET_IPA_INFO_PLIST_VALUE_CUSTOM_VALUE + end + + class GetIpaInfoPlistValueAction < Action + def self.run(params) + ipa = File.expand_path(params[:ipa]) + key = params[:key] + plist = FastlaneCore::IpaFileAnalyser.fetch_info_plist_file(ipa) + value = plist[key] + + Actions.lane_context[SharedValues::GET_IPA_INFO_PLIST_VALUE_CUSTOM_VALUE] = value + + return value + rescue => ex + UI.error(ex) + UI.error("Unable to find plist file at '#{ipa}'") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Returns a value from Info.plist inside a .ipa file" + end + + def self.details + "This is useful for introspecting Info.plist files for `.ipa` files that have already been built." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :key, + env_name: "FL_GET_IPA_INFO_PLIST_VALUE_KEY", + description: "Name of parameter", + optional: false), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "FL_GET_IPA_INFO_PLIST_VALUE_IPA", + description: "Path to IPA", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true) + ] + end + + def self.output + [ + ['GET_IPA_INFO_PLIST_VALUE_CUSTOM_VALUE', 'The value of the last plist file that was parsed'] + ] + end + + def self.return_value + "Returns the value in the .ipa's Info.plist corresponding to the passed in Key" + end + + def self.authors + ["johnboiles"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'get_ipa_info_plist_value(ipa: "path.ipa", key: "KEY_YOU_READ")' + ] + end + + def self.return_type + :string + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_managed_play_store_publishing_rights.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_managed_play_store_publishing_rights.rb new file mode 100644 index 0000000..1549f5e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_managed_play_store_publishing_rights.rb @@ -0,0 +1,117 @@ +module Fastlane + module Actions + class GetManagedPlayStorePublishingRightsAction < Action + def self.run(params) + unless params[:json_key] || params[:json_key_data] + UI.important("To not be asked about this value, you can specify it using 'json_key'") + json_key_path = UI.input("The service account json file used to authenticate with Google: ") + json_key_path = File.expand_path(json_key_path) + + UI.user_error!("Could not find service account json file at path '#{json_key_path}'") unless File.exist?(json_key_path) + params[:json_key] = json_key_path + end + + FastlaneCore::PrintTable.print_values( + config: params, + mask_keys: [:json_key_data], + title: "Summary for get_managed_play_store_publishing_rights" + ) + + if (keyfile = params[:json_key]) + json_key_data = File.open(keyfile, 'rb').read + else + json_key_data = params[:json_key_data] + end + + # Login + credentials = 
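
Because `:ipa` defaults to `SharedValues::IPA_OUTPUT_PATH`, `get_ipa_info_plist_value` slots naturally after a build step. A sketch with a placeholder scheme and bundle identifier:

```ruby
# Sketch: sanity-check the freshly built IPA's bundle identifier.
# build_app populates IPA_OUTPUT_PATH, so :ipa can be omitted here.
lane :verify_ipa do
  build_app(scheme: "MyScheme")
  bundle_id = get_ipa_info_plist_value(key: "CFBundleIdentifier")
  UI.user_error!("Unexpected bundle id: #{bundle_id}") unless bundle_id == "com.example.app"
end
```
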
JSON.parse(json_key_data) + callback_uri = 'https://fastlane.github.io/managed_google_play-callback/callback.html' + require 'addressable/uri' + continueUrl = Addressable::URI.encode(callback_uri) + uri = "https://play.google.com/apps/publish/delegatePrivateApp?service_account=#{credentials['client_email']}&continueUrl=#{continueUrl}" + + UI.message("To obtain publishing rights for custom apps on Managed Play Store, open the following URL and log in:") + UI.message("") + UI.important(uri) + UI.message("([Cmd/Ctrl] + [Left click] lets you open this URL in many consoles/terminals/shells)") + UI.message("") + UI.message("After successful login you will be redirected to a page which outputs some information that is required for usage of the `create_app_on_managed_play_store` action.") + + return uri + end + + def self.description + "Obtain publishing rights for custom apps on Managed Google Play Store" + end + + def self.authors + ["janpio"] + end + + def self.return_value + "An URI to obtain publishing rights for custom apps on Managed Play Store" + end + + def self.details + [ + 'If you haven\'t done so before, start by following the first two steps of Googles ["Get started with custom app publishing"](https://developers.google.com/android/work/play/custom-app-api/get-started) -> ["Preliminary setup"](https://developers.google.com/android/work/play/custom-app-api/get-started#preliminary_setup) instructions:', + '"[Enable the Google Play Custom App Publishing API](https://developers.google.com/android/work/play/custom-app-api/get-started#enable_the_google_play_custom_app_publishing_api)" and "[Create a service account](https://developers.google.com/android/work/play/custom-app-api/get-started#create_a_service_account)".', + 'You need the "service account\'s private key file" to continue.', + 'Run the action and supply the "private key file" to it as the `json_key` parameter. The command will output a URL to visit. After logging in you are redirected to a page that outputs your "Developer Account ID" - take note of that, you will need it to be able to use [`create_app_on_managed_play_store`](https://docs.fastlane.tools/actions/create_app_on_managed_play_store/).' 
+ ].join("\n") + end + + def self.example_code + [ + 'get_managed_play_store_publishing_rights( + json_key: "path/to/your/json/key/file" + ) + # it is probably easier to execute this action directly in the command line: + # $ fastlane run get_managed_play_store_publishing_rights' + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :json_key, + env_name: "SUPPLY_JSON_KEY", + short_option: "-j", + conflicting_options: [:json_key_data], + optional: true, # optional until it is possible specify either json_key OR json_key_data are required + description: "The path to a file containing service account JSON, used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_file), + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Could not find service account json file at path '#{File.expand_path(value)}'") unless File.exist?(File.expand_path(value)) + UI.user_error!("'#{value}' doesn't seem to be a JSON file") unless FastlaneCore::Helper.json_file?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :json_key_data, + env_name: "SUPPLY_JSON_KEY_DATA", + short_option: "-c", + conflicting_options: [:json_key], + optional: true, + description: "The raw service account JSON data used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_data_raw), + default_value_dynamic: true, + verify_block: proc do |value| + begin + JSON.parse(value) + rescue JSON::ParserError + UI.user_error!("Could not parse service account json: JSON::ParseError") + end + end) + ] + end + + def self.is_supported?(platform) + [:android].include?(platform) + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_provisioning_profile.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_provisioning_profile.rb new file mode 100644 index 0000000..3507f94 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_provisioning_profile.rb @@ -0,0 +1,109 @@ +module Fastlane + module Actions + module SharedValues + SIGH_PROFILE_PATH = :SIGH_PROFILE_PATH + SIGH_PROFILE_PATHS = :SIGH_PROFILE_PATHS + SIGH_UDID = :SIGH_UDID # deprecated + SIGH_UUID = :SIGH_UUID + SIGH_NAME = :SIGH_NAME + SIGH_PROFILE_TYPE ||= :SIGH_PROFILE_TYPE + end + + class GetProvisioningProfileAction < Action + def self.run(values) + require 'sigh' + require 'credentials_manager/appfile_config' + + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless values[:api_key_path] + values[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + Sigh.config = values # we already have the finished config + + path = Sigh::Manager.start + + Actions.lane_context[SharedValues::SIGH_PROFILE_PATH] = path # absolute path + Actions.lane_context[SharedValues::SIGH_PROFILE_PATHS] ||= [] + Actions.lane_context[SharedValues::SIGH_PROFILE_PATHS] << path + + uuid = ENV["SIGH_UUID"] || ENV["SIGH_UDID"] # the UUID of the profile + name = ENV["SIGH_NAME"] # the name of the profile + Actions.lane_context[SharedValues::SIGH_UUID] = Actions.lane_context[SharedValues::SIGH_UDID] = uuid if uuid + Actions.lane_context[SharedValues::SIGH_NAME] = Actions.lane_context[SharedValues::SIGH_NAME] = name if name + + set_profile_type(values, 
ENV["SIGH_PROFILE_ENTERPRISE"]) + + return uuid # returs uuid of profile + end + + def self.set_profile_type(values, enterprise) + profile_type = "app-store" + profile_type = "ad-hoc" if values[:adhoc] + profile_type = "development" if values[:development] + profile_type = "developer-id" if values[:developer_id] + profile_type = "enterprise" if enterprise + + UI.message("Setting Provisioning Profile type to '#{profile_type}'") + + Actions.lane_context[SharedValues::SIGH_PROFILE_TYPE] = profile_type + end + + def self.description + "Generates a provisioning profile, saving it in the current folder (via _sigh_)" + end + + def self.author + "KrauseFx" + end + + # rubocop:disable Lint/MissingKeysOnSharedArea + def self.output + [ + ['SIGH_PROFILE_PATH', 'A path in which certificates, key and profile are exported'], + ['SIGH_PROFILE_PATHS', 'Paths in which certificates, key and profile are exported'], + ['SIGH_UUID', 'UUID (Universally Unique IDentifier) of a provisioning profile'], + ['SIGH_NAME', 'The name of the profile'], + ['SIGH_PROFILE_TYPE', 'The profile type, can be app-store, ad-hoc, development, enterprise, developer-id, can be used in `build_app` as a default value for `export_method`'] + ] + end + + def self.return_value + "The UUID of the profile sigh just fetched/generated" + end + + def self.return_type + :string + end + + def self.details + "**Note**: It is recommended to use [match](https://docs.fastlane.tools/actions/match/) according to the [codesigning.guide](https://codesigning.guide) for generating and maintaining your provisioning profiles. Use _sigh_ directly only if you want full control over what's going on and know more about codesigning." + end + + def self.available_options + require 'sigh' + Sigh::Options.available_options + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'get_provisioning_profile', + 'sigh # alias for "get_provisioning_profile"', + 'get_provisioning_profile( + adhoc: true, + force: true, + filename: "myFile.mobileprovision" + )' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_push_certificate.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_push_certificate.rb new file mode 100644 index 0000000..a705f80 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_push_certificate.rb @@ -0,0 +1,87 @@ +module Fastlane + module Actions + class GetPushCertificateAction < Action + def self.run(params) + require 'pem' + require 'pem/options' + require 'pem/manager' + + success_block = params[:new_profile] + + PEM.config = params + + if Helper.test? + profile_path = './test.pem' + else + profile_path = PEM::Manager.start + end + + if success_block && profile_path + success_block.call(File.expand_path(profile_path)) if success_block + end + end + + def self.description + "Ensure a valid push profile is active, creating a new one if needed (via _pem_)" + end + + def self.author + "KrauseFx" + end + + def self.details + sample = <<-SAMPLE.markdown_sample + ```ruby + get_push_certificate( + new_profile: proc do + # your upload code + end + ) + ``` + SAMPLE + + [ + "Additionally to the available options, you can also specify a block that only gets executed if a new profile was created. 
You can use it to upload the new profile to your server.", + "Use it like this:".markdown_preserve_newlines, + sample + ].join("\n") + end + + def self.available_options + require 'pem' + require 'pem/options' + + @options = PEM::Options.available_options + @options << FastlaneCore::ConfigItem.new(key: :new_profile, + description: "Block that is called if there is a new profile", + optional: true, + type: :string_callback) + @options + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'get_push_certificate', + 'pem # alias for "get_push_certificate"', + 'get_push_certificate( + force: true, # create a new profile, even if the old one is still valid + app_identifier: "net.sunapps.9", # optional app identifier, + save_private_key: true, + new_profile: proc do |profile_path| # this block gets called when a new profile was generated + puts profile_path # the absolute path to the new PEM file + # insert the code to upload the PEM file to the server + end + )' + ] + end + + def self.category + :push + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_version_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_version_number.rb new file mode 100644 index 0000000..79f2067 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/get_version_number.rb @@ -0,0 +1,212 @@ +module Fastlane + module Actions + module SharedValues + VERSION_NUMBER ||= :VERSION_NUMBER # originally defined in IncrementVersionNumberAction + end + + class GetVersionNumberAction < Action + require 'shellwords' + + def self.run(params) + xcodeproj_path_or_dir = params[:xcodeproj] || '.' + xcodeproj_dir = File.extname(xcodeproj_path_or_dir) == ".xcodeproj" ? File.dirname(xcodeproj_path_or_dir) : xcodeproj_path_or_dir + target_name = params[:target] + configuration = params[:configuration] + + # Get version_number + project = get_project!(xcodeproj_path_or_dir) + target = get_target!(project, target_name) + plist_file = get_plist!(xcodeproj_dir, target, configuration) + version_number = get_version_number_from_plist!(plist_file) + + # Get from build settings (or project settings) if needed (ex: $(MARKETING_VERSION) is default in Xcode 11) + if version_number =~ /\$\(([\w\-]+)\)/ + version_number = get_version_number_from_build_settings!(target, $1, configuration) || get_version_number_from_build_settings!(project, $1, configuration) + + # ${MARKETING_VERSION} also works + elsif version_number =~ /\$\{([\w\-]+)\}/ + version_number = get_version_number_from_build_settings!(target, $1, configuration) || get_version_number_from_build_settings!(project, $1, configuration) + end + + # Error out if version_number is not set + if version_number.nil? 
+ UI.user_error!("Unable to find Xcode build setting: #{$1}") + end + + # Store the number in the shared hash + Actions.lane_context[SharedValues::VERSION_NUMBER] = version_number + + # Return the version number because Swift might need this return value + return version_number + end + + def self.get_project!(xcodeproj_path_or_dir) + require 'xcodeproj' + if File.extname(xcodeproj_path_or_dir) == ".xcodeproj" + project_path = xcodeproj_path_or_dir + else + project_path = Dir.glob("#{xcodeproj_path_or_dir}/*.xcodeproj").first + end + + if project_path && File.exist?(project_path) + return Xcodeproj::Project.open(project_path) + else + UI.user_error!("Unable to find Xcode project at #{project_path || xcodeproj_path_or_dir}") + end + end + + def self.get_target!(project, target_name) + targets = project.targets + + # Prompt targets if no name + unless target_name + + # Gets non-test targets + non_test_targets = targets.reject do |t| + # Not all targets respond to `test_target_type?` + t.respond_to?(:test_target_type?) && t.test_target_type? + end + + # Returns if only one non-test target + if non_test_targets.count == 1 + return targets.first + end + + options = targets.map(&:name) + target_name = UI.select("What target would you like to use?", options) + end + + # Find target + target = targets.find do |t| + t.name == target_name + end + UI.user_error!("Cannot find target named '#{target_name}'") unless target + + target + end + + def self.get_version_number_from_build_settings!(target, variable, configuration = nil) + target.build_configurations.each do |config| + if configuration.nil? || config.name == configuration + value = config.resolve_build_setting(variable) + return value if value + end + end + + return nil + end + + def self.get_plist!(folder, target, configuration = nil) + plist_files = target.resolved_build_setting("INFOPLIST_FILE", true) + plist_files_count = plist_files.values.compact.uniq.count + + # Get plist file for specified configuration + # Or: Prompt for configuration if plist has different files in each configurations + # Else: Get first(only) plist value + if configuration + plist_file = plist_files[configuration] + elsif plist_files_count > 1 + options = plist_files.keys + selected = UI.select("What build configuration would you like to use?", options) + plist_file = plist_files[selected] + elsif plist_files_count > 0 + plist_file = plist_files.values.first + else + return nil + end + + # $(SRCROOT) is the path of where the XcodeProject is + # We can just set this as empty string since we join with `folder` below + if plist_file.include?("$(SRCROOT)/") + plist_file.gsub!("$(SRCROOT)/", "") + end + + # plist_file can be `Relative` or `Absolute` path. + # Make to `Absolute` path when plist_file is `Relative` path + unless File.exist?(plist_file) + plist_file = File.absolute_path(File.join(folder, plist_file)) + end + + UI.user_error!("Cannot find plist file: #{plist_file}") unless File.exist?(plist_file) + + plist_file + end + + def self.get_version_number_from_plist!(plist_file) + return '$(MARKETING_VERSION)' if plist_file.nil? + + plist = Xcodeproj::Plist.read_from_path(plist_file) + UI.user_error!("Unable to read plist: #{plist_file}") unless plist + + return '${MARKETING_VERSION}' if plist["CFBundleShortVersionString"].nil? 
+ plist["CFBundleShortVersionString"] + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Get the version number of your project" + end + + def self.details + "This action will return the current version number set on your project. It first looks in the plist and then for '$(MARKETING_VERSION)' in the build settings." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_VERSION_NUMBER_PROJECT", + description: "Path to the Xcode project to read version number from, or its containing directory, optional. If omitted, or if a directory is passed instead, it will use the first Xcode project found within the given directory, or the project root directory if none is passed", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project or its containing directory, not the workspace path") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find file or directory at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("Could not find Xcode project in directory at path '#{File.expand_path(value)}'") if File.extname(value) != ".xcodeproj" && Dir.glob("#{value}/*.xcodeproj").empty? + end), + FastlaneCore::ConfigItem.new(key: :target, + env_name: "FL_VERSION_NUMBER_TARGET", + description: "Target name, optional. Will be needed if you have more than one non-test target to avoid being prompted to select one", + optional: true), + FastlaneCore::ConfigItem.new(key: :configuration, + env_name: "FL_VERSION_NUMBER_CONFIGURATION", + description: "Configuration name, optional. Will be needed if you have altered the configurations from the default or your version number depends on the configuration selected", + optional: true) + ] + end + + def self.output + [ + ['VERSION_NUMBER', 'The version number'] + ] + end + + def self.authors + ["Liquidsoul", "joshdholtz"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'version = get_version_number(xcodeproj: "Project.xcodeproj")', + 'version = get_version_number( + xcodeproj: "Project.xcodeproj", + target: "App" + )' + ] + end + + def self.return_type + :string + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_add.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_add.rb new file mode 100644 index 0000000..d0c92d7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_add.rb @@ -0,0 +1,88 @@ +module Fastlane + module Actions + class GitAddAction < Action + def self.run(params) + should_escape = params[:shell_escape] + + if params[:pathspec] + paths = params[:pathspec] + success_message = "Successfully added from \"#{paths}\" 💾." + elsif params[:path] + paths = params[:path].map do |p| + shell_escape(p, should_escape) + end.join(' ') + success_message = "Successfully added \"#{paths}\" 💾." + else + paths = "." + success_message = "Successfully added all files 💾." 
+ end + + result = Actions.sh("git add #{paths}", log: FastlaneCore::Globals.verbose?).chomp + UI.success(success_message) + return result + end + + def self.shell_escape(path, should_escape) + path = path.shellescape if should_escape + path + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Directly add the given file or all files" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + description: "The file(s) and path(s) you want to add", + type: Array, + conflicting_options: [:pathspec], + optional: true), + FastlaneCore::ConfigItem.new(key: :shell_escape, + description: "Shell escapes paths (set to false if using wildcards or manually escaping spaces in :path)", + type: Boolean, + default_value: true, + optional: true), + # Deprecated + FastlaneCore::ConfigItem.new(key: :pathspec, + description: "The pathspec you want to add files from", + conflicting_options: [:path], + optional: true, + deprecated: "Use `--path` instead") + ] + end + + def self.return_value + nil + end + + def self.authors + ["4brunu", "antondomashnev"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'git_add', + 'git_add(path: "./version.txt")', + 'git_add(path: ["./version.txt", "./changelog.txt"])', + 'git_add(path: "./Frameworks/*", shell_escape: false)', + 'git_add(path: ["*.h", "*.m"], shell_escape: false)', + 'git_add(path: "./Frameworks/*", shell_escape: false)', + 'git_add(path: "*.txt", shell_escape: false)' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_branch.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_branch.rb new file mode 100644 index 0000000..1bd8b70 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_branch.rb @@ -0,0 +1,55 @@ +module Fastlane + module Actions + class GitBranchAction < Action + def self.run(params) + branch = Actions.git_branch || "" + return "" if branch == "HEAD" # Backwards compatibility with the original (and documented) implementation + branch + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Returns the name of the current git branch, possibly as managed by CI ENV vars" + end + + def self.details + "If no branch could be found, this action will return an empty string. 
This is a wrapper for the internal action Actions.git_branch" + end + + def self.available_options + [] + end + + def self.output + [ + ['GIT_BRANCH_ENV_VARS', 'The git branch environment variables'] + ] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'git_branch' + ] + end + + def self.return_type + :string + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_commit.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_commit.rb new file mode 100644 index 0000000..844c678 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_commit.rb @@ -0,0 +1,80 @@ +module Fastlane + module Actions + class GitCommitAction < Action + def self.run(params) + paths = params[:path] + skip_git_hooks = params[:skip_git_hooks] ? ['--no-verify'] : [] + + if params[:allow_nothing_to_commit] + # Here we check if the path passed in parameter contains any modification + # and we skip the `git commit` command if there is none. + # That means you can have other files modified that are not in the path parameter + # and still make use of allow_nothing_to_commit. + repo_clean = Actions.sh('git', 'status', *paths, '--porcelain').empty? + UI.success("Nothing to commit, working tree clean ✅.") if repo_clean + return if repo_clean + end + + result = Actions.sh('git', 'commit', '-m', params[:message], *paths, *skip_git_hooks) + UI.success("Successfully committed \"#{params[:path]}\" 💾.") + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Directly commit the given file with the given message" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + description: "The file(s) or directory(ies) you want to commit. You can pass an array of multiple file-paths or fileglobs \"*.txt\" to commit all matching files. 
The files already staged but not specified and untracked files won't be committed", + type: Array), + FastlaneCore::ConfigItem.new(key: :message, + description: "The commit message that should be used"), + FastlaneCore::ConfigItem.new(key: :skip_git_hooks, + description: "Set to true to pass `--no-verify` to git", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :allow_nothing_to_commit, + description: "Set to true to allow commit without any git changes in the files you want to commit", + default_value: false, + type: Boolean, + optional: true) + ] + end + + def self.output + end + + def self.return_value + nil + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'git_commit(path: "./version.txt", message: "Version Bump")', + 'git_commit(path: ["./version.txt", "./changelog.txt"], message: "Version Bump")', + 'git_commit(path: ["./*.txt", "./*.md"], message: "Update documentation")', + 'git_commit(path: ["./*.txt", "./*.md"], message: "Update documentation", skip_git_hooks: true)' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_pull.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_pull.rb new file mode 100644 index 0000000..b2c8f8a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_pull.rb @@ -0,0 +1,58 @@ +module Fastlane + module Actions + class GitPullAction < Action + def self.run(params) + commands = [] + + unless params[:only_tags] + command = "git pull" + command << " --rebase" if params[:rebase] + commands += ["#{command} &&"] + end + + commands += ["git fetch --tags"] + + Actions.sh(commands.join(' ')) + end + + def self.description + "Executes a simple git pull command" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :only_tags, + description: "Simply pull the tags, and not bring new commits to the current branch from the remote", + type: Boolean, + optional: true, + default_value: false), + FastlaneCore::ConfigItem.new(key: :rebase, + description: "Rebase on top of the remote branch instead of merge", + type: Boolean, + optional: true, + default_value: false) + ] + end + + def self.authors + ["KrauseFx", "JaviSoto"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'git_pull', + 'git_pull(only_tags: true) # only the tags, no commits', + 'git_pull(rebase: true) # use --rebase with pull' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_remote_branch.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_remote_branch.rb new file mode 100644 index 0000000..d79c387 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_remote_branch.rb @@ -0,0 +1,57 @@ +module Fastlane + module Actions + class GitRemoteBranchAction < Action + def self.run(params) + Actions.git_remote_branch_name(params[:remote_name]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Returns the name of the current git remote default branch" + end + + def self.details + "If no default remote branch could be found, 
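
The actions above (`git_add`, `git_commit`, `git_pull`) chain naturally into a single housekeeping lane. A sketch with placeholder file paths:

```ruby
# Sketch: pull with rebase, stage two files, and commit them, tolerating
# a clean tree via allow_nothing_to_commit.
lane :commit_release_notes do
  git_pull(rebase: true)
  git_add(path: ["./CHANGELOG.md", "./version.txt"])
  git_commit(
    path: ["./CHANGELOG.md", "./version.txt"],
    message: "Update release notes",
    allow_nothing_to_commit: true
  )
end
```
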
this action will return nil. This is a wrapper for the internal action Actions.git_default_remote_branch_name" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :remote_name, + env_name: "FL_REMOTE_REPOSITORY_NAME", + description: "The remote repository to check", + optional: true) + ] + end + + def self.output + [] + end + + def self.authors + ["SeanMcNeil"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'git_remote_branch # Query git for first available remote name', + 'git_remote_branch(remote_name:"upstream") # Provide a remote name' + ] + end + + def self.return_type + :string + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_submodule_update.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_submodule_update.rb new file mode 100644 index 0000000..53edd1c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_submodule_update.rb @@ -0,0 +1,60 @@ +module Fastlane + module Actions + class GitSubmoduleUpdateAction < Action + def self.run(params) + commands = ["git submodule update"] + commands += ["--init"] if params[:init] + commands += ["--recursive"] if params[:recursive] + Actions.sh(commands.join(' ')) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Executes a git submodule update command" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :recursive, + description: "Should the submodules be updated recursively?", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :init, + description: "Should the submodules be initiated before update?", + type: Boolean, + default_value: false) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["braunico"] + end + + def self.is_supported?(platform) + return true + end + + def self.example_code + [ + 'git_submodule_update', + 'git_submodule_update(recursive: true)', + 'git_submodule_update(init: true)', + 'git_submodule_update(recursive: true, init: true)' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_tag_exists.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_tag_exists.rb new file mode 100644 index 0000000..cfda136 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/git_tag_exists.rb @@ -0,0 +1,78 @@ +module Fastlane + module Actions + class GitTagExistsAction < Action + def self.run(params) + tag_ref = "refs/tags/#{params[:tag].shellescape}" + if params[:remote] + command = "git ls-remote -q --exit-code #{params[:remote_name].shellescape} #{tag_ref}" + else + command = "git rev-parse -q --verify #{tag_ref}" + end + exists = true + Actions.sh( + command, + log: FastlaneCore::Globals.verbose?, + error_callback: ->(result) { exists = false } + ) + exists + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Checks if the git tag with the given name exists in the current repo" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :tag, + 
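
`git_tag_exists` returns a plain boolean, so it works well as a guard before tagging. A sketch; the default tag value is a placeholder, and `add_git_tag` is a separate fastlane action:

```ruby
# Sketch: only create the tag when it is not already on origin.
# With remote: true this shells out to `git ls-remote`, as shown above.
lane :safe_tag do |options|
  tag = options[:tag] || "1.1.0"
  if git_tag_exists(tag: tag, remote: true, remote_name: "origin")
    UI.important("Tag #{tag} already exists, skipping")
  else
    add_git_tag(tag: tag)
  end
end
```
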
description: "The tag name that should be checked"), + FastlaneCore::ConfigItem.new(key: :remote, + description: "Whether to check remote. Defaults to `false`", + type: Boolean, + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :remote_name, + description: "The remote to check. Defaults to `origin`", + default_value: 'origin', + optional: true) + ] + end + + def self.return_value + "Boolean value whether the tag exists or not" + end + + def self.return_type + :bool + end + + def self.output + [ + ] + end + + def self.authors + ["antondomashnev"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'if git_tag_exists(tag: "1.1.0") + UI.message("Found it 🚀") + end' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/github_api.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/github_api.rb new file mode 100644 index 0000000..c8a742d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/github_api.rb @@ -0,0 +1,271 @@ +module Fastlane + module Actions + module SharedValues + GITHUB_API_STATUS_CODE = :GITHUB_API_STATUS_CODE + GITHUB_API_RESPONSE = :GITHUB_API_RESPONSE + GITHUB_API_JSON = :GITHUB_API_JSON + end + + class GithubApiAction < Action + class << self + def run(params) + require 'json' + + http_method = (params[:http_method] || 'GET').to_s.upcase + url = construct_url(params[:server_url], params[:path], params[:url]) + headers = construct_headers(params[:api_token], params[:api_bearer], params[:headers]) + payload = construct_body(params[:body], params[:raw_body]) + error_handlers = params[:error_handlers] || {} + secure = params[:secure] + + response = call_endpoint( + url, + http_method, + headers, + payload, + secure + ) + + status_code = response[:status] + result = { + status: status_code, + body: response.body || "", + json: parse_json(response.body) || {} + } + + if status_code.between?(200, 299) + UI.verbose("Response:") + UI.verbose(response.body) + UI.verbose("---") + yield(result) if block_given? + else + handled_error = error_handlers[status_code] || error_handlers['*'] + if handled_error + handled_error.call(result) + else + UI.error("---") + UI.error("Request failed:\n#{http_method}: #{url}") + UI.error("Headers:\n#{headers}") + UI.error("---") + UI.error("Response:") + UI.error(response.body) + UI.user_error!("GitHub responded with #{status_code}\n---\n#{response.body}") + end + end + + Actions.lane_context[SharedValues::GITHUB_API_STATUS_CODE] = result[:status] + Actions.lane_context[SharedValues::GITHUB_API_RESPONSE] = result[:body] + Actions.lane_context[SharedValues::GITHUB_API_JSON] = result[:json] + + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def description + "Call a GitHub API endpoint and get the resulting JSON response" + end + + def details + [ + "Calls any GitHub API endpoint. You must provide your GitHub Personal token (get one from [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new)).", + "Out parameters provide the status code and the full response JSON if valid, otherwise the raw response body.", + "Documentation: [https://developer.github.com/v3](https://developer.github.com/v3)." 
+ ].join("\n") + end + + def available_options + [ + FastlaneCore::ConfigItem.new(key: :server_url, + env_name: "FL_GITHUB_API_SERVER_URL", + description: "The server url. e.g. 'https://your.internal.github.host/api/v3' (Default: 'https://api.github.com')", + default_value: "https://api.github.com", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please include the protocol in the server url, e.g. https://your.github.server/api/v3") unless value.include?("//") + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_GITHUB_API_TOKEN", + description: "Personal API Token for GitHub - generate one at https://github.com/settings/tokens", + conflicting_options: [:api_bearer], + sensitive: true, + code_gen_sensitive: true, + default_value: ENV["GITHUB_API_TOKEN"], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :api_bearer, + env_name: "FL_GITHUB_API_BEARER", + sensitive: true, + code_gen_sensitive: true, + description: "Use a Bearer authorization token. Usually generated by Github Apps, e.g. GitHub Actions GITHUB_TOKEN environment variable", + conflicting_options: [:api_token], + optional: true, + default_value: nil), + FastlaneCore::ConfigItem.new(key: :http_method, + env_name: "FL_GITHUB_API_HTTP_METHOD", + description: "The HTTP method. e.g. GET / POST", + default_value: "GET", + optional: true, + verify_block: proc do |value| + unless %w(GET POST PUT DELETE HEAD CONNECT PATCH).include?(value.to_s.upcase) + UI.user_error!("Unrecognised HTTP method") + end + end), + FastlaneCore::ConfigItem.new(key: :body, + env_name: "FL_GITHUB_API_REQUEST_BODY", + description: "The request body in JSON or hash format", + skip_type_validation: true, # allow Hash, Array + default_value: {}, + optional: true), + FastlaneCore::ConfigItem.new(key: :raw_body, + env_name: "FL_GITHUB_API_REQUEST_RAW_BODY", + description: "The request body taken verbatim instead of as JSON, useful for file uploads", + optional: true), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_GITHUB_API_PATH", + description: "The endpoint path. e.g. '/repos/:owner/:repo/readme'", + optional: true), + FastlaneCore::ConfigItem.new(key: :url, + env_name: "FL_GITHUB_API_URL", + description: "The complete full url - used instead of path. e.g. 'https://uploads.github.com/repos/fastlane...'", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please include the protocol in the url, e.g. https://uploads.github.com") unless value.include?("//") + end), + FastlaneCore::ConfigItem.new(key: :error_handlers, + description: "Optional error handling hash based on status code, or pass '*' to handle all errors", + type: Hash, + default_value: {}, + optional: true), + FastlaneCore::ConfigItem.new(key: :headers, + env_name: "FL_GITHUB_API_HEADERS", + description: "Optional headers to apply", + type: Hash, + default_value: {}, + optional: true), + FastlaneCore::ConfigItem.new(key: :secure, + env_name: "FL_GITHUB_API_SECURE", + description: "Optionally disable secure requests (ssl_verify_peer)", + type: Boolean, + default_value: true, + optional: true) + ] + end + + def output + [ + ['GITHUB_API_STATUS_CODE', 'The status code returned from the request'], + ['GITHUB_API_RESPONSE', 'The full response body'], + ['GITHUB_API_JSON', 'The parsed json returned from GitHub'] + ] + end + + def return_value + "A hash including the HTTP status code (:status), the response body (:body), and if valid JSON has been returned the parsed JSON (:json)." 
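+ # e.g. (sketch; endpoint and JSON key illustrative):
+ #
+ #   result = github_api(path: "/repos/:owner/:repo/readme")
+ #   UI.message("HTTP #{result[:status]}: #{result[:json]['name']}") if result[:status] == 200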
+ end + + def authors + ["tommeier"] + end + + def example_code + [ + 'result = github_api( + server_url: "https://api.github.com", + api_token: ENV["GITHUB_TOKEN"], + http_method: "GET", + path: "/repos/:owner/:repo/readme", + body: { ref: "master" } + )', + '# Alternatively call directly with optional error handling or block usage + GithubApiAction.run( + server_url: "https://api.github.com", + api_token: ENV["GITHUB_TOKEN"], + http_method: "GET", + path: "/repos/:owner/:repo/readme", + error_handlers: { + 404 => proc do |result| + UI.message("Something went wrong - I couldn\'t find it...") + end, + \'*\' => proc do |result| + UI.message("Handle all error codes other than 404") + end + } + ) do |result| + UI.message("JSON returned: #{result[:json]}") + end + ' + ] + end + + def is_supported?(platform) + true + end + + def category + :source_control + end + + private + + def construct_headers(api_token, api_bearer, overrides) + require 'base64' + headers = { 'User-Agent' => 'fastlane-github_api' } + headers['Authorization'] = "Basic #{Base64.strict_encode64(api_token)}" if api_token + headers['Authorization'] = "Bearer #{api_bearer}" if api_bearer + headers.merge(overrides || {}) + end + + def construct_url(server_url, path, url) + return_url = (server_url && path) ? File.join(server_url, path) : url + + UI.user_error!("Please provide either `server_url` (e.g. https://api.github.com) and 'path' or full 'url' for GitHub API endpoint") unless return_url + + return_url + end + + def construct_body(body, raw_body) + body ||= {} + + if raw_body + raw_body + elsif body.kind_of?(Hash) + body.to_json + elsif body.kind_of?(Array) + body.to_json + else + UI.user_error!("Please provide valid JSON, or a hash as request body") unless parse_json(body) + body + end + end + + def parse_json(value) + JSON.parse(value) + rescue JSON::ParserError + nil + end + + def call_endpoint(url, http_method, headers, body, secure) + require 'excon' + + Excon.defaults[:ssl_verify_peer] = secure + middlewares = Excon.defaults[:middlewares] + [Excon::Middleware::RedirectFollower] # allow redirect in case of repo renames + + UI.verbose("#{http_method} : #{url}") + + connection = Excon.new(url) + connection.request( + method: http_method, + headers: headers, + middlewares: middlewares, + body: body, + debug_request: FastlaneCore::Globals.verbose?, + debug_response: FastlaneCore::Globals.verbose? + ) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_release_names.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_release_names.rb new file mode 100644 index 0000000..dd87d13 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_release_names.rb @@ -0,0 +1,74 @@ +module Fastlane + module Actions + class GooglePlayTrackReleaseNamesAction < Action + # Supply::Options.available_options keys that apply to this action. 
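+ # Usage sketch (package name and track illustrative):
+ #
+ #   names = google_play_track_release_names(package_name: "com.example.app", track: "beta")
+ #   UI.message("Releases on beta: #{names.join(', ')}")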
+ OPTIONS = [ + :package_name, + :track, + :key, + :issuer, + :json_key, + :json_key_data, + :root_url, + :timeout + ] + + def self.run(params) + require 'supply' + require 'supply/options' + require 'supply/reader' + + Supply.config = params + + release_names = Supply::Reader.new.track_release_names || [] + return release_names.compact + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Retrieves release names for a Google Play track" + end + + def self.details + "More information: [https://docs.fastlane.tools/actions/supply/](https://docs.fastlane.tools/actions/supply/)" + end + + def self.available_options + require 'supply' + require 'supply/options' + + Supply::Options.available_options.select do |option| + OPTIONS.include?(option.key) + end + end + + def self.output + end + + def self.return_value + "Array of strings representing the release names for the given Google Play track" + end + + def self.authors + ["raldred"] + end + + def self.is_supported?(platform) + platform == :android + end + + def self.example_code + [ + 'google_play_track_release_names' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_version_codes.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_version_codes.rb new file mode 100644 index 0000000..4d81cc0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/google_play_track_version_codes.rb @@ -0,0 +1,77 @@ +module Fastlane + module Actions + class GooglePlayTrackVersionCodesAction < Action + # Supply::Options.available_options keys that apply to this action. 
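+ # Usage sketch (package name and track illustrative), e.g. to derive the next version code:
+ #
+ #   codes = google_play_track_version_codes(package_name: "com.example.app", track: "production")
+ #   next_code = (codes.max || 0) + 1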
+ OPTIONS = [ + :package_name, + :track, + :key, + :issuer, + :json_key, + :json_key_data, + :root_url, + :timeout + ] + + def self.run(params) + require 'supply' + require 'supply/options' + require 'supply/reader' + + Supply.config = params + + # AndroidpublisherV3 returns version codes as array of strings + # even though version codes need to be integers + # https://github.com/fastlane/fastlane/issues/15622 + version_codes = Supply::Reader.new.track_version_codes || [] + return version_codes.compact.map(&:to_i) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Retrieves version codes for a Google Play track" + end + + def self.details + "More information: [https://docs.fastlane.tools/actions/supply/](https://docs.fastlane.tools/actions/supply/)" + end + + def self.available_options + require 'supply' + require 'supply/options' + + Supply::Options.available_options.select do |option| + OPTIONS.include?(option.key) + end + end + + def self.output + end + + def self.return_value + "Array of integers representing the version codes for the given Google Play track" + end + + def self.authors + ["panthomakos"] + end + + def self.is_supported?(platform) + platform == :android + end + + def self.example_code + [ + 'google_play_track_version_codes' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gradle.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gradle.rb new file mode 100644 index 0000000..321e41f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gradle.rb @@ -0,0 +1,334 @@ +require 'pathname' +require 'shellwords' + +module Fastlane + module Actions + module SharedValues + GRADLE_APK_OUTPUT_PATH = :GRADLE_APK_OUTPUT_PATH + GRADLE_ALL_APK_OUTPUT_PATHS = :GRADLE_ALL_APK_OUTPUT_PATHS + GRADLE_AAB_OUTPUT_PATH = :GRADLE_AAB_OUTPUT_PATH + GRADLE_ALL_AAB_OUTPUT_PATHS = :GRADLE_ALL_AAB_OUTPUT_PATHS + GRADLE_OUTPUT_JSON_OUTPUT_PATH = :GRADLE_OUTPUT_JSON_OUTPUT_PATH + GRADLE_ALL_OUTPUT_JSON_OUTPUT_PATHS = :GRADLE_ALL_OUTPUT_JSON_OUTPUT_PATHS + GRADLE_MAPPING_TXT_OUTPUT_PATH = :GRADLE_MAPPING_TXT_OUTPUT_PATH + GRADLE_ALL_MAPPING_TXT_OUTPUT_PATHS = :GRADLE_ALL_MAPPING_TXT_OUTPUT_PATHS + GRADLE_FLAVOR = :GRADLE_FLAVOR + GRADLE_BUILD_TYPE = :GRADLE_BUILD_TYPE + end + + class GradleAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + task = params[:task] + flavor = params[:flavor] + build_type = params[:build_type] + tasks = params[:tasks] + + gradle_task = gradle_task(task, flavor, build_type, tasks) + + UI.user_error!('Please pass a gradle task or tasks') if gradle_task.empty? + + project_dir = params[:project_dir] + + gradle_path_param = params[:gradle_path] || './gradlew' + + # Get the path to gradle, if it's an absolute path we take it as is, if it's relative we assume it's relative to the project_dir + gradle_path = if Pathname.new(gradle_path_param).absolute? 
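+ # an absolute gradle_path (e.g. "/usr/local/bin/gradle") is taken as-is,
+ # while a relative one (e.g. "gradlew" with project_dir "android") resolves to "android/gradlew"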
+ File.expand_path(gradle_path_param) + else + File.expand_path(File.join(project_dir, gradle_path_param)) + end + + # Ensure we ended up with a valid path to gradle + UI.user_error!("Couldn't find gradlew at path '#{File.expand_path(gradle_path)}'") unless File.exist?(gradle_path) + + # Construct our flags + flags = [] + flags << "-p #{project_dir.shellescape}" + flags << params[:properties].map { |k, v| "-P#{k.to_s.shellescape}=#{v.to_s.shellescape}" }.join(' ') unless params[:properties].nil? + flags << params[:system_properties].map { |k, v| "-D#{k.to_s.shellescape}=#{v.to_s.shellescape}" }.join(' ') unless params[:system_properties].nil? + flags << params[:flags] unless params[:flags].nil? + + # Run the actual gradle task + gradle = Helper::GradleHelper.new(gradle_path: gradle_path) + + # If these were set as properties, then we expose them back out as they might be useful to others + Actions.lane_context[SharedValues::GRADLE_BUILD_TYPE] = build_type if build_type + Actions.lane_context[SharedValues::GRADLE_FLAVOR] = flavor if flavor + + # We run the actual gradle task + result = gradle.trigger(task: gradle_task, + serial: params[:serial], + flags: flags.join(' '), + print_command: params[:print_command], + print_command_output: params[:print_command_output]) + + # If we didn't build, then we return now, as it makes no sense to search for apk's in a non-`assemble` or non-`build` scenario + return result unless gradle_task =~ /\b(assemble)/ || gradle_task =~ /\b(bundle)/ + + apk_search_path = File.join(project_dir, '**', 'build', 'outputs', 'apk', '**', '*.apk') + aab_search_path = File.join(project_dir, '**', 'build', 'outputs', 'bundle', '**', '*.aab') + output_json_search_path = File.join(project_dir, '**', 'build', 'outputs', 'apk', '**', 'output*.json') # output.json in Android Stuido 3 and output-metadata.json in Android Studio 4 + mapping_txt_search_path = File.join(project_dir, '**', 'build', 'outputs', 'mapping', '**', 'mapping.txt') + + # Our apk/aab is now built, but there might actually be multiple ones that were built if a flavor was not specified in a multi-flavor project (e.g. `assembleRelease`) + # However, we're not interested in unaligned apk's... + new_apks = Dir[apk_search_path].reject { |path| path =~ /^.*-unaligned.apk$/i } + new_apks = new_apks.map { |path| File.expand_path(path) } + new_aabs = Dir[aab_search_path] + new_aabs = new_aabs.map { |path| File.expand_path(path) } + new_output_jsons = Dir[output_json_search_path] + new_output_jsons = new_output_jsons.map { |path| File.expand_path(path) } + new_mapping_txts = Dir[mapping_txt_search_path] + new_mapping_txts = new_mapping_txts.map { |path| File.expand_path(path) } + + # We expose all of these new apks and aabs + Actions.lane_context[SharedValues::GRADLE_ALL_APK_OUTPUT_PATHS] = new_apks + Actions.lane_context[SharedValues::GRADLE_ALL_AAB_OUTPUT_PATHS] = new_aabs + Actions.lane_context[SharedValues::GRADLE_ALL_OUTPUT_JSON_OUTPUT_PATHS] = new_output_jsons + Actions.lane_context[SharedValues::GRADLE_ALL_MAPPING_TXT_OUTPUT_PATHS] = new_mapping_txts + + # We also take the most recent apk and aab to return as SharedValues::GRADLE_APK_OUTPUT_PATH and SharedValues::GRADLE_AAB_OUTPUT_PATH + # This is the one that will be relevant for most projects that just build a single build variant (flavor + build type combo). 
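+ # (e.g. `gradle(task: "assemble", flavor: "MyFlavor", build_type: "Release")` produces a single APK)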
+ # In multi build variants this value is undefined + last_apk_path = new_apks.sort_by(&File.method(:mtime)).last + last_aab_path = new_aabs.sort_by(&File.method(:mtime)).last + last_output_json_path = new_output_jsons.sort_by(&File.method(:mtime)).last + last_mapping_txt_path = new_mapping_txts.sort_by(&File.method(:mtime)).last + Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH] = File.expand_path(last_apk_path) if last_apk_path + Actions.lane_context[SharedValues::GRADLE_AAB_OUTPUT_PATH] = File.expand_path(last_aab_path) if last_aab_path + Actions.lane_context[SharedValues::GRADLE_OUTPUT_JSON_OUTPUT_PATH] = File.expand_path(last_output_json_path) if last_output_json_path + Actions.lane_context[SharedValues::GRADLE_MAPPING_TXT_OUTPUT_PATH] = File.expand_path(last_mapping_txt_path) if last_mapping_txt_path + + # Give a helpful message in case there were no new apks or aabs. Remember we're only running this code when assembling, in which case we certainly expect there to be an apk or aab + UI.message('Couldn\'t find any new signed apk files...') if new_apks.empty? && new_aabs.empty? + + return result + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.gradle_task(task, flavor, build_type, tasks) + gradle_task = [task, flavor, build_type].join + + if gradle_task.empty? && !tasks.nil? + gradle_task = tasks.join(' ') + end + + gradle_task + end + + def self.step_text(params) + task = params[:task] + flavor = params[:flavor] + build_type = params[:build_type] + tasks = params[:tasks] + + gradle_task = gradle_task(task, flavor, build_type, tasks) + + return gradle_task + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + 'All gradle related actions, including building and testing your Android app' + end + + def self.details + 'Run `./gradlew tasks` to get a list of all available gradle tasks for your project' + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :task, + env_name: 'FL_GRADLE_TASK', + description: 'The gradle task you want to execute, e.g. `assemble`, `bundle` or `test`. For tasks such as `assembleMyFlavorRelease` you should use gradle(task: \'assemble\', flavor: \'Myflavor\', build_type: \'Release\')', + conflicting_options: [:tasks], + optional: true), + FastlaneCore::ConfigItem.new(key: :flavor, + env_name: 'FL_GRADLE_FLAVOR', + description: 'The flavor that you want the task for, e.g. `MyFlavor`. If you are running the `assemble` task in a multi-flavor project, and you rely on Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH] then you must specify a flavor here or else this value will be undefined', + optional: true), + FastlaneCore::ConfigItem.new(key: :build_type, + env_name: 'FL_GRADLE_BUILD_TYPE', + description: 'The build type that you want the task for, e.g. `Release`. Useful for some tasks such as `assemble`', + optional: true), + FastlaneCore::ConfigItem.new(key: :tasks, + type: Array, + env_name: 'FL_GRADLE_TASKS', + description: 'The multiple gradle tasks that you want to execute, e.g. `[assembleDebug, bundleDebug]`', + conflicting_options: [:task], + optional: true), + FastlaneCore::ConfigItem.new(key: :flags, + env_name: 'FL_GRADLE_FLAGS', + description: 'All parameter flags you want to pass to the gradle command, e.g. 
`--exitcode --xml file.xml`', + optional: true), + FastlaneCore::ConfigItem.new(key: :project_dir, + env_name: 'FL_GRADLE_PROJECT_DIR', + description: 'The root directory of the gradle project', + default_value: '.'), + FastlaneCore::ConfigItem.new(key: :gradle_path, + env_name: 'FL_GRADLE_PATH', + description: 'The path to your `gradlew`. If you specify a relative path, it is assumed to be relative to the `project_dir`', + optional: true), + FastlaneCore::ConfigItem.new(key: :properties, + env_name: 'FL_GRADLE_PROPERTIES', + description: 'Gradle properties to be exposed to the gradle script', + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :system_properties, + env_name: 'FL_GRADLE_SYSTEM_PROPERTIES', + description: 'Gradle system properties to be exposed to the gradle script', + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :serial, + env_name: 'FL_ANDROID_SERIAL', + description: 'Android serial, which device should be used for this command', + default_value: ''), + FastlaneCore::ConfigItem.new(key: :print_command, + env_name: 'FL_GRADLE_PRINT_COMMAND', + description: 'Control whether the generated Gradle command is printed as output before running it (true/false)', + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :print_command_output, + env_name: 'FL_GRADLE_PRINT_COMMAND_OUTPUT', + description: 'Control whether the output produced by given Gradle command is printed while running (true/false)', + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['GRADLE_APK_OUTPUT_PATH', 'The path to the newly generated apk file. Undefined in a multi-variant assemble scenario'], + ['GRADLE_ALL_APK_OUTPUT_PATHS', 'When running a multi-variant `assemble`, the array of signed apk\'s that were generated'], + ['GRADLE_FLAVOR', 'The flavor, e.g. `MyFlavor`'], + ['GRADLE_BUILD_TYPE', 'The build type, e.g. `Release`'], + ['GRADLE_AAB_OUTPUT_PATH', 'The path to the most recent Android app bundle'], + ['GRADLE_ALL_AAB_OUTPUT_PATHS', 'The paths to the most recent Android app bundles'], + ['GRADLE_OUTPUT_JSON_OUTPUT_PATH', 'The path to the most recent output.json file'], + ['GRADLE_ALL_OUTPUT_JSON_OUTPUT_PATHS', 'The path to the newly generated output.json files'], + ['GRADLE_MAPPING_TXT_OUTPUT_PATH', 'The path to the most recent mapping.txt file'], + ['GRADLE_ALL_MAPPING_TXT_OUTPUT_PATHS', 'The path to the newly generated mapping.txt files'] + ] + end + + def self.return_value + 'The output of running the gradle task' + end + + def self.authors + ['KrauseFx', 'lmirosevic'] + end + + def self.is_supported?(platform) + [:ios, :android].include?(platform) # we support iOS as cross platforms apps might want to call `gradle` also + end + + def self.example_code + [ + 'gradle( + task: "assemble", + flavor: "WorldDomination", + build_type: "Release" + ) + ``` + + To build an AAB use: + ```ruby + gradle( + task: "bundle", + flavor: "WorldDomination", + build_type: "Release" + ) + ``` + + You can pass multiple gradle tasks: + ```ruby + gradle( + tasks: ["assembleDebug", "bundleDebug"] + ) + ``` + + You can pass properties to gradle: + ```ruby + gradle( + # ... + + properties: { + "exampleNumber" => 100, + "exampleString" => "1.0.0", + # ... + } + ) + ``` + + You can use this to change the version code and name of your app: + ```ruby + gradle( + # ... + + properties: { + "android.injected.version.code" => 100, + "android.injected.version.name" => "1.0.0", + # ... 
+ } + ) + ``` + + You can use this to automatically [sign and zipalign](https://developer.android.com/studio/publish/app-signing.html) your app: + ```ruby + gradle( + task: "assemble", + build_type: "Release", + print_command: false, + properties: { + "android.injected.signing.store.file" => "keystore.jks", + "android.injected.signing.store.password" => "store_password", + "android.injected.signing.key.alias" => "key_alias", + "android.injected.signing.key.password" => "key_password", + } + ) + ``` + + If you need to pass sensitive information through the `gradle` action, and don\'t want the generated command to be printed before it is run, you can suppress that: + ```ruby + gradle( + # ... + print_command: false + ) + ``` + + You can also suppress printing the output generated by running the generated Gradle command: + ```ruby + gradle( + # ... + print_command_output: false + ) + ``` + + To pass any other CLI flags to gradle use: + ```ruby + gradle( + # ... + + flags: "--exitcode --xml file.xml" + ) + ``` + + Delete the build directory, generated APKs and AABs + ```ruby + gradle( + task: "clean" + )' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gym.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gym.rb new file mode 100644 index 0000000..e3a7f69 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/gym.rb @@ -0,0 +1,10 @@ +module Fastlane + module Actions + require 'fastlane/actions/build_app' + class GymAction < BuildAppAction + def self.description + "Alias for the `build_app` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_add_tag.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_add_tag.rb new file mode 100644 index 0000000..d2d2867 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_add_tag.rb @@ -0,0 +1,48 @@ +module Fastlane + module Actions + # Adds a hg tag to the current commit + class HgAddTagAction < Action + def self.run(options) + tag = options[:tag] + + UI.message("Adding mercurial tag '#{tag}' đŸŽ¯.") + + command = "hg tag \"#{tag}\"" + return command if Helper.test? + + Actions.sh(command) + end + + def self.description + "This will add a hg tag to the current branch" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :tag, + env_name: "FL_HG_TAG_TAG", + description: "Tag to create") + ] + end + + def self.author + # credits to lmirosevic for original git version + "sjrmanning" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'hg_add_tag(tag: "1.3")' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_commit_version_bump.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_commit_version_bump.rb new file mode 100644 index 0000000..8331349 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_commit_version_bump.rb @@ -0,0 +1,183 @@ +module Fastlane + module Actions + # Commits version bump. + class HgCommitVersionBumpAction < Action + def self.run(params) + require 'xcodeproj' + require 'pathname' + require 'set' + require 'shellwords' + + xcodeproj_path = params[:xcodeproj] ? 
File.expand_path(File.join('.', params[:xcodeproj])) : nil + + if Helper.test? + xcodeproj_path = "/tmp/Test.xcodeproj" + end + + # get the repo root path + repo_path = Helper.test? ? '/tmp/repo' : Actions.sh('hg root').strip + repo_pathname = Pathname.new(repo_path) + + if xcodeproj_path + # ensure that the xcodeproj passed in was OK + unless Helper.test? + UI.user_error!("Could not find the specified xcodeproj: #{xcodeproj_path}") unless File.directory?(xcodeproj_path) + end + else + # find an xcodeproj (ignoring dependencies) + xcodeproj_paths = Fastlane::Helper::XcodeprojHelper.find(repo_path) + + # no projects found: error + UI.user_error!('Could not find a .xcodeproj in the current repository\'s working directory.') if xcodeproj_paths.count == 0 + + # too many projects found: error + if xcodeproj_paths.count > 1 + relative_projects = xcodeproj_paths.map { |e| Pathname.new(e).relative_path_from(repo_pathname).to_s }.join("\n") + UI.user_error!("Found multiple .xcodeproj projects in the current repository's working directory. Please specify your app's main project: \n#{relative_projects}") + end + + # one project found: great + xcodeproj_path = xcodeproj_paths.first + end + + # find the pbxproj path, relative to hg directory + if Helper.test? + hg_dirty_files = params[:test_dirty_files].split(",") + expected_changed_files = params[:test_expected_files].split(",") + else + pbxproj_pathname = Pathname.new(File.join(xcodeproj_path, 'project.pbxproj')) + pbxproj_path = pbxproj_pathname.relative_path_from(repo_pathname).to_s + + # find the info_plist files + project = Xcodeproj::Project.open(xcodeproj_path) + info_plist_files = project.objects.select do |object| + object.isa == 'XCBuildConfiguration' + end.map(&:to_hash).map do |object_hash| + object_hash['buildSettings'] + end.select do |build_settings| + build_settings.key?('INFOPLIST_FILE') + end.map do |build_settings| + build_settings['INFOPLIST_FILE'] + end.uniq.map do |info_plist_path| + Pathname.new(File.expand_path(File.join(xcodeproj_path, '..', info_plist_path))).relative_path_from(repo_pathname).to_s + end + + # create our list of files that we expect to have changed, they should all be relative to the project root, which should be equal to the hg workdir root + expected_changed_files = [] + expected_changed_files << pbxproj_path + expected_changed_files << info_plist_files + expected_changed_files.flatten!.uniq! + + # get the list of files that have actually changed in our hg workdir + hg_dirty_files = Actions.sh('hg status -n').split("\n") + end + + # little user hint + UI.user_error!("No file changes picked up. Make sure you run the `increment_build_number` action first.") if hg_dirty_files.empty? + + # check if the files changed are the ones we expected to change (these should be only the files that have version info in them) + dirty_set = Set.new(hg_dirty_files.map(&:downcase)) + expected_set = Set.new(expected_changed_files.map(&:downcase)) + changed_files_as_expected = dirty_set.subset?(expected_set) + unless changed_files_as_expected + unless params[:force] + str = ["Found unexpected uncommitted changes in the working directory. Expected these files to have changed:", + "#{expected_changed_files.join("\n")}.", + "But found these actual changes: \n#{hg_dirty_files.join("\n")}.", + "Make sure you have cleaned up the build artifacts and are only left with the changed version files at this", + "stage in your lane, and don't touch the working directory while your lane is running. 
You can also use the :force option to ", + "bypass this check, and always commit a version bump regardless of the state of the working directory."].join("\n") + UI.user_error!(str) + end + end + + # create a commit with a message + command = "hg commit -m '#{params[:message]}'" + return command if Helper.test? + begin + Actions.sh(command) + + UI.success("Committed \"#{params[:message]}\" 💾.") + rescue => ex + UI.error(ex) + UI.important("Didn't commit any changes. 😐") + end + end + + def self.description + "This will commit a version bump to the hg repo" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_COMMIT_BUMP_MESSAGE", + description: "The commit message when committing the version bump", + default_value: "Version Bump"), + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_BUILD_NUMBER_PROJECT", + description: "The path to your project file (Not the workspace). If you have only one, this is optional", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_FORCE_COMMIT", + description: "Forces the commit, even if other files than the ones containing the version number have been modified", + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :test_dirty_files, + env_name: "FL_HG_COMMIT_TEST_DIRTY_FILES", + description: "A list of dirty files passed in for testing", + optional: true, + default_value: "file1, file2"), + FastlaneCore::ConfigItem.new(key: :test_expected_files, + env_name: "FL_HG_COMMIT_TEST_EXP_FILES", + description: "A list of expected changed files passed in for testing", + optional: true, + default_value: "file1, file2") + ] + end + + def self.author + # credits to lmirosevic for original git version + "sjrmanning" + end + + def self.is_supported?(platform) + true + end + + def self.details + list = <<-LIST.markdown_list + All `.plist` files + The `.xcodeproj/project.pbxproj` file + LIST + [ + "The mercurial equivalent of the [commit_version_bump](https://docs.fastlane.tools/actions/commit_version_bump/) git action. Like the git version, it is useful in conjunction with [`increment_build_number`](https://docs.fastlane.tools/actions/increment_build_number/).", + "It checks the repo to make sure that only the relevant files have changed, these are the files that `increment_build_number` (`agvtool`) touches:".markdown_preserve_newlines, + list, + "Then commits those files to the repo.", + "Customize the message with the `:message` option, defaults to 'Version Bump'", + "If you have other uncommitted changes in your repo, this action will fail. If you started off in a clean repo, and used the _ipa_ and or _sigh_ actions, then you can use the [clean_build_artifacts](https://docs.fastlane.tools/actions/clean_build_artifacts/) action to clean those temporary files up before running this action." 
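+ # Typical lane sketch (lane name illustrative):
+ #
+ #   lane :bump do
+ #     increment_build_number
+ #     hg_commit_version_bump(message: "Version Bump")
+ #     hg_push
+ #   end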
+ ].join("\n") + end + + def self.example_code + [ + 'hg_commit_version_bump', + 'hg_commit_version_bump( + message: "Version Bump", # create a commit with a custom message + xcodeproj: "./path/MyProject.xcodeproj", # optional, if you have multiple Xcode project files, you must specify your main project here + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_ensure_clean_status.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_ensure_clean_status.rb new file mode 100644 index 0000000..0687299 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_ensure_clean_status.rb @@ -0,0 +1,53 @@ +module Fastlane + module Actions + module SharedValues + HG_REPO_WAS_CLEAN_ON_START = :HG_REPO_WAS_CLEAN_ON_START + end + # Raises an exception and stop the lane execution if the repo is not in a clean state + class HgEnsureCleanStatusAction < Action + def self.run(params) + repo_clean = `hg status`.empty? + + if repo_clean + UI.success('Mercurial status is clean, all good! 😎') + Actions.lane_context[SharedValues::HG_REPO_WAS_CLEAN_ON_START] = true + else + UI.user_error!('Mercurial repository is dirty! Please ensure the repo is in a clean state by committing/stashing/discarding all changes first.') + end + end + + def self.description + "Raises an exception if there are uncommitted hg changes" + end + + def self.details + "Along the same lines as the [ensure_git_status_clean](https://docs.fastlane.tools/actions/ensure_git_status_clean/) action, this is a sanity check to ensure the working mercurial repo is clean. Especially useful to put at the beginning of your Fastfile in the `before_all` block." + end + + def self.output + [ + ['HG_REPO_WAS_CLEAN_ON_START', 'Stores the fact that the hg repo was clean at some point'] + ] + end + + def self.author + # credits to lmirosevic for original git version + "sjrmanning" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'hg_ensure_clean_status' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_push.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_push.rb new file mode 100644 index 0000000..34a54f4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hg_push.rb @@ -0,0 +1,64 @@ +module Fastlane + module Actions + # Pushes commits to the remote hg repo + class HgPushAction < Action + def self.run(params) + command = ['hg', 'push'] + + command << '--force' if params[:force] + command << params[:destination] unless params[:destination].empty? + + return command.join(' ') if Helper.test? + + Actions.sh(command.join(' ')) + UI.success('Successfully pushed changes to remote 🚀.') + end + + def self.description + "This will push changes to the remote hg repository" + end + + def self.details + "The mercurial equivalent of [push_to_git_remote](https://docs.fastlane.tools/actions/push_to_git_remote/). Pushes your local commits to a remote mercurial repo. Useful when local changes such as adding a version bump commit or adding a tag are part of your lane’s actions." 
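+ # Flow sketch combining the hg actions above (lane name and destination illustrative):
+ #
+ #   before_all { hg_ensure_clean_status }
+ #   lane :release do
+ #     hg_commit_version_bump
+ #     hg_push(destination: "ssh://hg@example.com/owner/repo")
+ #   end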
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_HG_PUSH_FORCE", + description: "Force push to remote", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :destination, + env_name: "FL_HG_PUSH_DESTINATION", + description: "The destination to push to", + default_value: '', + optional: true) + ] + end + + def self.author + # credits to lmirosevic for original git version + "sjrmanning" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'hg_push', + 'hg_push( + destination: "ssh://hg@repohost.com/owner/repo", + force: true + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hipchat.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hipchat.rb new file mode 100644 index 0000000..1028226 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hipchat.rb @@ -0,0 +1,200 @@ +module Fastlane + module Actions + module SharedValues + end + + class HipchatAction < Action + def self.run(options) + require 'net/http' + require 'uri' + + api_token = options[:api_token] + api_version = options[:version] + api_host = options[:api_host] + + message_format = options[:message_format] + + channel = options[:channel] + if ['yellow', 'red', 'green', 'purple', 'gray', 'random'].include?(options[:custom_color]) == true + color = options[:custom_color] + else + color = (options[:success] ? 'green' : 'red') + end + + from = options[:from] + + message = options[:message] + if (message_format == "html") && (options[:include_html_header] == true) + message = "
#{message[0..9999]}
" + end + + if api_version.to_i == 1 + ########## running on V1 ########## + if user?(channel) + UI.user_error!("HipChat private message not working with API V1 please use API V2 instead") + else + uri = URI.parse("https://#{api_host}/v1/rooms/message") + response = Net::HTTP.post_form(uri, { 'from' => from, + 'auth_token' => api_token, + 'color' => color, + 'message_format' => message_format, + 'room_id' => channel, + 'message' => message, + 'notify' => options[:notify_room] ? '1' : '0' }) + + check_response_code(response, channel) + end + else + ########## running on V2 ########## + # Escape channel's name to guarantee it is a valid URL resource. + # First of all we verify that the value is not already escaped, + # escaping an escaped value will produce a wrong channel name. + require 'addressable/uri' + escaped_channel = Addressable::URI.encode(channel) == channel ? Addressable::URI.encode(channel) : channel + if user?(channel) + params = { 'message' => message, 'message_format' => message_format } + json_headers = { 'Content-Type' => 'application/json', + 'Accept' => 'application/json', 'Authorization' => "Bearer #{api_token}" } + + uri = URI.parse("https://#{api_host}/v2/user/#{escaped_channel}/message") + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + + response = http.post(uri.path, params.to_json, json_headers) + else + uri = URI.parse("https://#{api_host}/v2/room/#{escaped_channel}/notification") + response = Net::HTTP.post_form(uri, { 'from' => from, + 'auth_token' => api_token, + 'color' => color, + 'message_format' => message_format, + 'message' => message, + 'notify' => options[:notify_room] ? 'true' : 'false' }) + end + check_response_code(response, channel) + end + end + + def self.user?(channel) + channel.to_s.start_with?('@') + end + + def self.check_response_code(response, channel) + case response.code.to_i + when 200, 204 + true + when 404 + UI.user_error!("Channel `#{channel}` not found") + when 401 + UI.user_error!("Access denied for channel `#{channel}`") + else + UI.user_error!("Unexpected #{response.code} for `#{channel}` with response: #{response.body}") + end + end + + def self.description + "Send a error/success message to [HipChat](https://www.hipchat.com/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_HIPCHAT_MESSAGE", + description: "The message to post on HipChat", + default_value: ''), + FastlaneCore::ConfigItem.new(key: :channel, + env_name: "FL_HIPCHAT_CHANNEL", + description: "The room or @username"), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "HIPCHAT_API_TOKEN", + sensitive: true, + description: "Hipchat API Token", + verify_block: proc do |value| + unless value.to_s.length > 0 + UI.error("Please add 'ENV[\"HIPCHAT_API_TOKEN\"] = \"your token\"' to your Fastfile's `before_all` section.") + UI.user_error!("No HIPCHAT_API_TOKEN given.") + end + end), + FastlaneCore::ConfigItem.new(key: :custom_color, + env_name: "FL_HIPCHAT_CUSTOM_COLOR", + description: "Specify a custom color, this overrides the success boolean. Can be one of 'yellow', 'red', 'green', 'purple', 'gray', or 'random'", + optional: true), + FastlaneCore::ConfigItem.new(key: :success, + env_name: "FL_HIPCHAT_SUCCESS", + description: "Was this build successful? (true/false)", + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "HIPCHAT_API_VERSION", + description: "Version of the Hipchat API. 
Must be 1 or 2", + verify_block: proc do |value| + if value.nil? || ![1, 2].include?(value.to_i) + UI.error("Please add 'ENV[\"HIPCHAT_API_VERSION\"] = \"1 or 2\"' to your Fastfile's `before_all` section.") + UI.user_error!("No HIPCHAT_API_VERSION given.") + end + end), + FastlaneCore::ConfigItem.new(key: :notify_room, + env_name: "HIPCHAT_NOTIFY_ROOM", + description: "Should the people in the room be notified? (true/false)", + default_value: false, + optional: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :api_host, + env_name: "HIPCHAT_API_HOST", + description: "The host of the HipChat-Server API", + default_value: "api.hipchat.com", + optional: true), + FastlaneCore::ConfigItem.new(key: :message_format, + env_name: "FL_HIPCHAT_MESSAGE_FORMAT", + description: "Format of the message to post. Must be either 'html' or 'text'", + default_value: "html", + optional: true, + verify_block: proc do |value| + unless ["html", "text"].include?(value.to_s) + UI.error("Please specify the message format as either 'html' or 'text'.") + UI.user_error!("Unrecognized message_format.") + end + end), + FastlaneCore::ConfigItem.new(key: :include_html_header, + env_name: "FL_HIPCHAT_INCLUDE_HTML_HEADER", + description: "Should html formatted messages include a preformatted header? (true/false)", + default_value: true, + optional: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :from, + env_name: "FL_HIPCHAT_FROM", + description: "Name the message will appear to be sent from", + default_value: "fastlane", + optional: true) + ] + end + + def self.author + "jingx23" + end + + def self.is_supported?(platform) + true + end + + def self.details + "Send a message to **room** (by default) or a direct message to **@username** with success (green) or failure (red) status." + end + + def self.example_code + [ + 'hipchat( + message: "App successfully released!", + message_format: "html", # or "text", defaults to "html" + channel: "Room or @username", + success: true + )' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hockey.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hockey.rb new file mode 100644 index 0000000..4c7a0b9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/hockey.rb @@ -0,0 +1,399 @@ +module Fastlane + module Actions + module SharedValues + HOCKEY_DOWNLOAD_LINK = :HOCKEY_DOWNLOAD_LINK + HOCKEY_BUILD_INFORMATION = :HOCKEY_BUILD_INFORMATION # contains all keys/values from the HockeyApp API, like :title, :bundle_identifier + end + + # rubocop:disable Metrics/ClassLength + class HockeyAction < Action + def self.connection(options) + require 'faraday' + require 'faraday_middleware' + + base_url = options.delete(:bypass_cdn) ? 
"https://rink.hockeyapp.net" : "https://upload.hockeyapp.net" + foptions = { + url: base_url + } + Faraday.new(foptions) do |builder| + builder.request(:multipart) + builder.request(:url_encoded) + builder.response(:json, content_type: /\bjson$/) + builder.use(FaradayMiddleware::FollowRedirects) + builder.adapter(:net_http) + end + end + + def self.upload(api_token, ipa, options) + create_update = options.delete(:create_update) + + if create_update + self.create_and_update_build(api_token, ipa, options) + else + self.upload_build(api_token, ipa, options) + end + end + + # Uses https://support.hockeyapp.net/kb/api/api-versions#upload-version if a `public_identifier` was specified + # otherwise https://support.hockeyapp.net/kb/api/api-apps#upload-app + def self.upload_build(api_token, ipa, options) + connection = self.connection(options) + + options[:ipa] = Faraday::UploadIO.new(ipa, 'application/octet-stream') if ipa && File.exist?(ipa) + + dsym_filename = options.delete(:dsym_filename) + if dsym_filename + options[:dsym] = Faraday::UploadIO.new(dsym_filename, 'application/octet-stream') + end + + connection.post do |req| + req.options.timeout = options.delete(:timeout) + if options[:public_identifier].nil? + req.url("/api/2/apps/upload") + else + req.url("/api/2/apps/#{options.delete(:public_identifier)}/app_versions/upload") + end + req.headers['X-HockeyAppToken'] = api_token + req.body = options + end + end + + # Uses https://support.hockeyapp.net/kb/api/api-versions#create-version + # and https://support.hockeyapp.net/kb/api/api-versions#update-version + # to upload a build + def self.create_and_update_build(api_token, ipa, options) + [:public_identifier, :bundle_short_version, :bundle_version].each do |key| + UI.user_error!("To use the 'create_update' upload mechanism you need to pass the '#{key.to_sym}' option.") unless options[key] + end + # https://support.hockeyapp.net/discussions/problems/33355-is-uploadhockeyappnet-available-for-general-use + # GET requests are cached on CDN, so bypass it + options[:bypass_cdn] = true + connection = self.connection(options) + + options.delete(:ipa) + options.delete(:apk) + app_id = options.delete(:public_identifier) + + ipaio = Faraday::UploadIO.new(ipa, 'application/octet-stream') if ipa && File.exist?(ipa) + dsym = options.delete(:dsym) + + if dsym + dsym_io = Faraday::UploadIO.new(dsym, 'application/octet-stream') if dsym && File.exist?(dsym) + end + + # https://support.hockeyapp.net/discussions/problems/83559 + # Should not set status to "2" (downloadable) until after the app is uploaded, so allow the caller + # to specify a different status for the `create` step + update_status = options[:status] + options[:status] = options[:create_status] + + response = connection.get do |req| + req.url("/api/2/apps/#{app_id}/app_versions/new") + req.headers['X-HockeyAppToken'] = api_token + req.body = options + end + + case response.status + when 200...300 + app_version_id = response.body['id'] + UI.message("successfully created version with id #{app_version_id}") + else + UI.user_error!("Error trying to create app version: #{response.status} - #{response.body}") + end + + options[:ipa] = ipaio + + if dsym + options[:dsym] = dsym_io + end + + options[:status] = update_status + + connection.put do |req| + req.options.timeout = options.delete(:timeout) + req.url("/api/2/apps/#{app_id}/app_versions/#{app_version_id}") + req.headers['X-HockeyAppToken'] = api_token + req.body = options + end + end + + def self.run(options) + build_file = [ + options[:ipa], + 
options[:apk] + ].detect { |e| !e.to_s.empty? } + + if options[:dsym] + dsym_filename = options[:dsym] + else + + if build_file.nil? + UI.user_error!("You have to provide a build file (params 'apk' or 'ipa')") + end + + if options[:ipa].to_s.end_with?(".ipa") + dsym_path = options[:ipa].to_s.gsub('.ipa', '.app.dSYM.zip') + if File.exist?(dsym_path) + dsym_filename = dsym_path + else + UI.important("Symbols not found on path #{File.expand_path(dsym_path)}. Crashes won't be symbolicated properly") + dsym_filename = nil + end + end + end + + UI.user_error!("Symbols on path '#{File.expand_path(dsym_filename)}' not found") if dsym_filename && !File.exist?(dsym_filename) + + if options[:upload_dsym_only] + UI.success('Starting with dSYM upload to HockeyApp... this could take some time.') + else + UI.success('Starting with file(s) upload to HockeyApp... this could take some time.') + end + + values = options.values + values[:dsym_filename] = dsym_filename + values[:notes_type] = options[:notes_type] + + api_token = values.delete(:api_token) + + values.delete_if { |k, v| v.nil? } + + return values if Helper.test? + + ipa_filename = build_file + ipa_filename = nil if options[:upload_dsym_only] + + response = self.upload(api_token, ipa_filename, values) + case response.status + when 200...300 + url = response.body['public_url'] + + Actions.lane_context[SharedValues::HOCKEY_DOWNLOAD_LINK] = url + Actions.lane_context[SharedValues::HOCKEY_BUILD_INFORMATION] = response.body + + UI.message("Public Download URL: #{url}") if url + UI.success('Build successfully uploaded to HockeyApp!') + else + if response.body.to_s.include?("App could not be created") + UI.user_error!("Hockey has an issue processing this app. Please confirm that an app in Hockey matches this IPA's bundle ID or that you are using the correct API upload token. If error persists, please provide the :public_identifier option from the HockeyApp website. More information https://github.com/fastlane/fastlane/issues/400") + else + UI.user_error!("Error when trying to upload file(s) to HockeyApp: #{response.status} - #{response.body}") + end + end + end + + def self.description + "Refer to [App Center](https://github.com/Microsoft/fastlane-plugin-appcenter/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :apk, + env_name: "FL_HOCKEY_APK", + description: "Path to your APK file", + default_value: Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find apk file at path '#{value}'") unless File.exist?(value) + end, + conflicting_options: [:ipa], + conflict_block: proc do |value| + UI.user_error!("You can't use 'apk' and '#{value.key}' options in one run") + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_HOCKEY_API_TOKEN", + sensitive: true, + description: "API Token for Hockey Access", + verify_block: proc do |value| + UI.user_error!("No API token for Hockey given, pass using `api_token: 'token'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "FL_HOCKEY_IPA", + description: "Path to your IPA file. Optional if you use the _gym_ or _xcodebuild_ action. For Mac zip the .app. For Android provide path to .apk file. 
In addition you could use this to upload .msi, .zip, .pkg, etc if you use the 'create_update' mechanism", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find ipa file at path '#{value}'") unless File.exist?(value) + end, + conflicting_options: [:apk], + conflict_block: proc do |value| + UI.user_error!("You can't use 'ipa' and '#{value.key}' options in one run") + end), + FastlaneCore::ConfigItem.new(key: :dsym, + env_name: "FL_HOCKEY_DSYM", + description: "Path to your symbols file. For iOS and Mac provide path to app.dSYM.zip. For Android provide path to mappings.txt file", + default_value: Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :create_update, + env_name: "FL_HOCKEY_CREATE_UPDATE", + description: "Set true if you want to create then update your app as opposed to just upload it."\ + " You will need the 'public_identifier', 'bundle_version' and 'bundle_short_version'", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :notes, + env_name: "FL_HOCKEY_NOTES", + description: "Beta Notes", + default_value: Actions.lane_context[SharedValues::FL_CHANGELOG] || "No changelog given", + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :notify, + env_name: "FL_HOCKEY_NOTIFY", + description: "Notify testers? \"1\" for yes", + default_value: "1"), + FastlaneCore::ConfigItem.new(key: :status, + env_name: "FL_HOCKEY_STATUS", + description: "Download status: \"1\" = No user can download; \"2\" = Available for download (only possible with full-access token)", + default_value: "2"), + FastlaneCore::ConfigItem.new(key: :create_status, + env_name: "FL_HOCKEY_CREATE_STATUS", + description: "Download status for initial version creation when create_update is true: \"1\" = No user can download; \"2\" = Available for download (only possible with full-access token)", + default_value: "2"), + FastlaneCore::ConfigItem.new(key: :notes_type, + env_name: "FL_HOCKEY_NOTES_TYPE", + description: "Notes type for your :notes, \"0\" = Textile, \"1\" = Markdown (default)", + default_value: "1"), + FastlaneCore::ConfigItem.new(key: :release_type, + env_name: "FL_HOCKEY_RELEASE_TYPE", + description: "Release type of the app: \"0\" = Beta (default), \"1\" = Store, \"2\" = Alpha, \"3\" = Enterprise", + default_value: "0"), + FastlaneCore::ConfigItem.new(key: :mandatory, + env_name: "FL_HOCKEY_MANDATORY", + description: "Set to \"1\" to make this update mandatory", + default_value: "0"), + FastlaneCore::ConfigItem.new(key: :teams, + env_name: "FL_HOCKEY_TEAMS", + description: "Comma separated list of team ID numbers to which this build will be restricted", + optional: true), + FastlaneCore::ConfigItem.new(key: :users, + env_name: "FL_HOCKEY_USERS", + description: "Comma separated list of user ID numbers to which this build will be restricted", + optional: true), + FastlaneCore::ConfigItem.new(key: :tags, + env_name: "FL_HOCKEY_TAGS", + description: "Comma separated list of tags which will receive access to the build", + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_short_version, + env_name: "FL_HOCKEY_BUNDLE_SHORT_VERSION", + description: "The bundle_short_version of your application, required when using `create_update`", + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_version, + env_name: "FL_HOCKEY_BUNDLE_VERSION", + 
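+ # e.g. "1.0.2.145" (cf. the create_update sample in example_code below)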
description: "The bundle_version of your application, required when using `create_update`", + optional: true), + FastlaneCore::ConfigItem.new(key: :public_identifier, + env_name: "FL_HOCKEY_PUBLIC_IDENTIFIER", + description: "App id of the app you are targeting, usually you won't need this value. Required, if `upload_dsym_only` set to `true`", + optional: true), + FastlaneCore::ConfigItem.new(key: :commit_sha, + env_name: "FL_HOCKEY_COMMIT_SHA", + description: "The Git commit SHA for this build", + optional: true), + FastlaneCore::ConfigItem.new(key: :repository_url, + env_name: "FL_HOCKEY_REPOSITORY_URL", + description: "The URL of your source repository", + optional: true), + FastlaneCore::ConfigItem.new(key: :build_server_url, + env_name: "FL_HOCKEY_BUILD_SERVER_URL", + description: "The URL of the build job on your build server", + optional: true), + FastlaneCore::ConfigItem.new(key: :upload_dsym_only, + env_name: "FL_HOCKEY_UPLOAD_DSYM_ONLY", + description: "Flag to upload only the dSYM file to hockey app", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :owner_id, + env_name: "FL_HOCKEY_OWNER_ID", + description: "ID for the owner of the app", + optional: true), + FastlaneCore::ConfigItem.new(key: :strategy, + env_name: "FL_HOCKEY_STRATEGY", + description: "Strategy: 'add' = to add the build as a new build even if it has the same build number (default); 'replace' = to replace a build with the same build number", + default_value: "add", + verify_block: proc do |value| + UI.user_error!("Invalid value '#{value}' for key 'strategy'. Allowed values are 'add', 'replace'.") unless ['add', 'replace'].include?(value) + end), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "FL_HOCKEY_TIMEOUT", + description: "Request timeout in seconds", + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :bypass_cdn, + env_name: "FL_HOCKEY_BYPASS_CDN", + description: "Flag to bypass Hockey CDN when it uploads successfully but reports error", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :dsa_signature, + env_name: "FL_HOCKEY_DSA_SIGNATURE", + description: "DSA signature for sparkle updates for macOS", + default_value: "", + optional: true) + ] + end + + def self.output + [ + ['HOCKEY_DOWNLOAD_LINK', 'The newly generated download link for this build'], + ['HOCKEY_BUILD_INFORMATION', 'contains all keys/values from the HockeyApp API, like :title, :bundle_identifier'] + ] + end + + def self.author + ["KrauseFx", "modzelewski", "lacostej"] + end + + def self.is_supported?(platform) + true + end + + def self.details + [ + "HockeyApp will be no longer supported and will be transitioned into App Center on November 16, 2019.", + "Please migrate over to [App Center](https://github.com/Microsoft/fastlane-plugin-appcenter/)", + "", + "Symbols will also be uploaded automatically if a `app.dSYM.zip` file is found next to `app.ipa`. In case it is located in a different place you can specify the path explicitly in the `:dsym` parameter.", + "More information about the available options can be found in the [HockeyApp Docs](http://support.hockeyapp.net/kb/api/api-versions#upload-version)." 
+ ].join("\n") + end + + def self.example_code + [ + 'hockey( + api_token: "...", + ipa: "./app.ipa", + notes: "Changelog" + )', + 'hockey( + api_token: "...", + create_update: true, + public_identifier: "....", + bundle_short_version: "1.0.2", + bundle_version: "1.0.2.145", + ipa: "./my.msi", + notes: "Changelog" + )', + '# You can bypass the CDN if you are uploading to Hockey and receive an SSL error (which can happen on corporate firewalls) + hockey( + api_token: "...", + ipa: "./app.ipa", + notes: "Changelog", + bypass_cdn: true + )' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + [ + "HockeyApp will be no longer supported and will be transitioned into App Center on November 16, 2019.", + "Please migrate over to [App Center](https://github.com/Microsoft/fastlane-plugin-appcenter/)" + ].join("\n") + end + end + # rubocop:enable Metrics/ClassLength + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ifttt.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ifttt.rb new file mode 100644 index 0000000..1622baf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ifttt.rb @@ -0,0 +1,93 @@ +module Fastlane + module Actions + class IftttAction < Action + def self.run(options) + require "net/http" + require "uri" + + uri = URI.parse("https://maker.ifttt.com/trigger/#{options[:event_name]}/with/key/#{options[:api_key]}") + https = Net::HTTP.new(uri.host, uri.port) + https.use_ssl = true + + req = Net::HTTP::Post.new(uri.request_uri) + + req.set_form_data({ + "value1" => options[:value1], + "value2" => options[:value2], + "value3" => options[:value3] + }) + + response = https.request(req) + + UI.user_error!("Failed to make a request to IFTTT. #{response.message}.") unless response.code == "200" + UI.success("Successfully made a request to IFTTT.") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Connect to the [IFTTT Maker Channel](https://ifttt.com/maker)" + end + + def self.details + "Connect to the IFTTT [Maker Channel](https://ifttt.com/maker). An IFTTT Recipe has two components: a Trigger and an Action. In this case, the Trigger will fire every time the Maker Channel receives a web request (made by this _fastlane_ action) to notify it of an event. The Action can be anything that IFTTT supports: email, SMS, etc." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "IFTTT_API_KEY", + sensitive: true, + description: "API key", + verify_block: proc do |value| + raise UI.error("No API key given, pass using `api_key: 'key'`") if value.to_s.empty? + end), + FastlaneCore::ConfigItem.new(key: :event_name, + env_name: "IFTTT_EVENT_NAME", + description: "The name of the event that will be triggered", + verify_block: proc do |value| + raise UI.error("No event name given, pass using `event_name: 'name'`") if value.to_s.empty? 
+ end), + FastlaneCore::ConfigItem.new(key: :value1, + env_name: "IFTTT_VALUE1", + description: "Extra data sent with the event", + optional: true), + FastlaneCore::ConfigItem.new(key: :value2, + env_name: "IFTTT_VALUE2", + description: "Extra data sent with the event", + optional: true), + FastlaneCore::ConfigItem.new(key: :value3, + env_name: "IFTTT_VALUE3", + description: "Extra data sent with the event", + optional: true) + ] + end + + def self.is_supported?(platform) + true + end + + def self.authors + ["vpolouchkine"] + end + + def self.example_code + [ + 'ifttt( + api_key: "...", + event_name: "...", + value1: "foo", + value2: "bar", + value3: "baz" + )' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import.rb new file mode 100644 index 0000000..78dff8a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import.rb @@ -0,0 +1,49 @@ +module Fastlane + module Actions + class ImportAction < Action + def self.run(params) + # this is implemented in the fast_file.rb + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Import another Fastfile to use its lanes" + end + + def self.details + [ + "This is useful if you have shared lanes across multiple apps and you want to store a Fastfile in a separate folder.", + "The path must be relative to the Fastfile this is called from." + ].join("\n") + end + + def self.available_options + end + + def self.output + [] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'import("./path/to/other/Fastfile")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_certificate.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_certificate.rb new file mode 100644 index 0000000..1bab169 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_certificate.rb @@ -0,0 +1,77 @@ +require 'shellwords' + +module Fastlane + module Actions + class ImportCertificateAction < Action + def self.run(params) + keychain_path = params[:keychain_path] || FastlaneCore::Helper.keychain_path(params[:keychain_name]) + + FastlaneCore::KeychainImporter.import_file(params[:certificate_path], keychain_path, keychain_password: params[:keychain_password], certificate_password: params[:certificate_password], output: params[:log_output]) + end + + def self.description + "Import certificate from inputfile into a keychain" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :certificate_path, + description: "Path to certificate", + optional: false), + FastlaneCore::ConfigItem.new(key: :certificate_password, + description: "Certificate password", + sensitive: true, + default_value: "", + optional: true), + FastlaneCore::ConfigItem.new(key: :keychain_name, + env_name: "KEYCHAIN_NAME", + description: "Keychain the items should be imported to", + optional: false), + FastlaneCore::ConfigItem.new(key: :keychain_path, + env_name: "KEYCHAIN_PATH", + description: "Path to the Keychain file to which the items should be imported", + optional: true), + 
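For context, a minimal sketch of the flow the details below describe (create a dedicated keychain with `create_keychain`, then import into it); paths and passwords are placeholders, and the `create_keychain` options shown are assumed from that action's documentation, not from this diff.

    # Sketch: throwaway CI keychain plus certificate import.
    create_keychain(
      name: "ci.keychain",
      password: ENV["KEYCHAIN_PASSWORD"],
      default_keychain: true,
      unlock: true
    )
    import_certificate(
      certificate_path: "certs/dist.p12",
      certificate_password: ENV["CERTIFICATE_PASSWORD"],
      keychain_name: "ci.keychain",
      keychain_password: ENV["KEYCHAIN_PASSWORD"]
    )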
FastlaneCore::ConfigItem.new(key: :keychain_password, + env_name: "FL_IMPORT_CERT_KEYCHAIN_PASSWORD", + description: "The password for the keychain. Note that for the login keychain this is your user's password", + sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :log_output, + description: "If output should be logged to the console", + type: Boolean, + default_value: false, + optional: true) + ] + end + + def self.authors + ["gin0606"] + end + + def self.is_supported?(platform) + true + end + + def self.details + "Import certificates (and private keys) into the current default keychain. Use the `create_keychain` action to create a new keychain." + end + + def self.example_code + [ + 'import_certificate(certificate_path: "certs/AppleWWDRCA6.cer")', + 'import_certificate( + certificate_path: "certs/dist.p12", + certificate_password: ENV["CERTIFICATE_PASSWORD"] || "default" + )', + 'import_certificate( + certificate_path: "certs/development.cer" + )' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_from_git.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_from_git.rb new file mode 100644 index 0000000..ea9ee5e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/import_from_git.rb @@ -0,0 +1,81 @@ +module Fastlane + module Actions + class ImportFromGitAction < Action + def self.run(params) + # this is implemented in the fast_file.rb + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Import another Fastfile from a remote git repository to use its lanes" + end + + def self.details + "This is useful if you have shared lanes across multiple apps and you want to store the Fastfile in a remote git repository." + end + + def self.available_options + [ + # Because the `run` method is actually implemented in `fast_file.rb`, + # and because magic, some of the parameters on `ConfigItem`s (e.g. + # `conflicting_options`, `verify_block`) are completely ignored. + FastlaneCore::ConfigItem.new(key: :url, + description: "The URL of the repository to import the Fastfile from", + optional: true), + FastlaneCore::ConfigItem.new(key: :branch, + description: "The branch or tag to check-out on the repository", + default_value: 'HEAD', + optional: true), + FastlaneCore::ConfigItem.new(key: :dependencies, + description: "The array of additional Fastfiles in the repository", + default_value: [], + optional: true), + FastlaneCore::ConfigItem.new(key: :path, + description: "The path of the Fastfile in the repository", + default_value: 'fastlane/Fastfile', + optional: true), + FastlaneCore::ConfigItem.new(key: :version, + description: "The version to checkout on the repository. Optimistic match operator or multiple conditions can be used to select the latest version within constraints", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :cache_path, + description: "The path to a directory where the repository should be cloned into. 
Defaults to `nil`, which causes the repository to be cloned on every call, to a temporary directory", + optional: true) + ] + end + + def self.authors + ["fabiomassimo", "KrauseFx", "Liquidsoul"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'import_from_git( + url: "git@github.com:fastlane/fastlane.git", # The URL of the repository to import the Fastfile from. + branch: "HEAD", # The branch to checkout on the repository. + path: "fastlane/Fastfile", # The path of the Fastfile in the repository. + version: "~> 1.0.0" # The version to checkout on the repository. Optimistic match operator can be used to select the latest version within constraints. + )', + 'import_from_git( + url: "git@github.com:fastlane/fastlane.git", # The URL of the repository to import the Fastfile from. + branch: "HEAD", # The branch to checkout on the repository. + path: "fastlane/Fastfile", # The path of the Fastfile in the repository. + version: [">= 1.1.0", "< 2.0.0"], # The version to checkout on the repository. Multiple conditions can be used to select the latest version within constraints. + cache_path: "~/.cache/fastlane/imported" # A directory in which the repository will be added, which means that it will not be cloned again on subsequent calls. + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_build_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_build_number.rb new file mode 100644 index 0000000..6e38038 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_build_number.rb @@ -0,0 +1,123 @@ +module Fastlane + module Actions + module SharedValues + BUILD_NUMBER ||= :BUILD_NUMBER + end + + class IncrementBuildNumberAction < Action + require 'shellwords' + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.run(params) + folder = params[:xcodeproj] ? File.join(params[:xcodeproj], '..') : '.' + + command_prefix = [ + 'cd', + File.expand_path(folder).shellescape, + '&&' + ].join(' ') + + command_suffix = [ + '&&', + 'cd', + '-' + ].join(' ') + + # More information about how to set up your project and how it works: + # https://developer.apple.com/library/ios/qa/qa1827/_index.html + # Attention: This is NOT the version number - but the build number + + agv_disabled = !system([command_prefix, 'agvtool what-version', command_suffix].join(' ')) + raise "Apple Generic Versioning is not enabled." if agv_disabled && params[:build_number].nil? + + mode = params[:skip_info_plist] ? '' : ' -all' + command = [ + command_prefix, + 'agvtool', + params[:build_number] ? 
"new-version#{mode} #{params[:build_number].to_s.strip}" : "next-version#{mode}", + command_suffix + ].join(' ') + + output = Actions.sh(command) + if output.include?('$(SRCROOT)') + UI.error('Cannot set build number with plist path containing $(SRCROOT)') + UI.error('Please remove $(SRCROOT) in your Xcode target build settings') + end + + # Store the new number in the shared hash + build_number = params[:build_number].to_s.strip + build_number = Actions.sh("#{command_prefix} agvtool what-version", log: false).split("\n").last.strip if build_number.to_s == "" + + return Actions.lane_context[SharedValues::BUILD_NUMBER] = build_number + rescue + UI.user_error!("Apple Generic Versioning is not enabled in this project.\nBefore being able to increment and read the version number from your Xcode project, you first need to setup your project properly. Please follow the guide at https://developer.apple.com/library/content/qa/qa1827/_index.html") + end + + def self.description + "Increment the build number of your project" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :build_number, + env_name: "FL_BUILD_NUMBER_BUILD_NUMBER", + description: "Change to a specific version. When you provide this parameter, Apple Generic Versioning does not have to be enabled", + optional: true, + skip_type_validation: true), # allow Integer, String + FastlaneCore::ConfigItem.new(key: :skip_info_plist, + env_name: "FL_BUILD_NUMBER_SKIP_INFO_PLIST", + description: "Don't update Info.plist files when updating the build version", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_BUILD_NUMBER_PROJECT", + description: "optional, you must specify the path to your main Xcode project if it is not in the project root directory", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") if !File.exist?(value) && !Helper.test? 
+ end) + ] + end + + def self.output + [ + ['BUILD_NUMBER', 'The new build number'] + ] + end + + def self.return_value + "The new build number" + end + + def self.return_type + :string + end + + def self.author + "KrauseFx" + end + + def self.example_code + [ + 'increment_build_number # automatically increment by one', + 'increment_build_number( + build_number: "75" # set a specific number + )', + 'increment_build_number( + build_number: 75, # specify specific build number (optional, omitting it increments by one) + xcodeproj: "./path/to/MyApp.xcodeproj" # (optional, you must specify the path to your main Xcode project if it is not in the project root directory) + )', + 'build_number = increment_build_number' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_version_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_version_number.rb new file mode 100644 index 0000000..bcbfad8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/increment_version_number.rb @@ -0,0 +1,180 @@ +module Fastlane + module Actions + module SharedValues + VERSION_NUMBER ||= :VERSION_NUMBER + end + + class IncrementVersionNumberAction < Action + require 'shellwords' + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.run(params) + # More information about how to set up your project and how it works: + # https://developer.apple.com/library/ios/qa/qa1827/_index.html + + folder = params[:xcodeproj] ? File.join(params[:xcodeproj], '..') : '.' + + command_prefix = [ + 'cd', + File.expand_path(folder).shellescape, + '&&' + ].join(' ') + + begin + current_version = Actions + .sh("#{command_prefix} agvtool what-marketing-version -terse1", log: FastlaneCore::Globals.verbose?) + .split("\n") + .last + .strip + rescue + current_version = '' + end + + if params[:version_number] + UI.verbose(version_format_error(current_version)) unless current_version =~ version_regex + + # Specific version + next_version_number = params[:version_number] + else + UI.user_error!(version_format_error(current_version)) unless current_version =~ version_regex + version_array = current_version.split(".").map(&:to_i) + + case params[:bump_type] + when "bump" + version_array[-1] = version_array[-1] + 1 + next_version_number = version_array.join(".") + when "patch" + UI.user_error!(version_token_error) if version_array.count < 3 + version_array[2] = version_array[2] + 1 + next_version_number = version_array.join(".") + when "minor" + UI.user_error!(version_token_error) if version_array.count < 2 + version_array[1] = version_array[1] + 1 + version_array[2] = 0 if version_array[2] + next_version_number = version_array.join(".") + when "major" + UI.user_error!(version_token_error) if version_array.count == 0 + version_array[0] = version_array[0] + 1 + version_array[1] = 0 if version_array[1] + version_array[2] = 0 if version_array[2] + next_version_number = version_array.join(".") + when "specific_version" + next_version_number = specific_version_number + end + end + + command = [ + command_prefix, + "agvtool new-marketing-version #{next_version_number.to_s.strip}" + ].join(' ') + + if Helper.test? 
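A worked example of the bump arithmetic implemented in `increment_version_number` below, starting from a current marketing version of "1.2.3":

    # bump_type: "bump"  => "1.2.4"   (last component + 1)
    # bump_type: "patch" => "1.2.4"
    # bump_type: "minor" => "1.3.0"   (patch resets to 0)
    # bump_type: "major" => "2.0.0"   (minor and patch reset to 0)
    increment_version_number(bump_type: "minor")  # e.g. 1.2.3 -> 1.3.0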
+ Actions.lane_context[SharedValues::VERSION_NUMBER] = command + else + Actions.sh(command) + Actions.lane_context[SharedValues::VERSION_NUMBER] = next_version_number + end + + return Actions.lane_context[SharedValues::VERSION_NUMBER] + rescue => ex + UI.error('Before being able to increment and read the version number from your Xcode project, you first need to setup your project properly. Please follow the guide at https://developer.apple.com/library/content/qa/qa1827/_index.html') + raise ex + end + + def self.version_regex + /^\d+(\.\d+){0,2}$/ + end + + def self.version_format_error(version) + "Your current version (#{version}) does not respect the format A or A.B or A.B.C" + end + + def self.version_token_error + "Can't increment version" + end + + def self.description + "Increment the version number of your project" + end + + def self.details + [ + "This action will increment the version number.", + "You first have to set up your Xcode project, if you haven't done it already: [https://developer.apple.com/library/ios/qa/qa1827/_index.html](https://developer.apple.com/library/ios/qa/qa1827/_index.html)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :bump_type, + env_name: "FL_VERSION_NUMBER_BUMP_TYPE", + description: "The type of this version bump. Available: patch, minor, major", + default_value: "bump", + verify_block: proc do |value| + UI.user_error!("Available values are 'patch', 'minor' and 'major'") unless ['bump', 'patch', 'minor', 'major'].include?(value) + end), + FastlaneCore::ConfigItem.new(key: :version_number, + env_name: "FL_VERSION_NUMBER_VERSION_NUMBER", + description: "Change to a specific version. This will replace the bump type value", + optional: true), + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_VERSION_NUMBER_PROJECT", + description: "optional, you must specify the path to your main Xcode project if it is not in the project root directory", + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") unless File.exist?(value) + end, + optional: true) + ] + end + + def self.output + [ + ['VERSION_NUMBER', 'The new version number'] + ] + end + + def self.return_type + :string + end + + def self.return_value + "The new version number" + end + + def self.author + "serluca" + end + + def self.example_code + [ + 'increment_version_number # Automatically increment version number', + 'increment_version_number( + bump_type: "patch" # Automatically increment patch version number + )', + 'increment_version_number( + bump_type: "minor" # Automatically increment minor version number + )', + 'increment_version_number( + bump_type: "major" # Automatically increment major version number + )', + 'increment_version_number( + version_number: "2.1.1" # Set a specific version number + )', + 'increment_version_number( + version_number: "2.1.1", # specify specific version number (optional, omitting it increments patch version number) + xcodeproj: "./path/to/MyApp.xcodeproj" # (optional, you must specify the path to your main Xcode project if it is not in the project root directory) + )', + 'version = increment_version_number' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_on_device.rb 
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_on_device.rb new file mode 100644 index 0000000..bc663cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_on_device.rb @@ -0,0 +1,92 @@ +module Fastlane + module Actions + module SharedValues + end + class InstallOnDeviceAction < Action + def self.run(params) + unless Helper.test? + UI.user_error!("ios-deploy not installed, see https://github.com/ios-control/ios-deploy for instructions") if `which ios-deploy`.length == 0 + end + taxi_cmd = [ + "ios-deploy", + params[:extra], + "--bundle", + params[:ipa].shellescape + ] + taxi_cmd << "--no-wifi" if params[:skip_wifi] + taxi_cmd << ["--id", params[:device_id]] if params[:device_id] + taxi_cmd.compact! + return taxi_cmd.join(" ") if Helper.test? + Actions.sh(taxi_cmd.join(" ")) + UI.message("Deployed #{params[:ipa]} to device!") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Installs an .ipa file on a connected iOS-device via usb or wifi" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :extra, + short_option: "-X", + env_name: "FL_IOD_EXTRA", + description: "Extra Commandline arguments passed to ios-deploy", + optional: true), + FastlaneCore::ConfigItem.new(key: :device_id, + short_option: "-d", + env_name: "FL_IOD_DEVICE_ID", + description: "id of the device / if not set defaults to first found device", + optional: true), + FastlaneCore::ConfigItem.new(key: :skip_wifi, + short_option: "-w", + env_name: "FL_IOD_WIFI", + description: "Do not search for devices via WiFi", + optional: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :ipa, + short_option: "-i", + env_name: "FL_IOD_IPA", + description: "The IPA file to put on the device", + optional: true, + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] || Dir["*.ipa"].first, + default_value_dynamic: true, + verify_block: proc do |value| + unless Helper.test? + UI.user_error!("Could not find ipa file at path '#{value}'") unless File.exist?(value) + UI.user_error!("'#{value}' doesn't seem to be an ipa file") unless value.end_with?(".ipa") + end + end) + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.details + "Installs the ipa on the device. If no id is given, the first found iOS device will be used. Works via USB or Wi-Fi. This requires `ios-deploy` to be installed. Please have a look at [ios-deploy](https://github.com/ios-control/ios-deploy). 
To quickly install it, use `npm -g i ios-deploy`"
+ end
+
+ def self.is_supported?(platform)
+ platform == :ios
+ end
+
+ def self.example_code
+ [
+ 'install_on_device(
+ device_id: "a3be6c9ff7e5c3c6028597513243b0f933b876d4",
+ ipa: "./app.ipa"
+ )'
+ ]
+ end
+
+ def self.category
+ :misc
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_provisioning_profile.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_provisioning_profile.rb
new file mode 100644
index 0000000..8a4993a
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_provisioning_profile.rb
@@ -0,0 +1,62 @@
+require 'shellwords'
+
+module Fastlane
+ module Actions
+ class InstallProvisioningProfileAction < Action
+ def self.run(params)
+ absolute_path = File.expand_path(params[:path])
+ FastlaneCore::ProvisioningProfile.install(absolute_path)
+ end
+
+ def self.description
+ "Install provisioning profile from path"
+ end
+
+ def self.details
+ "Install provisioning profile from path for current user"
+ end
+
+ def self.authors
+ ["SofteqDG"]
+ end
+
+ def self.category
+ :code_signing
+ end
+
+ def self.is_supported?(platform)
+ [:ios, :mac].include?(platform)
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :path,
+ env_name: "FL_INSTALL_PROVISIONING_PROFILE_PATH",
+ description: "Path to provisioning profile",
+ optional: false,
+ type: String,
+ verify_block: proc do |value|
+ absolute_path = File.expand_path(value)
+ unless File.exist?(absolute_path)
+ UI.user_error!("Could not find provisioning profile at path: '#{value}'")
+ end
+ end)
+ ]
+ end
+
+ def self.return_value
+ "The absolute path to the installed provisioning profile"
+ end
+
+ def self.return_type
+ :string
+ end
+
+ def self.example_code
+ [
+ 'install_provisioning_profile(path: "profiles/profile.mobileprovision")'
+ ]
+ end
+ end
+ end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_xcode_plugin.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_xcode_plugin.rb
new file mode 100644
index 0000000..774b679
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/install_xcode_plugin.rb
@@ -0,0 +1,91 @@
+module Fastlane
+ module Actions
+ class InstallXcodePluginAction < Action
+ def self.run(params)
+ require 'fileutils'
+
+ if params[:github]
+ base_api_url = params[:github].sub('https://github.com', 'https://api.github.com/repos')
+
+ GithubApiAction.run(
+ url: File.join(base_api_url, 'releases/latest'),
+ http_method: 'GET',
+ error_handlers: {
+ 404 => proc do |result|
+ UI.error("No latest release found for the specified GitHub repository")
+ end,
+ '*' => proc do |result|
+ UI.error("GitHub responded with #{result[:status]}: #{result[:body]}")
+ end
+ }
+ ) do |result|
+ return nil if result[:json].nil?
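A short sketch of the `install_provisioning_profile` action shown above, using its documented string return value; the profile path is a placeholder.

    # Sketch: install a profile for the current user and log where it landed.
    installed_path = install_provisioning_profile(path: "profiles/ci.mobileprovision")
    UI.message("Profile installed at #{installed_path}")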
+ params[:url] = result[:json]['assets'][0]['browser_download_url'] + end + end + + zip_path = File.join(Dir.tmpdir, 'plugin.zip') + sh("curl -Lso #{zip_path} #{params[:url]}") + plugins_path = "#{ENV['HOME']}/Library/Application Support/Developer/Shared/Xcode/Plug-ins" + FileUtils.mkdir_p(plugins_path) + Action.sh("unzip -qo '#{zip_path}' -d '#{plugins_path}'") + + UI.success("Plugin #{File.basename(params[:url], '.zip')} installed successfully") + UI.message("Please restart Xcode to use the newly installed plugin") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Install an Xcode plugin for the current user" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :url, + env_name: "FL_XCODE_PLUGIN_URL", + description: "URL for Xcode plugin ZIP file", + verify_block: proc do |value| + UI.user_error!("No URL for InstallXcodePluginAction given, pass using `url: 'url'`") if value.to_s.length == 0 + UI.user_error!("URL doesn't use HTTPS") unless value.start_with?("https://") + end), + FastlaneCore::ConfigItem.new(key: :github, + env_name: "FL_XCODE_PLUGIN_GITHUB", + description: "GitHub repository URL for Xcode plugin", + optional: true, + verify_block: proc do |value| + UI.user_error!("No GitHub URL for InstallXcodePluginAction given, pass using `github: 'url'`") if value.to_s.length == 0 + UI.user_error!("URL doesn't use HTTPS") unless value.start_with?("https://") + end) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["NeoNachoSoto", "tommeier"] + end + + def self.is_supported?(platform) + [:ios, :mac, :tvos, :watchos, :caros].include?(platform) + end + + def self.example_code + [ + 'install_xcode_plugin(url: "https://example.com/clubmate/plugin.zip")', + 'install_xcode_plugin(github: "https://github.com/contentful/ContentfulXcodePlugin")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/installr.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/installr.rb new file mode 100644 index 0000000..0fd412e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/installr.rb @@ -0,0 +1,129 @@ +module Fastlane + module Actions + module SharedValues + INSTALLR_BUILD_INFORMATION = :INSTALLR_BUILD_INFORMATION + end + + class InstallrAction < Action + INSTALLR_API = "https://www.installrapp.com/apps.json" + + def self.run(params) + UI.success('Upload to Installr has been started. 
This may take some time.') + + response = self.upload_build(params) + + case response.status + when 200...300 + Actions.lane_context[SharedValues::INSTALLR_BUILD_INFORMATION] = response.body + UI.success('Build successfully uploaded to Installr!') + else + UI.user_error!("Error when trying to upload build file to Installr: #{response.body}") + end + end + + def self.upload_build(params) + require 'faraday' + require 'faraday_middleware' + + url = INSTALLR_API + connection = Faraday.new(url) do |builder| + builder.request(:multipart) + builder.request(:url_encoded) + builder.response(:json, content_type: /\bjson$/) + builder.use(FaradayMiddleware::FollowRedirects) + builder.adapter(:net_http) + end + + options = {} + options[:qqfile] = Faraday::UploadIO.new(params[:ipa], 'application/octet-stream') + + if params[:notes] + options[:releaseNotes] = params[:notes] + end + + if params[:notify] + options[:notify] = params[:notify] + end + + if params[:add] + options[:add] = params[:add] + end + + post_request = connection.post do |req| + req.headers['X-InstallrAppToken'] = params[:api_token] + req.body = options + end + + post_request.on_complete do |env| + yield(env[:status], env[:body]) if block_given? + end + end + + def self.description + "Upload a new build to [Installr](http://installrapp.com/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "INSTALLR_API_TOKEN", + sensitive: true, + description: "API Token for Installr Access", + verify_block: proc do |value| + UI.user_error!("No API token for Installr given, pass using `api_token: 'token'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "INSTALLR_IPA_PATH", + description: "Path to your IPA file. Optional if you use the _gym_ or _xcodebuild_ action", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find build file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :notes, + env_name: "INSTALLR_NOTES", + description: "Release notes", + optional: true), + FastlaneCore::ConfigItem.new(key: :notify, + env_name: "INSTALLR_NOTIFY", + description: "Groups to notify (e.g. 'dev,qa')", + optional: true), + FastlaneCore::ConfigItem.new(key: :add, + env_name: "INSTALLR_ADD", + description: "Groups to add (e.g. 'exec,ops')", + optional: true) + ] + end + + def self.output + [ + ['INSTALLR_BUILD_INFORMATION', 'Contains release info like :appData. 
See http://help.installrapp.com/api/'] + ] + end + + def self.authors + ["scottrhoyt"] + end + + def self.is_supported?(platform) + [:ios].include?(platform) + end + + def self.example_code + [ + 'installr( + api_token: "...", + ipa: "test.ipa", + notes: "The next great version of the app!", + notify: "dev,qa", + add: "exec,ops" + )' + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ipa.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ipa.rb new file mode 100644 index 0000000..7f641af --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ipa.rb @@ -0,0 +1,246 @@ +# rubocop:disable Lint/AssignmentInCondition +module Fastlane + module Actions + module SharedValues + IPA_OUTPUT_PATH ||= :IPA_OUTPUT_PATH # originally defined in BuildIosAppAction + DSYM_OUTPUT_PATH ||= :DSYM_OUTPUT_PATH # originally defined in BuildIosAppAction + end + + ARGS_MAP = { + workspace: '-w', + project: '-p', + configuration: '-c', + scheme: '-s', + clean: '--clean', + archive: '--archive', + destination: '-d', + embed: '-m', + identity: '-i', + sdk: '--sdk', + ipa: '--ipa', + xcconfig: '--xcconfig', + xcargs: '--xcargs' + } + + class IpaAction < Action + def self.is_supported?(platform) + platform == :ios + end + + def self.run(params) + Actions.verify_gem!('krausefx-shenzhen') + + # The output directory of the IPA and dSYM + absolute_dest_directory = nil + + # Used to get the final path of the IPA and dSYM + if dest = params[:destination] + absolute_dest_directory = File.expand_path(dest) + end + + # The args we will build with + # Maps nice developer build parameters to Shenzhen args + build_args = params_to_build_args(params) + + unless params[:scheme] + UI.important("You haven't specified a scheme. This might cause problems. If you can't see any output, please pass a `scheme`") + end + + # If no dest directory given, default to current directory + absolute_dest_directory ||= Dir.pwd + + if Helper.test? + Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] = File.join(absolute_dest_directory, 'test.ipa') + Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH] = File.join(absolute_dest_directory, 'test.app.dSYM.zip') + return build_args + end + + # Joins args into space delimited string + build_args = build_args.join(' ') + + core_command = "krausefx-ipa build #{build_args} --verbose | xcpretty" + command = "set -o pipefail && #{core_command}" + UI.verbose(command) + + begin + Actions.sh(command) + + # Finds absolute path of IPA and dSYM + absolute_ipa_path = find_ipa_file(absolute_dest_directory) + absolute_dsym_path = find_dsym_file(absolute_dest_directory) + + # Sets shared values to use after this action is performed + Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] = absolute_ipa_path + Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH] = absolute_dsym_path + ENV[SharedValues::IPA_OUTPUT_PATH.to_s] = absolute_ipa_path # for deliver + ENV[SharedValues::DSYM_OUTPUT_PATH.to_s] = absolute_dsym_path + + deprecation_warning + rescue => ex + [ + "-------------------------------------------------------", + "Original Error:", + " => " + ex.to_s, + "A build error occurred. 
You are using legacy `shenzhen` for building",
+ "It is recommended to upgrade to _gym_: ",
+ "https://docs.fastlane.tools/actions/gym/",
+ core_command,
+ "-------------------------------------------------------"
+ ].each do |txt|
+ UI.error(txt)
+ end
+
+ # Raise a custom exception, as the normal one is useless for the user
+ UI.user_error!("A build error occurred; this is usually related to code signing. Take a look at the error above")
+ end
+ end
+
+ def self.params_to_build_args(config)
+ params = config.values
+
+ params = params.delete_if { |k, v| v.nil? }
+ params = fill_in_default_values(params)
+
+ # Maps nice developer param names to Shenzhen's `ipa build` arguments
+ params.collect do |k, v|
+ v ||= ''
+ if ARGS_MAP[k]
+ if k == :clean
+ v == true ? '--clean' : '--no-clean'
+ elsif k == :archive
+ v == true ? '--archive' : '--no-archive'
+ else
+ value = (v.to_s.length > 0 ? "\"#{v}\"" : '')
+ "#{ARGS_MAP[k]} #{value}".strip
+ end
+ end
+ end.compact
+ end
+
+ def self.fill_in_default_values(params)
+ embed = Actions.lane_context[Actions::SharedValues::SIGH_PROFILE_PATH] || ENV["SIGH_PROFILE_PATH"]
+ params[:embed] ||= embed if embed
+ params
+ end
+
+ def self.find_ipa_file(dir)
+ # Finds last modified .ipa in the destination directory
+ Dir[File.join(dir, '*.ipa')].sort { |a, b| File.mtime(b) <=> File.mtime(a) }.first
+ end
+
+ def self.find_dsym_file(dir)
+ # Finds last modified .dSYM.zip in the destination directory
+ Dir[File.join(dir, '*.dSYM.zip')].sort { |a, b| File.mtime(b) <=> File.mtime(a) }.first
+ end
+
+ def self.description
+ "Easily build and sign your app using shenzhen"
+ end
+
+ def self.available_options
+ [
+ FastlaneCore::ConfigItem.new(key: :workspace,
+ env_name: "IPA_WORKSPACE",
+ description: "Workspace (.xcworkspace) file to use to build app (automatically detected in current directory)",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :project,
+ env_name: "IPA_PROJECT",
+ description: "Project (.xcodeproj) file to use to build app (automatically detected in current directory, overridden by --workspace option, if passed)",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :configuration,
+ env_name: "IPA_CONFIGURATION",
+ description: "Configuration used to build",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :scheme,
+ env_name: "IPA_SCHEME",
+ description: "Scheme used to build app",
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :clean,
+ env_name: "IPA_CLEAN",
+ description: "Clean project before building",
+ optional: true,
+ type: Boolean),
+ FastlaneCore::ConfigItem.new(key: :archive,
+ env_name: "IPA_ARCHIVE",
+ description: "Archive project after building",
+ optional: true,
+ type: Boolean),
+ FastlaneCore::ConfigItem.new(key: :destination,
+ env_name: "IPA_DESTINATION",
+ description: "Build destination.
Defaults to current directory", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :embed, + env_name: "IPA_EMBED", + description: "Sign .ipa file with .mobileprovision", + optional: true), + FastlaneCore::ConfigItem.new(key: :identity, + env_name: "IPA_IDENTITY", + description: "Identity to be used along with --embed", + optional: true), + FastlaneCore::ConfigItem.new(key: :sdk, + env_name: "IPA_SDK", + description: "Use SDK as the name or path of the base SDK when building the project", + optional: true), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "IPA_IPA", + description: "Specify the name of the .ipa file to generate (including file extension)", + optional: true), + FastlaneCore::ConfigItem.new(key: :xcconfig, + env_name: "IPA_XCCONFIG", + description: "Use an extra XCCONFIG file to build the app", + optional: true), + FastlaneCore::ConfigItem.new(key: :xcargs, + env_name: "IPA_XCARGS", + description: "Pass additional arguments to xcodebuild when building the app. Be sure to quote multiple args", + optional: true, + type: :shell_string) + ] + end + + def self.output + [ + ['IPA_OUTPUT_PATH', 'The path to the newly generated ipa file'], + ['DSYM_OUTPUT_PATH', 'The path to the dsym file'] + ] + end + + def self.author + "joshdholtz" + end + + def self.example_code + [ + 'ipa( + workspace: "MyApp.xcworkspace", + configuration: "Debug", + scheme: "MyApp", + # (optionals) + clean: true, # This means "Do Clean". Cleans project before building (the default if not specified). + destination: "path/to/dir", # Destination directory. Defaults to current directory. + ipa: "my-app.ipa", # specify the name of the .ipa file to generate (including file extension) + xcargs: "MY_ADHOC=0", # pass additional arguments to xcodebuild when building the app. + embed: "my.mobileprovision", # Sign .ipa file with .mobileprovision + identity: "MyIdentity", # Identity to be used along with --embed + sdk: "10.0", # use SDK as the name or path of the base SDK when building the project. + archive: true # this means "Do Archive". Archive project after building (the default if not specified). + )' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + [ + "You are using legacy `shenzhen` to build your app, which will be removed soon!", + "It is recommended to upgrade to _gym_.", + "To do so, just replace `ipa(...)` with `gym(...)` in your Fastfile.", + "To make code signing work, follow [https://docs.fastlane.tools/codesigning/xcode-project/](https://docs.fastlane.tools/codesigning/xcode-project/)." + ].join("\n") + end + end + end +end +# rubocop:enable Lint/AssignmentInCondition diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/is_ci.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/is_ci.rb new file mode 100644 index 0000000..d1aa07e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/is_ci.rb @@ -0,0 +1,51 @@ +module Fastlane + module Actions + class IsCiAction < Action + def self.run(params) + Helper.ci? 
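To illustrate the migration that the `ipa` deprecation notes above recommend, a hedged before/after sketch: scheme and workspace names are placeholders, and `output_directory` is assumed here as gym's rough counterpart of ipa's `:destination`.

    # Before (deprecated shenzhen-based action):
    # ipa(workspace: "MyApp.xcworkspace", scheme: "MyApp", clean: true, destination: "build")
    # After (sketch; assumes gym's documented workspace/scheme/clean options):
    gym(
      workspace: "MyApp.xcworkspace",
      scheme: "MyApp",
      clean: true,
      output_directory: "build"
    )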
+ end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Is the current run being executed on a CI system, like Jenkins or Travis" + end + + def self.details + "The return value of this method is true if fastlane is currently executed on Travis, Jenkins, Circle or a similar CI service" + end + + def self.available_options + [] + end + + def self.return_type + :bool + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'if is_ci + puts "I\'m a computer" + else + say "Hi Human!" + end' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jazzy.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jazzy.rb new file mode 100644 index 0000000..6877f8b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jazzy.rb @@ -0,0 +1,59 @@ +module Fastlane + module Actions + class JazzyAction < Action + def self.run(params) + Actions.verify_gem!('jazzy') + command = "jazzy" + command << " --config #{params[:config]}" if params[:config] + command << " --module-version #{params[:module_version]}" if params[:module_version] + Actions.sh(command) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Generate docs using Jazzy" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :config, + env_name: 'FL_JAZZY_CONFIG', + description: 'Path to jazzy config file', + optional: true), + FastlaneCore::ConfigItem.new(key: :module_version, + env_name: 'FL_JAZZY_MODULE_VERSION', + description: 'Version string to use as part of the the default docs title and inside the docset', + optional: true) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'jazzy', + 'jazzy(config: ".jazzy.yaml", module_version: "2.1.37")' + ] + end + + def self.category + :documentation + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jira.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jira.rb new file mode 100644 index 0000000..e42c15d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/jira.rb @@ -0,0 +1,162 @@ +module Fastlane + module Actions + module SharedValues + JIRA_JSON = :JIRA_JSON + end + + class JiraAction < Action + def self.run(params) + Actions.verify_gem!('jira-ruby') + require 'jira-ruby' + + site = params[:url] + auth_type = :basic + context_path = params[:context_path] + username = params[:username] + password = params[:password] + ticket_id = params[:ticket_id] + comment_text = params[:comment_text] + + options = { + site: site, + context_path: context_path, + auth_type: auth_type, + username: username, + password: password + } + + begin + client = JIRA::Client.new(options) + issue = client.Issue.find(ticket_id) + comment = issue.comments.build + comment.save({ 'body' => comment_text }) + + # An exact representation of the JSON returned from the JIRA API + # 
https://github.com/sumoheavy/jira-ruby/blob/master/lib/jira/base.rb#L67 + json_response = comment.attrs + raise 'Failed to add a comment on Jira ticket' if json_response.nil? + + Actions.lane_context[SharedValues::JIRA_JSON] = json_response + UI.success('Successfully added a comment on Jira ticket') + return json_response + rescue => exception + message = "Received exception when adding a Jira comment: #{exception}" + if params[:fail_on_error] + UI.user_error!(message) + else + UI.error(message) + end + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Leave a comment on a Jira ticket" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :url, + env_name: "FL_JIRA_SITE", + description: "URL for Jira instance", + verify_block: proc do |value| + UI.user_error!("No url for Jira given, pass using `url: 'url'`") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :context_path, + env_name: "FL_JIRA_CONTEXT_PATH", + description: "Appends to the url (ex: \"/jira\")", + optional: true, + default_value: ""), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FL_JIRA_USERNAME", + description: "Username for Jira instance", + verify_block: proc do |value| + UI.user_error!("No username") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_JIRA_PASSWORD", + description: "Password or API token for Jira", + sensitive: true, + verify_block: proc do |value| + UI.user_error!("No password") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :ticket_id, + env_name: "FL_JIRA_TICKET_ID", + description: "Ticket ID for Jira, i.e. IOS-123", + verify_block: proc do |value| + UI.user_error!("No Ticket specified") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :comment_text, + env_name: "FL_JIRA_COMMENT_TEXT", + description: "Text to add to the ticket as a comment", + verify_block: proc do |value| + UI.user_error!("No comment specified") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :fail_on_error, + env_name: "FL_JIRA_FAIL_ON_ERROR", + description: "Should an error adding the Jira comment cause a failure?", + type: Boolean, + optional: true, + default_value: true) # Default value is true for 'Backward compatibility' + ] + end + + def self.output + [ + ['JIRA_JSON', 'The whole Jira API JSON object'] + ] + end + + def self.return_value + [ + "A hash containing all relevant information of the Jira comment", + "Access Jira comment 'id', 'author', 'body', and more" + ].join("\n") + end + + def self.return_type + :hash + end + + def self.authors + ["iAmChrisTruman", "crazymanish"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'jira( + url: "https://bugs.yourdomain.com", + username: "Your username", + password: "Your password or API token", + ticket_id: "IOS-123", + comment_text: "Text to post as a comment" + )', # How to get API token: https://developer.atlassian.com/cloud/jira/platform/basic-auth-for-rest-apis/#get-an-api-token + 'jira( + url: "https://yourserverdomain.com", + context_path: "/jira", + username: "Your username", + password: "Your password or API token", + ticket_id: "IOS-123", + comment_text: "Text to post as a comment" + )', + 'jira( + ticket_id: "IOS-123", + comment_text: "Text to post as a comment", + fail_on_error: false + )' + ] + end + + def self.category + :misc + end + end 
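A sketch of consuming the action's return value; the field names follow the return-value docs above, while the URL and credentials are placeholders.

    # Sketch: add a comment and use the returned JSON, which is also mirrored
    # into lane_context[SharedValues::JIRA_JSON].
    comment = jira(
      url: "https://bugs.yourdomain.com",
      username: ENV["JIRA_USER"],
      password: ENV["JIRA_API_TOKEN"],
      ticket_id: "IOS-123",
      comment_text: "Build uploaded to TestFlight",
      fail_on_error: false            # log instead of aborting the lane
    )
    UI.message("Added Jira comment #{comment['id']}") if comment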
+ end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lane_context.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lane_context.rb new file mode 100644 index 0000000..bbea26c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lane_context.rb @@ -0,0 +1,60 @@ +module Fastlane + module Actions + class LaneContextAction < Action + def self.run(params) + Actions.lane_context + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Access lane context values" + end + + def self.details + [ + "Access the fastlane lane context values.", + "More information about how the lane context works: [https://docs.fastlane.tools/advanced/#lane-context](https://docs.fastlane.tools/advanced/#lane-context)." + ].join("\n") + end + + def self.available_options + [] + end + + def self.output + [] + end + + def self.return_type + :hash + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + # We don't want to show this as step + def self.step_text + nil + end + + def self.example_code + [ + 'lane_context[SharedValues::BUILD_NUMBER]', + 'lane_context[SharedValues::IPA_OUTPUT_PATH]' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_commit.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_commit.rb new file mode 100644 index 0000000..01a19bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_commit.rb @@ -0,0 +1,58 @@ +module Fastlane + module Actions + class LastGitCommitAction < Action + def self.run(params) + Actions.last_git_commit_dict + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Return last git commit hash, abbreviated commit hash, commit message and author" + end + + def self.return_value + "Returns the following dict: {commit_hash: \"commit hash\", abbreviated_commit_hash: \"abbreviated commit hash\" author: \"Author\", author_email: \"author email\", message: \"commit message\"}" + end + + def self.return_type + :hash_of_strings + end + + def self.author + "ngutman" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'commit = last_git_commit + pilot(changelog: commit[:message]) # message of commit + author = commit[:author] # author of the commit + author_email = commit[:author_email] # email of the author of the commit + hash = commit[:commit_hash] # long sha of commit + short_hash = commit[:abbreviated_commit_hash] # short sha of commit' + ] + end + + def self.category + :source_control + end + + def self.sample_return_value + { + message: "message", + author: "author", + author_email: "author_email", + commit_hash: "commit_hash", + abbreviated_commit_hash: "short_hash" + } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_tag.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_tag.rb new file mode 100644 index 0000000..878876f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/last_git_tag.rb @@ 
-0,0 +1,60 @@ +module Fastlane + module Actions + class LastGitTagAction < Action + def self.run(params) + Actions.last_git_tag_name(true, params[:pattern]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Get the most recent git tag" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :pattern, + description: "Pattern to filter tags when looking for last one. Limit tags to ones matching given shell glob. If pattern lacks ?, *, or [, * at the end is implied", + default_value: nil, + optional: true) + ] + end + + def self.output + [] + end + + def self.return_type + :string + end + + def self.authors + ["KrauseFx", "wedkarz"] + end + + def self.is_supported?(platform) + true + end + + def self.details + [ + "If you are using this action on a **shallow clone**, *the default with some CI systems like Bamboo*, you need to ensure that you have also pulled all the git tags appropriately. Assuming your git repo has the correct remote set you can issue `sh('git fetch --tags')`.", + "Pattern parameter allows you to filter to a subset of tags." + ].join("\n") + end + + def self.example_code + [ + 'last_git_tag', + 'last_git_tag(pattern: "release/v1.0/")' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/latest_testflight_build_number.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/latest_testflight_build_number.rb new file mode 100644 index 0000000..d3c44d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/latest_testflight_build_number.rb @@ -0,0 +1,154 @@ +require 'credentials_manager' + +module Fastlane + module Actions + module SharedValues + LATEST_TESTFLIGHT_BUILD_NUMBER = :LATEST_TESTFLIGHT_BUILD_NUMBER + LATEST_TESTFLIGHT_VERSION = :LATEST_TESTFLIGHT_VERSION + end + + class LatestTestflightBuildNumberAction < Action + def self.run(params) + build_v, build_nr = AppStoreBuildNumberAction.get_build_version_and_number(params) + + Actions.lane_context[SharedValues::LATEST_TESTFLIGHT_BUILD_NUMBER] = build_nr + Actions.lane_context[SharedValues::LATEST_TESTFLIGHT_VERSION] = build_v + return build_nr + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Fetches most recent build number from TestFlight" + end + + def self.details + [ + "Provides a way to have `increment_build_number` be based on the latest build you uploaded to iTC.", + "Fetches the most recent build number from TestFlight based on the version number. Provides a way to have `increment_build_number` be based on the latest build you uploaded to iTC." 
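A sketch combining the options documented below with an App Store Connect API key; the key path and bundle identifier are placeholders.

    # Sketch: fetch the latest TestFlight build number without a user session.
    build_number = latest_testflight_build_number(
      api_key_path: "fastlane/app_store_connect_api_key.json",  # placeholder path
      app_identifier: "com.example.app",
      version: "1.3",
      initial_build_number: 0   # used when the current train has no builds yet
    )
    increment_build_number(build_number: build_number + 1)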
+ ].join("\n") + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["APPSTORE_BUILD_NUMBER_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["APPSTORE_BUILD_NUMBER_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :live, + short_option: "-l", + env_name: "CURRENT_BUILD_NUMBER_LIVE", + description: "Query the live version (ready-for-sale)", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :app_identifier, + short_option: "-a", + env_name: "FASTLANE_APP_IDENTIFIER", + description: "The bundle identifier of your app", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "ITUNESCONNECT_USER", + description: "Your Apple ID Username", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "LATEST_VERSION", + description: "The version number whose latest build number we want", + optional: true), + FastlaneCore::ConfigItem.new(key: :platform, + short_option: "-j", + env_name: "APPSTORE_PLATFORM", + description: "The platform to use (optional)", + optional: true, + default_value: "ios", + verify_block: proc do |value| + UI.user_error!("The platform can only be ios, osx, or appletvos") unless %('osx', ios', 'appletvos').include?(value) + end), + FastlaneCore::ConfigItem.new(key: :initial_build_number, + env_name: "INITIAL_BUILD_NUMBER", + description: "sets the build number to given value if no build is in current train", + default_value: 1, + skip_type_validation: true), # allow Integer, String + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-k", + env_name: "LATEST_TESTFLIGHT_BUILD_NUMBER_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # allow Integer, String + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-e", + env_name: "LATEST_TESTFLIGHT_BUILD_NUMBER_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true) + ] + end + + def self.output + [ + ['LATEST_TESTFLIGHT_BUILD_NUMBER', 'The latest build number of the latest version of the app uploaded to TestFlight'], + 
['LATEST_TESTFLIGHT_VERSION', 'The version of the latest build number'] + ] + end + + def self.return_value + "Integer representation of the latest build number uploaded to TestFlight" + end + + def self.return_type + :int + end + + def self.authors + ["daveanderson"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'latest_testflight_build_number(version: "1.3")', + 'increment_build_number({ + build_number: latest_testflight_build_number + 1 + })' + ] + end + + def self.sample_return_value + 2 + end + + def self.category + :app_store_connect + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lcov.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lcov.rb new file mode 100644 index 0000000..7b6bc5a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/lcov.rb @@ -0,0 +1,98 @@ +module Fastlane + module Actions + class LcovAction < Action + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.run(options) + unless Helper.test? + UI.user_error!("lcov not installed, please install using `brew install lcov`") if `which lcov`.length == 0 + end + gen_cov(options) + end + + def self.description + "Generates coverage data using lcov" + end + + def self.available_options + [ + + FastlaneCore::ConfigItem.new(key: :project_name, + env_name: "FL_LCOV_PROJECT_NAME", + description: "Name of the project"), + FastlaneCore::ConfigItem.new(key: :scheme, + env_name: "FL_LCOV_SCHEME", + description: "Scheme of the project"), + FastlaneCore::ConfigItem.new(key: :arch, + env_name: "FL_LCOV_ARCH", + description: "The build arch where will search .gcda files", + default_value: "i386"), + FastlaneCore::ConfigItem.new(key: :output_dir, + env_name: "FL_LCOV_OUTPUT_DIR", + description: "The output directory that coverage data will be stored. If not passed will use coverage_reports as default value", + optional: true, + default_value: "coverage_reports") + ] + end + + def self.author + "thiagolioy" + end + + def self.gen_cov(options) + tmp_cov_file = "/tmp/coverage.info" + output_dir = options[:output_dir] + derived_data_path = derived_data_dir(options) + + system("lcov --capture --directory \"#{derived_data_path}\" --output-file #{tmp_cov_file}") + system(gen_lcov_cmd(tmp_cov_file)) + system("genhtml #{tmp_cov_file} --output-directory #{output_dir}") + end + + def self.gen_lcov_cmd(cov_file) + cmd = "lcov " + exclude_dirs.each do |e| + cmd << "--remove #{cov_file} \"#{e}\" " + end + cmd << "--output #{cov_file} " + end + + def self.derived_data_dir(options) + pn = options[:project_name] + sc = options[:scheme] + arch = options[:arch] + + initial_path = "#{Dir.home}/Library/Developer/Xcode/DerivedData/" + end_path = "/Build/Intermediates/#{pn}.build/Debug-iphonesimulator/#{sc}.build/Objects-normal/#{arch}/" + + match = find_project_dir(pn, initial_path) + + "#{initial_path}#{match}#{end_path}" + end + + def self.find_project_dir(project_name, path) + `ls -t #{path}| grep #{project_name} | head -1`.to_s.delete("\n") + end + + def self.exclude_dirs + ["/Applications/*", "/Frameworks/*"] + end + + def self.example_code + [ + 'lcov( + project_name: "ProjectName", + scheme: "yourScheme", + output_dir: "cov_reports" # This value is optional. 
Default is coverage_reports
+          )'
+        ]
+      end
+
+      def self.category
+        :testing
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/mailgun.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/mailgun.rb
new file mode 100644
index 0000000..df70ba2
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/mailgun.rb
@@ -0,0 +1,191 @@
+require 'fastlane/erb_template_helper'
+
+module Fastlane
+  module Actions
+    class MailgunAction < Action
+      def self.is_supported?(platform)
+        true
+      end
+
+      def self.run(options)
+        Actions.verify_gem!('rest-client')
+        require 'rest-client'
+        handle_params_transition(options)
+        mailgunit(options)
+      end
+
+      def self.description
+        "Send a success/error message to an email group"
+      end
+
+      def self.available_options
+        [
+          # This is here just for a while due to the transition, not needed anymore
+          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_domain,
+                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
+                                       optional: true,
+                                       description: "Mailgun sandbox domain for your mail. Not needed anymore"),
+          # This is here just for a while due to the transition, should use postmaster instead
+          FastlaneCore::ConfigItem.new(key: :mailgun_sandbox_postmaster,
+                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
+                                       optional: true,
+                                       description: "Mailgun sandbox domain postmaster for your mail. Please use postmaster instead"),
+          # This is here just for a while due to the transition, should use apikey instead
+          FastlaneCore::ConfigItem.new(key: :mailgun_apikey,
+                                       env_name: "MAILGUN_APIKEY",
+                                       sensitive: true,
+                                       optional: true,
+                                       description: "Mailgun apikey for your mail. Please use apikey instead"),
+
+          FastlaneCore::ConfigItem.new(key: :postmaster,
+                                       env_name: "MAILGUN_SANDBOX_POSTMASTER",
+                                       description: "Mailgun sandbox domain postmaster for your mail"),
+          FastlaneCore::ConfigItem.new(key: :apikey,
+                                       env_name: "MAILGUN_APIKEY",
+                                       sensitive: true,
+                                       description: "Mailgun apikey for your mail"),
+          FastlaneCore::ConfigItem.new(key: :to,
+                                       env_name: "MAILGUN_TO",
+                                       description: "Destination of your mail"),
+          FastlaneCore::ConfigItem.new(key: :from,
+                                       env_name: "MAILGUN_FROM",
+                                       optional: true,
+                                       description: "Mailgun sender name",
+                                       default_value: "Mailgun Sandbox"),
+          FastlaneCore::ConfigItem.new(key: :message,
+                                       env_name: "MAILGUN_MESSAGE",
+                                       description: "Message of your mail"),
+          FastlaneCore::ConfigItem.new(key: :subject,
+                                       env_name: "MAILGUN_SUBJECT",
+                                       description: "Subject of your mail",
+                                       optional: true,
+                                       default_value: "fastlane build"),
+          FastlaneCore::ConfigItem.new(key: :success,
+                                       env_name: "MAILGUN_SUCCESS",
+                                       description: "Was this build successful?
(true/false)", + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :app_link, + env_name: "MAILGUN_APP_LINK", + description: "App Release link", + optional: false), + FastlaneCore::ConfigItem.new(key: :ci_build_link, + env_name: "MAILGUN_CI_BUILD_LINK", + description: "CI Build Link", + optional: true), + FastlaneCore::ConfigItem.new(key: :template_path, + env_name: "MAILGUN_TEMPLATE_PATH", + description: "Mail HTML template", + optional: true), + FastlaneCore::ConfigItem.new(key: :reply_to, + env_name: "MAILGUN_REPLY_TO", + description: "Mail Reply to", + optional: true), + FastlaneCore::ConfigItem.new(key: :attachment, + env_name: "MAILGUN_ATTACHMENT", + description: "Mail Attachment filenames, either an array or just one string", + optional: true, + type: Array), + FastlaneCore::ConfigItem.new(key: :custom_placeholders, + short_option: "-p", + env_name: "MAILGUN_CUSTOM_PLACEHOLDERS", + description: "Placeholders for template given as a hash", + default_value: {}, + type: Hash) + ] + end + + def self.author + "thiagolioy" + end + + def self.handle_params_transition(options) + options[:postmaster] = options[:mailgun_sandbox_postmaster] if options[:mailgun_sandbox_postmaster] + puts("\nUsing :mailgun_sandbox_postmaster is deprecated, please change to :postmaster".yellow) if options[:mailgun_sandbox_postmaster] + + options[:apikey] = options[:mailgun_apikey] if options[:mailgun_apikey] + puts("\nUsing :mailgun_apikey is deprecated, please change to :apikey".yellow) if options[:mailgun_apikey] + end + + def self.mailgunit(options) + sandbox_domain = options[:postmaster].split("@").last + params = { + from: "#{options[:from]} <#{options[:postmaster]}>", + to: (options[:to]).to_s, + subject: options[:subject], + html: mail_template(options) + } + unless options[:reply_to].nil? + params.store(:"h:Reply-To", options[:reply_to]) + end + + unless options[:attachment].nil? 
+ attachment_filenames = [*options[:attachment]] + attachments = attachment_filenames.map { |filename| File.new(filename, 'rb') } + params.store(:attachment, attachments) + end + + RestClient.post("https://api:#{options[:apikey]}@api.mailgun.net/v3/#{sandbox_domain}/messages", params) + mail_template(options) + end + + def self.mail_template(options) + hash = { + author: Actions.git_author_email, + last_commit: Actions.last_git_commit_message, + message: options[:message], + app_link: options[:app_link] + } + hash[:success] = options[:success] + hash[:ci_build_link] = options[:ci_build_link] + + # concatenate with custom placeholders passed by user + hash = hash.merge(options[:custom_placeholders]) + + # grabs module + eth = Fastlane::ErbTemplateHelper + + # create html from template + html_template_path = options[:template_path] + if html_template_path && File.exist?(html_template_path) + html_template = eth.load_from_path(html_template_path) + else + html_template = eth.load("mailgun_html_template") + end + eth.render(html_template, hash) + end + + def self.example_code + [ + 'mailgun( + to: "fastlane@krausefx.com", + success: true, + message: "This is the mail\'s content" + )', + 'mailgun( + postmaster: "MY_POSTMASTER", + apikey: "MY_API_KEY", + to: "DESTINATION_EMAIL", + from: "EMAIL_FROM_NAME", + reply_to: "EMAIL_REPLY_TO", + success: true, + message: "Mail Body", + app_link: "http://www.myapplink.com", + ci_build_link: "http://www.mycibuildlink.com", + template_path: "HTML_TEMPLATE_PATH", + custom_placeholders: { + :var1 => 123, + :var2 => "string" + }, + attachment: "dirname/filename.ext" + )' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/make_changelog_from_jenkins.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/make_changelog_from_jenkins.rb new file mode 100644 index 0000000..49bc753 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/make_changelog_from_jenkins.rb @@ -0,0 +1,81 @@ +module Fastlane + module Actions + module SharedValues + FL_CHANGELOG ||= :FL_CHANGELOG # originally defined in ChangelogFromGitCommitsAction + end + + class MakeChangelogFromJenkinsAction < Action + def self.run(params) + require 'json' + require 'net/http' + + changelog = "" + + if Helper.ci? || Helper.test? + # The "BUILD_URL" environment variable is set automatically by Jenkins in every build + jenkins_api_url = URI(ENV["BUILD_URL"] + "api/json\?wrapper\=changes\&xpath\=//changeSet//comment") + begin + json = JSON.parse(Net::HTTP.get(jenkins_api_url)) + json['changeSet']['items'].each do |item| + comment = params[:include_commit_body] ? item['comment'] : item['msg'] + changelog << comment.strip + "\n" + end + rescue => ex + UI.error("Unable to read/parse changelog from jenkins: #{ex.message}") + end + end + + Actions.lane_context[SharedValues::FL_CHANGELOG] = changelog.strip.length > 0 ? changelog : params[:fallback_changelog] + end + + def self.description + "Generate a changelog using the Changes section from the current Jenkins build" + end + + def self.details + "This is useful when deploying automated builds. The changelog from Jenkins lists all the commit messages since the last build." 
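+        # A minimal sketch of typical usage in a Jenkins-driven lane; the result is
+        # returned (and stored in lane_context[SharedValues::FL_CHANGELOG]), so it
+        # can feed straight into an upload step:
+        #
+        #   changelog = make_changelog_from_jenkins(
+        #     fallback_changelog: "Bug fixes and performance enhancements"
+        #   )
+        #   upload_to_testflight(changelog: changelog)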
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :fallback_changelog, + description: "Fallback changelog if there is not one on Jenkins, or it couldn't be read", + optional: true, + default_value: ""), + FastlaneCore::ConfigItem.new(key: :include_commit_body, + description: "Include the commit body along with the summary", + optional: true, + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['FL_CHANGELOG', 'The changelog generated by Jenkins'] + ] + end + + def self.authors + ["mandrizzle"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'make_changelog_from_jenkins( + # Optional, lets you set a changelog in the case is not generated on Jenkins or if ran outside of Jenkins + fallback_changelog: "Bug fixes and performance enhancements" + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match.rb new file mode 100644 index 0000000..e5c488b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/sync_code_signing' + class MatchAction < SyncCodeSigningAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `sync_code_signing` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match_nuke.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match_nuke.rb new file mode 100644 index 0000000..6dfef3e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/match_nuke.rb @@ -0,0 +1,59 @@ +module Fastlane + module Actions + class MatchNukeAction < Action + def self.run(params) + require 'match' + + params.load_configuration_file("Matchfile") + params[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + + cert_type = Match.cert_type_sym(params[:type]) + UI.important("Going to revoke your '#{cert_type}' certificate type and provisioning profiles") + + Match::Nuke.new.run(params, type: cert_type) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Easily nuke your certificate and provisioning profiles (via _match_)" + end + + def self.details + [ + "Use the match_nuke action to revoke your certificates and provisioning profiles.", + "Don't worry, apps that are already available in the App Store / TestFlight will still work.", + "Builds distributed via Ad Hoc or Enterprise will be disabled after nuking your account, so you'll have to re-upload a new build.", + "After clearing your account you'll start from a clean state, and you can run match to generate your certificates and profiles again.", + "More information: https://docs.fastlane.tools/actions/match/" + ].join("\n") + end + + def self.available_options + require 'match' + Match::Options.available_options + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'match_nuke(type: "development")', # See all other options 
https://github.com/fastlane/fastlane/blob/master/match/lib/match/module.rb#L23 + 'match_nuke(type: "development", api_key: app_store_connect_api_key)' + ] + end + + def self.authors + ["crazymanish"] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/min_fastlane_version.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/min_fastlane_version.rb new file mode 100644 index 0000000..d1f410e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/min_fastlane_version.rb @@ -0,0 +1,57 @@ +module Fastlane + module Actions + module SharedValues + end + + class MinFastlaneVersionAction < Action + def self.run(params) + params = nil unless params.kind_of?(Array) + value = (params || []).first + defined_version = Gem::Version.new(value) if value + + UI.user_error!("Please pass minimum fastlane version as parameter to min_fastlane_version") unless defined_version + + if Gem::Version.new(Fastlane::VERSION) < defined_version + FastlaneCore::UpdateChecker.show_update_message('fastlane', Fastlane::VERSION) + error_message = "The Fastfile requires a fastlane version of >= #{defined_version}. You are on #{Fastlane::VERSION}." + UI.user_error!(error_message) + end + + UI.message("Your fastlane version #{Fastlane::VERSION} matches the minimum requirement of #{defined_version} ✅") + end + + def self.step_text + "Verifying fastlane version" + end + + def self.author + "KrauseFx" + end + + def self.description + "Verifies the minimum fastlane version required" + end + + def self.example_code + [ + 'min_fastlane_version("1.50.0")' + ] + end + + def self.details + [ + "Add this to your `Fastfile` to require a certain version of _fastlane_.", + "Use it if you use an action that just recently came out and you need it." + ].join("\n") + end + + def self.category + :misc + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/modify_services.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/modify_services.rb new file mode 100644 index 0000000..6a85fd2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/modify_services.rb @@ -0,0 +1,208 @@ +module Fastlane + module Actions + class ModifyServicesAction < Action + def self.run(params) + require 'produce' + + Produce.config = params + + Dir.chdir(FastlaneCore::FastlaneFolder.path || Dir.pwd) do + require 'produce/service' + services = params[:services] + + enabled_services = services.select { |_k, v| v == true || (v != false && v.to_s != 'off') }.map { |k, v| [k, v == true || v.to_s == 'on' ? 'on' : v] }.to_h + disabled_services = services.select { |_k, v| v == false || v.to_s == 'off' }.map { |k, v| [k, 'off'] }.to_h + + enabled_services_object = self.service_object + enabled_services.each do |k, v| + enabled_services_object.__hash__[k] = true + enabled_services_object.send("#{k}=", v) + end + Produce::Service.enable(enabled_services_object, nil) unless enabled_services.empty? + + disabled_services_object = self.service_object + disabled_services.each do |k, v| + disabled_services_object.__hash__[k] = true + disabled_services_object.send("#{k}=", v) + end + Produce::Service.disable(disabled_services_object, nil) unless disabled_services.empty? 
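+          # For illustration: given services such as
+          #   { push_notification: "on", wallet: :off, data_protection: true }
+          # the two selects above partition the hash into
+          #   enabled_services  => { push_notification: "on", data_protection: "on" }
+          #   disabled_services => { wallet: "off" }
+          # before each half is handed to Produce::Service.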
+ end + end + + def self.service_object + service_object = Object.new + service_object.class.module_eval { attr_accessor :__hash__ } + service_object.__hash__ = {} + Produce::DeveloperCenter::ALLOWED_SERVICES.keys.each do |service| + name = self.services_mapping[service] + service_object.class.module_eval { attr_accessor :"#{name}" } + end + service_object + end + + def self.services_mapping + { + access_wifi: 'access_wifi', + app_attest: 'app_attest', + app_group: 'app_group', + apple_pay: 'apple_pay', + associated_domains: 'associated_domains', + auto_fill_credential: 'auto_fill_credential', + class_kit: 'class_kit', + icloud: 'icloud', + custom_network_protocol: 'custom_network_protocol', + data_protection: 'data_protection', + extended_virtual_address_space: 'extended_virtual_address_space', + family_controls: 'family_controls', + file_provider_testing_mode: 'file_provider_testing_mode', + fonts: 'fonts', + game_center: 'game_center', + health_kit: 'health_kit', + hls_interstitial_preview: 'hls_interstitial_preview', + home_kit: 'home_kit', + hotspot: 'hotspot', + in_app_purchase: 'in_app_purchase', + inter_app_audio: 'inter_app_audio', + low_latency_hls: 'low_latency_hls', + managed_associated_domains: 'managed_associated_domains', + maps: 'maps', + multipath: 'multipath', + network_extension: 'network_extension', + nfc_tag_reading: 'nfc_tag_reading', + personal_vpn: 'personal_vpn', + passbook: 'passbook', + push_notification: 'push_notification', + sign_in_with_apple: 'sign_in_with_apple', + siri_kit: 'siri_kit', + system_extension: 'system_extension', + user_management: 'user_management', + vpn_configuration: 'vpn_configuration', + wallet: 'wallet', + wireless_accessory: 'wireless_accessory', + car_play_audio_app: 'car_play_audio_app', + car_play_messaging_app: 'car_play_messaging_app', + car_play_navigation_app: 'car_play_navigation_app', + car_play_voip_calling_app: 'car_play_voip_calling_app', + critical_alerts: 'critical_alerts', + hotspot_helper: 'hotspot_helper', + driver_kit: 'driver_kit', + driver_kit_endpoint_security: 'driver_kit_endpoint_security', + driver_kit_family_hid_device: 'driver_kit_family_hid_device', + driver_kit_family_networking: 'driver_kit_family_networking', + driver_kit_family_serial: 'driver_kit_family_serial', + driver_kit_hid_event_service: 'driver_kit_hid_event_service', + driver_kit_transport_hid: 'driver_kit_transport_hid', + multitasking_camera_access: 'multitasking_camera_access', + sf_universal_link_api: 'sf_universal_link_api', + vp9_decoder: 'vp9_decoder', + music_kit: 'music_kit', + shazam_kit: 'shazam_kit', + communication_notifications: 'communication_notifications', + group_activities: 'group_activities', + health_kit_estimate_recalibration: 'health_kit_estimate_recalibration', + time_sensitive_notifications: 'time_sensitive_notifications' + } + end + + def self.allowed_services_description + return Produce::DeveloperCenter::ALLOWED_SERVICES.map do |k, v| + "#{k}: (#{v.join('|')})(:on|:off)(true|false)" + end.join(", ") + end + + def self.description + 'Modifies the services of the app created on Developer Portal' + end + + def self.details + [ + "The options are the same as `:enable_services` in the [produce action](https://docs.fastlane.tools/actions/produce/#parameters_1)" + ].join("\n") + end + + def self.available_options + require 'produce' + user = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + [ + FastlaneCore::ConfigItem.new(key: 
:username, + short_option: "-u", + env_name: "PRODUCE_USERNAME", + description: "Your Apple ID Username", + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :app_identifier, + env_name: "PRODUCE_APP_IDENTIFIER", + short_option: "-a", + description: "App Identifier (Bundle ID, e.g. com.krausefx.app)", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :services, + display_in_shell: false, + env_name: "PRODUCE_ENABLE_SERVICES", + description: "Array with Spaceship App Services (e.g. #{allowed_services_description})", + type: Hash, + default_value: {}, + verify_block: proc do |value| + allowed_keys = Produce::DeveloperCenter::ALLOWED_SERVICES.keys + UI.user_error!("enable_services has to be of type Hash") unless value.kind_of?(Hash) + value.each do |key, v| + UI.user_error!("The key: '#{key}' is not supported in `enable_services' - following keys are available: [#{allowed_keys.join(',')}]") unless allowed_keys.include?(key.to_sym) + end + end), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-b", + env_name: "PRODUCE_TEAM_ID", + description: "The ID of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-l", + env_name: "PRODUCE_TEAM_NAME", + description: "The name of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_NAME"] = value.to_s + end) + ] + end + + def self.author + "bhimsenpadalkar" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'modify_services( + username: "test.account@gmail.com", + app_identifier: "com.someorg.app", + services: { + push_notification: "on", + associated_domains: "off", + wallet: :on, + apple_pay: :off, + data_protection: true, + multipath: false + })' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/nexus_upload.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/nexus_upload.rb new file mode 100644 index 0000000..4be9656 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/nexus_upload.rb @@ -0,0 +1,231 @@ +module Fastlane + module Actions + class NexusUploadAction < Action + def self.run(params) + command = [] + command << "curl" + command << verbose(params) + command << "--fail" + command += ssl_options(params) + command += proxy_options(params) + command += upload_options(params) + command << upload_url(params) + + Fastlane::Actions.sh(command.join(' '), log: params[:verbose]) + end + + def self.upload_url(params) + url = "#{params[:endpoint]}#{params[:mount_path]}" + + if params[:nexus_version] == 2 + url << "/service/local/artifact/maven/content" + else + file_extension = File.extname(params[:file]).shellescape + + url << "/repository/#{params[:repo_id]}" + url << "/#{params[:repo_group_id].gsub('.', '/')}" + url << 
"/#{params[:repo_project_name]}" + url << "/#{params[:repo_project_version]}" + url << "/#{params[:repo_project_name]}-#{params[:repo_project_version]}" + url << "-#{params[:repo_classifier]}" if params[:repo_classifier] + url << file_extension.to_s + end + + url.shellescape + end + + def self.verbose(params) + params[:verbose] ? "--verbose" : "--silent" + end + + def self.upload_options(params) + file_path = File.expand_path(params[:file]).shellescape + file_extension = file_path.split('.').last.shellescape + + options = [] + if params[:nexus_version] == 2 + options << "-F p=zip" + options << "-F hasPom=false" + options << "-F r=#{params[:repo_id].shellescape}" + options << "-F g=#{params[:repo_group_id].shellescape}" + options << "-F a=#{params[:repo_project_name].shellescape}" + options << "-F v=#{params[:repo_project_version].shellescape}" + + if params[:repo_classifier] + options << "-F c=#{params[:repo_classifier].shellescape}" + end + + options << "-F e=#{file_extension}" + options << "-F file=@#{file_path}" + else + options << "--upload-file #{file_path}" + end + + options << "-u #{params[:username].shellescape}:#{params[:password].shellescape}" + + options + end + + def self.ssl_options(params) + options = [] + unless params[:ssl_verify] + options << "--insecure" + end + + options + end + + def self.proxy_options(params) + options = [] + if params[:proxy_address] && params[:proxy_port] && params[:proxy_username] && params[:proxy_password] + options << "-x #{params[:proxy_address].shellescape}:#{params[:proxy_port].shellescape}" + options << "--proxy-user #{params[:proxy_username].shellescape}:#{params[:proxy_password].shellescape}" + end + + options + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload a file to [Sonatype Nexus platform](https://www.sonatype.com)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :file, + env_name: "FL_NEXUS_FILE", + description: "File to be uploaded to Nexus", + optional: false, + verify_block: proc do |value| + file_path = File.expand_path(value) + UI.user_error!("Couldn't find file at path '#{file_path}'") unless File.exist?(file_path) + end), + FastlaneCore::ConfigItem.new(key: :repo_id, + env_name: "FL_NEXUS_REPO_ID", + description: "Nexus repository id e.g. artefacts", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo_group_id, + env_name: "FL_NEXUS_REPO_GROUP_ID", + description: "Nexus repository group id e.g. com.company", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo_project_name, + env_name: "FL_NEXUS_REPO_PROJECT_NAME", + description: "Nexus repository commandect name. Only letters, digits, underscores(_), hyphens(-), and dots(.) are allowed", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo_project_version, + env_name: "FL_NEXUS_REPO_PROJECT_VERSION", + description: "Nexus repository commandect version", + optional: false), + FastlaneCore::ConfigItem.new(key: :repo_classifier, + env_name: "FL_NEXUS_REPO_CLASSIFIER", + description: "Nexus repository artifact classifier (optional)", + optional: true), + FastlaneCore::ConfigItem.new(key: :endpoint, + env_name: "FL_NEXUS_ENDPOINT", + description: "Nexus endpoint e.g. 
http://nexus:8081", + optional: false), + FastlaneCore::ConfigItem.new(key: :mount_path, + env_name: "FL_NEXUS_MOUNT_PATH", + description: "Nexus mount path (Nexus 3 instances have this configured as empty by default)", + default_value: "/nexus", + optional: true), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FL_NEXUS_USERNAME", + description: "Nexus username", + optional: false), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_NEXUS_PASSWORD", + description: "Nexus password", + optional: false), + FastlaneCore::ConfigItem.new(key: :ssl_verify, + env_name: "FL_NEXUS_SSL_VERIFY", + description: "Verify SSL", + type: Boolean, + default_value: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :nexus_version, + env_name: "FL_NEXUS_MAJOR_VERSION", + description: "Nexus major version", + type: Integer, + default_value: 2, + optional: true, + verify_block: proc do |value| + min_version = 2 + max_version = 3 + UI.user_error!("Unsupported version (#{value}) min. supported version: #{min_version}") unless value >= min_version + UI.user_error!("Unsupported version (#{value}) max. supported version: #{max_version}") unless value <= max_version + end), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_NEXUS_VERBOSE", + description: "Make detailed output", + type: Boolean, + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_username, + env_name: "FL_NEXUS_PROXY_USERNAME", + description: "Proxy username", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_password, + env_name: "FL_NEXUS_PROXY_PASSWORD", + sensitive: true, + description: "Proxy password", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_address, + env_name: "FL_NEXUS_PROXY_ADDRESS", + description: "Proxy address", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_port, + env_name: "FL_NEXUS_PROXY_PORT", + description: "Proxy port", + optional: true) + ] + end + + def self.authors + ["xfreebird", "mdio"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + '# for Nexus 2 + nexus_upload( + file: "/path/to/file.ipa", + repo_id: "artefacts", + repo_group_id: "com.fastlane", + repo_project_name: "ipa", + repo_project_version: "1.13", + repo_classifier: "dSYM", # Optional + endpoint: "http://localhost:8081", + username: "admin", + password: "admin123" + )', + '# for Nexus 3 + nexus_upload( + nexus_version: 3, + mount_path: "", + file: "/path/to/file.ipa", + repo_id: "artefacts", + repo_group_id: "com.fastlane", + repo_project_name: "ipa", + repo_project_version: "1.13", + repo_classifier: "dSYM", # Optional + endpoint: "http://localhost:8081", + username: "admin", + password: "admin123" + )' + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notarize.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notarize.rb new file mode 100644 index 0000000..5e5f5fd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notarize.rb @@ -0,0 +1,340 @@ +module Fastlane + module Actions + class NotarizeAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + package_path = params[:package] + bundle_id = params[:bundle_id] + skip_stapling = params[:skip_stapling] + try_early_stapling = params[:try_early_stapling] + print_log = params[:print_log] + verbose = params[:verbose] + + # Only set :api_key from 
SharedValues if :api_key_path isn't set (conflicting options) + unless params[:api_key_path] + params[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + api_key = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path]) + + use_notarytool = params[:use_notarytool] + + # Compress and read bundle identifier only for .app bundle. + compressed_package_path = nil + if File.extname(package_path) == '.app' + compressed_package_path = "#{package_path}.zip" + Actions.sh( + "ditto -c -k --rsrc --keepParent \"#{package_path}\" \"#{compressed_package_path}\"", + log: verbose + ) + + unless bundle_id + info_plist_path = File.join(package_path, 'Contents', 'Info.plist') + bundle_id = Actions.sh( + "/usr/libexec/PlistBuddy -c \"Print :CFBundleIdentifier\" \"#{info_plist_path}\"", + log: verbose + ).strip + end + end + + UI.user_error!('Could not read bundle identifier, provide as a parameter') unless bundle_id + + if use_notarytool + notarytool(params, package_path, bundle_id, skip_stapling, print_log, verbose, api_key, compressed_package_path) + else + altool(params, package_path, bundle_id, try_early_stapling, skip_stapling, print_log, verbose, api_key, compressed_package_path) + end + end + + def self.notarytool(params, package_path, bundle_id, skip_stapling, print_log, verbose, api_key, compressed_package_path) + temp_file = nil + + # Create authorization part of command with either API Key or Apple ID + auth_parts = [] + if api_key + # Writes key contents to temporary file for command + require 'tempfile' + temp_file = Tempfile.new + api_key.write_key_to_file(temp_file.path) + + auth_parts << "--key #{temp_file.path}" + auth_parts << "--key-id #{api_key.key_id}" + auth_parts << "--issuer #{api_key.issuer_id}" + else + auth_parts << "--apple-id #{params[:username]}" + auth_parts << "--password #{ENV['FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD']}" + auth_parts << "--team-id #{params[:asc_provider]}" + end + + # Submits package and waits for processing using `xcrun notarytool submit --wait` + submit_parts = [ + "xcrun notarytool submit", + (compressed_package_path || package_path).shellescape, + "--output-format json", + "--wait" + ] + auth_parts + + submit_command = submit_parts.join(' ') + submit_response = Actions.sh( + submit_command, + log: verbose, + error_callback: lambda { |msg| + UI.error("Error polling for notarization info: #{msg}") + } + ) + + notarization_info = JSON.parse(submit_response) + + # Staple + case notarization_info['status'] + when 'Accepted' + submission_id = notarization_info["id"] + UI.success("Successfully uploaded package to notarization service with request identifier #{submission_id}") + + if skip_stapling + UI.success("Successfully notarized artifact") + else + UI.message('Stapling package') + + self.staple(package_path, verbose) + + UI.success("Successfully notarized and stapled package") + end + when 'Invalid' + UI.user_error!("Could not notarize package with message '#{notarization_info['statusSummary']}'") + else + UI.crash!("Could not notarize package with status '#{notarization_info['status']}'") + end + ensure + temp_file.delete if temp_file + end + + def self.altool(params, package_path, bundle_id, try_early_stapling, skip_stapling, print_log, verbose, api_key, compressed_package_path) + UI.message('Uploading package to notarization service, might take a while') + + notarization_upload_command = "xcrun altool --notarize-app -t osx -f \"#{compressed_package_path || package_path}\" 
--primary-bundle-id #{bundle_id} --output-format xml" + + notarization_info = {} + with_notarize_authenticator(params, api_key) do |notarize_authenticator| + notarization_upload_command << " --asc-provider \"#{params[:asc_provider]}\"" if params[:asc_provider] && api_key.nil? + + notarization_upload_response = Actions.sh( + notarize_authenticator.call(notarization_upload_command), + log: verbose + ) + + FileUtils.rm_rf(compressed_package_path) if compressed_package_path + + notarization_upload_plist = Plist.parse_xml(notarization_upload_response) + + if notarization_upload_plist.key?('product-errors') && notarization_upload_plist['product-errors'].any? + UI.important("đŸšĢ Could not upload package to notarization service! Here are the reasons:") + notarization_upload_plist['product-errors'].each { |product_error| UI.error("#{product_error['message']} (#{product_error['code']})") } + UI.user_error!("Package upload to notarization service cancelled. Please check the error messages above.") + end + + notarization_request_id = notarization_upload_plist['notarization-upload']['RequestUUID'] + + UI.success("Successfully uploaded package to notarization service with request identifier #{notarization_request_id}") + + while notarization_info.empty? || (notarization_info['Status'] == 'in progress') + if notarization_info.empty? + UI.message('Waiting to query request status') + elsif try_early_stapling && !skip_stapling + UI.message('Request in progress, trying early staple') + + begin + self.staple(package_path, verbose) + UI.message('Successfully notarized and early stapled package.') + + return + rescue + UI.message('Early staple failed, waiting to query again') + end + end + + sleep(30) + + UI.message('Querying request status') + + # As of July 2020, the request UUID might not be available for polling yet which returns an error code + # This is now handled with the error_callback (which prevents an error from being raised) + # Catching this error allows for polling to continue until the notarization is complete + error = false + notarization_info_response = Actions.sh( + notarize_authenticator.call("xcrun altool --notarization-info #{notarization_request_id} --output-format xml"), + log: verbose, + error_callback: lambda { |msg| + error = true + UI.error("Error polling for notarization info: #{msg}") + } + ) + + unless error + notarization_info_plist = Plist.parse_xml(notarization_info_response) + notarization_info = notarization_info_plist['notarization-info'] + end + end + end + # rubocop:enable Metrics/PerceivedComplexity + + log_url = notarization_info['LogFileURL'] + ENV['FL_NOTARIZE_LOG_FILE_URL'] = log_url + log_suffix = '' + if log_url && print_log + log_response = Net::HTTP.get(URI(log_url)) + log_json_object = JSON.parse(log_response) + log_suffix = ", with log:\n#{JSON.pretty_generate(log_json_object)}" + end + + case notarization_info['Status'] + when 'success' + if skip_stapling + UI.success("Successfully notarized artifact#{log_suffix}") + else + UI.message('Stapling package') + + self.staple(package_path, verbose) + + UI.success("Successfully notarized and stapled package#{log_suffix}") + end + when 'invalid' + UI.user_error!("Could not notarize package with message '#{notarization_info['Status Message']}'#{log_suffix}") + else + UI.crash!("Could not notarize package with status '#{notarization_info['Status']}'#{log_suffix}") + end + ensure + ENV.delete('FL_NOTARIZE_PASSWORD') + end + + def self.staple(package_path, verbose) + Actions.sh( + "xcrun stapler staple 
#{package_path.shellescape}", + log: verbose + ) + end + + def self.with_notarize_authenticator(params, api_key) + if api_key + # From xcrun altool for --apiKey: + # This option will search the following directories in sequence for a private key file with the name of 'AuthKey_.p8': './private_keys', '~/private_keys', '~/.private_keys', and '~/.appstoreconnect/private_keys'. + api_key_folder_path = File.expand_path('~/.appstoreconnect/private_keys') + api_key_file_path = File.join(api_key_folder_path, "AuthKey_#{api_key.key_id}.p8") + directory_exists = File.directory?(api_key_folder_path) + file_exists = File.exist?(api_key_file_path) + begin + FileUtils.mkdir_p(api_key_folder_path) unless directory_exists + api_key.write_key_to_file(api_key_file_path) unless file_exists + + yield(proc { |command| "#{command} --apiKey #{api_key.key_id} --apiIssuer #{api_key.issuer_id}" }) + ensure + FileUtils.rm(api_key_file_path) unless file_exists + FileUtils.rm_r(api_key_folder_path) unless directory_exists + end + else + apple_id_account = CredentialsManager::AccountManager.new(user: params[:username]) + + # Add password as a temporary environment variable for altool. + # Use app specific password if specified. + ENV['FL_NOTARIZE_PASSWORD'] = ENV['FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD'] || apple_id_account.password + + yield(proc { |command| "#{command} -u #{apple_id_account.user} -p @env:FL_NOTARIZE_PASSWORD" }) + end + end + + def self.description + 'Notarizes a macOS app' + end + + def self.authors + ['zeplin'] + end + + def self.available_options + username = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + username ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + asc_provider = CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id) + + [ + FastlaneCore::ConfigItem.new(key: :package, + env_name: 'FL_NOTARIZE_PACKAGE', + description: 'Path to package to notarize, e.g. .app bundle or disk image', + verify_block: proc do |value| + UI.user_error!("Could not find package at '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :use_notarytool, + env_name: 'FL_NOTARIZE_USE_NOTARYTOOL', + description: 'Whether to `xcrun notarytool` or `xcrun altool`', + default_value: Helper.mac? 
&& Helper.xcode_at_least?("13.0"), # Notary tool added in Xcode 13 + default_value_dynamic: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :try_early_stapling, + env_name: 'FL_NOTARIZE_TRY_EARLY_STAPLING', + description: 'Whether to try early stapling while the notarization request is in progress', + optional: true, + conflicting_options: [:skip_stapling], + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :skip_stapling, + env_name: 'FL_NOTARIZE_SKIP_STAPLING', + description: 'Do not staple the notarization ticket to the artifact; useful for single file executables and ZIP archives', + optional: true, + conflicting_options: [:try_early_stapling], + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :bundle_id, + env_name: 'FL_NOTARIZE_BUNDLE_ID', + description: 'Bundle identifier to uniquely identify the package', + optional: true), + FastlaneCore::ConfigItem.new(key: :username, + env_name: 'FL_NOTARIZE_USERNAME', + description: 'Apple ID username', + default_value: username, + optional: true, + conflicting_options: [:api_key_path, :api_key], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :asc_provider, + env_name: 'FL_NOTARIZE_ASC_PROVIDER', + description: 'Provider short name for accounts associated with multiple providers', + optional: true, + default_value: asc_provider), + FastlaneCore::ConfigItem.new(key: :print_log, + env_name: 'FL_NOTARIZE_PRINT_LOG', + description: 'Whether to print notarization log file, listing issues on failure and warnings on success', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: 'FL_NOTARIZE_VERBOSE', + description: 'Whether to log requests', + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ['FL_NOTARIZE_API_KEY_PATH', "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:username, :api_key], + verify_block: proc do |value| + UI.user_error!("API Key not found at '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ['FL_NOTARIZE_API_KEY', "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + optional: true, + conflicting_options: [:username, :api_key_path], + sensitive: true, + type: Hash) + ] + end + + def self.is_supported?(platform) + platform == :mac + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notification.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notification.rb new file mode 100644 index 0000000..3445e5a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notification.rb @@ -0,0 +1,75 @@ +module Fastlane + module Actions + class NotificationAction < Action + def self.run(params) + require 'terminal-notifier' + + options = params.values + # :message is non-optional + message = options.delete(:message) + # remove nil keys, since `notify` below does not ignore them and instead translates them into empty strings in output, which looks ugly + options = options.select { |_, v| v } + 
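+        # option_map below translates fastlane's snake_case option names into the
+        # camelCase keys the terminal-notifier gem expects, so that a call like
+        #
+        #   notification(message: "Build done", app_icon: "https://example.com/icon.png")
+        #
+        # (the URL is a placeholder) reaches TerminalNotifier as
+        # notify("Build done", title: "fastlane", appIcon: "https://example.com/icon.png").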
option_map = { + app_icon: :appIcon, + content_image: :contentImage + } + options = options.transform_keys { |k| option_map.fetch(k, k) } + TerminalNotifier.notify(message, options) + end + + def self.description + "Display a macOS notification with custom message and title" + end + + def self.author + ["champo", "cbowns", "KrauseFx", "amarcadet", "dusek"] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :title, + description: "The title to display in the notification", + default_value: 'fastlane'), + FastlaneCore::ConfigItem.new(key: :subtitle, + description: "A subtitle to display in the notification", + optional: true), + FastlaneCore::ConfigItem.new(key: :message, + description: "The message to display in the notification", + optional: false), + FastlaneCore::ConfigItem.new(key: :sound, + description: "The name of a sound to play when the notification appears (names are listed in Sound Preferences)", + optional: true), + FastlaneCore::ConfigItem.new(key: :activate, + description: "Bundle identifier of application to be opened when the notification is clicked", + optional: true), + FastlaneCore::ConfigItem.new(key: :app_icon, + description: "The URL of an image to display instead of the application icon (Mavericks+ only)", + optional: true), + FastlaneCore::ConfigItem.new(key: :content_image, + description: "The URL of an image to display attached to the notification (Mavericks+ only)", + optional: true), + FastlaneCore::ConfigItem.new(key: :open, + description: "URL of the resource to be opened when the notification is clicked", + optional: true), + FastlaneCore::ConfigItem.new(key: :execute, + description: "Shell command to run when the notification is clicked", + optional: true) + ] + end + + def self.is_supported?(platform) + Helper.mac? + end + + def self.example_code + [ + 'notification(subtitle: "Finished Building", message: "Ready to upload...")' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notify.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notify.rb new file mode 100644 index 0000000..ec41eb2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/notify.rb @@ -0,0 +1,40 @@ +module Fastlane + module Actions + class NotifyAction < Action + def self.run(params) + require 'terminal-notifier' + UI.important("It's recommended to use the new 'notification' method instead of 'notify'") + + text = params.join(' ') + TerminalNotifier.notify(text, title: 'fastlane') + end + + def self.description + "Shows a macOS notification - use `notification` instead" + end + + def self.author + ["champo", "KrauseFx"] + end + + def self.available_options + end + + def self.is_supported?(platform) + Helper.mac? 
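+        # Migration sketch, since this action is deprecated in favour of
+        # `notification`: a call such as
+        #
+        #   notify("Build finished")
+        #
+        # becomes
+        #
+        #   notification(message: "Build finished")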
+ end + + def self.example_code + nil + end + + def self.category + :deprecated + end + + def self.deprecated_notes + "It's recommended to use the new `notification` action instead of `notify`" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/number_of_commits.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/number_of_commits.rb new file mode 100644 index 0000000..8d67a87 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/number_of_commits.rb @@ -0,0 +1,75 @@ +module Fastlane + module Actions + class NumberOfCommitsAction < Action + def self.is_git? + Actions.sh('git rev-parse HEAD') + return true + rescue + return false + end + + def self.run(params) + if is_git? + if params[:all] + command = 'git rev-list --all --count' + else + command = 'git rev-list HEAD --count' + end + else + UI.user_error!("Not in a git repository.") + end + return Actions.sh(command).strip.to_i + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Return the number of commits in current git branch" + end + + def self.return_value + "The total number of all commits in current git branch" + end + + def self.return_type + :int + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :all, + env_name: "FL_NUMBER_OF_COMMITS_ALL", + optional: true, + type: Boolean, + description: "Returns number of all commits instead of current branch") + ] + end + + def self.details + "You can use this action to get the number of commits of this branch. This is useful if you want to set the build number to the number of commits. See `fastlane actions number_of_commits` for more details." + end + + def self.authors + ["onevcat", "samuelbeek"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'increment_build_number(build_number: number_of_commits)', + 'build_number = number_of_commits(all: true) + increment_build_number(build_number: build_number)' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/oclint.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/oclint.rb new file mode 100644 index 0000000..f67aeff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/oclint.rb @@ -0,0 +1,270 @@ +module Fastlane + module Actions + module SharedValues + FL_OCLINT_REPORT_PATH = :FL_OCLINT_REPORT_PATH + end + + class OclintAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + oclint_path = params[:oclint_path] + if `which #{oclint_path}`.to_s.empty? && !Helper.test? + UI.user_error!("You have to install oclint or provide path to oclint binary. 
For more details: " + "http://docs.oclint.org/en/stable/intro/installation.html".yellow)
+        end
+
+        compile_commands = params[:compile_commands]
+        compile_commands_dir = params[:compile_commands]
+        UI.user_error!("Could not find json compilation database at path '#{compile_commands}'") unless File.exist?(compile_commands)
+
+        # We'll attempt to sort things out so that we support receiving either a path to a
+        # 'compile_commands.json' file (as our option asks for), or a path to a directory
+        # *containing* a 'compile_commands.json' file (as oclint actually wants)
+        if File.file?(compile_commands_dir)
+          compile_commands_dir = File.dirname(compile_commands_dir)
+        else
+          compile_commands = File.join(compile_commands_dir, 'compile_commands.json')
+        end
+
+        if params[:select_reqex]
+          UI.important("'select_reqex' parameter is deprecated. Please use 'select_regex' instead.")
+          select_regex = params[:select_reqex]
+        end
+
+        select_regex = params[:select_regex] if params[:select_regex] # Overwrite deprecated select_reqex
+        select_regex = ensure_regex_is_not_string!(select_regex)
+
+        exclude_regex = params[:exclude_regex]
+        exclude_regex = ensure_regex_is_not_string!(exclude_regex)
+
+        files = JSON.parse(File.read(compile_commands)).map do |compile_command|
+          file = compile_command['file']
+          File.exist?(file) ? file : File.join(compile_command['directory'], file)
+        end
+
+        files.uniq!
+        files.select! do |file|
+          file_ruby = file.gsub('\ ', ' ')
+          File.exist?(file_ruby) and
+            (!select_regex or file_ruby =~ select_regex) and
+            (!exclude_regex or file_ruby !~ exclude_regex)
+        end
+
+        command_prefix = [
+          'cd',
+          File.expand_path('.').shellescape,
+          '&&'
+        ].join(' ')
+
+        report_type = params[:report_type]
+        report_path = params[:report_path] ? params[:report_path] : 'oclint_report.'
+ report_type + + oclint_args = ["-report-type=#{report_type}", "-o=#{report_path}"] + + oclint_args << "-list-enabled-rules" if params[:list_enabled_rules] + + if params[:rc] + UI.important("It's recommended to use 'thresholds' instead of deprecated 'rc' parameter") + oclint_args << "-rc=#{params[:rc]}" if params[:rc] # Deprecated + end + + oclint_args << ensure_array_is_not_string!(params[:thresholds]).map { |t| "-rc=#{t}" } if params[:thresholds] + # Escape ' in rule names with \' when passing on to shell command + oclint_args << params[:enable_rules].map { |r| "-rule #{r.shellescape}" } if params[:enable_rules] + oclint_args << params[:disable_rules].map { |r| "-disable-rule #{r.shellescape}" } if params[:disable_rules] + + oclint_args << "-max-priority-1=#{params[:max_priority_1]}" if params[:max_priority_1] + oclint_args << "-max-priority-2=#{params[:max_priority_2]}" if params[:max_priority_2] + oclint_args << "-max-priority-3=#{params[:max_priority_3]}" if params[:max_priority_3] + + oclint_args << "-enable-clang-static-analyzer" if params[:enable_clang_static_analyzer] + oclint_args << "-enable-global-analysis" if params[:enable_global_analysis] + oclint_args << "-allow-duplicated-violations" if params[:allow_duplicated_violations] + oclint_args << "-p #{compile_commands_dir.shellescape}" + + oclint_args << "-extra-arg=#{params[:extra_arg]}" if params[:extra_arg] + + command = [ + command_prefix, + oclint_path, + oclint_args, + '"' + files.join('" "') + '"' + ].join(' ') + + Actions.lane_context[SharedValues::FL_OCLINT_REPORT_PATH] = File.expand_path(report_path) + + return Action.sh(command) + end + + # return a proper regex object if regex string is single-quoted + def self.ensure_regex_is_not_string!(regex) + return regex unless regex.kind_of?(String) + + Regexp.new(regex) + end + + # return a proper array of strings if array string is single-quoted + def self.ensure_array_is_not_string!(array) + return array unless array.kind_of?(String) + + array.split(',') + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Lints implementation files with OCLint" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :oclint_path, + env_name: 'FL_OCLINT_PATH', + description: 'The path to oclint binary', + default_value: 'oclint', + optional: true), + FastlaneCore::ConfigItem.new(key: :compile_commands, + env_name: 'FL_OCLINT_COMPILE_COMMANDS', + description: 'The json compilation database, use xctool reporter \'json-compilation-database\'', + default_value: 'compile_commands.json', + optional: true), + FastlaneCore::ConfigItem.new(key: :select_reqex, + env_name: 'FL_OCLINT_SELECT_REQEX', + description: 'Select all files matching this reqex', + skip_type_validation: true, # allows Regex + deprecated: "Use `:select_regex` instead", + optional: true), + FastlaneCore::ConfigItem.new(key: :select_regex, + env_name: 'FL_OCLINT_SELECT_REGEX', + description: 'Select all files matching this regex', + skip_type_validation: true, # allows Regex + optional: true), + FastlaneCore::ConfigItem.new(key: :exclude_regex, + env_name: 'FL_OCLINT_EXCLUDE_REGEX', + description: 'Exclude all files matching this regex', + skip_type_validation: true, # allows Regex + optional: true), + FastlaneCore::ConfigItem.new(key: :report_type, + env_name: 'FL_OCLINT_REPORT_TYPE', + description: 'The type of the report (default: html)', + default_value: 'html', + optional: true), + 
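+          # When :report_path is omitted, run() falls back to
+          # 'oclint_report.' + report_type, e.g. "oclint_report.html" for the
+          # default report type, or "oclint_report.pmd" for report_type: "pmd".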
FastlaneCore::ConfigItem.new(key: :report_path, + env_name: 'FL_OCLINT_REPORT_PATH', + description: 'The reports file path', + optional: true), + FastlaneCore::ConfigItem.new(key: :list_enabled_rules, + env_name: "FL_OCLINT_LIST_ENABLED_RULES", + description: "List enabled rules", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :rc, + env_name: 'FL_OCLINT_RC', + description: 'Override the default behavior of rules', + optional: true), + FastlaneCore::ConfigItem.new(key: :thresholds, + env_name: 'FL_OCLINT_THRESHOLDS', + description: 'List of rule thresholds to override the default behavior of rules', + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :enable_rules, + env_name: 'FL_OCLINT_ENABLE_RULES', + description: 'List of rules to pick explicitly', + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :disable_rules, + env_name: 'FL_OCLINT_DISABLE_RULES', + description: 'List of rules to disable', + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :max_priority_1, + env_name: 'FL_OCLINT_MAX_PRIOTITY_1', + description: 'The max allowed number of priority 1 violations', + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :max_priority_2, + env_name: 'FL_OCLINT_MAX_PRIOTITY_2', + description: 'The max allowed number of priority 2 violations', + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :max_priority_3, + env_name: 'FL_OCLINT_MAX_PRIOTITY_3', + description: 'The max allowed number of priority 3 violations', + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :enable_clang_static_analyzer, + env_name: "FL_OCLINT_ENABLE_CLANG_STATIC_ANALYZER", + description: "Enable Clang Static Analyzer, and integrate results into OCLint report", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :enable_global_analysis, + env_name: "FL_OCLINT_ENABLE_GLOBAL_ANALYSIS", + description: "Compile every source, and analyze across global contexts (depends on number of source files, could results in high memory load)", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :allow_duplicated_violations, + env_name: "FL_OCLINT_ALLOW_DUPLICATED_VIOLATIONS", + description: "Allow duplicated violations in the OCLint report", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :extra_arg, + env_name: 'FL_OCLINT_EXTRA_ARG', + description: 'Additional argument to append to the compiler command line', + optional: true) + ] + end + # rubocop:enable Metrics/PerceivedComplexity + + def self.output + [ + ['FL_OCLINT_REPORT_PATH', 'The reports file path'] + ] + end + + def self.author + 'HeEAaD' + end + + def self.is_supported?(platform) + true + end + + def self.details + "Run the static analyzer tool [OCLint](http://oclint.org) for your project. You need to have a `compile_commands.json` file in your _fastlane_ directory or pass a path to your file." 
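+        # A minimal sketch of producing compile_commands.json before invoking this
+        # action (the scheme name is a placeholder, and xcpretty with its
+        # json-compilation-database reporter is assumed to be installed):
+        #
+        #   sh("xcodebuild -scheme MyApp build | xcpretty -r json-compilation-database -o compile_commands.json")
+        #   oclint(compile_commands: "compile_commands.json", report_type: "html")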
+ end + + def self.example_code + [ + 'oclint( + compile_commands: "commands.json", # The JSON compilation database, use xctool reporter "json-compilation-database" + select_regex: /ViewController.m/, # Select all files matching this regex + exclude_regex: /Test.m/, # Exclude all files matching this regex + report_type: "pmd", # The type of the report (default: html) + max_priority_1: 10, # The max allowed number of priority 1 violations + max_priority_2: 100, # The max allowed number of priority 2 violations + max_priority_3: 1000, # The max allowed number of priority 3 violations + thresholds: [ # Override the default behavior of rules + "LONG_LINE=200", + "LONG_METHOD=200" + ], + enable_rules: [ # List of rules to pick explicitly + "DoubleNegative", + "SwitchStatementsDon\'TNeedDefaultWhenFullyCovered" + ], + disable_rules: ["GotoStatement"], # List of rules to disable + list_enabled_rules: true, # List enabled rules + enable_clang_static_analyzer: true, # Enable Clang Static Analyzer, and integrate results into OCLint report + enable_global_analysis: true, # Compile every source, and analyze across global contexts (depends on number of source files, could results in high memory load) + allow_duplicated_violations: true, # Allow duplicated violations in the OCLint report + extra_arg: "-Wno-everything" # Additional argument to append to the compiler command line + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/onesignal.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/onesignal.rb new file mode 100644 index 0000000..8029980 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/onesignal.rb @@ -0,0 +1,196 @@ +module Fastlane + module Actions + module SharedValues + ONE_SIGNAL_APP_ID = :ONE_SIGNAL_APP_ID + ONE_SIGNAL_APP_AUTH_KEY = :ONE_SIGNAL_APP_AUTH_KEY + end + + class OnesignalAction < Action + def self.run(params) + require 'net/http' + require 'uri' + require 'base64' + + app_id = params[:app_id].to_s.strip + auth_token = params[:auth_token] + app_name = params[:app_name].to_s + apns_p12_password = params[:apns_p12_password] + android_token = params[:android_token] + android_gcm_sender_id = params[:android_gcm_sender_id] + organization_id = params[:organization_id] + + has_app_id = !app_id.empty? + has_app_name = !app_name.empty? + + is_update = has_app_id + + UI.user_error!('Please specify the `app_id` or the `app_name` parameters!') if !has_app_id && !has_app_name + + UI.message("Parameter App ID: #{app_id}") if has_app_id + UI.message("Parameter App name: #{app_name}") if has_app_name + + payload = {} + + payload['name'] = app_name if has_app_name + + unless params[:apns_p12].nil? + data = File.read(params[:apns_p12]) + apns_p12 = Base64.encode64(data) + payload["apns_env"] = params[:apns_env] + payload["apns_p12"] = apns_p12 + # we need to have something for the p12 password, even if it's an empty string + payload["apns_p12_password"] = apns_p12_password || "" + end + + payload["gcm_key"] = android_token unless android_token.nil? + payload["android_gcm_sender_id"] = android_gcm_sender_id unless android_gcm_sender_id.nil? + payload["organization_id"] = organization_id unless organization_id.nil? 
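+ # The payload hash built above becomes the JSON request body for OneSignal's + # apps endpoint; only the fields that were actually supplied are included. + # An update body might look like this (illustrative values only): + # { "name" => "My App", "apns_env" => "production", "apns_p12" => "<base64>", + # "apns_p12_password" => "", "gcm_key" => "...", "android_gcm_sender_id" => "..." } + # The unary + on the URL string below makes a mutable copy, so << can append + # the app ID when updating an existing app.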
+ + # here's the actual lifting - POST or PUT to OneSignal + + json_headers = { 'Content-Type' => 'application/json', 'Authorization' => "Basic #{auth_token}" } + url = +'https://onesignal.com/api/v1/apps' + url << '/' + app_id if is_update + uri = URI.parse(url) + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = true + + if is_update + response = http.put(uri.path, payload.to_json, json_headers) + else + response = http.post(uri.path, payload.to_json, json_headers) + end + + response_body = JSON.parse(response.body) + + Actions.lane_context[SharedValues::ONE_SIGNAL_APP_ID] = response_body["id"] + Actions.lane_context[SharedValues::ONE_SIGNAL_APP_AUTH_KEY] = response_body["basic_auth_key"] + + check_response_code(response, is_update) + end + + def self.check_response_code(response, is_update) + case response.code.to_i + when 200, 204 + UI.success("Successfully #{is_update ? 'updated' : 'created new'} OneSignal app") + else + UI.user_error!("Unexpected #{response.code} with response: #{response.body}") + end + end + + def self.description + "Create or update a new [OneSignal](https://onesignal.com/) application" + end + + def self.details + "You can use this action to automatically create or update a OneSignal application. You can also upload a `.p12` with password, a GCM key, or both." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :app_id, + env_name: "ONE_SIGNAL_APP_ID", + sensitive: true, + description: "OneSignal App ID. Setting this updates an existing app", + optional: true), + + FastlaneCore::ConfigItem.new(key: :auth_token, + env_name: "ONE_SIGNAL_AUTH_KEY", + sensitive: true, + description: "OneSignal Authorization Key", + verify_block: proc do |value| + if value.to_s.empty? + UI.error("Please add 'ENV[\"ONE_SIGNAL_AUTH_KEY\"] = \"your token\"' to your Fastfile's `before_all` section.") + UI.user_error!("No ONE_SIGNAL_AUTH_KEY given.") + end + end), + + FastlaneCore::ConfigItem.new(key: :app_name, + env_name: "ONE_SIGNAL_APP_NAME", + description: "OneSignal App Name. 
This is required when creating an app (in other words, when `:app_id` is not set, and optional when updating an app", + optional: true), + + FastlaneCore::ConfigItem.new(key: :android_token, + env_name: "ANDROID_TOKEN", + description: "ANDROID GCM KEY", + sensitive: true, + optional: true), + + FastlaneCore::ConfigItem.new(key: :android_gcm_sender_id, + env_name: "ANDROID_GCM_SENDER_ID", + description: "GCM SENDER ID", + sensitive: true, + optional: true), + + FastlaneCore::ConfigItem.new(key: :apns_p12, + env_name: "APNS_P12", + description: "APNS P12 File (in .p12 format)", + optional: true), + + FastlaneCore::ConfigItem.new(key: :apns_p12_password, + env_name: "APNS_P12_PASSWORD", + sensitive: true, + description: "APNS P12 password", + optional: true), + + FastlaneCore::ConfigItem.new(key: :apns_env, + env_name: "APNS_ENV", + description: "APNS environment", + optional: true, + default_value: 'production'), + + FastlaneCore::ConfigItem.new(key: :organization_id, + env_name: "ONE_SIGNAL_ORGANIZATION_ID", + sensitive: true, + description: "OneSignal Organization ID", + optional: true) + ] + end + + def self.output + [ + ['ONE_SIGNAL_APP_ID', 'The app ID of the newly created or updated app'], + ['ONE_SIGNAL_APP_AUTH_KEY', 'The auth token for the newly created or updated app'] + ] + end + + def self.authors + ["timothybarraclough", "smartshowltd"] + end + + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + + def self.example_code + [ + 'onesignal( + auth_token: "Your OneSignal Auth Token", + app_name: "Name for OneSignal App", + android_token: "Your Android GCM key (optional)", + android_gcm_sender_id: "Your Android GCM Sender ID (optional)", + apns_p12: "Path to Apple .p12 file (optional)", + apns_p12_password: "Password for .p12 file (optional)", + apns_env: "production/sandbox (defaults to production)", + organization_id: "Onesignal organization id (optional)" + )', + 'onesignal( + app_id: "Your OneSignal App ID", + auth_token: "Your OneSignal Auth Token", + app_name: "New Name for OneSignal App", + android_token: "Your Android GCM key (optional)", + android_gcm_sender_id: "Your Android GCM Sender ID (optional)", + apns_p12: "Path to Apple .p12 file (optional)", + apns_p12_password: "Password for .p12 file (optional)", + apns_env: "production/sandbox (defaults to production)", + organization_id: "Onesignal organization id (optional)" + )' + ] + end + + def self.category + :push + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_crash_reporting.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_crash_reporting.rb new file mode 100644 index 0000000..0de574f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_crash_reporting.rb @@ -0,0 +1,33 @@ +module Fastlane + module Actions + class OptOutCrashReportingAction < Action + def self.run(params) + UI.message("fastlane doesn't have crash reporting any more, feel free to remove `opt_out_crash_reporting` from your Fastfile") + end + + def self.description + "This will prevent reports from being uploaded when _fastlane_ crashes" + end + + def self.details + "_fastlane_ doesn't have crash reporting any more. Feel free to remove `opt_out_crash_reporting` from your Fastfile." 
+ end + + def self.authors + ['mpirri', 'ohayon'] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + nil + end + + def self.category + :deprecated + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_usage.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_usage.rb new file mode 100644 index 0000000..e8a4ea9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/opt_out_usage.rb @@ -0,0 +1,40 @@ +module Fastlane + module Actions + class OptOutUsageAction < Action + def self.run(params) + ENV['FASTLANE_OPT_OUT_USAGE'] = "YES" + UI.message("Disabled upload of used actions") + end + + def self.description + "This will stop uploading the information which actions were run" + end + + def self.details + [ + "By default, _fastlane_ will track what actions are being used. No personal/sensitive information is recorded.", + "Learn more at [https://docs.fastlane.tools/#metrics](https://docs.fastlane.tools/#metrics).", + "Add `opt_out_usage` at the top of your Fastfile to disable metrics collection." + ].join("\n") + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'opt_out_usage # add this to the top of your Fastfile' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pem.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pem.rb new file mode 100644 index 0000000..90d8c06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pem.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/get_push_certificate' + class PemAction < GetPushCertificateAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `get_push_certificate` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pilot.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pilot.rb new file mode 100644 index 0000000..aaca789 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pilot.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/upload_to_testflight' + class PilotAction < UploadToTestflightAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `upload_to_testflight` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_lib_lint.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_lib_lint.rb new file mode 100644 index 0000000..c46a6dc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_lib_lint.rb @@ -0,0 +1,189 @@ +module Fastlane + module Actions + class PodLibLintAction < Action + # rubocop:disable Metrics/PerceivedComplexity + def self.run(params) + command = [] + + command << "bundle exec" if params[:use_bundle_exec] && shell_out_should_use_bundle_exec? 
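+ # The lint invocation is assembled as an array of shell fragments: each + # parameter below contributes its corresponding pod lib lint flag only when + # set, and the fragments are joined into a single command string at the end.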
+ command << "pod lib lint" + + command << params[:podspec] if params[:podspec] + command << "--verbose" if params[:verbose] + command << "--allow-warnings" if params[:allow_warnings] + command << "--sources='#{params[:sources].join(',')}'" if params[:sources] + command << "--subspec='#{params[:subspec]}'" if params[:subspec] + command << "--include-podspecs='#{params[:include_podspecs]}'" if params[:include_podspecs] + command << "--external-podspecs='#{params[:external_podspecs]}'" if params[:external_podspecs] + command << "--swift-version=#{params[:swift_version]}" if params[:swift_version] + command << "--use-libraries" if params[:use_libraries] + command << "--use-modular-headers" if params[:use_modular_headers] + command << "--fail-fast" if params[:fail_fast] + command << "--private" if params[:private] + command << "--quick" if params[:quick] + command << "--no-clean" if params[:no_clean] + command << "--no-subspecs" if params[:no_subspecs] + command << "--platforms=#{params[:platforms]}" if params[:platforms] + command << "--skip-import-validation" if params[:skip_import_validation] + command << "--skip-tests" if params[:skip_tests] + command << "--analyze" if params[:analyze] + + result = Actions.sh(command.join(' ')) + UI.success("Pod lib lint successful âŦ†ī¸ ") + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Pod lib lint" + end + + def self.details + "Test the syntax of your Podfile by linting the pod against the files of its directory" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + description: "Use bundle exec when there is a Gemfile presented", + type: Boolean, + default_value: true, + env_name: "FL_POD_LIB_LINT_USE_BUNDLE"), + FastlaneCore::ConfigItem.new(key: :podspec, + description: "Path of spec to lint", + type: String, + optional: true, + env_name: "FL_POD_LIB_LINT_PODSPEC"), + FastlaneCore::ConfigItem.new(key: :verbose, + description: "Allow output detail in console", + type: Boolean, + optional: true, + env_name: "FL_POD_LIB_LINT_VERBOSE"), + FastlaneCore::ConfigItem.new(key: :allow_warnings, + description: "Allow warnings during pod lint", + type: Boolean, + optional: true, + env_name: "FL_POD_LIB_LINT_ALLOW_WARNINGS"), + FastlaneCore::ConfigItem.new(key: :sources, + description: "The sources of repos you want the pod spec to lint with, separated by commas", + type: Array, + optional: true, + env_name: "FL_POD_LIB_LINT_SOURCES", + verify_block: proc do |value| + UI.user_error!("Sources must be an array.") unless value.kind_of?(Array) + end), + FastlaneCore::ConfigItem.new(key: :subspec, + description: "A specific subspec to lint instead of the entire spec", + type: String, + optional: true, + env_name: "FL_POD_LIB_LINT_SUBSPEC"), + FastlaneCore::ConfigItem.new(key: :include_podspecs, + description: "A Glob of additional ancillary podspecs which are used for linting via :path (available since cocoapods >= 1.7)", + type: String, + optional: true, + env_name: "FL_POD_LIB_LINT_INCLUDE_PODSPECS"), + FastlaneCore::ConfigItem.new(key: :external_podspecs, + description: "A Glob of additional ancillary podspecs which are used for linting via :podspec. 
If there"\ + " are --include-podspecs, then these are removed from them (available since cocoapods >= 1.7)", + type: String, + optional: true, + env_name: "FL_POD_LIB_LINT_EXTERNAL_PODSPECS"), + FastlaneCore::ConfigItem.new(key: :swift_version, + description: "The SWIFT_VERSION that should be used to lint the spec. This takes precedence over a .swift-version file", + type: String, + optional: true, + env_name: "FL_POD_LIB_LINT_SWIFT_VERSION"), + FastlaneCore::ConfigItem.new(key: :use_libraries, + description: "Lint uses static libraries to install the spec", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_USE_LIBRARIES"), + FastlaneCore::ConfigItem.new(key: :use_modular_headers, + description: "Lint using modular libraries (available since cocoapods >= 1.6)", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_USE_MODULAR_HEADERS"), + FastlaneCore::ConfigItem.new(key: :fail_fast, + description: "Lint stops on the first failing platform or subspec", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_FAIL_FAST"), + FastlaneCore::ConfigItem.new(key: :private, + description: "Lint skips checks that apply only to public specs", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_PRIVATE"), + FastlaneCore::ConfigItem.new(key: :quick, + description: "Lint skips checks that would require to download and build the spec", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_QUICK"), + FastlaneCore::ConfigItem.new(key: :no_clean, + description: "Lint leaves the build directory intact for inspection", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_NO_CLEAN"), + FastlaneCore::ConfigItem.new(key: :no_subspecs, + description: "Lint skips validation of subspecs", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_NO_SUBSPECS"), + FastlaneCore::ConfigItem.new(key: :platforms, + description: "Lint against specific platforms (defaults to all platforms supported by "\ + "the podspec). 
Multiple platforms must be comma-delimited (available since cocoapods >= 1.6)", + optional: true, + env_name: "FL_POD_LIB_LINT_PLATFORMS"), + FastlaneCore::ConfigItem.new(key: :skip_import_validation, + description: "Lint skips validating that the pod can be imported (available since cocoapods >= 1.3)", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_SKIP_IMPORT_VALIDATION"), + FastlaneCore::ConfigItem.new(key: :skip_tests, + description: "Lint skips building and running tests during validation (available since cocoapods >= 1.3)", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_SKIP_TESTS"), + FastlaneCore::ConfigItem.new(key: :analyze, + description: "Validate with the Xcode Static Analysis tool (available since cocoapods >= 1.6.1)", + type: Boolean, + default_value: false, + env_name: "FL_POD_LIB_LINT_ANALYZE") + ] + end + + def self.output + end + + def self.return_value + nil + end + + def self.authors + ["thierryxing"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'pod_lib_lint', + '# Allow output detail in console + pod_lib_lint(verbose: true)', + '# Allow warnings during pod lint + pod_lib_lint(allow_warnings: true)', + '# If the podspec has a dependency on another private pod, then you will have to supply the sources + pod_lib_lint(sources: ["https://github.com/username/Specs", "https://github.com/CocoaPods/Specs"])' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_push.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_push.rb new file mode 100644 index 0000000..022410a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/pod_push.rb @@ -0,0 +1,198 @@ +module Fastlane + module Actions + class PodPushAction < Action + def self.run(params) + command = [] + + command << "bundle exec" if params[:use_bundle_exec] && shell_out_should_use_bundle_exec? 
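+ # Destination: with :repo the spec is pushed to that (private) spec repo via + # pod repo push, otherwise to the public CocoaPods Trunk via pod trunk push; + # every other parameter simply appends its CLI flag below.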
+ + if params[:repo] + repo = params[:repo] + command << "pod repo push #{repo}" + else + command << 'pod trunk push' + end + + if params[:path] + command << "'#{params[:path]}'" + end + + if params[:sources] + sources = params[:sources].join(",") + command << "--sources='#{sources}'" + end + + if params[:swift_version] + swift_version = params[:swift_version] + command << "--swift-version=#{swift_version}" + end + + if params[:allow_warnings] + command << "--allow-warnings" + end + + if params[:use_libraries] + command << "--use-libraries" + end + + if params[:skip_import_validation] + command << "--skip-import-validation" + end + + if params[:skip_tests] + command << "--skip-tests" + end + + if params[:use_json] + command << "--use-json" + end + + if params[:verbose] + command << "--verbose" + end + + if params[:use_modular_headers] + command << "--use-modular-headers" + end + + if params[:synchronous] + command << "--synchronous" + end + + if params[:no_overwrite] + command << "--no-overwrite" + end + + if params[:local_only] + command << "--local-only" + end + + result = Actions.sh(command.join(' ')) + UI.success("Successfully pushed Podspec âŦ†ī¸ ") + return result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Push a Podspec to Trunk or a private repository" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + description: "Use bundle exec when there is a Gemfile presented", + type: Boolean, + default_value: false, + env_name: "FL_POD_PUSH_USE_BUNDLE_EXEC"), + FastlaneCore::ConfigItem.new(key: :path, + description: "The Podspec you want to push", + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find file at path '#{value}'") unless File.exist?(value) + UI.user_error!("File must be a `.podspec` or `.podspec.json`") unless value.end_with?(".podspec", ".podspec.json") + end, + env_name: "FL_POD_PUSH_PATH"), + FastlaneCore::ConfigItem.new(key: :repo, + description: "The repo you want to push. Pushes to Trunk by default", + optional: true, + env_name: "FL_POD_PUSH_REPO"), + FastlaneCore::ConfigItem.new(key: :allow_warnings, + description: "Allow warnings during pod push", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_ALLOW_WARNINGS"), + FastlaneCore::ConfigItem.new(key: :use_libraries, + description: "Allow lint to use static libraries to install the spec", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_USE_LIBRARIES"), + FastlaneCore::ConfigItem.new(key: :sources, + description: "The sources of repos you want the pod spec to lint with, separated by commas", + optional: true, + type: Array, + verify_block: proc do |value| + UI.user_error!("Sources must be an array.") unless value.kind_of?(Array) + end, + env_name: "FL_POD_PUSH_SOURCES"), + FastlaneCore::ConfigItem.new(key: :swift_version, + description: "The SWIFT_VERSION that should be used to lint the spec. 
This takes precedence over a .swift-version file", + optional: true, + env_name: "FL_POD_PUSH_SWIFT_VERSION"), + FastlaneCore::ConfigItem.new(key: :skip_import_validation, + description: "Lint skips validating that the pod can be imported", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_SKIP_IMPORT_VALIDATION"), + FastlaneCore::ConfigItem.new(key: :skip_tests, + description: "Lint skips building and running tests during validation", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_SKIP_TESTS"), + FastlaneCore::ConfigItem.new(key: :use_json, + description: "Convert the podspec to JSON before pushing it to the repo", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_USE_JSON"), + FastlaneCore::ConfigItem.new(key: :verbose, + description: "Show more debugging information", + optional: true, + type: Boolean, + default_value: false, + env_name: "FL_POD_PUSH_VERBOSE"), + FastlaneCore::ConfigItem.new(key: :use_modular_headers, + description: "Use modular headers option during validation", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_USE_MODULAR_HEADERS"), + FastlaneCore::ConfigItem.new(key: :synchronous, + description: "If validation depends on other recently pushed pods, synchronize", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_SYNCHRONOUS"), + FastlaneCore::ConfigItem.new(key: :no_overwrite, + description: "Disallow pushing that would overwrite an existing spec", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_NO_OVERWRITE"), + FastlaneCore::ConfigItem.new(key: :local_only, + description: "Does not perform the step of pushing REPO to its remote", + optional: true, + type: Boolean, + env_name: "FL_POD_PUSH_LOCAL_ONLY") + ] + end + + def self.return_value + nil + end + + def self.authors + ["squarefrog"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + '# If no path is supplied then Trunk will attempt to find the first Podspec in the current directory. + pod_push', + '# Alternatively, supply the Podspec file path + pod_push(path: "TSMessages.podspec")', + '# You may also push to a private repo instead of Trunk + pod_push(path: "TSMessages.podspec", repo: "MyRepo")', + '# If the podspec has a dependency on another private pod, then you will have to supply the sources you want the podspec to lint with for pod_push to succeed. Read more here - https://github.com/CocoaPods/CocoaPods/issues/2543. 
+ pod_push(path: "TMessages.podspec", repo: "MyRepo", sources: ["https://github.com/username/Specs", "https://github.com/CocoaPods/Specs"])' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/podio_item.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/podio_item.rb new file mode 100644 index 0000000..1bd6875 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/podio_item.rb @@ -0,0 +1,210 @@ +module Fastlane + module Actions + module SharedValues + PODIO_ITEM_URL = :PODIO_ITEM_URL + end + + class PodioItemAction < Action + AUTH_URL = 'https://podio.com/oauth/token' + BASE_URL = 'https://api.podio.com' + + def self.run(params) + require 'rest_client' + require 'json' + require 'uri' + + post_item(params) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + 'Creates or updates an item within your Podio app' + end + + def self.details + [ + "Use this action to create or update an item within your Podio app (see [https://help.podio.com/hc/en-us/articles/201019278-Creating-apps-](https://help.podio.com/hc/en-us/articles/201019278-Creating-apps-)).", + "Pass in dictionary with field keys and their values.", + "Field key is located under `Modify app` -> `Advanced` -> `Developer` -> `External ID` (see [https://developers.podio.com/examples/items](https://developers.podio.com/examples/items))." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :client_id, + env_name: 'PODIO_ITEM_CLIENT_ID', + description: 'Client ID for Podio API (see https://developers.podio.com/api-key)', + verify_block: proc do |value| + UI.user_error!("No Client ID for Podio given, pass using `client_id: 'id'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :client_secret, + env_name: 'PODIO_ITEM_CLIENT_SECRET', + sensitive: true, + description: 'Client secret for Podio API (see https://developers.podio.com/api-key)', + verify_block: proc do |value| + UI.user_error!("No Client Secret for Podio given, pass using `client_secret: 'secret'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :app_id, + env_name: 'PODIO_ITEM_APP_ID', + description: 'App ID of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)', + verify_block: proc do |value| + UI.user_error!("No App ID for Podio given, pass using `app_id: 'id'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :app_token, + env_name: 'PODIO_ITEM_APP_TOKEN', + sensitive: true, + description: 'App token of the app you intend to authenticate with (see https://developers.podio.com/authentication/app_auth)', + verify_block: proc do |value| + UI.user_error!("No App token for Podio given, pass using `app_token: 'token'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :identifying_field, + env_name: 'PODIO_ITEM_IDENTIFYING_FIELD', + description: 'String specifying the field key used for identification of an item', + verify_block: proc do |value| + UI.user_error!("No Identifying field given, pass using `identifying_field: 'field name'`") unless value && !value.empty? 
+ end), + FastlaneCore::ConfigItem.new(key: :identifying_value, + description: 'String uniquely specifying an item within the app', + verify_block: proc do |value| + UI.user_error!("No Identifying value given, pass using `identifying_value: 'unique value'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :other_fields, + description: 'Dictionary of your app fields. Podio supports several field types, see https://developers.podio.com/doc/items', + type: Hash, + optional: true) + ] + end + + def self.output + [ + ['PODIO_ITEM_URL', 'URL to newly created (or updated) Podio item'] + ] + end + + def self.authors + ['pprochazka72', 'laugejepsen'] + end + + def self.is_supported?(_platform) + true + end + + ##################################################### + # @!group Logic + ##################################################### + + def self.post_item(options) + auth_config = authenticate(options[:client_id], + options[:client_secret], + options[:app_id], + options[:app_token]) + + item_id, item_url = get_item(auth_config, + options[:identifying_field], + options[:identifying_value], + options[:app_id]) + + unless options[:other_fields].nil? + options[:other_fields].each do |key, value| + uri = URI.parse(value) + if uri.kind_of?(URI::HTTP) + link_embed_id = get_embed_id(auth_config, uri) + options[:other_fields].merge!(key => link_embed_id) + end + end + update_item(auth_config, item_id, options[:other_fields]) + end + + Actions.lane_context[SharedValues::PODIO_ITEM_URL] = item_url + end + + def self.authenticate(client_id, client_secret, app_id, app_token) + auth_response = RestClient.post(AUTH_URL, grant_type: 'app', + app_id: app_id, + app_token: app_token, + client_id: client_id, + client_secret: client_secret) + UI.user_error!("Failed to authenticate with Podio API") if auth_response.code != 200 + + auth_response_dictionary = JSON.parse(auth_response.body) + access_token = auth_response_dictionary['access_token'] + + { Authorization: "OAuth2 #{access_token}", content_type: :json, accept: :json } + end + + def self.get_item(auth_config, identifying_field, identifying_value, app_id) + item_id, item_url = get_existing_item(auth_config, identifying_value, app_id) + + unless item_id + item_id, item_url = create_item(auth_config, identifying_field, identifying_value, app_id) + end + + [item_id, item_url] + end + + def self.get_existing_item(auth_config, identifying_value, app_id) + filter_request_body = { query: identifying_value, limit: 1, ref_type: 'item' }.to_json + filter_response = RestClient.post("#{BASE_URL}/search/app/#{app_id}/", filter_request_body, auth_config) + UI.user_error!("Failed to search for already existing item #{identifying_value}") if filter_response.code != 200 + + existing_items = JSON.parse(filter_response.body) + existing_item_id = nil + existing_item_url = nil + if existing_items.length > 0 + existing_item = existing_items[0] + if existing_item['title'] == identifying_value + existing_item_id = existing_item['id'] + existing_item_url = existing_item['link'] + end + end + + [existing_item_id, existing_item_url] + end + + def self.create_item(auth_config, identifying_field, identifying_value, app_id) + item_request_body = { fields: { identifying_field => identifying_value } }.to_json + item_response = RestClient.post("#{BASE_URL}/item/app/#{app_id}", item_request_body, auth_config) + UI.user_error!("Failed to create item \"#{identifying_value}\"") if item_response.code != 200 + + item_response_dictionary = JSON.parse(item_response.body) + 
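+ # Hand back the created item's id and permalink so get_item/post_item can + # expose the URL through the PODIO_ITEM_URL lane context value.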
+ [item_response_dictionary['item_id'], item_response_dictionary['link']] + end + + def self.update_item(auth_config, item_id, fields) + if fields.length > 0 + item_request_body = { fields: fields }.to_json + item_response = RestClient.put("#{BASE_URL}/item/#{item_id}", item_request_body, auth_config) + UI.user_error!("Failed to update item values \"#{fields}\"") unless [200, 204].include?(item_response.code) + end + end + + def self.get_embed_id(auth_config, url) + embed_request_body = { url: url }.to_json + embed_response = RestClient.post("#{BASE_URL}/embed/", embed_request_body, auth_config) + UI.user_error!("Failed to create embed for link #{url}") if embed_response.code != 200 + + embed_response_dictionary = JSON.parse(embed_response.body) + embed_response_dictionary['embed_id'] + end + + def self.example_code + [ + 'podio_item( + identifying_value: "Your unique value", + other_fields: { + "field1" => "fieldValue", + "field2" => "fieldValue2" + } + )' + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/precheck.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/precheck.rb new file mode 100644 index 0000000..b144f4a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/precheck.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/check_app_store_metadata' + class PrecheckAction < CheckAppStoreMetadataAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `check_app_store_metadata` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/println.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/println.rb new file mode 100644 index 0000000..ce28e68 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/println.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/puts' + class PrintlnAction < PutsAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `puts` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/produce.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/produce.rb new file mode 100644 index 0000000..27d711a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/produce.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/create_app_online' + class ProduceAction < CreateAppOnlineAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `create_app_online` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/prompt.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/prompt.rb new file mode 100644 index 0000000..a82bc8e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/prompt.rb @@ -0,0 +1,118 @@
+module Fastlane + module Actions + class PromptAction < Action + def self.run(params) + if params[:boolean] + return params[:ci_input] unless UI.interactive? + return UI.confirm(params[:text]) + end + + UI.message(params[:text]) + + return params[:ci_input] unless UI.interactive? + + if params[:multi_line_end_keyword] + # Multi line + end_tag = params[:multi_line_end_keyword] + UI.important("Submit inputs using \"#{params[:multi_line_end_keyword]}\"") + user_input = "" + loop do + line = STDIN.gets # returns `nil` if called at end of file + break unless line + end_tag_index = line.index(end_tag) + if end_tag_index.nil? + user_input << line + else + user_input << line.slice(0, end_tag_index) + user_input = user_input.strip + break + end + end + else + # Standard one line input + if params[:secure_text] + user_input = STDIN.noecho(&:gets).chomp while (user_input || "").length == 0 + else + user_input = STDIN.gets.chomp.strip while (user_input || "").length == 0 + end + end + + return user_input + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Ask the user for a value or for confirmation" + end + + def self.details + [ + "You can use `prompt` to ask the user for a value or to just let the user confirm the next step.", + "When this is executed on a CI service, the passed `ci_input` value will be returned.", + "This action also supports multi-line inputs using the `multi_line_end_keyword` option." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :text, + description: "The text that will be displayed to the user", + default_value: "Please enter some text: "), + FastlaneCore::ConfigItem.new(key: :ci_input, + description: "The default text that will be used when being executed on a CI service", + default_value: ''), + FastlaneCore::ConfigItem.new(key: :boolean, + description: "Is that a boolean question (yes/no)? This will add (y/n) at the end", + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :secure_text, + description: "Is that a secure text (yes/no)?", + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :multi_line_end_keyword, + description: "Enable multi-line inputs by providing an end text (e.g. 
'END') which will stop the user input", + optional: true) + ] + end + + def self.output + [] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'changelog = prompt(text: "Changelog: ")', + 'changelog = prompt( + text: "Changelog: ", + multi_line_end_keyword: "END" + ) + + pilot(changelog: changelog)' + ] + end + + def self.sample_return_value + "User Content\nWithNewline" + end + + def self.return_type + :string + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_git_tags.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_git_tags.rb new file mode 100644 index 0000000..6871336 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_git_tags.rb @@ -0,0 +1,76 @@ +module Fastlane + module Actions + class PushGitTagsAction < Action + def self.run(params) + command = [ + 'git', + 'push', + params[:remote] + ] + + if params[:tag] + command << "refs/tags/#{params[:tag].shellescape}" + else + command << '--tags' + end + + # optionally add the force component + command << '--force' if params[:force] + + result = Actions.sh(command.join(' ')) + UI.success('Tags pushed to remote') + result + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Push local tags to the remote - this will only push tags" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_PUSH_GIT_FORCE", + description: "Force push to remote", + type: Boolean, + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :remote, + env_name: "FL_GIT_PUSH_REMOTE", + description: "The remote to push tags to", + default_value: "origin", + optional: true), + FastlaneCore::ConfigItem.new(key: :tag, + env_name: "FL_GIT_PUSH_TAG", + description: "The tag to push to remote", + optional: true) + ] + end + + def self.author + ['vittoriom'] + end + + def self.details + "If you only want to push the tags and nothing else, you can use the `push_git_tags` action" + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'push_git_tags' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_to_git_remote.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_to_git_remote.rb new file mode 100644 index 0000000..5452f91 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/push_to_git_remote.rb @@ -0,0 +1,138 @@ +module Fastlane + module Actions + # Push local changes to the remote branch + class PushToGitRemoteAction < Action + def self.run(params) + # Find the local git branch using HEAD or fallback to CI's ENV git branch if you're in detached HEAD state + local_git_branch = Actions.git_branch_name_using_HEAD + local_git_branch = Actions.git_branch unless local_git_branch && local_git_branch != "HEAD" + + local_branch = params[:local_branch] + local_branch ||= local_git_branch.gsub(%r{#{params[:remote]}\/}, '') if local_git_branch + UI.user_error!('Failed to get the current branch.') unless local_branch + + remote_branch = params[:remote_branch] || local_branch + + # construct our command as an 
array of components + command = [ + 'git', + 'push', + params[:remote], + "#{local_branch.shellescape}:#{remote_branch.shellescape}" + ] + + # optionally add the tags component + command << '--tags' if params[:tags] + + # optionally add the force component + command << '--force' if params[:force] + + # optionally add the force component + command << '--force-with-lease' if params[:force_with_lease] + + # optionally add the no-verify component + command << '--no-verify' if params[:no_verify] + + # optionally add the set-upstream component + command << '--set-upstream' if params[:set_upstream] + + # optionally add the --push_options components + params[:push_options].each { |push_option| command << "--push-option=#{push_option}" } if params[:push_options] + + # execute our command + return command.join(' ') if Helper.test? + + Actions.sh(command.join(' ')) + UI.message('Successfully pushed to remote.') + end + + def self.description + "Push local changes to the remote branch" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :local_branch, + env_name: "FL_GIT_PUSH_LOCAL_BRANCH", + description: "The local branch to push from. Defaults to the current branch", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :remote_branch, + env_name: "FL_GIT_PUSH_REMOTE_BRANCH", + description: "The remote branch to push to. Defaults to the local branch", + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_PUSH_GIT_FORCE", + description: "Force push to remote", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :force_with_lease, + env_name: "FL_PUSH_GIT_FORCE_WITH_LEASE", + description: "Force push with lease to remote", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :tags, + env_name: "FL_PUSH_GIT_TAGS", + description: "Whether tags are pushed to remote", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :remote, + env_name: "FL_GIT_PUSH_REMOTE", + description: "The remote to push to", + default_value: 'origin'), + FastlaneCore::ConfigItem.new(key: :no_verify, + env_name: "FL_GIT_PUSH_USE_NO_VERIFY", + description: "Whether or not to use --no-verify", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :set_upstream, + env_name: "FL_GIT_PUSH_USE_SET_UPSTREAM", + description: "Whether or not to use --set-upstream", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :push_options, + env_name: "FL_GIT_PUSH_PUSH_OPTION", + description: "Array of strings to be passed using the '--push-option' option", + type: Array, + default_value: []) + ] + end + + def self.author + "lmirosevic" + end + + def self.details + [ + "Lets you push your local commits to a remote git repo. Useful if you make local changes such as adding a version bump commit (using `commit_version_bump`) or a git tag (using 'add_git_tag') on a CI server, and you want to push those changes back to your canonical/main repo.", + "If this is a new branch, use the `set_upstream` option to set the remote branch as upstream." + ].join("\n") + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'push_to_git_remote # simple version. 
pushes "master" branch to "origin" remote', + 'push_to_git_remote( + remote: "origin", # optional, default: "origin" + local_branch: "develop", # optional, aliased by "branch", default is set to current branch + remote_branch: "develop", # optional, default is set to local_branch + force: true, # optional, default: false + force_with_lease: true, # optional, default: false + tags: false, # optional, default: true + no_verify: true, # optional, default: false + set_upstream: true # optional, default: false + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/puts.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/puts.rb new file mode 100644 index 0000000..cd320ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/puts.rb @@ -0,0 +1,67 @@ +module Fastlane + module Actions + class PutsAction < Action + def self.run(params) + # display text from the message param (most likely coming from Swift) + # if called like `puts 'hi'` then params won't be a configuration item, so we have to check + if params.kind_of?(FastlaneCore::Configuration) && params[:message] + UI.message(params[:message]) + return + end + + # no parameter included in the call means treat this like a normal fastlane ruby call + UI.message(params.join(' ')) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Prints out the given text" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_PUTS_MESSAGE", + description: "Message to be printed out", + optional: true) + ] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.alias_used(action_alias, params) + if !params.kind_of?(FastlaneCore::Configuration) || params[:message].nil? 
+ UI.important("#{action_alias} called, please use 'puts' instead!") + end + end + + def self.aliases + ["println", "echo"] + end + + # We don't want to show this as step + def self.step_text + nil + end + + def self.example_code + [ + 'puts "Hi there"' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/read_podspec.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/read_podspec.rb new file mode 100644 index 0000000..cc0bf77 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/read_podspec.rb @@ -0,0 +1,90 @@ +module Fastlane + module Actions + module SharedValues + READ_PODSPEC_JSON = :READ_PODSPEC_JSON + end + + class ReadPodspecAction < Action + def self.run(params) + Actions.verify_gem!('cocoapods') + + path = params[:path] + + require 'cocoapods-core' + spec = Pod::Spec.from_file(path).to_hash + + UI.success("Reading podspec from file #{path}") + + Actions.lane_context[SharedValues::READ_PODSPEC_JSON] = spec + return spec + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Loads a CocoaPods spec as JSON" + end + + def self.details + [ + "This can be used for only specifying a version string in your podspec - and during your release process you'd read it from the podspec by running `version = read_podspec['version']` at the beginning of your lane.", + "Loads the specified (or the first found) podspec in the folder as JSON, so that you can inspect its `version`, `files` etc.", + "This can be useful when basing your release process on the version string only stored in one place - in the podspec.", + "As one of the first steps you'd read the podspec and its version and the rest of the workflow can use that version string (when e.g. creating a new git tag or a GitHub Release)." 
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_READ_PODSPEC_PATH", + description: "Path to the podspec to be read", + default_value: Dir['*.podspec*'].first, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("File #{value} not found") unless File.exist?(value) + end) + ] + end + + def self.output + [ + ['READ_PODSPEC_JSON', 'Podspec JSON payload'] + ] + end + + def self.authors + ["czechboy0"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'spec = read_podspec + version = spec["version"] + puts "Using Version #{version}"', + 'spec = read_podspec(path: "./XcodeServerSDK.podspec")' + ] + end + + def self.sample_return_value + { + 'version' => 1.0 + } + end + + def self.return_type + :hash + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/recreate_schemes.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/recreate_schemes.rb new file mode 100644 index 0000000..804c75a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/recreate_schemes.rb @@ -0,0 +1,46 @@ +module Fastlane + module Actions + class RecreateSchemesAction < Action + def self.run(params) + require 'xcodeproj' + + UI.message("Recreate schemes for project: #{params[:project]}") + + project = Xcodeproj::Project.open(params[:project]) + project.recreate_user_schemes + end + + def self.description + "Recreate not shared Xcode project schemes" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new( + key: :project, + env_name: "XCODE_PROJECT", + description: "The Xcode project" + ) + ] + end + + def self.authors + "jerolimov" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'recreate_schemes(project: "./path/to/MyApp.xcodeproj")' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_device.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_device.rb new file mode 100644 index 0000000..887b2cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_device.rb @@ -0,0 +1,148 @@ +require 'credentials_manager' + +module Fastlane + module Actions + class RegisterDeviceAction < Action + def self.is_supported?(platform) + platform == :ios + end + + def self.run(params) + require 'spaceship' + + name = params[:name] + platform = params[:platform] + udid = params[:udid] + + platform = Spaceship::ConnectAPI::BundleIdPlatform.map(platform) + + if (api_token = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? 
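+ # Credential fallback: an App Store Connect API key (hash or JSON file) is + # preferred; otherwise reuse a token already set on Spaceship::ConnectAPI; + # failing both, log in interactively with the Apple ID from CredentialsManager.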
+ UI.message("Using existing authorization token for App Store Connect API") + else + UI.message("Login to App Store Connect (#{params[:username]})") + credentials = CredentialsManager::AccountManager.new(user: params[:username]) + Spaceship::ConnectAPI.login(credentials.user, credentials.password, use_portal: true, use_tunes: false) + UI.message("Login successful") + end + + begin + Spaceship::ConnectAPI::Device.find_or_create(udid, name: name, platform: platform) + rescue => ex + UI.error(ex.to_s) + UI.crash!("Failed to register new device (name: #{name}, platform: #{platform}, UDID: #{udid})") + end + + UI.success("Successfully registered new device") + return udid + end + + def self.description + "Registers a new device to the Apple Dev Portal" + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + platform = Actions.lane_context[Actions::SharedValues::PLATFORM_NAME].to_s + + [ + FastlaneCore::ConfigItem.new(key: :name, + env_name: "FL_REGISTER_DEVICE_NAME", + description: "Provide the name of the device to register as"), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "FL_REGISTER_DEVICE_PLATFORM", + description: "Provide the platform of the device to register as (ios, mac)", + optional: true, + default_value: platform.empty? ? "ios" : platform, + verify_block: proc do |value| + UI.user_error!("The platform can only be ios or mac") unless %('ios', 'mac').include?(value) + end), + FastlaneCore::ConfigItem.new(key: :udid, + env_name: "FL_REGISTER_DEVICE_UDID", + description: "Provide the UDID of the device to register as"), + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["FL_REGISTER_DEVICE_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["FL_REGISTER_DEVICE_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + default_value: Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::APP_STORE_CONNECT_API_KEY], + default_value_dynamic: true, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :team_id, + env_name: "REGISTER_DEVICE_TEAM_ID", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true, + description: "The ID of your Developer Portal team if you're in multiple teams", + optional: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + env_name: "REGISTER_DEVICE_TEAM_NAME", + description: "The name of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_NAME"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :username, + env_name: 
"DELIVER_USER", + description: "Optional: Your Apple ID", + optional: true, + default_value: user, + default_value_dynamic: true) + ] + end + + def self.details + [ + "This will register an iOS device with the Developer Portal so that you can include it in your provisioning profiles.", + "This is an optimistic action, in that it will only ever add a device to the member center. If the device has already been registered within the member center, it will be left alone in the member center.", + "The action will connect to the Apple Developer Portal using the username you specified in your `Appfile` with `apple_id`, but you can override it using the `:username` option." + ].join("\n") + end + + def self.author + "pvinis" + end + + def self.example_code + [ + 'register_device( + name: "Luka iPhone 6", + udid: "1234567890123456789012345678901234567890" + ) # Simply provide the name and udid of the device', + 'register_device( + name: "Luka iPhone 6", + udid: "1234567890123456789012345678901234567890", + team_id: "XXXXXXXXXX", # Optional, if you"re a member of multiple teams, then you need to pass the team ID here. + username: "luka@goonbee.com" # Optional, lets you override the Apple Member Center username. + )' + ] + end + + def self.return_type + :string + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_devices.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_devices.rb new file mode 100644 index 0000000..909c51f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/register_devices.rb @@ -0,0 +1,211 @@ +require 'credentials_manager' + +module Fastlane + module Actions + class RegisterDevicesAction < Action + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.file_column_headers + ['Device ID', 'Device Name', 'Device Platform'] + end + + def self.run(params) + platform = Spaceship::ConnectAPI::BundleIdPlatform.map(params[:platform]) + + if params[:devices] + new_devices = params[:devices].map do |name, udid| + [udid, name] + end + elsif params[:devices_file] + require 'csv' + + devices_file = CSV.read(File.expand_path(File.join(params[:devices_file])), col_sep: "\t") + unless devices_file.first == file_column_headers.first(2) || devices_file.first == file_column_headers + UI.user_error!("Please provide a file according to the Apple Sample UDID file (https://developer.apple.com/account/resources/downloads/Multiple-Upload-Samples.zip)") + end + + new_devices = devices_file.drop(1).map do |row| + if row.count == 1 + UI.user_error!("Invalid device line, ensure you are using tabs (NOT spaces). See Apple's sample/spec here: https://developer.apple.com/account/resources/downloads/Multiple-Upload-Samples.zip") + elsif !(2..3).cover?(row.count) + UI.user_error!("Invalid device line, please provide a file according to the Apple Sample UDID file (https://developer.apple.com/account/resources/downloads/Multiple-Upload-Samples.zip)") + end + row + end + else + UI.user_error!("You must pass either a valid `devices` or `devices_file`. Please check the readme.") + end + + require 'spaceship' + if (api_token = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? 
+ UI.message("Using existing authorization token for App Store Connect API") + else + UI.message("Login to App Store Connect (#{params[:username]})") + credentials = CredentialsManager::AccountManager.new(user: params[:username]) + Spaceship::ConnectAPI.login(credentials.user, credentials.password, use_portal: true, use_tunes: false) + UI.message("Login successful") + end + + UI.message("Fetching list of currently registered devices...") + existing_devices = Spaceship::ConnectAPI::Device.all + + device_objs = new_devices.map do |device| + if existing_devices.map(&:udid).map(&:downcase).include?(device[0].downcase) + UI.verbose("UDID #{device[0]} already exists - Skipping...") + next + end + + device_platform = platform + + device_platform_supported = !device[2].nil? && self.is_supported?(device[2].to_sym) + if device_platform_supported + if device[2] == "mac" + device_platform = Spaceship::ConnectAPI::BundleIdPlatform::MAC_OS + else + device_platform = Spaceship::ConnectAPI::BundleIdPlatform::IOS + end + end + + try_create_device(name: device[1], platform: device_platform, udid: device[0]) + end + + UI.success("Successfully registered new devices.") + return device_objs + end + + def self.try_create_device(name: nil, platform: nil, udid: nil) + Spaceship::ConnectAPI::Device.find_or_create(udid, name: name, platform: platform) + rescue => ex + UI.error(ex.to_s) + UI.crash!("Failed to register new device (name: #{name}, UDID: #{udid})") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Registers new devices to the Apple Dev Portal" + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + platform = Actions.lane_context[Actions::SharedValues::PLATFORM_NAME].to_s + + [ + FastlaneCore::ConfigItem.new(key: :devices, + env_name: "FL_REGISTER_DEVICES_DEVICES", + description: "A hash of devices, with the name as key and the UDID as value", + type: Hash, + optional: true), + FastlaneCore::ConfigItem.new(key: :devices_file, + env_name: "FL_REGISTER_DEVICES_FILE", + description: "Provide a path to a file with the devices to register. 
For the format of the file see the examples", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not find file '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["FL_REGISTER_DEVICES_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["FL_REGISTER_DEVICES_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + default_value: Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::APP_STORE_CONNECT_API_KEY], + default_value_dynamic: true, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :team_id, + env_name: "REGISTER_DEVICES_TEAM_ID", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true, + description: "The ID of your Developer Portal team if you're in multiple teams", + optional: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + env_name: "REGISTER_DEVICES_TEAM_NAME", + description: "The name of your Developer Portal team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_TEAM_NAME"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "DELIVER_USER", + description: "Optional: Your Apple ID", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "REGISTER_DEVICES_PLATFORM", + description: "The platform to use (optional)", + optional: true, + default_value: platform.empty? ? "ios" : platform, + verify_block: proc do |value| + UI.user_error!("The platform can only be ios or mac") unless %w(ios mac).include?(value) + end) + ] + end + + def self.details + [ + "This will register iOS/Mac devices with the Developer Portal so that you can include them in your provisioning profiles.", + "This is an optimistic action, in that it will only ever add new devices to the member center, and never remove devices. If a device which has already been registered within the member center is not passed to this action, it will be left alone in the member center and continue to work.", + "The action will connect to the Apple Developer Portal using the username you specified in your `Appfile` with `apple_id`, but you can override it using the `username` option, or by setting the env variable `ENV['DELIVER_USER']`."
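+ # Sketch of the expected devices file (Apple's tab-separated sample
+ # format; tabs shown as <TAB>, values hypothetical), matching the
+ # file_column_headers defined above:
+ #
+ #   Device ID<TAB>Device Name<TAB>Device Platform
+ #   1234567890123456789012345678901234567890<TAB>Luka iPhone 6<TAB>ios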
+ ].join("\n") + end + + def self.author + "lmirosevic" + end + + def self.example_code + [ + 'register_devices( + devices: { + "Luka iPhone 6" => "1234567890123456789012345678901234567890", + "Felix iPad Air 2" => "abcdefghijklmnopqrstvuwxyzabcdefghijklmn" + } + ) # Simply provide a list of devices as a Hash', + 'register_devices( + devices_file: "./devices.txt" + ) # Alternatively provide a standard UDID export .txt file, see the Apple Sample (http://devimages.apple.com/downloads/devices/Multiple-Upload-Samples.zip)', + 'register_devices( + devices_file: "./devices.txt", # You must pass in either `devices_file` or `devices`. + team_id: "XXXXXXXXXX", # Optional, if you"re a member of multiple teams, then you need to pass the team ID here. + username: "luka@goonbee.com" # Optional, lets you override the Apple Member Center username. + )', + 'register_devices( + devices: { + "Luka MacBook" => "12345678-1234-1234-1234-123456789012", + "Felix MacBook Pro" => "ABCDEFGH-ABCD-ABCD-ABCD-ABCDEFGHIJKL" + }, + platform: "mac" + ) # Register devices for Mac' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_git_repo.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_git_repo.rb new file mode 100644 index 0000000..aa90c40 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_git_repo.rb @@ -0,0 +1,118 @@ +require 'shellwords' + +module Fastlane + module Actions + # Does a hard reset and clean on the repo + class ResetGitRepoAction < Action + def self.run(params) + if params[:force] || Actions.lane_context[SharedValues::GIT_REPO_WAS_CLEAN_ON_START] + paths = params[:files] + + return paths if Helper.test? + + if paths.nil? + Actions.sh('git reset --hard HEAD') + + clean_options = ['q', 'f', 'd'] + clean_options << 'x' if params[:disregard_gitignore] + clean_command = 'git clean' + ' -' + clean_options.join + + # we want to make sure that we have an array of patterns, and no nil values + unless params[:exclude].kind_of?(Enumerable) + params[:exclude] = [params[:exclude]].compact + end + + # attach our exclude patterns to the command + clean_command += ' ' + params[:exclude].map { |exclude| '-e ' + exclude.shellescape }.join(' ') unless params[:exclude].count == 0 + + Actions.sh(clean_command) unless params[:skip_clean] + + UI.success('Git repo was reset and cleaned back to a pristine state.') + else + paths.each do |path| + UI.important("Couldn't find file at path '#{path}'") unless File.exist?(path) + Actions.sh("git checkout -- '#{path}'") + end + UI.success("Git cleaned up #{paths.count} files.") + end + else + UI.user_error!('This is a destructive and potentially dangerous action. To protect from data loss, please add the `ensure_git_status_clean` action to the beginning of your lane, or if you\'re absolutely sure of what you\'re doing then call this action with the :force option.') + end + end + + def self.description + "Resets git repo to a clean state by discarding uncommitted changes" + end + + def self.details + list = <<-LIST.markdown_list + You have called the `ensure_git_status_clean` action prior to calling this action. This ensures that your repo started off in a clean state, so the only things that will get destroyed by this action are files that are created as a byproduct of the fastlane run. 
+ LIST + + [ + "This action will reset your git repo to a clean state, discarding any uncommitted and untracked changes. Useful in case you need to revert the repo back to a clean state, e.g. after running _fastlane_.", + "Untracked files like `.env` will also be deleted, unless `:skip_clean` is true.", + "It's a pretty drastic action so it comes with a sort of safety latch. It will only proceed with the reset if this condition is met:".markdown_preserve_newlines, + list + ].join("\n") + end + + def self.example_code + [ + 'reset_git_repo', + 'reset_git_repo(force: true) # If you don\'t care about warnings and are absolutely sure that you want to discard all changes. This will reset the repo even if you have valuable uncommitted changes, so use with care!', + 'reset_git_repo(skip_clean: true) # If you want "git clean" to be skipped, thus NOT deleting untracked files like ".env". Optional, defaults to false.', + 'reset_git_repo( + force: true, + files: [ + "./file.txt" + ] + )' + ] + end + + def self.category + :source_control + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :files, + env_name: "FL_RESET_GIT_FILES", + description: "Array of files whose changes should be discarded. If not given, all files will be discarded", + optional: true, + type: Array), + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_RESET_GIT_FORCE", + description: "Skip verifying the previously clean state of the repo. Only recommended in combination with `files` option", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_clean, + env_name: "FL_RESET_GIT_SKIP_CLEAN", + description: "Skip 'git clean' to avoid removing untracked files like `.env`", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :disregard_gitignore, + env_name: "FL_RESET_GIT_DISREGARD_GITIGNORE", + description: "Setting this to true will clean the whole repository, ignoring anything in your local .gitignore. Set this to true if you want the equivalent of a fresh clone, and for all untracked and ignored files to also be removed", + type: Boolean, + optional: true, + default_value: true), + FastlaneCore::ConfigItem.new(key: :exclude, + env_name: "FL_RESET_GIT_EXCLUDE", + description: "You can pass a string, or array of, file pattern(s) here which you want to have survive the cleaning process, and remain on disk, e.g. to leave the `artifacts` directory you would specify `exclude: 'artifacts'`. Make sure this pattern is also in your gitignore!
See the gitignore documentation for info on patterns", + skip_type_validation: true, # allows String, Array, Regex + optional: true) + ] + end + + def self.author + 'lmirosevic' + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_simulator_contents.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_simulator_contents.rb new file mode 100644 index 0000000..d48cacb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/reset_simulator_contents.rb @@ -0,0 +1,90 @@ +module Fastlane + module Actions + class ResetSimulatorContentsAction < Action + def self.run(params) + os_versions = params[:os_versions] || params[:ios] + + reset_simulators(os_versions) + end + + def self.reset_simulators(os_versions) + UI.verbose("Resetting simulator contents") + + if os_versions + os_versions.each do |os_version| + reset_all_by_version(os_version) + end + else + reset_all + end + + UI.success('Simulators reset done') + end + + def self.reset_all_by_version(os_version) + FastlaneCore::Simulator.reset_all_by_version(os_version: os_version) + FastlaneCore::SimulatorTV.reset_all_by_version(os_version: os_version) + FastlaneCore::SimulatorWatch.reset_all_by_version(os_version: os_version) + end + + def self.reset_all + FastlaneCore::Simulator.reset_all + FastlaneCore::SimulatorTV.reset_all + FastlaneCore::SimulatorWatch.reset_all + end + + def self.description + "Shutdown and reset running simulators" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :ios, + deprecated: "Use `:os_versions` instead", + short_option: "-i", + env_name: "FASTLANE_RESET_SIMULATOR_VERSIONS", + description: "Which OS versions of Simulators you want to reset content and settings, this does not remove/recreate the simulators", + optional: true, + type: Array), + FastlaneCore::ConfigItem.new(key: :os_versions, + short_option: "-v", + env_name: "FASTLANE_RESET_SIMULATOR_OS_VERSIONS", + description: "Which OS versions of Simulators you want to reset content and settings, this does not remove/recreate the simulators", + optional: true, + type: Array) + ] + end + + def self.aliases + ["reset_simulators"] + end + + def self.output + nil + end + + def self.return_value + nil + end + + def self.authors + ["danramteke"] + end + + def self.is_supported?(platform) + [:ios, :tvos, :watchos].include?(platform) + end + + def self.example_code + [ + 'reset_simulator_contents', + 'reset_simulator_contents(os_versions: ["10.3.1","12.2"])' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/resign.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/resign.rb new file mode 100644 index 0000000..9c73317 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/resign.rb @@ -0,0 +1,125 @@ +module Fastlane + module Actions + # Resigns the ipa + class ResignAction < Action + def self.run(params) + require 'sigh' + + # try to resign the ipa + if Sigh::Resign.resign(params[:ipa], params[:signing_identity], params[:provisioning_profile], params[:entitlements], params[:version], params[:display_name], params[:short_version], params[:bundle_version], params[:bundle_id], params[:use_app_entitlements], params[:keychain_path]) + UI.success('Successfully re-signed .ipa 🔏.') + else + 
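+ # Sigh::Resign.resign returns a falsy value when the codesign step fails,
+ # so the failure is surfaced as a user-facing error here.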
UI.user_error!("Failed to re-sign .ipa") + end + end + + def self.description + "Codesign an existing ipa file" + end + + def self.example_code + [ + 'resign( + ipa: "path/to/ipa", # can omit if using the `ipa` action + signing_identity: "iPhone Distribution: Luka Mirosevic (0123456789)", + provisioning_profile: "path/to/profile", # can omit if using the _sigh_ action + )', + '# You may provide multiple provisioning profiles if the application contains nested + # applications or app extensions, which need their own provisioning profile. + # You can do so by passing an array of provisioning profile strings or a hash + # that associates provisioning profile values to bundle identifier keys. + resign( + ipa: "path/to/ipa", # can omit if using the `ipa` action + signing_identity: "iPhone Distribution: Luka Mirosevic (0123456789)", + provisioning_profile: { + "com.example.awesome-app" => "path/to/profile", + "com.example.awesome-app.app-extension" => "path/to/app-extension/profile" + } + )' + ] + end + + def self.category + :code_signing + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "FL_RESIGN_IPA", + description: "Path to the ipa file to resign. Optional if you use the _gym_ or _xcodebuild_ action", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find ipa file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :signing_identity, + env_name: "FL_RESIGN_SIGNING_IDENTITY", + description: "Code signing identity to use. e.g. `iPhone Distribution: Luka Mirosevic (0123456789)`"), + FastlaneCore::ConfigItem.new(key: :entitlements, + env_name: "FL_RESIGN_ENTITLEMENTS", + description: "Path to the entitlement file to use, e.g. `myApp/MyApp.entitlements`", + conflicting_options: [:use_app_entitlements], + optional: true), + FastlaneCore::ConfigItem.new(key: :provisioning_profile, + env_name: "FL_RESIGN_PROVISIONING_PROFILE", + description: "Path to your provisioning_profile. Optional if you use _sigh_", + default_value: Actions.lane_context[SharedValues::SIGH_PROFILE_PATH], + default_value_dynamic: true, + skip_type_validation: true, # allows Hash, Array + verify_block: proc do |value| + files = case value + when Hash then value.values + when Enumerable then value + else [value] + end + files.each do |file| + UI.user_error!("Couldn't find provisioning profile at path '#{file}'") unless File.exist?(file) + end + end), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_RESIGN_VERSION", + description: "Version number to force resigned ipa to use. Updates both `CFBundleShortVersionString` and `CFBundleVersion` values in `Info.plist`. 
Applies for main app and all nested apps or extensions", + conflicting_options: [:short_version, :bundle_version], + optional: true), + FastlaneCore::ConfigItem.new(key: :display_name, + env_name: "FL_DISPLAY_NAME", + description: "Display name to force resigned ipa to use", + optional: true), + FastlaneCore::ConfigItem.new(key: :short_version, + env_name: "FL_RESIGN_SHORT_VERSION", + description: "Short version string to force resigned ipa to use (`CFBundleShortVersionString`)", + conflicting_options: [:version], + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_version, + env_name: "FL_RESIGN_BUNDLE_VERSION", + description: "Bundle version to force resigned ipa to use (`CFBundleVersion`)", + conflicting_options: [:version], + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_id, + env_name: "FL_RESIGN_BUNDLE_ID", + description: "Set new bundle ID during resign (`CFBundleIdentifier`)", + optional: true), + FastlaneCore::ConfigItem.new(key: :use_app_entitlements, + env_name: "FL_USE_APP_ENTITLEMENTS", + description: "Extract app bundle codesigning entitlements and combine with entitlements from new provisioning profile", + conflicting_options: [:entitlements], + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :keychain_path, + env_name: "FL_RESIGN_KEYCHAIN_PATH", + description: "Provide a path to a keychain file that should be used by `/usr/bin/codesign`", + optional: true) + ] + end + + def self.author + "lmirosevic" + end + + def self.is_supported?(platform) + platform == :ios + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/restore_file.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/restore_file.rb new file mode 100644 index 0000000..1baf6fc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/restore_file.rb @@ -0,0 +1,44 @@ +module Fastlane + module Actions + class RestoreFileAction < Action + def self.run(params) + path = params[:path] + backup_path = "#{path}.back" + UI.user_error!("Could not find file '#{backup_path}'") unless File.exist?(backup_path) + FileUtils.cp(backup_path, path, preserve: true) + FileUtils.rm(backup_path) + UI.message("Successfully restored backup 📤") + end + + def self.description + 'This action restores the file that was backed up with the `backup_file` action' + end + + def self.is_supported?(platform) + true + end + + def self.author + "gin0606" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + description: "Original file name you want to restore", + optional: false) + ] + end + + def self.example_code + [ + 'restore_file(path: "/path/to/file")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rocket.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rocket.rb new file mode 100644 index 0000000..d4d2782 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rocket.rb @@ -0,0 +1,83 @@ +module Fastlane + module Actions + class RocketAction < Action + def self.run(params) + puts(" + ____ + / \\ + | | + | | + | | + \\____/ + | | + | | + | | + |____| + {| |} + | | + | | + | F | + | A | + | S | + | T | + | L | + | A | + /| N |\\ + || E || + || || + \\|____|/ + /_\\/_\\ + ###### + ######## + ###### + #### + #### + ## + ## + ## + ## + ") + return "🚀" + end + + 
##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Outputs ascii-art for a rocket 🚀" + end + + def self.details + "Print an ascii Rocket :rocket:. Useful after using _crashlytics_ or _pilot_ to indicate that your new build has been shipped to outer-space." + end + + def self.available_options + [ + ] + end + + def self.authors + ["JaviSoto", "radex"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'rocket' + ] + end + + def self.return_type + :string + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rsync.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rsync.rb new file mode 100644 index 0000000..9070971 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/rsync.rb @@ -0,0 +1,71 @@ + +module Fastlane + module Actions + module SharedValues + end + + class RsyncAction < Action + def self.run(params) + rsync_cmd = ["rsync"] + rsync_cmd << params[:extra] + rsync_cmd << params[:source] + rsync_cmd << params[:destination] + Actions.sh(rsync_cmd.join(" ")) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Rsync files from :source to :destination" + end + + def self.details + "A wrapper around `rsync`, which is a tool that lets you synchronize files, including permissions and so on. For more detailed information about `rsync`, please see [rsync(1) man page](https://linux.die.net/man/1/rsync)." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :extra, + short_option: "-X", + env_name: "FL_RSYNC_EXTRA", # The name of the environment variable + description: "Extra options to be passed to rsync", # a short description of this parameter + optional: true, + default_value: "-av"), + FastlaneCore::ConfigItem.new(key: :source, + short_option: "-S", + env_name: "FL_RSYNC_SRC", # The name of the environment variable + description: "source file/folder", # a short description of this parameter + optional: false), + FastlaneCore::ConfigItem.new(key: :destination, + short_option: "-D", + env_name: "FL_RSYNC_DST", # The name of the environment variable + description: "destination file/folder", # a short description of this parameter + optional: false) + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'rsync( + source: "root@host:/tmp/1.txt", + destination: "/tmp/local_file.txt" + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ruby_version.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ruby_version.rb new file mode 100644 index 0000000..7cc31ab --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ruby_version.rb @@ -0,0 +1,56 @@ +module Fastlane + module Actions + module SharedValues + end + + class RubyVersionAction < Action + def self.run(params) + params = nil unless params.kind_of?(Array) + value = (params || []).first + defined_version = Gem::Version.new(value) if value + + UI.user_error!("Please pass minimum ruby version as parameter to ruby_version") unless defined_version + + if
Gem::Version.new(RUBY_VERSION) < defined_version + error_message = "The Fastfile requires a ruby version of >= #{defined_version}. You are on #{RUBY_VERSION}." + UI.user_error!(error_message) + end + + UI.message("Your ruby version #{RUBY_VERSION} matches the minimum requirement of #{defined_version} ✅") + end + + def self.step_text + "Verifying Ruby version" + end + + def self.author + "sebastianvarela" + end + + def self.description + "Verifies the minimum ruby version required" + end + + def self.example_code + [ + 'ruby_version("2.4.0")' + ] + end + + def self.details + [ + "Add this to your `Fastfile` to require a certain version of _ruby_.", + "Put it at the top of your `Fastfile` to ensure that _fastlane_ is executed appropriately." + ].join("\n") + end + + def self.category + :misc + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/run_tests.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/run_tests.rb new file mode 100644 index 0000000..9b346d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/run_tests.rb @@ -0,0 +1,124 @@ +module Fastlane + module Actions + module SharedValues + SCAN_DERIVED_DATA_PATH = :SCAN_DERIVED_DATA_PATH + SCAN_GENERATED_PLIST_FILE = :SCAN_GENERATED_PLIST_FILE + SCAN_GENERATED_PLIST_FILES = :SCAN_GENERATED_PLIST_FILES + SCAN_GENERATED_XCRESULT_PATH = :SCAN_GENERATED_XCRESULT_PATH + SCAN_ZIP_BUILD_PRODUCTS_PATH = :SCAN_ZIP_BUILD_PRODUCTS_PATH + end + + class RunTestsAction < Action + def self.run(values) + require 'scan' + manager = Scan::Manager.new + + begin + results = manager.work(values) + + zip_build_products_path = Scan.cache[:zip_build_products_path] + Actions.lane_context[SharedValues::SCAN_ZIP_BUILD_PRODUCTS_PATH] = zip_build_products_path if zip_build_products_path + + return results + rescue FastlaneCore::Interface::FastlaneBuildFailure => ex + # Specifically catching FastlaneBuildFailure to prevent build/compile errors from being + # silenced when :fail_build is set to false + # :fail_build should only suppress testing failures + raise ex + rescue => ex + if values[:fail_build] + raise ex + end + ensure + if Scan.cache && (result_bundle_path = Scan.cache[:result_bundle_path]) + Actions.lane_context[SharedValues::SCAN_GENERATED_XCRESULT_PATH] = File.absolute_path(result_bundle_path) + else + Actions.lane_context[SharedValues::SCAN_GENERATED_XCRESULT_PATH] = nil + end + + unless values[:derived_data_path].to_s.empty? 
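+ # Compare the test-summary plists that existed before the run against
+ # those present afterwards, so the lane context only exposes summaries
+ # generated by this scan invocation.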
+ plist_files_before = manager.plist_files_before || [] + + Actions.lane_context[SharedValues::SCAN_DERIVED_DATA_PATH] = values[:derived_data_path] + plist_files_after = manager.test_summary_filenames(values[:derived_data_path]) + all_test_summaries = (plist_files_after - plist_files_before) + Actions.lane_context[SharedValues::SCAN_GENERATED_PLIST_FILES] = all_test_summaries + Actions.lane_context[SharedValues::SCAN_GENERATED_PLIST_FILE] = all_test_summaries.last + end + end + end + + def self.description + "Easily run tests of your iOS app (via _scan_)" + end + + def self.details + "More information: https://docs.fastlane.tools/actions/scan/" + end + + def self.return_value + 'Outputs hash of results with the following keys: :number_of_tests, :number_of_failures, :number_of_retries, :number_of_tests_excluding_retries, :number_of_failures_excluding_retries' + end + + def self.return_type + :hash + end + + def self.author + "KrauseFx" + end + + def self.available_options + require 'scan' + + FastlaneCore::CommanderGenerator.new.generate(Scan::Options.available_options) + end + + def self.output + [ + ['SCAN_DERIVED_DATA_PATH', 'The path to the derived data'], + ['SCAN_GENERATED_PLIST_FILE', 'The generated plist file'], + ['SCAN_GENERATED_PLIST_FILES', 'The generated plist files'], + ['SCAN_GENERATED_XCRESULT_PATH', 'The path to the generated .xcresult'], + ['SCAN_ZIP_BUILD_PRODUCTS_PATH', 'The path to the zipped build products'] + ] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + private_class_method + + def self.example_code + [ + 'run_tests', + 'scan # alias for "run_tests"', + 'run_tests( + workspace: "App.xcworkspace", + scheme: "MyTests", + clean: false + )', + '# Build For Testing + run_tests( + derived_data_path: "my_folder", + build_for_testing: true + )', + '# run tests using derived data from prev. build + run_tests( + derived_data_path: "my_folder", + test_without_building: true + )', + '# or run it from an existing xctestrun package + run_tests( + xctestrun: "/path/to/mytests.xctestrun" + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/s3.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/s3.rb new file mode 100644 index 0000000..bdd70cd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/s3.rb @@ -0,0 +1,174 @@ +require 'fastlane/erb_template_helper' +require 'fastlane/helper/s3_client_helper' +require 'ostruct' +require 'uri' +require 'cgi' + +module Fastlane + module Actions + module SharedValues + # Using ||= because these MAY be defined by the + # preferred aws_s3 plugin + S3_IPA_OUTPUT_PATH ||= :S3_IPA_OUTPUT_PATH + S3_DSYM_OUTPUT_PATH ||= :S3_DSYM_OUTPUT_PATH + S3_PLIST_OUTPUT_PATH ||= :S3_PLIST_OUTPUT_PATH + S3_HTML_OUTPUT_PATH ||= :S3_HTML_OUTPUT_PATH + S3_VERSION_OUTPUT_PATH ||= :S3_VERSION_OUTPUT_PATH + end + + class S3Action < Action + def self.run(config) + UI.user_error!("Please use the `aws_s3` plugin instead. Install using `fastlane add_plugin aws_s3`.") + end + + def self.description + "Generates a plist file and uploads all to AWS S3" + end + + def self.details + [ + "Upload a new build to Amazon S3 to distribute the build to beta testers.", + "Works for both Ad Hoc and Enterprise signed applications.
This step will generate the necessary HTML, plist, and version files for you.", + "It is recommended to **not** store the AWS access keys in the `Fastfile`. The uploaded `version.json` file provides an easy way for apps to poll if a new update is available." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: "", + description: ".ipa file for the build ", + optional: true, + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :dsym, + env_name: "", + description: "zipped .dsym package for the build ", + optional: true, + default_value: Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :upload_metadata, + env_name: "", + description: "Upload relevant metadata for this build", + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :plist_template_path, + env_name: "", + description: "plist template path", + optional: true), + FastlaneCore::ConfigItem.new(key: :plist_file_name, + env_name: "", + description: "uploaded plist filename", + optional: true), + FastlaneCore::ConfigItem.new(key: :html_template_path, + env_name: "", + description: "html erb template path", + optional: true), + FastlaneCore::ConfigItem.new(key: :html_file_name, + env_name: "", + description: "uploaded html filename", + optional: true), + FastlaneCore::ConfigItem.new(key: :version_template_path, + env_name: "", + description: "version erb template path", + optional: true), + FastlaneCore::ConfigItem.new(key: :version_file_name, + env_name: "", + description: "uploaded version filename", + optional: true), + FastlaneCore::ConfigItem.new(key: :access_key, + env_name: "S3_ACCESS_KEY", + description: "AWS Access Key ID ", + sensitive: true, + optional: true, + default_value: ENV['AWS_ACCESS_KEY_ID'], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :secret_access_key, + env_name: "S3_SECRET_ACCESS_KEY", + description: "AWS Secret Access Key ", + sensitive: true, + optional: true, + default_value: ENV['AWS_SECRET_ACCESS_KEY'], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :bucket, + env_name: "S3_BUCKET", + description: "AWS bucket name", + optional: true, + code_gen_sensitive: true, + default_value: ENV['AWS_BUCKET_NAME'], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :region, + env_name: "S3_REGION", + description: "AWS region (for bucket creation) ", + optional: true, + code_gen_sensitive: true, + default_value: ENV['AWS_REGION'], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "S3_PATH", + description: "S3 'path'. Values from Info.plist will be substituted for keys wrapped in {} ", + optional: true, + default_value: 'v{CFBundleShortVersionString}_b{CFBundleVersion}/'), + FastlaneCore::ConfigItem.new(key: :source, + env_name: "S3_SOURCE", + description: "Optional source directory e.g. 
./build ", + optional: true), + FastlaneCore::ConfigItem.new(key: :acl, + env_name: "S3_ACL", + description: "Uploaded object permissions e.g public_read (default), private, public_read_write, authenticated_read ", + optional: true, + default_value: "public_read") + ] + end + + def self.output + [ + ['S3_IPA_OUTPUT_PATH', 'Direct HTTP link to the uploaded ipa file'], + ['S3_DSYM_OUTPUT_PATH', 'Direct HTTP link to the uploaded dsym file'], + ['S3_PLIST_OUTPUT_PATH', 'Direct HTTP link to the uploaded plist file'], + ['S3_HTML_OUTPUT_PATH', 'Direct HTTP link to the uploaded HTML file'], + ['S3_VERSION_OUTPUT_PATH', 'Direct HTTP link to the uploaded Version file'] + ] + end + + def self.author + "joshdholtz" + end + + def self.is_supported?(platform) + false + end + + def self.example_code + [ + 's3', + 's3( + # All of these are used to make Shenzhen\'s `ipa distribute:s3` command + access_key: ENV["S3_ACCESS_KEY"], # Required from user. + secret_access_key: ENV["S3_SECRET_ACCESS_KEY"], # Required from user. + bucket: ENV["S3_BUCKET"], # Required from user. + ipa: "AppName.ipa", # Optional if you use `ipa` to build + dsym: "AppName.app.dSYM.zip", # Optional if you use `ipa` to build + path: "v{CFBundleShortVersionString}_b{CFBundleVersion}/", # This is actually the default. + upload_metadata: true, # Upload version.json, plist and HTML. Set to false to skip uploading of these files. + version_file_name: "app_version.json", # Name of the file to upload to S3. Defaults to "version.json" + version_template_path: "path/to/erb" # Path to an ERB to configure the structure of the version JSON file + )' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + [ + "Please use the `aws_s3` plugin instead.", + "Install using `fastlane add_plugin aws_s3`." 
+ ].join("\n") + end + end + # rubocop:enable Metrics/ClassLength + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/say.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/say.rb new file mode 100644 index 0000000..a2b42fa --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/say.rb @@ -0,0 +1,55 @@ +module Fastlane + module Actions + class SayAction < Action + def self.run(params) + text = params[:text] + text = text.join(' ') + text = text.tr("'", '"') + + if params[:mute] + UI.message(text) + return text + else + Actions.sh("say '#{text}'") + end + end + + def self.description + "This action speaks the given text out loud" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :text, + description: 'Text to be spoken out loud (as string or array of strings)', + optional: false, + type: Array), + FastlaneCore::ConfigItem.new(key: :mute, + env_name: "SAY_MUTE", + description: 'If say should be muted with text printed out', + optional: false, + type: Boolean, + default_value: false) + ] + end + + def self.is_supported?(platform) + true + end + + def self.author + "KrauseFx" + end + + def self.example_code + [ + 'say("I can speak")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scan.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scan.rb new file mode 100644 index 0000000..54dca17 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scan.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/run_tests' + class ScanAction < RunTestsAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `run_tests` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scp.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scp.rb new file mode 100644 index 0000000..8f9ee52 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/scp.rb @@ -0,0 +1,108 @@ +module Fastlane + module Actions + module SharedValues + end + + class ScpAction < Action + def self.run(params) + Actions.verify_gem!('net-scp') + require "net/scp" + ret = nil + Net::SCP.start(params[:host], params[:username], { port: params[:port].to_i, password: params[:password] }) do |scp| + if params[:upload] + scp.upload!(params[:upload][:src], params[:upload][:dst], recursive: true) + UI.message(['[SCP COMMAND]', "Successfully Uploaded", params[:upload][:src], params[:upload][:dst]].join(': ')) + end + if params[:download] + + t_ret = scp.download!(params[:download][:src], params[:download][:dst], recursive: true) + UI.message(['[SCP COMMAND]', "Successfully Downloaded", params[:download][:src], params[:download][:dst]].join(': ')) + unless params[:download][:dst] + ret = t_ret + end + end + end + ret + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Transfer files via SCP" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "FL_SSH_USERNAME", + 
description: "Username"), + FastlaneCore::ConfigItem.new(key: :password, + short_option: "-p", + env_name: "FL_SSH_PASSWORD", + description: "Password", + sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :host, + short_option: "-H", + env_name: "FL_SSH_HOST", + description: "Hostname"), + FastlaneCore::ConfigItem.new(key: :port, + short_option: "-P", + env_name: "FL_SSH_PORT", + description: "Port", + optional: true, + default_value: "22"), + FastlaneCore::ConfigItem.new(key: :upload, + short_option: "-U", + env_name: "FL_SCP_UPLOAD", + description: "Upload", + optional: true, + type: Hash), + FastlaneCore::ConfigItem.new(key: :download, + short_option: "-D", + env_name: "FL_SCP_DOWNLOAD", + description: "Download", + optional: true, + type: Hash) + + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'scp( + host: "dev.januschka.com", + username: "root", + upload: { + src: "/root/dir1", + dst: "/tmp/new_dir" + } + )', + 'scp( + host: "dev.januschka.com", + username: "root", + download: { + src: "/root/dir1", + dst: "/tmp/new_dir" + } + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/screengrab.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/screengrab.rb new file mode 100644 index 0000000..9a8a557 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/screengrab.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/capture_android_screenshots' + class ScreengrabAction < CaptureAndroidScreenshotsAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `capture_android_screenshots` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_build_number_repository.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_build_number_repository.rb new file mode 100644 index 0000000..1ad70a4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_build_number_repository.rb @@ -0,0 +1,76 @@ +module Fastlane + module Actions + module SharedValues + end + + class SetBuildNumberRepositoryAction < Action + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.run(params) + build_number = Fastlane::Actions::GetBuildNumberRepositoryAction.run( + use_hg_revision_number: params[:use_hg_revision_number] + ) + + Fastlane::Actions::IncrementBuildNumberAction.run( + build_number: build_number, + xcodeproj: params[:xcodeproj] + ) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Set the build number from the current repository" + end + + def self.details + [ + "This action will set the **build number** according to what the SCM HEAD reports.", + "Currently supported SCMs are svn (uses root revision), git-svn (uses svn revision) and git (uses short hash) and mercurial (uses short hash or revision number).", + "There is an option, `:use_hg_revision_number`, which allows to use mercurial revision number instead of hash." 
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_hg_revision_number, + env_name: "USE_HG_REVISION_NUMBER", + description: "Use hg revision number instead of hash (ignored for non-hg repos)", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "XCODEPROJ_PATH", + description: "explicitly specify which xcodeproj to use", + optional: true, + verify_block: proc do |value| + path = File.expand_path(value) + UI.user_error!("Please pass the path to the project, not the workspace") if path.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project at #{path}") unless Helper.test? || File.exist?(path) + end) + ] + end + + def self.authors + ["pbrooks", "armadsen", "AndrewSB"] + end + + def self.example_code + [ + 'set_build_number_repository', + 'set_build_number_repository( + xcodeproj: "./path/to/MyApp.xcodeproj" + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_changelog.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_changelog.rb new file mode 100644 index 0000000..1f503ba --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_changelog.rb @@ -0,0 +1,201 @@ +module Fastlane + module Actions + class SetChangelogAction < Action + def self.run(params) + require 'spaceship' + + # Team selection passed though FASTLANE_ITC_TEAM_ID and FASTLANE_ITC_TEAM_NAME environment variables + # Prompts select team if multiple teams and none specified + if (api_token = Spaceship::ConnectAPI::Token.from(hash: params[:api_key], filepath: params[:api_key_path])) + UI.message("Creating authorization token for App Store Connect API") + Spaceship::ConnectAPI.token = api_token + elsif !Spaceship::ConnectAPI.token.nil? + UI.message("Using existing authorization token for App Store Connect API") + else + UI.message("Login to App Store Connect (#{params[:username]})") + Spaceship::ConnectAPI.login(params[:username], use_portal: false, use_tunes: true, tunes_team_id: params[:team_id], team_name: params[:team_name]) + UI.message("Login successful") + end + + app = Spaceship::ConnectAPI::App.find(params[:app_identifier]) + UI.user_error!("Couldn't find app with identifier #{params[:app_identifier]}") if app.nil? 
+ + version_number = params[:version] + platform = Spaceship::ConnectAPI::Platform.map(params[:platform]) + + unless version_number + # Automatically fetch the latest version + UI.message("Fetching the latest version for this app") + edit_version = app.get_edit_app_store_version(platform: platform) + if edit_version + version_number = edit_version.version_string + else + UI.message("You have to specify a new version number: ") + version_number = STDIN.gets.strip + end + end + + UI.message("Going to update version #{version_number}") + + changelog = params[:changelog] + unless changelog + path = default_changelog_path + UI.message("Looking for changelog in '#{path}'...") + if File.exist?(path) + changelog = File.read(path) + else + UI.error("Couldn't find changelog.txt") + UI.message("Please enter the changelog here:") + changelog = STDIN.gets + end + end + + UI.important("Going to update the changelog to:\n\n#{changelog}\n\n") + + edit_version = app.get_edit_app_store_version(platform: platform) + if edit_version + if edit_version.version_string != version_number + # Version is already there, make sure it matches the one we want to create + UI.message("Changing existing version number from '#{edit_version.version_string}' to '#{version_number}'") + edit_version = edit_version.update(attributes: { + versionString: version_number + }) + else + UI.message("Updating changelog for existing version #{edit_version.version_string}") + end + else + UI.message("Creating the new version: #{version_number}") + attributes = { versionString: version_number, platform: platform } + edit_version = Spaceship::ConnectAPI.post_app_store_version(app_id: app.id, attributes: attributes).first + end + + localizations = edit_version.get_app_store_version_localizations + localizations.each do |localization| + UI.message("Updating changelog for the '#{localization.locale}'") + localization.update(attributes: { + whatsNew: changelog + }) + end + + UI.success("👼 Successfully pushed the new changelog for #{edit_version.version_string}") + end + + def self.default_changelog_path + File.join(FastlaneCore::FastlaneFolder.path.to_s, 'changelog.txt') + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Set the changelog for all languages on App Store Connect" + end + + def self.details + [ + "This is useful if you have only one changelog for all languages.", + "You can store the changelog in `#{default_changelog_path}` and it will automatically get loaded from there. This integration is useful if you support e.g. 10 languages and want to use the same \"What's new\"-text for all languages.", + "Defining the version is optional. _fastlane_ will try to automatically detect it if you don't provide one."
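+ # Sketch of the file-based flow described above (identifier hypothetical):
+ #
+ #   lane :update_release_notes do
+ #     File.write("fastlane/changelog.txt", "Bug fixes and performance improvements")
+ #     set_changelog(app_identifier: "com.example.app")
+ #   end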
+ ].join("\n") + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :api_key_path, + env_names: ["FL_SET_CHANGELOG_API_KEY_PATH", "APP_STORE_CONNECT_API_KEY_PATH"], + description: "Path to your App Store Connect API Key JSON file (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-json-file)", + optional: true, + conflicting_options: [:api_key], + verify_block: proc do |value| + UI.user_error!("Couldn't find API key JSON file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :api_key, + env_names: ["FL_SET_CHANGELOG_API_KEY", "APP_STORE_CONNECT_API_KEY"], + description: "Your App Store Connect API Key information (https://docs.fastlane.tools/app-store-connect-api/#using-fastlane-api-key-hash-option)", + type: Hash, + default_value: Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::APP_STORE_CONNECT_API_KEY], + default_value_dynamic: true, + optional: true, + sensitive: true, + conflicting_options: [:api_key_path]), + FastlaneCore::ConfigItem.new(key: :app_identifier, + short_option: "-a", + env_name: "FASTLANE_APP_IDENTIFIER", + description: "The bundle identifier of your app", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "FASTLANE_USERNAME", + description: "Your Apple ID Username", + optional: true, + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_SET_CHANGELOG_VERSION", + description: "The version number to create/update", + optional: true), + FastlaneCore::ConfigItem.new(key: :changelog, + env_name: "FL_SET_CHANGELOG_CHANGELOG", + description: "Changelog text that should be uploaded to App Store Connect", + optional: true), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-k", + env_name: "FL_SET_CHANGELOG_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # as we also allow integers, which we convert to strings anyway + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_ID"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :team_name, + short_option: "-e", + env_name: "FL_SET_CHANGELOG_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true, + verify_block: proc do |value| + ENV["FASTLANE_ITC_TEAM_NAME"] = value.to_s + end), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "FL_SET_CHANGELOG_PLATFORM", + description: "The platform of the app (ios, appletvos, mac)", + default_value: "ios", + verify_block: proc do |value| + available = ['ios', 'appletvos', 'mac'] + UI.user_error!("Invalid platform '#{value}', must be #{available.join(', ')}") unless available.include?(value) + end) + ] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :appletvos, :mac].include?(platform) + end + + def 
self.example_code + [ + 'set_changelog(changelog: "Changelog for all Languages")', + 'set_changelog(app_identifier: "com.krausefx.app", version: "1.0", changelog: "Changelog for all Languages")' + ] + end + + def self.category + :app_store_connect + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_github_release.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_github_release.rb new file mode 100644 index 0000000..8cb8691 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_github_release.rb @@ -0,0 +1,287 @@ +module Fastlane + module Actions + module SharedValues + SET_GITHUB_RELEASE_HTML_LINK = :SET_GITHUB_RELEASE_HTML_LINK + SET_GITHUB_RELEASE_RELEASE_ID = :SET_GITHUB_RELEASE_RELEASE_ID + SET_GITHUB_RELEASE_JSON = :SET_GITHUB_RELEASE_JSON + end + + class SetGithubReleaseAction < Action + def self.run(params) + UI.important("Creating release of #{params[:repository_name]} on tag \"#{params[:tag_name]}\" with name \"#{params[:name]}\".") + UI.important("Will also upload assets #{params[:upload_assets]}.") if params[:upload_assets] + + repo_name = params[:repository_name] + api_token = params[:api_token] + api_bearer = params[:api_bearer] + server_url = params[:server_url] + tag_name = params[:tag_name] + + payload = { + 'tag_name' => params[:tag_name], + 'draft' => !!params[:is_draft], + 'prerelease' => !!params[:is_prerelease], + 'generate_release_notes' => !!params[:is_generate_release_notes] + } + payload['name'] = params[:name] if params[:name] + payload['body'] = params[:description] if params[:description] + payload['target_commitish'] = params[:commitish] if params[:commitish] + + GithubApiAction.run( + server_url: server_url, + api_token: api_token, + api_bearer: api_bearer, + http_method: 'POST', + path: "repos/#{repo_name}/releases", + body: payload, + error_handlers: { + 422 => proc do |result| + UI.error(result[:body]) + UI.error("Release on tag #{tag_name} already exists!") + return nil + end, + 404 => proc do |result| + UI.error(result[:body]) + UI.user_error!("Repository #{repo_name} cannot be found, please double check its name and that you provided a valid API token (GITHUB_API_TOKEN)") + end, + 401 => proc do |result| + UI.error(result[:body]) + UI.user_error!("You are not authorized to access #{repo_name}, please make sure you provided a valid API token (GITHUB_API_TOKEN)") + end, + '*' => proc do |result| + UI.user_error!("GitHub responded with #{result[:status]}:#{result[:body]}") + end + } + ) do |result| + json = result[:json] + html_url = json['html_url'] + release_id = json['id'] + + UI.success("Successfully created release at tag \"#{tag_name}\" on GitHub") + UI.important("See release at \"#{html_url}\"") + + Actions.lane_context[SharedValues::SET_GITHUB_RELEASE_HTML_LINK] = html_url + Actions.lane_context[SharedValues::SET_GITHUB_RELEASE_RELEASE_ID] = release_id + Actions.lane_context[SharedValues::SET_GITHUB_RELEASE_JSON] = json + + assets = params[:upload_assets] + if assets && assets.count > 0 + # upload assets + self.upload_assets(assets, json['upload_url'], api_token, api_bearer) + + # fetch the release again, so that it contains the uploaded assets + GithubApiAction.run( + server_url: server_url, + api_token: api_token, + api_bearer: api_bearer, + http_method: 'GET', + path: "repos/#{repo_name}/releases/#{release_id}", + error_handlers: { + '*' => proc do |get_result| + UI.error("GitHub responded with 
#{get_result[:status]}:#{get_result[:body]}") + UI.user_error!("Failed to fetch the newly created release, but it *has been created* successfully.") + end + } + ) do |get_result| + Actions.lane_context[SharedValues::SET_GITHUB_RELEASE_JSON] = get_result[:json] + UI.success("Successfully uploaded assets #{assets} to release \"#{html_url}\"") + return get_result[:json] + end + else + return json || result[:body] + end + end + end + + def self.upload_assets(assets, upload_url_template, api_token, api_bearer) + assets.each do |asset| + self.upload(asset, upload_url_template, api_token, api_bearer) + end + end + + def self.upload(asset_path, upload_url_template, api_token, api_bearer) + # if it's a directory, zip it first in a temp directory, because we can only upload binary files + absolute_path = File.absolute_path(asset_path) + + # check that the asset even exists + UI.user_error!("Asset #{absolute_path} doesn't exist") unless File.exist?(absolute_path) + + if File.directory?(absolute_path) + Dir.mktmpdir do |dir| + tmpzip = File.join(dir, File.basename(absolute_path) + '.zip') + sh("cd \"#{File.dirname(absolute_path)}\"; zip -r --symlinks \"#{tmpzip}\" \"#{File.basename(absolute_path)}\" 2>&1 >/dev/null") + self.upload_file(tmpzip, upload_url_template, api_token, api_bearer) + end + else + self.upload_file(absolute_path, upload_url_template, api_token, api_bearer) + end + end + + def self.upload_file(file, url_template, api_token, api_bearer) + require 'addressable/template' + file_name = File.basename(file) + expanded_url = Addressable::Template.new(url_template).expand(name: file_name).to_s + headers = { 'Content-Type' => 'application/zip' } # works for all binary files + UI.important("Uploading #{file_name}") + GithubApiAction.run( + api_token: api_token, + api_bearer: api_bearer, + http_method: 'POST', + headers: headers, + url: expanded_url, + raw_body: File.read(file), + error_handlers: { + '*' => proc do |result| + UI.error("GitHub responded with #{result[:status]}:#{result[:body]}") + UI.user_error!("Failed to upload asset #{file_name} to GitHub.") + end + } + ) do |result| + UI.success("Successfully uploaded #{file_name}.") + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "This will create a new release on GitHub and upload assets for it" + end + + def self.details + [ + "Creates a new release on GitHub. You must provide your GitHub Personal token (get one from [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new)), the repository name, and the tag name. The target commitish defaults to `master`.", + "If the tag doesn't exist, one will be created on the commit or branch passed in as commitish.", + "Out parameters provide the release's id, which can be used for later editing, and the release's HTML link to GitHub. You can also specify a list of assets to be uploaded to the release with the `:upload_assets` parameter." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :repository_name, + env_name: "FL_SET_GITHUB_RELEASE_REPOSITORY_NAME", + description: "The path to your repo, e.g. 'fastlane/fastlane'", + verify_block: proc do |value| + UI.user_error!("Please only pass the path, e.g. 'fastlane/fastlane'") if value.include?("github.com") + UI.user_error!("Please only pass the path, e.g.
'fastlane/fastlane'") if value.split('/').count != 2 + end), + FastlaneCore::ConfigItem.new(key: :server_url, + env_name: "FL_GITHUB_RELEASE_SERVER_URL", + description: "The server url. e.g. 'https://your.internal.github.host/api/v3' (Default: 'https://api.github.com')", + default_value: "https://api.github.com", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please include the protocol in the server url, e.g. https://your.github.server/api/v3") unless value.include?("//") + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_GITHUB_RELEASE_API_TOKEN", + description: "Personal API Token for GitHub - generate one at https://github.com/settings/tokens", + conflicting_options: [:api_bearer], + sensitive: true, + code_gen_sensitive: true, + default_value: ENV["GITHUB_API_TOKEN"], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :api_bearer, + env_name: "FL_GITHUB_RELEASE_API_BEARER", + sensitive: true, + code_gen_sensitive: true, + description: "Use a Bearer authorization token. Usually generated by Github Apps, e.g. GitHub Actions GITHUB_TOKEN environment variable", + conflicting_options: [:api_token], + optional: true, + default_value: nil), + FastlaneCore::ConfigItem.new(key: :tag_name, + env_name: "FL_SET_GITHUB_RELEASE_TAG_NAME", + description: "Pass in the tag name", + optional: false), + FastlaneCore::ConfigItem.new(key: :name, + env_name: "FL_SET_GITHUB_RELEASE_NAME", + description: "Name of this release", + optional: true), + FastlaneCore::ConfigItem.new(key: :commitish, + env_name: "FL_SET_GITHUB_RELEASE_COMMITISH", + description: "Specifies the commitish value that determines where the Git tag is created from. Can be any branch or commit SHA. Unused if the Git tag already exists. 
Default: the repository's default branch (usually master)", + optional: true), + FastlaneCore::ConfigItem.new(key: :description, + env_name: "FL_SET_GITHUB_RELEASE_DESCRIPTION", + description: "Description of this release", + optional: true, + default_value: Actions.lane_context[SharedValues::FL_CHANGELOG], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :is_draft, + env_name: "FL_SET_GITHUB_RELEASE_IS_DRAFT", + description: "Whether the release should be marked as draft", + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :is_prerelease, + env_name: "FL_SET_GITHUB_RELEASE_IS_PRERELEASE", + description: "Whether the release should be marked as prerelease", + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :is_generate_release_notes, + env_name: "FL_SET_GITHUB_RELEASE_IS_GENERATE_RELEASE_NOTES", + description: "Whether the name and body of this release should be generated automatically", + optional: true, + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :upload_assets, + env_name: "FL_SET_GITHUB_RELEASE_UPLOAD_ASSETS", + description: "Path to assets to be uploaded with the release", + optional: true, + type: Array, + verify_block: proc do |value| + UI.user_error!("upload_assets must be an Array of paths to assets") unless value.kind_of?(Array) + end) + ] + end + + def self.output + [ + ['SET_GITHUB_RELEASE_HTML_LINK', 'Link to your created release'], + ['SET_GITHUB_RELEASE_RELEASE_ID', 'Release id (useful for subsequent editing)'], + ['SET_GITHUB_RELEASE_JSON', 'The whole release JSON object'] + ] + end + + def self.return_value + [ + "A hash containing all relevant information of this release", + "Access things like 'html_url', 'tag_name', 'name', 'body'" + ].join("\n") + end + + def self.return_type + :hash + end + + def self.authors + ["czechboy0", "tommeier"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'github_release = set_github_release( + repository_name: "fastlane/fastlane", + api_token: ENV["GITHUB_TOKEN"], + name: "Super New actions", + tag_name: "v1.22.0", + description: (File.read("changelog") rescue "No changelog provided"), + commitish: "master", + upload_assets: ["example_integration.ipa", "./pkg/built.gem"] + )' + ] + end + + def self.category + :source_control + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_info_plist_value.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_info_plist_value.rb new file mode 100644 index 0000000..c901e13 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_info_plist_value.rb @@ -0,0 +1,92 @@ +module Fastlane + module Actions + module SharedValues + end + + class SetInfoPlistValueAction < Action + def self.run(params) + require "plist" + + begin + path = File.expand_path(params[:path]) + plist = Plist.parse_xml(path) + if params[:subkey] + if plist[params[:key]] + plist[params[:key]][params[:subkey]] = params[:value] + else + UI.message("Key doesn't exist, going to create new one ...") + plist[params[:key]] = { params[:subkey] => params[:value] } + end + else + plist[params[:key]] = params[:value] + end + new_plist = Plist::Emit.dump(plist) + if params[:output_file_name] + output = params[:output_file_name] + FileUtils.mkdir_p(File.expand_path("..", output)) + File.write(File.expand_path(output), new_plist) + 
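+ # When :output_file_name is given, the plist at :path is left untouched and the re-serialized copy (including the newly set value) is written to that separate location; the else branch below overwrites the original file in place.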
else + File.write(path, new_plist) + end + + return params[:value] + rescue => ex + UI.error(ex) + UI.user_error!("Unable to set value to plist file at '#{path}'") + end + end + + def self.description + "Sets a value in the Info.plist of your project, using native Ruby data structures" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :key, + env_name: "FL_SET_INFO_PLIST_PARAM_NAME", + description: "Name of key in plist", + optional: false), + FastlaneCore::ConfigItem.new(key: :subkey, + env_name: "FL_SET_INFO_PLIST_SUBPARAM_NAME", + description: "Name of subkey in plist", + optional: true), + FastlaneCore::ConfigItem.new(key: :value, + env_name: "FL_SET_INFO_PLIST_PARAM_VALUE", + description: "Value to set", + skip_type_validation: true, # allow String, Hash + optional: false), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_SET_INFO_PLIST_PATH", + description: "Path to plist file you want to update", + optional: false, + verify_block: proc do |value| + UI.user_error!("Couldn't find plist file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :output_file_name, + env_name: "FL_SET_INFO_PLIST_OUTPUT_FILE_NAME", + description: "Path to the output file you want to generate", + optional: true) + ] + end + + def self.authors + ["kohtenko", "uwehollatz"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'set_info_plist_value(path: "./Info.plist", key: "CFBundleIdentifier", value: "com.krausefx.app.beta")', + 'set_info_plist_value(path: "./MyApp-Info.plist", key: "NSAppTransportSecurity", subkey: "NSAllowsArbitraryLoads", value: true, output_file_name: "./Info.plist")' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_pod_key.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_pod_key.rb new file mode 100644 index 0000000..d666a06 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/set_pod_key.rb @@ -0,0 +1,77 @@ +module Fastlane + module Actions + class SetPodKeyAction < Action + def self.run(params) + Actions.verify_gem!('cocoapods-keys') + cmd = [] + + cmd << ['bundle exec'] if params[:use_bundle_exec] && shell_out_should_use_bundle_exec? 
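+ # The fragments collected in cmd are joined with single spaces below, so the example at the bottom of this file would shell out to roughly: bundle exec pod keys set "APIToken" "1234" "MyProject" (the bundle exec prefix assuming a Gemfile is present).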
+ cmd << ['pod keys set'] + + cmd << ["\"#{params[:key]}\""] + cmd << ["\"#{params[:value]}\""] + cmd << ["\"#{params[:project]}\""] if params[:project] + + Actions.sh(cmd.join(' ')) + end + + def self.author + "marcelofabri" + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Sets a value for a key with cocoapods-keys" + end + + def self.details + "Adds a key to [cocoapods-keys](https://github.com/orta/cocoapods-keys)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + env_name: "FL_SET_POD_KEY_USE_BUNDLE_EXEC", + description: "Use bundle exec when there is a Gemfile present", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :key, + env_name: "FL_SET_POD_KEY_ITEM_KEY", + description: "The key to be saved with cocoapods-keys", + optional: false), + FastlaneCore::ConfigItem.new(key: :value, + env_name: "FL_SET_POD_KEY_ITEM_VALUE", + description: "The value to be saved with cocoapods-keys", + sensitive: true, + code_gen_sensitive: true, + optional: false), + FastlaneCore::ConfigItem.new(key: :project, + env_name: "FL_SET_POD_KEY_PROJECT", + description: "The project name", + optional: true) + ] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'set_pod_key( + key: "APIToken", + value: "1234", + project: "MyProject" + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_ci.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_ci.rb new file mode 100644 index 0000000..491157d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_ci.rb @@ -0,0 +1,140 @@ +module Fastlane + module Actions + class SetupCiAction < Action + def self.run(params) + unless should_run?(params) + UI.message("Not running on CI, skipping CI setup") + return + end + + case detect_provider(params) + when 'circleci' + setup_output_paths + end + + setup_keychain(params) + end + + def self.should_run?(params) + Helper.ci? || params[:force] + end + + def self.detect_provider(params) + params[:provider] || (Helper.is_circle_ci? ? 'circleci' : nil) + end + + def self.setup_keychain(params) + unless Helper.mac? + UI.message("Skipping Keychain setup on non-macOS CI Agent") + return + end + + unless ENV["MATCH_KEYCHAIN_NAME"].nil? 
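+ # A pre-existing MATCH_KEYCHAIN_NAME means the caller (or an earlier action) already manages its own keychain, in which case the temporary keychain created below would only conflict with it.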
+ UI.message("Skipping Keychain setup as a keychain was already specified") + return + end + + keychain_name = "fastlane_tmp_keychain" + ENV["MATCH_KEYCHAIN_NAME"] = keychain_name + ENV["MATCH_KEYCHAIN_PASSWORD"] = "" + + UI.message("Creating temporary keychain: \"#{keychain_name}\".") + Actions::CreateKeychainAction.run( + name: keychain_name, + default_keychain: true, + unlock: true, + timeout: params[:timeout], + lock_when_sleeps: true, + password: "", + add_to_search_list: true + ) + + UI.message("Enabling match readonly mode.") + ENV["MATCH_READONLY"] = true.to_s + end + + def self.setup_output_paths + unless ENV["FL_OUTPUT_DIR"] + UI.message("Skipping Log Path setup as FL_OUTPUT_DIR is unset") + return + end + + root = Pathname.new(ENV["FL_OUTPUT_DIR"]) + ENV["SCAN_OUTPUT_DIRECTORY"] = (root + "scan").to_s + ENV["GYM_OUTPUT_DIRECTORY"] = (root + "gym").to_s + ENV["FL_BUILDLOG_PATH"] = (root + "buildlogs").to_s + ENV["SCAN_INCLUDE_SIMULATOR_LOGS"] = true.to_s + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Setup the keychain and match to work with CI" + end + + def self.details + list = <<-LIST.markdown_list(true) + Creates a new temporary keychain for use with match + Switches match to `readonly` mode to not create new profiles/cert on CI + Sets up log and test result paths to be easily collectible + LIST + + [ + list, + "This action helps with CI integration. Add this to the top of your Fastfile if you use CI." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_SETUP_CI_FORCE", + description: "Force setup, even if not executed by CI", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :provider, + env_name: "FL_SETUP_CI_PROVIDER", + description: "CI provider. If none is set, the provider is detected automatically", + optional: true, + verify_block: proc do |value| + value = value.to_s + # Validate both 'travis' and 'circleci' for backwards compatibility, even + # though only the latter receives special treatment by this action + UI.user_error!("A given CI provider '#{value}' is not supported. Available CI providers: 'travis', 'circleci'") unless ["travis", "circleci"].include?(value) + end), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "FL_SETUP_CI_TIMEOUT", + description: "Set a custom timeout in seconds for keychain. 
Set `0` if you want to specify 'no time-out'", + type: Integer, + default_value: 3600) + ] + end + + def self.authors + ["mollyIV", "svenmuennich"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'setup_ci( + provider: "circleci" + )', + 'setup_ci( + provider: "circleci", + timeout: 0 + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_circle_ci.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_circle_ci.rb new file mode 100644 index 0000000..8a9418f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_circle_ci.rb @@ -0,0 +1,58 @@ +module Fastlane + module Actions + class SetupCircleCiAction < Action + def self.run(params) + other_action.setup_ci(provider: "circleci", force: params[:force]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Setup the keychain and match to work with CircleCI" + end + + def self.details + list = <<-LIST.markdown_list(true) + Creates a new temporary keychain for use with match + Switches match to `readonly` mode to not create new profiles/cert on CI + Sets up log and test result paths to be easily collectible + LIST + + [ + list, + "This action helps with CircleCI integration. Add this to the top of your Fastfile if you use CircleCI." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_SETUP_CIRCLECI_FORCE", + description: "Force setup, even if not executed by CircleCI", + type: Boolean, + default_value: false) + ] + end + + def self.authors + ["dantoml"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'setup_circle_ci' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_jenkins.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_jenkins.rb new file mode 100644 index 0000000..ea44924 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_jenkins.rb @@ -0,0 +1,197 @@ +module Fastlane + module Actions + class SetupJenkinsAction < Action + USED_ENV_NAMES = [ + "BACKUP_XCARCHIVE_DESTINATION", + "DERIVED_DATA_PATH", + "FL_CARTHAGE_DERIVED_DATA", + "FL_SLATHER_BUILD_DIRECTORY", + "GYM_BUILD_PATH", + "GYM_CODE_SIGNING_IDENTITY", + "GYM_DERIVED_DATA_PATH", + "GYM_OUTPUT_DIRECTORY", + "GYM_RESULT_BUNDLE", + "SCAN_DERIVED_DATA_PATH", + "SCAN_OUTPUT_DIRECTORY", + "SCAN_RESULT_BUNDLE", + "XCODE_DERIVED_DATA_PATH", + "MATCH_KEYCHAIN_NAME", + "MATCH_KEYCHAIN_PASSWORD", + "MATCH_READONLY" + ].freeze + + def self.run(params) + # Stop if not executed by CI + if !Helper.ci? 
&& !params[:force] + UI.important("Not executed by Continuous Integration system.") + return + end + + # Print table + FastlaneCore::PrintTable.print_values( + config: params, + title: "Summary for Setup Jenkins Action" + ) + + # Keychain + if params[:unlock_keychain] && params[:keychain_path] + keychain_path = params[:keychain_path] + UI.message("Unlocking keychain: \"#{keychain_path}\".") + Actions::UnlockKeychainAction.run( + path: keychain_path, + password: params[:keychain_password], + add_to_search_list: params[:add_keychain_to_search_list], + set_default: params[:set_default_keychain] + ) + ENV['MATCH_KEYCHAIN_NAME'] ||= keychain_path + ENV['MATCH_KEYCHAIN_PASSWORD'] ||= params[:keychain_password] + ENV["MATCH_READONLY"] ||= true.to_s + end + + # Code signing identity + if params[:set_code_signing_identity] && params[:code_signing_identity] + code_signing_identity = params[:code_signing_identity] + UI.message("Set code signing identity: \"#{code_signing_identity}\".") + ENV['GYM_CODE_SIGNING_IDENTITY'] = code_signing_identity + end + + # Set output directory + if params[:output_directory] + output_directory_path = File.expand_path(params[:output_directory]) + UI.message("Set output directory path to: \"#{output_directory_path}\".") + ENV['GYM_BUILD_PATH'] = output_directory_path + ENV['GYM_OUTPUT_DIRECTORY'] = output_directory_path + ENV['SCAN_OUTPUT_DIRECTORY'] = output_directory_path + ENV['BACKUP_XCARCHIVE_DESTINATION'] = output_directory_path + end + + # Set derived data + if params[:derived_data_path] + derived_data_path = File.expand_path(params[:derived_data_path]) + UI.message("Set derived data path to: \"#{derived_data_path}\".") + ENV['DERIVED_DATA_PATH'] = derived_data_path # Used by clear_derived_data. + ENV['XCODE_DERIVED_DATA_PATH'] = derived_data_path + ENV['GYM_DERIVED_DATA_PATH'] = derived_data_path + ENV['SCAN_DERIVED_DATA_PATH'] = derived_data_path + ENV['FL_CARTHAGE_DERIVED_DATA'] = derived_data_path + ENV['FL_SLATHER_BUILD_DIRECTORY'] = derived_data_path + end + + # Set result bundle + if params[:result_bundle] + UI.message("Set result bundle.") + ENV['GYM_RESULT_BUNDLE'] = "YES" + ENV['SCAN_RESULT_BUNDLE'] = "YES" + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Setup xcodebuild, gym and scan for easier Jenkins integration" + end + + def self.details + list = <<-LIST.markdown_list(true) + Adds and unlocks keychains from Jenkins 'Keychains and Provisioning Profiles Plugin' + Sets unlocked keychain to be used by Match + Sets code signing identity from Jenkins 'Keychains and Provisioning Profiles Plugin' + Sets output directory to './output' (gym, scan and backup_xcarchive) + Sets derived data path to './derivedData' (xcodebuild, gym, scan and clear_derived_data, carthage) + Produce result bundle (gym and scan) + LIST + + [ + list, + "This action helps with Jenkins integration. Creates own derived data for each job. 
All build results like IPA files and archives will be stored in the `./output` directory.", + "The action also works with [Keychains and Provisioning Profiles Plugin](https://wiki.jenkins-ci.org/display/JENKINS/Keychains+and+Provisioning+Profiles+Plugin), the selected keychain will be automatically unlocked and the selected code signing identity will be used.", + "[Match](https://docs.fastlane.tools/actions/match/) will also be set up to use the unlocked keychain and set in read-only mode, if its environment variables were not yet defined.", + "By default this action will only work when _fastlane_ is executed on a CI system." ].join("\n") + end + + def self.available_options + [ + # General + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_SETUP_JENKINS_FORCE", + description: "Force setup, even if not executed by Jenkins", + type: Boolean, + default_value: false), + + # Keychain + FastlaneCore::ConfigItem.new(key: :unlock_keychain, + env_name: "FL_SETUP_JENKINS_UNLOCK_KEYCHAIN", + description: "Unlocks keychain", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :add_keychain_to_search_list, + env_name: "FL_SETUP_JENKINS_ADD_KEYCHAIN_TO_SEARCH_LIST", + description: "Add to keychain search list, valid values are true, false, :add, and :replace", + skip_type_validation: true, # allow Boolean, Symbol + default_value: :replace), + FastlaneCore::ConfigItem.new(key: :set_default_keychain, + env_name: "FL_SETUP_JENKINS_SET_DEFAULT_KEYCHAIN", + description: "Set keychain as default", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :keychain_path, + env_name: "KEYCHAIN_PATH", + description: "Path to keychain", + optional: true), + FastlaneCore::ConfigItem.new(key: :keychain_password, + env_name: "KEYCHAIN_PASSWORD", + description: "Keychain password", + sensitive: true, + default_value: ""), + + # Code signing identity + FastlaneCore::ConfigItem.new(key: :set_code_signing_identity, + env_name: "FL_SETUP_JENKINS_SET_CODE_SIGNING_IDENTITY", + description: "Set code signing identity from CODE_SIGNING_IDENTITY environment", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :code_signing_identity, + env_name: "CODE_SIGNING_IDENTITY", + description: "Code signing identity", + optional: true), + + # Xcode parameters + FastlaneCore::ConfigItem.new(key: :output_directory, + env_name: "FL_SETUP_JENKINS_OUTPUT_DIRECTORY", + description: "The directory in which the ipa file should be stored", + default_value: "./output"), + FastlaneCore::ConfigItem.new(key: :derived_data_path, + env_name: "FL_SETUP_JENKINS_DERIVED_DATA_PATH", + description: "The directory where built products and other derived data will go", + default_value: "./derivedData"), + FastlaneCore::ConfigItem.new(key: :result_bundle, + env_name: "FL_SETUP_JENKINS_RESULT_BUNDLE", + description: "Produce a result bundle describing what occurred", + type: Boolean, + default_value: true) + ] + end + + def self.authors + ["bartoszj"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'setup_jenkins' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_travis.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_travis.rb new file mode 100644 index 0000000..c6b84dc --- /dev/null +++ 
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/setup_travis.rb @@ -0,0 +1,57 @@ +module Fastlane + module Actions + class SetupTravisAction < Action + def self.run(params) + other_action.setup_ci(provider: "travis", force: params[:force]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Setup the keychain and match to work with Travis CI" + end + + def self.details + list = <<-LIST.markdown_list(true) + Creates a new temporary keychain for use with match + Switches match to `readonly` mode to not create new profiles/cert on CI + LIST + + [ + list, + "This action helps with Travis integration. Add this to the top of your Fastfile if you use Travis." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :force, + env_name: "FL_SETUP_TRAVIS_FORCE", + description: "Force setup, even if not executed by travis", + type: Boolean, + default_value: false) + ] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'setup_travis' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sh.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sh.rb new file mode 100644 index 0000000..e5ad7d2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sh.rb @@ -0,0 +1,69 @@ +module Fastlane + module Actions + class ShAction < Action + def self.run(params) + # this is implemented in the sh_helper.rb + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Runs a shell command" + end + + def self.details + [ + "Allows running an arbitrary shell command.", + "Be aware of a specific behavior of `sh` action with regard to the working directory. For details, refer to [Advanced](https://docs.fastlane.tools/advanced/#directory-behavior)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :command, + description: 'Shell command to be executed', + optional: false), + FastlaneCore::ConfigItem.new(key: :log, + description: 'Determines whether fastlane should print out the executed command itself and output of the executed command. If command line option --troubleshoot is used, then it overrides this option to true', + optional: true, + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :error_callback, + description: 'A callback invoked with the command output if there is a non-zero exit status', + optional: true, + type: :string_callback, + default_value: nil) + ] + end + + def self.return_value + 'Outputs the string and executes it. 
When running in tests, it returns the actual command instead of executing it' + end + + def self.return_type + :string + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'sh("ls")', + 'sh("git", "commit", "-m", "My message")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sigh.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sigh.rb new file mode 100644 index 0000000..1fd263b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sigh.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/get_provisioning_profile' + class SighAction < GetProvisioningProfileAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `get_provisioning_profile` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/skip_docs.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/skip_docs.rb new file mode 100644 index 0000000..fa9aa53 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/skip_docs.rb @@ -0,0 +1,52 @@ +module Fastlane + module Actions + class SkipDocsAction < Action + def self.run(params) + ENV["FASTLANE_SKIP_DOCS"] = "1" + end + + def self.step_text + nil + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Skip the creation of the fastlane/README.md file when running fastlane" + end + + def self.available_options + end + + def self.output + end + + def self.return_value + end + + def self.details + "Tell _fastlane_ to not automatically create a `fastlane/README.md` when running _fastlane_. You can always trigger the creation of this file manually by running `fastlane docs`." + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'skip_docs' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slack.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slack.rb new file mode 100644 index 0000000..efc54a1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slack.rb @@ -0,0 +1,307 @@ +require 'fastlane/notification/slack' + +# rubocop:disable Style/CaseEquality +# rubocop:disable Style/MultilineTernaryOperator +# rubocop:disable Style/NestedTernaryOperator +module Fastlane + module Actions + class SlackAction < Action + class Runner + def initialize(slack_url) + @notifier = Fastlane::Notification::Slack.new(slack_url) + end + + def run(options) + options[:message] = self.class.trim_message(options[:message].to_s || '') + options[:message] = Fastlane::Notification::Slack::LinkConverter.convert(options[:message]) + + options[:pretext] = options[:pretext].gsub('\n', "\n") unless options[:pretext].nil? 
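+ # Bare channel names are normalized below: a leading '#' is prepended unless the target already starts with '#' (channel) or '@' (direct message), so a value like "builds" is posted to "#builds".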
+ + if options[:channel].to_s.length > 0 + channel = options[:channel] + channel = ('#' + options[:channel]) unless ['#', '@'].include?(channel[0]) # send message to channel by default + end + + username = options[:use_webhook_configured_username_and_icon] ? nil : options[:username] + + slack_attachment = self.class.generate_slack_attachments(options) + link_names = options[:link_names] + icon_url = options[:use_webhook_configured_username_and_icon] ? nil : options[:icon_url] + + post_message( + channel: channel, + username: username, + attachments: [slack_attachment], + link_names: link_names, + icon_url: icon_url, + fail_on_error: options[:fail_on_error] + ) + end + + def post_message(channel:, username:, attachments:, link_names:, icon_url:, fail_on_error:) + @notifier.post_to_legacy_incoming_webhook( + channel: channel, + username: username, + link_names: link_names, + icon_url: icon_url, + attachments: attachments + ) + UI.success('Successfully sent Slack notification') + rescue => error + UI.error("Exception: #{error}") + message = "Error pushing Slack message, maybe the integration has no permission to post on this channel? Try removing the channel parameter in your Fastfile, this is usually caused by a misspelled or changed group/channel name or an expired SLACK_URL" + if fail_on_error + UI.user_error!(message) + else + UI.error(message) + end + end + + # As there is a text limit in the notifications, we are + # usually interested in the last part of the message + # e.g. for tests + def self.trim_message(message) + # We want the last 7000 characters, instead of the first 7000, as the error is at the bottom + start_index = [message.length - 7000, 0].max + message = message[start_index..-1] + # We want line breaks to be shown on slack output so we replace + # input non-interpreted line break with interpreted line break + message.gsub('\n', "\n") + end + + def self.generate_slack_attachments(options) + color = (options[:success] ? 'good' : 'danger') + should_add_payload = ->(payload_name) { options[:default_payloads].map(&:to_sym).include?(payload_name.to_sym) } + + slack_attachment = { + fallback: options[:message], + text: options[:message], + pretext: options[:pretext], + color: color, + mrkdwn_in: ["pretext", "text", "fields", "message"], + fields: [] + } + + # custom user payloads + slack_attachment[:fields] += options[:payload].map do |k, v| + { + title: k.to_s, + value: Fastlane::Notification::Slack::LinkConverter.convert(v.to_s), + short: false + } + end + + # Add the lane to the Slack message + # This might be nil, if slack is called as "one-off" action + if should_add_payload[:lane] && Actions.lane_context[Actions::SharedValues::LANE_NAME] + slack_attachment[:fields] << { + title: 'Lane', + value: Actions.lane_context[Actions::SharedValues::LANE_NAME], + short: true + } + end + + # test_result + if should_add_payload[:test_result] + slack_attachment[:fields] << { + title: 'Result', + value: (options[:success] ? 
'Success' : 'Error'), + short: true + } + end + + # git branch + if Actions.git_branch && should_add_payload[:git_branch] + slack_attachment[:fields] << { + title: 'Git Branch', + value: Actions.git_branch, + short: true + } + end + + # git_author + if Actions.git_author_email && should_add_payload[:git_author] + if FastlaneCore::Env.truthy?('FASTLANE_SLACK_HIDE_AUTHOR_ON_SUCCESS') && options[:success] + # We only show the git author if the build failed + else + slack_attachment[:fields] << { + title: 'Git Author', + value: Actions.git_author_email, + short: true + } + end + end + + # last_git_commit + if Actions.last_git_commit_message && should_add_payload[:last_git_commit] + slack_attachment[:fields] << { + title: 'Git Commit', + value: Actions.last_git_commit_message, + short: false + } + end + + # last_git_commit_hash + if Actions.last_git_commit_hash(true) && should_add_payload[:last_git_commit_hash] + slack_attachment[:fields] << { + title: 'Git Commit Hash', + value: Actions.last_git_commit_hash(short: true), + short: false + } + end + + # merge additional properties + deep_merge(slack_attachment, options[:attachment_properties]) + end + + # Adapted from https://stackoverflow.com/a/30225093/158525 + def self.deep_merge(a, b) + merger = proc do |key, v1, v2| + Hash === v1 && Hash === v2 ? + v1.merge(v2, &merger) : Array === v1 && Array === v2 ? + v1 | v2 : [:undefined, nil, :nil].include?(v2) ? v1 : v2 + end + a.merge(b, &merger) + end + end + + def self.is_supported?(platform) + true + end + + def self.run(options) + Runner.new(options[:slack_url]).run(options) + end + + def self.description + "Send a success/error message to your [Slack](https://slack.com) group" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_SLACK_MESSAGE", + description: "The message that should be displayed on Slack. This supports the standard Slack markup language", + optional: true), + FastlaneCore::ConfigItem.new(key: :pretext, + env_name: "FL_SLACK_PRETEXT", + description: "This is optional text that appears above the message attachment block. This supports the standard Slack markup language", + optional: true), + FastlaneCore::ConfigItem.new(key: :channel, + env_name: "FL_SLACK_CHANNEL", + description: "#channel or @username", + optional: true), + FastlaneCore::ConfigItem.new(key: :use_webhook_configured_username_and_icon, + env_name: "FL_SLACK_USE_WEBHOOK_CONFIGURED_USERNAME_AND_ICON", + description: "Use webhook's default username and icon settings? 
(true/false)", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :slack_url, + env_name: "SLACK_URL", + sensitive: true, + description: "Create an Incoming WebHook for your Slack group", + verify_block: proc do |value| + UI.user_error!("Invalid URL, must start with https://") unless value.start_with?("https://") + end), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FL_SLACK_USERNAME", + description: "Overrides the webhook's username property if use_webhook_configured_username_and_icon is false", + default_value: "fastlane", + optional: true), + FastlaneCore::ConfigItem.new(key: :icon_url, + env_name: "FL_SLACK_ICON_URL", + description: "Overrides the webhook's image property if use_webhook_configured_username_and_icon is false", + default_value: "https://fastlane.tools/assets/img/fastlane_icon.png", + optional: true), + FastlaneCore::ConfigItem.new(key: :payload, + env_name: "FL_SLACK_PAYLOAD", + description: "Add additional information to this post. payload must be a hash containing any key with any value", + default_value: {}, + type: Hash), + FastlaneCore::ConfigItem.new(key: :default_payloads, + env_name: "FL_SLACK_DEFAULT_PAYLOADS", + description: "Specifies default payloads to include. Pass an empty array to suppress all the default payloads", + default_value: ['lane', 'test_result', 'git_branch', 'git_author', 'last_git_commit', 'last_git_commit_hash'], + type: Array), + FastlaneCore::ConfigItem.new(key: :attachment_properties, + env_name: "FL_SLACK_ATTACHMENT_PROPERTIES", + description: "Merge additional properties in the slack attachment, see https://api.slack.com/docs/attachments", + default_value: {}, + type: Hash), + FastlaneCore::ConfigItem.new(key: :success, + env_name: "FL_SLACK_SUCCESS", + description: "Was this build successful? (true/false)", + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :fail_on_error, + env_name: "FL_SLACK_FAIL_ON_ERROR", + description: "Should an error sending the slack notification cause a failure? (true/false)", + optional: true, + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :link_names, + env_name: "FL_SLACK_LINK_NAMES", + description: "Find and link channel names and usernames (true/false)", + optional: true, + default_value: false, + type: Boolean) + ] + end + + def self.author + "KrauseFx" + end + + def self.example_code + [ + 'slack(message: "App successfully released!")', + 'slack( + message: "App successfully released!", + channel: "#channel", # Optional, by default will post to the default channel configured for the POST URL. + success: true, # Optional, defaults to true. + payload: { # Optional, lets you specify any number of your own Slack attachments. + "Build Date" => Time.new.to_s, + "Built by" => "Jenkins", + }, + default_payloads: [:git_branch, :git_author], # Optional, lets you specify default payloads to include. Pass an empty array to suppress all the default payloads. + attachment_properties: { # Optional, lets you specify any other properties available for attachments in the slack API (see https://api.slack.com/docs/attachments). + # This hash is deep merged with the existing properties set using the other properties above. This allows your own fields properties to be appended to the existing fields that were created using the `payload` property for instance. 
+ thumb_url: "http://example.com/path/to/thumb.png", + fields: [{ + title: "My Field", + value: "My Value", + short: true + }] + } + )' + ] + end + + def self.category + :notifications + end + + def self.details + "Create an Incoming WebHook and export this as `SLACK_URL`. Can send a message to **#channel** (by default), a direct message to **@username** or a message to a private group **group** with success (green) or failure (red) status." + end + + ##################################################### + # @!group Helper + ##################################################### + + def self.trim_message(message) + Runner.trim_message(message) + end + + def self.generate_slack_attachments(options) + UI.deprecated('`Fastlane::Actions::Slack.generate_slack_attachments` is subject to be removed as Slack recommends migrating `attachments` to Block Kit. fastlane will also follow the same direction.') + Runner.generate_slack_attachments(options) + end + end + end +end +# rubocop:enable Style/CaseEquality +# rubocop:enable Style/MultilineTernaryOperator +# rubocop:enable Style/NestedTernaryOperator diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slather.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slather.rb new file mode 100644 index 0000000..984e490 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/slather.rb @@ -0,0 +1,319 @@ +module Fastlane + module Actions + class SlatherAction < Action + # https://github.com/SlatherOrg/slather/blob/v2.4.9/lib/slather/command/coverage_command.rb + ARGS_MAP = { + travis: '--travis', + travis_pro: '--travispro', + circleci: '--circleci', + jenkins: '--jenkins', + buildkite: '--buildkite', + teamcity: '--teamcity', + github: '--github', + + coveralls: '--coveralls', + simple_output: '--simple-output', + gutter_json: '--gutter-json', + cobertura_xml: '--cobertura-xml', + sonarqube_xml: '--sonarqube-xml', + llvm_cov: '--llvm-cov', + json: '--json', + html: '--html', + show: '--show', + + build_directory: '--build-directory', + source_directory: '--source-directory', + output_directory: '--output-directory', + ignore: '--ignore', + verbose: '--verbose', + + input_format: '--input-format', + scheme: '--scheme', + configuration: '--configuration', + workspace: '--workspace', + binary_file: '--binary-file', + binary_basename: '--binary-basename', + arch: '--arch', + source_files: '--source-files', + decimals: '--decimals' + }.freeze + + def self.run(params) + # This will fail if using Bundler. Skip the check rather than needing to + # require bundler + unless params[:use_bundle_exec] + Actions.verify_gem!('slather') + end + + validate_params!(params) + + command = build_command(params) + sh(command) + end + + def self.has_config_file + File.file?('.slather.yml') + end + + def self.slather_version + require 'slather' + Slather::VERSION + end + + def self.configuration_available? + Gem::Version.new('2.4.1') <= Gem::Version.new(slather_version) + end + + def self.validate_params!(params) + if params[:configuration] + UI.user_error!('configuration option is available since version 2.4.1') unless configuration_available? 
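+ # configuration_available? (defined above) compares the installed Slather version against 2.4.1, the first release that understands --configuration.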
+ end + + if params[:proj] || has_config_file + true + else + UI.user_error!("You have to provide a project with `:proj` or use a .slather.yml") + end + + # for backwards compatibility when :binary_file type was Boolean + if params[:binary_file] == true || params[:binary_file] == false + params[:binary_file] = nil + end + + # :binary_file validation was skipped for backwards compatibility with Boolean. If a + # Boolean was passed in, it has now been removed. Revalidate :binary_file + binary_file_options = available_options.find { |a| a.key == :binary_file } + binary_file_options.skip_type_validation = false + binary_file_options.verify!(params[:binary_file]) + end + + def self.build_command(params) + command = [] + command.push("bundle exec") if params[:use_bundle_exec] && shell_out_should_use_bundle_exec? + command << "slather coverage" + + ARGS_MAP.each do |key, cli_param| + cli_value = params[key] + if cli_value + if cli_value.kind_of?(TrueClass) + command << cli_param + elsif cli_value.kind_of?(String) + command << cli_param + command << cli_value.shellescape + elsif cli_value.kind_of?(Array) + command << cli_value.map { |path| "#{cli_param} #{path.shellescape}" } + end + else + next + end + end + + command << params[:proj].shellescape if params[:proj] + command.join(" ") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Use slather to generate a code coverage report" + end + + def self.details + [ + "Slather works with multiple code coverage formats, including Xcode 7 code coverage.", + "Slather is available at [https://github.com/SlatherOrg/slather](https://github.com/SlatherOrg/slather)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :build_directory, + env_name: "FL_SLATHER_BUILD_DIRECTORY", # The name of the environment variable + description: "The location of the build output", # a short description of this parameter + optional: true), + FastlaneCore::ConfigItem.new(key: :proj, + env_name: "FL_SLATHER_PROJ", # The name of the environment variable + description: "The project file that slather looks at", # a short description of this parameter + verify_block: proc do |value| + UI.user_error!("No project file specified, pass using `proj: 'Project.xcodeproj'`") unless value && !value.empty? 
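+ # The option itself stays optional: validate_params! above still accepts a missing :proj as long as a .slather.yml configuration file is present.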
+ end, + optional: true), + FastlaneCore::ConfigItem.new(key: :workspace, + env_name: "FL_SLATHER_WORKSPACE", + description: "The workspace that slather looks at", + optional: true), + FastlaneCore::ConfigItem.new(key: :scheme, + env_name: "FL_SLATHER_SCHEME", # The name of the environment variable + description: "Scheme to use when calling slather", + optional: true), + FastlaneCore::ConfigItem.new(key: :configuration, + env_name: "FL_SLATHER_CONFIGURATION", # The name of the environment variable + description: "Configuration to use when calling slather (since slather-2.4.1)", + optional: true), + FastlaneCore::ConfigItem.new(key: :input_format, + env_name: "FL_SLATHER_INPUT_FORMAT", # The name of the environment variable + description: "The input format that slather should look for", + optional: true), + FastlaneCore::ConfigItem.new(key: :github, + env_name: "FL_SLATHER_GITHUB_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on Github Actions", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :buildkite, + env_name: "FL_SLATHER_BUILDKITE_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on Buildkite", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :teamcity, + env_name: "FL_SLATHER_TEAMCITY_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on TeamCity", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :jenkins, + env_name: "FL_SLATHER_JENKINS_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on Jenkins", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :travis, + env_name: "FL_SLATHER_TRAVIS_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on TravisCI", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :travis_pro, + env_name: "FL_SLATHER_TRAVIS_PRO_ENABLED", # The name of the environment variable + description: "Tell slather that it is running on TravisCI Pro", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :circleci, + env_name: "FL_SLATHER_CIRCLECI_ENABLED", + description: "Tell slather that it is running on CircleCI", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :coveralls, + env_name: "FL_SLATHER_COVERALLS_ENABLED", + description: "Tell slather that it should post data to Coveralls", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :simple_output, + env_name: "FL_SLATHER_SIMPLE_OUTPUT_ENABLED", + description: "Tell slather that it should output results to the terminal", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :gutter_json, + env_name: "FL_SLATHER_GUTTER_JSON_ENABLED", + description: "Tell slather that it should output results as Gutter JSON format", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :cobertura_xml, + env_name: "FL_SLATHER_COBERTURA_XML_ENABLED", + description: "Tell slather that it should output results as Cobertura XML format", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :sonarqube_xml, + env_name: "FL_SLATHER_SONARQUBE_XML_ENABLED", + description: "Tell slather that it should output results as SonarQube Generic XML format", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :llvm_cov, + env_name: "FL_SLATHER_LLVM_COV_ENABLED", + description: 
"Tell slather that it should output results as llvm-cov show format", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :json, + env_name: "FL_SLATHER_JSON_ENABLED", + description: "Tell slather that it should output results as static JSON report", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :html, + env_name: "FL_SLATHER_HTML_ENABLED", + description: "Tell slather that it should output results as static HTML pages", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :show, + env_name: "FL_SLATHER_SHOW_ENABLED", + description: "Tell slather that it should open static html pages automatically", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :source_directory, + env_name: "FL_SLATHER_SOURCE_DIRECTORY", + description: "Tell slather the location of your source files", + optional: true), + FastlaneCore::ConfigItem.new(key: :output_directory, + env_name: "FL_SLATHER_OUTPUT_DIRECTORY", + description: "Tell slather the location of for your output files", + optional: true), + FastlaneCore::ConfigItem.new(key: :ignore, + env_name: "FL_SLATHER_IGNORE", + description: "Tell slather to ignore files matching a path or any path from an array of paths", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_SLATHER_VERBOSE", + description: "Tell slather to enable verbose mode", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :use_bundle_exec, + env_name: "FL_SLATHER_USE_BUNDLE_EXEC", + description: "Use bundle exec to execute slather. Make sure it is in the Gemfile", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :binary_basename, + env_name: "FL_SLATHER_BINARY_BASENAME", + description: "Basename of the binary file, this should match the name of your bundle excluding its extension (i.e. YourApp [for YourApp.app bundle])", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :binary_file, + env_name: "FL_SLATHER_BINARY_FILE", + description: "Binary file name to be used for code coverage", + type: Array, + skip_type_validation: true, # skipping validation for backwards compatibility with Boolean type + optional: true), + FastlaneCore::ConfigItem.new(key: :arch, + env_name: "FL_SLATHER_ARCH", + description: "Specify which architecture the binary file is in. Needed for universal binaries", + optional: true), + FastlaneCore::ConfigItem.new(key: :source_files, + env_name: "FL_SLATHER_SOURCE_FILES", + description: "A Dir.glob compatible pattern used to limit the lookup to specific source files. 
Ignored in gcov mode", + skip_type_validation: true, # skipping validation for backwards compatibility with Boolean type + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :decimals, + env_name: "FL_SLATHER_DECIMALS", + description: "The amount of decimals to use for % coverage reporting", + skip_type_validation: true, # allow Integer, String + default_value: false, + optional: true) + ] + end + + def self.output + end + + def self.authors + ["mattdelves"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'slather( + build_directory: "foo", + input_format: "bah", + scheme: "MyScheme", + proj: "MyProject.xcodeproj" + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/snapshot.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/snapshot.rb new file mode 100644 index 0000000..8c943a7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/snapshot.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/capture_ios_screenshots' + class SnapshotAction < CaptureIosScreenshotsAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `capture_ios_screenshots` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sonar.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sonar.rb new file mode 100644 index 0000000..5fe49b8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sonar.rb @@ -0,0 +1,170 @@ +module Fastlane + module Actions + class SonarAction < Action + def self.run(params) + verify_sonar_scanner_binary + + command_prefix = [ + 'cd', + File.expand_path('.').shellescape, + '&&' + ].join(' ') + + sonar_scanner_args = [] + sonar_scanner_args << "-Dproject.settings=\"#{params[:project_configuration_path]}\"" if params[:project_configuration_path] + sonar_scanner_args << "-Dsonar.projectKey=\"#{params[:project_key]}\"" if params[:project_key] + sonar_scanner_args << "-Dsonar.projectName=\"#{params[:project_name]}\"" if params[:project_name] + sonar_scanner_args << "-Dsonar.projectVersion=\"#{params[:project_version]}\"" if params[:project_version] + sonar_scanner_args << "-Dsonar.sources=\"#{params[:sources_path]}\"" if params[:sources_path] + sonar_scanner_args << "-Dsonar.exclusions=\"#{params[:exclusions]}\"" if params[:exclusions] + sonar_scanner_args << "-Dsonar.language=\"#{params[:project_language]}\"" if params[:project_language] + sonar_scanner_args << "-Dsonar.sourceEncoding=\"#{params[:source_encoding]}\"" if params[:source_encoding] + sonar_scanner_args << "-Dsonar.login=\"#{params[:sonar_login]}\"" if params[:sonar_login] + sonar_scanner_args << "-Dsonar.host.url=\"#{params[:sonar_url]}\"" if params[:sonar_url] + sonar_scanner_args << "-Dsonar.organization=\"#{params[:sonar_organization]}\"" if params[:sonar_organization] + sonar_scanner_args << "-Dsonar.branch.name=\"#{params[:branch_name]}\"" if params[:branch_name] + sonar_scanner_args << "-Dsonar.pullrequest.branch=\"#{params[:pull_request_branch]}\"" if params[:pull_request_branch] + sonar_scanner_args << "-Dsonar.pullrequest.base=\"#{params[:pull_request_base]}\"" if 
params[:pull_request_base] + sonar_scanner_args << "-Dsonar.pullrequest.key=\"#{params[:pull_request_key]}\"" if params[:pull_request_key] + + sonar_scanner_args << params[:sonar_runner_args] if params[:sonar_runner_args] + + command = [ + command_prefix, + 'sonar-scanner', + sonar_scanner_args + ].join(' ') + # hide command, as it may contain credentials + Fastlane::Actions.sh_control_output(command, print_command: false, print_command_output: true) + end + + def self.verify_sonar_scanner_binary + UI.user_error!("You have to install sonar-scanner using `brew install sonar-scanner`") unless `which sonar-scanner`.to_s.length > 0 + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Invokes sonar-scanner to programmatically run SonarQube analysis" + end + + def self.details + [ + "See [http://docs.sonarqube.org/display/SCAN/Analyzing+with+SonarQube+Scanner](http://docs.sonarqube.org/display/SCAN/Analyzing+with+SonarQube+Scanner) for details.", + "It can process unit test results if they are formatted as a JUnit report, as shown in the [xctest](https://docs.fastlane.tools/actions/xctest/) action. It can also integrate coverage reports in Cobertura format, which can be generated by the [slather](https://docs.fastlane.tools/actions/slather/) action." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :project_configuration_path, + env_name: "FL_SONAR_RUNNER_PROPERTIES_PATH", + description: "The path to your sonar project configuration file; defaults to `sonar-project.properties`", # default is enforced by sonar-scanner binary + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find file at path '#{value}'") unless value.nil? || File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :project_key, + env_name: "FL_SONAR_RUNNER_PROJECT_KEY", + description: "The key sonar uses to identify the project, e.g. `name.gretzki.awesomeApp`. Must either be specified here or inside the sonar project configuration file", + optional: true), + FastlaneCore::ConfigItem.new(key: :project_name, + env_name: "FL_SONAR_RUNNER_PROJECT_NAME", + description: "The name of the project that gets displayed on the sonar report page. Must either be specified here or inside the sonar project configuration file", + optional: true), + FastlaneCore::ConfigItem.new(key: :project_version, + env_name: "FL_SONAR_RUNNER_PROJECT_VERSION", + description: "The project's version that gets displayed on the sonar report page. Must either be specified here or inside the sonar project configuration file", + optional: true), + FastlaneCore::ConfigItem.new(key: :sources_path, + env_name: "FL_SONAR_RUNNER_SOURCES_PATH", + description: "Comma-separated paths to directories containing source files. Must either be specified here or inside the sonar project configuration file", + optional: true), + FastlaneCore::ConfigItem.new(key: :exclusions, + env_name: "FL_SONAR_RUNNER_EXCLUSIONS", + description: "Comma-separated paths to directories to be excluded from the analysis", + optional: true), + FastlaneCore::ConfigItem.new(key: :project_language, + env_name: "FL_SONAR_RUNNER_PROJECT_LANGUAGE", + description: "Language key, e.g. 
objc", + optional: true), + FastlaneCore::ConfigItem.new(key: :source_encoding, + env_name: "FL_SONAR_RUNNER_SOURCE_ENCODING", + description: "Used encoding of source files, e.g., UTF-8", + optional: true), + FastlaneCore::ConfigItem.new(key: :sonar_runner_args, + env_name: "FL_SONAR_RUNNER_ARGS", + description: "Pass additional arguments to sonar-scanner. Be sure to provide the arguments with a leading `-D` e.g. FL_SONAR_RUNNER_ARGS=\"-Dsonar.verbose=true\"", + optional: true), + FastlaneCore::ConfigItem.new(key: :sonar_login, + env_name: "FL_SONAR_LOGIN", + description: "Pass the Sonar Login token (e.g: xxxxxxprivate_token_XXXXbXX7e)", + optional: true, + sensitive: true), + FastlaneCore::ConfigItem.new(key: :sonar_url, + env_name: "FL_SONAR_URL", + description: "Pass the url of the Sonar server", + optional: true), + FastlaneCore::ConfigItem.new(key: :sonar_organization, + env_name: "FL_SONAR_ORGANIZATION", + description: "Key of the organization on SonarCloud", + optional: true), + FastlaneCore::ConfigItem.new(key: :branch_name, + env_name: "FL_SONAR_RUNNER_BRANCH_NAME", + description: "Pass the branch name which is getting scanned", + optional: true), + FastlaneCore::ConfigItem.new(key: :pull_request_branch, + env_name: "FL_SONAR_RUNNER_PULL_REQUEST_BRANCH", + description: "The name of the branch that contains the changes to be merged", + optional: true), + FastlaneCore::ConfigItem.new(key: :pull_request_base, + env_name: "FL_SONAR_RUNNER_PULL_REQUEST_BASE", + description: "The long-lived branch into which the PR will be merged", + optional: true), + FastlaneCore::ConfigItem.new(key: :pull_request_key, + env_name: "FL_SONAR_RUNNER_PULL_REQUEST_KEY", + description: "Unique identifier of your PR. Must correspond to the key of the PR in GitHub or TFS", + optional: true) + ] + end + + def self.return_value + "The exit code of the sonar-scanner binary" + end + + def self.authors + ["c_gretzki"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'sonar( + project_key: "name.gretzki.awesomeApp", + project_version: "1.0", + project_name: "iOS - AwesomeApp", + sources_path: File.expand_path("../AwesomeApp") + )', + 'sonar( + project_key: "name.gretzki.awesomeApp", + project_version: "1.0", + project_name: "iOS - AwesomeApp", + sources_path: File.expand_path("../AwesomeApp"), + sonar_organization: "myOrg", + sonar_login: "123456abcdef", + sonar_url: "https://sonarcloud.io" + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sourcedocs.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sourcedocs.rb new file mode 100644 index 0000000..13bd82d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sourcedocs.rb @@ -0,0 +1,128 @@ +module Fastlane + module Actions + class SourcedocsAction < Action + def self.run(params) + UI.user_error!("You have to install sourcedocs using `brew install sourcedocs`") if `which sourcedocs`.to_s.length == 0 && !Helper.test? + + command = "sourcedocs generate" + command << " --all-modules" if params[:all_modules] + command << " --spm-module #{params[:spm_module]}" unless params[:spm_module].nil? + command << " --module-name #{params[:module_name]}" unless params[:module_name].nil? + command << " --link-beginning #{params[:link_beginning]}" unless params[:link_beginning].nil? 
+ command << " --link-ending #{params[:link_ending]}" unless params[:link_ending].nil? + command << " --output-folder #{params[:output_folder]}" unless params[:output_folder].nil? + command << " --min-acl #{params[:min_acl]}" unless params[:min_acl].nil? + command << " --module-name-path" if params[:module_name_path] + command << " --clean" if params[:clean] + command << " --collapsible" if params[:collapsible] + command << " --table-of-contents" if params[:table_of_contents] + command << " --reproducible-docs" if params[:reproducible] + unless params[:scheme].nil? + command << " -- -scheme #{params[:scheme]}" + command << " -sdk #{params[:sdk_platform]}" unless params[:sdk_platform].nil? + end + Actions.sh(command) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Generate docs using SourceDocs" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :all_modules, + env_name: 'FL_SOURCEDOCS_OUTPUT_ALL_MODULES', + description: 'Generate documentation for all modules in a Swift package', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :spm_module, + env_name: 'FL_SOURCEDOCS_SPM_MODULE', + description: 'Generate documentation for Swift Package Manager module', + optional: true), + FastlaneCore::ConfigItem.new(key: :module_name, + env_name: 'FL_SOURCEDOCS_MODULE_NAME', + description: 'Generate documentation for a Swift module', + optional: true), + FastlaneCore::ConfigItem.new(key: :link_beginning, + env_name: 'FL_SOURCEDOCS_LINK_BEGINNING', + description: 'The text to begin links with', + optional: true), + FastlaneCore::ConfigItem.new(key: :link_ending, + env_name: 'FL_SOURCEDOCS_LINK_ENDING', + description: 'The text to end links with (default: .md)', + optional: true), + FastlaneCore::ConfigItem.new(key: :output_folder, + env_name: 'FL_SOURCEDOCS_OUTPUT_FOLDER', + description: 'Output directory to clean (default: Documentation/Reference)', + optional: false), + FastlaneCore::ConfigItem.new(key: :min_acl, + env_name: 'FL_SOURCEDOCS_MIN_ACL', + description: 'Access level to include in documentation [private, fileprivate, internal, public, open] (default: public)', + optional: true), + FastlaneCore::ConfigItem.new(key: :module_name_path, + env_name: 'FL_SOURCEDOCS_MODULE_NAME_PATH', + description: 'Include the module name as part of the output folder path', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :clean, + env_name: 'FL_SOURCEDOCS_CLEAN', + description: 'Delete output folder before generating documentation', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :collapsible, + env_name: 'FL_SOURCEDOCS_COLLAPSIBLE', + description: 'Put methods, properties and enum cases inside collapsible blocks', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :table_of_contents, + env_name: 'FL_SOURCEDOCS_TABLE_OF_CONTENT', + description: 'Generate a table of contents with properties and methods for each type', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :reproducible, + env_name: 'FL_SOURCEDOCS_REPRODUCIBLE', + description: 'Generate documentation that is reproducible: only depends on the sources', + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :scheme, + env_name: 'FL_SOURCEDOCS_SCHEME', + description: 'Create documentation for specific scheme', + optional: true), + FastlaneCore::ConfigItem.new(key: :sdk_platform, 
+ env_name: 'FL_SOURCEDOCS_SDK_PlATFORM', + description: 'Create documentation for specific sdk platform', + optional: true) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["Kukurijek"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + "sourcedocs(output_folder: 'docs')", + "sourcedocs(output_folder: 'docs', clean: true, reproducible: true, scheme: 'MyApp')" + ] + end + + def self.category + :documentation + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_logs.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_logs.rb new file mode 100644 index 0000000..2838db7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_logs.rb @@ -0,0 +1,135 @@ +module Fastlane + module Actions + class SpaceshipLogsAction < Action + def self.run(params) + latest = params[:latest] + print_contents = params[:print_contents] + print_paths = params[:print_paths] + copy_to_path = params[:copy_to_path] + copy_to_clipboard = params[:copy_to_clipboard] + + # Get log files + files = Dir.glob("/tmp/spaceship*.log").sort_by { |f| File.mtime(f) }.reverse + + if files.size == 0 + UI.message("No Spaceship log files found") + return [] + end + + # Filter to latest + if latest + files = [files.first] + end + + # Print contents + if print_contents + files.each do |file| + data = File.read(file) + puts("-----------------------------------------------------------------------------------") + puts(" Spaceship Log Content - #{file}") + puts("-----------------------------------------------------------------------------------") + puts(data) + puts("\n") + end + end + + # Print paths + if print_paths + puts("-----------------------------------------------------------------------------------") + puts(" Spaceship Log Paths") + puts("-----------------------------------------------------------------------------------") + files.each do |file| + puts(file) + end + puts("\n") + end + + # Copy to a directory + if copy_to_path + require 'fileutils' + FileUtils.mkdir_p(copy_to_path) + files.each do |file| + FileUtils.cp(file, copy_to_path) + end + end + + # Copy contents to clipboard + if copy_to_clipboard + string = files.map { |file| File.read(file) }.join("\n") + ClipboardAction.run(value: string) + end + + return files + end + + def self.description + "Find, print, and copy Spaceship logs" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :latest, + description: "Finds only the latest Spaceship log file if set to true, otherwise returns all", + default_value: true, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :print_contents, + description: "Prints the contents of the found Spaceship log file(s)", + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :print_paths, + description: "Prints the paths of the found Spaceship log file(s)", + default_value: false, + type: Boolean), + FastlaneCore::ConfigItem.new(key: :copy_to_path, + description: "Copies the found Spaceship log file(s) to a directory", + optional: true), + FastlaneCore::ConfigItem.new(key: :copy_to_clipboard, + description: "Copies the contents of the found Spaceship log file(s) to the clipboard", + default_value: false, + type: Boolean) + ] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'spaceship_logs', + 
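+ # copy the found log files into a directory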
'spaceship_logs( + copy_to_path: "/tmp/artifacts" + )', + 'spaceship_logs( + copy_to_clipboard: true + )', + 'spaceship_logs( + print_contents: true, + print_paths: true + )', + 'spaceship_logs( + latest: false, + print_contents: true, + print_paths: true + )' + ] + end + + def self.category + :misc + end + + def self.return_value + "The array of Spaceship logs" + end + + def self.return_type + :array_of_strings + end + + def self.author + "joshdholtz" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_stats.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_stats.rb new file mode 100644 index 0000000..a46d3a1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spaceship_stats.rb @@ -0,0 +1,73 @@ +module Fastlane + module Actions + class SpaceshipStatsAction < Action + def self.run(params) + require 'fastlane_core/print_table' + require 'spaceship' + + rows = [] + Spaceship::StatsMiddleware.service_stats.each do |service, count| + rows << [service.name, service.auth_type, service.url, count] + end + + puts("") + puts(Terminal::Table.new( + title: "Spaceship Stats", + headings: ["Service", "Auth Type", "URL", "Number of requests"], + rows: FastlaneCore::PrintTable.transform_output(rows) + )) + puts("") + + if params[:print_request_logs] + log_rows = [] + Spaceship::StatsMiddleware.request_logs.each do |request_log| + log_rows << [request_log.auth_type, request_log.url] + end + + puts("") + puts(Terminal::Table.new( + title: "Spaceship Request Log", + headings: ["Auth Type", "URL"], + rows: FastlaneCore::PrintTable.transform_output(log_rows) + )) + puts("") + end + end + + def self.url_name(url_prefix) + Spaceship::StatsMiddleware::URL_PREFIXES[url_prefix] + end + + def self.description + "Print out Spaceship stats from this session (number of requests to each domain)" + end + + def self.is_supported?(platform) + true + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :print_request_logs, + description: "Print all URLs requested", + type: Boolean, + default_value: false) + ] + end + + def self.example_code + [ + 'spaceship_stats' + ] + end + + def self.category + :misc + end + + def self.author + "joshdholtz" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/splunkmint.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/splunkmint.rb new file mode 100644 index 0000000..20696ce --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/splunkmint.rb @@ -0,0 +1,156 @@ +module Fastlane + module Actions + class SplunkmintAction < Action + def self.run(params) + command = [] + command << "curl" + command << verbose(params) + command += proxy_options(params) + command += upload_options(params) + command << upload_url + command << upload_progress(params) + + # Fastlane::Actions.sh has buffering issues, so no progress bar is shown in real time; + # re-enable it once that is fixed: + # result = Fastlane::Actions.sh(command.join(' '), log: false) + shell_command = command.join(' ') + result = Helper.test? 
shell_command : `#{shell_command}` + fail_on_error(result) + + result + end + + def self.fail_on_error(result) + if result.include?("error") + UI.user_error!("Server error, failed to upload the dSYM file") + end + end + + def self.upload_url + "https://ios.splkmobile.com/api/v1/dsyms/upload" + end + + def self.verbose(params) + params[:verbose] ? "--verbose" : "" + end + + def self.upload_progress(params) + params[:upload_progress] ? " --progress-bar -o /dev/null --no-buffer" : "" + end + + def self.dsym_path(params) + file_path = params[:dsym] + file_path ||= Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH] || ENV[SharedValues::DSYM_OUTPUT_PATH.to_s] + file_path ||= Actions.lane_context[SharedValues::DSYM_ZIP_PATH] || ENV[SharedValues::DSYM_ZIP_PATH.to_s] + + if file_path + expanded_file_path = File.expand_path(file_path) + UI.user_error!("Couldn't find file at path '#{expanded_file_path}'") unless File.exist?(expanded_file_path) + + return expanded_file_path + else + UI.user_error!("Couldn't find any dSYM file") + end + end + + def self.upload_options(params) + file_path = dsym_path(params).shellescape + + options = [] + options << "-F file=@#{file_path}" + options << "--header 'X-Splunk-Mint-Auth-Token: #{params[:api_token].shellescape}'" + options << "--header 'X-Splunk-Mint-apikey: #{params[:api_key].shellescape}'" + + options + end + + def self.proxy_options(params) + options = [] + if params[:proxy_address] && params[:proxy_port] && params[:proxy_username] && params[:proxy_password] + options << "-x #{params[:proxy_address].shellescape}:#{params[:proxy_port].shellescape}" + options << "--proxy-user #{params[:proxy_username].shellescape}:#{params[:proxy_password].shellescape}" + end + + options + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload dSYM file to [Splunk MINT](https://mint.splunk.com/)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :dsym, + env_name: "FL_SPLUNKMINT_FILE", + description: "dSYM.zip file to upload to Splunk MINT", + optional: true), + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "FL_SPLUNKMINT_API_KEY", + description: "Splunk MINT App API key e.g. f57a57ca", + sensitive: true, + optional: false), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "FL_SPLUNKMINT_API_TOKEN", + description: "Splunk MINT API token e.g. 
e05ba40754c4869fb7e0b61", + sensitive: true, + optional: false), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_SPLUNKMINT_VERBOSE", + description: "Make detailed output", + type: Boolean, + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :upload_progress, + env_name: "FL_SPLUNKMINT_UPLOAD_PROGRESS", + description: "Show upload progress", + type: Boolean, + default_value: false, + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_username, + env_name: "FL_SPLUNKMINT_PROXY_USERNAME", + description: "Proxy username", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_password, + env_name: "FL_SPLUNKMINT_PROXY_PASSWORD", + sensitive: true, + description: "Proxy password", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_address, + env_name: "FL_SPLUNKMINT_PROXY_ADDRESS", + description: "Proxy address", + optional: true), + FastlaneCore::ConfigItem.new(key: :proxy_port, + env_name: "FL_SPLUNKMINT_PROXY_PORT", + description: "Proxy port", + optional: true) + ] + end + + def self.authors + ["xfreebird"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'splunkmint( + dsym: "My.app.dSYM.zip", + api_key: "43564d3a", + api_token: "e05456234c4869fb7e0b61" + )' + ] + end + + def self.category + :beta + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spm.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spm.rb new file mode 100644 index 0000000..414e701 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/spm.rb @@ -0,0 +1,144 @@ +module Fastlane + module Actions + class SpmAction < Action + def self.run(params) + cmd = ["swift"] + + cmd << (package_commands.include?(params[:command]) ? "package" : params[:command]) + cmd << "--build-path #{params[:build_path]}" if params[:build_path] + cmd << "--package-path #{params[:package_path]}" if params[:package_path] + cmd << "--configuration #{params[:configuration]}" if params[:configuration] + cmd << "--disable-sandbox" if params[:disable_sandbox] + cmd << "--verbose" if params[:verbose] + cmd << params[:command] if package_commands.include?(params[:command]) + cmd << "--enable-code-coverage" if params[:enable_code_coverage] && (params[:command] == 'generate-xcodeproj' || params[:command] == 'test') + if params[:xcconfig] + cmd << "--xcconfig-overrides #{params[:xcconfig]}" + end + if params[:xcpretty_output] + cmd += ["2>&1", "|", "xcpretty", "--#{params[:xcpretty_output]}"] + if params[:xcpretty_args] + cmd << (params[:xcpretty_args]).to_s + end + cmd = %w(set -o pipefail &&) + cmd + end + + FastlaneCore::CommandExecutor.execute(command: cmd.join(" "), + print_all: true, + print_command: true) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Runs Swift Package Manager on your project" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :command, + env_name: "FL_SPM_COMMAND", + description: "The swift command (one of: #{available_commands.join(', ')})", + default_value: "build", + verify_block: proc do |value| + UI.user_error!("Please pass a valid command. 
Use one of the following: #{available_commands.join(', ')}") unless available_commands.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :enable_code_coverage, + env_name: "FL_SPM_ENABLE_CODE_COVERAGE", + description: "Enables code coverage for the generated Xcode project when using the 'generate-xcodeproj' and the 'test' command", + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :build_path, + env_name: "FL_SPM_BUILD_PATH", + description: "Specify build/cache directory [default: ./.build]", + optional: true), + FastlaneCore::ConfigItem.new(key: :package_path, + env_name: "FL_SPM_PACKAGE_PATH", + description: "Change working directory before any other operation", + optional: true), + FastlaneCore::ConfigItem.new(key: :xcconfig, + env_name: "FL_SPM_XCCONFIG", + description: "Use xcconfig file to override swift package generate-xcodeproj defaults", + optional: true), + FastlaneCore::ConfigItem.new(key: :configuration, + short_option: "-c", + env_name: "FL_SPM_CONFIGURATION", + description: "Build with configuration (debug|release) [default: debug]", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass a valid configuration: (debug|release)") unless valid_configurations.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :disable_sandbox, + env_name: "FL_SPM_DISABLE_SANDBOX", + description: "Disable using the sandbox when executing subprocesses", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :xcpretty_output, + env_name: "FL_SPM_XCPRETTY_OUTPUT", + description: "Specifies the output type for xcpretty. eg. 'test', or 'simple'", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass a valid xcpretty output type: (#{xcpretty_output_types.join('|')})") unless xcpretty_output_types.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :xcpretty_args, + env_name: "FL_SPM_XCPRETTY_ARGS", + description: "Pass in xcpretty additional command line arguments (e.g. 
'--test --no-color' or '--tap --no-utf'), requires xcpretty_output to be specified also", + type: String, + optional: true), + FastlaneCore::ConfigItem.new(key: :verbose, + short_option: "-v", + env_name: "FL_SPM_VERBOSE", + description: "Increase verbosity of informational output", + type: Boolean, + default_value: false) + ] + end + + def self.authors + ["fjcaetano", "nxtstep"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'spm', + 'spm( + command: "build", + build_path: "./build", + configuration: "release" + )', + 'spm( + command: "generate-xcodeproj", + xcconfig: "Package.xcconfig" + )' + ] + end + + def self.category + :building + end + + def self.available_commands + %w(build test) + self.package_commands + end + + def self.package_commands + %w(clean reset update resolve generate-xcodeproj init) + end + + def self.valid_configurations + %w(debug release) + end + + def self.xcpretty_output_types + %w(simple test knock tap) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ssh.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ssh.rb new file mode 100644 index 0000000..dce4b38 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/ssh.rb @@ -0,0 +1,157 @@ +module Fastlane + module Actions + module SharedValues + SSH_STDOUT_VALUE = :SSH_STDOUT_VALUE + SSH_STDERR_VALUE = :SSH_STDERR_VALUE + end + + class SshAction < Action + def self.ssh_exec!(ssh, command, log = true) + stdout_data = "" + stderr_data = "" + exit_code = nil + exit_signal = nil + ssh.open_channel do |channel| + channel.exec(command) do |ch, success| + unless success + abort("FAILED: couldn't execute command (ssh.channel.exec)") + end + channel.on_data do |ch1, data| + stdout_data += data + UI.command_output(data) if log + end + + channel.on_extended_data do |ch2, type, data| + # Only type 1 data is stderr (though no other types are defined by the standard) + # See http://net-ssh.github.io/net-ssh/Net/SSH/Connection/Channel.html#method-i-on_extended_data + stderr_data += data if type == 1 + end + + channel.on_request("exit-status") do |ch3, data| + exit_code = data.read_long + end + + channel.on_request("exit-signal") do |ch4, data| + exit_signal = data.read_long + end + end + end + + # Wait for all open channels to close + ssh.loop + { stdout: stdout_data, stderr: stderr_data, exit_code: exit_code, exit_signal: exit_signal } + end + + def self.run(params) + Actions.verify_gem!('net-ssh') + require "net/ssh" + + Actions.lane_context[SharedValues::SSH_STDOUT_VALUE] = "" + Actions.lane_context[SharedValues::SSH_STDERR_VALUE] = "" + stdout = "" + stderr = "" + + Net::SSH.start(params[:host], params[:username], { port: params[:port].to_i, password: params[:password] }) do |ssh| + params[:commands].each do |cmd| + UI.command(cmd) if params[:log] + return_value = ssh_exec!(ssh, cmd, params[:log]) + if return_value[:exit_code] != 0 + UI.error("SSH Command failed '#{cmd}' Exit-Code: #{return_value[:exit_code]}") + UI.user_error!("SSH Command failed") + end + + stderr << return_value[:stderr] + stdout << return_value[:stdout] + end + end + command_word = params[:commands].count == 1 ? 
"command" : "commands" + UI.success("Successfully executed #{params[:commands].count} #{command_word} on host #{params[:host]}") + Actions.lane_context[SharedValues::SSH_STDOUT_VALUE] = stdout + Actions.lane_context[SharedValues::SSH_STDERR_VALUE] = stderr + return { stdout: Actions.lane_context[SharedValues::SSH_STDOUT_VALUE], stderr: Actions.lane_context[SharedValues::SSH_STDERR_VALUE] } + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Allows remote command execution using ssh" + end + + def self.details + "Lets you execute remote commands via ssh using username/password or ssh-agent. If one of the commands in command-array returns non 0, it fails." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "FL_SSH_USERNAME", + description: "Username"), + FastlaneCore::ConfigItem.new(key: :password, + short_option: "-p", + env_name: "FL_SSH_PASSWORD", + sensitive: true, + description: "Password", + optional: true), + FastlaneCore::ConfigItem.new(key: :host, + short_option: "-H", + env_name: "FL_SSH_HOST", + description: "Hostname"), + FastlaneCore::ConfigItem.new(key: :port, + short_option: "-P", + env_name: "FL_SSH_PORT", + description: "Port", + optional: true, + default_value: "22"), + FastlaneCore::ConfigItem.new(key: :commands, + short_option: "-C", + env_name: "FL_SSH_COMMANDS", + description: "Commands", + optional: true, + type: Array), + FastlaneCore::ConfigItem.new(key: :log, + short_option: "-l", + env_name: "FL_SSH_LOG", + description: "Log commands and output", + optional: true, + default_value: true, + type: Boolean) + ] + end + + def self.output + [ + ['SSH_STDOUT_VALUE', 'Holds the standard output of all commands'], + ['SSH_STDERR_VALUE', 'Holds the standard error of all commands'] + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'ssh( + host: "dev.januschka.com", + username: "root", + commands: [ + "date", + "echo 1 > /tmp/file1" + ] + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/supply.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/supply.rb new file mode 100644 index 0000000..1b97ab9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/supply.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/upload_to_play_store' + class SupplyAction < UploadToPlayStoreAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `upload_to_play_store` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/swiftlint.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/swiftlint.rb new file mode 100644 index 0000000..a282add --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/swiftlint.rb @@ -0,0 +1,242 @@ +module Fastlane + module Actions + class SwiftlintAction < Action + def self.run(params) + if `which swiftlint`.to_s.length == 0 && params[:executable].nil? && !Helper.test? 
+ UI.user_error!("You have to install swiftlint using `brew install swiftlint` or specify the executable path with the `:executable` option.") + end + + version = swiftlint_version(executable: params[:executable]) + if params[:mode] == :autocorrect && version < Gem::Version.new('0.5.0') && !Helper.test? + UI.user_error!("Your version of swiftlint (#{version}) does not support autocorrect mode.\nUpdate swiftlint using `brew update && brew upgrade swiftlint`") + end + + # See 'Breaking' section release notes here: https://github.com/realm/SwiftLint/releases/tag/0.43.0 + if params[:mode] == :autocorrect && version >= Gem::Version.new('0.43.0') + UI.deprecated("Your version of swiftlint (#{version}) has deprecated autocorrect mode, please start using fix mode in input param") + UI.important("For now, switching swiftlint mode `from :autocorrect to :fix` for you 😇") + params[:mode] = :fix + elsif params[:mode] == :fix && version < Gem::Version.new('0.43.0') + UI.important("Your version of swiftlint (#{version}) does not support fix mode.\nUpdate swiftlint using `brew update && brew upgrade swiftlint`") + UI.important("For now, switching swiftlint mode `from :fix to :autocorrect` for you 😇") + params[:mode] = :autocorrect + end + + mode_format = params[:mode] == :fix ? "--" : "" + command = (params[:executable] || "swiftlint").dup + command << " #{mode_format}#{params[:mode]}" + command << optional_flags(params) + + if params[:files] + if version < Gem::Version.new('0.5.1') + UI.user_error!("Your version of swiftlint (#{version}) does not support list of files as input.\nUpdate swiftlint using `brew update && brew upgrade swiftlint`") + end + + files = params[:files].map.with_index(0) { |f, i| "SCRIPT_INPUT_FILE_#{i}=#{f.shellescape}" }.join(" ") + command = command.prepend("SCRIPT_INPUT_FILE_COUNT=#{params[:files].count} #{files} ") + command << " --use-script-input-files" + end + + command << " > #{params[:output_file].shellescape}" if params[:output_file] + + begin + Actions.sh(command) + rescue + handle_swiftlint_error(params[:ignore_exit_status], $?.exitstatus) + raise if params[:raise_if_swiftlint_error] + end + end + + def self.optional_flags(params) + command = "" + command << " --path #{params[:path].shellescape}" if params[:path] + command << supported_option_switch(params, :strict, "0.9.2", true) + command << " --config #{params[:config_file].shellescape}" if params[:config_file] + command << " --reporter #{params[:reporter]}" if params[:reporter] + command << supported_option_switch(params, :quiet, "0.9.0", true) + command << supported_option_switch(params, :format, "0.11.0", true) if params[:mode] == :autocorrect + command << supported_no_cache_option(params) if params[:no_cache] + command << " --compiler-log-path #{params[:compiler_log_path].shellescape}" if params[:compiler_log_path] + return command + end + + # Get current SwiftLint version + def self.swiftlint_version(executable: nil) + binary = executable || 'swiftlint' + Gem::Version.new(`#{binary} version`.chomp) + end + + def self.supported_no_cache_option(params) + if [:autocorrect, :fix, :lint].include?(params[:mode]) + return " --no-cache" + else + return "" + end + end + + # Return "--option" switch if option is on and current SwiftLint version is greater or equal than min version. + # Return "" otherwise. 
+ def self.supported_option_switch(params, option, min_version, can_ignore = false) + return "" unless params[option] + version = swiftlint_version(executable: params[:executable]) + if version < Gem::Version.new(min_version) + message = "Your version of swiftlint (#{version}) does not support '--#{option}' option.\nUpdate swiftlint to #{min_version} or above using `brew update && brew upgrade swiftlint`" + message += "\nThe option will be ignored." if can_ignore + can_ignore ? UI.important(message) : UI.user_error!(message) + "" + else + " --#{option}" + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Run swift code validation using SwiftLint" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :mode, + env_name: "FL_SWIFTLINT_MODE", + description: "SwiftLint mode: :lint, :fix, :autocorrect or :analyze", + type: Symbol, + default_value: :lint, + optional: true), + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_SWIFTLINT_PATH", + description: "Specify path to lint", + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find path '#{File.expand_path(value)}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :output_file, + env_name: "FL_SWIFTLINT_OUTPUT_FILE", + description: 'Path to output SwiftLint result', + optional: true), + FastlaneCore::ConfigItem.new(key: :config_file, + env_name: "FL_SWIFTLINT_CONFIG_FILE", + description: 'Custom configuration file of SwiftLint', + optional: true), + FastlaneCore::ConfigItem.new(key: :strict, + env_name: "FL_SWIFTLINT_STRICT", + description: 'Fail on warnings? (true/false)', + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :files, + env_name: "FL_SWIFTLINT_FILES", + description: 'List of files to process', + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :ignore_exit_status, + env_name: "FL_SWIFTLINT_IGNORE_EXIT_STATUS", + description: "Ignore the exit status of the SwiftLint command, so that serious violations \ + don't fail the build (true/false)", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :raise_if_swiftlint_error, + env_name: "FL_SWIFTLINT_RAISE_IF_SWIFTLINT_ERROR", + description: "Raises an error if swiftlint fails, so you can fail CI/CD jobs if necessary \ + (true/false)", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :reporter, + env_name: "FL_SWIFTLINT_REPORTER", + description: "Choose output reporter. Available: xcode, json, csv, checkstyle, codeclimate, \ + junit, html, emoji, sonarqube, markdown, github-actions-logging", + optional: true, + verify_block: proc do |value| + available = ['xcode', 'json', 'csv', 'checkstyle', 'codeclimate', 'junit', 'html', 'emoji', 'sonarqube', 'markdown', 'github-actions-logging'] + UI.important("Known 'reporter' values are '#{available.join("', '")}'. 
If you're receiving errors from swiftlint related to the reporter, make sure the reporter identifier you're using is correct and it's supported by your version of swiftlint.") unless available.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :quiet, + env_name: "FL_SWIFTLINT_QUIET", + description: "Don't print status logs like 'Linting ' & 'Done linting'", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :executable, + env_name: "FL_SWIFTLINT_EXECUTABLE", + description: "Path to the `swiftlint` executable on your machine", + optional: true), + FastlaneCore::ConfigItem.new(key: :format, + env_name: "FL_SWIFTLINT_FORMAT", + description: "Format code when mode is :autocorrect", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :no_cache, + env_name: "FL_SWIFTLINT_NO_CACHE", + description: "Ignore the cache when mode is :autocorrect or :lint", + default_value: false, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :compiler_log_path, + env_name: "FL_SWIFTLINT_COMPILER_LOG_PATH", + description: "Compiler log path when mode is :analyze", + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find compiler_log_path '#{File.expand_path(value)}'") unless File.exist?(value) + end) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'swiftlint( + mode: :lint, # SwiftLint mode: :lint (default) or :autocorrect + path: "/path/to/lint", # Specify path to lint (optional) + output_file: "swiftlint.result.json", # The path of the output file (optional) + config_file: ".swiftlint-ci.yml", # The path of the configuration file (optional) + files: [ # List of files to process (optional) + "AppDelegate.swift", + "path/to/project/Model.swift" + ], + raise_if_swiftlint_error: true, # Allow fastlane to raise an error if swiftlint fails + ignore_exit_status: true # Allow fastlane to continue even if SwiftLint returns a non-zero exit status + + )' + ] + end + + def self.category + :testing + end + + def self.handle_swiftlint_error(ignore_exit_status, exit_status) + if ignore_exit_status + failure_suffix = 'which would normally fail the build.' + secondary_message = 'fastlane will continue because the `ignore_exit_status` option was used! 🙈' + else + failure_suffix = 'which represents a failure.' + secondary_message = 'If you want fastlane to continue anyway, use the `ignore_exit_status` option. 
🙈' + end + + UI.important("") + UI.important("SwiftLint finished with exit code #{exit_status}, #{failure_suffix}") + UI.important(secondary_message) + UI.important("") + UI.user_error!("SwiftLint finished with errors (exit code: #{exit_status})") unless ignore_exit_status + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sync_code_signing.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sync_code_signing.rb new file mode 100644 index 0000000..38155dd --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/sync_code_signing.rb @@ -0,0 +1,114 @@ +module Fastlane + module Actions + module SharedValues + MATCH_PROVISIONING_PROFILE_MAPPING = :MATCH_PROVISIONING_PROFILE_MAPPING + SIGH_PROFILE_TYPE ||= :SIGH_PROFILE_TYPE # originally defined in GetProvisioningProfileAction + end + + class SyncCodeSigningAction < Action + def self.run(params) + require 'match' + + params.load_configuration_file("Matchfile") + + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless params[:api_key_path] + params[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + Match::Runner.new.run(params) + + define_profile_type(params) + define_provisioning_profile_mapping(params) + end + + def self.define_profile_type(params) + profile_type = "app-store" + profile_type = "ad-hoc" if params[:type] == 'adhoc' + profile_type = "development" if params[:type] == 'development' + profile_type = "enterprise" if params[:type] == 'enterprise' + + UI.message("Setting Provisioning Profile type to '#{profile_type}'") + + Actions.lane_context[SharedValues::SIGH_PROFILE_TYPE] = profile_type + end + + # Maps the bundle identifier to the appropriate provisioning profile + # This is used in the _gym_ action as part of the export options + # e.g. + # + # export_options: { + # provisioningProfiles: { "me.themoji.app.beta": "match AppStore me.themoji.app.beta" } + # } + # + def self.define_provisioning_profile_mapping(params) + mapping = Actions.lane_context[SharedValues::MATCH_PROVISIONING_PROFILE_MAPPING] || {} + + # Array (...) 
to make sure it's an Array, Ruby is magic, try this + # Array(1) # => [1] + # Array([1, 2]) # => [1, 2] + Array(params[:app_identifier]).each do |app_identifier| + env_variable_name = Match::Utils.environment_variable_name_profile_name(app_identifier: app_identifier, + type: Match.profile_type_sym(params[:type]), + platform: params[:platform]) + + if params[:derive_catalyst_app_identifier] + app_identifier = "maccatalyst.#{app_identifier}" + end + + mapping[app_identifier] = ENV[env_variable_name] + end + + Actions.lane_context[SharedValues::MATCH_PROVISIONING_PROFILE_MAPPING] = mapping + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Easily sync your certificates and profiles across your team (via _match_)" + end + + def self.details + "More information: https://docs.fastlane.tools/actions/match/" + end + + def self.available_options + require 'match' + Match::Options.available_options + end + + def self.output + [ + ['MATCH_PROVISIONING_PROFILE_MAPPING', 'The match provisioning profile mapping'], + ['SIGH_PROFILE_TYPE', 'The profile type, can be app-store, ad-hoc, development, enterprise, can be used in `build_app` as a default value for `export_method`'] + ] + end + + def self.return_value + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'sync_code_signing(type: "appstore", app_identifier: "tools.fastlane.app")', + 'sync_code_signing(type: "development", readonly: true)', + 'sync_code_signing(app_identifier: ["tools.fastlane.app", "tools.fastlane.sleepy"])', + 'match # alias for "sync_code_signing"' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_id.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_id.rb new file mode 100644 index 0000000..3025180 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_id.rb @@ -0,0 +1,42 @@ +module Fastlane + module Actions + module SharedValues + end + + class TeamIdAction < Action + def self.run(params) + params = nil unless params.kind_of?(Array) + team = (params || []).first + UI.user_error!("Please pass your Team ID (e.g. 
team_id 'Q2CBPK58CA')") unless team.to_s.length > 0 + + UI.message("Setting Team ID to '#{team}' for all build steps") + + [:CERT_TEAM_ID, :SIGH_TEAM_ID, :PEM_TEAM_ID, :PRODUCE_TEAM_ID, :SIGH_TEAM_ID, :FASTLANE_TEAM_ID].each do |current| + ENV[current.to_s] = team + end + end + + def self.author + "KrauseFx" + end + + def self.description + "Specify the Team ID you want to use for the Apple Developer Portal" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'team_id("Q2CBPK58CA")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_name.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_name.rb new file mode 100644 index 0000000..6fb7d3f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/team_name.rb @@ -0,0 +1,42 @@ +module Fastlane + module Actions + module SharedValues + end + + class TeamNameAction < Action + def self.run(params) + params = nil unless params.kind_of?(Array) + team = (params || []).first + UI.user_error!("Please pass your Team Name (e.g. team_name 'Felix Krause')") unless team.to_s.length > 0 + + UI.message("Setting Team Name to '#{team}' for all build steps") + + [:FASTLANE_TEAM_NAME, :PRODUCE_TEAM_NAME].each do |current| + ENV[current.to_s] = team + end + end + + def self.description + "Set a team to use by its name" + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'team_name("Felix Krause")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testfairy.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testfairy.rb new file mode 100644 index 0000000..2ee8703 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testfairy.rb @@ -0,0 +1,284 @@ +module Fastlane + module Actions + module SharedValues + TESTFAIRY_BUILD_URL = :TESTFAIRY_BUILD_URL + TESTFAIRY_DOWNLOAD_URL = :TESTFAIRY_DOWNLOAD_URL + TESTFAIRY_LANDING_PAGE = :TESTFAIRY_LANDING_PAGE + end + + class TestfairyAction < Action + def self.upload_build(upload_url, ipa, options, timeout) + require 'faraday' + require 'faraday_middleware' + + UI.success("Uploading to #{upload_url}...") + + connection = Faraday.new(url: upload_url) do |builder| + builder.request(:multipart) + builder.request(:url_encoded) + builder.request(:retry, max: 3, interval: 5) + builder.response(:json, content_type: /\bjson$/) + builder.use(FaradayMiddleware::FollowRedirects) + builder.adapter(:net_http) + end + + options[:file] = Faraday::UploadIO.new(ipa, 'application/octet-stream') if ipa && File.exist?(ipa) + + symbols_file = options.delete(:symbols_file) + if symbols_file + options[:symbols_file] = Faraday::UploadIO.new(symbols_file, 'application/octet-stream') + end + + begin + connection.post do |req| + req.options.timeout = timeout + req.url("/api/upload/") + req.body = options + end + rescue Faraday::TimeoutError + UI.crash!("Uploading build to TestFairy timed out âŗ") + end + end + + def self.run(params) + UI.success('Starting with ipa upload to TestFairy...') + + metrics_to_client = lambda do |metrics| + metrics.map do |metric| + case metric + when :cpu, :memory, :network, :gps, :battery, :mic, :wifi + metric.to_s + when :phone_signal + 
'phone-signal' + else + UI.user_error!("Unknown metric: #{metric}") + end + end + end + + options_to_client = lambda do |options| + options.map do |option| + case option.to_sym + when :shake, :anonymous + option.to_s + when :video_only_wifi + 'video-only-wifi' + else + UI.user_error!("Unknown option: #{option}") + end + end + end + + # Rejecting key `upload_url` and `timeout` as we don't need it in options + client_options = Hash[params.values.reject do |key, value| + [:upload_url, :timeout].include?(key) + end.map do |key, value| + case key + when :api_key + [key, value] + when :ipa + [key, value] + when :apk + [key, value] + when :symbols_file + [key, value] + when :testers_groups + [key, value.join(',')] + when :metrics + [key, metrics_to_client.call(value).join(',')] + when :comment + [key, value] + when :auto_update + ['auto-update', value] + when :notify + [key, value] + when :options + [key, options_to_client.call(value).join(',')] + when :custom + [key, value] + else + UI.user_error!("Unknown parameter: #{key}") + end + end] + + path = params[:ipa] || params[:apk] + UI.user_error!("No ipa or apk were given") unless path + + return path if Helper.test? + + response = self.upload_build(params[:upload_url], path, client_options, params[:timeout]) + if parse_response(response) + UI.success("Build URL: #{Actions.lane_context[SharedValues::TESTFAIRY_BUILD_URL]}") + UI.success("Download URL: #{Actions.lane_context[SharedValues::TESTFAIRY_DOWNLOAD_URL]}") + UI.success("Landing Page URL: #{Actions.lane_context[SharedValues::TESTFAIRY_LANDING_PAGE]}") + UI.success("Build successfully uploaded to TestFairy.") + else + UI.user_error!("Error when trying to upload ipa to TestFairy") + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.parse_response(response) + if response.body && response.body.key?('status') && response.body['status'] == 'ok' + build_url = response.body['build_url'] + app_url = response.body['app_url'] + landing_page_url = response.body['landing_page_url'] + + Actions.lane_context[SharedValues::TESTFAIRY_BUILD_URL] = build_url + Actions.lane_context[SharedValues::TESTFAIRY_DOWNLOAD_URL] = app_url + Actions.lane_context[SharedValues::TESTFAIRY_LANDING_PAGE] = landing_page_url + + return true + else + UI.error("Error uploading to TestFairy: #{response.body}") + + return false + end + end + private_class_method :parse_response + + def self.description + 'Upload a new build to [TestFairy](https://www.testfairy.com/)' + end + + def self.details + "You can retrieve your API key on [your settings page](https://free.testfairy.com/settings/)" + end + + def self.available_options + [ + # required + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "FL_TESTFAIRY_API_KEY", # The name of the environment variable + description: "API Key for TestFairy", # a short description of this parameter + sensitive: true, + verify_block: proc do |value| + UI.user_error!("No API key for TestFairy given, pass using `api_key: 'key'`") unless value.to_s.length > 0 + end), + FastlaneCore::ConfigItem.new(key: :ipa, + env_name: 'TESTFAIRY_IPA_PATH', + description: 'Path to your IPA file for iOS', + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + conflicting_options: [:apk], + verify_block: proc do |value| + UI.user_error!("Couldn't find ipa file at path '#{value}'") unless File.exist?(value) + end), + 
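+ # :apk is the Android counterpart of :ipa; the two options conflict, so only one may be given per run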
FastlaneCore::ConfigItem.new(key: :apk, + env_name: 'TESTFAIRY_APK_PATH', + description: 'Path to your APK file for Android', + default_value: Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH], + default_value_dynamic: true, + optional: true, + conflicting_options: [:ipa], + verify_block: proc do |value| + UI.user_error!("Couldn't find apk file at path '#{value}'") unless File.exist?(value) + end), + # optional + FastlaneCore::ConfigItem.new(key: :symbols_file, + optional: true, + env_name: "FL_TESTFAIRY_SYMBOLS_FILE", + description: "Symbols mapping file", + default_value: Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find dSYM file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :upload_url, + env_name: "FL_TESTFAIRY_UPLOAD_URL", # The name of the environment variable + description: "API URL for TestFairy", # a short description of this parameter + default_value: "https://upload.testfairy.com", + optional: true), + FastlaneCore::ConfigItem.new(key: :testers_groups, + optional: true, + type: Array, + short_option: '-g', + env_name: "FL_TESTFAIRY_TESTERS_GROUPS", + description: "Array of tester groups to be notified", + default_value: []), # the default value is an empty list + FastlaneCore::ConfigItem.new(key: :metrics, + optional: true, + type: Array, + env_name: "FL_TESTFAIRY_METRICS", + description: "Array of metrics to record (cpu,memory,network,phone_signal,gps,battery,mic,wifi)", + default_value: []), + # max-duration + # video + # video-quality + # video-rate + FastlaneCore::ConfigItem.new(key: :comment, + optional: true, + env_name: "FL_TESTFAIRY_COMMENT", + description: "Additional release notes for this upload. This text will be added to email notifications", + default_value: 'No comment provided'), # the default value if the user didn't provide one + FastlaneCore::ConfigItem.new(key: :auto_update, + optional: true, + env_name: "FL_TESTFAIRY_AUTO_UPDATE", + description: "Allows an easy upgrade of all users to the current version. To enable set to 'on'", + default_value: 'off'), + # not well documented + FastlaneCore::ConfigItem.new(key: :notify, + optional: true, + env_name: "FL_TESTFAIRY_NOTIFY", + description: "Send email to testers", + default_value: 'off'), + FastlaneCore::ConfigItem.new(key: :options, + optional: true, + type: Array, + env_name: "FL_TESTFAIRY_OPTIONS", + description: "Array of options (shake,video_only_wifi,anonymous)", + default_value: []), + FastlaneCore::ConfigItem.new(key: :custom, + optional: true, + env_name: "FL_TESTFAIRY_CUSTOM", + description: "Array of custom options. 
Contact support@testfairy.com for more information", + default_value: ''), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "FL_TESTFAIRY_TIMEOUT", + description: "Request timeout in seconds", + type: Integer, + optional: true) + ] + end + + def self.example_code + [ + 'testfairy( + api_key: "...", + ipa: "./ipa_file.ipa", + comment: "Build #{lane_context[SharedValues::BUILD_NUMBER]}", + )', + 'testfairy( + api_key: "...", + apk: "../build/app/outputs/apk/qa/release/app-qa-release.apk", + comment: "Build #{lane_context[SharedValues::BUILD_NUMBER]}", + )' + ] + end + + def self.category + :beta + end + + def self.output + [ + ['TESTFAIRY_BUILD_URL', 'URL for the sessions of the newly uploaded build'], + ['TESTFAIRY_DOWNLOAD_URL', 'URL directly to the newly uploaded build'], + ['TESTFAIRY_LANDING_PAGE', 'URL of the build\'s landing page'] + ] + end + + def self.authors + ["taka0125", "tcurdt", "vijaysharm", "cdm2012"] + end + + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testflight.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testflight.rb new file mode 100644 index 0000000..988d3c3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/testflight.rb @@ -0,0 +1,14 @@ +module Fastlane + module Actions + require 'fastlane/actions/upload_to_testflight' + class TestflightAction < UploadToTestflightAction + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Alias for the `upload_to_testflight` action" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/trainer.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/trainer.rb new file mode 100644 index 0000000..87b2e32 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/trainer.rb @@ -0,0 +1,49 @@ +module Fastlane + module Actions + class TrainerAction < Action + def self.run(params) + require "trainer" + + params[:path] = Actions.lane_context[Actions::SharedValues::SCAN_GENERATED_PLIST_FILE] if Actions.lane_context[Actions::SharedValues::SCAN_GENERATED_PLIST_FILE] + params[:path] ||= Actions.lane_context[Actions::SharedValues::SCAN_DERIVED_DATA_PATH] if Actions.lane_context[Actions::SharedValues::SCAN_DERIVED_DATA_PATH] + + fail_build = params[:fail_build] + resulting_paths = Trainer::TestParser.auto_convert(params) + resulting_paths.each do |path, test_results| + UI.test_failure!("Unit tests failed") if fail_build && !test_results[:successful] + end + + return resulting_paths + end + + def self.description + "Convert the Xcode plist log to a JUnit report" + end + + def self.detail + "Convert the Xcode plist log to a JUnit report. 
This will raise an exception if the tests failed" + end + + def self.authors + ["KrauseFx"] + end + + def self.return_value + "A hash with the key being the path of the generated file, the value being if the tests were successful" + end + + def self.available_options + require 'trainer' + FastlaneCore::CommanderGenerator.new.generate(Trainer::Options.available_options) + end + + def self.is_supported?(platform) + %i[ios mac].include?(platform) + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/tryouts.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/tryouts.rb new file mode 100644 index 0000000..b7985bf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/tryouts.rb @@ -0,0 +1,148 @@ +module Fastlane + module Actions + module SharedValues + # Contains all the data returned from the Tryouts API. See http://tryouts.readthedocs.org/en/latest/releases.html#create-release + TRYOUTS_BUILD_INFORMATION = :TRYOUTS_BUILD_INFORMATION + end + class TryoutsAction < Action + TRYOUTS_API_BUILD_RELEASE_TEMPLATE = "https://api.tryouts.io/v1/applications/%s/releases/" + + def self.run(params) + UI.success('Upload to Tryouts has been started. This may take some time.') + + response = self.upload_build(params) + + case response.status + when 200...300 + Actions.lane_context[SharedValues::TRYOUTS_BUILD_INFORMATION] = response.body + UI.success('Build successfully uploaded to Tryouts!') + UI.message("Release download url: #{response.body['download_url']}") if response.body["download_url"] + else + UI.user_error!("Error when trying to upload build file to Tryouts: #{response.body}") + end + end + + def self.upload_build(params) + require 'faraday' + require 'faraday_middleware' + + url = TRYOUTS_API_BUILD_RELEASE_TEMPLATE % params[:app_id] + connection = Faraday.new(url) do |builder| + builder.request(:multipart) + builder.request(:url_encoded) + builder.response(:json, content_type: /\bjson$/) + builder.use(FaradayMiddleware::FollowRedirects) + builder.adapter(:net_http) + end + + options = {} + options[:build] = Faraday::UploadIO.new(params[:build_file], 'application/octet-stream') + + if params[:notes_path] + options[:notes] = File.read(params[:notes_path]) + elsif params[:notes] + options[:notes] = params[:notes] + end + + options[:notify] = params[:notify].to_s + options[:status] = params[:status].to_s + + post_request = connection.post do |req| + req.headers['Authorization'] = params[:api_token] + req.body = options + end + + post_request.on_complete do |env| + yield(env[:status], env[:body]) if block_given? + end + end + + def self.description + "Upload a new build to [Tryouts](https://tryouts.io/)" + end + + def self.details + "More information: [http://tryouts.readthedocs.org/en/latest/releases.html#create-release](http://tryouts.readthedocs.org/en/latest/releases.html#create-release)" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :app_id, + env_name: "TRYOUTS_APP_ID", + description: "Tryouts application hash", + verify_block: proc do |value| + UI.user_error!("No application identifier for Tryouts given, pass using `app_id: 'application id'`") unless value && !value.empty? 
+ end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "TRYOUTS_API_TOKEN", + sensitive: true, + description: "API Token (api_key:api_secret) for Tryouts Access", + verify_block: proc do |value| + UI.user_error!("No API token for Tryouts given, pass using `api_token: 'token'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :build_file, + env_name: "TRYOUTS_BUILD_FILE", + description: "Path to your IPA or APK file. Optional if you use the _gym_ or _xcodebuild_ action", + default_value: Actions.lane_context[SharedValues::IPA_OUTPUT_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find build file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :notes, + env_name: "TRYOUTS_NOTES", + description: "Release notes", + optional: true), + FastlaneCore::ConfigItem.new(key: :notes_path, + env_name: "TRYOUTS_NOTES_PATH", + description: "Release notes text file path. Overrides the :notes parameter", + verify_block: proc do |value| + UI.user_error!("Couldn't find notes file at path '#{value}'") unless File.exist?(value) + end, + optional: true), + FastlaneCore::ConfigItem.new(key: :notify, + env_name: "TRYOUTS_NOTIFY", + description: "Notify testers? 0 for no", + type: Integer, + default_value: 1), + FastlaneCore::ConfigItem.new(key: :status, + env_name: "TRYOUTS_STATUS", + description: "2 to make your release public. Release will be distributed to available testers. 1 to make your release private. Release won't be distributed to testers. This also prevents release from showing up for SDK update", + verify_block: proc do |value| + available_options = ["1", "2"] + UI.user_error!("'#{value}' is not a valid 'status' value. Available options are #{available_options.join(', ')}") unless available_options.include?(value.to_s) + end, + type: Integer, + default_value: 2) + ] + end + + def self.example_code + [ + 'tryouts( + api_token: "...", + app_id: "application-id", + build_file: "test.ipa", + )' + ] + end + + def self.category + :beta + end + + def self.output + [ + ['TRYOUTS_BUILD_INFORMATION', 'Contains release info like :application_name, :download_url. 
See http://tryouts.readthedocs.org/en/latest/releases.html#create-release'] + ] + end + + def self.authors + ["alicertel"] + end + + def self.is_supported?(platform) + [:ios, :android].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/twitter.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/twitter.rb new file mode 100644 index 0000000..40c58e2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/twitter.rb @@ -0,0 +1,84 @@ +module Fastlane + module Actions + class TwitterAction < Action + def self.run(params) + Actions.verify_gem!("twitter") + require 'twitter' + client = Twitter::REST::Client.new do |config| + config.consumer_key = params[:consumer_key] + config.consumer_secret = params[:consumer_secret] + config.access_token = params[:access_token] + config.access_token_secret = params[:access_token_secret] + end + client.update(params[:message]) + UI.message(['[TWITTER]', "Successfully tweeted ", params[:message]].join(': ')) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Post a tweet on [Twitter.com](https://twitter.com)" + end + + def self.details + "Post a tweet on Twitter. Requires you to setup an app on [twitter.com](https://twitter.com) and obtain `consumer` and `access_token`." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :consumer_key, + env_name: "FL_TW_CONSUMER_KEY", + description: "Consumer Key", + sensitive: true, + optional: false), + FastlaneCore::ConfigItem.new(key: :consumer_secret, + env_name: "FL_TW_CONSUMER_SECRET", + sensitive: true, + description: "Consumer Secret", + optional: false), + FastlaneCore::ConfigItem.new(key: :access_token, + env_name: "FL_TW_ACCESS_TOKEN", + sensitive: true, + description: "Access Token", + optional: false), + FastlaneCore::ConfigItem.new(key: :access_token_secret, + env_name: "FL_TW_ACCESS_TOKEN_SECRET", + sensitive: true, + description: "Access Token Secret", + optional: false), + FastlaneCore::ConfigItem.new(key: :message, + env_name: "FL_TW_MESSAGE", + description: "The tweet", + optional: false) + + ] + end + + def self.authors + ["hjanuschka"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'twitter( + access_token: "XXXX", + access_token_secret: "xxx", + consumer_key: "xxx", + consumer_secret: "xxx", + message: "You rock!" + )' + ] + end + + def self.category + :notifications + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/typetalk.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/typetalk.rb new file mode 100644 index 0000000..533c7c8 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/typetalk.rb @@ -0,0 +1,93 @@ +module Fastlane + module Actions + class TypetalkAction < Action + def self.run(params) + options = { + message: nil, + note_path: nil, + success: true, + topic_id: nil, + typetalk_token: nil + }.merge(params || {}) + + [:message, :topic_id, :typetalk_token].each do |key| + UI.user_error!("No #{key} given.") unless options[key] + end + + emoticon = (options[:success] ? 
':smile:' : ':rage:')
+        message = "#{emoticon} #{options[:message]}"
+
+        note_path = File.expand_path(options[:note_path]) if options[:note_path]
+        if note_path && File.exist?(note_path)
+          contents = File.read(note_path)
+          message += "\n\n```\n#{contents}\n```"
+        end
+
+        topic_id = options[:topic_id]
+        typetalk_token = options[:typetalk_token]
+
+        self.post_to_typetalk(message, topic_id, typetalk_token)
+
+        UI.success('Successfully sent Typetalk notification')
+      end
+
+      def self.post_to_typetalk(message, topic_id, typetalk_token)
+        require 'net/http'
+        require 'uri'
+
+        uri = URI.parse("https://typetalk.in/api/v1/topics/#{topic_id}")
+        response = Net::HTTP.post_form(uri, { 'message' => message,
+                                              'typetalkToken' => typetalk_token })
+
+        self.check_response(response)
+      end
+
+      def self.check_response(response)
+        case response.code.to_i
+        when 200, 204
+          true
+        else
+          UI.user_error!("Could not send Typetalk notification")
+        end
+      end
+
+      def self.description
+        "Post a message to [Typetalk](https://www.typetalk.com/)"
+      end
+
+      def self.available_options
+        [
+          ['message', 'The message to post'],
+          ['note_path', 'Path to an additional note'],
+          ['topic_id', 'Typetalk topic id'],
+          ['success', 'Successful build?'],
+          ['typetalk_token', 'typetalk token']
+        ]
+      end
+
+      def self.author
+        "Nulab Inc."
+      end
+
+      def self.is_supported?(platform)
+        true
+      end
+
+      def self.example_code
+        [
+          'typetalk(
+            message: "App successfully released!",
+            note_path: "ChangeLog.md",
+            topic_id: 1,
+            success: true,
+            typetalk_token: "Your Typetalk Token"
+          )'
+        ]
+      end
+
+      def self.category
+        :notifications
+      end
+    end
+  end
+end
diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/unlock_keychain.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/unlock_keychain.rb
new file mode 100644
index 0000000..3a8c194
--- /dev/null
+++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/unlock_keychain.rb
@@ -0,0 +1,132 @@
+module Fastlane
+  module Actions
+    class UnlockKeychainAction < Action
+      def self.run(params)
+        keychain_path = FastlaneCore::Helper.keychain_path(params[:path])
+        add_to_search_list = params[:add_to_search_list]
+        set_default = params[:set_default]
+        commands = []
+
+        # add to search list if not already added
+        if add_to_search_list == true || add_to_search_list == :add
+          commands << add_keychain_to_search_list(keychain_path)
+        elsif add_to_search_list == :replace
+          commands << replace_keychain_in_search_list(keychain_path)
+        end
+
+        # set default keychain
+        if set_default
+          commands << default_keychain(keychain_path)
+        end
+
+        escaped_path = keychain_path.shellescape
+        escaped_password = params[:password].shellescape
+
+        # Log the full path, useful for troubleshooting
+        UI.message("Unlocking keychain at path: #{escaped_path}")
+        # unlock given keychain and disable lock and timeout
+        commands << Fastlane::Actions.sh("security unlock-keychain -p #{escaped_password} #{escaped_path}", log: false)
+        commands << Fastlane::Actions.sh("security set-keychain-settings #{escaped_path}", log: false)
+        commands
+      end
+
+      def self.add_keychain_to_search_list(keychain_path)
+        keychains = Fastlane::Actions.sh("security list-keychains -d user", log: false).shellsplit
+
+        # add the keychain to the keychain list
+        unless keychains.include?(keychain_path)
+          keychains << keychain_path
+
+          Fastlane::Actions.sh("security list-keychains -s #{keychains.shelljoin}", log: false)
+        end
+      end
+
+      def
self.replace_keychain_in_search_list(keychain_path) + Actions.lane_context[Actions::SharedValues::ORIGINAL_DEFAULT_KEYCHAIN] = Fastlane::Actions.sh("security default-keychain", log: false).strip + escaped_path = keychain_path.shellescape + Fastlane::Actions.sh("security list-keychains -s #{escaped_path}", log: false) + end + + def self.default_keychain(keychain_path) + escaped_path = keychain_path.shellescape + Fastlane::Actions.sh("security default-keychain -s #{escaped_path}", log: false) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Unlock a keychain" + end + + def self.details + [ + "Unlocks the given keychain file and adds it to the keychain search list.", + "Keychains can be replaced with `add_to_search_list: :replace`." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_UNLOCK_KEYCHAIN_PATH", + description: "Path to the keychain file", + default_value: "login", + optional: false), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_UNLOCK_KEYCHAIN_PASSWORD", + sensitive: true, + description: "Keychain password", + optional: false), + FastlaneCore::ConfigItem.new(key: :add_to_search_list, + env_name: "FL_UNLOCK_KEYCHAIN_ADD_TO_SEARCH_LIST", + description: "Add to keychain search list, valid values are true, false, :add, and :replace", + skip_type_validation: true, # allow Boolean, Symbol + default_value: true), + FastlaneCore::ConfigItem.new(key: :set_default, + env_name: "FL_UNLOCK_KEYCHAIN_SET_DEFAULT", + description: "Set as default keychain", + type: Boolean, + default_value: false) + + ] + end + + def self.authors + ["xfreebird"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'unlock_keychain( # Unlock an existing keychain and add it to the keychain search list + path: "/path/to/KeychainName.keychain", + password: "mysecret" + )', + 'unlock_keychain( # By default the keychain is added to the existing. To replace them with the selected keychain you may use `:replace` + path: "/path/to/KeychainName.keychain", + password: "mysecret", + add_to_search_list: :replace # To only add a keychain use `true` or `:add`. + )', + 'unlock_keychain( # In addition, the keychain can be selected as a default keychain + path: "/path/to/KeychainName.keychain", + password: "mysecret", + set_default: true + )', + 'unlock_keychain( # If the keychain file is located in the standard location `~/Library/Keychains`, then it is sufficient to provide the keychain file name, or file name with its suffix. 
+ path: "KeychainName", + password: "mysecret" + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_group_identifiers.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_group_identifiers.rb new file mode 100644 index 0000000..5344ff9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_group_identifiers.rb @@ -0,0 +1,88 @@ +module Fastlane + module Actions + module SharedValues + APP_GROUP_IDENTIFIERS = :APP_GROUP_IDENTIFIERS + end + + class UpdateAppGroupIdentifiersAction < Action + require 'plist' + + def self.run(params) + UI.message("Entitlements File: #{params[:entitlements_file]}") + UI.message("New App Group Identifiers: #{params[:app_group_identifiers]}") + + entitlements_file = params[:entitlements_file] + UI.user_error!("Could not find entitlements file at path '#{entitlements_file}'") unless File.exist?(entitlements_file) + + # parse entitlements + result = Plist.parse_xml(entitlements_file) + UI.user_error!("Entitlements file at '#{entitlements_file}' cannot be parsed.") unless result + + # get app group field + app_group_field = result['com.apple.security.application-groups'] + UI.user_error!("No existing App group field specified. Please specify an App Group in the entitlements file.") unless app_group_field + + # set new app group identifiers + UI.message("Old App Group Identifiers: #{app_group_field}") + result['com.apple.security.application-groups'] = params[:app_group_identifiers] + + # save entitlements file + result.save_plist(entitlements_file) + UI.message("New App Group Identifiers set: #{result['com.apple.security.application-groups']}") + + Actions.lane_context[SharedValues::APP_GROUP_IDENTIFIERS] = result['com.apple.security.application-groups'] + end + + def self.description + "This action changes the app group identifiers in the entitlements file" + end + + def self.details + "Updates the App Group Identifiers in the given Entitlements file, so you can have app groups for the app store build and app groups for an enterprise build." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :entitlements_file, + env_name: "FL_UPDATE_APP_GROUP_IDENTIFIER_ENTITLEMENTS_FILE_PATH", # The name of the environment variable + description: "The path to the entitlement file which contains the app group identifiers", # a short description of this parameter + verify_block: proc do |value| + UI.user_error!("Please pass a path to an entitlements file. ") unless value.include?(".entitlements") + UI.user_error!("Could not find entitlements file") if !File.exist?(value) && !Helper.test? + end), + FastlaneCore::ConfigItem.new(key: :app_group_identifiers, + env_name: "FL_UPDATE_APP_GROUP_IDENTIFIER_APP_GROUP_IDENTIFIERS", + description: "An Array of unique identifiers for the app groups. Eg. 
['group.com.test.testapp']", + type: Array) + ] + end + + def self.output + [ + ['APP_GROUP_IDENTIFIERS', 'The new App Group Identifiers'] + ] + end + + def self.authors + ["mathiasAichinger"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'update_app_group_identifiers( + entitlements_file: "/path/to/entitlements_file.entitlements", + app_group_identifiers: ["group.your.app.group.identifier"] + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_identifier.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_identifier.rb new file mode 100644 index 0000000..0b7051c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_app_identifier.rb @@ -0,0 +1,124 @@ +module Fastlane + module Actions + class UpdateAppIdentifierAction < Action + def self.run(params) + require 'plist' + require 'xcodeproj' + + info_plist_key = 'INFOPLIST_FILE' + identifier_key = 'PRODUCT_BUNDLE_IDENTIFIER' + + # Read existing plist file + info_plist_path = resolve_path(params[:plist_path], params[:xcodeproj]) + UI.user_error!("Couldn't find info plist file at path '#{params[:plist_path]}'") unless File.exist?(info_plist_path) + plist = Plist.parse_xml(info_plist_path) + + # Check if current app identifier product bundle identifier + app_id_equals_bundle_id = %W($(#{identifier_key}) ${#{identifier_key}}).include?(plist['CFBundleIdentifier']) + if app_id_equals_bundle_id + # Load .xcodeproj + project_path = params[:xcodeproj] + project = Xcodeproj::Project.open(project_path) + + # Fetch the build configuration objects + configs = project.objects.select { |obj| obj.isa == 'XCBuildConfiguration' && !obj.build_settings[identifier_key].nil? } + UI.user_error!("Info plist uses #{identifier_key}, but xcodeproj does not") if configs.empty? + + configs = configs.select { |obj| resolve_path(obj.build_settings[info_plist_key], params[:xcodeproj]) == info_plist_path } + UI.user_error!("Xcodeproj doesn't have configuration with info plist #{params[:plist_path]}.") if configs.empty? + + # For each of the build configurations, set app identifier + configs.each do |c| + c.build_settings[identifier_key] = params[:app_identifier] + end + + # Write changes to the file + project.save + + UI.success("Updated #{params[:xcodeproj]} 💾.") + else + # Update plist value + plist['CFBundleIdentifier'] = params[:app_identifier] + + # Write changes to file + plist_string = Plist::Emit.dump(plist) + File.write(info_plist_path, plist_string) + + UI.success("Updated #{params[:plist_path]} 💾.") + end + end + + def self.resolve_path(path, xcodeproj_path) + return nil unless path + project_dir = File.dirname(xcodeproj_path) + # SRCROOT, SOURCE_ROOT and PROJECT_DIR are the same + %w{SRCROOT SOURCE_ROOT PROJECT_DIR}.each do |variable_name| + path = path.sub("$(#{variable_name})", project_dir) + end + path = File.absolute_path(path, project_dir) + path + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.is_supported?(platform) + [:ios].include?(platform) + end + + def self.description + "Update the project's bundle identifier" + end + + def self.details + "Update an app identifier by either setting `CFBundleIdentifier` or `PRODUCT_BUNDLE_IDENTIFIER`, depending on which is already in use." 
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_UPDATE_APP_IDENTIFIER_PROJECT_PATH", + description: "Path to your Xcode project", + code_gen_sensitive: true, + default_value: Dir['*.xcodeproj'].first, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") unless value.end_with?(".xcodeproj") + UI.user_error!("Could not find Xcode project") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :plist_path, + env_name: "FL_UPDATE_APP_IDENTIFIER_PLIST_PATH", + description: "Path to info plist, relative to your Xcode project", + verify_block: proc do |value| + UI.user_error!("Invalid plist file") unless value[-6..-1].casecmp(".plist").zero? + end), + FastlaneCore::ConfigItem.new(key: :app_identifier, + env_name: 'FL_UPDATE_APP_IDENTIFIER', + description: 'The app Identifier you want to set', + code_gen_sensitive: true, + default_value: ENV['PRODUCE_APP_IDENTIFIER'] || CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true) + ] + end + + def self.authors + ['squarefrog', 'tobiasstrebitzer'] + end + + def self.example_code + [ + 'update_app_identifier( + xcodeproj: "Example.xcodeproj", # Optional path to xcodeproj, will use the first .xcodeproj if not set + plist_path: "Example/Info.plist", # Path to info plist file, relative to xcodeproj + app_identifier: "com.test.example" # The App Identifier + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_code_signing_settings.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_code_signing_settings.rb new file mode 100644 index 0000000..13697c4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_code_signing_settings.rb @@ -0,0 +1,223 @@ +require 'xcodeproj' +module Fastlane + module Actions + class UpdateCodeSigningSettingsAction < Action + def self.run(params) + FastlaneCore::PrintTable.print_values(config: params, title: "Summary for code signing settings") + path = params[:path] + path = File.join(File.expand_path(path), "project.pbxproj") + + project = Xcodeproj::Project.open(params[:path]) + UI.user_error!("Could not find path to project config '#{path}'. Pass the path to your project (not workspace)!") unless File.exist?(path) + UI.message("Updating the Automatic Codesigning flag to #{params[:use_automatic_signing] ? 'enabled' : 'disabled'} for the given project '#{path}'") + + unless project.root_object.attributes["TargetAttributes"] + UI.user_error!("Seems to be a very old project file format - please open your project file in a more recent version of Xcode") + return false + end + + changed_targets = [] + changed_build_configurations = [] + + project.targets.each do |target| + if params[:targets] + unless params[:targets].include?(target.name) + UI.important("Skipping #{target.name} not selected (#{params[:targets].join(',')})") + next + end + end + + target.build_configurations.each do |config| + if params[:build_configurations] + unless params[:build_configurations].include?(config.name) + UI.important("Skipping #{config.name} not selected (#{params[:build_configurations].join(',')})") + next + end + end + + style_value = params[:use_automatic_signing] ? 'Automatic' : 'Manual' + development_team_setting = params[:sdk] ? 
"DEVELOPMENT_TEAM[sdk=#{params[:sdk]}]" : "DEVELOPMENT_TEAM" + code_sign_identity_setting = params[:sdk] ? "CODE_SIGN_IDENTITY[sdk=#{params[:sdk]}]" : "CODE_SIGN_IDENTITY" + provisioning_profile_setting = params[:sdk] ? "PROVISIONING_PROFILE_SPECIFIER[sdk=#{params[:sdk]}]" : "PROVISIONING_PROFILE_SPECIFIER" + + set_build_setting(config, "CODE_SIGN_STYLE", style_value) + + if params[:team_id] + set_build_setting(config, development_team_setting, params[:team_id]) + UI.important("Set Team id to: #{params[:team_id]} for target: #{target.name} for build configuration: #{config.name}") + end + if params[:code_sign_identity] + set_build_setting(config, code_sign_identity_setting, params[:code_sign_identity]) + UI.important("Set Code Sign identity to: #{params[:code_sign_identity]} for target: #{target.name} for build configuration: #{config.name}") + end + if params[:profile_name] + set_build_setting(config, provisioning_profile_setting, params[:profile_name]) + UI.important("Set Provisioning Profile name to: #{params[:profile_name]} for target: #{target.name} for build configuration: #{config.name}") + end + if params[:entitlements_file_path] + set_build_setting(config, "CODE_SIGN_ENTITLEMENTS", params[:entitlements_file_path]) + UI.important("Set Entitlements file path to: #{params[:entitlements_file_path]} for target: #{target.name} for build configuration: #{config.name}") + end + # Since Xcode 8, this is no longer needed, you simply use PROVISIONING_PROFILE_SPECIFIER + if params[:profile_uuid] + set_build_setting(config, "PROVISIONING_PROFILE", params[:profile_uuid]) + UI.important("Set Provisioning Profile UUID to: #{params[:profile_uuid]} for target: #{target.name} for build configuration: #{config.name}") + end + if params[:bundle_identifier] + set_build_setting(config, "PRODUCT_BUNDLE_IDENTIFIER", params[:bundle_identifier]) + UI.important("Set Bundle identifier to: #{params[:bundle_identifier]} for target: #{target.name} for build configuration: #{config.name}") + end + + changed_build_configurations << config.name + end + + changed_targets << target.name + end + project.save + + if changed_targets.empty? + UI.important("None of the specified targets has been modified") + UI.important("available targets:") + project.targets.each do |target| + UI.important("\t* #{target.name}") + end + else + UI.success("Successfully updated project settings to use Code Sign Style = '#{params[:use_automatic_signing] ? 
'Automatic' : 'Manual'}'") + UI.success("Modified Targets:") + changed_targets.each do |target| + UI.success("\t * #{target}") + end + + UI.success("Modified Build Configurations:") + changed_build_configurations.each do |name| + UI.success("\t * #{name}") + end + end + + params[:use_automatic_signing] + end + + def self.set_build_setting(configuration, name, value) + # Iterate over any keys that start with this name + # This will also set keys that have filtering like [sdk=iphoneos*] + keys = configuration.build_settings.keys.select { |key| key.to_s.match(/#{name}.*/) } + keys.each do |key| + configuration.build_settings[key] = value + end + + # Explicitly set the key with value if keys don't exist + configuration.build_settings[name] = value + end + + def self.description + "Configures Xcode's Codesigning options" + end + + def self.details + "Configures Xcode's Codesigning options of all targets in the project" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_PROJECT_SIGNING_PROJECT_PATH", + description: "Path to your Xcode project", + code_gen_sensitive: true, + default_value: Dir['*.xcodeproj'].first, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Path is invalid") unless File.exist?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :use_automatic_signing, + env_name: "FL_PROJECT_USE_AUTOMATIC_SIGNING", + description: "Defines if project should use automatic signing", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :sdk, + env_name: "FASTLANE_BUILD_SDK", + optional: true, + description: "Build target SDKs (iphoneos*, macosx*, iphonesimulator*)"), + FastlaneCore::ConfigItem.new(key: :team_id, + env_name: "FASTLANE_TEAM_ID", + optional: true, + description: "Team ID, is used when upgrading project"), + FastlaneCore::ConfigItem.new(key: :targets, + env_name: "FL_PROJECT_SIGNING_TARGETS", + optional: true, + type: Array, + description: "Specify targets you want to toggle the signing mech. (default to all targets)"), + FastlaneCore::ConfigItem.new(key: :build_configurations, + env_name: "FL_PROJECT_SIGNING_BUILD_CONFIGURATIONS", + optional: true, + type: Array, + description: "Specify build_configurations you want to toggle the signing mech. 
(default to all configurations)"), + FastlaneCore::ConfigItem.new(key: :code_sign_identity, + env_name: "FL_CODE_SIGN_IDENTITY", + description: "Code signing identity type (iPhone Developer, iPhone Distribution)", + optional: true), + FastlaneCore::ConfigItem.new(key: :entitlements_file_path, + env_name: "FL_CODE_SIGN_ENTITLEMENTS_FILE_PATH", + description: "Path to your entitlements file", + optional: true), + FastlaneCore::ConfigItem.new(key: :profile_name, + env_name: "FL_PROVISIONING_PROFILE_SPECIFIER", + description: "Provisioning profile name to use for code signing", + optional: true), + FastlaneCore::ConfigItem.new(key: :profile_uuid, + env_name: "FL_PROVISIONING_PROFILE", + description: "Provisioning profile UUID to use for code signing", + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_identifier, + env_name: "FL_APP_IDENTIFIER", + description: "Application Product Bundle Identifier", + optional: true) + ] + end + + def self.output + end + + def self.example_code + [ + ' # manual code signing + update_code_signing_settings( + use_automatic_signing: false, + path: "demo-project/demo/demo.xcodeproj" + )', + ' # automatic code signing + update_code_signing_settings( + use_automatic_signing: true, + path: "demo-project/demo/demo.xcodeproj" + )', + ' # more advanced manual code signing + update_code_signing_settings( + use_automatic_signing: false, + path: "demo-project/demo/demo.xcodeproj", + team_id: "QABC123DEV", + bundle_identifier: "com.demoapp.QABC123DEV", + code_sign_identity: "iPhone Distribution", + sdk: "iphoneos*", + profile_name: "Demo App Deployment Profile", + entitlements_file_path: "Demo App/generated/New.entitlements" + )' + ] + end + + def self.category + :code_signing + end + + def self.return_value + "The current status (boolean) of codesigning after modification" + end + + def self.authors + ["mathiasAichinger", "hjanuschka", "p4checo", "portellaa", "aeons", "att55", "abcdev"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_fastlane.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_fastlane.rb new file mode 100644 index 0000000..ddf006f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_fastlane.rb @@ -0,0 +1,158 @@ +require 'rubygems/spec_fetcher' +require 'rubygems/command_manager' + +module Fastlane + module Actions + # Makes sure fastlane tools are up-to-date when running fastlane + class UpdateFastlaneAction < Action + ALL_TOOLS = ["fastlane"] + + def self.run(options) + return if options[:no_update] # this is used to update itself + + tools_to_update = ALL_TOOLS + UI.message("Looking for updates for #{tools_to_update.join(', ')}...") + + updater = Gem::CommandManager.instance[:update] + cleaner = Gem::CommandManager.instance[:cleanup] + + gem_dir = ENV['GEM_HOME'] || Gem.dir + sudo_needed = !File.writable?(gem_dir) + + if sudo_needed + UI.important("It seems that your Gem directory is not writable by your current user.") + UI.important("fastlane would need sudo rights to update itself, however, running 'sudo fastlane' is not recommended.") + UI.important("If you still want to use this action, please read the documentation on how to set this up:") + UI.important("https://docs.fastlane.tools/actions/update_fastlane/") + return + end + + unless updater.respond_to?(:highest_installed_gems) + UI.important("The 
update_fastlane action requires rubygems version 2.1.0 or greater.") + UI.important("Please update your version of ruby gems before proceeding.") + UI.command "gem install rubygems-update" + UI.command "update_rubygems" + UI.command "gem update --system" + return + end + + highest_versions = updater.highest_installed_gems.keep_if { |key| tools_to_update.include?(key) } + update_needed = updater.which_to_update(highest_versions, tools_to_update) + + if update_needed.count == 0 + UI.success("Nothing to update ✅") + return + end + + # suppress updater output - very noisy + Gem::DefaultUserInteraction.ui = Gem::SilentUI.new unless FastlaneCore::Globals.verbose? + + update_needed.each do |tool_info| + tool = self.get_gem_name(tool_info) + local_version = Gem::Version.new(highest_versions[tool].version) + latest_official_version = FastlaneCore::UpdateChecker.fetch_latest(tool) + + UI.message("Updating #{tool} from #{local_version.to_s.yellow} to #{latest_official_version.to_s.yellow}... 🚀") + + if Helper.homebrew? + Helper.backticks('brew upgrade fastlane') + else + # Approximate_recommendation will create a string like "~> 0.10" from a version 0.10.0, e.g. one that is valid for versions >= 0.10 and <1.0 + requirement_version = local_version.approximate_recommendation + updater.update_gem(tool, Gem::Requirement.new(requirement_version)) + end + + UI.success("Finished updating #{tool}") + end + + unless Helper.homebrew? + UI.message("Cleaning up old versions...") + cleaner.options[:args] = tools_to_update + cleaner.execute + end + + if FastlaneCore::FastlaneFolder.swift? + upgrader = SwiftRunnerUpgrader.new + upgrader.upgrade_if_needed! + end + + UI.message("fastlane.tools successfully updated! I will now restart myself... 😴") + + # Set no_update to true so we don't try to update again + exec("FL_NO_UPDATE=true #{$PROGRAM_NAME} #{ARGV.join(' ')}") + end + + def self.get_gem_name(tool_info) + if tool_info.kind_of?(Array) + return tool_info[0] + elsif tool_info.respond_to?(:name) # Gem::NameTuple in RubyGems >= 3.1.0 + return tool_info.name + else + UI.crash!("Unknown gem update information returned from RubyGems. Please file a new issue for this... 🤷") + end + end + + def self.description + "Makes sure fastlane-tools are up-to-date when running fastlane" + end + + def self.details + sample = <<-SAMPLE.markdown_sample + ```bash + export GEM_HOME=~/.gems + export PATH=$PATH:~/.gems/bin + ``` + SAMPLE + + [ + "This action will update fastlane to the most recent version - major version updates will not be performed automatically, as they might include breaking changes. If an update was performed, fastlane will be restarted before the run continues.", + "", + "If you are using rbenv or rvm, everything should be good to go. However, if you are using the system's default ruby, some additional setup is needed for this action to work correctly. In short, fastlane needs to be able to access your gem library without running in `sudo` mode.", + "", + "The simplest possible fix for this is putting the following lines into your `~/.bashrc` or `~/.zshrc` file:".markdown_preserve_newlines, + sample, + "After the above changes, restart your terminal, then run `mkdir $GEM_HOME` to create the new gem directory. After this, you're good to go!", + "", + "Recommended usage of the `update_fastlane` action is at the top inside of the `before_all` block, before running any other action." 
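+          # Illustrative note (not part of the upstream action): major-version
+          # updates are skipped because self.run constrains the update with
+          # RubyGems' Gem::Version#approximate_recommendation, e.g.
+          #
+          #   Gem::Version.new("2.212.2").approximate_recommendation # => "~> 2.212"
+          #
+          # which permits minor and patch updates but never a 3.x jump.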
+ ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :no_update, + env_name: "FL_NO_UPDATE", + description: "Don't update during this run. This is used internally", + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :nightly, + env_name: "FL_UPDATE_FASTLANE_NIGHTLY", + description: "Opt-in to install and use nightly fastlane builds", + type: Boolean, + default_value: false, + deprecated: "Nightly builds are no longer being made available") + ] + end + + def self.authors + ["milch", "KrauseFx"] + end + + def self.is_supported?(platform) + true + end + + def self.example_code + [ + 'before_all do + update_fastlane + # ... + end' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_icloud_container_identifiers.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_icloud_container_identifiers.rb new file mode 100644 index 0000000..ff75035 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_icloud_container_identifiers.rb @@ -0,0 +1,96 @@ +module Fastlane + module Actions + module SharedValues + UPDATE_ICLOUD_CONTAINER_IDENTIFIERS = :UPDATE_ICLOUD_CONTAINER_IDENTIFIERS + end + + class UpdateIcloudContainerIdentifiersAction < Action + require 'plist' + + def self.run(params) + entitlements_file = params[:entitlements_file] + UI.message("Entitlements File: #{entitlements_file}") + + # parse entitlements + result = Plist.parse_xml(entitlements_file) + UI.error("Entitlements file at '#{entitlements_file}' cannot be parsed.") unless result + + # get iCloud container field + icloud_container_key = 'com.apple.developer.icloud-container-identifiers' + icloud_container_value = result[icloud_container_key] + UI.error("No existing iCloud container field specified. Please specify an iCloud container in the entitlements file.") unless icloud_container_value + + # get uniquity container field + ubiquity_container_key = 'com.apple.developer.ubiquity-container-identifiers' + ubiquity_container_value = result[ubiquity_container_key] + UI.error("No existing ubiquity container field specified. Please specify an ubiquity container in the entitlements file.") unless ubiquity_container_value + + # set iCloud container identifiers + result[icloud_container_key] = params[:icloud_container_identifiers] + result[ubiquity_container_key] = params[:icloud_container_identifiers] + + # save entitlements file + result.save_plist(entitlements_file) + + UI.message("Old iCloud Container Identifiers: #{icloud_container_value}") + UI.message("Old Ubiquity Container Identifiers: #{ubiquity_container_value}") + + UI.success("New iCloud Container Identifiers set: #{result[icloud_container_key]}") + UI.success("New Ubiquity Container Identifiers set: #{result[ubiquity_container_key]}") + + Actions.lane_context[SharedValues::UPDATE_ICLOUD_CONTAINER_IDENTIFIERS] = result[icloud_container_key] + end + + def self.description + "This action changes the iCloud container identifiers in the entitlements file" + end + + def self.details + "Updates the iCloud Container Identifiers in the given Entitlements file, so you can use different iCloud containers for different builds like Adhoc, App Store, etc." 
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :entitlements_file, + env_name: "FL_UPDATE_ICLOUD_CONTAINER_IDENTIFIERS_ENTITLEMENTS_FILE_PATH", + description: "The path to the entitlement file which contains the iCloud container identifiers", + verify_block: proc do |value| + UI.user_error!("Please pass a path to an entitlements file. ") unless value.include?(".entitlements") + UI.user_error!("Could not find entitlements file") if !File.exist?(value) && !Helper.test? + end), + FastlaneCore::ConfigItem.new(key: :icloud_container_identifiers, + env_name: "FL_UPDATE_ICLOUD_CONTAINER_IDENTIFIERS_IDENTIFIERS", + description: "An Array of unique identifiers for the iCloud containers. Eg. ['iCloud.com.test.testapp']", + type: Array) + ] + end + + def self.output + [ + ['UPDATE_ICLOUD_CONTAINER_IDENTIFIERS', 'The new iCloud Container Identifiers'] + ] + end + + def self.authors + ["JamesKuang"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'update_icloud_container_identifiers( + entitlements_file: "/path/to/entitlements_file.entitlements", + icloud_container_identifiers: ["iCloud.com.companyname.appname"] + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_info_plist.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_info_plist.rb new file mode 100644 index 0000000..1818f8c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_info_plist.rb @@ -0,0 +1,143 @@ +module Fastlane + module Actions + module SharedValues + end + + class UpdateInfoPlistAction < Action + def self.run(params) + # Check if parameters are set + if params[:app_identifier] || params[:display_name] || params[:block] + if (params[:app_identifier] || params[:display_name]) && params[:block] + UI.important("block parameter can not be specified with app_identifier or display_name") + return false + end + + # Assign folder from parameter or search for xcodeproj file + folder = params[:xcodeproj] || Dir["*.xcodeproj"].first + UI.user_error!("Could not figure out your xcodeproj path. Please specify it using the `xcodeproj` parameter") if folder.nil? + + if params[:scheme] + project = Xcodeproj::Project.open(folder) + scheme = project.native_targets.detect { |target| target.name == params[:scheme] } + UI.user_error!("Couldn't find scheme named '#{params[:scheme]}'") unless scheme + + params[:plist_path] = scheme.build_configurations.first.build_settings["INFOPLIST_FILE"] + UI.user_error!("Scheme named '#{params[:scheme]}' doesn't have a plist file") unless params[:plist_path] + params[:plist_path] = params[:plist_path].gsub("$(SRCROOT)", ".") + end + + if params[:plist_path].nil? 
+ UI.user_error!("You must specify either a plist path or a scheme") + end + + # Read existing plist file + info_plist_path = File.join(folder, "..", params[:plist_path]) + UI.user_error!("Couldn't find info plist file at path '#{info_plist_path}'") unless File.exist?(info_plist_path) + + UpdatePlistAction.run( + plist_path: info_plist_path, + block: proc do |plist| + plist['CFBundleIdentifier'] = params[:app_identifier] if params[:app_identifier] + plist['CFBundleDisplayName'] = params[:display_name] if params[:display_name] + params[:block].call(plist) if params[:block] + end + ) + else + UI.important("You haven't specified any parameters to update your plist.") + false + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.is_supported?(platform) + [:ios].include?(platform) + end + + def self.description + 'Update a Info.plist file with bundle identifier and display name' + end + + def self.details + "This action allows you to modify your `Info.plist` file before building. This may be useful if you want a separate build for alpha, beta or nightly builds, but don't want a separate target." + end + + def self.available_options + [ + + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_UPDATE_PLIST_PROJECT_PATH", + description: "Path to your Xcode project", + optional: true, + verify_block: proc do |value| + UI.user_error!("Please pass the path to the project, not the workspace") if value.end_with?(".xcworkspace") + UI.user_error!("Could not find Xcode project") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :plist_path, + env_name: "FL_UPDATE_PLIST_PATH", + description: "Path to info plist", + optional: true, + verify_block: proc do |value| + UI.user_error!("Invalid plist file") unless value.downcase.end_with?(".plist") + end), + FastlaneCore::ConfigItem.new(key: :scheme, + env_name: "FL_UPDATE_PLIST_APP_SCHEME", + description: "Scheme of info plist", + optional: true), + FastlaneCore::ConfigItem.new(key: :app_identifier, + env_name: 'FL_UPDATE_PLIST_APP_IDENTIFIER', + description: 'The App Identifier of your app', + code_gen_sensitive: true, + default_value: ENV['PRODUCE_APP_IDENTIFIER'], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :display_name, + env_name: 'FL_UPDATE_PLIST_DISPLAY_NAME', + description: 'The Display Name of your app', + optional: true), + FastlaneCore::ConfigItem.new(key: :block, + type: :string_callback, + description: 'A block to process plist with custom logic', + optional: true) + + ] + end + + def self.author + 'tobiasstrebitzer' + end + + def self.example_code + [ + 'update_info_plist( # update app identifier string + plist_path: "path/to/Info.plist", + app_identifier: "com.example.newappidentifier" + )', + 'update_info_plist( # Change the Display Name of your app + plist_path: "path/to/Info.plist", + display_name: "MyApp-Beta" + )', + 'update_info_plist( # Target a specific `xcodeproj` rather than finding the first available one + xcodeproj: "path/to/Example.proj", + plist_path: "path/to/Info.plist", + display_name: "MyApp-Beta" + )', + 'update_info_plist( # Advanced processing: find URL scheme for particular key and replace value + xcodeproj: "path/to/Example.proj", + plist_path: "path/to/Info.plist", + block: proc do |plist| + urlScheme = plist["CFBundleURLTypes"].find{|scheme| scheme["CFBundleURLName"] == "com.acme.default-url-handler"} + urlScheme[:CFBundleURLSchemes] = 
["acme-production"] + end + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_keychain_access_groups.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_keychain_access_groups.rb new file mode 100644 index 0000000..b0e481a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_keychain_access_groups.rb @@ -0,0 +1,91 @@ +module Fastlane + module Actions + module SharedValues + KEYCHAIN_ACCESS_GROUPS = :KEYCHAIN_ACCESS_GROUPS + end + + class UpdateKeychainAccessGroupsAction < Action + require 'plist' + + def self.run(params) + UI.message("Entitlements File: #{params[:entitlements_file]}") + UI.message("New keychain access groups: #{params[:identifiers]}") + + entitlements_file = params[:entitlements_file] + UI.user_error!("Could not find entitlements file at path '#{entitlements_file}'") unless File.exist?(entitlements_file) + + # parse entitlements + result = Plist.parse_xml(entitlements_file) + UI.user_error!("Entitlements file at '#{entitlements_file}' cannot be parsed.") unless result + + # keychain access groups key + keychain_access_groups_key = 'keychain-access-groups' + + # get keychain access groups + keychain_access_groups_field = result[keychain_access_groups_key] + UI.user_error!("No existing keychain access groups field specified. Please specify an keychain access groups in the entitlements file.") unless keychain_access_groups_field + + # set new keychain access groups + UI.message("Old keychain access groups: #{keychain_access_groups_field}") + result[keychain_access_groups_key] = params[:identifiers] + + # save entitlements file + result.save_plist(entitlements_file) + UI.message("New keychain access groups: #{result[keychain_access_groups_key]}") + + Actions.lane_context[SharedValues::KEYCHAIN_ACCESS_GROUPS] = result[keychain_access_groups_key] + end + + def self.description + "This action changes the keychain access groups in the entitlements file" + end + + def self.details + "Updates the Keychain Group Access Groups in the given Entitlements file, so you can have keychain access groups for the app store build and keychain access groups for an enterprise build." + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :entitlements_file, + env_name: "FL_UPDATE_KEYCHAIN_ACCESS_GROUPS_ENTITLEMENTS_FILE_PATH", # The name of the environment variable + description: "The path to the entitlement file which contains the keychain access groups", # a short description of this parameter + verify_block: proc do |value| + UI.user_error!("Please pass a path to an entitlements file. ") unless value.include?(".entitlements") + UI.user_error!("Could not find entitlements file") if !File.exist?(value) && !Helper.test? + end), + FastlaneCore::ConfigItem.new(key: :identifiers, + env_name: "FL_UPDATE_KEYCHAIN_ACCESS_GROUPS_IDENTIFIERS", + description: "An Array of unique identifiers for the keychain access groups. Eg. 
['your.keychain.access.groups.identifiers']", + type: Array) + ] + end + + def self.output + [ + ['KEYCHAIN_ACCESS_GROUPS', 'The new Keychain Access Groups'] + ] + end + + def self.authors + ["yutae"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'update_keychain_access_groups( + entitlements_file: "/path/to/entitlements_file.entitlements", + identifiers: ["your.keychain.access.groups.identifiers"] + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_plist.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_plist.rb new file mode 100644 index 0000000..a439d1e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_plist.rb @@ -0,0 +1,115 @@ +module Fastlane + module Actions + module SharedValues + end + + class UpdatePlistAction < Action + def self.run(params) + require 'xcodeproj' + + if params[:plist_path].nil? + UI.user_error!("You must specify a plist path") + end + + # Read existing plist file + plist_path = params[:plist_path] + + UI.user_error!("Couldn't find plist file at path '#{plist_path}'") unless File.exist?(plist_path) + plist = Xcodeproj::Plist.read_from_path(plist_path) + + params[:block].call(plist) if params[:block] + + # Write changes to file + Xcodeproj::Plist.write_to_path(plist, plist_path) + + UI.success("Updated #{params[:plist_path]} 💾.") + File.read(plist_path) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.is_supported?(platform) + [:ios].include?(platform) + end + + def self.description + 'Update a plist file' + end + + def self.details + "This action allows you to modify any value inside any `plist` file." 
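+        # Minimal usage sketch (illustrative, not part of the upstream action;
+        # path and key are hypothetical). The block receives the plist parsed
+        # by Xcodeproj::Plist as a Hash, and mutations are written back to disk:
+        #
+        #   update_plist(
+        #     plist_path: "App/Info.plist",
+        #     block: proc do |plist|
+        #       plist["CFBundleVersion"] = "42"
+        #     end
+        #   )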
+ end + + def self.available_options + [ + + FastlaneCore::ConfigItem.new(key: :plist_path, + env_name: "FL_UPDATE_PLIST_PATH", + description: "Path to plist file", + optional: true), + FastlaneCore::ConfigItem.new(key: :block, + type: :string_callback, + description: 'A block to process plist with custom logic') + + ] + end + + def self.author + ["rishabhtayal", "matthiaszarzecki"] + end + + def self.example_code + [ + 'update_plist( # Updates the CLIENT_ID and GOOGLE_APP_ID string entries in the plist-file + plist_path: "path/to/your_plist_file.plist", + block: proc do |plist| + plist[:CLIENT_ID] = "new_client_id" + plist[:GOOGLE_APP_ID] = "new_google_app_id" + end + )', + 'update_plist( # Sets a boolean entry + plist_path: "path/to/your_plist_file.plist", + block: proc do |plist| + plist[:boolean_entry] = true + end + )', + 'update_plist( # Sets a number entry + plist_path: "path/to/your_plist_file.plist", + block: proc do |plist| + plist[:number_entry] = 13 + end + )', + 'update_plist( # Sets an array-entry with multiple sub-types + plist_path: "path/to/your_plist_file.plist", + block: proc do |plist| + plist[:array_entry] = ["entry_01", true, 1243] + end + )', + 'update_plist( # The block can contain logic too + plist_path: "path/to/your_plist_file.plist", + block: proc do |plist| + if options[:environment] == "production" + plist[:CLIENT_ID] = "new_client_id_production" + else + plist[:CLIENT_ID] = "new_client_id_development" + end + end + )', + 'update_plist( # Advanced processing: find URL scheme for particular key and replace value + plist_path: "path/to/Info.plist", + block: proc do |plist| + urlScheme = plist["CFBundleURLTypes"].find{|scheme| scheme["CFBundleURLName"] == "com.acme.default-url-handler"} + urlScheme[:CFBundleURLSchemes] = ["acme-production"] + end + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_code_signing.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_code_signing.rb new file mode 100644 index 0000000..7eb329b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_code_signing.rb @@ -0,0 +1,76 @@ +module Fastlane + module Actions + module SharedValues + end + + class UpdateProjectCodeSigningAction < Action + def self.run(params) + UI.message("You shouldn't use update_project_code_signing") + UI.message("Have you considered using the recommended way to do code signing?") + UI.message("https://docs.fastlane.tools/codesigning/getting-started/") + + path = params[:path] + path = File.join(path, "project.pbxproj") + UI.user_error!("Could not find path to project config '#{path}'. 
Pass the path to your project (not workspace)!") unless File.exist?(path) + + uuid = params[:uuid] || params[:udid] + UI.message("Updating provisioning profile UUID (#{uuid}) for the given project '#{path}'") + + p = File.read(path) + File.write(path, p.gsub(/PROVISIONING_PROFILE = ".*";/, "PROVISIONING_PROFILE = \"#{uuid}\";")) + + UI.success("Successfully updated project settings to use UUID '#{uuid}'") + end + + def self.description + "Updated code signing settings from 'Automatic' to a specific profile" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_PROJECT_SIGNING_PROJECT_PATH", + description: "Path to your Xcode project", + verify_block: proc do |value| + UI.user_error!("Path is invalid") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :udid, + deprecated: "Use `:uuid` instead", + env_name: "FL_PROJECT_SIGNING_UDID", + code_gen_sensitive: true, + default_value: ENV["SIGH_UUID"], + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :uuid, + env_name: "FL_PROJECT_SIGNING_UUID", + description: "The UUID of the provisioning profile you want to use", + code_gen_sensitive: true, + default_value: ENV["SIGH_UUID"], + default_value_dynamic: true) + ] + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + nil + end + + def self.category + :deprecated + end + + def self.deprecated_notes + [ + "You shouldn't use `update_project_code_signing`.", + "Have you considered using the recommended way to do code signing: [https://docs.fastlane.tools/codesigning/getting-started/](https://docs.fastlane.tools/codesigning/getting-started/)?" + ].join("\n") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_provisioning.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_provisioning.rb new file mode 100644 index 0000000..b68e6b3 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_provisioning.rb @@ -0,0 +1,181 @@ +# coding: utf-8 + +module Fastlane + module Actions + module SharedValues + end + + class UpdateProjectProvisioningAction < Action + ROOT_CERTIFICATE_URL = "https://www.apple.com/appleca/AppleIncRootCertificate.cer" + def self.run(params) + UI.message("You’re updating provisioning profiles directly in your project, but have you considered easier ways to do code signing?") + UI.message("https://docs.fastlane.tools/codesigning/GettingStarted/") + + # assign folder from parameter or search for xcodeproj file + folder = params[:xcodeproj] || Dir["*.xcodeproj"].first + + # validate folder + project_file_path = File.join(folder, "project.pbxproj") + UI.user_error!("Could not find path to project config '#{project_file_path}'. 
Pass the path to your project (not workspace)!") unless File.exist?(project_file_path) + + # download certificate + unless File.exist?(params[:certificate]) && File.size(params[:certificate]) > 0 + UI.message("Downloading root certificate from (#{ROOT_CERTIFICATE_URL}) to path '#{params[:certificate]}'") + File.open(params[:certificate], "w:ASCII-8BIT") do |file| + file.write(FastlaneCore::Helper.open_uri(ROOT_CERTIFICATE_URL, "rb").read) + end + end + + # parsing mobileprovision file + UI.message("Parsing mobile provisioning profile from '#{params[:profile]}'") + profile = File.read(params[:profile]) + p7 = OpenSSL::PKCS7.new(profile) + store = OpenSSL::X509::Store.new + UI.user_error!("Could not find valid certificate at '#{params[:certificate]}'") unless File.size(params[:certificate]) > 0 + cert = OpenSSL::X509::Certificate.new(File.read(params[:certificate])) + store.add_cert(cert) + + p7.verify([cert], store) + check_verify!(p7) + data = Plist.parse_xml(p7.data) + + target_filter = params[:target_filter] || params[:build_configuration_filter] + configuration = params[:build_configuration] + code_signing_identity = params[:code_signing_identity] + + # manipulate project file + UI.success("Going to update project '#{folder}' with UUID") + require 'xcodeproj' + + project = Xcodeproj::Project.open(folder) + project.targets.each do |target| + if !target_filter || target.name.match(target_filter) || (target.respond_to?(:product_type) && target.product_type.match(target_filter)) + UI.success("Updating target #{target.name}...") + else + UI.important("Skipping target #{target.name} as it doesn't match the filter '#{target_filter}'") + next + end + + target.build_configuration_list.build_configurations.each do |build_configuration| + config_name = build_configuration.name + if !configuration || config_name.match(configuration) + UI.success("Updating configuration #{config_name}...") + else + UI.important("Skipping configuration #{config_name} as it doesn't match the filter '#{configuration}'") + next + end + + if code_signing_identity + codesign_build_settings_keys = build_configuration.build_settings.keys.select { |key| key.to_s.match(/CODE_SIGN_IDENTITY.*/) } + codesign_build_settings_keys.each do |setting| + build_configuration.build_settings[setting] = code_signing_identity + end + end + + build_configuration.build_settings["PROVISIONING_PROFILE"] = data["UUID"] + build_configuration.build_settings["PROVISIONING_PROFILE_SPECIFIER"] = data["Name"] + end + end + + project.save + + # complete + UI.success("Successfully updated project settings in '#{folder}'") + end + + def self.check_verify!(p7) + failed_to_verify = (p7.data.nil? || p7.data == "") && !(p7.error_string || "").empty? + if failed_to_verify + UI.crash!("Profile could not be verified with error: '#{p7.error_string}'. 
Try regenerating provisioning profile.") + end + end + + def self.description + "Update projects code signing settings from your provisioning profile" + end + + def self.details + [ + "You should check out the [code signing guide](https://docs.fastlane.tools/codesigning/getting-started/) before using this action.", + "This action retrieves a provisioning profile UUID from a provisioning profile (`.mobileprovision`) to set up the Xcode projects' code signing settings in `*.xcodeproj/project.pbxproj`.", + "The `:target_filter` value can be used to only update code signing for the specified targets.", + "The `:build_configuration` value can be used to only update code signing for the specified build configurations of the targets passing through the `:target_filter`.", + "Example usage is the WatchKit Extension or WatchKit App, where you need separate provisioning profiles.", + "Example: `update_project_provisioning(xcodeproj: \"..\", target_filter: \".*WatchKit App.*\")`." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcodeproj, + env_name: "FL_PROJECT_PROVISIONING_PROJECT_PATH", + description: "Path to your Xcode project", + optional: true, + verify_block: proc do |value| + UI.user_error!("Path to xcode project is invalid") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :profile, + env_name: "FL_PROJECT_PROVISIONING_PROFILE_FILE", + description: "Path to provisioning profile (.mobileprovision)", + default_value: Actions.lane_context[SharedValues::SIGH_PROFILE_PATH], + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Path to provisioning profile is invalid") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :target_filter, + env_name: "FL_PROJECT_PROVISIONING_PROFILE_TARGET_FILTER", + description: "A filter for the target name. Use a standard regex", + optional: true, + skip_type_validation: true, # allow Regexp, String + verify_block: proc do |value| + UI.user_error!("target_filter should be Regexp or String") unless [Regexp, String].any? { |type| value.kind_of?(type) } + end), + FastlaneCore::ConfigItem.new(key: :build_configuration_filter, + env_name: "FL_PROJECT_PROVISIONING_PROFILE_FILTER", + description: "Legacy option, use 'target_filter' instead", + optional: true), + FastlaneCore::ConfigItem.new(key: :build_configuration, + env_name: "FL_PROJECT_PROVISIONING_PROFILE_BUILD_CONFIGURATION", + description: "A filter for the build configuration name. Use a standard regex. Applied to all configurations if not specified", + optional: true, + skip_type_validation: true, # allow Regexp, String + verify_block: proc do |value| + UI.user_error!("build_configuration should be Regexp or String") unless [Regexp, String].any? 
{ |type| value.kind_of?(type) } + end), + FastlaneCore::ConfigItem.new(key: :certificate, + env_name: "FL_PROJECT_PROVISIONING_CERTIFICATE_PATH", + description: "Path to apple root certificate", + default_value: "/tmp/AppleIncRootCertificate.cer"), + FastlaneCore::ConfigItem.new(key: :code_signing_identity, + env_name: "FL_PROJECT_PROVISIONING_CODE_SIGN_IDENTITY", + description: "Code sign identity for build configuration", + optional: true) + ] + end + + def self.authors + ["tobiasstrebitzer", "czechboy0"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'update_project_provisioning( + xcodeproj: "Project.xcodeproj", + profile: "./watch_app_store.mobileprovision", # optional if you use sigh + target_filter: ".*WatchKit Extension.*", # matches name or type of a target + build_configuration: "Release", + code_signing_identity: "iPhone Development" # optionally specify the codesigning identity + )' + ] + end + + def self.category + :code_signing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_team.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_team.rb new file mode 100644 index 0000000..718d82e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_project_team.rb @@ -0,0 +1,95 @@ +module Fastlane + module Actions + module SharedValues + end + + class UpdateProjectTeamAction < Action + def self.run(params) + project_path = params[:path] + selected_targets = params[:targets] + + UI.user_error!("Could not find path to xcodeproj '#{project_path}'. Pass the path to your project (not workspace)!") unless File.exist?(project_path) + + # Load .xcodeproj + project = Xcodeproj::Project.open(project_path) + + # Fetch target + targets = project.native_targets + if selected_targets + # Error to user if invalid target + diff_targets = selected_targets - targets.map(&:name) + UI.user_error!("Could not find target(s) in the project '#{project_path}' - #{diff_targets.join(',')}") unless diff_targets.empty? + + targets.select! { |native_target| selected_targets.include?(native_target.name) } + end + + # Set teamid in target + targets.each do |target| + UI.message("Updating development team (#{params[:teamid]}) for target `#{target.name}` in the project '#{project_path}'") + # Update the build settings + target.build_configurations.each do |configuration| + configuration.build_settings['DEVELOPMENT_TEAM'] = params[:teamid] + end + + project.save + + UI.success("Successfully updated project settings to use Developer Team ID '#{params[:teamid]}' for target `#{target.name}`") + end + end + + def self.description + "Update Xcode Development Team ID" + end + + def self.details + "This action updates the Developer Team ID of your Xcode project." 
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_PROJECT_SIGNING_PROJECT_PATH", + description: "Path to your Xcode project", + default_value: Dir['*.xcodeproj'].first, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Path is invalid") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :targets, + env_name: "FL_PROJECT_TARGET", + description: "Name of the targets you want to update", + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :teamid, + env_name: "FL_PROJECT_TEAM_ID", + description: "The Team ID you want to use", + code_gen_sensitive: true, + default_value: ENV["TEAM_ID"] || CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true) + ] + end + + def self.author + "lgaches" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'update_project_team', + 'update_project_team( + path: "Example.xcodeproj", + teamid: "A3ZZVJ7CNY" + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_urban_airship_configuration.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_urban_airship_configuration.rb new file mode 100644 index 0000000..2f354d1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_urban_airship_configuration.rb @@ -0,0 +1,90 @@ +module Fastlane + module Actions + class UpdateUrbanAirshipConfigurationAction < Action + def self.run(params) + require "plist" + + begin + path = File.expand_path(params[:plist_path]) + plist = Plist.parse_xml(path) + plist['developmentAppKey'] = params[:development_app_key] unless params[:development_app_key].nil? + plist['developmentAppSecret'] = params[:development_app_secret] unless params[:development_app_secret].nil? + plist['productionAppKey'] = params[:production_app_key] unless params[:production_app_key].nil? + plist['productionAppSecret'] = params[:production_app_secret] unless params[:production_app_secret].nil? + plist['detectProvisioningMode'] = params[:detect_provisioning_mode] unless params[:detect_provisioning_mode].nil? + new_plist = plist.to_plist + File.write(path, new_plist) + rescue => ex + UI.error(ex) + UI.error("Unable to update Urban Airship configuration for plist file at '#{path}'") + end + end + + def self.description + "Set [Urban Airship](https://www.urbanairship.com/) plist configuration values" + end + + def self.details + "This action updates the `AirshipConfig.plist` needed to configure the Urban Airship SDK at runtime, allowing keys and secrets to easily be set for the Enterprise and Production versions of the application." 
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :plist_path, + env_name: "URBAN_AIRSHIP_PLIST_PATH", + description: "Path to Urban Airship configuration Plist", + verify_block: proc do |value| + UI.user_error!("Could not find Urban Airship plist file") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :development_app_key, + optional: true, + env_name: "URBAN_AIRSHIP_DEVELOPMENT_APP_KEY", + sensitive: true, + description: "The development app key"), + FastlaneCore::ConfigItem.new(key: :development_app_secret, + optional: true, + env_name: "URBAN_AIRSHIP_DEVELOPMENT_APP_SECRET", + sensitive: true, + description: "The development app secret"), + FastlaneCore::ConfigItem.new(key: :production_app_key, + optional: true, + env_name: "URBAN_AIRSHIP_PRODUCTION_APP_KEY", + sensitive: true, + description: "The production app key"), + FastlaneCore::ConfigItem.new(key: :production_app_secret, + optional: true, + env_name: "URBAN_AIRSHIP_PRODUCTION_APP_SECRET", + sensitive: true, + description: "The production app secret"), + FastlaneCore::ConfigItem.new(key: :detect_provisioning_mode, + env_name: "URBAN_AIRSHIP_DETECT_PROVISIONING_MODE", + type: Boolean, + optional: true, + description: "Automatically detect provisioning mode") + ] + end + + def self.authors + ["kcharwood"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'update_urban_airship_configuration( + plist_path: "AirshipConfig.plist", + production_app_key: "PRODKEY", + production_app_secret: "PRODSECRET" + )' + ] + end + + def self.category + :push + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_url_schemes.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_url_schemes.rb new file mode 100644 index 0000000..6670de1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/update_url_schemes.rb @@ -0,0 +1,112 @@ +require 'plist' + +module Fastlane + module Actions + class UpdateUrlSchemesAction < Action + def self.run(params) + path = params[:path] + url_schemes = params[:url_schemes] + update_url_schemes = params[:update_url_schemes] + + hash = Plist.parse_xml(path) + + # Create CFBundleURLTypes array with empty scheme if none exist + unless hash['CFBundleURLTypes'] + hash['CFBundleURLTypes'] = [{ + 'CFBundleTypeRole' => 'Editor', + 'CFBundleURLSchemes' => [] + }] + end + + # Update schemes with the update block if one exists, + # else with the array of strings if given, + # otherwise show an error to the user + if update_url_schemes + new_schemes = update_url_schemes.call(hash['CFBundleURLTypes'].first['CFBundleURLSchemes']) + + # Verify array of strings + string = "The URL schemes must be an array of strings, got '#{new_schemes}'."
+ verify_schemes!(new_schemes, string) + + hash['CFBundleURLTypes'].first['CFBundleURLSchemes'] = new_schemes + elsif url_schemes + hash['CFBundleURLTypes'].first['CFBundleURLSchemes'] = url_schemes + else + UI.user_error!("No `url_schemes` or `update_url_schemes` provided") + end + File.write(path, Plist::Emit.dump(hash)) + end + + def self.verify_schemes!(url_schemes, error_message) + UI.user_error!(error_message) unless url_schemes.kind_of?(Array) + + url_schemes.each do |url_scheme| + UI.user_error!(error_message) unless url_scheme.kind_of?(String) + end + end + + def self.description + 'Updates the URL schemes in the given Info.plist' + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: 'FL_UPDATE_URL_SCHEMES_PATH', + description: 'The Plist file\'s path', + optional: false, + verify_block: proc do |path| + UI.user_error!("Could not find plist at path '#{path}'") unless File.exist?(path) + end), + FastlaneCore::ConfigItem.new(key: :url_schemes, + env_name: "FL_UPDATE_URL_SCHEMES_SCHEMES", + description: 'The new URL schemes', + type: Array, + optional: true), + FastlaneCore::ConfigItem.new(key: :update_url_schemes, + description: "Block that is called to update schemes with current schemes passed in as parameter", + optional: true, + type: :string_callback) + ] + end + + def self.details + [ + "This action allows you to update the URL schemes of the app before building it.", + "For example, you can use this to set a different URL scheme for the alpha or beta version of the app." + ].join("\n") + end + + def self.output + [] + end + + def self.authors + ['kmikael'] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'update_url_schemes( + path: "path/to/Info.plist", + url_schemes: ["com.myapp"] + )', + 'update_url_schemes( + path: "path/to/Info.plist", + update_url_schemes: proc do |schemes| + schemes + ["anotherscheme"] + end + )' + ] + end + + def self.category + :project + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_app_privacy_details_to_app_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_app_privacy_details_to_app_store.rb new file mode 100644 index 0000000..6fec71c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_app_privacy_details_to_app_store.rb @@ -0,0 +1,290 @@ +module Fastlane + module Actions + class UploadAppPrivacyDetailsToAppStoreAction < Action + DEFAULT_PATH = Fastlane::Helper.fastlane_enabled_folder_path + DEFAULT_FILE_NAME = "app_privacy_details.json" + + def self.run(params) + require 'spaceship' + + # Prompts select team if multiple teams and none specified + UI.message("Login to App Store Connect (#{params[:username]})") + Spaceship::ConnectAPI.login(params[:username], use_portal: false, use_tunes: true, tunes_team_id: params[:team_id], team_name: params[:team_name]) + UI.message("Login successful") + + # Get App + app = Spaceship::ConnectAPI::App.find(params[:app_identifier]) + unless app + UI.user_error!("Could not find app with bundle identifier '#{params[:app_identifier]}' on account #{params[:username]}") + end + + # Attempt to load JSON file + usages_config = load_json_file(params) + + # Start interactive questions to generate and save JSON file + unless usages_config + usages_config = ask_interactive_questions_for_json + + if params[:skip_json_file_saving] + UI.message("Skipping 
JSON file saving...") + else + json = JSON.pretty_generate(usages_config) + path = output_path(params) + + UI.message("Writing file to #{path}") + File.write(path, json) + end + end + + # Process JSON file to save app data usages to API + if params[:skip_upload] + UI.message("Skipping uploading of data... (so you can verify your JSON file)") + else + upload_app_data_usages(params, app, usages_config) + end + end + + def self.load_json_file(params) + path = params[:json_path] + return nil if path.nil? + return JSON.parse(File.read(path)) + end + + def self.output_path(params) + path = params[:output_json_path] + return File.absolute_path(path) + end + + def self.ask_interactive_questions_for_json(show_intro = true) + if show_intro + UI.important("You did not provide a JSON file for updating the app data usages") + UI.important("fastlane will now run you through interactive questions to generate the JSON file") + UI.important("") + UI.important("This JSON file can be saved in source control and used in this action with the :json_path option") + + unless UI.confirm("Ready to start?") + UI.user_error!("Cancelled") + end + end + + # Fetch categories and purposes used for generating interactive questions + categories = Spaceship::ConnectAPI::AppDataUsageCategory.all(includes: "grouping") + purposes = Spaceship::ConnectAPI::AppDataUsagePurpose.all + + json = [] + + unless UI.confirm("Are you collecting data?") + json << { + "data_protections" => [Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_NOT_COLLECTED] + } + + return json + end + + categories.each do |category| + # Ask if using category + next unless UI.confirm("Collect data for #{category.id}?") + + purpose_names = purposes.map(&:id).join(', ') + UI.message("How will this data be used? You'll be offered: #{purpose_names}") + + # Ask purposes + selected_purposes = [] + loop do + purposes.each do |purpose| + selected_purposes << purpose if UI.confirm("Used for #{purpose.id}?") + end + + break unless selected_purposes.empty? + break unless UI.confirm("No purposes selected. Do you want to try again?") + end + + # Skip asking protections if purposes were skipped + next if selected_purposes.empty? + + # Ask protections + is_linked_to_user = UI.confirm("Is #{category.id} linked to the user?") + is_used_for_tracking = UI.confirm("Is #{category.id} used for tracking purposes?") + + # Map answers to values for API requests + protection_id = is_linked_to_user ? Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_LINKED_TO_YOU : Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_NOT_LINKED_TO_YOU + tracking_id = is_used_for_tracking ? Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_USED_TO_TRACK_YOU : nil + + json << { + "category" => category.id, + "purposes" => selected_purposes.map(&:id).sort.uniq, + "data_protections" => [ + protection_id, tracking_id + ].compact.sort.uniq + } + end + + json.sort_by! { |c| c["category"] } + + # Recursively call this method if no categories were selected for data collection + if json.empty? + UI.error("No categories were selected for data collection.") + json = ask_interactive_questions_for_json(false) + end + + return json + end + + def self.upload_app_data_usages(params, app, usages_config) + UI.message("Preparing to upload App Data Usage") + + # Delete all existing usages so the new ones can replace them + all_usages = Spaceship::ConnectAPI::AppDataUsage.all(app_id: app.id, includes: "category,grouping,purpose,dataProtection", limit: 500) + all_usages.each(&:delete!)
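+
+        # A hedged sketch of the usages_config shape this loop consumes (the
+        # interactive flow above emits the same structure; the category and
+        # purpose IDs below are illustrative examples, not an exhaustive list):
+        #
+        #   [
+        #     { "category" => "EMAIL_ADDRESS",
+        #       "purposes" => ["APP_FUNCTIONALITY"],
+        #       "data_protections" => ["DATA_LINKED_TO_YOU"] },
+        #     # or, when nothing is collected at all, a single entry:
+        #     { "data_protections" => ["DATA_NOT_COLLECTED"] }
+        #   ]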
+ + usages_config.each do |usage_config| + category = usage_config["category"] + purposes = usage_config["purposes"] || [] + data_protections = usage_config["data_protections"] || [] + + # There will not be any purposes if "not collecting data" + # However, an AppDataUsage still needs to be created for not collecting data + # Creating an array with nil so that purposes can be iterated over and + # that AppDataUsage can be created + purposes = [nil] if purposes.empty? + + purposes.each do |purpose| + data_protections.each do |data_protection| + if data_protection == Spaceship::ConnectAPI::AppDataUsageDataProtection::ID::DATA_NOT_COLLECTED + UI.message("Setting #{data_protection}") + else + UI.message("Setting #{category} and #{purpose} to #{data_protection}") + end + + Spaceship::ConnectAPI::AppDataUsage.create( + app_id: app.id, + app_data_usage_category_id: category, + app_data_usage_protection_id: data_protection, + app_data_usage_purpose_id: purpose + ) + end + end + end + + # Publish + if params[:skip_publish] + UI.message("Skipping app data usage publishing... (so you can verify on App Store Connect)") + else + publish_state = Spaceship::ConnectAPI::AppDataUsagesPublishState.get(app_id: app.id) + if publish_state.published + UI.important("App data usage is already published") + else + UI.important("App data usage not published! Going to publish...") + publish_state.publish! + UI.important("App data usage is now published") + end + end + end + + def self.description + "Upload App Privacy Details for an app in App Store Connect" + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:itunes_connect_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FASTLANE_USER", + description: "Your Apple ID Username for App Store Connect", + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :app_identifier, + env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_APP_IDENTIFIER", + description: "The bundle identifier of your app", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_id, + env_name: "FASTLANE_ITC_TEAM_ID", + description: "The ID of your App Store Connect team if you're in multiple teams", + optional: true, + skip_type_validation: true, # as we also allow integers, which we convert to strings anyway + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_id), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_name, + env_name: "FASTLANE_ITC_TEAM_NAME", + description: "The name of your App Store Connect team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:itc_team_name), + default_value_dynamic: true), + + # JSON paths + FastlaneCore::ConfigItem.new(key: :json_path, + env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_JSON_PATH", + description: "Path to the app usage data JSON", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not find JSON file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("'#{value}' doesn't seem to be a JSON file") unless FastlaneCore::Helper.json_file?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :output_json_path, + 
env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_OUTPUT_JSON_PATH", + description: "Path to the app usage data JSON file generated by interactive questions", + conflicting_options: [:skip_json_file_saving], + default_value: File.join(DEFAULT_PATH, DEFAULT_FILE_NAME)), + + # Skipping options + FastlaneCore::ConfigItem.new(key: :skip_json_file_saving, + env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_OUTPUT_SKIP_JSON_FILE_SAVING", + description: "Whether to skip the saving of the JSON file", + conflicting_options: [:skip_output_json_path], + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_upload, + env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_OUTPUT_SKIP_UPLOAD", + description: "Whether to skip the upload and only create the JSON file with interactive questions", + conflicting_options: [:skip_publish], + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :skip_publish, + env_name: "UPLOAD_APP_PRIVACY_DETAILS_TO_APP_STORE_OUTPUT_SKIP_PUBLISH", + description: "Whether to skip the publishing", + conflicting_options: [:skip_upload], + type: Boolean, + default_value: false) + ] + end + + def self.author + "joshdholtz" + end + + def self.is_supported?(platform) + [:ios, :mac, :tvos].include?(platform) + end + + def self.details + "Upload App Privacy Details for an app in App Store Connect. For more detail information, view https://docs.fastlane.tools/uploading-app-privacy-details" + end + + def self.example_code + [ + 'upload_app_privacy_details_to_app_store( + username: "your@email.com", + team_name: "Your Team", + app_identifier: "com.your.bundle" + )', + 'upload_app_privacy_details_to_app_store( + username: "your@email.com", + team_name: "Your Team", + app_identifier: "com.your.bundle", + json_path: "fastlane/app_data_usages.json" + )' + ] + end + + def self.category + :production + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_crashlytics.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_crashlytics.rb new file mode 100644 index 0000000..136b7c5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_crashlytics.rb @@ -0,0 +1,232 @@ +module Fastlane + module Actions + class UploadSymbolsToCrashlyticsAction < Action + def self.run(params) + require 'tmpdir' + + find_binary_path(params) + unless params[:app_id] + find_gsp_path(params) + find_api_token(params) + end + + if !params[:app_id] && !params[:gsp_path] && !params[:api_token] + UI.user_error!('Either Firebase Crashlytics App ID, path to GoogleService-Info.plist or legacy Fabric API key must be given.') + end + + dsym_paths = [] + dsym_paths << params[:dsym_path] if params[:dsym_path] + dsym_paths += Actions.lane_context[SharedValues::DSYM_PATHS] if Actions.lane_context[SharedValues::DSYM_PATHS] + + # Allows adding of additional multiple dsym_paths since :dsym_path can be autoset by other actions + dsym_paths += params[:dsym_paths] if params[:dsym_paths] + + if dsym_paths.count == 0 + UI.error("Couldn't find any dSYMs, please pass them using the dsym_path option") + return nil + end + + # Get rid of duplicates (which might occur when both passed and detected) + dsym_paths = dsym_paths.collect { |a| File.expand_path(a) } + dsym_paths.uniq! 
+ + max_worker_threads = params[:dsym_worker_threads] + if max_worker_threads > 1 + UI.message("Using #{max_worker_threads} threads for Crashlytics dSYM upload 🏎") + end + + worker = FastlaneCore::QueueWorker.new(max_worker_threads) do |dsym_path| + handle_dsym(params, dsym_path, max_worker_threads) + end + worker.batch_enqueue(dsym_paths) + worker.start + UI.success("Successfully uploaded dSYM files to Crashlytics 💯") + end + + # @param current_path this is a path to either a dSYM or a zipped dSYM + # this might also be either nested or not, we're flexible + def self.handle_dsym(params, current_path, max_worker_threads) + if current_path.end_with?(".dSYM", ".zip") + upload_dsym(params, current_path) + else + UI.error("Don't know how to handle '#{current_path}'") + end + end + + def self.upload_dsym(params, path) + UI.message("Uploading '#{path}'...") + command = [] + command << File.expand_path(params[:binary_path]).shellescape + if params[:debug] + command << "-d" + end + if params[:app_id] + command << "-ai #{params[:app_id].shellescape}" + elsif params[:gsp_path] + command << "-gsp #{params[:gsp_path].shellescape}" + elsif params[:api_token] + command << "-a #{params[:api_token]}" + end + command << "-p #{params[:platform] == 'appletvos' ? 'tvos' : params[:platform]}" + command << File.expand_path(path).shellescape + begin + command_to_execute = command.join(" ") + UI.verbose("upload_dsym using command: #{command_to_execute}") + Actions.sh(command_to_execute, log: params[:debug]) + rescue => ex + UI.error(ex.to_s) # it fails, however we don't want to fail everything just for this + end + end + + def self.find_api_token(params) + return if params[:gsp_path] + unless params[:api_token].to_s.length > 0 + Dir["./**/Info.plist"].each do |current| + result = Actions::GetInfoPlistValueAction.run(path: current, key: "Fabric") + next unless result + next unless result.kind_of?(Hash) + params[:api_token] ||= result["APIKey"] + UI.verbose("found an APIKey in #{current}") + end + end + end + + def self.find_gsp_path(params) + return if params[:api_token] && params[:gsp_path].nil? + + if params[:gsp_path].to_s.length > 0 + params[:gsp_path] = File.expand_path(params[:gsp_path]) + else + gsp_path = Dir["./**/GoogleService-Info.plist"].first + params[:gsp_path] = File.expand_path(gsp_path) unless gsp_path.nil? + end + end + + def self.find_binary_path(params) + params[:binary_path] ||= (Dir["/Applications/Fabric.app/**/upload-symbols"] + Dir["./Pods/Fabric/upload-symbols"] + Dir["./scripts/upload-symbols"] + Dir["./Pods/FirebaseCrashlytics/upload-symbols"]).last + UI.user_error!("Failed to find Fabric's upload_symbols binary at /Applications/Fabric.app/**/upload-symbols or ./Pods/**/upload-symbols. Please specify the location of the binary explicitly by using the binary_path option") unless params[:binary_path] + + params[:binary_path] = File.expand_path(params[:binary_path]) + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload dSYM symbolication files to Crashlytics" + end + + def self.details + "This action allows you to upload symbolication files to Crashlytics. It's extra useful if you use it to download the latest dSYM files from Apple when you use Bitcode. This action will not fail the build if one of the uploads failed. The reason for that is that sometimes some of the dSYM files are invalid, and we don't want them to fail the complete build."
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :dsym_path, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_DSYM_PATH", + description: "Path to the DSYM file or zip to upload", + default_value: ENV[SharedValues::DSYM_OUTPUT_PATH.to_s] || (Dir["./**/*.dSYM"] + Dir["./**/*.dSYM.zip"]).sort_by { |f| File.mtime(f) }.last, + default_value_dynamic: true, + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("Symbolication file needs to be dSYM or zip") unless value.end_with?(".zip", ".dSYM") + end), + FastlaneCore::ConfigItem.new(key: :dsym_paths, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_DSYM_PATHS", + description: "Paths to the DSYM files or zips to upload", + optional: true, + type: Array, + verify_block: proc do |values| + values.each do |value| + UI.user_error!("Couldn't find file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("Symbolication file needs to be dSYM or zip") unless value.end_with?(".zip", ".dSYM") + end + end), + FastlaneCore::ConfigItem.new(key: :api_token, + env_name: "CRASHLYTICS_API_TOKEN", + sensitive: true, + optional: true, + description: "Crashlytics API Key", + verify_block: proc do |value| + UI.user_error!("No API token for Crashlytics given, pass using `api_token: 'token'`") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :gsp_path, + env_name: "GOOGLE_SERVICES_INFO_PLIST_PATH", + code_gen_sensitive: true, + optional: true, + description: "Path to GoogleService-Info.plist", + verify_block: proc do |value| + UI.user_error!("Couldn't find file at path '#{File.expand_path(value)}'") unless File.exist?(value) + UI.user_error!("No Path to GoogleService-Info.plist for Firebase Crashlytics given, pass using `gsp_path: 'path'`") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :app_id, + env_name: "CRASHLYTICS_APP_ID", + sensitive: true, + optional: true, + description: "Firebase Crashlytics APP ID", + verify_block: proc do |value| + UI.user_error!("No App ID for Firebase Crashlytics given, pass using `app_id: 'appId'`") if value.to_s.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :binary_path, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_BINARY_PATH", + description: "The path to the upload-symbols file of the Fabric app", + optional: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find file at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :platform, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_PLATFORM", + description: "The platform of the app (ios, appletvos, mac)", + default_value: "ios", + verify_block: proc do |value| + available = ['ios', 'appletvos', 'mac'] + UI.user_error!("Invalid platform '#{value}', must be #{available.join(', ')}") unless available.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :dsym_worker_threads, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_DSYM_WORKER_THREADS", + type: Integer, + default_value: 1, + optional: true, + description: "The number of threads to use for simultaneous dSYM upload", + verify_block: proc do |value| + min_threads = 1 + UI.user_error!("Too few threads (#{value}) minimum number of threads: #{min_threads}") unless value >= min_threads + end), + FastlaneCore::ConfigItem.new(key: :debug, + env_name: "FL_UPLOAD_SYMBOLS_TO_CRASHLYTICS_DEBUG", + description: "Enable debug mode for upload-symbols", + type: Boolean, + 
default_value: false) + ] + end + + def self.output + nil + end + + def self.return_value + nil + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :appletvos].include?(platform) + end + + def self.example_code + [ + 'upload_symbols_to_crashlytics(dsym_path: "./App.dSYM.zip")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_sentry.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_sentry.rb new file mode 100644 index 0000000..dcf3842 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_symbols_to_sentry.rb @@ -0,0 +1,158 @@ +module Fastlane + module Actions + class UploadSymbolsToSentryAction < Action + def self.run(params) + # Recommend the official Sentry plugin instead + UI.important("It's recommended to use the official Sentry Fastlane plugin") + UI.important("GitHub: https://github.com/getsentry/fastlane-plugin-sentry") + UI.important("Installation: fastlane add_plugin sentry") + + Actions.verify_gem!('rest-client') + require 'rest-client' + + # Params - API + host = params[:api_host] + api_key = params[:api_key] + auth_token = params[:auth_token] + org = params[:org_slug] + project = params[:project_slug] + + # Params - dSYM + dsym_path = params[:dsym_path] + dsym_paths = params[:dsym_paths] || [] + + has_api_key = !api_key.to_s.empty? + has_auth_token = !auth_token.to_s.empty? + + # Will fail if none or both authentication methods are provided + if !has_api_key && !has_auth_token + UI.user_error!("No API key or authentication token found for SentryAction given, pass using `api_key: 'key'` or `auth_token: 'token'`") + elsif has_api_key && has_auth_token + UI.user_error!("Both API key and authentication token found for SentryAction given, please only give one") + end + + # Url to post dSYMs to + url = "#{host}/projects/#{org}/#{project}/files/dsyms/" + + if has_api_key + resource = RestClient::Resource.new(url, api_key, '') + else + resource = RestClient::Resource.new(url, headers: { Authorization: "Bearer #{auth_token}" }) + end + + UI.message("Will upload dSYM(s) to #{url}") + + # Upload dsym(s) + dsym_paths += [dsym_path] + uploaded_paths = dsym_paths.compact.map do |dsym| + upload_dsym(resource, dsym) + end + + # Return uploaded dSYM paths + uploaded_paths + end + + def self.upload_dsym(resource, dsym) + UI.message("Uploading... #{dsym}") + resource.post(file: File.new(dsym, 'rb')) unless Helper.test? + UI.success('dSYM successfully uploaded to Sentry!') + + dsym + rescue + UI.user_error!('Error while trying to upload dSYM to Sentry') + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload dSYM symbolication files to Sentry" + end + + def self.details + "This action allows you to upload symbolication files to Sentry. It's extra useful if you use it to download the latest dSYM files from Apple when you use Bitcode."
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :api_host, + env_name: "SENTRY_HOST", + description: "API host url for Sentry", + default_value: "https://app.getsentry.com/api/0", + optional: true), + FastlaneCore::ConfigItem.new(key: :api_key, + env_name: "SENTRY_API_KEY", + description: "API key for Sentry", + sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :auth_token, + env_name: "SENTRY_AUTH_TOKEN", + description: "Authentication token for Sentry", + sensitive: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :org_slug, + env_name: "SENTRY_ORG_SLUG", + description: "Organization slug for Sentry project", + verify_block: proc do |value| + UI.user_error!("No organization slug for SentryAction given, pass using `org_slug: 'org'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :project_slug, + env_name: "SENTRY_PROJECT_SLUG", + description: "Project slug for Sentry", + verify_block: proc do |value| + UI.user_error!("No project slug for SentryAction given, pass using `project_slug: 'project'`") unless value && !value.empty? + end), + FastlaneCore::ConfigItem.new(key: :dsym_path, + env_name: "SENTRY_DSYM_PATH", + description: "Path to your symbols file. For iOS and Mac provide path to app.dSYM.zip", + default_value: Actions.lane_context[SharedValues::DSYM_OUTPUT_PATH], + default_value_dynamic: true, + optional: true), + FastlaneCore::ConfigItem.new(key: :dsym_paths, + env_name: "SENTRY_DSYM_PATHS", + description: "Path to an array of your symbols file. For iOS and Mac provide path to app.dSYM.zip", + default_value: Actions.lane_context[SharedValues::DSYM_PATHS], + default_value_dynamic: true, + type: Array, + optional: true) + ] + end + + def self.return_value + "The uploaded dSYM path(s)" + end + + def self.authors + ["joshdholtz"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'upload_symbols_to_sentry( + auth_token: "...", + org_slug: "...", + project_slug: "...", + dsym_path: "./App.dSYM.zip" + )' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + [ + "Please use the `sentry` plugin instead.", + "Install using `fastlane add_plugin sentry`.", + "Replace `upload_symbols_to_sentry(...)` with `sentry_upload_dsym(...)`." 
+ ].join("\n") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_app_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_app_store.rb new file mode 100644 index 0000000..6bd3388 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_app_store.rb @@ -0,0 +1,71 @@ +module Fastlane + module Actions + module SharedValues + end + + class UploadToAppStoreAction < Action + def self.run(config) + require 'deliver' + + begin + config.load_configuration_file("Deliverfile") + config[:screenshots_path] ||= Actions.lane_context[SharedValues::SNAPSHOT_SCREENSHOTS_PATH] if Actions.lane_context[SharedValues::SNAPSHOT_SCREENSHOTS_PATH] + config[:ipa] ||= Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] if Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] + config[:pkg] ||= Actions.lane_context[SharedValues::PKG_OUTPUT_PATH] if Actions.lane_context[SharedValues::PKG_OUTPUT_PATH] + + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless config[:api_key_path] + config[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + return config if Helper.test? + Deliver::Runner.new(config).run + end + end + + def self.description + "Upload metadata and binary to App Store Connect (via _deliver_)" + end + + def self.details + [ + "Using _upload_to_app_store_ after _build_app_ and _capture_screenshots_ will automatically upload the latest ipa and screenshots with no other configuration.", + "", + "If you don't want to verify an HTML preview for App Store builds, use the `:force` option.", + "This is useful when running _fastlane_ on your Continuous Integration server:", + "`_upload_to_app_store_(force: true)`", + "If your account is on multiple teams and you need to tell the `iTMSTransporter` which 'provider' to use, you can set the `:itc_provider` option to pass this info." + ].join("\n") + end + + def self.available_options + require "deliver" + require "deliver/options" + FastlaneCore::CommanderGenerator.new.generate(Deliver::Options.available_options) + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'upload_to_app_store( + force: true, # Set to true to skip verification of HTML preview + itc_provider: "abcde12345" # pass a specific value to the iTMSTransporter -itc_provider option + )', + 'deliver # alias for "upload_to_app_store"', + 'appstore # alias for "upload_to_app_store"' + ] + end + + def self.category + :production + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store.rb new file mode 100644 index 0000000..4638bec --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store.rb @@ -0,0 +1,80 @@ +module Fastlane + module Actions + class UploadToPlayStoreAction < Action + def self.run(params) + require 'supply' + require 'supply/options' + + # If no APK params were provided, try to fill in the values from lane context, preferring + # the multiple APKs over the single APK if set. + if params[:apk_paths].nil? && params[:apk].nil? 
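+        # GRADLE_ALL_APK_OUTPUT_PATHS is typically populated by the _gradle_
+        # action when a build produces several APK splits; preferring it here
+        # uploads every split rather than a single APK.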
+ all_apk_paths = Actions.lane_context[SharedValues::GRADLE_ALL_APK_OUTPUT_PATHS] || [] + if all_apk_paths.size > 1 + params[:apk_paths] = all_apk_paths + else + params[:apk] = Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH] + end + end + + # If no AAB param was provided, try to fill in the value from lane context. + # First GRADLE_ALL_AAB_OUTPUT_PATHS if only one + # Else from GRADLE_AAB_OUTPUT_PATH + if params[:aab].nil? + all_aab_paths = Actions.lane_context[SharedValues::GRADLE_ALL_AAB_OUTPUT_PATHS] || [] + if all_aab_paths.count == 1 + params[:aab] = all_aab_paths.first + else + params[:aab] = Actions.lane_context[SharedValues::GRADLE_AAB_OUTPUT_PATH] + end + end + + Supply.config = params # we already have the finished config + + Supply::Uploader.new.perform_upload + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload metadata, screenshots and binaries to Google Play (via _supply_)" + end + + def self.details + "More information: https://docs.fastlane.tools/actions/supply/" + end + + def self.available_options + require 'supply' + require 'supply/options' + Supply::Options.available_options + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + platform == :android + end + + def self.example_code + [ + 'upload_to_play_store', + 'supply # alias for "upload_to_play_store"' + ] + end + + def self.category + :production + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store_internal_app_sharing.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store_internal_app_sharing.rb new file mode 100644 index 0000000..02114a5 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_play_store_internal_app_sharing.rb @@ -0,0 +1,78 @@ +module Fastlane + module Actions + class UploadToPlayStoreInternalAppSharingAction < Action + def self.run(params) + require 'supply' + + # If no APK params were provided, try to fill in the values from lane context, preferring + # the multiple APKs over the single APK if set. + if params[:apk_paths].nil? && params[:apk].nil? + all_apk_paths = Actions.lane_context[SharedValues::GRADLE_ALL_APK_OUTPUT_PATHS] || [] + if all_apk_paths.size > 1 + params[:apk_paths] = all_apk_paths + else + params[:apk] = Actions.lane_context[SharedValues::GRADLE_APK_OUTPUT_PATH] + end + end + + # If no AAB param was provided, try to fill in the value from lane context. + # First GRADLE_ALL_AAB_OUTPUT_PATHS if only one + # Else from GRADLE_AAB_OUTPUT_PATH + if params[:aab].nil? 
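+      # Auto-fill only when the lane produced exactly one AAB; with several
+      # bundles there is no safe default, so the code below falls back to the
+      # single-path GRADLE_AAB_OUTPUT_PATH value (which may itself be nil).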
+ all_aab_paths = Actions.lane_context[SharedValues::GRADLE_ALL_AAB_OUTPUT_PATHS] || [] + if all_aab_paths.count == 1 + params[:aab] = all_aab_paths.first + else + params[:aab] = Actions.lane_context[SharedValues::GRADLE_AAB_OUTPUT_PATH] + end + end + + Supply.config = params # we already have the finished config + + Supply::Uploader.new.perform_upload_to_internal_app_sharing + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload binaries to Google Play Internal App Sharing (via _supply_)" + end + + def self.details + "More information: https://docs.fastlane.tools/actions/upload_to_play_store_internal_app_sharing/" + end + + def self.available_options + require 'supply' + require 'supply/options' + options = Supply::Options.available_options.clone + + # remove all the unnecessary (for this action) options + options_to_keep = [:package_name, :apk, :apk_paths, :aab, :aab_paths, :json_key, :json_key_data, :root_url, :timeout] + options.delete_if { |option| options_to_keep.include?(option.key) == false } + end + + def self.return_value + "Returns a string containing the download URL for the uploaded APK/AAB (or array of strings if multiple were uploaded)." + end + + def self.authors + ["andrewhavens"] + end + + def self.is_supported?(platform) + platform == :android + end + + def self.example_code + ["upload_to_play_store_internal_app_sharing"] + end + + def self.category + :production + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_testflight.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_testflight.rb new file mode 100644 index 0000000..5f24750 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/upload_to_testflight.rb @@ -0,0 +1,126 @@ +module Fastlane + module Actions + class UploadToTestflightAction < Action + def self.run(values) + require 'pilot' + require 'pilot/options' + + distribute_only = values[:distribute_only] + + changelog = Actions.lane_context[SharedValues::FL_CHANGELOG] + values[:changelog] ||= changelog if changelog + + unless distribute_only + values[:ipa] ||= Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] + values[:ipa] = File.expand_path(values[:ipa]) if values[:ipa] + values[:pkg] ||= Actions.lane_context[SharedValues::PKG_OUTPUT_PATH] + values[:pkg] = File.expand_path(values[:pkg]) if values[:pkg] + end + + # Only set :api_key from SharedValues if :api_key_path isn't set (conflicting options) + unless values[:api_key_path] + values[:api_key] ||= Actions.lane_context[SharedValues::APP_STORE_CONNECT_API_KEY] + end + + return values if Helper.test? 
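+
+      # Two paths from here (a summary of the code below, not new behavior):
+      # * distribute_only: reuse a build that is already on App Store Connect,
+      #   optionally wait for processing to finish, then distribute it.
+      # * otherwise: hand the finished config to Pilot::BuildManager#upload,
+      #   which performs the actual TestFlight upload.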
+ + if distribute_only + build_manager = Pilot::BuildManager.new + build_manager.start(values, should_login: true) + + build_manager.wait_for_build_processing_to_be_complete(false) unless values[:skip_waiting_for_build_processing] + build_manager.distribute(values) # we already have the finished config + else + Pilot::BuildManager.new.upload(values) # we already have the finished config + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Upload new binary to App Store Connect for TestFlight beta testing (via _pilot_)" + end + + def self.details + [ + "More details can be found on https://docs.fastlane.tools/actions/pilot/.", + "This integration will only do the TestFlight upload." + ].join("\n") + end + + def self.available_options + require "pilot" + require "pilot/options" + FastlaneCore::CommanderGenerator.new.generate(Pilot::Options.available_options) + end + + def self.example_code + [ + 'upload_to_testflight', + 'testflight # alias for "upload_to_testflight"', + 'pilot # alias for "upload_to_testflight"', + 'upload_to_testflight(skip_submission: true) # to only upload the build', + 'upload_to_testflight( + username: "felix@krausefx.com", + app_identifier: "com.krausefx.app", + itc_provider: "abcde12345" # pass a specific value to the iTMSTransporter -itc_provider option + )', + 'upload_to_testflight( + beta_app_feedback_email: "email@email.com", + beta_app_description: "This is a description of my app", + demo_account_required: true, + notify_external_testers: false, + changelog: "This is my changelog of things that have changed in a log" + )', + 'upload_to_testflight( + beta_app_review_info: { + contact_email: "email@email.com", + contact_first_name: "Connect", + contact_last_name: "API", + contact_phone: "5558675309", + demo_account_name: "demo@email.com", + demo_account_password: "connectapi", + notes: "this is a review note for the reviewer <3 thank you for reviewing" + }, + localized_app_info: { + "default": { + feedback_email: "default@email.com", + marketing_url: "https://example.com/marketing-default", + privacy_policy_url: "https://example.com/privacy-default", + description: "Default description", + }, + "en-GB": { + feedback_email: "en-gb@email.com", + marketing_url: "https://example.com/marketing-en-gb", + privacy_policy_url: "https://example.com/privacy-en-gb", + description: "en-gb description", + } + }, + localized_build_info: { + "default": { + whats_new: "Default changelog", + }, + "en-GB": { + whats_new: "en-gb changelog", + } + } + )' + ] + end + + def self.category + :beta + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac, :tvos].include?(platform) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/validate_play_store_json_key.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/validate_play_store_json_key.rb new file mode 100644 index 0000000..de4891f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/validate_play_store_json_key.rb @@ -0,0 +1,100 @@ +require 'supply/client' + +module Fastlane + module Actions + class ValidatePlayStoreJsonKeyAction < Action + def self.run(params) + FastlaneCore::PrintTable.print_values( + config: params, + mask_keys: [:json_key_data], + title: "Summary for validate_play_store_json_key" + ) + + begin + client =
Supply::Client.make_from_config(params: params) + FastlaneCore::UI.success("Successfully established connection to Google Play Store.") + FastlaneCore::UI.verbose("client: " + client.inspect) + rescue => e + UI.error("Could not establish a connection to Google Play Store with this json key file.") + UI.error("#{e.message}\n#{e.backtrace.join("\n")}") if FastlaneCore::Globals.verbose? + end + end + + def self.description + "Validate that the Google Play Store `json_key` works" + end + + def self.authors + ["janpio"] + end + + def self.details + "Use this action to test and validate the service account JSON key file used to connect and authenticate with the Google Play API" + end + + def self.example_code + [ + "validate_play_store_json_key( + json_key: 'path/to/your/json/key/file' + )" + ] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :json_key, + env_name: "SUPPLY_JSON_KEY", + short_option: "-j", + conflicting_options: [:json_key_data], + optional: true, + description: "The path to a file containing service account JSON, used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_file), + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Could not find service account json file at path '#{File.expand_path(value)}'") unless File.exist?(File.expand_path(value)) + UI.user_error!("'#{value}' doesn't seem to be a JSON file") unless FastlaneCore::Helper.json_file?(File.expand_path(value)) + end), + FastlaneCore::ConfigItem.new(key: :json_key_data, + env_name: "SUPPLY_JSON_KEY_DATA", + short_option: "-c", + conflicting_options: [:json_key], + optional: true, + description: "The raw service account JSON data used to authenticate with Google", + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:json_key_data_raw), + default_value_dynamic: true, + verify_block: proc do |value| + begin + JSON.parse(value) + rescue JSON::ParserError + UI.user_error!("Could not parse service account json: JSON::ParseError") + end + end), + # Network options + FastlaneCore::ConfigItem.new(key: :root_url, + env_name: "SUPPLY_ROOT_URL", + description: "Root URL for the Google Play API.
The provided URL will be used for API calls in place of https://www.googleapis.com/", + optional: true, + verify_block: proc do |value| + UI.user_error!("Could not parse URL '#{value}'") unless value =~ URI.regexp + end), + FastlaneCore::ConfigItem.new(key: :timeout, + env_name: "SUPPLY_TIMEOUT", + optional: true, + description: "Timeout for read, open, and send (in seconds)", + type: Integer, + default_value: 300) + ] + end + + def self.is_supported?(platform) + [:android].include?(platform) + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_build.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_build.rb new file mode 100644 index 0000000..a975aa0 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_build.rb @@ -0,0 +1,199 @@ +require 'plist' + +module Fastlane + module Actions + class VerifyBuildAction < Action + def self.run(params) + Dir.mktmpdir do |dir| + app_path = self.app_path(params, dir) + + values = self.gather_cert_info(app_path) + + values = self.update_with_profile_info(app_path, values) + + self.print_values(values) + + self.evaluate(params, values) + end + end + + def self.app_path(params, dir) + build_path = params[:ipa_path] || params[:build_path] || Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] || '' + UI.user_error!("Unable to find file '#{build_path}'") unless File.exist?(build_path) + build_path = File.expand_path(build_path) + + case File.extname(build_path) + when ".ipa", ".zip" + `unzip #{build_path.shellescape} -d #{dir.shellescape} -x '__MACOSX/*' '*.DS_Store'` + UI.user_error!("Unable to unzip ipa") unless $? == 0 + # Adding extra ** for edge-case ipas where Payload directory is nested. + app_path = Dir["#{dir}/**/Payload/*.app"].first + when ".xcarchive" + app_path = Dir["#{build_path}/Products/Applications/*.app"].first + else + app_path = build_path # Assume that input is an app file. + end + + UI.user_error!("Unable to find app file") unless app_path && File.exist?(app_path) + app_path + end + + def self.gather_cert_info(app_path) + cert_info = `codesign -vv -d #{app_path.shellescape} 2>&1` + UI.user_error!("Unable to verify code signing") unless $? == 0 + + values = {} + + parts = cert_info.strip.split(/\r?\n/) + parts.each do |part| + if part =~ /\AAuthority=(iPhone|iOS|Apple)\s(Distribution|Development)/ + type = part.split('=')[1].split(':')[0] + values['provisioning_type'] = type.downcase =~ /distribution/i ? "distribution" : "development" + end + if part.start_with?("Authority") + values['authority'] ||= [] + values['authority'] << part.split('=')[1] + end + if part.start_with?("TeamIdentifier") + values['team_identifier'] = part.split('=')[1] + end + if part.start_with?("Identifier") + values['bundle_identifier'] = part.split('=')[1] + end + end + + values + end + + def self.update_with_profile_info(app_path, values) + profile = `cat #{app_path.shellescape}/embedded.mobileprovision | security cms -D` + UI.user_error!("Unable to extract profile") unless $?
== 0 + + plist = Plist.parse_xml(profile) + + values['app_name'] = plist['AppIDName'] + values['provisioning_uuid'] = plist['UUID'] + values['team_name'] = plist['TeamName'] + values['team_identifier'] = plist['TeamIdentifier'].first + + application_identifier_prefix = plist['ApplicationIdentifierPrefix'][0] + full_bundle_identifier = "#{application_identifier_prefix}.#{values['bundle_identifier']}" + + UI.user_error!("Inconsistent identifier found; #{plist['Entitlements']['application-identifier']}, found in the embedded.mobileprovision file, should match #{full_bundle_identifier}, which is embedded in the codesign identity") unless plist['Entitlements']['application-identifier'] == full_bundle_identifier + UI.user_error!("Inconsistent team identifier found") unless plist['Entitlements']['com.apple.developer.team-identifier'] == values['team_identifier'] + + values + end + + def self.print_values(values) + FastlaneCore::PrintTable.print_values(config: values, + title: "Summary for verify_build #{Fastlane::VERSION}") + end + + def self.evaluate(params, values) + if params[:provisioning_type] + UI.user_error!("Mismatched provisioning_type. Required: '#{params[:provisioning_type]}'; Found: '#{values['provisioning_type']}'") unless params[:provisioning_type] == values['provisioning_type'] + end + if params[:provisioning_uuid] + UI.user_error!("Mismatched provisioning_uuid. Required: '#{params[:provisioning_uuid]}'; Found: '#{values['provisioning_uuid']}'") unless params[:provisioning_uuid] == values['provisioning_uuid'] + end + if params[:team_identifier] + UI.user_error!("Mismatched team_identifier. Required: '#{params[:team_identifier]}'; Found: '#{values['team_identifier']}'") unless params[:team_identifier] == values['team_identifier'] + end + if params[:team_name] + UI.user_error!("Mismatched team_name. Required: '#{params[:team_name]}'; Found: '#{values['team_name']}'") unless params[:team_name] == values['team_name'] + end + if params[:app_name] + UI.user_error!("Mismatched app_name. Required: '#{params[:app_name]}'; Found: '#{values['app_name']}'") unless params[:app_name] == values['app_name'] + end + if params[:bundle_identifier] + UI.user_error!("Mismatched bundle_identifier. Required: '#{params[:bundle_identifier]}'; Found: '#{values['bundle_identifier']}'") unless params[:bundle_identifier] == values['bundle_identifier'] + end + + UI.success("Build is verified, have a 🍪.") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Verifies various settings in an ipa file" + end + + def self.details + "Verifies that the built app was built using the expected build resources. This is relevant for people who build on machines that are used to build apps with different profiles, certificates and/or bundle identifiers to guard against configuration mistakes."
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :provisioning_type, + env_name: "FL_VERIFY_BUILD_PROVISIONING_TYPE", + description: "Required type of provisioning", + optional: true, + verify_block: proc do |value| + av = %w(distribution development) + UI.user_error!("Unsupported provisioning_type, must be: #{av}") unless av.include?(value) + end), + FastlaneCore::ConfigItem.new(key: :provisioning_uuid, + env_name: "FL_VERIFY_BUILD_PROVISIONING_UUID", + description: "Required UUID of provisioning profile", + optional: true), + FastlaneCore::ConfigItem.new(key: :team_identifier, + env_name: "FL_VERIFY_BUILD_TEAM_IDENTIFIER", + description: "Required team identifier", + optional: true), + FastlaneCore::ConfigItem.new(key: :team_name, + env_name: "FL_VERIFY_BUILD_TEAM_NAME", + description: "Required team name", + optional: true), + FastlaneCore::ConfigItem.new(key: :app_name, + env_name: "FL_VERIFY_BUILD_APP_NAME", + description: "Required app name", + optional: true), + FastlaneCore::ConfigItem.new(key: :bundle_identifier, + env_name: "FL_VERIFY_BUILD_BUNDLE_IDENTIFIER", + description: "Required bundle identifier", + optional: true), + FastlaneCore::ConfigItem.new(key: :ipa_path, + env_name: "FL_VERIFY_BUILD_IPA_PATH", + description: "Explicitly set the ipa path", + conflicting_options: [:build_path], + optional: true), + FastlaneCore::ConfigItem.new(key: :build_path, + env_name: "FL_VERIFY_BUILD_BUILD_PATH", + description: "Explicitly set the ipa, app or xcarchive path", + conflicting_options: [:ipa_path], + optional: true) + ] + end + + def self.output + end + + def self.return_value + end + + def self.authors + ["CodeReaper"] + end + + def self.is_supported?(platform) + platform == :ios + end + + def self.example_code + [ + 'verify_build( + provisioning_type: "distribution", + bundle_identifier: "com.example.myapp" + )' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_pod_keys.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_pod_keys.rb new file mode 100644 index 0000000..bdfce9b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_pod_keys.rb @@ -0,0 +1,67 @@ +module Fastlane + module Actions + class VerifyPodKeysAction < Action + def self.run(params) + UI.message("Validating CocoaPods Keys") + + options = plugin_options + target = options["target"] || "" + + options["keys"].each do |key| + UI.message(" - #{key}") + validate(key, target) + end + end + + def self.plugin_options + require 'cocoapods-core' + podfile = Pod::Podfile.from_file("Podfile") + podfile.plugins["cocoapods-keys"] + end + + def self.validate(key, target) + if value(key, target).length < 2 + message = "Did not pass validation for key #{key}. " \ + "Run `[bundle exec] pod keys get #{key} #{target}` to see what it is. " \ + "It's likely this is running with empty/OSS keys." 
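+          # This branch trips when the key's value (everything after the last
+          # "]" in the `pod keys get` output; see self.value below) is shorter
+          # than two characters, the signature of stubbed/empty OSS keys.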
+ raise message + end + end + + def self.value(key, target) + value = `pod keys get #{key} #{target}` + value.split("]").last.strip + end + + def self.author + "ashfurrow" + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Verifies all keys referenced from the Podfile are non-empty" + end + + def self.details + "Runs a check against all keys specified in your Podfile to make sure they're more than a single character long. This is to ensure you don't deploy with stubbed keys." + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'verify_pod_keys' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_xcode.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_xcode.rb new file mode 100644 index 0000000..2a68fdc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/verify_xcode.rb @@ -0,0 +1,159 @@ +require 'shellwords' + +module Fastlane + module Actions + module SharedValues + end + + class VerifyXcodeAction < Action + def self.run(params) + UI.message("Verifying your Xcode installation at path '#{params[:xcode_path]}'...") + + # Check 1/2 + verify_codesign(params) + + # Check 2/2 + # More information https://developer.apple.com/news/?id=09222015a + verify_gatekeeper(params) + + true + end + + def self.verify_codesign(params) + UI.message("Verifying Xcode was signed by Apple Inc.") + + codesign_output = Actions.sh("codesign --display --verbose=4 #{params[:xcode_path].shellescape}") + + # If the returned codesign info contains all entries for any one of these sets, we'll consider it valid + accepted_codesign_detail_sets = [ + [ # Found on App Store installed Xcode installations + "Identifier=com.apple.dt.Xcode", + "Authority=Apple Mac OS Application Signing", + "Authority=Apple Worldwide Developer Relations Certification Authority", + "Authority=Apple Root CA", + "TeamIdentifier=59GAB85EFG" + ], + [ # Found on App Store installed Xcode installations post-Xcode 11.3 + "Identifier=com.apple.dt.Xcode", + "Authority=Apple Mac OS Application Signing", + "Authority=Apple Worldwide Developer Relations Certification Authority", + "Authority=Apple Root CA", + "TeamIdentifier=APPLECOMPUTER" + ], + [ # Found on Xcode installations (pre-Xcode 8) downloaded from developer.apple.com + "Identifier=com.apple.dt.Xcode", + "Authority=Software Signing", + "Authority=Apple Code Signing Certification Authority", + "Authority=Apple Root CA", + "TeamIdentifier=not set" + ], + [ # Found on Xcode installations (post-Xcode 8) downloaded from developer.apple.com + "Identifier=com.apple.dt.Xcode", + "Authority=Software Signing", + "Authority=Apple Code Signing Certification Authority", + "Authority=Apple Root CA", + "TeamIdentifier=59GAB85EFG" + ] + ] + + # Map the accepted details sets into an equal number of sets collecting the details for which + # the output of codesign did not have matches + missing_details_sets = accepted_codesign_detail_sets.map do |accepted_details_set| + accepted_details_set.reject { |detail| codesign_output.include?(detail) } + end + + # If any of the sets is empty, it means that all details were matched, and the check is successful + show_and_raise_error(nil, params[:xcode_path]) unless missing_details_sets.any?(&:empty?) 
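+ # Reaching this line means at least one accepted detail set matched the codesign output in full.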
+ + UI.success("Successfully verified the code signature ✅") + end + + def self.verify_gatekeeper(params) + UI.message("Verifying Xcode using GateKeeper...") + UI.message("This will take up to a few minutes, now is a great time to go for a coffee ☕...") + + command = "/usr/sbin/spctl --assess --verbose #{params[:xcode_path].shellescape}" + must_includes = ['accepted'] + + output = verify(command: command, must_includes: must_includes, params: params) + + if output.include?("source=Mac App Store") || output.include?("source=Apple") || output.include?("source=Apple System") + UI.success("Successfully verified Xcode installation at path '#{params[:xcode_path]}' 🎧") + else + show_and_raise_error("Invalid Download Source of Xcode: #{output}", params[:xcode_path]) + end + end + + def self.verify(command: nil, must_includes: nil, params: nil) + output = Actions.sh(command) + + errors = [] + must_includes.each do |current| + next if output.include?(current) + errors << current + end + + if errors.count > 0 + show_and_raise_error(errors.join("\n"), params[:xcode_path]) + end + + return output + end + + def self.show_and_raise_error(error, xcode_path) + UI.error("Attention: Your Xcode Installation could not be verified.") + UI.error("If you believe that your Xcode is valid, please submit an issue on GitHub") + if error + UI.error("The following information couldn't be found:") + UI.error(error) + end + UI.user_error!("The Xcode installation at path '#{xcode_path}' could not be verified.") + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Verifies that the Xcode installation is properly signed by Apple" + end + + def self.details + "This action was implemented after the recent Xcode attack to make sure you're not using a [hacked Xcode installation](http://researchcenter.paloaltonetworks.com/2015/09/novel-malware-xcodeghost-modifies-xcode-infects-apple-ios-apps-and-hits-app-store/)." 
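+ # Two independent checks run above: a codesign certificate-chain verification and a Gatekeeper (spctl) assessment of the download source.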
+ end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :xcode_path, + env_name: "FL_VERIFY_XCODE_XCODE_PATH", + description: "The path to the Xcode installation to test", + code_gen_sensitive: true, + default_value: File.expand_path('../../', FastlaneCore::Helper.xcode_path), + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Couldn't find Xcode at path '#{value}'") unless File.exist?(value) + end) + ] + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'verify_xcode', + 'verify_xcode(xcode_path: "/Applications/Xcode.app")' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_bump_podspec.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_bump_podspec.rb new file mode 100644 index 0000000..2d6d0ce --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_bump_podspec.rb @@ -0,0 +1,105 @@ +module Fastlane + module Actions + module SharedValues + PODSPEC_VERSION_NUMBER ||= :PODSPEC_VERSION_NUMBER + end + + class VersionBumpPodspecAction < Action + def self.run(params) + podspec_path = params[:path] + + UI.user_error!("Could not find podspec file at path #{podspec_path}") unless File.exist?(podspec_path) + + version_podspec_file = Helper::PodspecHelper.new(podspec_path, params[:require_variable_prefix]) + + if params[:version_number] + new_version = params[:version_number] + elsif params[:version_appendix] + new_version = version_podspec_file.update_version_appendix(params[:version_appendix]) + else + new_version = version_podspec_file.bump_version(params[:bump_type]) + end + + version_podspec_file.update_podspec(new_version) + + Actions.lane_context[SharedValues::PODSPEC_VERSION_NUMBER] = new_version + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Increment or set the version in a podspec file" + end + + def self.details + [ + "You can use this action to manipulate any 'version' variable contained in a ruby file.", + "For example, you can use it to bump the version of a CocoaPods' podspec file.", + "It also supports versions that are not semantic: `1.4.14.4.1`.", + "For such versions, there is an option to change the appendix (e.g. `4.1`)." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_VERSION_BUMP_PODSPEC_PATH", + description: "You must specify the path to the podspec file to update", + code_gen_sensitive: true, + default_value: Dir["*.podspec"].last, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Please pass a path to the `version_bump_podspec` action") if value.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :bump_type, + env_name: "FL_VERSION_BUMP_PODSPEC_BUMP_TYPE", + description: "The type of this version bump. Available: patch, minor, major", + default_value: "patch", + verify_block: proc do |value| + UI.user_error!("Available values are 'patch', 'minor' and 'major'") unless ['patch', 'minor', 'major'].include?(value) + end), + FastlaneCore::ConfigItem.new(key: :version_number, + env_name: "FL_VERSION_BUMP_PODSPEC_VERSION_NUMBER", + description: "Change to a specific version. 
This will replace the bump type value", + optional: true), + FastlaneCore::ConfigItem.new(key: :version_appendix, + env_name: "FL_VERSION_BUMP_PODSPEC_VERSION_APPENDIX", + description: "Change version appendix to a specific value. For example 1.4.14.4.1 -> 1.4.14.5", + optional: true), + FastlaneCore::ConfigItem.new(key: :require_variable_prefix, + env_name: "FL_VERSION_BUMP_PODSPEC_VERSION_REQUIRE_VARIABLE_PREFIX", + description: "true by default, this is used for non CocoaPods version bumps only", + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['PODSPEC_VERSION_NUMBER', 'The new podspec version number'] + ] + end + + def self.authors + ["Liquidsoul", "KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'version = version_bump_podspec(path: "TSMessages.podspec", bump_type: "patch")', + 'version = version_bump_podspec(path: "TSMessages.podspec", version_number: "1.4")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_get_podspec.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_get_podspec.rb new file mode 100644 index 0000000..6be89f1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/version_get_podspec.rb @@ -0,0 +1,70 @@ +module Fastlane + module Actions + module SharedValues + PODSPEC_VERSION_NUMBER ||= :PODSPEC_VERSION_NUMBER # originally defined in VersionBumpPodspecAction + end + + class VersionGetPodspecAction < Action + def self.run(params) + podspec_path = params[:path] + + UI.user_error!("Could not find podspec file at path '#{podspec_path}'") unless File.exist?(podspec_path) + + version_podspec_file = Helper::PodspecHelper.new(podspec_path, params[:require_variable_prefix]) + + Actions.lane_context[SharedValues::PODSPEC_VERSION_NUMBER] = version_podspec_file.version_value + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Receive the version number from a podspec file" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_VERSION_PODSPEC_PATH", + description: "You must specify the path to the podspec file", + code_gen_sensitive: true, + default_value: Dir["*.podspec"].last, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("Please pass a path to the `version_get_podspec` action") if value.length == 0 + end), + FastlaneCore::ConfigItem.new(key: :require_variable_prefix, + env_name: "FL_VERSION_BUMP_PODSPEC_VERSION_REQUIRE_VARIABLE_PREFIX", + description: "true by default, this is used for non CocoaPods version bumps only", + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['PODSPEC_VERSION_NUMBER', 'The podspec version number'] + ] + end + + def self.authors + ["Liquidsoul", "KrauseFx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'version = version_get_podspec(path: "TSMessages.podspec")' + ] + end + + def self.category + :misc + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_install.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_install.rb new file mode 100644 index 0000000..ccd080d 
--- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_install.rb @@ -0,0 +1,112 @@ +module Fastlane + module Actions + module SharedValues + XCODE_INSTALL_XCODE_PATH = :XCODE_INSTALL_XCODE_PATH + end + + class XcodeInstallAction < Action + def self.run(params) + Actions.verify_gem!('xcode-install') + + ENV["XCODE_INSTALL_USER"] = params[:username] + ENV["XCODE_INSTALL_TEAM_ID"] = params[:team_id] + + require 'xcode/install' + installer = XcodeInstall::Installer.new + + if installer.installed?(params[:version]) + UI.success("Xcode #{params[:version]} is already installed ✨") + else + installer.install_version(params[:version], true, true, true, true, nil, true, nil, params[:download_retry_attempts]) + end + + xcode = installer.installed_versions.find { |x| x.version == params[:version] } + UI.user_error!("Could not find Xcode with version '#{params[:version]}'") unless xcode + UI.message("Using Xcode #{params[:version]} on path '#{xcode.path}'") + xcode.approve_license + + ENV["DEVELOPER_DIR"] = File.join(xcode.path, "/Contents/Developer") + Actions.lane_context[SharedValues::XCODE_INSTALL_XCODE_PATH] = xcode.path + return xcode.path + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Make sure a certain version of Xcode is installed" + end + + def self.details + "Makes sure a specific version of Xcode is installed. If that's not the case, it will automatically be downloaded by the [xcode_install](https://github.com/neonichu/xcode-install) gem. This will make sure to use the correct Xcode for later actions." + end + + def self.available_options + user = CredentialsManager::AppfileConfig.try_fetch_value(:apple_dev_portal_id) + user ||= CredentialsManager::AppfileConfig.try_fetch_value(:apple_id) + + [ + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_XCODE_VERSION", + description: "The version number of the version of Xcode to install"), + FastlaneCore::ConfigItem.new(key: :username, + short_option: "-u", + env_name: "XCODE_INSTALL_USER", + description: "Your Apple ID Username", + default_value: user, + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :team_id, + short_option: "-b", + env_name: "XCODE_INSTALL_TEAM_ID", + description: "The ID of your team if you're in multiple teams", + optional: true, + code_gen_sensitive: true, + default_value: CredentialsManager::AppfileConfig.try_fetch_value(:team_id), + default_value_dynamic: true), + FastlaneCore::ConfigItem.new(key: :download_retry_attempts, + env_name: "XCODE_INSTALL_DOWNLOAD_RETRY_ATTEMPTS", + description: "Number of times the download will be retried in case of failure", + type: Integer, + default_value: 3) + ] + end + + def self.output + [ + ['XCODE_INSTALL_XCODE_PATH', 'The path to the newly installed Xcode'] + ] + end + + def self.return_value + "The path to the newly installed Xcode version" + end + + def self.return_type + :string + end + + def self.authors + ["Krausefx"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcode_install(version: "7.1")' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + "The xcode-install gem, which this action depends on, has been sunset. Please migrate to [xcodes](https://docs.fastlane.tools/actions/xcodes). 
You can find a migration guide here: [xcpretty/xcode-install/MIGRATION.md](https://github.com/xcpretty/xcode-install/blob/master/MIGRATION.md)" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_select.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_select.rb new file mode 100644 index 0000000..3993edf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_select.rb @@ -0,0 +1,68 @@ +module Fastlane + module Actions + # See: https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/xcode-select.1.html + # + # DESCRIPTION + # xcode-select controls the location of the developer directory used by xcrun(1), xcodebuild(1), cc(1), + # and other Xcode and BSD development tools. This also controls the locations that are searched for by + # man(1) for developer tool manpages. + # + # DEVELOPER_DIR + # Overrides the active developer directory. When DEVELOPER_DIR is set, its value will be used + # instead of the system-wide active developer directory. + # + # Note that for historical reason, the developer directory is considered to be the Developer content + # directory inside the Xcode application (for example /Applications/Xcode.app/Contents/Developer). + # You can set the environment variable to either the actual Developer contents directory, or the + # Xcode application directory -- the xcode-select provided shims will automatically convert the + # environment variable into the full Developer content path. + # + class XcodeSelectAction < Action + def self.run(params) + params = nil unless params.kind_of?(Array) + xcode_path = (params || []).first + + # Verify that a param was passed in + UI.user_error!("Path to Xcode application required (e.g. `xcode_select(\"/Applications/Xcode.app\")`)") unless xcode_path.to_s.length > 0 + + # Verify that a path to a directory was passed in + UI.user_error!("Path '#{xcode_path}' doesn't exist") unless Dir.exist?(xcode_path) + + UI.message("Setting Xcode version to #{xcode_path} for all build steps") + + ENV["DEVELOPER_DIR"] = File.join(xcode_path, "/Contents/Developer") + end + + def self.description + "Change the xcode-path to use. Useful for beta versions of Xcode" + end + + def self.details + [ + "Select and build with the Xcode installed at the provided path.", + "Use the `xcodes` action if you want to select an Xcode:", + "- Based on a version specifier or", + "- You don't have known, stable paths, as may happen in a CI environment." 
+ ].join("\n") + end + + def self.author + "dtrenz" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcode_select("/Applications/Xcode-8.3.2.app")' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_server_get_assets.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_server_get_assets.rb new file mode 100644 index 0000000..71dc6ff --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcode_server_get_assets.rb @@ -0,0 +1,312 @@ +module Fastlane + module Actions + module SharedValues + XCODE_SERVER_GET_ASSETS_PATH = :XCODE_SERVER_GET_ASSETS_PATH + XCODE_SERVER_GET_ASSETS_ARCHIVE_PATH = :XCODE_SERVER_GET_ASSETS_ARCHIVE_PATH + end + + class XcodeServerGetAssetsAction < Action + require 'excon' + require 'json' + require 'fileutils' + + def self.run(params) + host = params[:host] + bot_name = params[:bot_name] + integration_number_override = params[:integration_number] + target_folder = params[:target_folder] + keep_all_assets = params[:keep_all_assets] + username = params[:username] + password = params[:password] + trust_self_signed_certs = params[:trust_self_signed_certs] + + # setup (not)trusting self signed certificates. + # it's normal to have a self signed certificate on your Xcode Server + Excon.defaults[:ssl_verify_peer] = !trust_self_signed_certs # for self-signed certificates + + # create Xcode Server config + xcs = XcodeServer.new(host, username, password) + bots = xcs.fetch_all_bots + + UI.important("Fetched #{bots.count} Bots from Xcode Server at #{host}.") + + # pull out names + bot_names = bots.map { |bot| bot['name'] } + + # match the bot name with a found bot, otherwise fail + found_bots = bots.select { |bot| bot['name'] == bot_name } + UI.user_error!("Failed to find a Bot with name #{bot_name} on server #{host}, only available Bots: #{bot_names}") if found_bots.count == 0 + + bot = found_bots[0] + + UI.success("Found Bot with name #{bot_name} with id #{bot['_id']}.") + + # we have our bot, get finished integrations, sorted from newest to oldest + integrations = xcs.fetch_integrations(bot['_id']).select { |i| i['currentStep'] == 'completed' } + UI.user_error!("Failed to find any completed integration for Bot \"#{bot_name}\"") if (integrations || []).count == 0 + + # if no integration number is specified, pick the newest one (this is sorted from newest to oldest) + if integration_number_override + integration = integrations.find { |i| i['number'] == integration_number_override } + UI.user_error!("Specified integration number #{integration_number_override} does not exist.") unless integration + else + integration = integrations.first + end + + # consider: only taking the last successful one? or allow failing tests? warnings? 
+ + UI.important("Using integration #{integration['number']}.") + + # fetch assets for this integration + assets_path = xcs.fetch_assets(integration['_id'], target_folder, self) + UI.user_error!("Failed to fetch assets for integration #{integration['number']}.") unless assets_path + + asset_entries = Dir.entries(assets_path).map { |i| File.join(assets_path, i) } + + UI.success("Successfully downloaded #{asset_entries.count} assets to file #{assets_path}!") + + # now find the archive and unzip it + zipped_archive_path = asset_entries.find { |i| i.end_with?('xcarchive.zip') } + + if zipped_archive_path + + UI.important("Found an archive in the assets folder...") + + archive_file_path = File.basename(zipped_archive_path, File.extname(zipped_archive_path)) + archive_dir_path = File.dirname(zipped_archive_path) + archive_path = File.join(archive_dir_path, archive_file_path) + if File.exist?(archive_path) + # we already have the archive, skip + UI.important("Archive #{archive_path} already exists, not unzipping again...") + else + # unzip the archive + sh("unzip -q \"#{zipped_archive_path}\" -d \"#{archive_dir_path}\"") + end + + # reload asset entries to also contain the xcarchive file + asset_entries = Dir.entries(assets_path).map { |i| File.join(assets_path, i) } + + # optionally delete everything except for the archive + unless keep_all_assets + files_to_delete = asset_entries.select do |i| + File.extname(i) != '.xcarchive' && ![".", ".."].include?(File.basename(i)) + end + + files_to_delete.each do |i| + FileUtils.rm_rf(i) + end + end + + Actions.lane_context[SharedValues::XCODE_SERVER_GET_ASSETS_ARCHIVE_PATH] = archive_path + end + + Actions.lane_context[SharedValues::XCODE_SERVER_GET_ASSETS_PATH] = assets_path + + return assets_path + end + + class XcodeServer + def initialize(host, username, password) + @host = host.start_with?('https://') ? 
host : "https://#{host}" + @username = username + @password = password + end + + def fetch_all_bots + response = get_endpoint('/bots') + UI.user_error!("You are unauthorized to access data on #{@host}, please check that you're passing in a correct username and password.") if response.status == 401 + UI.user_error!("Failed to fetch Bots from Xcode Server at #{@host}, response: #{response.status}: #{response.body}.") if response.status != 200 + JSON.parse(response.body)['results'] + end + + def fetch_integrations(bot_id) + response = get_endpoint("/bots/#{bot_id}/integrations?last=10") + UI.user_error!("Failed to fetch Integrations for Bot #{bot_id} from Xcode Server at #{@host}, response: #{response.status}: #{response.body}") if response.status != 200 + JSON.parse(response.body)['results'] + end + + def fetch_assets(integration_id, target_folder, action) + # create a temp folder and a file, stream the download into it + Dir.mktmpdir do |dir| + temp_file = File.join(dir, "tmp_download.#{rand(1_000_000)}") + f = open(temp_file, 'w') + streamer = lambda do |chunk, remaining_bytes, total_bytes| + if remaining_bytes && total_bytes + UI.important("Downloading: #{100 - (100 * remaining_bytes.to_f / total_bytes.to_f).to_i}%") + else + UI.error(chunk.to_s) + end + f.write(chunk) + end + + response = self.get_endpoint("/integrations/#{integration_id}/assets", streamer) + f.close + + UI.user_error!("Integration doesn't have any assets (it probably never ran).") if response.status == 500 + UI.user_error!("Failed to fetch Assets zip for Integration #{integration_id} from Xcode Server at #{@host}, response: #{response.status}: #{response.body}") if response.status != 200 + + # unzip it, it's a .tar.gz file + out_folder = File.join(dir, "out_#{rand(1_000_000)}") + FileUtils.mkdir_p(out_folder) + + action.sh("cd \"#{out_folder}\"; cat \"#{temp_file}\" | gzip -d | tar -x") + + # then pull the real name from headers + asset_filename = response.headers['Content-Disposition'].split(';')[1].split('=')[1].delete('"') + asset_foldername = asset_filename.split('.')[0] + + # rename the folder in out_folder to asset_foldername + found_folder = Dir.entries(out_folder).select { |item| item != '.' && item != '..' }[0] + + UI.user_error!("Internal error, couldn't find unzipped folder") if found_folder.nil? + + unzipped_folder_temp_name = File.join(out_folder, found_folder) + unzipped_folder = File.join(out_folder, asset_foldername) + + # rename to destination name + FileUtils.mv(unzipped_folder_temp_name, unzipped_folder) + + target_folder = File.absolute_path(target_folder) + + # create target folder if it doesn't exist + FileUtils.mkdir_p(target_folder) + + # and move+rename it to the destination place + FileUtils.cp_r(unzipped_folder, target_folder) + out = File.join(target_folder, asset_foldername) + return out + end + return nil + end + + def headers + require 'base64' + headers = { + 'User-Agent' => 'fastlane-xcode_server_get_assets', # XCS wants user agent. for some API calls. not for others. sigh. + 'X-XCSAPIVersion' => 1 # XCS API version with this API, Xcode needs this otherwise it explodes in a 500 error fire. Currently Xcode 7 Beta 5 is on Version 5. 
+ } + + if @username && @password + userpass = "#{@username}:#{@password}" + headers['Authorization'] = "Basic #{Base64.strict_encode64(userpass)}" + end + + return headers + end + + def get_endpoint(endpoint, response_block = nil) + url = url_for_endpoint(endpoint) + headers = self.headers || {} + + if response_block + response = Excon.get(url, response_block: response_block, headers: headers) + else + response = Excon.get(url, headers: headers) + end + + return response + end + + private + + def url_for_endpoint(endpoint) + "#{@host}:20343/api#{endpoint}" + end + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Downloads Xcode Bot assets like the `.xcarchive` and logs" + end + + def self.details + [ + "This action downloads assets from your Xcode Server Bot (works with Xcode Server using Xcode 6 and 7. By default, this action downloads all assets, unzips them and deletes everything except for the `.xcarchive`.", + "If you'd like to keep all downloaded assets, pass `keep_all_assets: true`.", + "This action returns the path to the downloaded assets folder and puts into shared values the paths to the asset folder and to the `.xcarchive` inside it." + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :host, + env_name: "FL_XCODE_SERVER_GET_ASSETS_HOST", + description: "IP Address/Hostname of Xcode Server", + optional: false), + FastlaneCore::ConfigItem.new(key: :bot_name, + env_name: "FL_XCODE_SERVER_GET_ASSETS_BOT_NAME", + description: "Name of the Bot to pull assets from", + optional: false), + FastlaneCore::ConfigItem.new(key: :integration_number, + env_name: "FL_XCODE_SERVER_GET_ASSETS_INTEGRATION_NUMBER", + description: "Optionally you can override which integration's assets should be downloaded. 
If not provided, the latest integration is used", + type: Integer, + optional: true), + FastlaneCore::ConfigItem.new(key: :username, + env_name: "FL_XCODE_SERVER_GET_ASSETS_USERNAME", + description: "Username for your Xcode Server", + optional: true, + default_value: ""), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_XCODE_SERVER_GET_ASSETS_PASSWORD", + description: "Password for your Xcode Server", + sensitive: true, + optional: true, + default_value: ""), + FastlaneCore::ConfigItem.new(key: :target_folder, + env_name: "FL_XCODE_SERVER_GET_ASSETS_TARGET_FOLDER", + description: "Relative path to a folder into which to download assets", + optional: true, + default_value: './xcs_assets'), + FastlaneCore::ConfigItem.new(key: :keep_all_assets, + env_name: "FL_XCODE_SERVER_GET_ASSETS_KEEP_ALL_ASSETS", + description: "Whether to keep all assets or let the script delete everything except for the .xcarchive", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :trust_self_signed_certs, + env_name: "FL_XCODE_SERVER_GET_ASSETS_TRUST_SELF_SIGNED_CERTS", + description: "Whether to trust self-signed certs on your Xcode Server", + optional: true, + type: Boolean, + default_value: true) + ] + end + + def self.output + [ + ['XCODE_SERVER_GET_ASSETS_PATH', 'Absolute path to the downloaded assets folder'], + ['XCODE_SERVER_GET_ASSETS_ARCHIVE_PATH', 'Absolute path to the downloaded xcarchive file'] + ] + end + + def self.return_type + :array_of_strings + end + + def self.authors + ["czechboy0"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcode_server_get_assets( + host: "10.99.0.59", # Specify Xcode Server\'s Host or IP Address + bot_name: "release-1.3.4" # Specify the particular Bot + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodebuild.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodebuild.rb new file mode 100644 index 0000000..f4ea37c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodebuild.rb @@ -0,0 +1,647 @@ +# rubocop:disable all +module Fastlane + module Actions + module SharedValues + XCODEBUILD_ARCHIVE ||= :XCODEBUILD_ARCHIVE + XCODEBUILD_DERIVED_DATA_PATH = :XCODEBUILD_DERIVED_DATA_PATH + end + + # xcodebuild man page: + # https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/xcodebuild.1.html + + class XcodebuildAction < Action + ARGS_MAP = { + # actions + analyze: "analyze", + archive: "archive", + build: "build", + clean: "clean", + install: "install", + installsrc: "installsrc", + test: "test", + + # parameters + alltargets: "-alltargets", + arch: "-arch", + archive_path: "-archivePath", + configuration: "-configuration", + derivedDataPath: "-derivedDataPath", + destination_timeout: "-destination-timeout", + dry_run: "-dry-run", + enableAddressSanitizer: "-enableAddressSanitizer", + enableThreadSanitizer: "-enableThreadSanitizer", + enableCodeCoverage: "-enableCodeCoverage", + export_archive: "-exportArchive", + export_format: "-exportFormat", + export_installer_identity: "-exportInstallerIdentity", + export_options_plist: "-exportOptionsPlist", + export_path: "-exportPath", + export_profile: "-exportProvisioningProfile", + export_signing_identity: "-exportSigningIdentity", + export_with_original_signing_identity: 
"-exportWithOriginalSigningIdentity", + hide_shell_script_environment: "-hideShellScriptEnvironment", + jobs: "-jobs", + parallelize_targets: "-parallelizeTargets", + project: "-project", + result_bundle_path: "-resultBundlePath", + scheme: "-scheme", + sdk: "-sdk", + skip_unavailable_actions: "-skipUnavailableActions", + target: "-target", + toolchain: "-toolchain", + workspace: "-workspace", + xcconfig: "-xcconfig" + } + + def self.is_supported?(platform) + [:ios, :mac].include? platform + end + + def self.example_code + [ + 'xcodebuild( + archive: true, + archive_path: "./build-dir/MyApp.xcarchive", + scheme: "MyApp", + workspace: "MyApp.xcworkspace" + )' + ] + end + + def self.category + :building + end + + def self.run(params) + unless Helper.test? + UI.user_error!("xcodebuild not installed") if `which xcodebuild`.length == 0 + end + + # The args we will build with + xcodebuild_args = Array[] + + # Supported ENV vars + build_path = ENV["XCODE_BUILD_PATH"] || nil + scheme = ENV["XCODE_SCHEME"] + workspace = ENV["XCODE_WORKSPACE"] + project = ENV["XCODE_PROJECT"] + buildlog_path = ENV["XCODE_BUILDLOG_PATH"] + + # Set derived data path. + params[:derivedDataPath] ||= ENV["XCODE_DERIVED_DATA_PATH"] + Actions.lane_context[SharedValues::XCODEBUILD_DERIVED_DATA_PATH] = params[:derivedDataPath] + + # Append slash to build path, if needed + if build_path && !build_path.end_with?("/") + build_path += "/" + end + + # By default we use xcpretty + raw_buildlog = false + + # By default we don't pass the utf flag + xcpretty_utf = false + + if params + # Operation bools + archiving = params.key? :archive + exporting = params.key? :export_archive + testing = params.key? :test + xcpretty_utf = params[:xcpretty_utf] + + if params.key? :raw_buildlog + raw_buildlog = params[:raw_buildlog] + end + + if exporting + # If not passed, retrieve path from previous xcodebuild call + params[:archive_path] ||= Actions.lane_context[SharedValues::XCODEBUILD_ARCHIVE] + + # If not passed, construct export path from env vars + if params[:export_path].nil? + ipa_filename = scheme ? scheme : File.basename(params[:archive_path], ".*") + params[:export_path] = "#{build_path}#{ipa_filename}" + end + + # Default to ipa as export format + export_format = params[:export_format] || "ipa" + + # Store IPA path for later deploy steps (i.e. Crashlytics) + Actions.lane_context[SharedValues::IPA_OUTPUT_PATH] = params[:export_path] + "." + export_format.downcase + else + # If not passed, check for archive scheme & workspace/project env vars + params[:scheme] ||= scheme + params[:workspace] ||= workspace + params[:project] ||= project + + # If no project or workspace was passed in or set as an environment + # variable, attempt to autodetect the workspace. + if params[:project].to_s.empty? && params[:workspace].to_s.empty? + params[:workspace] = detect_workspace + end + end + + if archiving + # If not passed, construct archive path from env vars + params[:archive_path] ||= "#{build_path}#{params[:scheme]}.xcarchive" + + # Cache path for later xcodebuild calls + Actions.lane_context[SharedValues::XCODEBUILD_ARCHIVE] = params[:archive_path] + end + + if params.key? :enable_address_sanitizer + params[:enableAddressSanitizer] = params[:enable_address_sanitizer] ? 'YES' : 'NO' + end + if params.key? :enable_thread_sanitizer + params[:enableThreadSanitizer] = params[:enable_thread_sanitizer] ? 'YES' : 'NO' + end + if params.key? :enable_code_coverage + params[:enableCodeCoverage] = params[:enable_code_coverage] ? 
'YES' : 'NO' + end + + # Maps parameter hash to CLI args + params = export_options_to_plist(params) + if hash_args = hash_to_args(params) + xcodebuild_args += hash_args + end + + buildlog_path ||= params[:buildlog_path] + end + + # By default we put xcodebuild.log in the Logs folder + buildlog_path ||= File.expand_path("#{FastlaneCore::Helper.buildlog_path}/fastlane/xcbuild/#{Time.now.strftime('%F')}/#{Process.pid}") + + # Joins args into space delimited string + xcodebuild_args = xcodebuild_args.join(" ") + + # Default args + xcpretty_args = [] + + # Formatting style + if params && params[:output_style] + output_style = params[:output_style] + UI.user_error!("Invalid output_style #{output_style}") unless [:standard, :basic].include?(output_style) + else + output_style = :standard + end + + case output_style + when :standard + xcpretty_args << '--color' unless Helper.colors_disabled? + when :basic + xcpretty_args << '--no-utf' + end + + if testing + if params[:reports] + # New report options format + reports = params[:reports].reduce("") do |arguments, report| + report_string = "--report #{report[:report]}" + + if report[:output] + report_string << " --output \"#{report[:output]}\"" + elsif report[:report] == 'junit' + report_string << " --output \"#{build_path}report/report.xml\"" + elsif report[:report] == 'html' + report_string << " --output \"#{build_path}report/report.html\"" + elsif report[:report] == 'json-compilation-database' + report_string << " --output \"#{build_path}report/report.json\"" + end + + if report[:screenshots] + report_string << " --screenshots" + end + + unless arguments == "" + arguments << " " + end + + arguments << report_string + end + + xcpretty_args.push reports + + elsif params[:report_formats] + # Test report file format + report_formats = params[:report_formats].map do |format| + "--report #{format}" + end.sort.join(" ") + + xcpretty_args.push report_formats + + # Save screenshots flag + if params[:report_formats].include?("html") && params[:report_screenshots] + xcpretty_args.push "--screenshots" + end + + xcpretty_args.sort! + + # Test report file path + if params[:report_path] + xcpretty_args.push "--output \"#{params[:report_path]}\"" + elsif build_path + xcpretty_args.push "--output \"#{build_path}report\"" + end + end + end + + # Stdout format + if testing && !archiving + xcpretty_args << (params[:xcpretty_output] ? "--#{params[:xcpretty_output]}" : "--test") + else + xcpretty_args << (params[:xcpretty_output] ? "--#{params[:xcpretty_output]}" : "--simple") + end + + xcpretty_args = xcpretty_args.join(" ") + + xcpretty_command = "" + xcpretty_command = "| xcpretty #{xcpretty_args}" unless raw_buildlog + unless raw_buildlog + xcpretty_command = "#{xcpretty_command} --utf" if xcpretty_utf + end + + pipe_command = "| tee '#{buildlog_path}/xcodebuild.log' #{xcpretty_command}" + + FileUtils.mkdir_p buildlog_path + UI.message("For a more detailed xcodebuild log open #{buildlog_path}/xcodebuild.log") + + output_result = "" + + override_architecture_prefix = params[:xcodebuild_architecture] ? "arch -#{params[:xcodebuild_architecture]} " : "" + # In some cases the simulator is not booting up in time + # One way to solve it is to try to rerun it for one more time + begin + output_result = Actions.sh "set -o pipefail && #{override_architecture_prefix}xcodebuild #{xcodebuild_args} #{pipe_command}" + rescue => ex + exit_status = $?.exitstatus + + raise_error = true + if exit_status.eql? 
65 + iphone_simulator_time_out_error = /iPhoneSimulator: Timed out waiting/ + + if (iphone_simulator_time_out_error =~ ex.message) != nil + raise_error = false + + UI.important("First attempt failed with iPhone Simulator error: #{iphone_simulator_time_out_error.source}") + UI.important("Retrying once more...") + output_result = Actions.sh "set -o pipefail && xcodebuild #{xcodebuild_args} #{pipe_command}" + end + end + + raise ex if raise_error + end + + # If raw_buildlog and some reports had to be created, create xcpretty reports from the build log + if raw_buildlog && xcpretty_args.include?('--report') + output_result = Actions.sh "set -o pipefail && cat '#{buildlog_path}/xcodebuild.log' | xcpretty #{xcpretty_args} > /dev/null" + end + + output_result + end + + def self.export_options_to_plist(hash) + # Extract export options parameters from input options + if hash.has_key?(:export_options_plist) && hash[:export_options_plist].is_a?(Hash) + export_options = hash[:export_options_plist] + + # Normalize some values + export_options[:teamID] = CredentialsManager::AppfileConfig.try_fetch_value(:team_id) if !export_options[:teamID] && CredentialsManager::AppfileConfig.try_fetch_value(:team_id) + export_options[:onDemandResourcesAssetPacksBaseURL] = Addressable::URI.encode(export_options[:onDemandResourcesAssetPacksBaseURL]) if export_options[:onDemandResourcesAssetPacksBaseURL] + if export_options[:manifest] + export_options[:manifest][:appURL] = Addressable::URI.encode(export_options[:manifest][:appURL]) if export_options[:manifest][:appURL] + export_options[:manifest][:displayImageURL] = Addressable::URI.encode(export_options[:manifest][:displayImageURL]) if export_options[:manifest][:displayImageURL] + export_options[:manifest][:fullSizeImageURL] = Addressable::URI.encode(export_options[:manifest][:fullSizeImageURL]) if export_options[:manifest][:fullSizeImageURL] + export_options[:manifest][:assetPackManifestURL] = Addressable::URI.encode(export_options[:manifest][:assetPackManifestURL]) if export_options[:manifest][:assetPackManifestURL] + end + + # Saves options to plist + path = "#{Tempfile.new('exportOptions').path}.plist" + File.write(path, export_options.to_plist) + hash[:export_options_plist] = path + end + hash + end + + def self.hash_to_args(hash) + # Remove nil value params + hash = hash.delete_if { |_, v| v.nil? } + + # Maps nice developer param names to CLI arguments + hash.map do |k, v| + v ||= "" + if arg = ARGS_MAP[k] + value = (v != true && v.to_s.length > 0 ? "\"#{v}\"" : "") + "#{arg} #{value}".strip + elsif k == :build_settings + v.map do |setting, val| + val = clean_build_setting_value(val) + "#{setting}=\"#{val}\"" + end.join(' ') + elsif k == :destination + [*v].collect { |dst| "-destination \"#{dst}\"" }.join(' ') + elsif k == :keychain && v.to_s.length > 0 + # If keychain is specified, append as OTHER_CODE_SIGN_FLAGS + "OTHER_CODE_SIGN_FLAGS=\"--keychain #{v}\"" + elsif k == :xcargs && v.to_s.length > 0 + # Add more xcodebuild arguments + "#{v}" + end + end.compact + end + + # Cleans values for build settings + # Only escaping `$(inherit)` types of values since "sh" + # interprets these as sub-commands instead of passing value into xcodebuild + def self.clean_build_setting_value(value) + value.to_s.gsub('$(', '\\$(') + end + + def self.detect_workspace + workspace = nil + workspaces = Dir.glob("*.xcworkspace") + + if workspaces.length > 1 + UI.important("Multiple workspaces detected.") + end + + unless workspaces.empty? 
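+ # Pick the first detected workspace; the warning above already flags the case where several exist.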
+ workspace = workspaces.first + UI.important("Using workspace \"#{workspace}\"") + end + + return workspace + end + + def self.description + "Use the `xcodebuild` command to build and sign your app" + end + + def self.available_options + [ + ['archive', 'Set to true to build archive'], + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['xcodebuild_architecture', 'Allows setting the architecture that `xcodebuild` is run with, for example to force it to run under Rosetta on an Apple Silicon mac'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + + def self.details + "**Note**: `xcodebuild` is a complex command, so it is recommended to use [_gym_](https://docs.fastlane.tools/actions/gym/) for building your ipa file and [_scan_](https://docs.fastlane.tools/actions/scan/) for testing your app instead." + end + + def self.author + "dtrenz" + end + end + + class XcarchiveAction < Action + def self.run(params) + params_hash = params || {} + params_hash[:archive] = true + XcodebuildAction.run(params_hash) + end + + def self.description + "Archives the project using `xcodebuild`" + end + + def self.example_code + [ + 'xcarchive' + ] + end + + def self.category + :building + end + + def self.author + "dtrenz" + end + + def self.is_supported?(platform) + [:ios, :mac].include? platform + end + + def self.available_options + [ + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['xcodebuild_architecture', 'Allows setting the architecture that `xcodebuild` is run with, for example to force it to run under Rosetta on an Apple Silicon mac'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + end + + class XcbuildAction < Action + def self.run(params) + params_hash = params || {} + params_hash[:build] = true + XcodebuildAction.run(params_hash) + end + + def self.example_code + [ + 'xcbuild' + ] + end + + def self.category + :building + end + + def self.description + "Builds the project using `xcodebuild`" + end + + def self.author + "dtrenz" + end + + def self.is_supported?(platform) + [:ios, :mac].include? platform + end
+ + def self.available_options + [ + ['archive', 'Set to true to build archive'], + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + end + + class XccleanAction < Action + def self.run(params) + params_hash = params || {} + params_hash[:clean] = true + XcodebuildAction.run(params_hash) + end + + def self.description + "Cleans the project using `xcodebuild`" + end + + def self.example_code + [ + 'xcclean' + ] + end + + def self.category + :building + end + + def self.author + "dtrenz" + end + + def self.is_supported?(platform) + [:ios, :mac].include? platform + end + + def self.available_options + [ + ['archive', 'Set to true to build archive'], + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['xcodebuild_architecture', 'Allows setting the architecture that `xcodebuild` is run with, for example to force it to run under Rosetta on an Apple Silicon mac'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + end + + class XcexportAction < Action + def self.run(params) + params_hash = params || {} + params_hash[:export_archive] = true + XcodebuildAction.run(params_hash) + end + + def self.description + "Exports the project using `xcodebuild`" + end + + def self.example_code + [ + 'xcexport' + ] + end + + def self.category + :building + end + + def self.author + "dtrenz" + end + + def self.available_options + [ + ['archive', 'Set to true to build archive'], + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['xcodebuild_architecture', 'Allows setting the architecture that `xcodebuild` is run with, for example to force it to run under Rosetta on an Apple Silicon mac'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + + def self.is_supported?(platform) + [:ios, :mac].include? platform + end + end + + class XctestAction < Action + def self.run(params) + UI.important("Have you seen the new 'scan' tool to run tests? https://docs.fastlane.tools/actions/scan/") + params_hash = params || {} + params_hash[:build] = true + params_hash[:test] = true + + XcodebuildAction.run(params_hash) + end + + def self.example_code + [ + 'xctest( + destination: "name=iPhone 7s,OS=10.0" + )' + ] + end + + def self.category + :building + end + + def self.description + "Runs tests on the given simulator" + end + + def self.available_options + [ + ['archive', 'Set to true to build archive'], + ['archive_path', 'The path to archive to. Must contain `.xcarchive`'], + ['workspace', 'The workspace to use'], + ['scheme', 'The scheme to build'], + ['build_settings', 'Hash of additional build information'], + ['xcargs', 'Pass additional xcodebuild options'], + ['destination', 'The simulator to use, e.g. "name=iPhone 5s,OS=8.1"'], + ['destination_timeout', 'The timeout for connecting to the simulator, in seconds'], + ['enable_code_coverage', 'Turn code coverage on or off when testing. e.g. true|false. Requires Xcode 7+'], + ['output_style', 'Set the output format to one of: :standard (Colored UTF8 output, default), :basic (black & white ASCII output)'], + ['xcodebuild_architecture', 'Allows setting the architecture that `xcodebuild` is run with, for example to force it to run under Rosetta on an Apple Silicon mac'], + ['buildlog_path', 'The path where the xcodebuild.log will be created, by default it is created in ~/Library/Logs/fastlane/xcbuild'], + ['raw_buildlog', 'Set to true to see xcodebuild raw output. Default value is false'], + ['xcpretty_output', 'Specifies the output type for xcpretty, e.g. \'test\' or \'simple\''], + ['xcpretty_utf', 'Specifies xcpretty should use utf8 when reporting builds. This has no effect when raw_buildlog is specified.'] + ] + end + + def self.is_supported?(platform) + [:ios, :mac].include?
platform + end + + def self.author + "dtrenz" + end + end + end +end +# rubocop:enable all diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodes.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodes.rb new file mode 100644 index 0000000..f58716e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcodes.rb @@ -0,0 +1,152 @@ +module Fastlane + module Actions + module SharedValues + XCODES_XCODE_PATH = :XCODES_XCODE_PATH + end + + class XcodesAction < Action + def self.run(params) + binary = params[:binary_path] + xcodes_raw_version = Actions.sh("#{binary} version", log: false) + xcodes_version = Gem::Version.new(xcodes_raw_version) + UI.message("Running xcodes version #{xcodes_version}") + if xcodes_version < Gem::Version.new("1.1.0") + UI.user_error!([ + "xcodes action requires the minimum version of xcodes binary to be v1.1.0.", + "Please update xcodes. If you installed it via Homebrew, this can be done via 'brew upgrade xcodes'" + ].join(" ")) + end + + version = params[:version] + command = [] + command << binary + + if (xcodes_args = params[:xcodes_args]) + command << xcodes_args + Actions.sh(command.join(" ")) + elsif !params[:select_for_current_build_only] + command << "install" + command << "'#{version}'" + command << "--update" if params[:update_list] + command << "--select" + Actions.sh(command.join(" ")) + end + + command = [] + command << binary + command << "installed" + command << "'#{version}'" + + # `installed <version>` will either return the path to the given + # version or fail because the version can't be found. + # + # Store the path if we get one, fail the action otherwise. + xcode_path = Actions.sh(command.join(" ")) do |status, result, sh_command| + formatted_result = result.chomp + + unless status.success? + UI.user_error!("Command `#{sh_command}` failed with status #{status.exitstatus} and message: #{formatted_result}") + end + + formatted_result + end + + # If the command succeeded, `xcode_path` will be something like: + # /Applications/Xcode-14.app + xcode_developer_path = File.join(xcode_path, "/Contents/Developer") + + UI.message("Setting Xcode version '#{version}' at '#{xcode_path}' for all build steps") + ENV["DEVELOPER_DIR"] = xcode_developer_path + Actions.lane_context[SharedValues::XCODES_XCODE_PATH] = xcode_developer_path + return xcode_path + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Make sure a certain version of Xcode is installed, installing it only if needed" + end + + def self.details + [ + "Makes sure a specific version of Xcode is installed. If that's not the case, it will automatically be downloaded by [xcodes](https://github.com/RobotsAndPencils/xcodes).", + "This will make sure to use the correct Xcode version for later actions.", + "Note that this action depends on [xcodes](https://github.com/RobotsAndPencils/xcodes) CLI, so make sure you have it installed in your environment. For the installation guide, see: https://github.com/RobotsAndPencils/xcodes#installation" + ].join("\n") + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_XCODE_VERSION", + description: "The version number of the version of Xcode to install.
Defaults to the value specified in the .xcode-version file", + default_value: Helper::XcodesHelper.read_xcode_version_file, + default_value_dynamic: true, + verify_block: Helper::XcodesHelper::Verify.method(:requirement)), + FastlaneCore::ConfigItem.new(key: :update_list, + env_name: "FL_XCODES_UPDATE_LIST", + description: "Whether the list of available Xcode versions should be updated before running the install command", + type: Boolean, + default_value: true), + FastlaneCore::ConfigItem.new(key: :select_for_current_build_only, + env_name: "FL_XCODES_SELECT_FOR_CURRENT_BUILD_ONLY", + description: [ + "When true, it won't attempt to install an Xcode version, just find the installed Xcode version that best matches the passed version argument, and select it for the current build steps.", + "It doesn't change the global Xcode version (e.g. via 'xcrun xcode-select'), which would require sudo permissions — when this option is true, this action doesn't require sudo permissions" + ].join(" "), + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :binary_path, + env_name: "FL_XCODES_BINARY_PATH", + description: "Where the xcodes binary lives on your system (full path)", + default_value: Helper::XcodesHelper.find_xcodes_binary_path, + default_value_dynamic: true, + verify_block: proc do |value| + UI.user_error!("'xcodes' doesn't seem to be installed. Please follow the installation guide at https://github.com/RobotsAndPencils/xcodes#installation before proceeding") if value.empty? + UI.user_error!("Couldn't find xcodes binary at path '#{value}'") unless File.exist?(value) + end), + FastlaneCore::ConfigItem.new(key: :xcodes_args, + env_name: "FL_XCODES_ARGS", + description: "Pass in xcodes command line arguments directly. When present, other parameters are ignored and only this parameter is used to build the command to be executed", + type: :shell_string, + optional: true) + ] + end + + def self.output + [ + ['XCODES_XCODE_PATH', 'The path to the newly installed Xcode version'] + ] + end + + def self.return_value + "The path to the newly installed Xcode version" + end + + def self.return_type + :string + end + + def self.authors + ["rogerluan"] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcodes(version: "14.1")', + 'xcodes # When missing, the version value defaults to the value specified in the .xcode-version file' + ] + end + + def self.category + :building + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcov.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcov.rb new file mode 100644 index 0000000..2e23629 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcov.rb @@ -0,0 +1,71 @@ +module Fastlane + module Actions + class XcovAction < Action + def self.run(values) + Actions.verify_gem!('xcov') + require 'xcov' + + if values[:xccov_file_direct_path].nil? 
&& (path = Actions.lane_context[SharedValues::SCAN_GENERATED_XCRESULT_PATH]) + UI.verbose("Pulling xcov 'xccov_file_direct_path' from SharedValues::SCAN_GENERATED_XCRESULT_PATH") + values[:xccov_file_direct_path] = path + end + + Xcov::Manager.new(values).run + end + + def self.description + "Nice code coverage reports without hassle" + end + + def self.details + [ + "Create nice code coverage reports and post coverage summaries on Slack *(xcov gem is required)*.", + "More information: [https://github.com/nakiostudio/xcov](https://github.com/nakiostudio/xcov)." + ].join("\n") + end + + def self.author + "nakiostudio" + end + + def self.available_options + return [] unless Helper.mac? + + # We call Gem::Specification.find_by_name in many more places than this, but for right now + # this is the only place we're having trouble. If there are other reports about RubyGems + # 2.6.2 causing problems, we may need to move this code and require it someplace better, + # like fastlane_core + require 'fastlane/core_ext/bundler_monkey_patch' + + begin + Gem::Specification.find_by_name('xcov') + rescue Gem::LoadError + # Catch the missing gem exception and return an empty array + # to avoid unused_options_spec failure + return [] + end + + require 'xcov' + Xcov::Options.available_options + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcov( + workspace: "YourWorkspace.xcworkspace", + scheme: "YourScheme", + output_directory: "xcov_output" + )' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xctool.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xctool.rb new file mode 100644 index 0000000..9db34c9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xctool.rb @@ -0,0 +1,55 @@ +module Fastlane + module Actions + class XctoolAction < Action + def self.run(params) + UI.important("Have you seen the new 'scan' tool to run tests? https://docs.fastlane.tools/actions/scan/") + unless Helper.test? + UI.user_error!("xctool not installed, please install using `brew install xctool`") if `which xctool`.length == 0 + end + + params = [] if params.kind_of?(FastlaneCore::Configuration) + + Actions.sh('xctool ' + params.join(' ')) + end + + def self.description + "Run tests using xctool" + end + + def self.details + [ + "You can run any `xctool` action. This will require having [xctool](https://github.com/facebook/xctool) installed through [Homebrew](http://brew.sh).", + "It is recommended to store the build configuration in the `.xctool-args` file.", + "More information: [https://docs.fastlane.tools/actions/xctool/](https://docs.fastlane.tools/actions/xctool/)." 
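+ # A hedged sketch of the `.xctool-args` convention mentioned above (an assumption, not shipped with this gem): + # the file holds a JSON array of CLI flags, e.g. ["-workspace", "MyApp.xcworkspace", "-scheme", "MyApp"], + # which xctool picks up automatically so a lane can call just `xctool(:test)`.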
+ ].join("\n") + end + + def self.author + "KrauseFx" + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xctool(:test)', + + '# If you prefer to have the build configuration stored in the `Fastfile`: + xctool(:test, [ + "--workspace", "\'AwesomeApp.xcworkspace\'", + "--scheme", "\'Schema Name\'", + "--configuration", "Debug", + "--sdk", "iphonesimulator", + "--arch", "i386" + ].join(" "))' + ] + end + + def self.category + :testing + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcversion.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcversion.rb new file mode 100644 index 0000000..bdcc3fb --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/xcversion.rb @@ -0,0 +1,65 @@ +module Fastlane + module Actions + class XcversionAction < Action + def self.run(params) + Actions.verify_gem!('xcode-install') + + version = params[:version] + + xcode = Helper::XcversionHelper.find_xcode(version) + UI.user_error!("Cannot find an installed Xcode satisfying '#{version}'") if xcode.nil? + + UI.verbose("Found Xcode version #{xcode.version} at #{xcode.path} satisfying requirement #{version}") + UI.message("Setting Xcode version to #{xcode.path} for all build steps") + + ENV["DEVELOPER_DIR"] = File.join(xcode.path, "/Contents/Developer") + end + + def self.description + "Select an Xcode to use by version specifier" + end + + def self.details + [ + "Finds and selects a version of an installed Xcode that best matches the provided [`Gem::Version` requirement specifier](http://www.rubydoc.info/github/rubygems/rubygems/Gem/Version)", + "You can either manually provide a specific version using `version:` or you make use of the `.xcode-version` file." + ].join("\n") + end + + def self.authors + ["oysta", "rogerluan"] + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :version, + env_name: "FL_XCODE_VERSION", + description: "The version of Xcode to select specified as a Gem::Version requirement string (e.g. '~> 7.1.0'). Defaults to the value specified in the .xcode-version file ", + default_value: Helper::XcodesHelper.read_xcode_version_file, + default_value_dynamic: true, + verify_block: Helper::XcodesHelper::Verify.method(:requirement)) + ] + end + + def self.is_supported?(platform) + [:ios, :mac].include?(platform) + end + + def self.example_code + [ + 'xcversion(version: "8.1") # Selects Xcode 8.1.0', + 'xcversion(version: "~> 8.1.0") # Selects the latest installed version from the 8.1.x set', + 'xcversion # When missing, the version value defaults to the value specified in the .xcode-version file' + ] + end + + def self.category + :deprecated + end + + def self.deprecated_notes + "The xcode-install gem, which this action depends on, has been sunset. Please migrate to [xcodes](https://docs.fastlane.tools/actions/xcodes). 
You can find a migration guide here: [xcpretty/xcode-install/MIGRATION.md](https://github.com/xcpretty/xcode-install/blob/master/MIGRATION.md)" + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/zip.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/zip.rb new file mode 100644 index 0000000..5e3bb0d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/actions/zip.rb @@ -0,0 +1,185 @@ +module Fastlane + module Actions + class ZipAction < Action + class Runner + attr_reader :output_path, :path, :verbose, :password, :symlinks, :include, :exclude + + def initialize(params) + @output_path = File.expand_path(params[:output_path] || params[:path]) + @path = params[:path] + @verbose = params[:verbose] + @password = params[:password] + @symlinks = params[:symlinks] + @include = params[:include] || [] + @exclude = params[:exclude] || [] + + @output_path += ".zip" unless @output_path.end_with?(".zip") + end + + def run + UI.message("Compressing #{path}...") + + create_output_dir + run_zip_command + + UI.success("Successfully generated zip file at path '#{output_path}'") + output_path + end + + def create_output_dir + output_dir = File.expand_path("..", output_path) + FileUtils.mkdir_p(output_dir) + end + + def run_zip_command + # The 'zip' command archives relative to the working directory, chdir to produce expected results relative to `path` + Dir.chdir(File.expand_path("..", path)) do + Actions.sh(*zip_command) + end + end + + def zip_command + zip_options = verbose ? "r" : "rq" + zip_options += "y" if symlinks + + command = ["zip", "-#{zip_options}"] + + if password + command << "-P" + command << password + end + + # The zip command is executed from the path's **parent** directory, as a result we use just the basename, which is the file or folder within + basename = File.basename(path) + + command << output_path + command << basename + + unless include.empty? + command << "-i" + command += include.map { |path| File.join(basename, path) } + end + + unless exclude.empty? 
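+ # For example (illustrative values): with path "MyApp.app" and exclude: ["*.log"], + # this appends `-x MyApp.app/*.log`, since patterns must be joined onto the archive's + # basename: the command runs from the path's parent directory (see run_zip_command).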
+ command << "-x" + command += exclude.map { |path| File.join(basename, path) } + end + + command + end + end + + def self.run(params) + Runner.new(params).run + end + + ##################################################### + # @!group Documentation + ##################################################### + + def self.description + "Compress a file or folder to a zip" + end + + def self.available_options + [ + FastlaneCore::ConfigItem.new(key: :path, + env_name: "FL_ZIP_PATH", + description: "Path to the directory or file to be zipped", + verify_block: proc do |value| + path = File.expand_path(value) + UI.user_error!("Couldn't find file/folder at path '#{path}'") unless File.exist?(path) + end), + FastlaneCore::ConfigItem.new(key: :output_path, + env_name: "FL_ZIP_OUTPUT_NAME", + description: "The name of the resulting zip file", + optional: true), + FastlaneCore::ConfigItem.new(key: :verbose, + env_name: "FL_ZIP_VERBOSE", + description: "Enable verbose output of zipped file", + default_value: true, + type: Boolean, + optional: true), + FastlaneCore::ConfigItem.new(key: :password, + env_name: "FL_ZIP_PASSWORD", + description: "Encrypt the contents of the zip archive using a password", + optional: true), + FastlaneCore::ConfigItem.new(key: :symlinks, + env_name: "FL_ZIP_SYMLINKS", + description: "Store symbolic links as such in the zip archive", + optional: true, + type: Boolean, + default_value: false), + FastlaneCore::ConfigItem.new(key: :include, + env_name: "FL_ZIP_INCLUDE", + description: "Array of paths or patterns to include", + optional: true, + type: Array, + default_value: []), + FastlaneCore::ConfigItem.new(key: :exclude, + env_name: "FL_ZIP_EXCLUDE", + description: "Array of paths or patterns to exclude", + optional: true, + type: Array, + default_value: []) + ] + end + + def self.example_code + [ + 'zip', + 'zip( + path: "MyApp.app", + output_path: "Latest.app.zip" + )', + 'zip( + path: "MyApp.app", + output_path: "Latest.app.zip", + verbose: false + )', + 'zip( + path: "MyApp.app", + output_path: "Latest.app.zip", + verbose: false, + symlinks: true + )', + 'zip( + path: "./", + output_path: "Source Code.zip", + exclude: [".git/*"] + )', + 'zip( + path: "./", + output_path: "Swift Code.zip", + include: ["**/*.swift"], + exclude: ["Package.swift", "vendor/*", "Pods/*"] + )' + ] + end + + def self.category + :misc + end + + def self.output + [] + end + + def self.return_value + "The path to the output zip file" + end + + def self.return_type + :string + end + + def self.authors + ["KrauseFx"] + end + + def self.is_supported?(platform) + true + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/auto_complete.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/auto_complete.rb new file mode 100644 index 0000000..333a419 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/auto_complete.rb @@ -0,0 +1,82 @@ +require 'fileutils' + +module Fastlane + # Enable tab auto completion + class AutoComplete + # This method copies the tab auto completion scripts to the user's home folder, + # while optionally adding custom commands for which to enable auto complete + # @param [Array] options An array of all options (e.g. --custom fl) + def self.execute(args, options) + shell = ENV['SHELL'] + + if shell.end_with?("fish") + fish_completions_dir = "~/.config/fish/completions" + + if UI.interactive? 
+ confirm = UI.confirm("This will copy a fish script into #{fish_completions_dir} that provides the command tab completion. If the directory does not exist it will be created. Sound good?") + return unless confirm + end + + fish_completions_dir = File.expand_path(fish_completions_dir) + FileUtils.mkdir_p(fish_completions_dir) + + completion_script_path = File.join(Fastlane::ROOT, 'lib', 'assets', 'completions', 'completion.fish') + final_completion_script_path = File.join(fish_completions_dir, 'fastlane.fish') + + FileUtils.cp(completion_script_path, final_completion_script_path) + + UI.success("Copied! You can now use tab completion for lanes") + else + fastlane_conf_dir = "~/.fastlane" + + if UI.interactive? + confirm = UI.confirm("This will copy a shell script into #{fastlane_conf_dir} that provides the command tab completion. Sound good?") + return unless confirm + end + + # create the ~/.fastlane directory + fastlane_conf_dir = File.expand_path(fastlane_conf_dir) + FileUtils.mkdir_p(fastlane_conf_dir) + + # then copy all of the completions files into it from the gem + completion_script_path = File.join(Fastlane::ROOT, 'lib', 'assets', 'completions') + FileUtils.cp_r(completion_script_path, fastlane_conf_dir) + + custom_commands = options.custom.to_s.split(',') + + Fastlane::SHELLS.each do |shell_name| + open("#{fastlane_conf_dir}/completions/completion.#{shell_name}", 'a') do |file| + default_line_prefix = Helper.bundler? ? "bundle exec " : "" + + file.puts(self.get_auto_complete_line(shell_name, "#{default_line_prefix}fastlane")) + + custom_commands.each do |command| + auto_complete_line = self.get_auto_complete_line(shell_name, command) + + next if auto_complete_line.nil? + + file.puts(auto_complete_line) + end + end + end + + UI.success("Copied! To use auto complete for fastlane, add the following line to your favorite rc file (e.g. ~/.bashrc)") + UI.important(" . ~/.fastlane/completions/completion.sh") + UI.success("Don't forget to source that file in your current shell! 🐚") + end + end + + # Helper to get the auto complete register script line + def self.get_auto_complete_line(shell, command) + if shell == :bash + prefix = "complete -F" + elsif shell == :zsh + prefix = "compctl -K" + else + return nil + end + + return "#{prefix} _fastlane_complete #{command}" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/boolean.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/boolean.rb new file mode 100644 index 0000000..51dc358 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/boolean.rb @@ -0,0 +1,5 @@ +module Fastlane + class Boolean + # Used in config item generation + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/cli_tools_distributor.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/cli_tools_distributor.rb new file mode 100644 index 0000000..18f11e2 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/cli_tools_distributor.rb @@ -0,0 +1,215 @@ +module Fastlane + # This class is responsible for checking the ARGV + # to see if the user wants to launch another fastlane + # tool or fastlane itself + class CLIToolsDistributor + class << self + def running_version_command? + ARGV.include?('-v') || ARGV.include?('--version') + end + + def running_help_command? + ARGV.include?('-h') || ARGV.include?('--help') + end + + def running_init_command? 
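+ # e.g. true for `fastlane init swift` (illustrative invocation); take_off below + # uses this to skip the bundle-exec warning during initial setup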
+ ARGV.include?("init") + end + + def utf8_locale? + (ENV['LANG'] || "").end_with?("UTF-8", "utf8") || (ENV['LC_ALL'] || "").end_with?("UTF-8", "utf8") || (FastlaneCore::CommandExecutor.which('locale') && `locale charmap`.strip == "UTF-8") + end + + def take_off + before_import_time = Time.now + + if ENV["FASTLANE_DISABLE_ANIMATION"].nil? + # Usually in the fastlane code base we use + # + # Helper.show_loading_indicator + # longer_taking_task_here + # Helper.hide_loading_indicator + # + # but in this case we haven't required FastlaneCore yet + # so we'll have to access the raw API for now + require "tty-spinner" + require_fastlane_spinner = TTY::Spinner.new("[:spinner] 🚀 ", format: :dots) + require_fastlane_spinner.auto_spin + + # this might take a long time if there is no Gemfile :( + # That's why we show the loading indicator here also + require "fastlane" + + require_fastlane_spinner.success + else + require "fastlane" + end + + # Loading any .env files before any lanes are called since + # variables like FASTLANE_HIDE_CHANGELOG, SKIP_SLOW_FASTLANE_WARNING + # and FASTLANE_DISABLE_COLORS need to be set early on in execution + load_dot_env + + # We want to avoid printing output other than the version number if we are running `fastlane -v` + unless running_version_command? || running_init_command? + print_bundle_exec_warning(is_slow: (Time.now - before_import_time > 3)) + end + + # Try to check UTF-8 with `locale`, fallback to environment variables + unless utf8_locale? + warn = "WARNING: fastlane requires your locale to be set to UTF-8. To learn more go to https://docs.fastlane.tools/getting-started/ios/setup/#set-up-environment-variables" + UI.error(warn) + at_exit do + # Repeat warning here so users hopefully see it + UI.error(warn) + end + end + + # Needs to go after load_dot_env for variable FASTLANE_SKIP_UPDATE_CHECK + FastlaneCore::UpdateChecker.start_looking_for_update('fastlane') + + # Disabling colors if environment variable set + require 'fastlane_core/ui/disable_colors' if FastlaneCore::Helper.colors_disabled? + + # Set interactive environment variable for spaceship (which can't require fastlane_core) + ENV["FASTLANE_IS_INTERACTIVE"] = FastlaneCore::UI.interactive?.to_s + + ARGV.unshift("spaceship") if ARGV.first == "spaceauth" + tool_name = ARGV.first ? 
ARGV.first.downcase : nil + + tool_name = process_emojis(tool_name) + tool_name = map_aliased_tools(tool_name) + + if tool_name && Fastlane::TOOLS.include?(tool_name.to_sym) && !available_lanes.include?(tool_name.to_sym) + # Triggering a specific tool + # This happens when the user uses things like + # + # fastlane sigh + # fastlane snapshot + # + require tool_name + begin + # First, remove the tool's name from the arguments + # Since it will be parsed by the `commander` at a later point + # and it must not contain the binary name + ARGV.shift + + # Import the CommandsGenerator class, which is used to parse + # the user input + require File.join(tool_name, "commands_generator") + + # Call the tool's CommandsGenerator class and let it do its thing + commands_generator = Object.const_get(tool_name.fastlane_module)::CommandsGenerator + rescue LoadError + # This will only happen if the tool we call here doesn't provide + # a CommandsGenerator class yet + # When we launch this feature, this should never be the case + abort("#{tool_name} can't be called via `fastlane #{tool_name}`, run '#{tool_name}' directly instead".red) + end + + # Some of the tools use other actions so need to load all + # actions before we start the tool generator + # Example: scan uses slack + Fastlane.load_actions + + commands_generator.start + elsif tool_name == "fastlane-credentials" + require 'credentials_manager' + ARGV.shift + CredentialsManager::CLI.new.run + else + # Triggering fastlane to call a lane + require "fastlane/commands_generator" + Fastlane::CommandsGenerator.start + end + ensure + FastlaneCore::UpdateChecker.show_update_status('fastlane', Fastlane::VERSION) + end + + def map_aliased_tools(tool_name) + Fastlane::TOOL_ALIASES[tool_name&.to_sym] || tool_name + end + + # Since loading dotenv should respect additional environments passed using + # --env, we must extract the arguments out of ARGV and process them before + # calling into commander. This is required since the ENV must be configured + # before running any other commands in order to correctly respect variables + # like FASTLANE_HIDE_CHANGELOG and FASTLANE_DISABLE_COLORS + def load_dot_env + env_cl_param = lambda do + index = ARGV.index("--env") + return nil if index.nil? + ARGV.delete_at(index) + + return nil if ARGV[index].nil? + value = ARGV[index] + ARGV.delete_at(index) + + value + end + + require 'fastlane/helper/dotenv_helper' + Fastlane::Helper::DotenvHelper.load_dot_env(env_cl_param.call) + end + + # Since fastlane also supports the rocket and biceps emoji as executable + # we need to map those to the appropriate tools + def process_emojis(tool_name) + return { + "🚀" => "fastlane", + "đŸ’Ē" => "gym" + }[tool_name] || tool_name + end + + def print_bundle_exec_warning(is_slow: false) + return if FastlaneCore::Helper.bundler? # user is already using bundler + return if FastlaneCore::Env.truthy?('SKIP_SLOW_FASTLANE_WARNING') # user disabled the warnings + return if FastlaneCore::Helper.contained_fastlane? 
# user uses the bundled fastlane + + gemfile_path = PluginManager.new.gemfile_path + if gemfile_path + # The user has a Gemfile, but forgot to use `bundle exec` + # Let's tell the user how to use `bundle exec` + # We show this warning no matter if the command is slow or not + UI.important("fastlane detected a Gemfile in the current directory") + UI.important("However, it seems like you didn't use `bundle exec`") + UI.important("To launch fastlane faster, please use") + UI.message("") + UI.command "bundle exec fastlane #{ARGV.join(' ')}" + UI.message("") + elsif is_slow + # fastlane is slow and there is no Gemfile + # Let's tell the user how to use `gem cleanup` and how to + # start using a Gemfile + UI.important("Seems like launching fastlane takes a while - please run") + UI.message("") + UI.command "[sudo] gem cleanup" + UI.message("") + UI.important("to uninstall outdated gems and make fastlane launch faster") + UI.important("Alternatively it's recommended to start using a Gemfile to lock your dependencies") + UI.important("To get started with a Gemfile, run") + UI.message("") + UI.command "bundle init" + UI.command "echo 'gem \"fastlane\"' >> Gemfile" + UI.command "bundle install" + UI.message("") + UI.important("After creating the Gemfile and Gemfile.lock, commit those files into version control") + end + UI.important("Get started using a Gemfile for fastlane https://docs.fastlane.tools/getting-started/ios/setup/#use-a-gemfile") + end + + # Returns an array of symbols for the available lanes for the Fastfile + # This doesn't actually use the Fastfile parser, but only + # the available lanes. This way it's much faster, which + # is very important in this case, since it will be executed + # every time one of the tools is launched + # Use this only if performance is :key: + def available_lanes + fastfile_path = FastlaneCore::FastlaneFolder.fastfile_path + return [] if fastfile_path.nil? + output = `cat #{fastfile_path.shellescape} | grep \"^\s*lane \:\" | awk -F ':' '{print $2}' | awk -F ' ' '{print $1}'` + return output.strip.split(" ").collect(&:to_sym) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/command_line_handler.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/command_line_handler.rb new file mode 100644 index 0000000..c12178a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/command_line_handler.rb @@ -0,0 +1,49 @@ +module Fastlane + class CommandLineHandler + # This method handles command line inputs and properly transforms them to a usable format + # @param [Array] args An array of all arguments (not options) + # @param [Hash] options A hash of all options (e.g. --env NAME) + def self.handle(args, options) + lane_parameters = {} # the parameters we'll pass to the lane + platform_lane_info = [] # the part that's responsible for the lane/platform definition + args.each do |current| + if current.include?(":") # that's a key/value which we want to pass to the lane + key, value = current.split(":", 2) + UI.user_error!("Please pass values like this: key:value") unless key.length > 0 + value = convert_value(value) + UI.verbose("Using #{key}: #{value}") + lane_parameters[key.to_sym] = value + else + platform_lane_info << current + end + end + + platform = nil + lane = platform_lane_info[1] + if lane + platform = platform_lane_info[0] + else + lane = platform_lane_info[0] + end + + dot_env = Helper.test? ? nil : options.env + + if FastlaneCore::FastlaneFolder.swift? 
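+ # Illustrative trace of the parsing above (hypothetical input): `fastlane ios beta submit:false` + # arrives here with platform = "ios", lane = "beta", and lane_parameters = { submit: false }, + # the "false" string having been coerced by convert_value below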
+ disable_runner_upgrades = options.disable_runner_upgrades || false + swift_server_port = options.swift_server_port + Fastlane::SwiftLaneManager.cruise_lane(lane, lane_parameters, dot_env, disable_runner_upgrades: disable_runner_upgrades, swift_server_port: swift_server_port) + else + Fastlane::LaneManager.cruise_lane(platform, lane, lane_parameters, dot_env) + end + end + + # Helper to convert into the right data type + def self.convert_value(value) + return true if value == 'true' || value == 'yes' + return false if value == 'false' || value == 'no' + + # Default case: + return value + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/commands_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/commands_generator.rb new file mode 100644 index 0000000..d3d92d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/commands_generator.rb @@ -0,0 +1,371 @@ +require 'commander' +require 'fastlane/new_action' +require 'fastlane_core/ui/help_formatter' + +HighLine.track_eof = false + +module Fastlane + class CommandsGenerator + include Commander::Methods + + def self.start + # at this point we haven't yet loaded commander, + # but we do want to log verbose information in the PluginManager + FastlaneCore::Globals.verbose = true if ARGV.include?("--verbose") + if ARGV.include?("--capture_output") + FastlaneCore::Globals.verbose = true + FastlaneCore::Globals.capture_output = true + end + FastlaneCore::Swag.show_loader + + # has to be checked here - in case we want to troubleshoot plugin related issues + if ARGV.include?("--troubleshoot") + self.confirm_troubleshoot + end + + if FastlaneCore::Globals.capture_output? + # Trace mode is enabled + # redirect STDOUT and STDERR + out_channel = StringIO.new + $stdout = out_channel + $stderr = out_channel + end + + Fastlane.load_actions + FastlaneCore::Swag.stop_loader + # do not use "include", as "env" may appear somewhere else on the command line; check index 0 explicitly + unless ARGV[0] == "env" || CLIToolsDistributor.running_version_command? || CLIToolsDistributor.running_help_command? + # *after* loading the plugins + hide_plugins_table = FastlaneCore::Env.truthy?("FASTLANE_HIDE_PLUGINS_TABLE") + Fastlane.plugin_manager.load_plugins(print_table: !hide_plugins_table) + Fastlane::PluginUpdateManager.start_looking_for_updates + end + self.new.run + ensure + Fastlane::PluginUpdateManager.show_update_status + if FastlaneCore::Globals.capture_output? + if $stdout.respond_to?(:string) + # Sometimes you can get NoMethodError: undefined method `string' for #<IO:<STDOUT>> when running with FastlaneRunner (swift) + FastlaneCore::Globals.captured_output = Helper.strip_ansi_colors($stdout.string) + end + $stdout = STDOUT + $stderr = STDERR + + require "fastlane/environment_printer" + Fastlane::EnvironmentPrinter.output + end + end + + def self.confirm_troubleshoot + if Helper.ci? 
+ UI.error("---") + UI.error("You are trying to use '--troubleshoot' on CI") + UI.error("this option is not usable in CI, as it is insecure") + UI.error("---") + UI.user_error!("Do not use --troubleshoot in CI") + end + # maybe already set by 'start' + return if $troubleshoot + UI.error("---") + UI.error("Are you sure you want to enable '--troubleshoot'?") + UI.error("All commands will run in full unfiltered output mode.") + UI.error("Sensitive data, like passwords, could be printed to the log.") + UI.error("---") + if UI.confirm("Do you really want to enable --troubleshoot") + $troubleshoot = true + end + end + + def run + program :name, 'fastlane' + program :version, Fastlane::VERSION + program :description, [ + "CLI for 'fastlane' - #{Fastlane::DESCRIPTION}\n", + "\tRun using `fastlane [platform] [lane_name]`", + "\tTo pass values to the lanes use `fastlane [platform] [lane_name] key:value key2:value2`" + ].join("\n") + program :help, 'Author', 'Felix Krause <fastlane@krausefx.com>' + program :help, 'Website', 'https://fastlane.tools' + program :help, 'GitHub', 'https://github.com/fastlane/fastlane' + program :help_formatter, FastlaneCore::HelpFormatter + + global_option('--verbose') { FastlaneCore::Globals.verbose = true } + global_option('--capture_output', 'Captures the output of the current run, and generates a markdown issue template') do + FastlaneCore::Globals.capture_output = true + FastlaneCore::Globals.verbose = true + end + global_option('--troubleshoot', 'Enables extended verbose mode. Use with caution, as this even includes ALL sensitive data. Cannot be used on CI.') + global_option('--env STRING[,STRING2]', String, 'Add environment(s) to use with `dotenv`') + + always_trace! + + command :trigger do |c| + c.syntax = 'fastlane [lane]' + c.description = 'Run a specific lane. Pass the lane name and optionally the platform first.' + c.option('--disable_runner_upgrades', 'Prevents fastlane from attempting to update FastlaneRunner swift project') + c.option('--swift_server_port INT', 'Set specific port to communicate between fastlane and FastlaneRunner') + + c.action do |args, options| + if ensure_fastfile + Fastlane::CommandLineHandler.handle(args, options) + end + end + end + + command :init do |c| + c.syntax = 'fastlane init' + c.description = 'Helps you with your initial fastlane setup' + + c.option('-u STRING', '--user STRING', String, 'iOS projects only: Your Apple ID') + + c.action do |args, options| + is_swift_fastfile = args.include?("swift") + Fastlane::Setup.start(user: options.user, is_swift_fastfile: is_swift_fastfile) + end + end + + # Creating alias for mapping "swift init" to "init swift" + alias_command(:'swift init', :init, 'swift') + + command :new_action do |c| + c.syntax = 'fastlane new_action' + c.description = 'Create a new custom action for fastlane.' 
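+ # Hypothetical usage sketch: `fastlane new_action --name my_action` generates a + # template action file under the project's fastlane/actions/ directory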
+ + c.option('--name STRING', String, 'Name of your new action') + + c.action do |args, options| + Fastlane::NewAction.run(new_action_name: options.name) + end + end + + command :socket_server do |c| + c.syntax = 'fastlane start_server' + c.description = 'Starts local socket server and enables only a single local connection' + c.option('-s', '--stay_alive', 'Keeps socket server up even after error or disconnects, requires CTRL-C to kill.') + c.option('-c seconds', '--connection_timeout', 'Sets connection established timeout') + c.option('-p port', '--port', "Sets the port on localhost for the socket connection") + c.action do |args, options| + default_connection_timeout = 5 + stay_alive = options.stay_alive || false + connection_timeout = options.connection_timeout || default_connection_timeout + port = options.port || 2000 + + if stay_alive && options.connection_timeout.nil? + UI.important("stay_alive is set, but the connection timeout is not, this will give you #{default_connection_timeout} seconds to (re)connect") + end + + require 'fastlane/server/socket_server' + require 'fastlane/server/socket_server_action_command_executor' + + command_executor = SocketServerActionCommandExecutor.new + server = Fastlane::SocketServer.new( + command_executor: command_executor, + connection_timeout: connection_timeout, + stay_alive: stay_alive, + port: port + ) + result = server.start + UI.success("Result: #{result}") if result + end + end + + command :lanes do |c| + c.syntax = 'fastlane lanes' + c.description = 'Lists all available lanes and shows their description' + c.option("-j", "--json", "Output the lanes in JSON instead of text") + + c.action do |args, options| + if options.json || ensure_fastfile + require 'fastlane/lane_list' + path = FastlaneCore::FastlaneFolder.fastfile_path + + if options.json + Fastlane::LaneList.output_json(path) + else + Fastlane::LaneList.output(path) + end + end + end + end + + command :list do |c| + c.syntax = 'fastlane list' + c.description = 'Lists all available lanes without description' + c.action do |args, options| + if ensure_fastfile + ff = Fastlane::FastFile.new(FastlaneCore::FastlaneFolder.fastfile_path) + UI.message("Available lanes:") + ff.runner.available_lanes.each do |lane| + UI.message("- #{lane}") + end + UI.important("Execute using `fastlane [lane_name]`") + end + end + end + + command :docs do |c| + c.syntax = 'fastlane docs' + c.description = 'Generate a markdown based documentation based on the Fastfile' + c.option('-f', '--force', 'Overwrite the existing README.md in the ./fastlane folder') + + c.action do |args, options| + if ensure_fastfile + ff = Fastlane::FastFile.new(File.join(FastlaneCore::FastlaneFolder.path || '.', 'Fastfile')) + UI.message("You don't need to run `fastlane docs` manually any more, this will be done automatically for you when running a lane.") + Fastlane::DocsGenerator.run(ff) + end + end + end + + command :run do |c| + c.syntax = 'fastlane run [action] key1:value1 key2:value2' + c.description = 'Run a fastlane one-off action without a full lane' + + c.action do |args, options| + require 'fastlane/one_off' + result = Fastlane::OneOff.execute(args: args) + UI.success("Result: #{result}") if result + end + end + + command :actions do |c| + c.syntax = 'fastlane actions' + c.description = 'Lists all available fastlane actions' + + c.option('--platform STRING', String, 'Only show actions available on the given platform') + + c.action do |args, options| + require 'fastlane/documentation/actions_list' + 
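+ # e.g. `fastlane actions --platform android` (illustrative invocation) lists only + # actions whose is_supported?(:android) returns true; a bare `fastlane actions` lists all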
Fastlane::ActionsList.run(filter: args.first, platform: options.platform) + end + end + + command :action do |c| + c.syntax = 'fastlane action [tool_name]' + c.description = 'Shows more information for a specific command' + c.action do |args, options| + require 'fastlane/documentation/actions_list' + Fastlane::ActionsList.run(filter: args.first) + end + end + + command :enable_auto_complete do |c| + c.syntax = 'fastlane enable_auto_complete' + c.description = 'Enable tab auto completion' + c.option('-c STRING[,STRING2]', '--custom STRING[,STRING2]', String, 'Add custom command(s) for which tab auto complete should be enabled too') + + c.action do |args, options| + require 'fastlane/auto_complete' + Fastlane::AutoComplete.execute(args, options) + end + end + + command :env do |c| + c.syntax = 'fastlane env' + c.description = 'Print your fastlane environment, use this when you submit an issue on GitHub' + c.action do |args, options| + require "fastlane/environment_printer" + Fastlane::EnvironmentPrinter.output + end + end + + command :update_fastlane do |c| + c.syntax = 'fastlane update_fastlane' + c.description = 'Update fastlane to the latest release' + c.action do |args, options| + require 'fastlane/one_off' + Fastlane::OneOff.run(action: "update_fastlane", parameters: {}) + end + end + + ##################################################### + # @!group Plugins + ##################################################### + + command :new_plugin do |c| + c.syntax = 'fastlane new_plugin [plugin_name]' + c.description = 'Create a new plugin that can be used with fastlane' + + c.action do |args, options| + PluginGenerator.new.generate(args.shift) + end + end + + command :add_plugin do |c| + c.syntax = 'fastlane add_plugin [plugin_name]' + c.description = 'Add a new plugin to your fastlane setup' + + c.action do |args, options| + args << UI.input("Enter the name of the plugin to install: ") if args.empty? + args.each do |plugin_name| + Fastlane.plugin_manager.add_dependency(plugin_name) + end + + UI.important("Make sure to commit your Gemfile, Gemfile.lock and #{PluginManager::PLUGINFILE_NAME} to version control") + Fastlane.plugin_manager.install_dependencies! + end + end + + command :install_plugins do |c| + c.syntax = 'fastlane install_plugins' + c.description = 'Install all plugins for this project' + + c.action do |args, options| + Fastlane.plugin_manager.install_dependencies! + end + end + + command :update_plugins do |c| + c.syntax = 'fastlane update_plugins' + c.description = 'Update all plugin dependencies' + + c.action do |args, options| + Fastlane.plugin_manager.update_dependencies! + end + end + + command :search_plugins do |c| + c.syntax = 'fastlane search_plugins [search_query]' + c.description = 'Search for plugins, search query is optional' + + c.action do |args, options| + search_query = args.last + PluginSearch.print_plugins(search_query: search_query) + end + end + + ##################################################### + # @!group Swift + ##################################################### + + if FastlaneCore::FastlaneFolder.swift? 
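+ # Only registered when a Fastfile.swift setup is detected; the generators below + # rebuild the generated Swift API files under the fastlane/swift folder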
+ command :generate_swift do |c| + c.syntax = 'fastlane generate_swift' + c.description = 'Generates additional Swift APIs for plugins and local actions' + + c.action do |args, options| + SwiftActionsAPIGenerator.new(target_output_path: FastlaneCore::FastlaneFolder.swift_folder_path).generate_swift + SwiftPluginsAPIGenerator.new(target_output_path: FastlaneCore::FastlaneFolder.swift_folder_path).generate_swift + end + end + end + + default_command(:trigger) + run! + end + + # Makes sure a Fastfile is available + # Shows an appropriate message to the user + # if that's not the case + # return true if the Fastfile is available + def ensure_fastfile + return true if FastlaneCore::FastlaneFolder.setup? + + create = UI.confirm('Could not find fastlane in current directory. Make sure to have your fastlane configuration files inside a folder called "fastlane". Would you like to set fastlane up?') + if create + Fastlane::Setup.start + end + return false + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/configuration_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/configuration_helper.rb new file mode 100644 index 0000000..d8ffe90 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/configuration_helper.rb @@ -0,0 +1,26 @@ +module Fastlane + class ConfigurationHelper + def self.parse(action, params) + first_element = (action.available_options || []).first + + if first_element && first_element.kind_of?(FastlaneCore::ConfigItem) + # default use case + return FastlaneCore::Configuration.create(action.available_options, params) + elsif first_element + UI.error("Old configuration format for action '#{action}'") if Helper.test? + return params + else + + # No parameters... we still need the configuration object array + FastlaneCore::Configuration.create(action.available_options, {}) + + end + rescue => ex + if action.respond_to?(:action_name) + UI.error("You passed invalid parameters to '#{action.action_name}'.") + UI.error("Check out the error below and available options by running `fastlane action #{action.action_name}`") + end + raise ex + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/core_ext/bundler_monkey_patch.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/core_ext/bundler_monkey_patch.rb new file mode 100644 index 0000000..0f33c75 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/core_ext/bundler_monkey_patch.rb @@ -0,0 +1,14 @@ +# https://github.com/bundler/bundler/issues/4368 +# +# There is an issue with RubyGems 2.6.2 where it attempts to call Bundler::SpecSet#size, which doesn't exist. +# If a gem is not installed, a `Gem::Specification.find_by_name` call will trigger this problem. 
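+# A minimal sketch of the intended effect (illustrative, assuming the guard below matches): +# Bundler::SpecSet.new([]).size #=> 0, via the new alias to #length, +# so RubyGems 2.6.2 code paths that call #size stop raising NoMethodError.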
+if Object.const_defined?(:Bundler) && + Bundler.const_defined?(:SpecSet) && + Bundler::SpecSet.instance_methods.include?(:length) && + !Bundler::SpecSet.instance_methods.include?(:size) + module Bundler + class SpecSet + alias size length + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/actions_list.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/actions_list.rb new file mode 100644 index 0000000..738478e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/actions_list.rb @@ -0,0 +1,214 @@ +module Fastlane + class ActionsList + def self.run(filter: nil, platform: nil) + require 'terminal-table' + if filter + show_details(filter: filter) + else + print_all(platform: platform) + end + end + + def self.print_all(platform: nil) + rows = [] + all_actions(platform) do |action, name| + current = [] + + if Fastlane::Actions.is_deprecated?(action) + current << "#{name} (DEPRECATED)".deprecated + else + current << name.yellow + end + + if action < Action + current << action.description.to_s.remove_markdown if action.description + + authors = Array(action.author || action.authors) + current << authors.first.green if authors.count == 1 + current << "Multiple".green if authors.count > 1 + else + UI.error(action_subclass_error(name)) + current << "Please update action file".red + current << ' ' + end + rows << current + end + + puts(Terminal::Table.new( + title: "Available fastlane actions".green, + headings: ['Action', 'Description', 'Author'], + rows: FastlaneCore::PrintTable.transform_output(rows) + )) + puts(" Platform filter: #{platform}".magenta) if platform + puts(" Total of #{rows.count} actions") + + puts("\nGet more information for one specific action using `fastlane action [name]`\n".green) + end + + def self.show_details(filter: nil) + puts("Loading documentation for #{filter}:".green) + puts("") + + action = find_action_named(filter) + + if action + unless action < Action + UI.user_error!(action_subclass_error(filter)) + end + + print_summary(action, filter) + print_options(action, filter) + print_output_variables(action, filter) + print_return_value(action, filter) + + if Fastlane::Actions.is_deprecated?(action) + puts("==========================================".deprecated) + puts("This action (#{filter}) is deprecated".deprecated) + puts(action.deprecated_notes.to_s.remove_markdown.deprecated) if action.deprecated_notes + puts("==========================================\n".deprecated) + end + + puts("More information can be found on https://docs.fastlane.tools/actions/#{filter}") + puts("") + else + puts("Couldn't find action for the given filter.".red) + puts("==========================================\n".red) + + print_all # show all available actions instead + print_suggestions(filter) + end + end + + def self.print_suggestions(filter) + if !filter.nil? && filter.length > 1 + action_names = [] + all_actions(nil) do |action_ref, action_name| + action_names << action_name + end + + corrections = [] + + if defined?(DidYouMean::SpellChecker) + spell_checker = DidYouMean::SpellChecker.new(dictionary: action_names) + corrections << spell_checker.correct(filter).compact + end + + corrections << action_names.select { |name| name.include?(filter) } + + puts("Did you mean: #{corrections.flatten.uniq.join(', ')}?".green) unless corrections.flatten.empty? 
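+ # e.g. a mistyped filter like "slcak" (hypothetical) would suggest "slack" here, + # combining DidYouMean spell-check results with plain substring matches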
+ end + end + + def self.action_subclass_error(name) + "Please update your action '#{name}' to be a subclass of `Action` by adding ` < Action` after your class name." + end + + def self.print_summary(action, name) + rows = [] + + if action.description + description = action.description.to_s.remove_markdown + rows << [description] + rows << [' '] + end + + if action.details + details = action.details.to_s.remove_markdown + details.split("\n").each do |detail| + row = detail.empty? ? ' ' : detail + rows << [row] + end + + rows << [' '] + end + + authors = Array(action.author || action.authors) + rows << ["Created by #{authors.join(', ').green}"] unless authors.empty? + + puts(Terminal::Table.new(title: name.green, rows: FastlaneCore::PrintTable.transform_output(rows))) + puts("") + end + + def self.print_options(action, name) + options = parse_options(action.available_options) if action.available_options + + if options + puts(Terminal::Table.new( + title: "#{name} Options".green, + headings: ['Key', 'Description', 'Env Var(s)', 'Default'], + rows: FastlaneCore::PrintTable.transform_output(options) + )) + else + puts("No available options".yellow) + end + puts("* = default value is dependent on the user's system") + puts("") + end + + def self.print_output_variables(action, name) + output = action.output + return if output.nil? || output.empty? + + puts(Terminal::Table.new( + title: "#{name} Output Variables".green, + headings: ['Key', 'Description'], + rows: FastlaneCore::PrintTable.transform_output(output.map { |key, desc| [key.yellow, desc] }) + )) + puts("Access the output values using `lane_context[SharedValues::VARIABLE_NAME]`") + puts("") + end + + def self.print_return_value(action, name) + return unless action.return_value + + puts(Terminal::Table.new(title: "#{name} Return Value".green, + rows: FastlaneCore::PrintTable.transform_output([[action.return_value]]))) + puts("") + end + + # Iterates through all available actions and yields from there + def self.all_actions(platform = nil) + action_symbols = Fastlane::Actions.constants.select { |c| Fastlane::Actions.const_get(c).kind_of?(Class) && c != :TestSampleCodeAction } + action_symbols.sort.each do |symbol| + action = Fastlane::Actions.const_get(symbol) + + # We allow classes that don't respond to is_supported? to come through because we want to list + # them as broken actions in the table, regardless of platform specification + next if platform && action.respond_to?(:is_supported?) && !action.is_supported?(platform.to_sym) + + name = symbol.to_s.gsub(/Action$/, '').fastlane_underscore + yield(action, name) + end + end + + def self.find_action_named(name) + all_actions do |action, action_name| + return action if action_name == name + end + + nil + end + + # Helper: + def self.parse_options(options, fill_all = true) + rows = [] + rows << [options] if options.kind_of?(String) + + if options.kind_of?(Array) + options.each do |current| + if current.kind_of?(FastlaneCore::ConfigItem) + rows << [current.key.to_s.yellow, current.deprecated ? current.description.red : current.description, current.env_names.join(", "), current.help_default_value] + elsif current.kind_of?(Array) + # Legacy actions that don't use the new config manager + UI.user_error!("Invalid number of elements in this row: #{current}. 
Must be 2 or 3") unless [2, 3].include?(current.count) + rows << current + rows.last[0] = rows.last.first.yellow # color it yellow :) + rows.last << nil while fill_all && rows.last.count < 4 # to have a nice border in the table + end + end + end + + rows + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/docs_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/docs_generator.rb new file mode 100644 index 0000000..0062708 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/docs_generator.rb @@ -0,0 +1,94 @@ +module Fastlane + class DocsGenerator + def self.run(ff, output_path = nil) + output_path ||= File.join(FastlaneCore::FastlaneFolder.path || '.', 'README.md') + + output = ["fastlane documentation"] + output << "----" + output << "" + + output << "# Installation" + output << "" + output << "Make sure you have the latest version of the Xcode command line tools installed:" + output << "" + output << "```sh" + output << "xcode-select --install" + output << "```" + output << "" + output << "For _fastlane_ installation instructions, see [Installing _fastlane_](https://docs.fastlane.tools/#installing-fastlane)" + output << "" + output << "# Available Actions" + + all_keys = ff.runner.lanes.keys.reject(&:nil?) + all_keys.unshift(nil) # because we want root elements on top. always! They have key nil + + all_keys.each do |platform| + lanes = ff.runner.lanes[platform] + + if lanes.nil? || lanes.empty? || lanes.all? { |_, lane| lane.is_private } + next + end + + if platform + output << "" + output << "## #{formatted_platform(platform)}" + end + + lanes.each do |lane_name, lane| + next if lane.is_private + output << render(platform, lane_name, lane.description.join("\n\n")) + end + + output << "" + output << "----" + output << "" + end + + output << "This README.md is auto-generated and will be re-generated every time [_fastlane_](https://fastlane.tools) is run." + output << "" + output << "More information about _fastlane_ can be found on [fastlane.tools](https://fastlane.tools)." + output << "" + output << "The documentation of _fastlane_ can be found on [docs.fastlane.tools](https://docs.fastlane.tools)." + output << "" + + begin + File.write(output_path, output.join("\n")) + UI.success("Successfully generated documentation at path '#{File.expand_path(output_path)}'") if FastlaneCore::Globals.verbose? 
+ rescue => ex + UI.error(ex) + UI.error("Couldn't save fastlane documentation at path '#{File.expand_path(output_path)}', make sure you have write access to the containing directory.") + end + end + + ##################################################### + # @!group Helper + ##################################################### + + def self.formatted_platform(pl) + pl = pl.to_s + return "iOS" if pl == 'ios' + return "Mac" if pl == 'mac' + return "Android" if pl == 'android' + + return pl + end + + # @param platform [String] + # @param lane [Fastlane::Lane] + # @param description [String] + def self.render(platform, lane, description) + full_name = [platform, lane].reject(&:nil?).join(' ') + + output = [] + output << "" + output << "### #{full_name}" + output << "" + output << "```sh" + output << "[bundle exec] fastlane #{full_name}" + output << "```" + output << "" + output << description + output + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/markdown_docs_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/markdown_docs_generator.rb new file mode 100644 index 0000000..f23aa4c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/documentation/markdown_docs_generator.rb @@ -0,0 +1,234 @@ +module Fastlane + class MarkdownDocsGenerator + attr_accessor :categories + + def initialize + require 'fastlane' + require 'fastlane/documentation/actions_list' + Fastlane.load_actions + + self.work + end + + def work + fill_built_in_actions + end + + def fill_built_in_actions + self.categories = {} + + Fastlane::Action::AVAILABLE_CATEGORIES.each { |a| self.categories[readable_category_name(a)] = {} } + + # Fill categories with all built-in actions + ActionsList.all_actions do |action| + readable = readable_category_name(action.category) + + if self.categories[readable].kind_of?(Hash) + self.categories[readable][number_of_launches_for_action(action.action_name)] = action + else + UI.error("Action '#{action.name}' doesn't contain category information... skipping") + end + end + end + + def number_of_launches_for_action(action_name) + found = all_actions_from_enhancer.find { |c| c['action'] == action_name.to_s } + + return found["index"] if found + return 10_000 + rand # new actions that we've never tracked before will be shown at the bottom of the page, need `rand` to not overwrite them + end + + def all_actions_from_enhancer + require 'json' + @_launches ||= JSON.parse(File.read(File.join(Fastlane::ROOT, "assets/action_ranking.json"))) # root because we're in a temporary directory here + end + + def actions_path + "lib/fastlane/actions/" + end + + def where_is(klass) + # Gets all source files for action + methods = klass.methods(false).map { |m| klass.method(m) } + source_files = methods + .map(&:source_location) + .compact + .map { |(file, line)| file } + .uniq + + # Return file or error if multiples + if source_files.size == 1 + return source_files.first + else + UI.crash!("Multiple source files were found for action `#{klass}`") + end + end + + def filename_for_action(action) + absolute_path = where_is(action) + filename = File.basename(absolute_path) + + path = File.join(Fastlane::ROOT, actions_path, filename) + unless File.exist?(path) + UI.error("Action '#{action.name}' not found in root fastlane project... 
skipping") + UI.verbose("Action '#{action.name}' found at #{path}") + return nil + end + filename + end + + def custom_action_docs_path + "lib/fastlane/actions/docs/" + end + + def load_custom_action_md(action) + # check if there is a custom detail view in markdown available in the fastlane code base + custom_file_location = File.join(Fastlane::ROOT, custom_action_docs_path, "#{action.action_name}.md") + if File.exist?(custom_file_location) + UI.verbose("Using custom md file for action #{action.action_name}") + return File.read(custom_file_location) + end + return load_custom_action_md_erb(action) + end + + def load_custom_action_md_erb(action) + # check if there is a custom detail view as markdown ERB available in the fastlane code base + custom_file_location = File.join(Fastlane::ROOT, custom_action_docs_path, "#{action.action_name}.md.erb") + if File.exist?(custom_file_location) + UI.verbose("Using custom md.erb file for action #{action.action_name}") + + result = ERB.new(File.read(custom_file_location), 0, '-').result(binding) # https://web.archive.org/web/20160430190141/www.rrn.dk/rubys-erb-templating-system + + return result + end + return nil + end + + def actions_md_contents + action_mds = {} + + ActionsList.all_actions do |action| + @action = action + @action_filename = filename_for_action(action) + + unless @action_filename + next + end + + @custom_content = load_custom_action_md(action) + + if action.superclass != Fastlane::Action + @custom_content ||= load_custom_action_md(action.superclass) + end + + template = File.join(Fastlane::ROOT, "lib/assets/ActionDetails.md.erb") + result = ERB.new(File.read(template), 0, '-').result(binding) + + action_mds[action.action_name] = result + end + + return action_mds + end + + def generate!(target_path: nil) + require 'yaml' + FileUtils.mkdir_p(target_path) + docs_dir = File.join(target_path, "docs") + generated_actions_dir = File.join("generated", "actions") + FileUtils.mkdir_p(File.join(docs_dir, generated_actions_dir)) + + # Generate actions.md + template = File.join(Fastlane::ROOT, "lib/assets/Actions.md.erb") + result = ERB.new(File.read(template), 0, '-').result(binding) # https://web.archive.org/web/20160430190141/www.rrn.dk/rubys-erb-templating-system + File.write(File.join(docs_dir, "generated", "actions.md"), result) + + # Generate actions sub pages (e.g. generated/actions/slather.md, generated/actions/scan.md) + all_actions_ref_yml = [] + FileUtils.mkdir_p(File.join(docs_dir, generated_actions_dir)) + ActionsList.all_actions do |action| + @action = action # to provide a reference in the .html.erb template + @action_filename = filename_for_action(action) + + unless @action_filename + next + end + + # Make sure to always assign `@custom_content`, as we're in a loop and `@` is needed for the `erb` + @custom_content = load_custom_action_md(action) + + if action.superclass != Fastlane::Action + # This means, the current method is an alias + # meaning we're gonna look if the parent class + # has a custom md file. + # e.g. 
`DeliverAction`'s superclass is `UploadToAppStoreAction` + @custom_content ||= load_custom_action_md(action.superclass) + end + + template = File.join(Fastlane::ROOT, "lib/assets/ActionDetails.md.erb") + result = ERB.new(File.read(template), 0, '-').result(binding) # https://web.archive.org/web/20160430190141/www.rrn.dk/rubys-erb-templating-system + + # Actions get placed in "generated/actions" directory + file_name = File.join(generated_actions_dir, "#{action.action_name}.md") + File.write(File.join(docs_dir, file_name), result) + + # The action pages when published get moved to the "actions" directory + # The mkdocs.yml file needs to reference the "actions" directory (not the "generated/actions" directory) + published_file_name = File.join("actions", "#{action.action_name}.md") + all_actions_ref_yml << { action.action_name => published_file_name } + end + + # Modify the mkdocs.yml to list all the actions + mkdocs_yml_path = File.join(target_path, "mkdocs.yml") + raise "Could not find mkdocs.yml in #{target_path}, make sure to point to the fastlane/docs repo" unless File.exist?(mkdocs_yml_path) + mkdocs_yml = YAML.load_file(mkdocs_yml_path) + hidden_actions_array = mkdocs_yml["nav"].find { |p| !p["_Actions"].nil? } + hidden_actions_array["_Actions"] = all_actions_ref_yml + File.write(mkdocs_yml_path, mkdocs_yml.to_yaml) + + # Copy over the assets from the `actions/docs/assets` directory + Dir[File.join(custom_action_docs_path, "assets", "*")].each do |current_asset_path| + UI.message("Copying asset #{current_asset_path}") + FileUtils.cp(current_asset_path, File.join(docs_dir, "img", "actions", File.basename(current_asset_path))) + end + + UI.success("Generated new docs on path #{target_path}") + end + + private + + def readable_category_name(category_symbol) + case category_symbol + when :misc + "Misc" + when :source_control + "Source Control" + when :notifications + "Notifications" + when :code_signing + "Code Signing" + when :documentation + "Documentation" + when :testing + "Testing" + when :building + "Building" + when :push + "Push" + when :screenshots + "Screenshots" + when :project + "Project" + when :beta + "Beta" + when :production + "Releasing your app" + when :app_store_connect + "App Store Connect" + when :deprecated + "Deprecated" + else + category_symbol.to_s.capitalize + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/environment_printer.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/environment_printer.rb new file mode 100644 index 0000000..770c135 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/environment_printer.rb @@ -0,0 +1,304 @@ +module Fastlane + class EnvironmentPrinter + def self.output + env_info = get + + # Remove sensitive option values + FastlaneCore::Configuration.sensitive_strings.compact.each do |sensitive_element| + env_info.gsub!(sensitive_element, "#########") + end + + puts(env_info) + UI.important("Take notice that this output may contain sensitive information, or simply information that you don't want to make public.") + if FastlaneCore::Helper.mac? && UI.interactive? && UI.confirm("🙄 Wow, that's a lot of markdown text... 
should fastlane put it into your clipboard, so you can easily paste it on GitHub?") + copy_to_clipboard(env_info) + UI.success("Successfully copied markdown into your clipboard 🎨") + end + UI.success("Open https://github.com/fastlane/fastlane/issues/new to submit a new issue ✅") + end + + def self.get + UI.important("Generating fastlane environment output, this might take a few seconds...") + require "fastlane/markdown_table_formatter" + env_output = "" + env_output << print_system_environment + env_output << print_system_locale + env_output << print_fastlane_files + env_output << print_loaded_fastlane_gems + env_output << print_loaded_plugins + env_output << print_loaded_gems + env_output << print_date + + # Adding title + status = (env_output.include?("🚫") ? "🚫" : "✅") + env_header = "<details><summary>
#{status} fastlane environment #{status}</summary>\n\n" + env_tail = "</details>
" + final_output = "" + + if FastlaneCore::Globals.captured_output? + final_output << "### Captured Output\n\n" + final_output << "Command Used: `#{ARGV.join(' ')}`\n" + final_output << "
Output/Log</summary>\n\n```\n\n#{FastlaneCore::Globals.captured_output}\n\n```\n\n</details>
\n\n" + end + + final_output << env_header + env_output + env_tail + end + + def self.print_date + date = Time.now.strftime("%Y-%m-%d") + "\n*generated on:* **#{date}**\n" + end + + def self.print_loaded_plugins + env_output = "### Loaded fastlane plugins:\n" + env_output << "\n" + plugin_manager = Fastlane::PluginManager.new + plugin_manager.load_plugins(print_table: false) + + if plugin_manager.available_plugins.length <= 0 + env_output << "**No plugins Loaded**\n" + else + table = "" + table << "| Plugin | Version | Update-Status |\n" + table << "|--------|---------|\n" + plugin_manager.available_plugins.each do |plugin| + begin + installed_version = Fastlane::ActionCollector.determine_version(plugin) + latest_version = FastlaneCore::UpdateChecker.fetch_latest(plugin) + if Gem::Version.new(installed_version) == Gem::Version.new(latest_version) + update_status = "✅ Up-To-Date" + else + update_status = "đŸšĢ Update available" + end + rescue + update_status = "đŸ’Ĩ Check failed" + end + table << "| #{plugin} | #{installed_version} | #{update_status} |\n" + end + + rendered_table = MarkdownTableFormatter.new(table) + env_output << rendered_table.to_md + end + + env_output << "\n\n" + env_output + end + + # We have this as a separate method, as this has to be handled + # slightly differently, depending on how fastlane is being called + def self.gems_to_check + if Helper.contained_fastlane? + Gem::Specification + else + Gem.loaded_specs.values + end + end + + def self.print_loaded_fastlane_gems + # fastlanes internal gems + env_output = "### fastlane gems\n\n" + table = "" + table << "| Gem | Version | Update-Status |\n" + table << "|-----|---------|------------|\n" + fastlane_tools = Fastlane::TOOLS + [:fastlane_core, :credentials_manager] + + gems_to_check.each do |current_gem| + update_status = "N/A" + + next unless fastlane_tools.include?(current_gem.name.to_sym) + begin + latest_version = FastlaneCore::UpdateChecker.fetch_latest(current_gem.name) + if Gem::Version.new(current_gem.version) >= Gem::Version.new(latest_version) + update_status = "✅ Up-To-Date" + else + update_status = "đŸšĢ Update available" + end + rescue + update_status = "đŸ’Ĩ Check failed" + end + table << "| #{current_gem.name} | #{current_gem.version} | #{update_status} |\n" + end + + rendered_table = MarkdownTableFormatter.new(table) + env_output << rendered_table.to_md + + env_output << "\n\n" + + return env_output + end + + def self.print_loaded_gems + env_output = "
" + env_output << "Loaded gems\n\n" + + table = "| Gem | Version |\n" + table << "|-----|---------|\n" + gems_to_check.each do |current_gem| + unless Fastlane::TOOLS.include?(current_gem.name.to_sym) + table << "| #{current_gem.name} | #{current_gem.version} |\n" + end + end + rendered_table = MarkdownTableFormatter.new(table) + + env_output << rendered_table.to_md + env_output << "
\n\n" + return env_output + end + + def self.print_system_locale + env_output = "### System Locale\n\n" + found_one = false + env_table = "" + ["LANG", "LC_ALL", "LANGUAGE"].each do |e| + env_icon = "đŸšĢ" + if ENV[e] && ENV[e].end_with?("UTF-8") + env_icon = "✅" + found_one = true + end + if ENV[e].nil? + env_icon = "" + end + env_table << "| #{e} | #{ENV[e]} | #{env_icon} |\n" + end + if !found_one + table = "| Error |\n" + table << "|-----|\n" + table << "| No Locale with UTF8 found đŸšĢ|\n" + else + table = "| Variable | Value | |\n" + table << "|-----|---------|----|\n" + table << env_table + end + rendered_table = MarkdownTableFormatter.new(table) + env_output << rendered_table.to_md + env_output << "\n\n" + end + + def self.print_system_environment + require "openssl" + + env_output = "### Stack\n\n" + product, version, build = "Unknown" + os_version = "UNKNOWN" + + if Helper.mac? + product, version, build = `sw_vers`.strip.split("\n").map { |line| line.split(':').last.strip } + os_version = version + end + + if Helper.linux? + # this should work on pretty much all linux distros + os_version = `uname -a`.strip + version = "" + build = `uname -r`.strip + product = `cat /etc/issue.net`.strip + + distro_guesser = { + fedora: "/etc/fedora-release", + debian_based: "/etc/debian_version", + suse: "/etc/SUSE-release", + mandrake: "/etc/mandrake-release" + } + + distro_guesser.each do |dist, vers| + os_version = "#{dist} " + File.read(vers).strip if File.exist?(vers) + version = os_version + end + end + + table_content = { + "fastlane" => Fastlane::VERSION, + "OS" => os_version, + "Ruby" => RUBY_VERSION, + "Bundler?" => Helper.bundler?, + "Git" => git_version, + "Installation Source" => anonymized_path($PROGRAM_NAME), + "Host" => "#{product} #{version} (#{build})", + "Ruby Lib Dir" => anonymized_path(RbConfig::CONFIG['libdir']), + "OpenSSL Version" => OpenSSL::OPENSSL_VERSION, + "Is contained" => Helper.contained_fastlane?.to_s, + "Is homebrew" => Helper.homebrew?.to_s, + "Is installed via Fabric.app" => Helper.mac_app?.to_s + } + + if Helper.mac? + table_content["Xcode Path"] = anonymized_path(Helper.xcode_path) + begin + table_content["Xcode Version"] = Helper.xcode_version + rescue => ex + UI.error(ex) + UI.error("Could not get Xcode Version") + end + table_content["Swift Version"] = Helper.swift_version || "N/A" + end + + table = ["| Key | Value |"] + table += table_content.collect { |k, v| "| #{k} | #{v} |" } + + begin + rendered_table = MarkdownTableFormatter.new(table.join("\n")) + env_output << rendered_table.to_md + rescue => ex + UI.error(ex) + UI.error("Error rendering markdown table using the following text:") + UI.message(table.join("\n")) + env_output << table.join("\n") + end + + env_output << "\n\n" + env_output + end + + def self.print_fastlane_files + env_output = "### fastlane files:\n\n" + + fastlane_path = FastlaneCore::FastlaneFolder.fastfile_path + + if fastlane_path && File.exist?(fastlane_path) + env_output << "
" + env_output << "`#{fastlane_path}`\n" + env_output << "\n" + env_output << "```ruby\n" + env_output << File.read(fastlane_path, encoding: "utf-8") + env_output << "\n```\n" + env_output << "
" + else + env_output << "**No Fastfile found**\n" + end + env_output << "\n\n" + + appfile_path = CredentialsManager::AppfileConfig.default_path + if appfile_path && File.exist?(appfile_path) + env_output << "
" + env_output << "`#{appfile_path}`\n" + env_output << "\n" + env_output << "```ruby\n" + env_output << File.read(appfile_path, encoding: "utf-8") + env_output << "\n```\n" + env_output << "
" + else + env_output << "**No Appfile found**\n" + end + env_output << "\n\n" + env_output + end + + def self.anonymized_path(path, home = ENV['HOME']) + return home ? path.gsub(%r{^#{home}(?=/(.*)|$)}, '~\2') : path + end + + # Copy a given string into the clipboard + # Make sure to ask the user first, as some people don't + # use a clipboard manager, so they might lose something important + def self.copy_to_clipboard(string) + require 'open3' + Open3.popen3('pbcopy') { |input, _, _| input << string } + end + + def self.git_version + return `git --version`.strip.split("\n").first + rescue + return "not found" + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/erb_template_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/erb_template_helper.rb new file mode 100644 index 0000000..46ee70e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/erb_template_helper.rb @@ -0,0 +1,36 @@ +module Fastlane + class ErbTemplateHelper + require "erb" + def self.load(template_name) + path = "#{Fastlane::ROOT}/lib/assets/#{template_name}.erb" + load_from_path(path) + end + + def self.load_from_path(template_filepath) + unless File.exist?(template_filepath) + UI.user_error!("Could not find template at path '#{template_filepath}'") + end + File.read(template_filepath) + end + + def self.render(template, template_vars_hash, trim_mode = nil) + Fastlane::ErbalT.new(template_vars_hash, trim_mode).render(template) + end + end + class ErbalT < OpenStruct + def initialize(hash, trim_mode = nil) + super(hash) + @trim_mode = trim_mode + end + + def render(template) + # From Ruby 2.6, ERB.new takes keyword arguments and positional ones are deprecated + # https://bugs.ruby-lang.org/issues/14256 + if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("2.6.0") + ERB.new(template, trim_mode: @trim_mode).result(binding) + else + ERB.new(template, nil, @trim_mode).result(binding) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fast_file.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fast_file.rb new file mode 100644 index 0000000..c38d51d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fast_file.rb @@ -0,0 +1,464 @@ +require "rubygems/requirement" + +module Fastlane + class FastFile + # Stores all relevant information from the currently running process + attr_accessor :runner + + # the platform in which we're currently in when parsing the Fastfile + # This is used to identify the platform in which the lane is in + attr_accessor :current_platform + + SharedValues = Fastlane::Actions::SharedValues + + # @return The runner which can be executed to trigger the given actions + def initialize(path = nil) + return unless (path || '').length > 0 + UI.user_error!("Could not find Fastfile at path '#{path}'") unless File.exist?(path) + @path = File.expand_path(path) + content = File.read(path, encoding: "utf-8") + + # From https://github.com/orta/danger/blob/master/lib/danger/Dangerfile.rb + if content.tr!('“”‘’‛', %(""''')) + UI.error("Your #{File.basename(path)} has had smart quotes sanitised. " \ + 'To avoid issues in the future, you should not use ' \ + 'TextEdit for editing it. 
If you are not using TextEdit, ' \ + 'you should turn off smart quotes in your editor of choice.') + end + + content.scan(/^\s*require ["'](.*?)["']/).each do |current| + gem_name = current.last + next if gem_name.include?(".") # these are local gems + + begin + require(gem_name) + rescue LoadError + UI.important("You have required a gem, if this is a third party gem, please use `fastlane_require '#{gem_name}'` to ensure the gem is installed locally.") + end + end + + parse(content, @path) + end + + def parsing_binding + binding + end + + def parse(data, path = nil) + @runner ||= Runner.new + + Dir.chdir(FastlaneCore::FastlaneFolder.path || Dir.pwd) do # context: fastlane subfolder + # create nice path that we want to print in case of some problem + relative_path = path.nil? ? '(eval)' : Pathname.new(path).relative_path_from(Pathname.new(Dir.pwd)).to_s + + begin + # We have to use #get_binding method, because some test files defines method called `path` (for example SwitcherFastfile) + # and local variable has higher priority, so it causes to remove content of original Fastfile for example. With #get_binding + # is this always clear and safe to declare any local variables we want, because the eval function uses the instance scope + # instead of local. + + # rubocop:disable Security/Eval + eval(data, parsing_binding, relative_path) # using eval is ok for this case + # rubocop:enable Security/Eval + rescue SyntaxError => ex + match = ex.to_s.match(/#{Regexp.escape(relative_path)}:(\d+)/) + if match + line = match[1] + UI.content_error(data, line) + UI.user_error!("Syntax error in your Fastfile on line #{line}: #{ex}") + else + UI.user_error!("Syntax error in your Fastfile: #{ex}") + end + end + end + + self + end + + ##################################################### + # @!group DSL + ##################################################### + + # User defines a new lane + def lane(lane_name, &block) + UI.user_error!("You have to pass a block using 'do' for lane '#{lane_name}'. Make sure you read the docs on GitHub.") unless block + + self.runner.add_lane(Lane.new(platform: self.current_platform, + block: block, + description: desc_collection, + name: lane_name, + is_private: false)) + + @desc_collection = nil # reset the collected description again for the next lane + end + + # User defines a new private lane, which can't be called from the CLI + def private_lane(lane_name, &block) + UI.user_error!("You have to pass a block using 'do' for lane '#{lane_name}'. Make sure you read the docs on GitHub.") unless block + + self.runner.add_lane(Lane.new(platform: self.current_platform, + block: block, + description: desc_collection, + name: lane_name, + is_private: true)) + + @desc_collection = nil # reset the collected description again for the next lane + end + + # User defines a lane that can overwrite existing lanes. Useful when importing a Fastfile + def override_lane(lane_name, &block) + UI.user_error!("You have to pass a block using 'do' for lane '#{lane_name}'. 
Make sure you read the docs on GitHub.") unless block + + self.runner.add_lane(Lane.new(platform: self.current_platform, + block: block, + description: desc_collection, + name: lane_name, + is_private: false), true) + + @desc_collection = nil # reset the collected description again for the next lane + end + + # User defines a platform block + def platform(platform_name) + SupportedPlatforms.verify!(platform_name) + + self.current_platform = platform_name + + yield + + self.current_platform = nil + end + + # Is executed before each test run + def before_all(&block) + @runner.set_before_all(@current_platform, block) + end + + # Is executed before each lane + def before_each(&block) + @runner.set_before_each(@current_platform, block) + end + + # Is executed after each test run + def after_all(&block) + @runner.set_after_all(@current_platform, block) + end + + # Is executed before each lane + def after_each(&block) + @runner.set_after_each(@current_platform, block) + end + + # Is executed if an error occurred during fastlane execution + def error(&block) + @runner.set_error(@current_platform, block) + end + + # Is used to look if the method is implemented as an action + def method_missing(method_sym, *arguments, &_block) + self.runner.trigger_action_by_name(method_sym, nil, false, *arguments) + end + + ##################################################### + # @!group Other things + ##################################################### + + # Is the given key a platform block or a lane? + def is_platform_block?(key) + UI.crash!('No key given') unless key + + return false if self.runner.lanes.fetch(nil, {}).fetch(key.to_sym, nil) + return true if self.runner.lanes[key.to_sym].kind_of?(Hash) + + if key.to_sym == :update + # The user ran `fastlane update`, instead of `fastlane update_fastlane` + # We're gonna be nice and understand what the user is trying to do + require 'fastlane/one_off' + Fastlane::OneOff.run(action: "update_fastlane", parameters: {}) + else + UI.user_error!("Could not find '#{key}'. Available lanes: #{self.runner.available_lanes.join(', ')}") + end + end + + def actions_path(path) + UI.crash!("Path '#{path}' not found!") unless File.directory?(path) + + Actions.load_external_actions(path) + end + + # Execute shell command + # Accepts arguments with with and without the command named keyword so that sh + # behaves like other actions with named keywords + # https://github.com/fastlane/fastlane/issues/14930 + # + # Example: + # sh("ls") + # sh("ls", log: false) + # sh(command: "ls") + # sh(command: "ls", step_name: "listing the files") + # sh(command: "ls", log: false) + def sh(*args, &b) + # First accepts hash (or named keywords) like other actions + # Otherwise uses sh method that doesn't have an interface like an action + if args.count == 1 && args.first.kind_of?(Hash) + options = args.first + command = options.delete(:command) + + raise ArgumentError, "sh requires :command keyword in argument" if command.nil? + log = options[:log].nil? ? true : options[:log] + FastFile.sh(*command, step_name: options[:step_name], log: log, error_callback: options[:error_callback], &b) + elsif args.count != 1 && args.last.kind_of?(Hash) + new_args = args.dup + options = new_args.pop + log = options[:log].nil? ? true : options[:log] + FastFile.sh(*new_args, step_name: options[:step_name], log: log, error_callback: options[:error_callback], &b) + else + FastFile.sh(*args, &b) + end + end + + def self.sh(*command, step_name: nil, log: true, error_callback: nil, &b) + command_header = log ? 
step_name || Actions.shell_command_from_args(*command) : "shell command" + Actions.execute_action(command_header) do + Actions.sh_no_action(*command, log: log, error_callback: error_callback, &b) + end + end + + def desc(string) + desc_collection << string + end + + def desc_collection + @desc_collection ||= [] + end + + def fastlane_require(gem_name) + FastlaneRequire.install_gem_if_needed(gem_name: gem_name, require_gem: true) + end + + def generated_fastfile_id(id) + UI.important("The `generated_fastfile_id` action was deprecated, you can remove the line from your `Fastfile`") + end + + def import(path = nil) + UI.user_error!("Please pass a path to the `import` action") unless path + + path = path.dup.gsub("~", Dir.home) + unless Pathname.new(path).absolute? # unless an absolute path + path = File.join(File.expand_path('..', @path), path) + end + + UI.user_error!("Could not find Fastfile at path '#{path}'") unless File.exist?(path) + + # First check if there are local actions to import in the same directory as the Fastfile + actions_path = File.join(File.expand_path("..", path), 'actions') + Fastlane::Actions.load_external_actions(actions_path) if File.directory?(actions_path) + + action_launched('import') + + return_value = parse(File.read(path), path) + + action_completed('import', status: FastlaneCore::ActionCompletionStatus::SUCCESS) + + return return_value + end + + def find_tag(folder: nil, version: nil, remote: false) + req = Gem::Requirement.new(version) + all_tags = get_tags(folder: folder, remote: remote) + + return all_tags.select { |t| req =~ FastlaneCore::TagVersion.new(t) }.last + end + + # @param url [String] The git URL to clone the repository from + # @param branch [String] The branch to checkout in the repository + # @param path [String] The path to the Fastfile + # @param version [String, Array] Version requirement for repo tags + # @param dependencies [Array] An optional array of additional Fastfiles in the repository + # @param cache_path [String] An optional path to a directory where the repository should be cloned into + def import_from_git(url: nil, branch: 'HEAD', path: 'fastlane/Fastfile', version: nil, dependencies: [], cache_path: nil) # rubocop:disable Metrics/PerceivedComplexity + UI.user_error!("Please pass a path to the `import_from_git` action") if url.to_s.length == 0 + + Actions.execute_action('import_from_git') do + require 'tmpdir' + + action_launched('import_from_git') + + is_eligible_for_caching = !cache_path.nil? + + UI.message("Eligible for caching") if is_eligible_for_caching + + # Checkout the repo + repo_name = url.split("/").last + checkout_param = branch + + import_block = proc do |target_path| + clone_folder = File.join(target_path, repo_name) + + branch_option = "--branch #{branch}" if branch != 'HEAD' + + checkout_dependencies = dependencies.map(&:shellescape).join(" ") + + # If the current call is eligible for caching, we check out all the + # files and directories. If not, we only check out the specified + # `path` and `dependencies`. + checkout_path = is_eligible_for_caching ? "" : "#{path.shellescape} #{checkout_dependencies}" + + if Dir[clone_folder].empty? + UI.message("Cloning remote git repo...") + Helper.with_env_values('GIT_TERMINAL_PROMPT' => '0') do + # When using cached clones, we need the entire repository history + # so we can switch between tags or branches instantly, or else, + # it would defeat the caching's purpose. + depth = is_eligible_for_caching ? 
"" : "--depth 1" + + Actions.sh("git clone #{url.shellescape} #{clone_folder.shellescape} #{depth} --no-checkout #{branch_option}") + end + end + + unless version.nil? + if is_eligible_for_caching + checkout_param = find_tag(folder: clone_folder, version: version, remote: false) + + if checkout_param.nil? + # Update the repo and try again before failing + UI.message("Updating git repo...") + Helper.with_env_values('GIT_TERMINAL_PROMPT' => '0') do + Actions.sh("cd #{clone_folder.shellescape} && git checkout #{branch} && git reset --hard && git pull --all") + end + + checkout_param = find_tag(folder: clone_folder, version: version, remote: false) + else + UI.message("Found tag #{checkout_param}. No git repo update needed.") + end + else + checkout_param = find_tag(folder: clone_folder, version: version, remote: true) + end + + UI.user_error!("No tag found matching #{version.inspect}") if checkout_param.nil? + end + + if is_eligible_for_caching + if version.nil? + # Update the repo if it's eligible for caching but the version isn't specified + UI.message("Fetching remote git branches and updating git repo...") + Helper.with_env_values('GIT_TERMINAL_PROMPT' => '0') do + Actions.sh("cd #{clone_folder.shellescape} && git fetch --all --quiet && git checkout #{checkout_param.shellescape} #{checkout_path} && git reset --hard && git rebase") + end + else + begin + # https://stackoverflow.com/a/1593574/865175 + current_tag = Actions.sh("cd #{clone_folder.shellescape} && git describe --exact-match --tags HEAD").strip + rescue + current_tag = nil + end + + if current_tag != version + Actions.sh("cd #{clone_folder.shellescape} && git checkout #{checkout_param.shellescape} #{checkout_path}") + end + end + else + Actions.sh("cd #{clone_folder.shellescape} && git checkout #{checkout_param.shellescape} #{checkout_path}") + end + + # Knowing that we check out all the files and directories when the + # current call is eligible for caching, we don't need to also + # explicitly check out the "actions" directory. + unless is_eligible_for_caching + # We also want to check out all the local actions of this fastlane setup + containing = path.split(File::SEPARATOR)[0..-2] + containing = "." if containing.count == 0 + actions_folder = File.join(containing, "actions") + begin + Actions.sh("cd #{clone_folder.shellescape} && git checkout #{checkout_param.shellescape} #{actions_folder.shellescape}") + rescue + # We don't care about a failure here, as local actions are optional + end + end + + return_value = nil + if dependencies.any? 
+ return_value = [import(File.join(clone_folder, path))] + return_value += dependencies.map { |file_path| import(File.join(clone_folder, file_path)) } + else + return_value = import(File.join(clone_folder, path)) + end + + action_completed('import_from_git', status: FastlaneCore::ActionCompletionStatus::SUCCESS) + + return return_value + end + + if is_eligible_for_caching + import_block.call(File.expand_path(cache_path)) + else + Dir.mktmpdir("fl_clone", &import_block) + end + end + end + + ##################################################### + # @!group Versioning helpers + ##################################################### + + def get_tags(folder: nil, remote: false) + if remote + UI.message("Fetching remote git tags...") + Helper.with_env_values('GIT_TERMINAL_PROMPT' => '0') do + Actions.sh("cd #{folder.shellescape} && git fetch --all --tags -q") + end + end + + # Fetch all possible tags + git_tags_string = Actions.sh("cd #{folder.shellescape} && git tag -l") + git_tags = git_tags_string.split("\n") + + # Sort tags based on their version number + return git_tags + .select { |tag| FastlaneCore::TagVersion.correct?(tag) } + .sort_by { |tag| FastlaneCore::TagVersion.new(tag) } + end + + ##################################################### + # @!group Overwriting Ruby methods + ##################################################### + + # Speak out loud + def say(value) + # Overwrite this, since there is already a 'say' method defined in the Ruby standard library + value ||= yield + + value = { text: value } if value.kind_of?(String) || value.kind_of?(Array) + self.runner.trigger_action_by_name(:say, nil, false, value) + end + + def puts(value) + # Overwrite this, since there is already a 'puts' method defined in the Ruby standard library + value ||= yield if block_given? + + action_launched('puts') + return_value = Fastlane::Actions::PutsAction.run([value]) + action_completed('puts', status: FastlaneCore::ActionCompletionStatus::SUCCESS) + return return_value + end + + def test(params = {}) + # Overwrite this, since there is already a 'test' method defined in the Ruby standard library + self.runner.try_switch_to_lane(:test, [params]) + end + + def action_launched(action_name) + action_launch_context = FastlaneCore::ActionLaunchContext.context_for_action_name(action_name, + fastlane_client_language: :ruby, + args: ARGV) + FastlaneCore.session.action_launched(launch_context: action_launch_context) + end + + def action_completed(action_name, status: nil) + completion_context = FastlaneCore::ActionCompletionContext.context_for_action_name(action_name, + args: ARGV, + status: status) + FastlaneCore.session.action_completed(completion_context: completion_context) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fastlane_require.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fastlane_require.rb new file mode 100644 index 0000000..2497356 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/fastlane_require.rb @@ -0,0 +1,81 @@ +module Fastlane + class FastlaneRequire + class << self + def install_gem_if_needed(gem_name: nil, require_gem: true) + gem_require_name = format_gem_require_name(gem_name) + + # check if it's installed + if gem_installed?(gem_name) + UI.success("gem '#{gem_name}' is already installed") if FastlaneCore::Globals.verbose? + require gem_require_name if require_gem + return true + end + + if Helper.bundler? 
+ # User uses bundler, we don't want to install gems on the fly here + # Instead tell the user how to add it to their Gemfile + UI.important("Missing gem '#{gem_name}', please add the following to your local Gemfile:") + UI.important("") + UI.command_output("gem \"#{gem_name}\"") + UI.important("") + UI.user_error!("Add 'gem \"#{gem_name}\"' to your Gemfile and restart fastlane") unless Helper.test? + end + + require "rubygems/command_manager" + installer = Gem::CommandManager.instance[:install] + + UI.important("Installing Ruby gem '#{gem_name}'...") + + spec_name = self.find_gem_name(gem_name) + UI.important("Found gem \"#{spec_name}\" instead of the required name \"#{gem_name}\"") if spec_name != gem_name + + return if Helper.test? + + # We install the gem like this because we also want the gem to be available to be required + # at this point. If we were to shell out, this wouldn't be the case + installer.install_gem(spec_name, Gem::Requirement.default) + UI.success("Successfully installed '#{gem_name}'") + require gem_require_name if require_gem + end + + def gem_installed?(name, req = Gem::Requirement.default) + installed = Gem::Specification.any? { |s| s.name == name and req =~ s.version } + return true if installed + + # In special cases a gem is already preinstalled, e.g. YAML. + # To find out we try to load a gem with that name in a child process + # (so we don't actually load anything we don't want to load) + # See https://github.com/fastlane/fastlane/issues/6951 + require_tester = <<-RB.gsub(/^ */, '') + begin + require ARGV.first + rescue LoadError + exit(1) + end + RB + system(RbConfig.ruby, "-e", require_tester.lines.map(&:chomp).join("; "), name) + return $?.success? + end + + def find_gem_name(user_supplied_name) + fetcher = Gem::SpecFetcher.fetcher + + # RubyGems 3.2.0 changed behavior of suggest_gems_from_name to no longer return user supplied name (only similar suggestions) + # First search for exact gem with detect then use suggest_gems_from_name + if (detected_gem = fetcher.detect(:latest) { |nt| nt.name == user_supplied_name }.first) + return detected_gem[0].name + end + + gems = fetcher.suggest_gems_from_name(user_supplied_name) + return gems.first + end + + def format_gem_require_name(gem_name) + # from "fastlane-plugin-xcversion" to "fastlane/plugin/xcversion" + gem_name = gem_name.tr("-", "/") if gem_name.start_with?("fastlane-plugin-") + + return gem_name + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/features.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/features.rb new file mode 100644 index 0000000..5816322 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/features.rb @@ -0,0 +1,10 @@ +# Use this file as the place to register Feature switches for the fastlane_core project + +# FastlaneCore::Feature.register(env_var: 'YOUR_FEATURE_SWITCH_ENV_VAR', +# description: 'Describe what this feature switch controls') + +FastlaneCore::Feature.register(env_var: 'FASTLANE_ENABLE_BETA_DELIVER_SYNC_SCREENSHOTS', + description: 'Use a newly implemented screenshots synchronization logic') + +FastlaneCore::Feature.register(env_var: 'FASTLANE_WWDR_USE_HTTP1_AND_RETRIES', + description: 'Adds --http1.1 and --retry 3 --retry-all-errors to the curl command to download WWDR certificates') diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/README.md
b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/README.md new file mode 100644 index 0000000..5240ea4 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/README.md @@ -0,0 +1,29 @@ +### fastlane Helper + +You can put shared code into this folder. Use this if you need to access the same code from multiple actions, or simply to keep the action itself lean. + +To create a new helper, duplicate the `podspec_helper.rb`, rename the class and put your code inside the class. + +Make sure it's structured like this: + +```ruby +module Fastlane + module Helper + class PodspecHelper + ... + end + end +end +``` + +The `git_helper` and `sh_helper` are structured differently; please model new helpers on `podspec_helper` instead. + +### Use of the helper class + +To access the helper class, use + +```ruby +Helper::PodspecHelper.... +``` + +Make sure to prefix your helper with the `Helper::` module. diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/adb_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/adb_helper.rb new file mode 100644 index 0000000..5dd8c0f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/adb_helper.rb @@ -0,0 +1,66 @@ +module Fastlane + module Helper + class AdbDevice + attr_accessor :serial + + def initialize(serial: nil) + self.serial = serial + end + end + + class AdbHelper + # Path to the adb binary + attr_accessor :adb_path + + # Host of the adb server (passed to adb via -H) + attr_accessor :adb_host + + # All available devices + attr_accessor :devices + + def initialize(adb_path: nil, adb_host: nil) + android_home = ENV['ANDROID_HOME'] || ENV['ANDROID_SDK_ROOT'] || ENV['ANDROID_SDK'] + if (adb_path.nil? || adb_path == "adb") && android_home + adb_path = File.join(android_home, "platform-tools", "adb") + end + + self.adb_path = Helper.get_executable_path(File.expand_path(adb_path)) + self.adb_host = adb_host + end + + def host_option + return self.adb_host ? "-H #{adb_host}" : nil + end + + # Run a certain action + def trigger(command: nil, serial: nil) + android_serial = serial != "" ? "ANDROID_SERIAL=#{serial}" : nil + command = [android_serial, adb_path.shellescape, host_option, command].compact.join(" ").strip + Action.sh(command) + end + + def device_avalaible?(serial) + UI.deprecated("Please use `device_available?` instead...
This will be removed in a future version of fastlane") + device_available?(serial) + end + + def device_available?(serial) + load_all_devices + return devices.map(&:serial).include?(serial) + end + + def load_all_devices + self.devices = [] + + command = [adb_path.shellescape, host_option, "devices -l"].compact.join(" ") + output = Actions.sh(command, log: false) + output.split("\n").each do |line| + if (result = line.match(/^(\S+)(\s+)(device )/)) + self.devices << AdbDevice.new(serial: result[1]) + end + end + self.devices + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/dotenv_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/dotenv_helper.rb new file mode 100644 index 0000000..31de4b7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/dotenv_helper.rb @@ -0,0 +1,50 @@ +module Fastlane + module Helper + class DotenvHelper + # @param env_cl_param [String] an optional list of dotenv environment names separated by commas, without space + def self.load_dot_env(env_cl_param) + base_path = find_dotenv_directory + + return unless base_path + + load_dot_envs_from(env_cl_param, base_path) + end + + # finds the first directory of [fastlane, its parent] containing dotenv files + def self.find_dotenv_directory + path = FastlaneCore::FastlaneFolder.path + search_paths = [path] + search_paths << path + "/.." unless path.nil? + search_paths.compact! + search_paths.find do |dir| + Dir.glob(File.join(dir, '*.env*'), File::FNM_DOTMATCH).count > 0 + end + end + + # loads the dotenvs. First the .env and .env.default and + # then overrides with all specified extra environments + def self.load_dot_envs_from(env_cl_param, base_path) + require 'dotenv' + + # Making sure the default '.env' and '.env.default' get loaded + env_file = File.join(base_path, '.env') + env_default_file = File.join(base_path, '.env.default') + Dotenv.load(env_file, env_default_file) + + return unless env_cl_param + + Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::ENVIRONMENT] = env_cl_param + + # multiple envs? + envs = env_cl_param.split(",") + + # Loads .env file for the environment(s) passed in through options + envs.each do |env| + env_file = File.join(base_path, ".env.#{env}") + UI.success("Loading from '#{env_file}'") + Dotenv.overload(env_file) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gem_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gem_helper.rb new file mode 100644 index 0000000..093ab2e --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gem_helper.rb @@ -0,0 +1,26 @@ +module Fastlane + module Actions + # will make sure a gem is installed. If it's not, an appropriate error message is shown + # this will *not* 'require' the gem + def self.verify_gem!(gem_name) + begin + FastlaneRequire.install_gem_if_needed(gem_name: gem_name, require_gem: false) + # We don't import this by default, as it's not always the same + # also e.g.
cocoapods is just required and not imported + rescue Gem::LoadError + UI.error("Could not find gem '#{gem_name}'") + UI.error("") + UI.error("If you installed fastlane using `gem install fastlane` run") + UI.command("gem install #{gem_name}") + UI.error("to install the missing gem") + UI.error("") + UI.error("If you use a Gemfile add this to your Gemfile:") + UI.important(" gem '#{gem_name}'") + UI.error("and run `bundle install`") + + UI.user_error!("You have to install the `#{gem_name}` gem on this machine") unless Helper.test? + end + true + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/git_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/git_helper.rb new file mode 100644 index 0000000..c8b181d --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/git_helper.rb @@ -0,0 +1,166 @@ +module Fastlane + module Actions + GIT_MERGE_COMMIT_FILTERING_OPTIONS = [:include_merges, :exclude_merges, :only_include_merges].freeze + + module SharedValues + GIT_BRANCH_ENV_VARS = %w(GIT_BRANCH BRANCH_NAME TRAVIS_BRANCH BITRISE_GIT_BRANCH CI_BUILD_REF_NAME CI_COMMIT_REF_NAME WERCKER_GIT_BRANCH BUILDKITE_BRANCH APPCENTER_BRANCH CIRCLE_BRANCH).reject do |branch| + # Removing because tests break on CircleCI + Helper.test? && branch == "CIRCLE_BRANCH" + end.freeze + end + + def self.git_log_between(pretty_format, from, to, merge_commit_filtering, date_format = nil, ancestry_path) + command = %w(git log) + command << "--pretty=#{pretty_format}" + command << "--date=#{date_format}" if date_format + command << '--ancestry-path' if ancestry_path + command << "#{from}...#{to}" + command << git_log_merge_commit_filtering_option(merge_commit_filtering) + # "*command" syntax expands "command" array into variable arguments, which + # will then be individually shell-escaped by Actions.sh. + Actions.sh(*command.compact, log: false).chomp + rescue + nil + end + + def self.git_log_last_commits(pretty_format, commit_count, merge_commit_filtering, date_format = nil, ancestry_path) + command = %w(git log) + command << "--pretty=#{pretty_format}" + command << "--date=#{date_format}" if date_format + command << '--ancestry-path' if ancestry_path + command << '-n' << commit_count.to_s + command << git_log_merge_commit_filtering_option(merge_commit_filtering) + Actions.sh(*command.compact, log: false).chomp + rescue + nil + end + + def self.last_git_tag_hash(tag_match_pattern = nil) + tag_pattern_param = tag_match_pattern ? "=#{tag_match_pattern}" : '' + Actions.sh('git', 'rev-list', "--tags#{tag_pattern_param}", '--max-count=1').chomp + rescue + nil + end + + def self.last_git_tag_name(match_lightweight = true, tag_match_pattern = nil) + hash = last_git_tag_hash(tag_match_pattern) + # If hash is nil (command fails), "git describe" command below will still + # run and provide some output, although it's definitely not going to be + # anything reasonably expected. Bail out early. + return unless hash + + command = %w(git describe) + command << '--tags' if match_lightweight + command << hash + command << '--match' if tag_match_pattern + command << tag_match_pattern if tag_match_pattern + Actions.sh(*command.compact, log: false).chomp + rescue + nil + end + + def self.last_git_commit_dict + return nil if last_git_commit_formatted_with('%an').nil? 
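+ # Usage sketch (illustrative comment, not part of the vendored file; the calling lane is hypothetical): + # commit = Fastlane::Actions.last_git_commit_dict + # UI.message("Releasing #{commit[:abbreviated_commit_hash]} by #{commit[:author]}") if commit + # The available keys are exactly those of the hash literal below.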
+ + { + author: last_git_commit_formatted_with('%an'), + author_email: last_git_commit_formatted_with('%ae'), + message: last_git_commit_formatted_with('%B'), + commit_hash: last_git_commit_formatted_with('%H'), + abbreviated_commit_hash: last_git_commit_formatted_with('%h') + } + end + + # Gets the last git commit information formatted into a String by the provided + # pretty format String. See the git-log documentation for valid format placeholders + def self.last_git_commit_formatted_with(pretty_format, date_format = nil) + command = %w(git log -1) + command << "--pretty=#{pretty_format}" + command << "--date=#{date_format}" if date_format + Actions.sh(*command.compact, log: false).chomp + rescue + nil + end + + # @deprecated Use git_author_email instead + # Get the author email of the last git commit + # DEPRECATED: Use git_author_email instead. + def self.git_author + UI.deprecated('`git_author` is deprecated. Please use `git_author_email` instead.') + git_author_email + end + + # Get the author email of the last git commit + def self.git_author_email + s = last_git_commit_formatted_with('%ae') + return s if s.to_s.length > 0 + return nil + end + + # Returns the unwrapped subject and body of the last commit + # DEPRECATED: Use last_git_commit_message instead. + def self.last_git_commit + UI.important('`last_git_commit` is deprecated. Please use `last_git_commit_message` instead.') + last_git_commit_message + end + + # Returns the unwrapped subject and body of the last commit + def self.last_git_commit_message + s = (last_git_commit_formatted_with('%B') || "").strip + return s if s.to_s.length > 0 + nil + end + + # Get the hash of the last commit + def self.last_git_commit_hash(short) + format_specifier = short ? '%h' : '%H' + string = last_git_commit_formatted_with(format_specifier).to_s + return string unless string.empty? 
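+ # Illustrative note (not in the vendored source): the short form maps to git's '%h' placeholder and the long form to '%H', e.g. + # Fastlane::Actions.last_git_commit_hash(true) # => "1ab2c3d" (hypothetical abbreviated hash) + # Fastlane::Actions.last_git_commit_hash(false) # => the full 40-character SHA, or nil if the git call fails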
+ return nil + end + + # Returns the current git branch, or "HEAD" if it's not checked out to any branch + # Can be replaced using the environment variable `GIT_BRANCH` + def self.git_branch + env_name = SharedValues::GIT_BRANCH_ENV_VARS.find { |env_var| FastlaneCore::Env.truthy?(env_var) } + ENV.fetch(env_name.to_s) do + self.git_branch_name_using_HEAD + end + end + + # Returns the checked out git branch name or "HEAD" if you're in detached HEAD state + def self.git_branch_name_using_HEAD + # Rescues if not a git repo or no commits in a git repo + Actions.sh("git rev-parse --abbrev-ref HEAD", log: false).chomp + rescue => err + UI.verbose("Error getting git branch: #{err.message}") + nil + end + + # Returns the default git remote branch name + def self.git_remote_branch_name(remote_name) + # Rescues if not a git repo or no remote repo + if remote_name + Actions.sh("git remote show #{remote_name} | grep 'HEAD branch' | sed 's/.*: //'", log: false).chomp + else + # Query git for the current remote head + Actions.sh("variable=$(git remote) && git remote show $variable | grep 'HEAD branch' | sed 's/.*: //'", log: false).chomp + end + rescue => err + UI.verbose("Error getting git default remote branch: #{err.message}") + nil + end + + private_class_method + def self.git_log_merge_commit_filtering_option(merge_commit_filtering) + case merge_commit_filtering + when :exclude_merges + "--no-merges" + when :only_include_merges + "--merges" + when :include_merges + nil + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gradle_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gradle_helper.rb new file mode 100644 index 0000000..772c94c --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/gradle_helper.rb @@ -0,0 +1,62 @@ +module Fastlane + module Helper + class GradleTask + attr_accessor :title + + attr_accessor :description + + def initialize(title: nil, description: nil) + self.title = title + self.description = description + end + end + + class GradleHelper + # Path to the gradle script + attr_accessor :gradle_path + + # Read-only path to the shell-escaped gradle script, suitable for use in shell commands + attr_reader :escaped_gradle_path + + # All the available tasks + attr_accessor :tasks + + def initialize(gradle_path: nil) + self.gradle_path = gradle_path + end + + # Run a certain action + def trigger(task: nil, flags: nil, serial: nil, print_command: true, print_command_output: true) + android_serial = (serial != "") ? 
"ANDROID_SERIAL=#{serial}" : nil + command = [android_serial, escaped_gradle_path, task, flags].compact.join(" ") + Action.sh(command, print_command: print_command, print_command_output: print_command_output) + end + + def task_available?(task) + load_all_tasks + return tasks.collect(&:title).include?(task) + end + + def gradle_path=(gradle_path) + @gradle_path = gradle_path + @escaped_gradle_path = gradle_path.shellescape + end + + private + + def load_all_tasks + self.tasks = [] + + command = [escaped_gradle_path, "tasks", "--console=plain"].join(" ") + output = Action.sh(command, print_command: false, print_command_output: false) + output.split("\n").each do |line| + if (result = line.match(/(\w+)\s\-\s([\w\s]+)/)) + self.tasks << GradleTask.new(title: result[1], description: result[2]) + end + end + + self.tasks + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/lane_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/lane_helper.rb new file mode 100644 index 0000000..ab9a347 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/lane_helper.rb @@ -0,0 +1,13 @@ +module Fastlane + module Helper + class LaneHelper + def self.current_platform + return Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::PLATFORM_NAME] + end + + def self.current_lane + return Fastlane::Actions.lane_context[Fastlane::Actions::SharedValues::LANE_NAME] + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/podspec_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/podspec_helper.rb new file mode 100644 index 0000000..a6134a1 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/podspec_helper.rb @@ -0,0 +1,75 @@ +module Fastlane + module Helper + class PodspecHelper + attr_accessor :path + attr_accessor :podspec_content + attr_accessor :version_regex + attr_accessor :version_match + attr_accessor :version_value + + def initialize(path = nil, require_variable_prefix = true) + version_var_name = 'version' + variable_prefix = require_variable_prefix ? /\w\./ : // + @version_regex = /^(?[^#]*#{variable_prefix}#{version_var_name}\s*=\s*['"])(?(?[0-9]+)(\.(?[0-9]+))?(\.(?[0-9]+))?(?(\.[0-9]+)*)?(-(?(.+)))?)(?['"])/i + + return unless (path || '').length > 0 + UI.user_error!("Could not find podspec file at path '#{path}'") unless File.exist?(path) + + @path = File.expand_path(path) + podspec_content = File.read(path) + + parse(podspec_content) + end + + def parse(podspec_content) + @podspec_content = podspec_content + @version_match = @version_regex.match(@podspec_content) + UI.user_error!("Could not find version in podspec content '#{@podspec_content}'") if @version_match.nil? 
+ @version_value = @version_match[:value] + end + + def bump_version(bump_type) + UI.user_error!("Do not support bump of 'appendix', please use `update_version_appendix(appendix)` instead") if bump_type == 'appendix' + + major = version_match[:major].to_i + minor = version_match[:minor].to_i || 0 + patch = version_match[:patch].to_i || 0 + + case bump_type + when 'patch' + patch += 1 + when 'minor' + minor += 1 + patch = 0 + when 'major' + major += 1 + minor = 0 + patch = 0 + end + + @version_value = "#{major}.#{minor}.#{patch}" + end + + def update_version_appendix(appendix = nil) + new_appendix = appendix || @version_value[:appendix] + return if new_appendix.nil? + + new_appendix = new_appendix.sub(".", "") if new_appendix.start_with?(".") + major = version_match[:major].to_i + minor = version_match[:minor].to_i || 0 + patch = version_match[:patch].to_i || 0 + + @version_value = "#{major}.#{minor}.#{patch}.#{new_appendix}" + end + + def update_podspec(version = nil) + new_version = version || @version_value + updated_podspec_content = @podspec_content.gsub(@version_regex, "#{@version_match[:begin]}#{new_version}#{@version_match[:end]}") + + File.open(@path, "w") { |file| file.puts(updated_podspec_content) } unless Helper.test? + + updated_podspec_content + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/s3_client_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/s3_client_helper.rb new file mode 100644 index 0000000..0f8744b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/s3_client_helper.rb @@ -0,0 +1,78 @@ +require 'aws-sdk-s3' + +module Fastlane + module Helper + class S3ClientHelper + attr_reader :access_key + attr_reader :region + + def initialize(access_key: nil, secret_access_key: nil, region: nil, s3_client: nil) + @access_key = access_key + @secret_access_key = secret_access_key + @region = region + + @client = s3_client + end + + def list_buckets + return client.list_buckets + end + + def upload_file(bucket_name, file_name, file_data, acl) + bucket = find_bucket!(bucket_name) + details = { + acl: acl, + key: file_name, + body: file_data + } + obj = bucket.put_object(details) + + # When you enable versioning on a S3 bucket, + # writing to an object will create an object version + # instead of replacing the existing object. + # http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/S3/ObjectVersion.html + if obj.kind_of?(Aws::S3::ObjectVersion) + obj = obj.object + end + + # Return public url + obj.public_url.to_s + end + + def delete_file(bucket_name, file_name) + bucket = find_bucket!(bucket_name) + file = bucket.object(file_name) + file.delete + end + + def find_bucket!(bucket_name) + bucket = Aws::S3::Bucket.new(bucket_name, client: client) + raise "Bucket '#{bucket_name}' not found" unless bucket.exists? + + return bucket + end + + private + + attr_reader :secret_access_key + + def client + @client ||= Aws::S3::Client.new( + { + region: region, + credentials: create_credentials + }.compact + ) + end + + def create_credentials + return nil if access_key.to_s.empty? || secret_access_key.to_s.empty? 
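+ # Usage sketch (illustrative comment; the bucket name and environment variables are hypothetical): + # s3 = Fastlane::Helper::S3ClientHelper.new(access_key: ENV["AWS_ACCESS_KEY_ID"], secret_access_key: ENV["AWS_SECRET_ACCESS_KEY"], region: "us-east-1") + # url = s3.upload_file("my-bucket", "builds/app.ipa", File.read("app.ipa"), "public-read") + # Returning nil here lets the AWS SDK fall back to its default credential provider chain, since the nil entry is removed by the compact call above.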
+ + Aws::Credentials.new( + access_key, + secret_access_key + ) + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/sh_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/sh_helper.rb new file mode 100644 index 0000000..722076a --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/sh_helper.rb @@ -0,0 +1,134 @@ +require "open3" + +module Fastlane + module Actions + # Execute a shell command + # This method will output the string and execute it + # Just an alias for sh_no_action + # When running this in tests, it will return the actual command instead of executing it + # @param log [Boolean] should fastlane print out the executed command + # @param error_callback [Block] a callback invoked with the command output if there is a non-zero exit status + def self.sh(*command, log: true, error_callback: nil, &b) + sh_control_output(*command, print_command: log, print_command_output: log, error_callback: error_callback, &b) + end + + def self.sh_no_action(*command, log: true, error_callback: nil, &b) + sh_control_output(*command, print_command: log, print_command_output: log, error_callback: error_callback, &b) + end + + # @param command The command to be executed (variadic) + # @param print_command [Boolean] Should we print the command that's being executed + # @param print_command_output [Boolean] Should we print the command output during execution + # @param error_callback [Block] A block that's called if the command exits with a non-zero status + # @yield [status, result, cmd] The return status of the command, all output from the command and an equivalent shell command + # @yieldparam [Process::Status] status A Process::Status indicating the status of the completed command + # @yieldparam [String] result The complete output to stdout and stderr of the completed command + # @yieldparam [String] cmd A shell command equivalent to the arguments passed + # rubocop: disable Metrics/PerceivedComplexity + def self.sh_control_output(*command, print_command: true, print_command_output: true, error_callback: nil) + print_command = print_command_output = true if $troubleshoot + # Set the encoding first, the user might have set it wrong + previous_encoding = [Encoding.default_external, Encoding.default_internal] + Encoding.default_external = Encoding::UTF_8 + Encoding.default_internal = Encoding::UTF_8 + + # Workaround to support previous Fastlane syntax. + # This has some limitations. For example, it requires the caller to shell escape + # everything because of usages like ["ls -la", "/tmp"] instead of ["ls", "-la", "/tmp"]. + command = [command.first.join(" ")] if command.length == 1 && command.first.kind_of?(Array) + + shell_command = shell_command_from_args(*command) + UI.command(shell_command) if print_command + + result = '' + exit_status = nil + if Helper.sh_enabled? + # The argument list is passed directly to Open3.popen2e, which + # handles the variadic argument list in the same way as Kernel#spawn. + # (http://ruby-doc.org/core-2.4.2/Kernel.html#method-i-spawn) or + # Process.spawn (http://ruby-doc.org/core-2.4.2/Process.html#method-c-spawn). 
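+ # (Clarifying note, added outside the vendored source: with the list form the arguments reach Process.spawn directly and no shell is involved, so the space in "/Applications/Xcode 7.3.1.app" needs no manual escaping; the single-string form below is interpreted by a shell, so the caller must escape it.)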
+ # + # sh "ls -la /Applications/Xcode\ 7.3.1.app" + # sh "ls", "-la", "/Applications/Xcode 7.3.1.app" + # sh({ "FOO" => "Hello" }, "echo $FOO") + Open3.popen2e(*command) do |stdin, io, thread| + io.sync = true + io.each do |line| + UI.command_output(line.strip) if print_command_output + result << line + end + exit_status = thread.value + end + + # Checking Process::Status#exitstatus instead of #success? makes for more + # testable code. (Tests mock exitstatus only.) This is also consistent + # with previous implementations of sh and... probably portable to all + # relevant platforms. + if exit_status.exitstatus != 0 + message = if print_command + "Exit status of command '#{shell_command}' was #{exit_status.exitstatus} instead of 0." + else + "Shell command exited with exit status #{exit_status.exitstatus} instead of 0." + end + message += "\n#{result}" if print_command_output + + if error_callback || block_given? + UI.error(message) + # block notified below, on success or failure + error_callback && error_callback.call(result) + else + UI.shell_error!(message) + end + end + else + result << shell_command # only for the tests + end + + if block_given? + # Avoid yielding nil in tests. $? will be meaningless, but calls to + # it will not crash. There is no Process::Status.new. The alternative + # is to move this inside the sh_enabled? check and not yield in tests. + return yield(exit_status || $?, result, shell_command) + end + result + rescue => ex + raise ex + ensure + Encoding.default_external = previous_encoding.first + Encoding.default_internal = previous_encoding.last + end + # rubocop: enable Metrics/PerceivedComplexity + + # Used to produce a shell command string from a list of arguments that may + # be passed to methods such as Kernel#system, Kernel#spawn and Open3.popen2e + # in order to print the command to the terminal. The same *args are passed + # directly to a system call (Open3.popen2e). This interpretation is not + # used when executing a command. + # + # @param args Any number of arguments used to construct a command + # @raise [ArgumentError] If no arguments passed + # @return [String] A shell command representing the arguments passed in + def self.shell_command_from_args(*args) + raise ArgumentError, "sh requires at least one argument" unless args.count > 0 + + command = "" + + # Optional initial environment Hash + if args.first.kind_of?(Hash) + command = args.shift.map { |k, v| "#{k}=#{v.shellescape}" }.join(" ") + " " + end + + # Support [ "/usr/local/bin/foo", "foo" ], "-x", ... + if args.first.kind_of?(Array) + command += args.shift.first.shellescape + " " + args.shelljoin + command.chomp!(" ") + elsif args.count == 1 && args.first.kind_of?(String) + command += args.first + else + command += args.shelljoin + end + + command + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodebuild_formatter_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodebuild_formatter_helper.rb new file mode 100644 index 0000000..4bc0faf --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodebuild_formatter_helper.rb @@ -0,0 +1,9 @@ +module Fastlane + module Helper + class XcodebuildFormatterHelper + def self.xcbeautify_installed? 
+ return `which xcbeautify`.include?("xcbeautify") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodeproj_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodeproj_helper.rb new file mode 100644 index 0000000..29a7a43 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodeproj_helper.rb @@ -0,0 +1,12 @@ +module Fastlane + module Helper + class XcodeprojHelper + DEPENDENCY_MANAGER_DIRS = ['Pods', 'Carthage'].freeze + + def self.find(dir) + xcodeproj_paths = Dir[File.expand_path(File.join(dir, '**/*.xcodeproj'))] + xcodeproj_paths.reject { |path| %r{/(#{DEPENDENCY_MANAGER_DIRS.join('|')})/.*.xcodeproj} =~ path } + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodes_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodes_helper.rb new file mode 100644 index 0000000..74f1608 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcodes_helper.rb @@ -0,0 +1,28 @@ +module Fastlane + module Helper + class XcodesHelper + def self.read_xcode_version_file + xcode_version_paths = Dir.glob(".xcode-version") + + if xcode_version_paths.first + return File.read(xcode_version_paths.first).strip + end + + return nil + end + + def self.find_xcodes_binary_path + `which xcodes`.strip + end + + module Verify + def self.requirement(req) + UI.user_error!("Version must be specified") if req.nil? || req.to_s.strip.size == 0 + Gem::Requirement.new(req.to_s) + rescue Gem::Requirement::BadRequirementError + UI.user_error!("The requirement '#{req}' is not a valid RubyGems style requirement") + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcversion_helper.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcversion_helper.rb new file mode 100644 index 0000000..0b8b6bc --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/helper/xcversion_helper.rb @@ -0,0 +1,16 @@ +module Fastlane + module Helper + class XcversionHelper + def self.find_xcode(req) + req = Gem::Requirement.new(req.to_s) + + require 'xcode/install' + installer = XcodeInstall::Installer.new + installed = installer.installed_versions.reverse + installed.detect do |xcode| + req.satisfied_by?(Gem::Version.new(xcode.version)) + end + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/junit_generator.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/junit_generator.rb new file mode 100644 index 0000000..147ea38 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/junit_generator.rb @@ -0,0 +1,27 @@ +module Fastlane + class JUnitGenerator + def self.generate(results) + # JUnit file documentation: http://llg.cubic.org/docs/junit/ + # And http://nelsonwells.net/2012/09/how-jenkins-ci-parses-and-displays-junit-output/ + # And http://windyroad.com.au/dl/Open%20Source/JUnit.xsd + + containing_folder = ENV['FL_REPORT_PATH'] || FastlaneCore::FastlaneFolder.path || Dir.pwd + path = File.join(containing_folder, 'report.xml') + + @steps = results + xml_path = File.join(Fastlane::ROOT, "lib/assets/report_template.xml.erb") + xml = ERB.new(File.read(xml_path)).result(binding) # 
https://web.archive.org/web/20160430190141/www.rrn.dk/rubys-erb-templating-system + + xml = xml.gsub('system_', 'system-').delete("\e") # Jenkins can not parse 'ESC' symbol + + begin + File.write(path, xml) + rescue => ex + UI.error(ex) + UI.error("Couldn't save report.xml at path '#{File.expand_path(path)}', make sure you have write access to the containing directory.") + end + + return path + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane.rb new file mode 100644 index 0000000..9389733 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane.rb @@ -0,0 +1,97 @@ +module Fastlane + # Represents a lane + class Lane + attr_accessor :platform + + attr_accessor :name + + # @return [Array] An array containing the description of this lane + # Each item of the array is one line + attr_accessor :description + + attr_accessor :block + + # @return [Boolean] Is that a private lane that can't be called from the CLI? + attr_accessor :is_private + + def initialize(platform: nil, name: nil, description: nil, block: nil, is_private: false) + UI.user_error!("description must be an array") unless description.kind_of?(Array) + UI.user_error!("lane name must not contain any spaces") if name.to_s.include?(" ") + UI.user_error!("lane name must start with :") unless name.kind_of?(Symbol) + + self.class.verify_lane_name(name) + + self.platform = platform + self.name = name + self.description = description + self.block = block + self.is_private = is_private + end + + # Execute this lane + def call(parameters) + block.call(parameters || {}) + end + + # @return [String] The lane + name of the lane. If there is no platform, it will only be the lane name + def pretty_name + [platform, name].reject(&:nil?).join(' ') + end + + class << self + # Makes sure the lane name is valid + def verify_lane_name(name) + if self.deny_list.include?(name.to_s) + UI.error("Lane name '#{name}' is invalid! 
Invalid names are #{self.deny_list.join(', ')}.") + UI.user_error!("Lane name '#{name}' is invalid") + end + + if self.gray_list.include?(name.to_sym) + UI.error("------------------------------------------------") + UI.error("Lane name '#{name}' should not be used because it is the name of a fastlane tool") + UI.error("It is recommended to not use '#{name}' as the name of your lane") + UI.error("------------------------------------------------") + # We still allow it, because we're nice + # Otherwise we might break existing setups + return + end + + self.ensure_name_not_conflicts(name.to_s) + end + + def deny_list + %w( + run + init + new_action + lanes + list + docs + action + actions + enable_auto_complete + new_plugin + add_plugin + install_plugins + update_plugins + search_plugins + help + env + update_fastlane + ) + end + + def gray_list + Fastlane::TOOLS + end + + def ensure_name_not_conflicts(name) + # First, check if there is a predefined method in the actions folder + return unless Actions.action_class_ref(name) + UI.error("------------------------------------------------") + UI.error("Name of the lane '#{name}' is already taken by the action named '#{name}'") + UI.error("------------------------------------------------") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_list.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_list.rb new file mode 100644 index 0000000..0b37d53 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_list.rb @@ -0,0 +1,137 @@ +module Fastlane + class LaneList + # Print out the result of `generate` + SWIFT_FUNCTION_REGEX = /\s*func\s*(\w*)\s*\((.*)\)\s*/ + SWIFT_DESC_REGEX = /\s*desc\s*\(\s*"(.*)"\s*\)\s*/ + def self.output(path) + puts(generate(path)) + + puts("Execute using `fastlane [lane_name]`".yellow) + end + + def self.generate_swift_lanes(path) + return unless (path || '').length > 0 + UI.user_error!("Could not find Fastfile.swift at path '#{path}'") unless File.exist?(path) + path = File.expand_path(path) + lane_content = File.read(path) + + current_lane_name = nil + lanes_by_name = {} + + lane_content.split("\n").reject(&:empty?).each do |line| + line.strip! 
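+ # Illustrative sketch (editorial; lane name and description are assumed): a
+ # Fastfile.swift fragment like
+ #   func betaLane() {
+ #     desc("Push a new beta build")
+ #   }
+ # is collected below as lane :betaLane with description ["Push a new beta build"],
+ # since SWIFT_FUNCTION_REGEX only accepts `func` names ending in "lane" and
+ # SWIFT_DESC_REGEX matches the `desc("...")` call that follows.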
+ if line.start_with?("func") && (current_lane_name = self.lane_name_from_swift_line(potential_lane_line: line)) + lanes_by_name[current_lane_name] = Fastlane::Lane.new(platform: nil, name: current_lane_name.to_sym, description: []) + elsif line.start_with?("desc") + lane_description = self.desc_entry_for_swift_lane(named: current_lane_name, potential_desc_line: line) + unless lane_description + next + end + + lanes_by_name[current_lane_name].description = [lane_description] + current_lane_name = nil + end + end + # "" because that will be interpreted as general platform + # (we don't detect platform right now) + return { "" => lanes_by_name } + end + + def self.desc_entry_for_swift_lane(named: nil, potential_desc_line: nil) + unless named + return nil + end + + desc_match = SWIFT_DESC_REGEX.match(potential_desc_line) + unless desc_match + return nil + end + + return desc_match[1] + end + + def self.lane_name_from_swift_line(potential_lane_line: nil) + function_name_match = SWIFT_FUNCTION_REGEX.match(potential_lane_line) + unless function_name_match + return nil + end + + unless function_name_match[1].downcase.end_with?("lane") + return nil + end + + return function_name_match[1] + end + + def self.generate(path) + lanes = {} + if FastlaneCore::FastlaneFolder.swift? + lanes = generate_swift_lanes(path) + else + ff = Fastlane::FastFile.new(path) + lanes = ff.runner.lanes + end + + output = "" + + all_keys = lanes.keys.reject(&:nil?) + all_keys.unshift(nil) # because we want root elements on top. always! They have key nil + + all_keys.each do |platform| + next if (lanes[platform] || []).count == 0 + + plat_text = platform + plat_text = "general" if platform.to_s.empty? + output += "\n--------- #{plat_text}---------\n".yellow + + value = lanes[platform] + next unless value + + value.each do |lane_name, lane| + next if lane.is_private + + output += "----- fastlane #{lane.pretty_name}".green + if lane.description.count > 0 + output += "\n" + lane.description.join("\n") + "\n\n" + else + output += "\n\n" + end + end + end + + output + end + + def self.output_json(path) + puts(JSON.pretty_generate(self.generate_json(path))) + end + + # Returns a hash + def self.generate_json(path) + output = {} + return output if path.nil? 
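+ # Editorial sketch of the returned structure (platform and lane names assumed):
+ #   { "ios" => { :beta => { description: "Push a new beta build" } } }
+ # Lanes without a platform land under the nil key, and private lanes are
+ # filtered out in the loop below.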
+ ff = Fastlane::FastFile.new(path) + + all_keys = ff.runner.lanes.keys + + all_keys.each do |platform| + next if (ff.runner.lanes[platform] || []).count == 0 + + output[platform] ||= {} + + value = ff.runner.lanes[platform] + next unless value + + value.each do |lane_name, lane| + next if lane.is_private + + output[platform][lane_name] = { + description: lane.description.join("\n") + } + end + end + + return output + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager.rb new file mode 100644 index 0000000..78139d9 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager.rb @@ -0,0 +1,131 @@ +require_relative 'lane_manager_base.rb' + +module Fastlane + class LaneManager < LaneManagerBase + # @param platform The name of the platform to execute + # @param lane_name The name of the lane to execute + # @param parameters [Hash] The parameters passed from the command line to the lane + # @param env Dot Env Information + # @param A custom Fastfile path, this is used by fastlane.ci + def self.cruise_lane(platform, lane, parameters = nil, env = nil, fastfile_path = nil) + UI.user_error!("lane must be a string") unless lane.kind_of?(String) || lane.nil? + UI.user_error!("platform must be a string") unless platform.kind_of?(String) || platform.nil? + UI.user_error!("parameters must be a hash") unless parameters.kind_of?(Hash) || parameters.nil? + + ff = Fastlane::FastFile.new(fastfile_path || FastlaneCore::FastlaneFolder.fastfile_path) + + is_platform = false + begin + is_platform = ff.is_platform_block?(lane) + rescue # rescue, because this raises an exception if it can't be found at all + end + + unless is_platform + # maybe the user specified a default platform + # We'll only do this, if the lane specified isn't a platform, as we want to list all platforms then + + # Make sure that's not a lane without a platform + unless ff.runner.available_lanes.include?(lane) + platform ||= Actions.lane_context[Actions::SharedValues::DEFAULT_PLATFORM] + end + end + + if !platform && lane + # Either, the user runs a specific lane in root or want to auto complete the available lanes for a platform + # e.g. `fastlane ios` should list all available iOS actions + if ff.is_platform_block?(lane) + platform = lane + lane = nil + end + end + + platform, lane = choose_lane(ff, platform) unless lane + + started = Time.now + e = nil + begin + ff.runner.execute(lane, platform, parameters) + rescue NameError => ex + print_lane_context + print_error_line(ex) + e = ex + rescue Exception => ex # rubocop:disable Lint/RescueException + # We also catch Exception, since the implemented action might send a SystemExit signal + # (or similar). We still want to catch that, since we want properly finish running fastlane + # Tested with `xcake`, which throws a `Xcake::Informative` object + + print_lane_context + print_error_line(ex) + UI.error(ex.to_s) if ex.kind_of?(StandardError) # we don't want to print things like 'system exit' + e = ex + end + + # After running the lanes, since skip_docs might be somewhere in-between + Fastlane::DocsGenerator.run(ff) unless skip_docs? + + duration = ((Time.now - started) / 60.0).round + finish_fastlane(ff, duration, e) + + return ff + end + + def self.skip_docs? + Helper.test? 
|| FastlaneCore::Env.truthy?("FASTLANE_SKIP_DOCS") + end + + # Lane chooser if user didn't provide a lane + # @param platform: is probably nil, but user might have called `fastlane android`, and only wants to list those actions + def self.choose_lane(ff, platform) + available = [] + + # nil is the key for lanes that are not under a specific platform + lane_platforms = [nil] + Fastlane::SupportedPlatforms.all + lane_platforms.each do |p| + available += ff.runner.lanes[p].to_a.reject { |lane| lane.last.is_private } + end + + if available.empty? + UI.user_error!("It looks like you don't have any lanes to run just yet. Check out how to get started here: https://github.com/fastlane/fastlane 🚀") + end + + rows = [] + available.each_with_index do |lane, index| + rows << [index + 1, lane.last.pretty_name, lane.last.description.join("\n")] + end + + rows << [0, "cancel", "No selection, exit fastlane!"] + + require 'terminal-table' + + table = Terminal::Table.new( + title: "Available lanes to run", + headings: ['Number', 'Lane Name', 'Description'], + rows: FastlaneCore::PrintTable.transform_output(rows) + ) + + UI.message("Welcome to fastlane! Here's what your app is set up to do:") + + puts(table) + + fastlane_command = Helper.bundler? ? "bundle exec fastlane" : "fastlane" + i = UI.input("Which number would you like to run?") + + i = i.to_i - 1 + if i >= 0 && available[i] + selection = available[i].last.pretty_name + UI.important("Running lane `#{selection}`. Next time you can do this by directly typing `#{fastlane_command} #{selection}` 🚀.") + platform = selection.split(' ')[0] + lane_name = selection.split(' ')[1] + + unless lane_name # no specific platform, just a root lane + lane_name = platform + platform = nil + end + + return platform, lane_name # yeah + else + UI.user_error!("Run `#{fastlane_command}` the next time you need to build, test or release your app 🚀") + end + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager_base.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager_base.rb new file mode 100644 index 0000000..5fe6f9b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/lane_manager_base.rb @@ -0,0 +1,92 @@ +module Fastlane + # Base class for all LaneManager classes + # Takes care of all common things like printing the lane description tables and loading .env files + class LaneManagerBase + def self.skip_docs? + Helper.test? || FastlaneCore::Env.truthy?("FASTLANE_SKIP_DOCS") + end + + # All the finishing up that needs to be done + def self.finish_fastlane(ff, duration, error, skip_message: false) + # Sometimes we don't have a fastfile because we're using Fastfile.swift + unless ff.nil? + ff.runner.did_finish + end + + # Finished with all the lanes + Fastlane::JUnitGenerator.generate(Fastlane::Actions.executed_actions) + print_table(Fastlane::Actions.executed_actions) + + Fastlane::PluginUpdateManager.show_update_status + + if error + UI.error('fastlane finished with errors') unless skip_message + raise error + elsif duration > 5 + UI.success("fastlane.tools just saved you #{duration} minutes! 
🎉") unless skip_message + else + UI.success('fastlane.tools finished successfully 🎉') unless skip_message + end + end + + # Print a table as summary of the executed actions + def self.print_table(actions) + return if actions.count == 0 + return if FastlaneCore::Env.truthy?('FASTLANE_SKIP_ACTION_SUMMARY') # User disabled table output + + require 'terminal-table' + + rows = [] + actions.each_with_index do |current, i| + is_error_step = !current[:error].to_s.empty? + + name = current[:name][0..60] + name = name.red if is_error_step + index = i + 1 + index = "đŸ’Ĩ" if is_error_step + rows << [index, name, current[:time].to_i] + end + + puts("") + puts(Terminal::Table.new( + title: "fastlane summary".green, + headings: ["Step", "Action", "Time (in s)"], + rows: FastlaneCore::PrintTable.transform_output(rows) + )) + puts("") + end + + def self.print_lane_context + return if Actions.lane_context.empty? + + if FastlaneCore::Globals.verbose? + UI.important('Lane Context:'.yellow) + UI.message(Actions.lane_context) + return + end + + # Print a nice table unless in FastlaneCore::Globals.verbose? mode + rows = Actions.lane_context.collect do |key, content| + [key, content.to_s] + end + + require 'terminal-table' + puts(Terminal::Table.new({ + title: "Lane Context".yellow, + rows: FastlaneCore::PrintTable.transform_output(rows) + })) + end + + def self.print_error_line(ex) + error_line = ex.backtrace.first + return if error_line.nil? + + error_line = error_line.match("Fastfile:(\\d+):") + return unless error_line + + line = error_line[1] + UI.error("Error in your Fastfile at line #{line}") + UI.content_error(File.read(FastlaneCore::FastlaneFolder.fastfile_path, encoding: "utf-8"), line) + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/markdown_table_formatter.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/markdown_table_formatter.rb new file mode 100644 index 0000000..8b7816f --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/markdown_table_formatter.rb @@ -0,0 +1,62 @@ +module Fastlane + class MarkdownTableFormatter + # taken from: https://github.com/benbalter/markdown-table-formatter + def initialize(string, header = true) + @doc = string + @header = header + end + + # converts the markdown string into an array of arrays + def parse + @table = [] + rows = @doc.split(/\r?\n/) + rows.each do |row| + row_array = row.split("|") + row_array.each(&:strip!) 
+ @table.push(row_array) + end + @table.delete_at(1) if @header # strip header separator + @table + end + + def table + @table ||= parse + end + + def column_width(column) + width = 0 + table.each do |row| + length = row[column].strip.length + width = length if length > width + end + width + end + + def pad(string, length) + string.strip.ljust(length, ' ') + end + + def separator(length) + "".ljust(length, '-') + end + + def header_separator_row + output = [] + [*0...table.first.length].each do |column| + output.push(separator(column_width(column))) + end + output + end + + def to_md + output = "" + t = table.clone + t.insert(1, header_separator_row) if @header + t.each_with_index do |row, index| + row.map!.with_index { |cell_row, index_row| pad(cell_row, column_width(index_row)) } + output += "#{row.join(' | ').lstrip} |\n" + end + output + end + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/new_action.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/new_action.rb new file mode 100644 index 0000000..c178bb7 --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/new_action.rb @@ -0,0 +1,47 @@ +module Fastlane + # Guides the new user through creating a new action + module NewAction + def self.run(new_action_name: nil) + name = new_action_name && check_action_name_from_args(new_action_name) ? new_action_name : fetch_name + generate_action(name) + end + + def self.fetch_name + puts("Must be lower case, and use a '_' between words. Do not use '.'".green) + puts("examples: 'testflight', 'upload_to_s3'".green) + name = UI.input("Name of your action: ") + until name_valid?(name) + puts("Name is invalid. Please ensure the name is all lowercase, free of spaces and without special characters! Try again.") + name = UI.input("Name of your action: ") + end + name + end + + def self.generate_action(name) + template = File.read("#{Fastlane::ROOT}/lib/assets/custom_action_template.rb") + template.gsub!('[[NAME]]', name) + template.gsub!('[[NAME_UP]]', name.upcase) + template.gsub!('[[NAME_CLASS]]', name.fastlane_class + 'Action') + + actions_path = File.join((FastlaneCore::FastlaneFolder.path || Dir.pwd), 'actions') + FileUtils.mkdir_p(actions_path) unless File.directory?(actions_path) + + path = File.join(actions_path, "#{name}.rb") + File.write(path, template) + UI.success("Created new action file '#{path}'. Edit it to implement your custom action.") + end + + def self.check_action_name_from_args(new_action_name) + if name_valid?(new_action_name) + new_action_name + else + puts("Name is invalid. Please ensure the name is all lowercase, free of spaces and without special characters! Try again.") + end + end + + def self.name_valid?(name) + name =~ /^[a-z0-9_]+$/ + end + private_class_method :name_valid? + end +end diff --git a/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/notification/slack.rb b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/notification/slack.rb new file mode 100644 index 0000000..b31058b --- /dev/null +++ b/vendor/bundle/ruby/2.7.0/gems/fastlane-2.212.2/fastlane/lib/fastlane/notification/slack.rb @@ -0,0 +1,56 @@ +module Fastlane + module Notification + class Slack + def initialize(webhook_url) + @webhook_url = webhook_url + @client = Faraday.new do |conn| + conn.use(Faraday::Response::RaiseError) + end + end + + # Overriding channel, icon_url and username is only supported in legacy incoming webhook. 
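+ # (Editorial aside: newer Slack app webhooks ignore these per-message
+ # overrides, so they are only honored by legacy custom-integration webhooks.)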
+ # Also note that the use of attachments has been discouraged by Slack, in favor of Block Kit. + # https://api.slack.com/legacy/custom-integrations/messaging/webhooks + def post_to_legacy_incoming_webhook(channel:, username:, attachments:, link_names:, icon_url:) + @client.post(@webhook_url) do |request| + request.headers['Content-Type'] = 'application/json' + request.body = { + channel: channel, + username: username, + icon_url: icon_url, + attachments: attachments, + link_names: link_names + }.to_json + end + end + + # This class was inspired by `LinkFormatter` in `slack-notifier` gem + # https://github.com/stevenosloan/slack-notifier/blob/4bf6582663dc9e5070afe3fdc42d67c14a513354/lib/slack-notifier/util/link_formatter.rb + class LinkConverter + HTML_PATTERN = %r{<a.*?href=['"](?<link>#{URI.regexp})['"].*?>(?